arch/x86/c_start.S: Add proper x86_64 code

Don't truncate the upper address bits in the assembly code, and thus
allow ramstage to be loaded above 4GiB.
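
Roughly, the difference (illustrative sketch only, not taken from this
change; _example_sym is a hypothetical stand-in for a linker symbol
such as _estack): the 32-bit instruction forms only have room for a
32-bit address, and in long mode a write to a 32-bit register clears
bits 63:32 of the full register, so a stack placed above 4GiB gets
truncated:

	movl	$_example_sym, %esp	/* 32-bit immediate: only the low 32 address bits fit */
	andl	$0xFFFFFFF0, %esp	/* writing %esp zero-extends, clearing bits 63:32 of %rsp */

	movabs	$_example_sym, %rsp	/* 64-bit immediate: the full address is preserved */
	movq	$0xFFFFFFFFFFFFFFF0, %rax
	and	%rax, %rsp		/* 64-bit mask: aligns without touching the upper bits */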

Tested on QEMU with cbmem_top set to TOUUD.

Change-Id: Ifc9b45f69d0b7534b2faacaad0d099cef2667478
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Co-authored-by: Benjamin Doron <benjamin.doron@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/59874
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
commit 1c4c7ad1e5 (parent 2fb1928b3c)
Author: Patrick Rudolph <patrick.rudolph@9elements.com>
Date: 2021-12-03 17:32:07 +01:00
Committer: Lean Sheng Tan
1 changed file with 24 additions and 0 deletions

@@ -59,6 +59,24 @@ _start:
	leal _stack, %edi
#endif
#if ENV_X86_64
	/** poison the stack. Code should not count on the
	 * stack being full of zeros. This stack poisoning
	 * recently uncovered a bug in the broadcast SIPI
	 * code.
	 */
	movabs $_estack, %rcx
	sub %rdi, %rcx
	shr $3, %rcx /* it is 64 bit aligned, right? */
	movq $0xDEADBEEFDEADBEEF, %rax
	rep
	stosq
	/* Set new stack with enforced alignment. */
	movabs $_estack, %rsp
	movq $(0xfffffffffffffff0), %rax
	and %rax, %rsp
#else
	/** poison the stack. Code should not count on the
	 * stack being full of zeros. This stack poisoning
	 * recently uncovered a bug in the broadcast SIPI
@@ -74,6 +92,7 @@ _start:
	/* Set new stack with enforced alignment. */
	movl $_estack, %esp
	andl $(0xfffffff0), %esp
#endif
	/*
	 * Now we are finished. Memory is up, data is copied and
@@ -82,7 +101,12 @@ _start:
	 */
	post_code(POSTCODE_PRE_HARDWAREMAIN) /* post 6e */
#if ENV_X86_64
	movq $0xFFFFFFFFFFFFFFF0, %rax
	and %rax, %rsp
#else
	andl $0xFFFFFFF0, %esp
#endif
#if CONFIG(ASAN_IN_RAMSTAGE)
	call asan_init
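
A note on the ENV_X86_64 guards (illustrative, not part of the change):
x86_64 zero-extends every write to a 32-bit register into the full
64-bit register, so the old 32-bit sequence would wipe the upper half
of %rsp. For example, assuming %rsp held 0x10000F018 (just above 4GiB):

	andl	$0xFFFFFFF0, %esp	/* %rsp becomes 0x000000000000F010: bits 63:32 cleared */

while the 64-bit mask keeps the address intact:

	movq	$0xFFFFFFFFFFFFFFF0, %rax
	and	%rax, %rsp		/* %rsp becomes 0x000000010000F010 */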