arch/x86/boot: Jump to payload in protected mode

*   On ARCH_RAMSTAGE_X86_64, jump to the payload in protected mode.
*   Add a helper function to jump to arbitrary code in protected mode,
    similar to the real mode call handler (see the flow sketch below).
*   Doesn't affect existing x86_32 code.
*   Add a macro that casts a pointer to uint32_t and dies if the value
    would overflow on conversion.
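
As a rough sketch, the resulting hand-off on x86_64 ramstage looks like
this (illustrative annotation only; the authoritative code is in the
diffs below):

    /*
     * arch_prog_run(prog)
     *   -> pointer_to_uint32_safe(entry), pointer_to_uint32_safe(arg)
     *        dies if either value lives above 4GiB
     *   -> protected_mode_jump(entry, arg)
     *        drops from long mode to 32-bit protected mode and jumps
     *        to the payload; it never returns
     */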

Tested on QEMU Q35 using SeaBIOS as payload.
Tested on Lenovo T410 with additional x86_64 patches.

Change-Id: I6552ac30f1b6205e08e16d251328e01ce3fbfd14
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/30118
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Author: Patrick Rudolph, 2018-12-09 10:48:59 +01:00
parent ad7b2e23ab
commit e563815e05
6 changed files with 74 additions and 1 deletion


@@ -15,6 +15,8 @@ In order to add support for x86_64 the following assumptions are made:
 * The high dword of pointers is always zero
 * The reference implementation is qemu
 * The CPU supports 1GiB hugepages
+* x86 payloads are loaded below 4GiB in physical memory and are jumped
+  to in *protected mode*
 
 ## Assumptions for all stages using the reference implementation
 * 0-4GiB are identity mapped using 2MiB-pages as WB
@@ -47,7 +49,7 @@ At the moment *$n* is 4, which results in identity mapping the lower 4 GiB.
 * Add assembly code for long mode - *DONE*
 * Add assembly code for SMM - *DONE*
 * Add assembly code for postcar stage - *DONE*
-* Add assembly code to return to protected mode - *TODO*
+* Add assembly code to return to protected mode - *DONE*
 * Implement reference code for mainboard `emulation/qemu-q35` - *TODO*
 
 ## Future work


@@ -245,6 +245,7 @@ ramstage-$(CONFIG_ACPI_BERT) += acpi_bert_storage.c
 ramstage-y += boot.c
 ramstage-y += post.c
 ramstage-y += c_start.S
+ramstage-y += c_exit.S
 ramstage-y += cpu.c
 ramstage-y += cpu_common.c
 ramstage-y += ebda.c


@@ -1,10 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 
+#include <arch/boot/boot.h>
 #include <commonlib/helpers.h>
 #include <console/console.h>
 #include <program_loading.h>
 #include <ip_checksum.h>
 #include <symbols.h>
+#include <assert.h>
 
 int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
 {
@@ -19,6 +21,13 @@ int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
 
 void arch_prog_run(struct prog *prog)
 {
+#if ENV_RAMSTAGE && defined(__x86_64__)
+	const uint32_t arg = pointer_to_uint32_safe(prog_entry_arg(prog));
+	const uint32_t entry = pointer_to_uint32_safe(prog_entry(prog));
+
+	/* On x86 coreboot payloads expect to be called in protected mode */
+	protected_mode_jump(entry, arg);
+#else
 #ifdef __x86_64__
 	void (*doit)(void *arg);
 #else
@@ -27,4 +36,5 @@ void arch_prog_run(struct prog *prog)
 #endif
 	doit = prog_entry(prog);
 	doit(prog_entry_arg(prog));
+#endif
 }
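
Read together, the two hunks above select the hand-off path per stage
and architecture; a summary (our reading of the code, not text from
the commit):

    /*
     * Paths through arch_prog_run() after this change:
     *   x86_32 (any stage)       -> direct call: doit(arg)
     *   x86_64, not ramstage     -> direct 64-bit call: doit(arg)
     *   x86_64 ramstage payload  -> protected_mode_jump(entry, arg)
     */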

src/arch/x86/c_exit.S (new file, 38 lines)

@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <arch/ram_segs.h>
+#include <cpu/x86/msr.h>
+#include <cpu/x86/cr.h>
+
+
+#ifdef __x86_64__
+
+	/*
+	 * Functions to handle mode switches from long mode to protected
+	 * mode.
+	 */
+	.text
+	.code64
+	.section ".text.protected_mode_jump", "ax", @progbits
+	.globl protected_mode_jump
+protected_mode_jump:
+	push	%rbp
+	mov	%rsp, %rbp
+
+	/* Arguments to stack */
+	push	%rdi
+	push	%rsi
+
+	#include <cpu/x86/64bit/exit32.inc>
+
+	movl	-8(%ebp), %eax	/* Function to call */
+	movl	-16(%ebp), %ebx	/* Argument 0 */
+
+	/* Align the stack */
+	andl	$0xFFFFFFF0, %esp
+	subl	$12, %esp
+	pushl	%ebx	/* Argument 0 */
+
+	jmp	*%eax
+
+#endif
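
A sketch of the stack layout the movl offsets rely on (our annotation,
assuming the usual System V argument registers; not part of the
commit):

    /*
     *   %rbp +  0 : saved %rbp
     *   %rbp -  8 : %rdi = func_ptr  -> read back as -8(%ebp)
     *   %rbp - 16 : %rsi = argument  -> read back as -16(%ebp)
     *
     * After exit32.inc drops to protected mode, %ebp still holds the
     * low 32 bits of %rbp; that is safe because x86_64 coreboot
     * assumes all pointers live below 4GiB. The argument is then
     * pushed so it sits on top of the 16-byte-aligned stack when the
     * payload is entered via jmp.
     */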


@@ -7,4 +7,15 @@
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_386
 
+#include <types.h>
+
+/*
+ * Jump to function in protected mode.
+ * @arg func_ptr Function to jump to in protected mode
+ * @arg argument Argument to pass to the called function
+ *
+ * @noreturn
+ */
+void protected_mode_jump(uint32_t func_ptr,
+		uint32_t argument);
+
 #endif /* ASM_I386_BOOT_H */


@@ -80,4 +80,15 @@ extern void _dead_code_assertion_failed(void) __attribute__((noreturn));
 	*(type *)(uintptr_t)0; \
 })
 
+#ifdef __x86_64__
+#define pointer_to_uint32_safe(x) ({ \
+	if ((uintptr_t)(x) > 0xffffffffUL) \
+		die("Cast from pointer to uint32_t overflows"); \
+	(uint32_t)(uintptr_t)(x); \
+})
+#else
+#define pointer_to_uint32_safe(x) ({ \
+	(uint32_t)(uintptr_t)(x); \
+})
+#endif
 #endif // __ASSERT_H__
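
A minimal usage sketch of the new macro (hypothetical addresses,
assuming die() does not return):

    void *ok = (void *)(uintptr_t)0x7ffff000;       /* below 4GiB */
    uint32_t lo = pointer_to_uint32_safe(ok);       /* lo == 0x7ffff000 */

    void *high = (void *)(uintptr_t)0x100000000ULL; /* at the 4GiB boundary */
    uint32_t hi = pointer_to_uint32_safe(high);     /* die()s on x86_64 */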