From 7a359497cd83babdc5d3244a390cb775412105e6 Mon Sep 17 00:00:00 2001
From: Patrick Rudolph <patrick.rudolph@9elements.com>
Date: Mon, 30 Nov 2020 15:56:59 +0100
Subject: [PATCH] cpu/x86/64bit: Add code to call function in protected mode

This adds a helper function for long mode to call some code in
protected mode and return back to long mode.

The primary use case is to run binaries that have been compiled for
protected mode, like the FSP or MRC binaries.

Tested on Intel Skylake. The FSP-M runs and returns without error while
coreboot runs in long mode.

Change-Id: I22af2d224b546c0be9e7295330b4b6602df106d6
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/48175
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
---
 src/arch/x86/include/mode_switch.h | 70 ++++++++++++++++++++++++++++++
 src/cpu/x86/64bit/mode_switch.S    | 70 ++++++++++++++++++++++++++++++
 src/cpu/x86/Makefile.inc           |  3 ++
 3 files changed, 143 insertions(+)
 create mode 100644 src/arch/x86/include/mode_switch.h
 create mode 100644 src/cpu/x86/64bit/mode_switch.S

diff --git a/src/arch/x86/include/mode_switch.h b/src/arch/x86/include/mode_switch.h
new file mode 100644
index 0000000000..0c46da5c62
--- /dev/null
+++ b/src/arch/x86/include/mode_switch.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <rules.h>
+#include <stdint.h>
+
+#if ENV_X86_64
+int protected_mode_call_narg(uint32_t arg_count,
+			     uint32_t func_ptr,
+			     uint32_t opt_arg1,
+			     uint32_t opt_arg2);
+
+/*
+ * Drops into protected mode and calls the function, which must have been compiled for x86_32.
+ * After the function returns it enters long mode again.
+ * The function pointer destination must be below 4GiB in physical memory.
+ *
+ * The called function doesn't have arguments and returns an int.
+ */
+static inline int protected_mode_call(void *func)
+{
+	return protected_mode_call_narg(0, (uintptr_t)func, 0, 0);
+}
+
+/*
+ * Drops into protected mode and calls the function, which must have been compiled for x86_32.
+ * After the function returns it enters long mode again.
+ * The function pointer destination must be below 4GiB in physical memory.
+ * Only the lower 32bits of the argument are passed to the called function.
+ *
+ * The called function have one argument and returns an int.
+ */
+static inline int protected_mode_call_1arg(void *func, uint32_t arg1)
+{
+	return protected_mode_call_narg(1, (uintptr_t)func, arg1, 0);
+}
+
+/*
+ * Drops into protected mode and calls the function, which must have been compiled for x86_32.
+ * After the function returns it enters long mode again.
+ * The function pointer destination must be below 4GiB in physical memory.
+ * Only the lower 32bits of the argument are passed to the called function.
+ *
+ * The called function has two arguments and returns an int.
+ */
+static inline int protected_mode_call_2arg(void *func, uint32_t arg1, uint32_t arg2)
+{
+	return protected_mode_call_narg(2, (uintptr_t)func, arg1, arg2);
+}
+#else
+static inline int protected_mode_call(void *func)
+{
+	int (*doit)(void) = func;
+
+	return doit();
+}
+
+static inline int protected_mode_call_1arg(void *func, uint32_t arg1)
+{
+	int (*doit)(uint32_t arg1) = func;
+
+	return doit(arg1);
+}
+
+static inline int protected_mode_call_2arg(void *func, uint32_t arg1, uint32_t arg2)
+{
+	int (*doit)(uint32_t arg1, uint32_t arg2) = func;
+
+	return doit(arg1, arg2);
+}
+#endif
diff --git a/src/cpu/x86/64bit/mode_switch.S b/src/cpu/x86/64bit/mode_switch.S
new file mode 100644
index 0000000000..eea104bcf3
--- /dev/null
+++ b/src/cpu/x86/64bit/mode_switch.S
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+.text
+.code64
+	.section ".text.protected_mode_call", "ax", @progbits
+	.globl protected_mode_call_narg
+protected_mode_call_narg:
+
+	push	%rbp
+	mov	%rsp, %rbp
+	/* Preserve registers */
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	/* Arguments to stack */
+	push	%rdi
+	push	%rsi
+	push	%rdx
+	push	%rcx
+
+	#include <cpu/x86/64bit/exit32.inc>
+
+	movl	-48(%ebp), %eax	/* Argument count */
+	movl	-64(%ebp), %edx	/* Argument 0 */
+	movl	-72(%ebp), %ecx	/* Argument 1 */
+
+	/* Align the stack */
+	andl	$0xFFFFFFF0, %esp
+	test	%eax, %eax
+	je	1f	/* Zero arguments */
+
+	subl	$1, %eax
+	test	%eax, %eax
+	je	2f	/* One argument */
+
+	/* Two arguments */
+	subl	$8, %esp
+	pushl	%ecx	/* Argument 1 */
+	pushl	%edx	/* Argument 0 */
+	jmp	1f
+2:
+	subl	$12, %esp
+	pushl	%edx	/* Argument 0 */
+
+1:
+	movl	-56(%ebp), %ebx	/* Function to call */
+	call	*%ebx
+	movl	%eax, %ebx
+
+	/* Preserves ebx */
+	#include <cpu/x86/64bit/entry64.inc>
+
+	/* Place return value in rax */
+	movl	%ebx, %eax
+
+	/* Restore registers */
+	mov	-40(%rbp), %r15
+	mov	-32(%rbp), %r14
+	mov	-24(%rbp), %r13
+	mov	-16(%rbp), %r12
+	mov	-8(%rbp), %rbx
+
+	/* Restore stack pointer */
+	mov	%rbp, %rsp
+	pop	%rbp
+
+	ret
diff --git a/src/cpu/x86/Makefile.inc b/src/cpu/x86/Makefile.inc
index 2f789f7581..cd73b72fe3 100644
--- a/src/cpu/x86/Makefile.inc
+++ b/src/cpu/x86/Makefile.inc
@@ -1,4 +1,7 @@
 subdirs-y += pae
+
+all-$(CONFIG_ARCH_ALL_STAGES_X86_64) += 64bit/mode_switch.S
+
 subdirs-$(CONFIG_PARALLEL_MP) += name
 ramstage-$(CONFIG_PARALLEL_MP) += mp_init.c
 ramstage-y += backup_default_smm.c