cpu/x86/64bit: Add code to call function in protected mode

This adds a helper function for long mode to call code in protected
mode and then return to long mode.

The primary use case is to run binaries that have been compiled for
protected mode, like the FSP or MRC binaries.

Tested on Intel Skylake. The FSP-M runs and returns without error while
coreboot runs in long mode.

Change-Id: I22af2d224b546c0be9e7295330b4b6602df106d6
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/48175
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
This commit is contained in:
Patrick Rudolph 2020-11-30 15:56:59 +01:00 committed by Patrick Georgi
parent 22b42a87de
commit 7a359497cd
3 changed files with 143 additions and 0 deletions

View File

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <stddef.h>
#include <stdint.h>
#if ENV_X86_64
/*
 * Worker implemented in 64bit/mode_switch.S.
 * Drops from long mode into protected mode, calls the 32-bit function at
 * func_ptr with up to two 32-bit arguments, then re-enters long mode and
 * returns the called function's return value.
 *
 * @param arg_count Number of arguments to pass (0, 1 or 2)
 * @param func_ptr  32-bit physical address of the function to call
 * @param opt_arg1  First argument (ignored if arg_count < 1)
 * @param opt_arg2  Second argument (ignored if arg_count < 2)
 * @return Return value of the called function
 */
int protected_mode_call_narg(uint32_t arg_count,
			     uint32_t func_ptr,
			     uint32_t opt_arg1,
			     uint32_t opt_arg2);

/*
 * Drops into protected mode and calls the function, which must have been compiled for x86_32.
 * After the function returns it enters long mode again.
 * The function pointer destination must be below 4GiB in physical memory.
 *
 * The called function doesn't have arguments and returns an int.
 */
static inline int protected_mode_call(void *func)
{
	return protected_mode_call_narg(0, (uintptr_t)func, 0, 0);
}

/*
 * Drops into protected mode and calls the function, which must have been compiled for x86_32.
 * After the function returns it enters long mode again.
 * The function pointer destination must be below 4GiB in physical memory.
 * Only the lower 32 bits of the argument are passed to the called function.
 *
 * The called function takes one argument and returns an int.
 */
static inline int protected_mode_call_1arg(void *func, uint32_t arg1)
{
	return protected_mode_call_narg(1, (uintptr_t)func, arg1, 0);
}

/*
 * Drops into protected mode and calls the function, which must have been compiled for x86_32.
 * After the function returns it enters long mode again.
 * The function pointer destination must be below 4GiB in physical memory.
 * Only the lower 32 bits of the arguments are passed to the called function.
 *
 * The called function takes two arguments and returns an int.
 */
static inline int protected_mode_call_2arg(void *func, uint32_t arg1, uint32_t arg2)
{
	return protected_mode_call_narg(2, (uintptr_t)func, arg1, arg2);
}
#else
/* Already in protected mode (x86_32): no mode switch needed, call directly. */
static inline int protected_mode_call(void *func)
{
	int (*doit)(void) = func;

	return doit();
}

/* Already in protected mode (x86_32): call directly with one argument. */
static inline int protected_mode_call_1arg(void *func, uint32_t arg1)
{
	int (*doit)(uint32_t arg1) = func;

	return doit(arg1);
}

/* Already in protected mode (x86_32): call directly with two arguments. */
static inline int protected_mode_call_2arg(void *func, uint32_t arg1, uint32_t arg2)
{
	int (*doit)(uint32_t arg1, uint32_t arg2) = func;

	return doit(arg1, arg2);
}
#endif

View File

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-only */
.text
.code64
	.section ".text.protected_mode_call", "ax", @progbits
	.globl protected_mode_call_narg

/*
 * int protected_mode_call_narg(uint32_t arg_count, uint32_t func_ptr,
 *                              uint32_t opt_arg1, uint32_t opt_arg2);
 *
 * Drops from long mode into protected mode, calls the 32-bit function at
 * func_ptr with up to arg_count (0..2) stack arguments, then returns to
 * long mode with the called function's return value in %eax.
 *
 * SysV AMD64 incoming arguments: arg_count in %rdi, func_ptr in %rsi,
 * opt_arg1 in %rdx, opt_arg2 in %rcx. They are spilled to the stack so
 * they survive the mode switch; after the prologue the frame layout
 * relative to %rbp is:
 *   -8(%rbp)  saved %rbx
 *   -16(%rbp) saved %r12
 *   -24(%rbp) saved %r13
 *   -32(%rbp) saved %r14
 *   -40(%rbp) saved %r15
 *   -48(%rbp) arg_count (%rdi)
 *   -56(%rbp) func_ptr  (%rsi)
 *   -64(%rbp) opt_arg1  (%rdx)
 *   -72(%rbp) opt_arg2  (%rcx)
 */
protected_mode_call_narg:
	push	%rbp
	mov	%rsp, %rbp
	/* Preserve registers */
	push	%rbx
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	/* Arguments to stack */
	push	%rdi
	push	%rsi
	push	%rdx
	push	%rcx

	/* Drop into 32-bit protected mode; code below runs as x86_32. */
	#include <cpu/x86/64bit/exit32.inc>

	/* Reload spilled arguments through the (still valid) frame pointer. */
	movl	-48(%ebp), %eax	/* Argument count */
	movl	-64(%ebp), %edx	/* Argument 0 */
	movl	-72(%ebp), %ecx	/* Argument 1 */

	/* Align the stack to 16 bytes for the callee. */
	andl	$0xFFFFFFF0, %esp
	test	%eax, %eax
	je	1f	/* Zero arguments */

	subl	$1, %eax
	test	%eax, %eax
	je	2f	/* One argument */

	/* Two arguments: pad so %esp stays 16-byte aligned after the pushes. */
	subl	$8, %esp
	pushl	%ecx	/* Argument 1 */
	pushl	%edx	/* Argument 0 */
	jmp	1f
2:
	subl	$12, %esp
	pushl	%edx	/* Argument 0 */
1:
	movl	-56(%ebp), %ebx	/* Function to call */
	call	*%ebx
	/* Stash return value in %ebx across the mode switch. */
	movl	%eax, %ebx

	/* Re-enter long mode. Preserves ebx */
	#include <cpu/x86/64bit/entry64.inc>

	/* Place return value in rax */
	movl	%ebx, %eax

	/* Restore registers */
	mov	-40(%rbp), %r15
	mov	-32(%rbp), %r14
	mov	-24(%rbp), %r13
	mov	-16(%rbp), %r12
	mov	-8(%rbp), %rbx
	/* Restore stack pointer */
	mov	%rbp, %rsp
	pop	%rbp
	ret

View File

@ -1,4 +1,7 @@
subdirs-y += pae
# Long-mode <-> protected-mode call helper; only built when all stages are x86_64.
all-$(CONFIG_ARCH_ALL_STAGES_X86_64) += 64bit/mode_switch.S
subdirs-$(CONFIG_PARALLEL_MP) += name
ramstage-$(CONFIG_PARALLEL_MP) += mp_init.c
ramstage-y += backup_default_smm.c