cpu/x86/sipi: Add x86_64 support

Enter long mode on secondary APs.

Tested on Lenovo T410 with additional x86_64 patches.
Tested on HP Z220 with additional x86_64 patches.

Still boots on x86_32.

Change-Id: I53eae082123d1a12cfa97ead1d87d84db4a334c0
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/45187
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
This commit is contained in:
Patrick Rudolph 2019-12-01 07:23:59 +01:00 committed by Arthur Heymans
parent 45dc92a8c2
commit a169550479
2 changed files with 25 additions and 0 deletions

View File

@@ -16,7 +16,12 @@
#endif
#include <cpu/x86/msr.h>
#if defined(__RAMSTAGE__)
#include <arch/ram_segs.h>
#else
#include <arch/rom_segs.h>
#endif
setup_longmode:
/* Get page table address */
@@ -42,7 +47,12 @@ setup_longmode:
movl %eax, %cr0
/* use long jump to switch to 64-bit code segment */
#if defined(__RAMSTAGE__)
ljmp $RAM_CODE_SEG64, $__longmode_start
#else
ljmp $ROM_CODE_SEG64, $__longmode_start
#endif
.code64
__longmode_start:

View File

@@ -5,6 +5,8 @@
#include <cpu/x86/msr.h>
#include <arch/ram_segs.h>
#define __RAMSTAGE__
/* The SIPI vector is responsible for initializing the APs in the system. It
 * loads microcode, sets up MSRs, and enables caching before calling into
 * C code. */
@@ -192,11 +194,24 @@ load_msr:
mov %eax, %cr4
#endif
#ifdef __x86_64__
/* entry64.inc preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>
mov %rsi, %rdi /* cpu_num */
movl c_handler, %eax
call *%rax
#else
/* c_handler(cpu_num), preserve proper stack alignment */
sub $12, %esp
push %esi /* cpu_num */
mov c_handler, %eax
call *%eax
#endif
halt_jump:
hlt
jmp halt_jump