cpu/x86: Drop !CPU_INFO_V2 code
Now that all platforms use PARALLEL_MP, this is the only codepath used for cpu_info() local thread storage.

Change-Id: I119214e703aea8a4fe93f83b784159cf86d859d3
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/69122
Reviewed-by: Elyes Haouas <ehaouas@noos.fr>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
commit f4c11dcb53
parent 66b2888b77
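For context, the lookup that remains after this change reads the cpu_info pointer through %gs, whose segment base is set up to point at the calling CPU's struct per_cpu_segment_data. A minimal C sketch of that path follows; the asm operand constraints are assumed (the diff below elides them), and the struct is reduced to the one member the lookup needs.

#include <stddef.h>

struct cpu_info;

/* Reduced stand-in: only the member the lookup dereferences. */
struct per_cpu_segment_data {
	struct cpu_info *cpu_info;
};

static inline struct cpu_info *cpu_info_sketch(void)
{
	struct cpu_info *ci = NULL;

	/* %gs points at this CPU's per_cpu_segment_data; read the pointer. */
	__asm__("mov %%gs:%c[offset], %[ci]"
		: [ci] "=r" (ci)
		: [offset] "i" (offsetof(struct per_cpu_segment_data, cpu_info)));

	return ci;
}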
@@ -715,7 +715,7 @@ config TIMER_QUEUE
 config COOP_MULTITASKING
 	def_bool n
 	select TIMER_QUEUE
-	depends on ARCH_X86 && CPU_INFO_V2
+	depends on ARCH_X86
 	help
 	  Cooperative multitasking allows callbacks to be multiplexed on the
 	  main thread. With this enabled it allows for multiple execution paths

@@ -80,7 +80,6 @@ _start:
 
 	push_cpu_info
 
-#if CONFIG(CPU_INFO_V2)
 	/* Allocate the per_cpu_segment_data on the stack */
 	push_per_cpu_segment_data
 
@@ -93,7 +92,6 @@ _start:
 	mov	$per_cpu_segment_selector, %eax
 	movl	(%eax), %eax
 	mov	%eax, %gs
-#endif
 
 	/*
 	 *	Now we are finished. Memory is up, data is copied and
@@ -222,7 +220,6 @@ gdt:
 	.word	0xffff, 0x0000
 	.byte	0x00, 0x9b, 0xaf, 0x00
 #endif
-#if CONFIG(CPU_INFO_V2)
 per_cpu_segment_descriptors:
 	.rept	CONFIG_MAX_CPUS
 	/* flat data segment */
@@ -233,14 +230,11 @@ per_cpu_segment_descriptors:
 	.byte 0x00, 0x93, 0xcf, 0x00
 #endif
 	.endr
-#endif /* CPU_INFO_V2 */
 gdt_end:
 
-#if CONFIG(CPU_INFO_V2)
 /* Segment selector pointing to the first per_cpu_segment_descriptor. */
 per_cpu_segment_selector:
 	.long	per_cpu_segment_descriptors - gdt
-#endif /* CPU_INFO_V2 */
 
 	.section ".text._start", "ax", @progbits
#if ENV_X86_64

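The per_cpu_segment_descriptors kept above are ordinary flat 32-bit writable data descriptors: the visible bytes (.byte 0x00, 0x93, 0xcf, 0x00) encode a present, writable, 4 KiB-granular data segment with base 0, and the limit word elided from this hunk is assumed to be 0xffff like the flat code descriptor earlier in the GDT. The base of each CPU's entry is presumably rewritten later to point at that CPU's per_cpu_segment_data. A minimal C sketch of the encoding, with a hypothetical helper name:

#include <stdint.h>

/* Pack a flat 4 GiB writable data descriptor with the given base address. */
static uint64_t make_per_cpu_data_descriptor(uint32_t base)
{
	uint64_t d = 0;

	d |= 0xffffULL;                              /* limit[15:0] */
	d |= (uint64_t)(base & 0xffff) << 16;        /* base[15:0] */
	d |= (uint64_t)((base >> 16) & 0xff) << 32;  /* base[23:16] */
	d |= 0x93ULL << 40;                          /* present, ring 0, writable data */
	d |= 0xcfULL << 48;                          /* limit[19:16] = 0xf, 4 KiB granularity, 32-bit */
	d |= (uint64_t)(base >> 24) << 56;           /* base[31:24] */

	return d;
}

With base 0 this packs to the byte pattern shown in the diff (ff ff 00 00 00 93 cf 00).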
@@ -149,8 +149,6 @@ struct per_cpu_segment_data {
 
 static inline struct cpu_info *cpu_info(void)
 {
-	/* We use a #if because we don't want to mess with the &s below. */
-#if CONFIG(CPU_INFO_V2)
 	struct cpu_info *ci = NULL;
 
 	__asm__("mov %%gs:%c[offset], %[ci]"
@@ -159,11 +157,6 @@ static inline struct cpu_info *cpu_info(void)
 	);
 
 	return ci;
-#else
-	char s;
-	uintptr_t info = ALIGN_UP((uintptr_t)&s, CONFIG_STACK_SIZE) - sizeof(struct cpu_info);
-	return (struct cpu_info *)info;
-#endif /* CPU_INFO_V2 */
 }
 
 struct cpuinfo_x86 {

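For comparison, the fallback removed above assumed every stack is CONFIG_STACK_SIZE-aligned with struct cpu_info parked at its top, so it rounded the address of any local variable up to the next stack boundary and stepped back by sizeof(struct cpu_info). A small self-contained example with made-up numbers (0x1000 stack size, local variable at 0x123f40, so the boundary is 0x124000):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)    (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define CONFIG_STACK_SIZE 0x1000  /* example value only */

struct cpu_info { void *cpu; unsigned int index; };  /* stand-in layout */

int main(void)
{
	uintptr_t local = 0x123f40;  /* pretend address of an on-stack variable */
	uintptr_t info = ALIGN_UP(local, CONFIG_STACK_SIZE) - sizeof(struct cpu_info);

	/* Prints 0x124000 - sizeof(struct cpu_info) */
	printf("cpu_info at %#lx\n", (unsigned long)info);
	return 0;
}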
@@ -2,7 +2,6 @@ if ARCH_X86
 
 config PARALLEL_MP
 	def_bool y
-	select CPU_INFO_V2
 	help
 	  This option uses common MP infrastructure for bringing up APs
 	  in parallel. It additionally provides a more flexible mechanism
@@ -210,12 +209,4 @@ config RESERVE_MTRRS_FOR_OS
 	  However, modern OSes use PAT to control cacheability instead of
 	  using MTRRs.
 
-config CPU_INFO_V2
-	bool
-	depends on PARALLEL_MP
-	help
-	  Enables the new method of locating struct cpu_info. This new method
-	  uses the %gs segment to locate the cpu_info pointer. The old method
-	  relied on the stack being CONFIG_STACK_SIZE aligned.
-
 endif # ARCH_X86

@@ -227,7 +227,7 @@ cond_clear_var_mtrrs:
 	movd	%esp, %xmm0
 	movd	%ebp, %xmm1
 
-	/* Backup %gs used by CPU_INFO_V2 */
+	/* Backup %gs used by cpu_info() */
 	movl	%gs, %eax
 	movd	%eax, %xmm2
 
@@ -265,7 +265,7 @@ cond_clear_var_mtrrs:
 	movl	%eax, %es
 	movl	%eax, %ss
 	movl	%eax, %fs
-	/* Restore %gs used by CPU_INFO_V2 */
+	/* Restore %gs used by cpu_info */
 	movd	%xmm2, %eax
 	movl	%eax, %gs
 
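The two comment updates above matter because cpu_info() now always reads through %gs: any sequence that reloads the data segment registers has to preserve %gs, which the assembly does via %xmm2. A C-level sketch of the same save/restore idea, with hypothetical helper names:

#include <stdint.h>

static inline uint16_t save_gs(void)
{
	uint16_t sel;

	/* Copy the current %gs selector into a general-purpose register. */
	__asm__ volatile("mov %%gs, %0" : "=r" (sel));
	return sel;
}

static inline void restore_gs(uint16_t sel)
{
	/* Reload %gs so cpu_info() keeps resolving the per-CPU segment. */
	__asm__ volatile("mov %0, %%gs" : : "r" (sel));
}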