arch/x86: Use ENV_X86_64 instead of __x86_64__

Tested on Intel Sandybridge x86_64 and x86_32.

Change-Id: I152483d24af0512c0ee4fbbe8931b7312e487ac6
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/44867
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Author: Patrick Rudolph, 2020-08-27 20:50:18 +02:00
Committed-by: Patrick Georgi
Parent: e85e7af6d0
Commit: adcf7827bd
23 changed files with 44 additions and 44 deletions
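
For context: __x86_64__ is a compiler predefine that only exists when a translation unit is built for a 64-bit target, whereas coreboot's ENV_X86_64 is expected to be defined as 0 or 1 for every stage, so it can be tested with #if and a misspelled name is caught by -Wundef instead of silently falling into the 32-bit path. A minimal sketch of the two guard styles follows (illustration only, not code from this change; the reg_width_*_t names are hypothetical):

/* Sketch only -- not coreboot code. Outside the coreboot build system,
 * ENV_X86_64 would have to be provided by the build (e.g. -DENV_X86_64=1). */
#include <stdint.h>

/* Old style: the compiler predefine is simply absent on 32-bit targets,
 * and a typo in the macro name silently selects the #else branch. */
#ifdef __x86_64__
typedef uint64_t reg_width_old_t;
#else
typedef uint32_t reg_width_old_t;
#endif

/* New style: the macro always exists and is either 0 or 1, so #if works
 * and -Wundef flags any misspelling. */
#if ENV_X86_64
typedef uint64_t reg_width_new_t;
#else
typedef uint32_t reg_width_new_t;
#endif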

View File

@@ -15,7 +15,7 @@
 #define _STACK_TOP _ecar_stack
 #endif
-#ifdef __x86_64__
+#if ENV_X86_64
 .code64
 #else
 .code32
@@ -26,7 +26,7 @@
 _start:
 /* Migrate GDT to this text segment */
-#ifdef __x86_64__
+#if ENV_X86_64
 call gdt_init64
 #else
 call gdt_init

View File

@@ -21,14 +21,14 @@ int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
 void arch_prog_run(struct prog *prog)
 {
-#if ENV_RAMSTAGE && defined(__x86_64__)
+#if ENV_RAMSTAGE && ENV_X86_64
 const uint32_t arg = pointer_to_uint32_safe(prog_entry_arg(prog));
 const uint32_t entry = pointer_to_uint32_safe(prog_entry(prog));
 /* On x86 coreboot payloads expect to be called in protected mode */
 protected_mode_jump(entry, arg);
 #else
-#ifdef __x86_64__
+#if ENV_X86_64
 void (*doit)(void *arg);
 #else
 /* Ensure the argument is pushed on the stack. */

View File

@@ -5,7 +5,7 @@
 #include <cpu/x86/cr.h>
-#ifdef __x86_64__
+#if ENV_X86_64
 /*
 * Functions to handle mode switches from long mode to protected

View File

@@ -24,7 +24,7 @@ thread_stacks:
 #endif
 .section ".text._start", "ax", @progbits
-#ifdef __x86_64__
+#if ENV_X86_64
 .code64
 #else
 .code32
@@ -32,7 +32,7 @@ thread_stacks:
 .globl _start
 _start:
 cli
-#ifdef __x86_64__
+#if ENV_X86_64
 movabs $gdtaddr, %rax
 lgdt (%rax)
 #else
@@ -45,7 +45,7 @@ _start:
 movl %eax, %ss
 movl %eax, %fs
 movl %eax, %gs
-#ifdef __x86_64__
+#if ENV_X86_64
 mov $RAM_CODE_SEG64, %ecx
 call SetCodeSelector
 #endif
@@ -54,7 +54,7 @@ _start:
 cld
-#ifdef __x86_64__
+#if ENV_X86_64
 mov %rdi, %rax
 movabs %rax, _cbmem_top_ptr
 movabs $_stack, %rdi
@@ -117,7 +117,7 @@ _start:
 .globl gdb_stub_breakpoint
 gdb_stub_breakpoint:
-#ifdef __x86_64__
+#if ENV_X86_64
 pop %rax /* Return address */
 pushfl
 push %cs
@@ -139,7 +139,7 @@ gdb_stub_breakpoint:
 gdtaddr:
 .word gdt_end - gdt - 1
-#ifdef __x86_64__
+#if ENV_X86_64
 .quad gdt
 #else
 .long gdt /* we know the offset */
@@ -176,7 +176,7 @@ gdt:
 /* selgdt 0x18, flat data segment */
 .word 0xffff, 0x0000
-#ifdef __x86_64__
+#if ENV_X86_64
 .byte 0x00, 0x92, 0xcf, 0x00
 #else
 .byte 0x00, 0x93, 0xcf, 0x00
@@ -210,7 +210,7 @@ gdt:
 * limit
 */
-#ifdef __x86_64__
+#if ENV_X86_64
 /* selgdt 0x48, flat x64 code segment */
 .word 0xffff, 0x0000
 .byte 0x00, 0x9b, 0xaf, 0x00
@@ -218,7 +218,7 @@ gdt:
 gdt_end:
 .section ".text._start", "ax", @progbits
-#ifdef __x86_64__
+#if ENV_X86_64
 SetCodeSelector:
 # save rsp because iret will align it to a 16 byte boundary
 mov %rsp, %rdx

View File

@@ -13,7 +13,7 @@
 #include <device/device.h>
 #include <smp/spinlock.h>
-#ifndef __x86_64__
+#if ENV_X86_32
 /* Standard macro to see if a specific flag is changeable */
 static inline int flag_is_changeable_p(uint32_t flag)
 {
@@ -136,7 +136,7 @@ static void identify_cpu(struct device *cpu)
 vendor_name[0] = '\0'; /* Unset */
-#ifndef __x86_64__
+#if ENV_X86_32
 /* Find the id and vendor_name */
 if (!cpu_have_cpuid()) {
 /* Its a 486 if we can modify the AC flag */

View File

@@ -2,7 +2,7 @@
 #include <cpu/cpu.h>
-#ifndef __x86_64__
+#if ENV_X86_32
 /* Standard macro to see if a specific flag is changeable */
 static inline int flag_is_changeable_p(uint32_t flag)
 {

View File

@@ -492,7 +492,7 @@ void x86_exception(struct eregs *info)
 logical_processor = cpu_index();
 #endif
 u8 *code;
-#ifdef __x86_64__
+#if ENV_X86_64
 #define MDUMP_SIZE 0x100
 printk(BIOS_EMERG,
 "CPU Index %d - APIC %d Unexpected Exception:\n"

View File

@@ -11,7 +11,7 @@ post_car_stack_top:
 .long 0
 .long 0
-#if defined(__x86_64__)
+#if ENV_X86_64
 .code64
 .macro pop_eax_edx
 pop %rax
@@ -42,13 +42,13 @@ _start:
 is expected to be implemented in assembly. */
 /* Migrate GDT to this text segment */
-#if defined(__x86_64__)
+#if ENV_X86_64
 call gdt_init64
 #else
 call gdt_init
 #endif
-#ifdef __x86_64__
+#if ENV_X86_64
 mov %rdi, %rax
 movabs %rax, _cbmem_top_ptr
 #else
@@ -61,7 +61,7 @@ _start:
 cpuid
 btl $CPUID_FEATURE_CLFLUSH_BIT, %edx
 jnc skip_clflush
-#ifdef __x86_64__
+#if ENV_X86_64
 movabs _cbmem_top_ptr, %rax
 clflush (%rax)
 #else
@@ -73,7 +73,7 @@ skip_clflush:
 call chipset_teardown_car
 /* Enable caching if not already enabled. */
-#ifdef __x86_64__
+#if ENV_X86_64
 mov %cr0, %rax
 and $(~(CR0_CD | CR0_NW)), %eax
 mov %rax, %cr0
@@ -115,7 +115,7 @@ skip_clflush:
 /* Need to align stack to 16 bytes at the call instruction. Therefore
 account for the 1 push. */
 andl $0xfffffff0, %esp
-#if defined(__x86_64__)
+#if ENV_X86_64
 mov %rbp, %rdi
 #else
 sub $12, %esp

View File

@@ -18,7 +18,7 @@ gdtptr:
 .word gdt_end - gdt -1 /* compute the table limit */
 .long gdt /* we know the offset */
-#ifdef __x86_64__
+#if ENV_X86_64
 .code64
 .section .init._gdt64_, "ax", @progbits
 .globl gdt_init64

View File

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 .section ".text._idt", "ax", @progbits
-#ifdef __x86_64__
+#if ENV_X86_64
 .code64
 #else
 .code32
@@ -109,7 +109,7 @@ vec19:
 .global int_hand
 int_hand:
-#ifdef __x86_64__
+#if ENV_X86_64
 /* At this point, on x86-64, on the stack there is:
 * 0(%rsp) vector
 * 8(%rsp) error code

View File

@@ -235,7 +235,7 @@ static inline struct cpu_info *cpu_info(void)
 {
 struct cpu_info *ci;
 __asm__(
-#ifdef __x86_64__
+#if ENV_X86_64
 "and %%rsp,%0; "
 "or %2, %0 "
 #else

View File

@@ -42,7 +42,7 @@
 uint64_t r##A; \
 } __packed
-#ifdef __ARCH_x86_64__
+#if ENV_X86_64
 struct eregs {
 QUAD_DOWNTO8(a);
 QUAD_DOWNTO8(c);

View File

@@ -15,7 +15,7 @@ void *memcpy(void *dest, const void *src, size_t n)
 #endif
 asm volatile(
-#ifdef __x86_64__
+#if ENV_X86_64
 "rep ; movsd\n\t"
 "mov %4,%%rcx\n\t"
 #else

View File

@@ -6,7 +6,7 @@
 /* CR0 bits */
 #define PE (1 << 0)
-#ifdef __x86_64__
+#if ENV_X86_64
 .code64
 #else
 .code32
@@ -14,7 +14,7 @@
 .globl __wakeup
 __wakeup:
-#ifdef __x86_64__
+#if ENV_X86_64
 xor %rax,%rax
 mov %ss, %ax
 push %rax

View File

@@ -83,7 +83,7 @@ pages_done:
 #include <cpu/x86/64bit/entry64.inc>
 /* Restore the BIST result and timestamps. */
-#if defined(__x86_64__)
+#if ENV_X86_64
 movd %mm2, %rdi
 shlq $32, %rdi
 movd %mm1, %rsi

View File

@@ -9,7 +9,7 @@
 * Clobbers: eax, ecx, edx
 */
-#if defined(__x86_64__)
+#if ENV_X86_64
 .code32
 #if (CONFIG_ARCH_X86_64_PGTBL_LOC & 0xfff) > 0
 #error pagetables must be 4KiB aligned!

View File

@@ -38,7 +38,7 @@ static int lowmem_backup_size;
 static inline void setup_secondary_gdt(void)
 {
 u16 *gdt_limit;
-#ifdef __x86_64__
+#if ENV_X86_64
 u64 *gdt_base;
 #else
 u32 *gdt_base;

View File

@@ -214,7 +214,7 @@ load_msr:
 mov %eax, %cr4
 #endif
-#ifdef __x86_64__
+#if ENV_X86_64
 /* entry64.inc preserves ebx. */
 #include <cpu/x86/64bit/entry64.inc>

View File

@@ -185,7 +185,7 @@ apicid_end:
 /* Align stack to 16 bytes. Another 32 bytes are pushed below. */
 andl $0xfffffff0, %esp
-#ifdef __x86_64__
+#if ENV_X86_64
 mov %ecx, %edi
 /* Backup IA32_EFER. Preserves ebx. */
 movl $(IA32_EFER), %ecx
@@ -204,7 +204,7 @@ apicid_end:
 * struct arg = { c_handler_params, cpu_num, smm_runtime, canary };
 * c_handler(&arg)
 */
-#ifdef __x86_64__
+#if ENV_X86_64
 push %rbx /* uintptr_t *canary */
 push %rcx /* size_t cpu */

View File

@@ -43,7 +43,7 @@
 #define SMM_HANDLER_OFFSET 0x0000
-#if defined(__x86_64__)
+#if ENV_X86_64
 .bss
 ia32efer_backup_eax:
 .long 0
@@ -166,7 +166,7 @@ untampered_lapic:
 addl $SMM_STACK_SIZE, %ebx
 movl %ebx, %esp
-#if defined(__x86_64__)
+#if ENV_X86_64
 /* Backup IA32_EFER. Preserves ebx. */
 movl $(IA32_EFER), %ecx
 rdmsr
@@ -180,7 +180,7 @@ untampered_lapic:
 /* Call C handler */
 call smi_handler
-#if defined(__x86_64__)
+#if ENV_X86_64
 /*
 * The only reason to go back to protected mode is that RSM doesn't restore
 * MSR registers and MSR IA32_EFER was modified by entering long mode.

View File

@@ -94,7 +94,7 @@ extern void _dead_code_assertion_failed(void) __attribute__((noreturn));
 *(type *)(uintptr_t)0; \
 })
-#ifdef __x86_64__
+#if ENV_X86_64
 #define pointer_to_uint32_safe(x) ({ \
 if ((uintptr_t)(x) > 0xffffffffUL) \
 die("Cast from pointer to uint32_t overflows"); \

View File

@@ -9,7 +9,7 @@
 #define COMPILER_BARRIER "memory"
-#ifdef __x86_64__
+#if ENV_X86_64
 #define CRx_TYPE uint64_t
 #define CRx_IN "q"
 #define CRx_RET "=q"

View File

@@ -274,7 +274,7 @@ void *memcpy_s(void *dest, const void *src, size_t n)
 unsigned long d0, d1, d2;
 asm volatile(
-#ifdef __x86_64__
+#if ENV_X86_64
 "rep ; movsd\n\t"
 "mov %4,%%rcx\n\t"
 #else