x86: Port x86 over to compile cleanly with x86-64

Change-Id: I26f1bbf027435be593f11bce4780111dcaf7cb86
Signed-off-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
Signed-off-by: Scott Duplichan <scott@notabs.org>
Reviewed-on: http://review.coreboot.org/10586
Tested-by: build bot (Jenkins)
Tested-by: Raptor Engineering Automated Test Stand <noreply@raptorengineeringinc.com>
Reviewed-by: Ronald G. Minnich <rminnich@gmail.com>
This commit is contained in:
Stefan Reinauer 2015-06-18 01:23:48 -07:00
parent 492a07593b
commit 9693885ad8
13 changed files with 243 additions and 50 deletions

View File

@ -39,17 +39,74 @@ static void jmp_payload(void *entry, unsigned long buffer, unsigned long size)
/* Jump to kernel */
__asm__ __volatile__(
" cld \n\t"
#ifdef __x86_64__
/* switch back to 32-bit mode */
" push %4\n\t"
" push %3\n\t"
" push %2\n\t"
" push %1\n\t"
" push %0\n\t"
".intel_syntax noprefix\n\t"
/* use iret to switch to 32-bit code segment */
" xor rax,rax\n\t"
" mov ax, ss\n\t"
" push rax\n\t"
" mov rax, rsp\n\t"
" add rax, 8\n\t"
" push rax\n\t"
" pushfq\n\t"
" push 0x10\n\t"
" lea rax,[rip+3]\n\t"
" push rax\n\t"
" iretq\n\t"
".code32\n\t"
/* disable paging */
" mov eax, cr0\n\t"
" btc eax, 31\n\t"
" mov cr0, eax\n\t"
/* disable long mode */
" mov ecx, 0xC0000080\n\t"
" rdmsr\n\t"
" btc eax, 8\n\t"
" wrmsr\n\t"
" pop eax\n\t"
" add esp, 4\n\t"
" pop ebx\n\t"
" add esp, 4\n\t"
" pop ecx\n\t"
" add esp, 4\n\t"
" pop edx\n\t"
" add esp, 4\n\t"
" pop esi\n\t"
" add esp, 4\n\t"
".att_syntax prefix\n\t"
#endif
/* Save the callee save registers... */
" pushl %%esi\n\t"
" pushl %%edi\n\t"
" pushl %%ebx\n\t"
/* Save the parameters I was passed */
#ifdef __x86_64__
" pushl $0\n\t" /* 20 adjust */
" pushl %%eax\n\t" /* 16 lb_start */
" pushl %%ebx\n\t" /* 12 buffer */
" pushl %%ecx\n\t" /* 8 lb_size */
" pushl %%edx\n\t" /* 4 entry */
" pushl %%esi\n\t" /* 0 elf_boot_notes */
#else
" pushl $0\n\t" /* 20 adjust */
" pushl %0\n\t" /* 16 lb_start */
" pushl %1\n\t" /* 12 buffer */
" pushl %2\n\t" /* 8 lb_size */
" pushl %3\n\t" /* 4 entry */
" pushl %4\n\t" /* 0 elf_boot_notes */
#endif
/* Compute the adjustment */
" xorl %%eax, %%eax\n\t"
" subl 16(%%esp), %%eax\n\t"
@ -115,7 +172,9 @@ static void jmp_payload(void *entry, unsigned long buffer, unsigned long size)
" popl %%ebx\n\t"
" popl %%edi\n\t"
" popl %%esi\n\t"
#ifdef __x86_64__
".code64\n\t"
#endif
::
"ri" (lb_start), "ri" (buffer), "ri" (lb_size),
"ri" (entry),
@ -140,7 +199,12 @@ void arch_prog_run(struct prog *prog)
if (ENV_RAMSTAGE)
try_payload(prog);
__asm__ volatile (
#ifdef __x86_64__
"jmp *%%rdi\n"
#else
"jmp *%%edi\n"
#endif
:: "D"(prog_entry(prog))
);
}

View File

@ -26,7 +26,11 @@
/* Operand for the lgdt instruction: a packed pseudo-descriptor holding
 * the GDT limit (size in bytes minus one) followed by its linear base
 * address.  In long mode lgdt expects a 64-bit base, hence the wider
 * field under __x86_64__; the struct must stay packed so the base
 * immediately follows the 16-bit limit with no padding. */
struct gdtarg {
u16 limit;
#ifdef __x86_64__
u64 base;
#else
u32 base;
#endif
} __attribute__((packed));
/* Copy GDT to new location and reload it.
@ -35,7 +39,7 @@ struct gdtarg {
static void move_gdt(int is_recovery)
{
void *newgdt;
u16 num_gdt_bytes = (u32)&gdt_end - (u32)&gdt;
u16 num_gdt_bytes = (uintptr_t)&gdt_end - (uintptr_t)&gdt;
struct gdtarg gdtarg;
newgdt = cbmem_find(CBMEM_ID_GDT);
@ -49,7 +53,7 @@ static void move_gdt(int is_recovery)
memcpy((void*)newgdt, &gdt, num_gdt_bytes);
}
gdtarg.base = (u32)newgdt;
gdtarg.base = (uintptr_t)newgdt;
gdtarg.limit = num_gdt_bytes - 1;
__asm__ __volatile__ ("lgdt %0\n\t" : : "m" (gdtarg));

View File

@ -73,7 +73,7 @@ static unsigned char smp_compute_checksum(void *v, int len)
return checksum;
}
static void *smp_write_floating_table_physaddr(u32 addr, u32 mpf_physptr, unsigned int virtualwire)
static void *smp_write_floating_table_physaddr(uintptr_t addr, uintptr_t mpf_physptr, unsigned int virtualwire)
{
struct intel_mp_floating *mf;
void *v;
@ -591,6 +591,7 @@ unsigned long __attribute__((weak)) write_smp_table(unsigned long addr)
mptable_lintsrc(mc, isa_bus);
tmp = mptable_finalize(mc);
printk(BIOS_INFO, "MPTABLE len: %d\n", (unsigned int)tmp - (unsigned int)v);
printk(BIOS_INFO, "MPTABLE len: %d\n", (unsigned int)((uintptr_t)tmp -
(uintptr_t)v));
return (unsigned long)tmp;
}

View File

@ -24,7 +24,12 @@
/* CR0 bits */
#define PE (1 << 0)
#ifdef __x86_64__
.code64
#else
.code32
#endif
.globl __wakeup
__wakeup:
/* First prepare the jmp to the resume vector */

View File

@ -179,8 +179,14 @@ struct cpu_info {
static inline struct cpu_info *cpu_info(void)
{
struct cpu_info *ci;
__asm__("andl %%esp,%0; "
__asm__(
#ifdef __x86_64__
"and %%rsp,%0; "
"or %2, %0 "
#else
"andl %%esp,%0; "
"orl %2, %0 "
#endif
:"=r" (ci)
: "0" (~(CONFIG_STACK_SIZE - 1)),
"r" (CONFIG_STACK_SIZE - sizeof(struct cpu_info))

View File

@ -3,5 +3,6 @@
#define ROM_CODE_SEG 0x08
#define ROM_DATA_SEG 0x10
#define ROM_CODE_SEG64 0x18
#endif /* ROM_SEGS_H */

View File

@ -27,7 +27,11 @@ void asmlinkage copy_and_run(void);
/* Hand control to the next stage: an irreversible tail-jump to `entry`.
 * The "D" constraint loads the entry pointer into %edi (or %rdi on
 * x86-64), and the register-indirect jmp transfers to it directly;
 * this function never returns. */
static inline void stage_exit(void *entry)
{
__asm__ volatile (
#ifdef __x86_64__
"jmp *%%rdi\n"
#else
"jmp *%%edi\n"
#endif
:: "D"(entry)
);
}

View File

@ -17,20 +17,30 @@ thread_stacks:
#endif
.section ".text._start", "ax", @progbits
#ifdef __x86_64__
.code64
#else
.code32
#endif
.globl _start
.globl __rmodule_entry
__rmodule_entry:
_start:
cli
lgdt %cs:gdtaddr
#ifndef __x86_64__
ljmp $0x10, $1f
#endif
1: movl $0x18, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
movl %eax, %fs
movl %eax, %gs
#ifdef __x86_64__
mov $0x48, %ecx
call SetCodeSelector
#endif
post_code(POST_ENTRY_C_START) /* post 13 */
@ -54,11 +64,11 @@ _start:
#if CONFIG_COOP_MULTITASKING
/* Push the thread pointer. */
pushl $0
push $0
#endif
/* Push the cpu index and struct cpu */
pushl $0
pushl $0
push $0
push $0
/* Initialize the Interrupt Descriptor table */
leal _idt, %edi
@ -76,7 +86,11 @@ _start:
jne 1b
/* Load the Interrupt descriptor table */
#ifndef __x86_64__
lidt idtarg
#else
// FIXME port table to x64 - lidt idtarg
#endif
/*
* Now we are finished. Memory is up, data is copied and
@ -97,122 +111,150 @@ _start:
jmp .Lhlt
vec0:
pushl $0 /* error code */
pushl $0 /* vector */
push $0 /* error code */
push $0 /* vector */
jmp int_hand
vec1:
pushl $0 /* error code */
pushl $1 /* vector */
push $0 /* error code */
push $1 /* vector */
jmp int_hand
vec2:
pushl $0 /* error code */
pushl $2 /* vector */
push $0 /* error code */
push $2 /* vector */
jmp int_hand
vec3:
pushl $0 /* error code */
pushl $3 /* vector */
push $0 /* error code */
push $3 /* vector */
jmp int_hand
vec4:
pushl $0 /* error code */
pushl $4 /* vector */
push $0 /* error code */
push $4 /* vector */
jmp int_hand
vec5:
pushl $0 /* error code */
pushl $5 /* vector */
push $0 /* error code */
push $5 /* vector */
jmp int_hand
vec6:
pushl $0 /* error code */
pushl $6 /* vector */
push $0 /* error code */
push $6 /* vector */
jmp int_hand
vec7:
pushl $0 /* error code */
pushl $7 /* vector */
push $0 /* error code */
push $7 /* vector */
jmp int_hand
vec8:
/* error code */
pushl $8 /* vector */
push $8 /* vector */
jmp int_hand
.word 0x9090
vec9:
pushl $0 /* error code */
pushl $9 /* vector */
push $0 /* error code */
push $9 /* vector */
jmp int_hand
vec10:
/* error code */
pushl $10 /* vector */
push $10 /* vector */
jmp int_hand
.word 0x9090
vec11:
/* error code */
pushl $11 /* vector */
push $11 /* vector */
jmp int_hand
.word 0x9090
vec12:
/* error code */
pushl $12 /* vector */
push $12 /* vector */
jmp int_hand
.word 0x9090
vec13:
/* error code */
pushl $13 /* vector */
push $13 /* vector */
jmp int_hand
.word 0x9090
vec14:
/* error code */
pushl $14 /* vector */
push $14 /* vector */
jmp int_hand
.word 0x9090
vec15:
pushl $0 /* error code */
pushl $15 /* vector */
push $0 /* error code */
push $15 /* vector */
jmp int_hand
vec16:
pushl $0 /* error code */
pushl $16 /* vector */
push $0 /* error code */
push $16 /* vector */
jmp int_hand
vec17:
/* error code */
pushl $17 /* vector */
push $17 /* vector */
jmp int_hand
.word 0x9090
vec18:
pushl $0 /* error code */
pushl $18 /* vector */
push $0 /* error code */
push $18 /* vector */
jmp int_hand
vec19:
pushl $0 /* error code */
pushl $19 /* vector */
push $0 /* error code */
push $19 /* vector */
jmp int_hand
int_hand:
/* At this point on the stack there is:
/* At this point, on x86-32, on the stack there is:
* 0(%esp) vector
* 4(%esp) error code
* 8(%esp) eip
* 12(%esp) cs
* 16(%esp) eflags
*/
#ifdef __x86_64__
push %rdi
push %rsi
push %rbp
/* Original stack pointer */
lea 32(%rsp), %rbp
push %rbp
push %rbx
push %rdx
push %rcx
push %rax
push %rsp /* Pointer to structure on the stack */
call x86_exception
pop %rax /* Drop the pointer */
pop %rax
pop %rcx
pop %rdx
pop %rbx
pop %rbp /* Ignore saved %rsp value */
pop %rbp
pop %rsi
pop %rdi
add $8, %rsp /* pop of the vector and error code */
#else
pushl %edi
pushl %esi
pushl %ebp
/* Original stack pointer */
leal 32(%esp), %ebp
pushl %ebp
@ -235,6 +277,7 @@ int_hand:
popl %edi
addl $8, %esp /* pop of the vector and error code */
#endif
iret
@ -242,21 +285,33 @@ int_hand:
.globl gdb_stub_breakpoint
gdb_stub_breakpoint:
#ifdef __x86_64__
pop %rax /* Return address */
pushfl
push %cs
push %rax /* Return address */
push $0 /* No error code */
push $32 /* vector 32 is user defined */
#else
popl %eax /* Return address */
pushfl
pushl %cs
pushl %eax /* Return address */
pushl $0 /* No error code */
pushl $32 /* vector 32 is user defined */
#endif
jmp int_hand
#endif
.globl gdt, gdt_end, idtarg
gdtaddr:
.word gdt_end - gdt - 1
#ifdef __x86_64__
.quad gdt
#else
.long gdt /* we know the offset */
#endif
.data
@ -287,7 +342,11 @@ gdt:
/* selgdt 0x18, flat data segment */
.word 0xffff, 0x0000
#ifdef __x86_64__
.byte 0x00, 0x92, 0xcf, 0x00
#else
.byte 0x00, 0x93, 0xcf, 0x00
#endif
/* selgdt 0x20, unused */
.word 0x0000, 0x0000 /* dummy */
@ -312,6 +371,12 @@ gdt:
/* selgdt 0x40, flat code segment 16 bit */
.word 0xffff, 0x0000
.byte 0x00, 0x9b, 0x8f, 0x00 /* G=1 and 0x0f, So we get 4Gbytes for limit */
#ifdef __x86_64__
/* selgdt 0x48, flat x64 code segment */
.word 0xffff, 0x0000
.byte 0x00, 0x9b, 0xaf, 0x00
#endif
gdt_end:
idtarg:
@ -322,5 +387,35 @@ _idt:
.fill 20, 8, 0 # idt is uninitialized
_idt_end:
#ifdef __x86_64__
# SetCodeSelector: reload %cs with the selector the caller placed in %cx
# (the _start path passes 0x48, the flat x64 code segment).  In 64-bit
# mode a far jump cannot easily load an arbitrary selector, so an iretq
# frame is built by hand instead: iretq pops rip, cs, rflags, rsp and ss
# in one shot, resuming at setCodeSelectorLongJump with the new %cs live.
SetCodeSelector:
.intel_syntax noprefix
# save rsp because iret will align it to a 16 byte boundary
mov rdx, rsp
# use iret to jump to a 64-bit offset in a new code segment
# iret will pop cs:rip, flags, then ss:rsp
mov ax, ss # need to push ss, but...
push rax # ..."push ss" is not a valid instruction in 64-bit mode, so push it via ax
push rsp
pushfq
push rcx # cx is the code segment selector supplied by the caller
mov rax, offset setCodeSelectorLongJump
push rax
# the iret will continue at next instruction, with the new cs value loaded
iretq
setCodeSelectorLongJump:
# restore rsp, it might not have been 16-byte aligned on entry
mov rsp, rdx
ret
.att_syntax prefix
.previous
.code64
#else
.previous
.code32
#endif

View File

@ -10,6 +10,7 @@
#include <device/device.h>
#include <smp/spinlock.h>
#ifndef __x86_64__
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(uint32_t flag)
{
@ -78,6 +79,7 @@ static int deep_magic_nexgen_probe(void)
: "=a" (ret) : : "cx", "dx" );
return ret;
}
#endif
/* List of cpu vendor strings along with their normalized
* id values.
@ -131,6 +133,7 @@ static void identify_cpu(struct device *cpu)
vendor_name[0] = '\0'; /* Unset */
#ifndef __x86_64__
/* Find the id and vendor_name */
if (!cpu_have_cpuid()) {
/* Its a 486 if we can modify the AC flag */
@ -148,6 +151,7 @@ static void identify_cpu(struct device *cpu)
memcpy(vendor_name, "NexGenDriven", 13);
}
}
#endif
if (cpu_have_cpuid()) {
int cpuid_level;
struct cpuid_result result;

View File

@ -10,6 +10,7 @@
#include <device/device.h>
#include <smp/spinlock.h>
#ifndef __x86_64__
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(uint32_t flag)
{
@ -37,6 +38,14 @@ int cpu_have_cpuid(void)
return flag_is_changeable_p(X86_EFLAGS_ID);
}
#else
/* CPUID is always available on x86-64 — a processor cannot be running
 * in long mode without it — so the EFLAGS.ID toggle probe used by the
 * 32-bit build is unnecessary here. */
int cpu_have_cpuid(void)
{
	return 1;
}
#endif
int cpu_cpuid_extended_level(void)
{
return cpuid_eax(0x80000000);

View File

@ -40,8 +40,8 @@ void setup_ebda(u32 low_memory_size, u16 ebda_segment, u16 ebda_size)
write16(X86_EBDA_SEGMENT, ebda_segment);
/* Set up EBDA */
memset((void *)(ebda_segment << 4), 0, ebda_size);
write16((void*)(ebda_segment << 4), (ebda_size >> 10));
memset((void *)((uintptr_t)ebda_segment << 4), 0, ebda_size);
write16((void*)((uintptr_t)ebda_segment << 4), (ebda_size >> 10));
}
void setup_default_ebda(void)

View File

@ -494,16 +494,16 @@ void x86_exception(struct eregs *info)
info->error_code, info->eflags,
info->eax, info->ebx, info->ecx, info->edx,
info->edi, info->esi, info->ebp, info->esp);
u8 *code = (u8*)((u32)info->eip - (MDUMP_SIZE >>1));
u8 *code = (u8*)((uintptr_t)info->eip - (MDUMP_SIZE >>1));
/* Align to 8-byte boundary please, and print eight bytes per row.
* This is done to make DRAM burst timing/reordering errors more
* evident from the looking at the dump */
code = (u8*)((u32)code & ~0x7);
code = (u8*)((uintptr_t)code & ~0x7);
int i;
for(i = 0; i < MDUMP_SIZE; i++)
{
if( (i & 0x07) == 0 )
printk(BIOS_EMERG, "\n%.8x:\t", (int)code + i );
printk(BIOS_EMERG, "\n%p:\t", code + i);
printk(BIOS_EMERG, "%.2x ", code[i]);
}
die("");

View File

@ -10,7 +10,7 @@
*/
#define PCI_MMIO_ADDR(SEGBUS, DEVFN, WHERE, MASK) \
((void *)((CONFIG_MMCONF_BASE_ADDRESS |\
((void *)(((uintptr_t)CONFIG_MMCONF_BASE_ADDRESS |\
(((SEGBUS) & 0xFFF) << 20) |\
(((DEVFN) & 0xFF) << 12) |\
((WHERE) & 0xFFF)) & ~MASK))