arch/x86: remove .intel_syntax
Replace it with the more familiar AT&T syntax. Tested by sha1sum(1)ing the object files and checking in the objdump output that the code in question was actually compiled.

Change-Id: Ie85b8ee5dad1794864c18683427e32f055745221
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Reviewed-on: https://review.coreboot.org/13132
Tested-by: build bot (Jenkins)
Reviewed-by: Martin Roth <martinroth@google.com>
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
parent c7b2b7c67d
commit 0302b060b6
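The conversion itself is mechanical: AT&T syntax orders operands as source, destination (the reverse of Intel), marks registers with % and immediates with $, and writes memory operands as disp(base) instead of [base + disp]. Inside GCC extended-asm strings, as in the jmp_payload() hunk below, every % is additionally doubled to %%. A minimal, standalone x86-64 sketch (not part of this commit) showing the mapping for a few of the instruction forms touched here:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t out;

        __asm__ volatile (
            " xor %%rax, %%rax\n\t"     /* Intel: xor rax, rax     */
            " add $8, %%rax\n\t"        /* Intel: add rax, 8       */
            " lea 3(%%rax), %%rax\n\t"  /* Intel: lea rax, [rax+3] */
            " mov %%rax, %0\n\t"        /* Intel: mov <dst>, rax   */
            : "=r" (out)
            :
            : "rax", "cc");

        printf("%llu\n", (unsigned long long)out); /* prints 11 */
        return 0;
    }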
@@ -60,43 +60,40 @@ static void jmp_payload(void *entry, unsigned long buffer, unsigned long size)
 " push %1\n\t"
 " push %0\n\t"

-".intel_syntax noprefix\n\t"
 /* use iret to switch to 32-bit code segment */
-" xor rax,rax\n\t"
-" mov ax, ss\n\t"
-" push rax\n\t"
-" mov rax, rsp\n\t"
-" add rax, 8\n\t"
-" push rax\n\t"
+" xor %%rax,%%rax\n\t"
+" mov %%ss, %%ax\n\t"
+" push %%rax\n\t"
+" mov %%rsp, %%rax\n\t"
+" add $8, %%rax\n\t"
+" push %%rax\n\t"
 " pushfq\n\t"
-" push 0x10\n\t"
-" lea rax,[rip+3]\n\t"
-" push rax\n\t"
+" push $0x10\n\t"
+" lea 3(%%rip), %%rax\n\t"
+" push %%rax\n\t"
 " iretq\n\t"
 ".code32\n\t"
 /* disable paging */
-" mov eax, cr0\n\t"
-" btc eax, 31\n\t"
-" mov cr0, eax\n\t"
+" mov %%cr0, %%eax\n\t"
+" btc $31, %%eax\n\t"
+" mov %%eax, %%cr0\n\t"
 /* disable long mode */
-" mov ecx, 0xC0000080\n\t"
+" mov $0xC0000080, %%ecx\n\t"
 " rdmsr\n\t"
-" btc eax, 8\n\t"
+" btc $8, %%eax\n\t"
 " wrmsr\n\t"

-" pop eax\n\t"
-" add esp, 4\n\t"
-" pop ebx\n\t"
-" add esp, 4\n\t"
-" pop ecx\n\t"
+" pop %%eax\n\t"
+" add $4, %%esp\n\t"
+" pop %%ebx\n\t"
+" add $4, %%esp\n\t"
+" pop %%ecx\n\t"

-" add esp, 4\n\t"
-" pop edx\n\t"
-" add esp, 4\n\t"
-" pop esi\n\t"
-" add esp, 4\n\t"
+" add $4, %%esp\n\t"
+" pop %%edx\n\t"
+" add $4, %%esp\n\t"
+" pop %%esi\n\t"
+" add $4, %%esp\n\t"

-".att_syntax prefix\n\t"
 #endif

 /* Save the callee save registers... */
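For reference, the sequence above switches to the 32-bit code segment by hand-building the five-quadword frame that iretq pops (from the top of the stack: RIP, CS, RFLAGS, RSP, SS) and then executing iretq with CS set to the 0x10 selector. A hedged sketch of that frame layout in C; the struct and field names are mine, not coreboot's:

    #include <stdint.h>

    /* Stack layout consumed by iretq, lowest address (top of stack) first. */
    struct iretq_frame {
        uint64_t rip;     /* pushed last:  lea 3(%%rip), %%rax; push %%rax    */
        uint64_t cs;      /* push $0x10, the 32-bit code segment selector     */
        uint64_t rflags;  /* pushfq                                           */
        uint64_t rsp;     /* old stack pointer, recomputed as %%rsp + 8       */
        uint64_t ss;      /* pushed first: current %%ss, staged through %%rax */
    };

    _Static_assert(sizeof(struct iretq_frame) == 5 * 8,
                   "iretq pops five 64-bit slots");

RSP is recomputed as the current stack pointer plus 8 because SS has already been pushed by the time the stack pointer is read back.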
@@ -400,29 +400,26 @@ _idt_end:

 #ifdef __x86_64__
 SetCodeSelector:
-	.intel_syntax noprefix

 	# save rsp because iret will align it to a 16 byte boundary
-	mov rdx, rsp
+	mov %rsp, %rdx

 	# use iret to jump to a 64-bit offset in a new code segment
 	# iret will pop cs:rip, flags, then ss:rsp
-	mov ax, ss	# need to push ss..
-	push rax	# push ss instuction not valid in x64 mode, so use ax
-	push rsp
+	mov %ss, %ax	# need to push ss..
+	push %rax	# push ss instuction not valid in x64 mode, so use ax
+	push %rsp
 	pushfq
-	push rcx	# cx is code segment selector from caller
-	mov rax, offset setCodeSelectorLongJump
-	push rax
+	push %rcx	# cx is code segment selector from caller
+	mov $setCodeSelectorLongJump, %rax
+	push %rax

 	# the iret will continue at next instruction, with the new cs value loaded
 	iretq

 setCodeSelectorLongJump:
 	# restore rsp, it might not have been 16-byte aligned on entry
-	mov rsp, rdx
+	mov %rdx, %rsp
 	ret
-	.att_syntax prefix

 .previous
 .code64
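One conversion in this hunk is easy to misread: Intel's "mov rax, offset setCodeSelectorLongJump" becomes "mov $setCodeSelectorLongJump, %rax", because in AT&T a $-prefixed symbol is an immediate (the symbol's address) while a bare symbol is a memory reference. A small sketch illustrating the difference (not from the commit; the variable name is invented, and it uses absolute addressing, so build with gcc -no-pie):

    #include <stdio.h>

    static long the_value __attribute__((used)) = 42;

    int main(void)
    {
        long addr, loaded;

        __asm__ (
            " mov $the_value, %0\n\t"  /* Intel: mov %0, offset the_value  (address) */
            " mov the_value, %1\n\t"   /* Intel: mov %1, [the_value]       (load)    */
            : "=r" (addr), "=r" (loaded));

        printf("&the_value = %p, the_value = %ld\n", (void *)addr, loaded);
        return 0;
    }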
@@ -29,32 +29,30 @@
 .globl __wakeup
 __wakeup:
 #ifdef __x86_64__
-	.intel_syntax noprefix
-	xor rax,rax
-	mov ax, ss
-	push rax
-	mov rax, rsp
-	add rax, 8
-	push rax
+	xor %rax,%rax
+	mov %ss, %ax
+	push %rax
+	mov %rsp, %rax
+	add $8, %rax
+	push %rax
 	pushfq
-	push 0x10
-	lea rax,[rip+3]
-	push rax
+	push $0x10
+	lea 3(%rip), %rax
+	push %rax
 	iretq

 .code32

 	/* disable paging */
-	mov eax, cr0
-	btc eax, 31
-	mov cr0, eax
+	mov %cr0, %eax
+	btc $31, %eax
+	mov %eax, %cr0

 	/* disable long mode */
-	mov ecx, 0xC0000080
+	mov $0xC0000080, %ecx
 	rdmsr
-	btc eax, 8
+	btc $8, %eax
 	wrmsr
-	.att_syntax prefix
 #endif
 	/* First prepare the jmp to the resume vector */
 	mov 0x4(%esp), %eax /* vector */
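Both this hunk and the jmp_payload() one leave long mode with the same two bit flips: clear PG (bit 31) in CR0, then clear LME (bit 8) in the EFER MSR at 0xC0000080. A short reference sketch spelling out those constants (macro names are mine, not coreboot's):

    #include <stdint.h>

    #define MSR_IA32_EFER  0xC0000080u  /* extended feature enable register  */
    #define EFER_LME       (1u << 8)    /* long mode enable: "btc $8, %eax"  */
    #define CR0_PG         (1u << 31)   /* paging enable:    "btc $31, %eax" */

btc (bit test and complement) clears the bits here only because both are known to be set while long mode is active; in C terms the effect is eax &= ~CR0_PG followed by eax &= ~EFER_LME.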