coreboot-kgpe-d16/payloads/libpayload/arch/x86/head.S
Maximilian Brune 9d475bf6de libpayload/arch/x86: Update API handling of CBTABLE handoff
The payload API of coreboot described in
https://www.coreboot.org/Payload_API does not reflect the current
handoff mechanism to hand the coreboot tables off. Therefore the
arguments supplied by coreboot (cbtable) will currently never be parsed
correctly and libpayload has to search for the coreboot tables by
iterating through memory.

This patch removes the old payload API implementation and just takes the
coreboot table pointer from the first argument on the stack.

Tested: started prodrive/atlas with coreinfo payload

Signed-off-by: Maximilian Brune <maximilian.brune@9elements.com>
Change-Id: I51fb0cfc81043cbfe3fc9c8ea0776add2d6a42b2
Reviewed-on: https://review.coreboot.org/c/coreboot/+/74965
Reviewed-by: Julius Werner <jwerner@chromium.org>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
2023-05-24 11:49:51 +00:00

130 lines
3.2 KiB
ArmAsm

/*
*
* Copyright (C) 2008 Advanced Micro Devices, Inc.
* Copyright (C) 2017 Patrick Rudolph <siro@das-labor.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.code32
.global _entry
.text
.align 4
/*
* Our entry point - assume that the CPU is in 32 bit protected mode and
* all segments are in a flat model. That's our operating mode, so we won't
* change anything.
*/
_entry:
jmp _init
.align 4
/*
 * Multiboot v1 header, so Multiboot-compliant loaders (e.g. GRUB) can also
 * start this payload directly. MB_FLAGS = 0x00010003: bit 0 (page-align
 * modules), bit 1 (provide memory map), bit 16 (the address fields below
 * are valid and must be used to load the image).
 */
#define MB_MAGIC 0x1BADB002
#define MB_FLAGS 0x00010003
mb_header:
.long MB_MAGIC
.long MB_FLAGS
.long -(MB_MAGIC + MB_FLAGS) /* checksum: magic + flags + checksum == 0 */
.long mb_header /* header_addr: physical address of this header */
.long _start /* load_addr: start of the text/data image */
.long _edata /* load_end_addr: end of initialized data */
.long _end /* bss_end_addr: end of BSS (loader zeroes it) */
.long _init /* entry_addr: where the loader jumps to */
/*
 * This function saves off the previous stack and switches us to our
 * own execution environment.
 *
 * On entry (from coreboot): 4(%esp) holds a pointer to the coreboot
 * tables (the single handoff argument). %eax/%ebx are preserved into
 * loader_eax/loader_ebx so a Multiboot loader's magic/info pointer
 * (or any other loader state) survives for later inspection.
 * Exits by returning to the original caller via _leave, with the
 * return value of start_main() left untouched in %eax.
 */
_init:
/* No interrupts, please. */
cli
/* Store EAX and EBX */
movl %eax, loader_eax
movl %ebx, loader_ebx
/* save pointer to coreboot tables */
movl 4(%esp), %eax
movl %eax, cb_header_ptr
/* Store current stack pointer and set up new stack. */
movl %esp, %eax
movl $_stack, %esp
/* Old %esp is pushed on the NEW stack; _leave pops it back later. */
pushl %eax
/* Enable special x86 functions if present. */
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
/* CPUID leaf 0: %eax returns the highest supported standard leaf. */
movl $0, %eax
cpuid
/* Test if CPUID(eax=1) is available. */
test %eax, %eax
je cpuid_done
/* Get CPU features. */
movl $1, %eax
cpuid
cpuid_fpu:
/* Test if x87 FPU is present (CPUID.1:EDX bit 0) */
test $1, %edx
je cpuid_sse
fninit
movl %cr0, %eax
andl $0xFFFFFFFB, %eax /* clear EM */
orl $0x00000022, %eax /* set MP, NE */
movl %eax, %cr0
cpuid_sse:
/* Test if SSE is available (CPUID.1:EDX bit 25) */
test $0x02000000, %edx
je cpuid_done
movl %cr4, %eax
orl $0x00000600, %eax /* set OSFXSR, OSXMMEXCPT */
movl %eax, %cr4
cpuid_done:
/* Restore the registers saved before the CPUID probing. */
popl %edx
popl %ecx
popl %ebx
popl %eax
/* Let's rock. */
call start_main
/* %eax has the return value - pass it on unmolested */
_leave:
/* Restore old stack. */
popl %esp
/* Return to the original context. */
ret