coreboot-kgpe-d16/payloads/libpayload/arch/x86/head.S
Julius Werner 1143d08f7e libpayload: head.S: Avoid clearing BSS (and heap) again
3 out of 4 architectures currently zero out the payload BSS in early
assembly code, which is pointless since the code loading the payload has
already done that (with a more efficient memset). ARM64 has never had
any code like this and can run just fine without it. This also defeats
the new optimization of moving the heap out of the BSS, since all three
implementations assume that everything between _edata and _end is BSS.
We should just take this out.

Change-Id: I45cd2dabd94da43ff0f77e990f11c877cee6cda1
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/16091
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
2016-08-13 02:46:19 +02:00
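For context, the per-architecture zeroing that this change removes was an early-assembly wipe of everything between _edata and _end. A minimal sketch of what such code looks like on x86 (a hypothetical reconstruction, not the exact deleted lines):

        cld                          /* clear DF so stosb counts upward */
        movl $_edata, %edi           /* start of the region treated as BSS */
        movl $_end, %ecx
        subl %edi, %ecx              /* byte count: _end - _edata */
        xorl %eax, %eax
        rep stosb                    /* store AL (zero) ECX times at (%edi) */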

/*
 * This file is part of the libpayload project.
 *
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
        .code32
        .global _entry, _leave
        .text
        .align 4
/*
 * Our entry point - assume that the CPU is in 32-bit protected mode and
 * all segments are in a flat model. That's our operating mode, so we won't
 * change anything.
 */
_entry:
        jmp _init
        .align 4
#define MB_MAGIC 0x1BADB002
#define MB_FLAGS 0x00010003
mb_header:
        .long MB_MAGIC                  /* magic */
        .long MB_FLAGS                  /* flags: page-align modules, pass
                                           memory info, address fields valid */
        .long -(MB_MAGIC + MB_FLAGS)    /* checksum */
        .long mb_header                 /* header_addr */
        .long _start                    /* load_addr */
        .long _edata                    /* load_end_addr */
        .long _end                      /* bss_end_addr */
        .long _init                     /* entry_addr */
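        /*
         * The Multiboot spec requires magic + flags + checksum == 0
         * (mod 2^32), which the values above satisfy:
         *   0x1BADB002 + 0x00010003 = 0x1BAEB005
         *   -(0x1BAEB005) mod 2^32  = 0xE4514FFB
         * and 0x1BAEB005 + 0xE4514FFB wraps back around to zero.
         */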
/*
 * This function saves off the previous stack and switches us to our
 * own execution environment.
 */
_init:
        /* No interrupts, please. */
        cli
        /* There is a bunch of stuff missing here to take arguments on the
         * stack. See http://www.coreboot.org/Payload_API and exec.S.
         */
        /* Store the current stack pointer. */
        movl %esp, %esi
        /* Store EAX and EBX (on a Multiboot loader these hold the magic
         * and the info pointer) so C code can inspect them later. */
        movl %eax, loader_eax
        movl %ebx, loader_ebx
        /* Set up the new stack. */
        movl $_stack, %ebx
        movl %ebx, %esp
        /* Save the old stack pointer. */
        pushl %esi
        /* Let's rock. */
        call start_main
        /* %eax has the return value - pass it on unmolested. */
_leave:
        /* Get old stack pointer. */
        popl %ebx
        /* Restore old stack. */
        movl %ebx, %esp
        /* Return to the original context. */
        ret
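
/*
 * For illustration only (hypothetical; "mbi" is a made-up label, not a
 * symbol in this file): a Multiboot loader hands control to the payload
 * roughly like this, which is why _init saves EAX and EBX before touching
 * anything else:
 *
 *      movl $0x2BADB002, %eax    # Multiboot bootloader magic
 *      movl $mbi, %ebx           # physical address of the Multiboot info
 *      jmp _init                 # entry_addr from mb_header above
 */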