arm64: use one stage_entry for all stages

Ramstage needs an assembly entry point to set up the initial
state of the CPU. Therefore, a function, arm64_el3_startup(),
is provided that bootstraps the state of the processor,
initializes the stack pointer, and branches to a defined entry
symbol. To make this work without adding too many preprocessor
conditionals, _stack and _estack are provided for all stages.
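
As an illustration of the _stack/_estack contract that each stage's
linker script now provides, here is a minimal C sketch. The symbol
names come from the linker scripts in this change; the helper itself
is hypothetical and not part of the commit:

#include <stddef.h>
#include <stdint.h>

/* Linker-provided bounds of the current stage's stack region. */
extern uint8_t _stack[];	/* lowest address of the stack region */
extern uint8_t _estack[];	/* top of the region; the initial stack pointer */

/* Hypothetical helper: size of the stack region available to this stage. */
static inline size_t stage_stack_size(void)
{
	return _estack - _stack;
}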

Currently, the entry point after initialization is 'main';
however, it can be changed or extended to do more work, such as
seeding the stack contents with tombstones.
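
A hedged sketch of what such an extension might look like in C,
reusing the 0xdeadbeefdeadbeef tombstone value from the old bootblock
code removed below; the function name and safety margin are
assumptions, and the fill must stop below the live stack frames:

#include <stdint.h>

extern uint8_t _stack[];	/* bottom of the stage's stack region */

#define STACK_TOMBSTONE 0xdeadbeefdeadbeefULL

/* Hypothetical: seed the unused portion of the stack with tombstones so a
 * later pass can measure how much of it was actually used. */
static void seed_stack(uintptr_t current_sp)
{
	uint64_t *p = (uint64_t *)_stack;
	uint64_t *end = (uint64_t *)(current_sp - 128); /* keep clear of live frames */

	while (p < end)
		*p++ = STACK_TOMBSTONE;
}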

It should be noted that romstage and bootblock weren't
tested. Only ramstage is known to work.

BUG=chrome-os-partner:29923
BRANCH=None
TEST=Brought up 64-bit ramstage on rush.

Original-Change-Id: I1f07d5b6656e13e6667b038cdc1f4be8843d1960
Original-Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Original-Reviewed-on: https://chromium-review.googlesource.com/207262
Original-Reviewed-by: Furquan Shaikh <furquan@chromium.org>
(cherry picked from commit 7850ee3a7bf48c05f2e64147edb92161f8308f19)
Signed-off-by: Marc Jones <marc.jones@se-eng.com>

Change-Id: Ia87697f49638c8c249215d441d95f1ec621e0949
Reviewed-on: http://review.coreboot.org/8585
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@google.com>
Author: Aaron Durbin, 2014-07-10 12:40:30 -05:00 (committed by Marc Jones)
parent 6ba1b628ee
commit 0df877a65a
8 changed files with 74 additions and 74 deletions

View File

@@ -109,6 +109,7 @@ ramstage-y += tables.c
ramstage-y += memset.S
ramstage-y += memcpy.S
ramstage-y += memmove.S
+ramstage-y += stage_entry.S
ramstage-$(CONFIG_COLLECT_TIMESTAMPS) += timestamp.c
ramstage-srcs += $(wildcard src/mainboard/$(MAINBOARDDIR)/mainboard.c)

View File

@@ -19,68 +19,6 @@
* MA 02111-1307 USA
*/
-.section ".start", "a", %progbits
-.globl _start
-_start: b reset
-.balignl 16,0xdeadbeef
-_cbfs_master_header:
-/* The CBFS master header is inserted by cbfstool at the first
- * aligned offset after the above anchor string is found.
- * Hence, we leave some space for it.
- * Assumes 64-byte alignment.
- */
-.skip 128
-reset:
-/*
- * Set the cpu to SVC32 mode and unmask aborts. Aborts might happen
- * before logging is turned on and may crash the machine, but at least
- * the problem will show up near the code that causes it.
- */
-/* FIXME: Not using supervisor mode, does it apply for aarch64? */
-msr daifclr, #0xc /* Unmask Debug and System exceptions */
-msr daifset, #0x3 /* Mask IRQ, FIQ */
-bl arm_init_caches
-/*
- * Initialize the stack to a known value. This is used to check for
- * stack overflow later in the boot process.
- */
-ldr x0, .Stack
-ldr x1, .Stack_size
-sub x0, x0, x1
-ldr x1, .Stack
-ldr x2, =0xdeadbeefdeadbeef
-init_stack_loop:
-str x2, [x0]
-add x0, x0, #8
-cmp x0, x1
-bne init_stack_loop
-/* Set stackpointer in internal RAM to call bootblock main() */
-call_bootblock:
-ldr x0, .Stack /* Set up stack pointer */
-mov sp, x0
-ldr x0, =0x00000000
-sub sp, sp, #16
-/*
- * Switch to EL2 already because Linux requires to be
- * in EL1 or EL2, see its "Booting AArch64 Linux" doc
- */
-bl switch_el3_to_el2
-bl main
-.align 3
-.Stack:
-.word CONFIG_STACK_TOP
-.align 3
-.Stack_size:
-.word CONFIG_STACK_SIZE
.section ".id", "a", %progbits
.globl __id_start

View File

@@ -28,14 +28,14 @@ PHDRS
to_load PT_LOAD;
}
-ENTRY(_start)
+ENTRY(stage_entry)
TARGET(binary)
SECTIONS
{
. = CONFIG_BOOTBLOCK_BASE;
.bootblock . : {
-*(.text._start);
+*(.text.stage_entry);
KEEP(*(.id));
*(.text);
*(.text.*);
@@ -49,6 +49,10 @@ SECTIONS
*(.sbss.*);
} : to_load = 0xff
+/* arm64 chipsets need to define CONFIG_BOOTBLOCK_STACK_(TOP|BOTTOM) */
+_stack = CONFIG_BOOTBLOCK_STACK_BOTTOM;
+_estack = CONFIG_BOOTBLOCK_STACK_TOP;
preram_cbmem_console = CONFIG_CONSOLE_PRERAM_BUFFER_BASE;
/DISCARD/ : {

View File

@@ -22,7 +22,7 @@
extern void main(void);
-void stage_entry(void) __attribute__((section(".text.stage_entry.aarch64")));
+void stage_entry(void);
void stage_exit(void *);
void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size);

View File

@@ -39,7 +39,7 @@ SECTIONS
.text : {
_text = .;
_start = .;
-*(.text.stage_entry.aarch64);
+*(.text.stage_entry);
*(.text);
*(.text.*);
. = ALIGN(16);
@@ -116,8 +116,9 @@ SECTIONS
}
_eheap = .;
-_stack = CONFIG_STACK_BOTTOM;
-_estack = CONFIG_STACK_TOP;
+/* arm64 chipsets need to define CONFIG_RAMSTAGE_STACK_(TOP|BOTTOM) */
+_stack = CONFIG_RAMSTAGE_STACK_BOTTOM;
+_estack = CONFIG_RAMSTAGE_STACK_TOP;
/* The ram segment. This includes all memory used by the memory
 * resident copy of coreboot, except the tables that are produced on

View File

@@ -42,7 +42,7 @@ SECTIONS
.romtext . : {
_start = .;
-*(.text.stage_entry.aarch64);
+*(.text.stage_entry);
*(.text.startup);
*(.text);
*(.text.*);
@@ -76,6 +76,10 @@ SECTIONS
_end = .;
+/* arm64 chipsets need to define CONFIG_ROMSTAGE_STACK_(TOP|BOTTOM) */
+_stack = CONFIG_ROMSTAGE_STACK_BOTTOM;
+_estack = CONFIG_ROMSTAGE_STACK_TOP;
preram_cbmem_console = CONFIG_CONSOLE_PRERAM_BUFFER_BASE;
/* Discard the sections we don't need/want */

View File

@@ -0,0 +1,57 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <arch/asm.h>

ENTRY(arm64_el3_startup)
	mov x0, xzr
	msr SCTLR_EL3, x0
	msr SCR_EL3, x0
	/* Have stack pointer use SP_EL0. */
	msr SPSel, #0
	isb
	/* Load up the stack if non-zero. */
	ldr x0, .stack
	cmp x0, #0
	b.eq 1f
	mov sp, x0
1:
	ldr x1, .entry
	br x1

	.align 4
	/*
	 * By default branch to main() and initialize the stack according
	 * to the Kconfig option for cpu0. However, this code can be relocated
	 * and reused to start up secondary cpus.
	 */
.stack:
	.quad _estack
.entry:
	.quad main
ENDPROC(arm64_el3_startup)

.global arm64_el3_startup_end
arm64_el3_startup_end:

ENTRY(stage_entry)
	b arm64_el3_startup
ENDPROC(stage_entry)
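
The comment in the new file notes that the block between
arm64_el3_startup and arm64_el3_startup_end is relocatable so it can
be reused to start secondary CPUs. A speculative C sketch of how a
caller might do that follows; the byte offsets of the .stack and
.entry quadwords are assumptions, since the commit itself only exports
the start and end symbols:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern uint8_t arm64_el3_startup[];
extern uint8_t arm64_el3_startup_end[];

/* Assumed offsets of the .stack and .entry quadwords inside the stub. */
#define STUB_STACK_OFFSET	0x30
#define STUB_ENTRY_OFFSET	0x38

/* Hypothetical: copy the startup stub to 'dest' and patch in a per-CPU
 * stack top and entry point before releasing a secondary CPU to it.
 * Cache maintenance needed to make the copy visible is omitted here. */
static void *prepare_secondary_stub(void *dest, uintptr_t stack_top,
				    void (*entry)(void))
{
	size_t size = arm64_el3_startup_end - arm64_el3_startup;

	memcpy(dest, arm64_el3_startup, size);
	*(uint64_t *)((uint8_t *)dest + STUB_STACK_OFFSET) = stack_top;
	*(uint64_t *)((uint8_t *)dest + STUB_ENTRY_OFFSET) = (uintptr_t)entry;

	return dest;
}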

View File

@@ -31,11 +31,6 @@
#include <arch/stages.h>
#include <arch/cache.h>
-void stage_entry(void)
-{
-	main();
-}
/* we had marked 'doit' as 'noreturn'.
 * There is no apparent harm in leaving it as something we can return from, and in the one
 * case where we call a payload, the payload is allowed to return.