arm64: tegra132: tegra210: Remove old arm64/stage_entry.S

This patch removes the old arm64/stage_entry.S code, which was too
specific to the Tegra SoC boot flow, and replaces it with code in the
Tegra SoC directories that hides the peculiarities of switching to a
different CPU/arch on entry to ramstage.

BRANCH=None
BUG=None
TEST=Built Ryu and Smaug. !!!UNTESTED!!!

Change-Id: Ib3a0448b30ac9c7132581464573efd5e86e03698
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: http://review.coreboot.org/12078
Tested-by: build bot (Jenkins)
Reviewed-by: Paul Menzel <paulepanter@users.sourceforge.net>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Author: Julius Werner <jwerner@chromium.org>  2015-10-16 13:10:02 -07:00
commit 7dcf9d51e5 (parent d3634c108d)
33 changed files with 126 additions and 440 deletions
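
For orientation, the ramstage entry path that this change introduces can be sketched as follows. This is an abbreviated illustration assembled from the hunks below (tegra132 names shown; tegra210 is analogous), not the verbatim sources:

/* stage_entry.S: the small per-SoC assembly shim that replaces the old
 * generic arm64/stage_entry.S. */
ENTRY(stage_entry)
	bl	arm64_init_cpu		/* shared ARM64 init: PSTATE, SCTLR, caches, stack */
	bl	ramstage_entry		/* jump to the Tegra-specific C entry point */
ENDPROC(stage_entry)

/* ramstage.c: the C entry point reached from the shim above. */
void ramstage_entry(void)
{
	gic_init();			/* TODO in the diff: possibly move to BL31 */
	trustzone_region_init();	/* set up the TrustZone carve-out */
	tegra132_mmu_init();		/* enable MMU and caches */
	/* ...remaining SoC bring-up elided; see the ramstage.c hunk below... */
	main();				/* hand off to coreboot's common boot state machine */
}

The SRAM memlayouts below also split the stack by environment, giving the ARMv4 AVP and the ARM64 CCPLEX separate regions; as the added comment puts it, the AVP "gets a separate stack to avoid any chance of handoff races".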

View File

@ -20,7 +20,6 @@ config ARCH_RAMSTAGE_ARM64
default n
source src/arch/arm64/armv8/Kconfig
source src/arch/arm64/cpu/Kconfig
config ARM64_USE_ARM_TRUSTED_FIRMWARE
bool

View File

@ -23,7 +23,7 @@
# Take care of subdirectories
################################################################################
subdirs-y += armv8/ cpu/
subdirs-y += armv8/
################################################################################
# ARM specific options
@ -44,8 +44,6 @@ bootblock-y += id.S
$(obj)/arch/arm64/id.bootblock.o: $(obj)/build.h
bootblock-y += boot.c
bootblock-y += c_entry.c
bootblock-y += cpu-stubs.c
bootblock-y += eabi_compat.c
bootblock-y += transition.c transition_asm.S
@ -87,8 +85,6 @@ endif # CONFIG_ARCH_VERSTAGE_ARM64
ifeq ($(CONFIG_ARCH_ROMSTAGE_ARM64),y)
romstage-y += boot.c
romstage-y += c_entry.c
romstage-y += cpu-stubs.c
romstage-y += div0.c
romstage-y += eabi_compat.c
romstage-y += memset.S
@ -113,7 +109,6 @@ endif # CONFIG_ARCH_ROMSTAGE_ARM64
ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM64),y)
ramstage-y += c_entry.c
ramstage-y += div0.c
ramstage-y += eabi_compat.c
ramstage-y += boot.c
@ -121,15 +116,9 @@ ramstage-y += tables.c
ramstage-y += memset.S
ramstage-y += memcpy.S
ramstage-y += memmove.S
ramstage-y += cpu-stubs.c
ramstage-$(CONFIG_ARM64_USE_ARM_TRUSTED_FIRMWARE) += arm_tf.c
ramstage-y += transition.c transition_asm.S
# TODO: Replace this with a simpler ramstage entry point in soc/nvidia/tegra*
ifeq ($(CONFIG_SOC_NVIDIA_TEGRA132)$(CONFIG_SOC_NVIDIA_TEGRA210),y)
ramstage-y += stage_entry.S
endif
rmodules_arm64-y += memset.S
rmodules_arm64-y += memcpy.S
rmodules_arm64-y += memmove.S

View File

@ -16,21 +16,14 @@
#include <arch/asm.h>
/* NOTE: When making changes to general ARM64 initialization, keep in mind that
* there are other CPU entry points, using BOOTBLOCK_CUSTOM or entering the CPU
* in a later stage (like Tegra). Changes should generally be put into
* arm64_cpu_init so they can be shared between those instances. */
ENTRY(_start)
/* Initialize PSTATE, SCTLR and caches to clean state. */
/* Initialize PSTATE, SCTLR and caches to clean state, set up stack. */
bl arm64_init_cpu
/* Initialize stack with sentinel value to later check overflow. */
ldr x0, =_stack
ldr x1, =_estack
ldr x2, =0xdeadbeefdeadbeef
stack_init_loop:
stp x2, x2, [x0], #16
cmp x0, x1
bne stack_init_loop
/* Leave a line of beef dead for easier visibility in stack dumps. */
sub sp, x0, #16
bl main
ENDPROC(_start)

View File

@ -21,7 +21,8 @@
* Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
* known state regarding caches/SCTLR/PSTATE. Completely cleans and invalidates
* icache/dcache, disables MMU and dcache (if active), and enables unaligned
* accesses, icache and branch prediction (if inactive). Clobbers R22 and R23.
* accesses, icache and branch prediction (if inactive). Seeds the stack and
* initializes SP_EL0. Clobbers R22 and R23.
*/
ENTRY(arm64_init_cpu)
/* Initialize PSTATE (unmask all exceptions, select SP_EL0). */
@ -60,5 +61,17 @@ ENTRY(arm64_init_cpu)
dsb sy
isb
/* Initialize stack with sentinel value to later check overflow. */
ldr x2, =0xdeadbeefdeadbeef
ldr x0, =_stack
ldr x1, =_estack
1:
stp x2, x2, [x0], #16
cmp x0, x1
bne 1b
/* Leave a line of beef dead for easier visibility in stack dumps. */
sub sp, x0, #16
ret x23
ENDPROC(arm64_init_cpu)

View File

@ -166,7 +166,6 @@ void exc_dispatch(struct exc_state *state, uint64_t idx)
exc_exit(&state->regs);
}
static int test_exception_handler(struct exc_state *state, uint64_t vector_id)
{
/* Update instruction pointer to next instruction. */

View File

@ -72,10 +72,8 @@ void arch_prog_run(struct prog *prog)
doit(prog_entry_arg(prog));
}
#if !IS_ENABLED(CONFIG_SOC_NVIDIA_TEGRA132)
/* Generic stage entry point. Can be overridden by board/SoC if needed. */
__attribute__((weak)) void stage_entry(void)
{
main();
}
#endif

View File

@ -1,128 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <arch/cache.h>
#include <arch/cpu.h>
#include <arch/lib_helpers.h>
#include <arch/mmu.h>
#include <arch/stages.h>
#include <gic.h>
#include "cpu-internal.h"
void __attribute__((weak)) arm64_soc_init(void)
{
/* Default weak implementation does nothing. */
}
static void seed_stack(void)
{
char *stack_begin;
uint64_t *slot;
int i;
int size;
stack_begin = cpu_get_stack();
stack_begin -= CONFIG_STACK_SIZE;
slot = (void *)stack_begin;
/* Pad out 256 bytes for current usage. */
size = CONFIG_STACK_SIZE - 256;
size /= sizeof(*slot);
for (i = 0; i < size; i++)
*slot++ = 0xdeadbeefdeadbeefULL;
}
/* Set up default SCR values. */
static void el3_init(void)
{
uint32_t scr;
if (get_current_el() != EL3)
return;
scr = raw_read_scr_el3();
/* Default to non-secure EL1 and EL0. */
scr &= ~(SCR_NS_MASK);
scr |= SCR_NS_ENABLE;
/* Disable IRQ, FIQ, and external abort interrupt routing. */
scr &= ~(SCR_IRQ_MASK | SCR_FIQ_MASK | SCR_EA_MASK);
scr |= SCR_IRQ_DISABLE | SCR_FIQ_DISABLE | SCR_EA_DISABLE;
/* Enable HVC */
scr &= ~(SCR_HVC_MASK);
scr |= SCR_HVC_ENABLE;
/* Disable SMC */
scr &= ~(SCR_SMC_MASK);
scr |= SCR_SMC_DISABLE;
/* Disable secure instruction fetches. */
scr &= ~(SCR_SIF_MASK);
scr |= SCR_SIF_DISABLE;
/* All lower exception levels 64-bit by default. */
scr &= ~(SCR_RW_MASK);
scr |= SCR_LOWER_AARCH64;
/* Disable secure EL1 access to secure timer. */
scr &= ~(SCR_ST_MASK);
scr |= SCR_ST_DISABLE;
/* Don't trap on WFE or WFI instructions. */
scr &= ~(SCR_TWI_MASK | SCR_TWE_MASK);
scr |= SCR_TWI_DISABLE | SCR_TWE_DISABLE;
raw_write_scr_el3(scr);
isb();
}
void __attribute__((weak)) arm64_arch_timer_init(void)
{
/* Default weak implementation does nothing. */
}
static void arm64_init(void)
{
seed_stack();
/* Set up default SCR values. */
el3_init();
/* Initialize the GIC. */
gic_init();
/*
* Disable coprocessor traps to EL3:
* TCPAC [20] = 0, disable traps for EL2 accesses to CPTR_EL2 or HCPTR
* and EL2/EL1 access to CPACR_EL1.
* TTA [20] = 0, disable traps for trace register access from any EL.
* TFP [10] = 0, disable traps for floating-point instructions from any
* EL.
*/
raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |
CPTR_EL3_TFP_DISABLE);
/*
* Allow FPU accesses:
* FPEN [21:20] = 3, disable traps for floating-point instructions from
* EL0/EL1.
* TTA [28] = 0, disable traps for trace register access from EL0/EL1.
*/
raw_write_cpacr_el1(CPACR_TRAP_FP_DISABLE | CPACR_TTA_DISABLE);
/* Arch Timer init: setup cntfrq per CPU */
arm64_arch_timer_init();
arm64_soc_init();
main();
}
/* This variable holds entry point for CPU starting up. */
void (*c_entry)(void) = &arm64_init;

View File

@ -1,25 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef ARCH_CPU_INTERNAL_H
#define ARCH_CPU_INTERNAL_H
/* Return the top of the stack for the cpu. */
void *cpu_get_stack(void);
/* Return the top of the exception stack for the cpu. */
void *cpu_get_exception_stack(void);
#endif /* ARCH_CPU_INTERNAL_H */

View File

@ -1,21 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2015 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
void arm64_cpu_early_setup(void);
void __attribute__((weak)) arm64_cpu_early_setup(void)
{
/* Default empty implementation */
}

View File

@ -1,19 +0,0 @@
##
## This file is part of the coreboot project.
##
## Copyright (C) 2015 Google Inc
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; version 2 of the License.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
config ARCH_ARM64_CPU_CORTEX_A57
bool
default n
depends on ARCH_ARM64

View File

@ -1,20 +0,0 @@
################################################################################
##
## This file is part of the coreboot project.
##
## Copyright 2015 Google Inc.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; version 2 of the License.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
################################################################################
ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM64),y)
ramstage-$(CONFIG_ARCH_ARM64_CPU_CORTEX_A57) += cortex_a57.S
endif

View File

@ -23,13 +23,8 @@
_ = ASSERT(size % 4K == 0, "TTB size must be divisible by 4K!");
/* ARM64 stacks need 16-byte alignment. */
#if !(IS_ENABLED(CONFIG_SOC_NVIDIA_TEGRA132) || \
IS_ENABLED(CONFIG_SOC_NVIDIA_TEGRA210))
#define STACK(addr, size) \
REGION(stack, addr, size, 16) \
_ = ASSERT(size >= 2K, "stack should be >= 2K, see toolchain.inc");
#else /* Hack around old Tegra stage_entry.S implementation. TODO: remove */
#define STACK(addr, size) REGION(preram_stack, addr, size, 16)
#endif
#endif /* __ARCH_MEMLAYOUT_H */

View File

@ -24,9 +24,4 @@ void stage_entry(void);
void stage_exit(void *);
void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size);
/* This function is called upon initial entry of each stage. It is called prior
* to main(). That means all of the common infrastructure will most likely not
* be available to be used (such as console). */
void arm64_soc_init(void);
#endif

View File

@ -20,18 +20,6 @@
static inline unsigned int smp_processor_id(void) { return 0; }
/*
* The arm64_cpu_startup() initializes CPU's exception stack and regular
* stack as well initializing the C environment for the processor. Finally it
* calls into c_entry.
*/
void arm64_cpu_startup(void);
/*
* The arm64_arch_timer_init() initializes the CPU's cntfrq register of
* ARM arch timer.
*/
void arm64_arch_timer_init(void);
#if !defined(__PRE_RAM__)
struct cpu_driver { };

View File

@ -58,10 +58,10 @@
#define SCR_EA_MASK (1 << SCR_EA_SHIFT)
#define SCR_EA_ENABLE (1 << SCR_EA_SHIFT)
#define SCR_EA_DISABLE (0 << SCR_EA_SHIFT)
#define SCR_SMC_SHIFT 7
#define SCR_SMC_MASK (1 << SCR_SMC_SHIFT)
#define SCR_SMC_DISABLE (1 << SCR_SMC_SHIFT)
#define SCR_SMC_ENABLE (0 << SCR_SMC_SHIFT)
#define SCR_SMD_SHIFT 7
#define SCR_SMD_MASK (1 << SCR_SMD_SHIFT)
#define SCR_SMD_DISABLE (1 << SCR_SMD_SHIFT)
#define SCR_SMD_ENABLE (0 << SCR_SMD_SHIFT)
#define SCR_HVC_SHIFT 8
#define SCR_HVC_MASK (1 << SCR_HVC_SHIFT)
#define SCR_HVC_DISABLE (0 << SCR_HVC_SHIFT)

View File

@ -13,8 +13,10 @@
* GNU General Public License for more details.
*/
#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/transition.h>
#include <assert.h>
#include <console/console.h>
/* Little-endian, No XN-forced, Instr cache disabled,
@ -66,8 +68,6 @@ void transition_with_entry(void *entry, void *arg, struct exc_state *exc_state)
void transition(struct exc_state *exc_state)
{
uint32_t scr_mask;
uint64_t hcr_mask;
uint64_t sctlr;
uint32_t current_el = get_current_el();
@ -89,23 +89,27 @@ void transition(struct exc_state *exc_state)
if (elx->spsr & SPSR_ERET_32)
die("ARM64 Error: Do not support eret to Aarch32\n");
else {
scr_mask = SCR_LOWER_AARCH64;
hcr_mask = HCR_LOWER_AARCH64;
}
/* SCR: Write to SCR if current EL is EL3 */
if (current_el == EL3) {
uint32_t scr = raw_read_scr_el3();
scr |= scr_mask;
raw_write_scr_el3(scr);
}
/* HCR: Write to HCR if current EL is EL2 */
else if (current_el == EL2) {
uint64_t hcr = raw_read_hcr_el2();
hcr |= hcr_mask;
raw_write_hcr_el2(hcr);
}
/* Most parts of coreboot currently don't support EL2 anyway. */
assert(current_el == EL3);
/* Initialize SCR with defaults for running without secure monitor. */
raw_write_scr_el3(SCR_TWE_DISABLE | /* don't trap WFE */
SCR_TWI_DISABLE | /* don't trap WFI */
SCR_ST_ENABLE | /* allow secure timer access */
SCR_LOWER_AARCH64 | /* lower level is AArch64 */
SCR_SIF_DISABLE | /* disable secure ins. fetch */
SCR_HVC_ENABLE | /* allow HVC instruction */
SCR_SMD_ENABLE | /* disable SMC instruction */
SCR_RES1 | /* reserved-1 bits */
SCR_EA_DISABLE | /* disable ext. abort trap */
SCR_FIQ_DISABLE | /* disable FIQ trap to EL3 */
SCR_IRQ_DISABLE | /* disable IRQ trap to EL3 */
SCR_NS_ENABLE); /* lower level is non-secure */
/* Initialize CPTR to not trap anything to EL3. */
raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |
CPTR_EL3_TFP_DISABLE);
/* ELR/SPSR: Write entry point and processor state of program */
raw_write_elr_current(elx->elr);
@ -118,6 +122,7 @@ void transition(struct exc_state *exc_state)
/* SP_ELx: Initialize stack pointer */
raw_write_sp_elx(elx->sp_elx, elx_el);
isb();
/* Eret to the entry point */
trans_switch(regs);

View File

@ -84,6 +84,7 @@ romstage-$(CONFIG_PRIMITIVE_MEMTEST) += primitive_memtest.c
ramstage-$(CONFIG_PRIMITIVE_MEMTEST) += primitive_memtest.c
romstage-$(CONFIG_CACHE_AS_RAM) += ramtest.c
romstage-$(CONFIG_GENERIC_GPIO_LIB) += gpio.c
romstage-y += stack.c
ifeq ($(CONFIG_EARLY_CBMEM_INIT),y)
romstage-$(CONFIG_COLLECT_TIMESTAMPS) += timestamp.c

View File

@ -17,11 +17,6 @@ config SOC_NVIDIA_TEGRA132
if SOC_NVIDIA_TEGRA132
# TODO: Remove after replacing arch/arm64/stage_entry.S
config STACK_SIZE
hex
default 0x1000
config MAINBOARD_DO_DSI_INIT
bool "Use dsi graphics interface"
depends on MAINBOARD_DO_NATIVE_VGA_INIT
@ -36,10 +31,6 @@ config MAINBOARD_DO_SOR_INIT
help
Initialize dp display
config MAX_CPUS
int
default 2
config MTS_DIRECTORY
string "Directory where MTS microcode files are located"
default "3rdparty/blobs/cpu/nvidia/tegra132/current/prod"

View File

@ -90,6 +90,7 @@ ramstage-y += ramstage.c
ramstage-y += mmu_operations.c
ramstage-$(CONFIG_DRIVERS_UART) += uart.c
ramstage-y += ../tegra/usb.c
ramstage-y += stage_entry.S
modules_arm-y += monotonic_timer.c

View File

@ -14,6 +14,7 @@
*/
#include <memlayout.h>
#include <rules.h>
#include <arch/header.ld>
@ -29,7 +30,11 @@ SECTIONS
SRAM_START(0x40000000)
PRERAM_CBMEM_CONSOLE(0x40000000, 8K)
PRERAM_CBFS_CACHE(0x40002000, 84K)
STACK(0x40017000, 14K)
#if ENV_ARM64
STACK(0x40017000, 8K)
#else /* AVP gets a separate stack to avoid any chance of handoff races. */
STACK(0x40019000, 6K)
#endif
TIMESTAMP(0x4001A800, 2K)
BOOTBLOCK(0x4001B800, 22K)
ROMSTAGE(0x40021000, 124K)

View File

@ -14,6 +14,7 @@
*/
#include <memlayout.h>
#include <rules.h>
#include <arch/header.ld>
@ -29,8 +30,12 @@ SECTIONS
SRAM_START(0x40000000)
PRERAM_CBMEM_CONSOLE(0x40000000, 8K)
PRERAM_CBFS_CACHE(0x40002000, 72K)
VBOOT2_WORK(0x40014000, 16K)
STACK(0x40018000, 2K)
VBOOT2_WORK(0x40014000, 12K)
#if ENV_ARM64
STACK(0x40017000, 3K)
#else /* AVP gets a separate stack to avoid any chance of handoff races. */
STACK(0x40017C00, 3K)
#endif
TIMESTAMP(0x40018800, 2K)
BOOTBLOCK(0x40019000, 22K)
VERSTAGE(0x4001e800, 55K)

View File

@ -1,55 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2013 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <arch/asm.h>
ENTRY(maincpu_setup)
/*
* Set the cpu to System mode with IRQ and FIQ disabled. Prefetch/Data
* aborts may happen early and crash before the abort handlers are
* installed, but at least the problem will show up near the code that
* causes it.
*/
msr cpsr, #0xdf
ldr sp, maincpu_stack_pointer
eor lr, lr
ldr r0, maincpu_entry_point
bx r0
ENDPROC(maincpu_setup)
.align 2
.global maincpu_stack_pointer
maincpu_stack_pointer:
.word 0
.global maincpu_entry_point
maincpu_entry_point:
.word 0

View File

@ -14,6 +14,7 @@
*/
#include <arch/stages.h>
#include <gic.h>
#include <soc/addressmap.h>
#include <soc/clock.h>
#include <soc/mc.h>
@ -28,8 +29,14 @@ static void lock_down_vpr(void)
write32(&regs->video_protect_reg_ctrl, 1);
}
void arm64_soc_init(void)
/* Tegra-specific entry point, called from assembly in stage_entry.S */
void ramstage_entry(void);
void ramstage_entry(void)
{
/* TODO: Is this still needed? */
gic_init();
/* TODO: Move TrustZone setup to BL31? */
trustzone_region_init();
tegra132_mmu_init();
@ -40,4 +47,7 @@ void arm64_soc_init(void)
/* Lock down VPR */
lock_down_vpr();
/* Jump to boot state machine in common code. */
main();
}

View File

@ -19,6 +19,7 @@
#include <cbmem.h>
#include <console/cbmem_console.h>
#include <console/console.h>
#include <lib.h>
#include <program_loading.h>
#include <soc/addressmap.h>
#include <soc/ccplex.h>
@ -26,6 +27,7 @@
#include <soc/sdram.h>
#include <soc/sdram_configs.h>
#include <soc/romstage.h>
#include <symbols.h>
#include <timer.h>
#include <timestamp.h>
@ -82,6 +84,9 @@ void romstage(void)
void platform_prog_run(struct prog *prog)
{
/* We'll switch to a new stack, so validate our old one here. */
checkstack(_estack, 0);
ccplex_cpu_start(prog_entry(prog));
clock_halt_avp();

View File

@ -14,12 +14,11 @@
*/
#include <arch/asm.h>
#include <cpu/cortex_a57.h>
ENTRY(arm64_cpu_early_setup)
mrs x0, CPUECTLR_EL1
orr x0, x0, #(1 << SMPEN_SHIFT)
msr CPUECTLR_EL1, x0
isb
ret
ENDPROC(arm64_cpu_early_setup)
ENTRY(stage_entry)
/* Initialize PSTATE, SCTLR and caches to clean state, set up stack. */
bl arm64_init_cpu
/* Jump to Tegra-specific C entry point. */
bl ramstage_entry
ENDPROC(stage_entry)

View File

@ -6,7 +6,6 @@ config SOC_NVIDIA_TEGRA210
select ARCH_VERSTAGE_ARMV4
select ARCH_ROMSTAGE_ARMV4
select ARCH_RAMSTAGE_ARMV8_64
select ARCH_ARM64_CPU_CORTEX_A57
select BOOTBLOCK_CONSOLE
select GIC
select HAVE_MONOTONIC_TIMER
@ -22,11 +21,6 @@ if SOC_NVIDIA_TEGRA210
config CHROMEOS
select CHROMEOS_RAMOOPS_NON_ACPI
# TODO: Remove after replacing arch/arm64/stage_entry.S
config STACK_SIZE
hex
default 0x1000
config MAINBOARD_DO_DSI_INIT
bool "Use dsi graphics interface"
depends on MAINBOARD_DO_NATIVE_VGA_INIT
@ -41,10 +35,6 @@ config MAINBOARD_DO_SOR_INIT
help
Initialize dp display
config MAX_CPUS
int
default 4
choice CONSOLE_SERIAL_TEGRA210_UART_CHOICES
prompt "Serial Console UART"
default CONSOLE_SERIAL_TEGRA210_UARTA

View File

@ -23,7 +23,7 @@
static void enable_core_clocks(int cpu)
{
const uint32_t cpu_clocks[CONFIG_MAX_CPUS] = {
const uint32_t cpu_clocks[] = {
[0] = CRC_RST_CPUG_CLR_CPU0 | CRC_RST_CPUG_CLR_DBG0 |
CRC_RST_CPUG_CLR_CORE0 | CRC_RST_CPUG_CLR_CX0,
[1] = CRC_RST_CPUG_CLR_CPU1 | CRC_RST_CPUG_CLR_DBG1 |

View File

@ -14,6 +14,7 @@
*/
#include <memlayout.h>
#include <rules.h>
#include <arch/header.ld>
@ -29,7 +30,11 @@ SECTIONS
SRAM_START(0x40000000)
PRERAM_CBMEM_CONSOLE(0x40000000, 8K)
PRERAM_CBFS_CACHE(0x40002000, 84K)
STACK(0x40017000, 16K)
#if ENV_ARM64
STACK(0x40017000, 8K)
#else /* AVP gets a separate stack to avoid any chance of handoff races. */
STACK(0x40019000, 8K)
#endif
TIMESTAMP(0x4001B000, 2K)
BOOTBLOCK(0x4001B800, 24K)
ROMSTAGE(0x40022000, 120K)

View File

@ -14,6 +14,7 @@
*/
#include <memlayout.h>
#include <rules.h>
#include <arch/header.ld>
@ -29,8 +30,12 @@ SECTIONS
SRAM_START(0x40000000)
PRERAM_CBMEM_CONSOLE(0x40000000, 8K)
PRERAM_CBFS_CACHE(0x40002000, 36K)
VBOOT2_WORK(0x4000B000, 16K)
STACK(0x4000F000, 2K)
VBOOT2_WORK(0x4000B000, 12K)
#if ENV_ARM64
STACK(0x4000E000, 3K)
#else /* AVP gets a separate stack to avoid any chance of handoff races. */
STACK(0x4000EC00, 3K)
#endif
TIMESTAMP(0x4000F800, 2K)
BOOTBLOCK(0x40010000, 28K)
VERSTAGE(0x40017000, 64K)

View File

@ -1,55 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2013 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <arch/asm.h>
ENTRY(maincpu_setup)
/*
* Set the cpu to System mode with IRQ and FIQ disabled. Prefetch/Data
* aborts may happen early and crash before the abort handlers are
* installed, but at least the problem will show up near the code that
* causes it.
*/
msr cpsr, #0xdf
ldr sp, maincpu_stack_pointer
eor lr, lr
ldr r0, maincpu_entry_point
bx r0
ENDPROC(maincpu_setup)
.align 2
.global maincpu_stack_pointer
maincpu_stack_pointer:
.word 0
.global maincpu_entry_point
maincpu_entry_point:
.word 0

View File

@ -16,12 +16,13 @@
#include <arch/clock.h>
#include <arch/cpu.h>
#include <arch/stages.h>
#include <gic.h>
#include <soc/addressmap.h>
#include <soc/clock.h>
#include <soc/mmu_operations.h>
#include <soc/mtc.h>
void arm64_arch_timer_init(void)
static void arm64_arch_timer_init(void)
{
uint32_t freq = clock_get_osc_khz() * 1000;
// Set the cntfrq register.
@ -47,11 +48,20 @@ static void mselect_enable_wrap(void)
write32((void *)TEGRA_MSELECT_CONFIG, reg);
}
void arm64_soc_init(void)
/* Tegra-specific entry point, called from assembly in stage_entry.S */
void ramstage_entry(void);
void ramstage_entry(void)
{
/* TODO: Is this still needed? */
gic_init();
/* TODO: Move arch timer setup to BL31? */
arm64_arch_timer_init();
/* Enable WRAP to INCR burst type conversion in MSELECT */
mselect_enable_wrap();
/* TODO: Move TrustZone setup to BL31? */
trustzone_region_init();
tegra210_mmu_init();
@ -60,4 +70,7 @@ void arm64_soc_init(void)
if (tegra210_run_mtc() != 0)
printk(BIOS_ERR, "MTC: No training data.\n");
/* Jump to boot state machine in common code. */
main();
}

View File

@ -19,6 +19,7 @@
#include <cbmem.h>
#include <console/cbmem_console.h>
#include <console/console.h>
#include <lib.h>
#include <program_loading.h>
#include <soc/addressmap.h>
#include <soc/ccplex.h>
@ -27,6 +28,7 @@
#include <soc/sdram_configs.h>
#include <soc/romstage.h>
#include <soc/nvidia/tegra/apbmisc.h>
#include <symbols.h>
#include <timer.h>
#include <timestamp.h>
#include <vendorcode/google/chromeos/chromeos.h>
@ -86,6 +88,9 @@ void romstage(void)
void platform_prog_run(struct prog *prog)
{
/* We'll switch to a new stack, so validate our old one here. */
checkstack(_estack, 0);
ccplex_cpu_start(prog_entry(prog));
clock_halt_avp();

View File

@ -82,10 +82,10 @@
ENTRY(stage_entry)
t210_a57_fixup
b arm64_cpu_startup
ENDPROC(stage_entry)
ENTRY(tegra210_reset_handler)
t210_a57_fixup
b arm64_cpu_startup_resume
ENDPROC(tegra210_reset_handler)
/* Initialize PSTATE, SCTLR and caches to clean state, set up stack. */
bl arm64_init_cpu
/* Jump to Tegra-specific C entry point. */
bl ramstage_entry
ENDPROC(stage_entry)