When we first created the arm64 port, we weren't quite sure whether
coreboot would always run in EL3 on all platforms. The AArch64 ARM
(Architecture Reference Manual) technically considers this exception
level optional, but in practice all SoCs seem to support it. We have
since accumulated a lot of code that implicitly or explicitly hardcodes
the assumption of executing in EL3 somewhere, so coreboot wouldn't work
on a system that tried to enter it in EL1/EL2 right now anyway.

However, some of our low-level support libraries (in particular those
for accessing architectural registers) still have provisions for
running at different exception levels built in, and often use switch
statements over the current exception level to decide which register to
access. This adds an unnecessarily large amount of code for what should
be single-instruction operations and precludes further optimization via
inlining.

This patch removes any remaining code that dynamically depends on the
current exception level and makes the assumption that coreboot executes
at EL3 official. If this ever needs to change for a future platform, it
would probably be cleaner to set the expected exception level in a
Kconfig option rather than always probing it at runtime.

Change-Id: I1a9fb9b4227bd15a013080d1c7eabd48515fdb67
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/27880
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
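To illustrate the pattern being removed, here is a hypothetical sketch
(modeled on the register accessors in lib_helpers, not the exact
removed source; get_current_el() and read_sctlr_current() are assumed
names): dispatching on CurrentEL at runtime turns what is one MRS
instruction per level into an out-of-line function with a switch.

	/* Hypothetical sketch of the removed pattern: a runtime switch
	 * over the current exception level. */
	static uint64_t read_sctlr_current(void)
	{
		switch (get_current_el()) {	/* reads CurrentEL */
		case EL1:
			return raw_read_sctlr_el1();
		case EL2:
			return raw_read_sctlr_el2();
		case EL3:
			return raw_read_sctlr_el3();
		default:
			return 0;	/* unreachable on a sane CPU */
		}
	}

With EL3 assumed, callers use raw_read_sctlr_el3() directly, which can
inline down to a single `mrs` instruction.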
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/mmu.h>
#include <arch/transition.h>
#include <assert.h>
#include <compiler.h>
#include <console/console.h>

/* Little-endian, no XN-forced, instruction cache disabled,
 * stack alignment disabled, data and unified cache
 * disabled, alignment check disabled, MMU disabled
 */
#define SCTLR_MASK (SCTLR_MMU_DISABLE | SCTLR_ACE_DISABLE | \
		    SCTLR_CACHE_DISABLE | SCTLR_SAE_DISABLE | SCTLR_RES1 | \
		    SCTLR_ICE_DISABLE | SCTLR_WXN_DISABLE | SCTLR_LITTLE_END)

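/*
 * exc_dispatch() is declared __weak so that platform or payload code
 * can override it at link time with a real exception handler.
 */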
void __weak exc_dispatch(struct exc_state *exc_state, uint64_t id)
{
	/* Default weak implementation does nothing. */
}

void exc_entry(struct exc_state *exc_state, uint64_t id)
{
	struct elx_state *elx = &exc_state->elx;
	struct regs *regs = &exc_state->regs;
	uint8_t elx_mode;

	elx->spsr = raw_read_spsr_el3();
	elx_mode = get_mode_from_spsr(elx->spsr);

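	/*
	 * Recover the interrupted context's stack pointer: if the
	 * exception came from handler mode (using SP_ELx), the
	 * pre-exception SP sits directly above the exc_state frame the
	 * vector stub pushed; otherwise it is still held in SP_EL0.
	 */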
	if (elx_mode == SPSR_USE_H)
		regs->sp = (uint64_t)&exc_state[1];
	else
		regs->sp = raw_read_sp_el0();

	elx->elr = raw_read_elr_el3();

	exc_dispatch(exc_state, id);
}

void transition_to_el2(void *entry, void *arg, uint64_t spsr)
{
	struct exc_state exc_state;
	struct elx_state *elx = &exc_state.elx;
	struct regs *regs = &exc_state.regs;
	uint32_t sctlr;

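	/* Stage the entry point, its argument (passed in x0, the first
	 * argument register in the AAPCS64 calling convention) and the
	 * target processor state for the eret below. */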
	regs->x[X0_INDEX] = (uint64_t)arg;
	elx->elr = (uint64_t)entry;
	elx->spsr = spsr;

	/*
	 * Policies enforced:
	 * 1. We support only transitions to EL2.
	 * 2. We support transitions to AArch64 mode only.
	 *
	 * If either of these assumptions ever fails to hold, we need a
	 * proper way to update SCR/HCR before removing the checks below.
	 */
	assert(get_el_from_spsr(spsr) == EL2 && !(spsr & SPSR_ERET_32));

	/* Initialize SCR with defaults for running without secure monitor. */
	raw_write_scr_el3(SCR_TWE_DISABLE |	/* don't trap WFE */
			  SCR_TWI_DISABLE |	/* don't trap WFI */
			  SCR_ST_ENABLE |	/* allow secure timer access */
			  SCR_LOWER_AARCH64 |	/* lower level is AArch64 */
			  SCR_SIF_DISABLE |	/* disable secure ins. fetch */
			  SCR_HVC_ENABLE |	/* allow HVC instruction */
			  SCR_SMD_ENABLE |	/* disable SMC instruction */
			  SCR_RES1 |		/* reserved-1 bits */
			  SCR_EA_DISABLE |	/* disable ext. abort trap */
			  SCR_FIQ_DISABLE |	/* disable FIQ trap to EL3 */
			  SCR_IRQ_DISABLE |	/* disable IRQ trap to EL3 */
			  SCR_NS_ENABLE);	/* lower level is non-secure */

	/* Initialize CPTR to not trap anything to EL3. */
	raw_write_cptr_el3(CPTR_EL3_TCPAC_DISABLE | CPTR_EL3_TTA_DISABLE |
			   CPTR_EL3_TFP_DISABLE);

	/* ELR/SPSR: Write entry point and processor state of program */
	raw_write_elr_el3(elx->elr);
	raw_write_spsr_el3(elx->spsr);

	/* SCTLR: Initialize EL2 with selected properties */
	sctlr = raw_read_sctlr_el2();
	sctlr &= SCTLR_MASK;
	raw_write_sctlr_el2(sctlr);

	/* SP_ELx: Initialize stack pointer */
	raw_write_sp_el2(elx->sp_elx);

	/* Payloads expect to be entered with MMU disabled. Includes an ISB. */
	mmu_disable();

	/* Eret to the entry point */
	trans_switch(regs);
}
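For context, a minimal usage sketch (hypothetical caller, not part of
this file; get_eret_el() and SPSR_USE_L are assumed to come from
arch/transition.h alongside the SPSR_USE_H flag used above): the boot
path builds the target PSTATE and hands transition_to_el2() the payload
entry point and its argument.

	/* Hypothetical example caller -- not part of transition.c. */
	static void run_payload_example(void (*doit)(void *), void *arg)
	{
		/* Target EL2, SP_EL0 stack selection, AArch64 state. */
		uint64_t payload_spsr = get_eret_el(EL2, SPSR_USE_L);

		transition_to_el2((void *)doit, arg, payload_spsr);
		/* Does not return: trans_switch() erets into doit(arg). */
	}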