arm64: Drop checks for current exception level, hardcode EL3 assumption
When we first created the arm64 port, we weren't quite sure whether coreboot would always run in EL3 on all platforms. The AArch64 A.R.M. (Architecture Reference Manual) technically considers this exception level optional, but in practice all SoCs seem to support it. We have since accumulated a lot of code that already hardcodes an implicit or explicit assumption of executing in EL3 somewhere, so coreboot wouldn't work on a system that tries to enter it in EL1/2 right now anyway.

However, some of our low-level support libraries (in particular those for accessing architectural registers) still have provisions for running at different exception levels built in, and often use switch statements over the current exception level to decide which register to access. This pulls in an unnecessarily large amount of code for what should be single-instruction operations, and it precludes further optimization via inlining.

This patch removes the remaining code that dynamically depends on the current exception level and makes the assumption that coreboot executes at EL3 official. If this ever needs to change for a future platform, it would probably be cleaner to set the expected exception level through a Kconfig option rather than always probing it at runtime.

Change-Id: I1a9fb9b4227bd15a013080d1c7eabd48515fdb67
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/27880
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
parent 73be9dd82c
commit 0c5f61a01c
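For readers unfamiliar with the pattern being dropped, below is a minimal, host-runnable sketch of the idea (this is not coreboot code: the stubbed register reads, the simplified EL constants and the standalone main() are assumptions made only for this illustration). It contrasts the switch-over-current-EL accessor that the SWITCH_CASE_READ macro used to generate with the direct EL3 accessor that the tree now calls:

#include <stdint.h>
#include <stdio.h>

#define EL1 1
#define EL2 2
#define EL3 3

/* Stand-ins for the single-instruction MRS accessors; on real hardware each
 * of these is one "mrs" of SCTLR_ELx. Stubbed here so the sketch runs on a
 * host machine. */
static uint32_t raw_read_sctlr_el1(void) { return 0x1; }
static uint32_t raw_read_sctlr_el2(void) { return 0x2; }
static uint32_t raw_read_sctlr_el3(void) { return 0x3; }

/* Before the patch: a "_current" accessor dispatched on the exception level
 * probed at runtime, pulling in all three ELx variants and preventing the
 * compiler from reducing the access to a single inlined instruction. */
static uint32_t raw_read_sctlr_current(uint32_t current_el)
{
	switch (current_el) {
	case EL1:
		return raw_read_sctlr_el1();
	case EL2:
		return raw_read_sctlr_el2();
	case EL3:
		return raw_read_sctlr_el3();
	}
	return (uint32_t)-1;
}

int main(void)
{
	/* After the patch: callers use the EL3 accessor directly. */
	printf("switch-based read: %u\n", (unsigned)raw_read_sctlr_current(EL3));
	printf("direct EL3 read:   %u\n", (unsigned)raw_read_sctlr_el3());
	return 0;
}

On actual hardware the direct variant is a single mrs instruction that the compiler can inline at the call site, which is the code-size and optimization argument made in the commit message above.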
@@ -40,7 +40,7 @@
 void tlb_invalidate_all(void)
 {
 	/* TLBIALL includes dTLB and iTLB on systems that have them. */
-	tlbiall_current();
+	tlbiall_el3();
 	dsb();
 	isb();
 }

@@ -124,7 +124,7 @@ void dcache_invalidate_by_mva(void const *addr, size_t len)
  */
 void arch_segment_loaded(uintptr_t start, size_t size, int flags)
 {
-	uint32_t sctlr = raw_read_sctlr_current();
+	uint32_t sctlr = raw_read_sctlr_el3();
 	if (sctlr & SCTLR_C)
 		dcache_clean_by_mva((void *)start, size);
 	else if (sctlr & SCTLR_I)

@@ -79,9 +79,9 @@ static void print_regs(struct exc_state *exc_state)
 	struct regs *regs = &exc_state->regs;
 
 	printk(BIOS_DEBUG, "ELR = 0x%016llx ESR = 0x%08x\n",
-	       elx->elr, raw_read_esr_current());
+	       elx->elr, raw_read_esr_el3());
 	printk(BIOS_DEBUG, "FAR = 0x%016llx SPSR = 0x%08x\n",
-	       raw_read_far_current(), raw_read_spsr_current());
+	       raw_read_far_el3(), raw_read_spsr_el3());
 	for (i = 0; i < 30; i += 2) {
 		printk(BIOS_DEBUG,
 		       "X%02d = 0x%016llx X%02d = 0x%016llx\n",

@@ -188,7 +188,7 @@ static int test_exception_handler(struct exc_state *state, uint64_t vector_id)
 {
 	/* Update instruction pointer to next instrution. */
 	state->elx.elr += sizeof(uint32_t);
-	raw_write_elr_current(state->elx.elr);
+	raw_write_elr_el3(state->elx.elr);
 	return EXC_RET_HANDLED;
 }
 

@@ -32,12 +32,6 @@ uint32_t raw_read_current_el(void)
 	return current_el;
 }
 
-uint32_t get_current_el(void)
-{
-	uint32_t current_el = raw_read_current_el();
-	return ((current_el >> CURRENT_EL_SHIFT) & CURRENT_EL_MASK);
-}
-
 /* DAIF */
 uint32_t raw_read_daif(void)
 {

@@ -164,28 +158,6 @@ void raw_write_elr_el3(uint64_t elr_el3)
 	__asm__ __volatile__("msr ELR_EL3, %0\n\t" : : "r" (elr_el3) : "memory");
 }
 
-uint64_t raw_read_elr_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_elr(el);
-}
-
-void raw_write_elr_current(uint64_t elr)
-{
-	uint32_t el = get_current_el();
-	raw_write_elr(elr, el);
-}
-
-uint64_t raw_read_elr(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_elr, elr, uint64_t, el);
-}
-
-void raw_write_elr(uint64_t elr, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_elr, elr, el);
-}
-
 /* FPCR */
 uint32_t raw_read_fpcr(void)
 {

@@ -320,16 +292,6 @@ void raw_write_sp_el3(uint64_t sp_el3)
 	raw_write_spsel(spsel);
 }
 
-uint64_t raw_read_sp_elx(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_sp, sp, uint64_t, el);
-}
-
-void raw_write_sp_elx(uint64_t sp_elx, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_sp, sp_elx, el);
-}
-
 /* SPSR */
 uint32_t raw_read_spsr_abt(void)
 {

@@ -387,28 +349,6 @@ void raw_write_spsr_el3(uint32_t spsr_el3)
 	__asm__ __volatile__("msr SPSR_EL3, %0\n\t" : : "r" ((uint64_t)spsr_el3) : "memory");
 }
 
-uint32_t raw_read_spsr_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_spsr(el);
-}
-
-void raw_write_spsr_current(uint32_t spsr)
-{
-	uint32_t el = get_current_el();
-	raw_write_spsr(spsr, el);
-}
-
-uint32_t raw_read_spsr(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_spsr, spsr, uint32_t, el);
-}
-
-void raw_write_spsr(uint32_t spsr, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_spsr, spsr, el);
-}
-
 uint32_t raw_read_spsr_fiq(void)
 {
 	uint64_t spsr_fiq;

@@ -65,28 +65,6 @@ void raw_write_actlr_el3(uint32_t actlr_el3)
 	__asm__ __volatile__("msr ACTLR_EL3, %0\n\t" : : "r" ((uint64_t)actlr_el3) : "memory");
 }
 
-uint32_t raw_read_actlr_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_actlr(el);
-}
-
-void raw_write_actlr_current(uint32_t actlr)
-{
-	uint32_t el = get_current_el();
-	raw_write_actlr(actlr, el);
-}
-
-uint32_t raw_read_actlr(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_actlr, actlr, uint32_t, el);
-}
-
-void raw_write_actlr(uint32_t actlr, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_actlr, actlr, el);
-}
-
 /* AFSR0 */
 uint32_t raw_read_afsr0_el1(void)
 {

@@ -130,28 +108,6 @@ void raw_write_afsr0_el3(uint32_t afsr0_el3)
 	__asm__ __volatile__("msr AFSR0_EL3, %0\n\t" : : "r" ((uint64_t)afsr0_el3) : "memory");
 }
 
-uint32_t raw_read_afsr0_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_afsr0(el);
-}
-
-void raw_write_afsr0_current(uint32_t afsr0)
-{
-	uint32_t el = get_current_el();
-	raw_write_afsr0(afsr0, el);
-}
-
-uint32_t raw_read_afsr0(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_afsr0, afsr0, uint32_t, el);
-}
-
-void raw_write_afsr0(uint32_t afsr0, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_afsr0, afsr0, el);
-}
-
 /* AFSR1 */
 uint32_t raw_read_afsr1_el1(void)
 {

@@ -195,28 +151,6 @@ void raw_write_afsr1_el3(uint32_t afsr1_el3)
 	__asm__ __volatile__("msr AFSR1_EL3, %0\n\t" : : "r" ((uint64_t)afsr1_el3) : "memory");
 }
 
-uint32_t raw_read_afsr1_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_afsr1(el);
-}
-
-void raw_write_afsr1_current(uint32_t afsr1)
-{
-	uint32_t el = get_current_el();
-	raw_write_afsr1(afsr1, el);
-}
-
-uint32_t raw_read_afsr1(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_afsr1, afsr1, uint32_t, el);
-}
-
-void raw_write_afsr1(uint32_t afsr1, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_afsr1, afsr1, el);
-}
-
 /* AIDR */
 uint32_t raw_read_aidr_el1(void)
 {

@@ -270,28 +204,6 @@ void raw_write_amair_el3(uint64_t amair_el3)
 	__asm__ __volatile__("msr AMAIR_EL3, %0\n\t" : : "r" (amair_el3) : "memory");
 }
 
-uint64_t raw_read_amair_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_amair(el);
-}
-
-void raw_write_amair_current(uint64_t amair)
-{
-	uint32_t el = get_current_el();
-	raw_write_amair(amair, el);
-}
-
-uint64_t raw_read_amair(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_amair, amair, uint64_t, el);
-}
-
-void raw_write_amair(uint64_t amair, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_amair, amair, el);
-}
-
 /* CCSIDR */
 uint32_t raw_read_ccsidr_el1(void)
 {

@@ -424,28 +336,6 @@ void raw_write_esr_el3(uint32_t esr_el3)
 	__asm__ __volatile__("msr ESR_EL3, %0\n\t" : : "r" ((uint64_t)esr_el3) : "memory");
 }
 
-uint32_t raw_read_esr_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_esr(el);
-}
-
-void raw_write_esr_current(uint32_t esr)
-{
-	uint32_t el = get_current_el();
-	raw_write_esr(esr, el);
-}
-
-uint32_t raw_read_esr(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_esr, esr, uint32_t, el);
-}
-
-void raw_write_esr(uint32_t esr, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_esr, esr, el);
-}
-
 /* FAR */
 uint64_t raw_read_far_el1(void)
 {

@@ -489,28 +379,6 @@ void raw_write_far_el3(uint64_t far_el3)
 	__asm__ __volatile__("msr FAR_EL3, %0\n\t" : : "r" (far_el3) : "memory");
 }
 
-uint64_t raw_read_far_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_far(el);
-}
-
-void raw_write_far_current(uint64_t far)
-{
-	uint32_t el = get_current_el();
-	raw_write_far(far, el);
-}
-
-uint64_t raw_read_far(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_far, far, uint64_t, el);
-}
-
-void raw_write_far(uint64_t far, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_far, far, el);
-}
-
 /* HCR */
 uint64_t raw_read_hcr_el2(void)
 {

@@ -579,28 +447,6 @@ void raw_write_mair_el3(uint64_t mair_el3)
 	__asm__ __volatile__("msr MAIR_EL3, %0\n\t" : : "r" (mair_el3) : "memory");
 }
 
-uint64_t raw_read_mair_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_mair(el);
-}
-
-void raw_write_mair_current(uint64_t mair)
-{
-	uint32_t el = get_current_el();
-	raw_write_mair(mair, el);
-}
-
-uint64_t raw_read_mair(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_mair, mair, uint64_t, el);
-}
-
-void raw_write_mair(uint64_t mair, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_mair, mair, el);
-}
-
 /* MIDR */
 uint32_t raw_read_midr_el1(void)
 {

@@ -664,28 +510,6 @@ void raw_write_rmr_el3(uint32_t rmr_el3)
 	__asm__ __volatile__("msr RMR_EL3, %0\n\t" : : "r" ((uint64_t)rmr_el3) : "memory");
 }
 
-uint32_t raw_read_rmr_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_rmr(el);
-}
-
-void raw_write_rmr_current(uint32_t rmr)
-{
-	uint32_t el = get_current_el();
-	raw_write_rmr(rmr, el);
-}
-
-uint32_t raw_read_rmr(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_rmr, rmr, uint32_t, el);
-}
-
-void raw_write_rmr(uint32_t rmr, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_rmr, rmr, el);
-}
-
 /* RVBAR */
 uint64_t raw_read_rvbar_el1(void)
 {

@@ -729,28 +553,6 @@ void raw_write_rvbar_el3(uint64_t rvbar_el3)
 	__asm__ __volatile__("msr RVBAR_EL3, %0\n\t" : : "r" (rvbar_el3) : "memory");
 }
 
-uint64_t raw_read_rvbar_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_rvbar(el);
-}
-
-void raw_write_rvbar_current(uint64_t rvbar)
-{
-	uint32_t el = get_current_el();
-	raw_write_rvbar(rvbar, el);
-}
-
-uint64_t raw_read_rvbar(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_rvbar, rvbar, uint64_t, el);
-}
-
-void raw_write_rvbar(uint64_t rvbar, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_rvbar, rvbar, el);
-}
-
 /* Scr */
 uint32_t raw_read_scr_el3(void)
 {

@@ -809,28 +611,6 @@ void raw_write_sctlr_el3(uint32_t sctlr_el3)
 	__asm__ __volatile__("msr SCTLR_EL3, %0\n\t" : : "r" ((uint64_t)sctlr_el3) : "memory");
 }
 
-uint32_t raw_read_sctlr_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_sctlr(el);
-}
-
-void raw_write_sctlr_current(uint32_t sctlr)
-{
-	uint32_t el = get_current_el();
-	raw_write_sctlr(sctlr, el);
-}
-
-uint32_t raw_read_sctlr(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_sctlr, sctlr, uint32_t, el);
-}
-
-void raw_write_sctlr(uint32_t sctlr, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_sctlr, sctlr, el);
-}
-
 /* TCR */
 uint64_t raw_read_tcr_el1(void)
 {

@@ -874,34 +654,6 @@ void raw_write_tcr_el3(uint32_t tcr_el3)
 	__asm__ __volatile__("msr TCR_EL3, %0\n\t" : : "r" ((uint64_t)tcr_el3) : "memory");
 }
 
-
-/*
- * IMPORTANT: TCR_EL1 is 64-bit whereas TCR_EL2 and TCR_EL3 are 32-bit. Thus,
- * 64-bit is used to read/write for tcr_current. tcr_el2 and tcr_el3 handle them
- * with appropriate 32-bit types.
- */
-uint64_t raw_read_tcr_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_tcr(el);
-}
-
-void raw_write_tcr_current(uint64_t tcr)
-{
-	uint32_t el = get_current_el();
-	raw_write_tcr(tcr, el);
-}
-
-uint64_t raw_read_tcr(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_tcr, tcr, uint64_t, el);
-}
-
-void raw_write_tcr(uint64_t tcr, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_tcr, tcr, el);
-}
-
 /* TTBR0 */
 uint64_t raw_read_ttbr0_el1(void)
 {

@@ -945,28 +697,6 @@ void raw_write_ttbr0_el3(uint64_t ttbr0_el3)
 	__asm__ __volatile__("msr TTBR0_EL3, %0\n\t" : : "r" (ttbr0_el3) : "memory");
 }
 
-uint64_t raw_read_ttbr0_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_ttbr0(el);
-}
-
-void raw_write_ttbr0_current(uint64_t ttbr0)
-{
-	uint32_t el = get_current_el();
-	raw_write_ttbr0(ttbr0, el);
-}
-
-uint64_t raw_read_ttbr0(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_ttbr0, ttbr0, uint64_t, el);
-}
-
-void raw_write_ttbr0(uint64_t ttbr0, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_ttbr0, ttbr0, el);
-}
-
 /* TTBR1 */
 uint64_t raw_read_ttbr1_el1(void)
 {

@@ -1025,28 +755,6 @@ void raw_write_vbar_el3(uint64_t vbar_el3)
 	__asm__ __volatile__("msr VBAR_EL3, %0\n\t" : : "r" (vbar_el3) : "memory");
 }
 
-uint64_t raw_read_vbar_current(void)
-{
-	uint32_t el = get_current_el();
-	return raw_read_vbar(el);
-}
-
-void raw_write_vbar_current(uint64_t vbar)
-{
-	uint32_t el = get_current_el();
-	raw_write_vbar(vbar, el);
-}
-
-uint64_t raw_read_vbar(uint32_t el)
-{
-	SWITCH_CASE_READ(raw_read_vbar, vbar, uint64_t, el);
-}
-
-void raw_write_vbar(uint64_t vbar, uint32_t el)
-{
-	SWITCH_CASE_WRITE(raw_write_vbar, vbar, el);
-}
-
 uint32_t raw_read_cntfrq_el0(void)
 {
 	uint64_t cntfrq_el0;

@@ -37,17 +37,6 @@ void tlbiall_el3(void)
 	__asm__ __volatile__("tlbi alle3\n\t" : : : "memory");
 }
 
-void tlbiall_current(void)
-{
-	uint32_t el = get_current_el();
-	tlbiall(el);
-}
-
-void tlbiall(uint32_t el)
-{
-	SWITCH_CASE_TLBI(tlbiall, el);
-}
-
 /* TLBIALLIS */
 void tlbiallis_el1(void)
 {

@@ -64,17 +53,6 @@ void tlbiallis_el3(void)
 	__asm__ __volatile__("tlbi alle3is\n\t" : : : "memory");
 }
 
-void tlbiallis_current(void)
-{
-	uint32_t el = get_current_el();
-	tlbiallis(el);
-}
-
-void tlbiallis(uint32_t el)
-{
-	SWITCH_CASE_TLBI(tlbiallis, el);
-}
-
 /* TLBIVAA */
 void tlbivaa_el1(uint64_t va)
 {

@@ -242,7 +242,7 @@ void mmu_config_range(void *start, size_t size, uint64_t tag)
 
 	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
 	dsb();
-	tlbiall_current();
+	tlbiall_el3();
 	dsb();
 	isb();
 }

@@ -36,23 +36,8 @@ static void run_payload(struct prog *prog)
 
 	if (IS_ENABLED(CONFIG_ARM64_USE_ARM_TRUSTED_FIRMWARE))
 		arm_tf_run_bl31((u64)doit, (u64)arg, payload_spsr);
-	else {
-		uint8_t current_el = get_current_el();
-
-		printk(BIOS_SPEW, "entry = %p\n", doit);
-
-		/* If current EL is not EL3, jump to payload at same EL. */
-		if (current_el != EL3)
-			doit(arg);
-		else {
-			/* If current EL is EL3, we transition to payload in EL2. */
-			struct exc_state exc_state;
-			memset(&exc_state, 0, sizeof(exc_state));
-			exc_state.elx.spsr = payload_spsr;
-
-			transition_with_entry(doit, arg, &exc_state);
-		}
-	}
+	else
+		transition_to_el2(doit, arg, payload_spsr);
 }
 
 void arch_prog_run(struct prog *prog)

@@ -163,16 +163,10 @@ static inline uint8_t get_mode_from_spsr(uint64_t spsr)
  */
 
 /*
- * User of transition library can make a call to transition_with_entry and pass
- * the entry point and its argument which are put into elr and x0 by this
- * function. After that it makes a call to transition.
+ * Transitions to EL2 with given entry point and argument in X0. SPSR can be
+ * partially configured, but the exception level given must be EL2.
  */
-void transition_with_entry(void *entry, void *arg, struct exc_state *exc_state);
-/*
- * transition function sets up all the registers as per the struct elx_state
- * before jumping to trans_switch.
- */
-void transition(struct exc_state *exc_state);
+void transition_to_el2(void *entry, void *arg, uint64_t spsr);
 
 /*
  * exc_exit it called while returning from an exception. It expects pointer to

@@ -136,210 +136,17 @@
 #define CPACR_TRAP_FP_EL0 (1 << CPACR_FPEN_SHIFT)
 #define CPACR_TRAP_FP_DISABLE (3 << CPACR_FPEN_SHIFT)
 
-#ifdef __ASSEMBLER__
-
-/* Macro to switch to label based on current el */
-.macro switch_el xreg label1 label2 label3
-	mrs	\xreg, CurrentEL
-	/* Currently at EL1 */
-	cmp	\xreg, #(EL1 << CURRENT_EL_SHIFT)
-	b.eq	\label1
-	/* Currently at EL2 */
-	cmp	\xreg, #(EL2 << CURRENT_EL_SHIFT)
-	b.eq	\label2
-	/* Currently at EL3 */
-	cmp	\xreg, #(EL3 << CURRENT_EL_SHIFT)
-	b.eq	\label3
-.endm
-
-/* Macro to read sysreg at current EL
-   xreg - reg in which read value needs to be stored
-   sysreg - system reg that is to be read
-*/
-.macro read_current xreg sysreg
-	switch_el \xreg, 101f, 102f, 103f
-101:
-	mrs	\xreg, \sysreg\()_el1
-	b	104f
-102:
-	mrs	\xreg, \sysreg\()_el2
-	b	104f
-103:
-	mrs	\xreg, \sysreg\()_el3
-	b	104f
-104:
-.endm
-
-/* Macro to write sysreg at current EL
-   xreg - reg from which value needs to be written
-   sysreg - system reg that is to be written
-   temp - temp reg that can be used to read current EL
-*/
-.macro write_current sysreg xreg temp
-	switch_el \temp, 101f, 102f, 103f
-101:
-	msr	\sysreg\()_el1, \xreg
-	b	104f
-102:
-	msr	\sysreg\()_el2, \xreg
-	b	104f
-103:
-	msr	\sysreg\()_el3, \xreg
-	b	104f
-104:
-.endm
-
-/* Macro to read sysreg at current EL - 1
-   xreg - reg in which read value needs to be stored
-   sysreg - system reg that is to be read
-*/
-.macro read_lower xreg sysreg
-	switch_el \xreg, 101f, 102f, 103f
-101:
-	b	104f
-102:
-	mrs	\xreg, \sysreg\()_el1
-	b	104f
-103:
-	mrs	\xreg, \sysreg\()_el2
-	b	104f
-104:
-.endm
-
-/* Macro to write sysreg at current EL - 1
-   xreg - reg from which value needs to be written
-   sysreg - system reg that is to be written
-   temp - temp reg that can be used to read current EL
-*/
-.macro write_lower sysreg xreg temp
-	switch_el \temp, 101f, 102f, 103f
-101:
-	b	104f
-102:
-	msr	\sysreg\()_el1, \xreg
-	b	104f
-103:
-	msr	\sysreg\()_el2, \xreg
-	b	104f
-104:
-.endm
-
-/* Macro to read from a register at EL3 only if we are currently at that
-   level. This is required to ensure that we do not attempt to read registers
-   from a level lower than el3. e.g. SCR is available for read only at EL3.
-   IMPORTANT: if EL != EL3, macro silently doesn't perform the read.
-*/
-.macro read_el3 xreg sysreg
-	switch_el \xreg, 402f, 402f, 401f
-401:
-	mrs	\xreg, \sysreg\()_el3
-402:
-.endm
-
-/* Macro to write to a register at EL3 only if we are currently at that
-   level. This is required to ensure that we do not attempt to write to
-   registers from a level lower than el3. e.g. SCR is available to write only at
-   EL3.
-   IMPORTANT: if EL != EL3, macro silently doesn't perform the write.
-*/
-.macro write_el3 sysreg xreg temp
-	switch_el \temp, 402f, 402f, 401f
-401:
-	msr	\sysreg\()_el3, \xreg
-402:
-.endm
-
-/* Macro to read from an el1 register */
-.macro read_el1 xreg sysreg
-	mrs	\xreg, \sysreg\()_el1
-.endm
-
-/* Macro to write to an el1 register */
-.macro write_el1 sysreg xreg temp
-	msr	\sysreg\()_el1, \xreg
-.endm
-
-/* Macro to read from an el0 register */
-.macro read_el0 xreg sysreg
-	mrs	\xreg, \sysreg\()_el0
-.endm
-
-/* Macro to write to an el0 register */
-.macro write_el0 sysreg xreg temp
-	msr	\sysreg\()_el0, \xreg
-.endm
-
-/* Macro to invalidate all stage 1 TLB entries for current EL */
-.macro tlbiall_current temp
-	switch_el \temp, 401f, 402f, 403f
-401:
-	tlbi	alle1
-	b	404f
-402:
-	tlbi	alle2
-	b	404f
-403:
-	tlbi	alle3
-	b	404f
-404:
-.endm
-
-#else
-
-#include <stdint.h>
-
 #define DAIF_DBG_BIT (1<<3)
 #define DAIF_ABT_BIT (1<<2)
 #define DAIF_IRQ_BIT (1<<1)
 #define DAIF_FIQ_BIT (1<<0)
 
-#define SWITCH_CASE_READ(func, var, type, el) do { \
-	type var = -1; \
-	switch(el) { \
-	case EL1: \
-		var = func##_el1(); \
-		break; \
-	case EL2: \
-		var = func##_el2(); \
-		break; \
-	case EL3: \
-		var = func##_el3(); \
-		break; \
-	} \
-	return var; \
-} while (0)
+#ifndef __ASSEMBLER__
 
-#define SWITCH_CASE_WRITE(func, var, el) do { \
-	switch(el) { \
-	case EL1: \
-		func##_el1(var); \
-		break; \
-	case EL2: \
-		func##_el2(var); \
-		break; \
-	case EL3: \
-		func##_el3(var); \
-		break; \
-	} \
-} while (0)
-
-#define SWITCH_CASE_TLBI(func, el) do { \
-	switch(el) { \
-	case EL1: \
-		func##_el1(); \
-		break; \
-	case EL2: \
-		func##_el2(); \
-		break; \
-	case EL3: \
-		func##_el3(); \
-		break; \
-	} \
-} while (0)
+#include <stdint.h>
 
 /* PSTATE and special purpose register access functions */
 uint32_t raw_read_current_el(void);
-uint32_t get_current_el(void);
 uint32_t raw_read_daif(void);
 void raw_write_daif(uint32_t daif);
 void enable_debug_exceptions(void);

@@ -360,10 +167,6 @@ uint64_t raw_read_elr_el2(void);
 void raw_write_elr_el2(uint64_t elr_el2);
 uint64_t raw_read_elr_el3(void);
 void raw_write_elr_el3(uint64_t elr_el3);
-uint64_t raw_read_elr_current(void);
-void raw_write_elr_current(uint64_t elr);
-uint64_t raw_read_elr(uint32_t el);
-void raw_write_elr(uint64_t elr, uint32_t el);
 uint32_t raw_read_fpcr(void);
 void raw_write_fpcr(uint32_t fpcr);
 uint32_t raw_read_fpsr(void);

@@ -380,8 +183,6 @@ uint32_t raw_read_spsel(void);
 void raw_write_spsel(uint32_t spsel);
 uint64_t raw_read_sp_el3(void);
 void raw_write_sp_el3(uint64_t sp_el3);
-uint64_t raw_read_sp_elx(uint32_t el);
-void raw_write_sp_elx(uint64_t sp_elx, uint32_t el);
 uint32_t raw_read_spsr_abt(void);
 void raw_write_spsr_abt(uint32_t spsr_abt);
 uint32_t raw_read_spsr_el1(void);

@@ -390,10 +191,6 @@ uint32_t raw_read_spsr_el2(void);
 void raw_write_spsr_el2(uint32_t spsr_el2);
 uint32_t raw_read_spsr_el3(void);
 void raw_write_spsr_el3(uint32_t spsr_el3);
-uint32_t raw_read_spsr_current(void);
-void raw_write_spsr_current(uint32_t spsr);
-uint32_t raw_read_spsr(uint32_t el);
-void raw_write_spsr(uint32_t spsr, uint32_t el);
 uint32_t raw_read_spsr_fiq(void);
 void raw_write_spsr_fiq(uint32_t spsr_fiq);
 uint32_t raw_read_spsr_irq(void);

@@ -408,30 +205,18 @@ uint32_t raw_read_actlr_el2(void);
 void raw_write_actlr_el2(uint32_t actlr_el2);
 uint32_t raw_read_actlr_el3(void);
 void raw_write_actlr_el3(uint32_t actlr_el3);
-uint32_t raw_read_actlr_current(void);
-void raw_write_actlr_current(uint32_t actlr);
-uint32_t raw_read_actlr(uint32_t el);
-void raw_write_actlr(uint32_t actlr, uint32_t el);
 uint32_t raw_read_afsr0_el1(void);
 void raw_write_afsr0_el1(uint32_t afsr0_el1);
 uint32_t raw_read_afsr0_el2(void);
 void raw_write_afsr0_el2(uint32_t afsr0_el2);
 uint32_t raw_read_afsr0_el3(void);
 void raw_write_afsr0_el3(uint32_t afsr0_el3);
-uint32_t raw_read_afsr0_current(void);
-void raw_write_afsr0_current(uint32_t afsr0);
-uint32_t raw_read_afsr0(uint32_t el);
-void raw_write_afsr0(uint32_t afsr0, uint32_t el);
 uint32_t raw_read_afsr1_el1(void);
 void raw_write_afsr1_el1(uint32_t afsr1_el1);
 uint32_t raw_read_afsr1_el2(void);
 void raw_write_afsr1_el2(uint32_t afsr1_el2);
 uint32_t raw_read_afsr1_el3(void);
 void raw_write_afsr1_el3(uint32_t afsr1_el3);
-uint32_t raw_read_afsr1_current(void);
-void raw_write_afsr1_current(uint32_t afsr1);
-uint32_t raw_read_afsr1(uint32_t el);
-void raw_write_afsr1(uint32_t afsr1, uint32_t el);
 uint32_t raw_read_aidr_el1(void);
 uint64_t raw_read_amair_el1(void);
 void raw_write_amair_el1(uint64_t amair_el1);

@@ -439,10 +224,6 @@ uint64_t raw_read_amair_el2(void);
 void raw_write_amair_el2(uint64_t amair_el2);
 uint64_t raw_read_amair_el3(void);
 void raw_write_amair_el3(uint64_t amair_el3);
-uint64_t raw_read_amair_current(void);
-void raw_write_amair_current(uint64_t amair);
-uint64_t raw_read_amair(uint32_t el);
-void raw_write_amair(uint64_t amair, uint32_t el);
 uint32_t raw_read_ccsidr_el1(void);
 uint32_t raw_read_clidr_el1(void);
 uint32_t raw_read_cpacr_el1(void);

@@ -451,8 +232,6 @@ uint32_t raw_read_cptr_el2(void);
 void raw_write_cptr_el2(uint32_t cptr_el2);
 uint32_t raw_read_cptr_el3(void);
 void raw_write_cptr_el3(uint32_t cptr_el3);
-uint32_t raw_read_cptr_current(void);
-void raw_write_cptr_current(uint32_t cptr);
 uint32_t raw_read_csselr_el1(void);
 void raw_write_csselr_el1(uint32_t csselr_el1);
 uint32_t raw_read_ctr_el0(void);

@@ -462,20 +241,12 @@ uint32_t raw_read_esr_el2(void);
 void raw_write_esr_el2(uint32_t esr_el2);
 uint32_t raw_read_esr_el3(void);
 void raw_write_esr_el3(uint32_t esr_el3);
-uint32_t raw_read_esr_current(void);
-void raw_write_esr_current(uint32_t esr);
-uint32_t raw_read_esr(uint32_t el);
-void raw_write_esr(uint32_t esr, uint32_t el);
 uint64_t raw_read_far_el1(void);
 void raw_write_far_el1(uint64_t far_el1);
 uint64_t raw_read_far_el2(void);
 void raw_write_far_el2(uint64_t far_el2);
 uint64_t raw_read_far_el3(void);
 void raw_write_far_el3(uint64_t far_el3);
-uint64_t raw_read_far_current(void);
-void raw_write_far_current(uint64_t far);
-uint64_t raw_read_far(uint32_t el);
-void raw_write_far(uint64_t far, uint32_t el);
 uint64_t raw_read_hcr_el2(void);
 void raw_write_hcr_el2(uint64_t hcr_el2);
 uint64_t raw_read_aa64pfr0_el1(void);

@@ -485,10 +256,6 @@ uint64_t raw_read_mair_el2(void);
 void raw_write_mair_el2(uint64_t mair_el2);
 uint64_t raw_read_mair_el3(void);
 void raw_write_mair_el3(uint64_t mair_el3);
-uint64_t raw_read_mair_current(void);
-void raw_write_mair_current(uint64_t mair);
-uint64_t raw_read_mair(uint32_t el);
-void raw_write_mair(uint64_t mair, uint32_t el);
 uint32_t raw_read_midr_el1(void);
 uint64_t raw_read_mpidr_el1(void);
 uint32_t raw_read_rmr_el1(void);

@@ -497,20 +264,12 @@ uint32_t raw_read_rmr_el2(void);
 void raw_write_rmr_el2(uint32_t rmr_el2);
 uint32_t raw_read_rmr_el3(void);
 void raw_write_rmr_el3(uint32_t rmr_el3);
-uint32_t raw_read_rmr_current(void);
-void raw_write_rmr_current(uint32_t rmr);
-uint32_t raw_read_rmr(uint32_t el);
-void raw_write_rmr(uint32_t rmr, uint32_t el);
 uint64_t raw_read_rvbar_el1(void);
 void raw_write_rvbar_el1(uint64_t rvbar_el1);
 uint64_t raw_read_rvbar_el2(void);
 void raw_write_rvbar_el2(uint64_t rvbar_el2);
 uint64_t raw_read_rvbar_el3(void);
 void raw_write_rvbar_el3(uint64_t rvbar_el3);
-uint64_t raw_read_rvbar_current(void);
-void raw_write_rvbar_current(uint64_t rvbar);
-uint64_t raw_read_rvbar(uint32_t el);
-void raw_write_rvbar(uint64_t rvbar, uint32_t el);
 uint32_t raw_read_scr_el3(void);
 void raw_write_scr_el3(uint32_t scr_el3);
 uint32_t raw_read_sctlr_el1(void);

@@ -519,30 +278,18 @@ uint32_t raw_read_sctlr_el2(void);
 void raw_write_sctlr_el2(uint32_t sctlr_el2);
 uint32_t raw_read_sctlr_el3(void);
 void raw_write_sctlr_el3(uint32_t sctlr_el3);
-uint32_t raw_read_sctlr_current(void);
-void raw_write_sctlr_current(uint32_t sctlr);
-uint32_t raw_read_sctlr(uint32_t el);
-void raw_write_sctlr(uint32_t sctlr, uint32_t el);
 uint64_t raw_read_tcr_el1(void);
 void raw_write_tcr_el1(uint64_t tcr_el1);
 uint32_t raw_read_tcr_el2(void);
 void raw_write_tcr_el2(uint32_t tcr_el2);
 uint32_t raw_read_tcr_el3(void);
 void raw_write_tcr_el3(uint32_t tcr_el3);
-uint64_t raw_read_tcr_current(void);
-void raw_write_tcr_current(uint64_t tcr);
-uint64_t raw_read_tcr(uint32_t el);
-void raw_write_tcr(uint64_t tcr, uint32_t el);
 uint64_t raw_read_ttbr0_el1(void);
 void raw_write_ttbr0_el1(uint64_t ttbr0_el1);
 uint64_t raw_read_ttbr0_el2(void);
 void raw_write_ttbr0_el2(uint64_t ttbr0_el2);
 uint64_t raw_read_ttbr0_el3(void);
 void raw_write_ttbr0_el3(uint64_t ttbr0_el3);
-uint64_t raw_read_ttbr0_current(void);
-void raw_write_ttbr0_current(uint64_t ttbr0);
-uint64_t raw_read_ttbr0(uint32_t el);
-void raw_write_ttbr0(uint64_t ttbr0, uint32_t el);
 uint64_t raw_read_ttbr1_el1(void);
 void raw_write_ttbr1_el1(uint64_t ttbr1_el1);
 uint64_t raw_read_vbar_el1(void);

@@ -551,10 +298,6 @@ uint64_t raw_read_vbar_el2(void);
 void raw_write_vbar_el2(uint64_t vbar_el2);
 uint64_t raw_read_vbar_el3(void);
 void raw_write_vbar_el3(uint64_t vbar_el3);
-uint64_t raw_read_vbar_current(void);
-void raw_write_vbar_current(uint64_t vbar);
-uint64_t raw_read_vbar(uint32_t el);
-void raw_write_vbar(uint64_t vbar, uint32_t el);
 uint32_t raw_read_cntfrq_el0(void);
 void raw_write_cntfrq_el0(uint32_t cntfrq_el0);
 uint64_t raw_read_cntpct_el0(void);

@@ -576,13 +319,9 @@ void icivau(uint64_t ivau);
 void tlbiall_el1(void);
 void tlbiall_el2(void);
 void tlbiall_el3(void);
-void tlbiall_current(void);
-void tlbiall(uint32_t el);
 void tlbiallis_el1(void);
 void tlbiallis_el2(void);
 void tlbiallis_el3(void);
-void tlbiallis_current(void);
-void tlbiallis(uint32_t el);
 void tlbivaa_el1(uint64_t va);
 
 #endif /* __ASSEMBLER__ */

@@ -38,62 +38,41 @@ void exc_entry(struct exc_state *exc_state, uint64_t id)
 {
 	struct elx_state *elx = &exc_state->elx;
 	struct regs *regs = &exc_state->regs;
-	uint8_t elx_mode, elx_el;
+	uint8_t elx_mode;
 
-	elx->spsr = raw_read_spsr_current();
+	elx->spsr = raw_read_spsr_el3();
 	elx_mode = get_mode_from_spsr(elx->spsr);
-	elx_el = get_el_from_spsr(elx->spsr);
 
-	if (elx_mode == SPSR_USE_H) {
-		if (elx_el == get_current_el())
-			regs->sp = (uint64_t)&exc_state[1];
-		else
-			regs->sp = raw_read_sp_elx(elx_el);
-	} else {
+	if (elx_mode == SPSR_USE_H)
+		regs->sp = (uint64_t)&exc_state[1];
+	else
 		regs->sp = raw_read_sp_el0();
-	}
 
-	elx->elr = raw_read_elr_current();
+	elx->elr = raw_read_elr_el3();
 
 	exc_dispatch(exc_state, id);
 }
 
-void transition_with_entry(void *entry, void *arg, struct exc_state *exc_state)
+void transition_to_el2(void *entry, void *arg, uint64_t spsr)
 {
-	/* Argument to entry point goes into X0 */
-	exc_state->regs.x[X0_INDEX] = (uint64_t)arg;
-	/* Entry point goes into ELR */
-	exc_state->elx.elr = (uint64_t)entry;
+	struct exc_state exc_state;
+	struct elx_state *elx = &exc_state.elx;
+	struct regs *regs = &exc_state.regs;
+	uint32_t sctlr;
 
-	transition(exc_state);
-}
-
-void transition(struct exc_state *exc_state)
-{
-	uint64_t sctlr;
-	uint32_t current_el = get_current_el();
-
-	struct elx_state *elx = &exc_state->elx;
-	struct regs *regs = &exc_state->regs;
-
-	uint8_t elx_el = get_el_from_spsr(elx->spsr);
+	regs->x[X0_INDEX] = (uint64_t)arg;
+	elx->elr = (uint64_t)entry;
+	elx->spsr = spsr;
 
 	/*
 	 * Policies enforced:
-	 * 1. We support only elx --> (elx - 1) transitions
+	 * 1. We support only transitions to EL2
 	 * 2. We support transitions to Aarch64 mode only
 	 *
 	 * If any of the above conditions holds false, then we need a proper way
 	 * to update SCR/HCR before removing the checks below
 	 */
-	if ((current_el - elx_el) != 1)
-		die("ARM64 Error: Do not support transition\n");
-
-	if (elx->spsr & SPSR_ERET_32)
-		die("ARM64 Error: Do not support eret to Aarch32\n");
-
-	/* Most parts of coreboot currently don't support EL2 anyway. */
-	assert(current_el == EL3);
+	assert(get_el_from_spsr(spsr) == EL2 && !(spsr & SPSR_ERET_32));
 
 	/* Initialize SCR with defaults for running without secure monitor. */
 	raw_write_scr_el3(SCR_TWE_DISABLE |	/* don't trap WFE */

@@ -114,16 +93,16 @@ void transition(struct exc_state *exc_state)
 			  CPTR_EL3_TFP_DISABLE);
 
 	/* ELR/SPSR: Write entry point and processor state of program */
-	raw_write_elr_current(elx->elr);
-	raw_write_spsr_current(elx->spsr);
+	raw_write_elr_el3(elx->elr);
+	raw_write_spsr_el3(elx->spsr);
 
 	/* SCTLR: Initialize EL with selected properties */
-	sctlr = raw_read_sctlr(elx_el);
+	sctlr = raw_read_sctlr_el2();
 	sctlr &= SCTLR_MASK;
-	raw_write_sctlr(sctlr, elx_el);
+	raw_write_sctlr_el2(sctlr);
 
 	/* SP_ELx: Initialize stack pointer */
-	raw_write_sp_elx(elx->sp_elx, elx_el);
+	raw_write_sp_el2(elx->sp_elx);
 
 	/* Payloads expect to be entered with MMU disabled. Includes an ISB. */
 	mmu_disable();

@@ -164,7 +164,7 @@ ENTRY(exception_init_asm)
 	msr	SPSel, #SPSR_USE_L
 
 	adr	x0, exc_vectors
-	write_current vbar, x0, x1
+	msr	vbar_el3, x0
 	dsb	sy
 	isb
 	ret