libpayload: arm64: Keep instruction cache enabled at all times

This patch makes libpayload enable the instruction cache as the very
first thing, which is similar to how we treat it in coreboot. It also
prevents the icache from being disabled again during mmu_disable() as
part of the two-stage page table setup in post_sysinfo_scan_mmu_setup().
It replaces the existing mmu_disable() implementation with the assembly
version from coreboot which handles certain edge cases better (see
CB:27238 for details).

The SCTLR flag definitions in libpayload seem to have still been
copy&pasted from arm32, so replace with the actual arm64 definitions from
coreboot.

Change-Id: Ifdbec34f0875ecc69fedcbea5c20e943379a3d2d
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/38908
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Hung-Te Lin <hungte@chromium.org>
This commit is contained in:
Julius Werner 2020-02-14 12:42:01 -08:00 committed by Patrick Georgi
parent 6cf33858b6
commit bf33b03acf
5 changed files with 39 additions and 56 deletions

View File

@ -29,6 +29,7 @@
*/
#include <arch/asm.h>
#include <arch/lib_helpers.h>
.macro dcache_apply_all crm
dsb sy
@ -96,3 +97,17 @@ ENDPROC(dcache_clean_all)
/* Clean and invalidate the entire data cache by set/way (DC CISW). */
ENTRY(dcache_clean_invalidate_all)
dcache_apply_all crm=cisw
ENDPROC(dcache_clean_invalidate_all)
/* This must be implemented in assembly to ensure there are no accesses to
memory (e.g. the stack) in between disabling and flushing the cache. */
ENTRY(mmu_disable)
/* Stash the return address just below sp without adjusting sp.
   Presumably safe because dcache_clean_invalidate_all does not touch
   the stack -- NOTE(review): confirm nothing (e.g. an exception) can
   clobber this red-zone slot before the reload below. */
str x30, [sp, #-0x8]
/* Clear the data cache (SCTLR_C) and MMU (SCTLR_M) enable bits in
   SCTLR_EL2. The icache bit is deliberately left alone so the
   instruction cache stays enabled (see commit message). */
mrs x0, sctlr_el2
mov x1, #~(SCTLR_C | SCTLR_M)
and x0, x0, x1
msr sctlr_el2, x0
/* Make the SCTLR_EL2 change take effect before flushing. */
isb
bl dcache_clean_invalidate_all
ldr x30, [sp, #-0x8]
ret
ENDPROC(mmu_disable)

View File

@ -28,11 +28,15 @@
*/
#include <arch/asm.h>
#include <arch/lib_helpers.h>
/*
* Our entry point
*/
ENTRY(_entry)
/* Initialize SCTLR to intended state (icache and stack-alignment on) */
ldr w1, =(SCTLR_RES1 | SCTLR_I | SCTLR_SA)
msr sctlr_el2, x1
/* Save off the location of the coreboot tables */
ldr x1, 1f

View File

@ -303,30 +303,6 @@ static uint32_t is_mmu_enabled(void)
return (sctlr & SCTLR_M);
}
/*
 * Func: mmu_disable
 * Desc: Invalidate TLBs and caches, then disable the MMU, data cache and
 *       instruction cache by clearing SCTLR_EL2.M/C/I.
 * NOTE(review): the C stack is still accessed between the SCTLR write and
 * the final flush below, which is the edge case the assembly replacement
 * of this routine is meant to avoid.
 */
void mmu_disable(void)
{
uint32_t sctlr;
sctlr = raw_read_sctlr_el2();
/* Clear data cache, MMU and instruction cache enable bits. */
sctlr &= ~(SCTLR_C | SCTLR_M | SCTLR_I);
tlbiall_el2();
/* Write back and invalidate the data cache while it is still on. */
dcache_clean_invalidate_all();
dsb();
isb();
raw_write_sctlr_el2(sctlr);
/* Flush again -- presumably to push out lines dirtied between the first
 * clean and the caches actually being switched off; confirm. */
dcache_clean_invalidate_all();
dsb();
isb();
}
/*
* Func: mmu_enable
* Desc: Initialize MAIR, TCR, TTBR and enable MMU by setting appropriate bits

View File

@ -35,38 +35,6 @@
#include <stddef.h>
#include <stdint.h>
/* SCTLR bits */
/* NOTE(review): these bit positions follow the AArch32 (ARMv7) SCTLR
 * layout, not the AArch64 SCTLR_ELx layout -- the commit replacing this
 * block states they were copy&pasted from arm32. */
#define SCTLR_M (1 << 0) /* MMU enable */
#define SCTLR_A (1 << 1) /* Alignment check enable */
#define SCTLR_C (1 << 2) /* Data/unified cache enable */
/* Bits 4:3 are reserved */
#define SCTLR_CP15BEN (1 << 5) /* CP15 barrier enable */
/* Bit 6 is reserved */
#define SCTLR_B (1 << 7) /* Endianness */
/* Bits 9:8 */
#define SCTLR_SW (1 << 10) /* SWP and SWPB enable */
#define SCTLR_Z (1 << 11) /* Branch prediction enable */
#define SCTLR_I (1 << 12) /* Instruction cache enable */
#define SCTLR_V (1 << 13) /* Low/high exception vectors */
#define SCTLR_RR (1 << 14) /* Round Robin select */
/* Bits 16:15 are reserved */
#define SCTLR_HA (1 << 17) /* Hardware Access flag enable */
/* Bit 18 is reserved */
/* Bits 20:19 reserved virtualization not supported */
/* NOTE(review): WXN/UWXN below reuse bits 19/20 that the comment above
 * declares reserved -- one of the two is wrong; confirm against the
 * ARMv7 ARM before relying on either. */
#define SCTLR_WXN (1 << 19) /* Write permission implies XN */
#define SCTLR_UWXN (1 << 20) /* Unprivileged write permission
implies PL1 XN */
#define SCTLR_FI (1 << 21) /* Fast interrupt config enable */
#define SCTLR_U (1 << 22) /* Unaligned access behavior */
#define SCTLR_VE (1 << 24) /* Interrupt vectors enable */
#define SCTLR_EE (1 << 25) /* Exception endianness */
/* Bit 26 is reserved */
#define SCTLR_NMFI (1 << 27) /* Non-maskable FIQ support */
#define SCTLR_TRE (1 << 28) /* TEX remap enable */
#define SCTLR_AFE (1 << 29) /* Access flag enable */
#define SCTLR_TE (1 << 30) /* Thumb exception enable */
/* Bit 31 is reserved */
/*
* Cache maintenance API
*/

View File

@ -30,11 +30,29 @@
#ifndef __ARCH_LIB_HELPERS_H__
#define __ARCH_LIB_HELPERS_H__
/* AArch64 SCTLR_ELx bit definitions (shared with coreboot). */
#define SCTLR_M (1 << 0) /* MMU enable */
#define SCTLR_A (1 << 1) /* Alignment check enable */
#define SCTLR_C (1 << 2) /* Data/unified cache enable */
#define SCTLR_SA (1 << 3) /* Stack alignment check enable */
#define SCTLR_NAA (1 << 6) /* non-aligned access STA/LDR */
#define SCTLR_I (1 << 12) /* Instruction cache enable */
#define SCTLR_ENDB (1 << 13) /* Pointer auth (data B) */
#define SCTLR_WXN (1 << 19) /* Write permission implies XN */
#define SCTLR_IESB (1 << 21) /* Implicit error sync event */
#define SCTLR_EE (1 << 25) /* Exception endianness (BE) */
#define SCTLR_ENDA (1 << 27) /* Pointer auth (data A) */
#define SCTLR_ENIB (1 << 30) /* Pointer auth (insn B) */
#define SCTLR_ENIA (1 << 31) /* Pointer auth (insn A) */
/* Reserved-one (RES1) bits of SCTLR_ELx, to be set on any full write. */
#define SCTLR_RES1 ((0x3 << 4) | (0x1 << 11) | (0x1 << 16) | \
(0x1 << 18) | (0x3 << 22) | (0x3 << 28))
/* DAIF exception-mask bits: debug, abort (SError), IRQ, FIQ. */
#define DAIF_DBG_BIT (1 << 3)
#define DAIF_ABT_BIT (1 << 2)
#define DAIF_IRQ_BIT (1 << 1)
#define DAIF_FIQ_BIT (1 << 0)
#ifndef __ASSEMBLER__
#include <stdint.h>
#define MAKE_REGISTER_ACCESSORS(reg) \
@ -273,4 +291,6 @@ static inline void tlbivaa_el1(uint64_t va)
#define dsb() dsb_opt(sy)
#define isb() isb_opt()
#endif /* __ASSEMBLER__ */
#endif /* __ARCH_LIB_HELPERS_H__ */