armv7: mmu: Make fine grained page tables work across stages
Among its other restrictions (which are noted in a comment above the
function prototype and stay in place), our makeshift fine-grained page
table support for ARM32 has the undocumented feature that it relies on
a global bookkeeping variable, causing all sorts of fun surprises when
you try to use it from multiple stages during the same boot. This patch
redesigns the bookkeeping to stay completely inline in the (persistent)
TTB, which should resolve the issue.

(This has not been a problem on any of our platforms so far... I just
noticed it because I was trying to solve the same issue on ARM64.)

BRANCH=None
BUG=None
TEST=Booted veyron_jerry. Mapped a second fine-grained memory range
from romstage, confirmed that it finds the next free spot and leaves
the bootblock table in place.

Change-Id: I325866828b4ff251142e1131ce78b571edcc9cf9
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: http://review.coreboot.org/12074
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
Commit 03a0a65172 (parent 95b97848cc)
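Before the diff itself, here is a minimal, self-contained sketch of the bookkeeping scheme the
patch switches to: instead of counting used subtables in a stage-local variable (which resets
whenever a new stage zeroes its BSS), each subtable's first PTE carries a sentinel value that
cannot occur naturally, and the allocator scans the persistent subtable buffer for the first
sentinel. Every name below, and the array standing in for the _ttb_subtables.._ettb_subtables
region, is illustrative rather than the actual coreboot code.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pte_t;

/* Illustrative stand-ins for the real linker symbols and constants. */
#define SUBTABLE_PTES 256		/* PTEs per subtable: 1 MiB block / 4 KiB pages */
#define NUM_SUBTABLES 4			/* capacity of the subtable buffer */
#define ATTR_UNUSED   0xBADbA6E0	/* attrs set, bits [1:0] == 0 -> not a valid mapping */

/* Stands in for the persistent region between _ttb_subtables and _ettb_subtables. */
static pte_t subtables[NUM_SUBTABLES * SUBTABLE_PTES];

/* Run once per boot (the patch does this in mmu_init): mark every subtable unused. */
static void subtables_init(void)
{
	for (pte_t *t = subtables; t < subtables + NUM_SUBTABLES * SUBTABLE_PTES;
	     t += SUBTABLE_PTES)
		t[0] = ATTR_UNUSED;
}

/* Allocation keeps no stage-local state: scan for the first unused subtable.
 * Any stage can call this, because the bookkeeping lives in the tables themselves. */
static pte_t *subtable_alloc(void)
{
	pte_t *t = subtables;
	while (t[0] != ATTR_UNUSED) {
		t += SUBTABLE_PTES;
		if (t >= subtables + NUM_SUBTABLES * SUBTABLE_PTES)
			return NULL;	/* the real code die()s here */
	}
	return t;
}

int main(void)
{
	subtables_init();			/* bootblock: mmu_init() */
	pte_t *a = subtable_alloc();		/* bootblock: first fine-grained range */
	a[0] = 0x1;				/* any real mapping overwrites the sentinel */
	pte_t *b = subtable_alloc();		/* romstage: finds the next free slot */
	printf("first slot %td, second slot %td\n",
	       (a - subtables) / SUBTABLE_PTES, (b - subtables) / SUBTABLE_PTES);
	return 0;
}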
@@ -97,13 +97,17 @@ typedef uint64_t pte_t;
 typedef uint32_t pte_t;
 #endif /* CONFIG_ARM_LPAE */
 
+/* We set the first PTE to a sentinel value that cannot occur naturally (has
+ * attributes set but bits [1:0] are 0 -> unmapped) to mark unused subtables. */
+#define ATTR_UNUSED 0xBADbA6E0
+#define SUBTABLE_PTES (1 << (BLOCK_SHIFT - PAGE_SHIFT))
+
 /*
  * mask/shift/size for pages and blocks
  */
 #define PAGE_SHIFT 12
 #define PAGE_SIZE (1UL << PAGE_SHIFT)
 #define BLOCK_SIZE (1UL << BLOCK_SHIFT)
-#define SUBTABLE_SIZE ((1 << (BLOCK_SHIFT - PAGE_SHIFT)) * sizeof(pte_t))
 
 /*
  * MAIR Index
@@ -113,7 +117,6 @@ typedef uint32_t pte_t;
 #define MAIR_INDX_WB 2
 
 static pte_t *const ttb_buff = (void *)_ttb;
-static int used_tables = 0;
 
 /* Not all boards want to use subtables and declare them in memlayout.ld. This
  * outputs two 0x00000000 symbols if they don't, making _ttb_subtables_size 0.
@@ -156,12 +159,17 @@ static void mmu_fill_table(pte_t *table, u32 start_idx, u32 end_idx,
 
 static pte_t *mmu_create_subtable(pte_t *pgd_entry)
 {
-	if (used_tables >= _ttb_subtables_size / SUBTABLE_SIZE)
-		die("Not enough room for another sub-pagetable!");
+	pte_t *table = (pte_t *)_ttb_subtables;
+
+	/* Find unused subtable (first PTE == ATTR_UNUSED). */
+	while (table[0] != ATTR_UNUSED) {
+		table += SUBTABLE_PTES;
+		if ((pte_t *)_ettb_subtables - table <= 0)
+			die("Not enough room for another sub-pagetable!");
+	}
 
 	/* We assume that *pgd_entry must already be a valid block mapping. */
 	uintptr_t start_addr = (uintptr_t)(*pgd_entry & BLOCK_MASK);
-	pte_t *table = (void *)(_ttb_subtables + used_tables++ * SUBTABLE_SIZE);
 	printk(BIOS_DEBUG, "Creating new subtable @%p for [%#.8x:%#.8lx)\n",
 	       table, start_addr, start_addr + BLOCK_SIZE);
 
@@ -172,8 +180,7 @@ static pte_t *mmu_create_subtable(pte_t *pgd_entry)
 	attr = ((attr & ~(1 << 4)) | (1 << 0));
 	if (attr & ATTR_BLOCK)
 		attr = (attr & ~ATTR_BLOCK) | ATTR_PAGE;
-	mmu_fill_table(table, 0, SUBTABLE_SIZE / sizeof(pte_t),
-		       start_addr, PAGE_SHIFT, attr);
+	mmu_fill_table(table, 0, SUBTABLE_PTES, start_addr, PAGE_SHIFT, attr);
 
 	/* Replace old entry in upper level table to point at subtable. */
 	*pgd_entry = (pte_t)(uintptr_t)table | ATTR_NEXTLEVEL;
@@ -265,6 +272,11 @@ void mmu_config_range(u32 start_mb, u32 size_mb, enum dcache_policy policy)
  */
 void mmu_init(void)
 {
+	/* Initially mark all subtables as unused (first PTE == ATTR_UNUSED). */
+	pte_t *table = (pte_t *)_ttb_subtables;
+	for (; (pte_t *)_ettb_subtables - table > 0; table += SUBTABLE_PTES)
+		table[0] = ATTR_UNUSED;
+
 	if (CONFIG_ARM_LPAE) {
 		pte_t *const pgd_buff = (pte_t*)(_ttb + 16*KiB);
 		pte_t *pmd = ttb_buff;
@@ -384,7 +384,8 @@ void tlb_invalidate_all(void);
  * Generalized setup/init functions
  */
 
-/* mmu initialization (set page table address, set permissions, etc) */
+/* MMU initialization (set page table base, permissions, initialize subtable
+ * buffer, etc.). Must only be called ONCE PER BOOT, before any mappings. */
 void mmu_init(void);
 
 enum dcache_policy {
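The last hunk tightens the documentation on mmu_init(): it must run exactly once per boot,
before any mappings, because it is what seeds the ATTR_UNUSED sentinels. The sketch below
shows the cross-stage call pattern described in the TEST= line. It is illustrative only:
mmu_config_range_kb() is assumed to be the fine-grained mapping entry point (its name and
signature are not part of this diff), the stub bodies merely print what the real code would
do, and the addresses are placeholders.

#include <stdint.h>
#include <stdio.h>

/* Stub declarations standing in for coreboot's MMU API; names, signatures and
 * behavior here are assumptions made for the sake of the sketch. */
enum dcache_policy { DCACHE_OFF, DCACHE_WRITEBACK };

static void mmu_init(void)
{
	puts("mmu_init: build TTB, mark all subtables ATTR_UNUSED (once per boot)");
}

static void mmu_config_range_kb(uint32_t start_kb, uint32_t size_kb,
				enum dcache_policy policy)
{
	printf("map %u KiB at %u KiB (policy %d): reuse or allocate a subtable\n",
	       (unsigned)size_kb, (unsigned)start_kb, (int)policy);
}

int main(void)
{
	/* bootblock: one-time init, then the first fine-grained mapping
	 * (takes the first free subtable). */
	mmu_init();
	mmu_config_range_kb(0x10000, 4, DCACHE_OFF);

	/* romstage, same boot: the TTB and its inline bookkeeping persist, so
	 * there is no second mmu_init(). The next fine-grained mapping scans
	 * for the next unused subtable and leaves the bootblock's in place. */
	mmu_config_range_kb(0x20000, 4, DCACHE_WRITEBACK);
	return 0;
}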