libpayload arm64: fix mmu bugs
1. Keep functions and objects used entirely within mmu.c static.
2. DMA region finding needs to terminate; therefore, the next address
   to be attempted needs to be less than the current end address.
3. Ensure the mmu_ranges passed to mmu_init_ranges_from_sysinfo() has
   0 entries marked as used.

BUG=chrome-os-partner:31634
BRANCH=None
TEST=Booted ryu with RAM hole above cbmem tables below 4GiB.

Change-Id: I71a9cb89466978aa63fca5d8bee97b8af75ea206
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Original-Commit-Id: 66518fd86e676bbddf52e9d9afdd76d72c8e2222
Original-Change-Id: I5cb4e5009359cb04c4e1b5fe60845f80fbdff02c
Original-Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Original-Reviewed-on: https://chromium-review.googlesource.com/221725
Original-Reviewed-by: Furquan Shaikh <furquan@chromium.org>
Original-Tested-by: Furquan Shaikh <furquan@chromium.org>
Original-Commit-Queue: Furquan Shaikh <furquan@chromium.org>
Reviewed-on: http://review.coreboot.org/8793
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
commit 9425a545d7
parent 39292638af
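Of the three fixes, the loop-termination change is the subtle one. Below is a minimal sketch of the idea in isolation — find_dma_window(), range_is_valid(), and the two macros are simplified stand-ins, not the real mmu_add_dma_range()/mmu_is_dma_range_valid() code:

#include <stdint.h>

#define GRANULE_SIZE	4096ULL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

/* Stand-in for mmu_is_dma_range_valid(): nonzero if [base, base+size) is free. */
extern int range_is_valid(uint64_t base, uint64_t size);

/*
 * Search downward for a granule-aligned window of 'size' bytes ending at or
 * below 'end_addr'. The old do { ... } while (!valid) shape could re-test
 * the same candidate forever; decrementing end_addr on every miss gives the
 * loop a strictly decreasing measure, so it provably terminates.
 */
static uint64_t find_dma_window(uint64_t end_addr, uint64_t size)
{
	while (end_addr >= size) {
		uint64_t base_addr = ALIGN_DOWN(end_addr - size, GRANULE_SIZE);

		if (range_is_valid(base_addr, size))
			return base_addr;

		/* Drop to the next address. */
		end_addr -= 1;
	}

	return 0;	/* no suitable window below the starting point */
}

The patch below expresses the same idea as a while (1) loop: probe a candidate window, break once mmu_is_dma_range_valid() accepts it, otherwise do end_addr -= 1 and retry.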
--- mmu.c
+++ mmu.c
@@ -54,7 +54,7 @@ static uint8_t ttb_buffer[TTB_DEFAULT_SIZE] __attribute__((aligned(GRANULE_SIZE)
  * the DMA buffer is being placed in a sane location and does not overlap any of
  * the used mem ranges.
  */
-struct mmu_ranges usedmem_ranges;
+static struct mmu_ranges usedmem_ranges;
 
 static const uint64_t level_to_addr_mask[] = {
 	L1_ADDR_MASK,
@@ -426,6 +426,29 @@ static int mmu_is_dma_range_valid(uint64_t dma_base,
 	return 1;
 }
 
+/*
+ * Func: mmu_add_memrange
+ * Desc: Adds a new memory range
+ */
+static struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r,
+					     uint64_t base, uint64_t size,
+					     uint64_t type)
+{
+	struct mmu_memrange *curr = NULL;
+	int i = r->used;
+
+	if (i < ARRAY_SIZE(r->entries)) {
+		curr = &r->entries[i];
+		curr->base = base;
+		curr->size = size;
+		curr->type = type;
+
+		r->used = i + 1;
+	}
+
+	return curr;
+}
+
 /*
  * Func: mmu_add_dma_range
  * Desc: Add a memrange for dma operations. This is special because we want to
@@ -458,7 +481,7 @@ static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
 	 * We need to ensure that we do not step over payload regions or
 	 * the coreboot_table
 	 */
-	do {
+	while (1) {
 		/*
 		 * If end_addr is aligned to GRANULE_SIZE,
 		 * then base_addr will be too.
@@ -472,7 +495,13 @@ static struct mmu_memrange* mmu_add_dma_range(struct mmu_ranges *mmu_ranges)
 
 			if (base_addr < r[i].base)
 				break;
-	} while (mmu_is_dma_range_valid(base_addr, end_addr) == 0);
+
+		if (mmu_is_dma_range_valid(base_addr, end_addr))
+			break;
+
+		/* Drop to the next address. */
+		end_addr -= 1;
+	}
 
 		if (base_addr < r[i].base)
 			continue;
@@ -557,6 +586,9 @@ struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
 {
 	struct mmu_memrange *dma_range;
 
+	/* Initialize mmu_ranges to contain no entries. */
+	mmu_ranges->used = 0;
+
 	/* Extract ranges from memrange in lib_sysinfo */
 	mmu_extract_ranges(cb_ranges, ncb, mmu_ranges);
 
@@ -569,28 +601,6 @@ struct mmu_memrange *mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
 	return dma_range;
 }
 
-/*
- * Func: mmu_add_memrange
- * Desc: Adds a new memory range
- */
-struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r, uint64_t base,
-				      uint64_t size, uint64_t type)
-{
-	struct mmu_memrange *curr = NULL;
-	int i = r->used;
-
-	if (i < ARRAY_SIZE(r->entries)) {
-		curr = &r->entries[i];
-		curr->base = base;
-		curr->size = size;
-		curr->type = type;
-
-		r->used = i + 1;
-	}
-
-	return curr;
-}
-
 /*
  * Func: mmu_presysinfo_memory_used
  * Desc: Initializes all the memory used for presysinfo page table
--- mmu.h
+++ mmu.h
@@ -200,10 +200,6 @@ struct mmu_memrange* mmu_init_ranges_from_sysinfo(struct memrange *cb_ranges,
 					uint64_t ncb,
 					struct mmu_ranges *mmu_ranges);
 
-/* Add a new mmu_memrange */
-struct mmu_memrange* mmu_add_memrange(struct mmu_ranges *r, uint64_t base,
-				      uint64_t size, uint64_t type);
-
 /*
  * Functions for handling the initialization of memory ranges and enabling mmu
  * before coreboot tables are parsed
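For fix 3, note that the struct mmu_ranges is handed in by the caller and is typically stack-allocated, so its used field starts out indeterminate. A hypothetical caller sketch — the function name and the <arch/mmu.h> include path are assumptions; the types and mmu_init_ranges_from_sysinfo() are from the diff above:

#include <stdint.h>
#include <arch/mmu.h>	/* struct mmu_ranges, mmu_init_ranges_from_sysinfo() */

/* Hypothetical caller: not from the patch, illustration only. */
void setup_mmu_from_coreboot(struct memrange *cb_ranges, uint64_t ncb)
{
	/*
	 * Stack-allocated and never memset: before this patch, r.used was
	 * garbage, so mmu_add_memrange() could append after stale entries
	 * or see the array as already full. The function now sets
	 * mmu_ranges->used = 0 itself before extracting ranges.
	 */
	struct mmu_ranges r;
	struct mmu_memrange *dma_range;

	dma_range = mmu_init_ranges_from_sysinfo(cb_ranges, ncb, &r);
	if (dma_range == NULL)
		return;	/* no usable DMA window was found */

	/* ... hand r off to page-table construction ... */
}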