allocator_v4: Introduce RESOURCE_ALLOCATION_TOP_DOWN
Add an option to resource allocator v4 that restores the top-down
allocation approach at the domain level. This makes it easier to handle
64-bit resources natively. With the top-down approach, resources that
can be placed either above or below 4G are placed above, to save
precious space below the 4G boundary.

Change-Id: Iaf463d3e6b37d52e46761d8e210034fded58a8a4
Signed-off-by: Nico Huber <nico.h@gmx.de>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/41957
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Reviewed-by: Lean Sheng Tan <sheng.tan@9elements.com>
Reviewed-by: Subrata Banik <subratabanik@google.com>
commit 526c64249a (parent 38688519cf)
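As a rough illustration of the policy, here is a toy, self-contained sketch of
top-down vs. bottom-up placement. It is not coreboot code: the two free
windows and the resource size are invented, and alignment is ignored.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Two hypothetical free windows: a scarce one below 4G, a large one above. */
struct window { uint64_t base, end; };
static const struct window free_win[] = {
	{ 0xc0000000,   0xf0000000   },	/* below 4G */
	{ 0x1000000000, 0x8000000000 },	/* above 4G */
};

/* Return a base for a resource of `size`, scanning windows from the
 * bottom or from the top. */
static uint64_t place(uint64_t size, bool top_down)
{
	const int n = sizeof(free_win) / sizeof(free_win[0]);
	for (int i = top_down ? n - 1 : 0; i >= 0 && i < n; i += top_down ? -1 : 1) {
		if (free_win[i].end - free_win[i].base >= size)
			return top_down ? free_win[i].end - size : free_win[i].base;
	}
	return 0;
}

int main(void)
{
	printf("bottom-up: 0x%llx\n", (unsigned long long)place(0x100000, false));
	printf("top-down:  0x%llx\n", (unsigned long long)place(0x100000, true));
	return 0;
}

Bottom-up returns 0xc0000000, eating into the scarce low window; top-down
returns 0x7ffff00000, leaving everything below 4G untouched.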
--- a/src/device/Kconfig
+++ b/src/device/Kconfig
@@ -933,6 +933,11 @@ config RESOURCE_ALLOCATOR_V4
 	  ranges for allocating resources. This allows allocation of resources
 	  above 4G boundary as well.
 
+config RESOURCE_ALLOCATION_TOP_DOWN
+	bool "Allocate resources from top down"
+	default y
+	depends on RESOURCE_ALLOCATOR_V4
+
 config XHCI_UTILS
 	def_bool n
 	help
--- a/src/device/resource_allocator_v4.c
+++ b/src/device/resource_allocator_v4.c
@@ -372,6 +372,9 @@ static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
 static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
 				     unsigned long type_mask, unsigned long type_match)
 {
+	const bool allocate_top_down =
+		bus->dev->path.type == DEVICE_PATH_DOMAIN &&
+		CONFIG(RESOURCE_ALLOCATION_TOP_DOWN);
 	struct resource *resource = NULL;
 	const struct device *dev;
 
@@ -381,7 +384,7 @@ static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
 			continue;
 
 		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
-				    type_match, &resource->base) == false) {
+				    type_match, &resource->base, allocate_top_down) == false) {
 			printk(BIOS_ERR, " ERROR: Resource didn't fit!!! ");
 			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
 			       dev_path(dev), resource->index,
--- a/src/include/memrange.h
+++ b/src/include/memrange.h
@@ -161,15 +161,17 @@ struct range_entry *memranges_next_entry(struct memranges *ranges,
 					 const struct range_entry *r);
 
 /* Steals memory from the available list in given ranges as per the constraints:
  * limit = Upper bound for the memory range to steal (Inclusive).
  * size = Requested size for the stolen memory.
  * align = Required alignment(log 2) for the starting address of the stolen memory.
  * tag = Use a range that matches the given tag.
+ * from_top = Steal the highest possible range.
  *
  * If the constraints can be satisfied, this function creates a hole in the memrange,
  * writes the base address of that hole to stolen_base and returns true. Otherwise it returns
  * false. */
 bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
-		     unsigned char align, unsigned long tag, resource_t *stolen_base);
+		     unsigned char align, unsigned long tag, resource_t *stolen_base,
+		     bool from_top);
 
 #endif /* MEMRANGE_H_ */
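For illustration, a hedged sketch of how a caller might use the extended
signature. The limit, size, alignment, and the use of IORESOURCE_MEM as the
tag are assumptions for this example, not taken from the patch:

	/* Steal a 1 MiB, 1 MiB-aligned block from the highest matching
	 * range, keeping space lower in memory free (hypothetical caller). */
	resource_t base;
	if (memranges_steal(&ranges, 0xffffffffffffffffULL /* no limit */,
			    1 * MiB, 20 /* log2(1 MiB) */, IORESOURCE_MEM,
			    &base, /*from_top=*/true))
		printk(BIOS_DEBUG, "Stole 1 MiB at 0x%llx\n", base);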
--- a/src/lib/memrange.c
+++ b/src/lib/memrange.c
@@ -378,11 +378,11 @@ struct range_entry *memranges_next_entry(struct memranges *ranges,
 
 /* Find a range entry that satisfies the given constraints to fit a hole that matches the
  * required alignment, is big enough, does not exceed the limit and has a matching tag. */
-static const struct range_entry *memranges_find_entry(struct memranges *ranges,
-						      resource_t limit, resource_t size,
-						      unsigned char align, unsigned long tag)
+static const struct range_entry *
+memranges_find_entry(struct memranges *ranges, resource_t limit, resource_t size,
+		     unsigned char align, unsigned long tag, bool last)
 {
-	const struct range_entry *r;
+	const struct range_entry *r, *last_entry = NULL;
 	resource_t base, end;
 
 	if (size == 0)
@@ -407,25 +407,35 @@ static const struct range_entry *memranges_find_entry(struct memranges *ranges,
 		if (end > limit)
 			break;
 
-		return r;
+		if (!last)
+			return r;
+
+		last_entry = r;
 	}
 
-	return NULL;
+	return last_entry;
 }
 
 bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
-		     unsigned char align, unsigned long tag, resource_t *stolen_base)
+		     unsigned char align, unsigned long tag, resource_t *stolen_base,
+		     bool from_top)
 {
-	resource_t base;
-	const struct range_entry *r = memranges_find_entry(ranges, limit, size, align, tag);
+	const struct range_entry *r;
 
+	r = memranges_find_entry(ranges, limit, size, align, tag, from_top);
 	if (r == NULL)
 		return false;
 
-	base = ALIGN_UP(r->begin, POWER_OF_2(align));
-
-	memranges_create_hole(ranges, base, size);
-	*stolen_base = base;
+	if (from_top) {
+		/* Ensure we're within the range, even aligned down.
+		   Proof is simple: If ALIGN_UP(r->begin) would be
+		   higher, the stolen range wouldn't fit. */
+		assert(r->begin <= ALIGN_DOWN(range_entry_end(r) - size, POWER_OF_2(align)));
+		*stolen_base = ALIGN_DOWN(range_entry_end(r) - size, POWER_OF_2(align));
+	} else {
+		*stolen_base = ALIGN_UP(r->begin, POWER_OF_2(align));
+	}
+	memranges_create_hole(ranges, *stolen_base, size);
 
 	return true;
 }
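The comment's proof can be checked standalone. A minimal sketch, using the
mock numbers from the unit test below (range 0xE000-0x100000, stolen size
0x4000, 4 KiB alignment) and local stand-ins for coreboot's POWER_OF_2 and
ALIGN_DOWN helpers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define POWER_OF_2(x)    (1ULL << (x))
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	const uint64_t begin = 0xE000, end = 0x100000;	/* CACHEABLE_TAG mock range */
	const uint64_t size = 0x4000;			/* stolen_range_size */
	const unsigned char align = 12;			/* 4 KiB, log2 */

	/* Top-down placement: highest aligned base that still fits. */
	const uint64_t base = ALIGN_DOWN(end - size, POWER_OF_2(align));
	assert(begin <= base);	/* holds: 0xE000 <= 0xFC000 */
	printf("stolen_base = 0x%llx\n", (unsigned long long)base);
	return 0;
}

This mirrors the reasoning in the comment: had ALIGN_UP(begin) been greater
than this base, size bytes could not have fit in the entry at all.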
--- a/tests/lib/memrange-test.c
+++ b/tests/lib/memrange-test.c
@@ -457,8 +457,9 @@ static void test_memrange_holes(void **state)
 }
 
 /*
- * This test verifies memranges_steal() function. Simple check is done by attempt so steal some
- * memory from region with READONLY_TAG.
+ * This test verifies memranges_steal() function. Simple check is done by attempt
+ * to steal some memory from the top of region with CACHEABLE_TAG and some from
+ * the bottom of region with READONLY_TAG.
  *
  * Example memory ranges (res_mock1) for test_memrange_steal.
  * Space marked with (/) is stolen during the test.
@@ -466,8 +467,8 @@ static void test_memrange_holes(void **state)
  * +--------CACHEABLE_TAG--------+ <-0xE000
  * |                             |
  * |                             |
- * |                             |
- * +-----------------------------+ <-0x100000
+ * |/////////////////////////////| <-stolen_base
+ * +-----------------------------+ <-0x100000 <-stolen_base + 0x4000
  *
  *
  *
@@ -501,13 +502,27 @@ static void test_memrange_steal(void **state)
 
 	status = memranges_steal(&test_memrange,
 				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
-				 stolen_range_size, 12, READONLY_TAG, &stolen);
+				 stolen_range_size, 12, CACHEABLE_TAG, &stolen, true);
+	assert_true(status);
+	assert_in_range(stolen, res_mock[CACHEABLE_TAG].base,
+			res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size);
+	status = memranges_steal(&test_memrange,
+				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
+				 stolen_range_size, 12, READONLY_TAG, &stolen, false);
 	assert_true(status);
 	assert_in_range(stolen, res_mock[READONLY_TAG].base,
 			res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size);
 
 	memranges_each_entry(ptr, &test_memrange)
 	{
+		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
+			assert_int_equal(range_entry_end(ptr),
+					 ALIGN_DOWN(ALIGN_UP(res_mock[CACHEABLE_TAG].base
+							     + res_mock[CACHEABLE_TAG].size,
+							     MEMRANGE_ALIGN)
+						    - stolen_range_size,
+						    MEMRANGE_ALIGN));
+		}
 		if (range_entry_tag(ptr) == READONLY_TAG) {
 			assert_int_equal(range_entry_base(ptr),
 					 ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN)
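Worked numbers for the new CACHEABLE_TAG assertion, assuming MEMRANGE_ALIGN
is 4 KiB (an assumption; the exact value comes from the test's own defines):
the region ends at 0x100000 and stolen_range_size is 0x4000, so the expected
end is ALIGN_DOWN(ALIGN_UP(0x100000, 0x1000) - 0x4000, 0x1000) = 0xFC000,
which is exactly the stolen_base marked in the updated diagram above.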
@@ -518,20 +533,23 @@ static void test_memrange_steal(void **state)
 	assert_int_equal(count, 3);
 	count = 0;
 
-	/* Check if inserting range in previously stolen area will merge it. */
+	/* Check if inserting ranges in previously stolen areas will merge them. */
+	memranges_insert(&test_memrange,
+			 res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size
+				 - stolen_range_size - 0x12,
+			 stolen_range_size, CACHEABLE_TAG);
 	memranges_insert(&test_memrange, res_mock[READONLY_TAG].base + 0xCC, stolen_range_size,
 			 READONLY_TAG);
 	memranges_each_entry(ptr, &test_memrange)
 	{
-		if (range_entry_tag(ptr) == READONLY_TAG) {
-			assert_int_equal(
-				range_entry_base(ptr),
-				ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN));
-			assert_int_equal(
-				range_entry_end(ptr),
-				ALIGN_UP(res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size,
-					 MEMRANGE_ALIGN));
-		}
+		const unsigned long tag = range_entry_tag(ptr);
+		assert_true(tag == CACHEABLE_TAG || tag == READONLY_TAG || tag == RESERVED_TAG);
+		assert_int_equal(
+			range_entry_base(ptr),
+			ALIGN_DOWN(res_mock[tag].base, MEMRANGE_ALIGN));
+		assert_int_equal(
+			range_entry_end(ptr),
+			ALIGN_UP(res_mock[tag].base + res_mock[tag].size, MEMRANGE_ALIGN));
 		count++;
 	}
 	assert_int_equal(count, 3);
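One note on the final assertion: count is still expected to be 3 because each
re-inserted range lies within a hole previously stolen from a region of the
same tag, so memranges_insert() merges it back into that region's existing
entry rather than creating a new one.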