coreboot-kgpe-d16/src/lib/bootmem.c
Ronald G. Minnich 83bd46e5e5 selfboot: remove bounce buffers
Bounce buffers used to be used in those cases where the payload
might overlap coreboot.

Bounce buffers are a problem for rampayloads as they need malloc.

They are also an artifact of our x86 past before we had relocatable
ramstage; only x86, out of the 5 architectures we support, needs them;
currently they only seem to matter on the following chipsets:
src/northbridge/amd/amdfam10/Kconfig
src/northbridge/amd/lx/Kconfig
src/northbridge/via/vx900/Kconfig
src/soc/intel/fsp_baytrail/Kconfig
src/soc/intel/fsp_broadwell_de/Kconfig

The first three are obsolete or at least could be changed
to avoid the need to have bounce buffers.
The last two should change to no longer need them.
In any event they can be fixed or pegged to a release which supports
them.

For these five chipsets we change CONFIG_RAMBASE from 0x100000 (the
value needed in 1999 for the 32-bit Linux kernel, the original ramstage)
to 0xe00000 (14 MiB) which will put the non-relocatable x86
ramstage out of the way of any reasonable payload until we can
get rid of it for good.

14 MiB was chosen after some discussion, but it does fit well:
o Fits in the 16 MiB cacheable range coreboot sets up by default
o Most small payloads are well under 14 MiB (even kernels!)
o Most large payloads get loaded at 16 MiB (especially kernels!)

With this change in place coreboot correctly still loads a bzImage payload.

Werner reports that the 0xe00000 setting works on his broadwell systems.

Change-Id: I602feb32f35e8af1d0dc4ea9f25464872c9b824c
Signed-off-by: Ronald G. Minnich <rminnich@gmail.com>
Reviewed-on: https://review.coreboot.org/28647
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
2018-10-11 17:42:41 +00:00

281 lines
6.5 KiB
C

/*
* This file is part of the coreboot project.
*
* Copyright (C) 2003-2004 Eric Biederman
* Copyright (C) 2005-2010 coresystems GmbH
* Copyright (C) 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <console/console.h>
#include <bootmem.h>
#include <cbmem.h>
#include <device/resource.h>
#include <stdlib.h>
#include <symbols.h>
#include <assert.h>
/* Set once bootmem_init() has populated the range tables below. */
static int initialized;
/* Set once bootmem_write_memory_table() has exported the map to the OS. */
static int table_written;
/* Full memory map, including coreboot-internal entries (ramstage, payload). */
static struct memranges bootmem;
/* OS-visible memory map: only tags at or below BM_MEM_OS_CUTOFF. */
static struct memranges bootmem_os;
/* Report whether bootmem_init() has already built the range tables. */
static int bootmem_is_initialized(void)
{
	return initialized != 0;
}
/* Report whether the memory table was already handed off to the OS. */
static int bootmem_memory_table_written(void)
{
	return table_written != 0;
}
/* Platform hook to add bootmem areas the platform / board controls. */
void __attribute__((weak)) bootmem_platform_add_ranges(void)
{
	/* Default: nothing to add. Boards/platforms override this weak
	   symbol to reserve regions they own (e.g. firmware carve-outs). */
}
/* Convert bootmem tag to LB_MEM tag */
static uint32_t bootmem_to_lb_tag(const enum bootmem_type tag)
{
switch (tag) {
case BM_MEM_RAM:
return LB_MEM_RAM;
case BM_MEM_RESERVED:
return LB_MEM_RESERVED;
case BM_MEM_ACPI:
return LB_MEM_ACPI;
case BM_MEM_NVS:
return LB_MEM_NVS;
case BM_MEM_UNUSABLE:
return LB_MEM_UNUSABLE;
case BM_MEM_VENDOR_RSVD:
return LB_MEM_VENDOR_RSVD;
case BM_MEM_TABLE:
return LB_MEM_TABLE;
default:
printk(BIOS_ERR, "ERROR: Unsupported tag %u\n", tag);
return LB_MEM_RESERVED;
}
}
/*
 * Build both memory maps from the device-tree resources.
 *
 * bootmem_os is cloned *before* coreboot-internal entries (CBMEM,
 * stack, ramstage program, arch/platform ranges) are added, so the
 * OS-visible map excludes those bookkeeping tags.
 */
static void bootmem_init(void)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct memranges *bm = &bootmem;
	/* Mark initialized up front: bootmem_add_range() below asserts it. */
	initialized = 1;
	/*
	 * Fill the memory map out. The order of operations is important in
	 * that each overlapping range will take over the next. Therefore,
	 * add cacheable resources as RAM then add the reserved resources.
	 */
	memranges_init(bm, cacheable, cacheable, BM_MEM_RAM);
	memranges_add_resources(bm, reserved, reserved, BM_MEM_RESERVED);
	/* Snapshot the OS view before internal-only ranges are inserted. */
	memranges_clone(&bootmem_os, bm);
	/* Add memory used by CBMEM. */
	cbmem_add_bootmem();
	bootmem_add_range((uintptr_t)_stack, _stack_size, BM_MEM_RAMSTAGE);
	bootmem_add_range((uintptr_t)_program, _program_size, BM_MEM_RAMSTAGE);
	/* Architecture- and board-specific reservations go last. */
	bootmem_arch_add_ranges();
	bootmem_platform_add_ranges();
}
/*
 * Insert a tagged range into the bootmem map, overriding any
 * overlapping portion of existing entries.
 *
 * Ranges with OS-visible tags (<= BM_MEM_OS_CUTOFF) are mirrored into
 * bootmem_os; such insertions are illegal once the memory table has
 * been written out, since the OS already has a copy.
 */
void bootmem_add_range(uint64_t start, uint64_t size,
			const enum bootmem_type tag)
{
	assert(tag > BM_MEM_FIRST && tag < BM_MEM_LAST);
	assert(bootmem_is_initialized());

	memranges_insert(&bootmem, start, size, tag);
	if (tag <= BM_MEM_OS_CUTOFF) {
		/* Can't change OS tables anymore after they are written out. */
		assert(!bootmem_memory_table_written());
		memranges_insert(&bootmem_os, start, size, tag);
	}	/* Was `};` — stray empty statement removed. */
}
/*
 * Serialize the OS-visible memory map into the caller-provided
 * coreboot table record, growing mem->size by one lb_memory_range per
 * entry. Initializes bootmem lazily, then latches table_written so
 * later OS-visible insertions are rejected.
 */
void bootmem_write_memory_table(struct lb_memory *mem)
{
	const struct range_entry *r;
	struct lb_memory_range *lb_r;
	lb_r = &mem->map[0];
	/* Build the maps now if no one has yet. */
	bootmem_init();
	bootmem_dump_ranges();
	memranges_each_entry(r, &bootmem_os) {
		lb_r->start = pack_lb64(range_entry_base(r));
		lb_r->size = pack_lb64(range_entry_size(r));
		lb_r->type = bootmem_to_lb_tag(range_entry_tag(r));
		lb_r++;
		mem->size += sizeof(struct lb_memory_range);
	}
	/* From here on the OS-visible map is frozen. */
	table_written = 1;
}
/* Pairing of a bootmem tag with its printable name. */
struct range_strings {
	enum bootmem_type tag;
	const char *str;
};
/* Debug names for every known bootmem tag; used by bootmem_dump_ranges(). */
static const struct range_strings type_strings[] = {
	{ BM_MEM_RAM, "RAM" },
	{ BM_MEM_RESERVED, "RESERVED" },
	{ BM_MEM_ACPI, "ACPI" },
	{ BM_MEM_NVS, "NVS" },
	{ BM_MEM_UNUSABLE, "UNUSABLE" },
	{ BM_MEM_VENDOR_RSVD, "VENDOR RESERVED" },
	{ BM_MEM_TABLE, "CONFIGURATION TABLES" },
	{ BM_MEM_RAMSTAGE, "RAMSTAGE" },
	{ BM_MEM_PAYLOAD, "PAYLOAD" },
};
/*
 * Map a bootmem tag to its human-readable name for debug output.
 * Returns "UNKNOWN!" for tags absent from type_strings[].
 */
static const char *bootmem_range_string(const enum bootmem_type tag)
{
	/* size_t avoids a signed/unsigned comparison against ARRAY_SIZE(). */
	size_t i;

	for (i = 0; i < ARRAY_SIZE(type_strings); i++) {
		if (type_strings[i].tag == tag)
			return type_strings[i].str;
	}

	return "UNKNOWN!";
}
/* Print every entry of the full bootmem map to the debug console. */
void bootmem_dump_ranges(void)
{
	const struct range_entry *entry;
	int index = 0;

	memranges_each_entry(entry, &bootmem) {
		printk(BIOS_DEBUG, "%2d. %016llx-%016llx: %s\n",
			index, range_entry_base(entry),
			range_entry_end(entry) - 1,
			bootmem_range_string(range_entry_tag(entry)));
		index++;
	}
}
/*
 * Invoke action on each entry of the OS-visible map until it returns
 * false. Returns true if the walk was cut short, false otherwise.
 */
bool bootmem_walk_os_mem(range_action_t action, void *arg)
{
	const struct range_entry *entry;

	assert(bootmem_is_initialized());

	memranges_each_entry(entry, &bootmem_os) {
		if (action(entry, arg))
			continue;
		return true;
	}

	return false;
}
/*
 * Invoke action on each entry of the full bootmem map until it returns
 * false. Returns true if the walk was cut short, false otherwise.
 */
bool bootmem_walk(range_action_t action, void *arg)
{
	const struct range_entry *entry;

	assert(bootmem_is_initialized());

	memranges_each_entry(entry, &bootmem) {
		if (action(entry, arg))
			continue;
		return true;
	}

	return false;
}
/*
 * Return 1 iff [start, end) lies entirely within a single BM_MEM_RAM
 * entry of the given map, 0 otherwise.
 */
static int bootmem_region_targets_ram(uint64_t start, uint64_t end,
					struct memranges *bm)
{
	const struct range_entry *entry;

	memranges_each_entry(entry, bm) {
		/* Entries are sorted; all further ones start at or past end. */
		if (end <= range_entry_base(entry))
			break;

		/* Skip entries that do not fully contain the region. */
		if (start < range_entry_base(entry) ||
		    end > range_entry_end(entry))
			continue;

		/* Entries never overlap, so this is the only candidate. */
		return range_entry_tag(entry) == BM_MEM_RAM;
	}

	return 0;
}
/* Common testcase for loading any segments to bootmem.
 * Returns 1 if the requested memory range is all tagged as type BM_MEM_RAM.
 * Otherwise returns 0.
 */
int bootmem_region_targets_usable_ram(uint64_t start, uint64_t size)
{
	/* Reject wrap-around: if start + size overflows uint64_t, the
	 * wrapped end value could spuriously match a low RAM range. */
	if (start + size < start)
		return 0;

	return bootmem_region_targets_ram(start, start + size, &bootmem);
}
/*
 * Allocate a 4 KiB-aligned buffer of at least size bytes from the
 * highest usable BM_MEM_RAM range below 4 GiB, marking it
 * BM_MEM_PAYLOAD so later allocations cannot reuse it.
 *
 * Returns a pointer to the buffer, or NULL if bootmem is not
 * initialized or no suitable range exists.
 */
void *bootmem_allocate_buffer(size_t size)
{
	const struct range_entry *r;
	const struct range_entry *region;
	/* All allocated buffers fall below the 32-bit boundary. */
	const resource_t max_addr = 1ULL << 32;
	resource_t begin;
	resource_t end;

	if (!bootmem_is_initialized()) {
		printk(BIOS_ERR, "%s: lib uninitialized!\n", __func__);
		return NULL;
	}

	/* 4KiB alignment. */
	size = ALIGN(size, 4096);

	/* Entries are sorted ascending, so the last match is the highest. */
	region = NULL;
	memranges_each_entry(r, &bootmem) {
		if (range_entry_size(r) < size)
			continue;

		if (range_entry_tag(r) != BM_MEM_RAM)
			continue;

		if (range_entry_base(r) >= max_addr)
			continue;

		end = range_entry_end(r);
		if (end > max_addr)
			end = max_addr;

		/* Portion below 4 GiB must still be large enough. */
		if ((end - range_entry_base(r)) < size)
			continue;

		region = r;
	}

	if (region == NULL)
		return NULL;

	/* region now points to the highest usable region for the given size. */
	end = range_entry_end(region);
	if (end > max_addr)
		end = max_addr;
	/* Place the buffer at the top of the usable window. */
	begin = end - size;

	/* Mark buffer as unusable for future buffer use. */
	bootmem_add_range(begin, size, BM_MEM_PAYLOAD);

	return (void *)(uintptr_t)begin;
}