selfboot: remove bounce buffers
Bounce buffers used to be used in cases where the payload might overlap coreboot. They are a problem for RAM payloads, as they require malloc. They are also an artifact of our x86 past, from before we had a relocatable ramstage; of the five architectures we support, only x86 needs them, and currently they only seem to matter on the following chipsets:

src/northbridge/amd/amdfam10/Kconfig
src/northbridge/amd/lx/Kconfig
src/northbridge/via/vx900/Kconfig
src/soc/intel/fsp_baytrail/Kconfig
src/soc/intel/fsp_broadwell_de/Kconfig

The first three are obsolete, or at least could be changed to avoid the need for bounce buffers. The last two should change to no longer need them. In any event, they can be fixed or pegged to a release which supports them.

For these five chipsets we change CONFIG_RAMBASE from 0x100000 (the value needed in 1999 for the 32-bit Linux kernel, the original ramstage) to 0xe00000 (14 MiB), which will put the non-relocatable x86 ramstage out of the way of any reasonable payload until we can get rid of it for good.

14 MiB was chosen after some discussion, but it does fit well:

o Fits in the 16 MiB cacheable range coreboot sets up by default
o Most small payloads are well under 14 MiB (even kernels!)
o Most large payloads get loaded at 16 MiB (especially kernels!)

With this change in place, coreboot still correctly loads a bzImage payload. Werner reports that the 0xe00000 setting works on his Broadwell systems.

Change-Id: I602feb32f35e8af1d0dc4ea9f25464872c9b824c
Signed-off-by: Ronald G. Minnich <rminnich@gmail.com>
Reviewed-on: https://review.coreboot.org/28647
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
commit 83bd46e5e5
parent ce1064edd6
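For orientation before the diff: the whole bounce-buffer mechanism existed to answer one question, namely whether a payload segment overlaps the running coreboot image. A minimal standalone sketch of that test follows (hedged: it mirrors the overlaps_coreboot() helper and the _program/_eprogram linker symbols visible in the removed selfboot code below, but is not itself part of this commit):

#include <stdint.h>

/* Linker-provided bounds of the running coreboot image; the removed
 * code cached these as lb_start and lb_end. */
extern char _program[], _eprogram[];

/* Nonzero if [dstaddr, dstaddr + memsz) intersects coreboot. Any
 * segment for which this holds could not be loaded in place and had
 * to be staged through the bounce buffer. */
static int segment_overlaps_coreboot(uintptr_t dstaddr, unsigned long memsz)
{
	uintptr_t lb_start = (uintptr_t)_program;
	uintptr_t lb_end = (uintptr_t)_eprogram;
	uintptr_t end = dstaddr + memsz;

	return !(end <= lb_start || dstaddr >= lb_end);
}

Raising RAMBASE to 14 MiB keeps typical payload load addresses (around 1 MiB) clear of the ramstage, so this test never fires and the buffer becomes unnecessary.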
@@ -25,8 +25,3 @@ void arch_prog_run(struct prog *prog)
 	doit = prog_entry(prog);
 	doit(prog_entry_arg(prog));
 }
-
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
@@ -55,11 +55,6 @@ void arch_prog_run(struct prog *prog)
 	doit(prog_entry_arg(prog));
 }
 
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
-
 /* Generic stage entry point. Can be overridden by board/SoC if needed. */
 __weak void stage_entry(void)
 {
@@ -23,8 +23,3 @@ void arch_prog_run(struct prog *prog)
 
 	doit(cb_tables);
 }
-
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
@@ -21,8 +21,3 @@ void arch_prog_run(struct prog *prog)
 
 	doit(prog_entry_arg(prog));
 }
-
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
@@ -48,8 +48,3 @@ void arch_prog_run(struct prog *prog)
 
 	doit(prog_entry_arg(prog));
 }
-
-int arch_supports_bounce_buffer(void)
-{
-	return 0;
-}
@@ -80,13 +80,20 @@ config SIPI_VECTOR_IN_ROM
 	default n
 	depends on ARCH_X86
 
+# Set the rambase for systems that still need it, only 5 chipsets as of
+# Sep 2018. This value was 0x100000, chosen to match the entry point
+# of Linux 2.2 in 1999. The new value, 14 MiB, makes a lot more sense
+# for as long as we need it; with luck, that won't be much longer.
+# In the long term, both RAMBASE and RAMTOP should be removed.
+# This value leaves more than 1 MiB which is required for fam10
+# and broadwell_de.
 config RAMBASE
 	hex
-	default 0x100000
+	default 0xe00000
 
 config RAMTOP
 	hex
-	default 0x200000
+	default 0x1000000
 	depends on ARCH_X86
 
 # Traditionally BIOS region on SPI flash boot media was memory mapped right below
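A quick sanity check on the arithmetic behind the new defaults (a hedged, standalone sketch, not part of the commit; plain C just to show the numbers):

#include <assert.h>
#include <stdio.h>

#define MiB (1024UL * 1024UL)

int main(void)
{
	unsigned long rambase = 0xe00000;   /* new CONFIG_RAMBASE default */
	unsigned long ramtop  = 0x1000000;  /* new CONFIG_RAMTOP default */

	assert(rambase == 14 * MiB); /* 14 MiB, as the Kconfig comment says */
	assert(ramtop == 16 * MiB);  /* matches the 16 MiB cacheable range */

	/* Window left for the non-relocatable ramstage: 2 MiB, which is
	 * the "more than 1 MiB" that fam10 and broadwell_de require. */
	printf("ramstage window: %lu MiB\n", (ramtop - rambase) / MiB);
	return 0;
}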
@@ -19,193 +19,6 @@
 #include <string.h>
 #include <symbols.h>
 
-/* When the ramstage is relocatable the elf loading ensures an elf image cannot
- * be loaded over the ramstage code. */
-static void jmp_payload_no_bounce_buffer(void *entry)
-{
-	/* Jump to kernel */
-	__asm__ __volatile__(
-		" cld\n\t"
-		/* Now jump to the loaded image */
-		" call *%0\n\t"
-
-		/* The loaded image returned? */
-		" cli\n\t"
-		" cld\n\t"
-
-		::
-		"r" (entry)
-		);
-}
-
-static void jmp_payload(void *entry, unsigned long buffer, unsigned long size)
-{
-	unsigned long lb_start, lb_size;
-
-	lb_start = (unsigned long)&_program;
-	lb_size = _program_size;
-
-	printk(BIOS_SPEW, "entry    = 0x%08lx\n", (unsigned long)entry);
-	printk(BIOS_SPEW, "lb_start = 0x%08lx\n", lb_start);
-	printk(BIOS_SPEW, "lb_size  = 0x%08lx\n", lb_size);
-	printk(BIOS_SPEW, "buffer   = 0x%08lx\n", buffer);
-
-	/* Jump to kernel */
-	__asm__ __volatile__(
-		" cld\n\t"
-#ifdef __x86_64__
-		/* switch back to 32-bit mode */
-		" push %4\n\t"
-		" push %3\n\t"
-		" push %2\n\t"
-		" push %1\n\t"
-		" push %0\n\t"
-
-		/* use iret to switch to 32-bit code segment */
-		" xor %%rax,%%rax\n\t"
-		" mov %%ss, %%ax\n\t"
-		" push %%rax\n\t"
-		" mov %%rsp, %%rax\n\t"
-		" add $8, %%rax\n\t"
-		" push %%rax\n\t"
-		" pushfq\n\t"
-		" push $0x10\n\t"
-		" lea 3(%%rip), %%rax\n\t"
-		" push %%rax\n\t"
-		" iretq\n\t"
-		".code32\n\t"
-		/* disable paging */
-		" mov %%cr0, %%eax\n\t"
-		" btc $31, %%eax\n\t"
-		" mov %%eax, %%cr0\n\t"
-		/* disable long mode */
-		" mov $0xC0000080, %%ecx\n\t"
-		" rdmsr\n\t"
-		" btc $8, %%eax\n\t"
-		" wrmsr\n\t"
-
-		" pop %%eax\n\t"
-		" add $4, %%esp\n\t"
-		" pop %%ebx\n\t"
-		" add $4, %%esp\n\t"
-		" pop %%ecx\n\t"
-
-		" add $4, %%esp\n\t"
-		" pop %%edx\n\t"
-		" add $4, %%esp\n\t"
-		" pop %%esi\n\t"
-		" add $4, %%esp\n\t"
-#endif
-
-		/* Save the callee save registers... */
-		" pushl %%esi\n\t"
-		" pushl %%edi\n\t"
-		" pushl %%ebx\n\t"
-		/* Save the parameters I was passed */
-#ifdef __x86_64__
-		" pushl $0\n\t"    /* 20 adjust */
-		" pushl %%eax\n\t" /* 16 lb_start */
-		" pushl %%ebx\n\t" /* 12 buffer */
-		" pushl %%ecx\n\t" /* 8 lb_size */
-		" pushl %%edx\n\t" /* 4 entry */
-		" pushl %%esi\n\t" /* 0 elf_boot_notes */
-#else
-		" pushl $0\n\t" /* 20 adjust */
-		" pushl %0\n\t" /* 16 lb_start */
-		" pushl %1\n\t" /* 12 buffer */
-		" pushl %2\n\t" /* 8 lb_size */
-		" pushl %3\n\t" /* 4 entry */
-		" pushl %4\n\t" /* 0 elf_boot_notes */
-
-#endif
-		/* Compute the adjustment */
-		" xorl %%eax, %%eax\n\t"
-		" subl 16(%%esp), %%eax\n\t"
-		" addl 12(%%esp), %%eax\n\t"
-		" addl 8(%%esp), %%eax\n\t"
-		" movl %%eax, 20(%%esp)\n\t"
-		/* Place a copy of coreboot in its new location */
-		/* Move ``longs'' the coreboot size is 4 byte aligned */
-		" movl 12(%%esp), %%edi\n\t"
-		" addl 8(%%esp), %%edi\n\t"
-		" movl 16(%%esp), %%esi\n\t"
-		" movl 8(%%esp), %%ecx\n\n"
-		" shrl $2, %%ecx\n\t"
-		" rep movsl\n\t"
-
-		/* Adjust the stack pointer to point into the new coreboot
-		 * image
-		 */
-		" addl 20(%%esp), %%esp\n\t"
-		/* Adjust the instruction pointer to point into the new coreboot
-		 * image
-		 */
-		" movl $1f, %%eax\n\t"
-		" addl 20(%%esp), %%eax\n\t"
-		" jmp *%%eax\n\t"
-		"1:\n\t"
-
-		/* Copy the coreboot bounce buffer over coreboot */
-		/* Move ``longs'' the coreboot size is 4 byte aligned */
-		" movl 16(%%esp), %%edi\n\t"
-		" movl 12(%%esp), %%esi\n\t"
-		" movl 8(%%esp), %%ecx\n\t"
-		" shrl $2, %%ecx\n\t"
-		" rep movsl\n\t"
-
-		/* Now jump to the loaded image */
-		" movl %5, %%eax\n\t"
-		" movl 0(%%esp), %%ebx\n\t"
-		" call *4(%%esp)\n\t"
-
-		/* The loaded image returned? */
-		" cli\n\t"
-		" cld\n\t"
-
-		/* Copy the saved copy of coreboot where coreboot runs */
-		/* Move ``longs'' the coreboot size is 4 byte aligned */
-		" movl 16(%%esp), %%edi\n\t"
-		" movl 12(%%esp), %%esi\n\t"
-		" addl 8(%%esp), %%esi\n\t"
-		" movl 8(%%esp), %%ecx\n\t"
-		" shrl $2, %%ecx\n\t"
-		" rep movsl\n\t"
-
-		/* Adjust the stack pointer to point into the old coreboot
-		 * image
-		 */
-		" subl 20(%%esp), %%esp\n\t"
-
-		/* Adjust the instruction pointer to point into the old coreboot
-		 * image
-		 */
-		" movl $1f, %%eax\n\t"
-		" subl 20(%%esp), %%eax\n\t"
-		" jmp *%%eax\n\t"
-		"1:\n\t"
-
-		/* Drop the parameters I was passed */
-		" addl $24, %%esp\n\t"
-
-		/* Restore the callee save registers */
-		" popl %%ebx\n\t"
-		" popl %%edi\n\t"
-		" popl %%esi\n\t"
-#ifdef __x86_64__
-		".code64\n\t"
-#endif
-		::
-		"ri" (lb_start), "ri" (buffer), "ri" (lb_size),
-		"ri" (entry),
-		"ri"(0), "ri" (0)
-		);
-}
-
-int arch_supports_bounce_buffer(void)
-{
-	return !IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE);
-}
-
 int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
 {
 	if (start < 1 * MiB && (start + size) <= 1 * MiB) {
@@ -217,22 +30,8 @@ int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
 	return 0;
 }
 
-static void try_payload(struct prog *prog)
-{
-	if (prog_type(prog) == PROG_PAYLOAD) {
-		if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE))
-			jmp_payload_no_bounce_buffer(prog_entry(prog));
-		else
-			jmp_payload(prog_entry(prog),
-				    (uintptr_t)prog_start(prog),
-				    prog_size(prog));
-	}
-}
-
 void arch_prog_run(struct prog *prog)
 {
-	if (ENV_RAMSTAGE)
-		try_payload(prog);
 	__asm__ volatile (
 #ifdef __x86_64__
 		"jmp *%%rdi\n"
@@ -97,11 +97,6 @@ bool bootmem_walk(range_action_t action, void *arg);
 /* Return 1 if region targets usable RAM, 0 otherwise. */
 int bootmem_region_targets_usable_ram(uint64_t start, uint64_t size);
 
-/* Return 1 if region targets usable RAM, and we allow memory ranges
- * with type >BM_MEM_OS_CUTOFF to be overwritten at the time we jump
- * to payload. 0 otherwise. */
-int bootmem_region_usable_with_bounce(uint64_t start, uint64_t size);
-
 /* Allocate a temporary buffer from the unused RAM areas. */
 void *bootmem_allocate_buffer(size_t size);
 
@@ -49,9 +49,6 @@ void prog_segment_loaded(uintptr_t start, size_t size, int flags);
 void platform_segment_loaded(uintptr_t start, size_t size, int flags);
 void arch_segment_loaded(uintptr_t start, size_t size, int flags);
 
-/* Return true if arch supports bounce buffer. */
-int arch_supports_bounce_buffer(void);
-
 /* Representation of a program. */
 struct prog {
 	/* The region_device is the source of program content to load. After
@@ -228,15 +228,6 @@ int bootmem_region_targets_usable_ram(uint64_t start, uint64_t size)
 	return bootmem_region_targets_ram(start, start + size, &bootmem);
 }
 
-/* Special testcase to use when loading payload segments when bounce-buffer is
- * supported. Memory ranges tagged with >BM_MEM_OS_CUTOFF may be overwritten at
- * the time we jump to payload.
- */
-int bootmem_region_usable_with_bounce(uint64_t start, uint64_t size)
-{
-	return bootmem_region_targets_ram(start, start + size, &bootmem_os);
-}
-
 void *bootmem_allocate_buffer(size_t size)
 {
 	const struct range_entry *r;
@@ -30,200 +30,6 @@
 #include <timestamp.h>
 #include <cbmem.h>
 
-static const unsigned long lb_start = (unsigned long)&_program;
-static const unsigned long lb_end = (unsigned long)&_eprogram;
-
-struct segment {
-	struct segment *next;
-	struct segment *prev;
-	unsigned long s_dstaddr;
-	unsigned long s_srcaddr;
-	unsigned long s_memsz;
-	unsigned long s_filesz;
-	int compression;
-};
-
-static void segment_insert_before(struct segment *seg, struct segment *new)
-{
-	new->next = seg;
-	new->prev = seg->prev;
-	seg->prev->next = new;
-	seg->prev = new;
-}
-
-static void segment_insert_after(struct segment *seg, struct segment *new)
-{
-	new->next = seg->next;
-	new->prev = seg;
-	seg->next->prev = new;
-	seg->next = new;
-}
-
-/* The problem:
- * Static executables all want to share the same addresses
- * in memory because only a few addresses are reliably present on
- * a machine, and implementing general relocation is hard.
- *
- * The solution:
- * - Allocate a buffer the size of the coreboot image plus additional
- *   required space.
- * - Anything that would overwrite coreboot copy into the lower part of
- *   the buffer.
- * - After loading an ELF image copy coreboot to the top of the buffer.
- * - Then jump to the loaded image.
- *
- * Benefits:
- * - Nearly arbitrary standalone executables can be loaded.
- * - coreboot is preserved, so it can be returned to.
- * - The implementation is still relatively simple,
- *   and much simpler than the general case implemented in kexec.
- */
-
-static unsigned long bounce_size, bounce_buffer;
-
-static void get_bounce_buffer(unsigned long req_size)
-{
-	unsigned long lb_size;
-	void *buffer;
-
-	/* When the ramstage is relocatable there is no need for a bounce
-	 * buffer. All payloads should not overlap the ramstage.
-	 */
-	if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE) ||
-	    !arch_supports_bounce_buffer()) {
-		bounce_buffer = ~0UL;
-		bounce_size = 0;
-		return;
-	}
-
-	lb_size = lb_end - lb_start;
-	/* Plus coreboot size so I have somewhere
-	 * to place a copy to return to.
-	 */
-	lb_size = req_size + lb_size;
-
-	buffer = bootmem_allocate_buffer(lb_size);
-
-	printk(BIOS_SPEW, "Bounce Buffer at %p, %lu bytes\n", buffer, lb_size);
-
-	bounce_buffer = (uintptr_t)buffer;
-	bounce_size = req_size;
-}
-
-static int overlaps_coreboot(struct segment *seg)
-{
-	unsigned long start, end;
-	start = seg->s_dstaddr;
-	end = start + seg->s_memsz;
-	return !((end <= lb_start) || (start >= lb_end));
-}
-
-static int relocate_segment(unsigned long buffer, struct segment *seg)
-{
-	/* Modify all segments that want to load onto coreboot
-	 * to load onto the bounce buffer instead.
-	 */
-	/* ret: 1 : A new segment is inserted before the seg.
-	 *      0 : A new segment is inserted after the seg, or no new one.
-	 */
-	unsigned long start, middle, end, ret = 0;
-
-	printk(BIOS_SPEW, "lb: [0x%016lx, 0x%016lx)\n",
-		lb_start, lb_end);
-
-	/* I don't conflict with coreboot so get out of here */
-	if (!overlaps_coreboot(seg))
-		return 0;
-
-	if (!arch_supports_bounce_buffer())
-		die("bounce buffer not supported");
-
-	start = seg->s_dstaddr;
-	middle = start + seg->s_filesz;
-	end = start + seg->s_memsz;
-
-	printk(BIOS_SPEW, "segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
-		start, middle, end);
-
-	if (seg->compression == CBFS_COMPRESS_NONE) {
-		/* Slice off a piece at the beginning
-		 * that doesn't conflict with coreboot.
-		 */
-		if (start < lb_start) {
-			struct segment *new;
-			unsigned long len = lb_start - start;
-			new = malloc(sizeof(*new));
-			*new = *seg;
-			new->s_memsz = len;
-			seg->s_memsz -= len;
-			seg->s_dstaddr += len;
-			seg->s_srcaddr += len;
-			if (seg->s_filesz > len) {
-				new->s_filesz = len;
-				seg->s_filesz -= len;
-			} else {
-				seg->s_filesz = 0;
-			}
-
-			/* Order by stream offset */
-			segment_insert_before(seg, new);
-
-			/* compute the new value of start */
-			start = seg->s_dstaddr;
-
-			printk(BIOS_SPEW,
-				"   early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
-				new->s_dstaddr,
-				new->s_dstaddr + new->s_filesz,
-				new->s_dstaddr + new->s_memsz);
-
-			ret = 1;
-		}
-
-		/* Slice off a piece at the end
-		 * that doesn't conflict with coreboot
-		 */
-		if (end > lb_end) {
-			unsigned long len = lb_end - start;
-			struct segment *new;
-			new = malloc(sizeof(*new));
-			*new = *seg;
-			seg->s_memsz = len;
-			new->s_memsz -= len;
-			new->s_dstaddr += len;
-			new->s_srcaddr += len;
-			if (seg->s_filesz > len) {
-				seg->s_filesz = len;
-				new->s_filesz -= len;
-			} else {
-				new->s_filesz = 0;
-			}
-			/* Order by stream offset */
-			segment_insert_after(seg, new);
-
-			printk(BIOS_SPEW,
-				"   late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
-				new->s_dstaddr,
-				new->s_dstaddr + new->s_filesz,
-				new->s_dstaddr + new->s_memsz);
-		}
-	}
-
-	/* Now retarget this segment onto the bounce buffer */
-	/* sort of explanation: the buffer is a 1:1 mapping to coreboot.
-	 * so you will make the dstaddr be this buffer, and it will get copied
-	 * later to where coreboot lives.
-	 */
-	seg->s_dstaddr = buffer + (seg->s_dstaddr - lb_start);
-
-	printk(BIOS_SPEW, " bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
-		seg->s_dstaddr,
-		seg->s_dstaddr + seg->s_filesz,
-		seg->s_dstaddr + seg->s_memsz);
-
-	return ret;
-}
-
 /* Decode a serialized cbfs payload segment
  * from memory into native endianness.
  */
|
|||
segment->mem_len = read_be32(&src->mem_len);
|
||||
}
|
||||
|
||||
static int build_self_segment_list(
|
||||
struct segment *head,
|
||||
struct cbfs_payload *cbfs_payload, uintptr_t *entry)
|
||||
static int segment_targets_usable_ram(void *dest, unsigned long memsz)
|
||||
{
|
||||
struct segment *new;
|
||||
struct cbfs_payload_segment *current_segment, *first_segment, segment;
|
||||
uintptr_t d = (uintptr_t) dest;
|
||||
if (bootmem_region_targets_usable_ram(d, memsz))
|
||||
return 1;
|
||||
|
||||
memset(head, 0, sizeof(*head));
|
||||
head->next = head->prev = head;
|
||||
if (payload_arch_usable_ram_quirk(d, memsz))
|
||||
return 1;
|
||||
|
||||
first_segment = &cbfs_payload->segments;
|
||||
|
||||
for (current_segment = first_segment;; ++current_segment) {
|
||||
printk(BIOS_DEBUG,
|
||||
"Loading segment from ROM address 0x%p\n",
|
||||
current_segment);
|
||||
|
||||
cbfs_decode_payload_segment(&segment, current_segment);
|
||||
|
||||
switch (segment.type) {
|
||||
case PAYLOAD_SEGMENT_PARAMS:
|
||||
printk(BIOS_DEBUG, " parameter section (skipped)\n");
|
||||
continue;
|
||||
|
||||
case PAYLOAD_SEGMENT_CODE:
|
||||
case PAYLOAD_SEGMENT_DATA:
|
||||
printk(BIOS_DEBUG, " %s (compression=%x)\n",
|
||||
segment.type == PAYLOAD_SEGMENT_CODE
|
||||
? "code" : "data", segment.compression);
|
||||
|
||||
new = malloc(sizeof(*new));
|
||||
new->s_dstaddr = segment.load_addr;
|
||||
new->s_memsz = segment.mem_len;
|
||||
new->compression = segment.compression;
|
||||
new->s_srcaddr = (uintptr_t)
|
||||
((unsigned char *)first_segment)
|
||||
+ segment.offset;
|
||||
new->s_filesz = segment.len;
|
||||
|
||||
printk(BIOS_DEBUG,
|
||||
" New segment dstaddr 0x%lx memsize 0x%lx srcaddr 0x%lx filesize 0x%lx\n",
|
||||
new->s_dstaddr, new->s_memsz, new->s_srcaddr,
|
||||
new->s_filesz);
|
||||
|
||||
/* Clean up the values */
|
||||
if (new->s_filesz > new->s_memsz) {
|
||||
new->s_filesz = new->s_memsz;
|
||||
printk(BIOS_DEBUG,
|
||||
" cleaned up filesize 0x%lx\n",
|
||||
new->s_filesz);
|
||||
}
|
||||
break;
|
||||
|
||||
case PAYLOAD_SEGMENT_BSS:
|
||||
printk(BIOS_DEBUG, " BSS 0x%p (%d byte)\n", (void *)
|
||||
(intptr_t)segment.load_addr, segment.mem_len);
|
||||
|
||||
new = malloc(sizeof(*new));
|
||||
new->s_filesz = 0;
|
||||
new->s_srcaddr = (uintptr_t)
|
||||
((unsigned char *)first_segment)
|
||||
+ segment.offset;
|
||||
new->s_dstaddr = segment.load_addr;
|
||||
new->s_memsz = segment.mem_len;
|
||||
new->compression = CBFS_COMPRESS_NONE;
|
||||
break;
|
||||
|
||||
case PAYLOAD_SEGMENT_ENTRY:
|
||||
printk(BIOS_DEBUG, " Entry Point 0x%p\n", (void *)
|
||||
(intptr_t)segment.load_addr);
|
||||
|
||||
*entry = segment.load_addr;
|
||||
/* Per definition, a payload always has the entry point
|
||||
* as last segment. Thus, we use the occurrence of the
|
||||
* entry point as break condition for the loop.
|
||||
* Can we actually just look at the number of section?
|
||||
*/
|
||||
return 1;
|
||||
|
||||
default:
|
||||
/* We found something that we don't know about. Throw
|
||||
* hands into the sky and run away!
|
||||
*/
|
||||
printk(BIOS_EMERG, "Bad segment type %x\n",
|
||||
segment.type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* We have found another CODE, DATA or BSS segment */
|
||||
/* Insert new segment at the end of the list */
|
||||
segment_insert_before(head, new);
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
__weak int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
|
||||
{
|
||||
printk(BIOS_ERR, "SELF segment doesn't target RAM: 0x%p, %lu bytes\n", dest, memsz);
|
||||
bootmem_dump_ranges();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int payload_targets_usable_ram(struct segment *head)
|
||||
static int load_one_segment(uint8_t *dest,
|
||||
uint8_t *src,
|
||||
size_t len,
|
||||
size_t memsz,
|
||||
uint32_t compression,
|
||||
int flags)
|
||||
{
|
||||
struct segment *ptr;
|
||||
|
||||
for (ptr = head->next; ptr != head; ptr = ptr->next) {
|
||||
if (bootmem_region_targets_usable_ram(ptr->s_dstaddr,
|
||||
ptr->s_memsz))
|
||||
continue;
|
||||
|
||||
if (payload_arch_usable_ram_quirk(ptr->s_dstaddr, ptr->s_memsz))
|
||||
continue;
|
||||
|
||||
if (arch_supports_bounce_buffer() &&
|
||||
bootmem_region_usable_with_bounce(ptr->s_dstaddr,
|
||||
ptr->s_memsz)) {
|
||||
printk(BIOS_DEBUG,
|
||||
"Payload is loaded over non-relocatable "
|
||||
"ramstage. Will use bounce-buffer.\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Payload segment not targeting RAM. */
|
||||
printk(BIOS_ERR, "SELF Payload doesn't target RAM:\n");
|
||||
printk(BIOS_ERR, "Failed Segment: 0x%lx, %lu bytes\n",
|
||||
ptr->s_dstaddr, ptr->s_memsz);
|
||||
bootmem_dump_ranges();
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int load_self_segments(struct segment *head, struct prog *payload,
|
||||
bool check_regions)
|
||||
{
|
||||
struct segment *ptr;
|
||||
unsigned long bounce_high = lb_end;
|
||||
|
||||
if (check_regions) {
|
||||
if (!payload_targets_usable_ram(head))
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (ptr = head->next; ptr != head; ptr = ptr->next) {
|
||||
/*
|
||||
* Add segments to bootmem memory map before a bounce buffer is
|
||||
* allocated so that there aren't conflicts with the actual
|
||||
* payload.
|
||||
*/
|
||||
if (check_regions) {
|
||||
bootmem_add_range(ptr->s_dstaddr, ptr->s_memsz,
|
||||
BM_MEM_PAYLOAD);
|
||||
}
|
||||
|
||||
if (!overlaps_coreboot(ptr))
|
||||
continue;
|
||||
if (ptr->s_dstaddr + ptr->s_memsz > bounce_high)
|
||||
bounce_high = ptr->s_dstaddr + ptr->s_memsz;
|
||||
}
|
||||
get_bounce_buffer(bounce_high - lb_start);
|
||||
if (!bounce_buffer) {
|
||||
printk(BIOS_ERR, "Could not find a bounce buffer...\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (ptr = head->next; ptr != head; ptr = ptr->next) {
|
||||
unsigned char *dest, *src, *middle, *end;
|
||||
size_t len, memsz;
|
||||
printk(BIOS_DEBUG,
|
||||
"Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
|
||||
ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
|
||||
|
||||
/* Modify the segment to load onto the bounce_buffer if
|
||||
* necessary.
|
||||
*/
|
||||
if (relocate_segment(bounce_buffer, ptr)) {
|
||||
ptr = (ptr->prev)->prev;
|
||||
continue;
|
||||
}
|
||||
|
||||
printk(BIOS_DEBUG,
|
||||
"Post relocation: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
|
||||
ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
|
||||
unsigned char *middle, *end;
|
||||
printk(BIOS_DEBUG, "Loading Segment: addr: 0x%p memsz: 0x%016zx filesz: 0x%016zx\n",
|
||||
dest, memsz, len);
|
||||
|
||||
/* Compute the boundaries of the segment */
|
||||
dest = (unsigned char *)(ptr->s_dstaddr);
|
||||
src = (unsigned char *)(ptr->s_srcaddr);
|
||||
len = ptr->s_filesz;
|
||||
memsz = ptr->s_memsz;
|
||||
end = dest + memsz;
|
||||
|
||||
/* Copy data from the initial buffer */
|
||||
switch (ptr->compression) {
|
||||
switch (compression) {
|
||||
case CBFS_COMPRESS_LZMA: {
|
||||
printk(BIOS_DEBUG, "using LZMA\n");
|
||||
timestamp_add_now(TS_START_ULZMA);
|
||||
|
@@ -456,9 +98,8 @@ static int load_self_segments(struct segment *head, struct prog *payload,
 		break;
 	}
 	default:
-		printk(BIOS_INFO, "CBFS: Unknown compression type %d\n",
-		       ptr->compression);
-		return -1;
+		printk(BIOS_INFO, "CBFS: Unknown compression type %d\n", compression);
+		return 0;
 	}
 	/* Calculate middle after any changes to len. */
 	middle = dest + len;
@@ -479,52 +120,111 @@
 		memset(middle, 0, end - middle);
 	}
 
-	/* Copy the data that's outside the area that shadows ramstage
-	 */
-	printk(BIOS_DEBUG, "dest %p, end %p, bouncebuffer %lx\n", dest,
-		end, bounce_buffer);
-	if ((unsigned long)end > bounce_buffer) {
-		if ((unsigned long)dest < bounce_buffer) {
-			unsigned char *from = dest;
-			unsigned char *to = (unsigned char *)
-				(lb_start - (bounce_buffer
-				- (unsigned long)dest));
-			unsigned long amount = bounce_buffer
-				- (unsigned long)dest;
-			printk(BIOS_DEBUG,
-				"move prefix around: from %p, to %p, amount: %lx\n",
-				from, to, amount);
-			memcpy(to, from, amount);
-		}
-		if ((unsigned long)end > bounce_buffer + (lb_end
-		    - lb_start)) {
-			unsigned long from = bounce_buffer + (lb_end
-				- lb_start);
-			unsigned long to = lb_end;
-			unsigned long amount =
-				(unsigned long)end - from;
-			printk(BIOS_DEBUG,
-				"move suffix around: from %lx, to %lx, amount: %lx\n",
-				from, to, amount);
-			memcpy((char *)to, (char *)from, amount);
-		}
-	}
-
 	/*
 	 * Each architecture can perform additional operations
 	 * on the loaded segment
 	 */
-	prog_segment_loaded((uintptr_t)dest, ptr->s_memsz,
-			    ptr->next == head ? SEG_FINAL : 0);
-	}
+	prog_segment_loaded((uintptr_t)dest, memsz, flags);
 
 	return 1;
 }
 
+/* Note: this function is a bit dangerous so is not exported.
+ * It assumes you're smart enough not to call it with the very
+ * last segment, since it uses seg + 1 */
+static int last_loadable_segment(struct cbfs_payload_segment *seg)
+{
+	return read_be32(&(seg + 1)->type) == PAYLOAD_SEGMENT_ENTRY;
+}
+
+static int load_payload_segments(
+	struct cbfs_payload_segment *cbfssegs,
+	int check_regions,
+	uintptr_t *entry)
+{
+	uint8_t *dest, *src;
+	size_t filesz, memsz;
+	uint32_t compression;
+	struct cbfs_payload_segment *first_segment, *seg, segment;
+	int flags = 0;
+
+	for (first_segment = seg = cbfssegs;; ++seg) {
+		printk(BIOS_DEBUG, "Loading segment from ROM address 0x%p\n", seg);
+
+		cbfs_decode_payload_segment(&segment, seg);
+		dest = (uint8_t *)(uintptr_t)segment.load_addr;
+		memsz = segment.mem_len;
+		compression = segment.compression;
+		filesz = segment.len;
+
+		switch (segment.type) {
+		case PAYLOAD_SEGMENT_CODE:
+		case PAYLOAD_SEGMENT_DATA:
+			printk(BIOS_DEBUG, "  %s (compression=%x)\n",
+			       segment.type == PAYLOAD_SEGMENT_CODE
+			       ? "code" : "data", segment.compression);
+			src = ((uint8_t *)first_segment) + segment.offset;
+			printk(BIOS_DEBUG,
+			       "  New segment dstaddr 0x%p memsize 0x%zx srcaddr 0x%p filesize 0x%zx\n",
+			       dest, memsz, src, filesz);
+
+			/* Clean up the values */
+			if (filesz > memsz) {
+				filesz = memsz;
+				printk(BIOS_DEBUG, "  cleaned up filesize 0x%zx\n", filesz);
+			}
+			break;
+
+		case PAYLOAD_SEGMENT_BSS:
+			printk(BIOS_DEBUG, "  BSS 0x%p (%d byte)\n", (void *)
+			       (intptr_t)segment.load_addr, segment.mem_len);
+			filesz = 0;
+			src = ((uint8_t *)first_segment) + segment.offset;
+			compression = CBFS_COMPRESS_NONE;
+			break;
+
+		case PAYLOAD_SEGMENT_ENTRY:
+			printk(BIOS_DEBUG, "  Entry Point 0x%p\n", (void *)
+			       (intptr_t)segment.load_addr);
+
+			*entry = segment.load_addr;
+			/* Per definition, a payload always has the entry point
+			 * as last segment. Thus, we use the occurrence of the
+			 * entry point as break condition for the loop.
+			 */
+			return 0;
+
+		default:
+			/* We found something that we don't know about. Throw
+			 * hands into the sky and run away!
+			 */
+			printk(BIOS_EMERG, "Bad segment type %x\n", segment.type);
+			return -1;
+		}
+		if (check_regions && !segment_targets_usable_ram(dest, memsz))
+			return -1;
+		/* Note that the 'seg + 1' is safe as we only call this
+		 * function on "not the last" * items, since entry
+		 * is always last. */
+		if (last_loadable_segment(seg))
+			flags = SEG_FINAL;
+		if (!load_one_segment(dest, src, filesz, memsz, compression, flags))
+			return -1;
+	}
+
+	return 1;
+}
+
+__weak int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
+{
+	return 0;
+}
+
 bool selfload(struct prog *payload, bool check_regions)
 {
 	uintptr_t entry = 0;
-	struct segment head;
+	struct cbfs_payload_segment *cbfssegs;
 	void *data;
 
 	data = rdev_mmap_full(prog_rdev(payload));
@@ -532,21 +232,14 @@ bool selfload(struct prog *payload, bool check_regions)
 	if (data == NULL)
 		return false;
 
-	/* Preprocess the self segments */
-	if (!build_self_segment_list(&head, data, &entry))
-		goto out;
-
-	/* Load the segments */
-	if (!load_self_segments(&head, payload, check_regions))
+	cbfssegs = &((struct cbfs_payload *)data)->segments;
+	if (load_payload_segments(cbfssegs, check_regions, &entry))
 		goto out;
 
 	printk(BIOS_SPEW, "Loaded segments\n");
 
 	rdev_munmap(prog_rdev(payload), data);
 
-	/* Update the payload's area with the bounce buffer information. */
-	prog_set_area(payload, (void *)(uintptr_t)bounce_buffer, bounce_size);
-
 	/* Pass cbtables to payload if architecture desires it. */
 	prog_set_entry(payload, (void *)entry, cbmem_find(CBMEM_ID_CBTABLE));
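With bootmem_region_usable_with_bounce() gone, the only remaining escape hatch for segments that fall outside bootmem-usable RAM is payload_arch_usable_ram_quirk(), now given a __weak default in the selfboot code above. A hedged sketch of what a platform override could look like (the sub-1-MiB rule mirrors the x86 quirk kept earlier in this commit; the include path for the MiB macro is an assumption):

#include <stdint.h>
#include <commonlib/helpers.h>	/* MiB; assumed location of the macro */

/* Overrides the __weak default in selfboot.c. Returning 1 lets a
 * payload segment target this range even though bootmem does not
 * mark it as usable RAM. */
int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
{
	/* Permit loads entirely below 1 MiB (real-mode trampolines and
	 * legacy setup data), as the x86 version in this commit does. */
	if (start < 1 * MiB && (start + size) <= 1 * MiB)
		return 1;

	return 0;
}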
@@ -52,10 +52,6 @@ config HEAP_SIZE
 	hex
 	default 0xc0000
 
-config RAMTOP
-	hex
-	default 0x400000
-
 config BOOTBLOCK_NORTHBRIDGE_INIT
 	string
 	default "northbridge/amd/amdfam10/bootblock.c"
@@ -32,10 +32,6 @@ config CBFS_SIZE
 	hex
 	default 0x200000
 
-config RAMTOP
-	hex
-	default 0x400000
-
 config HEAP_SIZE
 	hex
 	default 0x100000