/* SPDX-License-Identifier: GPL-2.0-only */
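
/*
 * Loader for SELF (simple executable loader format) payloads: parse the
 * payload's segment table out of CBFS, check and copy/decompress each
 * segment into place, and hand the entry point back to the program loader.
 */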

#include <commonlib/bsd/compression.h>
#include <commonlib/endian.h>
#include <console/console.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <cbfs.h>
#include <lib.h>
#include <bootmem.h>
#include <program_loading.h>
#include <timestamp.h>
#include <cbmem.h>

/* The type syntax for C is essentially unparsable. -- Rob Pike */
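/*
 * A checker_t callback is run over a payload's segment list before anything
 * is loaded; a non-zero return value aborts the load (see _selfload() below).
 */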
typedef int (*checker_t)(struct cbfs_payload_segment *cbfssegs, void *args);

/* Decode a serialized cbfs payload segment
 * from memory into native endianness.
 */
static void cbfs_decode_payload_segment(struct cbfs_payload_segment *segment,
		const struct cbfs_payload_segment *src)
{
	segment->type = read_be32(&src->type);
	segment->compression = read_be32(&src->compression);
	segment->offset = read_be32(&src->offset);
	segment->load_addr = read_be64(&src->load_addr);
	segment->len = read_be32(&src->len);
	segment->mem_len = read_be32(&src->mem_len);
}
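
/*
 * Check that [dest, dest + memsz) lies in memory of the requested bootmem
 * type, or is allowed by an architecture quirk. Logs the offending segment
 * and dumps the bootmem ranges on failure.
 */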
static int segment_targets_type(void *dest, unsigned long memsz,
		enum bootmem_type dest_type)
{
	uintptr_t d = (uintptr_t) dest;

	if (bootmem_region_targets_type(d, memsz, dest_type))
		return 1;

	if (payload_arch_usable_ram_quirk(d, memsz))
		return 1;

	printk(BIOS_ERR, "SELF segment doesn't target RAM: %p, %lu bytes\n", dest, memsz);
	bootmem_dump_ranges();
	return 0;
}
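
/*
 * Copy (and, if necessary, decompress) one segment from src to dest,
 * zero-fill the remainder up to memsz, and let the architecture post-process
 * the loaded range via prog_segment_loaded(). Returns 1 on success, 0 on
 * failure.
 */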
static int load_one_segment(uint8_t *dest,
			    uint8_t *src,
			    size_t len,
			    size_t memsz,
			    uint32_t compression,
			    int flags)
{
	unsigned char *middle, *end;

	printk(BIOS_DEBUG, "Loading Segment: addr: %p memsz: 0x%016zx filesz: 0x%016zx\n",
	       dest, memsz, len);

	/* Compute the boundaries of the segment */
	end = dest + memsz;

	/* Copy data from the initial buffer */
	switch (compression) {
	case CBFS_COMPRESS_LZMA: {
		printk(BIOS_DEBUG, "using LZMA\n");
		timestamp_add_now(TS_START_ULZMA);
		len = ulzman(src, len, dest, memsz);
		timestamp_add_now(TS_END_ULZMA);
		if (!len) /* Decompression Error. */
			return 0;
		break;
	}
	case CBFS_COMPRESS_LZ4: {
		printk(BIOS_DEBUG, "using LZ4\n");
		timestamp_add_now(TS_START_ULZ4F);
		len = ulz4fn(src, len, dest, memsz);
		timestamp_add_now(TS_END_ULZ4F);
		if (!len) /* Decompression Error. */
			return 0;
		break;
	}
	case CBFS_COMPRESS_NONE: {
		printk(BIOS_DEBUG, "it's not compressed!\n");
		memcpy(dest, src, len);
		break;
	}
	default:
		printk(BIOS_INFO, "CBFS: Unknown compression type %d\n", compression);
		return 0;
	}

	/* Calculate middle after any changes to len. */
	middle = dest + len;
	printk(BIOS_SPEW, "[ 0x%08lx, %08lx, 0x%08lx) <- %08lx\n",
	       (unsigned long)dest,
	       (unsigned long)middle,
	       (unsigned long)end,
	       (unsigned long)src);

	/* Zero the extra bytes between middle & end */
	if (middle < end) {
		printk(BIOS_DEBUG,
		       "Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
		       (unsigned long)middle,
		       (unsigned long)(end - middle));

		/* Zero the extra bytes */
		memset(middle, 0, end - middle);
	}

	/*
	 * Each architecture can perform additional operations
	 * on the loaded segment
	 */
	prog_segment_loaded((uintptr_t)dest, memsz, flags);

	return 1;
}

/* Note: this function is a bit dangerous so is not exported.
 * It assumes you're smart enough not to call it with the very
 * last segment, since it uses seg + 1 */
static int last_loadable_segment(struct cbfs_payload_segment *seg)
{
	return read_be32(&(seg + 1)->type) == PAYLOAD_SEGMENT_ENTRY;
}
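
/*
 * Walk the segment list (up to the entry-point segment) and verify that every
 * segment targets memory of the bootmem type passed in through *args.
 * Returns 0 if all segments are acceptable, -1 otherwise.
 */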
static int check_payload_segments(struct cbfs_payload_segment *cbfssegs,
		void *args)
{
	uint8_t *dest;
	size_t memsz;
	struct cbfs_payload_segment *seg, segment;
	enum bootmem_type dest_type = *(enum bootmem_type *)args;

	for (seg = cbfssegs;; ++seg) {
		printk(BIOS_DEBUG, "Checking segment from ROM address %p\n", seg);
		cbfs_decode_payload_segment(&segment, seg);
		dest = (uint8_t *)(uintptr_t)segment.load_addr;
		memsz = segment.mem_len;
		if (segment.type == PAYLOAD_SEGMENT_ENTRY)
			break;
		if (!segment_targets_type(dest, memsz, dest_type))
			return -1;
	}
	return 0;
}
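
/*
 * Decode and load every segment in the list until the entry-point segment is
 * reached, which by definition terminates a payload. Stores the entry address
 * in *entry and returns 0 on success, -1 on any decode or load error.
 */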
static int load_payload_segments(struct cbfs_payload_segment *cbfssegs, uintptr_t *entry)
{
	uint8_t *dest, *src;
	size_t filesz, memsz;
	uint32_t compression;
	struct cbfs_payload_segment *first_segment, *seg, segment;
	int flags = 0;

	for (first_segment = seg = cbfssegs;; ++seg) {
		printk(BIOS_DEBUG, "Loading segment from ROM address %p\n", seg);

		cbfs_decode_payload_segment(&segment, seg);
		dest = (uint8_t *)(uintptr_t)segment.load_addr;
		memsz = segment.mem_len;
		compression = segment.compression;
		filesz = segment.len;

		switch (segment.type) {
		case PAYLOAD_SEGMENT_CODE:
		case PAYLOAD_SEGMENT_DATA:
			printk(BIOS_DEBUG, " %s (compression=%x)\n",
				segment.type == PAYLOAD_SEGMENT_CODE
				? "code" : "data", segment.compression);

			src = ((uint8_t *)first_segment) + segment.offset;
			printk(BIOS_DEBUG,
				" New segment dstaddr %p memsize 0x%zx srcaddr %p filesize 0x%zx\n",
				dest, memsz, src, filesz);

			/* Clean up the values */
			if (filesz > memsz) {
				filesz = memsz;
				printk(BIOS_DEBUG, " cleaned up filesize 0x%zx\n", filesz);
			}
			break;

		case PAYLOAD_SEGMENT_BSS:
			printk(BIOS_DEBUG, " BSS %p (%d byte)\n", (void *)
				(intptr_t)segment.load_addr, segment.mem_len);
			filesz = 0;
			src = ((uint8_t *)first_segment) + segment.offset;
			compression = CBFS_COMPRESS_NONE;
			break;

		case PAYLOAD_SEGMENT_ENTRY:
			printk(BIOS_DEBUG, " Entry Point %p\n", (void *)
				(intptr_t)segment.load_addr);

			*entry = segment.load_addr;
			/* Per definition, a payload always has the entry point
			 * as last segment. Thus, we use the occurrence of the
			 * entry point as break condition for the loop.
			 */
			return 0;

		default:
			/* We found something that we don't know about. Throw
			 * hands into the sky and run away!
			 */
			printk(BIOS_EMERG, "Bad segment type %x\n", segment.type);
			return -1;
		}

		/* Note that the 'seg + 1' is safe as we only call this
		 * function on "not the last" items, since entry
		 * is always last. */
		if (last_loadable_segment(seg))
			flags = SEG_FINAL;

		if (!load_one_segment(dest, src, filesz, memsz, compression, flags))
			return -1;
	}

	return 1;
}
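
/*
 * Weak default: architectures may override this to accept additional regions,
 * beyond what bootmem reports, as valid segment targets.
 */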
__weak int payload_arch_usable_ram_quirk(uint64_t start, uint64_t size)
{
	return 0;
}
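
/* Map the payload file from its region device so the segments can be parsed. */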
static void *selfprepare(struct prog *payload)
{
	void *data;
	data = rdev_mmap_full(prog_rdev(payload));
	return data;
}
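
/*
 * Common worker for selfload()/selfload_check(): map the payload, optionally
 * run the checker callback over its segments, load the segments, and record
 * the entry point (along with a pointer to the coreboot tables) in *payload.
 */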
static bool _selfload(struct prog *payload, checker_t f, void *args)
{
	uintptr_t entry = 0;
	struct cbfs_payload_segment *cbfssegs;
	void *data;

	data = selfprepare(payload);
	if (data == NULL)
		return false;

	cbfssegs = &((struct cbfs_payload *)data)->segments;

	if (f && f(cbfssegs, args))
		goto out;

	if (load_payload_segments(cbfssegs, &entry))
		goto out;

	printk(BIOS_SPEW, "Loaded segments\n");

	rdev_munmap(prog_rdev(payload), data);

	/* Pass cbtables to payload if architecture desires it. */
	prog_set_entry(payload, (void *)entry, cbmem_find(CBMEM_ID_CBTABLE));

	return true;
out:
	rdev_munmap(prog_rdev(payload), data);
	return false;
}
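
/*
 * Load a payload after verifying that all of its segments target memory of
 * the given bootmem type; callers loading into normal RAM would typically
 * pass BM_MEM_RAM here.
 */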
bool selfload_check(struct prog *payload, enum bootmem_type dest_type)
{
	return _selfload(payload, check_payload_segments, &dest_type);
}
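
/* Load a payload without checking where its segments land. */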
bool selfload(struct prog *payload)
{
	return _selfload(payload, NULL, 0);
}