cbfs: Add unverified_area APIs
This patch adds a new ..._unverified_area_... group of functions to the
cbfs_map/_load/_alloc() APIs. These functions can be used to access custom
FMAP sections and are meant to replace the existing
cbfs_locate_file_in_region(). The name is intended to highlight that accesses
through this API will not be verified when CBFS_VERIFICATION is enabled and
should always be treated as if they may return malicious data.

(Due to laziness I'm not adding the combination of this API with the
..._type_... variant at this point, since it seems very unlikely that we'll
ever have a use case for that. If we ever do, it should be easy to add later.)

(Also remove the 'inline' from cbfs_file_hash_mismatch(). I'm not sure why I
put it there in the first place, probably a bad copy&paste.)

Signed-off-by: Julius Werner <jwerner@chromium.org>
Change-Id: I402265900f7075aa0c2f58d812c67ea63ddf2900
Reviewed-on: https://review.coreboot.org/c/coreboot/+/59678
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Raul Rangel <rrangel@chromium.org>
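For reference, a minimal usage sketch of the new public entry points. The FMAP area name "RW_CUSTOM" and the file name "config.bin" below are hypothetical and not part of this change; this only illustrates the declared signatures.

#include <cbfs.h>
#include <stdint.h>

/* Hypothetical caller: "RW_CUSTOM" and "config.bin" are illustrative only. */
static void example_load_from_custom_area(void)
{
	size_t size;

	/* Map the file straight out of the named FMAP area. The result is NOT
	 * verified even when CBFS_VERIFICATION is enabled, so parse defensively. */
	void *data = cbfs_unverified_area_map("RW_CUSTOM", "config.bin", &size);
	if (data) {
		/* ... consume data[0..size) ... */
		cbfs_unmap(data);
	}

	/* Or copy (and, if applicable, decompress) into a caller-provided buffer. */
	uint8_t buf[256];
	if (!cbfs_unverified_area_load("RW_CUSTOM", "config.bin", buf, sizeof(buf)))
		return; /* not found, or it does not fit into buf */
}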
This commit is contained in:
parent
0cd6ab338c
commit
05714ccab7
src/include/cbfs.h

@@ -55,6 +55,11 @@
  *   section), even when running in an RW stage from one of the RW CBFSs. Only relevant if
  *   CONFIG(VBOOT) is set.
  *
+ * ..._unverified_area_...: Will look for the CBFS file in the named FMAP area, rather than
+ *   any of the default (RO or RW) CBFSs. Files accessed this way are *not* verified in any
+ *   way (even if CONFIG(CBFS_VERIFICATION) is enabled) and should always be treated as
+ *   untrusted (potentially malicious) data. Mutually exclusive with the ..._ro_... variant.
+ *
  * ..._type_...: May pass in an extra enum cbfs_type *type parameter. If the value it points to
  *   is CBFS_TYPE_QUERY, it will be replaced with the actual CBFS type of the found file. If
  *   it is anything else, the type will be compared with the actually found type, and the
@@ -76,11 +81,15 @@ static inline size_t cbfs_type_load(const char *name, void *buf, size_t size,
 				    enum cbfs_type *type);
 static inline size_t cbfs_ro_type_load(const char *name, void *buf, size_t size,
 				       enum cbfs_type *type);
+static inline size_t cbfs_unverified_area_load(const char *area, const char *name,
+					       void *buf, size_t size);
 
 static inline void *cbfs_map(const char *name, size_t *size_out);
 static inline void *cbfs_ro_map(const char *name, size_t *size_out);
 static inline void *cbfs_type_map(const char *name, size_t *size_out, enum cbfs_type *type);
 static inline void *cbfs_ro_type_map(const char *name, size_t *size_out, enum cbfs_type *type);
+static inline void *cbfs_unverified_area_map(const char *area, const char *name,
+					     size_t *size_out);
 
 static inline void *cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
 			       size_t *size_out);
@@ -90,6 +99,9 @@ static inline void *cbfs_type_alloc(const char *name, cbfs_allocator_t allocator
 				    size_t *size_out, enum cbfs_type *type);
 static inline void *cbfs_ro_type_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
 				       size_t *size_out, enum cbfs_type *type);
+static inline void *cbfs_unverified_area_alloc(const char *area, const char *name,
+					       cbfs_allocator_t allocator, void *arg,
+					       size_t *size_out);
 
 static inline void *cbfs_cbmem_alloc(const char *name, uint32_t cbmem_id, size_t *size_out);
 static inline void *cbfs_ro_cbmem_alloc(const char *name, uint32_t cbmem_id, size_t *size_out);
@@ -97,6 +109,8 @@ static inline void *cbfs_type_cbmem_alloc(const char *name, uint32_t cbmem_id, s
 					  enum cbfs_type *type);
 static inline void *cbfs_ro_type_cbmem_alloc(const char *name, uint32_t cbmem_id,
 					     size_t *size_out, enum cbfs_type *type);
+static inline void *cbfs_unverified_area_cbmem_alloc(const char *area, const char *name,
+						     uint32_t cbmem_id, size_t *size_out);
 
 /*
  * Starts the processes of preloading a file into RAM.
@@ -194,6 +208,9 @@ cb_err_t _cbfs_boot_lookup(const char *name, bool force_ro,
 void *_cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
 		  size_t *size_out, bool force_ro, enum cbfs_type *type);
 
+void *_cbfs_unverified_area_alloc(const char *area, const char *name,
+				  cbfs_allocator_t allocator, void *arg, size_t *size_out);
+
 struct _cbfs_default_allocator_arg {
 	void *buf;
 	size_t buf_size;
@@ -229,6 +246,13 @@ static inline void *cbfs_ro_type_alloc(const char *name, cbfs_allocator_t alloca
 	return _cbfs_alloc(name, allocator, arg, size_out, true, type);
 }
 
+static inline void *cbfs_unverified_area_alloc(const char *area, const char *name,
+					       cbfs_allocator_t allocator, void *arg,
+					       size_t *size_out)
+{
+	return _cbfs_unverified_area_alloc(area, name, allocator, arg, size_out);
+}
+
 static inline void *cbfs_map(const char *name, size_t *size_out)
 {
 	return cbfs_type_map(name, size_out, NULL);
@@ -249,6 +273,12 @@ static inline void *cbfs_ro_type_map(const char *name, size_t *size_out, enum cb
 	return cbfs_ro_type_alloc(name, NULL, NULL, size_out, type);
 }
 
+static inline void *cbfs_unverified_area_map(const char *area, const char *name,
+					     size_t *size_out)
+{
+	return _cbfs_unverified_area_alloc(area, name, NULL, NULL, size_out);
+}
+
 static inline size_t _cbfs_load(const char *name, void *buf, size_t size, bool force_ro,
 				enum cbfs_type *type)
 {
@@ -281,6 +311,16 @@ static inline size_t cbfs_ro_type_load(const char *name, void *buf, size_t size,
 	return _cbfs_load(name, buf, size, true, type);
 }
 
+static inline size_t cbfs_unverified_area_load(const char *area, const char *name,
+					       void *buf, size_t size)
+{
+	struct _cbfs_default_allocator_arg arg = { .buf = buf, .buf_size = size };
+	if (_cbfs_unverified_area_alloc(area, name, _cbfs_default_allocator, &arg, &size))
+		return size;
+	else
+		return 0;
+}
+
 static inline void *cbfs_cbmem_alloc(const char *name, uint32_t cbmem_id, size_t *size_out)
 {
 	return cbfs_type_cbmem_alloc(name, cbmem_id, size_out, NULL);
@@ -305,6 +345,13 @@ static inline void *cbfs_ro_type_cbmem_alloc(const char *name, uint32_t cbmem_id
 						  size_out, type);
 }
 
+static inline void *cbfs_unverified_area_cbmem_alloc(const char *area, const char *name,
+						     uint32_t cbmem_id, size_t *size_out)
+{
+	return _cbfs_unverified_area_alloc(area, name, _cbfs_cbmem_allocator,
+					   (void *)(uintptr_t)cbmem_id, size_out);
+}
+
 static inline size_t cbfs_get_size(const char *name)
 {
 	union cbfs_mdata mdata;
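One possible consumer pattern for the cbmem wrapper declared above (sketch only; the CBMEM id, area, and file names are made up for illustration and not defined by this patch):

#include <cbfs.h>

/* Hypothetical CBMEM id; nothing below is defined by this change. */
#define CBMEM_ID_CUSTOM_BLOB	0x43555342

/* Copy a file from a custom FMAP area into CBMEM so that later stages can
 * retrieve it by id without touching flash again. Contents stay unverified. */
static void stash_custom_blob(void)
{
	size_t size;
	void *copy = cbfs_unverified_area_cbmem_alloc("RW_CUSTOM", "blob.bin",
						      CBMEM_ID_CUSTOM_BLOB, &size);
	if (!copy)
		return;
	/* copy now points at size bytes living in CBMEM. */
}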
src/lib/cbfs.c (182 changed lines)
@@ -190,30 +190,37 @@ static inline bool cbfs_lzma_enabled(void)
 	return true;
 }
 
-static inline bool cbfs_file_hash_mismatch(const void *buffer, size_t size,
-					   const struct vb2_hash *file_hash)
+static bool cbfs_file_hash_mismatch(const void *buffer, size_t size,
+				    const union cbfs_mdata *mdata, bool skip_verification)
 {
 	/* Avoid linking hash functions when verification is disabled. */
-	if (!CONFIG(CBFS_VERIFICATION))
+	if (!CONFIG(CBFS_VERIFICATION) || skip_verification)
 		return false;
 
-	/* If there is no file hash, always count that as a mismatch. */
-	if (file_hash && vb2_hash_verify(buffer, size, file_hash) == VB2_SUCCESS)
-		return false;
-
-	printk(BIOS_CRIT, "CBFS file hash mismatch!\n");
-	return true;
+	const struct vb2_hash *hash = cbfs_file_hash(mdata);
+	if (!hash) {
+		ERROR("'%s' does not have a file hash!\n", mdata->h.filename);
+		return true;
+	}
+
+	if (vb2_hash_verify(buffer, size, hash) != VB2_SUCCESS) {
+		ERROR("'%s' file hash mismatch!\n", mdata->h.filename);
+		return true;
+	}
+
+	return false;
 }
 
 static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *buffer,
 				       size_t buffer_size, uint32_t compression,
-				       const struct vb2_hash *file_hash)
+				       const union cbfs_mdata *mdata, bool skip_verification)
 {
 	size_t in_size = region_device_sz(rdev);
 	size_t out_size = 0;
 	void *map;
 
-	DEBUG("Decompressing %zu bytes to %p with algo %d\n", in_size, buffer, compression);
+	DEBUG("Decompressing %zu bytes from '%s' to %p with algo %d\n",
+	      in_size, mdata->h.filename, buffer, compression);
 
 	switch (compression) {
 	case CBFS_COMPRESS_NONE:
@@ -221,7 +228,7 @@ static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *b
 			return 0;
 		if (rdev_readat(rdev, buffer, 0, in_size) != in_size)
 			return 0;
-		if (cbfs_file_hash_mismatch(buffer, in_size, file_hash))
+		if (cbfs_file_hash_mismatch(buffer, in_size, mdata, skip_verification))
 			return 0;
 		return in_size;
 
@@ -235,7 +242,7 @@ static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *b
 		if (map == NULL)
 			return 0;
 
-		if (!cbfs_file_hash_mismatch(map, in_size, file_hash)) {
+		if (!cbfs_file_hash_mismatch(map, in_size, mdata, skip_verification)) {
 			timestamp_add_now(TS_START_ULZ4F);
 			out_size = ulz4fn(map, in_size, buffer, buffer_size);
 			timestamp_add_now(TS_END_ULZ4F);
@@ -252,7 +259,7 @@ static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *b
 		if (map == NULL)
 			return 0;
 
-		if (!cbfs_file_hash_mismatch(map, in_size, file_hash)) {
+		if (!cbfs_file_hash_mismatch(map, in_size, mdata, skip_verification)) {
 			/* Note: timestamp not useful for memory-mapped media (x86) */
 			timestamp_add_now(TS_START_ULZMA);
 			out_size = ulzman(map, in_size, buffer, buffer_size);
@@ -411,13 +418,64 @@ out:
 	return err;
 }
 
+static void *do_alloc(union cbfs_mdata *mdata, struct region_device *rdev,
+		      cbfs_allocator_t allocator, void *arg, size_t *size_out,
+		      bool skip_verification)
+{
+	size_t size = region_device_sz(rdev);
+	void *loc = NULL;
+
+	uint32_t compression = CBFS_COMPRESS_NONE;
+	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(mdata,
+				CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
+	if (cattr) {
+		compression = be32toh(cattr->compression);
+		size = be32toh(cattr->decompressed_size);
+	}
+
+	if (size_out)
+		*size_out = size;
+
+	/* allocator == NULL means do a cbfs_map() */
+	if (allocator) {
+		loc = allocator(arg, size, mdata);
+	} else if (compression == CBFS_COMPRESS_NONE) {
+		void *mapping = rdev_mmap_full(rdev);
+		if (!mapping)
+			return NULL;
+		if (cbfs_file_hash_mismatch(mapping, size, mdata, skip_verification)) {
+			rdev_munmap(rdev, mapping);
+			return NULL;
+		}
+		return mapping;
+	} else if (!cbfs_cache.size) {
+		/* In order to use the cbfs_cache you need to add a CBFS_CACHE to your
+		 * memlayout. For stages that don't have .data sections (x86 pre-RAM),
+		 * it is not possible to add a CBFS_CACHE. */
+		ERROR("Cannot map compressed file %s without cbfs_cache\n", mdata->h.filename);
+		return NULL;
+	} else {
+		loc = mem_pool_alloc(&cbfs_cache, size);
+	}
+
+	if (!loc) {
+		ERROR("'%s' allocation failure\n", mdata->h.filename);
+		return NULL;
+	}
+
+	size = cbfs_load_and_decompress(rdev, loc, size, compression, mdata, skip_verification);
+	if (!size)
+		return NULL;
+
+	return loc;
+}
+
 void *_cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
 		  size_t *size_out, bool force_ro, enum cbfs_type *type)
 {
 	struct region_device rdev;
 	bool preload_successful = false;
 	union cbfs_mdata mdata;
-	void *loc = NULL;
 
 	DEBUG("%s(name='%s', alloc=%p(%p), force_ro=%s, type=%d)\n", __func__, name, allocator,
 	      arg, force_ro ? "true" : "false", type ? *type : -1);
@@ -436,72 +494,44 @@ void *_cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
 		}
 	}
 
-	size_t size = region_device_sz(&rdev);
-	uint32_t compression = CBFS_COMPRESS_NONE;
-	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(&mdata,
-			CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
-	if (cattr) {
-		compression = be32toh(cattr->compression);
-		size = be32toh(cattr->decompressed_size);
-	}
-
-	if (size_out)
-		*size_out = size;
-
-	const struct vb2_hash *file_hash = NULL;
-	if (CONFIG(CBFS_VERIFICATION))
-		file_hash = cbfs_file_hash(&mdata);
-
 	/* Update the rdev with the preload content */
 	if (!force_ro && get_preload_rdev(&rdev, name) == CB_SUCCESS)
 		preload_successful = true;
 
-	/* allocator == NULL means do a cbfs_map() */
-	if (allocator) {
-		loc = allocator(arg, size, &mdata);
-	} else if (compression == CBFS_COMPRESS_NONE) {
-		void *mapping = rdev_mmap_full(&rdev);
-
-		if (!mapping)
-			goto out;
-
-		if (cbfs_file_hash_mismatch(mapping, size, file_hash)) {
-			rdev_munmap(&rdev, mapping);
-			goto out;
-		}
-
-		return mapping;
-	} else if (!cbfs_cache.size) {
-		/*
-		 * In order to use the cbfs_cache you need to add a CBFS_CACHE to your
-		 * memlayout. For stages that don't have .data sections (x86 pre-RAM),
-		 * it is not possible to add a CBFS_CACHE.
-		 */
-		ERROR("Cannot map compressed file %s without cbfs_cache\n", mdata.h.filename);
-		goto out;
-	} else {
-		loc = mem_pool_alloc(&cbfs_cache, size);
-	}
-
-	if (!loc) {
-		ERROR("'%s' allocation failure\n", mdata.h.filename);
-		goto out;
-	}
-
-	size = cbfs_load_and_decompress(&rdev, loc, size, compression, file_hash);
-
-	if (!size)
-		loc = NULL;
-
-out:
-	/*
-	 * When using cbfs_preload we need to free the preload buffer after populating the
-	 * destination buffer.
-	 */
+	void *ret = do_alloc(&mdata, &rdev, allocator, arg, size_out, false);
+
+	/* When using cbfs_preload we need to free the preload buffer after populating the
+	 * destination buffer. We know we must have a mem_rdev here, so extra mmap is fine. */
 	if (preload_successful)
 		cbfs_unmap(rdev_mmap_full(&rdev));
 
-	return loc;
+	return ret;
+}
+
+void *_cbfs_unverified_area_alloc(const char *area, const char *name,
+				  cbfs_allocator_t allocator, void *arg, size_t *size_out)
+{
+	struct region_device area_rdev, file_rdev;
+	union cbfs_mdata mdata;
+	size_t data_offset;
+
+	DEBUG("%s(area='%s', name='%s', alloc=%p(%p))\n", __func__, area, name, allocator, arg);
+
+	if (fmap_locate_area_as_rdev(area, &area_rdev))
+		return NULL;
+
+	if (cbfs_lookup(&area_rdev, name, &mdata, &data_offset, NULL)) {
+		ERROR("'%s' not found in '%s'\n", name, area);
+		return NULL;
+	}
+
+	if (rdev_chain(&file_rdev, &area_rdev, data_offset, be32toh(mdata.h.len)))
+		return NULL;
+
+	if (tspi_measure_cbfs_hook(&file_rdev, name, be32toh(mdata.h.type)))
+		ERROR("error measuring '%s' in '%s'\n", name, area);
+
+	return do_alloc(&mdata, &file_rdev, allocator, arg, size_out, true);
 }
 
 void *_cbfs_default_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
@@ -546,17 +576,13 @@ cb_err_t cbfs_prog_stage_load(struct prog *pstage)
 	prog_set_entry(pstage, prog_start(pstage) +
 			       be32toh(sattr->entry_offset), NULL);
 
-	const struct vb2_hash *file_hash = NULL;
-	if (CONFIG(CBFS_VERIFICATION))
-		file_hash = cbfs_file_hash(&mdata);
-
 	/* Hacky way to not load programs over read only media. The stages
 	 * that would hit this path initialize themselves. */
 	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) &&
 	    !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) {
 		void *mapping = rdev_mmap_full(&rdev);
 		rdev_munmap(&rdev, mapping);
-		if (cbfs_file_hash_mismatch(mapping, region_device_sz(&rdev), file_hash))
+		if (cbfs_file_hash_mismatch(mapping, region_device_sz(&rdev), &mdata, false))
 			return CB_CBFS_HASH_MISMATCH;
 		if (mapping == prog_start(pstage))
 			return CB_SUCCESS;
@@ -573,7 +599,7 @@ cb_err_t cbfs_prog_stage_load(struct prog *pstage)
 	}
 
 	size_t fsize = cbfs_load_and_decompress(&rdev, prog_start(pstage), prog_size(pstage),
-						compression, file_hash);
+						compression, &mdata, false);
 	if (!fsize)
 		return CB_ERR;
 
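Because files fetched through _cbfs_unverified_area_alloc() skip hash verification even when CBFS_VERIFICATION is enabled (do_alloc() is called with skip_verification = true), callers that care about integrity are expected to sanity-check the data themselves. A hypothetical sketch of such caller-side validation; the magic value, area, and file names are illustrative only, not part of the API:

#include <cbfs.h>
#include <string.h>

/* Refuse to use the blob unless it starts with an expected magic value. */
static void *get_custom_table(size_t *size_out)
{
	static const char magic[4] = { 'C', 'T', 'B', 'L' };
	size_t size;
	void *blob = cbfs_unverified_area_map("RW_CUSTOM", "table.bin", &size);

	if (!blob)
		return NULL;

	if (size < sizeof(magic) || memcmp(blob, magic, sizeof(magic))) {
		cbfs_unmap(blob);	/* reject unexpected/untrusted contents */
		return NULL;
	}

	*size_out = size;
	return blob;
}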