6482c22ec0
This patch changes the mem_pool implementation to track the last two allocations (instead of just the last) and allow them both to be freed if the mem_pool_free() calls come in in reverse order. This is intended as a specific optimization for the CBFS cache case when a compressed file is mapped on a platform that doesn't natively support memory-mapping flash.

In this case, cbfs_map() (chaining through to _cbfs_alloc() with allocator == NULL) will call mem_pool_alloc(&cbfs_cache) to allocate space for the uncompressed file data. It will then call cbfs_load_and_decompress() to fill that allocation, which will notice the compression and in turn call rdev_mmap_full() to map the compressed data (which on platforms without memory-mapped flash usually results in a second call to mem_pool_alloc(&cbfs_cache)). It then runs the decompression algorithm and calls rdev_munmap() on the compressed data buffer (the latter one in the allocation sequence), leading to a mem_pool_free(). The remaining buffer with the uncompressed data is returned out of cbfs_map() to the caller, which should eventually call cbfs_unmap() to mem_pool_free() that as well.

This patch allows this simple case to succeed without leaking any permanent allocations on the cache. (More complicated cases where the caller maps other files before cbfs_unmap()ing the first one may still lead to leaks, but those are very rare in practice.)

Signed-off-by: Julius Werner <jwerner@chromium.org>
Change-Id: Ic5c4c56a8482752ed65e10cf35565f9b2d3e4b17
Reviewed-on: https://review.coreboot.org/c/coreboot/+/52087
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
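The simple case this enables, as a minimal sketch (not part of the patch): the buffer sizes, the static pool, and map_compressed_file() are made up for illustration, the struct fields are the ones mem_pool.c dereferences, and the cbfs/rdev plumbing is reduced to comments.

    #include <stdint.h>
    #include <commonlib/mem_pool.h>

    static uint8_t cache_buf[16 * 1024];

    /* Illustrative static init; field names as used in mem_pool.c. */
    static struct mem_pool cbfs_cache = {
    	.buf = cache_buf,
    	.size = sizeof(cache_buf),
    };

    static void map_compressed_file(void)
    {
    	/* cbfs_map() allocates room for the uncompressed file data... */
    	void *uncompressed = mem_pool_alloc(&cbfs_cache, 8 * 1024);
    	/* ...then rdev_mmap_full() allocates room for the compressed data. */
    	void *compressed = mem_pool_alloc(&cbfs_cache, 4 * 1024);

    	/* (NULL checks and the actual decompression elided.) */

    	/* The frees come in reverse allocation order, so both succeed: */
    	mem_pool_free(&cbfs_cache, compressed);   /* rdev_munmap() */
    	mem_pool_free(&cbfs_cache, uncompressed); /* cbfs_unmap() */
    }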
/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <commonlib/mem_pool.h>

void *mem_pool_alloc(struct mem_pool *mp, size_t sz)
{
	void *p;

	/* Make all allocations be at least 8 byte aligned. */
	sz = ALIGN_UP(sz, 8);

	/* Determine if any space available. */
	if ((mp->size - mp->free_offset) < sz)
		return NULL;

	p = &mp->buf[mp->free_offset];

	mp->free_offset += sz;
	/*
	 * Remember the previous allocation so that the last two can both
	 * be freed if the mem_pool_free() calls come in reverse order.
	 */
	mp->second_to_last_alloc = mp->last_alloc;
	mp->last_alloc = p;

	return p;
}

void mem_pool_free(struct mem_pool *mp, void *p)
{
	/* Determine if p was the most recent allocation. */
	if (p == NULL || mp->last_alloc != p)
		return;

	/* Rewind the pool to the start of that allocation. */
	mp->free_offset = mp->last_alloc - mp->buf;
	mp->last_alloc = mp->second_to_last_alloc;
	/* No way to track allocation before this one. */
	mp->second_to_last_alloc = NULL;
}
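For contrast, a sketch of the interleaved case the commit message says may still leak, using the same hypothetical pool as above (names and sizes illustrative):

    static void interleaved_case(void)
    {
    	void *a = mem_pool_alloc(&cbfs_cache, 1024);
    	void *b = mem_pool_alloc(&cbfs_cache, 1024);
    	void *c = mem_pool_alloc(&cbfs_cache, 1024);

    	/* a is not the most recent allocation, so this free is silently
    	   ignored and a's space stays consumed. */
    	mem_pool_free(&cbfs_cache, a);
    	mem_pool_free(&cbfs_cache, c); /* reclaimed (was last_alloc) */
    	mem_pool_free(&cbfs_cache, b); /* reclaimed (became last_alloc) */
    }

This is the trade-off the last paragraph of the commit message describes: the pool only ever rewinds its free offset, so anything not freed in reverse allocation order is held for the lifetime of the pool.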