mem_pool: Track the last two allocations (not just one)

This patch changes the mem_pool implementation to track the last two
allocations (instead of just the last) and allow them both to be freed
if the mem_pool_free() calls arrive in reverse order. This is intended
as a specific optimization for the CBFS cache case when a compressed
file is mapped on a platform that doesn't natively support
memory-mapping flash. In this case, cbfs_map() (chaining through to
_cbfs_alloc() with allocator == NULL) will call
mem_pool_alloc(&cbfs_cache) to allocate space for the uncompressed file
data. It will then call cbfs_load_and_decompress() to fill that
allocation, which will notice the compression and in turn call
rdev_mmap_full() to map the compressed data (which on platforms without
memory-mapped flash usually results in a second call to
mem_pool_alloc(&cbfs_cache)). It then runs the decompression algorithm
and calls rdev_munmap() on the compressed data buffer (the latter one in
the allocation sequence), leading to a mem_pool_free(). The remaining
buffer with the uncompressed data is returned out of cbfs_map() to the
caller, which should eventually call cbfs_unmap() to mem_pool_free()
that as well. This patch allows this simple case to succeed without
leaking any permanent allocations on the cache. (More complicated cases
where the caller maps other files before cbfs_unmap()ing the first one
may still lead to leaks, but those are very rare in practice.)
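
As a rough illustration of the allocation pattern this enables (the pool,
buffer, and sizes below are made up for the example; the real cbfs_cache pool
is defined elsewhere):

#include <mem_pool.h>

/* Illustrative pool only -- stands in for cbfs_cache. The backing buffer
 * must start 8-byte aligned, as noted in the header comment below. */
static uint8_t example_buf[1024] __attribute__((aligned(8)));
static struct mem_pool example_pool = MEM_POOL_INIT(example_buf, sizeof(example_buf));

static void example_cbfs_map_pattern(void)
{
	/* First allocation: destination buffer for the uncompressed file data
	 * (what cbfs_map()/_cbfs_alloc() grabs). */
	void *uncompressed = mem_pool_alloc(&example_pool, 600);

	/* Second allocation: mapping of the compressed source data (what
	 * rdev_mmap_full() ends up grabbing on non-memory-mapped flash). */
	void *compressed = mem_pool_alloc(&example_pool, 300);

	/* Decompression runs here, then the buffers are released in reverse
	 * order, so the pool can reclaim both of them: */
	mem_pool_free(&example_pool, compressed);	/* rdev_munmap() path */
	mem_pool_free(&example_pool, uncompressed);	/* cbfs_unmap() path */
}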

Signed-off-by: Julius Werner <jwerner@chromium.org>
Change-Id: Ic5c4c56a8482752ed65e10cf35565f9b2d3e4b17
Reviewed-on: https://review.coreboot.org/c/coreboot/+/52087
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Author: Julius Werner <jwerner@chromium.org>
Date: 2021-04-02 17:45:03 -07:00
Committer: Patrick Georgi
Commit: 6482c22ec0 (parent: 5a090bf125)
2 changed files with 20 additions and 12 deletions

mem_pool.h

@@ -7,11 +7,14 @@
 #include <stdint.h>
 
 /*
- * The memory pool allows one to allocate memory from a fixed size buffer
- * that also allows freeing semantics for reuse. However, the current
- * limitation is that the most recent allocation is the only one that
- * can be freed. If one tries to free any allocation that isn't the
- * most recently allocated it will result in a leak within the memory pool.
+ * The memory pool allows one to allocate memory from a fixed size buffer that
+ * also allows freeing semantics for reuse. However, the current limitation is
+ * that only the two most recent allocations can be freed (in exact reverse
+ * order). If one tries to free any allocation that isn't at the top of the
+ * allocation stack, or one allocates more than two buffers in a row without
+ * freeing, it will result in a leak within the memory pool. (Two allocations
+ * were chosen to optimize for the CBFS cache case which may need two buffers
+ * to map a single compressed file, and will free them in reverse order.)
  *
  * The memory returned by allocations are at least 8 byte aligned. Note
  * that this requires the backing buffer to start on at least an 8 byte
@@ -22,20 +25,23 @@ struct mem_pool {
 	uint8_t *buf;
 	size_t size;
 	uint8_t *last_alloc;
+	uint8_t *second_to_last_alloc;
 	size_t free_offset;
 };
 
 #define MEM_POOL_INIT(buf_, size_)		\
 	{					\
 		.buf = (buf_),			\
 		.size = (size_),		\
 		.last_alloc = NULL,		\
-		.free_offset = 0,		\
+		.second_to_last_alloc = NULL,	\
+		.free_offset = 0,		\
 	}
 
 static inline void mem_pool_reset(struct mem_pool *mp)
 {
 	mp->last_alloc = NULL;
+	mp->second_to_last_alloc = NULL;
 	mp->free_offset = 0;
 }
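
Conversely, the limitation spelled out in the updated comment can be shown
with a hypothetical sequence (pool names and sizes invented): a third
allocation pushes the oldest one out of the tracking window, so it can no
longer be reclaimed:

#include <mem_pool.h>

static uint8_t demo_buf[256] __attribute__((aligned(8)));
static struct mem_pool demo_pool = MEM_POOL_INIT(demo_buf, sizeof(demo_buf));

static void demo_leak(void)
{
	void *a = mem_pool_alloc(&demo_pool, 64);
	void *b = mem_pool_alloc(&demo_pool, 64);
	void *c = mem_pool_alloc(&demo_pool, 64);	/* 'a' drops out of the tracking window */

	mem_pool_free(&demo_pool, c);	/* reclaimed: most recent allocation */
	mem_pool_free(&demo_pool, b);	/* reclaimed: second-to-last, freed in reverse order */
	mem_pool_free(&demo_pool, a);	/* silent no-op: 'a' stays allocated until mem_pool_reset() */
}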

mem_pool.c

@@ -17,6 +17,7 @@ void *mem_pool_alloc(struct mem_pool *mp, size_t sz)
 	p = &mp->buf[mp->free_offset];
 
 	mp->free_offset += sz;
+	mp->second_to_last_alloc = mp->last_alloc;
 	mp->last_alloc = p;
 
 	return p;
@@ -29,6 +30,7 @@ void mem_pool_free(struct mem_pool *mp, void *p)
 		return;
 
 	mp->free_offset = mp->last_alloc - mp->buf;
+	mp->last_alloc = mp->second_to_last_alloc;
 	/* No way to track allocation before this one. */
-	mp->last_alloc = NULL;
+	mp->second_to_last_alloc = NULL;
 }
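
For readability, here is a sketch of how the two functions read with this
change applied, assembled from the hunks above. The lines that fall outside
the shown diff context (the alignment and bounds checks in mem_pool_alloc()
and the most-recent-allocation check in mem_pool_free()) are assumptions added
for completeness, not verbatim source:

#include <commonlib/helpers.h>	/* for ALIGN_UP() -- assumed include */
#include <mem_pool.h>

void *mem_pool_alloc(struct mem_pool *mp, size_t sz)
{
	void *p;

	/* Keep every allocation at least 8-byte aligned (assumed check). */
	sz = ALIGN_UP(sz, 8);

	/* Fail if the remaining space cannot hold the request (assumed check). */
	if ((mp->size - mp->free_offset) < sz)
		return NULL;

	p = &mp->buf[mp->free_offset];
	mp->free_offset += sz;

	/* Shift the tracking window: the previous allocation becomes the
	 * second-to-last, the new one becomes the last. */
	mp->second_to_last_alloc = mp->last_alloc;
	mp->last_alloc = p;

	return p;
}

void mem_pool_free(struct mem_pool *mp, void *p)
{
	/* Only the most recent allocation can be freed (assumed check). */
	if (p == NULL || mp->last_alloc != p)
		return;

	/* Roll the free pointer back and promote the second-to-last
	 * allocation so it can be freed next (in reverse order). */
	mp->free_offset = mp->last_alloc - mp->buf;
	mp->last_alloc = mp->second_to_last_alloc;
	/* No way to track allocation before this one. */
	mp->second_to_last_alloc = NULL;
}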