cbmem: Add initial allocation support

Add support to allocate a region just below CBMEM root.  This region is
reserved for FSP 1.1 to use for its stack and variables.

BRANCH=none
BUG=None
TEST=Build and run on Braswell

Change-Id: I1d4b36ab366e6f8e036335c56c1756f2dfaab3f5
Signed-off-by: Lee Leahy <leroy.p.leahy@intel.com>
Reviewed-on: http://review.coreboot.org/10148
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
commit 522149c310 (parent 0a50d9b353)
Author:    Lee Leahy
Date:      2015-05-08 11:33:55 -07:00
Committer: Aaron Durbin

2 changed files with 38 additions and 9 deletions
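
For context, a minimal sketch of how a romstage caller might use the new entry points to reserve the FSP 1.1 region. The CBMEM ID and size constants below are illustrative assumptions and not part of this change; only cbmem_initialize_id_size() and cbmem_initialize_empty_id_size() come from this patch.

#include <cbmem.h>

/* Illustrative values -- not defined by this patch. */
#define CBMEM_ID_FSP_RESERVED	0x46535052	/* assumed CBMEM ID */
#define FSP_RESERVED_SIZE	0x1000		/* assumed region size */

static void reserve_fsp_region(int s3_resume)
{
	if (s3_resume) {
		/* Attempt CBMEM recovery, then add the requested range. */
		cbmem_initialize_id_size(CBMEM_ID_FSP_RESERVED,
					 FSP_RESERVED_SIZE);
	} else {
		/* Fresh boot: create an empty CBMEM, adding this range
		 * first so it lands directly below the CBMEM root. */
		cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED,
					       FSP_RESERVED_SIZE);
	}
}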


@@ -146,12 +146,30 @@ struct cbmem_entry;
  */
 #define DYN_CBMEM_ALIGN_SIZE (4096)
+#define CBMEM_ROOT_SIZE DYN_CBMEM_ALIGN_SIZE
+
+/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
+#define CBMEM_ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
+#define CBMEM_LG_ALIGN CBMEM_ROOT_MIN_SIZE
+
+/* Small allocation parameters. */
+#define CBMEM_SM_ROOT_SIZE 1024
+#define CBMEM_SM_ALIGN 32
+
+/* Determine the size for CBMEM root and the small allocations */
+static inline size_t cbmem_overhead_size(void)
+{
+	return 2 * CBMEM_ROOT_MIN_SIZE;
+}

 /* By default cbmem is attempted to be recovered. Returns 0 if cbmem was
  * recovered or 1 if cbmem had to be reinitialized. */
 int cbmem_initialize(void);
+int cbmem_initialize_id_size(u32 id, u64 size);
+
 /* Initialize cbmem to be empty. */
 void cbmem_initialize_empty(void);
+void cbmem_initialize_empty_id_size(u32 id, u64 size);

 /* Return the top address for dynamic cbmem. The address returned needs to
  * be consistent across romstage and ramstage, and it is required to be
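
The new cbmem_overhead_size() helper reports the space taken at the top of CBMEM by the root region plus the small-allocation root (2 * CBMEM_ROOT_MIN_SIZE). Below is a rough sketch of the arithmetic a platform could use to estimate where a first entry added beneath that overhead ends up; cbmem_top() is the existing API for the top of CBMEM, the function name is illustrative, and alignment is ignored here.

#include <stddef.h>
#include <stdint.h>
#include <cbmem.h>

/* Rough estimate only: the first entry added to an empty CBMEM starts
 * below the root and small-allocation regions, i.e. about
 * cbmem_overhead_size() bytes under cbmem_top(), minus its own size.
 * Actual placement is also subject to imd alignment. */
static uintptr_t estimate_first_entry_base(size_t size)
{
	uintptr_t top = (uintptr_t)cbmem_top();

	return top - cbmem_overhead_size() - size;
}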


@@ -30,13 +30,6 @@
 #include <arch/acpi.h>
 #endif

-/* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
-#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
-#define LG_ALIGN ROOT_MIN_SIZE
-
-/* Small allocation parameters. */
-#define SM_ROOT_SIZE 1024
-#define SM_ALIGN 32
-
 static inline struct imd *cbmem_get_imd(void)
 {
	/* Only supply a backing store for imd in ramstage. */
@@ -115,6 +108,11 @@ static struct imd *imd_init_backing_with_recover(struct imd *backing)
 }

 void cbmem_initialize_empty(void)
+{
+	cbmem_initialize_empty_id_size(0, 0);
+}
+
+void cbmem_initialize_empty_id_size(u32 id, u64 size)
 {
	struct imd *imd;
	struct imd imd_backing;
@@ -127,12 +125,16 @@ void cbmem_initialize_empty(void)

	printk(BIOS_DEBUG, "CBMEM:\n");

-	if (imd_create_tiered_empty(imd, ROOT_MIN_SIZE, LG_ALIGN,
-					SM_ROOT_SIZE, SM_ALIGN)) {
+	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
+					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

+	/* Add the specified range first */
+	if (size)
+		cbmem_add(id, size);
+
	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
 }
@@ -145,6 +147,11 @@ static inline int cbmem_fail_recovery(void)
 }

 int cbmem_initialize(void)
+{
+	return cbmem_initialize_id_size(0, 0);
+}
+
+int cbmem_initialize_id_size(u32 id, u64 size)
 {
	struct imd *imd;
	struct imd imd_backing;
@@ -167,6 +174,10 @@ int cbmem_initialize(void)
	imd_lockdown(imd);
 #endif

+	/* Add the specified range first */
+	if (size)
+		cbmem_add(id, size);
+
	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
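
Because the requested range is added before cbmem_run_init_hooks() and before any other allocation, it becomes the first (highest) entry and therefore sits directly below the CBMEM root, as the commit message describes. A hedged sketch of how later code could look the region back up; the CBMEM ID is the same illustrative value used above and is not defined by this patch.

#include <cbmem.h>
#include <console/console.h>

/* Illustrative ID, matching the earlier sketch. */
#define CBMEM_ID_FSP_RESERVED 0x46535052

static void report_fsp_region(void)
{
	/* cbmem_find() returns the entry's base address, or NULL if absent. */
	void *base = cbmem_find(CBMEM_ID_FSP_RESERVED);

	if (base == NULL) {
		printk(BIOS_ERR, "FSP reserved region not found in CBMEM\n");
		return;
	}

	printk(BIOS_DEBUG, "FSP reserved region at %p\n", base);
}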