/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>

#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
#include <arch/acpi.h>
#endif

#ifndef UINT_MAX
#define UINT_MAX 4294967295U
#endif

/*
 * The dynamic cbmem code uses a root region. The root region boundary
 * addresses are determined by cbmem_top() and ROOT_MIN_SIZE. Just below
 * the address returned by cbmem_top() is a pointer that points to the
 * root data structure. The root data structure provides the bookkeeping
 * for each large entry.
 */
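
/*
 * Illustrative layout, as implied by cbmem_initialize_empty() and
 * cbmem_entry_add() below (a sketch, not to scale; addresses decrease
 * from top to bottom):
 *
 *	top_aligned = cbmem_top() & ~(DYN_CBMEM_ALIGN_SIZE - 1)
 *
 *	top_aligned                  -----------------------------------
 *	top_aligned - sizeof(ptr)    struct cbmem_root_pointer
 *	                             cbmem_entry slots (grow upward)
 *	root_addr                    struct cbmem_root
 *	                             -----------------------------------
 *	                             entry data regions, allocated
 *	                             downward; the most recently added
 *	                             entry has the lowest address
 */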

/* The root region is at least DYN_CBMEM_ALIGN_SIZE. */
#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
#define CBMEM_POINTER_MAGIC 0xc0389479
#define CBMEM_ENTRY_MAGIC ~(CBMEM_POINTER_MAGIC)

/* The cbmem_root_pointer structure lives just below the address returned
 * from cbmem_top(). It points to the root data structure that
 * maintains the entries. */
struct cbmem_root_pointer {
	u32 magic;
	u32 root;
} __attribute__((packed));

/* Each allocation is tracked by one cbmem_entry. The start address and
 * size describe the data region handed back to the caller. */
struct cbmem_entry {
	u32 magic;
	u32 start;
	u32 size;
	u32 id;
} __attribute__((packed));

/* The root keeps the array of entries. Once locked, no entries may be
 * added or removed. */
struct cbmem_root {
	u32 max_entries;
	u32 num_entries;
	u32 locked;
	u32 size;
	struct cbmem_entry entries[0];
} __attribute__((packed));

#if !defined(__PRE_RAM__)
static void *cached_cbmem_top;

void cbmem_set_top(void *ramtop)
{
	cached_cbmem_top = ramtop;
}
#endif

static inline void *cbmem_top_cached(void)
{
#if !defined(__PRE_RAM__)
	if (cached_cbmem_top == NULL)
		cached_cbmem_top = cbmem_top();

	return cached_cbmem_top;
#else
	return cbmem_top();
#endif
}

static inline uintptr_t get_top_aligned(void)
{
	uintptr_t top;

	/* Align down what is returned from cbmem_top(). */
	top = (uintptr_t)cbmem_top_cached();
	top &= ~(DYN_CBMEM_ALIGN_SIZE - 1);

	return top;
}

static inline void *get_root(void)
{
	uintptr_t pointer_addr;
	struct cbmem_root_pointer *pointer;

	pointer_addr = get_top_aligned();
	if (pointer_addr == 0)
		return NULL;

	pointer_addr -= sizeof(struct cbmem_root_pointer);

	pointer = (void *)pointer_addr;
	if (pointer->magic != CBMEM_POINTER_MAGIC)
		return NULL;

	pointer_addr = pointer->root;
	return (void *)pointer_addr;
}

static inline void cbmem_entry_assign(struct cbmem_entry *entry,
				      u32 id, u32 start, u32 size)
{
	entry->magic = CBMEM_ENTRY_MAGIC;
	entry->start = start;
	entry->size = size;
	entry->id = id;
}

static inline const struct cbmem_entry *
cbmem_entry_append(struct cbmem_root *root, u32 id, u32 start, u32 size)
{
	struct cbmem_entry *cbmem_entry;

	cbmem_entry = &root->entries[root->num_entries];
	root->num_entries++;

	cbmem_entry_assign(cbmem_entry, id, start, size);

	return cbmem_entry;
}

void cbmem_initialize_empty(void)
{
	uintptr_t pointer_addr;
	uintptr_t root_addr;
	unsigned long max_entries;
	struct cbmem_root *root;
	struct cbmem_root_pointer *pointer;

	/* Place the root pointer and the root. The number of entries is
	 * dictated by the difference between the root address, which is
	 * aligned down to DYN_CBMEM_ALIGN_SIZE, and the pointer, which
	 * falls just below the address returned by get_top_aligned(). */
	pointer_addr = get_top_aligned();
	if (pointer_addr == 0)
		return;

	root_addr = pointer_addr - ROOT_MIN_SIZE;
	root_addr &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
	pointer_addr -= sizeof(struct cbmem_root_pointer);

	max_entries = (pointer_addr - (root_addr + sizeof(*root))) /
		sizeof(struct cbmem_entry);

	pointer = (void *)pointer_addr;
	pointer->magic = CBMEM_POINTER_MAGIC;
	pointer->root = root_addr;

	root = (void *)root_addr;
	root->max_entries = max_entries;
	root->num_entries = 0;
	root->locked = 0;
	root->size = pointer_addr - root_addr +
		sizeof(struct cbmem_root_pointer);

	/* Add an entry covering the root region. */
	cbmem_entry_append(root, CBMEM_ID_ROOT, root_addr, root->size);

	printk(BIOS_DEBUG, "CBMEM: root @ %p %d entries.\n",
	       root, root->max_entries);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
}
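
/*
 * A worked example of the sizing above, assuming DYN_CBMEM_ALIGN_SIZE is
 * 4 KiB (the real value is provided by cbmem.h, not this file). The packed
 * structures are 8 bytes (cbmem_root_pointer), 16 bytes (cbmem_root) and
 * 16 bytes (cbmem_entry), so for an already-aligned cbmem_top():
 *
 *	root_addr    = top - 4096
 *	pointer_addr = top - 8
 *	max_entries  = (4096 - 8 - 16) / 16 = 254   (integer division)
 *
 * i.e. a single aligned root region provides room for 254 entry slots.
 */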

static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}

static int validate_entries(struct cbmem_root *root)
{
	unsigned int i;
	uintptr_t current_end;

	current_end = get_top_aligned();

	printk(BIOS_DEBUG, "CBMEM: recovering %d/%d entries from root @ %p\n",
	       root->num_entries, root->max_entries, root);

	/* Check that all regions are properly aligned and are just below
	 * the previous entry. */
	for (i = 0; i < root->num_entries; i++) {
		struct cbmem_entry *entry = &root->entries[i];

		if (entry->magic != CBMEM_ENTRY_MAGIC)
			return -1;

		if (entry->start & (DYN_CBMEM_ALIGN_SIZE - 1))
			return -1;

		if (entry->start + entry->size != current_end)
			return -1;

		current_end = entry->start;
	}

	return 0;
}

int cbmem_initialize(void)
{
	struct cbmem_root *root;
	uintptr_t top_according_to_root;

	root = get_root();

	/* No recovery possible since root couldn't be recovered. */
	if (root == NULL)
		return cbmem_fail_recovery();

	/* Sanity check the root. */
	top_according_to_root = (root->size + (uintptr_t)root);
	if (get_top_aligned() != top_according_to_root)
		return cbmem_fail_recovery();

	if (root->num_entries > root->max_entries)
		return cbmem_fail_recovery();

	if ((root->max_entries * sizeof(struct cbmem_entry)) >
	    (root->size - sizeof(struct cbmem_root_pointer) - sizeof(*root)))
		return cbmem_fail_recovery();

	/* Validate current entries. */
	if (validate_entries(root))
		return cbmem_fail_recovery();

#if defined(__PRE_RAM__)
	/* Lock the root in the romstage on a recovery. The assumption is that
	 * recovery is called during romstage on the S3 resume path. */
	root->locked = 1;
#endif

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();

	/* Recovery successful. */
	return 0;
}

int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

static uintptr_t cbmem_base(void)
{
	struct cbmem_root *root;
	uintptr_t low_addr;

	root = get_root();

	if (root == NULL)
		return 0;

	low_addr = (uintptr_t)root;
	/* Entry start addresses are stored as 32-bit values, so keep the
	 * base within 32 bits. */
	low_addr &= 0xffffffff;

	/* Assume the lowest address is the last one added. */
	if (root->num_entries > 0) {
		low_addr = root->entries[root->num_entries - 1].start;
	}

	return low_addr;
}

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct cbmem_root *root;
	const struct cbmem_entry *entry;
	uintptr_t base;
	u32 size;
	u32 aligned_size;

	entry = cbmem_entry_find(id);

	if (entry != NULL)
		return entry;

	/* Only handle sizes <= UINT_MAX internally. */
	if (size64 > (u64)UINT_MAX)
		return NULL;

	size = size64;

	root = get_root();

	if (root == NULL)
		return NULL;

	/* Nothing can be added once it is locked down. */
	if (root->locked)
		return NULL;

	if (root->max_entries == root->num_entries)
		return NULL;

	aligned_size = ALIGN(size, DYN_CBMEM_ALIGN_SIZE);
	base = cbmem_base();
	base -= aligned_size;

	return cbmem_entry_append(root, id, base, aligned_size);
}

void *cbmem_add(u32 id, u64 size)
{
	const struct cbmem_entry *entry;

	entry = cbmem_entry_add(id, size);

	if (entry == NULL)
		return NULL;

	return cbmem_entry_start(entry);
}
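
/*
 * Usage sketch for the allocation entry points above. The id, size, and
 * helper function below are illustrative only and not taken from this file:
 *
 *	void *buf = cbmem_add(CBMEM_ID_CONSOLE, 8 * 1024);
 *	if (buf != NULL)
 *		init_console_buffer(buf);
 *
 * A later stage retrieves the same region with:
 *
 *	buf = cbmem_find(CBMEM_ID_CONSOLE);
 *
 * Note that cbmem_entry_add() returns the existing entry when the id has
 * already been added, so callers need not special-case reinitialization.
 */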

/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct cbmem_root *root;
	const struct cbmem_entry *entry;
	unsigned int i;

	root = get_root();

	if (root == NULL)
		return NULL;

	entry = NULL;

	for (i = 0; i < root->num_entries; i++) {
		if (root->entries[i].id == id) {
			entry = &root->entries[i];
			break;
		}
	}

	return entry;
}

void *cbmem_find(u32 id)
{
	const struct cbmem_entry *entry;

	entry = cbmem_entry_find(id);

	if (entry == NULL)
		return NULL;

	return cbmem_entry_start(entry);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	unsigned long entry_num;
	struct cbmem_root *root;

	root = get_root();

	if (root == NULL)
		return -1;

	if (root->num_entries == 0)
		return -1;

	/* Nothing can be removed while the root is locked. */
	if (root->locked)
		return -1;

	entry_num = entry - &root->entries[0];

	/* If the entry is the last one in the root it can be removed. */
	if (entry_num == (root->num_entries - 1)) {
		root->num_entries--;
		return 0;
	}

	return -1;
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	return entry->size;
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	uintptr_t addr = entry->start;
	return (void *)addr;
}

#if !defined(__PRE_RAM__)

#if IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
/* Selected cbmem can be initialized early in ramstage. Additionally, that
 * means the cbmem console can be reinitialized early as well. No work is
 * needed after device enumeration since cbmem was initialized early in
 * ramstage. */
static void init_cbmem_pre_device(void *unused)
{
	cbmem_initialize();
}

BOOT_STATE_INIT_ENTRY(BS_PRE_DEVICE, BS_ON_ENTRY, init_cbmem_pre_device, NULL);
#else

static void init_cbmem_post_device(void *unused)
{
	if (acpi_is_wakeup())
		cbmem_initialize();
	else
		cbmem_initialize_empty();
}

BOOT_STATE_INIT_ENTRY(BS_POST_DEVICE, BS_ON_ENTRY,
		      init_cbmem_post_device, NULL);
#endif

void cbmem_add_bootmem(void)
{
	uintptr_t base;
	uintptr_t top;

	base = cbmem_base();
	top = get_top_aligned();
	bootmem_add_range(base, top - base, LB_MEM_TABLE);
}

void cbmem_list(void)
{
	unsigned int i;
	struct cbmem_root *root;

	root = get_root();

	if (root == NULL)
		return;

	for (i = 0; i < root->num_entries; i++) {
		struct cbmem_entry *entry;

		entry = &root->entries[i];

		cbmem_print_entry(i, entry->id, entry->start, entry->size);
	}
}
#endif /* __PRE_RAM__ */