cpu/x86: add limited runtime identity page mapping
When employing PAGING_IN_CACHE_AS_RAM, more areas need to be mapped in at runtime. Therefore, paging_identity_map_addr() is added to support adding identity mappings. Because there is a fixed number of pages in cache-as-ram paging, only the existing paging structures can be used; that places a limitation on which regions and lengths one can map. Using util/x86/x86_page_tables.go to generate page tables will always populate all the page directory pages, so 2MiB mappings are easy to map in.

BUG=b:72728953
Change-Id: Ibe33aa12972ff678d2e9b80874529380b4ce9fd7
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: https://review.coreboot.org/25718
Reviewed-by: Justin TerAvest <teravest@chromium.org>
Reviewed-by: Furquan Shaikh <furquan@google.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
This commit is contained in:
parent
d127be102b
commit
696c642afa
|
@ -15,6 +15,7 @@
|
|||
|
||||
#include <cbfs.h>
|
||||
#include <compiler.h>
|
||||
#include <commonlib/helpers.h>
|
||||
#include <console/console.h>
|
||||
#include <cpu/cpu.h>
|
||||
#include <arch/cpu.h>
|
||||
|
@ -25,6 +26,45 @@
|
|||
#include <string.h>
|
||||
#include <symbols.h>
|
||||
|
||||
/* Page-directory-pointer-table entry (PDPTE) bits -- PAE paging. */
#define PDPTE_PRES (1ULL << 0)
#define PDPTE_ADDR_MASK (~((1ULL << 12) - 1))

/* Page-directory entry (PDE) bits. */
#define PDE_PRES (1ULL << 0)
#define PDE_RW (1ULL << 1)
#define PDE_US (1ULL << 2)
#define PDE_PWT (1ULL << 3)
#define PDE_PCD (1ULL << 4)
#define PDE_A (1ULL << 5)
#define PDE_D (1ULL << 6) // only valid with PS=1
#define PDE_PS (1ULL << 7)
#define PDE_G (1ULL << 8) // only valid with PS=1
#define PDE_PAT (1ULL << 12) // only valid with PS=1
#define PDE_XD (1ULL << 63)
#define PDE_ADDR_MASK (~((1ULL << 12) - 1))

/* Page-table entry (PTE) bits. */
#define PTE_PRES (1ULL << 0)
#define PTE_RW (1ULL << 1)
#define PTE_US (1ULL << 2)
#define PTE_PWT (1ULL << 3)
#define PTE_PCD (1ULL << 4)
#define PTE_A (1ULL << 5)
#define PTE_D (1ULL << 6)
#define PTE_PAT (1ULL << 7)
#define PTE_G (1ULL << 8)
#define PTE_XD (1ULL << 63)

/* Linear-address decomposition under 32-bit PAE paging:
 * bits 31:30 index the PDPT, 29:21 the page directory, 20:12 the
 * page table. */
#define PDPTE_IDX_SHIFT 30
#define PDPTE_IDX_MASK 0x3

#define PDE_IDX_SHIFT 21
#define PDE_IDX_MASK 0x1ff

#define PTE_IDX_SHIFT 12
#define PTE_IDX_MASK 0x1ff

/* Named page sizes so the code below avoids bare magic numbers. */
static const size_t s2MiB = 2 * MiB;
static const size_t s4KiB = 4 * KiB;
|
||||
|
||||
void paging_enable_pae_cr3(uintptr_t cr3)
|
||||
{
|
||||
/* Load the page table address */
|
||||
|
@ -216,3 +256,177 @@ int paging_enable_for_car(const char *pdpt_name, const char *pt_name)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *get_pdpt_addr(void)
|
||||
{
|
||||
if (ENV_CACHE_AS_RAM)
|
||||
return _pdpt;
|
||||
return (void *)(uintptr_t)read_cr3();
|
||||
}
|
||||
|
||||
static uint64_t pde_pat_flags(int pat)
|
||||
{
|
||||
switch (pat) {
|
||||
case PAT_UC:
|
||||
return 0 | PDE_PCD | PDE_PWT;
|
||||
case PAT_WC:
|
||||
return 0 | 0 | PDE_PWT;
|
||||
case PAT_WT:
|
||||
return PDE_PAT | PDE_PCD | PDE_PWT;
|
||||
case PAT_WP:
|
||||
return PDE_PAT | 0 | PDE_PWT;
|
||||
case PAT_WB:
|
||||
return 0 | 0 | 0;
|
||||
case PAT_UC_MINUS:
|
||||
return 0 | PDE_PCD | 0;
|
||||
default:
|
||||
printk(BIOS_ERR, "PDE PAT defaulting to WB: %x\n", pat);
|
||||
return pde_pat_flags(PAT_WB);
|
||||
}
|
||||
}
|
||||
|
||||
static uint64_t pde_page_flags(int pat)
|
||||
{
|
||||
uint64_t flags = PDE_PS | PDE_PRES | PDE_RW | PDE_A | PDE_D;
|
||||
|
||||
return flags | pde_pat_flags(pat);
|
||||
}
|
||||
|
||||
static uint64_t pte_pat_flags(int pat)
|
||||
{
|
||||
switch (pat) {
|
||||
case PAT_UC:
|
||||
return 0 | PTE_PCD | PTE_PWT;
|
||||
case PAT_WC:
|
||||
return 0 | 0 | PTE_PWT;
|
||||
case PAT_WT:
|
||||
return PTE_PAT | PTE_PCD | PTE_PWT;
|
||||
case PAT_WP:
|
||||
return PTE_PAT | 0 | PTE_PWT;
|
||||
case PAT_WB:
|
||||
return 0 | 0 | 0;
|
||||
case PAT_UC_MINUS:
|
||||
return 0 | PTE_PCD | 0;
|
||||
default:
|
||||
printk(BIOS_ERR, "PTE PAT defaulting to WB: %x\n", pat);
|
||||
return pte_pat_flags(PAT_WB);
|
||||
}
|
||||
}
|
||||
|
||||
static uint64_t pte_page_flags(int pat)
|
||||
{
|
||||
uint64_t flags = PTE_PRES | PTE_RW | PTE_A | PTE_D;
|
||||
return flags | pte_pat_flags(pat);
|
||||
}
|
||||
|
||||
/* Identity map an address. This function does not handle splitting or adding
 * new pages to the page tables. It's assumed all the page tables are already
 * seeded with the correct amount and topology.
 *
 * 'size' selects the page size and must be exactly 2MiB or 4KiB; any other
 * value fails. 'pat' selects the caching attributes. When 'commit' is 0 only
 * feasibility is checked and no entry is written, letting the caller dry-run
 * a whole range before mutating any tables. Returns 0 on success, -1 on
 * failure. */
static int identity_map_one_page(uintptr_t base, size_t size, int pat,
					int commit)
{
	uint64_t (*pdpt)[4];	/* PAE: 4 PDPTEs cover the 4GiB space. */
	uint64_t pdpte;
	uint64_t (*pd)[512];	/* 512 PDEs per page directory page. */
	uint64_t pde;

	pdpt = get_pdpt_addr();

	/* Locate the PDPTE covering 'base'. */
	pdpte = (*pdpt)[(base >> PDPTE_IDX_SHIFT) & PDPTE_IDX_MASK];

	/* No page table page allocation. */
	if (!(pdpte & PDPTE_PRES))
		return -1;

	pd = (void *)(uintptr_t)(pdpte & PDPTE_ADDR_MASK);

	/* Map in a 2MiB page. */
	if (size == s2MiB) {
		if (!commit)
			return 0;
		/* Identity mapping: physical address equals 'base'. */
		pde = base;
		pde |= pde_page_flags(pat);
		(*pd)[(base >> PDE_IDX_SHIFT) & PDE_IDX_MASK] = pde;
		return 0;
	}

	if (size == s4KiB) {
		uint64_t (*pt)[512];
		uint64_t pte;

		pde = (*pd)[(base >> PDE_IDX_SHIFT) & PDE_IDX_MASK];

		/* No page table page allocation. */
		if (!(pde & PDE_PRES)) {
			printk(BIOS_ERR, "Cannot allocate page table for pde %p\n",
				(void *)base);
			return -1;
		}

		/* No splitting pages */
		if (pde & PDE_PS) {
			printk(BIOS_ERR, "Cannot split pde %p\n", (void *)base);
			return -1;
		}

		if (!commit)
			return 0;

		/* Follow the PDE to its page table and write the PTE. */
		pt = (void *)(uintptr_t)(pde & PDE_ADDR_MASK);
		pte = base;
		pte |= pte_page_flags(pat);
		(*pt)[(base >> PTE_IDX_SHIFT) & PTE_IDX_MASK] = pte;

		return 0;
	}

	/* Unsupported page size. */
	return -1;
}
|
||||
|
||||
static int _paging_identity_map_addr(uintptr_t base, size_t size, int pat,
|
||||
int commit)
|
||||
{
|
||||
while (size != 0) {
|
||||
size_t map_size;
|
||||
|
||||
map_size = IS_ALIGNED(base, s2MiB) ? s2MiB : s4KiB;
|
||||
map_size = MIN(size, map_size);
|
||||
|
||||
if (identity_map_one_page(base, map_size, pat, commit) < 0)
|
||||
return -1;
|
||||
|
||||
base += map_size;
|
||||
size -= map_size;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int paging_is_enabled(void)
|
||||
{
|
||||
return !!(read_cr0() & CR0_PG);
|
||||
}
|
||||
|
||||
int paging_identity_map_addr(uintptr_t base, size_t size, int pat)
|
||||
{
|
||||
if (!paging_is_enabled()) {
|
||||
printk(BIOS_ERR, "Paging is not enabled.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!IS_ALIGNED(base, s2MiB) && !IS_ALIGNED(base, s4KiB)) {
|
||||
printk(BIOS_ERR, "base %p is not aligned.\n", (void *)base);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!IS_ALIGNED(size, s2MiB) && !IS_ALIGNED(size, s4KiB)) {
|
||||
printk(BIOS_ERR, "size %zx is not aligned.\n", size);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* First try without committing. If success commit. */
|
||||
if (_paging_identity_map_addr(base, size, pat, 0))
|
||||
return -1;
|
||||
|
||||
return _paging_identity_map_addr(base, size, pat, 1);
|
||||
}
|
||||
|
|
|
@ -32,6 +32,11 @@ void paging_set_default_pat(void);
|
|||
* failure. */
|
||||
int paging_enable_for_car(const char *pdpt_name, const char *pt_name);
|
||||
|
||||
/* Identity map the region indicated by 'base' and 'size'. Both 'base' and
|
||||
* 'size' need to be 4KiB or 2 MiB aligned. 'pat' should be one of the
|
||||
* PAT defines above. 0 is returned on success, < 0 on failure. */
|
||||
int paging_identity_map_addr(uintptr_t base, size_t size, int pat);
|
||||
|
||||
#define MAPPING_ERROR ((void *)0xffffffffUL)
|
||||
void *map_2M_page(unsigned long page);
|
||||
|
||||
|
|
Loading…
Reference in New Issue