mips: Allow memory to be identity mapped in the TLB
Introduce the identity_map() function. It takes a memory range and identity
maps it entirely in the TLB, if possible, so that the virtual and physical
address ranges are the same. The function attempts to use as large a page
size as possible for each region in order to conserve TLB entries.

BUG=chrome-os-partner:36258
BRANCH=none
TEST=Build and boot on Pistachio with the rest of the patches applied.

Change-Id: I4d781b04699e069a71c49a0c6ca15c7a6b42a468
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Original-Commit-Id: 234d32edfd201019b7a723316a79c932c62ce87e
Original-Change-Id: If3e2392b19555cb6dbae8b5559c1b1e53a313637
Original-Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
Original-Signed-off-by: Vadim Bendebury <vbendeb@chromium.org>
Original-Reviewed-on: https://chromium-review.googlesource.com/246693
Reviewed-on: http://review.coreboot.org/9815
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
commit 3537e956e1
parent df4081e72c
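A rough usage sketch of the new interface (not part of this change; the base address, size, and calling stage are hypothetical, and the MiB helper is assumed to exist alongside the KiB macro this patch already uses). identity_map() returns -1 once the wired TLB entries run out, so a caller should check it:

#include <arch/mmu.h>
#include <console/console.h>
#include <stdlib.h>

/* Hypothetical caller: identity map 256 MiB of DRAM at physical 0x00000000
 * so that virtual == physical for that window. */
static void map_dram_sketch(void)
{
	if (identity_map(0x00000000, 256 * MiB))
		printk(BIOS_ERR, "DRAM window does not fit in the TLB\n");
}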
@@ -37,6 +37,7 @@ bootblock-y += boot.c
bootblock-y += bootblock.S
bootblock-y += bootblock_simple.c
bootblock-y += cache.c
bootblock-y += mmu.c
bootblock-y += stages.c
bootblock-y += ../../lib/memcpy.c
bootblock-y += ../../lib/memmove.c
@@ -63,6 +64,7 @@ ifeq ($(CONFIG_ARCH_ROMSTAGE_MIPS),y)
romstage-y += boot.c
romstage-$(CONFIG_EARLY_CONSOLE) += early_console.c
romstage-y += cache.c
romstage-y += mmu.c
romstage-y += stages.c
romstage-y += ../../lib/memcpy.c
romstage-y += ../../lib/memmove.c
@@ -83,6 +85,7 @@ ifeq ($(CONFIG_ARCH_RAMSTAGE_MIPS),y)
ramstage-y += ashldi3.c
ramstage-y += boot.c
ramstage-y += cache.c
ramstage-y += mmu.c
ramstage-y += stages.c
ramstage-y += tables.c
ramstage-y += ../../lib/memcpy.c
@@ -79,13 +79,86 @@ do { \
} while (0)

/* Shortcuts to access various internal registers, keep adding as needed. */
#define read_c0_index() __read_32bit_c0_register($0, 0)
#define write_c0_index(val) __write_32bit_c0_register($0, 0, (val))

#define read_c0_entrylo0() __read_32bit_c0_register($2, 0)
#define write_c0_entrylo0(val) __write_32bit_c0_register($2, 0, (val))

#define read_c0_entrylo1() __read_32bit_c0_register($3, 0)
#define write_c0_entrylo1(val) __write_32bit_c0_register($3, 0, (val))

#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, (val))

#define read_c0_wired() __read_32bit_c0_register($6, 0)
#define write_c0_wired(val) __write_32bit_c0_register($6, 0, (val))

#define read_c0_count() __read_32bit_c0_register($9, 0)
#define write_c0_count(val) __write_32bit_c0_register($9, 0, (val))

#define read_c0_entryhi() __read_32bit_c0_register($10, 0)
#define write_c0_entryhi(val) __write_32bit_c0_register($10, 0, (val))

#define read_c0_cause() __read_32bit_c0_register($13, 0)
#define write_c0_cause(val) __write_32bit_c0_register($13, 0, (val))

#define read_c0_config1() __read_32bit_c0_register($16, 1)
#define write_c0_config1(val) __write_32bit_c0_register($16, 1, (val))

#define C0_ENTRYLO_PFN_SHIFT 6
#define C0_ENTRYLO_WB (0x3 << 3) /* Cacheable, write-back, non-coherent */
#define C0_ENTRYLO_D (0x1 << 2) /* Writeable */
#define C0_ENTRYLO_V (0x1 << 1) /* Valid */
#define C0_ENTRYLO_G (0x1 << 0) /* Global */

#define C0_PAGEMASK_SHIFT 13
#define C0_PAGEMASK_MASK 0xffff

#define C0_WIRED_MASK 0x3f

#define C0_CAUSE_DC (1 << 27)

#define C0_CONFIG1_MMUSIZE_SHIFT 25
#define C0_CONFIG1_MMUSIZE_MASK 0x3f

/* Hazard handling */
static inline void __nop(void)
{
	__asm__ __volatile__("nop");
}

static inline void __ssnop(void)
{
	__asm__ __volatile__("sll\t$0, $0, 1");
}

#define mtc0_tlbw_hazard() \
	do { \
		__nop(); \
		__nop(); \
	} while (0)

#define tlbw_use_hazard() \
	do { \
		__nop(); \
		__nop(); \
		__nop(); \
	} while (0)

#define tlb_probe_hazard() \
	do { \
		__nop(); \
		__nop(); \
		__nop(); \
	} while (0)

#define back_to_back_c0_hazard() \
	do { \
		__ssnop(); \
		__ssnop(); \
		__ssnop(); \
	} while (0)
/**************************************************************************/

#endif /* __MIPS_ARCH_CPU_H */
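For reference, the EntryLo layout behind the C0_ENTRYLO_* definitions added to arch/cpu.h above can be summed up in a small helper. This is only an illustration mirroring what mmu.c does further down; the helper name is ours and is not part of the patch:

/* Illustrative only: encode the 4 KiB frame containing 'paddr' as a
 * cacheable (write-back), writable, valid, global EntryLo value. */
static inline uint32_t make_entrylo(uint32_t paddr)
{
	uint32_t pfn = paddr >> 12;	/* 4 KiB physical frame number */

	return (pfn << C0_ENTRYLO_PFN_SHIFT) | C0_ENTRYLO_WB |
		C0_ENTRYLO_D | C0_ENTRYLO_V | C0_ENTRYLO_G;
}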
@@ -0,0 +1,59 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2015 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef __MIPS_ARCH_MMU_H
#define __MIPS_ARCH_MMU_H

#include <arch/cpu.h>
#include <stddef.h>
#include <stdint.h>

static inline void tlb_write_indexed(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbwi\n\t"
		".set reorder");
}

static inline uint32_t get_max_pagesize(void)
{
	uint32_t max_pgsize;

	write_c0_pagemask(C0_PAGEMASK_MASK << C0_PAGEMASK_SHIFT);
	back_to_back_c0_hazard();
	max_pgsize = (((read_c0_pagemask() >> C0_PAGEMASK_SHIFT) &
		       C0_PAGEMASK_MASK) + 1) * 4 * KiB;

	return max_pgsize;
}

static inline uint32_t get_tlb_size(void)
{
	uint32_t tlbsize;

	tlbsize = ((read_c0_config1() >> C0_CONFIG1_MMUSIZE_SHIFT) &
		   C0_CONFIG1_MMUSIZE_MASK) + 1;

	return tlbsize;
}

int identity_map(uint32_t start, size_t len);

#endif /* __MIPS_ARCH_MMU_H */
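A hedged usage sketch for the probing helpers declared above (the reporting function and its placement are illustrative, not part of this change):

#include <arch/mmu.h>
#include <console/console.h>

/* Illustrative only: report the TLB geometry the helpers discover.
 * get_tlb_size() decodes Config1.MMUSize; get_max_pagesize() probes
 * PageMask for the largest page size the core accepts. */
static void report_tlb_geometry(void)
{
	printk(BIOS_INFO, "TLB: %u entries, max page size %u bytes\n",
	       get_tlb_size(), get_max_pagesize());
}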
@@ -0,0 +1,105 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2015 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#include <arch/cpu.h>
#include <arch/mmu.h>
#include <console/console.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define MIN_PAGE_SIZE (4 * KiB)

static int add_wired_tlb_entry(uint32_t entrylo0, uint32_t entrylo1,
			       uint32_t entryhi, uint32_t pgsize)
{
	uint32_t tlbindex;

	tlbindex = read_c0_wired();
	if (tlbindex >= get_tlb_size() || tlbindex >= C0_WIRED_MASK) {
		printk(BIOS_ERR, "Ran out of TLB entries\n");
		return -1;
	}
	write_c0_wired(tlbindex + 1);
	write_c0_index(tlbindex);
	write_c0_pagemask(((pgsize / MIN_PAGE_SIZE) - 1) << C0_PAGEMASK_SHIFT);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	return 0;
}

static uint32_t pick_pagesize(uint32_t start, uint32_t len)
{
	uint32_t pgsize, max_pgsize;

	max_pgsize = get_max_pagesize();
	for (pgsize = max_pgsize;
	     pgsize >= MIN_PAGE_SIZE;
	     pgsize = pgsize / 4) {
		/*
		 * Each TLB entry maps a pair of virtual pages. To avoid
		 * aliasing, pick the largest page size that is at most
		 * half the size of the region we're trying to map.
		 */
		if (IS_ALIGNED(start, 2 * pgsize) && (2 * pgsize <= len))
			break;
	}

	return pgsize;
}

/*
 * Identity map the memory from [start,start+len] in the TLB using the
 * largest suitable page size so as to conserve TLB entries.
 */
int identity_map(uint32_t start, size_t len)
{
	uint32_t pgsize, pfn, entryhi, entrylo0, entrylo1;

	while (len > 0) {
		pgsize = pick_pagesize(start, len);
		entryhi = start;
		pfn = start >> 12;
		entrylo0 = (pfn << C0_ENTRYLO_PFN_SHIFT) | C0_ENTRYLO_WB |
			C0_ENTRYLO_D | C0_ENTRYLO_V | C0_ENTRYLO_G;
		start += pgsize;
		len -= MIN(len, pgsize);
		if (len >= pgsize) {
			pfn = start >> 12;
			entrylo1 = (pfn << C0_ENTRYLO_PFN_SHIFT) |
				C0_ENTRYLO_WB | C0_ENTRYLO_D | C0_ENTRYLO_V |
				C0_ENTRYLO_G;
			start += pgsize;
			len -= MIN(len, pgsize);
		} else {
			entrylo1 = 0;
		}
		if (add_wired_tlb_entry(entrylo0, entrylo1, entryhi, pgsize))
			return -1;
	}

	return 0;
}
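A hedged walk-through of the loop above (the region size and the 16 MiB maximum page size are made-up numbers, chosen only to show how page pairs are selected):

/*
 * identity_map(0x00000000, 40 * MiB) on a core whose largest page is
 * 16 MiB would proceed as follows:
 *
 *   pass 1: pick_pagesize() returns 16 MiB (start is aligned to 32 MiB
 *           and 2 * 16 MiB <= 40 MiB); one wired entry maps the pair
 *           0x00000000-0x01ffffff; 8 MiB remain
 *   pass 2: pick_pagesize() returns 4 MiB (2 * 4 MiB <= 8 MiB); one
 *           wired entry maps 0x02000000-0x027fffff; nothing remains
 *
 * The whole 40 MiB region is covered by two wired TLB entries.
 */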