coreboot-kgpe-d16/src/cpu/x86/mtrr/mtrr.c
Aaron Durbin 9b027fe5b0 mtrr: honor IORESOURCE_WRCOMB
All resources that set the IORESOURCE_WRCOMB attribute and are
also marked IORESOURCE_PREFETCH will have an MTRR set up with
the write-combining cacheable type. The only resources on
x86 that can be set to write-combining are prefetchable ones.

Change-Id: Iba7452cff3677e07d7e263b79982a49c93be9c54
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/2892
Tested-by: build bot (Jenkins)
Reviewed-by: Ronald G. Minnich <rminnich@gmail.com>
2013-03-29 19:57:31 +01:00

/*
* mtrr.c: setting MTRR to decent values for cache initialization on P6
*
* Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
*
* Copyright 2000 Silicon Integrated System Corporation
* Copyright 2013 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* Reference: Intel Architecture Software Developer's Manual, Volume 3: System Programming
*/
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <console/console.h>
#include <device/device.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <arch/cpu.h>
#include <arch/acpi.h>
#include <memrange.h>
#if CONFIG_X86_AMD_FIXED_MTRRS
#include <cpu/amd/mtrr.h>
#define MTRR_FIXED_WRBACK_BITS (MTRR_READ_MEM | MTRR_WRITE_MEM)
#else
#define MTRR_FIXED_WRBACK_BITS 0
#endif
/* 2 MTRRs are reserved for the operating system */
#define BIOS_MTRRS 6
#define OS_MTRRS 2
#define MTRRS (BIOS_MTRRS + OS_MTRRS)
static int total_mtrrs = MTRRS;
static int bios_mtrrs = BIOS_MTRRS;
static void detect_var_mtrrs(void)
{
msr_t msr;
msr = rdmsr(MTRRcap_MSR);
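/* MTRRcap[7:0] (VCNT) reports the number of variable MTRR pairs
* supported by the processor. */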
total_mtrrs = msr.lo & 0xff;
bios_mtrrs = total_mtrrs - OS_MTRRS;
}
void enable_fixed_mtrr(void)
{
msr_t msr;
msr = rdmsr(MTRRdefType_MSR);
msr.lo |= MTRRdefTypeEn | MTRRdefTypeFixEn;
wrmsr(MTRRdefType_MSR, msr);
}
static void enable_var_mtrr(unsigned char deftype)
{
msr_t msr;
msr = rdmsr(MTRRdefType_MSR);
msr.lo &= ~0xff;
msr.lo |= MTRRdefTypeEn | deftype;
wrmsr(MTRRdefType_MSR, msr);
}
/* fms: find most significant bit set, stolen from Linux Kernel Source. */
static inline unsigned int fms(unsigned int x)
{
int r;
__asm__("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $0,%0\n"
"1:" : "=r" (r) : "g" (x));
return r;
}
/* fls: find least significant bit set */
static inline unsigned int fls(unsigned int x)
{
int r;
__asm__("bsfl %1,%0\n\t"
"jnz 1f\n\t"
"movl $32,%0\n"
"1:" : "=r" (r) : "g" (x));
return r;
}
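/* Example: for x = 0x1800 (bits 11 and 12 set), fms(x) returns 12,
* the index of the highest set bit, while fls(x) returns 11, the
* index of the lowest set bit. For x = 0, fms() returns 0 and
* fls() returns 32. */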
#define MTRR_VERBOSE_LEVEL BIOS_NEVER
/* MTRRs are at a 4KiB granularity. Therefore all address calculations can
* be done with 32-bit numbers. This allows for the MTRR code to handle
* up to 2^44 bytes (16 TiB) of address space. */
#define RANGE_SHIFT 12
#define ADDR_SHIFT_TO_RANGE_SHIFT(x) \
(((x) > RANGE_SHIFT) ? ((x) - RANGE_SHIFT) : RANGE_SHIFT)
#define PHYS_TO_RANGE_ADDR(x) ((x) >> RANGE_SHIFT)
#define RANGE_TO_PHYS_ADDR(x) (((resource_t)(x)) << RANGE_SHIFT)
#define NUM_FIXED_MTRRS (NUM_FIXED_RANGES / RANGES_PER_FIXED_MTRR)
/* The minimum alignment while handling variable MTRR ranges is 64MiB. */
#define MTRR_MIN_ALIGN PHYS_TO_RANGE_ADDR(64 << 20)
/* Helpful constants. */
#define RANGE_1MB PHYS_TO_RANGE_ADDR(1 << 20)
#define RANGE_4GB (1 << (ADDR_SHIFT_TO_RANGE_SHIFT(32)))
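/* Example: physical address 0xFEC00000 becomes range address
* 0xFEC00, and RANGE_4GB evaluates to 1 << 20 range units, i.e.
* 4GiB expressed in 4KiB pages. */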
static inline uint32_t range_entry_base_mtrr_addr(struct range_entry *r)
{
return PHYS_TO_RANGE_ADDR(range_entry_base(r));
}
static inline uint32_t range_entry_end_mtrr_addr(struct range_entry *r)
{
return PHYS_TO_RANGE_ADDR(range_entry_end(r));
}
static struct memranges *get_physical_address_space(void)
{
static struct memranges *addr_space;
static struct memranges addr_space_storage;
/* In order to handle some chipsets not being able to pre-determine
* uncacheable ranges, such as graphics memory, at resource insertion
* time, remove the uncacheable regions from the cacheable ones. */
if (addr_space == NULL) {
struct range_entry *r;
unsigned long mask;
unsigned long match;
addr_space = &addr_space_storage;
mask = IORESOURCE_CACHEABLE;
/* Collect cacheable and uncacheable address ranges. The
* uncacheable regions take precedence over the cacheable
* regions. */
memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
memranges_add_resources(addr_space, mask, 0,
MTRR_TYPE_UNCACHEABLE);
/* Handle any write combining resources. Only prefetchable
* resources with the IORESOURCE_WRCOMB flag are appropriate
* for this MTRR type. */
match = IORESOURCE_PREFETCH | IORESOURCE_WRCOMB;
mask |= match;
memranges_add_resources(addr_space, mask, match,
MTRR_TYPE_WRCOMB);
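/* With mask now covering IORESOURCE_CACHEABLE, IORESOURCE_PREFETCH
* and IORESOURCE_WRCOMB, only non-cacheable resources carrying both
* the prefetch and write-combine flags match here, e.g. a
* prefetchable graphics aperture. */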
/* The address space below 4GiB is special. It needs to be
* covered entirely by range entries so that MTRR calculations
* can be properly done for the full 32-bit address space.
* Therefore, ensure holes are filled up to 4GiB as
* uncacheable. */
memranges_fill_holes_up_to(addr_space,
RANGE_TO_PHYS_ADDR(RANGE_4GB),
MTRR_TYPE_UNCACHEABLE);
printk(BIOS_DEBUG, "MTRR: Physical address space:\n");
memranges_each_entry(r, addr_space)
printk(BIOS_DEBUG,
"0x%016llx - 0x%016llx size 0x%08llx type %ld\n",
range_entry_base(r), range_entry_end(r),
range_entry_size(r), range_entry_tag(r));
}
return addr_space;
}
/* Fixed MTRR descriptor. This structure defines the step size and the
* begin and end (exclusive) addresses covered by a set of fixed MTRR MSRs.
* It also describes the offset in byte intervals at which to store the
* calculated MTRR type in an array. */
struct fixed_mtrr_desc {
uint32_t begin;
uint32_t end;
uint32_t step;
int range_index;
int msr_index_base;
};
/* Shared MTRR calculations. Can be reused by APs. */
static uint8_t fixed_mtrr_types[NUM_FIXED_RANGES];
/* Fixed MTRR descriptors. */
static const struct fixed_mtrr_desc fixed_mtrr_desc[] = {
{ PHYS_TO_RANGE_ADDR(0x000000), PHYS_TO_RANGE_ADDR(0x080000),
PHYS_TO_RANGE_ADDR(64 * 1024), 0, MTRRfix64K_00000_MSR },
{ PHYS_TO_RANGE_ADDR(0x080000), PHYS_TO_RANGE_ADDR(0x0C0000),
PHYS_TO_RANGE_ADDR(16 * 1024), 8, MTRRfix16K_80000_MSR },
{ PHYS_TO_RANGE_ADDR(0x0C0000), PHYS_TO_RANGE_ADDR(0x100000),
PHYS_TO_RANGE_ADDR(4 * 1024), 24, MTRRfix4K_C0000_MSR },
};
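/* Together these descriptors cover the NUM_FIXED_RANGES (88) fixed
* ranges: eight 64KiB ranges (indices 0-7), sixteen 16KiB ranges
* (8-23) and sixty-four 4KiB ranges (24-87), packed eight to an MSR
* across the NUM_FIXED_MTRRS (11) fixed MTRR MSRs. */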
static void calc_fixed_mtrrs(void)
{
static int fixed_mtrr_types_initialized;
struct memranges *phys_addr_space;
struct range_entry *r;
const struct fixed_mtrr_desc *desc;
const struct fixed_mtrr_desc *last_desc;
uint32_t begin;
uint32_t end;
int type_index;
if (fixed_mtrr_types_initialized)
return;
phys_addr_space = get_physical_address_space();
/* Set all fixed ranges to uncacheable first. */
memset(&fixed_mtrr_types[0], MTRR_TYPE_UNCACHEABLE, NUM_FIXED_RANGES);
desc = &fixed_mtrr_desc[0];
last_desc = &fixed_mtrr_desc[ARRAY_SIZE(fixed_mtrr_desc) - 1];
type_index = desc->range_index;
memranges_each_entry(r, phys_addr_space) {
begin = range_entry_base_mtrr_addr(r);
end = range_entry_end_mtrr_addr(r);
if (begin >= last_desc->end)
break;
if (end > last_desc->end)
end = last_desc->end;
/* Get to the correct fixed mtrr descriptor. */
while (begin >= desc->end)
desc++;
type_index = desc->range_index;
type_index += (begin - desc->begin) / desc->step;
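/* Example: begin = PHYS_TO_RANGE_ADDR(0xA0000) falls in the 16KiB
* descriptor, so type_index = 8 + (0xA0000 - 0x80000) / (16 * 1024)
* = 16. */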
while (begin != end) {
unsigned char type;
type = range_entry_tag(r);
printk(MTRR_VERBOSE_LEVEL,
"MTRR addr 0x%x-0x%x set to %d type @ %d\n",
begin, begin + desc->step, type, type_index);
if (type == MTRR_TYPE_WRBACK)
type |= MTRR_FIXED_WRBACK_BITS;
fixed_mtrr_types[type_index] = type;
type_index++;
begin += desc->step;
if (begin == desc->end)
desc++;
}
}
fixed_mtrr_types_initialized = 1;
}
static void commit_fixed_mtrrs(void)
{
int i;
int j;
int msr_num;
int type_index;
/* 8 ranges per MSR. */
msr_t fixed_msrs[NUM_FIXED_MTRRS];
unsigned long msr_index[NUM_FIXED_MTRRS];
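/* Each fixed MTRR packs eight one-byte range types: e.g. in
* MTRRfix64K_00000_MSR, bits 7:0 hold the type for 0x00000-0x0FFFF
* and bits 63:56 the type for 0x70000-0x7FFFF. */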
memset(&fixed_msrs, 0, sizeof(fixed_msrs));
disable_cache();
msr_num = 0;
type_index = 0;
for (i = 0; i < ARRAY_SIZE(fixed_mtrr_desc); i++) {
const struct fixed_mtrr_desc *desc;
int num_ranges;
desc = &fixed_mtrr_desc[i];
num_ranges = (desc->end - desc->begin) / desc->step;
for (j = 0; j < num_ranges; j += RANGES_PER_FIXED_MTRR) {
msr_index[msr_num] = desc->msr_index_base +
(j / RANGES_PER_FIXED_MTRR);
fixed_msrs[msr_num].lo |=
fixed_mtrr_types[type_index++] << 0;
fixed_msrs[msr_num].lo |=
fixed_mtrr_types[type_index++] << 8;
fixed_msrs[msr_num].lo |=
fixed_mtrr_types[type_index++] << 16;
fixed_msrs[msr_num].lo |=
fixed_mtrr_types[type_index++] << 24;
fixed_msrs[msr_num].hi |=
fixed_mtrr_types[type_index++] << 0;
fixed_msrs[msr_num].hi |=
fixed_mtrr_types[type_index++] << 8;
fixed_msrs[msr_num].hi |=
fixed_mtrr_types[type_index++] << 16;
fixed_msrs[msr_num].hi |=
fixed_mtrr_types[type_index++] << 24;
msr_num++;
}
}
for (i = 0; i < ARRAY_SIZE(fixed_msrs); i++) {
printk(BIOS_DEBUG, "MTRR: Fixed MSR 0x%lx 0x%08x%08x\n",
msr_index[i], fixed_msrs[i].hi, fixed_msrs[i].lo);
wrmsr(msr_index[i], fixed_msrs[i]);
}
enable_cache();
}
void x86_setup_fixed_mtrrs_no_enable(void)
{
calc_fixed_mtrrs();
commit_fixed_mtrrs();
}
void x86_setup_fixed_mtrrs(void)
{
x86_setup_fixed_mtrrs_no_enable();
printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
enable_fixed_mtrr();
}
struct var_mtrr_state {
struct memranges *addr_space;
int above4gb;
int address_bits;
int commit_mtrrs;
int mtrr_index;
int def_mtrr_type;
};
static void clear_var_mtrr(int index)
{
msr_t msr_val;
msr_val = rdmsr(MTRRphysMask_MSR(index));
msr_val.lo &= ~MTRRphysMaskValid;
wrmsr(MTRRphysMask_MSR(index), msr_val);
}
static void write_var_mtrr(struct var_mtrr_state *var_state,
uint32_t base, uint32_t size, int mtrr_type)
{
msr_t msr_val;
unsigned long msr_index;
resource_t rbase;
resource_t rsize;
resource_t mask;
/* Some variable MTRRs are kept in reserve for OS use. However,
* it's more important to try to map the full address space
* properly. */
if (var_state->mtrr_index >= bios_mtrrs)
printk(BIOS_WARNING, "Taking a reserved OS MTRR.\n");
if (var_state->mtrr_index >= total_mtrrs) {
printk(BIOS_ERR, "ERROR: Not enough MTRRs available!\n");
return;
}
rbase = base;
rsize = size;
rbase = RANGE_TO_PHYS_ADDR(rbase);
rsize = RANGE_TO_PHYS_ADDR(rsize);
rsize = -rsize;
mask = (1ULL << var_state->address_bits) - 1;
rsize = rsize & mask;
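/* Example: base = 0xD0000 and size = 0x10000 in range units with
* 36 address bits yield rbase = 0xD0000000 and a mask of
* -0x10000000 truncated to 36 bits = 0xff0000000, i.e. a 256MiB
* window at 3.25GiB. */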
printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
var_state->mtrr_index, rbase, rsize, mtrr_type);
msr_val.lo = rbase;
msr_val.lo |= mtrr_type;
msr_val.hi = rbase >> 32;
msr_index = MTRRphysBase_MSR(var_state->mtrr_index);
wrmsr(msr_index, msr_val);
msr_val.lo = rsize;
msr_val.lo |= MTRRphysMaskValid;
msr_val.hi = rsize >> 32;
msr_index = MTRRphysMask_MSR(var_state->mtrr_index);
wrmsr(msr_index, msr_val);
}
static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
uint32_t base, uint32_t size, int mtrr_type)
{
while (size != 0) {
uint32_t addr_lsb;
uint32_t size_msb;
uint32_t mtrr_size;
addr_lsb = fls(base);
size_msb = fms(size);
/* All MTRR entries need their base aligned to their size. The
* largest chunk that can be carved off here is therefore limited
* by both the lowest set bit of the current base and the highest
* set bit of the remaining size. */
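/* Example: base = 0x1000 and size = 0x3000 (range units). The
* first pass has fls(base) = 12 and fms(size) = 13, so a 0x1000
* chunk is emitted; the second pass emits the remaining 0x2000
* chunk, consuming two MTRRs in total. */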
if (addr_lsb > size_msb)
mtrr_size = 1 << size_msb;
else
mtrr_size = 1 << addr_lsb;
if (var_state->commit_mtrrs)
write_var_mtrr(var_state, base, mtrr_size, mtrr_type);
size -= mtrr_size;
base += mtrr_size;
var_state->mtrr_index++;
}
}
static void setup_var_mtrrs_by_state(struct var_mtrr_state *var_state)
{
struct range_entry *r;
/*
* For each range that meets the non-default type process it in the
* following manner:
* +------------------+ c2 = end
* | 0 or more bytes |
* +------------------+ b2 = c1 = ALIGN_DOWN(end)
* | |
* +------------------+ b1 = a2 = ALIGN_UP(begin)
* | 0 or more bytes |
* +------------------+ a1 = begin
*
* Thus, there are 3 sub-ranges to configure variable MTRRs for.
*/
memranges_each_entry(r, var_state->addr_space) {
uint32_t a1, a2, b1, b2, c1, c2;
int mtrr_type = range_entry_tag(r);
/* Skip default type. */
if (var_state->def_mtrr_type == mtrr_type)
continue;
a1 = range_entry_base_mtrr_addr(r);
c2 = range_entry_end_mtrr_addr(r);
/* The end address is under 1MiB. The fixed MTRRs take
* precedence over the variable ones. Therefore this range
* can be ignored. */
if (c2 < RANGE_1MB)
continue;
/* Again, the fixed MTRRs take precedence so the beginning
* of the range can be set to 0 if it starts below 1MiB. */
if (a1 < RANGE_1MB)
a1 = 0;
/* If the range starts above 4GiB the processing is done. */
if (!var_state->above4gb && a1 >= RANGE_4GB)
break;
/* Clip the upper address to 4GiB if addresses above 4GiB
* are not being processed. */
if (!var_state->above4gb && c2 > RANGE_4GB)
c2 = RANGE_4GB;
/* Don't align up or down on the range if it is smaller
* than the minimum granularity. */
if ((c2 - a1) < MTRR_MIN_ALIGN) {
calc_var_mtrr_range(var_state, a1, c2 - a1, mtrr_type);
continue;
}
b1 = a2 = ALIGN_UP(a1, MTRR_MIN_ALIGN);
b2 = c1 = ALIGN_DOWN(c2, MTRR_MIN_ALIGN);
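/* Example (range units): a1 = 0x3000, c2 = 0xd7000 and
* MTRR_MIN_ALIGN = 0x4000 give a2 = b1 = 0x4000 and b2 = c1 =
* 0xd4000: a small head, a large 64MiB-aligned middle and a
* small tail sub-range. */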
calc_var_mtrr_range(var_state, a1, a2 - a1, mtrr_type);
calc_var_mtrr_range(var_state, b1, b2 - b1, mtrr_type);
calc_var_mtrr_range(var_state, c1, c2 - c1, mtrr_type);
}
}
static int calc_var_mtrrs(struct memranges *addr_space,
int above4gb, int address_bits)
{
int wb_deftype_count;
int uc_deftype_count;
struct var_mtrr_state var_state;
/* The default MTRR cacheability type is determined by calculating
* the number of MTRRs required with each candidate type as the
* default, then selecting whichever needs fewer. */
var_state.addr_space = addr_space;
var_state.above4gb = above4gb;
var_state.address_bits = address_bits;
var_state.commit_mtrrs = 0;
var_state.mtrr_index = 0;
var_state.def_mtrr_type = MTRR_TYPE_WRBACK;
setup_var_mtrrs_by_state(&var_state);
wb_deftype_count = var_state.mtrr_index;
var_state.mtrr_index = 0;
var_state.def_mtrr_type = MTRR_TYPE_UNCACHEABLE;
setup_var_mtrrs_by_state(&var_state);
uc_deftype_count = var_state.mtrr_index;
printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",
wb_deftype_count, uc_deftype_count);
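/* A system whose cacheable memory dominates the address map needs
* fewer MTRRs with a WB default (carving out UC holes) than with a
* UC default (covering every cacheable region individually), and
* vice versa. */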
if (wb_deftype_count < uc_deftype_count) {
printk(BIOS_DEBUG, "MTRR: WB selected as default type.\n");
return MTRR_TYPE_WRBACK;
}
printk(BIOS_DEBUG, "MTRR: UC selected as default type.\n");
return MTRR_TYPE_UNCACHEABLE;
}
static void commit_var_mtrrs(struct memranges *addr_space, int def_type,
int above4gb, int address_bits)
{
struct var_mtrr_state var_state;
int i;
var_state.addr_space = addr_space;
var_state.above4gb = above4gb;
var_state.address_bits = address_bits;
/* Write the MSRs. */
var_state.commit_mtrrs = 1;
var_state.mtrr_index = 0;
var_state.def_mtrr_type = def_type;
setup_var_mtrrs_by_state(&var_state);
/* Clear all remaining variable MTRRs. */
for (i = var_state.mtrr_index; i < total_mtrrs; i++)
clear_var_mtrr(i);
}
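/* address_bits is the number of physical address bits used to form
* the MTRR mask. above4gb: 0 ignores ranges above 4GiB, non-zero
* sets up MTRRs for the whole address space, and 2 additionally
* re-reads MTRRcap to detect the number of variable MTRRs available
* (see detect_var_mtrrs() above). */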
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb)
{
static int mtrr_default_type = -1;
struct memranges *addr_space;
addr_space = get_physical_address_space();
if (mtrr_default_type == -1) {
if (above4gb == 2)
detect_var_mtrrs();
mtrr_default_type =
calc_var_mtrrs(addr_space, !!above4gb, address_bits);
}
disable_cache();
commit_var_mtrrs(addr_space, mtrr_default_type, !!above4gb,
address_bits);
enable_var_mtrr(mtrr_default_type);
enable_cache();
}
void x86_setup_mtrrs(void)
{
int address_size;
x86_setup_fixed_mtrrs();
address_size = cpu_phys_address_size();
printk(BIOS_DEBUG, "CPU physical address size: %d bits\n", address_size);
x86_setup_var_mtrrs(address_size, 1);
}
int x86_mtrr_check(void)
{
/* Only Pentium Pro and later have MTRRs. */
msr_t msr;
printk(BIOS_DEBUG, "\nMTRR check\n");
msr = rdmsr(MTRRdefType_MSR);
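/* In MTRRdefType, bit 10 (FE) enables the fixed-range MTRRs and
* bit 11 (E) is the global MTRR enable; shift both down so they
* can be tested as bits 0 and 1. */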
msr.lo >>= 10;
printk(BIOS_DEBUG, "Fixed MTRRs : ");
if (msr.lo & 0x01)
printk(BIOS_DEBUG, "Enabled\n");
else
printk(BIOS_DEBUG, "Disabled\n");
printk(BIOS_DEBUG, "Variable MTRRs: ");
if (msr.lo & 0x02)
printk(BIOS_DEBUG, "Enabled\n");
else
printk(BIOS_DEBUG, "Disabled\n");
printk(BIOS_DEBUG, "\n");
post_code(0x93);
return ((int) msr.lo);
}