#ifndef EARLYMTRR_C
#define EARLYMTRR_C
#include <cpu/x86/cache.h>
#include <cpu/x86/mtrr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/x86/msr.h>

#if 0
static void disable_var_mtrr(unsigned reg)
{
	/* The invalid bit is kept in the mask, so we simply
	 * clear the relevant mask register to disable a
	 * range.
	 */
	msr_t zero;
	zero.lo = zero.hi = 0;
	wrmsr(MTRRphysMask_MSR(reg), zero);
}
#endif

static void set_var_mtrr(
	unsigned reg, unsigned base, unsigned size, unsigned type)
{
	/* Bits 32-35 of MTRRphysMask should be set to 1 */
	/* FIXME: It only supports ranges below 4G */
	msr_t basem, maskm;
	basem.lo = base | type;
	basem.hi = 0;
	wrmsr(MTRRphysBase_MSR(reg), basem);
	maskm.lo = ~(size - 1) | 0x800; /* valid bit */
	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
	wrmsr(MTRRphysMask_MSR(reg), maskm);
}
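
/* Worked example (illustrative only, nothing in this file calls it this way):
 * caching the low 1MB as write-back via variable MTRR 0 would be
 *
 *	set_var_mtrr(0, 0x00000000, 0x00100000, MTRR_TYPE_WRBACK);
 *
 * which programs MTRRphysBase0.lo = 0x00000006 (base | type, WB == 6) and
 * MTRRphysMask0.lo = ~(0x00100000 - 1) | 0x800 = 0xfff00800 (valid bit set).
 */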

#if 0
static void set_var_mtrr_x(
	unsigned reg, uint32_t base_lo, uint32_t base_hi,
	uint32_t size_lo, uint32_t size_hi, unsigned type)
{
	/* Bits 32-35 of MTRRphysMask should be set to 1 */
	msr_t basem, maskm;
	basem.lo = (base_lo & 0xfffff000) | type;
	basem.hi = base_hi & ((1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1);
	wrmsr(MTRRphysBase_MSR(reg), basem);
	maskm.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
	if (size_lo) {
		maskm.lo = ~(size_lo - 1) | 0x800;
	} else {
		maskm.lo = 0x800;
		maskm.hi &= ~(size_hi - 1);
	}
	wrmsr(MTRRphysMask_MSR(reg), maskm);
}
#endif

static inline void cache_lbmem(int type)
{
	/* Enable caching for 0 - CONFIG_RAMTOP using variable MTRR 0 */
	disable_cache();
	set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, type);
	enable_cache();
}
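
/* A typical call site (assumed, not part of this file) would cache low RAM
 * as write-back before ramstage is copied/decompressed into it:
 *
 *	cache_lbmem(MTRR_TYPE_WRBACK);
 */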

#if !defined(CONFIG_CACHE_AS_RAM) || (CONFIG_CACHE_AS_RAM == 0)
/* The fixed and variable MTRRs power up with random values;
 * clear them to MTRR_TYPE_UNCACHEABLE for safety.
 */
static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
{
	/* Precondition:
	 * The cache is not enabled in cr0 nor in MTRRdefType_MSR.
	 * entry32.inc ensures the cache is not enabled in cr0.
	 */
	msr_t msr;
	const unsigned long *msr_addr;
	unsigned long msr_nr;

	/* Initialize all of the relevant MSRs to 0 */
	msr.lo = 0;
	msr.hi = 0;
	for (msr_addr = mtrr_msrs; (msr_nr = *msr_addr); msr_addr++) {
		wrmsr(msr_nr, msr);
	}

#if defined(CONFIG_XIP_ROM_SIZE)
	/* Enable write-back caching so we can execute in place
	 * on the flash ROM.
	 */
	set_var_mtrr(1, REAL_XIP_ROM_BASE, CONFIG_XIP_ROM_SIZE, MTRR_TYPE_WRBACK);
#endif

	/* Set the default memory type and enable the variable MTRRs:
	 * bit 11 (E) enables the MTRRs, bit 10 (FE) stays clear so the fixed
	 * MTRRs remain disabled, and bits 7:0 leave the default type UC.
	 */
	msr.hi = 0x00000000;
	msr.lo = 0x00000800;
	wrmsr(MTRRdefType_MSR, msr);
}

static inline void early_mtrr_init(void)
{
	static const unsigned long mtrr_msrs[] = {
		/* fixed mtrr */
		0x250, 0x258, 0x259,
		0x268, 0x269, 0x26A,
		0x26B, 0x26C, 0x26D,
		0x26E, 0x26F,
		/* var mtrr */
		0x200, 0x201, 0x202, 0x203,
		0x204, 0x205, 0x206, 0x207,
		0x208, 0x209, 0x20A, 0x20B,
		0x20C, 0x20D, 0x20E, 0x20F,
		/* NULL end of table */
		0
	};
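	/* For reference (architectural MSR numbers, Intel SDM / AMD APM):
	 * 0x250 is MTRRfix64K_00000, 0x258/0x259 are MTRRfix16K_80000 and
	 * MTRRfix16K_A0000, 0x268-0x26F are MTRRfix4K_C0000 through
	 * MTRRfix4K_F8000, and 0x200-0x20F are the MTRRphysBase0/MTRRphysMask0
	 * through MTRRphysBase7/MTRRphysMask7 pairs.
	 */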
	disable_cache();
	do_early_mtrr_init(mtrr_msrs);
	enable_cache();
}
#endif

static inline int early_mtrr_init_detected(void)
{
	msr_t msr;
	/* See if the MTRRs are enabled: a #RESET disables them, while an
	 * #INIT preserves their state. This works on both Intel and AMD
	 * CPUs, at least according to the documentation.
	 */
	msr = rdmsr(MTRRdefType_MSR);
	return msr.lo & 0x00000800;
}
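
/* Sketch of the intended use (the caller here is an assumption, not part of
 * this file): skip the early MTRR setup when it is already in place, e.g.
 * after an #INIT that preserved the registers:
 *
 *	if (!early_mtrr_init_detected())
 *		early_mtrr_init();
 */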

#endif /* EARLYMTRR_C */