/* stray VCS blame timestamp: 2004-10-14 22:13:01 +02:00 */
|
|
|
#ifndef CPU_X86_MTRR_H
|
|
|
|
#define CPU_X86_MTRR_H
|
|
|
|
|
|
|
|
/* These are the region types */
|
|
|
|
#define MTRR_TYPE_UNCACHEABLE 0
|
|
|
|
#define MTRR_TYPE_WRCOMB 1
|
|
|
|
/*#define MTRR_TYPE_ 2*/
|
|
|
|
/*#define MTRR_TYPE_ 3*/
|
|
|
|
#define MTRR_TYPE_WRTHROUGH 4
|
|
|
|
#define MTRR_TYPE_WRPROT 5
|
|
|
|
#define MTRR_TYPE_WRBACK 6
|
|
|
|
#define MTRR_NUM_TYPES 7
|
|
|
|
|
|
|
|
#define MTRRcap_MSR 0x0fe
|
|
|
|
#define MTRRdefType_MSR 0x2ff
|
|
|
|
|
/* stray VCS blame timestamp: 2010-10-01 09:27:51 +02:00 */
|
|
|
#define MTRRdefTypeEn (1 << 11)
|
|
|
|
#define MTRRdefTypeFixEn (1 << 10)
|
|
|
|
|
/* stray VCS blame timestamp: 2012-01-10 07:05:18 +01:00 */
|
|
|
#define SMRRphysBase_MSR 0x1f2
|
|
|
|
#define SMRRphysMask_MSR 0x1f3
|
|
|
|
|
/* stray VCS blame timestamp: 2004-10-14 22:13:01 +02:00 */
|
|
|
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
|
|
|
|
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
|
|
|
|
|
/* stray VCS blame timestamp: 2011-01-19 07:32:35 +01:00 */
|
|
|
#define MTRRphysMaskValid (1 << 11)
|
|
|
|
|
/* stray VCS blame timestamp: 2004-10-14 22:13:01 +02:00 */
|
|
|
#define NUM_FIXED_RANGES 88
|
/* stray VCS blame timestamp: 2013-03-26 20:09:47 +01:00 */
|
|
|
#define RANGES_PER_FIXED_MTRR 8
|
/* stray VCS blame timestamp: 2004-10-14 22:13:01 +02:00 */
|
|
|
#define MTRRfix64K_00000_MSR 0x250
|
|
|
|
#define MTRRfix16K_80000_MSR 0x258
|
|
|
|
#define MTRRfix16K_A0000_MSR 0x259
|
|
|
|
#define MTRRfix4K_C0000_MSR 0x268
|
|
|
|
#define MTRRfix4K_C8000_MSR 0x269
|
|
|
|
#define MTRRfix4K_D0000_MSR 0x26a
|
|
|
|
#define MTRRfix4K_D8000_MSR 0x26b
|
|
|
|
#define MTRRfix4K_E0000_MSR 0x26c
|
|
|
|
#define MTRRfix4K_E8000_MSR 0x26d
|
|
|
|
#define MTRRfix4K_F0000_MSR 0x26e
|
|
|
|
#define MTRRfix4K_F8000_MSR 0x26f
|
|
|
|
|
/* stray VCS blame timestamp: 2011-04-10 06:15:23 +02:00 */
|
|
|
#if !defined (__ASSEMBLER__) && !defined(__PRE_RAM__)
|
/* stray VCS blame timestamp: 2013-03-26 20:09:47 +01:00 */
|
|
|
|
|
|
|
/*
|
|
|
|
* The MTRR code has some side effects that the callers should be aware for.
|
|
|
|
* 1. The call sequence matters. x86_setup_mtrrs() calls
|
|
|
|
* x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrrs() (equivalent
|
|
|
|
* of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs(). If the callers
|
|
|
|
* want to call the components of x86_setup_mtrrs() because of other
|
/* stray VCS blame timestamp: 2013-07-10 05:46:01 +02:00 */
|
|
|
* requirements the ordering should still preserved.
|
/* stray VCS blame timestamp: 2013-03-26 20:09:47 +01:00 */
|
|
|
* 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
|
|
|
|
* of the nature of the global MTRR enable flag. Therefore, all direct
|
|
|
|
* or indirect callers of enable_fixed_mtrr() should ensure that the
|
|
|
|
* variable MTRR MSRs do not contain bad ranges.
|
/* stray VCS blame timestamp: 2013-03-26 18:47:47 +01:00 */
|
|
|
* 3. If CONFIG_CACHE_ROM is selected an MTRR is allocated for enabling
|
|
|
|
* the caching of the ROM. However, it is set to uncacheable (UC). It
|
/* stray VCS blame timestamp: 2013-07-10 05:46:01 +02:00 */
|
|
|
* is the responsibility of the caller to enable it by calling
|
/* stray VCS blame timestamp: 2013-03-26 18:47:47 +01:00 */
|
|
|
* x86_mtrr_enable_rom_caching().
|
/* stray VCS blame timestamp: 2013-02-26 19:07:40 +01:00 */
|
|
|
*/
|
/* stray VCS blame timestamp: 2012-01-10 12:01:43 +01:00 */
|
|
|
void x86_setup_mtrrs(void);
|
/* stray VCS blame timestamp: 2013-03-26 20:09:47 +01:00 */
|
|
|
/*
|
|
|
|
* x86_setup_var_mtrrs() parameters:
|
|
|
|
* address_bits - number of physical address bits supported by cpu
|
|
|
|
* above4gb - 2 means dynamically detect number of variable MTRRs available.
|
|
|
|
* non-zero means handle memory ranges above 4GiB.
|
|
|
|
* 0 means ignore memory ranges above 4GiB
|
|
|
|
*/
|
|
|
|
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
|
|
|
|
void enable_fixed_mtrr(void);
|
/* stray VCS blame timestamp: 2009-10-27 15:29:29 +01:00 */
|
|
|
void x86_setup_fixed_mtrrs(void);
|
/* stray VCS blame timestamp: 2013-03-20 21:50:59 +01:00 */
|
|
|
/* Set up fixed MTRRs but do not enable them. */
|
|
|
|
void x86_setup_fixed_mtrrs_no_enable(void);
|
/* stray VCS blame timestamp: 2013-03-26 20:09:47 +01:00 */
|
|
|
int x86_mtrr_check(void);
|
/* stray VCS blame timestamp: 2013-03-26 18:47:47 +01:00 */
|
|
|
/* ROM caching can be used after variable MTRRs are set up. Beware that
|
|
|
|
* enabling CONFIG_CACHE_ROM will eat through quite a few MTRRs based on
|
|
|
|
* one's IO hole size and WRCOMB resources. Be sure to check the console
|
/* stray VCS blame timestamp: 2013-04-03 16:57:53 +02:00 */
|
|
|
* log when enabling CONFIG_CACHE_ROM or adding WRCOMB resources. Beware that
|
|
|
|
* on CPUs with core-scoped MTRR registers such as hyperthreaded CPUs the
|
|
|
|
* rom caching will be disabled if all threads run the MTRR code. Therefore,
|
|
|
|
* one needs to call x86_mtrr_enable_rom_caching() after all threads of the
|
|
|
|
* same core have run the MTRR code. */
|
/* stray VCS blame timestamp: 2013-03-26 18:47:47 +01:00 */
|
|
|
#if CONFIG_CACHE_ROM
|
|
|
|
void x86_mtrr_enable_rom_caching(void);
|
|
|
|
void x86_mtrr_disable_rom_caching(void);
|
/* stray VCS blame timestamp: 2013-03-26 19:09:39 +01:00 */
|
|
|
/* Return the variable range MTRR index of the ROM cache. */
|
|
|
|
long x86_mtrr_rom_cache_var_index(void);
|
/* stray VCS blame timestamp: 2013-03-26 18:47:47 +01:00 */
|
|
|
#else
|
|
|
|
/* CONFIG_CACHE_ROM disabled: no ROM-cache MTRR exists, so enabling is a no-op. */
static inline void x86_mtrr_enable_rom_caching(void)
{
}
|
|
|
|
/* CONFIG_CACHE_ROM disabled: no ROM-cache MTRR exists, so disabling is a no-op. */
static inline void x86_mtrr_disable_rom_caching(void)
{
}
|
/* stray VCS blame timestamp: 2013-03-26 19:09:39 +01:00 */
|
|
|
/* CONFIG_CACHE_ROM disabled: no variable-range MTRR is reserved for the ROM
 * cache, so report the "no index allocated" sentinel. */
static inline long x86_mtrr_rom_cache_var_index(void)
{
	return -1;
}
|
/* stray VCS blame timestamp: 2013-03-26 18:47:47 +01:00 */
|
|
|
#endif /* CONFIG_CACHE_ROM */
|
|
|
|
|
/* stray VCS blame timestamp: 2010-03-28 23:26:54 +02:00 */
|
|
|
#endif
|
/* stray VCS blame timestamp: 2004-10-14 22:13:01 +02:00 */
|
|
|
|
/* stray VCS blame timestamp: 2013-12-12 11:27:53 +01:00 */
|
|
|
#if !defined(__ASSEMBLER__) && defined(__PRE_RAM__) && !defined(__ROMCC__)
|
|
|
|
void set_var_mtrr(unsigned reg, unsigned base, unsigned size, unsigned type);
|
|
|
|
#endif
|
|
|
|
|
/* stray VCS blame timestamp: 2010-04-06 23:50:21 +02:00 */
|
|
|
#if !defined(CONFIG_RAMTOP)
|
|
|
|
# error "CONFIG_RAMTOP not defined"
|
|
|
|
#endif
|
|
|
|
|
/* stray VCS blame timestamp: 2011-10-31 17:07:52 +01:00 */
|
|
|
#if ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE -1)) != 0)
|
/* stray VCS blame timestamp: 2010-04-06 23:50:21 +02:00 */
|
|
|
# error "CONFIG_XIP_ROM_SIZE is not a power of 2"
|
|
|
|
#endif
|
|
|
|
|
/* stray VCS blame timestamp: 2012-06-30 10:41:08 +02:00 */
|
|
|
#if ((CONFIG_CACHE_ROM_SIZE & (CONFIG_CACHE_ROM_SIZE -1)) != 0)
|
|
|
|
# error "CONFIG_CACHE_ROM_SIZE is not a power of 2"
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define CACHE_ROM_BASE (((1<<20) - (CONFIG_CACHE_ROM_SIZE>>12))<<12)
|
|
|
|
|
/* stray VCS blame timestamp: 2011-04-21 22:24:43 +02:00 */
|
|
|
#if (CONFIG_RAMTOP & (CONFIG_RAMTOP - 1)) != 0
|
/* stray VCS blame timestamp: 2010-04-06 23:50:21 +02:00 */
|
|
|
# error "CONFIG_RAMTOP must be a power of 2"
|
|
|
|
#endif
|
|
|
|
|
/* stray VCS blame timestamp: 2004-10-14 22:13:01 +02:00 */
|
|
|
#endif /* CPU_X86_MTRR_H */
|