cpu/x86: move NXE and PAT accesses to paging module

The EFER and PAT MSRs are architecturally defined x86 MSRs, so move
the macro definitions to msr.h. Add a 'paging_' prefix to the PAT and
NXE pae/paging functions to namespace things a little better.

BUG=b:72728953

Change-Id: I1ab2c4ff827e19d5ba4e3b6eaedb3fee6aaef14d
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: https://review.coreboot.org/25713
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Furquan Shaikh <furquan@google.com>
Reviewed-by: Justin TerAvest <teravest@chromium.org>
Author: Aaron Durbin, 2018-04-17 11:37:28 -06:00; committed by Patrick Georgi
commit ae18f80feb
parent 7f5e734638
6 changed files with 41 additions and 35 deletions
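For orientation only (not part of this change): after the rename, a hypothetical caller would pull the declarations from <cpu/x86/pae.h> and use the paging_-prefixed helpers. The function name below is illustrative, and the PAT value shown is the architectural power-on default layout from the Intel SDM.

#include <cpu/x86/pae.h>

/* Hypothetical caller sketch; only the paging_* calls come from this change. */
static void example_enable_nx_and_default_pat(void)
{
	/* Set EFER.NXE so the NX bit in page-table entries is honored. */
	paging_set_nxe(1);

	/* Program IA32_PAT with the architectural power-on default layout:
	 * WB, WT, UC-, UC repeated across the eight entries. */
	paging_set_pat(0x0007040600070406ULL);
}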

View File

@@ -17,6 +17,7 @@
 #include <console/console.h>
 #include <cpu/cpu.h>
 #include <arch/cpu.h>
+#include <cpu/x86/msr.h>
 #include <cpu/x86/pae.h>
 #include <rules.h>
 #include <string.h>
@@ -119,3 +120,23 @@ void *map_2M_page(unsigned long page)
 	return result;
 }
 #endif
+
+void paging_set_nxe(int enable)
+{
+	msr_t msr = rdmsr(IA32_EFER);
+
+	if (enable)
+		msr.lo |= EFER_NXE;
+	else
+		msr.lo &= ~EFER_NXE;
+
+	wrmsr(IA32_EFER, msr);
+}
+
+void paging_set_pat(uint64_t pat)
+{
+	msr_t msr;
+
+	msr.lo = pat;
+	msr.hi = pat >> 32;
+	wrmsr(MSR_IA32_PAT, msr);
+}
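For context on the pat argument: IA32_PAT holds eight page-attribute entries, one per byte, using the Intel SDM memory-type encodings (00h UC, 01h WC, 04h WT, 05h WP, 06h WB, 07h UC-). Below is a sketch of how a caller might build such a value; the PAT_* and PAT_ENTRY names are illustrative assumptions, not part of this change or of coreboot's API.

#include <stdint.h>
#include <cpu/x86/pae.h>

/* Illustrative encodings and helper macro; assumptions, not coreboot code. */
#define PAT_UC		0x00ULL	/* uncacheable */
#define PAT_WC		0x01ULL	/* write-combining */
#define PAT_WT		0x04ULL	/* write-through */
#define PAT_WP		0x05ULL	/* write-protected */
#define PAT_WB		0x06ULL	/* write-back */
#define PAT_UC_MINUS	0x07ULL	/* UC-, may be overridden to WC by MTRRs */

/* Entry n occupies byte n of the 64-bit IA32_PAT image. */
#define PAT_ENTRY(n, type) ((uint64_t)(type) << ((n) * 8))

static void example_program_default_pat(void)
{
	/* Reproduces the power-on default value 0x0007040600070406. */
	uint64_t pat = PAT_ENTRY(0, PAT_WB) | PAT_ENTRY(1, PAT_WT) |
		       PAT_ENTRY(2, PAT_UC_MINUS) | PAT_ENTRY(3, PAT_UC) |
		       PAT_ENTRY(4, PAT_WB) | PAT_ENTRY(5, PAT_WT) |
		       PAT_ENTRY(6, PAT_UC_MINUS) | PAT_ENTRY(7, PAT_UC);

	paging_set_pat(pat);
}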

View File

@@ -1,6 +1,18 @@
 #ifndef CPU_X86_MSR_H
 #define CPU_X86_MSR_H
+
+/* Intel SDM: Table 2-1
+ * IA-32 architectural MSR: Extended Feature Enable Register
+ */
+#define IA32_EFER 0xC0000080
+#define EFER_NXE (1 << 11)
+#define EFER_LMA (1 << 10)
+#define EFER_LME (1 << 8)
+#define EFER_SCE (1 << 0)
+
+/* Page attribute type MSR */
+#define MSR_IA32_PAT 0x277
 
 #if defined(__ROMCC__)
 typedef __builtin_msr_t msr_t;

View File

@@ -1,6 +1,14 @@
 #ifndef CPU_X86_PAE_H
 #define CPU_X86_PAE_H
+
+#include <stdint.h>
+
+/* Set/Clear NXE bit in IA32_EFER MSR */
+void paging_set_nxe(int enable);
+
+/* Set PAT MSR */
+void paging_set_pat(uint64_t pat);
 
 #define MAPPING_ERROR ((void *)0xffffffffUL)
 void *map_2M_page(unsigned long page);

View File

@@ -314,23 +314,3 @@ void mca_configure(void)
 			(msr_t) {.lo = 0xffffffff, .hi = 0xffffffff});
 	}
 }
-
-void set_nxe(uint8_t enable)
-{
-	msr_t msr = rdmsr(IA32_EFER);
-
-	if (enable)
-		msr.lo |= EFER_NXE;
-	else
-		msr.lo &= ~EFER_NXE;
-
-	wrmsr(IA32_EFER, msr);
-}
-
-void set_pat(uint64_t pat)
-{
-	msr_t msr;
-
-	msr.lo = pat;
-	msr.hi = pat >> 32;
-	wrmsr(MSR_IA32_PAT, msr);
-}

View File

@@ -159,9 +159,4 @@ uint32_t cpu_get_max_turbo_ratio(void);
 /* Configure Machine Check Architecture support */
 void mca_configure(void);
 
-/* Set/Clear NXE bit in IA32_EFER MSR */
-void set_nxe(uint8_t enable);
-
-/* Set PAT MSR */
-void set_pat(uint64_t pat);
 #endif /* SOC_INTEL_COMMON_BLOCK_CPULIB_H */

View File

@@ -72,7 +72,6 @@
 #define PRMRR_PHYS_MASK_LOCK (1 << 10)
 #define PRMRR_PHYS_MASK_VALID (1 << 11)
 #define MSR_POWER_CTL 0x1fc
-#define MSR_IA32_PAT 0x277
 #define MSR_EVICT_CTL 0x2e0
 #define MSR_SGX_OWNEREPOCH0 0x300
 #define MSR_SGX_OWNEREPOCH1 0x301
@@ -143,13 +142,4 @@
 #define SGX_RESOURCE_MASK_LO (0xfffff000UL)
 #define SGX_RESOURCE_MASK_HI (0xfffffUL)
-
-/* Intel SDM: Table 2-1
- * IA-32 architectural MSR: Extended Feature Enable Register
- */
-#define IA32_EFER 0xC0000080
-#define EFER_NXE (1 << 11)
-#define EFER_LMA (1 << 10)
-#define EFER_LME (1 << 8)
-#define EFER_SCE (1 << 0)
 
 #endif /* SOC_INTEL_COMMON_MSR_H */