soc/intel: Standardize names of common MSRs

Use the names defined in the Intel 64 and IA-32 Architectures Software
Developer’s Manual.
The renamed MSRs are (register address, new register name):
0x35 MSR_CORE_THREAD_COUNT
0x121 MSR_EMULATE_PM_TIMER
0x1f4 MSR_PRMRR_PHYS_BASE
0x1f5 MSR_PRMRR_PHYS_MASK
0x2f4 MSR_UNCORE_PRMRR_PHYS_BASE
0x2f5 MSR_UNCORE_PRMRR_PHYS_MASK

Change-Id: I53f11a2ce831456d598aa21303a817d18ac89bba
Signed-off-by: Elyes HAOUAS <ehaouas@noos.fr>
Reviewed-on: https://review.coreboot.org/c/30288
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Lijian Zhao <lijian.zhao@intel.com>
Elyes HAOUAS 2018-12-18 10:24:55 +01:00 committed by Patrick Georgi
parent 844eda0f3b
commit f212cf3506
11 changed files with 31 additions and 31 deletions
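For context, the renamed constants drop into coreboot's generic MSR accessors unchanged; only the identifiers differ. Below is a minimal sketch of reading MSR_CORE_THREAD_COUNT under its standardized name, modeled on the get_cpu_count() hunk further down; the define normally comes from the SoC's msr.h, and the function name here is purely illustrative.

#include <cpu/x86/msr.h>
#include <console/console.h>

#define MSR_CORE_THREAD_COUNT 0x35	/* normally provided by <soc/msr.h> */

static void print_cpu_topology(void)
{
	/* Bits 15:0 hold the enabled thread count, bits 31:16 the core count. */
	msr_t msr = rdmsr(MSR_CORE_THREAD_COUNT);
	unsigned int num_threads = (msr.lo >> 0) & 0xffff;
	unsigned int num_cores = (msr.lo >> 16) & 0xffff;

	printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",
	       num_cores, num_threads);
}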

View File

@@ -210,7 +210,7 @@ void enable_pm_timer_emulation(void)
msr.hi = (3579545ULL << 32) / CTC_FREQ;
/* Set PM1 timer IO port and enable */
msr.lo = EMULATE_PM_TMR_EN | (ACPI_BASE_ADDRESS + R_ACPI_PM1_TMR);
-wrmsr(MSR_EMULATE_PM_TMR, msr);
+wrmsr(MSR_EMULATE_PM_TIMER, msr);
}
static int rtc_failed(uint32_t gen_pmcon1)
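A side note on the hunk above: 3579545 Hz is the ACPI PM timer frequency, and the value written to msr.hi reads like a 32.32 fixed-point ratio of that frequency to the SoC's counter clock CTC_FREQ. Here is a standalone sketch of the same arithmetic, with an assumed 19.2 MHz counter clock purely for illustration (the real CTC_FREQ is SoC-specific).

#include <stdint.h>
#include <stdio.h>

#define ACPI_PM_TIMER_HZ 3579545ULL	/* ACPI PM timer frequency */
#define CTC_FREQ_ASSUMED 19200000ULL	/* assumed counter clock, illustration only */

int main(void)
{
	/* Same expression as msr.hi in the hunk above, with CTC_FREQ swapped
	   for an assumed value: a 32.32 fixed-point frequency ratio. */
	uint64_t ratio = (ACPI_PM_TIMER_HZ << 32) / CTC_FREQ_ASSUMED;

	printf("MSR_EMULATE_PM_TIMER msr.hi = 0x%08llx\n",
	       (unsigned long long)ratio);
	return 0;
}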

View File

@@ -622,7 +622,7 @@ static int get_cpu_count(void)
int num_threads;
int num_cores;
-msr = rdmsr(CORE_THREAD_COUNT_MSR);
+msr = rdmsr(MSR_CORE_THREAD_COUNT);
num_threads = (msr.lo >> 0) & 0xffff;
num_cores = (msr.lo >> 16) & 0xffff;
printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",

View File

@@ -17,7 +17,7 @@
#define _BROADWELL_MSR_H_
#define MSR_PIC_MSG_CONTROL 0x2e
-#define CORE_THREAD_COUNT_MSR 0x35
+#define MSR_CORE_THREAD_COUNT 0x35
#define MSR_PLATFORM_INFO 0xce
#define PLATFORM_INFO_SET_TDP (1 << 29)
#define MSR_PKG_CST_CONFIG_CONTROL 0xe2
@@ -33,12 +33,12 @@
#define MISC_PWR_MGMT_EIST_HW_DIS (1 << 0)
#define MSR_TURBO_RATIO_LIMIT 0x1ad
#define MSR_TEMPERATURE_TARGET 0x1a2
-#define EMRRphysBase_MSR 0x1f4
-#define EMRRphysMask_MSR 0x1f5
+#define MSR_PRMRR_PHYS_BASE 0x1f4
+#define MSR_PRMRR_PHYS_MASK 0x1f5
#define MSR_POWER_CTL 0x1fc
#define MSR_LT_LOCK_MEMORY 0x2e7
-#define UNCORE_EMRRphysBase_MSR 0x2f4
-#define UNCORE_EMRRphysMask_MSR 0x2f5
+#define MSR_UNCORE_PRMRR_PHYS_BASE 0x2f4
+#define MSR_UNCORE_PRMRR_PHYS_MASK 0x2f5
#define SMM_FEATURE_CONTROL_MSR 0x4e0
#define SMM_CPU_SAVE_EN (1 << 1)

View File

@@ -45,8 +45,8 @@ static inline void write_emrr(struct smm_relocation_params *relo_params)
{
printk(BIOS_DEBUG, "Writing EMRR. base = 0x%08x, mask=0x%08x\n",
relo_params->emrr_base.lo, relo_params->emrr_mask.lo);
-wrmsr(EMRRphysBase_MSR, relo_params->emrr_base);
-wrmsr(EMRRphysMask_MSR, relo_params->emrr_mask);
+wrmsr(MSR_PRMRR_PHYS_BASE, relo_params->emrr_base);
+wrmsr(MSR_PRMRR_PHYS_MASK, relo_params->emrr_mask);
}
static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
@@ -55,8 +55,8 @@ static inline void write_uncore_emrr(struct smm_relocation_params *relo_params)
"Writing UNCORE_EMRR. base = 0x%08x, mask=0x%08x\n",
relo_params->uncore_emrr_base.lo,
relo_params->uncore_emrr_mask.lo);
-wrmsr(UNCORE_EMRRphysBase_MSR, relo_params->uncore_emrr_base);
-wrmsr(UNCORE_EMRRphysMask_MSR, relo_params->uncore_emrr_mask);
+wrmsr(MSR_UNCORE_PRMRR_PHYS_BASE, relo_params->uncore_emrr_base);
+wrmsr(MSR_UNCORE_PRMRR_PHYS_MASK, relo_params->uncore_emrr_mask);
}
static void update_save_state(int cpu, uintptr_t curr_smbase,

View File

@@ -185,7 +185,7 @@ static void enable_pm_timer_emulation(void)
/* Set PM1 timer IO port and enable*/
msr.lo = (EMULATE_DELAY_VALUE << EMULATE_DELAY_OFFSET_VALUE) |
EMULATE_PM_TMR_EN | (ACPI_BASE_ADDRESS + PM1_TMR);
-wrmsr(MSR_EMULATE_PM_TMR, msr);
+wrmsr(MSR_EMULATE_PM_TIMER, msr);
}
/* All CPUs including BSP will run the following function. */

View File

@@ -33,7 +33,7 @@
#define MSR_POWER_MISC 0x120
#define ENABLE_IA_UNTRUSTED (1 << 6)
#define FLUSH_DL1_L2 (1 << 8)
-#define MSR_EMULATE_PM_TMR 0x121
+#define MSR_EMULATE_PM_TIMER 0x121
#define EMULATE_DELAY_OFFSET_VALUE 20
#define EMULATE_PM_TMR_EN (1 << 16)
#define EMULATE_DELAY_VALUE 0x13
@@ -60,8 +60,8 @@
#define MISC_PWR_MGMT_ISST_EN_INT (1 << 7)
#define MISC_PWR_MGMT_ISST_EN_EPP (1 << 12)
#define MSR_TURBO_RATIO_LIMIT 0x1ad
-#define PRMRR_PHYS_BASE_MSR 0x1f4
-#define PRMRR_PHYS_MASK_MSR 0x1f5
+#define MSR_PRMRR_PHYS_BASE 0x1f4
+#define MSR_PRMRR_PHYS_MASK 0x1f5
#define PRMRR_PHYS_MASK_LOCK (1 << 10)
#define PRMRR_PHYS_MASK_VALID (1 << 11)
#define MSR_POWER_CTL 0x1fc

View File

@@ -83,7 +83,7 @@ void prmrr_core_configure(void)
if (!soc_sgx_enabled() || !is_sgx_supported())
return;
-msr = rdmsr(PRMRR_PHYS_MASK_MSR);
+msr = rdmsr(MSR_PRMRR_PHYS_MASK);
/* If it is locked don't attempt to write PRMRR MSRs. */
if (msr.lo & PRMRR_PHYS_MASK_LOCK)
return;
@@ -109,19 +109,19 @@ void prmrr_core_configure(void)
* - Clear the valid bit in PRMRR mask MSR
* - Lock PRMRR MASK MSR */
prmrr_base.data32.lo |= MTRR_TYPE_WRBACK;
-wrmsr(PRMRR_PHYS_BASE_MSR, (msr_t) {.lo = prmrr_base.data32.lo,
+wrmsr(MSR_PRMRR_PHYS_BASE, (msr_t) {.lo = prmrr_base.data32.lo,
.hi = prmrr_base.data32.hi});
prmrr_mask.data32.lo &= ~PRMRR_PHYS_MASK_VALID;
prmrr_mask.data32.lo |= PRMRR_PHYS_MASK_LOCK;
-wrmsr(PRMRR_PHYS_MASK_MSR, (msr_t) {.lo = prmrr_mask.data32.lo,
+wrmsr(MSR_PRMRR_PHYS_MASK, (msr_t) {.lo = prmrr_mask.data32.lo,
.hi = prmrr_mask.data32.hi});
}
static int is_prmrr_set(void)
{
msr_t prmrr_base, prmrr_mask;
-prmrr_base = rdmsr(PRMRR_PHYS_BASE_MSR);
-prmrr_mask = rdmsr(PRMRR_PHYS_MASK_MSR);
+prmrr_base = rdmsr(MSR_PRMRR_PHYS_BASE);
+prmrr_mask = rdmsr(MSR_PRMRR_PHYS_MASK);
/* If PRMRR base is zero and PRMRR mask is locked
* then PRMRR is not set */
@@ -191,7 +191,7 @@ static void activate_sgx(void)
static int is_prmrr_approved(void)
{
msr_t msr;
-msr = rdmsr(PRMRR_PHYS_MASK_MSR);
+msr = rdmsr(MSR_PRMRR_PHYS_MASK);
if (msr.lo & PRMRR_PHYS_MASK_VALID) {
printk(BIOS_INFO, "SGX: MCHECK approved SGX PRMRR\n");
return 1;

View File

@@ -19,7 +19,7 @@
#define _DENVERTON_NS_MSR_H_
#define MSR_PIC_MSG_CONTROL 0x2e
-#define CORE_THREAD_COUNT_MSR 0x35
+#define MSR_CORE_THREAD_COUNT 0x35
#define MSR_PLATFORM_INFO 0xce
#define PLATFORM_INFO_SET_TDP (1 << 29)
#define MSR_PKG_CST_CONFIG_CONTROL 0xe2
@@ -36,11 +36,11 @@
#define MSR_TURBO_RATIO_LIMIT 0x1ad
#define MSR_TEMPERATURE_TARGET 0x1a2
#define EMRR_PHYS_BASE_MSR 0x1f4
-#define EMRR_PHYS_MASK_MSR 0x1f5
+#define MSR_PRMRR_PHYS_MASK 0x1f5
#define MSR_POWER_CTL 0x1fc
#define MSR_LT_LOCK_MEMORY 0x2e7
-#define UNCORE_PRMRR_PHYS_BASE_MSR 0x2f4
-#define UNCORE_PRMRR_PHYS_MASK_MSR 0x2f5
+#define MSR_UNCORE_PRMRR_PHYS_BASE 0x2f4
+#define MSR_UNCORE_PRMRR_PHYS_MASK 0x2f5
#define SMM_FEATURE_CONTROL_MSR 0x4e0
#define SMM_CPU_SAVE_EN (1 << 1)

View File

@@ -35,7 +35,7 @@
/* MTRR_CAP_MSR bits */
#define SMRR_SUPPORTED (1 << 11)
#define PRMRR_SUPPORTED (1 << 12)
-#define PRMRRphysBase_MSR 0x1f4
-#define PRMRRphysMask_MSR 0x1f5
+#define MSR_PRMRR_PHYS_BASE 0x1f4
+#define MSR_PRMRR_PHYS_MASK 0x1f5
#endif /* _SOC_MSR_H_ */

View File

@@ -46,8 +46,8 @@ static inline void write_prmrr(struct smm_relocation_params *relo_params)
{
printk(BIOS_DEBUG, "Writing PRMRR. base = 0x%08x, mask=0x%08x\n",
relo_params->prmrr_base.lo, relo_params->prmrr_mask.lo);
-wrmsr(PRMRRphysBase_MSR, relo_params->prmrr_base);
-wrmsr(PRMRRphysMask_MSR, relo_params->prmrr_mask);
+wrmsr(MSR_PRMRR_PHYS_BASE, relo_params->prmrr_base);
+wrmsr(MSR_PRMRR_PHYS_MASK, relo_params->prmrr_mask);
}
static void update_save_state(int cpu, uintptr_t curr_smbase,

View File

@@ -420,7 +420,7 @@ static void enable_pm_timer_emulation(void)
/* Set PM1 timer IO port and enable*/
msr.lo = (EMULATE_DELAY_VALUE << EMULATE_DELAY_OFFSET_VALUE) |
EMULATE_PM_TMR_EN | (ACPI_BASE_ADDRESS + PM1_TMR);
-wrmsr(MSR_EMULATE_PM_TMR, msr);
+wrmsr(MSR_EMULATE_PM_TIMER, msr);
}
/* All CPUs including BSP will run the following function. */
@@ -528,7 +528,7 @@ int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
* be reloaded after the core PRMRR MSRs are programmed.
*/
msr1 = rdmsr(MTRR_CAP_MSR);
-msr2 = rdmsr(PRMRR_PHYS_BASE_MSR);
+msr2 = rdmsr(MSR_PRMRR_PHYS_BASE);
if (msr2.lo && (current_patch_id == new_patch_id - 1))
return 0;
else