Make AMD SMM SMP aware
Move the SMM MSR init into code that runs on every CPU. Introduce a global SMM_BASE define; later, all uses of 0xa0000 could be changed to use it. Remove the unnecessary check that the smm_init routine runs only once (it is called by the BSP only), and also remove the early return when the lock bit is already set, because that bit appears to be cleared by INIT. Add the MSR defines for fam10h and fam0fh to their respective header files, since we do not have a shared AMD MSR header.

Tested on an M2V-MX SE with a dual-core CPU.

Change-Id: I1b2bf157d1cc79c566c9089689a9bfd9310f5683
Signed-off-by: Rudolf Marek <r.marek@assembler.cz>
Reviewed-on: http://review.coreboot.org/82
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
commit b5b3b3bf8c
parent a68555f48d
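Not part of the commit: a minimal standalone sketch of the per-core SMBASE staggering that the hunks below program into SMM_BASE_MSR. Each core's SMM base is offset downward from the shared ASEG base (SMM_BASE, 0xa0000) by 0x400 bytes per local APIC ID, so the cores' SMM save-state areas do not overlap. The helper name, the dual-core loop, and the printed values are illustrative only; SMM_BASE and the formula come from the diffs.

#include <stdint.h>
#include <stdio.h>

#define SMM_BASE 0xa0000	/* ASEG base, as added to <cpu/x86/smm.h> below */

/* Mirrors "msr.lo = SMM_BASE - (lapicid() * 0x400)" from the hunks below:
 * each core's SMBASE is staggered down by 0x400 bytes per local APIC ID. */
static uint32_t smm_base_for_core(uint32_t lapicid)
{
	return SMM_BASE - (lapicid * 0x400);
}

int main(void)
{
	/* Illustrative dual-core system, APIC IDs 0 and 1 */
	for (uint32_t id = 0; id < 2; id++)
		printf("core %u: SMBASE = 0x%05x\n",
		       (unsigned)id, (unsigned)smm_base_for_core(id));
	return 0;	/* prints 0xa0000 for core 0 and 0x9fc00 for core 1 */
}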
@@ -24,6 +24,7 @@
 #include <device/pci.h>
 #include <string.h>
 #include <cpu/x86/msr.h>
+#include <cpu/x86/smm.h>
 #include <cpu/x86/pae.h>
 #include <pc80/mc146818rtc.h>
 #include <cpu/x86/lapic.h>
@@ -118,7 +119,17 @@ static void model_10xxx_init(device_t dev)
 	msr.hi &= ~(1 << (35-32));
 	wrmsr(BU_CFG2_MSR, msr);

-	/* Write protect SMM space with SMMLOCK. */
+	/* Set SMM base address for this CPU */
+	msr = rdmsr(SMM_BASE_MSR);
+	msr.lo = SMM_BASE - (lapicid() * 0x400);
+	wrmsr(SMM_BASE_MSR, msr);
+
+	/* Enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0); /* Enable ASEG SMRAM Range */
+	wrmsr(SMM_MASK_MSR, msr);
+
+	/* Set SMMLOCK to avoid exploits messing with SMM */
 	msr = rdmsr(HWCR_MSR);
 	msr.lo |= (1 << 0);
 	wrmsr(HWCR_MSR, msr);
@@ -24,6 +24,7 @@
 #include <cpu/cpu.h>
 #include <cpu/x86/cache.h>
 #include <cpu/x86/mtrr.h>
+#include <cpu/x86/smm.h>
 #include <cpu/amd/multicore.h>
 #include <cpu/amd/model_fxx_msr.h>

@@ -547,6 +548,21 @@ static void model_fxx_init(device_t dev)
 	 */
 	if (id.coreid == 0)
 		init_ecc_memory(id.nodeid); // only do it for core 0
+
+	/* Set SMM base address for this CPU */
+	msr = rdmsr(SMM_BASE_MSR);
+	msr.lo = SMM_BASE - (lapicid() * 0x400);
+	wrmsr(SMM_BASE_MSR, msr);
+
+	/* Enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0); /* Enable ASEG SMRAM Range */
+	wrmsr(SMM_MASK_MSR, msr);
+
+	/* Set SMMLOCK to avoid exploits messing with SMM */
+	msr = rdmsr(HWCR_MSR);
+	msr.lo |= (1 << 0);
+	wrmsr(HWCR_MSR, msr);
 }

 static struct device_operations cpu_dev_ops = {
@@ -30,32 +30,12 @@
 #include <cpu/x86/smm.h>
 #include <string.h>

-#define SMM_BASE_MSR 0xc0010111
-#define SMM_ADDR_MSR 0xc0010112
-#define SMM_MASK_MSR 0xc0010113
-#define SMM_BASE 0xa0000
-
 extern unsigned char _binary_smm_start;
 extern unsigned char _binary_smm_size;

-static int smm_handler_copied = 0;
-
 void smm_init(void)
 {
-	msr_t msr;
+	msr_t msr, syscfg_orig, mtrr_aseg_orig;

-	msr = rdmsr(HWCR_MSR);
-	if (msr.lo & (1 << 0)) {
-		// This sounds like a bug... ?
-		printk(BIOS_DEBUG, "SMM is still locked from last boot, using old handler.\n");
-		return;
-	}
-
-	/* Only copy SMM handler once, not once per CPU */
-	if (!smm_handler_copied) {
-		msr_t syscfg_orig, mtrr_aseg_orig;
-
-		smm_handler_copied = 1;
-
 	/* Back up MSRs for later restore */
 	syscfg_orig = rdmsr(SYSCFG_MSR);
@@ -65,6 +45,7 @@ void smm_init(void)
 	disable_cache();

 	msr = syscfg_orig;
+
 	/* Allow changes to MTRR extended attributes */
 	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
 	/* turn the extended attributes off until we fix
@@ -74,7 +55,6 @@ void smm_init(void)
 	wrmsr(SYSCFG_MSR, msr);

 	/* set DRAM access to 0xa0000 */
-	/* A0000 is memory */
 	msr.lo = 0x18181818;
 	msr.hi = 0x18181818;
 	wrmsr(MTRRfix16K_A0000_MSR, msr);
@@ -89,35 +69,17 @@ void smm_init(void)
 	/* copy the real SMM handler */
 	memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
 	wbinvd();

-	/* Restore MTRR */
 	disable_cache();

-	/* Restore SYSCFG */
+	/* Restore SYSCFG and MTRR */
 	wrmsr(SYSCFG_MSR, syscfg_orig);
-
 	wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 	enable_cache();
-	}

-	/* But set SMM base address on all CPUs/cores */
-	msr = rdmsr(SMM_BASE_MSR);
-	msr.lo = SMM_BASE - (lapicid() * 0x400);
-	wrmsr(SMM_BASE_MSR, msr);
-
-	/* enable the SMM memory window */
-	msr = rdmsr(SMM_MASK_MSR);
-	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
-	wrmsr(SMM_MASK_MSR, msr);
-
-	/* Set SMMLOCK to avoid exploits messing with SMM */
-	msr = rdmsr(HWCR_MSR);
-	msr.lo |= (1 << 0);
-	wrmsr(HWCR_MSR, msr);
+	/* CPU MSR are set in CPU init */
 }

 void smm_lock(void)
 {
-	/* We lock SMM per CPU core */
+	/* We lock SMM in CPU init */
 }
@@ -22,6 +22,10 @@

 #include <cpu/x86/msr.h>

+#define SMM_BASE_MSR 0xC0010111
+#define SMM_ADDR_MSR 0xC0010112
+#define SMM_MASK_MSR 0xC0010113
+
 #define HWCR_MSR 0xC0010015
 #define NB_CFG_MSR 0xC001001f
 #define LS_CFG_MSR 0xC0011020
@@ -1,6 +1,10 @@
 #ifndef CPU_AMD_MODEL_FXX_MSR_H
 #define CPU_AMD_MODEL_FXX_MSR_H

+#define SMM_BASE_MSR 0xc0010111
+#define SMM_ADDR_MSR 0xc0010112
+#define SMM_MASK_MSR 0xc0010113
+
 #define HWCR_MSR 0xC0010015
 #define NB_CFG_MSR 0xC001001f
 #define LS_CFG_MSR 0xC0011020
@@ -24,6 +24,9 @@
 #ifndef CPU_X86_SMM_H
 #define CPU_X86_SMM_H

+/* used only by C programs so far */
+#define SMM_BASE 0xa0000
+
 #include <types.h>
 typedef struct {
 	u16 es_selector;