SMM on AMD K8 Part 2/2
Signed-off-by: Rudolf Marek <r.marek@assembler.cz>
Acked-by: Stefan Reinauer <stepan@coresystems.de>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@6202 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
commit 2c3662710a (parent cadc545838)
@@ -57,13 +57,13 @@ void smm_init(void)
 		smm_handler_copied = 1;
 
+		/* MTRR changes don't like an enabled cache */
+		disable_cache();
+
 		/* Back up MSRs for later restore */
 		syscfg_orig = rdmsr(SYSCFG_MSR);
 		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
 
-		/* MTRR changes don't like an enabled cache */
-		disable_cache();
-
 		msr = syscfg_orig;
 		/* Allow changes to MTRR extended attributes */
 		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
 
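The reordering above is easier to follow with the SYSCFG bit layout in mind: on K8, SYSCFG (MSR C001_0010) bit 19 is MtrrFixDramModEn, which makes the RdMem/WrMem extended attributes of the fixed MTRRs writable, and bit 18 is MtrrFixDramEn, which makes them take effect. A minimal host-side sketch of that two-step dance; the bit positions come from the AMD BKDG, not from this patch, and the starting value is an arbitrary example:

#include <stdint.h>
#include <stdio.h>

/* SYSCFG (MSR C001_0010) bits, per the AMD K8 BKDG */
#define SYSCFG_MSR_MtrrFixDramEn    (1u << 18) /* RdMem/WrMem take effect */
#define SYSCFG_MSR_MtrrFixDramModEn (1u << 19) /* RdMem/WrMem become writable */

int main(void)
{
	uint32_t syscfg_orig = 0x00020601; /* arbitrary example boot-time value */
	uint32_t msr;

	/* Step 1: allow changes to the MTRR extended attributes */
	msr = syscfg_orig | SYSCFG_MSR_MtrrFixDramModEn;
	printf("SYSCFG while editing fixed MTRRs: %#010x\n", msr);

	/* Step 2 (after programming the MTRR): turn the attributes on */
	msr = syscfg_orig | SYSCFG_MSR_MtrrFixDramModEn | SYSCFG_MSR_MtrrFixDramEn;
	printf("SYSCFG with RdMem/WrMem active:   %#010x\n", msr);

	return 0;
}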
@@ -78,60 +78,46 @@ void smm_init(void)
 		msr.lo = 0x18181818;
 		msr.hi = 0x18181818;
 		wrmsr(MTRRfix16K_A0000_MSR, msr);
-		enable_cache();
 
-		/* disable the extended features */
+		/* enable the extended features */
 		msr = syscfg_orig;
 		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
 		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
 		wrmsr(SYSCFG_MSR, msr);
 
-		/* enable the SMM memory window */
-		// TODO does "Enable ASEG SMRAM Range" have to happen on
-		// every CPU core?
-		msr = rdmsr(SMM_MASK_MSR);
-		msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
-		msr.lo &= ~(1 << 2); // Open ASEG SMRAM Range
-		wrmsr(SMM_MASK_MSR, msr);
-
+		enable_cache();
 		/* copy the real SMM handler */
 		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
 		wbinvd();
 
-		msr = rdmsr(SMM_MASK_MSR);
-		msr.lo |= ~(1 << 2); // Close ASEG SMRAM Range
-		wrmsr(SMM_MASK_MSR, msr);
-
-		/* Change SYSCFG so we can restore the MTRR */
-		msr = syscfg_orig;
-		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
-		wrmsr(SYSCFG_MSR, msr);
-
-		/* Restore MTRR */
-		disable_cache();
-		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
-
 		/* Restore SYSCFG */
 		wrmsr(SYSCFG_MSR, syscfg_orig);
+
+		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 		enable_cache();
 	}
 
 	/* But set SMM base address on all CPUs/cores */
 	msr = rdmsr(SMM_BASE_MSR);
 	msr.lo = SMM_BASE - (lapicid() * 0x400);
 	wrmsr(SMM_BASE_MSR, msr);
-}
 
-void smm_lock(void)
-{
-	// TODO I think this should be running on each CPU
-	msr_t msr;
-
-	printk(BIOS_DEBUG, "Locking SMM.\n");
 	/* enable the SMM memory window */
 	msr = rdmsr(SMM_MASK_MSR);
 	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
 	wrmsr(SMM_MASK_MSR, msr);
 
 	/* Set SMMLOCK to avoid exploits messing with SMM */
 	msr = rdmsr(HWCR_MSR);
 	msr.lo |= (1 << 0);
 	wrmsr(HWCR_MSR, msr);
 }
+
+void smm_lock(void)
+{
+	/* We lock SMM per CPU core */
+}
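For reference, the 0x18181818 pattern written to MTRRfix16K_A0000 above encodes one attribute byte per 16 KiB slice of the 0xA0000-0xBFFFF window: bits 2:0 are the MTRR memory type (0 = UC) and, while MtrrFixDramModEn is set, bit 3 (RdMem) and bit 4 (WrMem) steer reads and writes to DRAM instead of the VGA MMIO hole. A small sketch that decodes the value; the field layout is from the AMD BKDG, not from this patch:

#include <stdint.h>
#include <stdio.h>

/* Each byte of MTRRfix16K_A0000 (MSR 0x259) describes one 16 KiB slice of
 * 0xA0000-0xBFFFF: bits 2:0 = memory type, bit 3 = RdMem, bit 4 = WrMem. */
static void decode_fixed_mtrr(uint32_t lo, uint32_t hi)
{
	uint64_t v = ((uint64_t)hi << 32) | lo;
	int i;

	for (i = 0; i < 8; i++) {
		uint8_t b = v >> (8 * i);
		printf("0x%05x-0x%05x: type=%u RdMem=%u WrMem=%u\n",
		       0xa0000 + i * 0x4000, 0xa0000 + (i + 1) * 0x4000 - 1,
		       b & 7, (b >> 3) & 1, (b >> 4) & 1);
	}
}

int main(void)
{
	/* The value the patch programs while copying the handler:
	 * every slice becomes RdMem+WrMem (DRAM), type UC. */
	decode_fixed_mtrr(0x18181818, 0x18181818);
	return 0;
}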
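The SMM_BASE - (lapicid() * 0x400) line gives every core its own SMBASE inside the single handler copy at ASEG. The architectural offsets are fixed: a core enters SMM at SMBASE + 0x8000 and its save-state area sits at SMBASE + 0xfe00, so staggering SMBASE by 0x400 per LAPIC ID keeps the per-core entry stubs and save states from colliding. A sketch of the resulting layout; the 0x8000/0xfe00 offsets are architectural constants, and the four-core loop is only for illustration:

#include <stdint.h>
#include <stdio.h>

#define SMM_BASE 0xa0000u

int main(void)
{
	unsigned core;

	/* Per-core SMBASE as programmed by the patch: staggered 0x400
	 * bytes per LAPIC ID below SMM_BASE. */
	for (core = 0; core < 4; core++) {
		uint32_t smbase = SMM_BASE - core * 0x400;
		printf("lapic %u: SMBASE=%#07x entry=%#07x save state=%#07x\n",
		       core, smbase, smbase + 0x8000, smbase + 0xfe00);
	}
	return 0;
}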
@@ -208,9 +208,9 @@ smi_handler_t southbridge_smi[32] = {
 	NULL, // [1]
 	NULL, // [2]
 	NULL, // [3]
-	southbridge_smi_cmd, // [4]
+	NULL, // [4]
 	NULL, // [5]
-	NULL, // [6]
+	southbridge_smi_cmd, // [6]
 	NULL, // [7]
 	NULL, // [8]
 	NULL, // [9]
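The index change only makes sense together with the dispatch convention: southbridge_smi[] is indexed by the bit number of the southbridge's SMI status register, so moving southbridge_smi_cmd from slot 4 to slot 6 means the command/APM SMI is reported in status bit 6 on this chipset. A simplified sketch of that dispatch loop, assuming the bit-per-slot convention used by coreboot's smihandlers; the status value and the handler signature here are stand-ins:

#include <stdint.h>
#include <stdio.h>

typedef void (*smi_handler_t)(unsigned node, void *state_save);

static void southbridge_smi_cmd(unsigned node, void *state_save)
{
	(void)state_save;
	printf("APM/command SMI on node %u\n", node);
}

/* One handler slot per SMI status bit; slot 6 as fixed by the patch. */
static smi_handler_t southbridge_smi[32] = {
	[6] = southbridge_smi_cmd,
};

static void southbridge_smi_handler(unsigned node, void *state_save)
{
	uint32_t smi_sts = 1u << 6; /* pretend only the command SMI is pending */
	int i;

	/* Walk the status bits and invoke the matching table entry. */
	for (i = 0; i < 32; i++) {
		if ((smi_sts & (1u << i)) && southbridge_smi[i])
			southbridge_smi[i](node, state_save);
	}
}

int main(void)
{
	southbridge_smi_handler(0, NULL);
	return 0;
}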