soc/intel: Backport SMRR locking support
Backport commit 0cded1f116 (soc/intel/tigerlake: Add SMRR Locking
support) to other client platforms. The SMRR MSRs are core-scoped on
Skylake and Ice Lake, at least. Older platforms do not support SMRR
locking, but now there are seven copies of the same file in the tree.
A follow-up will deduplicate the smmrelocate.c files into common CPU code.
I cannot test Jasper Lake or Elkhart Lake, but they should still work.
According to documentation I do not have access to, Elkhart Lake seems
to support SMRR locking; the Jasper Lake documentation is unclear.
Tested on a Purism Librem Mini v1 (WHL-U i7-8565U): it still boots, and
the SMRR MSRs have the same value on all cores/threads (the i7-8565U
supports HT).
Change-Id: Icbee0985b04418e83cbf41b81f00934f5a663e30
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/50936
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Michael Niewöhner <foss@mniewoehner.de>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-by: Nico Huber <nico.h@gmx.de>
parent 8eb3a342d1
commit 6d9af0ce6e
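For context when reading the hunks below: the new code keys off two feature bits in IA32_MTRRCAP and sets the lock bit in IA32_SMRR_PHYSMASK. The definitions here are a sketch of how I understand coreboot's <cpu/x86/mtrr.h> and the Intel SDM, not copied from the patch; verify the exact values against the tree.

	/* Assumed bit positions (check <cpu/x86/mtrr.h> in the coreboot tree): */
	#define MTRR_CAP_MSR		0xfe		/* IA32_MTRRCAP */
	#define SMRR_SUPPORTED		(1 << 11)	/* SMRR range registers implemented */
	#define SMRR_LOCK_SUPPORTED	(1 << 14)	/* SMRR lock mechanism implemented */
	#define SMRR_PHYS_MASK_LOCK	(1 << 10)	/* Lock bit in IA32_SMRR_PHYSMASK */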
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */

 #include <console/console.h>
+#include <cpu/intel/common/common.h>
 #include <cpu/intel/em64t101_save_state.h>
 #include <cpu/intel/smm_reloc.h>
 #include <cpu/x86/mp.h>
@@ -137,8 +138,24 @@ void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
 	/* Make appropriate changes to the save state map. */
 	update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);

+	/*
+	 * The SMRR MSRs are core-level registers, so if two threads that share
+	 * a core try to both set the lock bit (in the same physical register),
+	 * a #GP will be raised on the second write to that register (which is
+	 * exactly what the lock is supposed to do), therefore secondary threads
+	 * should exit here.
+	 */
+	if (intel_ht_sibling())
+		return;
+
 	/* Write SMRR MSRs based on indicated support. */
 	mtrr_cap = rdmsr(MTRR_CAP_MSR);
+
+	/* Set Lock bit if supported */
+	if (mtrr_cap.lo & SMRR_LOCK_SUPPORTED)
+		relo_params->smrr_mask.lo |= SMRR_PHYS_MASK_LOCK;
+
+	/* Write SMRRs if supported */
 	if (mtrr_cap.lo & SMRR_SUPPORTED)
 		write_smrr(relo_params);
 }
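intel_ht_sibling() comes from cpu/intel/common (pulled in by the new include) and reports whether the running logical CPU is a secondary SMT thread of its core. The sketch below is only an illustration of that idea using CPUID leaf 0xB; it is not the coreboot implementation.

	#include <cpuid.h>	/* GCC/Clang helper for the CPUID instruction */
	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Illustration only: a logical CPU is an HT sibling if its SMT-level
	 * thread index within the core is non-zero. CPUID leaf 0xB, sub-leaf 0
	 * gives the number of x2APIC ID bits used for the SMT level in EAX[4:0]
	 * and the current x2APIC ID in EDX; the low bits select the thread.
	 */
	static bool ht_sibling_sketch(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid_count(0x0b, 0, &eax, &ebx, &ecx, &edx))
			return false;	/* leaf not available: treat as primary thread */

		unsigned int smt_bits = eax & 0x1f;	/* shift width for the SMT level */
		return (edx & ((1u << smt_bits) - 1)) != 0;
	}

	int main(void)
	{
		printf("HT sibling (on this CPU): %s\n", ht_sibling_sketch() ? "yes" : "no");
		return 0;
	}

On a hyper-threaded part such as the i7-8565U, only one of the two threads per core passes this check, so only one thread per core writes (and locks) the shared SMRR MSRs.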
(The same two hunks are applied unchanged to each of the other affected platforms' smmrelocate.c files.)
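To reproduce the check mentioned in the commit message (same SMRR values on all cores/threads), something like the following hypothetical userspace helper can be run on Linux (as root, with the msr driver loaded). The MSR numbers 0x1f2 (IA32_SMRR_PHYSBASE) and 0x1f3 (IA32_SMRR_PHYSMASK) are taken from the Intel SDM; this tool is not part of the patch.

	#include <fcntl.h>
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Read one MSR on one logical CPU via the standard msr driver interface. */
	static int rdmsr_on_cpu(int cpu, uint32_t msr, uint64_t *val)
	{
		char path[64];
		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		int fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		ssize_t ret = pread(fd, val, sizeof(*val), msr);
		close(fd);
		return ret == (ssize_t)sizeof(*val) ? 0 : -1;
	}

	int main(void)
	{
		/* Walk logical CPUs until the device node no longer exists. */
		for (int cpu = 0; ; cpu++) {
			uint64_t base, mask;
			if (rdmsr_on_cpu(cpu, 0x1f2, &base) || rdmsr_on_cpu(cpu, 0x1f3, &mask))
				break;	/* no such CPU, or msr module not loaded */
			printf("cpu%d: SMRR_PHYSBASE=%#" PRIx64 " SMRR_PHYSMASK=%#" PRIx64 "\n",
			       cpu, base, mask);
		}
		return 0;
	}

If SMRR locking took effect, every line should report identical values, with the lock bit set in SMRR_PHYSMASK on platforms that advertise SMRR_LOCK support.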