cpu/amd: Move SMM locking into SMM init

Locking SMM as part of AP init avoids needing CONFIG_PARALLEL_MP_AP_WORK
to lock it down later in boot.

Change-Id: Ibcdfc0f9ae211644cf0911790b0b0c5d1b0b7dc9
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/64871
Reviewed-by: Paul Menzel <paulepanter@mailbox.org>
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Author: Arthur Heymans <arthur@aheymans.xyz>
Date:   2022-05-31 21:50:51 +02:00
Committed-by: Felix Held
parent e48dcb708c
commit 43ed5d2534
4 changed files with 11 additions and 38 deletions
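
For context, "locking SMM" on AMD means setting the SmmLock bit in the
Hardware Configuration Register (HWCR, MSR 0xC001_0015); once set, the SMM
configuration MSRs are read-only until reset. A minimal sketch of the
operation, using the HWCR_MSR and SMM_LOCK names that appear in the diffs
below (lock_smm_sketch is a placeholder name, not the in-tree function):

#include <cpu/x86/msr.h>
#include <cpu/amd/msr.h>

/* Sketch: set HWCR[SmmLock] so the SMM configuration (TSEG mask, SMM base)
 * becomes read-only until the next reset. A later write to those MSRs
 * raises a #GP, which is why the removed code checked is_smm_locked()
 * first. */
static void lock_smm_sketch(void)
{
	msr_t hwcr = rdmsr(HWCR_MSR);

	hwcr.lo |= SMM_LOCK;
	wrmsr(HWCR_MSR, hwcr);
}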


@@ -10,30 +10,8 @@
 #include <cpu/x86/msr.h>
 #include <types.h>
 
-static void per_core_finalize(void *unused)
-{
-	/* Finalize SMM settings */
-	if (is_smm_locked()) /* Skip if already locked, avoid GPF */
-		return;
-
-	if (CONFIG(HAVE_SMI_HANDLER))
-		tseg_valid();
-
-	lock_smm();
-}
-
-static void finalize_cores(void)
-{
-	printk(BIOS_SPEW, "Lock SMM configuration\n");
-
-	if (mp_run_on_all_cpus(per_core_finalize, NULL) != CB_SUCCESS)
-		printk(BIOS_WARNING, "Failed to finalize all cores\n");
-}
-
 static void soc_finalize(void *unused)
 {
-	finalize_cores();
-
 	if (!acpi_is_wakeup_s3()) {
 		acpi_clear_pm_gpe_status();
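
The code removed above is an instance of the generic "run a callback on
every core after MP init" pattern. It only works when
CONFIG_PARALLEL_MP_AP_WORK is enabled, because the APs must still be parked
in the MP framework's wait-for-work loop when mp_run_on_all_cpus() is
called. A minimal sketch of that pattern (per_core_work and
run_on_all_cores are placeholder names):

#include <console/console.h>
#include <cpu/x86/mp.h>
#include <types.h>

/* Placeholder callback: the MP framework runs this once on each core. */
static void per_core_work(void *unused)
{
	/* per-core MSR programming would go here */
}

static void run_on_all_cores(void)
{
	/* Needs CONFIG_PARALLEL_MP_AP_WORK: the APs must still be parked
	 * in the wait-for-work loop to accept the callback. */
	if (mp_run_on_all_cpus(per_core_work, NULL) != CB_SUCCESS)
		printk(BIOS_WARNING, "Per-core work failed on some cores\n");
}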


@@ -30,20 +30,6 @@ void clear_tvalid(void)
 	wrmsr(SMM_MASK_MSR, mask);
 }
 
-void tseg_valid(void)
-{
-	msr_t mask = rdmsr(SMM_MASK_MSR);
-	mask.lo |= SMM_TSEG_VALID;
-	wrmsr(SMM_MASK_MSR, mask);
-}
-
-bool is_smm_locked(void)
-{
-	msr_t hwcr = rdmsr(HWCR_MSR);
-	return hwcr.lo & SMM_LOCK ? true : false;
-}
-
 void lock_smm(void)
 {
 	msr_t hwcr = rdmsr(HWCR_MSR);

@@ -49,6 +49,14 @@ static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
 	*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
 }
 
+static void tseg_valid(void)
+{
+	msr_t mask = rdmsr(SMM_MASK_MSR);
+	mask.lo |= SMM_TSEG_VALID;
+	wrmsr(SMM_MASK_MSR, mask);
+}
+
 static void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
 {
 	amd64_smm_state_save_area_t *smm_state;
@@ -70,6 +78,9 @@ static void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t sta
 	smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase);
 	smm_state->smbase = staggered_smbase;
+
+	tseg_valid();
+	lock_smm();
 }
 
 const struct mp_ops amd_mp_ops_with_smm = {
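
The struct initializer above is cut off by the diff; the relevant point is
that the MP framework invokes the .relocation_handler hook once per CPU
during SMM relocation, so with this change every core marks TSEG valid and
locks SMM for itself, and no later all-core rendezvous is needed. A rough
sketch of the wiring (abbreviated, not the verbatim file;
amd_mp_ops_with_smm_sketch is a placeholder and the remaining ops are
elided):

/* Sketch: mp_init_with_smm() runs .relocation_handler on each CPU,
 * which is now where tseg_valid() and lock_smm() execute. */
const struct mp_ops amd_mp_ops_with_smm_sketch = {
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	/* ... further ops elided ... */
};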


@@ -10,8 +10,6 @@ void *get_smi_source_handler(int source);
 void handle_smi_gsmi(void);
 void handle_smi_store(void);
 void clear_tvalid(void);
-void tseg_valid(void);
-bool is_smm_locked(void);
 void lock_smm(void);
 
 /* See SMITYPE_* for list possible of events. GEVENTS are handled with mainboard_smi_gpi. */
 void mainboard_handle_smi(int event);