soc/amd/common/smm: Add option for late SMM locking

Pre-Zen SoCs like Stoneyridge call into an AGESA binary as part of S3
resume, which will fail if SMM is locked, causing the device to
(eventually) cold boot. To mitigate this, add a new Kconfig to enable
"late" SMM locking, which restores the previous behavior prior to
commit 43ed5d2534 ("cpu/amd: Move locking SMM as part of SMM init").

TEST=tested with rest of patch train

Change-Id: I9971814415271a6a107c327523a0a7c188a91df6
Signed-off-by: Matt DeVillier <matt.devillier@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/78352
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
This commit is contained in:
Matt DeVillier 2023-10-14 18:55:20 -05:00 committed by Felix Held
parent 51d1f30d0e
commit 33aa2901f8
5 changed files with 52 additions and 10 deletions

View File

@@ -85,6 +85,15 @@ config SOC_AMD_COMMON_BLOCK_SMM
Add common SMM relocation, finalization and handler functionality to
the build.
# Pre-Zen SoCs (e.g. Stoneyridge) call into an AGESA binary during S3 resume
# and need SMM still unlocked at that point; this option defers the SMM lock
# from smm_relocation_handler() to soc_finalize().
config SOC_AMD_COMMON_LATE_SMM_LOCKING
	bool
	depends on SOC_AMD_COMMON_BLOCK_SMM
	help
	  Select this option to perform SMM locking late in soc_finalize(), rather than earlier
	  in smm_relocation_handler(). This is required for pre-Zen SoCs like Stoneyridge which
	  call into an AGESA binary as part of S3 resume, and require SMM to still be unlocked
	  at that time.
config SOC_AMD_COMMON_BLOCK_SVI2
bool
help

View File

@@ -9,8 +9,30 @@
#include <cpu/x86/msr.h>
#include <types.h>
/*
 * Per-core callback (run via mp_run_on_all_cpus() from late_smm_finalize()):
 * mark TSEG valid when an SMI handler is built in, then lock the SMM
 * configuration on the executing core. Cores whose SMM configuration is
 * already locked are skipped, since writing the lock MSR again would raise
 * a general protection fault.
 */
static void late_smm_lock(void *unused)
{
	/* Finalize SMM settings */
	if (is_smm_locked()) /* Skip if already locked, avoid GPF */
		return;
	if (CONFIG(HAVE_SMI_HANDLER))
		tseg_valid();
	lock_smm();
}
/*
 * Lock the SMM configuration on every core late in the boot flow by
 * dispatching late_smm_lock() to all CPUs; warn if any core could not
 * be finalized.
 */
static void late_smm_finalize(void)
{
	printk(BIOS_SPEW, "Lock SMM configuration\n");

	if (mp_run_on_all_cpus(late_smm_lock, NULL) == CB_SUCCESS)
		return;

	printk(BIOS_WARNING, "Failed to finalize all cores\n");
}
static void soc_finalize(void *unused)
{
if (CONFIG(SOC_AMD_COMMON_LATE_SMM_LOCKING))
late_smm_finalize();
if (!acpi_is_wakeup_s3()) {
acpi_clear_pm_gpe_status();

View File

@@ -30,6 +30,20 @@ void clear_tvalid(void)
wrmsr(SMM_MASK_MSR, mask);
}
/* Mark the TSEG region as valid by setting SMM_TSEG_VALID in SMM_MASK_MSR. */
void tseg_valid(void)
{
	msr_t smm_mask = rdmsr(SMM_MASK_MSR);

	smm_mask.lo |= SMM_TSEG_VALID;
	wrmsr(SMM_MASK_MSR, smm_mask);
}
/*
 * Report whether the SMM configuration is already locked, i.e. whether the
 * SMM_LOCK bit is set in the hardware configuration register (HWCR MSR).
 */
bool is_smm_locked(void)
{
	const msr_t hwcr = rdmsr(HWCR_MSR);

	/* A comparison already yields bool; the "? true : false" was redundant. */
	return (hwcr.lo & SMM_LOCK) != 0;
}
void lock_smm(void)
{
msr_t hwcr = rdmsr(HWCR_MSR);

View File

@@ -52,14 +52,6 @@ static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
}
/*
 * Set SMM_TSEG_VALID in SMM_MASK_MSR so the TSEG region is marked valid.
 * NOTE(review): this static copy duplicates the tseg_valid() in the common
 * SMM code — presumably this diff removes it in favor of the shared version;
 * confirm against the full change.
 */
static void tseg_valid(void)
{
	msr_t mask = rdmsr(SMM_MASK_MSR);
	mask.lo |= SMM_TSEG_VALID;
	wrmsr(SMM_MASK_MSR, mask);
}
static void smm_relocation_handler(void)
{
uintptr_t tseg_base;
@@ -87,8 +79,11 @@ static void smm_relocation_handler(void)
};
wrmsr(SMM_BASE_MSR, smm_base);
tseg_valid();
lock_smm();
if (!CONFIG(SOC_AMD_COMMON_LATE_SMM_LOCKING)) {
tseg_valid();
lock_smm();
}
}
static void post_mp_init(void)

View File

@@ -11,6 +11,8 @@ void handle_smi_gsmi(void);
void handle_smi_store(void);
void fch_apmc_smi_handler(void);
void clear_tvalid(void);
/* Set the TSEG-valid bit (SMM_TSEG_VALID) in SMM_MASK_MSR. */
void tseg_valid(void);
/* Return true when SMM_LOCK is already set in the HWCR MSR. */
bool is_smm_locked(void);
/* Lock the SMM configuration; NOTE(review): implementation not fully visible
   here — appears to operate on HWCR, confirm against the definition. */
void lock_smm(void);
/* See SMITYPE_* for list possible of events. GEVENTS are handled with mainboard_smi_gpi. */
void mainboard_handle_smi(int event);