cpu/x86/smm_module_handler: Add relocatable module params

Instead of passing parameters on from the stub to the permanent
handler, add them directly to the permanent handler as relocatable
module parameters.

The parameters in the stub will be removed in a later patch.

Change-Id: Ib3bde78dd9e0c02dd1d86e03665fa9c65e3d07eb
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/50764
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Author: Arthur Heymans, 2021-02-15 16:02:10 +01:00
Committed by: Patrick Georgi
Commit: 823b1a8270, parent: c9aecb4440
3 changed files with 31 additions and 12 deletions
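
For context: a relocatable module (rmodule) can expose a parameter blob through a dedicated ".module_parameters" section, and the loader can locate and fill that blob in place via rmodule_parameters() after loading the module. The patch below applies exactly this pattern so the permanent SMM handler no longer depends on the stub forwarding a struct smm_runtime pointer. A minimal sketch of the pattern, assuming coreboot's <rmodule.h> API; struct my_params and fill_handler_params() are illustrative names, not coreboot code:

    #include <stddef.h>
    #include <stdint.h>
    #include <rmodule.h>   /* rmodule_parameters() */

    /* Module side: parameters live in a section the loader can patch in place. */
    struct my_params {
            uintptr_t base;
            size_t size;
    };

    static const volatile
    __attribute__((__section__(".module_parameters"))) struct my_params my_params;

    /* Loader side: after loading the rmodule, write the values directly. */
    static void fill_handler_params(struct rmodule *mod, uintptr_t base, size_t size)
    {
            struct my_params *p = rmodule_parameters(mod);

            p->base = base;
            p->size = size;
    }

The volatile qualifier keeps the compiler from constant-folding the build-time (zero) contents, since the loader rewrites the section before the handler ever runs, while const documents that the handler itself only reads the parameters.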


@@ -18,6 +18,9 @@ typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
 static volatile
 __attribute__((aligned(4))) smi_semaphore smi_handler_status = SMI_UNLOCKED;
 
+static const volatile
+__attribute((aligned(4), __section__(".module_parameters"))) struct smm_runtime smm_runtime;
+
 static int smi_obtain_lock(void)
 {
         u8 ret = SMI_LOCKED;
@@ -87,8 +90,6 @@ static void smi_restore_pci_address(void)
         outl(pci_orig, 0xcf8);
 }
 
-static const struct smm_runtime *smm_runtime;
-
 struct global_nvs *gnvs;
 
 void *smm_get_save_state(int cpu)
@@ -97,9 +98,9 @@ void *smm_get_save_state(int cpu)
         /* This function assumes all save states start at top of default
          * SMRAM size space and are staggered down by save state size. */
-        base = (void *)(uintptr_t)smm_runtime->smbase;
+        base = (void *)(uintptr_t)smm_runtime.smbase;
         base += SMM_DEFAULT_SIZE;
-        base -= (cpu + 1) * smm_runtime->save_state_size;
+        base -= (cpu + 1) * smm_runtime.save_state_size;
 
         return base;
 }
@@ -108,12 +109,13 @@ uint32_t smm_revision(void)
 {
         const uintptr_t save_state = (uintptr_t)(smm_get_save_state(0));
 
-        return *(uint32_t *)(save_state + smm_runtime->save_state_size - SMM_REVISION_OFFSET_FROM_TOP);
+        return *(uint32_t *)(save_state + smm_runtime.save_state_size
+                             - SMM_REVISION_OFFSET_FROM_TOP);
 }
 
 bool smm_region_overlaps_handler(const struct region *r)
 {
-        const struct region r_smm = {smm_runtime->smbase, smm_runtime->smm_size};
+        const struct region r_smm = {smm_runtime.smbase, smm_runtime.smm_size};
         const struct region r_aseg = {SMM_BASE, SMM_DEFAULT_SIZE};
 
         return region_overlap(&r_smm, r) || region_overlap(&r_aseg, r);
@@ -122,22 +124,17 @@ bool smm_region_overlaps_handler(const struct region *r)
 asmlinkage void smm_handler_start(void *arg)
 {
         const struct smm_module_params *p;
-        const struct smm_runtime *runtime;
         int cpu;
         uintptr_t actual_canary;
         uintptr_t expected_canary;
 
         p = arg;
-        runtime = p->runtime;
         cpu = p->cpu;
         expected_canary = (uintptr_t)p->canary;
 
-        /* Make sure to set the global runtime. It's OK to race as the value
-         * will be the same across CPUs as well as multiple SMIs. */
-        if (smm_runtime == NULL) {
-                smm_runtime = runtime;
-                gnvs = (void *)(uintptr_t)smm_runtime->gnvs_ptr;
-        }
+        gnvs = (void *)(uintptr_t)smm_runtime.gnvs_ptr;
 
         if (cpu >= CONFIG_MAX_CPUS) {
                 console_init();
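
The address math in smm_get_save_state() above stacks the per-CPU save states downward from the top of the default SMRAM window: base = smbase + SMM_DEFAULT_SIZE - (cpu + 1) * save_state_size. A small standalone sketch of that arithmetic; the SMBASE and per-CPU save-state size below are assumed example values, not numbers from this patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uintptr_t smbase = 0xa0000;        /* assumed example SMBASE */
            const uintptr_t default_size = 0x10000;  /* SMM_DEFAULT_SIZE, 64 KiB */
            const uintptr_t ss_size = 0x400;         /* assumed save state size */

            for (int cpu = 0; cpu < 4; cpu++) {
                    /* Same formula as smm_get_save_state(): top of the default
                     * SMRAM window, staggered down by one save state per CPU. */
                    uintptr_t base = smbase + default_size - (cpu + 1) * ss_size;
                    printf("CPU %d save state at 0x%lx\n", cpu, (unsigned long)base);
            }
            return 0;
    }

With these example numbers CPU 0's save state spans 0xafc00-0xaffff, right at the top of the window, and each further CPU sits one save-state area lower.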


@@ -324,6 +324,7 @@ int smm_setup_relocation_handler(struct smm_loader_params *params)
 int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
 {
         struct rmodule smm_mod;
+        struct smm_runtime *handler_mod_params;
         size_t total_stack_size;
         size_t handler_size;
         size_t module_alignment;
@@ -390,6 +391,12 @@ int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
                 return -1;
 
         params->handler = rmodule_entry(&smm_mod);
+        handler_mod_params = rmodule_parameters(&smm_mod);
+        handler_mod_params->smbase = (uintptr_t)smram;
+        handler_mod_params->smm_size = size;
+        handler_mod_params->save_state_size = params->per_cpu_save_state_size;
+        handler_mod_params->num_cpus = params->num_concurrent_stacks;
+        handler_mod_params->gnvs_ptr = (uintptr_t)acpi_get_gnvs();
 
         return smm_module_setup_stub(smram, size, params, fxsave_area);
 }


@@ -536,6 +536,7 @@ int smm_setup_relocation_handler(struct smm_loader_params *params)
 int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
 {
         struct rmodule smm_mod;
+        struct smm_runtime *handler_mod_params;
         size_t total_stack_size;
         size_t handler_size;
         size_t module_alignment;
@@ -618,6 +619,12 @@ int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
                 return -1;
 
         params->handler = rmodule_entry(&smm_mod);
+        handler_mod_params = rmodule_parameters(&smm_mod);
+        handler_mod_params->smbase = (uintptr_t)smram;
+        handler_mod_params->smm_size = size;
+        handler_mod_params->save_state_size = params->per_cpu_save_state_size;
+        handler_mod_params->num_cpus = params->num_concurrent_stacks;
+        handler_mod_params->gnvs_ptr = (uintptr_t)acpi_get_gnvs();
 
         printk(BIOS_DEBUG, "%s: smram_start: 0x%p\n",
                __func__, smram);
@@ -638,6 +645,14 @@ int smm_load_module(void *smram, size_t size, struct smm_loader_params *params)
         printk(BIOS_DEBUG, "%s: CONFIG_BIOS_RESOURCE_LIST_SIZE 0x%x\n",
                __func__, CONFIG_BIOS_RESOURCE_LIST_SIZE);
 
+        printk(BIOS_DEBUG, "%s: handler_mod_params.smbase = 0x%x\n", __func__,
+               handler_mod_params->smbase);
+        printk(BIOS_DEBUG, "%s: per_cpu_save_state_size = 0x%x\n", __func__,
+               handler_mod_params->save_state_size);
+        printk(BIOS_DEBUG, "%s: num_cpus = 0x%x\n", __func__, handler_mod_params->num_cpus);
+        printk(BIOS_DEBUG, "%s: total_save_state_size = 0x%x\n", __func__,
+               (handler_mod_params->save_state_size * handler_mod_params->num_cpus));
+
         /* CPU 0 smbase goes first, all other CPUs
          * will be staggered below
          */
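
The new debug output above is derived purely from the handler parameters the loader just filled in; in particular total_save_state_size is save_state_size * num_cpus, which, together with the staggered per-CPU SMBASEs mentioned in the comment, has to fit inside the SMM region. A minimal standalone sanity check of that footprint; the struct and all example values are assumptions for illustration, not the coreboot struct smm_runtime or values from this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors only the fields used above; illustrative, not coreboot's struct. */
    struct params_example {
            uint32_t smbase;
            uint32_t smm_size;
            uint32_t save_state_size;
            uint32_t num_cpus;
    };

    int main(void)
    {
            const struct params_example p = {
                    .smbase = 0x30000000,      /* assumed TSEG base */
                    .smm_size = 0x800000,      /* assumed 8 MiB SMM region */
                    .save_state_size = 0x400,  /* assumed per-CPU save state size */
                    .num_cpus = 8,
            };
            const uint32_t total = p.save_state_size * p.num_cpus;

            printf("total_save_state_size = 0x%x\n", total);
            /* The per-CPU save states (and staggered entry points) must fit in SMRAM. */
            printf("fits in SMM region: %s\n", total <= p.smm_size ? "yes" : "no");
            return 0;
    }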