x86/smm: Initialize SMM on some CPUs one-by-one

We currently race during SMM initialization on the Atom 230 (and
potentially other CPUs). At least on the 230, this leads to a hang
on RSM, likely because both hyperthreads modify SMBASE and other
SMM state variables in parallel without coordination. The same
behaviour occurs on the Atom D5xx.

Change the flow so that the APs are first spun up and sent to
sleep, then the BSP initializes SMM, and finally every AP does,
one after another (a rough sketch of the resulting order is shown
below).

Only do this when SERIALIZED_SMM_INITIALIZATION is set, and set
that flag for Atom CPUs.
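
For clarity, the resulting order of operations with the option
enabled is roughly the following. This is a hedged sketch, not the
literal code: smm_init(), start_other_cpus(), wait_other_cpus_stop()
and smm_other_cpus() are the names used in the patch, while
cpu_info() and the simplified guard placement are assumptions.

/* Sketch only: config guards, locals and error handling omitted. */
void initialize_cpus(struct bus *cpu_bus)
{
	struct cpu_info *info = cpu_info();

	if (!IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION))
		smm_init();	/* old path: SMM set up before the APs run */

	start_other_cpus(cpu_bus, info->cpu);	/* spin up the APs ... */
	wait_other_cpus_stop(cpu_bus);	/* ... and send them to sleep */

	if (IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION)) {
		smm_init();	/* queue a pending SMI on all cpus */
		smm_other_cpus(cpu_bus, info->cpu);	/* wake each AP in turn */
	}
}

Because each AP is started a second time for this SMM pass,
cpu_initialize() gains an early "if (cpu->initialized) return;"
(first hunk below), presumably so a re-started AP only handles the
pending SMI instead of redoing its full init.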

Change-Id: I1ae864e37546298ea222e81349c27cf774ed251f
Signed-off-by: Patrick Georgi <patrick@georgi-clan.de>
Signed-off-by: Damien Zammit <damien@zamaudio.com>
Reviewed-on: https://review.coreboot.org/6311
Tested-by: build bot (Jenkins)
Tested-by: BSI firmware lab <coreboot-labor@bsi.bund.de>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Damien Zammit authored 2015-11-28 21:27:05 +11:00; committed by Stefan Reinauer
commit 149c4c5d01 (parent 003d15cab4)
4 changed files with 61 additions and 1 deletion


@@ -234,6 +234,9 @@ void cpu_initialize(unsigned int index)
 		die("CPU: missing cpu device structure");
 	}
 
+	if (cpu->initialized)
+		return;
+
 	post_log_path(cpu);
 
 	/* Find what type of cpu we are dealing with */


@@ -11,6 +11,7 @@ config CPU_INTEL_MODEL_106CX
 	select AP_IN_SIPI_WAIT
 	select TSC_SYNC_MFENCE
 	select SUPPORT_CPU_UCODE_IN_CBFS
+	select SERIALIZED_SMM_INITIALIZATION
 
 if CPU_INTEL_MODEL_106CX


@@ -96,6 +96,17 @@ config SMM_LAPIC_REMAP_MITIGATION
 	default y if NORTHBRIDGE_INTEL_NEHALEM
 	default n
 
+config SERIALIZED_SMM_INITIALIZATION
+	bool
+	default n
+	help
+	  On some CPUs, there is a race condition in SMM.
+	  This can occur when both hyperthreads change SMM state
+	  variables in parallel without coordination.
+	  Setting this option serializes the SMM initialization
+	  to avoid an ugly hang in the boot process at the cost
+	  of a slightly longer boot time.
+
 config X86_AMD_FIXED_MTRRS
 	bool
 	default n


@@ -458,6 +458,39 @@ static void start_other_cpus(struct bus *cpu_bus, struct device *bsp_cpu)
 }
 
+static void smm_other_cpus(struct bus *cpu_bus, device_t bsp_cpu)
+{
+	device_t cpu;
+	int pre_count = atomic_read(&active_cpus);
+
+	/* Loop through the cpus once to let them run through SMM relocator */
+	for(cpu = cpu_bus->children; cpu ; cpu = cpu->sibling) {
+		if (cpu->path.type != DEVICE_PATH_APIC) {
+			continue;
+		}
+
+		printk(BIOS_ERR, "considering CPU 0x%02x for SMM init\n",
+			cpu->path.apic.apic_id);
+
+		if (cpu == bsp_cpu)
+			continue;
+
+		if (!cpu->enabled) {
+			continue;
+		}
+
+		if (!start_cpu(cpu)) {
+			/* Record the error in cpu? */
+			printk(BIOS_ERR, "CPU 0x%02x would not start!\n",
+				cpu->path.apic.apic_id);
+		}
+
+		/* FIXME: endless loop */
+		while (atomic_read(&active_cpus) != pre_count) ;
+	}
+}
+
 static void wait_other_cpus_stop(struct bus *cpu_bus)
 {
 	struct device *cpu;

@@ -528,6 +561,7 @@ void initialize_cpus(struct bus *cpu_bus)
 #endif
 
 #if CONFIG_HAVE_SMI_HANDLER
+	if (!IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION))
 	smm_init();
 #endif
 
@@ -547,4 +581,15 @@ void initialize_cpus(struct bus *cpu_bus)
 	/* Now wait the rest of the cpus stop*/
 	wait_other_cpus_stop(cpu_bus);
 #endif
+
+	if (IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION)) {
+		/* At this point, all APs are sleeping:
+		 * smm_init() will queue a pending SMI on all cpus
+		 * and smm_other_cpus() will start them one by one */
+		smm_init();
+#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
+		last_cpu_index = 0;
+		smm_other_cpus(cpu_bus, info->cpu);
+#endif
+	}
 }
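
The serialization itself hinges on the active_cpus counter:
smm_other_cpus() samples it before waking an AP and then spins
until it drops back to that value, i.e. until the woken AP has
handled its pending SMI and gone back to sleep. The standalone
sketch below illustrates that handshake with C11 atomics; the
active_cpus semantics, the atomic type and both helper functions
are assumptions for illustration only, not coreboot's actual AP
entry path.

/* Illustrative model of the pre_count handshake, not coreboot code. */
#include <stdatomic.h>

static atomic_int active_cpus;	/* assumed: number of APs currently awake */

static void ap_smm_pass(void)	/* hypothetical stand-in for the AP side */
{
	atomic_fetch_add(&active_cpus, 1);	/* AP wakes up */
	/* ... pending SMI queued by smm_init() is handled here ... */
	atomic_fetch_sub(&active_cpus, 1);	/* AP goes back to sleep */
}

static void serialize_one_ap(void)	/* mirrors the loop body in the patch */
{
	int pre_count = atomic_load(&active_cpus);

	ap_smm_pass();	/* stands in for start_cpu(cpu) waking one AP */

	/* Do not touch the next AP until this one is asleep again. */
	while (atomic_load(&active_cpus) != pre_count)
		;
}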