Revert "model_206ax: Use parallel MP init"

This reverts commit 5fbe788bae.
The reverted commit was submitted without its parent,
leaving coreboot unable to build.

Change-Id: I87497093ccf6909b88e3a40d5f472afeb7f2c552
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/25616
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Nico Huber <nico.h@gmx.de>
Author: Arthur Heymans <arthur@aheymans.xyz>
Date:   2018-04-11 13:03:34 +02:00
Commit: 68f688896c (parent 5fbe788bae)
12 changed files with 93 additions and 194 deletions
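The message above follows git's own revert template; the inverse diff below is what running git revert on the offending commit produces:

    git revert 5fbe788bae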


@@ -102,6 +102,7 @@ void intel_model_206ax_finalize_smm(void);
 /* Configure power limits for turbo mode */
 void set_power_limits(u8 power_limit_1_time);
 int cpu_config_tdp_levels(void);
+void smm_relocate(void);
 #endif
 #endif


@@ -91,6 +91,7 @@ void intel_model_2065x_finalize_smm(void);
 /* Configure power limits for turbo mode */
 void set_power_limits(u8 power_limit_1_time);
 int cpu_config_tdp_levels(void);
+void smm_relocate(void);
 #endif
 #endif


@@ -23,7 +23,6 @@ config CPU_SPECIFIC_OPTIONS
        select TSC_SYNC_MFENCE
        select CPU_INTEL_COMMON
        select CACHE_RELOCATED_RAMSTAGE_OUTSIDE_CBMEM
-       select PARALLEL_MP

 config BOOTBLOCK_CPU_INIT
        string


@@ -121,6 +121,7 @@ void intel_model_206ax_finalize_smm(void);
 /* Configure power limits for turbo mode */
 void set_power_limits(u8 power_limit_1_time);
 int cpu_config_tdp_levels(void);
+void smm_relocate(void);
 #endif
 #endif


@@ -15,7 +15,6 @@
  * GNU General Public License for more details.
  */

-#include <assert.h>
 #include <console/console.h>
 #include <device/device.h>
 #include <string.h>
@@ -24,7 +23,6 @@
 #include <cpu/x86/mtrr.h>
 #include <cpu/x86/msr.h>
 #include <cpu/x86/lapic.h>
-#include <cpu/x86/mp.h>
 #include <cpu/intel/microcode.h>
 #include <cpu/intel/speedstep.h>
 #include <cpu/intel/turbo.h>
@@ -423,6 +421,83 @@ static void configure_mca(void)
                wrmsr(IA32_MC0_STATUS + (i * 4), msr);
 }

+int cpu_get_apic_id_map(int *apic_id_map)
+{
+       struct cpuid_result result;
+       unsigned int threads_per_package, threads_per_core, i, shift = 0;
+
+       /* Logical processors (threads) per core */
+       result = cpuid_ext(0xb, 0);
+       threads_per_core = result.ebx & 0xffff;
+
+       /* Logical processors (threads) per package */
+       result = cpuid_ext(0xb, 1);
+       threads_per_package = result.ebx & 0xffff;
+
+       if (threads_per_core == 1)
+               shift++;
+
+       for (i = 0; i < threads_per_package && i < CONFIG_MAX_CPUS; i++)
+               apic_id_map[i] = i << shift;
+
+       return threads_per_package;
+}
+
+/*
+ * Initialize any extra cores/threads in this package.
+ */
+static void intel_cores_init(struct device *cpu)
+{
+       struct cpuid_result result;
+       unsigned int threads_per_package, threads_per_core, i;
+
+       /* Logical processors (threads) per core */
+       result = cpuid_ext(0xb, 0);
+       threads_per_core = result.ebx & 0xffff;
+
+       /* Logical processors (threads) per package */
+       result = cpuid_ext(0xb, 1);
+       threads_per_package = result.ebx & 0xffff;
+
+       /* Only initialize extra cores from BSP */
+       if (cpu->path.apic.apic_id)
+               return;
+
+       printk(BIOS_DEBUG, "CPU: %u has %u cores, %u threads per core\n",
+              cpu->path.apic.apic_id, threads_per_package/threads_per_core,
+              threads_per_core);
+
+       for (i = 1; i < threads_per_package; ++i) {
+               struct device_path cpu_path;
+               struct device *new;
+
+               /* Build the CPU device path */
+               cpu_path.type = DEVICE_PATH_APIC;
+               cpu_path.apic.apic_id =
+                       cpu->path.apic.apic_id + i;
+
+               /* Update APIC ID if no hyperthreading */
+               if (threads_per_core == 1)
+                       cpu_path.apic.apic_id <<= 1;
+
+               /* Allocate the new CPU device structure */
+               new = alloc_dev(cpu->bus, &cpu_path);
+               if (!new)
+                       continue;
+
+               printk(BIOS_DEBUG, "CPU: %u has core %u\n",
+                      cpu->path.apic.apic_id,
+                      new->path.apic.apic_id);
+
+               /* Start the new CPU */
+               if (is_smp_boot() && !start_cpu(new)) {
+                       /* Record the error in cpu? */
+                       printk(BIOS_ERR, "CPU %u would not start!\n",
+                              new->path.apic.apic_id);
+               }
+       }
+}
+
 static void model_206ax_init(struct device *cpu)
 {
        char processor_name[49];
@@ -430,6 +505,8 @@ static void model_206ax_init(struct device *cpu)
        /* Turn on caching if we haven't already */
        x86_enable_cache();

+       intel_update_microcode_from_cbfs();
+
        /* Clear out pending MCEs */
        configure_mca();
@@ -437,6 +514,10 @@ static void model_206ax_init(struct device *cpu)
        fill_processor_name(processor_name);
        printk(BIOS_INFO, "CPU: %s.\n", processor_name);

+       /* Setup MTRRs based on physical address size */
+       x86_setup_mtrrs_with_detect();
+       x86_mtrr_check();
+
        /* Setup Page Attribute Tables (PAT) */
        // TODO set up PAT
@@ -467,81 +548,9 @@ static void model_206ax_init(struct device *cpu)
        /* Enable Turbo */
        enable_turbo();
-}
-
-/* MP initialization support. */
-static const void *microcode_patch;
-
-static void pre_mp_init(void)
-{
-       /* Setup MTRRs based on physical address size. */
-       x86_setup_mtrrs_with_detect();
-       x86_mtrr_check();
-}
-
-static int get_cpu_count(void)
-{
-       struct cpuid_result result;
-       unsigned int threads_per_package, threads_per_core;
-
-       /* Logical processors (threads) per core */
-       result = cpuid_ext(0xb, 0);
-       threads_per_core = result.ebx & 0xffff;
-       ASSERT(threads_per_core != 0);
-
-       /* Logical processors (threads) per package */
-       result = cpuid_ext(0xb, 1);
-       threads_per_package = result.ebx & 0xffff;
-
-       printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",
-              threads_per_package / threads_per_core, threads_per_package);
-
-       return threads_per_package;
-}
-
-static void get_microcode_info(const void **microcode, int *parallel)
-{
-       microcode_patch = intel_microcode_find();
-       *microcode = microcode_patch;
-       *parallel = 1;
-}
-
-static void per_cpu_smm_trigger(void)
-{
-       /* Relocate the SMM handler. */
-       smm_relocate();
-
-       /* After SMM relocation a 2nd microcode load is required. */
-       intel_microcode_load_unlocked(microcode_patch);
-}
-
-static void post_mp_init(void)
-{
-       /* Now that all APs have been relocated as well as the BSP let SMIs
-        * start flowing. */
-       southbridge_smm_init();
-
-       /* Lock down the SMRAM space. */
-       smm_lock();
-}
-
-static const struct mp_ops mp_ops = {
-       .pre_mp_init = pre_mp_init,
-       .get_cpu_count = get_cpu_count,
-       .get_smm_info = smm_info,
-       .get_microcode_info = get_microcode_info,
-       .pre_mp_smm_init = smm_initialize,
-       .per_cpu_smm_trigger = per_cpu_smm_trigger,
-       .relocation_handler = smm_relocation_handler,
-       .post_mp_init = post_mp_init,
-};
-
-void bsp_init_and_start_aps(struct bus *cpu_bus)
-{
-       if (mp_init_with_smm(cpu_bus, &mp_ops))
-               printk(BIOS_ERR, "MP initialization failure.\n");
+
+       /* Start up extra cores */
+       intel_cores_init(cpu);
 }

 static struct device_operations cpu_dev_ops = {
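Both the restored cpu_get_apic_id_map()/intel_cores_init() and the deleted get_cpu_count() read their counts from CPUID leaf 0xB (extended topology enumeration): sub-leaf 0 reports logical processors per core in EBX[15:0], sub-leaf 1 logical processors per package. A minimal host-side sketch of the same query, using GCC's <cpuid.h> in place of coreboot's cpuid_ext(); illustrative only, not coreboot code:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;
            unsigned int per_core, per_package;

            /* Sub-leaf 0: SMT level, logical CPUs per core in EBX[15:0] */
            if (!__get_cpuid_count(0xb, 0, &eax, &ebx, &ecx, &edx))
                    return 1;
            per_core = ebx & 0xffff;
            if (!per_core)
                    return 1;

            /* Sub-leaf 1: core level, logical CPUs per package */
            if (!__get_cpuid_count(0xb, 1, &eax, &ebx, &ecx, &edx))
                    return 1;
            per_package = ebx & 0xffff;

            printf("%u cores, %u threads\n", per_package / per_core, per_package);
            return 0;
    }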


@@ -11,10 +11,6 @@
  * GNU General Public License for more details.
  */

-#include <device/device.h>
-
-void bsp_init_and_start_aps(struct bus *cpu_bus);
-
 /* These helpers are for performing SMM relocation. */
 void southbridge_smm_init(void);
 void southbridge_trigger_smi(void);
@@ -22,10 +18,3 @@ void southbridge_clear_smi_status(void);
 u32 northbridge_get_tseg_base(void);
 int cpu_get_apic_id_map(int *apic_id_map);
 void northbridge_write_smram(u8 smram);
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
-             size_t *smm_save_state_size);
-void smm_initialize(void);
-void southbridge_smm_clear_state(void);
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
-       uintptr_t staggered_smbase);
-void smm_relocate(void);


@@ -23,12 +23,10 @@
 #include <device/pci.h>
 #include <cpu/cpu.h>
 #include <cpu/x86/cache.h>
-#include <cpu/x86/mp.h>
 #include <cpu/x86/msr.h>
 #include <cpu/x86/mtrr.h>
 #include <cpu/x86/smm.h>
 #include <console/console.h>
-#include <smp/node.h>
 #include "smi.h"

 #define SMRR_SUPPORTED (1 << 11)
@@ -294,78 +292,3 @@ void smm_lock(void)
        northbridge_write_smram(D_LCK | G_SMRAME | C_BASE_SEG);
 }
-
-void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
-             size_t *smm_save_state_size)
-{
-       printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
-
-       fill_in_relocation_params(&smm_reloc_params);
-
-       setup_ied_area(&smm_reloc_params);
-
-       *perm_smbase = smm_reloc_params.smram_base;
-       *perm_smsize = smm_reloc_params.smram_size;
-       *smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
-}
-
-void smm_initialize(void)
-{
-       /* Clear the SMM state in the southbridge. */
-       southbridge_smm_clear_state();
-
-       /*
-        * Run the relocation handler for on the BSP to check and set up
-        * parallel SMM relocation.
-        */
-       smm_initiate_relocation();
-}
-
-/* The relocation work is actually performed in SMM context, but the code
- * resides in the ramstage module. This occurs by trampolining from the default
- * SMRAM entry point to here. */
-void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
-       uintptr_t staggered_smbase)
-{
-       msr_t mtrr_cap;
-       struct smm_relocation_params *relo_params = &smm_reloc_params;
-       em64t101_smm_state_save_area_t *save_state;
-       u32 smbase = staggered_smbase;
-       u32 iedbase = relo_params->ied_base;
-
-       printk(BIOS_DEBUG, "In relocation handler: cpu %d\n", cpu);
-
-       /* Make appropriate changes to the save state map. */
-       printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
-              smbase, iedbase);
-
-       save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
-                             sizeof(*save_state));
-       save_state->smbase = smbase;
-       save_state->iedbase = iedbase;
-
-       /* Write EMRR and SMRR MSRs based on indicated support. */
-       mtrr_cap = rdmsr(MTRR_CAP_MSR);
-       if (mtrr_cap.lo & SMRR_SUPPORTED && !IS_ENABLED(CONFIG_HAS_NO_SMRR))
-               write_smrr(relo_params);
-}
-
-/*
- * The default SMM entry can happen in parallel or serially. If the
- * default SMM entry is done in parallel the BSP has already setup
- * the saving state to each CPU's MSRs. At least one save state size
- * is required for the initial SMM entry for the BSP to determine if
- * parallel SMM relocation is even feasible.
- */
-void smm_relocate(void)
-{
-       /*
-        * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
-        * shall take place. Run the relocation handler a second time on the
-        * BSP to do * the final move. For APs, a relocation handler always
-        * needs to be run.
-        */
-       if (!boot_cpu())
-               smm_initiate_relocation();
-}
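The deleted smm_relocation_handler() runs in SMM context, reached by trampolining from the default SMRAM entry point, and retargets each CPU by writing a staggered SMBASE into its save state. The staggering itself is simple arithmetic: one save-state area per CPU, spaced downward from the permanent handler. A standalone sketch of that layout, where the base address and save-state size are assumed example values, not taken from this code:

    #include <stdio.h>
    #include <stdint.h>

    #define PERM_SMBASE     0xa0000 /* assumed SMRAM base for illustration */
    #define SAVE_STATE_SIZE 0x400   /* assumed per-CPU save-state footprint */

    /* Each CPU gets its own save-state area, so SMBASE values are spaced
     * one footprint apart, growing downward from the permanent handler. */
    static uintptr_t staggered_smbase(int cpu)
    {
            return PERM_SMBASE - (uintptr_t)cpu * SAVE_STATE_SIZE;
    }

    int main(void)
    {
            for (int cpu = 0; cpu < 4; cpu++)
                    printf("cpu %d: SMBASE=0x%jx\n", cpu,
                           (uintmax_t)staggered_smbase(cpu));
            return 0;
    }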


@@ -525,7 +525,7 @@ static const struct pci_driver mc_driver_158 __pci_driver = {

 static void cpu_bus_init(device_t dev)
 {
-       bsp_init_and_start_aps(dev->link_list);
+       initialize_cpus(dev->link_list);
 }

 static struct device_operations cpu_bus_ops = {


@@ -22,7 +22,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <elog.h>
-#include <southbridge/intel/common/pmutils.h>
+#include "pch.h"

 void pch_log_state(void)
 {


@@ -68,6 +68,9 @@ int pch_silicon_revision(void);
 int pch_silicon_type(void);
 int pch_silicon_supported(int type, int rev);
 void pch_iobp_update(u32 address, u32 andvalue, u32 orvalue);
+#if IS_ENABLED(CONFIG_ELOG)
+void pch_log_state(void);
+#endif
 #else /* __PRE_RAM__ */
 void enable_smbus(void);
 void enable_usb_bar(void);


@@ -123,6 +123,5 @@ void southbridge_update_gnvs(u8 apm_cnt, int *smm_done);
 void southbridge_finalize_all(void);
 void southbridge_smi_monitor(void);
 em64t101_smm_state_save_area_t *smi_apmc_find_state_save(u8 cmd);
-void pch_log_state(void);

 #endif /*INTEL_COMMON_PMUTIL_H */


@@ -159,29 +159,3 @@ void smm_setup_structures(void *gnvs, void *tcg, void *smi1)
                      "d" (APM_CNT)
        );
 }
-
-void southbridge_smm_clear_state(void)
-{
-       u32 smi_en;
-
-       if (IS_ENABLED(CONFIG_ELOG))
-               /* Log events from chipset before clearing */
-               pch_log_state();
-
-       printk(BIOS_DEBUG, "Initializing Southbridge SMI...");
-       printk(BIOS_SPEW, " ... pmbase = 0x%04x\n", get_pmbase());
-
-       smi_en = inl(get_pmbase() + SMI_EN);
-       if (smi_en & APMC_EN) {
-               printk(BIOS_INFO, "SMI# handler already enabled?\n");
-               return;
-       }
-
-       printk(BIOS_DEBUG, "\n");
-
-       /* Dump and clear status registers */
-       reset_smi_status();
-       reset_pm1_status();
-       reset_tco_status();
-       reset_gpe0_status();
-}
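The deleted southbridge_smm_clear_state() bails out early when APMC SMIs are already enabled (APMC_EN set in SMI_EN at PMBASE+0x30) before dumping and clearing the status registers. A host-side sketch of that probe via port I/O: the PMBASE value is an assumed example, the offset and bit follow ICH-family datasheets, and the program needs root plus ioperm() on Linux (firmware code would use coreboot's <arch/io.h> instead):

    #include <stdio.h>
    #include <sys/io.h>

    #define PMBASE  0x500     /* assumed ACPI PM I/O base */
    #define SMI_EN  0x30      /* SMI_EN offset in PM I/O space (ICH) */
    #define APMC_EN (1 << 5)  /* APMC writes trigger SMI when set */

    int main(void)
    {
            /* Request access to the PM I/O window. */
            if (ioperm(PMBASE, 0x80, 1))
                    return 1;

            unsigned int smi_en = inl(PMBASE + SMI_EN);
            printf("SMI_EN=0x%08x, APMC_EN %s\n", smi_en,
                   (smi_en & APMC_EN) ? "set" : "clear");
            return 0;
    }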