soc/intel/apollolake: Provide option to use Common MP Init

This patch provides an option to use the common CPU MP init code,
selected through a Kconfig token.

When the CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT config token is
selected, the common MP init code is used and CPU MP init is done
before FSP-S init.

If the config token is not selected, the existing implementation
remains in place, where MP init is done after FSP-S.
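
The ordering difference can be summarized with a minimal, self-contained
C sketch (the function names below are illustrative stand-ins only, not
the actual coreboot call sites):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real stages; names are hypothetical. */
static void fsp_silicon_init(void) { puts("FSP-S silicon init"); }
static void cpu_mp_init(void) { puts("CPU MP init + SMM relocation"); }

/* Ordering selected by the config token described above. */
static void ramstage_cpu_flow(bool common_mpinit)
{
	if (common_mpinit) {
		/* Common MP init path: APs come up before FSP-S runs. */
		cpu_mp_init();
		fsp_silicon_init();
	} else {
		/* Existing SoC path: FSP-S runs first, MP init follows. */
		fsp_silicon_init();
		cpu_mp_init();
	}
}

int main(void)
{
	ramstage_cpu_flow(true);	/* config token selected */
	ramstage_cpu_flow(false);	/* config token not selected */
	return 0;
}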

CQ-DEPEND=CL:*397551
BUG=none
BRANCH=none
TEST=Build and boot Reef

Change-Id: I35d012785000d3f3bfcc34138cda9cd4591559f6
Signed-off-by: Barnali Sarkar <barnali.sarkar@intel.com>
Reviewed-on: https://review.coreboot.org/20895
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Barnali Sarkar 2017-08-07 18:26:31 +05:30 committed by Aaron Durbin
parent 65c8be4ccb
commit 1e6b980b1e
2 changed files with 74 additions and 20 deletions

src/soc/intel/apollolake/Kconfig

@@ -315,6 +315,27 @@ config USE_APOLLOLAKE_FSP_CAR
endchoice

choice
	prompt "MPINIT code implementation"
	default NO_COMMON_MPINIT if SOC_INTEL_APOLLOLAKE
	default COMMON_MPINIT
	help
	  This option allows you to select MP Init Code path either
	  from Intel Common Code implementation, or from SOC files.

config NO_COMMON_MPINIT
	bool "Not using Common MP Init code"
	help
	  Common code MP Init path is not used.

config COMMON_MPINIT
	bool "Using Common MP Init code"
	select SOC_INTEL_COMMON_BLOCK_CPU_MPINIT
	help
	  Common code MP Init path is used.

endchoice

#
# Each bit in QOS mask controls this many bytes. This is calculated as:
# (CACHE_WAYS / CACHE_BITS_PER_MASK) * CACHE_LINE_SIZE * CACHE_SETS
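
For illustration only (these cache parameters are assumptions, not taken
from this patch): with 16 cache ways, 8 bits per QOS mask, 64-byte cache
lines and 1024 sets, each mask bit would control
(16 / 8) * 64 * 1024 = 131072 bytes, i.e. 128 KiB.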

src/soc/intel/apollolake/cpu.c

@@ -21,20 +21,23 @@
#include <cpu/x86/cache.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <device/device.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <fsp/memmap.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/fast_spi.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <intelblocks/smm.h>
#include <reg_script.h>
#include <romstage_handoff.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/pm.h>
#include <cpu/intel/turbo.h>
static const struct reg_script core_msr_script[] = {
	/* Enable C-state and IO/MWAIT redirect */
@@ -57,7 +60,7 @@ static const struct reg_script core_msr_script[] = {
	REG_SCRIPT_END
};
static void soc_core_init(device_t cpu)
void soc_core_init(device_t cpu, const void *microcode)
{
	/* Set core MSRs */
	reg_script_run(core_msr_script);
@@ -69,8 +72,14 @@ static void soc_core_init(device_t cpu)
	enable_pm_timer_emulation();
}
#if !IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
static void soc_init_core(device_t cpu)
{
	soc_core_init(cpu, NULL);
}
static struct device_operations cpu_dev_ops = {
	.init = soc_core_init,
	.init = soc_init_core,
};
static struct cpu_device_id cpu_table[] = {
@@ -85,6 +94,7 @@ static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
#endif
/*
* MP and SMM loading initialization.
@@ -97,6 +107,34 @@ struct smm_relocation_attrs {
static struct smm_relocation_attrs relo_attrs;
/*
* Do essential initialization tasks before APs can be fired up.
*
* IF (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) -
* Skip Pre MP init MTRR programming, as MTRRs are mirrored from BSP,
* that are set prior to ramstage.
* Real MTRRs are programmed after resource allocation.
*
* Do FSP loading before MP Init to ensure that the FSP component stored in
* external stage cache in TSEG does not flush off due to SMM relocation
* during MP Init stage.
*
* ELSE -
* Enable MTRRs on the BSP. This creates the MTRR solution that the
* APs will use. Otherwise APs will try to apply the incomplete solution
* as the BSP is calculating it.
*/
static void pre_mp_init(void)
{
	if (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) {
		fsps_load(romstage_handoff_is_resume());
		return;
	}
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}
#if !IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt)
{
	msr_t msr;
@@ -105,21 +143,8 @@ static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt)
	*num_phys = (msr.lo >> 16) & 0xffff;
}
/*
* Do essential initialization tasks before APs can be fired up -
*
* 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
* creates the MTRR solution that the APs will use. Otherwise APs will try to
* apply the incomplete solution as the BSP is calculating it.
*/
static void pre_mp_init(void)
{
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}
/* Find CPU topology */
static int get_cpu_count(void)
int get_cpu_count(void)
{
	unsigned int num_virt_cores, num_phys_cores;
@@ -131,7 +156,7 @@ static int get_cpu_count(void)
	return num_virt_cores;
}
static void get_microcode_info(const void **microcode, int *parallel)
void get_microcode_info(const void **microcode, int *parallel)
{
	*microcode = intel_microcode_find();
	*parallel = 1;
@@ -139,6 +164,7 @@ static void get_microcode_info(const void **microcode, int *parallel)
	/* Make sure BSP is using the microcode from cbfs */
	intel_microcode_load_unlocked(*microcode);
}
#endif
static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
	size_t *smm_save_state_size)
@@ -197,11 +223,18 @@ static const struct mp_ops mp_ops = {
	.post_mp_init = smm_southbridge_enable,
};
void apollolake_init_cpus(struct device *dev)
void soc_init_cpus(struct bus *cpu_bus, const void *microcode)
{
	/* Clear for take-off */
	if (mp_init_with_smm(dev->link_list, &mp_ops) < 0)
	if (mp_init_with_smm(cpu_bus, &mp_ops))
		printk(BIOS_ERR, "MP initialization failure.\n");
}
void apollolake_init_cpus(struct device *dev)
{
	if (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT))
		return;
	soc_init_cpus(dev->link_list, NULL);
	/* Temporarily cache the memory-mapped boot media. */
	if (IS_ENABLED(CONFIG_BOOT_DEVICE_MEMORY_MAPPED) &&