cpu/amd/smm: Move MP & SMM init in a common place

Change-Id: I7c457ab69581f8c29f2d79c054ca3bc7e58a896e
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/64870
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Authored by Arthur Heymans on 2022-05-31 21:48:15 +02:00; committed by Felix Held
parent 44807acaef
commit e48dcb708c
6 changed files with 38 additions and 140 deletions
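
In short, the per-SoC copies of pre_mp_init(), the mp_ops table and the SMM relocation glue are dropped, and a single shared amd_mp_ops_with_smm table is defined once, apparently in the common AMD SMM code (the file paths are not shown in this view). Condensed from the hunks below, the resulting pattern looks roughly like this (a sketch only; every name is taken from the diff itself):

/* Common AMD block code now owns the MP/SMM ops table. */
const struct mp_ops amd_mp_ops_with_smm = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};

/* Each SoC's mp_init_cpus() now only references the shared table. */
void mp_init_cpus(struct bus *cpu_bus)
{
	extern const struct mp_ops amd_mp_ops_with_smm;

	if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
		die_with_post_code(POST_HW_INIT_FAILURE,
				   "mp_init_with_smm failed. Halting.\n");
}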

@@ -1,57 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <acpi/acpi.h>
#include <amdblocks/cpu.h>
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
#include <amdblocks/reset.h>
#include <amdblocks/smm.h>
#include <assert.h>
#include <console/console.h>
#include <cpu/amd/microcode.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <acpi/acpi.h>
#include <device/device.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <types.h>
_Static_assert(CONFIG_MAX_CPUS == 16, "Do not override MAX_CPUS. To reduce the number of "
"available cores, use the downcore_mode and disable_smt devicetree settings instead.");
/* MP and SMM loading initialization */
/*
 * Do essential initialization tasks before APs can be fired up -
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
	extern const struct mp_ops amd_mp_ops_with_smm;
	if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
		die_with_post_code(POST_HW_INIT_FAILURE,
				   "mp_init_with_smm failed. Halting.\n");

@@ -1,15 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <cpu/x86/mtrr.h>
#include <cpu/x86/mp.h>
#include <amdblocks/cpu.h>
#include <amdblocks/smm.h>
#include <console/console.h>
#include <cpu/amd/amd64_save_state.h>
#include <cpu/amd/msr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/smm.h>
#include <types.h>
void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size)
/* AP MTRRs will be synced to the BSP in the SIPI vector so set them up before MP init. */
static void pre_mp_init(void)
{
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
@@ -33,7 +49,7 @@ void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_
	*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
}

void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
static void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
{
	amd64_smm_state_save_area_t *smm_state;
@@ -55,3 +71,11 @@ void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_
	smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}

const struct mp_ops amd_mp_ops_with_smm = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};

@@ -6,8 +6,6 @@
#include <cpu/x86/msr.h>
#include <types.h>
void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize, size_t *smm_save_state_size);
void smm_relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase);
void *get_smi_source_handler(int source);
void handle_smi_gsmi(void);
void handle_smi_store(void);

@@ -6,55 +6,25 @@
#include <amdblocks/cpu.h>
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
#include <amdblocks/reset.h>
#include <amdblocks/smm.h>
#include <assert.h>
#include <console/console.h>
#include <cpu/amd/microcode.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <acpi/acpi.h>
#include <device/device.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <types.h>
_Static_assert(CONFIG_MAX_CPUS == 8, "Do not override MAX_CPUS. To reduce the number of "
"available cores, use the downcore_mode and disable_smt devicetree settings instead.");
/* MP and SMM loading initialization */
/*
 * Do essential initialization tasks before APs can be fired up -
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
	extern const struct mp_ops amd_mp_ops_with_smm;
	if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
		die_with_post_code(POST_HW_INIT_FAILURE,
				   "mp_init_with_smm failed. Halting.\n");

@@ -4,58 +4,24 @@
#include <amdblocks/cpu.h>
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
#include <amdblocks/reset.h>
#include <amdblocks/smm.h>
#include <assert.h>
#include <console/console.h>
#include <cpu/amd/microcode.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <device/pci_ops.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/smi.h>
#include <types.h>
_Static_assert(CONFIG_MAX_CPUS == 8, "Do not override MAX_CPUS. To reduce the number of "
"available cores, use the downcore_mode and disable_smt devicetree settings instead.");
/* MP and SMM loading initialization. */
/*
 * Do essential initialization tasks before APs can be fired up -
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
	extern const struct mp_ops amd_mp_ops_with_smm;
	if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
		die_with_post_code(POST_HW_INIT_FAILURE,
				   "mp_init_with_smm failed. Halting.\n");

@@ -4,56 +4,25 @@
#include <amdblocks/iomap.h>
#include <amdblocks/mca.h>
#include <amdblocks/reset.h>
#include <amdblocks/smm.h>
#include <console/console.h>
#include <cpu/amd/msr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/x86/msr.h>
#include <device/device.h>
#include <device/pci_ops.h>
#include <soc/pci_devs.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/northbridge.h>
#include <soc/pci_devs.h>
#include <soc/smi.h>
#include <types.h>
#include <console/console.h>
/*
* MP and SMM loading initialization.
*/
/*
 * Do essential initialization tasks before APs can be fired up -
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	const msr_t syscfg = rdmsr(SYSCFG_MSR);
	if (syscfg.lo & SYSCFG_MSR_TOM2WB)
		x86_setup_mtrrs_with_detect_no_above_4gb();
	else
		x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}

static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = global_smi_enable,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
	extern const struct mp_ops amd_mp_ops_with_smm;
	if (mp_init_with_smm(cpu_bus, &amd_mp_ops_with_smm) != CB_SUCCESS)
		die_with_post_code(POST_HW_INIT_FAILURE,
				   "mp_init_with_smm failed. Halting.\n");