soc/intel/xeon_sp/spr: Add Sapphire Rapids ramstage code

It implements the SPR ramstage, including silicon initialization, MSR
programming, MP init and the locking of certain registers before
booting to the payload.

Change-Id: I128fdc6e58c49fb5abf911d6ffa91e7411f6d1e2
Signed-off-by: Jonathan Zhang <jonzhang@meta.com>
Signed-off-by: Johnny Lin <johnny_lin@wiwynn.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/72443
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Lean Sheng Tan <sheng.tan@9elements.com>
Jonathan Zhang, 2023-01-25 11:37:27 -08:00, committed by Lean Sheng Tan
parent 15fc45982b
commit 3ed903fda9
11 changed files with 869 additions and 3 deletions

@@ -10,6 +10,10 @@
void xeon_sp_init_cpus(struct device *dev);
void mainboard_silicon_init_params(FSPS_UPD *params);
void mainboard_override_fsp_gpio(void);
/* lock or unlock community B and D pads after FSP-S */
void lock_gpio(bool lock);
void mainboard_override_usb_oc(void);
extern struct pci_operations soc_pci_ops;

@@ -0,0 +1,21 @@
## SPDX-License-Identifier: GPL-2.0-only
ifeq ($(CONFIG_SOC_INTEL_SAPPHIRERAPIDS_SP),y)
subdirs-y += ../../../../cpu/intel/turbo
subdirs-y += ../../../../cpu/x86/lapic
subdirs-y += ../../../../cpu/x86/mtrr
subdirs-y += ../../../../cpu/x86/tsc
subdirs-y += ../../../../cpu/intel/microcode
romstage-y += romstage.c soc_util.c ddr.c
romstage-$(CONFIG_DISPLAY_HOBS) += hob_display.c
romstage-$(CONFIG_DISPLAY_UPD_DATA) += upd_display.c
ramstage-y += chip.c cpu.c soc_util.c ramstage.c soc_acpi.c xhci.c numa.c reset.c
ramstage-y += crashlog.c
ramstage-$(CONFIG_DISPLAY_HOBS) += hob_display.c
ramstage-$(CONFIG_DISPLAY_UPD_DATA) += upd_display.c
CPPFLAGS_common += -I$(src)/soc/intel/xeon_sp/spr/include -I$(src)/soc/intel/xeon_sp/spr
endif ## CONFIG_SOC_INTEL_SAPPHIRERAPIDS_SP

@@ -0,0 +1,232 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <arch/ioapic.h>
#include <console/console.h>
#include <console/debug.h>
#include <cpu/x86/lapic.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pciexp.h>
#include <intelblocks/gpio.h>
#include <intelblocks/lpc_lib.h>
#include <intelblocks/p2sb.h>
#include <intelblocks/pcr.h>
#include <intelblocks/tco.h>
#include <soc/acpi.h>
#include <soc/chip_common.h>
#include <soc/crashlog.h>
#include <soc/numa.h>
#include <soc/p2sb.h>
#include <soc/pch.h>
#include <soc/soc_pch.h>
#include <soc/pci_devs.h>
#include <soc/ramstage.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <soc/xhci.h>
__weak void mainboard_silicon_init_params(FSPS_UPD *params)
{
}
/* UPD parameters to be initialized before SiliconInit */
void platform_fsp_silicon_init_params_cb(FSPS_UPD *silupd)
{
mainboard_silicon_init_params(silupd);
}
#if CONFIG(HAVE_ACPI_TABLES)
const char *soc_acpi_name(const struct device *dev);
const char *soc_acpi_name(const struct device *dev)
{
if (dev->path.type == DEVICE_PATH_DOMAIN)
return "PC00";
return NULL;
}
#endif
static struct device_operations pci_domain_ops = {
.read_resources = &pci_domain_read_resources,
.set_resources = &xeonsp_pci_domain_set_resources,
.scan_bus = &xeonsp_pci_domain_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
.write_acpi_tables = &northbridge_write_acpi_tables,
.acpi_name = soc_acpi_name
#endif
};
static struct device_operations cpu_bus_ops = {
.read_resources = noop_read_resources,
.set_resources = noop_set_resources,
.init = mp_cpu_bus_init,
.acpi_fill_ssdt = generate_cpu_entries,
};
struct pci_operations soc_pci_ops = {
.set_subsystem = pci_dev_set_subsystem,
};
static void chip_enable_dev(struct device *dev)
{
/* Set the operations if it is a special bus type */
if (dev->path.type == DEVICE_PATH_DOMAIN) {
dev->ops = &pci_domain_ops;
attach_iio_stacks(dev);
} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
dev->ops = &cpu_bus_ops;
} else if (dev->path.type == DEVICE_PATH_GPIO) {
block_gpio_enable(dev);
}
}
static void pcu_pci_or_config32(u8 bus, u8 func, u32 reg, u32 orval)
{
u32 data;
const uint32_t pcie_offset = PCI_DEV(bus, PCU_DEV, func);
data = pci_s_read_config32(pcie_offset, reg);
data |= orval;
pci_s_write_config32(pcie_offset, reg, data);
}
static void set_pcu_locks(void)
{
for (uint32_t socket = 0; socket < soc_get_num_cpus(); ++socket) {
const uint32_t bus = get_ubox_busno(socket, UNCORE_BUS_1);
/* configure PCU_CR0_FUN csrs */
pcu_pci_or_config32(bus, PCU_CR0_FUN, PCU_CR0_P_STATE_LIMITS,
P_STATE_LIMITS_LOCK);
pcu_pci_or_config32(bus, PCU_CR0_FUN, PCU_CR0_PACKAGE_RAPL_LIMIT_UPR,
PKG_PWR_LIM_LOCK_UPR);
pcu_pci_or_config32(bus, PCU_CR0_FUN, PCU_CR0_TURBO_ACTIVATION_RATIO,
TURBO_ACTIVATION_RATIO_LOCK);
/* configure PCU_CR2_FUN csrs */
pcu_pci_or_config32(bus, PCU_CR2_FUN, PCU_CR2_DRAM_POWER_INFO_UPR,
DRAM_POWER_INFO_LOCK_UPR);
pcu_pci_or_config32(bus, PCU_CR2_FUN, PCU_CR2_DRAM_PLANE_POWER_LIMIT_UPR,
PP_PWR_LIM_LOCK_UPR);
/* configure PCU_CR3_FUN csrs */
pcu_pci_or_config32(bus, PCU_CR3_FUN, PCU_CR3_CONFIG_TDP_CONTROL, TDP_LOCK);
/* configure PCU_CR6_FUN csrs */
pcu_pci_or_config32(bus, PCU_CR6_FUN, PCU_CR6_PLATFORM_RAPL_LIMIT_CFG_UPR,
PLT_PWR_LIM_LOCK_UPR);
pcu_pci_or_config32(bus, PCU_CR6_FUN, PCU_CR6_PLATFORM_POWER_INFO_CFG_UPR,
PLT_PWR_INFO_LOCK_UPR);
}
}
static void chip_final(void *data)
{
/* Lock SBI */
pci_or_config32(PCH_DEV_P2SB, P2SBC, SBILOCK);
/* LOCK PAM */
pci_or_config32(pcidev_path_on_root(PCI_DEVFN(0, 0)), 0x80, 1 << 0);
set_pcu_locks();
tco_lockdown();
p2sb_hide();
/* Accessing xHCI CSR needs to be done after PCI enumeration. */
lock_oc_cfg(false);
mainboard_override_usb_oc();
lock_oc_cfg(true);
/* Disable CPU Crashlog to avoid conflict between CPU Crashlog and BMC ACD. */
disable_cpu_crashlog();
set_bios_init_completion();
}
static void chip_init(void *data)
{
printk(BIOS_DEBUG, "coreboot: calling fsp_silicon_init\n");
fsp_silicon_init();
override_hpet_ioapic_bdf();
pch_enable_ioapic();
pch_lock_dmictl();
p2sb_unhide();
lock_gpio(false);
mainboard_override_fsp_gpio();
lock_gpio(true);
}
struct chip_operations soc_intel_xeon_sp_spr_ops = {
CHIP_NAME("Intel SapphireRapids-SP").enable_dev = chip_enable_dev,
.init = chip_init,
.final = chip_final,
};
void lock_gpio(bool lock)
{
if (lock) {
pcr_write32(gpio_get_pad_portid(GPPC_B0), PAD_CFG_LOCK_B, 0xffffffff);
pcr_write32(gpio_get_pad_portid(GPP_D0), PAD_CFG_LOCK_D, 0xffffffff);
} else {
pcr_write32(gpio_get_pad_portid(GPPC_B0), PAD_CFG_LOCK_B, 0);
pcr_write32(gpio_get_pad_portid(GPP_D0), PAD_CFG_LOCK_D, 0);
}
}
/* Root Complex Event Collector */
static void rcec_init(struct device *dev)
{
/* Set up RCEC EA extended capability, section 7.9.10 of PCIe 5.0 spec */
const unsigned int rcecea_cap =
pciexp_find_extended_cap(dev, PCIE_EXT_CAP_RCECEA_ID, 0);
if (!rcecea_cap)
return;
pci_devfn_t ecrc_bdf = PCI_BDF(dev);
uint32_t ecrc_bus = (ecrc_bdf >> 20) & 0xFFF;
uint32_t ecrc_dev = (ecrc_bdf >> 15) & 0x1F;
/*
* Find all CXL devices, and match them with RCEC.
* With CXL 1.1, the bus# of CXL device (RCiEP) is 1 bigger than
* the bus# of RCEC.
*/
uint32_t ep_bus;
uint8_t i;
for (i = 0; i < pds.num_pds; i++) {
if (pds.pds[i].pd_type == PD_TYPE_PROCESSOR)
continue;
ep_bus = pds.pds[i].device_handle >> 20;
if (ep_bus == ecrc_bus + 1)
break;
}
if (i == pds.num_pds)
return;
printk(BIOS_DEBUG, "ep_bus: %x, ecrc_dev: %x\n", ep_bus, ecrc_dev);
u32 rcecea_bitmap = 0x1 << ecrc_dev;
u32 rcecea_busnum = (ep_bus << 8) | (ep_bus << 16);
pci_write_config32(dev, rcecea_cap + PCI_RCECEA_BITMAP, rcecea_bitmap);
pci_write_config32(dev, rcecea_cap + PCI_RCECEA_BUSNUM, rcecea_busnum);
}
#define SPR_IEH 0x0b23
static const unsigned short rcec_ids[] = {
SPR_IEH,
0
};
static struct device_operations rcec_ops = {
.read_resources = pci_dev_read_resources,
.set_resources = pci_dev_set_resources,
.enable_resources = pci_dev_enable_resources,
.init = rcec_init,
.ops_pci = &soc_pci_ops,
};
static const struct pci_driver rcec_driver __pci_driver = {
.ops = &rcec_ops,
.vendor = PCI_VID_INTEL,
.devices = rcec_ids,
};
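Note: chip_init() above deliberately brackets the board hook between lock_gpio(false) and lock_gpio(true), so a mainboard can adjust pads that FSP-S programmed while the pad-configuration lock is open. A minimal sketch of such a board-side override, assuming the common Intel GPIO block API (gpio_configure_pads()/PAD_CFG_GPO); the pad GPPC_B2 and its settings are illustrative and not part of this change:

/* Hypothetical board code: runs while lock_gpio(false) holds the pads open. */
#include <gpio.h>
#include <soc/ramstage.h>

static const struct pad_config board_gpio_overrides[] = {
	PAD_CFG_GPO(GPPC_B2, 1, DEEP),	/* illustrative pad and value */
};

void mainboard_override_fsp_gpio(void)
{
	gpio_configure_pads(board_gpio_overrides, ARRAY_SIZE(board_gpio_overrides));
}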

@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _SOC_CHIP_H_
#define _SOC_CHIP_H_
#include <gpio.h>
#include <intelblocks/cfg.h>
#include <soc/acpi.h>
#include <soc/irq.h>
#include <stdint.h>
#define MAX_PCH_PCIE_PORT 20
/**
UPD_PCH_PCIE_PORT:
ForceEnable - Enable/Disable PCH PCIe port
PortLinkSpeed - Port Link Speed. Use PCIE_LINK_SPEED to set
**/
struct pch_pcie_port {
uint8_t ForceEnable;
uint8_t PortLinkSpeed;
};
struct soc_intel_xeon_sp_spr_config {
/* Common struct containing soc config data required by common code */
struct soc_intel_common_config common_soc_config;
/* Struct for configuring PCH PCIe port */
struct pch_pcie_port pch_pci_port[MAX_PCH_PCIE_PORT];
/**
* Interrupt Routing configuration
* If bit7 is 1, the interrupt is disabled.
*/
uint8_t pirqa_routing;
uint8_t pirqb_routing;
uint8_t pirqc_routing;
uint8_t pirqd_routing;
uint8_t pirqe_routing;
uint8_t pirqf_routing;
uint8_t pirqg_routing;
uint8_t pirqh_routing;
/**
* Device Interrupt Routing configuration
* Interrupt Pin x Route.
* 0h = PIRQA#
* 1h = PIRQB#
* 2h = PIRQC#
* 3h = PIRQD#
* 4h = PIRQE#
* 5h = PIRQF#
* 6h = PIRQG#
* 7h = PIRQH#
*/
uint16_t ir00_routing;
uint16_t ir01_routing;
uint16_t ir02_routing;
uint16_t ir03_routing;
uint16_t ir04_routing;
/**
* Device Interrupt Polarity Control
* ipc0 - IRQ-00-31 - 1: Active low to IOAPIC, 0: Active high to IOAPIC
* ipc1 - IRQ-32-63 - 1: Active low to IOAPIC, 0: Active high to IOAPIC
* ipc2 - IRQ-64-95 - 1: Active low to IOAPIC, 0: Active high to IOAPIC
* ipc3 - IRQ-96-119 - 1: Active low to IOAPIC, 0: Active high to IOAPIC
*/
uint32_t ipc0;
uint32_t ipc1;
uint32_t ipc2;
uint32_t ipc3;
uint64_t turbo_ratio_limit;
uint64_t turbo_ratio_limit_cores;
uint32_t pstate_req_ratio;
uint8_t vtd_support;
uint8_t x2apic;
/* Generic IO decode ranges */
uint32_t gen1_dec;
uint32_t gen2_dec;
uint32_t gen3_dec;
uint32_t gen4_dec;
/* TCC activation offset */
uint32_t tcc_offset;
enum acpi_cstate_mode cstate_states;
};
typedef struct soc_intel_xeon_sp_spr_config config_t;
#endif
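Note: the fields above are set per board in the devicetree (see the devicetree hunk below) and read back at runtime through the chip_info pointer. A minimal sketch of how ramstage code fetches them, assuming coreboot's config_of_soc() helper; the dump function itself is illustrative and not part of this change:

#include <console/console.h>
#include <device/device.h>
#include "chip.h"

static void dump_soc_config(void)
{
	const config_t *config = config_of_soc();	/* chip_info of the SoC device */

	printk(BIOS_DEBUG, "gen2_dec: 0x%x, cstates: %d\n",
	       config->gen2_dec, config->cstate_states);
}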

@@ -32,9 +32,7 @@ chip soc/intel/xeon_sp/spr
register "gen2_dec" = "0x000c0ca1" # IPMI KCS
register "cstate_states" = "CSTATES_C1C6"
-	device cpu_cluster 0 on
-		device lapic 0 on end
-	end
+	device cpu_cluster 0 on end
device domain 0 on
device pci 00.0 on end # Intel device 09a2: Memory Map/Intel VT-d
device pci 00.1 on end # Intel device 09a4: Mesh to IAL

@@ -0,0 +1,281 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <acpi/acpigen.h>
#include <acpi/acpi.h>
#include <console/console.h>
#include <console/debug.h>
#include <cpu/cpu.h>
#include <cpu/intel/cpu_ids.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/em64t101_save_state.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
#include <device/pci_mmio_cfg.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/mp_init.h>
#include <intelpch/lockdown.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <soc/soc_util.h>
#include <soc/smmrelocate.h>
#include <soc/util.h>
#include "chip.h"
static const void *microcode_patch;
static const config_t *chip_config = NULL;
bool cpu_soc_is_in_untrusted_mode(void)
{
return false;
}
void cpu_soc_bios_done(void)
{
}
static void xeon_configure_mca(void)
{
msr_t msr;
struct cpuid_result cpuid_regs;
/*
* Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
* and CPUID.(EAX=1):EDX[14]==1 MCA
*/
cpuid_regs = cpuid(1);
if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
return;
msr = rdmsr(IA32_MCG_CAP);
if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
/* Enable all error logging */
msr.lo = msr.hi = 0xffffffff;
wrmsr(IA32_MCG_CTL, msr);
}
mca_configure();
}
/*
* On server platforms the FIT mechanism only updates the microcode on
* the BSP. Loading the MCU on APs in parallel seems to fail in 10% of
* the cases, so do it serialized.
*/
void get_microcode_info(const void **microcode, int *parallel)
{
*microcode = intel_microcode_find();
*parallel = 0;
}
static void each_cpu_init(struct device *cpu)
{
msr_t msr;
printk(BIOS_SPEW, "%s dev: %s, cpu: %lu, apic_id: 0x%x\n", __func__, dev_path(cpu),
cpu_index(), cpu->path.apic.apic_id);
/*
* Enable PWR_PERF_PLTFRM_OVR and PROCHOT_LOCK.
* The value set by FSP is 0x20_005f; we set it to 0x1a_00a4_005b.
*/
msr = rdmsr(MSR_POWER_CTL);
msr.lo |= (0x16 << RESERVED1_SHIFT) | PWR_PERF_PLTFRM_OVR | PROCHOT_LOCK;
msr.hi = 0x1a;
wrmsr(MSR_POWER_CTL, msr);
/* Set static, idle, dynamic load line impedance */
msr = rdmsr(MSR_VR_MISC_CONFIG);
msr.lo = 0x1a1a1a;
wrmsr(MSR_VR_MISC_CONFIG, msr);
/* Set current limitation */
msr = rdmsr(MSR_VR_CURRENT_CONFIG);
msr.lo = 0x1130;
msr.lo |= CURRENT_LIMIT_LOCK;
wrmsr(MSR_VR_CURRENT_CONFIG, msr);
/* Set Turbo Ratio Limits */
msr.lo = chip_config->turbo_ratio_limit & 0xffffffff;
msr.hi = (chip_config->turbo_ratio_limit >> 32) & 0xffffffff;
wrmsr(MSR_TURBO_RATIO_LIMIT, msr);
/* Set Turbo Ratio Limit Cores */
msr.lo = chip_config->turbo_ratio_limit_cores & 0xffffffff;
msr.hi = (chip_config->turbo_ratio_limit_cores >> 32) & 0xffffffff;
wrmsr(MSR_TURBO_RATIO_LIMIT_CORES, msr);
/* Set energy policy */
msr = rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG);
msr.lo = 0x178fa038;
wrmsr(MSR_ENERGY_PERF_BIAS_CONFIG, msr);
msr.hi = 0x158d20;
msr.lo = 0x00158af0;
wrmsr(PACKAGE_RAPL_LIMIT, msr);
/*
* Set HWP base feature, EPP reg enumeration, lock thermal and msr
* This is package level MSR. Need to check if it updates correctly on
* multi-socket platform.
*/
msr = rdmsr(MSR_MISC_PWR_MGMT);
if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked skip update */
msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR
| LOCK_THERM_INT);
wrmsr(MSR_MISC_PWR_MGMT, msr);
}
/* Enable Fast Strings */
msr = rdmsr(IA32_MISC_ENABLE);
msr.lo |= FAST_STRINGS_ENABLE_BIT;
wrmsr(IA32_MISC_ENABLE, msr);
/* Enable Turbo */
enable_turbo();
/* Enable speed step. */
if (get_turbo_state() == TURBO_ENABLED) {
msr = rdmsr(IA32_MISC_ENABLE);
msr.lo |= SPEED_STEP_ENABLE_BIT;
wrmsr(IA32_MISC_ENABLE, msr);
}
/* Lock the supported Cstates */
msr = rdmsr(MSR_PKG_CST_CONFIG_CONTROL);
msr.lo |= CST_CFG_LOCK_MASK;
wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);
/* Disable all writes to overclocking limits MSR */
msr = rdmsr(MSR_FLEX_RATIO);
msr.lo |= MSR_FLEX_RATIO_OC_LOCK;
wrmsr(MSR_FLEX_RATIO, msr);
/* Lock Power Plane Limit MSR */
msr = rdmsr(MSR_DRAM_PLANE_POWER_LIMIT);
msr.hi |= MSR_HI_PP_PWR_LIM_LOCK;
wrmsr(MSR_DRAM_PLANE_POWER_LIMIT, msr);
/* Clear out pending MCEs */
xeon_configure_mca();
/* Enable Vmx */
// set_vmx_and_lock();
/* only lock. let vmx enable by FSP */
set_feature_ctrl_lock();
}
static struct device_operations cpu_dev_ops = {
.init = each_cpu_init,
};
static const struct cpu_device_id cpu_table[] = {
{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_D},
{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E0},
{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E2},
{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E3},
{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_E4},
{X86_VENDOR_INTEL, CPUID_SAPPHIRERAPIDS_SP_Ex},
{0, 0},
};
static const struct cpu_driver driver __cpu_driver = {
.ops = &cpu_dev_ops,
.id_table = cpu_table,
};
static void set_max_turbo_freq(void)
{
msr_t msr, perf_ctl;
FUNC_ENTER();
perf_ctl.hi = 0;
/* Check for configurable TDP option */
if (get_turbo_state() == TURBO_ENABLED) {
msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
perf_ctl.lo = (msr.lo & 0xff) << 8;
} else if (cpu_config_tdp_levels()) {
/* Set to nominal TDP ratio */
msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
perf_ctl.lo = (msr.lo & 0xff) << 8;
} else {
/* Platform Info bits 15:8 give max ratio */
msr = rdmsr(MSR_PLATFORM_INFO);
perf_ctl.lo = msr.lo & 0xff00;
}
wrmsr(IA32_PERF_CTL, perf_ctl);
printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
((perf_ctl.lo >> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ);
FUNC_EXIT();
}
/*
* Do essential initialization tasks before APs can be fired up
*/
static void pre_mp_init(void)
{
x86_setup_mtrrs_with_detect();
x86_mtrr_check();
}
static int get_thread_count(void)
{
unsigned int num_phys = 0, num_virts = 0;
cpu_read_topology(&num_phys, &num_virts);
printk(BIOS_SPEW, "Detected %u cores and %u threads\n", num_phys, num_virts);
return num_virts * soc_get_num_cpus();
}
static void post_mp_init(void)
{
/* Set Max Ratio */
set_max_turbo_freq();
if (CONFIG(HAVE_SMI_HANDLER)) {
global_smi_enable();
if (get_lockdown_config() == CHIPSET_LOCKDOWN_COREBOOT)
pmc_lock_smi();
}
}
static const struct mp_ops mp_ops = {
.pre_mp_init = pre_mp_init,
.get_cpu_count = get_thread_count,
#if CONFIG(HAVE_SMI_HANDLER)
.get_smm_info = get_smm_info,
.pre_mp_smm_init = smm_southbridge_clear_state,
.relocation_handler = smm_relocation_handler,
#endif
.get_microcode_info = get_microcode_info,
.post_mp_init = post_mp_init,
};
void mp_init_cpus(struct bus *bus)
{
/*
* chip_config is used in the CPU device callback. Other than CPU 0,
* the rest of the CPU devices do not have chip_info updated.
*/
chip_config = bus->dev->chip_info;
microcode_patch = intel_microcode_find();
if (!microcode_patch)
printk(BIOS_ERR, "microcode not found in CBFS!\n");
intel_microcode_load_unlocked(microcode_patch);
if (mp_init_with_smm(bus, &mp_ops) < 0)
printk(BIOS_ERR, "MP initialization failure.\n");
/* update numa domain for all cpu devices */
xeonsp_init_cpu_config();
}
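Note: set_max_turbo_freq() above encodes the target ratio in bits 15:8 of IA32_PERF_CTL, and the resulting frequency is ratio times BCLK. An illustrative helper (not part of this change) showing the arithmetic: MSR_PLATFORM_INFO.lo & 0xff00 == 0x1c00 yields ratio 28, i.e. 28 * 100 MHz = 2800 MHz with the usual 100 MHz BCLK.

/* Illustrative: decode the frequency programmed into IA32_PERF_CTL. */
static unsigned int perf_ctl_to_mhz(uint32_t perf_ctl_lo)
{
	return ((perf_ctl_lo >> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ;
}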

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <device/pci_ops.h>
#include <soc/crashlog.h>
#include <soc/pci_devs.h>
static uint32_t read_msm_config_reg(uint32_t func, uint32_t reg)
{
uint32_t pcie_offset = PCI_DEV(MSM_BUS, MSM_DEV, func);
return pci_s_read_config32(pcie_offset, reg);
}
static void write_msm_config_reg(uint32_t func, uint32_t reg, uint32_t value)
{
uint32_t pcie_offset = PCI_DEV(MSM_BUS, MSM_DEV, func);
pci_s_write_config32(pcie_offset, reg, value);
}
void disable_cpu_crashlog(void)
{
uint32_t ctl;
ctl = read_msm_config_reg(MSM_FUN, CRASHLOG_CTL);
ctl |= CRASHLOG_CTL_DIS;
write_msm_config_reg(MSM_FUN, CRASHLOG_CTL, ctl);
ctl = read_msm_config_reg(MSM_FUN_PMON, BIOS_CRASHLOG_CTL);
ctl |= CRASHLOG_CTL_DIS;
write_msm_config_reg(MSM_FUN_PMON, BIOS_CRASHLOG_CTL, ctl);
}

@@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <console/console.h>
#include <commonlib/stdlib.h>
#include <device/device.h>
#include <device/pci_ops.h>
#include <device/pci.h>
#include <device/pciexp.h>
#include <soc/numa.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <string.h>
#include <types.h>
void dump_pds(void)
{
printk(BIOS_DEBUG, "====== Proximity Domain Dump ======\n");
printk(BIOS_DEBUG, "number of proximity domains: %d\n", pds.num_pds);
for (uint8_t i = 0; i < pds.num_pds; i++) {
printk(BIOS_DEBUG, "\tproximity domain %d:\n", i);
printk(BIOS_DEBUG, "\t\ttype:%d\n", pds.pds[i].pd_type);
printk(BIOS_DEBUG, "\t\tsocket_bitmap:0x%x\n", pds.pds[i].socket_bitmap);
printk(BIOS_DEBUG, "\t\tdevice_handle:0x%x\n", pds.pds[i].device_handle);
printk(BIOS_DEBUG, "\t\tbase(64MB):0x%x\n", pds.pds[i].base);
printk(BIOS_DEBUG, "\t\tsize(64MB):0x%x\n", pds.pds[i].size);
}
}
enum cb_err fill_pds(void)
{
uint8_t num_sockets = soc_get_num_cpus();
uint8_t num_cxlnodes = get_cxl_node_count();
/*
* Rules/assumptions:
* 1. Each processor has a processor proximity domain regardless of whether
*    it has DIMMs attached or not.
* 2. All system memory map elements are either from processor attached memory,
* or from CXL memory. Each CXL node info entry has a corresponding entry
* in system memory map elements.
* 3. Each CXL device may have multiple HDMs (Host-managed Device Memory). Each
* HDM has one and only one CXL node info entry. Each CXL node info entry
* represents a generic initiator proximity domain.
*/
pds.num_pds = num_cxlnodes + num_sockets;
pds.pds = xmalloc(sizeof(struct proximity_domain) * pds.num_pds);
if (!pds.pds)
die("%s %s out of memory.", __FILE__, __LINE__);
memset(pds.pds, 0, sizeof(struct proximity_domain) * pds.num_pds);
/* Fill in processor domains */
uint8_t i, j;
struct device *dev;
for (i = 0; i < num_sockets; i++) {
pds.pds[i].pd_type = PD_TYPE_PROCESSOR;
pds.pds[i].socket_bitmap = 1 << i;
pds.pds[i].distances = malloc(sizeof(uint8_t) * pds.num_pds);
if (!pds.pds[i].distances)
die("%s %s out of memory.", __FILE__, __LINE__);
/* hard code the distances for now, till we know how to calculate them. */
for (j = 0; j < pds.num_pds; j++) {
if (j == i)
pds.pds[i].distances[j] = 0x0a;
else
pds.pds[i].distances[j] = 0x0e;
}
}
/* If there are no CXL nodes, we are done */
if (num_cxlnodes == 0)
return CB_SUCCESS;
/* There are CXL nodes, fill in generic initiator domain after the processors pds */
uint8_t skt_id, cxl_id;
const CXL_NODE_SOCKET *cxl_hob = get_cxl_node();
/* i indexes the next free pd entry; advance it once per CXL node */
for (skt_id = 0, i = num_sockets; skt_id < MAX_SOCKET; skt_id++) {
for (cxl_id = 0; cxl_id < cxl_hob[skt_id].CxlNodeCount; ++cxl_id, ++i) {
const CXL_NODE_INFO node = cxl_hob[skt_id].CxlNodeInfo[cxl_id];
pds.pds[i].pd_type = PD_TYPE_GENERIC_INITIATOR;
pds.pds[i].socket_bitmap = node.SocketBitmap;
pds.pds[i].base = node.Address;
pds.pds[i].size = node.Size;
dev = pcie_find_dsn(node.SerialNumber, node.VendorId, 0);
pds.pds[i].device_handle = PCI_BDF(dev);
pds.pds[i].distances = malloc(sizeof(uint8_t) * pds.num_pds);
if (!pds.pds[i].distances)
die("%s %s out of memory.", __FILE__, __LINE__);
/* hard code the distances until we know how to calculate them */
for (j = 0; j < pds.num_pds; j++) {
if (j == i)
pds.pds[i].distances[j] = 0x0a;
else
pds.pds[i].distances[j] = 0x0e;
}
}
}
return CB_SUCCESS;
}
/*
* Return the total size of memory regions in generic initiator affinity domains.
* The size is in unit of 64MB.
*/
uint32_t get_generic_initiator_mem_size(void)
{
uint8_t i;
uint32_t size = 0;
for (i = 0; i < pds.num_pds; i++) {
if (pds.pds[i].pd_type == PD_TYPE_PROCESSOR)
continue;
size += pds.pds[i].size;
}
return size;
}
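Note: the hard-coded distances follow the ACPI SLIT convention, where 0x0a (10) is the architecturally defined distance of a domain to itself and remote domains get a value relative to that (0x0e here). A small illustrative dumper for the matrix built above, not part of this change:

/* Illustrative: print the proximity-domain distance matrix. */
static void dump_pd_distances(void)
{
	for (uint8_t i = 0; i < pds.num_pds; i++)
		for (uint8_t j = 0; j < pds.num_pds; j++)
			printk(BIOS_DEBUG, "PD%u -> PD%u: %u\n",
			       i, j, pds.pds[i].distances[j]);
}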

@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <cpu/x86/smm.h>
#include <drivers/ocp/include/vpd.h>
#include <soc/ramstage.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <soc/msr.h>
unsigned int smbios_cpu_get_voltage(void)
{
return 16; /* Per SMBIOS spec, voltage times 10 */
}
unsigned int smbios_cpu_get_current_speed_mhz(void)
{
msr_t msr;
msr = rdmsr(MSR_PLATFORM_INFO);
return ((msr.lo >> 8) & 0xff) * CONFIG_CPU_BCLK_MHZ;
}
__weak void mainboard_override_fsp_gpio(void)
{
/* Default weak implementation */
}
__weak void mainboard_override_usb_oc(void)
{
/* Default weak implementation */
}

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <cf9_reset.h>
#include <intelblocks/cse.h>
#include <intelblocks/pmclib.h>
#include <soc/intel/common/reset.h>
void do_global_reset(void)
{
/* Ask CSE to do the global reset */
if (cse_request_global_reset())
return;
/* Global reset if CSE fails to reset */
pmc_global_reset_enable(1);
do_full_reset();
}

@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <console/console.h>
#include <device/mmio.h>
#include <device/pci.h>
#include <soc/pci_devs.h>
#include <soc/xhci.h>
#include <types.h>
static uint8_t *get_xhci_bar(void)
{
const struct resource *res;
res = probe_resource(PCH_DEV_XHCI, PCI_BASE_ADDRESS_0);
if (!res) {
printk(BIOS_ERR, "XHCI BAR is not found\n");
return NULL;
}
return (void *)(uintptr_t)res->base;
}
void write_usb_oc_mapping(const struct usb_oc_mapping *config, uint8_t pins)
{
uint8_t *mbar = get_xhci_bar();
uint8_t i;
if (mbar == NULL) {
printk(BIOS_ERR, "XHCI BAR is invalid, skip USB OC mapping configuration\n");
return;
}
for (i = 0; i < pins; i++)
write32(mbar + config[i].pin, config[i].port);
}
void lock_oc_cfg(bool lock)
{
uint32_t cfg = pci_read_config32(PCH_DEV_XHCI, SYS_BUS_CFG2);
if (lock)
cfg |= OCCFGDONE;
else
cfg &= ~(OCCFGDONE);
pci_write_config32(PCH_DEV_XHCI, SYS_BUS_CFG2, cfg);
}
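Note: chip_final() above opens the override window with lock_oc_cfg(false) before invoking the board hook, mirroring the GPIO flow in chip_init(). A minimal sketch of a board-side mainboard_override_usb_oc(); the MMIO offset 0xb0 and the port bitmap are made-up placeholders, and real values come from the PCH EDS and the board's overcurrent wiring:

/* Hypothetical board code: remap OC pins while the OC config is unlocked. */
#include <soc/ramstage.h>
#include <soc/xhci.h>

static const struct usb_oc_mapping board_oc_map[] = {
	{ .pin = 0xb0, .port = 0x1 },	/* placeholder pin register and port */
};

void mainboard_override_usb_oc(void)
{
	write_usb_oc_mapping(board_oc_map, ARRAY_SIZE(board_oc_map));
}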