soc/intel/broadwell: Drop now-unused CPU code

All boards now use Haswell's CPU code, which also supports Broadwell.

Change-Id: Ia0b8f7bf64334dd965baad0a30a7bb0ed81c4cac
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/46950
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Author: Angel Pons <th3fanbus@gmail.com>
Date: 2020-11-23 13:25:33 +01:00
parent 9d733def59
commit b89c8bb135
9 changed files with 0 additions and 1545 deletions

src/soc/intel/broadwell/Makefile.inc

@@ -1,10 +1,6 @@
 ifeq ($(CONFIG_SOC_INTEL_BROADWELL),y)
-ifeq ($(CONFIG_CPU_INTEL_HASWELL),y)
 romstage-y += cpu/romstage.c
-else
-subdirs-y += cpu
-endif
 subdirs-y += pch

src/soc/intel/broadwell/cpu/Makefile.inc

@@ -1,30 +0,0 @@
subdirs-y += ../../../../cpu/x86/lapic
subdirs-y += ../../../../cpu/x86/mtrr
subdirs-y += ../../../../cpu/x86/smm
subdirs-y += ../../../../cpu/x86/tsc
subdirs-y += ../../../../cpu/intel/microcode
subdirs-y += ../../../../cpu/intel/turbo
subdirs-y += ../../../../cpu/intel/common
bootblock-y += bootblock.c
bootblock-y += ../../../../cpu/intel/car/bootblock.c
bootblock-y += ../../../../cpu/intel/car/non-evict/cache_as_ram.S
bootblock-y += ../../../../cpu/x86/early_reset.S
romstage-y += romstage.c
romstage-y += ../../../../cpu/intel/car/romstage.c
postcar-y += ../../../../cpu/intel/car/non-evict/exit_car.S
ramstage-y += acpi.c
ramstage-y += cpu.c
ramstage-y += smmrelocate.c
bootblock-y += tsc_freq.c
ramstage-y += tsc_freq.c
romstage-y += tsc_freq.c
smm-y += tsc_freq.c
postcar-y += tsc_freq.c
verstage-y += tsc_freq.c
cpu_microcode_bins += 3rdparty/blobs/soc/intel/broadwell/microcode.bin

src/soc/intel/broadwell/cpu/acpi.c

@@ -1,393 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <acpi/acpi.h>
#include <acpi/acpi_gnvs.h>
#include <acpi/acpigen.h>
#include <arch/ioapic.h>
#include <arch/smp/mpspec.h>
#include <cbmem.h>
#include <device/pci_ops.h>
#include <cpu/x86/smm.h>
#include <console/console.h>
#include <types.h>
#include <string.h>
#include <arch/cpu.h>
#include <cpu/x86/msr.h>
#include <cpu/intel/turbo.h>
#include <soc/acpi.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/lpc.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/pm.h>
#include <soc/systemagent.h>
#include <soc/intel/broadwell/chip.h>
/*
* List of supported C-states in this processor. Only the ULT parts support C8,
* C9, and C10.
*/
enum {
C_STATE_C0, /* 0 */
C_STATE_C1, /* 1 */
C_STATE_C1E, /* 2 */
C_STATE_C3, /* 3 */
C_STATE_C6_SHORT_LAT, /* 4 */
C_STATE_C6_LONG_LAT, /* 5 */
C_STATE_C7_SHORT_LAT, /* 6 */
C_STATE_C7_LONG_LAT, /* 7 */
C_STATE_C7S_SHORT_LAT, /* 8 */
C_STATE_C7S_LONG_LAT, /* 9 */
C_STATE_C8, /* 10 */
C_STATE_C9, /* 11 */
C_STATE_C10, /* 12 */
NUM_C_STATES
};
#define MWAIT_RES(state, sub_state) \
{ \
.addrl = (((state) << 4) | (sub_state)), \
.space_id = ACPI_ADDRESS_SPACE_FIXED, \
.bit_width = ACPI_FFIXEDHW_VENDOR_INTEL, \
.bit_offset = ACPI_FFIXEDHW_CLASS_MWAIT, \
.access_size = ACPI_FFIXEDHW_FLAG_HW_COORD, \
}
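As an aside, the hint byte MWAIT_RES() packs into .addrl is the same value the OS later loads into EAX for MWAIT: bits 7:4 select the C-state, bits 3:0 the sub-state. A standalone sketch:

#include <stdio.h>

/* Recompute the MWAIT hint exactly as MWAIT_RES() encodes it. */
static unsigned int mwait_hint(unsigned int state, unsigned int sub_state)
{
	return (state << 4) | sub_state;
}

int main(void)
{
	/* C6 long-latency below is MWAIT_RES(2, 1), i.e. hint 0x21. */
	printf("C6 long-latency hint: 0x%02x\n", mwait_hint(2, 1));
	return 0;
}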
static acpi_cstate_t cstate_map[NUM_C_STATES] = {
[C_STATE_C0] = { },
[C_STATE_C1] = {
.latency = 0,
.power = 1000,
.resource = MWAIT_RES(0, 0),
},
[C_STATE_C1E] = {
.latency = 0,
.power = 1000,
.resource = MWAIT_RES(0, 1),
},
[C_STATE_C3] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(0),
.power = 900,
.resource = MWAIT_RES(1, 0),
},
[C_STATE_C6_SHORT_LAT] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
.power = 800,
.resource = MWAIT_RES(2, 0),
},
[C_STATE_C6_LONG_LAT] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
.power = 800,
.resource = MWAIT_RES(2, 1),
},
[C_STATE_C7_SHORT_LAT] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
.power = 700,
.resource = MWAIT_RES(3, 0),
},
[C_STATE_C7_LONG_LAT] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
.power = 700,
.resource = MWAIT_RES(3, 1),
},
[C_STATE_C7S_SHORT_LAT] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
.power = 700,
.resource = MWAIT_RES(3, 2),
},
[C_STATE_C7S_LONG_LAT] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
.power = 700,
.resource = MWAIT_RES(3, 3),
},
[C_STATE_C8] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(3),
.power = 600,
.resource = MWAIT_RES(4, 0),
},
[C_STATE_C9] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(4),
.power = 500,
.resource = MWAIT_RES(5, 0),
},
[C_STATE_C10] = {
.latency = C_STATE_LATENCY_FROM_LAT_REG(5),
.power = 400,
.resource = MWAIT_RES(6, 0),
},
};
static int cstate_set_s0ix[3] = {
C_STATE_C1E,
C_STATE_C7S_LONG_LAT,
C_STATE_C10
};
static int cstate_set_non_s0ix[3] = {
C_STATE_C1E,
C_STATE_C3,
C_STATE_C7S_LONG_LAT
};
static int get_cores_per_package(void)
{
struct cpuinfo_x86 c;
struct cpuid_result result;
int cores = 1;
get_fms(&c, cpuid_eax(1));
if (c.x86 != 6)
return 1;
result = cpuid_ext(0xb, 1);
cores = result.ebx & 0xff;
return cores;
}
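Note that CPUID leaf 0xB, sub-leaf 1 reports the number of logical processors per package in EBX, so on HyperThreading parts the value counts threads; generate_cpu_entries() below divides dev_count_cpu() by it to obtain the package count. A sketch with hypothetical counts:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 2-core/4-thread ULT part: CPUID.(EAX=0xB,ECX=1):EBX
	 * reports 4 logical processors, and dev_count_cpu() enumerates the
	 * same 4, so the division yields one package. */
	int totalcores = 4;
	int cores_per_package = 4;
	printf("Found %d CPU(s) with %d core(s) each.\n",
	       totalcores / cores_per_package, cores_per_package);
	return 0;
}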
static acpi_tstate_t tss_table_fine[] = {
{ 100, 1000, 0, 0x00, 0 },
{ 94, 940, 0, 0x1f, 0 },
{ 88, 880, 0, 0x1e, 0 },
{ 82, 820, 0, 0x1d, 0 },
{ 75, 760, 0, 0x1c, 0 },
{ 69, 700, 0, 0x1b, 0 },
{ 63, 640, 0, 0x1a, 0 },
{ 57, 580, 0, 0x19, 0 },
{ 50, 520, 0, 0x18, 0 },
{ 44, 460, 0, 0x17, 0 },
{ 38, 400, 0, 0x16, 0 },
{ 32, 340, 0, 0x15, 0 },
{ 25, 280, 0, 0x14, 0 },
{ 19, 220, 0, 0x13, 0 },
{ 13, 160, 0, 0x12, 0 },
};
static acpi_tstate_t tss_table_coarse[] = {
{ 100, 1000, 0, 0x00, 0 },
{ 88, 875, 0, 0x1f, 0 },
{ 75, 750, 0, 0x1e, 0 },
{ 63, 625, 0, 0x1d, 0 },
{ 50, 500, 0, 0x1c, 0 },
{ 38, 375, 0, 0x1b, 0 },
{ 25, 250, 0, 0x1a, 0 },
{ 13, 125, 0, 0x19, 0 },
};
static void generate_T_state_entries(int core, int cores_per_package)
{
/* Indicate SW_ALL coordination for T-states */
acpigen_write_TSD_package(core, cores_per_package, SW_ALL);
/* Indicate FFixedHW so OS will use MSR */
acpigen_write_empty_PTC();
/* Set a T-state limit that can be modified in NVS */
acpigen_write_TPC("\\TLVL");
/*
* CPUID.(EAX=6):EAX[5] indicates support
* for extended throttle levels.
*/
if (cpuid_eax(6) & (1 << 5))
acpigen_write_TSS_package(
ARRAY_SIZE(tss_table_fine), tss_table_fine);
else
acpigen_write_TSS_package(
ARRAY_SIZE(tss_table_coarse), tss_table_coarse);
}
static void generate_C_state_entries(void)
{
acpi_cstate_t map[3];
int *set;
int i;
config_t *config = config_of_soc();
if (config->s0ix_enable)
set = cstate_set_s0ix;
else
set = cstate_set_non_s0ix;
for (i = 0; i < 3; i++) {
memcpy(&map[i], &cstate_map[set[i]], sizeof(acpi_cstate_t));
map[i].ctype = i + 1;
}
/* Generate C-state tables */
acpigen_write_CST_package(map, ARRAY_SIZE(map));
}
static int calculate_power(int tdp, int p1_ratio, int ratio)
{
u32 m;
u32 power;
/*
* M = ((1.1 - ((p1_ratio - ratio) * 0.00625)) / 1.1) ^ 2
*
* Power = (ratio / p1_ratio) * m * tdp
*/
m = (110000 - ((p1_ratio - ratio) * 625)) / 11;
m = (m * m) / 1000;
power = ((ratio * 100000 / p1_ratio) / 100);
power *= (m / 100) * (tdp / 1000);
power /= 1000;
return (int)power;
}
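The scaled-integer arithmetic mirrors the floating-point formula in the comment; a standalone sketch with hypothetical inputs (15 W TDP, P1 ratio 24, target ratio 16):

#include <stdio.h>

int main(void)
{
	int tdp = 15000;	/* mW, hypothetical SKU */
	int p1_ratio = 24;
	int ratio = 16;

	/* Same fixed-point steps as calculate_power(). */
	unsigned int m = (110000 - ((p1_ratio - ratio) * 625)) / 11;
	m = (m * m) / 1000;

	unsigned int power = ((ratio * 100000 / p1_ratio) / 100);
	power *= (m / 100) * (tdp / 1000);
	power /= 1000;

	/* Float reference: ((1.1 - 8 * 0.00625) / 1.1)^2 * (16 / 24) * 15000
	 * is about 9112 mW; the integer version prints 9100 mW. */
	printf("power at ratio %d: %u mW\n", ratio, power);
	return 0;
}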
static void generate_P_state_entries(int core, int cores_per_package)
{
int ratio_min, ratio_max, ratio_turbo, ratio_step;
int coord_type, power_max, power_unit, num_entries;
int ratio, power, clock, clock_max;
msr_t msr;
/* Determine P-state coordination type from MISC_PWR_MGMT[0] */
msr = rdmsr(MSR_MISC_PWR_MGMT);
if (msr.lo & MISC_PWR_MGMT_EIST_HW_DIS)
coord_type = SW_ANY;
else
coord_type = HW_ALL;
/* Get bus ratio limits and calculate clock speeds */
msr = rdmsr(MSR_PLATFORM_INFO);
ratio_min = (msr.hi >> (40-32)) & 0xff; /* Max Efficiency Ratio */
/* Determine if this CPU has configurable TDP */
if (cpu_config_tdp_levels()) {
/* Set max ratio to nominal TDP ratio */
msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
ratio_max = msr.lo & 0xff;
} else {
/* Max Non-Turbo Ratio */
ratio_max = (msr.lo >> 8) & 0xff;
}
clock_max = ratio_max * CPU_BCLK;
/* Calculate CPU TDP in mW */
msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
power_unit = 2 << ((msr.lo & 0xf) - 1);
msr = rdmsr(MSR_PKG_POWER_SKU);
power_max = ((msr.lo & 0x7fff) / power_unit) * 1000;
/* Write _PCT indicating use of FFixedHW */
acpigen_write_empty_PCT();
/* Write _PPC with no limit on supported P-state */
acpigen_write_PPC_NVS();
/* Write PSD indicating configured coordination type */
acpigen_write_PSD_package(core, 1, coord_type);
/* Add P-state entries in _PSS table */
acpigen_write_name("_PSS");
/* Determine ratio points */
ratio_step = PSS_RATIO_STEP;
num_entries = (ratio_max - ratio_min) / ratio_step;
while (num_entries > PSS_MAX_ENTRIES-1) {
ratio_step <<= 1;
num_entries >>= 1;
}
/* P[T] is Turbo state if enabled */
if (get_turbo_state() == TURBO_ENABLED) {
/* _PSS package count including Turbo */
acpigen_write_package(num_entries + 2);
msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
ratio_turbo = msr.lo & 0xff;
/* Add entry for Turbo ratio */
acpigen_write_PSS_package(
clock_max + 1, /*MHz*/
power_max, /*mW*/
PSS_LATENCY_TRANSITION, /*lat1*/
PSS_LATENCY_BUSMASTER, /*lat2*/
ratio_turbo << 8, /*control*/
ratio_turbo << 8); /*status*/
} else {
/* _PSS package count without Turbo */
acpigen_write_package(num_entries + 1);
}
/* First regular entry is max non-turbo ratio */
acpigen_write_PSS_package(
clock_max, /*MHz*/
power_max, /*mW*/
PSS_LATENCY_TRANSITION, /*lat1*/
PSS_LATENCY_BUSMASTER, /*lat2*/
ratio_max << 8, /*control*/
ratio_max << 8); /*status*/
/* Generate the remaining entries */
for (ratio = ratio_min + ((num_entries - 1) * ratio_step);
ratio >= ratio_min; ratio -= ratio_step) {
/* Calculate power at this ratio */
power = calculate_power(power_max, ratio_max, ratio);
clock = ratio * CPU_BCLK;
acpigen_write_PSS_package(
clock, /*MHz*/
power, /*mW*/
PSS_LATENCY_TRANSITION, /*lat1*/
PSS_LATENCY_BUSMASTER, /*lat2*/
ratio << 8, /*control*/
ratio << 8); /*status*/
}
/* Fix package length */
acpigen_pop_len();
}
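The step-doubling loop above squeezes the ratio range into the _PSS package size limit; a sketch assuming a hypothetical PSS_MAX_ENTRIES of 10 and fused ratio limits of 5 and 24:

#include <stdio.h>

#define PSS_MAX_ENTRIES 10	/* assumed value, for illustration only */

int main(void)
{
	int ratio_min = 5, ratio_max = 24;	/* hypothetical limits */
	int ratio_step = 1;
	int num_entries = (ratio_max - ratio_min) / ratio_step;

	/* Same halving loop as generate_P_state_entries(). */
	while (num_entries > PSS_MAX_ENTRIES - 1) {
		ratio_step <<= 1;
		num_entries >>= 1;
	}
	/* 19 entries at step 1 become 9 entries at step 2. */
	printf("step %d, %d entries below the max ratio\n",
	       ratio_step, num_entries);
	return 0;
}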
void generate_cpu_entries(const struct device *device)
{
int coreID, cpuID, pcontrol_blk = ACPI_BASE_ADDRESS, plen = 6;
int totalcores = dev_count_cpu();
int cores_per_package = get_cores_per_package();
int numcpus = totalcores/cores_per_package;
printk(BIOS_DEBUG, "Found %d CPU(s) with %d core(s) each.\n",
numcpus, cores_per_package);
for (cpuID = 1; cpuID <= numcpus; cpuID++) {
for (coreID = 1; coreID <= cores_per_package; coreID++) {
if (coreID > 1) {
pcontrol_blk = 0;
plen = 0;
}
/* Generate processor \_SB.CPUx */
acpigen_write_processor(
(cpuID - 1) * cores_per_package+coreID - 1,
pcontrol_blk, plen);
/* Generate P-state tables */
generate_P_state_entries(
coreID - 1, cores_per_package);
/* Generate C-state tables */
generate_C_state_entries();
/* Generate T-state tables */
generate_T_state_entries(
cpuID - 1, cores_per_package);
acpigen_pop_len();
}
}
/* PPKG is usually used for thermal management
of the first and only package. */
acpigen_write_processor_package("PPKG", 0, cores_per_package);
/* Add a method to notify processor nodes */
acpigen_write_processor_cnot(cores_per_package);
}

src/soc/intel/broadwell/cpu/bootblock.c

@@ -1,65 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <stdint.h>
#include <arch/bootblock.h>
#include <arch/io.h>
#include <cpu/x86/msr.h>
#include <halt.h>
#include <soc/rcba.h>
#include <soc/msr.h>
#include <delay.h>
static void set_flex_ratio_to_tdp_nominal(void)
{
msr_t flex_ratio, msr;
u32 soft_reset;
u8 nominal_ratio;
/* Check for Flex Ratio support */
flex_ratio = rdmsr(MSR_FLEX_RATIO);
if (!(flex_ratio.lo & FLEX_RATIO_EN))
return;
/* Check for >0 configurable TDPs */
msr = rdmsr(MSR_PLATFORM_INFO);
if (((msr.hi >> 1) & 3) == 0)
return;
/* Use nominal TDP ratio for flex ratio */
msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
nominal_ratio = msr.lo & 0xff;
/* See if flex ratio is already set to nominal TDP ratio */
if (((flex_ratio.lo >> 8) & 0xff) == nominal_ratio)
return;
/* Set flex ratio to nominal TDP ratio */
flex_ratio.lo &= ~0xff00;
flex_ratio.lo |= nominal_ratio << 8;
flex_ratio.lo |= FLEX_RATIO_LOCK;
wrmsr(MSR_FLEX_RATIO, flex_ratio);
/* Set flex ratio in soft reset data register bits 11:6.
* RCBA region is enabled in southbridge bootblock */
soft_reset = RCBA32(SOFT_RESET_DATA);
soft_reset &= ~(0x3f << 6);
soft_reset |= (nominal_ratio & 0x3f) << 6;
RCBA32(SOFT_RESET_DATA) = soft_reset;
/* Set soft reset control to use register value */
RCBA32_OR(SOFT_RESET_CTRL, 1);
/* Delay before reset to avoid potential TPM lockout */
mdelay(30);
/* Issue warm reset, will be "CPU only" due to soft reset data */
outb(0x0, 0xcf9);
outb(0x6, 0xcf9);
halt();
}
void bootblock_early_cpu_init(void)
{
/* Set flex ratio and reset if needed */
set_flex_ratio_to_tdp_nominal();
}
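The warm reset stays "CPU only" because SOFT_RESET_CTRL is set to use the register value stashed in SOFT_RESET_DATA. A sketch of the bits 11:6 packing, with a hypothetical nominal ratio of 0x11 (1700 MHz):

#include <stdio.h>

int main(void)
{
	unsigned int soft_reset = 0;		/* stand-in for RCBA SOFT_RESET_DATA */
	unsigned int nominal_ratio = 0x11;	/* hypothetical: 17 x 100 MHz */

	/* Same packing as set_flex_ratio_to_tdp_nominal(). */
	soft_reset &= ~(0x3f << 6);
	soft_reset |= (nominal_ratio & 0x3f) << 6;

	printf("SOFT_RESET_DATA = 0x%08x\n", soft_reset);	/* 0x00000440 */
	return 0;
}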

src/soc/intel/broadwell/cpu/cpu.c

@@ -1,640 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <arch/cpu.h>
#include <cpu/cpu.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/speedstep.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/name.h>
#include <delay.h>
#include <soc/cpu.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/ramstage.h>
#include <soc/rcba.h>
#include <soc/systemagent.h>
#include <soc/intel/broadwell/chip.h>
#include <cpu/intel/common/common.h>
/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
[0] = 0x00,
[1] = 0x0a,
[2] = 0x0b,
[3] = 0x4b,
[4] = 0x0c,
[5] = 0x2c,
[6] = 0x4c,
[7] = 0x6c,
[8] = 0x0d,
[10] = 0x2d,
[12] = 0x4d,
[14] = 0x6d,
[16] = 0x0e,
[20] = 0x2e,
[24] = 0x4e,
[28] = 0x6e,
[32] = 0x0f,
[40] = 0x2f,
[48] = 0x4f,
[56] = 0x6f,
[64] = 0x10,
[80] = 0x30,
[96] = 0x50,
[112] = 0x70,
[128] = 0x11,
};
/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
[0x00] = 0,
[0x0a] = 1,
[0x0b] = 2,
[0x4b] = 3,
[0x0c] = 4,
[0x2c] = 5,
[0x4c] = 6,
[0x6c] = 7,
[0x0d] = 8,
[0x2d] = 10,
[0x4d] = 12,
[0x6d] = 14,
[0x0e] = 16,
[0x2e] = 20,
[0x4e] = 24,
[0x6e] = 28,
[0x0f] = 32,
[0x2f] = 40,
[0x4f] = 48,
[0x6f] = 56,
[0x10] = 64,
[0x30] = 80,
[0x50] = 96,
[0x70] = 112,
[0x11] = 128,
};
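These lookup tables follow the RAPL time-window format from the Intel SDM: bits 4:0 hold an exponent Y, bits 6:5 a fraction Z, and the window is (1 + Z/4) * 2^Y time units. Assuming the usual 1/1024 s package time unit, the entries decode as sketched here:

#include <stdio.h>

/* Decode a POWER_LIMIT_1_TIME field value into seconds, assuming a
 * 1/1024 s RAPL time unit: window = (1 + Z/4) * 2^Y units. */
static unsigned int msr_time_to_sec(unsigned int val)
{
	unsigned int y = val & 0x1f;
	unsigned int z = (val >> 5) & 0x3;
	return ((1u << y) * (4 + z)) / (4 * 1024);
}

int main(void)
{
	/* 0x2c -> Y=12, Z=1: 1.25 * 4096 / 1024 = 5 s, matching
	 * power_limit_time_msr_to_sec[0x2c] above. */
	printf("0x2c -> %u s\n", msr_time_to_sec(0x2c));
	printf("0x6e -> %u s\n", msr_time_to_sec(0x6e));	/* 28 s */
	return 0;
}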
/* The core 100MHz BCLK is disabled in deeper c-states. One needs to calibrate
* the 100MHz BCLK against the 24MHz BCLK to restore the clocks properly
* when a core is woken up. */
static int pcode_ready(void)
{
int wait_count;
const int delay_step = 10;
wait_count = 0;
do {
if (!(MCHBAR32(BIOS_MAILBOX_INTERFACE) & MAILBOX_RUN_BUSY))
return 0;
wait_count += delay_step;
udelay(delay_step);
} while (wait_count < 1000);
return -1;
}
static void calibrate_24mhz_bclk(void)
{
int err_code;
if (pcode_ready() < 0) {
printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
return;
}
/* A non-zero value initiates the PCODE calibration. */
MCHBAR32(BIOS_MAILBOX_DATA) = ~0;
MCHBAR32(BIOS_MAILBOX_INTERFACE) =
MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL;
if (pcode_ready() < 0) {
printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
return;
}
err_code = MCHBAR32(BIOS_MAILBOX_INTERFACE) & 0xff;
printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration response: %d\n",
err_code);
/* Read the calibrated value. */
MCHBAR32(BIOS_MAILBOX_INTERFACE) =
MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION;
if (pcode_ready() < 0) {
printk(BIOS_ERR, "PCODE: mailbox timeout on read.\n");
return;
}
printk(BIOS_DEBUG, "PCODE: 24MHz BCLK calibration value: 0x%08x\n",
MCHBAR32(BIOS_MAILBOX_DATA));
}
static u32 pcode_mailbox_read(u32 command)
{
if (pcode_ready() < 0) {
printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
return 0;
}
/* Send command and start transaction */
MCHBAR32(BIOS_MAILBOX_INTERFACE) = command | MAILBOX_RUN_BUSY;
if (pcode_ready() < 0) {
printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
return 0;
}
/* Read mailbox */
return MCHBAR32(BIOS_MAILBOX_DATA);
}
static int pcode_mailbox_write(u32 command, u32 data)
{
if (pcode_ready() < 0) {
printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
return -1;
}
MCHBAR32(BIOS_MAILBOX_DATA) = data;
/* Send command and start transaction */
MCHBAR32(BIOS_MAILBOX_INTERFACE) = command | MAILBOX_RUN_BUSY;
if (pcode_ready() < 0) {
printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
return -1;
}
return 0;
}
static void initialize_vr_config(void)
{
config_t *conf = config_of_soc();
msr_t msr;
printk(BIOS_DEBUG, "Initializing VR config.\n");
/* Configure VR_CURRENT_CONFIG. */
msr = rdmsr(MSR_VR_CURRENT_CONFIG);
/* Preserve bits 63 and 62. Bit 62 is PSI4 enable, but it is only valid
* on ULT systems. */
msr.hi &= 0xc0000000;
msr.hi |= (0x01 << (52 - 32)); /* PSI3 threshold - 1A. */
msr.hi |= (0x05 << (42 - 32)); /* PSI2 threshold - 5A. */
msr.hi |= (0x14 << (32 - 32)); /* PSI1 threshold - 20A. */
msr.hi |= (1 << (62 - 32)); /* Enable PSI4 */
/* Leave the max instantaneous current limit (12:0) to default. */
wrmsr(MSR_VR_CURRENT_CONFIG, msr);
/* Configure VR_MISC_CONFIG MSR. */
msr = rdmsr(MSR_VR_MISC_CONFIG);
/* Set the IOUT_SLOPE scalar applied to dIout in U10.1.9 format. */
msr.hi &= ~(0x3ff << (40 - 32));
msr.hi |= (0x200 << (40 - 32)); /* 1.0 */
/* Set IOUT_OFFSET to 0. */
msr.hi &= ~0xff;
/* Set entry ramp rate to slow. */
msr.hi &= ~(1 << (51 - 32));
/* Enable decay mode on C-state entry. */
msr.hi |= (1 << (52 - 32));
/* Set the slow ramp rate */
msr.hi &= ~(0x3 << (53 - 32));
/* Configure the C-state exit ramp rate. */
if (conf->vr_slow_ramp_rate_enable) {
/* Configured slow ramp rate. */
msr.hi |= ((conf->vr_slow_ramp_rate_set & 0x3) << (53 - 32));
/* Set exit ramp rate to slow. */
msr.hi &= ~(1 << (50 - 32));
} else {
/* Fast ramp rate / 4. */
msr.hi |= (0x01 << (53 - 32));
/* Set exit ramp rate to fast. */
msr.hi |= (1 << (50 - 32));
}
/* Set MIN_VID (31:24) to allow CPU to have full control. */
msr.lo &= ~0xff000000;
msr.lo |= (conf->vr_cpu_min_vid & 0xff) << 24;
wrmsr(MSR_VR_MISC_CONFIG, msr);
/* Configure VR_MISC_CONFIG2 MSR. */
msr = rdmsr(MSR_VR_MISC_CONFIG2);
msr.lo &= ~0xffff;
/* Allow CPU to control minimum voltage completely (15:8) and
* set the fast ramp voltage in 10mV steps. */
if (cpu_family_model() == BROADWELL_FAMILY_ULT)
msr.lo |= 0x006a; /* 1.56V */
else
msr.lo |= 0x006f; /* 1.60V */
wrmsr(MSR_VR_MISC_CONFIG2, msr);
/* Set C9/C10 VCC Min */
pcode_mailbox_write(MAILBOX_BIOS_CMD_WRITE_C9C10_VOLTAGE, 0x1f1f);
}
static void configure_pch_power_sharing(void)
{
u32 pch_power, pch_power_ext, pmsync, pmsync2;
int i;
/* Read PCH Power levels from PCODE */
pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);
printk(BIOS_INFO, "PCH Power: PCODE Levels 0x%08x 0x%08x\n",
pch_power, pch_power_ext);
pmsync = RCBA32(PMSYNC_CONFIG);
pmsync2 = RCBA32(PMSYNC_CONFIG2);
/* Program PMSYNC_TPR_CONFIG PCH power limit values
* pmsync[0:4] = mailbox[0:5]
* pmsync[8:12] = mailbox[6:11]
* pmsync[16:20] = mailbox[12:17]
*/
for (i = 0; i < 3; i++) {
u32 level = pch_power & 0x3f;
pch_power >>= 6;
pmsync &= ~(0x1f << (i * 8));
pmsync |= (level & 0x1f) << (i * 8);
}
RCBA32(PMSYNC_CONFIG) = pmsync;
/* Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
* pmsync2[0:4] = mailbox[23:18]
* pmsync2[8:12] = mailbox_ext[6:11]
* pmsync2[16:20] = mailbox_ext[12:17]
* pmsync2[24:28] = mailbox_ext[18:22]
*/
pmsync2 &= ~0x1f;
pmsync2 |= pch_power & 0x1f;
for (i = 1; i < 4; i++) {
u32 level = pch_power_ext & 0x3f;
pch_power_ext >>= 6;
pmsync2 &= ~(0x1f << (i * 8));
pmsync2 |= (level & 0x1f) << (i * 8);
}
RCBA32(PMSYNC_CONFIG2) = pmsync2;
}
int cpu_config_tdp_levels(void)
{
msr_t platform_info;
/* Bits 34:33 indicate how many levels supported */
platform_info = rdmsr(MSR_PLATFORM_INFO);
return (platform_info.hi >> 1) & 3;
}
/*
* Configure processor power limits if possible
* This must be done AFTER set of BIOS_RESET_CPL
*/
void set_power_limits(u8 power_limit_1_time)
{
msr_t msr = rdmsr(MSR_PLATFORM_INFO);
msr_t limit;
unsigned int power_unit;
unsigned int tdp, min_power, max_power, max_time;
u8 power_limit_1_val;
if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr) - 1;
if (!(msr.lo & PLATFORM_INFO_SET_TDP))
return;
/* Get units */
msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
power_unit = 2 << ((msr.lo & 0xf) - 1);
/* Get power defaults for this SKU */
msr = rdmsr(MSR_PKG_POWER_SKU);
tdp = msr.lo & 0x7fff;
min_power = (msr.lo >> 16) & 0x7fff;
max_power = msr.hi & 0x7fff;
max_time = (msr.hi >> 16) & 0x7f;
printk(BIOS_DEBUG, "CPU TDP: %u Watts\n", tdp / power_unit);
if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
power_limit_1_time = power_limit_time_msr_to_sec[max_time];
if (min_power > 0 && tdp < min_power)
tdp = min_power;
if (max_power > 0 && tdp > max_power)
tdp = max_power;
power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];
/* Set long term power limit to TDP */
limit.lo = 0;
limit.lo |= tdp & PKG_POWER_LIMIT_MASK;
limit.lo |= PKG_POWER_LIMIT_EN;
limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
PKG_POWER_LIMIT_TIME_SHIFT;
/* Set short term power limit to 1.25 * TDP */
limit.hi = 0;
limit.hi |= ((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK;
limit.hi |= PKG_POWER_LIMIT_EN;
/* Power limit 2 time is only programmable on server SKU */
wrmsr(MSR_PKG_POWER_LIMIT, limit);
/* Set power limit values in MCHBAR as well */
MCHBAR32(MCH_PKG_POWER_LIMIT_LO) = limit.lo;
MCHBAR32(MCH_PKG_POWER_LIMIT_HI) = limit.hi;
/* Set DDR RAPL power limit by copying from MMIO to MSR */
msr.lo = MCHBAR32(MCH_DDR_POWER_LIMIT_LO);
msr.hi = MCHBAR32(MCH_DDR_POWER_LIMIT_HI);
wrmsr(MSR_DDR_RAPL_LIMIT, msr);
/* Use nominal TDP values for CPUs with configurable TDP */
if (cpu_config_tdp_levels()) {
msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
limit.hi = 0;
limit.lo = msr.lo & 0xff;
wrmsr(MSR_TURBO_ACTIVATION_RATIO, limit);
}
}
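For reference, a standalone sketch of the MSR_PKG_POWER_LIMIT value this produces for a hypothetical 15 W SKU with the common 1/8 W power unit and a one-second PL1 time (0x0a):

#include <stdio.h>

#define PKG_POWER_LIMIT_MASK		0x7fff
#define PKG_POWER_LIMIT_EN		(1 << 15)
#define PKG_POWER_LIMIT_TIME_SHIFT	17
#define PKG_POWER_LIMIT_TIME_MASK	0x7f

int main(void)
{
	unsigned int power_unit = 8;		/* 1/8 W units */
	unsigned int tdp = 15 * power_unit;	/* hypothetical 15 W SKU */
	unsigned int pl1_time = 0x0a;		/* 1 second */

	/* PL1 = TDP, PL2 = 1.25 * TDP, as in set_power_limits(). */
	unsigned int lo = (tdp & PKG_POWER_LIMIT_MASK) | PKG_POWER_LIMIT_EN |
		((pl1_time & PKG_POWER_LIMIT_TIME_MASK) << PKG_POWER_LIMIT_TIME_SHIFT);
	unsigned int hi = (((tdp * 125) / 100) & PKG_POWER_LIMIT_MASK) |
		PKG_POWER_LIMIT_EN;

	/* Prints 0x00008096_00148078. */
	printf("MSR_PKG_POWER_LIMIT = 0x%08x_%08x\n", hi, lo);
	return 0;
}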
static void configure_c_states(void)
{
msr_t msr;
msr = rdmsr(MSR_PKG_CST_CONFIG_CONTROL);
msr.lo |= (1 << 31); // Timed MWAIT Enable
msr.lo |= (1 << 30); // Package c-state Undemotion Enable
msr.lo |= (1 << 29); // Package c-state Demotion Enable
msr.lo |= (1 << 28); // C1 Auto Undemotion Enable
msr.lo |= (1 << 27); // C3 Auto Undemotion Enable
msr.lo |= (1 << 26); // C1 Auto Demotion Enable
msr.lo |= (1 << 25); // C3 Auto Demotion Enable
msr.lo &= ~(1 << 10); // Disable IO MWAIT redirection
/* The deepest package c-state defaults to factory-configured value. */
wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);
msr = rdmsr(MSR_MISC_PWR_MGMT);
msr.lo &= ~(1 << 0); // Enable P-state HW_ALL coordination
wrmsr(MSR_MISC_PWR_MGMT, msr);
msr = rdmsr(MSR_POWER_CTL);
msr.lo |= (1 << 18); // Enable Energy Perf Bias MSR 0x1b0
msr.lo |= (1 << 1); // C1E Enable
msr.lo |= (1 << 0); // Bi-directional PROCHOT#
wrmsr(MSR_POWER_CTL, msr);
/* C-state Interrupt Response Latency Control 0 - package C3 latency */
msr.hi = 0;
msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
wrmsr(MSR_C_STATE_LATENCY_CONTROL_0, msr);
/* C-state Interrupt Response Latency Control 1 */
msr.hi = 0;
msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
wrmsr(MSR_C_STATE_LATENCY_CONTROL_1, msr);
/* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
msr.hi = 0;
msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
wrmsr(MSR_C_STATE_LATENCY_CONTROL_2, msr);
/* C-state Interrupt Response Latency Control 3 - package C8 */
msr.hi = 0;
msr.lo = IRTL_VALID | IRTL_1024_NS |
C_STATE_LATENCY_CONTROL_3_LIMIT;
wrmsr(MSR_C_STATE_LATENCY_CONTROL_3, msr);
/* C-state Interrupt Response Latency Control 4 - package C9 */
msr.hi = 0;
msr.lo = IRTL_VALID | IRTL_1024_NS |
C_STATE_LATENCY_CONTROL_4_LIMIT;
wrmsr(MSR_C_STATE_LATENCY_CONTROL_4, msr);
/* C-state Interrupt Response Latency Control 5 - package C10 */
msr.hi = 0;
msr.lo = IRTL_VALID | IRTL_1024_NS |
C_STATE_LATENCY_CONTROL_5_LIMIT;
wrmsr(MSR_C_STATE_LATENCY_CONTROL_5, msr);
}
static void configure_thermal_target(void)
{
config_t *conf;
struct device *lapic;
msr_t msr;
/* Find pointer to CPU configuration */
lapic = dev_find_lapic(SPEEDSTEP_APIC_MAGIC);
if (!lapic || !lapic->chip_info)
return;
conf = lapic->chip_info;
/* Set TCC activation offset if supported */
msr = rdmsr(MSR_PLATFORM_INFO);
if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
msr = rdmsr(MSR_TEMPERATURE_TARGET);
msr.lo &= ~(0xf << 24); /* Bits 27:24 */
msr.lo |= (conf->tcc_offset & 0xf) << 24;
wrmsr(MSR_TEMPERATURE_TARGET, msr);
}
}
static void configure_misc(void)
{
msr_t msr;
msr = rdmsr(IA32_MISC_ENABLE);
msr.lo |= (1 << 0); /* Fast String enable */
msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */
msr.lo |= (1 << 16); /* Enhanced SpeedStep Enable */
wrmsr(IA32_MISC_ENABLE, msr);
/* Disable Thermal interrupts */
msr.lo = 0;
msr.hi = 0;
wrmsr(IA32_THERM_INTERRUPT, msr);
/* Enable package critical interrupt only */
msr.lo = 1 << 4;
msr.hi = 0;
wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
}
static void set_max_ratio(void)
{
msr_t msr, perf_ctl;
perf_ctl.hi = 0;
/* Check for configurable TDP option */
if (get_turbo_state() == TURBO_ENABLED) {
msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
perf_ctl.lo = (msr.lo & 0xff) << 8;
} else if (cpu_config_tdp_levels()) {
/* Set to nominal TDP ratio */
msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
perf_ctl.lo = (msr.lo & 0xff) << 8;
} else {
/* Platform Info bits 15:8 give max ratio */
msr = rdmsr(MSR_PLATFORM_INFO);
perf_ctl.lo = msr.lo & 0xff00;
}
wrmsr(IA32_PERF_CTL, perf_ctl);
printk(BIOS_DEBUG, "CPU: frequency set to %d\n",
((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
}
static void configure_mca(void)
{
msr_t msr;
int i;
int num_banks;
msr = rdmsr(IA32_MCG_CAP);
num_banks = msr.lo & 0xff;
msr.lo = msr.hi = 0;
/* TODO(adurbin): This should only be done on a cold boot. Also, some
* of these banks are core vs package scope. For now every CPU clears
* every bank. */
for (i = 0; i < num_banks; i++)
wrmsr(IA32_MC0_STATUS + (i * 4), msr);
}
/* All CPUs including BSP will run the following function. */
static void cpu_core_init(struct device *cpu)
{
/* Clear out pending MCEs */
configure_mca();
/* Enable the local CPU APICs */
enable_lapic_tpr();
setup_lapic();
/* Set virtualization based on Kconfig option */
set_vmx_and_lock();
/* Configure C States */
configure_c_states();
/* Configure Enhanced SpeedStep and Thermal Sensors */
configure_misc();
/* Thermal throttle activation offset */
configure_thermal_target();
/* Enable Direct Cache Access */
configure_dca_cap();
/* Set energy policy */
set_energy_perf_bias(ENERGY_POLICY_NORMAL);
/* Enable Turbo */
enable_turbo();
}
/* MP initialization support. */
static const void *microcode_patch;
static void pre_mp_init(void)
{
/* Setup MTRRs based on physical address size. */
x86_setup_mtrrs_with_detect();
x86_mtrr_check();
initialize_vr_config();
calibrate_24mhz_bclk();
configure_pch_power_sharing();
}
static int get_cpu_count(void)
{
msr_t msr;
int num_threads;
int num_cores;
msr = rdmsr(MSR_CORE_THREAD_COUNT);
num_threads = (msr.lo >> 0) & 0xffff;
num_cores = (msr.lo >> 16) & 0xffff;
printk(BIOS_DEBUG, "CPU has %u cores, %u threads enabled.\n",
num_cores, num_threads);
return num_threads;
}
static void get_microcode_info(const void **microcode, int *parallel)
{
microcode_patch = intel_microcode_find();
*microcode = microcode_patch;
*parallel = 1;
}
static void per_cpu_smm_trigger(void)
{
/* Relocate the SMM handler. */
smm_relocate();
/* After SMM relocation a 2nd microcode load is required. */
intel_microcode_load_unlocked(microcode_patch);
}
static void post_mp_init(void)
{
/* Set Max Ratio */
set_max_ratio();
/* Now that all APs have been relocated, as well as the BSP, let SMIs
* start flowing. */
global_smi_enable();
/* Lock down the SMRAM space. */
smm_lock();
}
static const struct mp_ops mp_ops = {
.pre_mp_init = pre_mp_init,
.get_cpu_count = get_cpu_count,
.get_smm_info = smm_info,
.get_microcode_info = get_microcode_info,
.pre_mp_smm_init = smm_initialize,
.per_cpu_smm_trigger = per_cpu_smm_trigger,
.relocation_handler = smm_relocation_handler,
.post_mp_init = post_mp_init,
};
void mp_init_cpus(struct bus *cpu_bus)
{
if (mp_init_with_smm(cpu_bus, &mp_ops))
printk(BIOS_ERR, "MP initialization failure.\n");
}
static struct device_operations cpu_dev_ops = {
.init = cpu_core_init,
};
static const struct cpu_device_id cpu_table[] = {
{ X86_VENDOR_INTEL, CPUID_HASWELL_ULT },
{ X86_VENDOR_INTEL, CPUID_BROADWELL_C0 },
{ X86_VENDOR_INTEL, CPUID_BROADWELL_D0 },
{ X86_VENDOR_INTEL, CPUID_BROADWELL_E0 },
{ 0, 0 },
};
static const struct cpu_driver driver __cpu_driver = {
.ops = &cpu_dev_ops,
.id_table = cpu_table,
};

src/soc/intel/broadwell/cpu/smmrelocate.c

@@ -1,264 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <types.h>
#include <string.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ops.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/em64t101_save_state.h>
#include <cpu/intel/smm_reloc.h>
#include <console/console.h>
#include <smp/node.h>
#include <soc/cpu.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/systemagent.h>
static void update_save_state(int cpu, uintptr_t curr_smbase,
uintptr_t staggered_smbase,
struct smm_relocation_params *relo_params)
{
u32 smbase;
u32 iedbase;
/* The relocated handler runs with all CPUs concurrently. Therefore
* stagger the entry points adjusting SMBASE downwards by save state
* size * CPU num. */
smbase = staggered_smbase;
iedbase = relo_params->ied_base;
printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
smbase, iedbase);
/* All threads need to set IEDBASE and SMBASE to the relocated
* handler region. However, the save state location depends on the
* smm_save_state_in_msrs field in the relocation parameters. If
* smm_save_state_in_msrs is non-zero then the CPUs are relocating
* the SMM handler in parallel, and each CPU's save state area is
* located in their respective MSR space. If smm_save_state_in_msrs
* is zero then the SMM relocation is happening serially so the
* save state is at the same default location for all CPUs. */
if (relo_params->smm_save_state_in_msrs) {
msr_t smbase_msr;
msr_t iedbase_msr;
smbase_msr.lo = smbase;
smbase_msr.hi = 0;
/* According to the BWG, the IEDBASE MSR is in bits 63:32. It's
* not clear why it differs from the SMBASE MSR. */
iedbase_msr.lo = 0;
iedbase_msr.hi = iedbase;
wrmsr(SMBASE_MSR, smbase_msr);
wrmsr(IEDBASE_MSR, iedbase_msr);
} else {
em64t101_smm_state_save_area_t *save_state;
save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
sizeof(*save_state));
save_state->smbase = smbase;
save_state->iedbase = iedbase;
}
}
/* Returns 1 if SMM MSR save state was set. */
static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
{
msr_t smm_mca_cap;
smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
msr_t smm_feature_control;
smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
smm_feature_control.hi = 0;
smm_feature_control.lo |= SMM_CPU_SAVE_EN;
wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
relo_params->smm_save_state_in_msrs = 1;
}
return relo_params->smm_save_state_in_msrs;
}
/* The relocation work is actually performed in SMM context, but the code
* resides in the ramstage module. This occurs by trampolining from the default
* SMRAM entry point to here. */
void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
uintptr_t staggered_smbase)
{
msr_t mtrr_cap;
struct smm_relocation_params *relo_params = &smm_reloc_params;
printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);
/* Determine if the processor supports saving state in MSRs. If so,
* enable it before the non-BSPs run so that SMM relocation can occur
* in parallel in the non-BSP CPUs. */
if (cpu == 0) {
/* If smm_save_state_in_msrs is 1 then that means this is the
* 2nd time through the relocation handler for the BSP.
* Parallel SMM handler relocation is taking place. However,
* it is desired to access other CPUs save state in the real
* SMM handler. Therefore, disable the SMM save state in MSRs
* feature. */
if (relo_params->smm_save_state_in_msrs) {
msr_t smm_feature_control;
smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
} else if (bsp_setup_msr_save_state(relo_params))
/* Just return from relocation handler if MSR save
* state is enabled. In that case the BSP will come
* back into the relocation handler to set up the new
* SMBASE, as well as disabling SMM save state in MSRs. */
return;
}
/* Make appropriate changes to the save state map. */
update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);
/* Write PRMRR and SMRR MSRs based on indicated support. */
mtrr_cap = rdmsr(MTRR_CAP_MSR);
if (mtrr_cap.lo & SMRR_SUPPORTED)
write_smrr(relo_params);
if (mtrr_cap.lo & PRMRR_SUPPORTED) {
write_prmrr(relo_params);
/* UNCORE_PRMRR msrs are package level. Therefore, only
* configure these MSRs on the BSP. */
if (cpu == 0)
write_uncore_prmrr(relo_params);
}
}
static void fill_in_relocation_params(struct smm_relocation_params *params)
{
uintptr_t tseg_base;
size_t tseg_size;
u32 prmrr_base;
u32 prmrr_size;
int phys_bits;
/* All range registers are aligned to 4KiB */
const u32 rmask = ~((1 << 12) - 1);
/* Some of the range registers are dependent on the number of physical
* address bits supported. */
phys_bits = cpuid_eax(0x80000008) & 0xff;
/* The range bounded by the TSEGMB and BGSM registers encompasses the
* SMRAM range as well as the IED range. However, the SMRAM available
* to the handler is 4MiB since the IEDRAM lives at TSEGMB + 4MiB.
*/
smm_region(&tseg_base, &tseg_size);
/* SMRR has 32-bits of valid address aligned to 4KiB. */
params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
params->smrr_base.hi = 0;
params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
params->smrr_mask.hi = 0;
smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);
/* The PRMRR and UNCORE_PRMRR are at IEDBASE + 2MiB */
prmrr_base = (params->ied_base + (2 << 20)) & rmask;
prmrr_size = params->ied_size - (2 << 20);
/* PRMRR has 46 bits of valid address aligned to 4KiB. It's dependent
* on the number of physical address bits supported. */
params->prmrr_base.lo = prmrr_base | MTRR_TYPE_WRBACK;
params->prmrr_base.hi = 0;
params->prmrr_mask.lo = (~(prmrr_size - 1) & rmask)
| MTRR_PHYS_MASK_VALID;
params->prmrr_mask.hi = (1 << (phys_bits - 32)) - 1;
/* UNCORE_PRMRR has 39 bits of valid address aligned to 4KiB. */
params->uncore_prmrr_base.lo = prmrr_base;
params->uncore_prmrr_base.hi = 0;
params->uncore_prmrr_mask.lo = (~(prmrr_size - 1) & rmask) |
MTRR_PHYS_MASK_VALID;
params->uncore_prmrr_mask.hi = (1 << (39 - 32)) - 1;
}
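A worked example of the SMRR arithmetic, assuming a hypothetical 8 MiB TSEG at 0x7f800000:

#include <stdio.h>
#include <stdint.h>

#define MTRR_TYPE_WRBACK	6
#define MTRR_PHYS_MASK_VALID	(1 << 11)

int main(void)
{
	uint32_t tseg_base = 0x7f800000;	/* hypothetical TSEGMB */
	uint32_t tseg_size = 8 << 20;
	const uint32_t rmask = ~((1 << 12) - 1);	/* 4 KiB alignment */

	/* Same math as fill_in_relocation_params(). */
	uint32_t smrr_base = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	uint32_t smrr_mask = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;

	/* Prints 0x7f800006 (write-back) and 0xff800800 (valid). */
	printf("SMRR base 0x%08x, mask 0x%08x\n", smrr_base, smrr_mask);
	return 0;
}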
static void setup_ied_area(struct smm_relocation_params *params)
{
char *ied_base;
struct ied_header ied = {
.signature = "INTEL RSVD",
.size = params->ied_size,
.reserved = {0},
};
ied_base = (void *)params->ied_base;
/* Place IED header at IEDBASE. */
memcpy(ied_base, &ied, sizeof(ied));
/* Zero out 32KiB at IEDBASE + 1MiB */
memset(ied_base + (1 << 20), 0, (32 << 10));
}
void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
size_t *smm_save_state_size)
{
printk(BIOS_DEBUG, "Setting up SMI for CPU\n");
fill_in_relocation_params(&smm_reloc_params);
smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
setup_ied_area(&smm_reloc_params);
*smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
}
void smm_initialize(void)
{
/* Clear the SMM state in the southbridge. */
smm_southbridge_clear_state();
/*
* Run the relocation handler on the BSP to check for and set up
* parallel SMM relocation.
*/
smm_initiate_relocation();
if (smm_reloc_params.smm_save_state_in_msrs)
printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
}
/* The default SMM entry can happen in parallel or serially. If the
* default SMM entry is done in parallel, the BSP has already set up
* saving state in each CPU's MSRs. At least one save state size
* is required for the initial SMM entry for the BSP to determine if
* parallel SMM relocation is even feasible. */
void smm_relocate(void)
{
/*
* If smm_save_state_in_msrs is non-zero then parallel SMM relocation
* shall take place. Run the relocation handler a second time on the
* BSP to do the final move. For APs, a relocation handler always
* needs to be run.
*/
if (smm_reloc_params.smm_save_state_in_msrs)
smm_initiate_relocation_parallel();
else if (!boot_cpu())
smm_initiate_relocation();
}
void smm_lock(void)
{
struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);
/* LOCK the SMM memory window and enable normal SMM.
* After running this function, only a full reset can
* make the SMM registers writable again.
*/
printk(BIOS_DEBUG, "Locking SMM.\n");
pci_write_config8(sa_dev, SMRAM, D_LCK | G_SMRAME | C_BASE_SEG);
}

src/soc/intel/broadwell/cpu/tsc_freq.c

@@ -1,14 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <cpu/x86/msr.h>
#include <cpu/x86/tsc.h>
#include <soc/cpu.h>
#include <soc/msr.h>
unsigned long tsc_freq_mhz(void)
{
msr_t platform_info;
platform_info = rdmsr(MSR_PLATFORM_INFO);
return CPU_BCLK * ((platform_info.lo >> 8) & 0xff);
}
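Bits 15:8 of MSR_PLATFORM_INFO hold the maximum non-turbo ratio, so a hypothetical value of 0x18 (24) gives a 2400 MHz TSC:

#include <stdio.h>

#define CPU_BCLK 100	/* MHz */

int main(void)
{
	unsigned int platform_info_lo = 0x1800;	/* hypothetical MSR value */
	printf("TSC: %u MHz\n", CPU_BCLK * ((platform_info_lo >> 8) & 0xff));
	return 0;
}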

src/soc/intel/broadwell/include/soc/cpu.h

@@ -1,57 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _BROADWELL_CPU_H_
#define _BROADWELL_CPU_H_
#include <arch/cpu.h>
#include <device/device.h>
#include <stdint.h>
/* CPU types */
#define HASWELL_FAMILY_ULT 0x40650
#define BROADWELL_FAMILY_ULT 0x306d0
/* Supported CPUIDs */
#define CPUID_HASWELL_A0 0x306c1
#define CPUID_HASWELL_B0 0x306c2
#define CPUID_HASWELL_C0 0x306c3
#define CPUID_HASWELL_ULT_B0 0x40650
#define CPUID_HASWELL_ULT 0x40651
#define CPUID_HASWELL_HALO 0x40661
#define CPUID_BROADWELL_C0 0x306d2
#define CPUID_BROADWELL_D0 0x306d3
#define CPUID_BROADWELL_E0 0x306d4
/* CPU bus clock is fixed at 100MHz */
#define CPU_BCLK 100
/* Latency times in units of 1024ns. */
#define C_STATE_LATENCY_CONTROL_0_LIMIT 0x42
#define C_STATE_LATENCY_CONTROL_1_LIMIT 0x73
#define C_STATE_LATENCY_CONTROL_2_LIMIT 0x91
#define C_STATE_LATENCY_CONTROL_3_LIMIT 0xe4
#define C_STATE_LATENCY_CONTROL_4_LIMIT 0x145
#define C_STATE_LATENCY_CONTROL_5_LIMIT 0x1ef
#define C_STATE_LATENCY_MICRO_SECONDS(limit, base) \
(((1 << ((base)*5)) * (limit)) / 1000)
#define C_STATE_LATENCY_FROM_LAT_REG(reg) \
C_STATE_LATENCY_MICRO_SECONDS(C_STATE_LATENCY_CONTROL_ ##reg## _LIMIT, \
(IRTL_1024_NS >> 10))
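Worked example: for latency control register 0, the 0x42 limit is 66 units of 1024 ns, which the macro truncates to 67 microseconds:

#include <stdio.h>

#define IRTL_1024_NS (2 << 10)
#define C_STATE_LATENCY_CONTROL_0_LIMIT 0x42

#define C_STATE_LATENCY_MICRO_SECONDS(limit, base) \
	(((1 << ((base) * 5)) * (limit)) / 1000)

int main(void)
{
	/* 0x42 = 66; 66 * 1024 ns = 67584 ns, i.e. 67 us after division. */
	printf("package C3 latency: %d us\n",
	       C_STATE_LATENCY_MICRO_SECONDS(C_STATE_LATENCY_CONTROL_0_LIMIT,
					     IRTL_1024_NS >> 10));
	return 0;
}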
/* Configure power limits for turbo mode */
void set_power_limits(u8 power_limit_1_time);
int cpu_config_tdp_levels(void);
/* CPU identification */
static inline u32 cpu_family_model(void)
{
return cpuid_eax(1) & 0x0fff0ff0;
}
static inline u32 cpu_stepping(void)
{
return cpuid_eax(1) & 0xf;
}
#endif
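The 0x0fff0ff0 mask in cpu_family_model() strips the stepping and reserved bits from the CPUID signature, which is how the full CPUIDs above collapse to the family values. A sketch:

#include <stdio.h>

#define BROADWELL_FAMILY_ULT	0x306d0
#define CPUID_BROADWELL_E0	0x306d4

int main(void)
{
	unsigned int sig = CPUID_BROADWELL_E0;	/* stand-in for cpuid_eax(1) */
	printf("family/model 0x%05x, stepping 0x%x\n",
	       sig & 0x0fff0ff0, sig & 0xf);	/* 0x306d0, 0x4 */
	return 0;
}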

src/soc/intel/broadwell/include/soc/msr.h

@@ -1,78 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _BROADWELL_MSR_H_
#define _BROADWELL_MSR_H_
#define MSR_CORE_THREAD_COUNT 0x35
#define MSR_PLATFORM_INFO 0xce
#define PLATFORM_INFO_SET_TDP (1 << 29)
#define MSR_PKG_CST_CONFIG_CONTROL 0xe2
#define MSR_PMG_IO_CAPTURE_BASE 0xe4
#define MSR_FEATURE_CONFIG 0x13c
#define SMM_MCA_CAP_MSR 0x17d
#define SMM_CPU_SVRSTR_BIT 57
#define SMM_CPU_SVRSTR_MASK (1 << (SMM_CPU_SVRSTR_BIT - 32))
#define MSR_FLEX_RATIO 0x194
#define FLEX_RATIO_LOCK (1 << 20)
#define FLEX_RATIO_EN (1 << 16)
#define MSR_MISC_PWR_MGMT 0x1aa
#define MISC_PWR_MGMT_EIST_HW_DIS (1 << 0)
#define MSR_TURBO_RATIO_LIMIT 0x1ad
#define MSR_TEMPERATURE_TARGET 0x1a2
#define MSR_PRMRR_PHYS_BASE 0x1f4
#define MSR_PRMRR_PHYS_MASK 0x1f5
#define MSR_POWER_CTL 0x1fc
#define MSR_LT_LOCK_MEMORY 0x2e7
#define MSR_UNCORE_PRMRR_PHYS_BASE 0x2f4
#define MSR_UNCORE_PRMRR_PHYS_MASK 0x2f5
#define SMM_FEATURE_CONTROL_MSR 0x4e0
#define SMM_CPU_SAVE_EN (1 << 1)
#define MSR_C_STATE_LATENCY_CONTROL_0 0x60a
#define MSR_C_STATE_LATENCY_CONTROL_1 0x60b
#define MSR_C_STATE_LATENCY_CONTROL_2 0x60c
#define MSR_C_STATE_LATENCY_CONTROL_3 0x633
#define MSR_C_STATE_LATENCY_CONTROL_4 0x634
#define MSR_C_STATE_LATENCY_CONTROL_5 0x635
#define IRTL_VALID (1 << 15)
#define IRTL_1_NS (0 << 10)
#define IRTL_32_NS (1 << 10)
#define IRTL_1024_NS (2 << 10)
#define IRTL_32768_NS (3 << 10)
#define IRTL_1048576_NS (4 << 10)
#define IRTL_33554432_NS (5 << 10)
#define IRTL_RESPONSE_MASK (0x3ff)
#define MSR_COUNTER_24_MHZ 0x637
/* Long duration in low dword, short duration in high dword */
#define MSR_PKG_POWER_LIMIT 0x610
#define PKG_POWER_LIMIT_MASK 0x7fff
#define PKG_POWER_LIMIT_EN (1 << 15)
#define PKG_POWER_LIMIT_CLAMP (1 << 16)
#define PKG_POWER_LIMIT_TIME_SHIFT 17
#define PKG_POWER_LIMIT_TIME_MASK 0x7f
#define MSR_VR_CURRENT_CONFIG 0x601
#define MSR_VR_MISC_CONFIG 0x603
#define MSR_PKG_POWER_SKU_UNIT 0x606
#define MSR_PKG_POWER_SKU 0x614
#define MSR_DDR_RAPL_LIMIT 0x618
#define MSR_VR_MISC_CONFIG2 0x636
#define MSR_PP0_POWER_LIMIT 0x638
#define MSR_PP1_POWER_LIMIT 0x640
#define MSR_CONFIG_TDP_NOMINAL 0x648
#define MSR_CONFIG_TDP_LEVEL1 0x649
#define MSR_CONFIG_TDP_LEVEL2 0x64a
#define MSR_CONFIG_TDP_CONTROL 0x64b
#define MSR_TURBO_ACTIVATION_RATIO 0x64c
/* SMM save state MSRs */
#define SMBASE_MSR 0xc20
#define IEDBASE_MSR 0xc22
/* MTRR_CAP_MSR bits */
#define SMRR_SUPPORTED (1<<11)
#define PRMRR_SUPPORTED (1<<12)
#endif