/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <commonlib/sort.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <intelblocks/cpulib.h>
#include <soc/pci_devs.h>
#include <soc/msr.h>
#include <soc/util.h>

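/*
 * Fill bus[0..5] with the PCI bus number of each IIO stack, as read from the
 * UBOX DECS bus number CSRs at offsets 0xcc and 0xd0. The caller must provide
 * an array with room for six entries.
 */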
void get_stack_busnos(uint32_t *bus)
{
	uint32_t reg1, reg2;

	reg1 = pci_mmio_read_config32(PCI_DEV(UBOX_DECS_BUS, UBOX_DECS_DEV, UBOX_DECS_FUNC),
		0xcc);
	reg2 = pci_mmio_read_config32(PCI_DEV(UBOX_DECS_BUS, UBOX_DECS_DEV, UBOX_DECS_FUNC),
		0xd0);

	for (int i = 0; i < 4; ++i)
		bus[i] = ((reg1 >> (i * 8)) & 0xff);
	for (int i = 0; i < 2; ++i)
		bus[4 + i] = ((reg2 >> (i * 8)) & 0xff);
}

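/*
 * Unlock the legacy PAM regions by programming the SAD_ALL PAM0123/PAM456
 * CSRs so that accesses to those ranges are routed to DRAM.
 */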
void unlock_pam_regions(void)
{
	uint32_t bus1 = 0;
	uint32_t pam0123_unlock_dram = 0x33333330;
	uint32_t pam456_unlock_dram = 0x00333333;

	get_cpubusnos(NULL, &bus1, NULL, NULL);
	pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
		SAD_ALL_PAM0123_CSR, pam0123_unlock_dram);
	pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
		SAD_ALL_PAM456_CSR, pam456_unlock_dram);

	uint32_t reg1 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
		SAD_ALL_FUNC), SAD_ALL_PAM0123_CSR);
	uint32_t reg2 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
		SAD_ALL_FUNC), SAD_ALL_PAM456_CSR);
	printk(BIOS_DEBUG, "%s:%s pam0123_csr: 0x%x, pam456_csr: 0x%x\n",
		__FILE__, __func__, reg1, reg2);
}

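/*
 * Read the UBOX DECS CPUBUSNO CSR and return its four bus number fields.
 * Any of the output pointers may be NULL if the caller is not interested.
 */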
void get_cpubusnos(uint32_t *bus0, uint32_t *bus1, uint32_t *bus2, uint32_t *bus3)
{
	uint32_t bus = pci_io_read_config32(PCI_DEV(UBOX_DECS_BUS, UBOX_DECS_DEV,
		UBOX_DECS_FUNC), UBOX_DECS_CPUBUSNO_CSR);

	if (bus0)
		*bus0 = (bus & 0xff);
	if (bus1)
		*bus1 = (bus >> 8) & 0xff;
	if (bus2)
		*bus2 = (bus >> 16) & 0xff;
	if (bus3)
		*bus3 = (bus >> 24) & 0xff;
}

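/*
 * Return the Protected Processor Inventory Number (PPIN) of the executing CPU.
 * Returns 0 if the PPIN capability is not supported or access is locked, and
 * clears the MSR_PPIN_CTL enable bit again after the value has been read.
 */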
msr_t read_msr_ppin(void)
{
	msr_t ppin = {0};
	msr_t msr;

	/* If MSR_PLATFORM_INFO PPIN_CAP is 0, PPIN capability is not supported */
	msr = rdmsr(MSR_PLATFORM_INFO);
	if ((msr.lo & MSR_PPIN_CAP) == 0) {
		printk(BIOS_ERR, "MSR_PPIN_CAP is 0, PPIN is not supported\n");
		return ppin;
	}

	/* Access to MSR_PPIN is permitted only if MSR_PPIN_CTL LOCK is 0 and ENABLE is 1 */
	msr = rdmsr(MSR_PPIN_CTL);
	if (msr.lo & MSR_PPIN_CTL_LOCK) {
		printk(BIOS_ERR, "MSR_PPIN_CTL_LOCK is 1, PPIN access is not allowed\n");
		return ppin;
	}

	if ((msr.lo & MSR_PPIN_CTL_ENABLE) == 0) {
		/* Set MSR_PPIN_CTL ENABLE to 1 */
		msr.lo |= MSR_PPIN_CTL_ENABLE;
		wrmsr(MSR_PPIN_CTL, msr);
	}
	ppin = rdmsr(MSR_PPIN);
	/* Set enable to 0 after reading MSR_PPIN */
	msr.lo &= ~MSR_PPIN_CTL_ENABLE;
	wrmsr(MSR_PPIN_CTL, msr);
	return ppin;
}

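/* Number of CPU threads in one package, as reported by cpu_read_topology() */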
int get_threads_per_package(void)
{
	unsigned int core_count, thread_count;
	cpu_read_topology(&core_count, &thread_count);
	return thread_count;
}

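/* Total number of CPU threads across all sockets */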
int get_platform_thread_count(void)
{
	return soc_get_num_cpus() * get_threads_per_package();
}

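/* Locate the FSP IIO Universal Data Structure (UDS) HOB and return a pointer to it */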
const IIO_UDS *get_iio_uds(void)
{
	size_t hob_size;
	const IIO_UDS *hob;
	const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID;

	hob = fsp_find_extension_hob_by_guid(fsp_hob_iio_universal_data_guid, &hob_size);
	assert(hob != NULL && hob_size != 0);
	return hob;
}

unsigned int soc_get_num_cpus(void)
{
	/* The numCpus field in the FSP IIO UDS HOB is actually the socket count */
	return get_iio_uds()->SystemStatus.numCpus;
}

#if ENV_RAMSTAGE /* Setting devtree variables is only allowed in ramstage. */
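/*
 * Use CPUID leaf 0xb (extended topology enumeration) to determine how many
 * low bits of the APIC ID encode the thread (SMT) level and how many encode
 * the core level.
 */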
static void get_core_thread_bits(uint32_t *core_bits, uint32_t *thread_bits)
{
	register int ecx;
	struct cpuid_result cpuid_regs;

	/* get max index of CPUID */
	cpuid_regs = cpuid(0);
	assert(cpuid_regs.eax >= 0xb); /* cpuid_regs.eax is max input value for cpuid */

	*thread_bits = *core_bits = 0;
	ecx = 0;
	while (1) {
		cpuid_regs = cpuid_ext(0xb, ecx);
		if (ecx == 0) {
			*thread_bits = (cpuid_regs.eax & 0x1f);
		} else {
			*core_bits = (cpuid_regs.eax & 0x1f) - *thread_bits;
			break;
		}
		ecx++;
	}
}

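/* Decompose an APIC ID into package, core and thread numbers using the given bit widths */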
static void get_cpu_info_from_apicid(uint32_t apicid, uint32_t core_bits, uint32_t thread_bits,
		uint8_t *package, uint8_t *core, uint8_t *thread)
{
	if (package != NULL)
		*package = (apicid >> (thread_bits + core_bits));
	if (core != NULL)
		*core = (uint32_t)((apicid >> thread_bits) & ~((~0) << core_bits));
	if (thread != NULL)
		*thread = (uint32_t)(apicid & ~((~0) << thread_bits));
}

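/*
 * Renumber the APIC devices in the devicetree: sort the APIC IDs in ascending
 * order, group them by thread (all first threads before all second threads)
 * and set each CPU's node_id to the package derived from its APIC ID.
 */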
void xeonsp_init_cpu_config(void)
{
	struct device *dev;
	int apic_ids[CONFIG_MAX_CPUS] = {0}, apic_ids_by_thread[CONFIG_MAX_CPUS] = {0};
	int num_apics = 0;
	uint32_t core_bits, thread_bits;
	unsigned int core_count, thread_count;
	unsigned int num_sockets;

	/*
	 * Sort APIC IDs in ascending order to identify the APIC ID ranges of
	 * each NUMA domain.
	 */
	for (dev = all_devices; dev; dev = dev->next) {
		if ((dev->path.type != DEVICE_PATH_APIC) ||
			(dev->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) {
			continue;
		}
		if (!dev->enabled)
			continue;
		if (num_apics >= ARRAY_SIZE(apic_ids))
			break;
		apic_ids[num_apics++] = dev->path.apic.apic_id;
	}
	if (num_apics > 1)
		bubblesort(apic_ids, num_apics, NUM_ASCENDING);

	num_sockets = soc_get_num_cpus();
	cpu_read_topology(&core_count, &thread_count);
	assert(num_apics == (num_sockets * thread_count));

	/* Sort them by thread, i.e. all cores with thread 0 and then thread 1 */
	int index = 0;
	for (int id = 0; id < num_apics; ++id) {
		int apic_id = apic_ids[id];
		if (apic_id & 0x1) { /* 2nd thread */
			apic_ids_by_thread[index + (num_apics / 2) - 1] = apic_id;
		} else { /* 1st thread */
			apic_ids_by_thread[index++] = apic_id;
		}
	}

	/* Update apic_id and node_id in sorted order */
	num_apics = 0;
	get_core_thread_bits(&core_bits, &thread_bits);
	for (dev = all_devices; dev; dev = dev->next) {
		uint8_t package;

		if ((dev->path.type != DEVICE_PATH_APIC) ||
			(dev->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) {
			continue;
		}
		if (!dev->enabled)
			continue;
		if (num_apics >= ARRAY_SIZE(apic_ids))
			break;
		dev->path.apic.apic_id = apic_ids_by_thread[num_apics];
		get_cpu_info_from_apicid(dev->path.apic.apic_id, core_bits, thread_bits,
			&package, NULL, NULL);
		dev->path.apic.node_id = package;
		printk(BIOS_DEBUG, "CPU %d apic_id: 0x%x (%d), node_id: 0x%x\n",
			num_apics, dev->path.apic.apic_id,
			dev->path.apic.apic_id, dev->path.apic.node_id);

		++num_apics;
	}
}
#endif