cpu/x86/smm: Add PCI resource store functionality

In certain cases, data within protected memory areas like SMRAM could
be leaked or modified if an attacker remaps PCI BARs to point within
that area. Add support to the existing SMM runtime to allow storing
PCI resources in SMRAM and then later retrieving them.

BRANCH=guybrush
BUG=b:186792595
TEST=builds

Signed-off-by: Robert Zieba <robertzieba@google.com>
Change-Id: I23fb1e935dd1b89f1cc5c834cc2025f0fe5fda37
Reviewed-on: https://review.coreboot.org/c/coreboot/+/67931
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Karthik Ramasubramanian <kramasub@google.com>
Robert Zieba 2022-09-07 16:25:15 -06:00 committed by Martin L Roth
parent f1a4cffc88
commit ac8c378777
7 changed files with 129 additions and 0 deletions

View File

@@ -168,6 +168,19 @@ config SMM_LAPIC_REMAP_MITIGATION
		|| NORTHBRIDGE_INTEL_E7505 || NORTHBRIDGE_INTEL_IRONLAKE
	default n

config SMM_PCI_RESOURCE_STORE
	bool
	default n
	help
	  This option enables support for storing PCI resources in SMRAM so
	  SMM can tell if they've been altered.

config SMM_PCI_RESOURCE_STORE_NUM_SLOTS
	int
	default 8
	help
	  Number of slots available to store PCI BARs in SMRAM.

config X86_AMD_FIXED_MTRRS
	bool
	default n

View File

@@ -1,6 +1,9 @@
## SPDX-License-Identifier: GPL-2.0-only

ramstage-y += smm_module_loader.c

ramstage-$(CONFIG_SMM_PCI_RESOURCE_STORE) += pci_resource_store.c
smm-$(CONFIG_SMM_PCI_RESOURCE_STORE) += pci_resource_store.c

ifeq ($(CONFIG_ARCH_RAMSTAGE_X86_32),y)
$(eval $(call create_class_compiler,smm,x86_32))

View File

@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/x86/smm.h>
#include <device/device.h>
#include <device/pci_def.h>
#include <device/pci_ops.h>

void smm_pci_get_stored_resources(const volatile struct smm_pci_resource_info **out_slots,
				  size_t *out_size)
{
	*out_slots = smm_get_pci_resource_store();
	*out_size = CONFIG_SMM_PCI_RESOURCE_STORE_NUM_SLOTS;
}

bool smm_pci_resource_store_fill_resources(struct smm_pci_resource_info *slots, size_t num_slots,
					   const struct device **devices, size_t num_devices)
{
	size_t i_slot = 0;

	for (size_t i_dev = 0; i_dev < num_devices; i_dev++) {
		if (i_slot >= num_slots) {
			printk(BIOS_ERR, "Failed to store all PCI resources, number of devices exceeds %zd slots\n",
			       num_slots);
			return false;
		}

		if (!is_pci(devices[i_dev])) {
			printk(BIOS_WARNING, "Skipping storing PCI resources for device at index %zd, not a PCI device\n",
			       i_dev);
			continue;
		}

		pci_devfn_t pci_addr = PCI_BDF(devices[i_dev]);

		slots[i_slot].pci_addr = pci_addr;
		slots[i_slot].class_device = PCI_CLASS_GET_DEVICE(devices[i_dev]->class);
		slots[i_slot].class_prog = PCI_CLASS_GET_PROG(devices[i_dev]->class);

		/* Use the resource list to get our BARs. */
		if (!devices[i_dev]->resource_list)
			continue;

		size_t i_res = 0;
		for (const struct resource *res = devices[i_dev]->resource_list; res != NULL;
		     res = res->next) {
			slots[i_slot].resources[i_res] = *res;
			slots[i_slot].resources[i_res].next = NULL;

			if (i_res > 0)
				slots[i_slot].resources[i_res - 1].next =
					(struct resource *)&slots[i_slot].resources[i_res];

			if (++i_res >= SMM_PCI_RESOURCE_STORE_NUM_RESOURCES) {
				if (res->next)
					printk(BIOS_WARNING, "Number of PCI resources exceeds supported storage count\n");
				break;
			}
		}

		i_slot++;
	}

	return true;
}

void __weak smm_mainboard_pci_resource_store_init(struct smm_pci_resource_info *slots,
						  size_t size)
{
}

void smm_pci_resource_store_init(struct smm_runtime *smm_runtime)
{
	smm_mainboard_pci_resource_store_init(&smm_runtime->pci_resources[0],
					      CONFIG_SMM_PCI_RESOURCE_STORE_NUM_SLOTS);
}
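
The weak smm_mainboard_pci_resource_store_init() above is the hook a platform overrides to decide which devices get snapshotted. As a rough sketch of how such an override could look (none of this is part of the commit; PCIE_GPP_A_DEVFN and the single-device list are made-up placeholders), a board or SoC would collect device pointers in ramstage and hand them to smm_pci_resource_store_fill_resources():

/* Hypothetical platform override, for illustration only. */
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <device/pci_def.h>
#include <types.h>

#define PCIE_GPP_A_DEVFN PCI_DEVFN(1, 1) /* placeholder devfn, not from this commit */

void smm_mainboard_pci_resource_store_init(struct smm_pci_resource_info *slots, size_t size)
{
	/* Real code should verify pcidev_path_on_root() did not return NULL. */
	const struct device *devices[] = {
		pcidev_path_on_root(PCIE_GPP_A_DEVFN),
	};

	smm_pci_resource_store_fill_resources(slots, size, devices, ARRAY_SIZE(devices));
}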

View File

@@ -195,6 +195,13 @@ asmlinkage void smm_handler_start(void *arg)
	southbridge_smi_set_eos();
}

#if CONFIG(SMM_PCI_RESOURCE_STORE)
const volatile struct smm_pci_resource_info *smm_get_pci_resource_store(void)
{
	return &smm_runtime.pci_resources[0];
}
#endif

RMODULE_ENTRY(smm_handler_start);

/* Provide a default implementation for all weak handlers so that relocation

View File

@@ -343,6 +343,9 @@ static void setup_smihandler_params(struct smm_runtime *mod_params,
		mod_params->smm_log_level = mainboard_set_smm_log_level();
	else
		mod_params->smm_log_level = 0;

	if (CONFIG(SMM_PCI_RESOURCE_STORE))
		smm_pci_resource_store_init(mod_params);
}

static void print_region(const char *name, const struct region region)

View File

@@ -5,6 +5,8 @@
#include <arch/cpu.h>
#include <commonlib/region.h>
#include <device/pci_type.h>
#include <device/resource.h>
#include <types.h>

#define SMM_DEFAULT_BASE 0x30000

@@ -29,6 +31,8 @@
#define APM_CNT_ELOG_GSMI 0xef
#define APM_STS 0xb3

#define SMM_PCI_RESOURCE_STORE_NUM_RESOURCES 6

/* Send cmd to APM_CNT with HAVE_SMI_HANDLER checking. */
int apm_control(u8 cmd);
u8 apm_get_apmc(void);

@@ -58,6 +62,13 @@ void smm_soc_exit(void);
extern unsigned char _binary_smm_start[];
extern unsigned char _binary_smm_end[];

struct smm_pci_resource_info {
	pci_devfn_t pci_addr;
	uint16_t class_device;
	uint8_t class_prog;
	struct resource resources[SMM_PCI_RESOURCE_STORE_NUM_RESOURCES];
};

struct smm_runtime {
	u32 smbase;
	u32 smm_size;

@@ -66,6 +77,9 @@ struct smm_runtime {
	u32 gnvs_ptr;
	u32 cbmemc_size;
	void *cbmemc;
#if CONFIG(SMM_PCI_RESOURCE_STORE)
	struct smm_pci_resource_info pci_resources[CONFIG_SMM_PCI_RESOURCE_STORE_NUM_SLOTS];
#endif
	uintptr_t save_state_top[CONFIG_MAX_CPUS];
	int smm_log_level;
} __packed;

@@ -198,4 +212,16 @@ uint32_t smm_revision(void);
   On AMD systems it is sometimes configurable. */
uint16_t pm_acpi_smi_cmd_port(void);

const volatile struct smm_pci_resource_info *smm_get_pci_resource_store(void);

void smm_pci_get_stored_resources(const volatile struct smm_pci_resource_info **out_slots,
				  size_t *out_size);

/* Weak handler function to store PCI BARs. */
void smm_mainboard_pci_resource_store_init(struct smm_pci_resource_info *slots, size_t size);

/* Helper function to fill BARs from an array of device pointers. */
bool smm_pci_resource_store_fill_resources(struct smm_pci_resource_info *slots, size_t num_slots,
					   const struct device **devices, size_t num_devices);

void smm_pci_resource_store_init(struct smm_runtime *smm_runtime);

#endif /* CPU_X86_SMM_H */
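
Taken together, these declarations give SMM a read path back to the ramstage snapshot: smm_get_pci_resource_store() exposes the array kept in the SMM runtime and smm_pci_get_stored_resources() wraps it with the slot count. A minimal sketch of the retrieval side (assuming 32-bit memory BARs and treating the first stored resource as BAR0; the helper name is invented and not part of this change) could look like:

/* Illustrative only: check that BAR0 of each stored device still matches
   the copy kept in SMRAM before an SMI handler relies on it. */
#include <cpu/x86/smm.h>
#include <device/pci_def.h>
#include <device/pci_ops.h>

static bool smm_stored_bars_still_valid(void)
{
	const volatile struct smm_pci_resource_info *slots;
	size_t num_slots;

	smm_pci_get_stored_resources(&slots, &num_slots);

	for (size_t i = 0; i < num_slots; i++) {
		const uint64_t stored_base = slots[i].resources[0].base;

		/* Skip slots that were never filled in (assumed to be zeroed). */
		if (!slots[i].pci_addr || !stored_base)
			continue;

		const uint32_t cur = pci_read_config32(slots[i].pci_addr, PCI_BASE_ADDRESS_0);

		if ((cur & PCI_BASE_ADDRESS_MEM_MASK) != stored_base)
			return false; /* BAR was remapped after ramstage stored it */
	}

	return true;
}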

View File

@@ -590,4 +590,8 @@
#define PCI_DEV2DEVFN(sdev) (((sdev)>>12) & 0xff)
#define PCI_DEV2SEGBUS(sdev) (((sdev)>>20) & 0xfff)

/* Fields from within the device's class value. */
#define PCI_CLASS_GET_DEVICE(c) (c >> 8)
#define PCI_CLASS_GET_PROG(c) (c & 0xff)

#endif /* PCI_DEF_H */
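
For reference, the PCI class code packs the base class and sub-class into the upper 16 bits of the 24-bit value and the programming interface into the low 8 bits, which is all these two macros split apart. A quick worked example (the xHCI class code is standard; the snippet itself is only illustrative):

#include <device/pci_def.h>
#include <stdint.h>

/* 0x0c0330: base class 0x0c (serial bus), sub-class 0x03 (USB), prog-if 0x30 (xHCI). */
static void class_macro_example(void)
{
	const uint32_t class_code = 0x0c0330;
	const uint16_t class_device = PCI_CLASS_GET_DEVICE(class_code); /* 0x0c03 */
	const uint8_t class_prog = PCI_CLASS_GET_PROG(class_code);      /* 0x30 */

	(void)class_device;
	(void)class_prog;
}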