soc/intel/alderlake/hsphy: Add possibility to cache HSPHY in flash

This patch adds the ability to cache the PCIe 5.0 HSPHY firmware in
the SPI flash. A new flashmap region is created for that purpose. The
goal of caching is to reduce the dependency on CSME and the HECI IP
LOAD command, which may fail when the CSME is disabled, e.g. soft
disabled by HECI command or disabled via the HAP bit. This change
keeps the PCIe 5.0 root ports functional even if CSME/HECI is not
functional.

TEST=Boot Ubuntu 22.04 on MSI PRO Z690-A and verify the PCIe 5.0 port
is functional after loading the HSPHY from cache.

Signed-off-by: Michał Żygowski <michal.zygowski@3mdeb.com>
Change-Id: I5a37f5b06706ff30d92f60f1bf5dc900edbde96f
Reviewed-on: https://review.coreboot.org/c/coreboot/+/68987
Reviewed-by: Krystian Hebel <krystian.hebel@3mdeb.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Michał Żygowski 2022-10-29 21:32:54 +02:00
parent 558d8b79e6
commit 95be012c11
5 changed files with 309 additions and 38 deletions
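
For orientation, the flow introduced by this change is: always try to fetch and verify the HSPHY payload from CSME first (so the flash cache can be kept current), and fall back to the HSPHY_FW flashmap cache when CSME is disabled or the HECI transfer fails. The sketch below is a simplified, self-contained illustration of that ordering only; the helper names echo the functions added in hsphy.c further down, but the bodies are hypothetical stubs, not the coreboot implementation.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs standing in for the real helpers added in hsphy.c below. */
static bool cse_enabled;                     /* false: CSME soft-disabled or HAP-disabled */

static bool fetch_and_verify_from_csme(void) { return cse_enabled; }
static bool load_hsphy_from_cache(void)      { return true; /* assume HSPHY_FW cache is valid */ }
static void upload_hsphy_to_cpu_pcie(void)   { puts("HSPHY uploaded, PCIe Gen5 ports usable"); }
static void cache_hsphy_fw_in_flash(void)    { puts("HSPHY cache refreshed"); }

int main(void)
{
	/* Prefer CSME so the flash cache can be kept up to date. */
	if (cse_enabled && fetch_and_verify_from_csme()) {
		upload_hsphy_to_cpu_pcie();
		cache_hsphy_fw_in_flash();
		return 0;
	}

	/* CSME disabled or HECI transfer failed: fall back to the HSPHY_FW cache. */
	if (load_hsphy_from_cache()) {
		upload_hsphy_to_cpu_pcie();
		return 0;
	}

	fprintf(stderr, "HSPHY load failed, PCIe Gen5 won't work\n");
	return 1;
}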

Makefile.inc

@@ -1029,6 +1029,15 @@ else
 FMAP_VPD_ENTRY :=
 endif
+ifeq ($(CONFIG_INCLUDE_HSPHY_IN_FMAP),y)
+FMAP_HSPHY_FW_BASE := $(call int-align, $(FMAP_CURRENT_BASE), 0x1000)
+FMAP_HSPHY_FW_SIZE := $(CONFIG_HSPHY_FW_MAX_SIZE)
+FMAP_HSPHY_FW_ENTRY := HSPHY_FW@$(FMAP_HSPHY_FW_BASE) $(FMAP_HSPHY_FW_SIZE)
+FMAP_CURRENT_BASE := $(call int-add, $(FMAP_HSPHY_FW_BASE) $(FMAP_HSPHY_FW_SIZE))
+else
+FMAP_HSPHY_FW_ENTRY :=
+endif
+
 #
 # X86 FMAP region
 #
@@ -1107,6 +1116,7 @@ $(obj)/fmap.fmd: $(top)/Makefile.inc $(DEFAULT_FLASHMAP) $(obj)/config.h
 	-e "s,##SMMSTORE_ENTRY##,$(FMAP_SMMSTORE_ENTRY)," \
 	-e "s,##SPD_CACHE_ENTRY##,$(FMAP_SPD_CACHE_ENTRY)," \
 	-e "s,##VPD_ENTRY##,$(FMAP_VPD_ENTRY)," \
+	-e "s,##HSPHY_FW_ENTRY##,$(FMAP_HSPHY_FW_ENTRY)," \
 	-e "s,##CBFS_BASE##,$(FMAP_CBFS_BASE)," \
 	-e "s,##CBFS_SIZE##,$(FMAP_CBFS_SIZE)," \
 	$(DEFAULT_FLASHMAP) > $@.tmp
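
As a worked example of the region arithmetic above (a standalone sketch; the starting offset is assumed for illustration, and `int-align`/`int-add` are coreboot's Makefile helpers for rounding up and summing):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fmap_current_base = 0x3fd100; /* assumed running offset, illustration only */
	uint32_t hsphy_fw_size = 0x8000;       /* CONFIG_HSPHY_FW_MAX_SIZE */

	/* int-align: round the running offset up to the next 0x1000 boundary */
	uint32_t hsphy_fw_base = (fmap_current_base + 0xfff) & ~0xfffu;

	/* int-add: the next region starts right after HSPHY_FW */
	fmap_current_base = hsphy_fw_base + hsphy_fw_size;

	printf("HSPHY_FW@0x%x 0x%x, next region starts at 0x%x\n",
	       hsphy_fw_base, hsphy_fw_size, fmap_current_base);
	return 0;
}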

src/soc/intel/alderlake/Kconfig

@@ -527,4 +527,30 @@ config FSP_PUBLISH_MBP_HOB
 	  later platforms so creation of MBP HOB can be skipped for ADL-N and RPL based
 	  platforms.
 
+config INCLUDE_HSPHY_IN_FMAP
+	bool "Include PCIe 5.0 HSPHY firmware in flash"
+	default n
+	help
+	  Set this option to cache the PCIe 5.0 HSPHY firmware after it is
+	  fetched from ME during boot. By default coreboot fetches the HSPHY
+	  FW from ME, but if for some reason ME is not enabled or not
+	  visible, the cached blob will be used to initialize the PCIe 5.0
+	  root ports. Select it if ME is soft disabled or disabled with the
+	  HAP bit. If the firmware file is not provided directly via the
+	  HSPHY_FW_FILE Kconfig, the HSPHY FW will be saved to the flashmap
+	  region once it is successfully fetched from ME.
+
+config HSPHY_FW_FILE
+	string "HSPHY firmware file path"
+	depends on INCLUDE_HSPHY_IN_FMAP
+	help
+	  Path to the PCIe 5.0 HSPHY firmware file. The file can be extracted
+	  from a full firmware image or the ME region using UEFITool. If left
+	  empty, the HSPHY loading procedure will try to save the firmware to
+	  the flashmap region once it is successfully fetched from ME.
+
+config HSPHY_FW_MAX_SIZE
+	hex
+	default 0x8000
+
 endif

src/soc/intel/alderlake/Makefile.inc

@@ -114,4 +114,28 @@ $(eval $(call cse_add_input,bp2,IUNP))
 endif
 
+ifeq ($(CONFIG_INCLUDE_HSPHY_IN_FMAP),y)
+ifneq ($(call strip_quotes,$(CONFIG_HSPHY_FW_FILE)),)
+
+# Create the target HSPHY file that will be put into the flashmap region.
+# The layout is: the HSPHY size first, then the hash algorithm (3 - SHA384,
+# the default for now), the hash digest, padding up to the maximum digest
+# size (SHA512 - 64 bytes) and finally the HSPHY firmware itself.
+$(obj)/hsphy_fw.bin: $(call strip_quotes,$(top)/$(CONFIG_HSPHY_FW_FILE))
+	printf "    HSPHY      $(obj)/hsphy_fw.bin\n"
+	$(shell wc -c $< | awk '{print $$1}' | tr -d '\n' | xargs -0 printf '%08X' | \
+		tac -rs .. | xxd -r -p > $@)
+	$(shell printf '%02X' 3 | xxd -r -p >> $@)
+	$(shell sha384sum $< | awk '{print $$1}' | tac -rs .. | xxd -r -p >> $@)
+	$(shell dd if=/dev/zero bs=1 count=16 2> /dev/null >> $@)
+	$(shell cat $< >> $@)
+
+add_hsphy_firmware: $(obj)/hsphy_fw.bin $(obj)/fmap.fmap $(obj)/coreboot.pre $(CBFSTOOL)
+	$(CBFSTOOL) $(obj)/coreboot.pre write -u -r HSPHY_FW -f $(obj)/hsphy_fw.bin
+
+$(call add_intermediate, add_hsphy_firmware)
+
+endif
+endif
 
 endif
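
To make the byte layout produced by this rule explicit: the shell pipeline writes the blob size as a 32-bit little-endian value (the `tac -rs ..` step reverses the hex byte pairs), one algorithm byte (3 = SHA-384), the byte-reversed SHA-384 digest, 16 zero bytes of padding up to the 64-byte SHA-512 digest size, and finally the firmware itself. Below is a minimal C view of that header, mirroring the struct hsphy_cache that hsphy.c uses to parse the region; the struct and macro names here are illustrative, not taken from the tree.

#include <stdint.h>

#define HSPHY_MAX_DIGEST_SIZE 64 /* SHA-512 digest size, the largest supported */

/* Header prepended to the HSPHY firmware blob in the HSPHY_FW flashmap region. */
struct hsphy_fw_header {
	uint32_t hsphy_size;                    /* firmware size, stored little-endian */
	uint8_t  hash_algo;                     /* 3 == SHA-384 for now */
	uint8_t  digest[HSPHY_MAX_DIGEST_SIZE]; /* digest, zero-padded to 64 bytes */
	/* followed by hsphy_size bytes of HSPHY firmware */
} __attribute__((packed));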

src/soc/intel/alderlake/hsphy.c

@@ -8,6 +8,7 @@
 #include <device/mmio.h>
 #include <device/pci_def.h>
 #include <device/pci_ops.h>
+#include <fmap.h>
 #include <intelblocks/cse.h>
 #include <intelblocks/systemagent.h>
 #include <intelblocks/vtd.h>
@@ -29,6 +30,13 @@
 #define CPU_PID_PCIE_PHYX16_BROADCAST 0x55
 
+struct hsphy_cache {
+	uint32_t hsphy_size;
+	uint8_t hash_algo;
+	uint8_t digest[MAX_HASH_SIZE];
+	uint8_t hsphy_fw[0];
+} __packed;
+
 struct ip_push_model {
 	uint16_t count;
 	uint16_t address;
@@ -105,7 +113,7 @@ static int heci_get_hsphy_payload(void *buf, uint32_t *buf_size, uint8_t *hash_b
 	return 0;
 }
 
-static int verify_hsphy_hash(void *buf, uint32_t buf_size, uint8_t *hash_buf, uint8_t hash_alg)
+static bool verify_hsphy_hash(void *buf, uint32_t buf_size, uint8_t *hash_buf, uint8_t hash_alg)
 {
 	struct vb2_hash hash;
@@ -125,14 +133,13 @@ static int verify_hsphy_hash(void *buf, uint32_t buf_size, uint8_t *hash_buf, ui
 		hash.algo = VB2_HASH_SHA384;
 		break;
 	}
 
 	memcpy(hash.raw, hash_buf, vb2_digest_size(hash.algo));
 
-	if (vb2_hash_verify(vboot_hwcrypto_allowed(), buf, buf_size, &hash) != VB2_SUCCESS) {
-		printk(BIOS_ERR, "HSPHY SHA hashes do not match\n");
-		return -1;
-	}
+	if (vb2_hash_verify(vboot_hwcrypto_allowed(), buf, buf_size, &hash) != VB2_SUCCESS)
+		return false;
 
-	return 0;
+	return true;
 }
 
 static void upload_hsphy_to_cpu_pcie(void *buf, uint32_t buf_size)
@@ -156,31 +163,168 @@ static void upload_hsphy_to_cpu_pcie(void *buf, uint32_t buf_size)
 	}
 }
 
-void load_and_init_hsphy(void)
+static bool hsphy_cache_valid(struct hsphy_cache *hsphy_fw_cache)
 {
-	void *hsphy_buf;
-	uint8_t hsphy_hash[MAX_HASH_SIZE] = { 0 };
-	uint8_t hash_type;
-	uint32_t buf_size = HSPHY_PAYLOAD_SIZE;
-	size_t dma_buf_size;
-	pci_devfn_t dev = PCH_DEV_CSE;
-	const uint16_t pci_cmd_bme_mem = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
-	uint32_t status;
-
-	if (!is_devfn_enabled(SA_DEVFN_CPU_PCIE1_0) &&
-	    !is_devfn_enabled(SA_DEVFN_CPU_PCIE1_1)) {
-		printk(BIOS_DEBUG, "All HSPHY ports disabled, skipping HSPHY loading\n");
+	if (!hsphy_fw_cache) {
+		printk(BIOS_WARNING, "Failed to mmap HSPHY cache\n");
+		return false;
+	}
+
+	if (hsphy_fw_cache->hsphy_size == 0 ||
+	    hsphy_fw_cache->hsphy_size > HSPHY_PAYLOAD_SIZE ||
+	    hsphy_fw_cache->hash_algo <= HASHALG_SHA1 ||
+	    hsphy_fw_cache->hash_algo > HASHALG_SHA512)
+		return false;
+
+	if (!verify_hsphy_hash(hsphy_fw_cache->hsphy_fw, hsphy_fw_cache->hsphy_size,
+			       hsphy_fw_cache->digest, hsphy_fw_cache->hash_algo))
+		return false;
+
+	return true;
+}
+
+static bool load_hsphy_from_cache(void)
+{
+	struct region_device rdev;
+	struct hsphy_cache *hsphy_fw_cache;
+
+	if (fmap_locate_area_as_rdev("HSPHY_FW", &rdev) < 0) {
+		printk(BIOS_ERR, "HSPHY: Cannot find HSPHY_FW region\n");
+		return false;
+	}
+
+	hsphy_fw_cache = (struct hsphy_cache *)rdev_mmap_full(&rdev);
+
+	if (!hsphy_cache_valid(hsphy_fw_cache)) {
+		printk(BIOS_ERR, "HSPHY: HSPHY cache invalid\n");
+		if (hsphy_fw_cache)
+			rdev_munmap(&rdev, hsphy_fw_cache);
+		return false;
+	}
+
+	printk(BIOS_INFO, "Loading HSPHY FW from cache\n");
+	upload_hsphy_to_cpu_pcie(hsphy_fw_cache->hsphy_fw, hsphy_fw_cache->hsphy_size);
+
+	rdev_munmap(&rdev, hsphy_fw_cache);
+
+	return true;
+}
+
+static void cache_hsphy_fw_in_flash(void *buf, uint32_t buf_size, uint8_t *hash_buf,
+				    uint8_t hash_alg)
+{
+	struct region_device rdev;
+	struct hsphy_cache *hsphy_fw_cache;
+	size_t ret;
+
+	if (!buf || buf_size == 0 || buf_size > (HSPHY_PAYLOAD_SIZE - sizeof(*hsphy_fw_cache))
+	    || !hash_buf || hash_alg <= HASHALG_SHA1 || hash_alg > HASHALG_SHA512) {
+		printk(BIOS_ERR, "Invalid parameters, HSPHY will not be cached in flash.\n");
 		return;
 	}
 
+	/* Locate the area as RO rdev, otherwise mmap will fail */
+	if (fmap_locate_area_as_rdev("HSPHY_FW", &rdev) < 0) {
+		printk(BIOS_ERR, "HSPHY: Could not find HSPHY_FW region\n");
+		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
+		return;
+	}
+
+	hsphy_fw_cache = (struct hsphy_cache *)rdev_mmap_full(&rdev);
+
+	if (hsphy_cache_valid(hsphy_fw_cache)) {
+		/* If the cache is valid, check the buffer against the cache hash */
+		if (verify_hsphy_hash(buf, buf_size, hsphy_fw_cache->digest,
+				      hsphy_fw_cache->hash_algo)) {
+			printk(BIOS_INFO, "HSPHY: cache does not need update\n");
+			rdev_munmap(&rdev, hsphy_fw_cache);
+			return;
+		} else {
+			printk(BIOS_INFO, "HSPHY: cache needs update\n");
+		}
+	} else {
+		printk(BIOS_INFO, "HSPHY: cache invalid, updating\n");
+	}
+
+	if (region_device_sz(&rdev) < (buf_size + sizeof(*hsphy_fw_cache))) {
+		printk(BIOS_ERR, "HSPHY: HSPHY_FW region too small: %zx < %zx\n",
+		       region_device_sz(&rdev), buf_size + sizeof(*hsphy_fw_cache));
+		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
+		rdev_munmap(&rdev, hsphy_fw_cache);
+		return;
+	}
+
+	rdev_munmap(&rdev, hsphy_fw_cache);
+
+	hsphy_fw_cache = malloc(sizeof(*hsphy_fw_cache));
+	if (!hsphy_fw_cache) {
+		printk(BIOS_ERR, "HSPHY: Could not allocate memory for HSPHY cache buffer\n");
+		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
+		return;
+	}
+
+	hsphy_fw_cache->hsphy_size = buf_size;
+	hsphy_fw_cache->hash_algo = hash_alg;
+
+	switch (hash_alg) {
+	case HASHALG_SHA256:
+		hash_alg = VB2_HASH_SHA256;
+		break;
+	case HASHALG_SHA384:
+		hash_alg = VB2_HASH_SHA384;
+		break;
+	case HASHALG_SHA512:
+		hash_alg = VB2_HASH_SHA512;
+		break;
+	}
+
+	memset(hsphy_fw_cache->digest, 0, sizeof(hsphy_fw_cache->digest));
+	memcpy(hsphy_fw_cache->digest, hash_buf, vb2_digest_size(hash_alg));
+
+	/* Now that we want to write to flash, locate the area as RW rdev */
+	if (fmap_locate_area_as_rdev_rw("HSPHY_FW", &rdev) < 0) {
+		printk(BIOS_ERR, "HSPHY: Could not find HSPHY_FW region\n");
+		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
+		free(hsphy_fw_cache);
+		return;
+	}
+
+	if (rdev_eraseat(&rdev, 0, region_device_sz(&rdev)) < 0) {
+		printk(BIOS_ERR, "Failed to erase HSPHY cache region\n");
+		free(hsphy_fw_cache);
+		return;
+	}
+
+	ret = rdev_writeat(&rdev, hsphy_fw_cache, 0, sizeof(*hsphy_fw_cache));
+	if (ret != sizeof(*hsphy_fw_cache)) {
+		printk(BIOS_ERR, "Failed to write HSPHY cache metadata\n");
+		free(hsphy_fw_cache);
+		return;
+	}
+
+	ret = rdev_writeat(&rdev, buf, sizeof(*hsphy_fw_cache), buf_size);
+	if (ret != buf_size) {
+		printk(BIOS_ERR, "Failed to write HSPHY FW to cache\n");
+		free(hsphy_fw_cache);
+		return;
+	}
+
+	printk(BIOS_INFO, "HSPHY cached to flash successfully\n");
+	free(hsphy_fw_cache);
+}
+
+static void *allocate_hsphy_buf(void)
+{
+	void *hsphy_buf;
+	size_t dma_buf_size;
+
 	if (CONFIG(ENABLE_EARLY_DMA_PROTECTION)) {
 		hsphy_buf = vtd_get_dma_buffer(&dma_buf_size);
 		if (!hsphy_buf || dma_buf_size < HSPHY_PAYLOAD_SIZE) {
 			printk(BIOS_ERR, "DMA protection enabled but DMA buffer does not"
 			       " exist or is too small\n");
-			printk(BIOS_ERR, "Aborting HSPHY firmware loading, "
-			       "PCIe Gen5 won't work.\n");
-			return;
+			return NULL;
 		}
 
 		/* Rather impossible scenario, but check alignment anyways */
@@ -192,20 +336,73 @@ void load_and_init_hsphy(void)
 		hsphy_buf = memalign(4 * KiB, HSPHY_PAYLOAD_SIZE);
 
 		if (!hsphy_buf) {
-			printk(BIOS_ERR, "Could not allocate memory for HSPHY blob\n");
-			printk(BIOS_ERR, "Aborting HSPHY firmware loading, "
-			       "PCIe Gen5 won't work.\n");
-			return;
+			printk(BIOS_ERR, "Failed to allocate memory for HSPHY blob\n");
+			return NULL;
 		}
 	}
+
+	return hsphy_buf;
+}
+
+void load_and_init_hsphy(void)
+{
+	void *hsphy_buf;
+	uint8_t hsphy_hash[MAX_HASH_SIZE] = { 0 };
+	uint8_t hash_type;
+	uint32_t buf_size = HSPHY_PAYLOAD_SIZE;
+	pci_devfn_t dev = PCH_DEV_CSE;
+	const uint16_t pci_cmd_bme_mem = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+	uint32_t status;
+
+	if (!is_devfn_enabled(SA_DEVFN_CPU_PCIE1_0) &&
+	    !is_devfn_enabled(SA_DEVFN_CPU_PCIE1_1)) {
+		printk(BIOS_DEBUG, "All HSPHY ports disabled, skipping HSPHY loading\n");
+		return;
+	}
+
+	/*
+	 * Try to get HSPHY payload from CSME first, so we can always keep our
+	 * HSPHY cache up to date. If we cannot allocate the buffer for it, the
+	 * cache is our last resort.
+	 */
+	hsphy_buf = allocate_hsphy_buf();
+	if (!hsphy_buf) {
+		printk(BIOS_ERR, "Could not allocate memory for HSPHY blob\n");
+		if (CONFIG(INCLUDE_HSPHY_IN_FMAP)) {
+			printk(BIOS_INFO, "Trying to load HSPHY FW from cache\n");
+			if (load_hsphy_from_cache()) {
+				printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
+				return;
+			}
+			printk(BIOS_ERR, "Failed to load HSPHY FW from cache\n");
+		}
+		printk(BIOS_ERR, "Aborting HSPHY FW loading, PCIe Gen5 won't work.\n");
+		return;
+	}
+
 	memset(hsphy_buf, 0, HSPHY_PAYLOAD_SIZE);
 
+	/*
+	 * If CSME is not present, try cached HSPHY FW. We still want to use
+	 * CSME just in case CSME is updated along with HSPHY FW, so that we
+	 * can update our cache if needed.
+	 */
 	if (!is_cse_enabled()) {
+		if (CONFIG(INCLUDE_HSPHY_IN_FMAP)) {
+			printk(BIOS_INFO, "Trying to load HSPHY FW from cache"
+					  " because CSME is not enabled or not visible\n");
+			if (load_hsphy_from_cache()) {
+				printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
+				return;
+			}
+			printk(BIOS_ERR, "Failed to load HSPHY FW from cache\n");
+		}
 		printk(BIOS_ERR, "%s: CSME not enabled or not visible, but required\n",
 		       __func__);
-		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
-		goto hsphy_exit;
+		printk(BIOS_ERR, "Aborting HSPHY FW loading, PCIe Gen5 won't work.\n");
+		if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
+			free(hsphy_buf);
+		return;
 	}
 
 	/* Ensure BAR, BME and memory space are enabled */
@@ -219,19 +416,32 @@ void load_and_init_hsphy(void)
 		pci_or_config16(dev, PCI_COMMAND, pci_cmd_bme_mem);
 	}
 
-	if (heci_get_hsphy_payload(hsphy_buf, &buf_size, hsphy_hash, &hash_type, &status)) {
-		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
-		goto hsphy_exit;
+	/* Try to get HSPHY payload from CSME and cache it if possible. */
+	if (!heci_get_hsphy_payload(hsphy_buf, &buf_size, hsphy_hash, &hash_type, &status)) {
+		if (verify_hsphy_hash(hsphy_buf, buf_size, hsphy_hash, hash_type)) {
+			upload_hsphy_to_cpu_pcie(hsphy_buf, buf_size);
+			if (CONFIG(INCLUDE_HSPHY_IN_FMAP))
+				cache_hsphy_fw_in_flash(hsphy_buf, buf_size, hsphy_hash,
+							hash_type);
+			if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
+				free(hsphy_buf);
+			return;
+		} else {
+			printk(BIOS_ERR, "Failed to verify HSPHY FW hash.\n");
+		}
+	} else {
+		printk(BIOS_ERR, "Failed to get HSPHY FW over HECI.\n");
 	}
 
-	if (verify_hsphy_hash(hsphy_buf, buf_size, hsphy_hash, hash_type)) {
-		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
-		goto hsphy_exit;
-	}
-
-	upload_hsphy_to_cpu_pcie(hsphy_buf, buf_size);
-
-hsphy_exit:
 	if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
 		free(hsphy_buf);
+
+	/* We failed to get HSPHY payload from CSME, cache is our last chance. */
+	if (CONFIG(INCLUDE_HSPHY_IN_FMAP) && load_hsphy_from_cache()) {
+		printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
+		return;
+	}
+
+	printk(BIOS_ERR, "Failed to load HSPHY FW, PCIe Gen5 won't work.\n");
 }

util/cbfstool/default-x86.fmd

@@ -14,6 +14,7 @@ FLASH@##ROM_BASE## ##ROM_SIZE## {
 	##SMMSTORE_ENTRY##
 	##SPD_CACHE_ENTRY##
 	##VPD_ENTRY##
+	##HSPHY_FW_ENTRY##
 	FMAP@##FMAP_BASE## ##FMAP_SIZE##
 	COREBOOT(CBFS)@##CBFS_BASE## ##CBFS_SIZE##
 }