From 47be2d9f70c0112da29d560cbef13b6f2bcd5697 Mon Sep 17 00:00:00 2001
From: Arthur Heymans
Date: Sat, 12 Oct 2019 17:32:09 +0200
Subject: [PATCH] cpu/x86: Add a prog_run hook to set up caching of XIP stages

Some platforms lack a non-eviction mode, so caching the whole ROM to
speed up XIP stages can be dangerous: if too much of the ROM is
accessed, cachelines may get evicted. The solution is to cache only a
region, about the size of the stage that the bootblock is about to
load: verstage and/or romstage.

TODO: For now a limit of 256KiB is set for the total amount of cache
that can be used. This should fit most use cases for the time being.

Change-Id: I94d5771a57ffd74d53db3e35fe169d77d7fbb8cd
Signed-off-by: Arthur Heymans
Reviewed-on: https://review.coreboot.org/c/coreboot/+/35993
Tested-by: build bot (Jenkins)
Reviewed-by: Nico Huber
---
 src/cpu/x86/Kconfig           |  10 ++++
 src/cpu/x86/mtrr/Makefile.inc |   4 ++
 src/cpu/x86/mtrr/xip_cache.c  | 109 ++++++++++++++++++++++++++++++++++
 3 files changed, 123 insertions(+)
 create mode 100644 src/cpu/x86/mtrr/xip_cache.c

diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index a8cf54d89e..b316c1ffc3 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -78,6 +78,16 @@ config XIP_ROM_SIZE
 	depends on !NO_FIXED_XIP_ROM_SIZE
 	default 0x10000
 
+config SETUP_XIP_CACHE
+	bool
+	depends on C_ENVIRONMENT_BOOTBLOCK
+	depends on !NO_XIP_EARLY_STAGES
+	help
+	  Select this option to set up an MTRR to cache XIP stages loaded
+	  from the bootblock. This is useful on platforms that lack a
+	  non-eviction mode and therefore need to be careful to avoid
+	  eviction.
+
 config CPU_ADDR_BITS
 	int
 	default 36
diff --git a/src/cpu/x86/mtrr/Makefile.inc b/src/cpu/x86/mtrr/Makefile.inc
index caa6e9c5d2..129d05d41b 100644
--- a/src/cpu/x86/mtrr/Makefile.inc
+++ b/src/cpu/x86/mtrr/Makefile.inc
@@ -2,8 +2,12 @@ ramstage-y += mtrr.c
 
 romstage-y += earlymtrr.c
 bootblock-y += earlymtrr.c
+verstage-y += earlymtrr.c
 
 bootblock-y += debug.c
 romstage-y += debug.c
 postcar-y += debug.c
 ramstage-y += debug.c
+
+bootblock-$(CONFIG_SETUP_XIP_CACHE) += xip_cache.c
+verstage-$(CONFIG_SETUP_XIP_CACHE) += xip_cache.c
diff --git a/src/cpu/x86/mtrr/xip_cache.c b/src/cpu/x86/mtrr/xip_cache.c
new file mode 100644
index 0000000000..112c0dfb90
--- /dev/null
+++ b/src/cpu/x86/mtrr/xip_cache.c
@@ -0,0 +1,109 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <arch/cpu.h>
+#include <program_loading.h>
+#include <commonlib/helpers.h>
+#include <console/console.h>
+#include <cpu/x86/mtrr.h>
+
+/* For now this is a good lowest common denominator for the total CPU cache.
+   TODO: fetch the total amount of cache from CPUID leaf 2. */
+#define MAX_CPU_CACHE (256 * KiB)
+
+/* This makes the 'worst' case assumption that all cachelines covered by
+   the MTRR, no matter the caching type, are filled and not overlapping. */
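+/*
+ * Worked example (illustrative, hypothetical values): a variable MTRR
+ * covering a 16MiB region has a PHYSMASK low dword of 0xff000800, i.e.
+ * the mask 0xff000000 with MTRR_PHYS_MASK_VALID (bit 11) set. The region
+ * size then falls out of the mask arithmetic below:
+ *   ~(0xff000800 & 0xfffff000) + 1 == ~0xff000000 + 1 == 0x01000000 (16MiB)
+ */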
+static uint32_t max_cache_used(void)
+{
+	msr_t msr = rdmsr(MTRR_CAP_MSR);
+	int i, total_mtrrs = msr.lo & MTRR_CAP_VCNT;
+	uint32_t total_cache = 0;
+
+	for (i = 0; i < total_mtrrs; i++) {
+		msr_t mtrr = rdmsr(MTRR_PHYS_MASK(i));
+		if (!(mtrr.lo & MTRR_PHYS_MASK_VALID))
+			continue;
+		total_cache += ~(mtrr.lo & 0xfffff000) + 1;
+	}
+	return total_cache;
+}
+
+void platform_prog_run(struct prog *prog)
+{
+	const uint32_t base = region_device_offset(&prog->rdev);
+	const uint32_t size = region_device_sz(&prog->rdev);
+	const uint32_t end = base + size;
+	const uint32_t cache_used = max_cache_used();
+	/* This will accumulate MTRRs as XIP stages are run.
+	   For now this includes the bootblock (which sets up its own
+	   caching elsewhere), verstage and romstage. */
+	int mtrr_num = get_free_var_mtrr();
+	uint32_t mtrr_base;
+	uint32_t mtrr_size = 4 * KiB;
+	struct cpuinfo_x86 cpu_info;
+
+	get_fms(&cpu_info, cpuid_eax(1));
+	/*
+	 * An unidentified combination of speculative reads and branch
+	 * predictions inside WRPROT-cacheable memory can cause invalidation
+	 * of cachelines and loss of stack on models based on NetBurst
+	 * microarchitecture. Therefore disable the WRPROT region entirely
+	 * for all family F models.
+	 */
+	if (cpu_info.x86 == 0xf) {
+		printk(BIOS_NOTICE,
+		       "PROG_RUN: CPU does not support caching ROM\n"
+		       "The next stage will run slowly\n");
+		return;
+	}
+
+	if (mtrr_num == -1) {
+		printk(BIOS_NOTICE,
+		       "PROG_RUN: No MTRR available to cache ROM!\n"
+		       "The next stage will run slowly!\n");
+		return;
+	}
+
+	if (cache_used + mtrr_size > MAX_CPU_CACHE) {
+		printk(BIOS_NOTICE,
+		       "PROG_RUN: No more cache available for the next stage\n"
+		       "The next stage will run slowly!\n");
+		return;
+	}
+
+	while (1) {
+		if (ALIGN_DOWN(base, mtrr_size) + mtrr_size >= end)
+			break;
+		if (cache_used + mtrr_size * 2 > MAX_CPU_CACHE)
+			break;
+		mtrr_size *= 2;
+	}
+
+	mtrr_base = ALIGN_DOWN(base, mtrr_size);
+	if (mtrr_base + mtrr_size < end) {
+		printk(BIOS_NOTICE, "PROG_RUN: Limiting XIP cache to %uKiB!\n",
+		       mtrr_size / KiB);
+		/* Check if we can cover a bigger range by aligning up. */
+		const uint32_t alt_base = ALIGN_UP(base, mtrr_size);
+		const uint32_t lower_coverage = mtrr_base + mtrr_size - base;
+		const uint32_t upper_coverage =
+			MIN(alt_base + mtrr_size, end) - alt_base;
+		if (upper_coverage > lower_coverage)
+			mtrr_base = alt_base;
+	}
+
+	printk(BIOS_DEBUG,
+	       "PROG_RUN: Setting MTRR to cache XIP stage. base: 0x%08x, size: 0x%08x\n",
+	       mtrr_base, mtrr_size);
+
+	set_var_mtrr(mtrr_num, mtrr_base, mtrr_size, MTRR_TYPE_WRPROT);
+}
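
For anyone who wants to poke at the sizing heuristic without booting
hardware, the following standalone sketch mirrors the window growth and
alignment choice above. It is not part of the patch: the base, end and
cache_used values are hypothetical, and the coreboot helpers are
reimplemented as plain macros so it compiles on the host.

#include <stdint.h>
#include <stdio.h>

#define KiB 1024
#define MAX_CPU_CACHE (256 * KiB)
/* Stand-ins for coreboot's <commonlib/helpers.h> macros. */
#define ALIGN_DOWN(x, a) ((x) & ~((uint32_t)(a) - 1))
#define ALIGN_UP(x, a)   ALIGN_DOWN((x) + (a) - 1, (a))
#define MIN(a, b)        ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical XIP stage: 72KiB at an unaligned flash offset. */
	const uint32_t base = 0xffe31000;
	const uint32_t end = base + 72 * KiB;
	const uint32_t cache_used = 64 * KiB;	/* assumed prior MTRR usage */
	uint32_t mtrr_size = 4 * KiB;
	uint32_t mtrr_base;

	/* Grow a naturally aligned power-of-two window until it covers the
	   stage, or doubling it would blow the cache budget. */
	while (ALIGN_DOWN(base, mtrr_size) + mtrr_size < end &&
	       cache_used + mtrr_size * 2 <= MAX_CPU_CACHE)
		mtrr_size *= 2;

	/* Prefer whichever alignment covers more of [base, end). */
	mtrr_base = ALIGN_DOWN(base, mtrr_size);
	if (mtrr_base + mtrr_size < end) {
		const uint32_t alt_base = ALIGN_UP(base, mtrr_size);
		const uint32_t lower = mtrr_base + mtrr_size - base;
		const uint32_t upper = MIN(alt_base + mtrr_size, end) - alt_base;
		if (upper > lower)
			mtrr_base = alt_base;
	}

	/* Prints "base 0xffe20000 size 128KiB": the 256KiB budget caps the
	   window at 128KiB, and aligning down covers 60KiB of the stage
	   versus 12KiB when aligning up. */
	printf("base 0x%08x size %uKiB\n",
	       (unsigned int)mtrr_base, (unsigned int)(mtrr_size / KiB));
	return 0;
}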