lib/prog_loaders, soc/amd/: Make payload_preload use cbfs_preload
Now that CBFS has this functionality built in, we no longer need to implement it by hand in the program loader.

payload_preload used to stage the raw payload contents in the payload_preload_cache region. That region was placed outside the firmware reserved area, so it remained available to the OS; this was safe because the payload is not loaded again on S3 resume.

cbfs_preload only uses the cbfs_cache region, which must stay reserved because it is also used on the S3 resume path. Unfortunately this means cbfs_cache has to be large enough to hold the payload. Cezanne is the only platform currently using payload_preload, and its cbfs_cache size has already been adjusted.

In the future we could look into adding an option to cbfs_preload that lets it use a different memory pool for the cache allocation.

BUG=b:179699789
TEST=Boot guybrush and verify preloading the payload was successful:
     CBFS DEBUG: get_preload_rdev(name='fallback/payload') preload successful

Signed-off-by: Raul E Rangel <rrangel@chromium.org>
Change-Id: Idc521b238620ff52b8ba481cd3c10e5c4f1394bd
Reviewed-on: https://review.coreboot.org/c/coreboot/+/58962
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Julius Werner <jwerner@chromium.org>
Reviewed-by: Karthik Ramasubramanian <kramasub@google.com>
parent  d7e7d60e0f
commit  571e7f02de
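For readers skimming the diff, here is a minimal caller-level sketch of the intended flow. The call sites are illustrative only (not part of this change); it simply shows how the two entry points in program_loading.h are meant to pair up once payload_preload() forwards to cbfs_preload().

#include <program_loading.h>

/*
 * Illustrative call site, not part of this commit. payload_preload() is
 * now a thin wrapper around cbfs_preload() and is a no-op unless
 * CONFIG(CBFS_PRELOAD) is selected, so it can be called unconditionally
 * once the SPI DMA controller is usable. payload_load() later consumes
 * the cached copy (or falls back to a normal CBFS read).
 */
static void example_payload_flow(void)
{
	/* Start the background SPI DMA read of the payload into cbfs_cache. */
	payload_preload();

	/* ... other ramstage work overlaps with the transfer ... */

	/* Map and load the payload; a preload hit avoids a second SPI read. */
	payload_load();
}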
@@ -52,7 +52,6 @@ DECLARE_REGION(asan_shadow)
 
 /* Regions for execution units. */
 
-DECLARE_REGION(payload_preload_cache)
 DECLARE_REGION(payload)
 /* "program" always refers to the current execution unit. */
 DECLARE_REGION(program)
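Background on the removed declaration (a sketch, not part of the change): DECLARE_REGION(name) in symbols.h exposes the _name linker symbol and REGION_SIZE(name). The old preload path used those for payload_preload_cache as its destination buffer; with cbfs_preload() targeting the cbfs_cache region instead, the declaration can go away. Any declared region can still be inspected the same way:

#include <console/console.h>
#include <symbols.h>

/* Illustrative only: dump the region that now holds the preloaded payload. */
static void print_cbfs_cache_region(void)
{
	printk(BIOS_DEBUG, "cbfs_cache at %p, %zu bytes\n",
	       (void *)_cbfs_cache, (size_t)REGION_SIZE(cbfs_cache));
}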
@@ -114,14 +114,3 @@ config CBFS_PRELOAD
 	  in the background before they are actually required. This feature
 	  depends on the read-only boot_device having a DMA controller to
 	  perform the background transfer.
-
-config PAYLOAD_PRELOAD
-	bool
-	depends on COOP_MULTITASKING
-	help
-	  On some systems with SPI DMA controllers, it is possible to preload
-	  the payload while ramstage is executing. This can be selected by the
-	  SoC to enable payload preloading.
-
-	  The SoC needs to define a payload_preload_cache region where the
-	  raw payload can be placed.
@@ -127,71 +127,37 @@ fail:
 static struct prog global_payload =
 	PROG_INIT(PROG_PAYLOAD, CONFIG_CBFS_PREFIX "/payload");
 
-static struct thread_handle payload_preload_handle;
-
-static enum cb_err payload_preload_thread_entry(void *arg)
-{
-	size_t size;
-	struct prog *payload = &global_payload;
-
-	printk(BIOS_DEBUG, "Preloading payload\n");
-
-	payload->cbfs_type = CBFS_TYPE_QUERY;
-
-	size = cbfs_type_load(prog_name(payload), _payload_preload_cache,
-			      REGION_SIZE(payload_preload_cache), &payload->cbfs_type);
-
-	if (!size) {
-		printk(BIOS_ERR, "ERROR: Preloading payload failed\n");
-		return CB_ERR;
-	}
-
-	printk(BIOS_DEBUG, "Preloading payload complete\n");
-
-	return CB_SUCCESS;
-}
-
 void payload_preload(void)
 {
-	struct thread_handle *handle = &payload_preload_handle;
-
-	if (!CONFIG(PAYLOAD_PRELOAD))
+	if (!CONFIG(CBFS_PRELOAD))
 		return;
 
-	if (thread_run(handle, payload_preload_thread_entry, NULL))
-		printk(BIOS_ERR, "ERROR: Failed to start payload preload thread\n");
+	cbfs_preload(global_payload.name);
 }
 
 void payload_load(void)
 {
 	struct prog *payload = &global_payload;
-	struct thread_handle *handle = &payload_preload_handle;
-	void *mapping = NULL;
-	void *buffer;
+	void *mapping;
 
 	timestamp_add_now(TS_LOAD_PAYLOAD);
 
 	if (prog_locate_hook(payload))
 		goto out;
 
-	if (CONFIG(PAYLOAD_PRELOAD) && thread_join(handle) == CB_SUCCESS) {
-		buffer = _payload_preload_cache;
-	} else {
-		payload->cbfs_type = CBFS_TYPE_QUERY;
-		mapping = cbfs_type_map(prog_name(payload), NULL, &payload->cbfs_type);
-		buffer = mapping;
-	}
+	payload->cbfs_type = CBFS_TYPE_QUERY;
+	mapping = cbfs_type_map(prog_name(payload), NULL, &payload->cbfs_type);
 
-	if (!buffer)
+	if (!mapping)
 		goto out;
 
 	switch (prog_cbfs_type(payload)) {
 	case CBFS_TYPE_SELF: /* Simple ELF */
-		selfload_mapped(payload, buffer, BM_MEM_RAM);
+		selfload_mapped(payload, mapping, BM_MEM_RAM);
 		break;
 	case CBFS_TYPE_FIT: /* Flattened image tree */
 		if (CONFIG(PAYLOAD_FIT_SUPPORT)) {
-			fit_payload(payload, buffer);
+			fit_payload(payload, mapping);
 			break;
 		} /* else fall-through */
 	default:
@@ -200,8 +166,7 @@ void payload_load(void)
 		break;
 	}
 
-	if (mapping)
-		cbfs_unmap(mapping);
+	cbfs_unmap(mapping);
 out:
 	if (prog_entry(payload) == NULL)
 		die_with_post_code(POST_INVALID_ROM, "Payload not loaded.\n");
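The thread_handle/thread_join() plumbing above could be dropped because the synchronization now lives behind the CBFS API. As a sketch (assuming CBFS_PRELOAD is enabled and the boot_device has a DMA controller), a preload pairs with a later lookup of the same file like this:

#include <cbfs.h>
#include <console/console.h>

/* Sketch of the cbfs_preload()/cbfs_type_map() pairing used above. */
static void preload_then_map_example(void)
{
	enum cbfs_type type = CBFS_TYPE_QUERY;
	void *mapping;

	/* Queue the background read of the file into the cbfs_cache region. */
	cbfs_preload(CONFIG_CBFS_PREFIX "/payload");

	/* The later lookup for the same name waits for the transfer (the
	   get_preload_rdev() path seen in the TEST log) and is served from
	   the cached copy instead of re-reading the SPI flash. */
	mapping = cbfs_type_map(CONFIG_CBFS_PREFIX "/payload", NULL, &type);
	if (!mapping) {
		printk(BIOS_ERR, "payload not found in CBFS\n");
		return;
	}

	/* ... hand the mapping to selfload_mapped()/fit_payload() ... */
	cbfs_unmap(mapping);
}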
@@ -197,7 +197,6 @@ config ASYNC_FILE_LOADING
 	select COOP_MULTITASKING
 	select SOC_AMD_COMMON_BLOCK_LPC_SPI_DMA
 	select CBFS_PRELOAD
-	select PAYLOAD_PRELOAD
 	help
 	  When enabled, the platform will use the LPC SPI DMA controller to
 	  asynchronously load contents from the SPI ROM. This will improve
@@ -29,16 +29,6 @@ config CBFS_CACHE_SIZE
 	help
 	  The size of the cbfs_cache region.
 
-config PAYLOAD_PRELOAD_CACHE_SIZE
-	hex
-	default 0x30000
-	depends on PAYLOAD_PRELOAD
-	help
-	  This config sets the size of the payload_preload_cache memory region.
-	  It is used as the destination for the raw payload. This space is only
-	  populated during non-S3, so it doesn't need to be reserved in the
-	  EARLY_RESERVED_DRAM region.
-
 endif # SOC_AMD_COMMON_BLOCK_NONCAR
 
 config SOC_AMD_COMMON_BLOCK_MCA_COMMON
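Since the payload now has to fit in cbfs_cache (which, unlike the removed region, stays reserved across S3), a platform bumping CBFS_CACHE_SIZE could add a build-time sanity check along these lines. This is a sketch, not part of the commit; EXPECTED_MAX_PAYLOAD_SIZE is a made-up placeholder, and 0x30000 is simply the old PAYLOAD_PRELOAD_CACHE_SIZE default:

/* Illustrative check only; names below are not from this change. The
   CONFIG_CBFS_CACHE_SIZE macro comes from the generated Kconfig header. */
#define EXPECTED_MAX_PAYLOAD_SIZE 0x30000	/* old payload_preload_cache default */

_Static_assert(CONFIG_CBFS_CACHE_SIZE >= EXPECTED_MAX_PAYLOAD_SIZE,
	       "cbfs_cache is too small to hold the preloaded payload");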
@@ -106,16 +106,6 @@ SECTIONS
 
 	EARLY_RESERVED_DRAM_END(.)
 
-#if CONFIG(PAYLOAD_PRELOAD)
-	/*
-	 * This section is outside the early_reserved_dram section. We only read
-	 * the payload on non-S3 boots, so we don't need to reserve it from the
-	 * OS. The 64 byte alignment is required by the SPI DMA controller.
-	 */
-	. = ALIGN(ARCH_CACHELINE_ALIGN_SIZE);
-	REGION(payload_preload_cache, ., CONFIG_PAYLOAD_PRELOAD_CACHE_SIZE, ARCH_CACHELINE_ALIGN_SIZE)
-#endif
-
 	RAMSTAGE(CONFIG_RAMBASE, 8M)
 }