ACPI S3: Depend on RELOCATABLE_RAMSTAGE
With RELOCATABLE_RAMSTAGE, the S3 resume path only uses memory that is
reserved from the OS, so there is no need for the low-memory backup and
recovery.

Change-Id: If7f83711685ac445abf4cd1aa6b66c3391e0e554
Signed-off-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/26834
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
parent 546a2e8468
commit 7cd2c07317
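For readers outside the project, the rationale can be pictured with a small, self-contained sketch (hypothetical addresses and helper names, not coreboot code): a relocatable ramstage is placed inside CBMEM, which coreboot reports to the OS as reserved, so reloading it on S3 resume cannot clobber OS-owned RAM. A non-relocatable ramstage is linked at a fixed address in OS-usable low memory, which is why the backup/restore machinery removed below existed.

/* Toy model only: illustrates why RELOCATABLE_RAMSTAGE removes the need for
 * a low-memory backup. Addresses and names are made up for the example. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uintptr_t base;
	size_t size;
};

/* True if the inner region lies completely inside the outer one. */
static bool range_contains(const struct range *outer, const struct range *inner)
{
	return inner->base >= outer->base &&
	       inner->base + inner->size <= outer->base + outer->size;
}

int main(void)
{
	/* Example layout: CBMEM near the top of RAM is reported as reserved. */
	const struct range cbmem_reserved = { 0x7f000000, 8 * 1024 * 1024 };
	/* A relocatable ramstage lives inside CBMEM... */
	const struct range reloc_ramstage = { 0x7f200000, 1 * 1024 * 1024 };
	/* ...a non-relocatable ramstage links at a fixed OS-usable address. */
	const struct range fixed_ramstage = { 0x00e00000, 1 * 1024 * 1024 };

	printf("relocatable ramstage needs low-memory backup: %s\n",
	       range_contains(&cbmem_reserved, &reloc_ramstage) ? "no" : "yes");
	printf("fixed-address ramstage needs low-memory backup: %s\n",
	       range_contains(&cbmem_reserved, &fixed_ramstage) ? "no" : "yes");
	return 0;
}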
@@ -484,6 +484,7 @@ source "src/console/Kconfig"
 config HAVE_ACPI_RESUME
 	bool
 	default n
+	depends on RELOCATABLE_RAMSTAGE
 
 config RESUME_PATH_SAME_AS_BOOT
 	bool
@@ -70,124 +70,20 @@ void acpi_fail_wakeup(void)
 }
 #endif /* ENV_RAMSTAGE */
 
-struct resume_backup {
-	uint64_t cbmem;
-	uint64_t lowmem;
-	uint64_t size;
-	uint8_t valid;
-};
-
-#define BACKUP_PAGE_SZ 4096
-
-static int backup_create_or_update(struct resume_backup *backup_mem,
-	uintptr_t base, size_t size)
-{
-	uintptr_t top;
-
-	/* Align backup region to complete pages. */
-	top = ALIGN_UP(base + size, BACKUP_PAGE_SZ);
-	base = ALIGN_DOWN(base, BACKUP_PAGE_SZ);
-	size = top - base;
-
-	/* Cannot extend existing region, should not happen. */
-	if (backup_mem && (backup_mem->size < size))
-		return -1;
-
-	/* Allocate backup with room for header. */
-	if (!backup_mem) {
-		size_t header_sz = ALIGN_UP(sizeof(*backup_mem),
-			BACKUP_PAGE_SZ);
-		backup_mem = cbmem_add(CBMEM_ID_RESUME, header_sz + size);
-		if (!backup_mem)
-			return -1;
-
-		/* Container starts from boundary after header. */
-		backup_mem->cbmem = (uintptr_t)backup_mem + header_sz;
-	}
-
-	backup_mem->valid = 0;
-	backup_mem->lowmem = base;
-	backup_mem->size = size;
-	return 0;
-}
-
-void backup_ramstage_section(uintptr_t base, size_t size)
-{
-	struct resume_backup *backup_mem = cbmem_find(CBMEM_ID_RESUME);
-
-	/* For first boot we exit here as CBMEM_ID_RESUME is only
-	 * created late in ramstage with acpi_prepare_resume_backup().
-	 */
-	if (!backup_mem)
-		return;
-
-	/* Check that the backup is not done twice. */
-	if (backup_mem->valid)
-		return;
-
-	/* When we are called from ramstage loader, update header with
-	 * properties of the ramstage we will load.
-	 */
-	if (backup_create_or_update(backup_mem, base, size) < 0)
-		return;
-
-	/* Back up the OS-controlled memory where ramstage will be loaded. */
-	memcpy((void *)(uintptr_t)backup_mem->cbmem,
-		(void *)(uintptr_t)backup_mem->lowmem,
-		(size_t)backup_mem->size);
-	backup_mem->valid = 1;
-}
-
-/* Let's prepare the ACPI S3 Resume area now already, so we can rely on
- * it being there during reboot time. If this fails, ACPI resume will
- * be disabled. We assume that ramstage does not change while in suspend,
- * so base and size of the currently running ramstage are used
- * for allocation.
- */
-void acpi_prepare_resume_backup(void)
-{
-	if (!acpi_s3_resume_allowed())
-		return;
-
-	if (CONFIG(RELOCATABLE_RAMSTAGE))
-		return;
-
-	backup_create_or_update(NULL, (uintptr_t)_program,
-		REGION_SIZE(program));
-}
-
 #define WAKEUP_BASE 0x600
 
-asmlinkage void (*acpi_do_wakeup)(uintptr_t vector, u32 backup_source,
-	u32 backup_target, u32 backup_size) = (void *)WAKEUP_BASE;
+asmlinkage void (*acpi_do_wakeup)(uintptr_t vector) = (void *)WAKEUP_BASE;
 
 extern unsigned char __wakeup;
 extern unsigned int __wakeup_size;
 
 static void acpi_jump_to_wakeup(void *vector)
 {
-	uintptr_t source = 0, target = 0;
-	size_t size = 0;
-
 	if (!acpi_s3_resume_allowed()) {
 		printk(BIOS_WARNING, "ACPI: S3 resume not allowed.\n");
 		return;
 	}
 
-	if (!CONFIG(RELOCATABLE_RAMSTAGE)) {
-		struct resume_backup *backup_mem = cbmem_find(CBMEM_ID_RESUME);
-		if (backup_mem && backup_mem->valid) {
-			backup_mem->valid = 0;
-			target = backup_mem->lowmem;
-			source = backup_mem->cbmem;
-			size = backup_mem->size;
-		} else {
-			printk(BIOS_WARNING, "ACPI: Backup memory missing. "
-				"No S3 resume.\n");
-			return;
-		}
-	}
-
 	/* Copy wakeup trampoline in place. */
 	memcpy((void *)WAKEUP_BASE, &__wakeup, __wakeup_size);
 
@@ -195,7 +91,7 @@ static void acpi_jump_to_wakeup(void *vector)
 
 	timestamp_add_now(TS_ACPI_WAKE_JUMP);
 
-	acpi_do_wakeup((uintptr_t)vector, source, target, size);
+	acpi_do_wakeup((uintptr_t)vector);
 }
 
 void __weak mainboard_suspend_resume(void)
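The backup_create_or_update() helper removed above rounded the backed-up window out to whole 4 KiB pages before sizing the CBMEM allocation. A minimal standalone illustration of that arithmetic follows; ALIGN_UP/ALIGN_DOWN are redefined locally so the snippet compiles on its own, and the example region is made up.

#include <stdint.h>
#include <stdio.h>

#define BACKUP_PAGE_SZ	4096

/* Local stand-ins for alignment macros (power-of-two alignments only). */
#define ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))
#define ALIGN_UP(x, a)		ALIGN_DOWN((x) + ((uintptr_t)(a) - 1), (a))

int main(void)
{
	/* Example region only; any base/size works. */
	uintptr_t base = 0x00e00123;
	uintptr_t size = 0x2345;

	/* Same steps as the removed code: align the region to complete pages. */
	uintptr_t top = ALIGN_UP(base + size, BACKUP_PAGE_SZ);
	base = ALIGN_DOWN(base, BACKUP_PAGE_SZ);
	size = top - base;

	printf("backup window: [%#lx, %#lx), %#lx bytes\n",
	       (unsigned long)base, (unsigned long)top, (unsigned long)size);
	return 0;
}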
@@ -934,7 +934,6 @@ unsigned long acpi_create_hest_error_source(acpi_hest_t *hest,
 /* For ACPI S3 support. */
 void acpi_fail_wakeup(void);
 void acpi_resume(void *wake_vec);
-void acpi_prepare_resume_backup(void);
 void mainboard_suspend_resume(void);
 void *acpi_find_wakeup_vector(void);
 
@@ -64,14 +64,6 @@ __wakeup:
 	shr	$4, %eax
 	movw	%ax, (__wakeup_segment)
 
-	/* Then overwrite coreboot with our backed up memory */
-	cld
-	movl	8(%esp), %esi
-	movl	12(%esp), %edi
-	movl	16(%esp), %ecx
-	shrl	$2, %ecx
-	rep	movsl
-
 	/* Activate the right segment descriptor real mode. */
 	ljmp	$0x28, $RELOCATED(1f)
 1:
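The dropped assembly was a plain word copy: %esi, %edi and %ecx were loaded from the three extra stack arguments that acpi_do_wakeup() no longer passes, and rep movsl moved size/4 32-bit words from the CBMEM backup back over the low-memory region ramstage had overwritten. A rough, self-contained C rendering of the same operation (illustrative names, not coreboot's):

#include <stdint.h>
#include <string.h>

/* Equivalent of: load src/dst/byte-count from the stack, then rep movsl.
 * The backup size was page-aligned, so it is always a multiple of four. */
static void restore_lowmem(void *lowmem_target, const void *cbmem_backup,
			   size_t size_bytes)
{
	uint32_t *dst = lowmem_target;
	const uint32_t *src = cbmem_backup;
	size_t words = size_bytes / 4;	/* shrl $2, %ecx */

	while (words--)			/* rep movsl */
		*dst++ = *src++;
}

int main(void)
{
	uint32_t backup[4] = { 1, 2, 3, 4 };
	uint32_t lowmem[4] = { 0 };

	restore_lowmem(lowmem, backup, sizeof(backup));
	return memcmp(lowmem, backup, sizeof(backup)) != 0;
}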
@@ -187,8 +187,6 @@ static boot_state_t bs_os_resume_check(void *arg)
 		boot_states[BS_OS_RESUME].arg = wake_vector;
 		return BS_OS_RESUME;
 	}
-
-	acpi_prepare_resume_backup();
 #endif
 	timestamp_add_now(TS_CBMEM_POST);
 
@@ -107,18 +107,6 @@ static int load_relocatable_ramstage(struct prog *ramstage)
 	return rmodule_stage_load(&rmod_ram);
 }
 
-static int load_nonrelocatable_ramstage(struct prog *ramstage)
-{
-	if (CONFIG(HAVE_ACPI_RESUME)) {
-		uintptr_t base = 0;
-		size_t size = cbfs_prog_stage_section(ramstage, &base);
-		if (size)
-			backup_ramstage_section(base, size);
-	}
-
-	return cbfs_prog_stage_load(ramstage);
-}
-
 void run_ramstage(void)
 {
 	struct prog ramstage =
@@ -147,7 +135,7 @@ void run_ramstage(void)
 	if (CONFIG(RELOCATABLE_RAMSTAGE)) {
 		if (load_relocatable_ramstage(&ramstage))
 			goto fail;
-	} else if (load_nonrelocatable_ramstage(&ramstage))
+	} else if (cbfs_prog_stage_load(&ramstage))
 		goto fail;
 
 	stage_cache_add(STAGE_RAMSTAGE, &ramstage);
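With HAVE_ACPI_RESUME now depending on RELOCATABLE_RAMSTAGE, the HAVE_ACPI_RESUME branch inside the deleted load_nonrelocatable_ramstage() becomes dead code on the non-relocatable path, so the wrapper collapses to a plain cbfs_prog_stage_load() call. The simplified decision, sketched as a standalone toy with stub loaders standing in for rmodule_stage_load()/cbfs_prog_stage_load() (control flow only, not coreboot code):

#include <stdio.h>

/* Pretend Kconfig value; flip to 0 to exercise the in-place path. */
#define CONFIG_RELOCATABLE_RAMSTAGE 1

/* Stubs standing in for the real loaders. */
static int load_relocatable_ramstage(void)
{
	puts("relocate ramstage into CBMEM (reserved from the OS)");
	return 0;
}

static int load_ramstage_in_place(void)
{
	puts("load ramstage at its fixed link address (no S3 bookkeeping)");
	return 0;
}

static int load_ramstage(void)
{
	if (CONFIG_RELOCATABLE_RAMSTAGE)
		return load_relocatable_ramstage();
	/* The backup_ramstage_section() call that used to live here is gone. */
	return load_ramstage_in_place();
}

int main(void)
{
	return load_ramstage() ? 1 : 0;
}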