cpu/x86: Add support to run function with argument over APs

This patch ensures that a user can pass a function with a given
argument list to execute over the APs.

BUG=b:74436746
BRANCH=none
TEST=Able to run functions over APs with an argument.

Change-Id: I668b36752f6b21cb99cd1416c385d53e96117213
Signed-off-by: Subrata Banik <subrata.banik@intel.com>
Reviewed-on: https://review.coreboot.org/25725
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Nico Huber <nico.h@gmx.de>
This commit is contained in:
Subrata Banik 2018-04-24 13:45:30 +05:30 committed by Patrick Georgi
parent 223fb436fe
commit 3337497d2a
14 changed files with 35 additions and 28 deletions

View File

@@ -41,7 +41,8 @@
#define MAX_APIC_IDS 256 #define MAX_APIC_IDS 256
struct mp_callback { struct mp_callback {
void (*func)(void); void (*func)(void *);
void *arg;
}; };
/* /*
@@ -191,7 +192,7 @@ static void ap_do_flight_plan(void)
} }
} }
static void park_this_cpu(void) static void park_this_cpu(void *unused)
{ {
stop_this_cpu(); stop_this_cpu();
} }
@@ -222,7 +223,7 @@ static void asmlinkage ap_init(unsigned int cpu)
ap_do_flight_plan(); ap_do_flight_plan();
/* Park the AP. */ /* Park the AP. */
park_this_cpu(); park_this_cpu(NULL);
} }
static void setup_default_sipi_vector_params(struct sipi_params *sp) static void setup_default_sipi_vector_params(struct sipi_params *sp)
@@ -941,21 +942,22 @@ static void ap_wait_for_instruction(void)
memcpy(&lcb, cb, sizeof(lcb)); memcpy(&lcb, cb, sizeof(lcb));
mfence(); mfence();
store_callback(per_cpu_slot, NULL); store_callback(per_cpu_slot, NULL);
lcb.func(); lcb.func(lcb.arg);
} }
} }
int mp_run_on_aps(void (*func)(void), long expire_us) int mp_run_on_aps(void (*func)(void *), void *arg, long expire_us)
{ {
struct mp_callback lcb = { .func = func }; struct mp_callback lcb = { .func = func, .arg = arg };
return run_ap_work(&lcb, expire_us); return run_ap_work(&lcb, expire_us);
} }
int mp_run_on_all_cpus(void (*func)(void), long expire_us) int mp_run_on_all_cpus(void (*func)(void *), void *arg, long expire_us)
{ {
/* Run on BSP first. */ /* Run on BSP first. */
func(); func(arg);
return mp_run_on_aps(func, expire_us);
return mp_run_on_aps(func, arg, expire_us);
} }
int mp_park_aps(void) int mp_park_aps(void)
@@ -966,7 +968,7 @@ int mp_park_aps(void)
stopwatch_init(&sw); stopwatch_init(&sw);
ret = mp_run_on_aps(park_this_cpu, 250 * USECS_PER_MSEC); ret = mp_run_on_aps(park_this_cpu, NULL, 250 * USECS_PER_MSEC);
duration_msecs = stopwatch_duration_msecs(&sw); duration_msecs = stopwatch_duration_msecs(&sw);

View File

@@ -126,10 +126,10 @@ int mp_init_with_smm(struct bus *cpu_bus, const struct mp_ops *mp_ops);
* *
* All functions return < 0 on error, 0 on success. * All functions return < 0 on error, 0 on success.
*/ */
int mp_run_on_aps(void (*func)(void), long expire_us); int mp_run_on_aps(void (*func)(void *), void *arg, long expire_us);
/* Like mp_run_on_aps() but also runs func on BSP. */ /* Like mp_run_on_aps() but also runs func on BSP. */
int mp_run_on_all_cpus(void (*func)(void), long expire_us); int mp_run_on_all_cpus(void (*func)(void *), void *arg, long expire_us);
/* /*
* Park all APs to prepare for OS boot. This is handled automatically * Park all APs to prepare for OS boot. This is handled automatically

View File

@@ -192,7 +192,7 @@ static struct agesa_data {
* BSP deploys APs to callout_ap_entry(), which calls * BSP deploys APs to callout_ap_entry(), which calls
* agesawrapper_amdlaterunaptask with the agesadata. * agesawrapper_amdlaterunaptask with the agesadata.
*/ */
static void callout_ap_entry(void) static void callout_ap_entry(void *unused)
{ {
AGESA_STATUS Status = AGESA_UNSUPPORTED; AGESA_STATUS Status = AGESA_UNSUPPORTED;
@@ -219,7 +219,7 @@ AGESA_STATUS agesa_RunFuncOnAp(UINT32 Func, UINTN Data, VOID *ConfigPtr)
agesadata.Func = Func; agesadata.Func = Func;
agesadata.Data = Data; agesadata.Data = Data;
agesadata.ConfigPtr = ConfigPtr; agesadata.ConfigPtr = ConfigPtr;
mp_run_on_aps(callout_ap_entry, 100 * USECS_PER_MSEC); mp_run_on_aps(callout_ap_entry, NULL, 100 * USECS_PER_MSEC);
return AGESA_SUCCESS; return AGESA_SUCCESS;
} }
@@ -231,7 +231,7 @@ AGESA_STATUS agesa_RunFcnOnAllAps(UINT32 Func, UINTN Data, VOID *ConfigPtr)
agesadata.Func = Func; agesadata.Func = Func;
agesadata.Data = Data; agesadata.Data = Data;
agesadata.ConfigPtr = ConfigPtr; agesadata.ConfigPtr = ConfigPtr;
mp_run_on_aps(callout_ap_entry, 100 * USECS_PER_MSEC); mp_run_on_aps(callout_ap_entry, NULL, 100 * USECS_PER_MSEC);
return AGESA_SUCCESS; return AGESA_SUCCESS;
} }

View File

@@ -20,7 +20,7 @@
#include <timer.h> #include <timer.h>
#include <console/console.h> #include <console/console.h>
static void per_core_finalize(void) static void per_core_finalize(void *unused)
{ {
msr_t hwcr, mask; msr_t hwcr, mask;
@@ -44,7 +44,7 @@ static void finalize_cores(void)
int r; int r;
printk(BIOS_SPEW, "Lock SMM configuration\n"); printk(BIOS_SPEW, "Lock SMM configuration\n");
r = mp_run_on_all_cpus(per_core_finalize, 10 * USECS_PER_MSEC); r = mp_run_on_all_cpus(per_core_finalize, NULL, 10 * USECS_PER_MSEC);
if (r) if (r)
printk(BIOS_WARNING, "Failed to finalize all cores\n"); printk(BIOS_WARNING, "Failed to finalize all cores\n");
} }

View File

@@ -607,7 +607,7 @@ struct chip_operations soc_intel_apollolake_ops = {
static void drop_privilege_all(void) static void drop_privilege_all(void)
{ {
/* Drop privilege level on all the CPUs */ /* Drop privilege level on all the CPUs */
if (mp_run_on_all_cpus(&cpu_enable_untrusted_mode, 1000) < 0) if (mp_run_on_all_cpus(&cpu_enable_untrusted_mode, NULL, 1000) < 0)
printk(BIOS_ERR, "failed to enable untrusted mode\n"); printk(BIOS_ERR, "failed to enable untrusted mode\n");
} }

View File

@@ -243,7 +243,7 @@ static void post_mp_init(void)
smm_southbridge_enable(PWRBTN_EN | GBL_EN); smm_southbridge_enable(PWRBTN_EN | GBL_EN);
if (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_SGX)) if (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_SGX))
mp_run_on_all_cpus(sgx_configure, 2000); mp_run_on_all_cpus(sgx_configure, NULL, 2000);
} }
static const struct mp_ops mp_ops = { static const struct mp_ops mp_ops = {

View File

@@ -219,7 +219,7 @@ void cpu_disable_eist(void)
* Set Bit 6 (ENABLE_IA_UNTRUSTED_MODE) of MSR 0x120 * Set Bit 6 (ENABLE_IA_UNTRUSTED_MODE) of MSR 0x120
* UCODE_PCR_POWER_MISC MSR to enter IA Untrusted Mode. * UCODE_PCR_POWER_MISC MSR to enter IA Untrusted Mode.
*/ */
void cpu_enable_untrusted_mode(void) void cpu_enable_untrusted_mode(void *unused)
{ {
msr_t msr; msr_t msr;

View File

@@ -130,10 +130,15 @@ static void init_cpus(void *unused)
soc_init_cpus(dev->link_list); soc_init_cpus(dev->link_list);
} }
static void wrapper_x86_setup_mtrrs(void *unused)
{
x86_setup_mtrrs_with_detect();
}
/* Ensure to re-program all MTRRs based on DRAM resource settings */ /* Ensure to re-program all MTRRs based on DRAM resource settings */
static void post_cpus_init(void *unused) static void post_cpus_init(void *unused)
{ {
if (mp_run_on_all_cpus(&x86_setup_mtrrs_with_detect, 1000) < 0) if (mp_run_on_all_cpus(&wrapper_x86_setup_mtrrs, NULL, 1000) < 0)
printk(BIOS_ERR, "MTRR programming failure\n"); printk(BIOS_ERR, "MTRR programming failure\n");
x86_mtrr_check(); x86_mtrr_check();

View File

@@ -112,7 +112,7 @@ void cpu_disable_eist(void);
* Set Bit 6 (ENABLE_IA_UNTRUSTED_MODE) of MSR 0x120 * Set Bit 6 (ENABLE_IA_UNTRUSTED_MODE) of MSR 0x120
* UCODE_PCR_POWER_MISC MSR to enter IA Untrusted Mode. * UCODE_PCR_POWER_MISC MSR to enter IA Untrusted Mode.
*/ */
void cpu_enable_untrusted_mode(void); void cpu_enable_untrusted_mode(void *unused);
/* /*
* This function fills in the number of Cores(physical) and Threads(virtual) * This function fills in the number of Cores(physical) and Threads(virtual)

View File

@@ -38,7 +38,7 @@ void prmrr_core_configure(void);
/* /*
* Configure SGX. * Configure SGX.
*/ */
void sgx_configure(void); void sgx_configure(void *unused);
/* SOC specific API to get SGX params. /* SOC specific API to get SGX params.
* returns 0, if able to get SGX params; otherwise returns -1 */ * returns 0, if able to get SGX params; otherwise returns -1 */

View File

@@ -21,7 +21,7 @@ struct vmx_param {
/* /*
* Configure VMX. * Configure VMX.
*/ */
void vmx_configure(void); void vmx_configure(void *unused);
/* SOC specific API to get VMX params. /* SOC specific API to get VMX params.
* returns 0, if able to get VMX params; otherwise returns -1 */ * returns 0, if able to get VMX params; otherwise returns -1 */

View File

@@ -201,7 +201,7 @@ static int is_prmrr_approved(void)
return 0; return 0;
} }
void sgx_configure(void) void sgx_configure(void *unused)
{ {
const void *microcode_patch = intel_mp_current_microcode(); const void *microcode_patch = intel_mp_current_microcode();

View File

@@ -43,7 +43,7 @@ static int soc_vmx_enabled(void)
return vmx_param ? vmx_param->enable : 0; return vmx_param ? vmx_param->enable : 0;
} }
void vmx_configure(void) void vmx_configure(void *unused)
{ {
msr_t msr; msr_t msr;
struct cpuid_result regs; struct cpuid_result regs;

View File

@@ -477,9 +477,9 @@ static void post_mp_init(void)
smm_lock(); smm_lock();
#endif #endif
mp_run_on_all_cpus(vmx_configure, 2000); mp_run_on_all_cpus(vmx_configure, NULL, 2000);
mp_run_on_all_cpus(sgx_configure, 2000); mp_run_on_all_cpus(sgx_configure, NULL, 2000);
} }
static const struct mp_ops mp_ops = { static const struct mp_ops mp_ops = {