lib/thread: Switch to using CPU_INFO_V2
CPU_INFO_V2 changes the behavior of cpu_info(): there is now only one
cpu_info struct per CPU, so we no longer need to allocate one at the top
of each thread's stack. In theory we can now drop the CONFIG_STACK_SIZE
alignment on the thread stack sizes, and even use threads in SMM if you
are feeling venturesome.

BUG=b:194391185, b:179699789
TEST=Perform reboot stress test on guybrush with COOP_MULTITASKING
enabled.

Signed-off-by: Raul E Rangel <rrangel@chromium.org>
Change-Id: I5e04d254a00db43714ec60ebed7c4aa90e23190a
Reviewed-on: https://review.coreboot.org/c/coreboot/+/57628
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Eric Peers <epeers@google.com>
Reviewed-by: Karthik Ramasubramanian <kramasub@google.com>
Commit c842c59b3e (parent b2346a56f1)
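For context, a rough sketch of the difference between the two cpu_info() schemes the message describes. This is illustrative pseudocode only, not the actual coreboot implementation: cpu_info_v1(), cpu_info_v2(), one_cpu_info, and the CONFIG_STACK_SIZE value are made-up stand-ins, and the real V2 backing storage is architecture specific.

#include <stddef.h>
#include <stdint.h>

#define CONFIG_STACK_SIZE 0x1000	/* assumed value, purely for illustration */

struct cpu_info {
	void *thread;
	/* ... other per-CPU fields ... */
};

/*
 * V1 (old scheme): every stack is CONFIG_STACK_SIZE aligned and carries its
 * own copy of cpu_info at the top, so the struct is located by rounding the
 * current stack pointer up to the next stack boundary. Each thread stack
 * therefore had to reserve room for, and receive a copy of, that struct.
 */
static struct cpu_info *cpu_info_v1(void)
{
	char anchor;	/* any on-stack address works as a probe */
	uintptr_t top = ((uintptr_t)&anchor | (CONFIG_STACK_SIZE - 1)) + 1;

	return (struct cpu_info *)(top - sizeof(struct cpu_info));
}

/*
 * V2 (new scheme): exactly one cpu_info exists per CPU regardless of which
 * stack is running, so switching to a thread stack no longer copies the
 * struct around and the stacks no longer need the alignment trick above.
 */
static struct cpu_info one_cpu_info;	/* stand-in for real per-CPU storage */

static struct cpu_info *cpu_info_v2(void)
{
	return &one_cpu_info;
}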
@@ -677,7 +677,7 @@ config TIMER_QUEUE
 
 config COOP_MULTITASKING
 	def_bool n
-	depends on TIMER_QUEUE && ARCH_X86
+	depends on TIMER_QUEUE && ARCH_X86 && CPU_INFO_V2
 	help
 	  Cooperative multitasking allows callbacks to be multiplexed on the
 	  main thread of ramstage. With this enabled it allows for multiple
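The help text above describes the consumer-facing model: a callback handed to the threading API runs cooperatively alongside the main ramstage thread. A minimal usage sketch, assuming the thread_run()/thread_join() helpers declared in coreboot's <thread.h>; the exact signatures, warm_up_storage(), and the two wrapper functions are approximations for illustration and are not part of this change.

#include <thread.h>

static struct thread_handle storage_handle;

/* Example long-running callback; yields internally while waiting on hardware. */
static enum cb_err warm_up_storage(void *arg)
{
	/* ... poll hardware, calling thread_yield_microseconds() while waiting ... */
	return CB_SUCCESS;
}

void start_storage_warm_up(void)
{
	/* Queue the callback; if threads are unavailable, just run it inline. */
	if (thread_run(&storage_handle, warm_up_storage, NULL) < 0)
		warm_up_storage(NULL);
}

void wait_for_storage_warm_up(void)
{
	/* Cooperatively wait (yielding) until the callback finishes. */
	thread_join(&storage_handle);
}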
@@ -36,28 +36,17 @@ static struct thread all_threads[TOTAL_NUM_THREADS];
 static struct thread *runnable_threads;
 static struct thread *free_threads;
 
-static inline struct cpu_info *thread_cpu_info(const struct thread *t)
-{
-	return (void *)(t->stack_orig);
-}
-
 static inline int thread_can_yield(const struct thread *t)
 {
 	return (t != NULL && t->can_yield > 0);
 }
 
-/* Assumes current CPU info can switch. */
-static inline struct thread *cpu_info_to_thread(const struct cpu_info *ci)
-{
-	return ci->thread;
-}
-
 static inline struct thread *current_thread(void)
 {
 	if (!initialized)
 		return NULL;
 
-	return cpu_info_to_thread(cpu_info());
+	return cpu_info()->thread;
 }
 
 static inline int thread_list_empty(struct thread **list)
@@ -94,21 +83,12 @@ static inline struct thread *pop_runnable(void)
 static inline struct thread *get_free_thread(void)
 {
 	struct thread *t;
-	struct cpu_info *ci;
-	struct cpu_info *new_ci;
 
 	if (thread_list_empty(&free_threads))
 		return NULL;
 
 	t = pop_thread(&free_threads);
 
-	ci = cpu_info();
-
-	/* Initialize the cpu_info structure on the new stack. */
-	new_ci = thread_cpu_info(t);
-	*new_ci = *ci;
-	new_ci->thread = t;
-
 	/* Reset the current stack value to the original. */
 	t->stack_current = t->stack_orig;
 
@@ -134,6 +114,7 @@ __noreturn static enum cb_err idle_thread(void *unused)
 static void schedule(struct thread *t)
 {
 	struct thread *current = current_thread();
+	struct cpu_info *ci = cpu_info();
 
 	/* If t is NULL need to find new runnable thread. */
 	if (t == NULL) {
@@ -148,6 +129,8 @@ static void schedule(struct thread *t)
 	if (t->handle)
 		t->handle->state = THREAD_STARTED;
 
+	ci->thread = t;
+
 	switch_to_thread(t->stack_current, &current->stack_current);
 }
 
@@ -279,7 +262,7 @@ static void threads_initialize(void)
 	t->id = 0;
 	t->can_yield = 1;
 
-	stack_top = &thread_stacks[CONFIG_STACK_SIZE] - sizeof(struct cpu_info);
+	stack_top = &thread_stacks[CONFIG_STACK_SIZE];
 	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
 		t = &all_threads[i];
 		t->stack_orig = (uintptr_t)stack_top;