/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <bootstate.h>
#include <console/console.h>
#include <smp/node.h>
#include <thread.h>
#include <timer.h>
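
/*
 * Stack storage for the secondary threads. The initial thread keeps running
 * on the stack it already has; see threads_initialize() below.
 */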
static u8 thread_stacks[CONFIG_STACK_SIZE * CONFIG_NUM_THREADS] __aligned(sizeof(uint64_t));
static bool initialized;

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage state machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;

static struct thread *active_thread;

static inline int thread_can_yield(const struct thread *t)
{
	return (t != NULL && t->can_yield > 0);
}
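
/*
 * The cooperative scheduler runs only on the boot CPU: set_current_thread()
 * asserts boot_cpu(), and current_thread() returns NULL before initialization
 * or on any other CPU.
 */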
static inline void set_current_thread(struct thread *t)
{
	assert(boot_cpu());
	active_thread = t;
}

static inline struct thread *current_thread(void)
{
	if (!initialized || !boot_cpu())
		return NULL;

	return active_thread;
}

static inline int thread_list_empty(struct thread **list)
{
	return *list == NULL;
}

static inline struct thread *pop_thread(struct thread **list)
{
	struct thread *t;

	t = *list;
	*list = t->next;
	t->next = NULL;
	return t;
}

static inline void push_thread(struct thread **list, struct thread *t)
{
	t->next = *list;
	*list = t;
}

static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}

static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}

static inline struct thread *get_free_thread(void)
{
	struct thread *t;

	if (thread_list_empty(&free_threads))
		return NULL;

	t = pop_thread(&free_threads);

	/* Reset the current stack value to the original. */
	if (!t->stack_orig)
		die("%s: Invalid stack value\n", __func__);

	t->stack_current = t->stack_orig;

	return t;
}

static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}

/* The idle thread is run whenever there isn't anything else that is runnable.
 * Its sole responsibility is to ensure progress is made by running the timer
 * callbacks. */
__noreturn static enum cb_err idle_thread(void *unused)
{
	/* This thread never voluntarily yields. */
	thread_coop_disable();
	while (1)
		timers_run();
}
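
/*
 * Switch away from the currently running thread. When t is NULL, the next
 * runnable thread is picked and the caller is not queued again; otherwise the
 * current thread is pushed back onto the runnable list before switching to t.
 */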
static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL, we need to find a new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}

	if (t->handle)
		t->handle->state = THREAD_STARTED;

	set_current_thread(t);

	switch_to_thread(t->stack_current, &current->stack_current);
}

static void terminate_thread(struct thread *t, enum cb_err error)
{
	if (t->handle) {
		t->handle->error = error;
		t->handle->state = THREAD_DONE;
	}

	free_thread(t);

	schedule(NULL);
}
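
/* Entry point for new threads: run the stashed function and terminate the
 * thread with its result. */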
static void asmlinkage call_wrapper(void *unused)
{
	struct thread *current = current_thread();
	enum cb_err error;

	error = current->entry(current->entry_arg);

	terminate_thread(current, error);
}

struct block_boot_state {
	boot_state_t state;
	boot_state_sequence_t seq;
};

/* Block the provided boot state until the thread is complete. */
static void asmlinkage call_wrapper_block_state(void *arg)
{
	struct block_boot_state *bbs = arg;
	struct thread *current = current_thread();
	enum cb_err error;

	boot_state_block(bbs->state, bbs->seq);
	error = current->entry(current->entry_arg);
	boot_state_unblock(bbs->state, bbs->seq);
	terminate_thread(current, error);
}

/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
 * Within thread_entry() it will call func(arg). */
static void prepare_thread(struct thread *t, struct thread_handle *handle,
			   enum cb_err (*func)(void *), void *arg,
			   asmlinkage void (*thread_entry)(void *), void *thread_arg)
{
	/* Stash the function and argument to run. */
	t->entry = func;
	t->entry_arg = arg;

	/* All new threads can yield by default. */
	t->can_yield = 1;

	/* Pointer used to publish the state of the thread. */
	t->handle = handle;

	arch_prepare_thread(t, thread_entry, thread_arg);
}

static void thread_resume_from_timeout(struct timeout_callback *tocb)
{
	struct thread *to;

	to = tocb->priv;
	schedule(to);
}

static void idle_thread_init(void)
{
	struct thread *t;

	t = get_free_thread();

	if (t == NULL)
		die("No threads available for idle thread!\n");

	/* Queue idle thread to run once all other threads have yielded. */
	prepare_thread(t, NULL, idle_thread, NULL, call_wrapper, NULL);
	push_runnable(t);
}

/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb,
			    unsigned int microsecs)
{
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);
	return 0;
}

static void *thread_alloc_space(struct thread *t, size_t bytes)
{
	/* Allocate the amount of space on the stack keeping the stack
	 * aligned to the pointer size. */
	t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));

	return (void *)t->stack_current;
}
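
/*
 * Set up thread 0 to represent the context we are already running in and put
 * the remaining thread structs, each with its own stack, on the free list.
 */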
static void threads_initialize(void)
{
	int i;
	struct thread *t;
	u8 *stack_top;

	if (initialized)
		return;

	t = &all_threads[0];

	set_current_thread(t);

	t->stack_orig = (uintptr_t)NULL; /* We never free the main thread */
	t->id = 0;
	t->can_yield = 1;

	stack_top = &thread_stacks[CONFIG_STACK_SIZE];
	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
		t = &all_threads[i];
		t->stack_orig = (uintptr_t)stack_top;
		t->id = i;
		stack_top += CONFIG_STACK_SIZE;
		free_thread(t);
	}

	idle_thread_init();

	initialized = 1;
}

int thread_run(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg)
{
	struct thread *current;
	struct thread *t;

	/* Lazy initialization */
	threads_initialize();

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "%s: No more threads!\n", __func__);
		return -1;
	}

	prepare_thread(t, handle, func, arg, call_wrapper, NULL);
	schedule(t);

	return 0;
}

int thread_run_until(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg,
		     boot_state_t state, boot_state_sequence_t seq)
{
	struct thread *current;
	struct thread *t;
	struct block_boot_state *bbs;

	/* This is a ramstage specific API */
	if (!ENV_RAMSTAGE)
		dead_code();

	/* Lazy initialization */
	threads_initialize();

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "%s: No more threads!\n", __func__);
		return -1;
	}

	bbs = thread_alloc_space(t, sizeof(*bbs));
	bbs->state = state;
	bbs->seq = seq;
	prepare_thread(t, handle, func, arg, call_wrapper_block_state, bbs);
	schedule(t);

	return 0;
}
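
/*
 * thread_yield() and thread_yield_microseconds() give the CPU to other
 * runnable threads. They return 0 on success and -1 when called from a
 * non-yielding context or when no timer callback could be scheduled.
 */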
int thread_yield(void)
{
	return thread_yield_microseconds(0);
}

int thread_yield_microseconds(unsigned int microsecs)
{
	struct thread *current;
	struct timeout_callback tocb;

	current = current_thread();

	if (!thread_can_yield(current))
		return -1;

	if (thread_yield_timed_callback(&tocb, microsecs))
		return -1;

	return 0;
}
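
/*
 * thread_coop_disable()/thread_coop_enable() adjust the current thread's
 * can_yield count and may nest; every disable must be balanced by an enable
 * before the thread is allowed to yield again.
 */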
void thread_coop_enable(void)
{
	struct thread *current;

	current = current_thread();

	if (current == NULL)
		return;

	assert(current->can_yield <= 0);

	current->can_yield++;
}

void thread_coop_disable(void)
{
	struct thread *current;

	current = current_thread();

	if (current == NULL)
		return;

	current->can_yield--;
}

enum cb_err thread_join(struct thread_handle *handle)
{
	struct stopwatch sw;
	struct thread *current = current_thread();

	assert(handle);
	assert(current);
	assert(current->handle != handle);

	if (handle->state == THREAD_UNINITIALIZED)
		return CB_ERR_ARG;

	printk(BIOS_SPEW, "waiting for thread\n");

	stopwatch_init(&sw);

	while (handle->state != THREAD_DONE)
		assert(thread_yield() == 0);

	printk(BIOS_SPEW, "took %lu us\n", stopwatch_duration_usecs(&sw));

	return handle->error;
}
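
/*
 * Typical usage of thread_run()/thread_join() (illustrative sketch only;
 * do_work() and work_handle are hypothetical names, not part of this file):
 *
 *	static enum cb_err do_work(void *arg)
 *	{
 *		// ...long-running work, yielding via thread_yield()...
 *		return CB_SUCCESS;
 *	}
 *
 *	static struct thread_handle work_handle;
 *
 *	thread_run(&work_handle, do_work, NULL);
 *	// ...other work on the boot CPU...
 *	if (thread_join(&work_handle) != CB_SUCCESS)
 *		printk(BIOS_ERR, "do_work failed\n");
 */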
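
/*
 * Cooperative mutex: thread_mutex_lock() yields until the mutex becomes
 * available, so it may only be called from a context that is allowed to
 * yield.
 */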
void thread_mutex_lock(struct thread_mutex *mutex)
{
	struct stopwatch sw;

	stopwatch_init(&sw);

	while (mutex->locked)
		assert(thread_yield() == 0);
	mutex->locked = true;

	printk(BIOS_SPEW, "took %lu us to acquire mutex\n", stopwatch_duration_usecs(&sw));
}

void thread_mutex_unlock(struct thread_mutex *mutex)
{
	assert(mutex->locked);
	mutex->locked = 0;
}