os-k/kaleid/include/extras/locks.h

//----------------------------------------------------------------------------//
//                                GNU GPL OS/K                                //
//                                                                            //
//            Authors: spectral`                                              //
//                     NeoX                                                   //
//                                                                            //
//            Desc: Spinlocks and mutexes                                     //
//----------------------------------------------------------------------------//
#ifndef _KALBASE_H
#include <kalbase.h>
#endif
#ifdef _KALEID_KERNEL
#ifndef _KALKERN_BASE_H
#include <kernel/base.h>
#endif
#endif
#ifndef _KALEXTRAS_LOCKS_H
#define _KALEXTRAS_LOCKS_H
//------------------------------------------//
typedef enum eLockType_t {
    // Mutex-type lock
    //
    // WARNING
    // AquireLock() panics when used on a mutex while not running a process
    KLOCK_MUTEX,

    // Spinlock-type lock
    KLOCK_SPINLOCK,
} LockType_t;
// "volatile" may not be actually needed
typedef struct sLock_t {
    unsigned int initDone;          // initialized?
    LockType_t   type;              // lock type?
    volatile int locked;            // is locked?

#ifdef _KALEID_KERNEL
    Thread_t *ownerThread;          // unused
    Thread_t *waitingThread;        // unused
#endif

} Lock_t;
//------------------------------------------//
//
// Linux syscall vs unimplemented syscall...
//
#ifndef _KALEID_KERNEL
#ifdef _OSK_SOURCE
int KalYieldCPU(void);
#else
int sched_yield(void);
#endif
#endif
//
// Initialize a lock
//
static inline
void InitLock(Lock_t *lock, LockType_t type)
{
    lock->type     = type;
    lock->locked   = FALSE;
    lock->initDone = INITOK;
#ifdef _KALEID_KERNEL
    lock->ownerThread   = NULL;
    lock->waitingThread = NULL;
#endif
}
//
// Alternative way to initialize a lock
//
// Initializer order must match the field order of Lock_t above
#ifdef _KALEID_KERNEL
# define INITLOCK(type) { INITOK, (type), FALSE, NULL, NULL }
#else
# define INITLOCK(type) { INITOK, (type), FALSE }
#endif
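//
// Usage sketch (illustrative only; "ExampleLock" is a placeholder name,
// not something declared by this header):
//
//      static Lock_t ExampleLock = INITLOCK(KLOCK_SPINLOCK);
//
// or, equivalently, at runtime:
//
//      InitLock(&ExampleLock, KLOCK_SPINLOCK);
//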
//
// Destroy a lock
//
static inline
void DestroyLock(Lock_t *lock)
{
    KalAssert(lock->initDone);
    __sync_synchronize();
    lock->initDone = 0;
}
//
// Acquire the lock
// Panic on double acquisition, since that should never happen
// until we have at least a basic scheduler
//
static inline
void AquireLock(Lock_t *lock)
{
    KalAssert(lock->initDone == INITOK);

    // Atomically flip "locked" from 0 to 1; loop for as long as
    // somebody else already holds the lock
    while (!__sync_bool_compare_and_swap(&lock->locked, 0, 1)) {
#ifdef _KALEID_KERNEL
        StartPanic("AquireLock on an already locked object");
#else
        // Spinlocks busy-wait; mutexes yield the CPU between attempts
        if likely (lock->type == KLOCK_SPINLOCK) continue;
#ifdef _OSK_SOURCE
        else (void)KalYieldCPU();
#else
        else (void)sched_yield();
#endif
#endif
    }

    // Full memory barrier: nothing from the critical section may be
    // reordered before the acquisition
    __sync_synchronize();
}
//
// Release an already acquired lock
// Panic if the lock was never acquired
//
static inline
void ReleaseLock(Lock_t *lock)
{
#ifdef _KALEID_KERNEL
    KalAssert(lock->ownerThread == GetCurThread());
#endif
    // Full memory barrier: the critical section's writes must be
    // visible before the lock appears free
    __sync_synchronize();
    lock->locked = 0;
}
//
// Try to acquire the lock without blocking
//
static inline
bool AttemptLock(Lock_t *lock)
{
    KalAssert(lock->initDone == INITOK);

    bool retval = __sync_bool_compare_and_swap(&lock->locked, 0, 1);
    __sync_synchronize();

    return retval;
}
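//
// Typical usage sketch (illustrative only; DoWork() and ExampleLock are
// placeholders, not part of this header):
//
//      AquireLock(&ExampleLock);
//      DoWork();
//      ReleaseLock(&ExampleLock);
//
// Non-blocking variant:
//
//      if (AttemptLock(&ExampleLock)) {
//          DoWork();
//          ReleaseLock(&ExampleLock);
//      }
//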
//------------------------------------------//
#endif // _KALEXTRAS_LOCKS_H