os-k/kaleid/include/kernel/kernlocks.h

179 lines
3.9 KiB
C
Raw Normal View History

2019-01-14 14:31:49 +01:00
//----------------------------------------------------------------------------//
// GNU GPL OS/K //
// //
// Authors: spectral` //
// NeoX //
// //
// Desc: Spinlocks and mutexes //
//----------------------------------------------------------------------------//
//------------------------------------------//
// Dependencies //
//------------------------------------------//
#ifdef _KALEID_KERNEL
#ifndef _KALKERN_BASE_H
#include "kernbase.h"
#endif
#else
#ifndef _KALEID_H
#include <kaleid.h>
#endif
#endif
//------------------------------------------//
// Start of header //
//------------------------------------------//
#ifndef _KALKERN_LOCKS_H
#define _KALKERN_LOCKS_H
//------------------------------------------//
// Types //
//------------------------------------------//
typedef enum eLockType_t {
//
// Mutex-type lock: contenders yield the CPU instead of spinning
//
// WARNING
// AquireLock() panics when used on a mutex while not running a process
//
KLOCK_MUTEX,
//
// Spinlock-type lock: contenders busy-wait until the lock is free
//
KLOCK_SPINLOCK,
} LockType_t;
//
// "volatile" may not be actually needed
// NOTE(review): likely redundant with the __sync_* barriers used by the
// lock functions below — confirm before removing
//
// Field order is relied upon by the INITLOCK() initializer macro.
//
typedef struct sLock_t {
unsigned int initDone; // INITOK once initialized, 0 after DestroyLock()
int locked; // 0 = free, 1 = held (CAS target)
LockType_t type; // KLOCK_MUTEX or KLOCK_SPINLOCK
#ifdef _KALEID_KERNEL
Process_t *ownerProc; // unused (no scheduler yet)
Process_t *waitingProc; // unused (no scheduler yet)
#endif
} volatile Lock_t;
//------------------------------------------//
// Functions //
//------------------------------------------//
//
// CPU-yield primitive used by AquireLock() while waiting on a mutex
// in userland: the OS/K-native call when building with _OSK_SOURCE,
// the Linux syscall otherwise. (Not needed in-kernel, where contention
// panics instead.)
//
#ifndef _KALEID_KERNEL
#ifdef _OSK_SOURCE
int KalYieldCPU(void);   // was ',' — syntax error when _OSK_SOURCE is set
#else
int sched_yield(void);
#endif
#endif
//
// Initialize a lock
// Must run before any AquireLock/AttemptLock/ReleaseLock on it
//
static inline
void InitLock(Lock_t *lock, LockType_t type)
{
lock->type = type;
lock->locked = FALSE;
// initDone set last — presumably so a concurrent observer never sees a
// half-initialized lock marked ready (no barrier though — TODO confirm)
lock->initDone = INITOK;
#ifdef _KALEID_KERNEL
lock->ownerProc = NULL; // unused (no scheduler yet)
lock->waitingProc = NULL; // unused (no scheduler yet)
#endif
}
//
// Alternative way to initialize a lock at declaration time
// Initializer order must match the field order of struct sLock_t:
// { initDone, locked, type [, ownerProc, waitingProc] }
//
#ifdef _KALEID_KERNEL
# define INITLOCK(type) { INITOK, FALSE, (type), NULL, NULL }
#else
# define INITLOCK(type) { INITOK, FALSE, (type) }
#endif
//
// Destroy a lock
// Clears the init marker so further use trips the KalAssert()s;
// does not check or release the locked state
//
static inline
void DestroyLock(Lock_t *lock)
{
KalAssert(lock->initDone);
__sync_synchronize(); // full barrier before the lock is marked dead
lock->initDone = 0;
}
//
// Acquire the lock (name spelling kept for API compatibility)
//
// In-kernel: panics on contention, since double acquisition should never
// happen until we have at least a basic scheduler.
// In userland: spinlocks busy-wait; mutexes yield the CPU between
// CAS attempts.
//
static inline
void AquireLock(Lock_t *lock)
{
    KalAssert(lock->initDone == INITOK);

    // Atomically take locked 0 -> 1; loop (or panic) while it is held
    while (!__sync_bool_compare_and_swap(&lock->locked, 0, 1)) {
#ifdef _KALEID_KERNEL
        StartPanic("AquireLock on an already locked object");
#else
        if likely (lock->type == KLOCK_SPINLOCK) continue;
#ifdef _OSK_SOURCE
        else KalYieldCPU();
#else
        else sched_yield();
#endif
#endif
    }

    // Full barrier so the critical section cannot be reordered
    // before the acquisition
    __sync_synchronize();
}
//
// Release an already acquired lock
// Panic if the lock was never acquired
//
static inline
void ReleaseLock(Lock_t *lock)
{
    // Consistency fix: AquireLock/AttemptLock assert initialization,
    // ReleaseLock previously did not
    KalAssert(lock->initDone == INITOK);
#ifdef _KALEID_KERNEL
    // NOTE(review): ownerProc is never assigned by AquireLock (marked
    // "unused"), so this only holds while GetCurProc() is also NULL —
    // verify once a scheduler exists
    KalAssert(lock->ownerProc == GetCurProc());
#endif
    // Full barrier so all critical-section writes are visible
    // before the lock is seen as free
    __sync_synchronize();
    lock->locked = 0;
}
//
// Try to acquire the lock without blocking
// Returns true if the lock was taken, false if it was already held
//
static inline
bool AttemptLock(Lock_t *lock)
{
    KalAssert(lock->initDone == INITOK);

    const bool acquired =
        __sync_bool_compare_and_swap(&lock->locked, 0, 1);

    // Full barrier on both outcomes, matching AquireLock()
    __sync_synchronize();

    return acquired;
}
//------------------------------------------//
// End of header //
//------------------------------------------//
#endif