mirror of https://github.com/usatiuk/ficus.git
spinlocks
@@ -1,8 +1,9 @@
#include "kmem.hpp"

#include "LockGuard.hpp"
#include "Spinlock.hpp"
#include "globals.hpp"
#include "memman.hpp"
#include "mutex.hpp"
#include "paging.hpp"
#include "serial.hpp"
#include "task.hpp"
@@ -12,8 +13,7 @@ uintptr_t KERN_HeapEnd;// Past the end

static bool initialized = false;

static struct Mutex kmem_lock;
static char kmem_lock_tasklist[256];//FIXME:
static Spinlock kmem_lock;

void init_kern_heap() {
    KERN_HeapBegin = static_cast<HeapEntry *>(get4k());
@@ -24,13 +24,10 @@ void init_kern_heap() {
    map((void *) KERN_HeapVirtBegin, (void *) HHDM_V2P(KERN_HeapBegin), PAGE_RW, KERN_AddressSpace);
    KERN_HeapBegin = (struct HeapEntry *) KERN_HeapVirtBegin;
    KERN_HeapEnd = (KERN_HeapVirtBegin + 4096);
    kmem_lock.waiters = (struct TaskList *) kmem_lock_tasklist;
    initialized = true;
}

static void extend_heap(size_t n_pages) {
    assert(kmem_lock.owner == cur_task());

    for (size_t i = 0; i < n_pages; i++) {
        void *p = get4k();
        assert2(p != NULL, "Kernel out of memory!");
@@ -41,8 +38,6 @@ static void extend_heap(size_t n_pages) {

// n is required length!
struct HeapEntry *split_entry(struct HeapEntry *what, size_t n) {
    assert(kmem_lock.owner == cur_task());

    assert2(what->len > (n + sizeof(struct HeapEntry)), "Trying to split a heap entry that's too small!");
    struct HeapEntry *new_entry = (struct HeapEntry *) (((void *) what) + sizeof(struct HeapEntry) + n);
@@ -62,93 +57,96 @@ struct HeapEntry *split_entry(struct HeapEntry *what, size_t n) {

void *kmalloc(size_t n) {
    assert(initialized);
    m_lock(&kmem_lock);
    struct HeapEntry *entry = KERN_HeapBegin;
    assert2(entry->magic == KERN_HeapMagicFree, "Bad heap!");

    struct HeapEntry *res = NULL;
    struct HeapEntry *prev = NULL;

    do {
    {
        LockGuard l(kmem_lock);
        struct HeapEntry *entry = KERN_HeapBegin;
        assert2(entry->magic == KERN_HeapMagicFree, "Bad heap!");

        struct HeapEntry *prev = NULL;

        do {
            assert2(entry->magic == KERN_HeapMagicFree, "Bad heap!");

            if (prev) {
                assert(entry->prev == prev);
                assert(prev->next == entry);
                assert(entry->prev->next == entry);
            }

            if (entry->len == n) {
                res = entry;
                if (prev) {
                    prev->next = entry->next;
                    if (entry->next)
                        entry->next->prev = prev;
                } else {
                    if (entry->next) {
                        KERN_HeapBegin = entry->next;
                        entry->next->prev = NULL;
                    } else {
                        KERN_HeapBegin = (struct HeapEntry *) KERN_HeapEnd;
                        extend_heap(1);
                        KERN_HeapBegin->next = NULL;
                        KERN_HeapBegin->prev = NULL;
                        KERN_HeapBegin->magic = KERN_HeapMagicFree;
                        KERN_HeapBegin->len = 4096 - (sizeof(struct HeapEntry));
                    }
                }
                break;
            }
            if (entry->len > n + sizeof(struct HeapEntry)) {
                res = entry;
                struct HeapEntry *new_split_entry = split_entry(res, n);

                if (prev) {
                    prev->next = new_split_entry;
                    new_split_entry->prev = prev;
                } else {
                    KERN_HeapBegin = new_split_entry;
                    new_split_entry->prev = NULL;
                }
                if (new_split_entry->prev)
                    assert(new_split_entry->prev->magic == KERN_HeapMagicFree);
                break;
            }

            prev = entry;
            entry = entry->next;
        } while (entry);

        if (!res) {
            entry = prev;

            assert2(entry->magic == KERN_HeapMagicFree, "Expected last tried entry to be free");
            assert2(entry->next == NULL, "Expected last tried entry to be the last");

            size_t data_needed = n + (2 * sizeof(struct HeapEntry));

            size_t pages_needed = ((data_needed & 0xFFF) == 0)
                                          ? data_needed >> 12
                                          : ((data_needed & (~0xFFF)) + 0x1000) >> 12;

            struct HeapEntry *new_entry = (struct HeapEntry *) KERN_HeapEnd;
            extend_heap(pages_needed);
            new_entry->next = NULL;
            new_entry->prev = entry;
            new_entry->magic = KERN_HeapMagicFree;
            new_entry->len = (pages_needed * 4096) - (sizeof(struct HeapEntry));
            assert2(new_entry->len >= n, "Expected allocated heap entry to fit what we wanted");
            res = new_entry;
            if (new_entry->len > n) {
                struct HeapEntry *new_split_entry = split_entry(res, n);
                entry->next = new_split_entry;
                new_split_entry->prev = entry;
                if (new_split_entry->prev)
                    assert(new_split_entry->prev->magic == KERN_HeapMagicFree);
            }
        }

        if (!res) {
            return nullptr;
        }
    }

    if (res) {
        // if (res->next) res->next->prev = res->prev;
        // if (res->prev) res->prev->next = res->next;
@@ -156,18 +154,12 @@ void *kmalloc(size_t n) {
        res->next = NULL;
        res->prev = NULL;
        res->magic = KERN_HeapMagicTaken;
        m_unlock(&kmem_lock);
        for (size_t i = 0; i < n; i++) res->data[i] = 0xFEU;
        return res->data;
    } else {
        m_unlock(&kmem_lock);
        return NULL;
    }
    for (size_t i = 0; i < n; i++) res->data[i] = 0xFEU;
    return res->data;
}

static void try_merge_fwd(struct HeapEntry *entry) {
    assert(kmem_lock.owner == cur_task());

    assert2(entry->magic == KERN_HeapMagicFree, "Bad merge!");
    assert(entry->prev == NULL);
@@ -198,8 +190,6 @@ static void try_merge_fwd(struct HeapEntry *entry) {
}

static struct HeapEntry *try_shrink_heap(struct HeapEntry *entry) {
    assert(kmem_lock.owner == cur_task());

    assert(entry->prev == NULL);
    if ((uint64_t) entry + sizeof(struct HeapEntry) + entry->len == KERN_HeapEnd) {
        // Shrink it if it's at least two pages
@@ -240,7 +230,7 @@ static struct HeapEntry *try_shrink_heap(struct HeapEntry *entry) {

void kfree(void *addr) {
    assert(initialized);
    m_lock(&kmem_lock);
    LockGuard l(kmem_lock);

    struct HeapEntry *freed = (struct HeapEntry *) (addr - (sizeof(struct HeapEntry)));
    struct HeapEntry *entry = KERN_HeapBegin;
@@ -260,8 +250,6 @@ void kfree(void *addr) {
    KERN_HeapBegin = try_shrink_heap(freed);
    assert(KERN_HeapBegin != NULL);
    assert2(KERN_HeapBegin->prev == NULL, "Bad free!");

    m_unlock(&kmem_lock);
}

void *krealloc(void *addr, size_t newsize) {
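Note: the shape of the heap-allocator change above is that the explicit m_lock()/m_unlock() pairs, which had to be repeated on every exit path, become a block-scoped guard whose destructor releases the lock. A minimal sketch of that pattern, assuming the kernel build environment for the two headers added by this commit; kmalloc_shape, kmem_lock_demo and heap_walk_demo are made-up names standing in for the real kmalloc, kmem_lock and free-list walk:

    #include "LockGuard.hpp"
    #include "Spinlock.hpp"
    #include <cstddef>

    static Spinlock kmem_lock_demo;                          // stand-in for kmem_lock
    static void *heap_walk_demo(size_t) { return nullptr; }  // placeholder for the free-list search

    void *kmalloc_shape(size_t n) {
        void *res = nullptr;
        {
            LockGuard l(kmem_lock_demo); // Spinlock::lock() runs in the constructor
            res = heap_walk_demo(n);     // search / extend_heap() happens under the lock
        }                                // Spinlock::unlock() runs in ~LockGuard, on every path out
        if (!res) return nullptr;
        return res;
    }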
@@ -3,8 +3,9 @@
//

#include "memman.hpp"
#include "LockGuard.hpp"
#include "Spinlock.hpp"
#include "misc.hpp"
#include "mutex.hpp"
#include "paging.hpp"
#include "serial.hpp"
#include <stddef.h>
@@ -16,7 +17,7 @@
// Expected to be nulled by the bootloader
static struct FourPages used_bitmap[BITMAP_SIZE];

static struct Mutex memman_lock;
static Spinlock memman_lock;

static uint64_t maxPid = 0;// Past the end
static uint64_t minPid = 0;
@@ -93,7 +94,7 @@ void parse_limine_memmap(struct limine_memmap_entry *entries, unsigned int num,
}

void *get4k() {
    m_lock(&memman_lock);
    LockGuard l(memman_lock);
    if (totalMem == 0) return NULL;

    uint64_t curPid = minPid;
@@ -105,12 +106,11 @@ void *get4k() {
    totalMem -= 4;
    assert2(getSts(curPid) == MEMMAN_STATE_FREE, "Sanity check");
    setSts(curPid, MEMMAN_STATE_USED);
    m_unlock(&memman_lock);
    return (void *) (HHDM_P2V(curPid << 12));
}

void free4k(void *page) {
    m_lock(&memman_lock);
    LockGuard l(memman_lock);
    if ((uint64_t) page >= HHDM_BEGIN) page = (void *) HHDM_V2P(page);
    else
        assert2(0, "Tried to free memory not in HHDM!");
@@ -122,7 +122,6 @@ void free4k(void *page) {
    setSts(pid, MEMMAN_STATE_FREE);
    totalMem += 4;
    if (minPid > pid) minPid = pid;
    m_unlock(&memman_lock);
}

uint64_t get_free() {
@@ -3,6 +3,8 @@
//

#include "task.hpp"
#include "LockGuard.hpp"
#include "Spinlock.hpp"
#include "cv.hpp"
#include "gdt.hpp"
#include "kmem.hpp"
@@ -36,11 +38,11 @@ struct TaskListNode *RunningTask;
struct TaskList NextTasks;

// New tasks
struct Mutex NewTasks_lock;
struct Spinlock NewTasks_lock;
struct TaskList NewTasks;

// Unblocked tasks
struct Mutex UnblockedTasks_lock;
struct Spinlock UnblockedTasks_lock;
struct TaskList UnblockedTasks;

// Task freer
@@ -184,9 +186,10 @@ struct Task *new_ktask(void (*fn)(), char *name) {

    sanity_check_frame(&newt->frame);

    m_lock(&NewTasks_lock);
    append_task(&NewTasks, newt);
    m_unlock(&NewTasks_lock);
    {
        LockGuard l(NewTasks_lock);
        append_task(&NewTasks, newt);
    }
    return newt;
}

@@ -259,7 +262,7 @@ extern "C" void switch_task(struct task_frame *cur_frame) {
        }
    }

    if (TasksToFreeTemp.cur && !m_test(&UnblockedTasks_lock) && m_try_lock(&TasksToFree_lock)) {
    if (TasksToFreeTemp.cur && !UnblockedTasks_lock.test() && m_try_lock(&TasksToFree_lock)) {
        TasksToFree.cur = TasksToFreeTemp.cur;
        TasksToFree.last = TasksToFreeTemp.last;
        TasksToFreeTemp.cur = NULL;
@@ -270,18 +273,18 @@ extern "C" void switch_task(struct task_frame *cur_frame) {

    RunningTask = NULL;

    if (m_try_lock(&NewTasks_lock)) {
    if (NewTasks_lock.try_lock()) {
        while (peek_front(&NewTasks)) {
            append_task_node(&NextTasks, pop_front_node(&NewTasks));
        }
        m_unlock(&NewTasks_lock);
        NewTasks_lock.unlock();
    }

    if (m_try_lock(&UnblockedTasks_lock)) {
    if (UnblockedTasks_lock.try_lock()) {
        while (peek_front(&UnblockedTasks)) {
            append_task_node(&NextTasks, pop_front_node(&UnblockedTasks));
        }
        m_unlock(&UnblockedTasks_lock);
        UnblockedTasks_lock.unlock();
    }

    struct TaskListNode *next = pop_front_node(&NextTasks);
@@ -335,9 +338,10 @@ void m_unlock_sched_hook(struct Mutex *m) {

    if (newt) {
        newt->task->state = TS_RUNNING;
        m_spin_lock(&UnblockedTasks_lock);
        append_task_node(&UnblockedTasks, newt);
        m_unlock(&UnblockedTasks_lock);
        {
            LockGuard l(UnblockedTasks_lock);
            append_task_node(&UnblockedTasks, newt);
        }
    }
}

@@ -363,9 +367,10 @@ void cv_unlock_sched_hook(struct CV *cv, int who) {

        if (newt) {
            newt->task->state = TS_RUNNING;
            m_spin_lock(&UnblockedTasks_lock);
            append_task_node(&UnblockedTasks, newt);
            m_unlock(&UnblockedTasks_lock);
            {
                LockGuard l(UnblockedTasks_lock);
                append_task_node(&UnblockedTasks, newt);
            }
        }
    } while (newt && (who == CV_NOTIFY_ALL));
}
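Note: in the scheduler the conversion keeps the non-blocking checks. The m_test()/m_try_lock() calls on the converted locks become Spinlock::test()/try_lock(), while TasksToFree_lock stays a Mutex, so switch_task() still only drains a queue when it can take the lock without waiting. A rough sketch of that drain pattern, assuming the Spinlock header from this commit but with a std::deque standing in for the kernel's TaskList; the queue and function names here are illustrative:

    #include "Spinlock.hpp"
    #include <deque>

    static Spinlock new_tasks_lock;                // illustrative counterpart of NewTasks_lock
    static std::deque<int> new_tasks, next_tasks;  // std::deque instead of struct TaskList

    // Only touch the shared queue if the lock is free right now; the scheduler
    // path must never spin or block here.
    void drain_new_tasks() {
        if (new_tasks_lock.try_lock()) {
            while (!new_tasks.empty()) {
                next_tasks.push_back(new_tasks.front());
                new_tasks.pop_front();
            }
            new_tasks_lock.unlock();
        }
    }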
@@ -1,3 +1,3 @@
target_include_directories(kernel.elf PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})

target_sources(kernel.elf PRIVATE mutex.cpp cv.cpp cppsupport.cpp)
target_sources(kernel.elf PRIVATE mutex.cpp cv.cpp cppsupport.cpp Spinlock.cpp LockGuard.cpp)
src/kernel/LockGuard.cpp (Normal file, 5 lines)
@@ -0,0 +1,5 @@
//
// Created by Stepan Usatiuk on 21.10.2023.
//

#include "LockGuard.hpp"
src/kernel/LockGuard.hpp (Normal file, 23 lines)
@@ -0,0 +1,23 @@
//
// Created by Stepan Usatiuk on 21.10.2023.
//

#ifndef OS2_LOCKGUARD_H
#define OS2_LOCKGUARD_H

template<typename T>
class LockGuard {
public:
    LockGuard(T &lock) : lock(&lock) {
        this->lock->lock();
    }
    ~LockGuard() {
        lock->unlock();
    }

private:
    T *lock;
};


#endif//OS2_LOCKGUARD_H
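Note: LockGuard is the usual RAII guard. It works with any type exposing lock()/unlock(), takes the lock in its constructor and releases it in its destructor, so early returns inside the guarded scope cannot leak the lock. A small usage sketch; TraceLock and guarded_work are made-up names for illustration:

    #include "LockGuard.hpp" // the header added above
    #include <cstdio>

    // Minimal stand-in lock: anything with lock()/unlock() fits the template.
    struct TraceLock {
        void lock() { std::puts("lock"); }
        void unlock() { std::puts("unlock"); }
    };

    int guarded_work(bool early) {
        TraceLock tl;
        LockGuard g(tl);     // prints "lock" via TraceLock::lock()
        if (early) return 0; // ~LockGuard prints "unlock" on this early return
        return 1;            // ...and on the normal return as well
    }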
src/kernel/Spinlock.cpp (Normal file, 5 lines)
@@ -0,0 +1,5 @@
//
// Created by Stepan Usatiuk on 21.10.2023.
//

#include "Spinlock.hpp"
src/kernel/Spinlock.hpp (Normal file, 46 lines)
@@ -0,0 +1,46 @@
//
// Created by Stepan Usatiuk on 21.10.2023.
//

#ifndef OS2_SPINLOCK_H
#define OS2_SPINLOCK_H

#include "serial.hpp"
#include "task.hpp"
#include <atomic>
#include <cstdint>
#include <type_traits>

class Spinlock {
public:
    bool try_lock() {
        bool expected = false;
        if (!locked.compare_exchange_strong(expected, true)) {
            return false;
        }
        return true;
    }

    void lock() {
        while (!try_lock()) { yield_self(); }
    }

    void unlock() {
        bool expected = true;
        if (!locked.compare_exchange_strong(expected, false))
            writestr("Unlocking an unlocked spinlock!\n");
    }

    bool test() {
        return locked.load();
    }

private:
    std::atomic<bool> locked = false;
    struct TaskList *waiters = nullptr;
};

static_assert(std::is_trivially_copyable_v<Spinlock> == true);
static_assert(std::is_trivially_destructible_v<Spinlock> == true);

#endif//OS2_SPINLOCK_H
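Note: the lock itself is a plain test-and-set spinlock over std::atomic<bool>. try_lock() attempts a single false-to-true compare_exchange_strong, lock() retries and yields between attempts, and unlock() flips the flag back, logging if it was not held; the waiters field is declared but not used by any of the methods shown. A freestanding sketch of the same idea, with std::this_thread::yield() and fputs() standing in for the kernel's yield_self() and writestr(); ToySpinlock is an illustrative name, not part of the commit:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    // Illustrative user-space equivalent of the kernel's Spinlock.
    class ToySpinlock {
    public:
        bool try_lock() {
            bool expected = false;                                // succeeds only if currently unlocked
            return locked.compare_exchange_strong(expected, true);
        }
        void lock() {
            while (!try_lock()) std::this_thread::yield();        // the kernel version calls yield_self()
        }
        void unlock() {
            bool expected = true;
            if (!locked.compare_exchange_strong(expected, false)) // flags a double unlock
                std::fputs("Unlocking an unlocked spinlock!\n", stderr);
        }
        bool test() const { return locked.load(); }

    private:
        std::atomic<bool> locked{false};
    };

Unlike the old blocking Mutex, a contended caller here simply keeps retrying and yielding its time slice rather than being parked on a wait list.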