slightly leaking but otherwise working and arguably prettier scheduler

2023-10-24 14:44:19 +02:00
parent 8b470f0fa1
commit b44cb7cc68
5 changed files with 135 additions and 243 deletions

View File

@@ -4,6 +4,7 @@
#include "task.hpp"
#include "LockGuard.hpp"
#include "SkipList.hpp"
#include "Spinlock.hpp"
#include "gdt.hpp"
#include "kmem.hpp"
@@ -25,37 +26,19 @@ void sanity_check_frame(struct task_frame *cur_frame) {
     assert2((cur_frame->ss == GDTSEL(gdt_data) || cur_frame->ss == GDTSEL(gdt_data_user)), "SS wrong!");
 }
-struct TaskListNode {
-    struct Task *task;
-    struct TaskListNode *next;
-};
-struct TaskList {
-    struct TaskListNode *cur;
-    struct TaskListNode *last;
-};
+List<Task *>::Node *RunningTask;
-struct TaskListNode *RunningTask;
 // Should be touched only in the scheduler
-struct TaskList NextTasks;
-// New tasks
-struct Spinlock NewTasks_lock;
-struct TaskList NewTasks;
-// Unblocked tasks
-struct Spinlock UnblockedTasks_lock;
-struct TaskList UnblockedTasks;
+Spinlock NextTasks_lock;
+List<Task *> NextTasks;
 // Task freer
 Spinlock TasksToFree_lock;
-struct TaskList TasksToFree;
-struct TaskList TasksToFreeTemp;
+List<Task *> TasksToFree;
 // Waiting
-//struct Mutex WaitingTasks_lock = DefaultMutex;
-struct TaskList WaitingTasks;
+Spinlock WaitingTasks_lock;
+SkipList<uint64_t, Task *> WaitingTasks;
 static std::atomic<bool> initialized = false;
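
WaitingTasks is now a SkipList keyed by absolute wake-up time, so the wait queue stays sorted and the waker only ever has to inspect the smallest key. For illustration only, the same ordering can be sketched with std::multimap standing in for the in-tree SkipList<uint64_t, Task *> (FakeTask and the timings below are made up):

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

struct FakeTask { std::string name; };

int main() {
    std::multimap<uint64_t, FakeTask *> waiting;   // key = absolute wake time in microseconds
    FakeTask a{"a"}, b{"b"};
    uint64_t micros = 1000;

    waiting.emplace(micros + 500, &a);   // like sleep_self(500)
    waiting.emplace(micros + 100, &b);   // like sleep_self(100)

    micros = 1200;                       // time passes
    // Wake everything whose deadline has passed; entries are already sorted by key.
    while (!waiting.empty() && waiting.begin()->first <= micros) {
        std::printf("waking %s (due %llu)\n",
                    waiting.begin()->second->name.c_str(),
                    (unsigned long long) waiting.begin()->first);
        waiting.erase(waiting.begin());
    }
}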
@@ -66,109 +49,15 @@ static void free_task(struct Task *t) {
     kfree(t);
 }
-static void free_task_list_node(struct TaskListNode *t) {
-    kfree(t);
-}
-static struct TaskListNode *new_task_list_node() {
-    struct TaskListNode *ret = static_cast<TaskListNode *>(kmalloc(sizeof(struct TaskListNode)));
-    ret->task = NULL;
-    ret->next = NULL;
-    return ret;
-}
-static void append_task(struct TaskList *list, struct Task *task) {
-    if (list == &NextTasks) {
-        assert2(task->state == TS_RUNNING, "Trying to add blocked task to run queue!");
-    }
-    struct TaskListNode *newNode = new_task_list_node();
-    newNode->task = task;
-    if (!list->cur) {
-        list->cur = newNode;
-        list->last = newNode;
-    } else {
-        list->last->next = newNode;
-        list->last = newNode;
-    }
-}
-static void append_task_node(struct TaskList *list, struct TaskListNode *newNode) {
-    if (list == &NextTasks) {
-        assert2(newNode->task->state == TS_RUNNING, "Trying to add blocked task to run queue!");
-    }
-    newNode->next = NULL;
-    if (!list->cur) {
-        assert(list->last == NULL);
-        list->cur = newNode;
-        list->last = newNode;
-    } else {
-        list->last->next = newNode;
-        list->last = newNode;
-    }
-}
-static struct Task *peek_front(struct TaskList *list) {
-    struct Task *ret = NULL;
-    if (list->cur) {
-        ret = list->cur->task;
-    }
-    return ret;
-}
-static struct Task *pop_front(struct TaskList *list) {
-    struct Task *ret = NULL;
-    if (list->cur) {
-        struct TaskListNode *node;
-        node = list->cur;
-        ret = node->task;
-        list->cur = node->next;
-        free_task_list_node(node);
-        if (list->cur == NULL) list->last = NULL;
-    }
-    return ret;
-}
-static struct TaskListNode *pop_front_node(struct TaskList *list) {
-    struct TaskListNode *ret = NULL;
-    if (list->cur) {
-        struct TaskListNode *node;
-        node = list->cur;
-        ret = node;
-        list->cur = node->next;
-        if (list->cur == NULL) list->last = NULL;
-    } else {
-        assert(list->last == NULL);
-    }
-    if (ret) ret->next = NULL;
-    return ret;
-}
 static void task_freer() {
     while (true) {
         sleep_self(10000);
         {
             LockGuard l(TasksToFree_lock);
-            if (peek_front(&TasksToFree) == NULL)
-                continue;
-            assert2(peek_front(&TasksToFree) != NULL, "Sanity check");
-            while (peek_front(&TasksToFree) && peek_front(&TasksToFree)->state == TS_TO_REMOVE) {
-                free_task(pop_front(&TasksToFree));
+            while (!TasksToFree.empty()) {
+                auto t = TasksToFree.back();
+                TasksToFree.pop_back();
+                free_task(t);
             }
         }
     }
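
task_freer above is a plain reaper loop: sleep, take the lock, drain everything queued for freeing, repeat. A host-side stand-in of the same structure, with std::thread and std::mutex playing the roles of the kernel task and TasksToFree_lock (names and timings here are illustrative, not the kernel's):

#include <atomic>
#include <chrono>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

std::mutex to_free_lock;      // stands in for TasksToFree_lock
std::vector<int *> to_free;   // stands in for TasksToFree

void reaper(std::atomic<bool> &stop) {
    while (!stop) {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));  // like sleep_self(10000)
        std::lock_guard<std::mutex> l(to_free_lock);                 // like LockGuard l(TasksToFree_lock)
        while (!to_free.empty()) {   // drain whatever queued up since the last pass
            delete to_free.back();
            to_free.pop_back();
        }
    }
}

int main() {
    std::atomic<bool> stop{false};
    std::thread t(reaper, std::ref(stop));
    {
        std::lock_guard<std::mutex> l(to_free_lock);
        to_free.push_back(new int(1));   // a dying "task" queues itself for freeing
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    stop = true;
    t.join();
    std::printf("queue drained, %zu entries left\n", to_free.size());
}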
@@ -199,157 +88,156 @@ struct Task *new_ktask(void (*fn)(), const char *name) {
     sanity_check_frame(&newt->frame);
+    auto new_node = NextTasks.create_node(newt);
     {
-        LockGuard l(NewTasks_lock);
-        append_task(&NewTasks, newt);
+        LockGuard l(NextTasks_lock);
+        NextTasks.emplace_front(new_node);
     }
     return newt;
 }
+void remove_self() {
+    {
+        LockGuard l(TasksToFree_lock);
+        TasksToFree.emplace_front(cur_task());
+        NextTasks_lock.lock();
+        RunningTask->val->state = TS_BLOCKED;
+    }
+    NextTasks_lock.unlock();
+    yield_self();
+    assert2(0, "should be removed!");
+}
+void sleep_self(uint64_t diff) {
+    uint64_t wake_time = micros + diff;
+    while (micros <= wake_time) {
+        {
+            LockGuard l(WaitingTasks_lock);
+            // TODO this is all ugly
+            uint64_t l1 = 0;
+            for (auto cur = &*WaitingTasks.begin(); !cur->end; cur = cur->next[0]) l1++;
+            assert(WaitingTasks.add(wake_time, cur_task()) != nullptr);
+            uint64_t l2 = 0;
+            for (auto cur = &*WaitingTasks.begin(); !cur->end; cur = cur->next[0]) l2++;
+            assert(l2 - l1 == 1);
+            NextTasks_lock.lock();
+            RunningTask->val->state = TS_BLOCKED;
+        }
+        NextTasks_lock.unlock();
+        yield_self();
+    }
+}
+void yield_self() {
+    if (!RunningTask) return;
+    NO_INT(
+            if (RunningTask->val->mode == TASKMODE_KERN) {
+                _yield_self_kern();
+            })
+}
+static void task_waker() {
+    while (true) {
+        {
+            LockGuard l(WaitingTasks_lock);
+            while (WaitingTasks.begin() != WaitingTasks.end() && WaitingTasks.begin()->key <= micros && WaitingTasks.begin()->data->state != TS_RUNNING) {
+                auto *node = &*WaitingTasks.begin();
+                auto task = WaitingTasks.begin()->data;
+                // TODO this is all ugly
+                uint64_t l1 = 0;
+                for (auto cur = node; !cur->end; cur = cur->next[0]) l1++;
+                WaitingTasks.erase(node, node->next[0], false);
+                uint64_t l2 = 0;
+                for (auto *cur = &*WaitingTasks.begin(); !cur->end; cur = cur->next[0]) l2++;
+                assert(l1 - l2 == 1);
+                task->sleep_until = 0;
+                task->state = TS_RUNNING;
+                auto new_node = NextTasks.create_node(task);
+                {
+                    LockGuard l(NextTasks_lock);
+                    NextTasks.emplace_front(new_node);
+                }
+            }
+        }
+    }
+}
 void init_tasks() {
     // FIXME: not actually thread-safe, but it probably doesn't matter
     assert2(!atomic_load(&initialized), "Tasks should be initialized once!");
     new_ktask(task_freer, "freer");
+    new_ktask(task_waker, "waker");
     atomic_store(&initialized, true);
 }
-void remove_self() {
-    RunningTask->task->state = TS_TO_REMOVE;
-    yield_self();
-    assert2(0, "should be removed!");
-}
-void sleep_self(uint64_t diff) {
-    RunningTask->task->sleep_until = micros + diff;
-    RunningTask->task->state = TS_TO_SLEEP;
-    yield_self();
-}
-void yield_self() {
-    if (!RunningTask) return;
-    NO_INT(
-            if (RunningTask->task->mode == TASKMODE_KERN) {
-                _yield_self_kern();
-            })
-}
 extern "C" void switch_task(struct task_frame *cur_frame) {
     assert2(!are_interrupts_enabled(), "Switching tasks with enabled interrupts!");
     if (!atomic_load(&initialized)) return;
     sanity_check_frame(cur_frame);
-    struct TaskListNode *node = WaitingTasks.cur;
-    while (node) {
-        if (node->task->sleep_until <= micros && node->task->state == TS_TO_SLEEP) {
-            assert2(node->task->sleep_until, "Sleeping until 0?");
-            node->task->sleep_until = 0;
-            node->task->state = TS_RUNNING;
-            append_task_node(&NextTasks, pop_front_node(&WaitingTasks));
-            node = WaitingTasks.cur;
-        } else {
-            break;
-        }
-    }
-    assert2(!are_interrupts_enabled(), "Switching tasks with enabled interrupts!");
+    if (!NextTasks_lock.try_lock()) return;
     if (RunningTask) {
-        RunningTask->task->frame = *cur_frame;
-        memcpy(RunningTask->task->fxsave, temp_fxsave, 512);
-        if (RunningTask->task->state == TS_RUNNING) {
-            assert2(RunningTask->next == NULL, "next should be removed from RunningTask!");
-            append_task_node(&NextTasks, RunningTask);
-        } else if (RunningTask->task->state == TS_TO_SLEEP) {
-            if (!WaitingTasks.cur) {
-                assert(WaitingTasks.last == NULL);
-                WaitingTasks.cur = RunningTask;
-                WaitingTasks.last = RunningTask;
-            } else {
-                struct TaskListNode *prev = NULL;
-                struct TaskListNode *cur = WaitingTasks.cur;
-                while (cur && cur->task->sleep_until <= RunningTask->task->sleep_until) {
-                    prev = cur;
-                    cur = cur->next;
-                }
-                if (prev) {
-                    prev->next = RunningTask;
-                    RunningTask->next = cur;
-                    if (cur == NULL) WaitingTasks.last = RunningTask;
-                } else {
-                    RunningTask->next = WaitingTasks.cur;
-                    WaitingTasks.cur = RunningTask;
-                }
-                // if (cur == WaitingTasks.last) WaitingTasks.last = RunningTask;
-            }
-        } else if (RunningTask->task->state == TS_TO_REMOVE) {
-            append_task_node(&TasksToFreeTemp, RunningTask);
+        RunningTask->val->frame = *cur_frame;
+        memcpy(RunningTask->val->fxsave, temp_fxsave, 512);
+        if (RunningTask->val->state == TS_RUNNING) {
+            NextTasks.emplace_front(RunningTask);
+        }
+        // TODO: leak of nodes
     }
-    if (TasksToFreeTemp.cur && !UnblockedTasks_lock.test() && TasksToFree_lock.try_lock()) {
-        if (peek_front(&TasksToFree) == NULL) {
-            TasksToFree.cur = TasksToFreeTemp.cur;
-            TasksToFree.last = TasksToFreeTemp.last;
-            TasksToFreeTemp.cur = NULL;
-            TasksToFreeTemp.last = NULL;
-        }
-        TasksToFree_lock.unlock();
-    }
     RunningTask = NULL;
-    if (NewTasks_lock.try_lock()) {
-        while (peek_front(&NewTasks)) {
-            append_task_node(&NextTasks, pop_front_node(&NewTasks));
-        }
-        NewTasks_lock.unlock();
-    }
-    if (UnblockedTasks_lock.try_lock()) {
-        while (peek_front(&UnblockedTasks)) {
-            append_task_node(&NextTasks, pop_front_node(&UnblockedTasks));
-        }
-        UnblockedTasks_lock.unlock();
-    }
-    struct TaskListNode *next = pop_front_node(&NextTasks);
+    List<Task *>::Node *next = NextTasks.extract_back();
     assert2(next != NULL, "Kernel left with no tasks!");
-    assert2(next->task != NULL, "Kernel left with no tasks!");
-    assert2(next->task->state == TS_RUNNING, "Blocked task in run queue!");
+    assert2(next->val != NULL, "Kernel left with no tasks!");
+    assert2(next->val->state == TS_RUNNING, "Blocked task in run queue!");
+    NextTasks_lock.unlock();
     RunningTask = next;
-    *cur_frame = RunningTask->task->frame;
-    memcpy(temp_fxsave, RunningTask->task->fxsave, 512);
+    *cur_frame = RunningTask->val->frame;
+    memcpy(temp_fxsave, RunningTask->val->fxsave, 512);
     sanity_check_frame(cur_frame);
 }
 void self_block() {
-    RunningTask->task->state = TS_BLOCKED;
+    {
+        LockGuard l(NextTasks_lock);
+        RunningTask->val->state = TS_BLOCKED;
+    }
     yield_self();
 }
 void self_block(Spinlock &to_unlock) {
-    NO_INT(to_unlock.unlock();
-           RunningTask->task->state = TS_BLOCKED;)
+    {
+        LockGuard l(NextTasks_lock);
+        to_unlock.unlock();
+        RunningTask->val->state = TS_BLOCKED;
+    }
     yield_self();
 }
 void unblock(Task *what) {
     what->state = TS_RUNNING;
+    auto new_node = NextTasks.create_node(what);
     {
-        LockGuard l(UnblockedTasks_lock);
-        append_task(&UnblockedTasks, what);
+        LockGuard l(NextTasks_lock);
+        NextTasks.emplace_front(new_node);
     }
 };
 struct Task *cur_task() {
     if (!RunningTask) return NULL;
-    return RunningTask->task;
+    return RunningTask->val;
 }
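
The "slightly leaking" from the commit message is visible above: switch_task only re-queues RunningTask while it is still TS_RUNNING and otherwise drops the List<Task *>::Node ("// TODO: leak of nodes"), and a fresh node is created whenever the task is later woken or unblocked. One possible direction, not part of this commit, would be to park the node on the task and reuse it; a toy sketch with hypothetical names (queue_node, park_node and take_node are invented for illustration):

#include <cstdio>

struct Node;
struct Task {
    int state = 0;                 // stand-in for the TS_* enum
    Node *queue_node = nullptr;    // hypothetical: the task owns its node while off the run queue
};
struct Node { Task *val; Node *next; };

// Scheduler side: instead of dropping the node of a non-running task, park it on the task.
void park_node(Task *t, Node *n) { t->queue_node = n; }

// unblock()/waker side: reuse the parked node instead of allocating a new one.
Node *take_node(Task *t) { Node *n = t->queue_node; t->queue_node = nullptr; return n; }

int main() {
    Task task;
    Node *n = new Node{&task, nullptr};   // allocated once, e.g. when the task is created
    park_node(&task, n);                  // task blocks: node parked instead of leaked
    Node *again = take_node(&task);       // task unblocks: the same node goes back on the queue
    std::printf("recycled the same node: %s\n", again == n ? "yes" : "no");
    delete n;
}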

View File

@@ -18,9 +18,7 @@ enum TaskMode {
 enum TaskState {
     TS_RUNNING,
-    TS_BLOCKED,
-    TS_TO_REMOVE,
-    TS_TO_SLEEP
+    TS_BLOCKED
 };
 struct Task {

View File

@@ -28,7 +28,7 @@ public:
 private:
     std::atomic<bool> locked = false;
-    List<Task *> waiters;
+    List<Task *> waiters; // leaking List<Task *>::Node
     Spinlock waiters_lock;
     Task *owner = nullptr;

View File

@@ -36,6 +36,11 @@ public:
         emplace_front(new Node{std::forward<Args>(args)..., nullptr});
     }
+    template<class... Args>
+    Node* create_node(Args &&...args) {
+        return new Node{std::forward<Args>(args)..., nullptr};
+    }
     void emplace_front(Node *new_node) {
         if (head) {
             assert(tail != nullptr);
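
The new create_node()/emplace_front(Node *) pair lets a caller allocate the node before taking a lock and then publish it with nothing but pointer writes inside the critical section, which is how new_ktask, unblock and task_waker in the scheduler above use it. A minimal stand-in of that idiom (std::mutex and the toy list below are illustrative, not the kernel's types):

#include <cstdio>
#include <mutex>

struct Node { int val; Node *next; };

std::mutex queue_lock;        // plays the role of NextTasks_lock
Node *queue_head = nullptr;   // plays the role of NextTasks

Node *create_node(int v) {    // potentially slow allocation, done outside the lock
    return new Node{v, nullptr};
}

void emplace_front(Node *n) { // cheap pointer splice, done under the lock
    n->next = queue_head;
    queue_head = n;
}

int main() {
    Node *n = create_node(7);                    // allocate first...
    {
        std::lock_guard<std::mutex> l(queue_lock);
        emplace_front(n);                        // ...then publish under the lock
    }
    std::printf("front of queue: %d\n", queue_head->val);
    delete n;
}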

View File

@@ -224,8 +224,8 @@ public:
             toUpdate[i] = cur;
         }
         cur = cur->next[0];
-        if (cur->key == k && !cur->end) return nullptr;
+        // Without this it's a multimap TODO: multiple variants of this and merge it with set
+        // if (cur->key == k && !cur->end) return nullptr;
     }
     size_t newLevel = randomL();
@@ -350,18 +350,19 @@ public:
     }
     // TODO: pair
     struct SkipListIterator {
         // using iterator_category = std::forward_iterator_tag;
         using difference_type = std::ptrdiff_t;
-        using value_type = const V;
+        using value_type = Node;
         using pointer = value_type *;
         using reference = value_type &;
         explicit SkipListIterator(Node *n) : n(std::move(n)){};
-        reference operator*() const { return n->data; }
+        reference operator*() const { return *n; }
-        pointer operator->() const { return &(n->data); }
+        pointer operator->() const { return n; }
         SkipListIterator &operator--() {
             if (!n->end)
@@ -389,12 +390,12 @@ public:
         Node *n;
     };
-    // using iterator = SkipListIterator;
-    using const_iterator = SkipListIterator;
+    using iterator = SkipListIterator;
+    // using const_iterator = SkipListIterator;
-    const_iterator begin() const { return SkipListIterator(root->next[0]); }
+    iterator begin() const { return SkipListIterator(root->next[0]); }
-    const_iterator end() const { return SkipListIterator(endnode); }
+    iterator end() const { return SkipListIterator(endnode); }
     // void print() const {
     //     std::cout << "LIST STATUS" << std::endl;
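
With value_type changed to Node, dereferencing the iterator hands back the node itself, so callers can read key and data and follow next[0] directly, which is exactly what sleep_self and task_waker do. A single-level stand-in with the same iterator shape (MiniList is invented for illustration; the real SkipList has multiple levels and its own insertion logic):

#include <cstdint>
#include <cstdio>

struct MiniList {
    struct Node {
        uint64_t key;
        const char *data;
        bool end;          // true only for the tail sentinel
        Node *next[1];     // level-0 pointer, like the real next[0]
    };

    Node tail{0, nullptr, true, {nullptr}};
    Node head{0, nullptr, false, {&tail}};   // dummy root, like the SkipList's root

    struct Iterator {
        using value_type = Node;             // the node, not just the stored value
        using pointer = value_type *;
        using reference = value_type &;
        Node *n;
        reference operator*() const { return *n; }
        pointer operator->() const { return n; }
        Iterator &operator++() { n = n->next[0]; return *this; }
        bool operator!=(const Iterator &o) const { return n != o.n; }
    };

    Iterator begin() { return Iterator{head.next[0]}; }
    Iterator end() { return Iterator{&tail}; }
};

int main() {
    MiniList l;
    MiniList::Node a{100, "a", false, {nullptr}}, b{200, "b", false, {nullptr}};
    l.head.next[0] = &a; a.next[0] = &b; b.next[0] = &l.tail;   // 100 -> 200 -> end

    // Same walking pattern as the wait queue: stop at the sentinel, read key/data.
    for (auto it = l.begin(); it != l.end(); ++it)
        std::printf("key=%llu data=%s\n", (unsigned long long) it->key, it->data);
}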