Tasks make a bit more sense now

2024-03-22 16:57:13 +01:00
parent 02ec0c6105
commit f143296493
14 changed files with 285 additions and 283 deletions
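In short: the free functions new_ktask/new_utask/start_task/remove_self are replaced by a Task constructor plus Task::start(), a trampoline now calls remove_self() automatically when the entrypoint returns, and the scheduling entry points (sleep_self, yield_self, self_block, switch_task, getTaskTimePerPid) move into a Scheduler namespace. A minimal sketch of the new usage, pieced together from the diff below (hello_task and spawn_hello are made-up names for illustration):

    // Illustration only, not part of the commit
    void hello_task() {
        GlobalTtyManager.all_tty_putstr("hello from a kernel task\n");
        Scheduler::sleep_self(1000000); // argument is in microseconds, so roughly one second
        // simply returning ends the task: the new trampoline calls remove_self() for us
    }

    void spawn_hello() {
        // the constructor registers the task in AllTasks; start() puts it on the run queue
        (new Task(Task::TaskMode::TASKMODE_KERN, hello_task, "hello"))->start();
    }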

View File

@@ -44,9 +44,9 @@ SerialTty::SerialTty() : Tty() {
outb(PORT + 3, 0x00); // Disable DLAB
outb(PORT + 1, 0x01); // Enable data available interrupt
Task *task = new_ktask((void (*)(void))(&SerialTty::this_pooler), "serialpooler", false);
task->frame.rdi = reinterpret_cast<uint64_t>(this);
start_task(task);
Task *task = new Task(Task::TaskMode::TASKMODE_KERN, (void (*)(void))(&SerialTty::this_pooler), "serialpooler");
task->_frame.rdi = reinterpret_cast<uint64_t>(this);
task->start();
attach_interrupt(4, &SerialTty::isr, this);
IRQ_clear_mask(4);

View File

@@ -166,9 +166,9 @@ uint16_t pic_get_isr(void) {
static int_handler_t handlers[256];
static void *handlers_args[256];
extern "C" void pic1_irq_real_0(struct task_frame *frame) {
extern "C" void pic1_irq_real_0(TaskFrame *frame) {
timer_tick();
switch_task(frame);
Scheduler::switch_task(frame);
PIC_sendEOI(0);
}
extern "C" void pic1_irq_real_1() {

View File

@@ -55,7 +55,7 @@ typedef struct {
#define IDT_GUARD 0xdeadbe3fdeadb3efULL
// Assuming the compiler understands that this is pushed on the stack in the correct order
struct task_frame {
struct TaskFrame {
uint64_t guard;
uint64_t guard2; // To keep stack aligned after pushaq
@@ -81,8 +81,8 @@ struct task_frame {
uint64_t flags;
uint64_t sp;
uint64_t ss;
} __attribute__((packed));
}
__attribute__((packed));
extern "C" void exception_handler(void);

View File

@@ -40,13 +40,12 @@ void ktask2() {
for (uint32_t c = 0; c < 2; c++) {
// Note: we assume the framebuffer model is RGB with 32-bit pixels.
for (size_t i = 0; i < 100; i++) {
sleep_self(25000);
Scheduler::sleep_self(25000);
uint32_t *fb_ptr = static_cast<uint32_t *>(framebuffer->address);
fb_ptr[i * (framebuffer->pitch / 4) + i + 100] = c ? 0 : 0xFFFFFF;
}
}
new_ktask(ktask, "one");
remove_self();
(new Task(Task::TaskMode::TASKMODE_KERN, ktask, "one"))->start();
}
@@ -59,13 +58,12 @@ void ktask() {
for (uint32_t c = 0; c < 2; c++) {
// Note: we assume the framebuffer model is RGB with 32-bit pixels.
for (size_t i = 0; i < 100; i++) {
sleep_self(25000);
Scheduler::sleep_self(25000);
uint32_t *fb_ptr = static_cast<uint32_t *>(framebuffer->address);
fb_ptr[i * (framebuffer->pitch / 4) + i] = c ? 0 : 0xFFFFFF;
}
}
new_ktask(ktask2, "two");
remove_self();
(new Task(Task::TaskMode::TASKMODE_KERN, ktask2, "two"))->start();
}
void freeprinter() {
@@ -89,19 +87,19 @@ void freeprinter() {
buf += "\n";
buf += "=====\n";
GlobalTtyManager.all_tty_putstr(buf.c_str());
sleep_self(1000000);
Scheduler::sleep_self(1000000);
}
}
void statprinter() {
SkipList<uint64_t, std::pair<String, uint64_t>> last_times = getTaskTimePerPid();
SkipList<uint64_t, std::pair<String, uint64_t>> last_times = Scheduler::getTaskTimePerPid();
std::atomic<uint64_t> last_print_time = micros;
while (1) {
sleep_self(1000000);
Scheduler::sleep_self(1000000);
uint64_t prev_print_time = last_print_time;
last_print_time = micros;
SkipList<uint64_t, std::pair<String, uint64_t>> prev_times = std::move(last_times);
last_times = getTaskTimePerPid();
last_times = Scheduler::getTaskTimePerPid();
uint64_t slice = last_print_time - prev_print_time;
if (slice == 0) continue;
@@ -138,45 +136,41 @@ void mtest1() {
{
LockGuard l(testmutex);
GlobalTtyManager.all_tty_putstr("Locked1\n");
sleep_self(100000);
Scheduler::sleep_self(100000);
}
GlobalTtyManager.all_tty_putstr("Unlocked1\n");
remove_self();
}
void mtest2() {
{
LockGuard l(testmutex);
GlobalTtyManager.all_tty_putstr("Locked2\n");
sleep_self(100000);
Scheduler::sleep_self(100000);
}
GlobalTtyManager.all_tty_putstr("Unlocked2\n");
remove_self();
}
void mtest3() {
{
LockGuard l(testmutex);
GlobalTtyManager.all_tty_putstr("Locked3\n");
sleep_self(100000);
Scheduler::sleep_self(100000);
}
GlobalTtyManager.all_tty_putstr("Unlocked3\n");
remove_self();
}
void stress() {
static std::atomic<int> i = 0;
int curi = i++;
if (curi > 1500) remove_self();
if (curi > 1500) return;
sleep_self(100000 - curi * 10);
Scheduler::sleep_self(100000 - curi * 10);
char buf[69];
itoa(curi, buf, 10);
// GlobalTtyManager.all_tty_putstr("stress ");
// GlobalTtyManager.all_tty_putstr(buf);
// GlobalTtyManager.all_tty_putstr("\n");
remove_self();
}
void templates_tester() {
@@ -184,17 +178,13 @@ void templates_tester() {
for (int i = 0; i < 100; i++)
test_templates();
GlobalTtyManager.all_tty_putstr("Testing templates OK\n");
remove_self();
}
void stress_tester() {
for (int i = 0; i < 100; i++)
new_ktask(stress, "stress");
(new Task(Task::TaskMode::TASKMODE_KERN, stress, "stress"))->start();
GlobalTtyManager.all_tty_putstr("Finished stress\n");
remove_self();
}
@@ -210,42 +200,39 @@ void user_task() {
void vfs_tester() {
VFSTester vfsTester;
vfsTester.test();
remove_self();
}
void ktask_main() {
GlobalTtyManager.add_tty(new SerialTty());
new_ktask(ktask, "one");
new_ktask(freeprinter, "freeprinter");
new_ktask(statprinter, "statprinter");
new_ktask(mtest1, "mtest1");
new_ktask(mtest2, "mtest2");
new_ktask(mtest3, "mtest3");
new_ktask(templates_tester, "templates_tester");
new_ktask(templates_tester, "templates_tester2");
new_ktask(stress_tester, "stress_tester");
(new Task(Task::TaskMode::TASKMODE_KERN, ktask, "one"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, freeprinter, "freeprinter"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, statprinter, "statprinter"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, mtest1, "mtest1"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, mtest2, "mtest2"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, mtest3, "mtest3"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, templates_tester, "templates_tester"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, templates_tester, "templates_tester2"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, stress_tester, "stress_tester"))->start();
VFSGlobals::mounts.add_mount(new MemFs(&VFSGlobals::root));
new_ktask(vfs_tester, "vfs_tester");
(new Task(Task::TaskMode::TASKMODE_KERN, vfs_tester, "vfs_tester"))->start();
for (int i = 0; i < saved_modules_size; i++) {
GlobalTtyManager.all_tty_putstr("Starting ");
GlobalTtyManager.all_tty_putstr(saved_modules_names[i]);
GlobalTtyManager.all_tty_putchar('\n');
Task *utask = new_utask((void (*)()) 0x00020000, saved_modules_names[i]);
Task *utask = new Task(Task::TaskMode::TASKMODE_USER, (void (*)()) 0x00020000, saved_modules_names[i]);
assert(saved_modules_size > 0);
utask->vma->mmap_phys((void *) 0x00020000, (void *) KERN_V2P(saved_modules_data[i]),
max_saved_module_file_size, PAGE_USER | PAGE_RW);
start_task(utask);
utask->_vma->mmap_phys((void *) 0x00020000, (void *) KERN_V2P(saved_modules_data[i]),
max_saved_module_file_size, PAGE_USER | PAGE_RW);
utask->start();
}
remove_self();
}
void dummy_task() {
for (;;) {
yield_self();
Scheduler::yield_self();
}
}
@@ -260,10 +247,10 @@ void kmain() {
srand(micros); // NOLINT
new_ktask(ktask_main, "ktask_main");
new_ktask(dummy_task, "dummy");
(new Task(Task::TaskMode::TASKMODE_KERN, ktask_main, "ktask_main"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, dummy_task, "dummy"))->start();
setup_syscalls();
init_tasks();
Scheduler::init_tasks();
for (;;) {
__asm__ __volatile__("hlt");
}

View File

@@ -39,7 +39,7 @@ int serial_received() {
char read_serial() {
while (serial_received() == 0) {
yield_self();
Scheduler::yield_self();
}
return inb(PORT);
@@ -51,7 +51,7 @@ int is_transmit_empty() {
void write_serial(char a) {
while (is_transmit_empty() == 0) {
yield_self();
Scheduler::yield_self();
}
outb(PORT, a);

View File

@@ -54,7 +54,7 @@ uint64_t syscall_putchar(char c) {
}
uint64_t syscall_sleep(uint64_t micros) {
sleep_self(micros);
Scheduler::sleep_self(micros);
return 0;
}

View File

@@ -21,7 +21,7 @@
char temp_fxsave[512] __attribute__((aligned(16)));
void sanity_check_frame(struct task_frame *cur_frame) {
void sanity_check_frame(TaskFrame *cur_frame) {
// TODO: This makes sense to check when entering, but not when switching
// assert((((uintptr_t) cur_frame) & 0xFULL) == 0);
assert2((void *) cur_frame->ip != NULL, "Sanity check");
@@ -34,14 +34,14 @@ void sanity_check_frame(struct task_frame *cur_frame) {
assert2((cur_frame->cs == Arch::GDT::gdt_code.selector() || cur_frame->cs == (Arch::GDT::gdt_code_user.selector() | 0x3)), "CS wrong!");
}
std::atomic<uint64_t> max_pid = 0;
Mutex AllTasks_lock;
SkipList<uint64_t, Task *> AllTasks;
std::atomic<uint64_t> max_pid = 0;
Mutex AllTasks_lock;
SkipList<uint64_t, UniquePtr<Task>> AllTasks;
static List<Task *>::Node *RunningTask;
static List<Task *>::Node *RunningTask;
static Spinlock NextTasks_lock;
static List<Task *> NextTasks;
static Spinlock NextTasks_lock;
static List<Task *> NextTasks;
// Task freer
Mutex TasksToFree_lock;
@@ -55,19 +55,103 @@ SkipList<uint64_t, List<Task *>::Node *> WaitingTasks;
static std::atomic<bool> initialized = false;
static void free_task(struct Task *t) {
kfree(t->kstack);
kfree(t->name);
kfree(t->fxsave);
kfree(t);
//
static void remove_self() {
assert(RunningTask != nullptr);
{
LockGuard l(TasksToFree_lock);
// TasksToFree is expected to do nothing with TS_RUNNING tasks
TasksToFree.emplace_front(RunningTask);
}
// This might not free the task right away: it may have been preempted
// while still marked running, in which case the task freer skips it,
// but it will eventually be cleaned up
TasksToFree_cv.notify_one();
Scheduler::self_block();
assert2(0, "should be removed!");
}
SkipList<uint64_t, std::pair<String, TaskPID>> getTaskTimePerPid() {
SkipList<uint64_t, std::pair<String, TaskPID>> ret;
static void trampoline(void *rdi, void (*rsi_entrypoint)()) {
rsi_entrypoint();
remove_self();
}
Task::Task(Task::TaskMode mode, void (*entrypoint)(), const char *name) {
_name = name;
_frame.ip = reinterpret_cast<uint64_t>(&trampoline);
_frame.rsi = (uint64_t) entrypoint;
if (mode == TaskMode::TASKMODE_KERN) {
_frame.cs = Arch::GDT::gdt_code.selector();
_frame.ss = Arch::GDT::gdt_data.selector();
} else if (mode == TaskMode::TASKMODE_USER) {
_frame.cs = Arch::GDT::gdt_code_user.selector() | 0x3;
_frame.ss = Arch::GDT::gdt_data_user.selector() | 0x3;
} else {
assert(false);
}
for (int i = 0; i < 512; i++) _fxsave->_fxsave[i] = 0;
_frame.flags = flags();
_frame.guard = IDT_GUARD;
_addressSpace = mode == TaskMode::TASKMODE_KERN ? KERN_AddressSpace : new AddressSpace();
_vma = new VMA(_addressSpace);
_state = TaskState::TS_BLOCKED;
_mode = mode;
_pid = max_pid.fetch_add(1);
_used_time = 0;
if (mode == TaskMode::TASKMODE_USER) {
task_pointer *taskptr = static_cast<task_pointer *>(
_vma->mmap_mem(reinterpret_cast<void *>(TASK_POINTER),
sizeof(task_pointer), 0, PAGE_RW | PAGE_USER)); // FIXME: this is probably unsafe
assert((uintptr_t) taskptr == TASK_POINTER);
task_pointer *taskptr_real = reinterpret_cast<task_pointer *>(HHDM_P2V(_addressSpace->virt2real(taskptr)));
_entry_ksp_val = ((((uintptr_t) _kstack->_ptr) + (TASK_SS - 9) - 1) & (~0xFULL)); // Ensure 16byte alignment
// It should be aligned before call, therefore it actually should be aligned here
assert((_entry_ksp_val & 0xFULL) == 0);
taskptr_real->taskptr = this;
taskptr_real->entry_ksp_val = _entry_ksp_val;
taskptr_real->ret_sp = 0x0;
}
if (mode == TaskMode::TASKMODE_USER) {
void *ustack = _vma->mmap_mem(NULL, TASK_SS, 0, PAGE_RW | PAGE_USER);
_vma->map_kern();
// Ensure 16byte alignment
_frame.sp = ((((uintptr_t) ustack) + (TASK_SS - 17) - 1) & (~0xFULL)) + 8;
} else {
_frame.sp = ((((uintptr_t) _kstack->_ptr) + (TASK_SS - 9) - 1) & (~0xFULL)) + 8;
}
// It should be aligned before call, therefore on function entry it should be misaligned by 8 bytes
assert((_frame.sp & 0xFULL) == 8);
sanity_check_frame(&_frame);
{
LockGuard l(AllTasks_lock);
AllTasks.add(_pid, UniquePtr(this));
}
}
Task::~Task() {
assert(_state != TaskState::TS_RUNNING);
}
SkipList<uint64_t, std::pair<String, Task::TaskPID>> Scheduler::getTaskTimePerPid() {
SkipList<uint64_t, std::pair<String, Task::TaskPID>> ret;
{
LockGuard l(AllTasks_lock);
for (const auto &t: AllTasks) {
ret.add(t.data->pid, std::make_pair(t.data->name, t.data->used_time.load()));
ret.add(t.data->pid(), std::make_pair(t.data->name(), t.data->used_time()));
}
}
return ret;
@@ -85,17 +169,15 @@ static void task_freer() {
{
LockGuard l(TasksToFree_lock);
t = TasksToFree.back();
if (t->val->state == TS_RUNNING) break;
if (t->val->state() == Task::TaskState::TS_RUNNING) break;
TasksToFree.pop_back();
}
{
uint64_t pid = t->val->pid;
uint64_t pid = t->val->pid();
{
LockGuard l(AllTasks_lock);
AllTasks.erase(pid);
}
free_task(t->val);
delete t;
}
}
}
@@ -108,130 +190,18 @@ static void task_freer() {
}
}
struct Task *new_ktask(void (*fn)(), const char *name, bool start) {
struct Task *newt = static_cast<Task *>(kmalloc(sizeof(struct Task)));
newt->kstack = static_cast<uint64_t *>(kmalloc(TASK_SS));
newt->name = static_cast<char *>(kmalloc(strlen(name) + 1));
newt->fxsave = static_cast<char *>(kmalloc(512));
strcpy(name, newt->name);
newt->frame.sp = ((((uintptr_t) newt->kstack) + (TASK_SS - 9) - 1) & (~0xFULL)) + 8; // Ensure 16byte alignment
// It should be aligned before call, therefore on function entry it should be misaligned by 8 bytes
assert((newt->frame.sp & 0xFULL) == 8);
newt->frame.ip = (uint64_t) fn;
newt->frame.cs = Arch::GDT::gdt_code.selector();
newt->frame.ss = Arch::GDT::gdt_data.selector();
for (int i = 0; i < 512; i++) newt->fxsave[i] = 0;
newt->frame.flags = flags();
newt->frame.guard = IDT_GUARD;
newt->addressSpace = KERN_AddressSpace;
newt->state = start ? TS_RUNNING : TS_BLOCKED;
newt->mode = TASKMODE_KERN;
newt->pid = max_pid.fetch_add(1);
newt->used_time = 0;
sanity_check_frame(&newt->frame);
if (start) {
auto new_node = NextTasks.create_node(newt);
{
SpinlockLockNoInt l(NextTasks_lock);
NextTasks.emplace_front(new_node);
}
}
{
LockGuard l(AllTasks_lock);
AllTasks.add(newt->pid, newt);
}
return newt;
}
struct Task *new_utask(void (*entrypoint)(), const char *name) {
Task *newt = static_cast<Task *>(kmalloc(sizeof(struct Task)));
newt->kstack = static_cast<uint64_t *>(kmalloc(TASK_SS));
newt->name = static_cast<char *>(kmalloc(strlen(name) + 1));
newt->fxsave = static_cast<char *>(kmalloc(512));
strcpy(name, newt->name);
newt->frame.ip = (uint64_t) entrypoint;
newt->frame.cs = Arch::GDT::gdt_code_user.selector() | 0x3;
newt->frame.ss = Arch::GDT::gdt_data_user.selector() | 0x3;
for (int i = 0; i < 512; i++) newt->fxsave[i] = 0;
newt->frame.flags = flags();
newt->frame.guard = IDT_GUARD;
newt->addressSpace = new AddressSpace();
newt->vma = new VMA(newt->addressSpace);
newt->state = TS_BLOCKED;
newt->mode = TASKMODE_USER;
newt->pid = max_pid.fetch_add(1);
newt->used_time = 0;
task_pointer *taskptr = static_cast<task_pointer *>(
newt->vma->mmap_mem(reinterpret_cast<void *>(TASK_POINTER),
sizeof(task_pointer), 0, PAGE_RW | PAGE_USER)); // FIXME: this is probably unsafe
assert((uintptr_t) taskptr == TASK_POINTER);
task_pointer *taskptr_real = reinterpret_cast<task_pointer *>(HHDM_P2V(newt->addressSpace->virt2real(taskptr)));
newt->entry_ksp_val = ((((uintptr_t) newt->kstack) + (TASK_SS - 9) - 1) & (~0xFULL)); // Ensure 16byte alignment
// It should be aligned before call, therefore it actually should be aligned here
assert((newt->entry_ksp_val & 0xFULL) == 0);
taskptr_real->taskptr = newt;
taskptr_real->entry_ksp_val = newt->entry_ksp_val;
taskptr_real->ret_sp = 0x0;
void *ustack = newt->vma->mmap_mem(NULL, TASK_SS, 0, PAGE_RW | PAGE_USER);
newt->frame.sp = ((((uintptr_t) ustack) + (TASK_SS - 17) - 1) & (~0xFULL)) + 8; // Ensure 16byte alignment
// It should be aligned before call, therefore on function entry it should be misaligned by 8 bytes
assert((newt->frame.sp & 0xFULL) == 8);
newt->vma->map_kern();
sanity_check_frame(&newt->frame);
{
LockGuard l(AllTasks_lock);
AllTasks.add(newt->pid, newt);
}
return newt;
}
List<Task *>::Node *start_task(struct Task *task) {
assert(task->state != TS_RUNNING);
task->state = TS_RUNNING;
auto new_node = NextTasks.create_node(task);
void Task::start() {
assert(_state != TaskState::TS_RUNNING);
_state = TaskState::TS_RUNNING;
auto new_node = NextTasks.create_node(this);
{
SpinlockLockNoInt l(NextTasks_lock);
NextTasks.emplace_front(new_node);
}
return new_node;
}
void remove_self() {
assert(RunningTask != nullptr);
{
LockGuard l(TasksToFree_lock);
// TasksToFree is expected to do nothing with TS_RUNNING tasks
TasksToFree.emplace_front(RunningTask);
}
// This might not free the task right away: it may have been preempted
// while still marked running, in which case the task freer skips it,
// but it will eventually be cleaned up
TasksToFree_cv.notify_one();
self_block();
assert2(0, "should be removed!");
}
void sleep_self(uint64_t diff) {
void Scheduler::sleep_self(uint64_t diff) {
uint64_t wake_time = micros + diff;
while (micros <= wake_time) {
{
@@ -250,11 +220,11 @@ void sleep_self(uint64_t diff) {
assert(len2 - len1 == 1);
}
self_block();
Scheduler::self_block();
}
}
void yield_self() {
void Scheduler::yield_self() {
if (!RunningTask) return;
NO_INT(
_yield_self_kern();)
@@ -265,7 +235,7 @@ static void task_waker() {
{
WaitingTasks_mlock.lock();
while (WaitingTasks.begin() != WaitingTasks.end() && WaitingTasks.begin()->key <= micros && WaitingTasks.begin()->data->val->state != TS_RUNNING) {
while (WaitingTasks.begin() != WaitingTasks.end() && WaitingTasks.begin()->key <= micros && WaitingTasks.begin()->data->val->state() != Task::TaskState::TS_RUNNING) {
auto *node = &*WaitingTasks.begin();
auto task = WaitingTasks.begin()->data;
@@ -281,8 +251,8 @@ static void task_waker() {
WaitingTasks_mlock.unlock();
assert(l1 - l2 == 1);
task->val->sleep_until = 0;
task->val->state = TS_RUNNING;
task->val->_sleep_until = 0;
task->val->_state = Task::TaskState::TS_RUNNING;
{
SpinlockLockNoInt l(NextTasks_lock);
@@ -302,15 +272,15 @@ static void task_waker() {
}
}
void init_tasks() {
void Scheduler::init_tasks() {
// FIXME: not actually thread-safe, but it probably doesn't matter
assert2(!atomic_load(&initialized), "Tasks should be initialized once!");
start_task(new_ktask(task_freer, "freer", false));
new_ktask(task_waker, "waker");
(new Task(Task::TaskMode::TASKMODE_KERN, task_freer, "freer"))->start();
(new Task(Task::TaskMode::TASKMODE_KERN, task_waker, "waker"))->start();
atomic_store(&initialized, true);
}
extern "C" void switch_task(struct task_frame *cur_frame) {
extern "C" void Scheduler::switch_task(TaskFrame *cur_frame) {
assert2(!are_interrupts_enabled(), "Switching tasks with enabled interrupts!");
if (!atomic_load(&initialized)) return;
sanity_check_frame(cur_frame);
@@ -336,11 +306,11 @@ extern "C" void switch_task(struct task_frame *cur_frame) {
lastSwitchMicros = micros;
if (RunningTask) {
RunningTask->val->frame = *cur_frame;
__builtin_memcpy(RunningTask->val->fxsave, temp_fxsave, 512);
oldspace = RunningTask->val->addressSpace;
RunningTask->val->used_time.fetch_add(lastSwitchMicros - prevSwitchMicros);
if (RunningTask->val->state == TS_RUNNING) {
RunningTask->val->_frame = *cur_frame;
__builtin_memcpy(RunningTask->val->_fxsave->_fxsave, temp_fxsave, 512);
oldspace = RunningTask->val->_addressSpace;
RunningTask->val->_used_time.fetch_add(lastSwitchMicros - prevSwitchMicros);
if (RunningTask->val->_state == Task::TaskState::TS_RUNNING) {
NextTasks.emplace_front(RunningTask);
}
}
@@ -348,14 +318,14 @@ extern "C" void switch_task(struct task_frame *cur_frame) {
next = NextTasks.extract_back();
assert2(next != NULL, "Kernel left with no tasks!");
assert2(next->val != NULL, "Kernel left with no tasks!");
assert2(next->val->state == TS_RUNNING, "Blocked task in run queue!");
assert2(next->val->_state == Task::TaskState::TS_RUNNING, "Blocked task in run queue!");
}
RunningTask = next;
*cur_frame = RunningTask->val->frame;
__builtin_memcpy(temp_fxsave, RunningTask->val->fxsave, 512);
*cur_frame = RunningTask->val->_frame;
__builtin_memcpy(temp_fxsave, RunningTask->val->_fxsave->_fxsave, 512);
AddressSpace *newspace = RunningTask->val->addressSpace;
AddressSpace *newspace = RunningTask->val->_addressSpace;
if (newspace != oldspace) {
uint64_t real_new_cr3 = (uint64_t) HHDM_V2P(newspace->get_cr3());
@@ -368,59 +338,59 @@ extern "C" void switch_task(struct task_frame *cur_frame) {
sanity_check_frame(cur_frame);
}
void self_block() {
void Scheduler::self_block() {
// TODO: clarify this function
NO_INT(
{
{
SpinlockLockNoInt l(NextTasks_lock);
RunningTask->val->state = TS_BLOCKED;
RunningTask->val->_state = Task::TaskState::TS_BLOCKED;
}
yield_self();
Scheduler::yield_self();
})
}
void self_block(Spinlock &to_unlock) {
void Scheduler::self_block(Spinlock &to_unlock) {
assert2(!are_interrupts_enabled(), "Self blocking with enabled interrupts!");
{
SpinlockLockNoInt l(NextTasks_lock);
to_unlock.unlock();
RunningTask->val->state = TS_BLOCKED;
RunningTask->val->_state = Task::TaskState::TS_BLOCKED;
}
yield_self();
Scheduler::yield_self();
}
void unblock(Task *what) {
void Scheduler::unblock(Task *what) {
assert(false);
assert(what != nullptr);
assert(what->state != TS_RUNNING);
sanity_check_frame(&what->frame);
assert(what->_state != Task::TaskState::TS_RUNNING);
sanity_check_frame(&what->_frame);
auto new_node = NextTasks.create_node(what);
{
SpinlockLockNoInt l(NextTasks_lock);
what->state = TS_RUNNING;
what->_state = Task::TaskState::TS_RUNNING;
NextTasks.emplace_front(new_node);
}
};
void unblock(List<Task *>::Node *what) {
void Scheduler::unblock(List<Task *>::Node *what) {
assert(what != nullptr);
assert(what->val->state != TS_RUNNING);
sanity_check_frame(&what->val->frame);
assert(what->val->_state != Task::TaskState::TS_RUNNING);
sanity_check_frame(&what->val->_frame);
{
SpinlockLockNoInt l(NextTasks_lock);
what->val->state = TS_RUNNING;
what->val->_state = Task::TaskState::TS_RUNNING;
NextTasks.emplace_front(what);
}
};
struct Task *cur_task() {
Task *Scheduler::cur_task() {
if (!RunningTask) return NULL;
return RunningTask->val;
}
List<Task *>::Node *extract_running_task_node() {
List<Task *>::Node *Scheduler::extract_running_task_node() {
if (!RunningTask) return nullptr;
return RunningTask;
}
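A note on the stack-pointer arithmetic in the new Task constructor above: the SysV AMD64 ABI expects rsp to be 16-byte aligned just before a call instruction, so at function entry, after the return address has been pushed, rsp is congruent to 8 modulo 16. That is what the mask with ~0xF followed by + 8 produces. Worked through with made-up numbers (TASK_SS = 16384 as in Task.hpp; the base address 0x1000 is only for the example):

    // Illustration only, not part of the commit
    // base = 0x1000, TASK_SS = 0x4000
    // base + (TASK_SS - 9) - 1 = 0x1000 + 0x3FF6 = 0x4FF6  (stays inside the allocation)
    // ... & ~0xFULL            = 0x4FF0                     (aligned as rsp is before a call)
    // ... + 8                  = 0x4FF8                     (0x4FF8 % 16 == 8: entry alignment)
    // _entry_ksp_val, the user task's kernel stack top, omits the final + 8 and is
    // asserted to be 16-byte aligned instead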

View File

@@ -6,6 +6,7 @@
#define OS1_TASK_H
#include "List.hpp"
#include "PointersCollection.hpp"
#include "SkipList.hpp"
#include "String.hpp"
#include "idt.hpp"
@@ -13,35 +14,64 @@
#define TASK_SS 16384
class Mutex;
enum TaskMode {
TASKMODE_KERN,
TASKMODE_USER
};
enum TaskState {
TS_RUNNING,
TS_BLOCKED
};
struct AddressSpace;
class VMA;
class Spinlock;
struct Task {
uint64_t entry_ksp_val;
struct task_frame frame;
uint64_t pid;
std::atomic<uint64_t> used_time;
AddressSpace *addressSpace;
VMA *vma;
uint64_t *kstack;
char *fxsave;
char *name;
enum TaskMode mode;
uint64_t sleep_until;
enum TaskState state;
class Task {
public:
using TaskPID = uint64_t;
enum class TaskMode {
TASKMODE_KERN,
TASKMODE_USER
};
enum class TaskState {
TS_RUNNING,
TS_BLOCKED
};
Task(TaskMode mode, void (*entrypoint)(), const char *name);
Task(const Task &) = delete;
Task(Task &&) = delete;
Task &operator=(const Task &) = delete;
Task &operator=(Task &&) = delete;
void start();
[[nodiscard]] const String &name() const { return _name; }
[[nodiscard]] TaskPID pid() const { return _pid; }
[[nodiscard]] uint64_t used_time() const { return _used_time; }
[[nodiscard]] TaskState state() const { return _state; }
~Task();
//private:
struct KernStack {
uint64_t _ptr[TASK_SS] __attribute__((aligned(16)));
} __attribute__((aligned(16)));
struct FxSave {
uint64_t _fxsave[512] __attribute__((aligned(16)));
} __attribute__((aligned(16)));
uint64_t _entry_ksp_val;
TaskFrame _frame;
TaskPID _pid;
std::atomic<uint64_t> _used_time;
AddressSpace *_addressSpace;
VMA *_vma;
UniquePtr<KernStack> _kstack{new KernStack()};
UniquePtr<FxSave> _fxsave{new FxSave()};
String _name;
TaskMode _mode;
uint64_t _sleep_until;
TaskState _state;
};
struct task_pointer {
Task *taskptr;
uint64_t entry_ksp_val;
@@ -49,32 +79,29 @@ struct task_pointer {
uint64_t ret_flags;
} __attribute__((packed));
struct Task *cur_task();
List<Task *>::Node *extract_running_task_node();
namespace Scheduler {
Task *cur_task();
List<Task *>::Node *extract_running_task_node();
void init_tasks();
struct Task *new_ktask(void (*fn)(), const char *name, bool start = true);
struct Task *new_utask(void (*entrypoint)(), const char *name);
List<Task *>::Node *start_task(struct Task *task);
void remove_self();
void sleep_self(uint64_t diff);
void init_tasks();
void self_block();
void sleep_self(uint64_t diff);
class Spinlock;
void self_block(Spinlock &to_unlock);
void unblock(Task *what);
void unblock(List<Task *>::Node *what);
void self_block();
extern "C" void switch_task(struct task_frame *cur_frame);
void self_block(Spinlock &to_unlock);
void unblock(Task *what);
void unblock(List<Task *>::Node *what);
using TaskPID = uint64_t;
extern "C" void switch_task(TaskFrame *cur_frame);
// TODO: that's quite inefficient!
SkipList<uint64_t, std::pair<String, TaskPID>> getTaskTimePerPid();
// TODO: that's quite inefficient!
SkipList<uint64_t, std::pair<String, Task::TaskPID>> getTaskTimePerPid();
void yield_self();
void yield_self();
} // namespace Scheduler
extern "C" void _yield_self_kern(); // Expects the caller to save interrupt state
// Expects the caller to save interrupt state
extern "C" void _yield_self_kern();
#endif //OS1_TASK_H
#endif //OS1_TASK_H

View File

@@ -12,6 +12,7 @@ target_sources(kernel.elf PRIVATE
Tty.cpp
cv.cpp
BytesFormatter.cpp
string.c
)
add_subdirectory(templates)

View File

@@ -18,18 +18,18 @@ public:
if (!locked.compare_exchange_strong(expected, true)) {
return false;
}
owner = cur_task();
owner = Scheduler::cur_task();
return true;
}
void spinlock() {
assert2(!are_interrupts_enabled(), "Assuming all spinlocks are without interrupts");
while (!try_lock()) { yield_self(); } // FIXME: Should be pause!
while (!try_lock()) { Scheduler::yield_self(); } // FIXME: Should be pause!
}
void unlock() {
bool expected = true;
assert(owner == cur_task());
assert(owner == Scheduler::cur_task());
owner = nullptr;
assert(locked.compare_exchange_strong(expected, false));
// if (!locked.compare_exchange_strong(expected, false))

View File

@@ -27,8 +27,8 @@ public:
waiters_lock.spinlock();
l.unlock();
// TODO: recheck this is correct
waiters.emplace_front(extract_running_task_node());
self_block(waiters_lock);)
waiters.emplace_front(Scheduler::extract_running_task_node());
Scheduler::self_block(waiters_lock);)
l.lock();
}
void notify_one() {
@@ -39,7 +39,7 @@ public:
t = waiters.extract_back();
}
}
if (t) unblock(t);
if (t) Scheduler::unblock(t);
}
void notify_all() {
List<Task *> waiters_new;
@@ -49,7 +49,7 @@ public:
}
while (!waiters_new.empty()) {
auto t = waiters_new.extract_back();
unblock(t);
Scheduler::unblock(t);
}
}
};

View File

@@ -14,7 +14,7 @@ bool Mutex::try_lock() {
if (!locked.compare_exchange_strong(expected, true)) {
return false;
}
_owner = cur_task();
_owner = Scheduler::cur_task();
return true;
}
@@ -41,7 +41,7 @@ void Mutex::lock() {
}
// TODO: this isn't really a spinlock, but for now we don't have SMP
yield_self();
Scheduler::yield_self();
}
}
@@ -60,8 +60,8 @@ void Mutex::lock() {
while (!Mutex::try_lock()) {
NO_INT(
waiters_lock.spinlock();
waiters.emplace_front(extract_running_task_node());
self_block(waiters_lock););
waiters.emplace_front(Scheduler::extract_running_task_node());
Scheduler::self_block(waiters_lock););
}
}
}
@@ -78,7 +78,7 @@ void Mutex::unlock() {
t = waiters.extract_back();
}
}
if (t) unblock(t);
if (t) Scheduler::unblock(t);
}
bool Mutex::test() {

src/kernel/string.c (new file, 17 additions)
View File

@@ -0,0 +1,17 @@
//
// Created by Stepan Usatiuk on 22.03.2024.
//
#include <stddef.h>
#include <stdint.h>
// GCC bug? Even with -ffreestanding and -fno-builtin, GCC still emits calls to memset
void *memset(void *s, int c, size_t n) {
uint8_t *p = (uint8_t *) s;
for (size_t i = 0; i < n; i++) {
p[i] = (uint8_t) c;
}
return s;
}
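For context on the comment above: GCC documents that even a freestanding program must provide memcpy, memmove, memset and memcmp, because the compiler may lower struct copies and zero-initialization into library calls regardless of -ffreestanding and -fno-builtin. A hypothetical C++ illustration of the kind of code that triggers this (Big and zero are made-up names):

    // Illustration only, not part of the commit
    struct Big { char buf[4096]; };
    void zero(Big *b) {
        *b = Big{}; // GCC may turn this into a call to memset(b, 0, sizeof(Big))
    }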

View File

@@ -42,7 +42,7 @@ File *FDT::get(FDT::FD fd) const {
}
FDT *FDT::current() {
return cur_task()->addressSpace->getFdt();
return Scheduler::cur_task()->_addressSpace->getFdt();
}
FDHandle::FDHandle(FDT::FD fd) : _fd(fd) {
}