diff --git a/run.sh b/run.sh
index 961609874..42071540d 100755
--- a/run.sh
+++ b/run.sh
@@ -34,6 +34,16 @@ while [[ $# -gt 0 ]]; do
             QEMU_OPTS="$QEMU_OPTS -serial stdio"
             shift
             ;;
+        -mon)
+            if [ $TERM_USED ]; then
+                echo "Conflicting options!"; # (todo: there must be a way to use both...)
+                exit 1
+            fi
+            TERM_USED=true
+            # monitor
+            QEMU_OPTS="$QEMU_OPTS -monitor stdio"
+            shift
+            ;;
         -int)
             if [ $TERM_USED ]; then
                 echo "Conflicting options!"; # (todo: there must be a way to use both...)
diff --git a/src/arch/x86/task.cpp b/src/arch/x86/task.cpp
index ef437d792..745b7e02d 100644
--- a/src/arch/x86/task.cpp
+++ b/src/arch/x86/task.cpp
@@ -208,22 +208,11 @@ void Scheduler::sleep_self(uint64_t diff) {
     uint64_t wake_time = micros + diff;
     while (micros <= wake_time) {
         {
-            LockGuard lm(WaitingTasks_mlock);
-
-            // TODO this is all ugly
-            // TODO: also maybe it breaks if it wakes before self_block?
-            uint64_t len1 = 0;
-            for (auto cur = &*WaitingTasks.begin(); !cur->end; cur = cur->next[0]) len1++;
-
-            assert(RunningTask != nullptr);
-            assert(WaitingTasks.add(wake_time, RunningTask) != nullptr);
-
-            uint64_t len2 = 0;
-            for (auto cur = &*WaitingTasks.begin(); !cur->end; cur = cur->next[0]) len2++;
-
-            assert(len2 - len1 == 1);
+            WaitingTasks_mlock.lock();
+            assert(cur_task() != nullptr);
+            assert(WaitingTasks.add(wake_time, extract_running_task_node()) != nullptr);
+            Scheduler::self_block(WaitingTasks_mlock);
         }
-        Scheduler::self_block();
     }
 }
 
@@ -354,8 +343,6 @@ void Scheduler::self_block() {
 }
 
 void Scheduler::self_block(Spinlock &to_unlock) {
-    assert2(!are_interrupts_enabled(), "Self blocking with enabled interrupts!");
-
     {
         SpinlockLockNoInt l(NextTasks_lock);
         to_unlock.unlock();
@@ -364,6 +351,15 @@ void Scheduler::self_block(Spinlock &to_unlock) {
     Scheduler::yield_self();
 }
 
+void Scheduler::self_block(Mutex &to_unlock) {
+    {
+        SpinlockLockNoInt l(NextTasks_lock);
+        to_unlock.unlock_nolock();
+        RunningTask->val->_state = Task::TaskState::TS_BLOCKED;
+    }
+    Scheduler::yield_self();
+}
+
 void Scheduler::unblock(Task *what) {
     assert(false);
     assert(what != nullptr);
@@ -372,6 +368,7 @@ void Scheduler::unblock(Task *what) {
     auto new_node = NextTasks.create_node(what);
     {
         SpinlockLockNoInt l(NextTasks_lock);
+        assert(what->_state != Task::TaskState::TS_RUNNING);
         what->_state = Task::TaskState::TS_RUNNING;
         NextTasks.emplace_front(new_node);
     }
@@ -383,6 +380,19 @@ void Scheduler::unblock(List::Node *what) {
     sanity_check_frame(&what->val->_frame);
     {
         SpinlockLockNoInt l(NextTasks_lock);
+        assert(what->val->_state != Task::TaskState::TS_RUNNING);
+        what->val->_state = Task::TaskState::TS_RUNNING;
+        NextTasks.emplace_front(what);
+    }
+};
+
+// Same as unblock(List::Node *), but assumes the caller already holds NextTasks_lock.
+void Scheduler::unblock_nolock(List::Node *what) {
+    assert(what != nullptr);
+    assert(what->val->_state != Task::TaskState::TS_RUNNING);
+    sanity_check_frame(&what->val->_frame);
+    {
+        assert(NextTasks_lock.test() && NextTasks_lock.get_owner() == cur_task());
         what->val->_state = Task::TaskState::TS_RUNNING;
         NextTasks.emplace_front(what);
     }
diff --git a/src/arch/x86/task.hpp b/src/arch/x86/task.hpp
index 5ce55a58f..cdb26dac3 100644
--- a/src/arch/x86/task.hpp
+++ b/src/arch/x86/task.hpp
@@ -90,10 +90,13 @@ namespace Scheduler {
     void sleep_self(uint64_t diff);
 
     void self_block();
     void self_block(Spinlock &to_unlock);
+    void self_block(Mutex &to_unlock);
+
     void unblock(Task *what);
     void unblock(List::Node *what);
+    void unblock_nolock(List::Node *what);
 
     extern "C" void switch_task(TaskFrame *cur_frame);
diff --git a/src/kernel/Spinlock.hpp b/src/kernel/Spinlock.hpp
index 8cec23c89..b4c18f518 100644
--- a/src/kernel/Spinlock.hpp
+++ b/src/kernel/Spinlock.hpp
@@ -24,7 +24,7 @@ public:
     void spinlock() {
         assert2(!are_interrupts_enabled(), "Assuming all spinlocks are without interrupts");
 
-        while (!try_lock()) { Scheduler::yield_self(); } // FIXME: Should be pause!
+        while (!try_lock()) __builtin_ia32_pause();
     }
 
     void unlock() {
diff --git a/src/kernel/mutex.cpp b/src/kernel/mutex.cpp
index 8d1dcb49e..762effa1c 100644
--- a/src/kernel/mutex.cpp
+++ b/src/kernel/mutex.cpp
@@ -80,6 +80,23 @@ void Mutex::unlock() {
     }
     if (t) Scheduler::unblock(t);
 }
+
+// Same as unlock(), but wakes waiters via unblock_nolock(); assumes the caller
+// already holds the scheduler's NextTasks_lock.
+void Mutex::unlock_nolock() {
+    bool expected = true;
+    _owner = nullptr;
+
+    if (!locked.compare_exchange_strong(expected, false))
+        assert2(false, "Unlocking an unlocked mutex!\n");
+
+    List::Node *t = nullptr;
+    {
+        SpinlockLockNoInt l(waiters_lock);
+        if (!waiters.empty()) { t = waiters.extract_back(); }
+    }
+    if (t) Scheduler::unblock_nolock(t);
+}
 
 bool Mutex::test() {
     return atomic_load(&locked);
diff --git a/src/kernel/mutex.hpp b/src/kernel/mutex.hpp
index d6f50b844..201bc4dac 100644
--- a/src/kernel/mutex.hpp
+++ b/src/kernel/mutex.hpp
@@ -21,8 +21,10 @@ public:
     void lock();
     // void spin_lock();
     bool try_lock();
     void unlock();
+    // Same as unlock(), but assumes the scheduler lock is already taken.
+    void unlock_nolock();
     bool test();
 
     Task *owner() { return _owner; }