Mirror of https://github.com/usatiuk/ficus.git, synced 2025-10-28 16:17:51 +01:00
Fix self sleep
run.sh
@@ -34,6 +34,16 @@ while [[ $# -gt 0 ]]; do
             QEMU_OPTS="$QEMU_OPTS -serial stdio"
             shift
             ;;
+        -mon)
+            if [ $TERM_USED ]; then
+                echo "Conflicting options!"; # (todo: there must be a way to use both...)
+                exit 1
+            fi
+            TERM_USED=true
+            # serial
+            QEMU_OPTS="$QEMU_OPTS -monitor stdio"
+            shift
+            ;;
         -int)
             if [ $TERM_USED ]; then
                 echo "Conflicting options!"; # (todo: there must be a way to use both...)

@@ -208,22 +208,11 @@ void Scheduler::sleep_self(uint64_t diff) {
     uint64_t wake_time = micros + diff;
     while (micros <= wake_time) {
-        {
-            LockGuard lm(WaitingTasks_mlock);
-
-            // TODO this is all ugly
-            // TODO: also maybe it breaks if it wakes before self_block?
-            uint64_t len1 = 0;
-            for (auto cur = &*WaitingTasks.begin(); !cur->end; cur = cur->next[0]) len1++;
-
-            assert(RunningTask != nullptr);
-            assert(WaitingTasks.add(wake_time, RunningTask) != nullptr);
-
-            uint64_t len2 = 0;
-            for (auto cur = &*WaitingTasks.begin(); !cur->end; cur = cur->next[0]) len2++;
-
-            assert(len2 - len1 == 1);
-        }
-        Scheduler::self_block();
+        WaitingTasks_mlock.lock();
+        assert(cur_task() != nullptr);
+        assert(WaitingTasks.add(wake_time, extract_running_task_node()) != nullptr);
+        Scheduler::self_block(WaitingTasks_mlock);
     }
 }
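
This hunk is the actual fix. The old sleep_self() queued the running task on WaitingTasks inside a LockGuard scope, dropped WaitingTasks_mlock when that scope ended, and only then called self_block(); as the removed TODO already suspected ("maybe it breaks if it wakes before self_block?"), a timer wakeup that lands in that window is lost, and the task then blocks with nobody left to wake it. The new code keeps WaitingTasks_mlock locked and hands it to self_block(Mutex &), which drops it only after the task has been marked blocked under the scheduler lock. Below is the same principle modeled in userspace; the names are hypothetical, std:: primitives stand in for the kernel types, and condition_variable::wait() plays the role of self_block(Mutex &).

    // Userspace model of the race being fixed: the sleeper must still hold the
    // lock that the waker takes when it goes to sleep; wait() releases it and
    // blocks as a single step, so the wakeup cannot fall in between.
    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex waiting_tasks_mlock;   // stands in for WaitingTasks_mlock
    std::condition_variable wake;     // stands in for the timer wakeup path
    bool woken = false;

    void sleeper() {
        std::unique_lock<std::mutex> lk(waiting_tasks_mlock);
        // "Register in the wait list", then block while still holding the lock.
        wake.wait(lk, [] { return woken; });
        std::cout << "woke up\n";
    }

    void timer() {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        {
            std::lock_guard<std::mutex> lk(waiting_tasks_mlock);
            woken = true;
        }
        wake.notify_one();
    }

    int main() {
        std::thread a(sleeper), b(timer);
        a.join();
        b.join();
    }

The analogy is loose (a condition variable rechecks its predicate, the kernel's self_block() does not), but the invariant is the same: the lock that the waker needs is released only as part of going to sleep, never before.
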
@@ -354,8 +343,6 @@ void Scheduler::self_block() {
 }
 
 void Scheduler::self_block(Spinlock &to_unlock) {
-    assert2(!are_interrupts_enabled(), "Self blocking with enabled interrupts!");
-
     {
         SpinlockLockNoInt l(NextTasks_lock);
         to_unlock.unlock();
@@ -364,6 +351,15 @@ void Scheduler::self_block(Spinlock &to_unlock) {
     Scheduler::yield_self();
 }
 
+void Scheduler::self_block(Mutex &to_unlock) {
+    {
+        SpinlockLockNoInt l(NextTasks_lock);
+        to_unlock.unlock_nolock();
+        RunningTask->val->_state = Task::TaskState::TS_BLOCKED;
+    }
+    Scheduler::yield_self();
+}
+
 void Scheduler::unblock(Task *what) {
     assert(false);
     assert(what != nullptr);
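
The new overload is where that ordering is enforced: the ready-queue spinlock NextTasks_lock is taken first; the caller's mutex is released through unlock_nolock(), whose waiter hand-off goes through unblock_nolock(), the variant that expects NextTasks_lock to be held already (the plain unlock() would end in Scheduler::unblock(), which tries to take that same spinlock again); the task is marked TS_BLOCKED; and only after the critical section closes does yield_self() switch away. A stripped-down, runnable model of that ordering follows; the names are hypothetical, std::mutex stands in for both lock types, and the real context switch is elided.

    // Hypothetical userspace model of self_block(Mutex &) / unblock():
    // the state change to "blocked" happens while the queue lock is held,
    // and the caller's lock is released inside that same critical section.
    #include <cassert>
    #include <mutex>

    enum class State { Running, Blocked };
    struct Task { State state = State::Running; };

    std::mutex next_tasks_lock;   // stands in for NextTasks_lock

    void block_self(Task &t, std::mutex &to_unlock) {
        {
            std::lock_guard<std::mutex> l(next_tasks_lock);
            to_unlock.unlock();                 // cf. to_unlock.unlock_nolock()
            t.state = State::Blocked;
        }
        // ... yield_self() would switch to another task here ...
    }

    void unblock(Task &t) {
        std::lock_guard<std::mutex> l(next_tasks_lock);
        assert(t.state != State::Running);
        t.state = State::Running;
        // ... the task would be pushed onto the ready queue here ...
    }

    int main() {
        Task t;
        std::mutex m;
        m.lock();
        block_self(t, m);   // releases m under the queue lock, marks t blocked
        unblock(t);         // later, the waker marks it runnable again
    }
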
@@ -372,6 +368,7 @@ void Scheduler::unblock(Task *what) {
     auto new_node = NextTasks.create_node(what);
     {
         SpinlockLockNoInt l(NextTasks_lock);
+        assert(what->_state != Task::TaskState::TS_RUNNING);
         what->_state = Task::TaskState::TS_RUNNING;
         NextTasks.emplace_front(new_node);
     }
@@ -383,6 +380,18 @@ void Scheduler::unblock(List<Task *>::Node *what) {
     sanity_check_frame(&what->val->_frame);
     {
         SpinlockLockNoInt l(NextTasks_lock);
+        assert(what->val->_state != Task::TaskState::TS_RUNNING);
+        what->val->_state = Task::TaskState::TS_RUNNING;
+        NextTasks.emplace_front(what);
+    }
+};
+void Scheduler::unblock_nolock(List<Task *>::Node *what) {
+    assert(what != nullptr);
+    assert(what->val->_state != Task::TaskState::TS_RUNNING);
+    sanity_check_frame(&what->val->_frame);
+    {
+        assert(NextTasks_lock.test() && NextTasks_lock.get_owner() == cur_task());
+        assert(what->val->_state != Task::TaskState::TS_RUNNING);
         what->val->_state = Task::TaskState::TS_RUNNING;
         NextTasks.emplace_front(what);
     }
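
unblock_nolock() exists for one calling pattern: code that is already inside a NextTasks_lock critical section (in this commit, Mutex::unlock_nolock() as reached from self_block(Mutex &)). Instead of taking the spinlock again it asserts ownership: NextTasks_lock.test() && NextTasks_lock.get_owner() == cur_task(). A sketch of that kind of ownership check with a hypothetical userspace wrapper (std::mutex does not expose an owner, so one is tracked by hand):

    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    // Lock wrapper that remembers which thread holds it, so _nolock helpers
    // can assert "the caller already owns the queue lock" before touching it.
    class OwnedLock {
        std::mutex m_;
        std::atomic<std::thread::id> owner_{std::thread::id{}};
    public:
        void lock()   { m_.lock(); owner_.store(std::this_thread::get_id()); }
        void unlock() { owner_.store(std::thread::id{}); m_.unlock(); }
        bool held_by_me() const { return owner_.load() == std::this_thread::get_id(); }
    };

    OwnedLock ready_lock;   // plays the role of NextTasks_lock

    void enqueue_nolock(int /*task*/) {
        assert(ready_lock.held_by_me());   // cf. test() && get_owner() == cur_task()
        // ... push onto the ready queue ...
    }

    int main() {
        ready_lock.lock();
        enqueue_nolock(42);   // fine: lock already held by this thread
        ready_lock.unlock();
    }
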
@@ -90,10 +90,12 @@ namespace Scheduler {
     void sleep_self(uint64_t diff);
 
     void self_block();
 
     void self_block(Spinlock &to_unlock);
+    void self_block(Mutex &to_unlock);
 
     void unblock(Task *what);
     void unblock(List<Task *>::Node *what);
+    void unblock_nolock(List<Task *>::Node *what);
 
     extern "C" void switch_task(TaskFrame *cur_frame);
@@ -24,7 +24,7 @@ public:
 
     void spinlock() {
         assert2(!are_interrupts_enabled(), "Assuming all spinlocks are without interrupts");
-        while (!try_lock()) { Scheduler::yield_self(); } // FIXME: Should be pause!
+        while (!try_lock()) __builtin_ia32_pause();
     }
 
     void unlock() {
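
The spin loop now does what the removed FIXME asked for: instead of bouncing through Scheduler::yield_self(), it busy-waits on __builtin_ia32_pause(), the GCC/Clang builtin that emits the x86 PAUSE hint for spin-wait loops. The same loop in a generic, self-contained form (a userspace sketch rather than the kernel's Spinlock; a non-x86 port would need a different hint, for example yield on ARM):

    #include <atomic>

    class SpinLock {
        std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
    public:
        void lock() {
            // Spin until the flag is acquired, hinting the CPU on every miss.
            while (flag_.test_and_set(std::memory_order_acquire))
                __builtin_ia32_pause();
        }
        void unlock() { flag_.clear(std::memory_order_release); }
    };

    int main() {
        SpinLock s;
        s.lock();
        // ... critical section ...
        s.unlock();
    }
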
@@ -80,6 +80,22 @@ void Mutex::unlock() {
     }
     if (t) Scheduler::unblock(t);
 }
+void Mutex::unlock_nolock() {
+    bool expected = true;
+    _owner = nullptr;
+
+    if (!locked.compare_exchange_strong(expected, false))
+        assert2(false, "Unlocking an unlocked mutex!\n");
+
+    List<Task *>::Node *t = nullptr;
+    {
+        SpinlockLockNoInt l(waiters_lock);
+        if (!waiters.empty()) {
+            t = waiters.extract_back();
+        }
+    }
+    if (t) Scheduler::unblock_nolock(t);
+}
 
 bool Mutex::test() {
     return atomic_load(&locked);
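
unlock_nolock() mirrors unlock() almost line for line: the same compare_exchange check against double unlocks and the same waiter extraction under waiters_lock, but the woken task is handed to Scheduler::unblock_nolock() rather than unblock(), since its caller in this commit, self_block(Mutex &), is already inside the NextTasks_lock critical section. The double-unlock check on its own, as a runnable sketch (plain assert stands in for the kernel's assert2):

    #include <atomic>
    #include <cassert>

    std::atomic<bool> locked{false};

    void do_unlock() {
        bool expected = true;
        // Atomically flip true -> false; if it was already false, someone is
        // unlocking a mutex that is not locked.
        if (!locked.compare_exchange_strong(expected, false))
            assert(!"Unlocking an unlocked mutex!");
    }

    int main() {
        locked.store(true);
        do_unlock();     // fine: true -> false
        // do_unlock();  // would trip the assertion: already false
    }
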
@@ -21,8 +21,10 @@ public:
 
     void lock();
     // void spin_lock();
     bool try_lock();
     void unlock();
+    // Same as unlock, but assumes scheduler lock is taken
+    void unlock_nolock();
     bool test();
     Task *owner() { return _owner; }
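
For ordinary users of the class nothing changes: lock()/unlock(), usually wrapped in a scope guard such as the LockGuard the old sleep_self() used, remain the interface, while unlock_nolock() is reserved for scheduler-internal paths that already hold NextTasks_lock. A minimal guard of that kind (hypothetical template, shown with std::mutex so it compiles on its own):

    #include <mutex>

    // RAII guard: locks in the constructor, unlocks in the destructor,
    // so the lock cannot be leaked on early returns.
    template <typename M>
    class Guard {
        M &m_;
    public:
        explicit Guard(M &m) : m_(m) { m_.lock(); }
        ~Guard() { m_.unlock(); }
        Guard(const Guard &) = delete;
        Guard &operator=(const Guard &) = delete;
    };

    int main() {
        std::mutex m;
        {
            Guard<std::mutex> g(m);   // locked for this scope
            // ... critical section ...
        }                             // unlocked here
    }
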