simple vm allocation
@@ -65,7 +65,8 @@ extern "C" void _start(void) {
    // TODO: Accurate kernel length
    for (int i = 0; i < 100000; i++) {
        KERN_AddressSpace->map((void *) (kernel_virt_base + i * PAGE_SIZE), (void *) (kernel_phys_base + i * PAGE_SIZE), PAGE_RW);
        // FIXME:
        KERN_AddressSpace->map((void *) (kernel_virt_base + i * PAGE_SIZE), (void *) (kernel_phys_base + i * PAGE_SIZE), PAGE_RW | PAGE_USER);
    }

    uint64_t real_new_cr3 = (uint64_t) HHDM_V2P(KERN_AddressSpace_PML4);
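A note on the HHDM_V2P call above: CR3 has to be loaded with the physical address of the PML4, while the kernel itself only touches the table through its higher-half direct-map (HHDM) alias. A minimal sketch of that translation, assuming a fixed HHDM base of 0xFFFF800000000000 (the real base comes from the bootloader, so treat the constant as illustrative):

    // Sketch only: how a higher-half direct map translation typically works.
    #include <cassert>
    #include <cstdint>

    static constexpr uint64_t hhdm_base = 0xFFFF800000000000ULL; // assumed HHDM base

    static inline uint64_t v2p(uint64_t virt) { return virt - hhdm_base; } // like HHDM_V2P
    static inline uint64_t p2v(uint64_t phys) { return phys + hhdm_base; } // like HHDM_P2V

    int main() {
        // CR3 must hold the *physical* address of the PML4, even though the
        // kernel writes to the table through its HHDM virtual alias.
        uint64_t pml4_virt = p2v(0x1000); // pretend the PML4 frame sits at phys 0x1000
        assert(v2p(pml4_virt) == 0x1000); // the value that would be written into CR3
    }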
@@ -123,8 +123,7 @@ int AddressSpace::map(void *virt, void *real, uint32_t flags) {
    uint64_t *ptsb = (uint64_t *) HHDM_P2V((*pdee & 0x000FFFFFFFFFF000ULL));
    uint64_t *ptse = &ptsb[ptsi];
    // FIXME:
    *ptse = ((uint64_t) real & 0x000FFFFFFFFFF000ULL) | (flags & 0xFFF) | PAGE_PRESENT | PAGE_USER;
    *ptse = ((uint64_t) real & 0x000FFFFFFFFFF000ULL) | (flags & 0xFFF) | PAGE_PRESENT;
    invlpg((void *) ((uint64_t) virt & 0x000FFFFFFFFFF000ULL));
    return 1;
}
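For readers unfamiliar with the 0x000FFFFFFFFFF000 mask: a 4 KiB page-table entry keeps the physical frame in bits 51:12 and its flags in the low bits. The sketch below rebuilds the same construction with architectural bit positions; the PAGE_* constants are standard x86-64 values and are only assumed to match the repo's definitions:

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t ADDR_MASK    = 0x000FFFFFFFFFF000ULL; // bits 51:12 = physical frame
    constexpr uint64_t PAGE_PRESENT = 1ULL << 0;
    constexpr uint64_t PAGE_RW      = 1ULL << 1;
    constexpr uint64_t PAGE_USER    = 1ULL << 2;

    uint64_t make_pte(uint64_t phys, uint64_t flags) {
        return (phys & ADDR_MASK) | (flags & 0xFFF) | PAGE_PRESENT;
    }

    int main() {
        uint64_t pte = make_pte(0x200000, PAGE_RW | PAGE_USER);
        assert((pte & ADDR_MASK) == 0x200000);          // frame is recoverable through the same mask
        assert((pte & PAGE_PRESENT) && (pte & PAGE_USER));
        // Overwriting a live entry can leave a stale translation in the TLB,
        // hence the invlpg on the affected virtual page right after the store.
    }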
@@ -207,8 +206,7 @@ void map_hddm(uint64_t *pml4) {
        uint64_t *pdpeb = (uint64_t *) (*pml4e & 0x000FFFFFFFFFF000ULL);
        uint64_t *pdpee = &pdpeb[pdpei];
        assert2((!(*pdpee & PAGE_PRESENT)), "HHDM area is already mapped!");
        // FIXME:
        *pdpee = PAGE_RW | PAGE_PRESENT | PAGE_PS | PAGE_USER;
        *pdpee = PAGE_RW | PAGE_PRESENT | PAGE_PS;
        *pdpee |= (uint64_t) real & (uint64_t) 0x000FFFFFFFFFF000ULL;
    }
    _tlb_flush();
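map_hddm writes PAGE_PS into a PDPT entry, i.e. it maps the direct map with 1 GiB pages instead of walking down to 4 KiB page tables. A sketch of the index arithmetic behind pml4e/pdpee (standard x86-64 paging, not code from this repository); the HHDM base used as the example input is an assumption:

    #include <cassert>
    #include <cstdint>

    struct Indices { uint64_t pml4i, pdpti; };

    Indices split(uint64_t virt) {
        return { (virt >> 39) & 0x1FF,   // PML4 index: bits 47:39
                 (virt >> 30) & 0x1FF }; // PDPT index: bits 38:30 (a PS entry here covers 1 GiB)
    }

    int main() {
        Indices i = split(0xFFFF800000000000ULL); // assumed HHDM base, as an example input
        assert(i.pml4i == 256 && i.pdpti == 0);   // first entry of the kernel half
    }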
@@ -47,7 +47,7 @@ SkipList<uint64_t, List<Task *>::Node *> WaitingTasks;
static std::atomic<bool> initialized = false;

static void free_task(struct Task *t) {
    kfree(t->stack);
    kfree(t->kstack);
    kfree(t->name);
    kfree(t->fxsave);
    kfree(t);
@@ -86,12 +86,12 @@ static void task_freer() {

struct Task *new_ktask(void (*fn)(), const char *name) {
    struct Task *newt = static_cast<Task *>(kmalloc(sizeof(struct Task)));
    newt->stack = static_cast<uint64_t *>(kmalloc(TASK_SS));
    newt->kstack = static_cast<uint64_t *>(kmalloc(TASK_SS));
    newt->name = static_cast<char *>(kmalloc(strlen(name) + 1));
    newt->fxsave = static_cast<char *>(kmalloc(512));
    strcpy(name, newt->name);

    newt->frame.sp = ((((uintptr_t) newt->stack) + (TASK_SS - 9) - 1) & (~0xFULL)) + 8;// Ensure 16byte alignment
    newt->frame.sp = ((((uintptr_t) newt->kstack) + (TASK_SS - 9) - 1) & (~0xFULL)) + 8;// Ensure 16byte alignment
    // It should be aligned before call, therefore on function entry it should be misaligned by 8 bytes
    assert((newt->frame.sp & 0xFULL) == 8);
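The frame.sp expression is dense, so here is a sketch of what it guarantees, with an assumed TASK_SS value (hypothetical, not the repo's constant): the System V ABI wants rsp 16-byte aligned at a call site, so a frame that pretends the entry point was just called must leave rsp congruent to 8 (mod 16) and still point inside the allocated stack:

    #include <cassert>
    #include <cstdint>

    int main() {
        constexpr uint64_t TASK_SS = 16384;   // assumption, not the repo's value
        uint64_t stack_base = 0x1000;         // pretend kmalloc returned this

        uint64_t sp = ((stack_base + (TASK_SS - 9) - 1) & ~0xFULL) + 8;

        assert((sp & 0xFULL) == 8);                              // misaligned by 8, as on real function entry
        assert(sp >= stack_base && sp < stack_base + TASK_SS);   // still inside the allocation
    }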
@@ -126,15 +126,11 @@ struct Task *new_ktask(void (*fn)(), const char *name) {
}
struct Task *new_utask(void (*fn)(), const char *name) {
    struct Task *newt = static_cast<Task *>(kmalloc(sizeof(struct Task)));
    newt->stack = static_cast<uint64_t *>(kmalloc(TASK_SS));
    newt->kstack = static_cast<uint64_t *>(kmalloc(TASK_SS));
    newt->name = static_cast<char *>(kmalloc(strlen(name) + 1));
    newt->fxsave = static_cast<char *>(kmalloc(512));
    strcpy(name, newt->name);

    newt->frame.sp = ((((uintptr_t) newt->stack) + (TASK_SS - 9) - 1) & (~0xFULL)) + 8;// Ensure 16byte alignment
    // It should be aligned before call, therefore on function entry it should be misaligned by 8 bytes
    assert((newt->frame.sp & 0xFULL) == 8);

    newt->frame.ip = (uint64_t) fn;
    newt->frame.cs = GDTSEL(gdt_code_user) | 0x3;
    newt->frame.ss = GDTSEL(gdt_data_user) | 0x3;
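On the "| 0x3" in the segment selectors: the low two bits of a selector are the requested privilege level, so these frames will resume in ring 3. A tiny sketch under the assumption that GDTSEL expands to the usual index-times-eight encoding (the slot number below is made up for illustration):

    #include <cassert>
    #include <cstdint>

    constexpr uint16_t gdtsel(uint16_t index) { return index << 3; } // selector = index * 8, TI = 0

    int main() {
        constexpr uint16_t gdt_code_user = 5;      // hypothetical GDT slot
        uint16_t cs = gdtsel(gdt_code_user) | 0x3; // low two bits = RPL 3
        assert((cs & 0x3) == 3 && (cs >> 3) == gdt_code_user);
    }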
@@ -150,6 +146,12 @@ struct Task *new_utask(void (*fn)(), const char *name) {
    newt->pid = max_pid.fetch_add(1);
    newt->used_time = 0;

    void *ustack = newt->vma->mmap_mem_any(TASK_SS, 0, 0);

    newt->frame.sp = ((((uintptr_t) ustack) + (TASK_SS - 17) - 1) & (~0xFULL)) + 8;// Ensure 16byte alignment
    // It should be aligned before call, therefore on function entry it should be misaligned by 8 bytes
    assert((newt->frame.sp & 0xFULL) == 8);

    newt->vma->map_kern();

    sanity_check_frame(&newt->frame);
@@ -33,7 +33,7 @@ struct Task {
    std::atomic<uint64_t> used_time;
    AddressSpace *addressSpace;
    VMA *vma;
    uint64_t *stack;
    uint64_t *kstack;
    char *fxsave;
    char *name;
    enum TaskMode mode;
@@ -4,24 +4,32 @@

#include "VMA.hpp"

#include <optional>

#include "LockGuard.hpp"
#include "asserts.hpp"
#include "kmem.hpp"
#include "memman.hpp"
#include "paging.hpp"

VMA::VMA(AddressSpace *space) : space(space) {
    LockGuard l(regions_lock);
    regions.add(0x10000, {0x10000, 0xFFF8000000000000ULL - 0x20000, true});
}

void VMA::mark_taken(void *addr, size_t length) {
}

void VMA::map_kern() {
    for (uintptr_t i = (uint64_t) (0xFFF8000000000000ULL >> 39) & 0x01FF; i < 512; i++) {
    LockGuard l(space_lock);
    for (uintptr_t i = (uint64_t) (0xFFFF800000000000ULL >> 39) & 0x01FF; i < 512; i++) {
        assert(i >= 256);
        space->get_cr3()[i] = KERN_AddressSpace->get_cr3()[i];
    }
}

void *VMA::mmap_phys(void *v_addr, void *real_addr, size_t length, int flags) {
    LockGuard l(space_lock);
    assert((((uintptr_t) v_addr) & PAGE_SIZE) == 0);

    for (size_t i = 0; i < length; i += PAGE_SIZE) {
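What map_kern accomplishes, paraphrased: a PML4 has 512 slots, and slots 256..511 cover the higher half, so copying those entries from KERN_AddressSpace makes the kernel (and the HHDM) visible in a user task's address space after a CR3 switch. A standalone sketch of the same loop:

    #include <cassert>
    #include <cstdint>

    void share_kernel_half(uint64_t *dst_pml4, const uint64_t *kern_pml4) {
        // 0xFFFF800000000000 >> 39 & 0x1FF == 256: the first PML4 slot of the higher half
        for (uint64_t i = (0xFFFF800000000000ULL >> 39) & 0x1FF; i < 512; i++)
            dst_pml4[i] = kern_pml4[i];
    }

    int main() {
        uint64_t kern[512] = {}, user[512] = {};
        kern[256] = 0xAA; kern[511] = 0xBB;
        share_kernel_half(user, kern);
        assert(user[256] == 0xAA && user[511] == 0xBB && user[0] == 0);
    }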
@@ -29,8 +37,38 @@ void *VMA::mmap_phys(void *v_addr, void *real_addr, size_t length, int flags) {
    }
    return v_addr;
}

void *VMA::mmap_mem_any(size_t length, int prot, int flags) {
    return nullptr;
    if ((length & PAGE_SIZE) != 0) {
        length += PAGE_SIZE - 1;
        length &= ~PAGE_SIZE;
    }
    assert((length & PAGE_SIZE) == 0);
    uint64_t page_len = length / PAGE_SIZE;

    std::optional<ListEntry> found;
    {
        LockGuard l(regions_lock);

        for (auto &n: regions) {
            if (n.data.available && n.data.length >= length) {
                found = n.data;
            }
        }
        if (!found) return nullptr;
        regions.erase(found->begin);
        regions.add(found->begin + length, {found->begin + length, found->length - length, true});
        regions.add(found->begin, {found->begin, length, false});
    }

    for (int i = 0; i < page_len; i++) {
        void *p = get4k();
        {
            LockGuard l(space_lock);
            space->map(reinterpret_cast<void *>(found->begin + i * PAGE_SIZE), (void *) HHDM_V2P(p), PAGE_RW | PAGE_USER);
        }
    }
    return reinterpret_cast<void *>(found->begin);
}
int VMA::munmap(void *addr, size_t length) {
    return 0;
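The core of the new mmap_mem_any is a first-fit split of a free-region list: find an available region that is long enough, re-insert the unused tail as free, record the front part as taken, then back it page by page. A self-contained sketch of that bookkeeping, with std::map standing in for the repo's SkipList and made-up sizes:

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <optional>

    struct Region { uintptr_t begin; uint64_t length; bool available; };

    std::map<uintptr_t, Region> regions = {
        {0x10000, {0x10000, 0x100000, true}} // one big free region, as in the VMA constructor
    };

    std::optional<uintptr_t> alloc(uint64_t length) {
        for (auto &kv : regions) {
            Region r = kv.second; // copy, since the map is about to change
            if (!r.available || r.length < length) continue;
            regions.erase(r.begin);                                                   // drop the old free region
            regions[r.begin + length] = {r.begin + length, r.length - length, true};  // free tail
            regions[r.begin]          = {r.begin, length, false};                     // taken front part
            return r.begin; // caller then maps length / PAGE_SIZE fresh frames starting here
        }
        return std::nullopt;
    }

    int main() {
        auto a = alloc(0x4000);
        assert(a && *a == 0x10000);
        auto b = alloc(0x1000);
        assert(b && *b == 0x14000); // the tail was re-inserted as a free region
    }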
@@ -5,6 +5,8 @@
#ifndef OS2_VMA_HPP
#define OS2_VMA_HPP

#include "SkipList.hpp"
#include "Spinlock.hpp"
#include <cstddef>
#include <cstdint>
@@ -25,6 +27,16 @@ public:

private:
    AddressSpace *space = nullptr;
    Spinlock space_lock;

    struct ListEntry {
        uintptr_t begin;
        uint64_t length;
        bool available;
    };

    SkipList<uintptr_t, ListEntry> regions;
    Spinlock regions_lock;
};