This commit is contained in:
2023-09-09 12:08:04 +02:00
parent f99fc6681a
commit 22be3b098a
56 changed files with 3814 additions and 0 deletions

79
.gitignore vendored

@@ -1 +1,80 @@
/toolchain
/cmake-build-debug
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# SonarLint plugin
.idea/sonarlint/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser

8
.idea/.gitignore generated vendored Normal file

@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

9
.idea/misc.xml generated Normal file

@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CMakeWorkspace" PROJECT_DIR="$PROJECT_DIR$" />
<component name="CidrRootsConfiguration">
<excludeRoots>
<file path="$PROJECT_DIR$/toolchain" />
</excludeRoots>
</component>
</project>

8
.idea/modules.xml generated Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/os2.iml" filepath="$PROJECT_DIR$/.idea/os2.iml" />
</modules>
</component>
</project>

2
.idea/os2.iml generated Normal file

@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="UTF-8"?>
<module classpath="CMake" type="CPP_MODULE" version="4" />

6
.idea/vcs.xml generated Normal file

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>


@@ -0,0 +1,26 @@
set(CMAKE_SYSTEM_NAME Generic)
set(CMAKE_SYSTEM_PROCESSOR x86_64)
set(triple x86_64-elf)
set(tools ${OS2_ROOT}/toolchain)
set(CMAKE_C_COMPILER ${tools}/gcc-x86_64-elf-prefix/bin/x86_64-elf-gcc)
set(CMAKE_CXX_COMPILER ${tools}/gcc-x86_64-elf-prefix/bin/x86_64-elf-g++)
set(CMAKE_ASM_NASM_OBJECT_FORMAT elf64)
set(CMAKE_ASM_NASM_SOURCE_FILE_EXTENSIONS asm)
set(cxxflags -ffreestanding -nostdlib -mno-red-zone -mcmodel=large -fno-exceptions -fno-rtti)
set(cflags -ffreestanding -nostdlib -mno-red-zone -mcmodel=large)
add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:${cxxflags}>")
add_compile_options("$<$<COMPILE_LANGUAGE:C>:${cflags}>")
add_link_options(-ffreestanding -nostdlib -mno-red-zone -mcmodel=large -fno-exceptions -fno-rtti)
include_directories(${tools}/limine/prefix/include)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE NEVER)

7
CMakeLists.txt Normal file

@@ -0,0 +1,7 @@
cmake_minimum_required(VERSION 3.26)
project(os2 C CXX ASM_NASM)
set(CMAKE_CXX_STANDARD 17)
add_subdirectory(./src/)

5
src/CMakeLists.txt Normal file

@@ -0,0 +1,5 @@
add_executable(kernel)
add_subdirectory(./arch/)
add_subdirectory(./iso/)

7
src/arch/CMakeLists.txt Normal file

@@ -0,0 +1,7 @@
if (CMAKE_CROSSCOMPILING)
if (CMAKE_SYSTEM_PROCESSOR MATCHES x86_64)
add_subdirectory(./x86)
else ()
error("Unsupported architecture!")
endif ()
endif ()

32
src/arch/x86/CMakeLists.txt Normal file

@@ -0,0 +1,32 @@
target_sources(kernel PRIVATE
limine_mm.c
mutex.c
task.asm
tty.c
kmem.c
kmain.asm
paging.asm
gdt.asm
misc.asm
limine_fb.c
idt.c
cv.c
serial.c
idt.asm
globals.c
memman.c
timer.c
boot.c
io.c
task.c
paging.c
kmain.c
gdt.c
misc.c)
target_include_directories(kernel PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_link_options(kernel PRIVATE "SHELL:-T${CMAKE_CURRENT_SOURCE_DIR}/linker.ld")
set_target_properties(kernel PROPERTIES LINK_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/linker.ld")

72
src/arch/x86/boot.c Normal file

@@ -0,0 +1,72 @@
#include <stddef.h>
#include <stdint.h>
#include "gdt.h"
#include "globals.h"
#include "idt.h"
#include "kmem.h"
#include "limine.h"
#include "limine_fb.h"
#include "limine_mm.h"
#include "memman.h"
#include "misc.h"
#include "paging.h"
#include "serial.h"
struct AddressSpace BOOT_AddressSpace;
extern void kmain();
// Do final preparations in the new address space, then call kmain
__attribute__((noreturn))
__attribute__((used))
void real_start() {
parse_limine_memmap(limine_mm_entries, limine_mm_count, LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE);
limine_fb_remap(KERN_AddressSpace);
init_kern_heap();
kmain();
}
// Set up the address space for the kernel and prepare other structures to work without the bootloader,
// then call real_start with this address space and the new stack.
void _start(void) {
_sse_setup();
barrier();
gdt_setup();
barrier();
idt_init();
barrier();
init_serial();
barrier();
limine_kern_save_response();
barrier();
map_hddm(get_cr3());
barrier();
BOOT_AddressSpace.PML4 = (uint64_t *) HHDM_P2V(get_cr3());
limine_fb_save_response(&BOOT_AddressSpace);
limine_mm_save_response();
parse_limine_memmap(limine_mm_entries, limine_mm_count, LIMINE_MEMMAP_USABLE);
KERN_AddressSpace = get4k();
assert2(!init_addr_space(KERN_AddressSpace), "Couldn't init kernel address space!");
for (int i = 0; i < 512; i++)
((struct AddressSpace *) (KERN_AddressSpace))->PML4[i] = 0x02;
map_hddm((uint64_t *) HHDM_V2P(((struct AddressSpace *) (KERN_AddressSpace))->PML4));
// TODO: Accurate kernel length
for (int i = 0; i < 100000; i++) {
map((void *) (kernel_virt_base + i * 4096), (void *) (kernel_phys_base + i * 4096), PAGE_RW, KERN_AddressSpace);
}
uint64_t real_new_cr3 = (uint64_t) HHDM_V2P(((struct AddressSpace *) (KERN_AddressSpace))->PML4);
uint64_t *new_stack_top = &KERN_stack[KERN_STACK_SIZE - 1];// Don't forget in which direction the stack grows...
barrier();
__asm__ volatile("movq %[new_stack_top], %%rsp; movq %[real_new_cr3], %%cr3; call real_start"
:
: [real_new_cr3] "r"(real_new_cr3), [new_stack_top] "r"(new_stack_top));
}
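boot.c leans heavily on the higher-half direct map (HHDM) that Limine establishes: all usable physical memory is also visible at a fixed virtual offset. paging.h is not part of this excerpt, so the following is only a sketch of what the translation macros used above (and HHDM_BEGIN, referenced later in memman.c) are assumed to look like:

#include <stdint.h>

/* Sketch only: the base constant is illustrative; the kernel may instead
 * derive this offset from Limine's HHDM response at boot. */
#define HHDM_BEGIN 0xffff800000000000ULL
#define HHDM_P2V(x) ((uint64_t) (x) + HHDM_BEGIN) /* physical -> direct-map virtual */
#define HHDM_V2P(x) ((uint64_t) (x) - HHDM_BEGIN) /* direct-map virtual -> physical */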

23
src/arch/x86/cv.c Normal file

@@ -0,0 +1,23 @@
//
// Created by Stepan Usatiuk on 20.08.2023.
//
#include "cv.h"
#include "mutex.h"
#include "serial.h"
#include "task.h"
void cv_wait(struct Mutex *m, struct CV *cv) {
m_unlock(m);
wait_cv_on_self(cv);
m_lock(m);
}
void cv_notify_one(struct CV *cv) {
cv_unlock_sched_hook(cv, CV_NOTIFY_ONE);
}
void cv_notify_all(struct CV *cv) {
cv_unlock_sched_hook(cv, CV_NOTIFY_ALL);
}
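A minimal usage sketch of the mutex/CV pair (not part of this commit), assuming a flag guarded by a Mutex and the scheduler primitives above:

#include "cv.h"
#include "mutex.h"

static struct Mutex lock = DefaultMutex;
static struct CV cond = DefaultCV;
static int ready = 0;

void consumer_task(void) {
    m_lock(&lock);
    while (!ready) // re-check after every wakeup; waits can wake spuriously
        cv_wait(&lock, &cond);
    m_unlock(&lock);
}

void producer_task(void) {
    m_lock(&lock);
    ready = 1;
    m_unlock(&lock);
    cv_notify_one(&cond); // wake one task blocked in cv_wait
}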

36
src/arch/x86/cv.h Normal file

@@ -0,0 +1,36 @@
//
// Created by Stepan Usatiuk on 20.08.2023.
//
#ifndef OS1_CV_H
#define OS1_CV_H
#include <stdatomic.h>
#include <stddef.h>
#if !(ATOMIC_INT_LOCK_FREE == 2)
#error Atomic int is not lock-free!
#endif
struct Mutex;
enum CV_NOTIFY {
CV_NOTIFY_NONE = 0,
CV_NOTIFY_ONE = 1,
CV_NOTIFY_ALL = 2,
};
struct CV {
atomic_int_fast8_t notified;
struct TaskList *waiters;
};
static const struct CV DefaultCV = {
.notified = ATOMIC_VAR_INIT(CV_NOTIFY_NONE),
.waiters = NULL};
void cv_wait(struct Mutex *m, struct CV *cv);
void cv_notify_one(struct CV *cv);
void cv_notify_all(struct CV *cv);
#endif//OS1_CV_H

112
src/arch/x86/gdt.asm Normal file

@@ -0,0 +1,112 @@
[BITS 64]
; Access bits
PRESENT equ 1 << 7
NOT_SYS equ 1 << 4
EXEC equ 1 << 3
DC equ 1 << 2
RW equ 1 << 1
ACCESSED equ 1 << 0
USER equ 1 << 6 | 1 << 5
; Flags bits
GRAN_4K equ 1 << 7
SZ_32 equ 1 << 6
LONG_MODE equ 1 << 5
section .gdt
global gdt_null:data
gdt_null:
dq 0
global gdt_code_16:data
gdt_code_16:
dd 0xFFFF ; Limit & Base (low, bits 0-15)
db 0 ; Base (mid, bits 16-23)
db PRESENT | NOT_SYS | EXEC | RW ; Access
db GRAN_4K | 0xF ; Flags & Limit (high, bits 16-19)
db 0 ; Base (high, bits 24-31)
global gdt_data_16:data
gdt_data_16:
dd 0xFFFF ; Limit & Base (low, bits 0-15)
db 0 ; Base (mid, bits 16-23)
db PRESENT | NOT_SYS | RW ; Access
db GRAN_4K | 0xF ; Flags & Limit (high, bits 16-19)
db 0 ; Base (high, bits 24-31)
global gdt_code_32:data
gdt_code_32:
dd 0xFFFF ; Limit & Base (low, bits 0-15)
db 0 ; Base (mid, bits 16-23)
db PRESENT | NOT_SYS | EXEC | RW ; Access
db GRAN_4K | SZ_32 | 0xF ; Flags & Limit (high, bits 16-19)
db 0 ; Base (high, bits 24-31)
global gdt_data_32:data
gdt_data_32:
dd 0xFFFF ; Limit & Base (low, bits 0-15)
db 0 ; Base (mid, bits 16-23)
db PRESENT | NOT_SYS | RW ; Access
db GRAN_4K | SZ_32 | 0xF ; Flags & Limit (high, bits 16-19)
db 0 ; Base (high, bits 24-31)
global gdt_code:data
gdt_code:
dd 0xFFFF ; Limit & Base (low, bits 0-15)
db 0 ; Base (mid, bits 16-23)
db PRESENT | NOT_SYS | EXEC | RW ; Access
db GRAN_4K | LONG_MODE | 0xF ; Flags & Limit (high, bits 16-19)
db 0 ; Base (high, bits 24-31)
global gdt_data:data
gdt_data:
dd 0xFFFF ; Limit & Base (low, bits 0-15)
db 0 ; Base (mid, bits 16-23)
db PRESENT | NOT_SYS | RW ; Access
db GRAN_4K | SZ_32 | 0xF ; Flags & Limit (high, bits 16-19)
db 0 ; Base (high, bits 24-31)
global gdt_code_user:data
gdt_code_user:
dd 0xFFFF ; Limit & Base (low, bits 0-15)
db 0 ; Base (mid, bits 16-23)
db PRESENT | USER | NOT_SYS | EXEC | RW ; Access
db GRAN_4K | LONG_MODE | 0xF ; Flags & Limit (high, bits 16-19)
db 0 ; Base (high, bits 24-31)
global gdt_data_user:data
gdt_data_user:
dd 0xFFFF ; Limit & Base (low, bits 0-15)
db 0 ; Base (mid, bits 16-23)
db PRESENT | USER | NOT_SYS | RW ; Access
db GRAN_4K | SZ_32 | 0xF ; Flags & Limit (high, bits 16-19)
db 0 ; Base (high, bits 24-31)
global gdt_tss:data
gdt_tss:
dq 0x00000000 ;TODO
dq 0x00000000
global gdt_tss_user:data
gdt_tss_user:
dq 0x00000000 ;TODO
dq 0x00000000
global gdt_end:data
gdt_end:
global gdtr:data
gdtr:
dw gdt_end - gdt_null - 1
dq gdt_null
section .text
global _gdt_setup:function (_gdt_setup.end - _gdt_setup)
_gdt_setup:
LGDT [gdtr]
; Reload CS register:
PUSH (gdt_code - gdt_null); Push the kernel code selector onto the stack (0x28 with this layout)
LEA RAX, [rel .flush] ; Load the address of .flush into RAX
PUSH RAX ; Push this value to the stack
RETFQ ; Perform a far return, RETFQ or LRETQ depending on syntax
.flush:
; Reload data segment registers
MOV AX, (gdt_data - gdt_null) ; Kernel data selector (0x30 with this layout)
MOV DS, AX
MOV ES, AX
MOV FS, AX
MOV GS, AX
MOV SS, AX
MOV AX, (gdt_tss - gdt_null)
ltr AX
RET
.end:

36
src/arch/x86/gdt.c Normal file

@@ -0,0 +1,36 @@
//
// Created by Stepan Usatiuk on 13.08.2023.
//
#include "gdt.h"
#include "misc.h"
static struct tss_entry_struct tss_entry;
static struct tss_entry_struct tss_entry_user;
#define INT_STACK_SIZE 16384
#define RSP_STACK_SIZE 16384
static uint64_t int_stack[INT_STACK_SIZE];
static uint64_t rsp_stack[RSP_STACK_SIZE];
void gdt_setup() {
uint32_t tss_limit = sizeof(tss_entry);
uint64_t tss_base = (uint64_t) &tss_entry;
gdt_tss.limit_low = tss_limit & 0xFFFF;
gdt_tss.base_low = tss_base & 0xFFFFFF;
gdt_tss.type = 0b1001;// Available 64 bit TSS
gdt_tss.zero = 0;
gdt_tss.DPL = 0;
gdt_tss.present = 1;
gdt_tss.limit_high = (tss_limit >> 16) & 0xF;
gdt_tss.available = 0;
gdt_tss.unused = 0;
gdt_tss.gran = 0;
gdt_tss.base_high = (tss_base >> 24) & 0xFFFFFFFFFF;
tss_entry.ist1 = (uint64_t) &int_stack[INT_STACK_SIZE - 1];
tss_entry.rsp0 = (uint64_t) &rsp_stack[RSP_STACK_SIZE - 1];
barrier();// The asm function might clobber registers
_gdt_setup();
}

80
src/arch/x86/gdt.h Normal file

@@ -0,0 +1,80 @@
#ifndef OS1_GDT_H
#define OS1_GDT_H
#include "stdint.h"
struct gdt_entry_bits {
unsigned int limit_low : 16;
unsigned int base_low : 24;
unsigned int accessed : 1;
unsigned int read_write : 1; // readable for code, writable for data
unsigned int conforming_expand_down : 1;// conforming for code, expand down for data
unsigned int code : 1; // 1 for code, 0 for data
unsigned int code_data_segment : 1; // should be 1 for everything but TSS and LDT
unsigned int DPL : 2; // privilege level
unsigned int present : 1;
unsigned int limit_high : 4;
unsigned int available : 1;// only used in software; has no effect on hardware
unsigned int long_mode : 1;
unsigned int big : 1; // 32-bit opcodes for code, uint32_t stack for data
unsigned int gran : 1;// 1 to use 4k page addressing, 0 for byte addressing
unsigned int base_high : 8;
} __attribute__((packed));
struct gdt_tss_entry_bits {
unsigned int limit_low : 16;
unsigned int base_low : 24;
unsigned int type : 4;
unsigned int zero : 1;
unsigned int DPL : 2;
unsigned int present : 1;
unsigned int limit_high : 4;
unsigned int available : 1;
unsigned int unused : 2;
unsigned int gran : 1;
uint64_t base_high : 40;
unsigned int zeros : 32;
} __attribute__((packed));
struct tss_entry_struct {
uint32_t reserved;
uint64_t rsp0;
uint64_t rsp1;
uint64_t rsp2;
uint64_t reserved2;
uint64_t ist1;
uint64_t ist2;
uint64_t ist3;
uint64_t ist4;
uint64_t ist5;
uint64_t ist6;
uint64_t ist7;
uint64_t reserved3;
uint32_t reserved4;
} __attribute__((packed));
void _gdt_setup();
void gdt_setup();
extern volatile struct gdt_entry_bits gdt_null;
extern volatile struct gdt_entry_bits gdt_code_16;
extern volatile struct gdt_entry_bits gdt_data_16;
extern volatile struct gdt_entry_bits gdt_code_32;
extern volatile struct gdt_entry_bits gdt_data_32;
extern volatile struct gdt_entry_bits gdt_code;
extern volatile struct gdt_entry_bits gdt_data;
extern volatile struct gdt_entry_bits gdt_code_user;
extern volatile struct gdt_entry_bits gdt_data_user;
extern volatile struct gdt_tss_entry_bits gdt_tss;
extern volatile struct gdt_tss_entry_bits gdt_tss_user;
extern volatile struct gdt_entry_bits gdt_end;/// It is not a pointer!
extern struct {
uint16_t limit;
uint64_t base;
} gdtr;
#define GDTSEL(x) (((uint64_t) &x) - ((uint64_t) &gdt_null))
#endif
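Because each gdt_entry_bits descriptor emitted in gdt.asm is 8 bytes (and the TSS descriptors 16), GDTSEL reduces to the symbol's byte offset from gdt_null, which is exactly the selector value the CPU expects. A hypothetical sanity check, not part of this commit:

#include "gdt.h"
#include "misc.h" // assumed home of assert2

static void gdt_layout_selftest(void) {
    // Five 8-byte descriptors precede gdt_code: null, code16, data16, code32, data32.
    assert2(GDTSEL(gdt_code) == 0x28, "unexpected GDT layout");
    assert2(GDTSEL(gdt_data) == 0x30, "unexpected GDT layout");
    // gdt_tss follows gdt_code_user and gdt_data_user, so it starts at 0x48.
    assert2(GDTSEL(gdt_tss) == 0x48, "unexpected GDT layout");
}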

7
src/arch/x86/globals.c Normal file

@@ -0,0 +1,7 @@
//
// Created by Stepan Usatiuk on 13.08.2023.
//
#include "globals.h"
uint64_t KERN_stack[KERN_STACK_SIZE] __attribute__((aligned(16)));

13
src/arch/x86/globals.h Normal file

@@ -0,0 +1,13 @@
//
// Created by Stepan Usatiuk on 13.08.2023.
//
#ifndef OS1_GLOBALS_H
#define OS1_GLOBALS_H
#include <stdint.h>
#define KERN_STACK_SIZE (1024 * 1024)
extern uint64_t KERN_stack[KERN_STACK_SIZE] __attribute__((aligned(16)));
#endif//OS1_GLOBALS_H

117
src/arch/x86/idt.asm Normal file

@@ -0,0 +1,117 @@
[BITS 64]
%include "task.inc.asm"
section .text
%macro isr_err_stub 1
isr_stub_%+%1:
pop rdi ; pop the CPU-pushed error code (into rdi) so iretq sees a clean frame
call exception_handler
iretq
%endmacro
%macro isr_no_err_stub 1
isr_stub_%+%1:
call exception_handler
iretq
%endmacro
extern exception_handler
isr_no_err_stub 0
isr_no_err_stub 1
isr_no_err_stub 2
isr_no_err_stub 3
isr_no_err_stub 4
isr_no_err_stub 5
isr_no_err_stub 6
isr_no_err_stub 7
isr_err_stub 8
isr_no_err_stub 9
isr_err_stub 10
isr_err_stub 11
isr_err_stub 12
isr_err_stub 13
isr_err_stub 14
isr_no_err_stub 15
isr_no_err_stub 16
isr_err_stub 17
isr_no_err_stub 18
isr_no_err_stub 19
isr_no_err_stub 20
isr_no_err_stub 21
isr_no_err_stub 22
isr_no_err_stub 23
isr_no_err_stub 24
isr_no_err_stub 25
isr_no_err_stub 26
isr_no_err_stub 27
isr_no_err_stub 28
isr_no_err_stub 29
isr_err_stub 30
isr_no_err_stub 31
section .text
%macro pic1_irq 1
extern pic1_irq_real_%+%1
global pic1_irq_%+%1
pic1_irq_%+%1:
pushaq
call pic1_irq_real_%+%1
popaq
iretq
%endmacro
%macro pic2_irq 1
extern pic2_irq_real_%+%1
global pic2_irq_%+%1
pic2_irq_%+%1:
pushaq
call pic2_irq_real_%+%1
popaq
iretq
%endmacro
extern pic1_irq_real_0
global pic1_irq_0
pic1_irq_0:
pushaq
mov rdi, 0xdeadbe3fdeadb3ef ; IDT_GUARD
push rdi ; IDT_GUARD
; pass the "pointer" to the stack as pointer to the interrupt_frame argument,
; the stack and the struct must match!
mov rdi, rsp
call pic1_irq_real_0
add rsp, 8 ; remove IDT_GUARD
popaq
iretq
pic1_irq 1
pic1_irq 2
pic1_irq 3
pic1_irq 4
pic1_irq 5
pic1_irq 6
pic1_irq 7
pic2_irq 0
pic2_irq 1
pic2_irq 2
pic2_irq 3
pic2_irq 4
pic2_irq 5
pic2_irq 6
pic2_irq 7
section .data
global isr_stub_table
isr_stub_table:
%assign i 0
%rep 32
dq isr_stub_%+i ; dq: each table slot is a full 8-byte pointer in long mode
%assign i i+1
%endrep

230
src/arch/x86/idt.c Normal file

@@ -0,0 +1,230 @@
#include "idt.h"
#include "gdt.h"
#include "io.h"
#include "misc.h"
#include "serial.h"
#include "task.h"
#include "timer.h"
__attribute__((aligned(0x10))) static idt_entry_t idt[256];// Create an array of IDT entries; aligned for performance
static idtr_t idtr;
__attribute__((noreturn)) void exception_handler(void) {
_hcf();
}
extern void pic1_irq_0();
extern void pic1_irq_1();
extern void pic1_irq_2();
extern void pic1_irq_3();
extern void pic1_irq_4();
extern void pic1_irq_5();
extern void pic1_irq_6();
extern void pic1_irq_7();
extern void pic2_irq_0();
extern void pic2_irq_1();
extern void pic2_irq_2();
extern void pic2_irq_3();
extern void pic2_irq_4();
extern void pic2_irq_5();
extern void pic2_irq_6();
extern void pic2_irq_7();
void idt_set_descriptor(uint8_t vector, void *isr, uint8_t flags) {
idt_entry_t *descriptor = &idt[vector];
descriptor->isr_low = (uint64_t) isr & 0xFFFF;
descriptor->kernel_cs = GDTSEL(gdt_code);
descriptor->ist = 0;
descriptor->attributes = flags;
descriptor->isr_mid = ((uint64_t) isr >> 16) & 0xFFFF;
descriptor->isr_high = ((uint64_t) isr >> 32) & 0xFFFFFFFF;
descriptor->reserved = 0;
}
void idt_init() {
idtr.base = (uintptr_t) &idt[0];
idtr.limit = (uint16_t) (sizeof(idt) - 1);// the limit is the offset of the last valid byte: 256 * 16 - 1
for (uint8_t vector = 0; vector < 32; vector++) {
idt_set_descriptor(vector, isr_stub_table[vector], 0x8E);
}
idt_set_descriptor(PIC1_OFFSET + 0, pic1_irq_0, 0x8e);
idt_set_descriptor(PIC1_OFFSET + 1, pic1_irq_1, 0x8e);
idt_set_descriptor(PIC1_OFFSET + 2, pic1_irq_2, 0x8e);
idt_set_descriptor(PIC1_OFFSET + 3, pic1_irq_3, 0x8e);
idt_set_descriptor(PIC1_OFFSET + 4, pic1_irq_4, 0x8e);
idt_set_descriptor(PIC1_OFFSET + 5, pic1_irq_5, 0x8e);
idt_set_descriptor(PIC1_OFFSET + 6, pic1_irq_6, 0x8e);
idt_set_descriptor(PIC1_OFFSET + 7, pic1_irq_7, 0x8e);
idt_set_descriptor(PIC2_OFFSET + 0, pic2_irq_0, 0x8e);
idt_set_descriptor(PIC2_OFFSET + 1, pic2_irq_1, 0x8e);
idt_set_descriptor(PIC2_OFFSET + 2, pic2_irq_2, 0x8e);
idt_set_descriptor(PIC2_OFFSET + 3, pic2_irq_3, 0x8e);
idt_set_descriptor(PIC2_OFFSET + 4, pic2_irq_4, 0x8e);
idt_set_descriptor(PIC2_OFFSET + 5, pic2_irq_5, 0x8e);
idt_set_descriptor(PIC2_OFFSET + 6, pic2_irq_6, 0x8e);
idt_set_descriptor(PIC2_OFFSET + 7, pic2_irq_7, 0x8e);
barrier();
__asm__ volatile("lidt %0"
:
: "m"(idtr));// load the new IDT
__asm__ volatile("sti"); // set the interrupt flag
barrier();
PIC_init();
}
void PIC_sendEOI(unsigned char irq) {
if (irq >= 8)
outb(PIC2_COMMAND, PIC_EOI);
outb(PIC1_COMMAND, PIC_EOI);
}
void PIC_init() {
unsigned char a1, a2;
a1 = inb(PIC1_DATA);// save masks
a2 = inb(PIC2_DATA);
outb(PIC1_COMMAND, ICW1_INIT | ICW1_ICW4);// starts the initialization sequence (in cascade mode)
io_wait();
outb(PIC2_COMMAND, ICW1_INIT | ICW1_ICW4);
io_wait();
outb(PIC1_DATA, PIC1_OFFSET);// ICW2: Master PIC vector offset
io_wait();
outb(PIC2_DATA, PIC2_OFFSET);// ICW2: Slave PIC vector offset
io_wait();
outb(PIC1_DATA, 4);// ICW3: tell Master PIC that there is a slave PIC at IRQ2 (0000 0100)
io_wait();
outb(PIC2_DATA, 2);// ICW3: tell Slave PIC its cascade identity (0000 0010)
io_wait();
outb(PIC1_DATA, ICW4_8086);// ICW4: have the PICs use 8086 mode (and not 8080 mode)
io_wait();
outb(PIC2_DATA, ICW4_8086);
io_wait();
outb(PIC1_DATA, a1);// restore saved masks.
outb(PIC2_DATA, a2);
}
void IRQ_set_mask(unsigned char IRQline) {
uint16_t port;
uint8_t value;
if (IRQline < 8) {
port = PIC1_DATA;
} else {
port = PIC2_DATA;
IRQline -= 8;
}
value = inb(port) | (1 << IRQline);
outb(port, value);
}
void IRQ_clear_mask(unsigned char IRQline) {
uint16_t port;
uint8_t value;
if (IRQline < 8) {
port = PIC1_DATA;
} else {
port = PIC2_DATA;
IRQline -= 8;
}
value = inb(port) & ~(1 << IRQline);
outb(port, value);
}
/* Helper func */
static uint16_t __pic_get_irq_reg(int ocw3) {
/* OCW3 to PIC CMD to get the register values. PIC2 is chained, and
* represents IRQs 8-15. PIC1 is IRQs 0-7, with 2 being the chain */
outb(PIC1_COMMAND, ocw3);
outb(PIC2_COMMAND, ocw3);
return (inb(PIC2_COMMAND) << 8) | inb(PIC1_COMMAND);
}
/* Returns the combined value of the cascaded PICs irq request register */
uint16_t pic_get_irr(void) {
return __pic_get_irq_reg(PIC_READ_IRR);
}
/* Returns the combined value of the cascaded PICs in-service register */
uint16_t pic_get_isr(void) {
return __pic_get_irq_reg(PIC_READ_ISR);
}
void pic1_irq_real_0(struct task_frame *frame) {
timer_tick();
assert2(frame->guard == IDT_GUARD, "IDT Guard wrong!");
assert2((frame->ss == GDTSEL(gdt_data) || frame->ss == GDTSEL(gdt_data_user)), "SS wrong!");
switch_task_int(frame);
assert2(frame->guard == IDT_GUARD, "IDT Guard wrong!");
assert2((frame->ss == GDTSEL(gdt_data) || frame->ss == GDTSEL(gdt_data_user)), "SS wrong!");
PIC_sendEOI(0);
}
void pic1_irq_real_1() {
PIC_sendEOI(1);
}
void pic1_irq_real_2() {
_hcf();
PIC_sendEOI(2);
}
void pic1_irq_real_3() {
PIC_sendEOI(3);
}
void pic1_irq_real_4() {
PIC_sendEOI(4);
}
void pic1_irq_real_5() {
PIC_sendEOI(5);
}
void pic1_irq_real_6() {
PIC_sendEOI(6);
}
void pic1_irq_real_7() {
// Spurious IRQ 7: consult the in-service register; if bit 7 is clear there is nothing to EOI
int isr = pic_get_isr();
if (!(isr & 0x80)) return;
PIC_sendEOI(7);
}
void pic2_irq_real_0() {
PIC_sendEOI(8);
}
void pic2_irq_real_1() {
PIC_sendEOI(9);
}
void pic2_irq_real_2() {
PIC_sendEOI(10);
}
void pic2_irq_real_3() {
PIC_sendEOI(11);
}
void pic2_irq_real_4() {
PIC_sendEOI(12);
}
void pic2_irq_real_5() {
PIC_sendEOI(13);
}
void pic2_irq_real_6() {
PIC_sendEOI(14);
}
void pic2_irq_real_7() {
// Spurious IRQ 15: consult the in-service register. The master PIC still needs
// an EOI for the cascade line even when the slave interrupt was spurious.
int isr = pic_get_isr();
if (!(isr & (0x80 << 8))) {
outb(PIC1_COMMAND, PIC_EOI);// EOI for the cascade, on the master only
return;
}
PIC_sendEOI(15);
}
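A usage sketch (not in this commit): since PIC_init() restores whatever masks the bootloader left behind, a kernel typically masks every line except the ones it actually services, e.g. the PIT on IRQ 0 plus the cascade on IRQ 2:

#include "idt.h"

void pic_mask_all_but_timer(void) {
    for (unsigned char irq = 0; irq < 16; irq++)
        IRQ_set_mask(irq);
    IRQ_clear_mask(0);// PIT timer
    IRQ_clear_mask(2);// cascade line; required for any slave (PIC2) IRQ to arrive
}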

96
src/arch/x86/idt.h Normal file

@@ -0,0 +1,96 @@
#ifndef OS1_IDT_H
#define OS1_IDT_H
#include <stdint.h>
#include <stddef.h>
#define PIC1 0x20 /* IO base address for master PIC */
#define PIC2 0xA0 /* IO base address for slave PIC */
#define PIC1_COMMAND PIC1
#define PIC1_DATA (PIC1+1)
#define PIC2_COMMAND PIC2
#define PIC2_DATA (PIC2+1)
#define PIC_EOI 0x20 /* End-of-interrupt command code */
#define ICW1_ICW4 0x01 /* Indicates that ICW4 will be present */
#define ICW1_SINGLE 0x02 /* Single (cascade) mode */
#define ICW1_INTERVAL4 0x04 /* Call address interval 4 (8) */
#define ICW1_LEVEL 0x08 /* Level triggered (edge) mode */
#define ICW1_INIT 0x10 /* Initialization - required! */
#define ICW4_8086 0x01 /* 8086/88 (MCS-80/85) mode */
#define ICW4_AUTO 0x02 /* Auto (normal) EOI */
#define ICW4_BUF_SLAVE 0x08 /* Buffered mode/slave */
#define ICW4_BUF_MASTER 0x0C /* Buffered mode/master */
#define ICW4_SFNM 0x10 /* Special fully nested (not) */
#define PIC_READ_IRR 0x0a /* OCW3 irq ready next CMD read */
#define PIC_READ_ISR 0x0b /* OCW3 irq service next CMD read */
#define PIC1_OFFSET 0x20
#define PIC2_OFFSET 0x28
void PIC_sendEOI(unsigned char irq);
void PIC_init();
void IRQ_set_mask(unsigned char IRQline) ;
void IRQ_clear_mask(unsigned char IRQline);
uint16_t pic_get_irr(void);
uint16_t pic_get_isr(void);
typedef struct {
uint16_t isr_low; // The lower 16 bits of the ISR's address
uint16_t kernel_cs; // The GDT segment selector that the CPU will load into CS before calling the ISR
uint8_t ist; // The IST in the TSS that the CPU will load into RSP; set to zero for now
uint8_t attributes; // Type and attributes; see the IDT page
uint16_t isr_mid; // The higher 16 bits of the lower 32 bits of the ISR's address
uint32_t isr_high; // The higher 32 bits of the ISR's address
uint32_t reserved; // Set to zero
} __attribute__((packed)) idt_entry_t;
typedef struct {
uint16_t limit;
uint64_t base;
} __attribute__((packed)) idtr_t;
#define IDT_GUARD 0xdeadbe3fdeadb3efULL
// The field order must mirror the push sequence in idt.asm exactly; the struct is packed so the compiler cannot reorder or pad it
struct task_frame {
uint64_t guard;
char ssestate[512];
uint64_t r15;
uint64_t r14;
uint64_t r13;
uint64_t r12;
uint64_t r11;
uint64_t r10;
uint64_t r9;
uint64_t r8;
uint64_t rdi;
uint64_t rsi;
uint64_t rbp;
uint64_t rbx;
uint64_t rdx;
uint64_t rcx;
uint64_t rax;
uint64_t ip;
uint64_t cs;
uint64_t flags;
uint64_t sp;
uint64_t ss;
} __attribute__((packed));
void exception_handler(void);
void idt_set_descriptor(uint8_t vector, void* isr, uint8_t flags);
void idt_init(void);
extern void* isr_stub_table[];
#endif
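The attributes byte used throughout idt.c packs present (bit 7), DPL (bits 5-6), and the gate type (0xE = 64-bit interrupt gate), so 0x8E is a ring-0 interrupt gate. Installing a hypothetical extra vector follows the same pattern (sketch, not in this commit; the stub name is made up):

#include "idt.h"

extern void my_vector_stub(void);// assumed NASM stub that ends in iretq

void install_example_vector(void) {
    idt_set_descriptor(0x80, my_vector_stub, 0x8E);// use 0xEE instead to make it reachable from ring 3
}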

5
src/arch/x86/io.c Normal file

@@ -0,0 +1,5 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#include "io.h"

30
src/arch/x86/io.h Normal file

@@ -0,0 +1,30 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#ifndef OS1_IO_H
#define OS1_IO_H
#include <stdint.h>
static inline void outb(uint16_t port, uint8_t val) {
__asm__ volatile("outb %0, %1"
:
: "a"(val), "Nd"(port)
: "memory");
}
static inline uint8_t inb(uint16_t port) {
uint8_t ret;
__asm__ volatile("inb %1, %0"
: "=a"(ret)
: "Nd"(port)
: "memory");
return ret;
}
static inline void io_wait(void) {
// Write to unused port 0x80 (POST codes): a roughly 1 µs delay that lets slow PICs settle
outb(0x80, 0);
}
#endif//OS1_IO_H

1
src/arch/x86/kmain.asm Normal file

@@ -0,0 +1 @@
[BITS 64]

145
src/arch/x86/kmain.c Normal file

@@ -0,0 +1,145 @@
//
// Created by Stepan Usatiuk on 13.08.2023.
//
#include <stdatomic.h>
#include <stddef.h>
#include "globals.h"
#include "kmem.h"
#include "limine_fb.h"
#include "memman.h"
#include "misc.h"
#include "mutex.h"
#include "serial.h"
#include "task.h"
#include "timer.h"
#include "tty.h"
void ktask();
void ktask2() {
// Ensure we got a framebuffer.
assert2(framebuffer_count >= 1, "No framebuffer!");
struct limine_framebuffer *framebuffer = &framebuffers[0];
for (uint32_t c = 0; c < 2; c++) {
// Note: we assume the framebuffer model is RGB with 32-bit pixels.
for (size_t i = 0; i < 100; i++) {
sleep_self(250);
uint32_t *fb_ptr = framebuffer->address;
fb_ptr[i * (framebuffer->pitch / 4) + i + 100] = c ? 0 : 0xFFFFFF;
}
}
new_ktask(ktask, "one");
remove_self();
}
void ktask() {
// Ensure we got a framebuffer.
assert2(framebuffer_count >= 1, "No framebuffer!");
struct limine_framebuffer *framebuffer = &framebuffers[0];
for (uint32_t c = 0; c < 2; c++) {
// Note: we assume the framebuffer model is RGB with 32-bit pixels.
for (size_t i = 0; i < 100; i++) {
sleep_self(250);
uint32_t *fb_ptr = framebuffer->address;
fb_ptr[i * (framebuffer->pitch / 4) + i] = c ? 0 : 0xFFFFFF;
}
}
new_ktask(ktask2, "two");
remove_self();
}
void freeprinter() {
char buf[69];
while (1) {
itoa(get_free(), buf, 10);
all_tty_putstr("Free mem: ");
all_tty_putstr(buf);
write_serial('\n');
sleep_self(10000);
}
}
static struct Mutex testmutex = DefaultMutex;
void mtest1() {
m_lock(&testmutex);
all_tty_putstr("Locked1\n");
sleep_self(100000);
m_unlock(&testmutex);
all_tty_putstr("Unlocked1\n");
remove_self();
}
void mtest2() {
m_lock(&testmutex);
all_tty_putstr("Locked2\n");
sleep_self(100000);
m_unlock(&testmutex);
all_tty_putstr("Unlocked2\n");
remove_self();
}
void mtest3() {
m_lock(&testmutex);
all_tty_putstr("Locked3\n");
sleep_self(100000);
m_unlock(&testmutex);
all_tty_putstr("Unlocked3\n");
remove_self();
}
void stress() {
static atomic_int i = 0;
int curi = i++;
if (curi > 1500) remove_self();
sleep_self(10000 - curi * 10);
char buf[69];
itoa(curi, buf, 10);
all_tty_putstr("stress ");
all_tty_putstr(buf);
all_tty_putstr("\n");
remove_self();
}
void ktask_main() {
new_ktask(ktask, "one");
new_ktask(freeprinter, "freeprinter");
new_ktask(mtest1, "mtest1");
new_ktask(mtest2, "mtest2");
new_ktask(mtest3, "mtest3");
for (int i = 0; i < 2000; i++)
new_ktask(stress, "stress");
all_tty_putstr("Finished stress");
remove_self();
}
void dummy_task() {
for (;;) {
__asm__ __volatile__("hlt");
}
}
void kmain() {
struct tty_funcs serial_tty = {.putchar = write_serial};
add_tty(serial_tty);
init_timer();
new_ktask(ktask_main, "ktask_main");
new_ktask(dummy_task, "dummy");
init_tasks();
for (;;) {
__asm__ __volatile__("hlt");
}
}
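One convention worth noting in this file: every task body ends with remove_self(), because functions started by new_ktask have nowhere to return to. A minimal task skeleton under that assumption:

#include "task.h"

void example_task(void) {
    // ... do work, optionally blocking via sleep_self()/m_lock()/cv_wait() ...
    remove_self();// deschedule and tear down this task; simply returning would be fatal
}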

345
src/arch/x86/kmem.c Normal file

@@ -0,0 +1,345 @@
#include "kmem.h"
#include "globals.h"
#include "memman.h"
#include "mutex.h"
#include "paging.h"
#include "serial.h"
#include "task.h"
struct HeapEntry *KERN_HeapBegin;
uintptr_t KERN_HeapEnd;// Past the end
static bool initialized = false;
static struct Mutex kmem_lock = DefaultMutex;
static char kmem_lock_tasklist[256];//FIXME:
void init_kern_heap() {
KERN_HeapBegin = get4k();
KERN_HeapBegin->magic = KERN_HeapMagicFree;
KERN_HeapBegin->len = 4096 - (sizeof(struct HeapEntry));
KERN_HeapBegin->next = NULL;
KERN_HeapBegin->prev = NULL;
map((void *) KERN_HeapVirtBegin, (void *) HHDM_V2P(KERN_HeapBegin), PAGE_RW, KERN_AddressSpace);
KERN_HeapBegin = (struct HeapEntry *) KERN_HeapVirtBegin;
KERN_HeapEnd = (KERN_HeapVirtBegin + 4096);
kmem_lock.waiters = (struct TaskList *) kmem_lock_tasklist;
initialized = true;
}
static void extend_heap(size_t n_pages) {
assert(kmem_lock.owner == cur_task());
for (size_t i = 0; i < n_pages; i++) {
void *p = get4k();
assert2(p != NULL, "Kernel out of memory!");
map((void *) KERN_HeapEnd, (void *) HHDM_V2P(p), PAGE_RW, KERN_AddressSpace);
KERN_HeapEnd += 4096;
}
}
// n is required length!
struct HeapEntry *split_entry(struct HeapEntry *what, size_t n) {
assert(kmem_lock.owner == cur_task());
assert2(what->len > (n + sizeof(struct HeapEntry)), "Trying to split a heap entry that's too small!");
struct HeapEntry *new_entry = (((void *) what) + sizeof(struct HeapEntry) + n);
new_entry->magic = KERN_HeapMagicFree;
new_entry->next = what->next;
new_entry->prev = what;
new_entry->len = what->len - n - sizeof(struct HeapEntry);
what->len = n;
if (new_entry->next)
new_entry->next->prev = new_entry;
what->next = new_entry;
return new_entry;
}
void *kmalloc(size_t n) {
assert(initialized);
m_lock(&kmem_lock);
struct HeapEntry *entry = KERN_HeapBegin;
assert2(entry->magic == KERN_HeapMagicFree, "Bad heap!");
struct HeapEntry *res = NULL;
struct HeapEntry *prev = NULL;
do {
assert2(entry->magic == KERN_HeapMagicFree, "Bad heap!");
if (prev) {
assert(entry->prev == prev);
assert(prev->next == entry);
assert(entry->prev->next == entry);
}
if (entry->len == n) {
res = entry;
if (prev) {
prev->next = entry->next;
if (entry->next)
entry->next->prev = prev;
} else {
if (entry->next) {
KERN_HeapBegin = entry->next;
entry->next->prev = NULL;
} else {
KERN_HeapBegin = (struct HeapEntry *) KERN_HeapEnd;
extend_heap(1);
KERN_HeapBegin->next = NULL;
KERN_HeapBegin->prev = NULL;
KERN_HeapBegin->magic = KERN_HeapMagicFree;
KERN_HeapBegin->len = 4096 - (sizeof(struct HeapEntry));
}
}
break;
}
if (entry->len > n + sizeof(struct HeapEntry)) {
res = entry;
struct HeapEntry *new_split_entry = split_entry(res, n);
if (prev) {
prev->next = new_split_entry;
new_split_entry->prev = prev;
} else {
KERN_HeapBegin = new_split_entry;
new_split_entry->prev = NULL;
}
if (new_split_entry->prev)
assert(new_split_entry->prev->magic == KERN_HeapMagicFree);
break;
}
prev = entry;
entry = entry->next;
} while (entry);
if (!res) {
entry = prev;
assert2(entry->magic == KERN_HeapMagicFree, "Expected last tried entry to be free");
assert2(entry->next == NULL, "Expected last tried entry to be the last");
size_t data_needed = n + (2 * sizeof(struct HeapEntry));
size_t pages_needed = ((data_needed & 0xFFF) == 0)
? data_needed >> 12
: ((data_needed & (~0xFFF)) + 0x1000) >> 12;
struct HeapEntry *new_entry = (struct HeapEntry *) KERN_HeapEnd;
extend_heap(pages_needed);
new_entry->next = NULL;
new_entry->prev = entry;
new_entry->magic = KERN_HeapMagicFree;
new_entry->len = (pages_needed * 4096) - (sizeof(struct HeapEntry));
assert2(new_entry->len >= n, "Expected allocated heap entry to fit what we wanted");
res = new_entry;
if (new_entry->len > n) {
struct HeapEntry *new_split_entry = split_entry(res, n);
entry->next = new_split_entry;
new_split_entry->prev = entry;
if (new_split_entry->prev)
assert(new_split_entry->prev->magic == KERN_HeapMagicFree);
}
}
if (res) {
// if (res->next) res->next->prev = res->prev;
// if (res->prev) res->prev->next = res->next;
res->next = NULL;
res->prev = NULL;
res->magic = KERN_HeapMagicTaken;
m_unlock(&kmem_lock);
for (size_t i = 0; i < n; i++) res->data[i] = 0xFEU;// poison the fresh allocation so use of uninitialized memory is easy to spot
return res->data;
} else {
m_unlock(&kmem_lock);
return NULL;
}
}
static void try_merge_fwd(struct HeapEntry *entry) {
assert(kmem_lock.owner == cur_task());
assert2(entry->magic == KERN_HeapMagicFree, "Bad merge!");
assert(entry->prev == NULL);
struct HeapEntry *nextEntry = (struct HeapEntry *) ((uint64_t) entry + ((uint64_t) sizeof(struct HeapEntry)) + entry->len);
while ((uint64_t) nextEntry < KERN_HeapEnd && nextEntry->magic == KERN_HeapMagicFree) {
if (nextEntry->prev) assert(nextEntry->prev->magic == KERN_HeapMagicFree);
if (nextEntry->next) assert(nextEntry->next->magic == KERN_HeapMagicFree);
if (nextEntry == entry->next) {
if (nextEntry->next)// guard: nextEntry may be the last entry in the free list
nextEntry->next->prev = entry;
entry->next = nextEntry->next;
} else {
assert(nextEntry->prev && nextEntry->prev->magic == KERN_HeapMagicFree);
struct HeapEntry *victimR = nextEntry->next;
if (victimR) {
assert(victimR->magic == KERN_HeapMagicFree);
victimR->prev = nextEntry->prev;
nextEntry->prev->next = victimR;
} else {
nextEntry->prev->next = NULL;
}
}
entry->len = entry->len + sizeof(struct HeapEntry) + nextEntry->len;
nextEntry = (struct HeapEntry *) ((uint64_t) entry + sizeof(struct HeapEntry) + entry->len);
}
}
static struct HeapEntry *try_shrink_heap(struct HeapEntry *entry) {
assert(kmem_lock.owner == cur_task());
assert(entry->prev == NULL);
if ((uint64_t) entry + sizeof(struct HeapEntry) + entry->len == KERN_HeapEnd) {
// Shrink it if it's at least two pages
if (entry->len + sizeof(struct HeapEntry) < 4096 * 2) {
return entry;
}
struct HeapEntry *ret = NULL;
// Check alignment, in case of non-alignment, split
if (((uint64_t) entry & 0xFFF) != 0) {
uint64_t diff = (uint64_t) entry & 0xFFF;
// Should always work as we're checking if the length is at least two pages
entry = split_entry(entry, (0x1000ULL - diff) - sizeof(struct HeapEntry));
ret = entry->prev;
ret->next = entry->next;
if (entry->next)
entry->next->prev = ret;
} else {
ret = entry->next;
ret->prev = NULL;
}
assert(((uint64_t) entry & 0xFFF) == 0);
KERN_HeapEnd = (uintptr_t) entry;
uint64_t totallen = entry->len + sizeof(struct HeapEntry);
assert(((uint64_t) totallen & 0xFFF) == 0);
uint64_t total_pages = totallen / 4096;
for (uint64_t i = 0; i < total_pages; i++) {
free4k((void *) HHDM_P2V(virt2real((void *) (KERN_HeapEnd + 4096 * i), KERN_AddressSpace)));
unmap((void *) (KERN_HeapEnd + 4096 * i), KERN_AddressSpace);
}
return ret;
}
return entry;
}
void kfree(void *addr) {
assert(initialized);
m_lock(&kmem_lock);
struct HeapEntry *freed = addr - (sizeof(struct HeapEntry));
struct HeapEntry *entry = KERN_HeapBegin;
assert2(freed->magic == KERN_HeapMagicTaken, "Bad free!");
assert2(freed->next == NULL, "Bad free!");
assert2(freed->prev == NULL, "Bad free!");
assert2(entry->magic == KERN_HeapMagicFree, "Bad free!");
assert2(entry->prev == NULL, "Bad free!");
freed->next = entry;
entry->prev = freed;
KERN_HeapBegin = freed;
freed->magic = KERN_HeapMagicFree;
try_merge_fwd(freed);
assert2(freed->prev == NULL, "Bad free!");
KERN_HeapBegin = try_shrink_heap(freed);
assert(KERN_HeapBegin != NULL);
assert2(KERN_HeapBegin->prev == NULL, "Bad free!");
m_unlock(&kmem_lock);
}
void *krealloc(void *addr, size_t newsize) {
assert(initialized);
struct HeapEntry *info = addr - (sizeof(struct HeapEntry));
assert2(info->magic == KERN_HeapMagicTaken, "Bad realloc!");
void *new = kmalloc(newsize);
if (!new) return NULL;// propagate out-of-memory instead of crashing in memcpy
memcpy(new, addr, newsize > info->len ? info->len : newsize);
kfree(addr);
return new;
}
void *memcpy(void *dest, const void *src, size_t n) {
uint8_t *pdest = (uint8_t *) dest;
const uint8_t *psrc = (const uint8_t *) src;
for (size_t i = 0; i < n; i++) {
pdest[i] = psrc[i];
}
return dest;
}
void *memset(void *s, int c, size_t n) {
uint8_t *p = (uint8_t *) s;
for (size_t i = 0; i < n; i++) {
p[i] = (uint8_t) c;
}
return s;
}
void *memmove(void *dest, const void *src, size_t n) {
uint8_t *pdest = (uint8_t *) dest;
const uint8_t *psrc = (const uint8_t *) src;
if (src > dest) {
for (size_t i = 0; i < n; i++) {
pdest[i] = psrc[i];
}
} else if (src < dest) {
for (size_t i = n; i > 0; i--) {
pdest[i - 1] = psrc[i - 1];
}
}
return dest;
}
int memcmp(const void *s1, const void *s2, size_t n) {
const uint8_t *p1 = (const uint8_t *) s1;
const uint8_t *p2 = (const uint8_t *) s2;
for (size_t i = 0; i < n; i++) {
if (p1[i] != p2[i]) {
return p1[i] < p2[i] ? -1 : 1;
}
}
return 0;
}
uint64_t strlen(char *str) {
uint64_t res = 0;
while (*(str++) != '\0') res++;
return res;
}
void strcpy(const char *src, char *dst) {
int i = 0;
while (src[i] != '\0') {
dst[i] = src[i];
i++;
}
dst[i] = '\0';
}
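The page-count rounding buried in kmalloc's grow path deserves a worked example. A standalone sketch (not in this commit), assuming sizeof(struct HeapEntry) == 32 on this target:

#include <stddef.h>

// kmalloc(5000): data_needed = 5000 + 2*32 = 5064 = 0x13C8.
// 0x13C8 & 0xFFF != 0, so round up: (0x13C8 & ~0xFFF) = 0x1000,
// 0x1000 + 0x1000 = 0x2000, and 0x2000 >> 12 = 2 pages.
static size_t pages_for(size_t data_needed) {
    return ((data_needed & 0xFFF) == 0)
                   ? data_needed >> 12
                   : ((data_needed & ~(size_t) 0xFFF) + 0x1000) >> 12;
}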

35
src/arch/x86/kmem.h Normal file

@@ -0,0 +1,35 @@
#ifndef OS1_KMEM_H
#define OS1_KMEM_H
#include <stddef.h>
#include <stdint.h>
void *memcpy(void *dest, const void *src, size_t n);
void *memset(void *s, int c, size_t n);
void *memmove(void *dest, const void *src, size_t n);
int memcmp(const void *s1, const void *s2, size_t n);
uint64_t strlen(char *str);
void strcpy(const char *src, char *dst);
#define KERN_HeapVirtBegin (0xffffc00000000000ULL)
#define KERN_HeapMagicFree 0xDEDE
#define KERN_HeapMagicTaken 0xADAD
void init_kern_heap();
struct HeapEntry {
uint_fast16_t magic;
struct HeapEntry *next;
struct HeapEntry *prev;
uint64_t len;
char data[];
};
extern struct HeapEntry *KERN_HeapBegin;
extern uintptr_t KERN_HeapEnd;// Past the end
void *kmalloc(size_t n);
void kfree(void *addr);
void *krealloc(void *addr, size_t newsize);
#endif
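Usage sketch (not part of this commit). Every allocation is preceded by its HeapEntry header, so kmalloc(n) consumes n + sizeof(struct HeapEntry) bytes of heap and hands back a pointer to the data member:

#include "kmem.h"

void kmem_example(void) {
    char *p = kmalloc(64);// the header lives immediately below p
    if (!p) return;
    p = krealloc(p, 128); // allocates anew, copies the old 64 bytes, frees the old block
    if (p) kfree(p);
}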

555
src/arch/x86/limine.h Normal file

@@ -0,0 +1,555 @@
/* BSD Zero Clause License */
/* Copyright (C) 2022-2023 mintsuki and contributors.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _LIMINE_H
#define _LIMINE_H 1
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
/* Misc */
#ifdef LIMINE_NO_POINTERS
# define LIMINE_PTR(TYPE) uint64_t
#else
# define LIMINE_PTR(TYPE) TYPE
#endif
#ifdef __GNUC__
# define LIMINE_DEPRECATED __attribute__((__deprecated__))
# define LIMINE_DEPRECATED_IGNORE_START \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
# define LIMINE_DEPRECATED_IGNORE_END \
_Pragma("GCC diagnostic pop")
#else
# define LIMINE_DEPRECATED
# define LIMINE_DEPRECATED_IGNORE_START
# define LIMINE_DEPRECATED_IGNORE_END
#endif
#define LIMINE_COMMON_MAGIC 0xc7b1dd30df4c8b88, 0x0a82e883a194f07b
struct limine_uuid {
uint32_t a;
uint16_t b;
uint16_t c;
uint8_t d[8];
};
#define LIMINE_MEDIA_TYPE_GENERIC 0
#define LIMINE_MEDIA_TYPE_OPTICAL 1
#define LIMINE_MEDIA_TYPE_TFTP 2
struct limine_file {
uint64_t revision;
LIMINE_PTR(void *) address;
uint64_t size;
LIMINE_PTR(char *) path;
LIMINE_PTR(char *) cmdline;
uint32_t media_type;
uint32_t unused;
uint32_t tftp_ip;
uint32_t tftp_port;
uint32_t partition_index;
uint32_t mbr_disk_id;
struct limine_uuid gpt_disk_uuid;
struct limine_uuid gpt_part_uuid;
struct limine_uuid part_uuid;
};
/* Boot info */
#define LIMINE_BOOTLOADER_INFO_REQUEST { LIMINE_COMMON_MAGIC, 0xf55038d8e2a1202f, 0x279426fcf5f59740 }
struct limine_bootloader_info_response {
uint64_t revision;
LIMINE_PTR(char *) name;
LIMINE_PTR(char *) version;
};
struct limine_bootloader_info_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_bootloader_info_response *) response;
};
/* Stack size */
#define LIMINE_STACK_SIZE_REQUEST { LIMINE_COMMON_MAGIC, 0x224ef0460a8e8926, 0xe1cb0fc25f46ea3d }
struct limine_stack_size_response {
uint64_t revision;
};
struct limine_stack_size_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_stack_size_response *) response;
uint64_t stack_size;
};
/* HHDM */
#define LIMINE_HHDM_REQUEST { LIMINE_COMMON_MAGIC, 0x48dcf1cb8ad2b852, 0x63984e959a98244b }
struct limine_hhdm_response {
uint64_t revision;
uint64_t offset;
};
struct limine_hhdm_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_hhdm_response *) response;
};
/* Framebuffer */
#define LIMINE_FRAMEBUFFER_REQUEST { LIMINE_COMMON_MAGIC, 0x9d5827dcd881dd75, 0xa3148604f6fab11b }
#define LIMINE_FRAMEBUFFER_RGB 1
struct limine_video_mode {
uint64_t pitch;
uint64_t width;
uint64_t height;
uint16_t bpp;
uint8_t memory_model;
uint8_t red_mask_size;
uint8_t red_mask_shift;
uint8_t green_mask_size;
uint8_t green_mask_shift;
uint8_t blue_mask_size;
uint8_t blue_mask_shift;
};
struct limine_framebuffer {
LIMINE_PTR(void *) address;
uint64_t width;
uint64_t height;
uint64_t pitch;
uint16_t bpp;
uint8_t memory_model;
uint8_t red_mask_size;
uint8_t red_mask_shift;
uint8_t green_mask_size;
uint8_t green_mask_shift;
uint8_t blue_mask_size;
uint8_t blue_mask_shift;
uint8_t unused[7];
uint64_t edid_size;
LIMINE_PTR(void *) edid;
/* Response revision 1 */
uint64_t mode_count;
LIMINE_PTR(struct limine_video_mode **) modes;
};
struct limine_framebuffer_response {
uint64_t revision;
uint64_t framebuffer_count;
LIMINE_PTR(struct limine_framebuffer **) framebuffers;
};
struct limine_framebuffer_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_framebuffer_response *) response;
};
/* Terminal */
#define LIMINE_TERMINAL_REQUEST { LIMINE_COMMON_MAGIC, 0xc8ac59310c2b0844, 0xa68d0c7265d38878 }
#define LIMINE_TERMINAL_CB_DEC 10
#define LIMINE_TERMINAL_CB_BELL 20
#define LIMINE_TERMINAL_CB_PRIVATE_ID 30
#define LIMINE_TERMINAL_CB_STATUS_REPORT 40
#define LIMINE_TERMINAL_CB_POS_REPORT 50
#define LIMINE_TERMINAL_CB_KBD_LEDS 60
#define LIMINE_TERMINAL_CB_MODE 70
#define LIMINE_TERMINAL_CB_LINUX 80
#define LIMINE_TERMINAL_CTX_SIZE ((uint64_t)(-1))
#define LIMINE_TERMINAL_CTX_SAVE ((uint64_t)(-2))
#define LIMINE_TERMINAL_CTX_RESTORE ((uint64_t)(-3))
#define LIMINE_TERMINAL_FULL_REFRESH ((uint64_t)(-4))
/* Response revision 1 */
#define LIMINE_TERMINAL_OOB_OUTPUT_GET ((uint64_t)(-10))
#define LIMINE_TERMINAL_OOB_OUTPUT_SET ((uint64_t)(-11))
#define LIMINE_TERMINAL_OOB_OUTPUT_OCRNL (1 << 0)
#define LIMINE_TERMINAL_OOB_OUTPUT_OFDEL (1 << 1)
#define LIMINE_TERMINAL_OOB_OUTPUT_OFILL (1 << 2)
#define LIMINE_TERMINAL_OOB_OUTPUT_OLCUC (1 << 3)
#define LIMINE_TERMINAL_OOB_OUTPUT_ONLCR (1 << 4)
#define LIMINE_TERMINAL_OOB_OUTPUT_ONLRET (1 << 5)
#define LIMINE_TERMINAL_OOB_OUTPUT_ONOCR (1 << 6)
#define LIMINE_TERMINAL_OOB_OUTPUT_OPOST (1 << 7)
LIMINE_DEPRECATED_IGNORE_START
struct LIMINE_DEPRECATED limine_terminal;
typedef void (*limine_terminal_write)(struct limine_terminal *, const char *, uint64_t);
typedef void (*limine_terminal_callback)(struct limine_terminal *, uint64_t, uint64_t, uint64_t, uint64_t);
struct LIMINE_DEPRECATED limine_terminal {
uint64_t columns;
uint64_t rows;
LIMINE_PTR(struct limine_framebuffer *) framebuffer;
};
struct LIMINE_DEPRECATED limine_terminal_response {
uint64_t revision;
uint64_t terminal_count;
LIMINE_PTR(struct limine_terminal **) terminals;
LIMINE_PTR(limine_terminal_write) write;
};
struct LIMINE_DEPRECATED limine_terminal_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_terminal_response *) response;
LIMINE_PTR(limine_terminal_callback) callback;
};
LIMINE_DEPRECATED_IGNORE_END
/* Paging mode */
#define LIMINE_PAGING_MODE_REQUEST { LIMINE_COMMON_MAGIC, 0x95c1a0edab0944cb, 0xa4e5cb3842f7488a }
#if defined (__x86_64__) || defined (__i386__)
#define LIMINE_PAGING_MODE_X86_64_4LVL 0
#define LIMINE_PAGING_MODE_X86_64_5LVL 1
#define LIMINE_PAGING_MODE_MAX LIMINE_PAGING_MODE_X86_64_5LVL
#define LIMINE_PAGING_MODE_DEFAULT LIMINE_PAGING_MODE_X86_64_4LVL
#elif defined (__aarch64__)
#define LIMINE_PAGING_MODE_AARCH64_4LVL 0
#define LIMINE_PAGING_MODE_AARCH64_5LVL 1
#define LIMINE_PAGING_MODE_MAX LIMINE_PAGING_MODE_AARCH64_5LVL
#define LIMINE_PAGING_MODE_DEFAULT LIMINE_PAGING_MODE_AARCH64_4LVL
#elif defined (__riscv) && (__riscv_xlen == 64)
#define LIMINE_PAGING_MODE_RISCV_SV39 0
#define LIMINE_PAGING_MODE_RISCV_SV48 1
#define LIMINE_PAGING_MODE_RISCV_SV57 2
#define LIMINE_PAGING_MODE_MAX LIMINE_PAGING_MODE_RISCV_SV57
#define LIMINE_PAGING_MODE_DEFAULT LIMINE_PAGING_MODE_RISCV_SV48
#else
#error Unknown architecture
#endif
struct limine_paging_mode_response {
uint64_t revision;
uint64_t mode;
uint64_t flags;
};
struct limine_paging_mode_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_paging_mode_response *) response;
uint64_t mode;
uint64_t flags;
};
/* 5-level paging */
#define LIMINE_5_LEVEL_PAGING_REQUEST { LIMINE_COMMON_MAGIC, 0x94469551da9b3192, 0xebe5e86db7382888 }
LIMINE_DEPRECATED_IGNORE_START
struct LIMINE_DEPRECATED limine_5_level_paging_response {
uint64_t revision;
};
struct LIMINE_DEPRECATED limine_5_level_paging_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_5_level_paging_response *) response;
};
LIMINE_DEPRECATED_IGNORE_END
/* SMP */
#define LIMINE_SMP_REQUEST { LIMINE_COMMON_MAGIC, 0x95a67b819a1b857e, 0xa0b61b723b6a73e0 }
struct limine_smp_info;
typedef void (*limine_goto_address)(struct limine_smp_info *);
#if defined (__x86_64__) || defined (__i386__)
#define LIMINE_SMP_X2APIC (1 << 0)
struct limine_smp_info {
uint32_t processor_id;
uint32_t lapic_id;
uint64_t reserved;
LIMINE_PTR(limine_goto_address) goto_address;
uint64_t extra_argument;
};
struct limine_smp_response {
uint64_t revision;
uint32_t flags;
uint32_t bsp_lapic_id;
uint64_t cpu_count;
LIMINE_PTR(struct limine_smp_info **) cpus;
};
#elif defined (__aarch64__)
struct limine_smp_info {
uint32_t processor_id;
uint32_t gic_iface_no;
uint64_t mpidr;
uint64_t reserved;
LIMINE_PTR(limine_goto_address) goto_address;
uint64_t extra_argument;
};
struct limine_smp_response {
uint64_t revision;
uint32_t flags;
uint64_t bsp_mpidr;
uint64_t cpu_count;
LIMINE_PTR(struct limine_smp_info **) cpus;
};
#elif defined (__riscv) && (__riscv_xlen == 64)
struct limine_smp_info {
uint32_t processor_id;
uint64_t hartid;
uint64_t reserved;
LIMINE_PTR(limine_goto_address) goto_address;
uint64_t extra_argument;
};
struct limine_smp_response {
uint64_t revision;
uint32_t flags;
uint64_t bsp_hartid;
uint64_t cpu_count;
LIMINE_PTR(struct limine_smp_info **) cpus;
};
#else
#error Unknown architecture
#endif
struct limine_smp_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_smp_response *) response;
uint64_t flags;
};
/* Memory map */
#define LIMINE_MEMMAP_REQUEST { LIMINE_COMMON_MAGIC, 0x67cf3d9d378a806f, 0xe304acdfc50c3c62 }
#define LIMINE_MEMMAP_USABLE 0
#define LIMINE_MEMMAP_RESERVED 1
#define LIMINE_MEMMAP_ACPI_RECLAIMABLE 2
#define LIMINE_MEMMAP_ACPI_NVS 3
#define LIMINE_MEMMAP_BAD_MEMORY 4
#define LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE 5
#define LIMINE_MEMMAP_KERNEL_AND_MODULES 6
#define LIMINE_MEMMAP_FRAMEBUFFER 7
struct limine_memmap_entry {
uint64_t base;
uint64_t length;
uint64_t type;
};
struct limine_memmap_response {
uint64_t revision;
uint64_t entry_count;
LIMINE_PTR(struct limine_memmap_entry **) entries;
};
struct limine_memmap_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_memmap_response *) response;
};
/* Entry point */
#define LIMINE_ENTRY_POINT_REQUEST { LIMINE_COMMON_MAGIC, 0x13d86c035a1cd3e1, 0x2b0caa89d8f3026a }
typedef void (*limine_entry_point)(void);
struct limine_entry_point_response {
uint64_t revision;
};
struct limine_entry_point_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_entry_point_response *) response;
LIMINE_PTR(limine_entry_point) entry;
};
/* Kernel File */
#define LIMINE_KERNEL_FILE_REQUEST { LIMINE_COMMON_MAGIC, 0xad97e90e83f1ed67, 0x31eb5d1c5ff23b69 }
struct limine_kernel_file_response {
uint64_t revision;
LIMINE_PTR(struct limine_file *) kernel_file;
};
struct limine_kernel_file_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_kernel_file_response *) response;
};
/* Module */
#define LIMINE_MODULE_REQUEST { LIMINE_COMMON_MAGIC, 0x3e7e279702be32af, 0xca1c4f3bd1280cee }
#define LIMINE_INTERNAL_MODULE_REQUIRED (1 << 0)
struct limine_internal_module {
LIMINE_PTR(const char *) path;
LIMINE_PTR(const char *) cmdline;
uint64_t flags;
};
struct limine_module_response {
uint64_t revision;
uint64_t module_count;
LIMINE_PTR(struct limine_file **) modules;
};
struct limine_module_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_module_response *) response;
/* Request revision 1 */
uint64_t internal_module_count;
LIMINE_PTR(struct limine_internal_module **) internal_modules;
};
/* RSDP */
#define LIMINE_RSDP_REQUEST { LIMINE_COMMON_MAGIC, 0xc5e77b6b397e7b43, 0x27637845accdcf3c }
struct limine_rsdp_response {
uint64_t revision;
LIMINE_PTR(void *) address;
};
struct limine_rsdp_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_rsdp_response *) response;
};
/* SMBIOS */
#define LIMINE_SMBIOS_REQUEST { LIMINE_COMMON_MAGIC, 0x9e9046f11e095391, 0xaa4a520fefbde5ee }
struct limine_smbios_response {
uint64_t revision;
LIMINE_PTR(void *) entry_32;
LIMINE_PTR(void *) entry_64;
};
struct limine_smbios_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_smbios_response *) response;
};
/* EFI system table */
#define LIMINE_EFI_SYSTEM_TABLE_REQUEST { LIMINE_COMMON_MAGIC, 0x5ceba5163eaaf6d6, 0x0a6981610cf65fcc }
struct limine_efi_system_table_response {
uint64_t revision;
LIMINE_PTR(void *) address;
};
struct limine_efi_system_table_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_efi_system_table_response *) response;
};
/* Boot time */
#define LIMINE_BOOT_TIME_REQUEST { LIMINE_COMMON_MAGIC, 0x502746e184c088aa, 0xfbc5ec83e6327893 }
struct limine_boot_time_response {
uint64_t revision;
int64_t boot_time;
};
struct limine_boot_time_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_boot_time_response *) response;
};
/* Kernel address */
#define LIMINE_KERNEL_ADDRESS_REQUEST { LIMINE_COMMON_MAGIC, 0x71ba76863cc55f63, 0xb2644a48c516a487 }
struct limine_kernel_address_response {
uint64_t revision;
uint64_t physical_base;
uint64_t virtual_base;
};
struct limine_kernel_address_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_kernel_address_response *) response;
};
/* Device Tree Blob */
#define LIMINE_DTB_REQUEST { LIMINE_COMMON_MAGIC, 0xb40ddb48fb54bac7, 0x545081493f81ffb7 }
struct limine_dtb_response {
uint64_t revision;
LIMINE_PTR(void *) dtb_ptr;
};
struct limine_dtb_request {
uint64_t id[4];
uint64_t revision;
LIMINE_PTR(struct limine_dtb_response *) response;
};
#ifdef __cplusplus
}
#endif
#endif

46
src/arch/x86/limine_fb.c Normal file

@@ -0,0 +1,46 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#include "limine_fb.h"
#include <stddef.h>
#include "kmem.h"
static volatile struct limine_framebuffer_request framebuffer_request = {
.id = LIMINE_FRAMEBUFFER_REQUEST,
.revision = 0};
int framebuffer_count = 0;
struct limine_framebuffer framebuffers[10];
struct {
void *base;
uint64_t len;
} framebufferAddrs[10];
void limine_fb_save_response(struct AddressSpace* boot_address_space) {
if (framebuffer_request.response == NULL || framebuffer_request.response->framebuffer_count < 1) {
framebuffer_count = 0;
return;
}
framebuffer_count = framebuffer_request.response->framebuffer_count;
if (framebuffer_count >= 10) framebuffer_count = 10;
for (int i = 0; i < framebuffer_count; i++) {
memcpy(&framebuffers[i], framebuffer_request.response->framebuffers[i], sizeof(struct limine_framebuffer));
framebufferAddrs[i].base = virt2real(framebuffers[i].address, boot_address_space);
}
}
void limine_fb_remap(struct AddressSpace* space) {
for (int i = 0; i < framebuffer_count; i++) {
void *base = framebuffers[i].address;
void *realbase = framebufferAddrs[i].base;
// TODO: Proper map
for (int j = 0; j < 100000; j++) {// j, not i: don't shadow the framebuffer index above
map(base + j * 4096, realbase + j * 4096, PAGE_RW, space);
}
}
_tlb_flush();
}
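The remap loop above maps a flat 100000 pages per framebuffer (hence the TODO). A tighter bound can be computed from the mode itself; a sketch, not in this commit:

#include <stddef.h>
#include "limine.h"

static size_t fb_pages(const struct limine_framebuffer *fb) {
    // pitch is bytes per scanline, so pitch * height spans the whole framebuffer
    return ((size_t) (fb->pitch * fb->height) + 0xFFF) >> 12;// round up to whole 4 KiB pages
}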

19
src/arch/x86/limine_fb.h Normal file

@@ -0,0 +1,19 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#ifndef OS1_LIMINE_FB_H
#define OS1_LIMINE_FB_H
#include "limine.h"
#include "paging.h"
void limine_fb_save_response(struct AddressSpace* boot_address_space);
void limine_fb_remap(struct AddressSpace* space);
extern int framebuffer_count;
extern struct limine_framebuffer framebuffers[10];
#endif//OS1_LIMINE_FB_H

29
src/arch/x86/limine_mm.c Normal file

@@ -0,0 +1,29 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#include "limine_mm.h"
#include "kmem.h"
#include "limine.h"
static volatile struct limine_memmap_request memmap_request = {
.id = LIMINE_MEMMAP_REQUEST,
.revision = 0};
unsigned int limine_mm_count;
struct limine_memmap_entry limine_mm_entries[LIMINE_MM_MAX];
unsigned int limine_mm_overflow;
void limine_mm_save_response() {
if (memmap_request.response == NULL) {// mirror the NULL check limine_fb.c does
limine_mm_count = 0;
return;
}
limine_mm_count = memmap_request.response->entry_count;
if (limine_mm_count > LIMINE_MM_MAX) {
limine_mm_count = LIMINE_MM_MAX;
limine_mm_overflow = 1;
} else {
limine_mm_overflow = 0;
}
for (unsigned int i = 0; i < limine_mm_count; i++) {
memcpy(&limine_mm_entries[i], memmap_request.response->entries[i], sizeof(struct limine_memmap_entry));
}
}

18
src/arch/x86/limine_mm.h Normal file

@@ -0,0 +1,18 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#ifndef OS1_LIMINE_MM_H
#define OS1_LIMINE_MM_H
#include "limine.h"
#define LIMINE_MM_MAX 256
extern unsigned int limine_mm_count;
extern struct limine_memmap_entry limine_mm_entries[LIMINE_MM_MAX];
extern unsigned int limine_mm_overflow;
void limine_mm_save_response();
#endif//OS1_LIMINE_MM_H

64
src/arch/x86/linker.ld Normal file

@@ -0,0 +1,64 @@
/* Tell the linker that we want an x86_64 ELF64 output file */
OUTPUT_FORMAT(elf64-x86-64)
OUTPUT_ARCH(i386:x86-64)
/* We want the symbol _start to be our entry point */
ENTRY(_start)
/* Define the program headers we want so the bootloader gives us the right */
/* MMU permissions */
PHDRS
{
text PT_LOAD FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */
rodata PT_LOAD FLAGS((1 << 2)) ; /* Read only */
data PT_LOAD FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */
dynamic PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Dynamic PHDR for relocations */
}
SECTIONS
{
/* We want to be placed in the topmost 2GiB of the address space, for optimisations */
/* and because that is what the Limine spec mandates. */
/* Any address in this region will do, but often 0xffffffff80000000 is chosen as */
/* that is the beginning of the region. */
. = 0xffffffff80000000;
.text : {
*(.text .text.*)
} :text
/* Move to the next memory page for .rodata */
. += CONSTANT(MAXPAGESIZE);
.rodata : {
*(.rodata .rodata.*)
} :rodata
/* Move to the next memory page for .data */
. += CONSTANT(MAXPAGESIZE);
.data : {
*(.gdt .gdt.*)
*(.data .data.*)
} :data
/* Dynamic section for relocations, both in its own PHDR and inside data PHDR */
.dynamic : {
*(.dynamic)
} :data :dynamic
/* NOTE: .bss needs to be the last thing mapped to :data, otherwise lots of */
/* unnecessary zeros will be written to the binary. */
/* If you need, for example, .init_array and .fini_array, those should be placed */
/* above this. */
.bss : {
*(.bss .bss.*)
*(COMMON)
} :data
/* Discard .note.* and .eh_frame since they may cause issues on some hosts. */
/DISCARD/ : {
*(.eh_frame)
*(.note .note.*)
}
}

130
src/arch/x86/memman.c Normal file

@@ -0,0 +1,130 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#include "memman.h"
#include "misc.h"
#include "mutex.h"
#include "paging.h"
#include "serial.h"
#include <stddef.h>
#define MAXGB 32ULL
#define BITMAP_SIZE (((MAXGB) *1024ULL * 1024ULL) / (16ULL))
#define MAX_PID (((BITMAP_SIZE) *4) - 4)
// Expected to be nulled by the bootloader
static struct FourPages used_bitmap[BITMAP_SIZE];
static struct Mutex memman_lock;
static uint64_t maxPid = 0;// Past the end
static uint64_t minPid = 0;
static uint64_t totalMem = 0;// Past the end
static uint64_t roundup4k(uint64_t addr) {
if ((addr & 0xFFF) == 0) return addr;
else {
return (addr + 0x1000) & (~(0xFFFULL));
}
}
static uint64_t rounddown4k(uint64_t addr) {
if ((addr & 0xFFF) == 0) return addr;
else {
return (addr) & (~(0xFFFULL));
}
}
void setSts(uint64_t pid, enum PageStatus sts) {
uint64_t rounddown = pid & (~(0b11ULL));
uint64_t idx = rounddown >> 2;
switch (pid & 0b11ULL) {
case 0:
used_bitmap[idx].first = sts;
break;
case 1:
used_bitmap[idx].second = sts;
break;
case 2:
used_bitmap[idx].third = sts;
break;
case 3:
used_bitmap[idx].fourth = sts;
break;
}
}
enum PageStatus getSts(uint64_t pid) {
uint64_t rounddown = pid & (~(0b11ULL));
uint64_t idx = rounddown >> 2;
switch (pid & 0b11ULL) {
case 0:
return used_bitmap[idx].first;
case 1:
return used_bitmap[idx].second;
case 2:
return used_bitmap[idx].third;
case 3:
return used_bitmap[idx].fourth;
}
assert2(0, "Error");
}
void parse_limine_memmap(struct limine_memmap_entry *entries, unsigned int num, uint64_t what_is_considered_free) {
struct limine_memmap_entry *entry = entries;
for (unsigned int i = 0; i < num; i++, entry++) {
if (entry->type != what_is_considered_free) continue;
uint64_t roundbase = roundup4k(entry->base);
if (roundbase >= (entry->base + entry->length)) continue;
uint64_t len = rounddown4k(entry->length - (roundbase - entry->base));
if (len == 0) continue;
uint64_t pid = roundbase >> 12;
if (minPid == 0 || pid < minPid) minPid = pid;
uint64_t pidend = (roundbase + len) >> 12;
if (pidend >= MAX_PID) pidend = MAX_PID - 1;
for (uint64_t cp = pid; cp < pidend; cp++)
if (getSts(cp) != MEMMAN_STATE_USED)
setSts(cp, MEMMAN_STATE_FREE);
totalMem += (pidend - pid) * 4;
if (pidend > maxPid) maxPid = pidend;
}
}
void *get4k() {
m_lock(&memman_lock);
if (totalMem == 0) return NULL;
uint64_t curPid = minPid;
while (getSts(curPid) != MEMMAN_STATE_FREE && curPid < maxPid)
minPid = curPid++;
if (curPid >= maxPid) return NULL;
totalMem -= 4;
assert2(getSts(curPid) == MEMMAN_STATE_FREE, "Sanity check");
setSts(curPid, MEMMAN_STATE_USED);
m_unlock(&memman_lock);
return (void *) (HHDM_P2V(curPid << 12));
}
void free4k(void *page) {
m_lock(&memman_lock);
if ((uint64_t) page >= HHDM_BEGIN) page = (void *) HHDM_V2P(page);
else
assert2(0, "Tried to free memory not in HHDM!");
uint64_t roundbase = rounddown4k((uint64_t) page);
assert2(((uint64_t) page == roundbase), "Tried to free unaligned memory!");
uint64_t pid = (uint64_t) page >> 12;
assert2(getSts(pid) == MEMMAN_STATE_USED, "Tried to free memory not allocated by the allocator!");
setSts(pid, MEMMAN_STATE_FREE);
totalMem += 4;
if (minPid > pid) minPid = pid;
m_unlock(&memman_lock);
}
uint64_t get_free() {
return totalMem;
}

29
src/arch/x86/memman.h Normal file
View File

@@ -0,0 +1,29 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#ifndef OS1_MEMMAN_H
#define OS1_MEMMAN_H
#include "limine.h"
enum PageStatus {
MEMMAN_STATE_FREE = 1,
MEMMAN_STATE_USED = 2,
MEMMAN_STATE_RESERVED = 0,
MEMMAN_STATE_RECLAIMABLE = 3,
};
struct FourPages {
enum PageStatus first : 2;
enum PageStatus second : 2;
enum PageStatus third : 2;
enum PageStatus fourth : 2;
};
void parse_limine_memmap(struct limine_memmap_entry *entries, unsigned int num, uint64_t what_is_considered_free);
void *get4k();
void free4k(void *page);
uint64_t get_free();
#endif//OS1_MEMMAN_H

27
src/arch/x86/misc.asm Normal file
View File

@@ -0,0 +1,27 @@
[BITS 64]
section .text
global _sse_setup:function (_sse_setup.end - _sse_setup)
_sse_setup:
mov eax, 0x1
cpuid
test edx, 1<<25
jz .noSSE
;SSE is available
;now enable SSE and the like
mov rax, cr0
and ax, 0xFFFB ;clear coprocessor emulation CR0.EM
or ax, 0x2 ;set coprocessor monitoring CR0.MP
; TODO: set this up properly, and the FPU
or ax, 1<<5 ;set native exceptions CR0.NE
mov cr0, rax
mov rax, cr4
or ax, 3 << 9 ;set CR4.OSFXSR and CR4.OSXMMEXCPT at the same time
mov cr4, rax
ret
.noSSE:
hlt
jmp .noSSE
.end:

41
src/arch/x86/misc.c Normal file
View File

@@ -0,0 +1,41 @@
//
// Created by Stepan Usatiuk on 13.08.2023.
//
void _hcf() {
while (1)
asm volatile("cli; hlt");
}
char *itoa(int value, char *str, int base) {
char *rc;
char *ptr;
char *low;
// Check for supported base.
if (base < 2 || base > 36) {
*str = '\0';
return str;
}
rc = ptr = str;
// Set '-' for negative decimals.
if (value < 0 && base == 10) {
*ptr++ = '-';
}
// Remember where the numbers start.
low = ptr;
// The actual conversion.
do {
// Modulo is negative for negative value. This trick makes abs() unnecessary.
*ptr++ = "zyxwvutsrqponmlkjihgfedcba9876543210123456789abcdefghijklmnopqrstuvwxyz"[35 + value % base];
value /= base;
} while (value);
// Terminating the string.
*ptr-- = '\0';
// Invert the numbers.
while (low < ptr) {
char tmp = *low;
*low++ = *ptr;
*ptr-- = tmp;
}
return rc;
}

58
src/arch/x86/misc.h Normal file
View File

@@ -0,0 +1,58 @@
#ifndef OS1_MISC_H
#define OS1_MISC_H
#include <stdint.h>
void _sse_setup();
void _hcf();
#define barrier() __asm__ __volatile__ ("" ::: "memory");
static inline uint64_t *get_cr3() {
uint64_t *cr3;
asm("mov %%cr3, %0"
: "=rm"(cr3));
return cr3;
}
static inline uint64_t flags() {
uint64_t flags;
asm volatile("pushf\n\t"
"pop %0"
: "=g"(flags));
return flags;
}
static inline int are_interrupts_enabled() {
return (flags() & (1 << 9));
}
static inline unsigned long save_irqdisable(void) {
unsigned long flags;
asm volatile("pushf\n\tcli\n\tpop %0"
: "=r"(flags)
:
: "memory");
return flags;
}
static inline void irqrestore(unsigned long flags) {
asm("push %0\n\tpopf"
:
: "rm"(flags)
: "memory", "cc");
}
#define NO_INT(x) \
{ \
unsigned long f = save_irqdisable(); \
x \
irqrestore(f); \
}
char *itoa(int value, char *str, int base);
#endif

75
src/arch/x86/mutex.c Normal file
View File

@@ -0,0 +1,75 @@
//
// Created by Stepan Usatiuk on 20.08.2023.
//
#include "mutex.h"
#include "serial.h"
#include "task.h"
#include "timer.h"
void m_init(struct Mutex *m) {
atomic_init(&m->locked, false);
m->waiters = NULL;
m->spin_success = 127;
m->owner = NULL;
}
bool m_try_lock(struct Mutex *m) {
volatile atomic_bool expected = ATOMIC_VAR_INIT(false);
if (!atomic_compare_exchange_strong(&m->locked, &expected, true)) {
return false;
}
m->owner = cur_task();
return true;
}
void m_spin_lock(struct Mutex *m) {
while (!m_try_lock(m)) { __builtin_ia32_pause(); }
}
void m_lock(struct Mutex *m) {
bool spin_success = false;
if (m_try_lock(m)) {
if (m->spin_success < 255)
m->spin_success++;
return;
}
if (m->spin_success >= 127) {
uint64_t startMicros = micros;
while (micros - startMicros < 10) {
if (m_try_lock(m)) {
spin_success = true;
break;
}
__builtin_ia32_pause();
}
}
if (spin_success) {
if (m->spin_success < 255)
m->spin_success++;
return;
} else {
if (m->spin_success > 0)
m->spin_success--;
while (!m_try_lock(m)) {
wait_m_on_self(m);
}
}
}
void m_unlock(struct Mutex *m) {
volatile atomic_bool expected = ATOMIC_VAR_INIT(true);
if (!atomic_compare_exchange_strong(&m->locked, &expected, false))
writestr("Unlocking an unlocked mutex!\n");
m_unlock_sched_hook(m);
}
bool m_test(struct Mutex *m) {
return atomic_load(&m->locked);
}

36
src/arch/x86/mutex.h Normal file
View File

@@ -0,0 +1,36 @@
//
// Created by Stepan Usatiuk on 20.08.2023.
//
#ifndef OS1_MUTEX_H
#define OS1_MUTEX_H
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#if !(ATOMIC_BOOL_LOCK_FREE == 2)
#error Atomic bool isnt lock free!
#endif
struct Mutex {
volatile atomic_bool locked;
struct TaskList *waiters;
struct Task *owner;
uint8_t spin_success;
};
static const struct Mutex DefaultMutex = {
.locked = ATOMIC_VAR_INIT(false),
.spin_success = 150,
.waiters = NULL};
void m_init(struct Mutex *m);
void m_lock(struct Mutex *m);
void m_spin_lock(struct Mutex *m);
bool m_try_lock(struct Mutex *m);
void m_unlock(struct Mutex *m);
bool m_test(struct Mutex *m);
#endif//OS1_MUTEX_H

9
src/arch/x86/paging.asm Normal file
View File

@@ -0,0 +1,9 @@
[BITS 64]
section .text
global _tlb_flush:function (_tlb_flush.end - _tlb_flush)
_tlb_flush:
mov rax, cr3
mov cr3, rax
ret
.end:

209
src/arch/x86/paging.c Normal file
View File

@@ -0,0 +1,209 @@
//
// Created by Stepan Usatiuk on 09.08.2023.
//
#include "paging.h"
#include "limine.h"
#include "memman.h"
#include "misc.h"
#include "serial.h"
struct AddressSpace *KERN_AddressSpace;
int init_addr_space(struct AddressSpace *space) {
assert2(space != NULL, "Got null!");
space->PML4 = get4k();
if (space->PML4 == NULL) return 1;
return 0;
}
// Returns a free page frame in HHDM
uint64_t *get_free_frame() {
uint64_t *res = get4k();
if (res)
for (int j = 0; j < 512; j++)
res[j] = 0;
return res;
}
static inline void invlpg(void *m) {
/* Clobber memory to avoid optimizer re-ordering access before invlpg, which may cause nasty bugs. */
asm volatile("invlpg (%0)"
:
: "b"(m)
: "memory");
}
void *virt2real(void *virt, struct AddressSpace *space) {
assert2(((uint64_t) virt & 0xFFF) == 0, "Trying to unmap non-aligned memory!");
// Assuming everything related to paging is HHDM
assert2((uint64_t) space->PML4 >= HHDM_BEGIN, "CR3 here must be in HDDM!");
assert2((uint64_t) space->PML4 < kernel_virt_base, "CR3 here must be in HDDM!");
uint64_t pml4i = (uint64_t) virt >> 39 & 0x01FF;
uint64_t pdpei = (uint64_t) virt >> 30 & 0x01FF;
uint64_t pdei = (uint64_t) virt >> 21 & 0x01FF;
uint64_t ptsi = (uint64_t) virt >> 12 & 0x01FF;
uint64_t *pml4e = space->PML4 + pml4i;
if (!((*pml4e) & PAGE_PRESENT)) return 0;
uint64_t *pdpeb = (uint64_t *) HHDM_P2V((*pml4e & 0x000FFFFFFFFFF000ULL));
uint64_t *pdpee = pdpeb + pdpei;
if (!((*pdpee) & PAGE_PRESENT)) return 0;
// Calculations here might be incorrect
if (*pdpee & PAGE_PS) return (void *) ((*pdpee & 0x000FFFFFFFFFF000ULL) | ((uint64_t) virt & 0x00000003FFFF000ULL));
uint64_t *pdeb = (uint64_t *) HHDM_P2V((*pdpee & 0x000FFFFFFFFFF000ULL));
uint64_t *pdee = pdeb + pdei;
if (!((*pdee) & PAGE_PRESENT)) return 0;
// Calculations here might be incorrect
if (*pdee & PAGE_PS) return (void *) ((*pdee & 0x000FFFFFFFFFF000ULL) | ((uint64_t) virt & 0x0000000001FF000ULL));
uint64_t *ptsb = (uint64_t *) HHDM_P2V((*pdee & 0x000FFFFFFFFFF000ULL));
uint64_t *ptse = ptsb + ptsi;
if (!((*ptse) & PAGE_PRESENT)) return 0;
return (void *) (*ptse & 0x000FFFFFFFFFF000ULL);
}
int map(void *virt, void *real, uint32_t flags, struct AddressSpace *space) {
assert2(((uint64_t) virt & 0xFFF) == 0, "Trying to map non-aligned memory!");
assert2(((uint64_t) real & 0xFFF) == 0, "Trying to map to non-aligned memory!");
// Assuming everything related to paging is HHDM
assert2((uint64_t) space->PML4 >= HHDM_BEGIN, "CR3 here must be in HDDM!");
assert2((uint64_t) space->PML4 < kernel_virt_base, "CR3 here must be in HDDM!");
uint64_t pml4i = (uint64_t) virt >> 39 & 0x01FF;
uint64_t pdpei = (uint64_t) virt >> 30 & 0x01FF;
uint64_t pdei = (uint64_t) virt >> 21 & 0x01FF;
uint64_t ptsi = (uint64_t) virt >> 12 & 0x01FF;
uint64_t *pml4e = space->PML4 + pml4i;
if (!(*pml4e & PAGE_PRESENT)) {
uint64_t *newp = get_free_frame();
assert2(newp != NULL, "Couldn't get a page frame!");
*pml4e |= PAGE_PRESENT | PAGE_RW | PAGE_USER;
*pml4e |= (uint64_t) HHDM_V2P(newp) & (uint64_t) 0x000FFFFFFFFFF000ULL;
}
uint64_t *pdpeb = (uint64_t *) HHDM_P2V((*pml4e & 0x000FFFFFFFFFF000ULL));
uint64_t *pdpee = &pdpeb[pdpei];
assert2(!(*pdpee & PAGE_PS), "Encountered an unexpected large mapping!");
if (!(*pdpee & PAGE_PRESENT)) {
uint64_t *newp = get_free_frame();
assert2(newp != NULL, "Couldn't get a page frame!");
*pdpee |= PAGE_PRESENT | PAGE_RW | PAGE_USER;
*pdpee |= (uint64_t) HHDM_V2P(newp) & (uint64_t) 0x000FFFFFFFFFF000ULL;
}
uint64_t *pdeb = (uint64_t *) HHDM_P2V((*pdpee & 0x000FFFFFFFFFF000ULL));
uint64_t *pdee = &pdeb[pdei];
assert2(!(*pdee & PAGE_PS), "Encountered an unexpected large mapping!");
if (!(*pdee & PAGE_PRESENT)) {
uint64_t *newp = get_free_frame();
assert2(newp != NULL, "Couldn't get a page frame!");
*pdee |= PAGE_PRESENT | PAGE_RW | PAGE_USER;
*pdee |= (uint64_t) HHDM_V2P(newp) & (uint64_t) 0x000FFFFFFFFFF000ULL;
}
uint64_t *ptsb = (uint64_t *) HHDM_P2V((*pdee & 0x000FFFFFFFFFF000ULL));
uint64_t *ptse = &ptsb[ptsi];
*ptse = ((uint64_t) real & 0x000FFFFFFFFFF000ULL) | (flags & 0xFFF) | PAGE_PRESENT;
invlpg((void *) ((uint64_t) virt & 0x000FFFFFFFFFF000ULL));
return 1;
}
int unmap(void *virt, struct AddressSpace *space) {
assert2(((uint64_t) virt & 0xFFF) == 0, "Trying to map non-aligned memory!");
// Assuming everything related to paging is HHDM
assert2((uint64_t) space->PML4 >= HHDM_BEGIN, "CR3 here must be in HDDM!");
assert2((uint64_t) space->PML4 < kernel_virt_base, "CR3 here must be in HDDM!");
uint64_t pml4i = (uint64_t) virt >> 39 & 0x01FF;
uint64_t pdpei = (uint64_t) virt >> 30 & 0x01FF;
uint64_t pdei = (uint64_t) virt >> 21 & 0x01FF;
uint64_t ptsi = (uint64_t) virt >> 12 & 0x01FF;
uint64_t *pml4e = space->PML4 + pml4i;
assert((*pml4e & PAGE_PRESENT));
uint64_t *pdpeb = (uint64_t *) HHDM_P2V((*pml4e & 0x000FFFFFFFFFF000ULL));
uint64_t *pdpee = &pdpeb[pdpei];
assert2(!(*pdpee & PAGE_PS), "Encountered an unexpected large mapping!");
assert((*pdpee & PAGE_PRESENT));
uint64_t *pdeb = (uint64_t *) HHDM_P2V((*pdpee & 0x000FFFFFFFFFF000ULL));
uint64_t *pdee = &pdeb[pdei];
assert2(!(*pdee & PAGE_PS), "Encountered an unexpected large mapping!");
assert((*pdee & PAGE_PRESENT));
uint64_t *ptsb = (uint64_t *) HHDM_P2V((*pdee & 0x000FFFFFFFFFF000ULL));
uint64_t *ptse = &ptsb[ptsi];
assert(*ptse & PAGE_PRESENT);
*ptse = (*ptse) & (~PAGE_PRESENT);
invlpg((void *) ((uint64_t) virt & 0x000FFFFFFFFFF000ULL));
return 1;
}
static volatile struct limine_kernel_address_request kernel_address_request = {
.id = LIMINE_KERNEL_ADDRESS_REQUEST,
.revision = 0};
void limine_kern_save_response() {
kernel_phys_base = kernel_address_request.response->physical_base;
kernel_virt_base = kernel_address_request.response->virtual_base;
}
#define EARLY_PAGES_SIZE ((HHDM_SIZE + 1) * 2)
static uint64_t early_pages[EARLY_PAGES_SIZE][512] __attribute__((aligned(4096)));
static uint64_t early_pages_used = 0;
uintptr_t kernel_phys_base;
uintptr_t kernel_virt_base;
void map_hddm(uint64_t *pml4) {
assert2(kernel_virt_base != 0, "Kernel virt address not loaded!");
assert2(kernel_phys_base != 0, "Kernel phys address not loaded!");
// Assuming here that everything related to paging is identity mapped
// Which is true if the first bytes of memory, where the kernel is are identity mapped,
// Which is true if we're using Limine
for (uint64_t i = 0; i < HHDM_SIZE; i++) {
void *virt = (void *) (HHDM_BEGIN + i * 1024ULL * 1024ULL * 1024ULL);
void *real = (void *) (i * 1024ULL * 1024ULL * 1024ULL);
uint64_t pml4i = (uint64_t) virt >> 39 & 0x01FF;
uint64_t pdpei = (uint64_t) virt >> 30 & 0x01FF;
assert2((uint64_t) pml4 < 0x8000000000000000ULL, "CR3 here must be physical!");
uint64_t *pml4e = &(pml4[pml4i]);
if (!(*pml4e & PAGE_PRESENT)) {
assert2(early_pages_used < EARLY_PAGES_SIZE, "Couldn't get a page for HHDM!");
uint64_t *newp = early_pages[early_pages_used++];
for (int i = 0; i < 512; i++)
newp[i] = PAGE_RW;
*pml4e = PAGE_RW | PAGE_PRESENT;
*pml4e |= (uint64_t) KERN_V2P(newp) & (uint64_t) 0x000FFFFFFFFFF000ULL;
}
*pml4e |= PAGE_RW | PAGE_PRESENT;
uint64_t *pdpeb = (uint64_t *) (*pml4e & 0x000FFFFFFFFFF000ULL);
uint64_t *pdpee = &pdpeb[pdpei];
assert2((!(*pdpee & PAGE_PRESENT)), "HHDM area is already mapped!");
*pdpee = PAGE_RW | PAGE_PRESENT | PAGE_PS;
*pdpee |= (uint64_t) real & (uint64_t) 0x000FFFFFFFFFF000ULL;
}
_tlb_flush();
}

45
src/arch/x86/paging.h Normal file
View File

@@ -0,0 +1,45 @@
//
// Created by Stepan Usatiuk on 09.08.2023.
//
#ifndef OS1_PAGING_H
#define OS1_PAGING_H
#include <stddef.h>
#include <stdint.h>
struct AddressSpace {
// Pointer to PML4 in HDDM
uint64_t *PML4;
};
extern struct AddressSpace *KERN_AddressSpace;
int init_addr_space(struct AddressSpace *space);
extern uintptr_t kernel_phys_base;
extern uintptr_t kernel_virt_base;
void limine_kern_save_response();
#define KERN_V2P(a) ((((uintptr_t) (a) + kernel_phys_base) & ~kernel_virt_base))
#define KERN_P2V(a) ((((uintptr_t) (a) -kernel_phys_base) | kernel_virt_base))
#define HHDM_BEGIN 0xfffff80000000000ULL
#define HHDM_SIZE 32ULL// In GB
#define HHDM_V2P(a) ((((uintptr_t) (a)) & ~HHDM_BEGIN))
#define HHDM_P2V(a) ((((uintptr_t) (a)) | HHDM_BEGIN))
#define PAGE_PS (1 << 7)
#define PAGE_RW (1 << 1)
#define PAGE_USER (1 << 2)
#define PAGE_PRESENT (0x01ULL)
int map(void *virt, void *real, uint32_t flags, struct AddressSpace *space);
int unmap(void *virt, struct AddressSpace *space);
void *virt2real(void *virt, struct AddressSpace *space);
void map_hddm(uint64_t *pml4);
void _tlb_flush();
#endif//OS1_PAGING_H

62
src/arch/x86/serial.c Normal file
View File

@@ -0,0 +1,62 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#include "serial.h"
#include <stdint.h>
#include "io.h"
#include "task.h"
#define PORT 0x3f8// COM1
int init_serial() {
outb(PORT + 1, 0x00);// Disable all interrupts
outb(PORT + 3, 0x80);// Enable DLAB (set baud rate divisor)
outb(PORT + 0, 0x03);// Set divisor to 3 (lo byte) 38400 baud
outb(PORT + 1, 0x00);// (hi byte)
outb(PORT + 3, 0x03);// 8 bits, no parity, one stop bit
outb(PORT + 2, 0xC7);// Enable FIFO, clear them, with 14-byte threshold
outb(PORT + 4, 0x0B);// IRQs enabled, RTS/DSR set
outb(PORT + 4, 0x1E);// Set in loopback mode, test the serial chip
outb(PORT + 0, 0xAE);// Test serial chip (send byte 0xAE and check if serial returns same byte)
// Check if serial is faulty (i.e: not same byte as sent)
if (inb(PORT + 0) != 0xAE) {
return 1;
}
// If serial is not faulty set it in normal operation mode
// (not-loopback with IRQs enabled and OUT#1 and OUT#2 bits enabled)
outb(PORT + 4, 0x0F);
return 0;
}
int serial_received() {
return inb(PORT + 5) & 1;
}
char read_serial() {
while (serial_received() == 0) {
yield_self();
}
return inb(PORT);
}
int is_transmit_empty() {
return inb(PORT + 5) & 0x20;
}
void write_serial(char a) {
while (is_transmit_empty() == 0) {
yield_self();
}
outb(PORT, a);
}
void writestr(const char *a) {
while (*a != '\0') write_serial(*a++);
}

29
src/arch/x86/serial.h Normal file
View File

@@ -0,0 +1,29 @@
//
// Created by Stepan Usatiuk on 12.08.2023.
//
#ifndef OS1_SERIAL_H
#define OS1_SERIAL_H
#include "misc.h"
int init_serial();
int serial_received();
char read_serial();
int is_transmit_empty();
void write_serial(char a);
void writestr(const char *a);
static inline void _assert2(int val, const char *msg) {
if (!val) {
writestr(msg);
_hcf();
}
}
#define assert2(x, y) _assert2(x, y)
#define assert(x) _assert2(x, "Assertion failed")
#endif//OS1_SERIAL_H

42
src/arch/x86/task.asm Normal file
View File

@@ -0,0 +1,42 @@
[BITS 64]
%include "task.inc.asm"
extern switch_task
extern gdt_code
extern gdt_null
extern gdt_data
; FIXME: 75% chance this leaks stack or something
section .text
global _yield_self_kern:function (_yield_self_kern.end - _yield_self_kern)
_yield_self_kern:
pop rsi ; save the return ip
mov r8, rsp ; save cur sp
mov r10, gdt_null
mov r9, gdt_data
mov r11, gdt_code
sub r9, r10
sub r11, r10
PUSH r9; Push data segment
push r8 ; current sp
pushf ; eflags
PUSH r11; Push code segment
push rsi ; instruction address to return to
pushaq
mov rdi, 0xdeadbe3fdeadb3ef ; IDT_GUARD
push rdi ; IDT_GUARD
; pass the "pointer" to the stack as pointer to the interrupt_frame argument,
; the stack and the struct must match!
mov rdi, rsp
call switch_task
add rsp, 8 ; remove IDT_GUARD
popaq
iretq
.end:

380
src/arch/x86/task.c Normal file
View File

@@ -0,0 +1,380 @@
//
// Created by Stepan Usatiuk on 18.08.2023.
//
#include "task.h"
#include "cv.h"
#include "gdt.h"
#include "kmem.h"
#include "misc.h"
#include "mutex.h"
#include "paging.h"
#include "serial.h"
#include "timer.h"
#include "tty.h"
void sanity_check_frame(struct task_frame *cur_frame) {
assert2((void *) cur_frame->ip != NULL, "Sanity check");
assert2((void *) cur_frame->sp != NULL, "Sanity check");
assert2(cur_frame->guard == IDT_GUARD, "IDT Guard wrong!");
assert2((cur_frame->ss == GDTSEL(gdt_data) || cur_frame->ss == GDTSEL(gdt_data_user)), "SS wrong!");
}
struct TaskListNode {
struct Task *task;
struct TaskListNode *next;
};
struct TaskList {
struct TaskListNode *cur;
struct TaskListNode *last;
};
struct TaskListNode *RunningTask;
// Should be touched only in the scheduler
struct TaskList NextTasks;
// New tasks
struct Mutex NewTasks_lock = DefaultMutex;
struct TaskList NewTasks;
// Unblocked tasks
struct Mutex UnblockedTasks_lock = DefaultMutex;
struct TaskList UnblockedTasks;
// Task freer
struct Mutex TasksToFree_lock = DefaultMutex;
struct CV TasksToFree_cv = DefaultCV;
struct TaskList TasksToFree;
struct TaskList TasksToFreeTemp;
// Waiting
//struct Mutex WaitingTasks_lock = DefaultMutex;
struct TaskList WaitingTasks;
static volatile atomic_bool initialized = false;
static void free_task(struct Task *t) {
kfree(t->stack);
kfree(t->name);
kfree(t);
}
static void free_task_list_node(struct TaskListNode *t) {
kfree(t);
}
static struct TaskListNode *new_task_list_node() {
struct TaskListNode *ret = kmalloc(sizeof(struct TaskListNode));
ret->task = NULL;
ret->next = NULL;
return ret;
}
static void append_task(struct TaskList *list, struct Task *task) {
if (list == &NextTasks) {
assert2(task->state == TS_RUNNING, "Trying to add blocked task to run queue!");
}
struct TaskListNode *newNode = new_task_list_node();
newNode->task = task;
if (!list->cur) {
list->cur = newNode;
list->last = newNode;
} else {
list->last->next = newNode;
list->last = newNode;
}
}
static void append_task_node(struct TaskList *list, struct TaskListNode *newNode) {
if (list == &NextTasks) {
assert2(newNode->task->state == TS_RUNNING, "Trying to add blocked task to run queue!");
}
newNode->next = NULL;
if (!list->cur) {
assert(list->last == NULL);
list->cur = newNode;
list->last = newNode;
} else {
list->last->next = newNode;
list->last = newNode;
}
}
static struct Task *peek_front(struct TaskList *list) {
struct Task *ret = NULL;
if (list->cur) {
ret = list->cur->task;
}
return ret;
}
static struct Task *pop_front(struct TaskList *list) {
struct Task *ret = NULL;
if (list->cur) {
struct TaskListNode *node;
node = list->cur;
ret = node->task;
list->cur = node->next;
free_task_list_node(node);
if (list->cur == NULL) list->last = NULL;
}
return ret;
}
static struct TaskListNode *pop_front_node(struct TaskList *list) {
struct TaskListNode *ret = NULL;
if (list->cur) {
struct TaskListNode *node;
node = list->cur;
ret = node;
list->cur = node->next;
if (list->cur == NULL) list->last = NULL;
} else {
assert(list->last == NULL);
}
if (ret) ret->next = NULL;
return ret;
}
_Noreturn static void task_freer() {
while (true) {
m_lock(&TasksToFree_lock);
cv_wait(&TasksToFree_lock, &TasksToFree_cv);
assert2(peek_front(&TasksToFree) != NULL, "Sanity check");
while (peek_front(&TasksToFree) && peek_front(&TasksToFree)->state == TS_TO_REMOVE) {
free_task(pop_front(&TasksToFree));
}
m_unlock(&TasksToFree_lock);
}
}
struct Task *new_ktask(void(*fn), char *name) {
struct Task *new = kmalloc(sizeof(struct Task));
new->stack = kmalloc(TASK_SS);
new->name = kmalloc(strlen(name) + 1);
strcpy(name, new->name);
new->frame.sp = ((uint64_t) (&((void *) new->stack)[TASK_SS - 1]) & (~0xFULL));// Ensure 16byte alignment
new->frame.ip = (uint64_t) fn;
new->frame.cs = GDTSEL(gdt_code);
new->frame.ss = GDTSEL(gdt_data);
new->frame.flags = flags();
new->frame.guard = IDT_GUARD;
new->addressSpace = KERN_AddressSpace;
new->state = TS_RUNNING;
new->mode = TASKMODE_KERN;
m_lock(&NewTasks_lock);
append_task(&NewTasks, new);
m_unlock(&NewTasks_lock);
return new;
}
void init_tasks() {
// FIXME: not actually thread-safe, but it probably doesn't matter
assert2(!atomic_load(&initialized), "Tasks should be initialized once!");
new_ktask(task_freer, "freer");
atomic_store(&initialized, true);
}
void remove_self() {
RunningTask->task->state = TS_TO_REMOVE;
yield_self();
assert2(0, "should be removed!");
}
void sleep_self(uint64_t diff) {
RunningTask->task->sleep_until = micros + diff;
RunningTask->task->state = TS_TO_SLEEP;
yield_self();
}
void yield_self() {
if (!RunningTask) return;
NO_INT(
if (RunningTask->task->mode == TASKMODE_KERN) {
_yield_self_kern();
})
}
void switch_task(struct task_frame *cur_frame) {
if (!atomic_load(&initialized)) return;
sanity_check_frame(cur_frame);
assert2(!are_interrupts_enabled(), "Switching tasks with enabled interrupts!");
if (RunningTask) {
RunningTask->task->frame = *cur_frame;
if (RunningTask->task->state == TS_RUNNING) {
assert2(RunningTask->next == NULL, "next should be removed from RunningTask!");
append_task_node(&NextTasks, RunningTask);
} else if (RunningTask->task->state == TS_TO_SLEEP) {
if (!WaitingTasks.cur) {
assert(WaitingTasks.last == NULL);
WaitingTasks.cur = RunningTask;
WaitingTasks.last = RunningTask;
} else {
struct TaskListNode *prev = NULL;
struct TaskListNode *cur = WaitingTasks.cur;
while (cur && cur->task->sleep_until <= RunningTask->task->sleep_until) {
prev = cur;
cur = cur->next;
}
if (prev) {
prev->next = RunningTask;
RunningTask->next = cur;
if (cur == NULL) WaitingTasks.last = RunningTask;
} else {
RunningTask->next = WaitingTasks.cur;
WaitingTasks.cur = RunningTask;
}
// if (cur == WaitingTasks.last) WaitingTasks.last = RunningTask;
}
} else if (RunningTask->task->state == TS_TO_REMOVE) {
append_task_node(&TasksToFreeTemp, RunningTask);
}
}
if (TasksToFreeTemp.cur && !m_test(&UnblockedTasks_lock) && m_try_lock(&TasksToFree_lock)) {
TasksToFree.cur = TasksToFreeTemp.cur;
TasksToFree.last = TasksToFreeTemp.last;
TasksToFreeTemp.cur = NULL;
TasksToFreeTemp.last = NULL;
cv_notify_one(&TasksToFree_cv);
m_unlock(&TasksToFree_lock);
}
RunningTask = NULL;
if (m_try_lock(&NewTasks_lock)) {
while (peek_front(&NewTasks)) {
append_task_node(&NextTasks, pop_front_node(&NewTasks));
}
m_unlock(&NewTasks_lock);
}
if (m_try_lock(&UnblockedTasks_lock)) {
while (peek_front(&UnblockedTasks)) {
append_task_node(&NextTasks, pop_front_node(&UnblockedTasks));
}
m_unlock(&UnblockedTasks_lock);
}
struct TaskListNode *next = pop_front_node(&NextTasks);
assert2(next != NULL, "Kernel left with no tasks!");
assert2(next->task != NULL, "Kernel left with no tasks!");
assert2(next->task->state == TS_RUNNING, "Blocked task in run queue!");
RunningTask = next;
*cur_frame = RunningTask->task->frame;
sanity_check_frame(cur_frame);
}
void switch_task_int(struct task_frame *cur_frame) {
static uint64_t lastSwitchMicros = 0;
uint64_t curMicros = micros;
assert2(!are_interrupts_enabled(), "Switching tasks with enabled interrupts!");
if ((curMicros - lastSwitchMicros) > 1) {
struct TaskListNode *node = WaitingTasks.cur;
while (node) {
if (node->task->sleep_until <= curMicros && node->task->state == TS_TO_SLEEP) {
assert2(node->task->sleep_until, "Sleeping until 0?");
node->task->sleep_until = 0;
node->task->state = TS_RUNNING;
append_task_node(&NextTasks, pop_front_node(&WaitingTasks));
node = WaitingTasks.cur;
} else {
break;
}
}
switch_task(cur_frame);
lastSwitchMicros = curMicros;
}
}
void wait_m_on_self(struct Mutex *m) {
if (!m->waiters) {
m->waiters = kmalloc(sizeof(struct TaskList));
m->waiters->cur = NULL;
m->waiters->last = NULL;
}
// TODO: lock-free?
NO_INT(append_task_node(m->waiters, RunningTask);
RunningTask->task->state = TS_BLOCKED;)
yield_self();
}
void m_unlock_sched_hook(struct Mutex *m) {
struct TaskListNode *new = NULL;
NO_INT(if (m->waiters) {
new = pop_front_node(m->waiters);
})
if (new) {
new->task->state = TS_RUNNING;
m_spin_lock(&UnblockedTasks_lock);
append_task_node(&UnblockedTasks, new);
m_unlock(&UnblockedTasks_lock);
}
}
void wait_cv_on_self(struct CV *cv) {
if (!cv->waiters) {
cv->waiters = kmalloc(sizeof(struct TaskList));
cv->waiters->cur = NULL;
cv->waiters->last = NULL;
}
// TODO: lock-free?
NO_INT(append_task_node(cv->waiters, RunningTask);
RunningTask->task->state = TS_BLOCKED;)
yield_self();
}
void cv_unlock_sched_hook(struct CV *cv, int who) {
struct TaskListNode *new = NULL;
do {
NO_INT(if (cv->waiters) {
new = pop_front_node(cv->waiters);
})
if (new) {
new->task->state = TS_RUNNING;
m_spin_lock(&UnblockedTasks_lock);
append_task_node(&UnblockedTasks, new);
m_unlock(&UnblockedTasks_lock);
}
} while (new && (who == CV_NOTIFY_ALL));
}
struct Task *cur_task() {
if (!RunningTask) return NULL;
return RunningTask->task;
}

55
src/arch/x86/task.h Normal file
View File

@@ -0,0 +1,55 @@
//
// Created by Stepan Usatiuk on 18.08.2023.
//
#ifndef OS1_TASK_H
#define OS1_TASK_H
#include <stdbool.h>
#include "idt.h"
#define TASK_SS 16384
struct Mutex;
struct CV;
enum TaskMode {
TASKMODE_KERN,
TASKMODE_USER
};
enum TaskState {
TS_RUNNING,
TS_BLOCKED,
TS_TO_REMOVE,
TS_TO_SLEEP
};
struct Task {
struct task_frame frame;
struct AddressSpace *addressSpace;
uint64_t *stack;
char *name;
enum TaskMode mode;
uint64_t sleep_until;
enum TaskState state;
};
struct Task *cur_task();
void init_tasks();
struct Task *new_ktask(void(*fn), char *name);
void remove_self();
void sleep_self(uint64_t diff);
void switch_task(struct task_frame *cur_frame);
void switch_task_int(struct task_frame *cur_frame);
void wait_m_on_self(struct Mutex *m);
void m_unlock_sched_hook(struct Mutex *m);
void wait_cv_on_self(struct CV *cv);
void stop_waiting_on(struct Mutex *m);
void yield_self();
void _yield_self_kern();// Expects the caller to save interrupt state
void cv_unlock_sched_hook(struct CV *cv, int who);
#endif//OS1_TASK_H

59
src/arch/x86/task.inc.asm Normal file
View File

@@ -0,0 +1,59 @@
[BITS 64]
; TODO: This is probably not enough
%macro pushaq 0
push rax
push rcx
push rdx
push rbx
push rbp
push rsi
push rdi
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
; Ensure 16-byte alignment
; This works as last bunch of bits in fxsave state aren't used
sub rsp, 512
mov rsi, rsp
add rsi, 32
mov rdi, 0xFFFFFFFFFFFFFFF0
and rsi, rdi
fxsave [rsi]
%endmacro
%macro popaq 0
; Ensure 16-byte alignment
; This works as last bunch of bits in fxsave state aren't used
mov rsi, rsp
add rsi, 32
mov rdi, 0xFFFFFFFFFFFFFFF0
and rsi, rdi
fxrstor [rsi]
add rsp, 512
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop rdi
pop rsi
pop rbp
pop rbx
pop rdx
pop rcx
pop rax
%endmacro

55
src/arch/x86/timer.c Normal file
View File

@@ -0,0 +1,55 @@
//
// Created by Stepan Usatiuk on 14.08.2023.
//
#include "timer.h"
#include "idt.h"
#include "io.h"
volatile uint64_t ticks;
volatile uint64_t micros;
volatile uint64_t millis;
unsigned read_pit_count(void) {
unsigned count = 0;
// Disable interrupts
// cli();
// al = channel in bits 6 and 7, remaining bits clear
outb(0x43, 0b0000000);
count = inb(0x40); // Low byte
count |= inb(0x40) << 8;// High byte
return count;
}
void set_pit_count(unsigned count) {
// Disable interrupts
// cli();
// Set low byte
outb(0x40, count & 0xFF); // Low byte
outb(0x40, (count & 0xFF00) >> 8);// High byte
return;
}
// Very rough but I don't care right now
#define RELOAD_VAL 2
#define FREQ = (1193182 / (RELOAD_VAL))
#define MICROS_PER_TICK 1
void init_timer() {
outb(0x43, 0b00110100);
set_pit_count(RELOAD_VAL);
IRQ_clear_mask(0);
}
void timer_tick() {
ticks++;
micros += MICROS_PER_TICK;
if (micros % 1000 == 0)
millis++;
}

17
src/arch/x86/timer.h Normal file
View File

@@ -0,0 +1,17 @@
//
// Created by Stepan Usatiuk on 14.08.2023.
//
#ifndef OS1_TIMER_H
#define OS1_TIMER_H
#include <stdint.h>
extern volatile uint64_t ticks;
extern volatile uint64_t micros;
extern volatile uint64_t millis;
void init_timer();
void timer_tick();
#endif//OS1_TIMER_H

72
src/arch/x86/tty.c Normal file
View File

@@ -0,0 +1,72 @@
//
// Created by Stepan Usatiuk on 25.08.2023.
//
#include "tty.h"
#include "kmem.h"
#include "mutex.h"
#include "serial.h"
static unsigned ttyNum = 0;
static struct Mutex ttysMutex = DefaultMutex;
struct ttys {
unsigned num;
struct tty *ttys;
};
struct ttys ttys = {.num = 0};
unsigned add_tty(struct tty_funcs funcs) {
m_lock(&ttysMutex);
if (ttyNum >= ttys.num) {
if (ttys.num == 0) {
ttys.ttys = kmalloc(sizeof(struct ttys) + sizeof(struct tty));
ttys.num = 1;
} else {
ttys.num *= 2;
ttys.ttys = krealloc(ttys.ttys, sizeof(struct ttys) + sizeof(struct tty) * ttys.num);
}
assert2(ttys.ttys != NULL, "Couldn't allocate memory for ttys!");
}
m_init(&ttys.ttys[ttyNum].lock);
ttys.ttys[ttyNum].id = ttyNum;
ttys.ttys[ttyNum].funcs = funcs;
ttyNum++;
m_unlock(&ttysMutex);
}
void tty_putchar(struct tty *tty, char c) {
m_lock(&tty->lock);
tty->funcs.putchar(c);
m_unlock(&tty->lock);
}
void tty_putstr(struct tty *tty, const char *str) {
m_lock(&tty->lock);
while (*str != '\0') tty->funcs.putchar(*str++);
m_unlock(&tty->lock);
}
void all_tty_putchar(char c) {
for (unsigned i = 0; i < ttyNum; i++) { tty_putchar(get_tty(i), c); }
}
void all_tty_putstr(const char *str) {
for (unsigned i = 0; i < ttyNum; i++) { tty_putstr(get_tty(i), str); }
}
unsigned get_num_ttys() {
return ttyNum;
}
struct tty *get_tty(unsigned n) {
if (n < get_num_ttys()) return &ttys.ttys[n];
else
return NULL;
}

31
src/arch/x86/tty.h Normal file
View File

@@ -0,0 +1,31 @@
//
// Created by Stepan Usatiuk on 25.08.2023.
//
#ifndef OS1_TTY_H
#define OS1_TTY_H
#include "mutex.h"
#include <stdint.h>
struct tty_funcs {
void (*putchar)(char);
};
struct tty {
unsigned id;
struct Mutex lock;
struct tty_funcs funcs;
};
unsigned add_tty(struct tty_funcs);
void tty_putchar(struct tty *tty, char c);
void tty_putstr(struct tty *tty, const char *str);
void all_tty_putchar(char c);
void all_tty_putstr(const char *str);
unsigned get_num_ttys();
struct tty *get_tty(unsigned n);
#endif//OS1_TTY_H

31
src/iso/CMakeLists.txt Normal file
View File

@@ -0,0 +1,31 @@
add_custom_target(iso_limine)
add_custom_target(iso
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/isodir/
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_SOURCE_DIR}/limine.cfg ${CMAKE_CURRENT_BINARY_DIR}/isodir/
COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:kernel> ${CMAKE_CURRENT_BINARY_DIR}/isodir/os2.elf
COMMAND xorriso -as mkisofs -b limine-bios-cd.bin -no-emul-boot -boot-load-size 4 -boot-info-table --efi-boot limine-uefi-cd.bin -efi-boot-part --efi-boot-image --protective-msdos-label ${CMAKE_CURRENT_BINARY_DIR}/isodir -o ${CMAKE_CURRENT_BINARY_DIR}/os2.iso
COMMAND ${tools}/limine/prefix/bin/limine bios-install ${CMAKE_CURRENT_BINARY_DIR}/os2.iso
VERBATIM
DEPENDS kernel
DEPENDS iso_limine
)
file(GLOB LIMINE_EFI_FILES ${tools}/limine/prefix/share/limine/*.EFI)
foreach (CurFile IN LISTS LIMINE_EFI_FILES)
add_custom_command(
TARGET iso_limine PRE_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/isodir/EFI/BOOT/
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CurFile} ${CMAKE_CURRENT_BINARY_DIR}/isodir/EFI/BOOT/
COMMENT "Copying limine efi file: ${CurFile}")
endforeach ()
file(GLOB LIMINE_BIN_FILES ${tools}/limine/prefix/share/limine/*.bin ${tools}/limine/prefix/share/limine/*.sys)
foreach (CurFile IN LISTS LIMINE_BIN_FILES)
add_custom_command(
TARGET iso_limine PRE_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/isodir/
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CurFile} ${CMAKE_CURRENT_BINARY_DIR}/isodir/
COMMENT "Copying limine bin file: ${CurFile}")
endforeach ()

19
src/iso/limine.cfg Normal file
View File

@@ -0,0 +1,19 @@
# Timeout in seconds that Limine will use before automatically booting.
TIMEOUT=1
# The entry name that will be displayed in the boot menu.
:os2 (KASLR on)
# We use the Limine boot protocol.
PROTOCOL=limine
# Path to the kernel to boot. boot:/// represents the partition on which limine.cfg is located.
KERNEL_PATH=boot:///os2.elf
# Same thing, but without KASLR.
:os2 (KASLR off)
PROTOCOL=limine
# Disable KASLR (it is enabled by default for relocatable kernels)
KASLR=no
KERNEL_PATH=boot:///os2.elf