Don't use huge pages for HHDM

1 GiB huge pages don't work on VirtualBox.
2024-03-23 00:47:49 +01:00
parent 11f64d15ba
commit 175ed74d75

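The old code mapped the higher-half direct map (HHDM) with 1 GiB pages (PAGE_PS set directly in the page-directory-pointer entries). 1 GiB pages are an optional CPU feature, and VirtualBox guests often don't get it, which is the likely reason the commit switches the HHDM to plain 4 KiB mappings built through all four paging levels. Instead of dropping huge pages unconditionally, a kernel could also probe for the feature; a minimal sketch (the cpu_has_1gib_pages helper is hypothetical, not part of this commit): CPUID leaf 0x80000001 reports 1 GiB page support in EDX bit 26.

    // Hypothetical probe (not in this commit): check for 1 GiB page
    // support before choosing a mapping granularity.
    // CPUID leaf 0x80000001 reports the "Page1GB" feature in EDX bit 26.
    #include <cpuid.h>
    #include <stdbool.h>

    static bool cpu_has_1gib_pages(void) {
        unsigned int eax, ebx, ecx, edx;
        // __get_cpuid returns 0 if the requested leaf is unavailable.
        if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
            return false;
        return (edx >> 26) & 1; // Page1GB ("pdpe1gb" in /proc/cpuinfo)
    }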

@@ -176,44 +176,75 @@ void limine_kern_save_response() {
 kernel_virt_base = kernel_address_request.response->virtual_base;
 }
 
-#define EARLY_PAGES_SIZE ((HHDM_SIZE + 1) * 2)
+#define EARLY_PAGES_SIZE ((HHDM_SIZE + 1) * 1024)
 static uint64_t early_pages[EARLY_PAGES_SIZE][512] __attribute__((aligned(PAGE_SIZE)));
 static uint64_t early_pages_used = 0;
 
 uintptr_t kernel_phys_base;
 uintptr_t kernel_virt_base;
 
-void map_hddm(uint64_t *pml4) {
+uint64_t *get_early_frame() {
+    assert2(early_pages_used < EARLY_PAGES_SIZE, "Couldn't get a page for HHDM!");
+    uint64_t *newp = early_pages[early_pages_used++];
+    for (int i = 0; i < 512; i++)
+        newp[i] = PAGE_RW;
+    return newp;
+}
+
+void map_hddm(uint64_t *pml4) {
     assert2(kernel_virt_base != 0, "Kernel virt address not loaded!");
     assert2(kernel_phys_base != 0, "Kernel phys address not loaded!");
     // Assuming here that everything related to paging is identity mapped
     // Which is true if the first bytes of memory, where the kernel is are identity mapped,
     // Which is true if we're using Limine
-    for (uint64_t i = 0; i < HHDM_SIZE; i++) {
-        void *virt = (void *) (HHDM_BEGIN + i * 1024ULL * 1024ULL * 1024ULL);
-        void *real = (void *) (i * 1024ULL * 1024ULL * 1024ULL);
-        uint64_t pml4i = (uint64_t) virt >> 39 & 0x01FF;
-        uint64_t pdpei = (uint64_t) virt >> 30 & 0x01FF;
+    for (uint64_t i = 0; i < HHDM_SIZE * 1024ULL * 1024ULL * 1024ULL; i += 4096) {
+        void *virt = (void *) (HHDM_BEGIN + i);
+        void *real = (void *) (i);
         assert2(((uint64_t) virt & 0xFFF) == 0, "Trying to map non-aligned memory!");
         assert2(((uint64_t) real & 0xFFF) == 0, "Trying to map to non-aligned memory!");
         // Assuming everything related to paging is HHDM
         assert2((uint64_t) pml4 < 0x8000000000000000ULL, "CR3 here must be physical!");
-        uint64_t *pml4e = &(pml4[pml4i]);
+        uint64_t pml4i = (uint64_t) virt >> 39 & 0x01FF;
+        uint64_t pdpei = (uint64_t) virt >> 30 & 0x01FF;
+        uint64_t pdei = (uint64_t) virt >> 21 & 0x01FF;
+        uint64_t ptsi = (uint64_t) virt >> 12 & 0x01FF;
+        uint64_t *pml4e = pml4 + pml4i;
         if (!(*pml4e & PAGE_PRESENT)) {
-            assert2(early_pages_used < EARLY_PAGES_SIZE, "Couldn't get a page for HHDM!");
-            uint64_t *newp = early_pages[early_pages_used++];
-            for (int i = 0; i < 512; i++)
-                newp[i] = PAGE_RW;
-            *pml4e = PAGE_RW | PAGE_PRESENT;
+            uint64_t *newp = get_early_frame();
+            *pml4e |= PAGE_PRESENT | PAGE_RW | PAGE_USER;
             *pml4e |= (uint64_t) KERN_V2P(newp) & (uint64_t) 0x000FFFFFFFFFF000ULL;
         }
-        *pml4e |= PAGE_RW | PAGE_PRESENT;
         uint64_t *pdpeb = (uint64_t *) (*pml4e & 0x000FFFFFFFFFF000ULL);
         uint64_t *pdpee = &pdpeb[pdpei];
-        assert2((!(*pdpee & PAGE_PRESENT)), "HHDM area is already mapped!");
-        *pdpee = PAGE_RW | PAGE_PRESENT | PAGE_PS;
-        *pdpee |= (uint64_t) real & (uint64_t) 0x000FFFFFFFFFF000ULL;
+        assert2(!(*pdpee & PAGE_PS), "Encountered an unexpected large mapping!");
+        if (!(*pdpee & PAGE_PRESENT)) {
+            uint64_t *newp = get_early_frame();
+            *pdpee |= PAGE_PRESENT | PAGE_RW | PAGE_USER;
+            *pdpee |= (uint64_t) KERN_V2P(newp) & (uint64_t) 0x000FFFFFFFFFF000ULL;
+        }
+        uint64_t *pdeb = (uint64_t *) (*pdpee & 0x000FFFFFFFFFF000ULL);
+        uint64_t *pdee = &pdeb[pdei];
+        assert2(!(*pdee & PAGE_PS), "Encountered an unexpected large mapping!");
+        if (!(*pdee & PAGE_PRESENT)) {
+            uint64_t *newp = get_early_frame();
+            *pdee |= PAGE_PRESENT | PAGE_RW | PAGE_USER;
+            *pdee |= (uint64_t) KERN_V2P(newp) & (uint64_t) 0x000FFFFFFFFFF000ULL;
+        }
+        uint64_t *ptsb = (uint64_t *) (*pdee & 0x000FFFFFFFFFF000ULL);
+        uint64_t *ptse = &ptsb[ptsi];
+        *ptse = ((uint64_t) real & 0x000FFFFFFFFFF000ULL) | (PAGE_PRESENT | PAGE_RW | PAGE_USER) | PAGE_PRESENT;
     }
     _tlb_flush();
 }
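The bump from (HHDM_SIZE + 1) * 2 to (HHDM_SIZE + 1) * 1024 early frames follows from the finer granularity. With 1 GiB pages, each mapped GiB consumed only one PDPT slot; with 4 KiB pages, each GiB needs 512 page tables plus one page directory, roughly 513 frames per GiB, plus a few PDPTs and the PML4 itself. A compile-time sanity check could document that budget; a sketch, assuming HHDM_SIZE counts GiB as the loop bound suggests:

    // Sketch (not in this commit): each GiB mapped with 4 KiB pages needs
    // 512 page tables + 1 page directory; PDPTs add at most a handful more.
    _Static_assert(EARLY_PAGES_SIZE >= HHDM_SIZE * 513ULL + 2,
                   "early frame pool too small to map the HHDM with 4 KiB pages");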