/*
 * corn/src/arch/amd64/paging.c
 * mirror of https://git.stationery.faith/corn/corn.git (synced 2025-04-11)
 */
#include <panic.h>
#include <cpuid.h>
#include <stdint.h>
#include <lib.h>
#include <memory.h>
#define MEMORY_INTERNAL
#include <memory/physalloc.h>
#include <memory/virtalloc.h>
#include "paging.h"
#include "bindings.h"
// PAGE MAP LEVEL 4 ENTRY
// Bitfield layout must match the hardware 64-bit entry format exactly.
struct pml4e {
// low attribute bits (F_PRESENT / F_WRITEABLE etc. as used in this file)
uint64_t flags : 6;
uint64_t : 6;
// bits 12..51: physical page number (address >> 12) of the child PDPT
uint64_t address : 40;
uint64_t : 11;
// bit 63: NX — no instruction fetch through this entry when set
uint64_t execute_disable : 1;
};
// PAGE DIRECTORY POINTER TABLE ENTRY
// Bitfield layout must match the hardware 64-bit entry format exactly.
struct pdpte {
// low attribute bits (F_PRESENT / F_WRITEABLE etc. as used in this file)
uint64_t flags : 6;
uint64_t : 1;
// bit 7: PS — set would make this a 1 GiB page (never set in this file)
uint64_t page_size : 1;
uint64_t : 4;
// bits 12..51: physical page number of the child page directory
uint64_t address : 40;
uint64_t : 11;
// bit 63: NX
uint64_t execute_disable : 1;
};
// PAGE DIRECTORY ENTRY
// Bitfield layout must match the hardware 64-bit entry format exactly.
struct pde {
// low attribute bits (F_PRESENT / F_WRITEABLE etc. as used in this file)
uint64_t flags : 6;
uint64_t : 1;
// bit 7: PS — set would make this a 2 MiB page (never set in this file)
uint64_t page_size : 1;
uint64_t : 4;
// bits 12..51: physical page number of the child page table
uint64_t address : 40;
uint64_t : 11;
// bit 63: NX
uint64_t execute_disable : 1;
};
// PAGE TABLE ENTRY
// Bitfield layout must match the hardware 64-bit entry format exactly.
struct pte {
// low attribute bits; 9 bits here (includes the global bit, cf. F_GLOBAL)
uint64_t flags : 9;
uint64_t : 3;
// bits 12..51: physical page number of the mapped 4 KiB frame
uint64_t address : 40;
uint64_t : 7;
// bits 59..62: MPK protection key (unused in this file)
uint64_t protection_key : 4;
// bit 63: NX
uint64_t execute_disable : 1;
};
// bss segment, can write to
extern struct pml4e kernel_pml4[512];
extern struct pdpte kernel_pdpt_0[512];
extern struct pde kernel_pd_0[512];
extern struct pte bootstrap_pt[512];
extern struct pte paging_pt[512]; // paging_pt should NEVER be outside of this file, NEVER i say
// paged addresses used to read/write page tables
// the table structures are not guaranteed to be identity mapped;
// map them here with load_<type>(phys_addr) before using the structures
void *addr_mapped = (void *) (uintptr_t) 0x204000;
static struct pml4e *pml4_mapped = (void *) (uintptr_t) 0x200000;
static struct pdpte *pdpt_mapped = (void *) (uintptr_t) 0x201000;
static struct pde *pd_mapped = (void *) (uintptr_t) 0x202000;
static struct pte *pt_mapped = (void *) (uintptr_t) 0x203000;
// Flush the single TLB entry translating `addr` (INVLPG instruction).
static inline void invlpg(void *addr) {
__asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
// Map an arbitrary physical page at the scratch window `addr_mapped`
// (backed by paging_pt[4]). Unlike the load_* helpers below, this never
// caches: the slot is rewritten and the TLB entry flushed on every call.
static void load_addr(void *phys_addr) {
static struct pte *pt = &paging_pt[4];
pt->address = (uint64_t)phys_addr >> 12;
pt->flags = F_PRESENT | F_WRITEABLE;
invlpg(addr_mapped);
}
// Point the pml4_mapped window (paging_pt[0]) at the physical PML4 `phys`.
// Skips the rewrite and TLB flush when that table is already mapped.
static void load_pml4(void *phys) {
static struct pte *pt = &paging_pt[0];
if ((uint64_t)phys >> 12 == pt->address)
return;
pt->address = (uint64_t)phys >> 12;
pt->flags = F_PRESENT | F_WRITEABLE;
invlpg(pml4_mapped);
}
// Point the pdpt_mapped window (paging_pt[1]) at the physical PDPT `phys`.
// Skips the rewrite and TLB flush when that table is already mapped.
static void load_pdpt(void *phys) {
static struct pte *pt = &paging_pt[1];
if ((uint64_t)phys >> 12 == pt->address)
return;
pt->address = (uint64_t)phys >> 12;
pt->flags = F_PRESENT | F_WRITEABLE;
invlpg(pdpt_mapped);
}
// Point the pd_mapped window (paging_pt[2]) at the physical PD `phys`.
// Skips the rewrite and TLB flush when that table is already mapped.
static void load_pd(void *phys) {
static struct pte *pt = &paging_pt[2];
if ((uint64_t)phys >> 12 == pt->address)
return;
pt->address = (uint64_t)phys >> 12;
pt->flags = F_PRESENT | F_WRITEABLE;
invlpg(pd_mapped);
}
// Point the pt_mapped window (paging_pt[3]) at the physical PT `phys`.
// Skips the rewrite and TLB flush when that table is already mapped.
static void load_pt(void *phys) {
static struct pte *pt = &paging_pt[3];
if ((uint64_t)phys >> 12 == pt->address)
return;
pt->address = (uint64_t)phys >> 12;
pt->flags = F_PRESENT | F_WRITEABLE;
invlpg(pt_mapped);
}
#define PAG_SUCCESS 0
#define PAG_CANNOT_ALLOC 1
#define PAG_NOT_PRESENT 2
// Walk the PML4 level for `virt`: store in *res the PHYSICAL address of the
// PDPT covering it.
// flags: when non-zero, the entry's flag bits are REPLACED by
// F_PRESENT | flags (previous bits are discarded, not OR-ed in).
// root: physical address of the PML4 table.
// create: if the entry is absent, allocate and zero a new PDPT page.
// Returns PAG_SUCCESS, PAG_NOT_PRESENT (absent and !create), or
// PAG_CANNOT_ALLOC.
static int select_pdpt(
void *virt,
unsigned int flags,
struct pml4e *root,
struct pdpte **res,
bool create
) {
load_pml4(root);
// bits 39+ index the PML4. NOTE(review): no & 0x1ff mask here — assumes
// low-half virtual addresses; a canonical higher-half address would index
// past the 512-entry table. Confirm callers only pass low addresses.
uint64_t offset = (uint64_t)virt >> 39;
struct pml4e *pml4e = &pml4_mapped[offset];
if (!(pml4e->flags & F_PRESENT)) {
if (!create) {
return PAG_NOT_PRESENT;
}
void *new_page = alloc_phys_page();
if (new_page == NULL) {
return PAG_CANNOT_ALLOC;
}
// zero the freshly allocated table through the scratch window
load_addr(new_page);
memset(addr_mapped, 0, PAGE_SIZE);
pml4e->address = ((uint64_t)new_page) >> 12;
pml4e->flags = F_PRESENT;
}
if (flags)
pml4e->flags = F_PRESENT | flags;
*res = (struct pdpte *)(uintptr_t)(pml4e->address << 12);
return PAG_SUCCESS;
}
// Walk the PDPT level for `virt`: store in *res the PHYSICAL address of the
// page directory covering it.
// flags: when non-zero, the entry's flag bits are REPLACED by
// F_PRESENT | flags (previous bits are discarded, not OR-ed in).
// pdpt: physical address of the PDPT (window-mapped via load_pdpt).
// create: if the entry is absent, allocate and zero a new PD page.
// Returns PAG_SUCCESS, PAG_NOT_PRESENT, or PAG_CANNOT_ALLOC.
static int select_pd(
void *virt,
unsigned int flags,
struct pdpte *pdpt,
struct pde **res,
bool create
) {
load_pdpt(pdpt);
// bits 30..38 index the PDPT
uint64_t offset = ((uint64_t)virt >> 30) & 0x1ff;
struct pdpte *pdpte = &pdpt_mapped[offset];
if (!(pdpte->flags & F_PRESENT)) {
if (!create) {
return PAG_NOT_PRESENT;
}
void *new_page = alloc_phys_page();
if (new_page == NULL) {
return PAG_CANNOT_ALLOC;
}
// zero the freshly allocated table through the scratch window
load_addr(new_page);
memset(addr_mapped, 0, PAGE_SIZE);
pdpte->address = ((uint64_t)new_page) >> 12;
pdpte->flags = F_PRESENT;
}
if (flags)
pdpte->flags = F_PRESENT | flags;
*res = (struct pde *)(uintptr_t)(pdpte->address << 12);
return PAG_SUCCESS;
}
// Walk the PD level for `virt`: store in *res the PHYSICAL address of the
// page table covering it.
// flags: when non-zero, the entry's flag bits are REPLACED by
// F_PRESENT | flags (previous bits are discarded, not OR-ed in).
// pd: physical address of the page directory (window-mapped via load_pd).
// create: if the entry is absent, allocate and zero a new PT page.
// Returns PAG_SUCCESS, PAG_NOT_PRESENT, or PAG_CANNOT_ALLOC.
static int select_pt(
void *virt,
unsigned int flags,
struct pde *pd,
struct pte **res,
bool create
) {
load_pd(pd);
// bits 21..29 index the page directory
uint64_t offset = ((uint64_t)virt >> 21) & 0x1ff;
struct pde *pde = &pd_mapped[offset];
if (!(pde->flags & F_PRESENT)) {
if (!create) {
return PAG_NOT_PRESENT;
}
void *new_page = alloc_phys_page();
if (new_page == NULL) {
return PAG_CANNOT_ALLOC;
}
// zero the freshly allocated table through the scratch window
load_addr(new_page);
memset(addr_mapped, 0, PAGE_SIZE);
pde->address = ((uint64_t)new_page) >> 12;
pde->flags = F_PRESENT;
}
if (flags)
pde->flags = F_PRESENT | flags;
*res = (struct pte *)(uintptr_t)(pde->address << 12);
return PAG_SUCCESS;
}
// Fetch a pointer to the PTE for `virt` inside the page table `pt`
// (given by its physical address), accessed through the pt_mapped window.
static void select_page(
	void *virt,
	struct pte *pt,
	struct pte **res
) {
	load_pt(pt);
	// bits 12..20 index the page table
	uint64_t idx = ((uint64_t)virt >> 12) & 0x1ff;
	*res = &pt_mapped[idx];
}
// If the currently window-mapped PML4 has no present entries left, free
// every child table page still recorded in its entries.
// NOTE(review): the address fields are NOT zeroed after free_phys_page and
// the PML4 page itself is not freed — confirm callers guarantee this runs
// at most once per emptied table, otherwise a double free is possible.
static inline void try_unmap_pml4(void) {
// any present entry means the table is still in use — keep everything
for (int i = 0; i < 512; i++) {
if (pml4_mapped[i].flags & F_PRESENT)
return;
}
for (int i = 0; i < 512; i++) {
if (pml4_mapped[i].address) {
void *addr = (void *)(uintptr_t)(pml4_mapped[i].address << 12);
free_phys_page(addr);
}
}
}
// If the currently window-mapped PDPT has no present entries left, free
// every child PD page it still references, then try to reap the parent
// PML4 as well.
// NOTE(review): freed entries keep their address bits and the PDPT page
// itself is not freed here — see the note on try_unmap_pml4.
static inline void try_unmap_pdpt(void) {
// any present entry means the table is still in use — keep everything
for (int i = 0; i < 512; i++) {
if (pdpt_mapped[i].flags & F_PRESENT)
return;
}
for (int i = 0; i < 512; i++) {
if (pdpt_mapped[i].address) {
void *addr = (void *)(uintptr_t)(pdpt_mapped[i].address << 12);
free_phys_page(addr);
}
}
try_unmap_pml4();
}
// If the currently window-mapped page directory has no present entries
// left, free every child PT page it still references, then try to reap the
// parent PDPT.
// NOTE(review): freed entries keep their address bits and the PD page
// itself is not freed here — see the note on try_unmap_pml4.
static inline void try_unmap_pd(void) {
// any present entry means the table is still in use — keep everything
for (int i = 0; i < 512; i++) {
if (pd_mapped[i].flags & F_PRESENT)
return;
}
for (int i = 0; i < 512; i++) {
if (pd_mapped[i].address) {
void *addr = (void *)(uintptr_t)(pd_mapped[i].address << 12);
free_phys_page(addr);
}
}
try_unmap_pdpt();
}
// If the currently window-mapped page table has no present entries left,
// free every frame still recorded in its (cleared) entries, then try to
// reap the parent page directory.
// NOTE(review): unmap_page/unmap_pages zero both flags and address of the
// entries they clear, so the second loop normally frees nothing; the PT
// page itself is not freed here — see the note on try_unmap_pml4.
static inline void try_unmap_pt(void) {
// any present entry means the table is still in use — keep everything
for (int i = 0; i < 512; i++) {
if (pt_mapped[i].flags & F_PRESENT)
return;
}
for (int i = 0; i < 512; i++) {
if (pt_mapped[i].address) {
void *addr = (void *)(uintptr_t)(pt_mapped[i].address << 12);
free_phys_page(addr);
}
}
try_unmap_pd();
}
// Remove the mapping of one virtual page under `root`: clear its PTE, reap
// empty page tables, and flush the TLB entry. Silently a no-op when any
// intermediate table is absent.
static void unmap_page(
struct pml4e *root,
void *virt
) {
struct pdpte *pdpt;
struct pde *pd;
struct pte *pt;
struct pte *page;
// flags = 0: leave intermediate-entry flag bits untouched during the walk
unsigned int df = 0;
if (select_pdpt(virt, df, root, &pdpt, false))
return;
if (select_pd(virt, df, pdpt, &pd, false))
return;
if (select_pt(virt, df, pd, &pt, false))
return;
select_page(virt, pt, &page);
page->address = 0;
page->flags = 0;
try_unmap_pt();
invlpg(virt);
return;
}
// Unmap `page_count` contiguous virtual pages starting at virt_start under
// `root`. Pages whose intermediate tables are absent are skipped. Page
// tables that become empty are reaped via try_unmap_pt(). Each cleared
// mapping is flushed with invlpg — the original version never invalidated
// the TLB at all, leaving stale translations live (map_pages relies on a
// CR3 reload, which would not even cover F_GLOBAL pages).
static void unmap_pages(
	struct pml4e *root,
	void *virt_start,
	long page_count
) {
	// previously selected table indices; -1 == nothing cached yet
	uint64_t pml4_o = -1, pdpt_o = -1, pd_o = -1;
	uint64_t pml4_n, pdpt_n, pd_n;
	struct pdpte *pdpt = NULL;
	struct pde *pd = NULL;
	struct pte *pt = NULL;
	struct pte *page = NULL;
	// flags = 0: leave intermediate-entry flag bits untouched
	unsigned int df = 0;
	void *virt;
	for (long i = 0; i < page_count; i++) {
		virt = (char *)virt_start + (i * PAGE_SIZE);
		pml4_n = (uint64_t)virt >> 39;
		pdpt_n = ((uint64_t)virt >> 30) & 0x1ff;
		pd_n = ((uint64_t)virt >> 21) & 0x1ff;
		if (pdpt == NULL || pml4_o != pml4_n) {
			if (select_pdpt(virt, df, root, &pdpt, false))
				continue;
			pml4_o = pml4_n;
			// a new PML4 entry invalidates the cached lower-level
			// tables even when their numeric indices repeat
			pdpt_o = (uint64_t)-1;
			pd_o = (uint64_t)-1;
		}
		if (pd == NULL || pdpt_o != pdpt_n) {
			if (select_pd(virt, df, pdpt, &pd, false))
				continue;
			pdpt_o = pdpt_n;
			pd_o = (uint64_t)-1;
		}
		if (pt == NULL || pd_o != pd_n) {
			// leaving a fully-walked page table: reap it if empty
			if (pt) {
				try_unmap_pt();
			}
			if (select_pt(virt, df, pd, &pt, false))
				continue;
			pd_o = pd_n;
		}
		select_page(virt, pt, &page);
		page->address = 0;
		page->flags = 0;
		// flush per page; invlpg also evicts global entries
		invlpg(virt);
	}
	if (pt != NULL)
		try_unmap_pt();
}
// Map one virtual page to one physical page under `root`, creating any
// missing intermediate tables (all marked writeable via df) and flushing
// the TLB entry. Returns 0 on success, 1 if a table allocation failed.
// NOTE(review): on failure, tables created by earlier select_* calls are
// not rolled back.
static int map_page(
struct pml4e *root,
void *virt,
void *phys,
unsigned int flags
) {
struct pdpte *pdpt;
struct pde *pd;
struct pte *pt;
struct pte *page;
// intermediate tables are always made writeable
unsigned int df = F_WRITEABLE;
if (select_pdpt(virt, df, root, &pdpt, true))
return 1;
if (select_pd(virt, df, pdpt, &pd, true))
return 1;
if (select_pt(virt, df, pd, &pt, true))
return 1;
select_page(virt, pt, &page);
page->address = (uint64_t)phys >> 12;
page->flags = F_PRESENT | flags;
invlpg(virt);
return 0;
}
// Map `page_count` contiguous pages virt_start -> phys_start under `root`,
// setting F_PRESENT | flags on every PTE. Intermediate tables are created
// on demand and cached across iterations so each level is only re-selected
// when its index changes. Returns 0 on success; on failure every page
// mapped so far is rolled back and 1 is returned.
static int map_pages(
	struct pml4e *root,
	void *virt_start,
	void *phys_start,
	unsigned int flags,
	long page_count
) {
	// previously selected table indices; -1 == nothing cached yet
	uint64_t pml4_o = -1, pdpt_o = -1, pd_o = -1;
	uint64_t pml4_n, pdpt_n, pd_n;
	struct pdpte *pdpt = NULL;
	struct pde *pd = NULL;
	struct pte *pt = NULL;
	struct pte *page = NULL;
	void *virt, *phys;
	// intermediate tables are always made writeable
	unsigned int df = F_WRITEABLE;
	long i;
	for (i = 0; i < page_count; i++) {
		virt = (char *)virt_start + (i * PAGE_SIZE);
		phys = (char *)phys_start + (i * PAGE_SIZE);
		pml4_n = (uint64_t)virt >> 39;
		pdpt_n = ((uint64_t)virt >> 30) & 0x1ff;
		pd_n = ((uint64_t)virt >> 21) & 0x1ff;
		if (pdpt == NULL || pml4_o != pml4_n) {
			if (select_pdpt(virt, df, root, &pdpt, true))
				goto failed;
			pml4_o = pml4_n;
			// a new PML4 entry invalidates the cached lower-level
			// tables even when their numeric indices repeat
			pdpt_o = (uint64_t)-1;
			pd_o = (uint64_t)-1;
		}
		if (pd == NULL || pdpt_o != pdpt_n) {
			if (select_pd(virt, df, pdpt, &pd, true))
				goto failed;
			pdpt_o = pdpt_n;
			pd_o = (uint64_t)-1;
		}
		if (pt == NULL || pd_o != pd_n) {
			if (select_pt(virt, df, pd, &pt, true))
				goto failed;
			pd_o = pd_n;
		}
		select_page(virt, pt, &page);
		page->address = (uint64_t)phys >> 12;
		page->flags = F_PRESENT | flags;
		// global entries survive the CR3 reload below, so flush them now
		if (flags & F_GLOBAL)
			invlpg(virt);
	}
	// full (non-global) TLB flush by reloading CR3
	__asm volatile("mov %cr3, %rax; mov %rax, %cr3;");
	return 0;
failed:
	// roll back from the START of the region: exactly the i pages mapped so
	// far. (The original passed `virt` — the failing address — which leaked
	// the mapped prefix and unmapped i pages that were never mapped.)
	unmap_pages(root, virt_start, i);
	return 1;
}
// Wire up the boot page-table hierarchy: PML4[0] -> kernel_pdpt_0,
// PDPT[0] -> kernel_pd_0, PD[1] -> paging_pt (the table-access windows at
// 0x200000..0x204fff) and PD[2] -> bootstrap_pt, then clear both leaf
// tables. Runs with the table symbols' addresses usable as physical
// addresses (identity-mapped bss).
void paging_init(void) {
	kernel_pml4[0].flags = F_PRESENT | F_WRITEABLE;
	kernel_pml4[0].address = (uint64_t)(&kernel_pdpt_0) >> 12;
	kernel_pdpt_0[0].flags = F_PRESENT | F_WRITEABLE;
	kernel_pdpt_0[0].address = (uint64_t)(&kernel_pd_0) >> 12;
	kernel_pd_0[1].flags = F_PRESENT | F_WRITEABLE;
	kernel_pd_0[1].address = (uint64_t)(&paging_pt) >> 12;
	kernel_pd_0[2].flags = F_PRESENT | F_WRITEABLE;
	kernel_pd_0[2].address = (uint64_t)(&bootstrap_pt) >> 12;
	// sizeof the arrays (512 * 8 bytes) replaces the magic 4096
	memset(paging_pt, 0, sizeof(paging_pt));
	memset(bootstrap_pt, 0, sizeof(bootstrap_pt));
}
// Round `addr` up to the next PAGE_SIZE boundary (identity on aligned input).
static inline void *page_align(void *addr) {
	uintptr_t p = (uintptr_t)addr + (PAGE_SIZE - 1);
	p -= p % PAGE_SIZE;
	return (void *)p;
}
// Map the physical range [addr, addr + len) into kernel virtual memory.
// Returns the virtual address aliasing `addr`, or NULL on failure. The
// caller owns the physical memory; release the mapping with unmap().
void *mmap(void *addr, size_t len) {
	// offset of addr within its page: the mapping must start at the
	// containing page boundary and cover that slack too
	uintptr_t offset = (uintptr_t)addr % PAGE_SIZE;
	len += offset;
	long pages = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	void *virt = virtaddr_alloc(pages);
	if (virt == NULL) {
		return NULL;
	}
	// round DOWN to the page containing addr. (The original used
	// page_align(), which rounds UP — for unaligned addr that mapped the
	// wrong physical region even though the slack was added to len.)
	void *phys = (void *)((uintptr_t)addr - offset);
	if (map_pages(
		kernel_pml4,
		virt,
		phys,
		F_WRITEABLE,
		pages
	)) {
		virtaddr_free(virt);
		return NULL;
	}
	// return the alias of addr itself; identical to the original return
	// value whenever addr was already page-aligned
	return (char *)virt + offset;
}
// Tear down a mapping created by mmap(): ask the virtual allocator how many
// pages back `addr` and unmap them. The physical pages are not freed — they
// belong to the mmap() caller.
void unmap(void *addr) {
long pages = virtaddr_free(addr);
if (pages < 1)
return;
unmap_pages(kernel_pml4, addr, pages);
}
void *alloc_pages(int count) {
void *virt = virtaddr_alloc(count);
if (virt == NULL)
return NULL;
void *phys = alloc_phys_pages(count);
if (phys == NULL) {
virtaddr_free(virt);
return NULL;
}
if (map_pages(
kernel_pml4,
virt,
phys,
F_WRITEABLE,
count
)) {
virtaddr_free(virt);
return NULL;
}
return virt;
}
// Free a single page mapping — unimplemented stub; always panics.
void free_page(void *virt) {
(void) virt;
panic("free_page is not yet implemented");
}
// Release a virtual allocation made by alloc_pages().
// NOTE(review): only the mapping is torn down here — the physical pages
// obtained from alloc_phys_pages() are never returned to the allocator;
// verify whether they are reclaimed elsewhere or simply leak.
void free_pages(void *virt) {
long pages = virtaddr_free(virt);
if (pages < 1)
return;
unmap_pages(kernel_pml4, virt, pages);
}
// Enter a memory-management critical section by masking interrupts (cli).
void memory_lock(void) {
cli();
}
// Leave the critical section by re-enabling interrupts (sti).
// NOTE(review): unconditional sti — does not nest or restore a caller's
// prior interrupt-flag state; confirm callers never hold this lock nested.
void memory_unlock(void) {
sti();
}
// Map one page into the kernel address space (kernel_pml4).
// Returns 0 on success, non-zero when a page-table allocation failed.
int kmap_page(void *virt_addr, void *phys_addr, unsigned int flags) {
return map_page(kernel_pml4, virt_addr, phys_addr, flags);
}
// Unmap one kernel page. Always returns 0 — unmap_page silently ignores
// addresses that are not mapped.
int kunmap_page(void *virt_addr) {
unmap_page(kernel_pml4, virt_addr);
return 0;
}