summaryrefslogtreecommitdiff
path: root/src/arch/amd64/paging.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/arch/amd64/paging.c')
-rw-r--r--src/arch/amd64/paging.c320
1 files changed, 320 insertions, 0 deletions
diff --git a/src/arch/amd64/paging.c b/src/arch/amd64/paging.c
new file mode 100644
index 0000000..3140a2a
--- /dev/null
+++ b/src/arch/amd64/paging.c
@@ -0,0 +1,320 @@
+#include <cpuid.h>
+#include <stdint.h>
+#include <lib.h>
+#include <memory.h>
+
+#define MEMORY_INTERNAL
+#include <memory/physalloc.h>
+#include <memory/virtalloc.h>
+
+#include "bindings.h"
+
// Page-table entry flag bits (low bits of an amd64 long-mode paging entry).
#define F_PRESENT 0x001      // bit 0: entry is valid / translation present
#define F_WRITEABLE 0x002    // bit 1: writes permitted through this entry
#define F_UNPRIVILEGED 0x004 // bit 2: user-mode (CPL 3) access permitted
#define F_WRITETHROUGH 0x008 // bit 3: write-through caching for this page
#define F_CACHEDISABLE 0x010 // bit 4: caching disabled for this page
#define F_ACCESSED 0x020     // bit 5: set by the CPU when the entry is used
#define F_DIRTY 0x040        // bit 6: set by the CPU on write (leaf entries)
#define F_MEGABYTE 0x080     // bit 7: PS large-page bit (2 MiB at PD level on amd64, despite the name)
#define F_GLOBAL 0x100       // bit 8: translation survives CR3 reloads
+
// PAGE MAP LEVEL 4 ENTRY
// Top level of the 4-level hierarchy; `address` is the physical address of a
// PDPT shifted right by 12 (i.e. its 4 KiB page frame number).
struct pml4e {
	uint64_t flags : 6;           // F_PRESENT..F_ACCESSED (bits 0-5)
	uint64_t : 6;                 // ignored / reserved bits
	uint64_t address : 40;        // PDPT physical address >> 12
	uint64_t : 11;                // available to software
	uint64_t execute_disable : 1; // NX: forbid instruction fetch below this entry
};
+
// PAGE DIRECTORY POINTER TABLE ENTRY
// `address` is the physical page frame number of a page directory.
struct pdpte {
	uint64_t flags : 6;           // F_PRESENT..F_ACCESSED (bits 0-5)
	uint64_t : 1;                 // ignored (dirty position, unused here)
	uint64_t page_size : 1;       // PS: 1 = this entry maps a 1 GiB page directly
	uint64_t : 4;                 // ignored / available bits
	uint64_t address : 40;        // page directory physical address >> 12
	uint64_t : 11;                // available to software
	uint64_t execute_disable : 1; // NX bit
};
+
// PAGE DIRECTORY ENTRY
// `address` is the physical page frame number of a page table.
struct pde {
	uint64_t flags : 6;           // F_PRESENT..F_ACCESSED (bits 0-5)
	uint64_t : 1;                 // ignored (dirty position, unused here)
	uint64_t page_size : 1;       // PS: 1 = this entry maps a 2 MiB page directly
	uint64_t : 4;                 // ignored / available bits
	uint64_t address : 40;        // page table physical address >> 12
	uint64_t : 11;                // available to software
	uint64_t execute_disable : 1; // NX bit
};
+
// PAGE TABLE ENTRY (leaf)
// `address` is the physical page frame number of the mapped 4 KiB page.
// Note the flags field is 9 bits here (includes F_MEGABYTE/PAT position and
// F_GLOBAL), wider than the 6-bit flags of the upper-level entries.
struct pte {
	uint64_t flags : 9;           // F_PRESENT..F_GLOBAL (bits 0-8)
	uint64_t : 3;                 // available to software
	uint64_t address : 40;        // mapped page physical address >> 12
	uint64_t : 7;                 // available to software
	uint64_t protection_key : 4;  // MPK protection key (if CR4.PKE enabled)
	uint64_t execute_disable : 1; // NX bit
};
+
+// bss segment, can write to
+extern struct pml4e pml4_list[512];
+extern struct pdpte init_pdpt[512];
+extern struct pde init_pd[512];
+extern struct pte init_pt[512];
+extern struct pte paging_pt[512]; // paging_pt should NEVER be outside of this file, NEVER i say
+
+// paged address to read page tables
+// the structures are not gurenteed to be ident mapped
+// map them here with map_<type>(phys_addr) before useing structures
+static struct pdpte *pdpt_mapped = (void *) (uintptr_t) 0x1000;
+static struct pdpte *pd_mapped = (void *) (uintptr_t) 0x2000;
+static struct pdpte *pt_mapped = (void *) (uintptr_t) 0x3000;
+void *addr_mapped = (void *) (uintptr_t) 0x4000;
+
// Invalidate the TLB entry covering addr on the current CPU so the next
// access re-walks the (possibly just-edited) page tables.
static inline void invlpg(void *addr) {
	__asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
+
+static void map_pdpt(struct pdpte *phys_addr) {
+ paging_pt[1].address = (((uint64_t)phys_addr) >> 12);
+ paging_pt[1].flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pdpt_mapped);
+}
+
+static void map_pd(struct pde *phys_addr) {
+ paging_pt[2].address = (((uint64_t)phys_addr) >> 12);
+ paging_pt[2].flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pd_mapped);
+}
+
+static void map_pt(struct pte *phys_addr) {
+ paging_pt[3].address = (((uint64_t)phys_addr) >> 12);
+ paging_pt[3].flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pt_mapped);
+}
+
+static void map_addr(void *phys_addr) {
+ paging_pt[4].address = (((uint64_t)phys_addr) >> 12);
+ paging_pt[4].flags = F_PRESENT | F_WRITEABLE;
+ invlpg(addr_mapped);
+}
+
+//static int get_maxphysaddr() {
+// uint32_t eax, ebx, ecx, edx;
+// __cpuid(0x80000008, eax, ebx, ecx, edx);
+// return eax & 0xFF;
+//}
+
+//int find_phys_addr(struct pml4e *pml4, void *virt_addr, void **phys_addr) {
+// uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
+// uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
+// uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
+// uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
+// uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
+//
+// if (!(pml4[pml4_offset].flags & F_PRESENT))
+// return -1;
+// struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);
+// if (!(pdpt[pdpt_offset].flags & F_PRESENT))
+// return -1;
+// struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);
+// if (!(pd[pd_offset].flags & F_PRESENT))
+// return -1;
+// struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);
+// if (!(pt[pt_offset].flags & F_PRESENT))
+// return -1;
+// *phys_addr = (void *)((pt[pt_offset].address << 12) + page_offset);
+// return 0;
+//}
+
+char *curr_alloc = (void *) 0x5000;
+
+int map_page(struct pml4e *pml4, void *virt_addr, void *phys_addr, unsigned int flags) {
+ uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
+ uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
+ uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
+ uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
+ //uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
+
+ if (!(pml4[pml4_offset].flags & F_PRESENT)) {
+ void *new_page = alloc_phys_page();
+ map_addr(new_page);
+ memset(addr_mapped, 0, 4096);
+ pml4[pml4_offset].address = ((uint64_t)new_page) >> 12;
+ }
+ pml4[pml4_offset].flags = F_PRESENT | flags;
+
+ struct pdpte *__pdpt = (struct pdpte *)(uintptr_t)(pml4[pml4_offset].address << 12);
+ map_pdpt(__pdpt);
+ if (!(pdpt_mapped[pdpt_offset].flags & F_PRESENT)) {
+ void *new_page = alloc_phys_page();
+ map_addr(new_page);
+ memset(addr_mapped, 0, 4096);
+ pdpt_mapped[pdpt_offset].address = ((uint64_t)new_page) >> 12;
+ }
+ pdpt_mapped[pdpt_offset].flags = F_PRESENT | flags;
+
+ struct pde *__pd = (struct pde *)(uintptr_t)(pdpt_mapped[pdpt_offset].address << 12);
+ map_pd(__pd);
+ if (!(pd_mapped[pd_offset].flags & F_PRESENT)) {
+ void *new_page = alloc_phys_page();
+ map_addr(new_page);
+ memset(addr_mapped, 0, 4096);
+ pd_mapped[pd_offset].address = ((uint64_t)new_page) >> 12;
+ }
+ pd_mapped[pd_offset].flags = F_PRESENT | flags;
+
+ struct pte *__pt = (struct pte *)(uintptr_t)(pd_mapped[pd_offset].address << 12);
+ map_pt(__pt);
+ pt_mapped[pt_offset].flags = F_PRESENT | flags;
+ pt_mapped[pt_offset].address = (((uint64_t)phys_addr) >> 12);
+ invlpg(virt_addr);
+ return 0;
+}
+
+int unmap_page(struct pml4e *pml4, void *virt_addr) {
+ uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
+ uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
+ uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
+ uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
+ //uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
+
+ if (!(pml4[pml4_offset].flags & F_PRESENT))
+ return -1;
+ struct pdpte *__pdpt = (struct pdpte *)(uintptr_t)(pml4[pml4_offset].address << 12);
+ map_pdpt(__pdpt);
+ if (!(pdpt_mapped[pdpt_offset].flags & F_PRESENT))
+ return -1;
+ struct pde *__pd = (struct pde *)(uintptr_t)(pdpt_mapped[pdpt_offset].address << 12);
+ map_pd(__pd);
+ if (!(pd_mapped[pd_offset].flags & F_PRESENT))
+ return -1;
+ struct pte *__pt = (struct pte *)(uintptr_t)(pd_mapped[pd_offset].address << 12);
+ map_pt(__pt);
+ if (!(pt_mapped[pt_offset].flags & F_PRESENT))
+ return -1;
+
+ pt_mapped[pt_offset].flags = 0;
+
+ int i = 0;
+ for(; i < 512; i++) {
+ if (pt_mapped[i].flags & F_PRESENT)
+ break;
+ }
+ if (i == 512)
+ goto done;
+
+ pd_mapped[pd_offset].flags = 0;
+
+ for(i = 0; i < 512; i++) {
+ if (pd_mapped[i].flags & F_PRESENT)
+ break;
+ }
+ if (i == 512)
+ goto done;
+
+ pdpt_mapped[pdpt_offset].flags = 0;
+
+ for(i = 0; i < 512; i++) {
+ if(pdpt_mapped[i].flags & F_PRESENT)
+ break;
+ }
+ if (i == 512)
+ goto done;
+
+ pml4[pml4_offset].flags = 0;
+
+ //TODO: Return memory used for page structures
+
+done:
+ invlpg(virt_addr);
+ return 0;
+}
+
+void paging_init(void) {
+ memset(pml4_list, 0, 4096);
+ pml4_list[0].flags = F_PRESENT | F_WRITEABLE;
+ pml4_list[0].address = (uint64_t)init_pdpt >> 12;
+
+ memset(init_pdpt, 0, 4096);
+ init_pdpt[0].flags = F_PRESENT | F_WRITEABLE;
+ init_pdpt[0].address = (uint64_t)init_pd >> 12;
+
+ memset(pml4_list, 0, 4096);
+ init_pd[0].flags = F_PRESENT | F_WRITEABLE;
+ init_pd[0].address = (uint64_t)paging_pt >> 12;
+ init_pd[1].flags = F_PRESENT | F_WRITEABLE;
+ init_pd[1].address = (uint64_t)init_pt >> 12;
+
+ memset(init_pt, 0, 4096);
+ memset(paging_pt, 0, 4096);
+}
+
+static inline void *page_align(void *addr) {
+ uintptr_t a = (uintptr_t) addr;
+ a += PAGE_SIZE + 1;
+ a /= PAGE_SIZE;
+ return (void *) a;
+}
+
+void *mmap(void *addr, size_t len) {
+ len += (uintptr_t)addr % PAGE_SIZE;
+ int pages = (len + PAGE_SIZE - 1) / PAGE_SIZE;
+ void *virt = virtaddr_alloc(pages);
+ if (virt == NULL)
+ return NULL;
+ void *phys = page_align(addr);
+ for (long i = 0; i < pages; i++) {
+ void *virt_temp = (char *)virt + (i * PAGE_SIZE);
+ void *phys_temp = (char *)phys + (i * PAGE_SIZE);
+ map_page(pml4_list, virt_temp, phys_temp, F_WRITEABLE);
+ }
+ return addr;
+}
+
+void unmap(void *addr) {
+ long pages = virtaddr_free(addr);
+ for (long i = 0; i < pages; i++) {
+ void *virt = (char *)addr + (i * PAGE_SIZE);
+ unmap_page(pml4_list, virt);
+ }
+}
+
+void *alloc_pages(int count) {
+ void *virt = virtaddr_alloc(count);
+ if (virt == NULL)
+ return NULL;
+ void *phys = alloc_phys_pages(count);
+ if (phys == NULL) {
+ virtaddr_free(virt);
+ return NULL;
+ }
+ for (int i = 0; i < count; i++) {
+ void *virt_temp = (char *)virt + (i * PAGE_SIZE);
+ void *phys_temp = (char *)phys + (i * PAGE_SIZE);
+ map_page(pml4_list, virt_temp, phys_temp, F_WRITEABLE);
+ }
+ return virt;
+}
+
+void free_page(void *virt) {
+ long pages = virtaddr_free(virt);
+ if (pages < 1)
+ return;
+ for (long i = 0; i < pages; i++) {
+ void *virt_temp = (char *)virt + (i * PAGE_SIZE);
+ unmap_page(pml4_list, virt_temp);
+ }
+}
+
// Enter a memory-subsystem critical section by calling cli() -- presumably
// the x86 "disable interrupts" instruction (defined in bindings.h), which
// only provides mutual exclusion on a single CPU. TODO confirm on SMP.
void memory_lock(void) {
	cli();
}
+
// Leave the memory-subsystem critical section by calling sti() -- presumably
// re-enables interrupts unconditionally, so nesting lock/unlock pairs would
// re-enable early. NOTE(review): verify callers never nest these.
void memory_unlock(void) {
	sti();
}