summaryrefslogtreecommitdiff
path: root/src/arch/amd64/paging.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/arch/amd64/paging.c')
-rw-r--r--src/arch/amd64/paging.c561
1 files changed, 416 insertions, 145 deletions
diff --git a/src/arch/amd64/paging.c b/src/arch/amd64/paging.c
index 836f602..453af9d 100644
--- a/src/arch/amd64/paging.c
+++ b/src/arch/amd64/paging.c
@@ -19,7 +19,7 @@ struct pml4e {
uint64_t execute_disable : 1;
};
-// PAGE DIRECTORY POINTER TABLE ENTRY
+// PAGE DIRECTORY POINTER TABLE ENTRY
struct pdpte {
uint64_t flags : 6;
uint64_t : 1;
@@ -61,170 +61,435 @@ extern struct pte paging_pt[512]; // paging_pt should NEVER be outside of this f
// paged address to read page tables
// the structures are not gurenteed to be ident mapped
// map them here with map_<type>(phys_addr) before useing structures
+void *addr_mapped = (void *) (uintptr_t) 0x204000;
+static struct pml4e *pml4_mapped = (void *) (uintptr_t) 0x200000;
static struct pdpte *pdpt_mapped = (void *) (uintptr_t) 0x201000;
-static struct pdpte *pd_mapped = (void *) (uintptr_t) 0x202000;
-static struct pdpte *pt_mapped = (void *) (uintptr_t) 0x203000;
-void *addr_mapped = (void *) (uintptr_t) 0x204000;
+static struct pde *pd_mapped = (void *) (uintptr_t) 0x202000;
+static struct pte *pt_mapped = (void *) (uintptr_t) 0x203000;
-static inline void invlpg(void *addr) {
+static inline void
+invlpg(void *addr)
+{
__asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
-static void map_pdpt(struct pdpte *phys_addr) {
- paging_pt[1].address = (((uint64_t)phys_addr) >> 12);
- paging_pt[1].flags = F_PRESENT | F_WRITEABLE;
- invlpg(pdpt_mapped);
+static void
+load_addr(void *phys_addr)
+{
+ static struct pte *pt = &paging_pt[4];
+ pt->address = (uint64_t)phys_addr >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(addr_mapped);
}
-static void map_pd(struct pde *phys_addr) {
- paging_pt[2].address = (((uint64_t)phys_addr) >> 12);
- paging_pt[2].flags = F_PRESENT | F_WRITEABLE;
- invlpg(pd_mapped);
+static void
+load_pml4(
+ void *phys
+) {
+ static struct pte *pt = &paging_pt[0];
+ pt->address = (uint64_t)phys >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pml4_mapped);
}
-static void map_pt(struct pte *phys_addr) {
- paging_pt[3].address = (((uint64_t)phys_addr) >> 12);
- paging_pt[3].flags = F_PRESENT | F_WRITEABLE;
- invlpg(pt_mapped);
+static void
+load_pdpt(
+ void *phys
+) {
+ static struct pte *pt = &paging_pt[1];
+ pt->address = (uint64_t)phys >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pdpt_mapped);
}
-static void map_addr(void *phys_addr) {
- paging_pt[4].address = (((uint64_t)phys_addr) >> 12);
- paging_pt[4].flags = F_PRESENT | F_WRITEABLE;
- invlpg(addr_mapped);
+static void
+load_pd(
+ void *phys
+) {
+ static struct pte *pt = &paging_pt[2];
+ pt->address = (uint64_t)phys >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+	invlpg(pd_mapped);
}
-//static int get_maxphysaddr() {
-// uint32_t eax, ebx, ecx, edx;
-// __cpuid(0x80000008, eax, ebx, ecx, edx);
-// return eax & 0xFF;
-//}
-
-//int find_phys_addr(struct pml4e *pml4, void *virt_addr, void **phys_addr) {
-// uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
-// uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
-// uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
-// uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
-// uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
-//
-// if (!(pml4[pml4_offset].flags & F_PRESENT))
-// return -1;
-// struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);
-// if (!(pdpt[pdpt_offset].flags & F_PRESENT))
-// return -1;
-// struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);
-// if (!(pd[pd_offset].flags & F_PRESENT))
-// return -1;
-// struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);
-// if (!(pt[pt_offset].flags & F_PRESENT))
-// return -1;
-// *phys_addr = (void *)((pt[pt_offset].address << 12) + page_offset);
-// return 0;
-//}
-
-char *curr_alloc = (void *) 0x5000;
+static void
+load_pt(
+ void *phys
+) {
+ static struct pte *pt = &paging_pt[3];
+ pt->address = (uint64_t)phys >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pt_mapped);
+}
-int map_page(struct pml4e *pml4, void *virt_addr, void *phys_addr, unsigned int flags) {
- uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
- uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
- uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
- uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
- //uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
+#define PAG_SUCCESS 0
+#define PAG_CANNOT_ALLOC 1
+#define PAG_NOT_PRESENT 2
- if (!(pml4[pml4_offset].flags & F_PRESENT)) {
+static int
+select_pdpt(
+ void *virt,
+ unsigned int flags,
+ struct pml4e *root,
+ struct pdpte **res,
+ bool create
+) {
+ load_pml4(root);
+ uint64_t offset = (uint64_t)virt >> 39;
+ struct pml4e *pml4e = &pml4_mapped[offset];
+ if (!(pml4e->flags & F_PRESENT)) {
+ if (!create) {
+ return PAG_NOT_PRESENT;
+ }
void *new_page = alloc_phys_page();
- map_addr(new_page);
- memset(addr_mapped, 0, 4096);
- pml4[pml4_offset].address = ((uint64_t)new_page) >> 12;
+ if (new_page == NULL) {
+ return PAG_CANNOT_ALLOC;
+ }
+ load_addr(new_page);
+ memset(addr_mapped, 0, PAGE_SIZE);
+ pml4e->address = ((uint64_t)new_page) >> 12;
+ pml4e->flags = F_PRESENT;
}
- pml4[pml4_offset].flags = F_PRESENT | flags;
+ if (flags)
+ pml4e->flags = F_PRESENT | flags;
+ *res = (struct pdpte *)(uintptr_t)(pml4e->address << 12);
+ return PAG_SUCCESS;
+}
- struct pdpte *__pdpt = (struct pdpte *)(uintptr_t)(pml4[pml4_offset].address << 12);
- map_pdpt(__pdpt);
- if (!(pdpt_mapped[pdpt_offset].flags & F_PRESENT)) {
+static int
+select_pd(
+ void *virt,
+ unsigned int flags,
+ struct pdpte *pdpt,
+ struct pde **res,
+ bool create
+) {
+ load_pdpt(pdpt);
+ uint64_t offset = ((uint64_t)virt >> 30) & 0x1ff;
+ struct pdpte *pdpte = &pdpt_mapped[offset];
+ if (!(pdpte->flags & F_PRESENT)) {
+ if (!create) {
+ return PAG_NOT_PRESENT;
+ }
void *new_page = alloc_phys_page();
- map_addr(new_page);
- memset(addr_mapped, 0, 4096);
- pdpt_mapped[pdpt_offset].address = ((uint64_t)new_page) >> 12;
+ if (new_page == NULL) {
+ return PAG_CANNOT_ALLOC;
+ }
+ load_addr(new_page);
+ memset(addr_mapped, 0, PAGE_SIZE);
+ pdpte->address = ((uint64_t)new_page) >> 12;
+ pdpte->flags = F_PRESENT;
}
- pdpt_mapped[pdpt_offset].flags = F_PRESENT | flags;
+ if (flags)
+ pdpte->flags = F_PRESENT | flags;
+ *res = (struct pde *)(uintptr_t)(pdpte->address << 12);
+ return PAG_SUCCESS;
+}
- struct pde *__pd = (struct pde *)(uintptr_t)(pdpt_mapped[pdpt_offset].address << 12);
- map_pd(__pd);
- if (!(pd_mapped[pd_offset].flags & F_PRESENT)) {
+static int
+select_pt(
+ void *virt,
+ unsigned int flags,
+ struct pde *pd,
+ struct pte **res,
+ bool create
+) {
+ load_pd(pd);
+ uint64_t offset = ((uint64_t)virt >> 21) & 0x1ff;
+ struct pde *pde = &pd_mapped[offset];
+ if (!(pde->flags & F_PRESENT)) {
+ if (!create) {
+ return PAG_NOT_PRESENT;
+ }
void *new_page = alloc_phys_page();
- map_addr(new_page);
- memset(addr_mapped, 0, 4096);
- pd_mapped[pd_offset].address = ((uint64_t)new_page) >> 12;
+ if (new_page == NULL) {
+ return PAG_CANNOT_ALLOC;
+ }
+ load_addr(new_page);
+ memset(addr_mapped, 0, PAGE_SIZE);
+ pde->address = ((uint64_t)new_page) >> 12;
+ pde->flags = F_PRESENT;
}
- pd_mapped[pd_offset].flags = F_PRESENT | flags;
+ if (flags)
+ pde->flags = F_PRESENT | flags;
+ *res = (struct pte *)(uintptr_t)(pde->address << 12);
+ return PAG_SUCCESS;
+}
- struct pte *__pt = (struct pte *)(uintptr_t)(pd_mapped[pd_offset].address << 12);
- map_pt(__pt);
- pt_mapped[pt_offset].flags = F_PRESENT | flags;
- pt_mapped[pt_offset].address = (((uint64_t)phys_addr) >> 12);
-
- invlpg(virt_addr);
- return 0;
+static void
+select_page(
+ void *virt,
+ struct pte *pt,
+ struct pte **res
+) {
+ load_pt(pt);
+ uint64_t offset = ((uint64_t)virt >> 12) & 0x1ff;
+ struct pte *page = &pt_mapped[offset];
+ *res = page;
+ return;
}
-int unmap_page(struct pml4e *pml4, void *virt_addr) {
- uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
- uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
- uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
- uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
- //uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
+static inline void
+try_unmap_pml4(void) {
+ for (int i = 0; i < 512; i++) {
+ if (pml4_mapped[i].flags & F_PRESENT)
+ return;
+ }
+ for (int i = 0; i < 512; i++) {
+ if (pml4_mapped[i].address) {
+ void *addr = (void *)(uintptr_t)(pml4_mapped[i].address << 12);
+ free_phys_page(addr);
+ }
+ }
+}
- if (!(pml4[pml4_offset].flags & F_PRESENT))
- return -1;
- struct pdpte *__pdpt = (struct pdpte *)(uintptr_t)(pml4[pml4_offset].address << 12);
- map_pdpt(__pdpt);
- if (!(pdpt_mapped[pdpt_offset].flags & F_PRESENT))
- return -1;
- struct pde *__pd = (struct pde *)(uintptr_t)(pdpt_mapped[pdpt_offset].address << 12);
- map_pd(__pd);
- if (!(pd_mapped[pd_offset].flags & F_PRESENT))
- return -1;
- struct pte *__pt = (struct pte *)(uintptr_t)(pd_mapped[pd_offset].address << 12);
- map_pt(__pt);
- if (!(pt_mapped[pt_offset].flags & F_PRESENT))
- return -1;
+static inline void
+try_unmap_pdpt(void) {
+ for (int i = 0; i < 512; i++) {
+ if (pdpt_mapped[i].flags & F_PRESENT)
+ return;
+ }
+ for (int i = 0; i < 512; i++) {
+ if (pdpt_mapped[i].address) {
+ void *addr = (void *)(uintptr_t)(pdpt_mapped[i].address << 12);
+ free_phys_page(addr);
+ }
+ }
+ try_unmap_pml4();
+}
- pt_mapped[pt_offset].flags = 0;
+static inline void
+try_unmap_pd(void) {
+ for (int i = 0; i < 512; i++) {
+ if (pd_mapped[i].flags & F_PRESENT)
+ return;
+ }
+ for (int i = 0; i < 512; i++) {
+ if (pd_mapped[i].address) {
+ void *addr = (void *)(uintptr_t)(pd_mapped[i].address << 12);
+ free_phys_page(addr);
+ }
+ }
+ try_unmap_pdpt();
+}
- int i = 0;
- for(; i < 512; i++) {
+static inline void
+try_unmap_pt(void) {
+ for (int i = 0; i < 512; i++) {
if (pt_mapped[i].flags & F_PRESENT)
- break;
+ return;
}
- if (i == 512)
- goto done;
-
- pd_mapped[pd_offset].flags = 0;
-
- for(i = 0; i < 512; i++) {
- if (pd_mapped[i].flags & F_PRESENT)
- break;
+ for (int i = 0; i < 512; i++) {
+ if (pt_mapped[i].address) {
+ void *addr = (void *)(uintptr_t)(pt_mapped[i].address << 12);
+ free_phys_page(addr);
+ }
}
- if (i == 512)
- goto done;
+ try_unmap_pd();
+}
+
+static void
+unmap_page(
+ struct pml4e *root,
+ void *virt
+) {
+
+ struct pdpte *pdpt;
+ struct pde *pd;
+ struct pte *pt;
+ struct pte *page;
+
+ unsigned int df = 0;
+
+ if (select_pdpt(virt, df, root, &pdpt, false))
+ return;
+
+ if (select_pd(virt, df, pdpt, &pd, false))
+ return;
+
+ if (select_pt(virt, df, pd, &pt, false))
+ return;
+
+ select_page(virt, pt, &page);
+
+ page->address = 0;
+ page->flags = 0;
+
+ try_unmap_pt();
+
+ invlpg(virt);
+
+ return;
+}
+
+static void
+unmap_pages(
+ struct pml4e *root,
+ void *virt_start,
+ long page_count
+) {
+
+ uint64_t pml4_o = -1,
+ pdpt_o = -1,
+ pd_o = -1;
+
+ uint64_t pml4_n,
+ pdpt_n,
+ pd_n;
+
+ struct pdpte *pdpt = NULL;
+ struct pde *pd = NULL;
+ struct pte *pt = NULL;
+ struct pte *page = NULL;
+
+ unsigned int df = 0;
+
+ void *virt;
- pdpt_mapped[pdpt_offset].flags = 0;
+ for (long i = 0; i < page_count; i++) {
+
+ virt = (char *)virt_start + (i * PAGE_SIZE);
+
+ pml4_n = (uint64_t) virt >> 39;
+ pdpt_n = ((uint64_t) virt >> 30) & 0x1ff;
+ pd_n = ((uint64_t) virt >> 21) & 0x1ff;
+
+ if (pdpt == NULL || pml4_o != pml4_n) {
+ if (select_pdpt(virt, df, root, &pdpt, false))
+ continue;
+ pml4_o = pml4_n;
+ }
+
+ if (pd == NULL || pdpt_o != pdpt_n) {
+ if (select_pd(virt, df, pdpt, &pd, false))
+ continue;
+ pdpt_o = pdpt_n;
+ }
+
+ if (pt == NULL || pd_o != pd_n) {
+ if (pt) {
+ try_unmap_pt();
+ }
+ if (select_pt(virt, df, pd, &pt, false))
+ continue;
+ pd_o = pd_n;
+ }
+
+ select_page(virt, pt, &page);
+
+ page->address = 0;
+		page->flags = 0; invlpg(virt);
- for(i = 0; i < 512; i++) {
- if(pdpt_mapped[i].flags & F_PRESENT)
- break;
}
- if (i == 512)
- goto done;
- pml4[pml4_offset].flags = 0;
+ if (pt != NULL)
+ try_unmap_pt();
+
+ return;
+}
+
+static int
+map_page(
+ struct pml4e *root,
+ void *virt,
+ void *phys,
+ unsigned int flags
+) {
+
+ struct pdpte *pdpt;
+ struct pde *pd;
+ struct pte *pt;
+ struct pte *page;
+
+ unsigned int df = F_WRITEABLE;
+
+ if (select_pdpt(virt, df, root, &pdpt, true))
+ return 1;
+
+ if (select_pd(virt, df, pdpt, &pd, true))
+ return 1;
+
+ if (select_pt(virt, df, pd, &pt, true))
+ return 1;
+
+ select_page(virt, pt, &page);
+
+ page->address = (uint64_t)phys >> 12;
+ page->flags = F_PRESENT | flags;
+ invlpg(virt);
+
+ return 0;
+}
+
+static int
+map_pages(
+ struct pml4e *root,
+ void *virt_start,
+ void *phys_start,
+ unsigned int flags,
+ long page_count
+) {
+
+ uint64_t pml4_o = -1,
+ pdpt_o = -1,
+ pd_o = -1;
+
+ uint64_t pml4_n,
+ pdpt_n,
+ pd_n;
+
+ struct pdpte *pdpt = NULL;
+ struct pde *pd = NULL;
+ struct pte *pt = NULL;
+ struct pte *page = NULL;
- //TODO: Return memory used for page structures
+ void *virt, *phys;
+
+ unsigned int df = F_WRITEABLE;
+
+ long i;
+ for (i = 0; i < page_count; i++) {
+
+ virt = (char *)virt_start + (i * PAGE_SIZE);
+ phys = (char *)phys_start + (i * PAGE_SIZE);
+
+ pml4_n = (uint64_t) virt >> 39;
+ pdpt_n = ((uint64_t) virt >> 30) & 0x1ff;
+ pd_n = ((uint64_t) virt >> 21) & 0x1ff;
+
+ if (pdpt == NULL || pml4_o != pml4_n) {
+ if (select_pdpt(virt, df, root, &pdpt, true))
+ goto failed;
+ pml4_o = pml4_n;
+ }
+
+ if (pd == NULL || pdpt_o != pdpt_n) {
+ if (select_pd(virt, df, pdpt, &pd, true))
+ goto failed;
+ pdpt_o = pdpt_n;
+ }
+
+ if (pt == NULL || pd_o != pd_n) {
+ if (select_pt(virt, df, pd, &pt, true))
+ goto failed;
+ pd_o = pd_n;
+ }
+
+ select_page(virt, pt, &page);
+
+ page->address = (uint64_t)phys >> 12;
+ page->flags = F_PRESENT | flags;
+ invlpg(virt);
+
+ }
-done:
- invlpg(virt_addr);
return 0;
+
+failed:
+
+	unmap_pages(root, virt_start, i);
+
+ return 1;
}
void paging_init(void) {
@@ -239,7 +504,7 @@ void paging_init(void) {
kernel_pd_0[1].address = (uint64_t)(&paging_pt) >> 12;
kernel_pd_0[2].flags = F_PRESENT | F_WRITEABLE;
kernel_pd_0[2].address = (uint64_t)(&bootstrap_pt) >> 12;
-
+
memset(&paging_pt, 0, 4096);
memset(&bootstrap_pt, 0, 4096);
}
@@ -260,21 +525,24 @@ void *mmap(void *addr, size_t len) {
return NULL;
}
void *phys = page_align(addr);
- for (long i = 0; i < pages; i++) {
- void *virt_temp = (char *)virt + (i * PAGE_SIZE);
- void *phys_temp = (char *)phys + (i * PAGE_SIZE);
- map_page(kernel_pml4, virt_temp, phys_temp, F_WRITEABLE);
+ if (map_pages(
+ kernel_pml4,
+ virt,
+ phys,
+ F_WRITEABLE,
+ pages
+ )) {
+ virtaddr_free(virt);
+ return NULL;
}
- map_page(kernel_pml4, virt, (void*)0x23443, F_WRITEABLE);
return virt;
}
void unmap(void *addr) {
long pages = virtaddr_free(addr);
- for (long i = 0; i < pages; i++) {
- void *virt = (char *)addr + (i * PAGE_SIZE);
- unmap_page(kernel_pml4, virt);
- }
+ if (pages < 1)
+ return;
+ unmap_pages(kernel_pml4, addr, pages);
}
void *alloc_pages(int count) {
@@ -286,10 +554,15 @@ void *alloc_pages(int count) {
virtaddr_free(virt);
return NULL;
}
- for (int i = 0; i < count; i++) {
- void *virt_temp = (char *)virt + (i * PAGE_SIZE);
- void *phys_temp = (char *)phys + (i * PAGE_SIZE);
- map_page(kernel_pml4, virt_temp, phys_temp, F_WRITEABLE);
+ if (map_pages(
+ kernel_pml4,
+ virt,
+ phys,
+ F_WRITEABLE,
+ count
+ )) {
+ virtaddr_free(virt);
+ return NULL;
}
return virt;
}
@@ -298,10 +571,7 @@ void free_page(void *virt) {
long pages = virtaddr_free(virt);
if (pages < 1)
return;
- for (long i = 0; i < pages; i++) {
- void *virt_temp = (char *)virt + (i * PAGE_SIZE);
- unmap_page(kernel_pml4, virt_temp);
- }
+ unmap_pages(kernel_pml4, virt, pages);
}
void memory_lock(void) {
@@ -317,5 +587,6 @@ int kmap_page(void *virt_addr, void *phys_addr, unsigned int flags) {
}
int kunmap_page(void *virt_addr) {
- return unmap_page(kernel_pml4, virt_addr);
+ unmap_page(kernel_pml4, virt_addr);
+ return 0;
}