summaryrefslogtreecommitdiff
path: root/kernel/memory/paging.c
diff options
context:
space:
mode:
authorFreya Murphy <freya@freyacat.org>2025-04-23 11:43:56 -0400
committerFreya Murphy <freya@freyacat.org>2025-04-23 11:43:56 -0400
commit5619cfbf0f989cfe1cac1654a63efc4c86d06e08 (patch)
tree70fc7b896d0689c30e7ac7d77d20461bff909955 /kernel/memory/paging.c
parentchange pcb structure (move context to top) (diff)
downloadcomus-5619cfbf0f989cfe1cac1654a63efc4c86d06e08.tar.gz
comus-5619cfbf0f989cfe1cac1654a63efc4c86d06e08.tar.bz2
comus-5619cfbf0f989cfe1cac1654a63efc4c86d06e08.zip
some paging updates (more to come)
Diffstat (limited to 'kernel/memory/paging.c')
-rw-r--r--kernel/memory/paging.c594
1 files changed, 355 insertions, 239 deletions
diff --git a/kernel/memory/paging.c b/kernel/memory/paging.c
index ff73444..f8fcf00 100644
--- a/kernel/memory/paging.c
+++ b/kernel/memory/paging.c
@@ -12,61 +12,138 @@
// PAGE MAP LEVEL 4 ENTRY
struct pml4e {
uint64_t flags : 6;
- uint64_t : 6;
+ uint64_t : 1; // ignored
+ uint64_t : 1; // reserved
+ uint64_t : 4; // ignored
uint64_t address : 40;
- uint64_t : 11;
+ uint64_t : 11; // ignored
uint64_t execute_disable : 1;
-};
+} __attribute__((packed));
+
+// PAGE MAP LEVEL 4
+struct pml4 {
+ union {
+ // pml4 metadata
+ struct {
+ uint64_t : 6; // flags
+ uint64_t : 1; // ignored
+ uint64_t : 1; // reserved
+ uint64_t : 4; // ignored
+ uint64_t : 40; // address
+ uint64_t : 2; // ignored
+ uint64_t count : 9; // ignored
+ uint64_t : 1; // execute_disable
+ };
+ // entries
+ struct pml4e entries[512];
+ };
+} __attribute__((packed));
// PAGE DIRECTORY POINTER TABLE ENTRY
struct pdpte {
uint64_t flags : 6;
- uint64_t : 1;
+ uint64_t : 1; // ignored
uint64_t page_size : 1;
- uint64_t : 4;
+ uint64_t : 4; // ignored
uint64_t address : 40;
- uint64_t : 11;
+ uint64_t : 11; // ignored
uint64_t execute_disable : 1;
-};
+} __attribute__((packed, aligned(8)));
+
+// PAGE DIRECTORY POINTER TABLE
+struct pdpt {
+ union {
+ // pdpt metadata
+ struct {
+ uint64_t : 6; // flags
+ uint64_t : 1; // ignored
+ uint64_t : 1; // page_size
+ uint64_t : 4; // ignored
+ uint64_t : 40; // address
+ uint64_t : 2; // ignored
+ uint64_t count : 9; // ignored
+ uint64_t : 1; // execute_disable
+ };
+ // entries
+ struct pdpte entries[512];
+ };
+} __attribute__((packed, aligned(4096)));
// PAGE DIRECTORY ENTRY
struct pde {
uint64_t flags : 6;
- uint64_t : 1;
+ uint64_t : 1; // ignored
uint64_t page_size : 1;
- uint64_t : 4;
+ uint64_t : 4; // ignored
uint64_t address : 40;
- uint64_t : 11;
+ uint64_t : 11; // ignored
uint64_t execute_disable : 1;
-};
+} __attribute__((packed, aligned(8)));
+
+// PAGE DIRECTORY
+struct pd {
+ union {
+ // pd metadata
+ struct {
+ uint64_t : 6; // flags
+ uint64_t : 1; // ignored
+ uint64_t : 1; // page_size
+ uint64_t : 4; // ignored
+ uint64_t : 40; // address
+ uint64_t : 2; // ignored
+ uint64_t count : 9; // ignored
+ uint64_t : 1; // execute_disable
+ };
+ // entries
+ struct pde entries[512];
+ };
+} __attribute__((packed, aligned(4096)));
// PAGE TABLE ENTRY
struct pte {
uint64_t flags : 9;
- uint64_t loaded : 1;
- uint64_t : 2;
+ uint64_t loaded : 1; // ignored
+ uint64_t : 2; // ignored
uint64_t address : 40;
- uint64_t : 7;
+ uint64_t : 7; // ignored
uint64_t protection_key : 4;
uint64_t execute_disable : 1;
-};
+} __attribute__((packed, aligned(8)));
+
+// PAGE TABLE
+struct pt {
+ union {
+ // pt metadata
+ struct {
+ uint64_t : 9; // flags
+ uint64_t : 1; // (loaded) ignored
+ uint64_t count_low : 2; // ignored
+ uint64_t : 40; // address
+ uint64_t count_high : 7; // ignored
+ uint64_t : 4; // protection_key
+ uint64_t : 1; // execute_disable
+ };
+ // entries
+ struct pte entries[512];
+ };
+} __attribute__((packed, aligned(4096)));
-// bss segment, can write to
-extern volatile struct pml4e kernel_pml4[512];
-extern volatile struct pdpte kernel_pdpt_0[512];
-extern volatile struct pde kernel_pd_0[512];
-extern volatile struct pte kernel_pd_0_ents[512 * N_IDENT_PTS];
-extern volatile struct pde kernel_pd_1[512];
-extern volatile struct pte
- paging_pt[512]; // paging_pt should NEVER be outside of this file, NEVER i say
+// bootstrapping kernel paging structures
+extern volatile struct pml4 kernel_pml4;
+extern volatile struct pdpt kernel_pdpt_0;
+extern volatile struct pd kernel_pd_0;
+extern volatile struct pt kernel_pd_0_ents[N_IDENT_PTS];
+extern volatile struct pd kernel_pd_1;
+extern volatile struct pt
+ paging_pt; // paging_pt should NEVER be outside of this file, NEVER i say
// paged address to read page tables
// the structures are not guaranteed to be ident mapped
-// map them here with map_<type>(phys_addr) before useing structures
-static volatile struct pml4e *pml4_mapped = (void *)(uintptr_t)0x40000000;
-static volatile struct pdpte *pdpt_mapped = (void *)(uintptr_t)0x40001000;
-static volatile struct pde *pd_mapped = (void *)(uintptr_t)0x40002000;
-static volatile struct pte *pt_mapped = (void *)(uintptr_t)0x40003000;
+// map them here with load_<type>(phys_addr) before using structures
+static volatile struct pml4 *pml4_mapped = (void *)(uintptr_t)0x40000000;
+static volatile struct pdpt *pdpt_mapped = (void *)(uintptr_t)0x40001000;
+static volatile struct pd *pd_mapped = (void *)(uintptr_t)0x40002000;
+static volatile struct pt *pt_mapped = (void *)(uintptr_t)0x40003000;
static volatile void *addr_mapped = (void *)(uintptr_t)0x40004000;
// kernel start/end
@@ -78,55 +155,55 @@ static inline void invlpg(volatile void *addr)
__asm__ volatile("invlpg (%0)" ::"r"(addr) : "memory");
}
-static volatile struct pml4e *load_pml4(volatile void *phys)
+static volatile struct pml4 *load_pml4(volatile void *phys)
{
- static volatile struct pte *pt = &paging_pt[0];
- if ((uint64_t)phys >> 12 == pt->address)
+ static volatile struct pte *pte = &paging_pt.entries[0];
+ if ((uint64_t)phys >> 12 == pte->address)
return pml4_mapped;
- pt->address = (uint64_t)phys >> 12;
- pt->flags = F_PRESENT | F_WRITEABLE;
+ pte->address = (uint64_t)phys >> 12;
+ pte->flags = F_PRESENT | F_WRITEABLE;
invlpg(pml4_mapped);
return pml4_mapped;
}
-static volatile struct pdpte *load_pdpt(volatile void *phys)
+static volatile struct pdpt *load_pdpt(volatile void *phys)
{
- static volatile struct pte *pt = &paging_pt[1];
- if ((uint64_t)phys >> 12 == pt->address)
+ static volatile struct pte *pte = &paging_pt.entries[1];
+ if ((uint64_t)phys >> 12 == pte->address)
return pdpt_mapped;
- pt->address = (uint64_t)phys >> 12;
- pt->flags = F_PRESENT | F_WRITEABLE;
+ pte->address = (uint64_t)phys >> 12;
+ pte->flags = F_PRESENT | F_WRITEABLE;
invlpg(pdpt_mapped);
return pdpt_mapped;
}
-static volatile struct pde *load_pd(volatile void *phys)
+static volatile struct pd *load_pd(volatile void *phys)
{
- static volatile struct pte *pt = &paging_pt[2];
- if ((uint64_t)phys >> 12 == pt->address)
+ static volatile struct pte *pte = &paging_pt.entries[2];
+ if ((uint64_t)phys >> 12 == pte->address)
return pd_mapped;
- pt->address = (uint64_t)phys >> 12;
- pt->flags = F_PRESENT | F_WRITEABLE;
+ pte->address = (uint64_t)phys >> 12;
+ pte->flags = F_PRESENT | F_WRITEABLE;
invlpg(pd_mapped);
return pd_mapped;
}
-static volatile struct pte *load_pt(volatile void *phys)
+static volatile struct pt *load_pt(volatile void *phys)
{
- static volatile struct pte *pt = &paging_pt[3];
- if ((uint64_t)phys >> 12 == pt->address)
+ static volatile struct pte *pte = &paging_pt.entries[3];
+ if ((uint64_t)phys >> 12 == pte->address)
return pt_mapped;
- pt->address = (uint64_t)phys >> 12;
- pt->flags = F_PRESENT | F_WRITEABLE;
+ pte->address = (uint64_t)phys >> 12;
+ pte->flags = F_PRESENT | F_WRITEABLE;
invlpg(pt_mapped);
return pt_mapped;
}
static volatile void *load_addr(volatile void *phys_addr)
{
- static volatile struct pte *pt = &paging_pt[4];
- pt->address = (uint64_t)phys_addr >> 12;
- pt->flags = F_PRESENT | F_WRITEABLE;
+ static volatile struct pte *pte = &paging_pt.entries[4];
+ pte->address = (uint64_t)phys_addr >> 12;
+ pte->flags = F_PRESENT | F_WRITEABLE;
invlpg(addr_mapped);
return addr_mapped;
}
@@ -136,193 +213,218 @@ static volatile void *load_addr(volatile void *phys_addr)
#define PAG_NOT_PRESENT 2
static int select_pdpt(void *virt, unsigned int flags,
- volatile struct pml4e *root, volatile struct pdpte **res,
+ volatile struct pml4 *pml4, volatile struct pdpt **res,
bool create)
{
- load_pml4(root);
+ load_pml4(pml4);
uint64_t offset = (uint64_t)virt >> 39;
- volatile struct pml4e *pml4e = &pml4_mapped[offset];
+ volatile struct pml4e *pml4e = &pml4_mapped->entries[offset];
if (!(pml4e->flags & F_PRESENT)) {
- if (!create) {
+ if (!create)
return PAG_NOT_PRESENT;
- }
void *new_page = alloc_phys_page();
- if (new_page == NULL) {
+ if (new_page == NULL)
return PAG_CANNOT_ALLOC;
- }
load_addr(new_page);
memsetv(addr_mapped, 0, PAGE_SIZE);
pml4e->address = ((uint64_t)new_page) >> 12;
pml4e->flags = F_PRESENT;
+ // update count
+ pml4_mapped->count++;
}
if (flags)
pml4e->flags = F_PRESENT | flags;
- *res = (volatile struct pdpte *)(uintptr_t)(pml4e->address << 12);
+ *res = (volatile struct pdpt *)(uintptr_t)(pml4e->address << 12);
return PAG_SUCCESS;
}
-static int select_pd(void *virt, unsigned int flags,
- volatile struct pdpte *pdpt, volatile struct pde **res,
- bool create)
+static int select_pd(void *virt, unsigned int flags, volatile struct pdpt *pdpt,
+ volatile struct pd **res, bool create)
{
load_pdpt(pdpt);
uint64_t offset = ((uint64_t)virt >> 30) & 0x1ff;
- volatile struct pdpte *pdpte = &pdpt_mapped[offset];
+ volatile struct pdpte *pdpte = &pdpt_mapped->entries[offset];
if (!(pdpte->flags & F_PRESENT)) {
- if (!create) {
+ if (!create)
return PAG_NOT_PRESENT;
- }
void *new_page = alloc_phys_page();
- if (new_page == NULL) {
+ if (new_page == NULL)
return PAG_CANNOT_ALLOC;
- }
load_addr(new_page);
memsetv(addr_mapped, 0, PAGE_SIZE);
pdpte->address = ((uint64_t)new_page) >> 12;
pdpte->flags = F_PRESENT;
+ // update count
+ pdpt_mapped->count++;
}
if (flags)
pdpte->flags = F_PRESENT | flags;
- *res = (volatile struct pde *)(uintptr_t)(pdpte->address << 12);
+ *res = (volatile struct pd *)(uintptr_t)(pdpte->address << 12);
return PAG_SUCCESS;
}
-static int select_pt(void *virt, unsigned int flags, volatile struct pde *pd,
- volatile struct pte **res, bool create)
+static int select_pt(void *virt, unsigned int flags, volatile struct pd *pd,
+ volatile struct pt **res, bool create)
{
load_pd(pd);
uint64_t offset = ((uint64_t)virt >> 21) & 0x1ff;
- volatile struct pde *pde = &pd_mapped[offset];
+ volatile struct pde *pde = &pd_mapped->entries[offset];
if (!(pde->flags & F_PRESENT)) {
- if (!create) {
+ if (!create)
return PAG_NOT_PRESENT;
- }
void *new_page = alloc_phys_page();
- if (new_page == NULL) {
+ if (new_page == NULL)
return PAG_CANNOT_ALLOC;
- }
load_addr(new_page);
memsetv(addr_mapped, 0, PAGE_SIZE);
pde->address = ((uint64_t)new_page) >> 12;
pde->flags = F_PRESENT;
+ // update count
+ pd_mapped->count++;
}
if (flags)
pde->flags = F_PRESENT | flags;
- *res = (volatile struct pte *)(uintptr_t)(pde->address << 12);
+ *res = (volatile struct pt *)(uintptr_t)(pde->address << 12);
return PAG_SUCCESS;
}
-static void select_page(void *virt, volatile struct pte *pt,
+static void select_page(void *virt, volatile struct pt *pt,
volatile struct pte **res)
{
load_pt(pt);
uint64_t offset = ((uint64_t)virt >> 12) & 0x1ff;
- volatile struct pte *page = &pt_mapped[offset];
+ volatile struct pte *page = &pt_mapped->entries[offset];
*res = page;
return;
}
-static inline void try_unmap_pml4(void)
+static void pte_free(volatile struct pte *vPTE)
{
- for (int i = 0; i < 512; i++) {
- if (pml4_mapped[i].flags & F_PRESENT)
- return;
- }
- for (int i = 0; i < 512; i++) {
- if (pml4_mapped[i].address) {
- void *addr = (void *)(uintptr_t)(pml4_mapped[i].address << 12);
- free_phys_page(addr);
- }
- }
+ volatile struct pte *pPTE;
+
+ if (!(vPTE->flags & F_PRESENT))
+ return;
+
+ pPTE = (volatile struct pte *)((uintptr_t)vPTE->address << 12);
+
+ // allocated, free phys page
+ free_phys_page((void*)(uintptr_t)pPTE);
}
-static inline void try_unmap_pdpt(void)
+static void pt_free(volatile struct pde *vPDE)
{
- for (int i = 0; i < 512; i++) {
- if (pdpt_mapped[i].flags & F_PRESENT)
- return;
- }
- for (int i = 0; i < 512; i++) {
- if (pdpt_mapped[i].address) {
- void *addr = (void *)(uintptr_t)(pdpt_mapped[i].address << 12);
- free_phys_page(addr);
- }
- }
- try_unmap_pml4();
+ volatile struct pt *pPT, *vPT;
+ uint64_t count;
+
+ if (!(vPDE->flags & F_PRESENT))
+ return;
+
+ pPT = (volatile struct pt *)((uintptr_t)vPDE->address << 12);
+ vPT = load_pt(pPT);
+ count = (vPT->count_high << 2) | vPT->count_low;
+
+ if (count)
+ for (uint64_t i = 0; i < 512; i++)
+ pte_free(&vPT->entries[i]);
+ free_phys_page((void*)(uintptr_t)pPT);
}
-static inline void try_unmap_pd(void)
+static void pd_free(volatile struct pdpte *vPDPTE)
{
- for (int i = 0; i < 512; i++) {
- if (pd_mapped[i].flags & F_PRESENT)
- return;
- }
- for (int i = 0; i < 512; i++) {
- if (pd_mapped[i].address) {
- void *addr = (void *)(uintptr_t)(pd_mapped[i].address << 12);
- free_phys_page(addr);
- }
- }
- try_unmap_pdpt();
+ volatile struct pd *pPD, *vPD;
+ uint64_t count;
+
+ if (!(vPDPTE->flags & F_PRESENT))
+ return;
+
+ pPD = (volatile struct pd *)((uintptr_t)vPDPTE->address << 12);
+ vPD = load_pd(pPD);
+ count = vPD->count;
+
+ if (count)
+ for (uint64_t i = 0; i < 512; i++)
+ pt_free(&vPD->entries[i]);
+ free_phys_page((void*)(uintptr_t)pPD);
}
-static inline void try_unmap_pt(void)
+static void pdpt_free(volatile struct pml4e *vPML4E)
{
- for (int i = 0; i < 512; i++) {
- if (pt_mapped[i].flags & F_PRESENT)
- return;
- }
- for (int i = 0; i < 512; i++) {
- if (pt_mapped[i].address) {
- void *addr = (void *)(uintptr_t)(pt_mapped[i].address << 12);
- free_phys_page(addr);
- }
- }
- try_unmap_pd();
+ volatile struct pdpt *pPDPT, *vPDPT;
+ uint64_t count;
+
+ if (!(vPML4E->flags & F_PRESENT))
+ return;
+
+ pPDPT = (volatile struct pdpt *)((uintptr_t)vPML4E->address << 12);
+ vPDPT = load_pdpt(pPDPT);
+ count = vPDPT->count;
+
+ if (count)
+ for (uint64_t i = 0; i < 512; i++)
+ pd_free(&vPDPT->entries[i]);
+ free_phys_page((void*)(uintptr_t)pPDPT);
}
-static void unmap_page(volatile struct pml4e *root, void *virt)
+void pml4_free(volatile void *pPML4)
{
- volatile struct pdpte *pdpt;
- volatile struct pde *pd;
- volatile struct pte *pt;
- volatile struct pte *page;
+ volatile struct pml4 *vPML4;
+ uint64_t count;
+
+ vPML4 = load_pml4(pPML4);
+ count = vPML4->count;
+
+ if (count)
+ for (uint64_t i = 0; i < 512; i++)
+ pdpt_free(&vPML4->entries[i]);
+ free_phys_page((void*)(uintptr_t)pPML4);
+}
+
+static void unmap_page(volatile struct pml4 *pPML4, void *virt)
+{
+ volatile struct pdpt *pPDPT;
+ volatile struct pd *pPD;
+ volatile struct pt *pPT;
+ volatile struct pte *vPTE;
unsigned int df = 0;
- if (select_pdpt(virt, df, root, &pdpt, false))
+ if (select_pdpt(virt, df, pPML4, &pPDPT, false))
return;
- if (select_pd(virt, df, pdpt, &pd, false))
+ if (select_pd(virt, df, pPDPT, &pPD, false))
return;
- if (select_pt(virt, df, pd, &pt, false))
+ if (select_pt(virt, df, pPD, &pPT, false))
return;
- select_page(virt, pt, &page);
+ select_page(virt, pPT, &vPTE);
- page->address = 0;
- page->flags = 0;
- page->loaded = 0;
+ // update counts in parent structures
+ pml4_mapped->count--;
+ pdpt_mapped->count--;
+ pd_mapped->count--;
- try_unmap_pt();
+ // unmap page
+ vPTE->address = 0;
+ vPTE->flags = 0;
+ vPTE->loaded = 0;
invlpg(virt);
return;
}
-static void unmap_pages(volatile struct pml4e *root, void *virt_start,
+static void unmap_pages(volatile struct pml4 *pPML4, void *virt_start,
long page_count)
{
uint64_t pml4_o = -1, pdpt_o = -1, pd_o = -1;
uint64_t pml4_n, pdpt_n, pd_n;
- volatile struct pdpte *pdpt = NULL;
- volatile struct pde *pd = NULL;
- volatile struct pte *pt = NULL;
- volatile struct pte *page = NULL;
+ volatile struct pdpt *pPDPT = NULL;
+ volatile struct pd *pPD = NULL;
+ volatile struct pt *pPT = NULL;
+ volatile struct pte *vPTE = NULL;
unsigned int df = 0;
@@ -335,112 +437,124 @@ static void unmap_pages(volatile struct pml4e *root, void *virt_start,
pdpt_n = ((uint64_t)virt >> 30) & 0x1ff;
pd_n = ((uint64_t)virt >> 21) & 0x1ff;
- if (pdpt == NULL || pml4_o != pml4_n) {
- if (select_pdpt(virt, df, root, &pdpt, false))
+ if (pPDPT == NULL || pml4_o != pml4_n) {
+ if (select_pdpt(virt, df, pPML4, &pPDPT, false))
continue;
pml4_o = pml4_n;
}
- if (pd == NULL || pdpt_o != pdpt_n) {
- if (select_pd(virt, df, pdpt, &pd, false))
+ if (pPD == NULL || pdpt_o != pdpt_n) {
+ if (select_pd(virt, df, pPDPT, &pPD, false))
continue;
pdpt_o = pdpt_n;
}
- if (pt == NULL || pd_o != pd_n) {
- if (pt) {
- try_unmap_pt();
+ if (pPT == NULL || pd_o != pd_n) {
+ if (pPT) {
+ //pt_try_free();
}
- if (select_pt(virt, df, pd, &pt, false))
+ if (select_pt(virt, df, pPD, &pPT, false))
continue;
pd_o = pd_n;
}
- select_page(virt, pt, &page);
+ select_page(virt, pPT, &vPTE);
+
+ // update counts in parent structures
+ pml4_mapped->count--;
+ pdpt_mapped->count--;
+ pd_mapped->count--;
- page->address = 0;
- page->flags = 0;
- page->loaded = 0;
+ // unmap page
+ vPTE->address = 0;
+ vPTE->flags = 0;
+ vPTE->loaded = 0;
}
- if (pt != NULL)
- try_unmap_pt();
+ //if (pt != NULL)
+ // pt_try_free();
return;
}
-static volatile struct pte *get_page(volatile struct pml4e *root, void *virt)
+static volatile struct pte *get_page(volatile struct pml4 *pPML4, void *virt)
{
- volatile struct pdpte *pdpt;
- volatile struct pde *pd;
- volatile struct pte *pt;
- volatile struct pte *page;
+ volatile struct pdpt *pPDPT;
+ volatile struct pd *pPD;
+ volatile struct pt *pPT;
+ volatile struct pte *vPTE;
unsigned int df = 0;
- if (select_pdpt(virt, df, root, &pdpt, false))
+ if (select_pdpt(virt, df, pPML4, &pPDPT, false))
return NULL;
- if (select_pd(virt, df, pdpt, &pd, false))
+ if (select_pd(virt, df, pPDPT, &pPD, false))
return NULL;
- if (select_pt(virt, df, pd, &pt, false))
+ if (select_pt(virt, df, pPD, &pPT, false))
return NULL;
- select_page(virt, pt, &page);
+ select_page(virt, pPT, &vPTE);
- return page;
+ return vPTE;
}
-static int map_page(volatile struct pml4e *root, void *virt, void *phys,
+static int map_page(volatile struct pml4 *pPML4, void *virt, void *phys,
unsigned int flags)
{
- volatile struct pdpte *pdpt;
- volatile struct pde *pd;
- volatile struct pte *pt;
- volatile struct pte *page;
+ volatile struct pdpt *pPDPT;
+ volatile struct pd *pPD;
+ volatile struct pt *pPT;
+ volatile struct pte *vPTE;
unsigned int df = F_WRITEABLE;
if (phys == NULL)
df = 0; // alloc page on fault
- if (select_pdpt(virt, df, root, &pdpt, true))
+ if (select_pdpt(virt, df, pPML4, &pPDPT, true))
return 1;
- if (select_pd(virt, df, pdpt, &pd, true))
+ if (select_pd(virt, df, pPDPT, &pPD, true))
return 1;
- if (select_pt(virt, df, pd, &pt, true))
+ if (select_pt(virt, df, pPD, &pPT, true))
return 1;
- select_page(virt, pt, &page);
+ select_page(virt, pPT, &vPTE);
+
+ // update counts in parent structures
+ pml4_mapped->count++;
+ pdpt_mapped->count++;
+ pd_mapped->count++;
+ // map page
if (phys) {
- page->flags = F_PRESENT | flags;
- page->address = (uint64_t)phys >> 12;
- page->loaded = 1;
+ vPTE->flags = F_PRESENT | flags;
+ vPTE->address = (uint64_t)phys >> 12;
+ vPTE->loaded = 1;
invlpg(virt);
} else {
- page->flags = flags;
- page->address = 0;
- page->loaded = 0;
+ vPTE->flags = flags;
+ vPTE->address = 0;
+ vPTE->loaded = 0;
}
return 0;
}
-static int map_pages(volatile struct pml4e *root, void *virt_start,
+static int map_pages(volatile struct pml4 *pPMl4, void *virt_start,
void *phys_start, unsigned int flags, long page_count)
{
uint64_t pml4_o = -1, pdpt_o = -1, pd_o = -1;
uint64_t pml4_n, pdpt_n, pd_n;
- volatile struct pdpte *pdpt = NULL;
- volatile struct pde *pd = NULL;
- volatile struct pte *pt = NULL;
- volatile struct pte *page = NULL;
+ volatile struct pdpt *pPDPT = NULL;
+ volatile struct pd *pPD = NULL;
+ volatile struct pt *pPT = NULL;
+ volatile struct pte *vPTE = NULL;
void *virt, *phys;
@@ -458,34 +572,40 @@ static int map_pages(volatile struct pml4e *root, void *virt_start,
pdpt_n = ((uint64_t)virt >> 30) & 0x1ff;
pd_n = ((uint64_t)virt >> 21) & 0x1ff;
- if (pdpt == NULL || pml4_o != pml4_n) {
- if (select_pdpt(virt, df, root, &pdpt, true))
+ if (pPDPT == NULL || pml4_o != pml4_n) {
+ if (select_pdpt(virt, df, pPMl4, &pPDPT, true))
goto failed;
pml4_o = pml4_n;
}
- if (pd == NULL || pdpt_o != pdpt_n) {
- if (select_pd(virt, df, pdpt, &pd, true))
+ if (pPD == NULL || pdpt_o != pdpt_n) {
+ if (select_pd(virt, df, pPDPT, &pPD, true))
goto failed;
pdpt_o = pdpt_n;
}
- if (pt == NULL || pd_o != pd_n) {
- if (select_pt(virt, df, pd, &pt, true))
+ if (pPT == NULL || pd_o != pd_n) {
+ if (select_pt(virt, df, pPD, &pPT, true))
goto failed;
pd_o = pd_n;
}
- select_page(virt, pt, &page);
+ select_page(virt, pPT, &vPTE);
+ // update counts in parent structures
+ pml4_mapped->count++;
+ pdpt_mapped->count++;
+ pd_mapped->count++;
+
+ // map page
if (phys_start) {
- page->flags = F_PRESENT | flags;
- page->address = (uint64_t)phys >> 12;
- page->loaded = 1;
+ vPTE->flags = F_PRESENT | flags;
+ vPTE->address = (uint64_t)phys >> 12;
+ vPTE->loaded = 1;
} else {
- page->flags = flags;
- page->address = 0;
- page->loaded = 0;
+ vPTE->flags = flags;
+ vPTE->address = 0;
+ vPTE->loaded = 0;
}
if (flags & F_GLOBAL)
@@ -497,7 +617,7 @@ static int map_pages(volatile struct pml4e *root, void *virt_start,
return 0;
failed:
- unmap_pages(root, virt, i);
+ unmap_pages(pPMl4, virt, i);
return 1;
}
@@ -505,42 +625,41 @@ failed:
void paging_init(void)
{
// map pdpt
- kernel_pml4[0].flags = F_PRESENT | F_WRITEABLE;
- kernel_pml4[0].address = (uint64_t)(kernel_pdpt_0) >> 12;
+ kernel_pml4.entries[0].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pml4.entries[0].address = (uint64_t)(kernel_pdpt_0.entries) >> 12;
// map pd0 & pd1
- kernel_pdpt_0[0].flags = F_PRESENT | F_WRITEABLE;
- kernel_pdpt_0[0].address = (uint64_t)(kernel_pd_0) >> 12;
- kernel_pdpt_0[1].flags = F_PRESENT | F_WRITEABLE;
- kernel_pdpt_0[1].address = (uint64_t)(kernel_pd_1) >> 12;
+ kernel_pdpt_0.entries[0].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pdpt_0.entries[0].address = (uint64_t)(kernel_pd_0.entries) >> 12;
+ kernel_pdpt_0.entries[1].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pdpt_0.entries[1].address = (uint64_t)(kernel_pd_1.entries) >> 12;
// map pd0 entries (length N_IDENT_PTS)
for (int i = 0; i < N_IDENT_PTS; i++) {
- kernel_pd_0[i].flags = F_PRESENT | F_WRITEABLE;
- kernel_pd_0[i].address = (uint64_t)(kernel_pd_0_ents + 512 * i) >> 12;
+ kernel_pd_0.entries[i].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pd_0.entries[i].address = (uint64_t)(kernel_pd_0_ents[i].entries) >> 12;
+ for (size_t j = 0; j < 512; j++) {
+ kernel_pd_0_ents[i].entries[j].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pd_0_ents[i].entries[j].address = ((i*512+j) * PAGE_SIZE) >> 12;
+ }
}
- // identity map kernel
- for (size_t i = 0; i < 512 * N_IDENT_PTS; i++) {
- kernel_pd_0_ents[i].flags = F_PRESENT | F_WRITEABLE;
- kernel_pd_0_ents[i].address = (i * PAGE_SIZE) >> 12;
- }
// map paging_pt
- kernel_pd_1[0].flags = F_PRESENT | F_WRITEABLE;
- kernel_pd_1[0].address = (uint64_t)(paging_pt) >> 12;
+ kernel_pd_1.entries[0].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pd_1.entries[0].address = (uint64_t)(paging_pt.entries) >> 12;
- memsetv(paging_pt, 0, 4096);
+ memsetv(paging_pt.entries, 0, 4096);
// make sure we are using THESE pagetables
// EFI doesnt on boot
- __asm__ volatile("mov %0, %%cr3" ::"r"(kernel_pml4) : "memory");
+ __asm__ volatile("mov %0, %%cr3" ::"r"(kernel_pml4.entries) : "memory");
}
volatile void *pml4_alloc(void)
{
- struct pml4e *pml4_phys = alloc_phys_page();
- struct pml4e *pml4 = kmapaddr(pml4_phys, NULL, PAGE_SIZE, F_WRITEABLE);
+ struct pml4 *pml4_phys = alloc_phys_page();
+ struct pml4 *pml4 = kmapaddr(pml4_phys, NULL, PAGE_SIZE, F_WRITEABLE);
memset(pml4, 0, PAGE_SIZE);
if (map_pages(pml4_phys, kernel_start, kernel_start,
@@ -555,12 +674,6 @@ volatile void *pml4_alloc(void)
return pml4_phys;
}
-void pml4_free(volatile void *pml4)
-{
- (void)pml4;
- // TODD: free structures
-}
-
static inline void *page_align(void *addr)
{
uintptr_t a = (uintptr_t)addr;
@@ -588,7 +701,7 @@ void *mem_mapaddr(mem_ctx_t ctx, void *phys, void *virt, size_t len,
if (virt == NULL)
return NULL;
- if (map_pages((volatile struct pml4e *)ctx->pml4, virt, aligned_phys,
+ if (map_pages((volatile struct pml4 *)ctx->pml4, virt, aligned_phys,
F_PRESENT | flags, pages)) {
virtaddr_free(&ctx->virtctx, virt);
return NULL;
@@ -605,16 +718,16 @@ void mem_unmapaddr(mem_ctx_t ctx, void *virt)
long pages = virtaddr_free(&ctx->virtctx, virt);
if (pages < 1)
return;
- unmap_pages(kernel_pml4, virt, pages);
+ unmap_pages(&kernel_pml4, virt, pages);
}
-void *mem_alloc_page(mem_ctx_t ctx, bool lazy)
+void *mem_alloc_page(mem_ctx_t ctx, unsigned int flags, bool lazy)
{
void *virt = virtaddr_alloc(&ctx->virtctx, 1);
if (virt == NULL)
return NULL;
- if (mem_alloc_page_at(ctx, virt, lazy) == NULL) {
+ if (mem_alloc_page_at(ctx, virt, flags, lazy) == NULL) {
virtaddr_free(&ctx->virtctx, virt);
return NULL;
}
@@ -622,7 +735,8 @@ void *mem_alloc_page(mem_ctx_t ctx, bool lazy)
return virt;
}
-void *mem_alloc_page_at(mem_ctx_t ctx, void *virt, bool lazy)
+void *mem_alloc_page_at(mem_ctx_t ctx, void *virt, unsigned int flags,
+ bool lazy)
{
void *phys = NULL;
if (!lazy) {
@@ -630,7 +744,7 @@ void *mem_alloc_page_at(mem_ctx_t ctx, void *virt, bool lazy)
return NULL;
}
- if (map_page((volatile struct pml4e *)ctx->pml4, virt, phys, F_WRITEABLE)) {
+ if (map_page((volatile struct pml4 *)ctx->pml4, virt, phys, flags)) {
if (phys)
free_phys_page(phys);
return NULL;
@@ -639,13 +753,14 @@ void *mem_alloc_page_at(mem_ctx_t ctx, void *virt, bool lazy)
return virt;
}
-void *mem_alloc_pages(mem_ctx_t ctx, size_t count, bool lazy)
+void *mem_alloc_pages(mem_ctx_t ctx, size_t count, unsigned int flags,
+ bool lazy)
{
void *virt = virtaddr_alloc(&ctx->virtctx, count);
if (virt == NULL)
return NULL;
- if (mem_alloc_pages_at(ctx, count, virt, lazy) == NULL) {
+ if (mem_alloc_pages_at(ctx, count, virt, flags, lazy) == NULL) {
virtaddr_free(&ctx->virtctx, virt);
return NULL;
}
@@ -653,7 +768,8 @@ void *mem_alloc_pages(mem_ctx_t ctx, size_t count, bool lazy)
return virt;
}
-void *mem_alloc_pages_at(mem_ctx_t ctx, size_t count, void *virt, bool lazy)
+void *mem_alloc_pages_at(mem_ctx_t ctx, size_t count, void *virt,
+ unsigned int flags, bool lazy)
{
void *phys = NULL;
if (!lazy) {
@@ -661,7 +777,7 @@ void *mem_alloc_pages_at(mem_ctx_t ctx, size_t count, void *virt, bool lazy)
return NULL;
}
- if (map_pages((volatile struct pml4e *)ctx->pml4, virt, phys, F_WRITEABLE,
+ if (map_pages((volatile struct pml4 *)ctx->pml4, virt, phys, flags,
count)) {
if (phys)
free_phys_pages(phys, count);
@@ -677,15 +793,15 @@ void mem_free_pages(mem_ctx_t ctx, void *virt)
long pages = virtaddr_free(&ctx->virtctx, virt);
if (pages == 1)
- unmap_page((volatile struct pml4e *)ctx->pml4, virt);
+ unmap_page((volatile struct pml4 *)ctx->pml4, virt);
else if (pages > 1)
- unmap_pages((volatile struct pml4e *)ctx->pml4, virt, pages);
+ unmap_pages((volatile struct pml4 *)ctx->pml4, virt, pages);
}
int mem_load_page(mem_ctx_t ctx, void *virt_addr)
{
volatile struct pte *page =
- get_page((volatile struct pml4e *)ctx->pml4, virt_addr);
+ get_page((volatile struct pml4 *)ctx->pml4, virt_addr);
if (page == NULL)
return -1;
if (page->loaded)