author     Freya Murphy <freya@freyacat.org>  2025-04-28 17:06:57 -0400
committer  Freya Murphy <freya@freyacat.org>  2025-04-28 17:06:57 -0400
commit     74517ed402d318fee911f9701396fee648887165
tree       617e73d7916941d23753a01ff82d6caed3b34a15  /kernel/memory/paging.c
parent     fix pml4 free and physalloc_free
clone pgdir
Diffstat (limited to 'kernel/memory/paging.c')
-rw-r--r--  kernel/memory/paging.c  |  323
1 file changed, 253 insertions(+), 70 deletions(-)
diff --git a/kernel/memory/paging.c b/kernel/memory/paging.c
index 0571148..b089895 100644
--- a/kernel/memory/paging.c
+++ b/kernel/memory/paging.c
@@ -146,69 +146,41 @@ static inline void invlpg(volatile const void *vADDR)
/* map */
-// map a physical pml4 address to access
+// map a physical address to a virtual address
// @returns VIRTUAL ADDRESS
-static volatile struct pml4 *pml4_map(volatile struct pml4 *pPML4)
+static volatile void *map_addr(volatile const void *pADDR, size_t pt_idx)
{
- static volatile struct pml4 *vPML4 = (void *)(uintptr_t)0x40000000;
- static volatile struct pte *vPTE = &paging_pt.entries[0];
+ volatile char *vADDR;
+ volatile struct pte *vPTE;
- if ((uint64_t)pPML4 >> 12 == vPTE->address)
- return vPML4;
+ assert(pt_idx < 512, "invalid page table entry index");
- vPTE->address = (uint64_t)pPML4 >> 12;
- vPTE->flags = F_PRESENT | F_WRITEABLE;
- invlpg(vPML4);
- return vPML4;
-}
+ vADDR = (char *)(uintptr_t)(0x40000000 + pt_idx * PAGE_SIZE);
+ vPTE = &paging_pt.entries[pt_idx];
-// map a physical pdpt address to access
-// @returns VIRTUAL ADDRESS
-static volatile struct pdpt *pdpt_map(volatile struct pdpt *pPDPT)
-{
- static volatile struct pdpt *vPDPT = (void *)(uintptr_t)0x40001000;
- static volatile struct pte *vPTE = &paging_pt.entries[1];
-
- if ((uint64_t)pPDPT >> 12 == vPTE->address)
- return vPDPT;
+ if ((uint64_t)pADDR >> 12 == vPTE->address)
+ return vADDR;
- vPTE->address = (uint64_t)pPDPT >> 12;
+ vPTE->address = (uint64_t)pADDR >> 12;
vPTE->flags = F_PRESENT | F_WRITEABLE;
- invlpg(vPDPT);
- return vPDPT;
+ invlpg(vADDR);
+ return vADDR;
}
-// map a physical pd address to access
-// @returns VIRTUAL ADDRESS
-static volatile struct pd *pd_map(volatile struct pd *pPD)
-{
- static volatile struct pd *vPD = (void *)(uintptr_t)0x40002000;
- static volatile struct pte *vPTE = &paging_pt.entries[2];
-
- if ((uint64_t)pPD >> 12 == vPTE->address)
- return vPD;
+#define PML4_MAP(pADDR) (volatile struct pml4 *)map_addr(pADDR, 0)
+#define PML4_MAPC(pADDR) (volatile const struct pml4 *)map_addr(pADDR, 4)
- vPTE->address = (uint64_t)pPD >> 12;
- vPTE->flags = F_PRESENT | F_WRITEABLE;
- invlpg(vPD);
- return vPD;
-}
+#define PDPT_MAP(pADDR) (volatile struct pdpt *)map_addr(pADDR, 1)
+#define PDPT_MAPC(pADDR) (volatile const struct pdpt *)map_addr(pADDR, 5)
-// map a physical pt address to access
-// @returns VIRTUAL ADDRESS
-static volatile struct pt *pt_map(volatile struct pt *pPT)
-{
- static volatile struct pt *vPT = (void *)(uintptr_t)0x40003000;
- static volatile struct pte *vPTE = &paging_pt.entries[3];
+#define PD_MAP(pADDR) (volatile struct pd *)map_addr(pADDR, 2)
+#define PD_MAPC(pADDR) (volatile const struct pd *)map_addr(pADDR, 6)
- if ((uint64_t)pPT >> 12 == vPTE->address)
- return vPT;
+#define PT_MAP(pADDR) (volatile struct pt *)map_addr(pADDR, 3)
+#define PT_MAPC(pADDR) (volatile const struct pt *)map_addr(pADDR, 7)
- vPTE->address = (uint64_t)pPT >> 12;
- vPTE->flags = F_PRESENT | F_WRITEABLE;
- invlpg(vPT);
- return vPT;
-}
+#define PAGE_MAP(pADDR) (volatile void *)map_addr(pADDR, 8)
+#define PAGE_MAPC(pADDR) (volatile const void *)map_addr(pADDR, 9)
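
The ten macros above all funnel through map_addr(), which reuses a fixed window of scratch PTEs in paging_pt backing the virtual range at 0x40000000 (one page per slot, up to the 512 entries the assert allows). Each table level gets its own slot, and the _MAPC variants use slots separate from the writable ones, so a clone can keep a source table and a destination table mapped at the same time. A minimal sketch of the slot layout implied by the macro arguments; the enum and its names are illustrative, not identifiers from paging.c:

    /* Scratch-slot layout implied by the macros above; each slot is one
       PTE in paging_pt and backs 0x40000000 + slot * PAGE_SIZE. The enum
       and its names are illustrative only. */
    enum scratch_slot {
            SLOT_PML4  = 0, SLOT_PDPT  = 1, SLOT_PD  = 2, SLOT_PT  = 3, /* writable views  */
            SLOT_PML4C = 4, SLOT_PDPTC = 5, SLOT_PDC = 6, SLOT_PTC = 7, /* const src views */
            SLOT_PAGE  = 8, SLOT_PAGEC = 9                              /* raw 4 KiB pages */
    };
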
/* locate */
@@ -223,7 +195,7 @@ static volatile struct pdpt *pdpt_locate(volatile struct pml4 *pPML4,
uint64_t offset;
offset = (uint64_t)vADDR >> 39;
- vPML4 = pml4_map(pPML4);
+ vPML4 = PML4_MAP(pPML4);
vPML4E = &vPML4->entries[offset];
if (vPML4E->flags & F_PRESENT) {
@@ -245,7 +217,7 @@ static volatile struct pd *pd_locate(volatile struct pdpt *pPDPT,
uint64_t offset;
offset = ((uint64_t)vADDR >> 30) & 0x1ff;
- vPDPT = pdpt_map(pPDPT);
+ vPDPT = PDPT_MAP(pPDPT);
vPDPTE = &vPDPT->entries[offset];
if (vPDPTE->flags & F_PRESENT) {
@@ -266,7 +238,7 @@ static volatile struct pt *pt_locate(volatile struct pd *pPD, const void *vADDR)
uint64_t offset;
offset = ((uint64_t)vADDR >> 21) & 0x1ff;
- vPD = pd_map(pPD);
+ vPD = PD_MAP(pPD);
vPDE = &vPD->entries[offset];
if (vPDE->flags & F_PRESENT) {
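
The locate helpers above (and page_locate further down) each pull one 9-bit index out of the virtual address: bits 39-47 select the PML4 entry, 30-38 the PDPT entry, 21-29 the PD entry, and 12-20 the PT entry. A self-contained sketch of that split which compiles and runs on its own; split_vaddr() and its names are illustrative, not code from this file:

    #include <stdint.h>
    #include <stdio.h>

    /* Split a canonical x86-64 virtual address into its four table indices,
     * mirroring the shifts and masks used by the locate helpers. */
    static void split_vaddr(uint64_t vaddr, uint64_t idx[4])
    {
            idx[0] = (vaddr >> 39) & 0x1ff; /* PML4 index */
            idx[1] = (vaddr >> 30) & 0x1ff; /* PDPT index */
            idx[2] = (vaddr >> 21) & 0x1ff; /* PD index   */
            idx[3] = (vaddr >> 12) & 0x1ff; /* PT index   */
    }

    int main(void)
    {
            uint64_t idx[4];
            split_vaddr(0x40000000ULL + 3 * 4096, idx); /* scratch slot 3 */
            /* prints: pml4=0 pdpt=1 pd=0 pt=3 */
            printf("pml4=%llu pdpt=%llu pd=%llu pt=%llu\n",
                   (unsigned long long)idx[0], (unsigned long long)idx[1],
                   (unsigned long long)idx[2], (unsigned long long)idx[3]);
            return 0;
    }
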
@@ -289,7 +261,7 @@ static volatile struct pml4 *pml4_alloc(void)
if (pPML4 == NULL)
return NULL;
- vPML4 = pml4_map(pPML4);
+ vPML4 = PML4_MAP(pPML4);
memsetv(vPML4, 0, sizeof(struct pml4));
return pPML4;
}
@@ -305,7 +277,7 @@ static volatile struct pdpt *pdpt_alloc(volatile struct pml4 *pPML4,
uint64_t offset;
offset = (uint64_t)vADDR >> 39;
- vPML4 = pml4_map(pPML4);
+ vPML4 = PML4_MAP(pPML4);
vPML4E = &vPML4->entries[offset];
pPDPT = pdpt_locate(pPML4, vADDR);
@@ -318,7 +290,7 @@ static volatile struct pdpt *pdpt_alloc(volatile struct pml4 *pPML4,
if (pPML4 == NULL)
return NULL;
- vPDPT = pdpt_map(pPDPT);
+ vPDPT = PDPT_MAP(pPDPT);
memsetv(vPDPT, 0, sizeof(struct pdpt));
vPML4E->address = (uintptr_t)pPDPT >> 12;
vPML4E->flags = F_PRESENT | flags;
@@ -338,7 +310,7 @@ static volatile struct pd *pd_alloc(volatile struct pdpt *pPDPT, void *vADDR,
uint64_t offset;
offset = ((uint64_t)vADDR >> 30) & 0x1ff;
- vPDPT = pdpt_map(pPDPT);
+ vPDPT = PDPT_MAP(pPDPT);
vPDPTE = &vPDPT->entries[offset];
pPD = pd_locate(pPDPT, vADDR);
@@ -351,7 +323,7 @@ static volatile struct pd *pd_alloc(volatile struct pdpt *pPDPT, void *vADDR,
if (pPDPT == NULL)
return NULL;
- vPD = pd_map(pPD);
+ vPD = PD_MAP(pPD);
memsetv(vPD, 0, sizeof(struct pd));
vPDPTE->address = (uintptr_t)pPD >> 12;
vPDPTE->flags = F_PRESENT | flags;
@@ -371,7 +343,7 @@ static volatile struct pt *pt_alloc(volatile struct pd *pPD, void *vADDR,
uint64_t offset;
offset = ((uint64_t)vADDR >> 21) & 0x1ff;
- vPD = pd_map(pPD);
+ vPD = PD_MAP(pPD);
vPDE = &vPD->entries[offset];
pPT = pt_locate(pPD, vADDR);
@@ -384,7 +356,7 @@ static volatile struct pt *pt_alloc(volatile struct pd *pPD, void *vADDR,
if (pPD == NULL)
return NULL;
- vPT = pt_map(pPT);
+ vPT = PT_MAP(pPT);
memsetv(vPT, 0, sizeof(struct pt));
vPDE->address = (uintptr_t)pPT >> 12;
vPDE->flags = F_PRESENT | flags;
@@ -400,7 +372,7 @@ static void pt_free(volatile struct pt *pPT, bool force)
volatile struct pt *vPT;
uint64_t count;
- vPT = pt_map(pPT);
+ vPT = PT_MAP(pPT);
count = (vPT->count_high << 2) | vPT->count_low;
if (!count)
@@ -434,7 +406,7 @@ static void pd_free(volatile struct pd *pPD, bool force)
volatile struct pd *vPD;
uint64_t count;
- vPD = pd_map(pPD);
+ vPD = PD_MAP(pPD);
count = vPD->count;
if (!count)
@@ -467,7 +439,7 @@ static void pdpt_free(volatile struct pdpt *pPDPT, bool force)
volatile struct pdpt *vPDPT;
uint64_t count;
- vPDPT = pdpt_map(pPDPT);
+ vPDPT = PDPT_MAP(pPDPT);
count = vPDPT->count;
if (!count)
@@ -500,7 +472,7 @@ static void pml4_free(volatile struct pml4 *pPML4, bool force)
volatile struct pml4 *vPML4;
uint64_t count;
- vPML4 = pml4_map(pPML4);
+ vPML4 = PML4_MAP(pPML4);
count = vPML4->count;
if (!count)
@@ -528,6 +500,220 @@ free:
free_phys_page((void *)(uintptr_t)pPML4);
}
+/* clone */
+
+volatile void *page_clone(volatile void *old_pADDR, bool cow)
+{
+ volatile const void *old_vADDR;
+ volatile void *new_pADDR, *new_vADDR;
+
+ // TODO: cow
+ (void) cow;
+
+	// don't reallocate kernel memory!
+ if ((volatile char *) old_pADDR <= kernel_end)
+ return old_pADDR;
+
+ new_pADDR = alloc_phys_page();
+ if (new_pADDR == NULL)
+ return NULL;
+
+ old_vADDR = PAGE_MAPC(old_pADDR);
+ new_vADDR = PAGE_MAP(new_pADDR);
+ memcpyv(new_vADDR, old_vADDR, PAGE_SIZE);
+ return new_pADDR;
+}
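
page_clone() shares anything at or below kernel_end and deep-copies every other page; the cow argument is accepted but ignored for now (the TODO above). A hedged sketch of how the flag might eventually be honored inside page_clone(); this is an assumption about a common copy-on-write scheme, not behaviour this commit implements:

    /* Assumed future shape only, not the committed behaviour: share the
     * frame read-only and defer the copy to the page-fault handler. */
    if (cow) {
            /* the callers would clear F_WRITEABLE in both the parent's and
             * the child's PTE and keep a reference on old_pADDR; the first
             * write fault then does page_clone(old_pADDR, false) and
             * restores F_WRITEABLE for the faulting side. */
            return old_pADDR;
    }
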
+
+volatile struct pt *pt_clone(volatile const struct pt *old_pPT,
+ bool cow)
+{
+ volatile const struct pt *old_vPT;
+ volatile struct pt *new_pPT, *new_vPT;
+
+ new_pPT = alloc_phys_page();
+ if (new_pPT == NULL)
+ return NULL;
+
+ old_vPT = PT_MAPC(old_pPT);
+ new_vPT = PT_MAP(new_pPT);
+ memsetv(new_vPT, 0, PAGE_SIZE);
+
+ new_vPT->count_high = old_vPT->count_high;
+ new_vPT->count_low = old_vPT->count_low;
+
+ for (size_t i = 0; i < 512; i++) {
+ volatile const struct pte *old_vPTE;
+ volatile struct pte *new_vPTE;
+ volatile void *old_pADDR, *new_pADDR;
+
+ old_vPTE = &old_vPT->entries[i];
+ new_vPTE = &new_vPT->entries[i];
+
+ new_vPTE->execute_disable = old_vPTE->execute_disable;
+ new_vPTE->flags = old_vPTE->flags;
+ if (!(old_vPTE->flags & F_PRESENT))
+ continue;
+
+ new_vPTE->execute_disable = old_vPTE->execute_disable;
+ new_vPTE->flags = old_vPTE->flags;
+
+ old_pADDR =
+ (volatile void *)((uintptr_t)old_vPTE->address
+ << 12);
+ new_pADDR = page_clone(old_pADDR, cow);
+ if (new_pADDR == NULL)
+ goto fail;
+
+ new_vPTE->address = (uint64_t)new_pADDR >> 12;
+ }
+
+ return new_pPT;
+
+fail:
+ pt_free(new_pPT, true);
+ return NULL;
+}
+
+volatile struct pd *pd_clone(volatile const struct pd *old_pPD,
+ bool cow)
+{
+ volatile const struct pd *old_vPD;
+ volatile struct pd *new_pPD, *new_vPD;
+
+ new_pPD = alloc_phys_page();
+ if (new_pPD == NULL)
+ return NULL;
+
+ old_vPD = PD_MAPC(old_pPD);
+ new_vPD = PD_MAP(new_pPD);
+ memsetv(new_vPD, 0, PAGE_SIZE);
+
+ new_vPD->count = old_vPD->count;
+
+ for (size_t i = 0; i < 512; i++) {
+ volatile const struct pde *old_vPDE;
+ volatile struct pde *new_vPDE;
+ volatile const struct pt *old_pPT;
+ volatile struct pt *new_pPT;
+
+ old_vPDE = &old_vPD->entries[i];
+ new_vPDE = &new_vPD->entries[i];
+
+ new_vPDE->execute_disable = old_vPDE->execute_disable;
+ new_vPDE->flags = old_vPDE->flags;
+ if (!(old_vPDE->flags & F_PRESENT))
+ continue;
+
+ old_pPT =
+ (volatile const struct pt *)((uintptr_t)old_vPDE->address
+ << 12);
+ new_pPT = pt_clone(old_pPT, cow);
+ if (new_pPT == NULL)
+ goto fail;
+
+ new_vPDE->address = (uint64_t)new_pPT >> 12;
+ }
+
+ return new_pPD;
+
+fail:
+ pd_free(new_pPD, true);
+ return NULL;
+}
+
+volatile struct pdpt *pdpt_clone(volatile const struct pdpt *old_pPDPT,
+ bool cow)
+{
+ volatile const struct pdpt *old_vPDPT;
+ volatile struct pdpt *new_pPDPT, *new_vPDPT;
+
+ new_pPDPT = alloc_phys_page();
+ if (new_pPDPT == NULL)
+ return NULL;
+
+ old_vPDPT = PDPT_MAPC(old_pPDPT);
+ new_vPDPT = PDPT_MAP(new_pPDPT);
+ memsetv(new_vPDPT, 0, PAGE_SIZE);
+
+ new_vPDPT->count = old_vPDPT->count;
+
+ for (size_t i = 0; i < 512; i++) {
+ volatile const struct pdpte *old_vPDPTE;
+ volatile struct pdpte *new_vPDPTE;
+ volatile const struct pd *old_pPD;
+ volatile struct pd *new_pPD;
+
+ old_vPDPTE = &old_vPDPT->entries[i];
+ new_vPDPTE = &new_vPDPT->entries[i];
+
+ new_vPDPTE->execute_disable = old_vPDPTE->execute_disable;
+ new_vPDPTE->flags = old_vPDPTE->flags;
+ if (!(old_vPDPTE->flags & F_PRESENT))
+ continue;
+
+ old_pPD =
+ (volatile const struct pd *)((uintptr_t)old_vPDPTE->address
+ << 12);
+ new_pPD = pd_clone(old_pPD, cow);
+ if (new_pPD == NULL)
+ goto fail;
+
+ new_vPDPTE->address = (uint64_t)new_pPD >> 12;
+ }
+
+ return new_pPDPT;
+
+fail:
+ pdpt_free(new_pPDPT, true);
+ return NULL;
+}
+
+volatile struct pml4 *pml4_clone(volatile const struct pml4 *old_pPML4,
+ bool cow)
+{
+ volatile const struct pml4 *old_vPML4;
+ volatile struct pml4 *new_pPML4, *new_vPML4;
+
+ new_pPML4 = pml4_alloc();
+ if (new_pPML4 == NULL)
+ return NULL;
+
+ old_vPML4 = PML4_MAPC(old_pPML4);
+ new_vPML4 = PML4_MAP(new_pPML4);
+
+ new_vPML4->count = old_vPML4->count;
+
+ for (size_t i = 0; i < 512; i++) {
+ volatile const struct pml4e *old_vPML4E;
+ volatile struct pml4e *new_vPML4E;
+ volatile const struct pdpt *old_pPDPT;
+ volatile struct pdpt *new_pPDPT;
+
+ old_vPML4E = &old_vPML4->entries[i];
+ new_vPML4E = &new_vPML4->entries[i];
+
+ new_vPML4E->execute_disable = old_vPML4E->execute_disable;
+ new_vPML4E->flags = old_vPML4E->flags;
+ if (!(old_vPML4E->flags & F_PRESENT))
+ continue;
+
+ old_pPDPT =
+ (volatile const struct pdpt *)((uintptr_t)old_vPML4E->address
+ << 12);
+ new_pPDPT = pdpt_clone(old_pPDPT, cow);
+ if (new_pPDPT == NULL)
+ goto fail;
+
+ new_vPML4E->address = (uint64_t)new_pPDPT >> 12;
+ }
+
+ return new_pPML4;
+
+fail:
+ pml4_free(new_pPML4, true);
+ return NULL;
+}
+
/* page specific */
// locate a pte for a virtual address
@@ -554,7 +740,7 @@ static volatile struct pte *page_locate(volatile struct pml4 *pPML4,
return NULL;
offset = ((uint64_t)vADDR >> 12) & 0x1ff;
- vPT = pt_map(pPT);
+ vPT = PT_MAP(pPT);
vPTE = &vPT->entries[offset];
if (vPTE->flags & F_PRESENT)
@@ -587,7 +773,7 @@ static volatile struct pte *page_alloc(volatile struct pml4 *pPML4, void *vADDR,
return NULL;
offset = ((uint64_t)vADDR >> 12) & 0x1ff;
- vPT = pt_map(pPT);
+ vPT = PT_MAP(pPT);
vPTE = &vPT->entries[offset];
memsetv(vPTE, 0, sizeof(struct pte));
@@ -707,10 +893,7 @@ volatile void *pgdir_alloc(void)
volatile void *pgdir_clone(volatile const void *old_pgdir, bool cow)
{
- // TODO:
- (void)old_pgdir;
- (void)cow;
- return NULL;
+ return pml4_clone((volatile const struct pml4 *)old_pgdir, cow);
}
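
With the clone chain in place, pgdir_clone() now returns a full copy of an address space instead of NULL. A hypothetical caller, roughly how a fork-style path might use it; apart from pgdir_clone() itself, every name below is illustrative:

    /* Hypothetical usage; only pgdir_clone() comes from paging.c. */
    volatile void *child_pgdir = pgdir_clone(parent_pgdir, false /* no cow yet */);
    if (child_pgdir == NULL)
            return -1; /* out of physical pages */
    /* hand child_pgdir to the new process's context before switching CR3 */
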
void pgdir_free(volatile void *addr)