author    Galen Sagarin <gps5307@rit.edu>  2025-04-29 14:18:40 -0400
committer Galen Sagarin <gps5307@rit.edu>  2025-04-29 14:18:40 -0400
commit    ae2cdd83ba4a0cae161db0b29031d5591005fa34 (patch)
tree      82fbdfcbb1fe4e3b5e232db195c8c331d69489fd /kernel/memory
parent    Started writing fat.c (diff)
parent    fs header changes (diff)
Merge branch 'main' of https://github.com/kenshineto/kern into fat32
Merging main into here
Diffstat (limited to 'kernel/memory')
-rw-r--r--  kernel/memory/memory.c      54
-rw-r--r--  kernel/memory/paging.c     537
-rw-r--r--  kernel/memory/paging.h       7
-rw-r--r--  kernel/memory/physalloc.c   93
-rw-r--r--  kernel/memory/physalloc.h   35
-rw-r--r--  kernel/memory/virtalloc.c  122
-rw-r--r--  kernel/memory/virtalloc.h   15
7 files changed, 695 insertions, 168 deletions
diff --git a/kernel/memory/memory.c b/kernel/memory/memory.c
index 2a9c15e..ecfd639 100644
--- a/kernel/memory/memory.c
+++ b/kernel/memory/memory.c
@@ -26,11 +26,16 @@ void *kmapaddr(void *phys, void *virt, size_t len, unsigned int flags)
return mem_mapaddr(kernel_mem_ctx, phys, virt, len, flags);
}
-void kunmapaddr(void *virt)
+void kunmapaddr(const void *virt)
{
mem_unmapaddr(kernel_mem_ctx, virt);
}
+void *kget_phys(const void *virt)
+{
+ return mem_get_phys(kernel_mem_ctx, virt);
+}
+
void *kalloc_page(void)
{
return mem_alloc_page(kernel_mem_ctx, F_PRESENT | F_WRITEABLE);
@@ -41,7 +46,7 @@ void *kalloc_pages(size_t count)
return mem_alloc_pages(kernel_mem_ctx, count, F_PRESENT | F_WRITEABLE);
}
-void kfree_pages(void *ptr)
+void kfree_pages(const void *ptr)
{
mem_free_pages(kernel_mem_ctx, ptr);
}
@@ -52,7 +57,7 @@ mem_ctx_t mem_ctx_alloc(void)
if (ctx == NULL)
return NULL;
- if ((ctx->pml4 = paging_alloc()) == NULL)
+ if ((ctx->pml4 = pgdir_alloc()) == NULL)
return NULL;
virtaddr_init(&ctx->virtctx);
@@ -64,17 +69,39 @@ mem_ctx_t mem_ctx_alloc(void)
return ctx;
}
-mem_ctx_t mem_ctx_clone(mem_ctx_t ctx, bool cow)
+mem_ctx_t mem_ctx_clone(const mem_ctx_t old, bool cow)
{
- (void)ctx;
- (void)cow;
+ mem_ctx_t new;
+
+ assert(old != NULL, "memory context is null");
+ assert(old->pml4 != NULL, "pgdir is null");
+
+ new = user_mem_ctx_next;
+ if (new == NULL)
+ return NULL;
+
+ if ((new->pml4 = pgdir_clone(old->pml4, cow)) == NULL)
+ return NULL;
+
+ if (virtaddr_clone(&old->virtctx, &new->virtctx)) {
+ pgdir_free(new->pml4);
+ return NULL;
+ }
+
+ user_mem_ctx_next = new->prev;
+ if (new->prev)
+ new->prev->next = NULL;
+ new->prev = NULL;
- panic("not yet implemented");
+ return new;
}
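[editor's note] mem_ctx_clone now pulls a preallocated context off the free list, deep-copies the page tables through pgdir_clone, and clones the virtual allocator state, unwinding the pgdir if that clone fails. A minimal usage sketch of a fork-style caller; struct pcb and proc_fork_mem are assumptions, only mem_ctx_clone itself is from this commit:

int proc_fork_mem(struct pcb *parent, struct pcb *child)
{
	/* cow=false: every user page is eagerly copied (COW is still a TODO) */
	child->mem_ctx = mem_ctx_clone(parent->mem_ctx, false);
	if (child->mem_ctx == NULL)
		return -1; /* out of contexts or physical pages */
	return 0;
}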
void mem_ctx_free(mem_ctx_t ctx)
{
- paging_free(ctx->pml4);
+ assert(ctx != NULL, "memory context is null");
+ assert(ctx->pml4 != NULL, "pgdir is null");
+
+ pgdir_free(ctx->pml4);
virtaddr_cleanup(&ctx->virtctx);
if (user_mem_ctx_next == NULL) {
@@ -88,9 +115,20 @@ void mem_ctx_free(mem_ctx_t ctx)
void mem_ctx_switch(mem_ctx_t ctx)
{
+ assert(ctx != NULL, "memory context is null");
+ assert(ctx->pml4 != NULL, "pgdir is null");
+
__asm__ volatile("mov %0, %%cr3" ::"r"(ctx->pml4) : "memory");
}
+volatile void *mem_ctx_pgdir(mem_ctx_t ctx)
+{
+ assert(ctx != NULL, "memory context is null");
+ assert(ctx->pml4 != NULL, "pgdir is null");
+
+ return ctx->pml4;
+}
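[editor's note] mem_ctx_switch loads the context's pml4 into cr3, which also flushes all non-global TLB entries; the new mem_ctx_pgdir exposes the raw pointer so callers can compare directories without switching. A hedged scheduler sketch (struct pcb and dispatch are assumed names):

void dispatch(struct pcb *prev, struct pcb *next)
{
	/* skip the cr3 write, and the TLB flush it implies, when the
	   address space is unchanged */
	if (mem_ctx_pgdir(next->mem_ctx) != mem_ctx_pgdir(prev->mem_ctx))
		mem_ctx_switch(next->mem_ctx);
}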
+
void memory_init(void)
{
struct memory_map mmap;
diff --git a/kernel/memory/paging.c b/kernel/memory/paging.c
index 4f1b788..763bdce 100644
--- a/kernel/memory/paging.c
+++ b/kernel/memory/paging.c
@@ -5,6 +5,7 @@
#include "physalloc.h"
#include "paging.h"
#include "memory.h"
+#include <stdint.h>
// PAGE MAP LEVEL 4 ENTRY
struct pml4e {
@@ -138,83 +139,55 @@ extern char kernel_start[];
extern char kernel_end[];
// invalidate page cache at a virtual address
-static inline void invlpg(volatile void *vADDR)
+static inline void invlpg(volatile const void *vADDR)
{
__asm__ volatile("invlpg (%0)" ::"r"(vADDR) : "memory");
}
/* map */
-// map a physical pml4 address to access
+// map a physical address to a virtual address
// @returns VIRTUAL ADDRESS
-static volatile struct pml4 *pml4_map(volatile struct pml4 *pPML4)
+static volatile void *map_addr(volatile const void *pADDR, size_t pt_idx)
{
- static struct pml4 *vPML4 = (void *)(uintptr_t)0x40000000;
- static volatile struct pte *vPTE = &paging_pt.entries[0];
-
- if ((uint64_t)pPML4 >> 12 == vPTE->address)
- return vPML4;
+ volatile char *vADDR;
+ volatile struct pte *vPTE;
- vPTE->address = (uint64_t)pPML4 >> 12;
- vPTE->flags = F_PRESENT | F_WRITEABLE;
- invlpg(vPML4);
- return vPML4;
-}
+ assert(pt_idx < 512, "invalid page table entry index");
-// map a physical pdpt address to access
-// @returns VIRTUAL ADDRESS
-static volatile struct pdpt *pdpt_map(volatile struct pdpt *pPDPT)
-{
- static struct pdpt *vPDPT = (void *)(uintptr_t)0x40001000;
- static volatile struct pte *vPTE = &paging_pt.entries[1];
+ vADDR = (char *)(uintptr_t)(0x40000000 + pt_idx * PAGE_SIZE);
+ vPTE = &paging_pt.entries[pt_idx];
- if ((uint64_t)pPDPT >> 12 == vPTE->address)
- return vPDPT;
+ if ((uint64_t)pADDR >> 12 == vPTE->address)
+ return vADDR;
- vPTE->address = (uint64_t)pPDPT >> 12;
+ vPTE->address = (uint64_t)pADDR >> 12;
vPTE->flags = F_PRESENT | F_WRITEABLE;
- invlpg(vPDPT);
- return vPDPT;
+ invlpg(vADDR);
+ return vADDR;
}
-// map a physical pd address to access
-// @returns VIRTUAL ADDRESS
-static volatile struct pd *pd_map(volatile struct pd *pPD)
-{
- static struct pd *vPD = (void *)(uintptr_t)0x40002000;
- static volatile struct pte *vPTE = &paging_pt.entries[2];
-
- if ((uint64_t)pPD >> 12 == vPTE->address)
- return vPD;
+#define PML4_MAP(pADDR) (volatile struct pml4 *)map_addr(pADDR, 0)
+#define PML4_MAPC(pADDR) (volatile const struct pml4 *)map_addr(pADDR, 4)
- vPTE->address = (uint64_t)pPD >> 12;
- vPTE->flags = F_PRESENT | F_WRITEABLE;
- invlpg(vPD);
- return vPD;
-}
+#define PDPT_MAP(pADDR) (volatile struct pdpt *)map_addr(pADDR, 1)
+#define PDPT_MAPC(pADDR) (volatile const struct pdpt *)map_addr(pADDR, 5)
-// map a physical pt address to access
-// @returns VIRTUAL ADDRESS
-static volatile struct pt *pt_map(volatile struct pt *pPT)
-{
- static struct pt *vPT = (void *)(uintptr_t)0x40003000;
- static volatile struct pte *vPTE = &paging_pt.entries[3];
+#define PD_MAP(pADDR) (volatile struct pd *)map_addr(pADDR, 2)
+#define PD_MAPC(pADDR) (volatile const struct pd *)map_addr(pADDR, 6)
- if ((uint64_t)pPT >> 12 == vPTE->address)
- return vPT;
+#define PT_MAP(pADDR) (volatile struct pt *)map_addr(pADDR, 3)
+#define PT_MAPC(pADDR) (volatile const struct pt *)map_addr(pADDR, 7)
- vPTE->address = (uint64_t)pPT >> 12;
- vPTE->flags = F_PRESENT | F_WRITEABLE;
- invlpg(vPT);
- return vPT;
-}
+#define PAGE_MAP(pADDR) (volatile void *)map_addr(pADDR, 8)
+#define PAGE_MAPC(pADDR) (volatile const void *)map_addr(pADDR, 9)
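[editor's note] The four copy-pasted map helpers collapse into one map_addr over a fixed scratch window: slot i of paging_pt backs virtual address 0x40000000 + i * PAGE_SIZE. Splitting each level into a _MAP and a _MAPC slot is what the clone code below depends on: the read-only view of the source table and the writable view of the destination live in different slots, so mapping one never evicts the other. For example, inside pt_clone:

	old_vPT = PT_MAPC(old_pPT); /* const view, scratch slot 7 */
	new_vPT = PT_MAP(new_pPT);  /* writable view, scratch slot 3 */
	/* both stay mapped while the entries are copied across */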
/* locate */
// locate a pdpt for a virtual address
// @returns PHYSICAL ADDRESS
static volatile struct pdpt *pdpt_locate(volatile struct pml4 *pPML4,
- void *vADDR)
+ const void *vADDR)
{
volatile struct pml4 *vPML4;
volatile struct pml4e *vPML4E;
@@ -222,7 +195,7 @@ static volatile struct pdpt *pdpt_locate(volatile struct pml4 *pPML4,
uint64_t offset;
offset = (uint64_t)vADDR >> 39;
- vPML4 = pml4_map(pPML4);
+ vPML4 = PML4_MAP(pPML4);
vPML4E = &vPML4->entries[offset];
if (vPML4E->flags & F_PRESENT) {
@@ -235,7 +208,8 @@ static volatile struct pdpt *pdpt_locate(volatile struct pml4 *pPML4,
// locate a pd for a virtual address
// @returns PHYSICAL ADDRESS
-static volatile struct pd *pd_locate(volatile struct pdpt *pPDPT, void *vADDR)
+static volatile struct pd *pd_locate(volatile struct pdpt *pPDPT,
+ const void *vADDR)
{
volatile struct pdpt *vPDPT;
volatile struct pdpte *vPDPTE;
@@ -243,7 +217,7 @@ static volatile struct pd *pd_locate(volatile struct pdpt *pPDPT, void *vADDR)
uint64_t offset;
offset = ((uint64_t)vADDR >> 30) & 0x1ff;
- vPDPT = pdpt_map(pPDPT);
+ vPDPT = PDPT_MAP(pPDPT);
vPDPTE = &vPDPT->entries[offset];
if (vPDPTE->flags & F_PRESENT) {
@@ -256,7 +230,7 @@ static volatile struct pd *pd_locate(volatile struct pdpt *pPDPT, void *vADDR)
// locate a pt for a virtual address
// @returns PHYSICAL ADDRESS
-static volatile struct pt *pt_locate(volatile struct pd *pPD, void *vADDR)
+static volatile struct pt *pt_locate(volatile struct pd *pPD, const void *vADDR)
{
volatile struct pd *vPD;
volatile struct pde *vPDE;
@@ -264,7 +238,7 @@ static volatile struct pt *pt_locate(volatile struct pd *pPD, void *vADDR)
uint64_t offset;
offset = ((uint64_t)vADDR >> 21) & 0x1ff;
- vPD = pd_map(pPD);
+ vPD = PD_MAP(pPD);
vPDE = &vPD->entries[offset];
if (vPDE->flags & F_PRESENT) {
@@ -287,7 +261,7 @@ static volatile struct pml4 *pml4_alloc(void)
if (pPML4 == NULL)
return NULL;
- vPML4 = pml4_map(pPML4);
+ vPML4 = PML4_MAP(pPML4);
memsetv(vPML4, 0, sizeof(struct pml4));
return pPML4;
}
@@ -303,7 +277,7 @@ static volatile struct pdpt *pdpt_alloc(volatile struct pml4 *pPML4,
uint64_t offset;
offset = (uint64_t)vADDR >> 39;
- vPML4 = pml4_map(pPML4);
+ vPML4 = PML4_MAP(pPML4);
vPML4E = &vPML4->entries[offset];
pPDPT = pdpt_locate(pPML4, vADDR);
@@ -316,7 +290,7 @@ static volatile struct pdpt *pdpt_alloc(volatile struct pml4 *pPML4,
if (pPDPT == NULL)
return NULL;
- vPDPT = pdpt_map(pPDPT);
+ vPDPT = PDPT_MAP(pPDPT);
memsetv(vPDPT, 0, sizeof(struct pdpt));
vPML4E->address = (uintptr_t)pPDPT >> 12;
vPML4E->flags = F_PRESENT | flags;
@@ -336,7 +310,7 @@ static volatile struct pd *pd_alloc(volatile struct pdpt *pPDPT, void *vADDR,
uint64_t offset;
offset = ((uint64_t)vADDR >> 30) & 0x1ff;
- vPDPT = pdpt_map(pPDPT);
+ vPDPT = PDPT_MAP(pPDPT);
vPDPTE = &vPDPT->entries[offset];
pPD = pd_locate(pPDPT, vADDR);
@@ -349,7 +323,7 @@ static volatile struct pd *pd_alloc(volatile struct pdpt *pPDPT, void *vADDR,
if (pPD == NULL)
return NULL;
- vPD = pd_map(pPD);
+ vPD = PD_MAP(pPD);
memsetv(vPD, 0, sizeof(struct pd));
vPDPTE->address = (uintptr_t)pPD >> 12;
vPDPTE->flags = F_PRESENT | flags;
@@ -369,7 +343,7 @@ static volatile struct pt *pt_alloc(volatile struct pd *pPD, void *vADDR,
uint64_t offset;
offset = ((uint64_t)vADDR >> 21) & 0x1ff;
- vPD = pd_map(pPD);
+ vPD = PD_MAP(pPD);
vPDE = &vPD->entries[offset];
pPT = pt_locate(pPD, vADDR);
@@ -382,7 +356,7 @@ static volatile struct pt *pt_alloc(volatile struct pd *pPD, void *vADDR,
if (pPT == NULL)
return NULL;
- vPT = pt_map(pPT);
+ vPT = PT_MAP(pPT);
memsetv(vPT, 0, sizeof(struct pt));
vPDE->address = (uintptr_t)pPT >> 12;
vPDE->flags = F_PRESENT | flags;
@@ -398,7 +372,7 @@ static void pt_free(volatile struct pt *pPT, bool force)
volatile struct pt *vPT;
uint64_t count;
- vPT = pt_map(pPT);
+ vPT = PT_MAP(pPT);
count = (vPT->count_high << 2) | vPT->count_low;
if (!count)
@@ -409,11 +383,18 @@ static void pt_free(volatile struct pt *pPT, bool force)
void *pADDR;
vPTE = &vPT->entries[i];
- if (!force && !(vPTE->flags & F_PRESENT))
+ if (!(vPTE->flags & F_PRESENT))
continue;
pADDR = (void *)((uintptr_t)vPTE->address << 12);
free_phys_page(pADDR);
+ count--;
+ }
+
+ if (!force && count) {
+ vPT->count_low = count;
+ vPT->count_high = count >> 2;
+ return;
}
free:
@@ -425,7 +406,7 @@ static void pd_free(volatile struct pd *pPD, bool force)
volatile struct pd *vPD;
uint64_t count;
- vPD = pd_map(pPD);
+ vPD = PD_MAP(pPD);
count = vPD->count;
if (!count)
@@ -436,11 +417,17 @@ static void pd_free(volatile struct pd *pPD, bool force)
volatile struct pt *pPT;
vPDE = &vPD->entries[i];
- if (!force && !(vPDE->flags & F_PRESENT))
+ if (!(vPDE->flags & F_PRESENT))
continue;
pPT = (volatile struct pt *)((uintptr_t)vPDE->address << 12);
pt_free(pPT, force);
+ count--;
+ }
+
+ if (!force && count) {
+ vPD->count = count;
+ return;
}
free:
@@ -452,7 +439,7 @@ static void pdpt_free(volatile struct pdpt *pPDPT, bool force)
volatile struct pdpt *vPDPT;
uint64_t count;
- vPDPT = pdpt_map(pPDPT);
+ vPDPT = PDPT_MAP(pPDPT);
count = vPDPT->count;
if (!count)
@@ -463,11 +450,17 @@ static void pdpt_free(volatile struct pdpt *pPDPT, bool force)
volatile struct pd *pPD;
vPDPTE = &vPDPT->entries[i];
- if (!force && !(vPDPTE->flags & F_PRESENT))
+ if (!(vPDPTE->flags & F_PRESENT))
continue;
pPD = (volatile struct pd *)((uintptr_t)vPDPTE->address << 12);
pd_free(pPD, force);
+ count--;
+ }
+
+ if (!force && count) {
+ vPDPT->count = count;
+ return;
}
free:
@@ -479,7 +472,7 @@ static void pml4_free(volatile struct pml4 *pPML4, bool force)
volatile struct pml4 *vPML4;
uint64_t count;
- vPML4 = pml4_map(pPML4);
+ vPML4 = PML4_MAP(pPML4);
count = vPML4->count;
if (!count)
@@ -490,23 +483,237 @@ static void pml4_free(volatile struct pml4 *pPML4, bool force)
volatile struct pdpt *pPDPT;
vPML4E = &vPML4->entries[i];
- if (!force && !(vPML4E->flags & F_PRESENT))
+ if (!(vPML4E->flags & F_PRESENT))
continue;
pPDPT = (volatile struct pdpt *)((uintptr_t)vPML4E->address << 12);
pdpt_free(pPDPT, force);
+ count--;
+ }
+
+ if (!force && count) {
+ vPML4->count = count;
+ return;
}
free:
free_phys_page((void *)(uintptr_t)pPML4);
}
+/* clone */
+
+volatile void *page_clone(volatile void *old_pADDR, bool cow)
+{
+ volatile const void *old_vADDR;
+ volatile void *new_pADDR, *new_vADDR;
+
+ // TODO: cow
+ (void)cow;
+
+ // don't reallocate kernel memory!!
+ if ((volatile char *)old_pADDR <= kernel_end)
+ return old_pADDR;
+
+ new_pADDR = alloc_phys_page();
+ if (new_pADDR == NULL)
+ return NULL;
+
+ old_vADDR = PAGE_MAPC(old_pADDR);
+ new_vADDR = PAGE_MAP(new_pADDR);
+ memcpyv(new_vADDR, old_vADDR, PAGE_SIZE);
+ return new_pADDR;
+}
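[editor's note] page_clone currently ignores its cow flag and always copies the frame. For reference, a heavily hedged sketch of what a copy-on-write branch in the pt_clone loop below could look like; phys_page_ref is an assumed helper and nothing here is in this commit:

	if (cow) {
		/* hypothetical: share the frame read-only, copy on the write fault */
		old_vPTE->flags &= ~F_WRITEABLE;   /* parent faults on its next write */
		new_vPTE->flags = old_vPTE->flags;
		new_vPTE->address = old_vPTE->address;
		phys_page_ref((void *)((uintptr_t)old_vPTE->address << 12)); /* assumed */
		continue;                          /* no page_clone, no copy */
	}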
+
+volatile struct pt *pt_clone(volatile const struct pt *old_pPT, bool cow)
+{
+ volatile const struct pt *old_vPT;
+ volatile struct pt *new_pPT, *new_vPT;
+
+ new_pPT = alloc_phys_page();
+ if (new_pPT == NULL)
+ return NULL;
+
+ old_vPT = PT_MAPC(old_pPT);
+ new_vPT = PT_MAP(new_pPT);
+ memsetv(new_vPT, 0, PAGE_SIZE);
+
+ new_vPT->count_high = old_vPT->count_high;
+ new_vPT->count_low = old_vPT->count_low;
+
+ for (size_t i = 0; i < 512; i++) {
+ volatile const struct pte *old_vPTE;
+ volatile struct pte *new_vPTE;
+ volatile void *old_pADDR, *new_pADDR;
+
+ old_vPTE = &old_vPT->entries[i];
+ new_vPTE = &new_vPT->entries[i];
+
+ new_vPTE->execute_disable = old_vPTE->execute_disable;
+ new_vPTE->flags = old_vPTE->flags;
+ if (!(old_vPTE->flags & F_PRESENT))
+ continue;
+
+ old_pADDR = (volatile void *)((uintptr_t)old_vPTE->address << 12);
+ new_pADDR = page_clone(old_pADDR, cow);
+ if (new_pADDR == NULL)
+ goto fail;
+
+ new_vPTE->address = (uint64_t)new_pADDR >> 12;
+ }
+
+ return new_pPT;
+
+fail:
+ pt_free(new_pPT, true);
+ return NULL;
+}
+
+volatile struct pd *pd_clone(volatile const struct pd *old_pPD, bool cow)
+{
+ volatile const struct pd *old_vPD;
+ volatile struct pd *new_pPD, *new_vPD;
+
+ new_pPD = alloc_phys_page();
+ if (new_pPD == NULL)
+ return NULL;
+
+ old_vPD = PD_MAPC(old_pPD);
+ new_vPD = PD_MAP(new_pPD);
+ memsetv(new_vPD, 0, PAGE_SIZE);
+
+ new_vPD->count = old_vPD->count;
+
+ for (size_t i = 0; i < 512; i++) {
+ volatile const struct pde *old_vPDE;
+ volatile struct pde *new_vPDE;
+ volatile const struct pt *old_pPT;
+ volatile struct pt *new_pPT;
+
+ old_vPDE = &old_vPD->entries[i];
+ new_vPDE = &new_vPD->entries[i];
+
+ new_vPDE->execute_disable = old_vPDE->execute_disable;
+ new_vPDE->flags = old_vPDE->flags;
+ if (!(old_vPDE->flags & F_PRESENT))
+ continue;
+
+ old_pPT =
+ (volatile const struct pt *)((uintptr_t)old_vPDE->address << 12);
+ new_pPT = pt_clone(old_pPT, cow);
+ if (new_pPT == NULL)
+ goto fail;
+
+ new_vPDE->address = (uint64_t)new_pPT >> 12;
+ }
+
+ return new_pPD;
+
+fail:
+ pd_free(new_pPD, true);
+ return NULL;
+}
+
+volatile struct pdpt *pdpt_clone(volatile const struct pdpt *old_pPDPT,
+ bool cow)
+{
+ volatile const struct pdpt *old_vPDPT;
+ volatile struct pdpt *new_pPDPT, *new_vPDPT;
+
+ new_pPDPT = alloc_phys_page();
+ if (new_pPDPT == NULL)
+ return NULL;
+
+ old_vPDPT = PDPT_MAPC(old_pPDPT);
+ new_vPDPT = PDPT_MAP(new_pPDPT);
+ memsetv(new_vPDPT, 0, PAGE_SIZE);
+
+ new_vPDPT->count = old_vPDPT->count;
+
+ for (size_t i = 0; i < 512; i++) {
+ volatile const struct pdpte *old_vPDPTE;
+ volatile struct pdpte *new_vPDPTE;
+ volatile const struct pd *old_pPD;
+ volatile struct pd *new_pPD;
+
+ old_vPDPTE = &old_vPDPT->entries[i];
+ new_vPDPTE = &new_vPDPT->entries[i];
+
+ new_vPDPTE->execute_disable = old_vPDPTE->execute_disable;
+ new_vPDPTE->flags = old_vPDPTE->flags;
+ if (!(old_vPDPTE->flags & F_PRESENT))
+ continue;
+
+ old_pPD =
+ (volatile const struct pd *)((uintptr_t)old_vPDPTE->address << 12);
+ new_pPD = pd_clone(old_pPD, cow);
+ if (new_pPD == NULL)
+ goto fail;
+
+ new_vPDPTE->address = (uint64_t)new_pPD >> 12;
+ }
+
+ return new_pPDPT;
+
+fail:
+ pdpt_free(new_pPDPT, true);
+ return NULL;
+}
+
+volatile struct pml4 *pml4_clone(volatile const struct pml4 *old_pPML4,
+ bool cow)
+{
+ volatile const struct pml4 *old_vPML4;
+ volatile struct pml4 *new_pPML4, *new_vPML4;
+
+ new_pPML4 = pml4_alloc();
+ if (new_pPML4 == NULL)
+ return NULL;
+
+ old_vPML4 = PML4_MAPC(old_pPML4);
+ new_vPML4 = PML4_MAP(new_pPML4);
+
+ new_vPML4->count = old_vPML4->count;
+
+ for (size_t i = 0; i < 512; i++) {
+ volatile const struct pml4e *old_vPML4E;
+ volatile struct pml4e *new_vPML4E;
+ volatile const struct pdpt *old_pPDPT;
+ volatile struct pdpt *new_pPDPT;
+
+ old_vPML4E = &old_vPML4->entries[i];
+ new_vPML4E = &new_vPML4->entries[i];
+
+ new_vPML4E->execute_disable = old_vPML4E->execute_disable;
+ new_vPML4E->flags = old_vPML4E->flags;
+ if (!(old_vPML4E->flags & F_PRESENT))
+ continue;
+
+ old_pPDPT =
+ (volatile const struct pdpt *)((uintptr_t)old_vPML4E->address
+ << 12);
+ new_pPDPT = pdpt_clone(old_pPDPT, cow);
+ if (new_pPDPT == NULL)
+ goto fail;
+
+ new_vPML4E->address = (uint64_t)new_pPDPT >> 12;
+ }
+
+ return new_pPML4;
+
+fail:
+ pml4_free(new_pPML4, true);
+ return NULL;
+}
+
/* page specific */
// locate a pte for a virtual address
// @returns VIRTUAL ADDRESS
static volatile struct pte *page_locate(volatile struct pml4 *pPML4,
- void *vADDR)
+ const void *vADDR)
{
volatile struct pdpt *pPDPT;
volatile struct pd *pPD;
@@ -527,7 +734,7 @@ static volatile struct pte *page_locate(volatile struct pml4 *pPML4,
return NULL;
offset = ((uint64_t)vADDR >> 12) & 0x1ff;
- vPT = pt_map(pPT);
+ vPT = PT_MAP(pPT);
vPTE = &vPT->entries[offset];
if (vPTE->flags & F_PRESENT)
@@ -560,7 +767,7 @@ static volatile struct pte *page_alloc(volatile struct pml4 *pPML4, void *vADDR,
return NULL;
offset = ((uint64_t)vADDR >> 12) & 0x1ff;
- vPT = pt_map(pPT);
+ vPT = PT_MAP(pPT);
vPTE = &vPT->entries[offset];
memsetv(vPTE, 0, sizeof(struct pte));
@@ -573,7 +780,8 @@ static volatile struct pte *page_alloc(volatile struct pml4 *pPML4, void *vADDR,
}
// free a pte (page) for a virtual address
-static void page_free(volatile struct pml4 *pPML4, void *vADDR)
+static void page_free(volatile struct pml4 *pPML4, const void *vADDR,
+ bool deallocate)
{
volatile struct pte *vPTE;
void *pADDR;
@@ -585,17 +793,19 @@ static void page_free(volatile struct pml4 *pPML4, void *vADDR)
- vPTE->flags = 0;
- vPTE->address = 0;
- pADDR = (void *)((uintptr_t)vPTE->address << 12);
- free_phys_page(pADDR);
+ if (deallocate) {
+ // read the frame number before the entry is cleared
+ pADDR = (void *)((uintptr_t)vPTE->address << 12);
+ free_phys_page(pADDR);
+ }
+
+ vPTE->flags = 0;
+ vPTE->address = 0;
}
/* map & unmap pages */
-static void unmap_pages(volatile struct pml4 *pPML4, void *vADDR,
- long page_count)
+static void unmap_pages(volatile struct pml4 *pPML4, const void *vADDR,
+ long page_count, bool deallocate)
{
for (long i = 0; i < page_count; i++) {
- page_free(pPML4, vADDR);
+ page_free(pPML4, vADDR, deallocate);
vADDR = (char *)vADDR + PAGE_SIZE;
}
}
@@ -617,7 +827,7 @@ static int map_pages(volatile struct pml4 *pPML4, void *vADDR, void *pADDR,
return 0;
fail:
- unmap_pages(pPML4, vADDR, page_count);
+ unmap_pages(pPML4, vADDR, page_count, true);
return 1;
}
@@ -651,14 +861,14 @@ void paging_init(void)
kernel_pd_1.entries[0].flags = F_PRESENT | F_WRITEABLE;
kernel_pd_1.entries[0].address = (uint64_t)(paging_pt.entries) >> 12;
- memsetv(paging_pt.entries, 0, 4096);
+ memsetv(paging_pt.entries, 0, PAGE_SIZE);
// make sure we are using THESE pagetables
// EFI doesn't use these on boot
__asm__ volatile("mov %0, %%cr3" ::"r"(kernel_pml4.entries) : "memory");
}
-volatile void *paging_alloc(void)
+volatile void *pgdir_alloc(void)
{
volatile struct pml4 *pPML4;
@@ -666,8 +876,7 @@ volatile void *paging_alloc(void)
if (pPML4 == NULL)
return NULL;
- if (map_pages(pPML4, kernel_start, kernel_start,
- F_PRESENT | F_WRITEABLE,
+ if (map_pages(pPML4, kernel_start, kernel_start, F_PRESENT | F_WRITEABLE,
(kernel_end - kernel_start) / PAGE_SIZE)) {
pml4_free(pPML4, false);
return NULL;
@@ -676,31 +885,27 @@ volatile void *paging_alloc(void)
return pPML4;
}
-void paging_free(volatile void *addr)
+volatile void *pgdir_clone(volatile const void *old_pgdir, bool cow)
{
- pml4_free(addr, true);
+ return pml4_clone((volatile const struct pml4 *)old_pgdir, cow);
}
-static inline void *page_align(void *addr)
+void pgdir_free(volatile void *addr)
{
- uintptr_t a = (uintptr_t)addr;
- a /= PAGE_SIZE;
- a *= PAGE_SIZE;
- return (void *)a;
+ pml4_free(addr, true);
}
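[editor's note] The renamed pgdir_* API end to end, as a quick sketch (NULL checks shortened):

	volatile void *dir = pgdir_alloc();           /* fresh pml4, kernel pre-mapped */
	volatile void *dup = pgdir_clone(dir, false); /* deep copy; cow still unused */
	pgdir_free(dup);                              /* force-frees every level */
	pgdir_free(dir);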
void *mem_mapaddr(mem_ctx_t ctx, void *phys, void *virt, size_t len,
unsigned int flags)
{
long pages;
- ptrdiff_t error;
+ size_t error;
void *aligned_phys;
- // get length and physical page aligned address
- aligned_phys = page_align(phys);
- error = (char *)phys - (char *)aligned_phys;
+ error = (size_t)phys % PAGE_SIZE;
len += error;
- pages = len / PAGE_SIZE + 1;
+ pages = (len + PAGE_SIZE - 1) / PAGE_SIZE;
+ aligned_phys = (char *)phys - error;
// get page aligned (or allocate) virtual address
if (virt == NULL)
@@ -708,6 +913,12 @@ void *mem_mapaddr(mem_ctx_t ctx, void *phys, void *virt, size_t len,
if (virt == NULL)
return NULL;
+ if (virtaddr_take(&ctx->virtctx, virt, pages))
+ return NULL;
+
+ assert((uint64_t)virt % PAGE_SIZE == 0,
+ "mem_mapaddr: vitural address not page aligned");
+
if (map_pages((volatile struct pml4 *)ctx->pml4, virt, aligned_phys,
F_PRESENT | flags, pages)) {
virtaddr_free(&ctx->virtctx, virt);
@@ -717,15 +928,74 @@ void *mem_mapaddr(mem_ctx_t ctx, void *phys, void *virt, size_t len,
return (char *)virt + error;
}
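[editor's note] The rounding change is worth a worked example. Mapping len = 0x2000 bytes at phys = 0x1234: error = 0x234, len grows to 0x2234, pages = ceil(0x2234 / 0x1000) = 3, aligned_phys = 0x1000, and the caller gets back virt + 0x234. The old len / PAGE_SIZE + 1 formula allocated one page too many whenever the adjusted length landed exactly on a page boundary (an aligned 0x2000 mapping took 3 pages instead of 2). Assuming some context ctx:

	void *mmio = mem_mapaddr(ctx, (void *)0x1234, NULL, 0x2000, F_WRITEABLE);
	/* mmio == chosen_virt + 0x234; pages 0x1000..0x3fff are mapped */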
-void mem_unmapaddr(mem_ctx_t ctx, void *virt)
+void *kmapuseraddr(mem_ctx_t ctx, const void *usrADDR, size_t len)
+{
+ volatile struct pml4 *pml4;
+ char *pADDR, *vADDR;
+ size_t npages, error, i;
+
+ pml4 = (volatile struct pml4 *)kernel_mem_ctx->pml4;
+ error = (size_t)usrADDR % PAGE_SIZE;
+ npages = (len + error + PAGE_SIZE - 1) / PAGE_SIZE;
+ vADDR = virtaddr_alloc(&kernel_mem_ctx->virtctx, npages);
+ if (vADDR == NULL)
+ return NULL;
+
+ if (virtaddr_take(&kernel_mem_ctx->virtctx, vADDR, npages))
+ return NULL;
+
+ assert((size_t)vADDR % PAGE_SIZE == 0,
+ "kmapuseraddr: vitural address not page aligned");
+
+ for (i = 0; i < npages; i++) {
+ pADDR = mem_get_phys(ctx, (char *)usrADDR + i * PAGE_SIZE);
+ if (pADDR == NULL)
+ goto fail;
+
+ // page align
+ pADDR = (char *)(((size_t)pADDR / PAGE_SIZE) * PAGE_SIZE);
+
+ if (map_pages(pml4, vADDR + i * PAGE_SIZE, pADDR,
+ F_PRESENT | F_WRITEABLE, 1))
+ goto fail;
+ }
+
+ return vADDR + error;
+
+fail:
+ unmap_pages(&kernel_pml4, vADDR, i, false);
+ virtaddr_free(&kernel_mem_ctx->virtctx, vADDR);
+ return NULL;
+}
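[editor's note] kmapuseraddr walks the user context page by page with mem_get_phys and aliases each frame into the kernel window, so non-contiguous user buffers work; the pages are borrowed, not copied. A hedged syscall-style sketch (the function name is an assumption):

int copy_from_user_sketch(mem_ctx_t uctx, const void *ubuf, size_t len)
{
	char *kbuf = kmapuseraddr(uctx, ubuf, len);
	if (kbuf == NULL)
		return -1;    /* range not fully mapped in uctx */
	/* ... read len bytes through kbuf ... */
	kunmapaddr(kbuf); /* unmaps only: deallocate=false keeps the user's frames */
	return 0;
}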
+
+void mem_unmapaddr(mem_ctx_t ctx, const void *virt)
{
+ long pages;
+
if (virt == NULL)
return;
- long pages = virtaddr_free(&ctx->virtctx, virt);
+ // page align
+ virt = (void *)(((size_t)virt / PAGE_SIZE) * PAGE_SIZE);
+
+ pages = virtaddr_free(&ctx->virtctx, virt);
if (pages < 1)
return;
- unmap_pages(&kernel_pml4, virt, pages);
+ unmap_pages(&kernel_pml4, virt, pages, false);
+}
+
+void *mem_get_phys(mem_ctx_t ctx, const void *vADDR)
+{
+ char *pADDR;
+ volatile struct pte *vPTE;
+
+ vPTE = page_locate((volatile struct pml4 *)ctx->pml4, vADDR);
+ if (vPTE == NULL)
+ return NULL;
+
+ pADDR = (void *)((uintptr_t)vPTE->address << 12);
+ pADDR += ((uint64_t)vADDR % PAGE_SIZE);
+ return pADDR;
}
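[editor's note] mem_get_phys is the new walk-and-translate primitive: locate the PTE, shift the frame number back up, and splice the page offset in. Worked through for vADDR = 0x40123abc with a PTE frame field of 0x5acf0:

	/* frame  = 0x5acf0 << 12       = 0x5acf0000
	 * offset = 0x40123abc % 0x1000 = 0xabc
	 * phys   = 0x5acf0abc */
	void *phys = kget_phys((void *)0x40123abc); /* NULL if unmapped */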
void *mem_alloc_page(mem_ctx_t ctx, unsigned int flags)
@@ -755,24 +1025,71 @@ void *mem_alloc_pages(mem_ctx_t ctx, size_t count, unsigned int flags)
void *mem_alloc_pages_at(mem_ctx_t ctx, size_t count, void *virt,
unsigned int flags)
{
- void *phys = alloc_phys_pages(count);
- if (phys == NULL)
- return NULL;
+ size_t pages_needed = count;
- if (map_pages((volatile struct pml4 *)ctx->pml4, virt, phys, flags,
- count)) {
- if (phys)
- free_phys_pages(phys, count);
+ struct phys_page_slice prev_phys_block = PHYS_PAGE_SLICE_NULL;
+ struct phys_page_slice phys_pages;
+
+ if (virtaddr_take(&ctx->virtctx, virt, count))
return NULL;
+
+ while (pages_needed > 0) {
+ phys_pages = alloc_phys_page_withextra(pages_needed);
+ if (phys_pages.pagestart == NULL) {
+ goto mem_alloc_pages_at_fail;
+ }
+
+ {
+ // allocate the first page and store in it the physical address of the
+ // previous chunk of pages
+ // TODO: skip this if there are already enough pages from first alloc
+ void *pageone = kmapaddr(phys_pages.pagestart, NULL, 1,
+ F_PRESENT | F_WRITEABLE);
+ if (pageone == NULL) {
+ panic("kernel out of virtual memory");
+ }
+ *((struct phys_page_slice *)pageone) = prev_phys_block;
+ prev_phys_block = phys_pages;
+ kunmapaddr(pageone);
+ }
+
+ // index into virtual page array at index [count - pages_needed]
+ void *vaddr = ((uint8_t *)virt) + ((count - pages_needed) * PAGE_SIZE);
+
+ assert(pages_needed >= phys_pages.num_pages, "overflow");
+ pages_needed -= phys_pages.num_pages;
+
+ if (map_pages((volatile struct pml4 *)ctx->pml4, vaddr,
+ phys_pages.pagestart, flags, phys_pages.num_pages)) {
+ goto mem_alloc_pages_at_fail;
+ }
}
+
return virt;
+
+mem_alloc_pages_at_fail:
+ while (prev_phys_block.pagestart) {
+ void *virtpage = kmapaddr(prev_phys_block.pagestart, NULL, 1,
+ F_PRESENT | F_WRITEABLE);
+ if (!virtpage) {
+ // memory corruption, most likely a bug
+ // could also ERROR here and exit with leak
+ panic("unable to free memory from failed mem_alloc_pages_at call");
+ }
+ struct phys_page_slice prev = *(struct phys_page_slice *)virtpage;
+ prev_phys_block = prev;
+ free_phys_pages_slice(prev);
+ kunmapaddr(virtpage);
+ }
+
+ return NULL;
}
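[editor's note] The rollback scheme here is the clever bit: before a slice's pages are handed out, its first page is briefly mapped and overwritten with the descriptor of the previous slice, threading an intrusive singly linked list through the allocation itself. The fail path then unwinds that chain without touching the heap:

	/* after three loop iterations:
	 *
	 *   prev_phys_block -> slice C (first page stores slice B's descriptor)
	 *                      slice B (first page stores slice A's descriptor)
	 *                      slice A (first page stores PHYS_PAGE_SLICE_NULL)
	 *
	 * on success the stored descriptors are just dead bytes in pages the
	 * caller now owns; on failure they are exactly the free list needed
	 * to roll everything back. */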
-void mem_free_pages(mem_ctx_t ctx, void *virt)
+void mem_free_pages(mem_ctx_t ctx, const void *virt)
{
if (virt == NULL)
return;
long pages = virtaddr_free(&ctx->virtctx, virt);
- unmap_pages((volatile struct pml4 *)ctx->pml4, virt, pages);
+ unmap_pages((volatile struct pml4 *)ctx->pml4, virt, pages, true);
}
diff --git a/kernel/memory/paging.h b/kernel/memory/paging.h
index d80a9bf..94b7260 100644
--- a/kernel/memory/paging.h
+++ b/kernel/memory/paging.h
@@ -9,9 +9,12 @@
#ifndef PAGING_H_
#define PAGING_H_
+#include <stdbool.h>
+
void paging_init(void);
-volatile void *paging_alloc(void);
-void paging_free(volatile void *addr);
+volatile void *pgdir_alloc(void);
+volatile void *pgdir_clone(volatile const void *pdir, bool cow);
+void pgdir_free(volatile void *addr);
#endif /* paging.h */
diff --git a/kernel/memory/physalloc.c b/kernel/memory/physalloc.c
index b164358..8971bcf 100644
--- a/kernel/memory/physalloc.c
+++ b/kernel/memory/physalloc.c
@@ -4,20 +4,19 @@
#include "physalloc.h"
-extern char kernel_start;
-extern char kernel_end;
-#define kaddr(addr) ((uintptr_t)(&addr))
+extern char kernel_start[];
+extern char kernel_end[];
// between memory_start and kernel_start will be the bitmap
static uintptr_t memory_start = 0;
-static uint64_t *bitmap;
+static uint64_t *bitmap = NULL;
static uint64_t total_memory;
static uint64_t free_memory;
static uint64_t page_count;
static uint64_t segment_count;
struct memory_map phys_mmap;
-struct memory_segment *page_start;
+struct memory_segment *page_start = NULL;
static const char *segment_type_str[] = {
[SEG_TYPE_FREE] = "Free", [SEG_TYPE_RESERVED] = "Reserved",
@@ -50,6 +49,9 @@ static long page_idx(void *page)
int cur_page = 0;
for (uint64_t idx = 0; idx < segment_count; idx++) {
const struct memory_segment *m = page_start;
+ if (addr < m->addr) {
+ return -1;
+ }
if ((uintptr_t)m + m->len > addr) {
return cur_page + ((addr - m->addr) / PAGE_SIZE);
}
@@ -58,12 +60,12 @@ static long page_idx(void *page)
return -1;
}
-static inline bool bitmap_get(int i)
+static inline bool bitmap_get(size_t i)
{
return (bitmap[i / 64] >> i % 64) & 1;
}
-static inline void bitmap_set(int i, bool v)
+static inline void bitmap_set(size_t i, bool v)
{
if (v)
free_memory -= PAGE_SIZE;
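[editor's note] bitmap_get and bitmap_set pack one allocation bit per physical page into uint64_t words, and bitmap_set also keeps free_memory in sync, so setting a bit that is already set (or clearing a clear one) silently skews the accounting. The index math, worked for page 517:

	/* page 517 -> word 517 / 64 = 8, bit 517 % 64 = 5 */
	bool in_use = (bitmap[8] >> 5) & 1; /* same as bitmap_get(517) */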
@@ -76,17 +78,26 @@ static inline void bitmap_set(int i, bool v)
void *alloc_phys_page(void)
{
- return alloc_phys_pages(1);
+ return alloc_phys_pages_exact(1);
}
-void *alloc_phys_pages(int pages)
+void *alloc_phys_pages_exact(size_t pages)
{
if (pages < 1)
return NULL;
- int n_contiguous = 0;
- int free_region_start = 0;
- for (uint64_t i = 0; i < page_count; i++) {
+ if (bitmap == NULL || page_start == NULL) {
+ // temporary bump allocator
+ void *addr = (void *)memory_start;
+ assert(pages == 1,
+ "caller expects more pages, but is only getting one");
+ memory_start += PAGE_SIZE;
+ return addr;
+ }
+
+ size_t n_contiguous = 0;
+ size_t free_region_start = 0;
+ for (size_t i = 0; i < page_count; i++) {
bool free = !bitmap_get(i);
if (free) {
@@ -94,7 +105,7 @@ void *alloc_phys_pages(int pages)
free_region_start = i;
n_contiguous++;
if (n_contiguous == pages) {
- for (int j = 0; j < pages; j++)
+ for (size_t j = 0; j < pages; j++)
bitmap_set(free_region_start + j, true);
return page_at(free_region_start);
}
@@ -105,12 +116,54 @@ void *alloc_phys_pages(int pages)
return NULL;
}
+struct phys_page_slice alloc_phys_page_withextra(size_t max_pages)
+{
+ if (max_pages == 0)
+ return PHYS_PAGE_SLICE_NULL;
+
+ for (size_t i = 0; i < page_count; i++) {
+ const bool free = !bitmap_get(i);
+ if (!free)
+ continue;
+
+ // now allocated
+ bitmap_set(i, true);
+
+ // found at least one page, guaranteed to return valid slice at this
+ // point
+ struct phys_page_slice out = {
+ .pagestart = page_at(i),
+ .num_pages = 1,
+ };
+
+ // add some extra pages if possible
+ for (; out.num_pages < MIN(page_count - i, max_pages);
+ ++out.num_pages) {
+ // early return if max_pages isn't available
+ if (bitmap_get(i + out.num_pages)) {
+ return out;
+ }
+ bitmap_set(i + out.num_pages, true);
+ }
+
+ return out;
+ }
+
+ // only reachable if there is not a single free page in the bitmap
+ return PHYS_PAGE_SLICE_NULL;
+}
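[editor's note] alloc_phys_page_withextra trades contiguity for speed: it grabs the first free page, then greedily extends the run up to max_pages, returning early at the first hole. The intended calling pattern, as mem_alloc_pages_at uses it:

	size_t need = 8;
	while (need > 0) {
		struct phys_page_slice s = alloc_phys_page_withextra(need);
		if (s.pagestart == NULL)
			break; /* truly out of memory: caller rolls back */
		/* 1 <= s.num_pages <= need; map the run, then ask for the rest */
		need -= s.num_pages;
	}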
+
void free_phys_page(void *ptr)
{
free_phys_pages(ptr, 1);
}
-void free_phys_pages(void *ptr, int pages)
+void free_phys_pages_slice(struct phys_page_slice slice)
+{
+ free_phys_pages(slice.pagestart, slice.num_pages);
+}
+
+void free_phys_pages(void *ptr, size_t pages)
{
if (ptr == NULL)
return;
@@ -119,8 +172,8 @@ void free_phys_pages(void *ptr, int pages)
if (idx == -1)
return;
- for (int i = 0; i < pages; i++)
- bitmap_set(idx + pages, false);
+ for (size_t i = 0; i < pages; i++)
+ bitmap_set(idx + i, false);
}
static bool segment_invalid(const struct memory_segment *segment)
@@ -129,11 +182,11 @@ static bool segment_invalid(const struct memory_segment *segment)
return true;
if (segment->type != SEG_TYPE_FREE)
return true;
- if (segment->addr < kaddr(kernel_start))
+ if (segment->addr < (uintptr_t)kernel_start)
return true;
if (segment->addr + segment->len < memory_start)
return true;
- if (segment->addr + segment->len < kaddr(kernel_start))
+ if (segment->addr + segment->len < (uintptr_t)kernel_start)
return true;
return false;
}
@@ -147,7 +200,7 @@ static struct memory_segment clamp_segment(const struct memory_segment *segment)
if (memory_start)
start = memory_start;
else
- start = kaddr(kernel_end);
+ start = (uintptr_t)kernel_end;
if (segment->addr < start) {
addr = start;
@@ -192,7 +245,7 @@ void physalloc_init(struct memory_map *map)
long bitmap_pages = (page_count / 64 / PAGE_SIZE) + 1;
long bitmap_size = bitmap_pages * PAGE_SIZE;
- bitmap = (uint64_t *)page_align(kaddr(kernel_end));
+ bitmap = (uint64_t *)page_align((uintptr_t)kernel_end);
long page_area_size = segment_count * sizeof(struct memory_segment);
char *page_area_addr = (char *)bitmap + bitmap_size;
diff --git a/kernel/memory/physalloc.h b/kernel/memory/physalloc.h
index 7afe998..e279409 100644
--- a/kernel/memory/physalloc.h
+++ b/kernel/memory/physalloc.h
@@ -11,11 +11,31 @@
#include <comus/memory.h>
+/// Represents some contiguous physical pages
+struct phys_page_slice {
+ void *pagestart;
+ size_t num_pages;
+};
+
+#define PHYS_PAGE_SLICE_NULL \
+ ((struct phys_page_slice){ .pagestart = NULL, .num_pages = 0 })
+
/**
* Initialize the physical page allocator
*/
void physalloc_init(struct memory_map *map);
+/**
+ * Allocates the first page(s) it finds. Returns a pointer to that page
+ * and, if there are (up to max_pages) extra pages free after it, it allocates
+ * them as well.
+ *
+ * @param max_pages - the maximum number of pages to mark as allocated
+ * @returns a slice of all of the allocated pages, num_pages will be
+ * <= max_pages
+ */
+struct phys_page_slice alloc_phys_page_withextra(size_t max_pages);
+
/**
* Allocates a single physical page in memory
* @returns the physical address of the page
@@ -23,10 +43,11 @@ void physalloc_init(struct memory_map *map);
void *alloc_phys_page(void);
/**
- * Allocates count physical pages in memory
- * @returns the physical address of the first page
+ * Allocates count contiguous physical pages in memory
+ * @returns the physical address of the first page, or NULL if no
+ * contiguous run of count pages exists.
*/
-void *alloc_phys_pages(int count);
+void *alloc_phys_pages_exact(size_t count);
/**
* Frees a single physical page in memory
@@ -39,6 +60,12 @@ void free_phys_page(void *ptr);
* @param ptr - the physical address of the first page
* @param count - the number of pages in the list
*/
-void free_phys_pages(void *ptr, int count);
+void free_phys_pages(void *ptr, size_t count);
+
+/**
+ * Frees a slice of physical pages in memory
+ * @param slice - the pages to free
+ */
+void free_phys_pages_slice(struct phys_page_slice slice);
#endif /* physalloc.h */
diff --git a/kernel/memory/virtalloc.c b/kernel/memory/virtalloc.c
index 0f4de93..0cbba33 100644
--- a/kernel/memory/virtalloc.c
+++ b/kernel/memory/virtalloc.c
@@ -1,5 +1,7 @@
+#include "lib/kio.h"
#include <lib.h>
#include <comus/memory.h>
+#include <stdint.h>
#include "virtalloc.h"
@@ -79,7 +81,7 @@ static void free_node(struct virt_ctx *ctx, struct virt_addr_node *node)
void virtaddr_init(struct virt_ctx *ctx)
{
struct virt_addr_node init = {
- .start = 0x40005000, // map after paging pt
+ .start = 0x50000000, // above the 0x40000000 paging scratch window
.end = 0x1000000000000, // 48bit memory address max
.next = NULL,
.prev = NULL,
@@ -96,6 +98,44 @@ void virtaddr_init(struct virt_ctx *ctx)
ctx->is_allocating = false;
}
+int virtaddr_clone(struct virt_ctx *old, struct virt_ctx *new)
+{
+ // copy over data
+ memcpy(new, old, sizeof(struct virt_ctx));
+
+ // allocate new space
+ new->alloc_nodes =
+ kalloc(sizeof(struct virt_addr_node) * new->alloc_node_count);
+ if (new->alloc_nodes == NULL)
+ return 1;
+
+ // update prev/next in new allocation space
+ update_node_ptrs(old->alloc_nodes, new->alloc_nodes, old->alloc_node_count,
+ new->alloc_node_count);
+
+ // update bootstrap nodes
+ for (size_t i = 0; i < new->used_node_count; i++) {
+ struct virt_addr_node *prev, *next;
+
+ if (i >= BOOTSTRAP_VIRT_ALLOC_NODES)
+ break;
+
+ // get prev
+ prev = i > 0 ? &new->bootstrap_nodes[i - 1] : NULL;
+ next = i < BOOTSTRAP_VIRT_ALLOC_NODES - 1 ?
+ &new->bootstrap_nodes[i + 1] :
+ NULL;
+
+ new->bootstrap_nodes[i].prev = prev;
+ new->bootstrap_nodes[i].next = next;
+ }
+
+ // get starting node
+ new->start_node = &new->bootstrap_nodes[0]; // for now
+
+ return 0;
+}
+
static void merge_back(struct virt_ctx *ctx, struct virt_addr_node *node)
{
while (node->prev) {
@@ -139,33 +179,69 @@ void *virtaddr_alloc(struct virt_ctx *ctx, int n_pages)
if (node->is_alloc)
continue;
- if (length >= n_length) {
- struct virt_addr_node *new = get_node(ctx);
- if (node->prev != NULL) {
- node->prev->next = new;
- } else {
- ctx->start_node = new;
- }
- new->next = node;
- new->prev = node->prev;
- node->prev = new;
- new->start = node->start;
- new->end = new->start + n_length;
- node->start = new->end;
- new->is_alloc = true;
- new->is_used = true;
- new->next = node;
- void *mem = (void *)new->start;
- merge_back(ctx, new);
- merge_forward(ctx, new);
- return mem;
- }
+ if (length < n_length)
+ continue;
+
+ return (void *)node->start;
}
return NULL;
}
-long virtaddr_free(struct virt_ctx *ctx, void *virtaddr)
+int virtaddr_take(struct virt_ctx *ctx, const void *virt, int n_pages)
+{
+ if (n_pages < 1)
+ return 0;
+
+ long n_length = n_pages * PAGE_SIZE;
+ struct virt_addr_node *node = ctx->start_node;
+
+ for (; node != NULL; node = node->next) {
+ if (node->is_alloc)
+ continue;
+
+ if (node->start > (uintptr_t)virt ||
+ node->end < (uintptr_t)virt + n_length)
+ continue;
+
+ // create new node on left
+ if (node->start < (uintptr_t)virt) {
+ struct virt_addr_node *left = get_node(ctx);
+ left->next = node;
+ left->prev = node->prev;
+ left->start = node->start;
+ left->end = (uintptr_t)virt;
+ left->is_used = true;
+ left->is_alloc = false;
+ if (node->prev)
+ node->prev->next = left;
+ else
+ ctx->start_node = left;
+ node->prev = left;
+ }
+
+ // create new node on right
+ if (node->end > (uintptr_t)virt + n_length) {
+ struct virt_addr_node *right = get_node(ctx);
+ right->prev = node;
+ right->next = node->next;
+ right->start = (uintptr_t)virt + n_length;
+ right->end = node->end;
+ right->is_used = true;
+ right->is_alloc = false;
+ if (node->next)
+ node->next->prev = right;
+ node->next = right;
+ }
+
+ node->start = (uintptr_t)virt;
+ node->end = node->start + n_length;
+ node->is_alloc = true;
+ node->is_used = true;
+
+ return 0;
+ }
+
+ return 1;
+}
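[editor's note] With this change virtaddr_alloc only finds a large-enough free node and virtaddr_take actually reserves it, splitting the node up to three ways. Taking 3 pages at 0x60000000 out of the initial [0x50000000, 2^48) node leaves (ctx here is any mem context):

	/*   [0x50000000 .. 0x60000000)       free  (new left node)
	 *   [0x60000000 .. 0x60003000)       taken (the reused node)
	 *   [0x60003000 .. 0x1000000000000)  free  (new right node) */
	if (virtaddr_take(&ctx->virtctx, (void *)0x60000000, 3) == 0) {
		/* range reserved; map_pages may now target it safely */
	}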
+
+long virtaddr_free(struct virt_ctx *ctx, const void *virtaddr)
{
if (virtaddr == NULL)
return -1;
diff --git a/kernel/memory/virtalloc.h b/kernel/memory/virtalloc.h
index 7bf8b91..5033242 100644
--- a/kernel/memory/virtalloc.h
+++ b/kernel/memory/virtalloc.h
@@ -53,6 +53,11 @@ struct virt_ctx {
void virtaddr_init(struct virt_ctx *ctx);
/**
+ * Clone the virtual address allocator
+ * @returns 0 on success, 1 on err
+ */
+int virtaddr_clone(struct virt_ctx *old, struct virt_ctx *new);
+
+/**
* Allocate a virtual address of length x pages
* @param pages - x pages
* @returns virt addr
@@ -60,11 +65,19 @@ void virtaddr_init(struct virt_ctx *ctx);
void *virtaddr_alloc(struct virt_ctx *ctx, int pages);
/**
+ * Take (yoink) a predefined virtual address of length x pages
+ * @param virt - the start of the virtual address to take
+ * @param pages - x pages
+ * @returns 0 on success, 1 on err
+ */
+int virtaddr_take(struct virt_ctx *ctx, const void *virt, int pages);
+
+/**
* Free the virtual address from virtaddr_alloc
* @param virtaddr - the addr to free
* @returns number of pages used for virtaddr
*/
-long virtaddr_free(struct virt_ctx *ctx, void *virtaddr);
+long virtaddr_free(struct virt_ctx *ctx, const void *virtaddr);
/**
* Cleans up heap allocations and frees the virtalloc context