Diffstat (limited to 'kernel/memory')
-rw-r--r--  kernel/memory/memory.c      15
-rw-r--r--  kernel/memory/paging.c     591
-rw-r--r--  kernel/memory/paging.h      24
-rw-r--r--  kernel/memory/physalloc.c  242
-rw-r--r--  kernel/memory/physalloc.h   44
-rw-r--r--  kernel/memory/virtalloc.c  200
-rw-r--r--  kernel/memory/virtalloc.h   31
7 files changed, 1147 insertions, 0 deletions
diff --git a/kernel/memory/memory.c b/kernel/memory/memory.c
new file mode 100644
index 0000000..1334051
--- /dev/null
+++ b/kernel/memory/memory.c
@@ -0,0 +1,15 @@
+#include <comus/memory.h>
+#include <comus/asm.h>
+
+#include "paging.h"
+#include "virtalloc.h"
+#include "physalloc.h"
+
+void memory_init(struct memory_map *map)
+{
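+	// order matters here: physalloc_init() calls mapaddr(), which
+	// needs paging and the virtual address allocator up first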
+ cli();
+ paging_init();
+ virtaddr_init();
+ physalloc_init(map);
+ sti();
+}
diff --git a/kernel/memory/paging.c b/kernel/memory/paging.c
new file mode 100644
index 0000000..045afc7
--- /dev/null
+++ b/kernel/memory/paging.c
@@ -0,0 +1,591 @@
+#include <lib.h>
+#include <comus/memory.h>
+
+#include "virtalloc.h"
+#include "physalloc.h"
+#include "paging.h"
+
+// PAGE MAP LEVEL 4 ENTRY
+struct pml4e {
+ uint64_t flags : 6;
+ uint64_t : 6;
+ uint64_t address : 40;
+ uint64_t : 11;
+ uint64_t execute_disable : 1;
+};
+
+// PAGE DIRECTORY POINTER TABLE ENTRY
+struct pdpte {
+ uint64_t flags : 6;
+ uint64_t : 1;
+ uint64_t page_size : 1;
+ uint64_t : 4;
+ uint64_t address : 40;
+ uint64_t : 11;
+ uint64_t execute_disable : 1;
+};
+
+// PAGE DIRECTORY ENTRY
+struct pde {
+ uint64_t flags : 6;
+ uint64_t : 1;
+ uint64_t page_size : 1;
+ uint64_t : 4;
+ uint64_t address : 40;
+ uint64_t : 11;
+ uint64_t execute_disable : 1;
+};
+
+// PAGE TABLE ENTRY
+struct pte {
+ uint64_t flags : 9;
+ uint64_t loaded : 1;
+ uint64_t : 2;
+ uint64_t address : 40;
+ uint64_t : 7;
+ uint64_t protection_key : 4;
+ uint64_t execute_disable : 1;
+};
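+
+// a 48-bit virtual address decomposes into four 9-bit table indices
+// plus a 12-bit page offset:
+//   pml4 index: (virt >> 39) & 0x1ff
+//   pdpt index: (virt >> 30) & 0x1ff
+//   pd   index: (virt >> 21) & 0x1ff
+//   pt   index: (virt >> 12) & 0x1ff
+// (the same shifts appear in the select_* helpers below)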
+
+// these live in the kernel's .bss segment, so we can write to them
+extern volatile struct pml4e kernel_pml4[512];
+extern volatile struct pdpte kernel_pdpt_0[512];
+extern volatile struct pde kernel_pd_0[512];
+extern volatile struct pte bootstrap_pt[512];
+// paging_pt must NEVER be referenced outside this file
+extern volatile struct pte paging_pt[512];
+
+// virtual addresses at which page tables are temporarily mapped so they
+// can be read and written; the tables are not guaranteed to be identity
+// mapped, so map them here with load_<type>(phys_addr) before use
+volatile void *addr_mapped = (void *)(uintptr_t)0x204000;
+static volatile struct pml4e *pml4_mapped = (void *)(uintptr_t)0x200000;
+static volatile struct pdpte *pdpt_mapped = (void *)(uintptr_t)0x201000;
+static volatile struct pde *pd_mapped = (void *)(uintptr_t)0x202000;
+static volatile struct pte *pt_mapped = (void *)(uintptr_t)0x203000;
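+
+// typical usage pattern (a sketch, mirroring select_pdpt() below):
+//
+//   void *frame = alloc_phys_page();    // pick a physical frame
+//   load_addr(frame);                   // window it in at addr_mapped
+//   memsetv(addr_mapped, 0, PAGE_SIZE); // now safe to access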
+
+static inline void invlpg(volatile void *addr)
+{
+ __asm__ volatile("invlpg (%0)" ::"r"(addr) : "memory");
+}
+
+static void load_addr(volatile void *phys_addr)
+{
+ static volatile struct pte *pt = &paging_pt[4];
+ pt->address = (uint64_t)phys_addr >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(addr_mapped);
+}
+
+static void load_pml4(volatile void *phys)
+{
+ static volatile struct pte *pt = &paging_pt[0];
+ if ((uint64_t)phys >> 12 == pt->address)
+ return;
+ pt->address = (uint64_t)phys >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pml4_mapped);
+}
+
+static void load_pdpt(volatile void *phys)
+{
+ static volatile struct pte *pt = &paging_pt[1];
+ if ((uint64_t)phys >> 12 == pt->address)
+ return;
+ pt->address = (uint64_t)phys >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pdpt_mapped);
+}
+
+static void load_pd(volatile void *phys)
+{
+ static volatile struct pte *pt = &paging_pt[2];
+ if ((uint64_t)phys >> 12 == pt->address)
+ return;
+ pt->address = (uint64_t)phys >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pd_mapped);
+}
+
+static void load_pt(volatile void *phys)
+{
+ static volatile struct pte *pt = &paging_pt[3];
+ if ((uint64_t)phys >> 12 == pt->address)
+ return;
+ pt->address = (uint64_t)phys >> 12;
+ pt->flags = F_PRESENT | F_WRITEABLE;
+ invlpg(pt_mapped);
+}
+
+#define PAG_SUCCESS 0
+#define PAG_CANNOT_ALLOC 1
+#define PAG_NOT_PRESENT 2
+
+static int select_pdpt(void *virt, unsigned int flags,
+ volatile struct pml4e *root, volatile struct pdpte **res,
+ bool create)
+{
+ load_pml4(root);
+ uint64_t offset = (uint64_t)virt >> 39;
+ volatile struct pml4e *pml4e = &pml4_mapped[offset];
+ if (!(pml4e->flags & F_PRESENT)) {
+ if (!create) {
+ return PAG_NOT_PRESENT;
+ }
+ void *new_page = alloc_phys_page();
+ if (new_page == NULL) {
+ return PAG_CANNOT_ALLOC;
+ }
+ load_addr(new_page);
+ memsetv(addr_mapped, 0, PAGE_SIZE);
+ pml4e->address = ((uint64_t)new_page) >> 12;
+ pml4e->flags = F_PRESENT;
+ }
+ if (flags)
+ pml4e->flags = F_PRESENT | flags;
+ *res = (volatile struct pdpte *)(uintptr_t)(pml4e->address << 12);
+ return PAG_SUCCESS;
+}
+
+static int select_pd(void *virt, unsigned int flags,
+ volatile struct pdpte *pdpt, volatile struct pde **res,
+ bool create)
+{
+ load_pdpt(pdpt);
+ uint64_t offset = ((uint64_t)virt >> 30) & 0x1ff;
+ volatile struct pdpte *pdpte = &pdpt_mapped[offset];
+ if (!(pdpte->flags & F_PRESENT)) {
+ if (!create) {
+ return PAG_NOT_PRESENT;
+ }
+ void *new_page = alloc_phys_page();
+ if (new_page == NULL) {
+ return PAG_CANNOT_ALLOC;
+ }
+ load_addr(new_page);
+ memsetv(addr_mapped, 0, PAGE_SIZE);
+ pdpte->address = ((uint64_t)new_page) >> 12;
+ pdpte->flags = F_PRESENT;
+ }
+ if (flags)
+ pdpte->flags = F_PRESENT | flags;
+ *res = (volatile struct pde *)(uintptr_t)(pdpte->address << 12);
+ return PAG_SUCCESS;
+}
+
+static int select_pt(void *virt, unsigned int flags, volatile struct pde *pd,
+ volatile struct pte **res, bool create)
+{
+ load_pd(pd);
+ uint64_t offset = ((uint64_t)virt >> 21) & 0x1ff;
+ volatile struct pde *pde = &pd_mapped[offset];
+ if (!(pde->flags & F_PRESENT)) {
+ if (!create) {
+ return PAG_NOT_PRESENT;
+ }
+ void *new_page = alloc_phys_page();
+ if (new_page == NULL) {
+ return PAG_CANNOT_ALLOC;
+ }
+ load_addr(new_page);
+ memsetv(addr_mapped, 0, PAGE_SIZE);
+ pde->address = ((uint64_t)new_page) >> 12;
+ pde->flags = F_PRESENT;
+ }
+ if (flags)
+ pde->flags = F_PRESENT | flags;
+ *res = (volatile struct pte *)(uintptr_t)(pde->address << 12);
+ return PAG_SUCCESS;
+}
+
+static void select_page(void *virt, volatile struct pte *pt,
+ volatile struct pte **res)
+{
+ load_pt(pt);
+ uint64_t offset = ((uint64_t)virt >> 12) & 0x1ff;
+ volatile struct pte *page = &pt_mapped[offset];
+ *res = page;
+ return;
+}
+
+static inline void try_unmap_pml4(void)
+{
+ for (int i = 0; i < 512; i++) {
+ if (pml4_mapped[i].flags & F_PRESENT)
+ return;
+ }
+ for (int i = 0; i < 512; i++) {
+ if (pml4_mapped[i].address) {
+ void *addr = (void *)(uintptr_t)(pml4_mapped[i].address << 12);
+ free_phys_page(addr);
+ }
+ }
+}
+
+static inline void try_unmap_pdpt(void)
+{
+ for (int i = 0; i < 512; i++) {
+ if (pdpt_mapped[i].flags & F_PRESENT)
+ return;
+ }
+ for (int i = 0; i < 512; i++) {
+ if (pdpt_mapped[i].address) {
+ void *addr = (void *)(uintptr_t)(pdpt_mapped[i].address << 12);
+ free_phys_page(addr);
+ }
+ }
+ try_unmap_pml4();
+}
+
+static inline void try_unmap_pd(void)
+{
+ for (int i = 0; i < 512; i++) {
+ if (pd_mapped[i].flags & F_PRESENT)
+ return;
+ }
+ for (int i = 0; i < 512; i++) {
+ if (pd_mapped[i].address) {
+ void *addr = (void *)(uintptr_t)(pd_mapped[i].address << 12);
+ free_phys_page(addr);
+ }
+ }
+ try_unmap_pdpt();
+}
+
+static inline void try_unmap_pt(void)
+{
+ for (int i = 0; i < 512; i++) {
+ if (pt_mapped[i].flags & F_PRESENT)
+ return;
+ }
+ for (int i = 0; i < 512; i++) {
+ if (pt_mapped[i].address) {
+ void *addr = (void *)(uintptr_t)(pt_mapped[i].address << 12);
+ free_phys_page(addr);
+ }
+ }
+ try_unmap_pd();
+}
+
+static void unmap_page(volatile struct pml4e *root, void *virt)
+{
+ volatile struct pdpte *pdpt;
+ volatile struct pde *pd;
+ volatile struct pte *pt;
+ volatile struct pte *page;
+
+ unsigned int df = 0;
+
+ if (select_pdpt(virt, df, root, &pdpt, false))
+ return;
+
+ if (select_pd(virt, df, pdpt, &pd, false))
+ return;
+
+ if (select_pt(virt, df, pd, &pt, false))
+ return;
+
+ select_page(virt, pt, &page);
+
+ page->address = 0;
+ page->flags = 0;
+ page->loaded = 0;
+
+ try_unmap_pt();
+
+ invlpg(virt);
+
+ return;
+}
+
+static void unmap_pages(volatile struct pml4e *root, void *virt_start,
+ long page_count)
+{
+ uint64_t pml4_o = -1, pdpt_o = -1, pd_o = -1;
+
+ uint64_t pml4_n, pdpt_n, pd_n;
+
+ volatile struct pdpte *pdpt = NULL;
+ volatile struct pde *pd = NULL;
+ volatile struct pte *pt = NULL;
+ volatile struct pte *page = NULL;
+
+ unsigned int df = 0;
+
+ void *virt;
+
+ for (long i = 0; i < page_count; i++) {
+ virt = (char *)virt_start + (i * PAGE_SIZE);
+
+ pml4_n = (uint64_t)virt >> 39;
+ pdpt_n = ((uint64_t)virt >> 30) & 0x1ff;
+ pd_n = ((uint64_t)virt >> 21) & 0x1ff;
+
+ if (pdpt == NULL || pml4_o != pml4_n) {
+ if (select_pdpt(virt, df, root, &pdpt, false))
+ continue;
+ pml4_o = pml4_n;
+ }
+
+ if (pd == NULL || pdpt_o != pdpt_n) {
+ if (select_pd(virt, df, pdpt, &pd, false))
+ continue;
+ pdpt_o = pdpt_n;
+ }
+
+ if (pt == NULL || pd_o != pd_n) {
+ if (pt) {
+ try_unmap_pt();
+ }
+ if (select_pt(virt, df, pd, &pt, false))
+ continue;
+ pd_o = pd_n;
+ }
+
+ select_page(virt, pt, &page);
+
+ page->address = 0;
+ page->flags = 0;
+ page->loaded = 0;
+ }
+
+ if (pt != NULL)
+ try_unmap_pt();
+
+ return;
+}
+
+static volatile struct pte *get_page(volatile struct pml4e *root, void *virt)
+{
+ volatile struct pdpte *pdpt;
+ volatile struct pde *pd;
+ volatile struct pte *pt;
+ volatile struct pte *page;
+
+ unsigned int df = 0;
+
+ if (select_pdpt(virt, df, root, &pdpt, false))
+ return NULL;
+
+ if (select_pd(virt, df, pdpt, &pd, false))
+ return NULL;
+
+ if (select_pt(virt, df, pd, &pt, false))
+ return NULL;
+
+ select_page(virt, pt, &page);
+
+ return page;
+}
+
+static int map_page(volatile struct pml4e *root, void *virt, void *phys,
+ unsigned int flags)
+{
+ volatile struct pdpte *pdpt;
+ volatile struct pde *pd;
+ volatile struct pte *pt;
+ volatile struct pte *page;
+
+ unsigned int df = F_WRITEABLE;
+
+ if (phys == NULL)
+ df = 0; // alloc page on fault
+
+ if (select_pdpt(virt, df, root, &pdpt, true))
+ return 1;
+
+ if (select_pd(virt, df, pdpt, &pd, true))
+ return 1;
+
+ if (select_pt(virt, df, pd, &pt, true))
+ return 1;
+
+ select_page(virt, pt, &page);
+
+ if (phys) {
+ page->flags = F_PRESENT | flags;
+ page->address = (uint64_t)phys >> 12;
+ page->loaded = 1;
+ invlpg(virt);
+ } else {
+ page->flags = flags;
+ page->address = 0;
+ page->loaded = 0;
+ }
+
+ return 0;
+}
+
+static int map_pages(volatile struct pml4e *root, void *virt_start,
+ void *phys_start, unsigned int flags, long page_count)
+{
+ uint64_t pml4_o = -1, pdpt_o = -1, pd_o = -1;
+
+ uint64_t pml4_n, pdpt_n, pd_n;
+
+ volatile struct pdpte *pdpt = NULL;
+ volatile struct pde *pd = NULL;
+ volatile struct pte *pt = NULL;
+ volatile struct pte *page = NULL;
+
+ void *virt, *phys;
+
+ unsigned int df = F_WRITEABLE;
+
+ if (phys_start == NULL)
+ df = 0; // alloc page on fault
+
+ long i;
+ for (i = 0; i < page_count; i++) {
+ virt = (char *)virt_start + (i * PAGE_SIZE);
+ phys = (char *)phys_start + (i * PAGE_SIZE);
+
+ pml4_n = (uint64_t)virt >> 39;
+ pdpt_n = ((uint64_t)virt >> 30) & 0x1ff;
+ pd_n = ((uint64_t)virt >> 21) & 0x1ff;
+
+ if (pdpt == NULL || pml4_o != pml4_n) {
+ if (select_pdpt(virt, df, root, &pdpt, true))
+ goto failed;
+ pml4_o = pml4_n;
+ }
+
+ if (pd == NULL || pdpt_o != pdpt_n) {
+ if (select_pd(virt, df, pdpt, &pd, true))
+ goto failed;
+ pdpt_o = pdpt_n;
+ }
+
+ if (pt == NULL || pd_o != pd_n) {
+ if (select_pt(virt, df, pd, &pt, true))
+ goto failed;
+ pd_o = pd_n;
+ }
+
+ select_page(virt, pt, &page);
+
+ if (phys_start) {
+ page->flags = F_PRESENT | flags;
+ page->address = (uint64_t)phys >> 12;
+ page->loaded = 1;
+ } else {
+ page->flags = flags;
+ page->address = 0;
+ page->loaded = 0;
+ }
+
+ if (flags & F_GLOBAL)
+ invlpg(virt);
+ }
+
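+	// reloading CR3 flushes all non-global TLB entries at once, which
+	// is cheaper than issuing invlpg per page over a large range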
+	__asm__ volatile("mov %%cr3, %%rax; mov %%rax, %%cr3" ::: "rax", "memory");
+
+ return 0;
+
+failed:
+
+	unmap_pages(root, virt_start, i);
+
+ return 1;
+}
+
+void paging_init(void)
+{
+ kernel_pml4[0].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pml4[0].address = (uint64_t)(&kernel_pdpt_0) >> 12;
+
+ kernel_pdpt_0[0].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pdpt_0[0].address = (uint64_t)(&kernel_pd_0) >> 12;
+
+ kernel_pd_0[1].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pd_0[1].address = (uint64_t)(&paging_pt) >> 12;
+ kernel_pd_0[2].flags = F_PRESENT | F_WRITEABLE;
+ kernel_pd_0[2].address = (uint64_t)(&bootstrap_pt) >> 12;
+
+ memsetv(&paging_pt, 0, 4096);
+ memsetv(&bootstrap_pt, 0, 4096);
+}
+
+static inline void *page_align(void *addr)
+{
+ uintptr_t a = (uintptr_t)addr;
+ a /= PAGE_SIZE;
+ a *= PAGE_SIZE;
+ return (void *)a;
+}
+
+void *mapaddr(void *addr, size_t len)
+{
+ void *phys = page_align(addr);
+ ptrdiff_t error = (char *)addr - (char *)phys;
+ len += error;
+ long pages = len / PAGE_SIZE + 1;
+ void *virt = virtaddr_alloc(pages);
+ if (virt == NULL) {
+ return NULL;
+ }
+ if (map_pages(kernel_pml4, virt, phys, F_WRITEABLE, pages)) {
+ virtaddr_free(virt);
+ return NULL;
+ }
+ return (char *)virt + error;
+}
+
+void unmapaddr(void *addr)
+{
+ long pages = virtaddr_free(addr);
+ if (pages < 1)
+ return;
+ unmap_pages(kernel_pml4, addr, pages);
+}
+
+void *alloc_pages(size_t count)
+{
+ void *virt = virtaddr_alloc(count);
+ if (virt == NULL)
+ return NULL;
+	// physical pages are allocated lazily on first fault (see
+	// kload_page); passing phys = NULL leaves the entries unloaded
+	if (map_pages(kernel_pml4, virt, NULL, F_WRITEABLE, count)) {
+ virtaddr_free(virt);
+ return NULL;
+ }
+ return virt;
+}
+
+void free_page(void *virt)
+{
+ (void)virt;
+ panic("free_page is not yet implemented");
+}
+
+void free_pages(void *virt)
+{
+ long pages = virtaddr_free(virt);
+ if (pages < 1)
+ return;
+ unmap_pages(kernel_pml4, virt, pages);
+}
+
+int kload_page(void *virt_addr)
+{
+ volatile struct pte *page = get_page(kernel_pml4, virt_addr);
+ if (page == NULL)
+ return -1;
+ if (page->loaded)
+ return -1;
+ void *phys = alloc_phys_page();
+ if (phys == NULL)
+ return -2;
+ page->loaded = 1;
+ page->address = (uint64_t)phys >> 12;
+ page->flags |= F_PRESENT;
+ invlpg(virt_addr);
+ return 0;
+}
diff --git a/kernel/memory/paging.h b/kernel/memory/paging.h
new file mode 100644
index 0000000..be6fd06
--- /dev/null
+++ b/kernel/memory/paging.h
@@ -0,0 +1,24 @@
+/**
+ * @file paging.h
+ *
+ * @author Freya Murphy <freya@freyacat.org>
+ *
+ * 64-bit paging functions
+ */
+
+#ifndef PAGING_H_
+#define PAGING_H_
+
+#define F_PRESENT 0x001
+#define F_WRITEABLE 0x002
+#define F_UNPRIVILEGED 0x004
+#define F_WRITETHROUGH 0x008
+#define F_CACHEDISABLE 0x010
+#define F_ACCESSED 0x020
+#define F_DIRTY 0x040
+#define F_MEGABYTE 0x080
+#define F_GLOBAL 0x100
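+
+// example: a writable kernel mapping combines F_PRESENT | F_WRITEABLE
+// (as paging.c does); F_GLOBAL marks entries that survive CR3 reloads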
+
+void paging_init(void);
+
+#endif /* paging.h */
diff --git a/kernel/memory/physalloc.c b/kernel/memory/physalloc.c
new file mode 100644
index 0000000..de0e4a7
--- /dev/null
+++ b/kernel/memory/physalloc.c
@@ -0,0 +1,242 @@
+#include <lib.h>
+#include <comus/memory.h>
+#include <comus/asm.h>
+
+#include "physalloc.h"
+
+extern char kernel_start;
+extern char kernel_end;
+#define kaddr(addr) ((uintptr_t)(&addr))
+
+// the page bitmap and area list live between kernel_end and memory_start
+static uintptr_t memory_start = 0;
+
+struct memory_area {
+ uint64_t len;
+ uintptr_t addr;
+};
+
+static uint64_t *bitmap;
+static uint64_t total_memory;
+static uint64_t free_memory;
+static uint64_t page_count;
+static uint64_t segment_count;
+struct memory_area *page_start;
+
+static int n_pages(const struct memory_area *m)
+{
+ return m->len / PAGE_SIZE;
+}
+
+static void *page_at(int i)
+{
+	int cur_page = 0;
+	for (uint64_t idx = 0; idx < segment_count; idx++) {
+		const struct memory_area *m = &page_start[idx];
+		int pages = n_pages(m);
+		if (i - cur_page < pages) {
+			return (void *)(m->addr + (PAGE_SIZE * (i - cur_page)));
+		}
+		cur_page += pages;
+	}
+	return NULL;
+}
+
+static long page_idx(void *page)
+{
+	uintptr_t addr = (uintptr_t)page;
+	int cur_page = 0;
+	for (uint64_t idx = 0; idx < segment_count; idx++) {
+		const struct memory_area *m = &page_start[idx];
+		if (addr >= m->addr && addr < m->addr + m->len) {
+			return cur_page + ((addr - m->addr) / PAGE_SIZE);
+		}
+		cur_page += n_pages(m);
+	}
+	return -1;
+}
+
+static inline bool bitmap_get(int i)
+{
+ return (bitmap[i / 64] >> i % 64) & 1;
+}
+
+static inline void bitmap_set(int i, bool v)
+{
+	if (bitmap_get(i) == v)
+		return;
+	if (v)
+		free_memory -= PAGE_SIZE;
+	else
+		free_memory += PAGE_SIZE;
+	int idx = i / 64;
+	bitmap[idx] &= ~(1ULL << i % 64);
+	bitmap[idx] |= ((uint64_t)v << i % 64);
+}
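+
+// e.g. page 70 is tracked in bitmap[1] (70 / 64) at bit 6 (70 % 64)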
+
+void *alloc_phys_page(void)
+{
+ return alloc_phys_pages(1);
+}
+
+void *alloc_phys_pages(int pages)
+{
+ if (pages < 1)
+ return NULL;
+
+ int n_contiguous = 0;
+ int free_region_start = 0;
+ for (uint64_t i = 0; i < page_count; i++) {
+ bool free = !bitmap_get(i);
+
+ if (free) {
+ if (n_contiguous == 0)
+ free_region_start = i;
+ n_contiguous++;
+ if (n_contiguous == pages) {
+ for (int j = 0; j < pages; j++)
+ bitmap_set(free_region_start + j, true);
+ return page_at(free_region_start);
+ }
+ } else
+ n_contiguous = 0;
+ }
+
+ return NULL;
+}
+
+void free_phys_page(void *ptr)
+{
+ free_phys_pages(ptr, 1);
+}
+
+void free_phys_pages(void *ptr, int pages)
+{
+ long idx = page_idx(ptr);
+ if (idx == -1)
+ return;
+
+	for (int i = 0; i < pages; i++)
+		bitmap_set(idx + i, false);
+}
+
+static bool segment_invalid(const struct memory_segment *segment)
+{
+ if (segment->addr < kaddr(kernel_start))
+ return true;
+ if (segment->addr + segment->len < memory_start)
+ return true;
+ if (segment->addr + segment->len < kaddr(kernel_start))
+ return true;
+ return false;
+}
+
+static struct memory_area segment_to_area(const struct memory_segment *segment)
+{
+ uint64_t length = segment->len;
+ uintptr_t addr = segment->addr;
+
+ uintptr_t start;
+ if (memory_start)
+ start = memory_start;
+ else
+ start = kaddr(kernel_end);
+
+ if (segment->addr < start) {
+ addr = start;
+ length -= addr - segment->addr;
+ } else {
+ addr = segment->addr;
+ }
+
+ struct memory_area temp;
+ temp.len = length;
+ temp.addr = addr;
+
+ return temp;
+}
+
+static uintptr_t page_align(uintptr_t ptr)
+{
+ return (ptr + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
+}
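+
+// note: this rounds UP (e.g. page_align(0x1234) == 0x2000 with 4 KiB
+// pages), unlike the round-down page_align() in paging.c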
+
+void physalloc_init(struct memory_map *map)
+{
+ bitmap = NULL;
+ total_memory = 0;
+ free_memory = 0;
+ page_count = 0;
+ page_start = NULL;
+
+ segment_count = 0;
+
+ for (uint32_t i = 0; i < map->entry_count; i++) {
+ struct memory_segment *segment = &map->entries[i];
+
+ if (segment_invalid(segment))
+ continue;
+
+ struct memory_area temp = segment_to_area(segment);
+ page_count += n_pages(&temp);
+ segment_count++;
+ }
+
+	long bitmap_pages = (page_count / 8 + PAGE_SIZE - 1) / PAGE_SIZE;
+ long bitmap_size = bitmap_pages * PAGE_SIZE;
+ bitmap = (uint64_t *)page_align(kaddr(kernel_end));
+
+ long page_area_size = segment_count * sizeof(struct memory_area);
+ char *page_area_addr = (char *)bitmap + bitmap_size;
+ page_area_addr = (char *)page_align((uintptr_t)page_area_addr);
+
+ memory_start = page_align((uintptr_t)page_area_addr + page_area_size);
+
+ bitmap = mapaddr(bitmap, bitmap_size);
+ memset(bitmap, 0, bitmap_size);
+ page_area_addr = mapaddr(page_area_addr, page_area_size);
+ memset(page_area_addr, 0, page_area_size);
+
+ page_start = (struct memory_area *)page_area_addr;
+
+ struct memory_area *area = page_start;
+
+ for (uint32_t i = 0; i < map->entry_count; i++) {
+ struct memory_segment *segment = &map->entries[i];
+
+ if (segment_invalid(segment))
+ continue;
+
+ struct memory_area temp = segment_to_area(segment);
+ *area = temp;
+ area++;
+ }
+
+ total_memory = page_count * PAGE_SIZE;
+ page_count -= bitmap_pages;
+ free_memory = page_count * PAGE_SIZE;
+
+ char buf[20];
+ printf("\nMEMORY USAGE\n");
+ printf("mem total: %s\n", btoa(memory_total(), buf));
+ printf("mem free: %s\n", btoa(memory_free(), buf));
+ printf("mem used: %s\n\n", btoa(memory_used(), buf));
+}
+
+void *alloc_page(void)
+{
+ return alloc_pages(1);
+}
+
+uint64_t memory_total(void)
+{
+ return total_memory;
+}
+
+uint64_t memory_free(void)
+{
+ return free_memory;
+}
+
+uint64_t memory_used(void)
+{
+ return total_memory - free_memory;
+}
diff --git a/kernel/memory/physalloc.h b/kernel/memory/physalloc.h
new file mode 100644
index 0000000..7afe998
--- /dev/null
+++ b/kernel/memory/physalloc.h
@@ -0,0 +1,44 @@
+/**
+ * @file physalloc.h
+ *
+ * @author Freya Murphy <freya@freyacat.org>
+ *
+ * Physical page allocator functions
+ */
+
+#ifndef PHYSALLOC_H_
+#define PHYSALLOC_H_
+
+#include <comus/memory.h>
+
+/**
+ * Initialize the physical page allocator
+ */
+void physalloc_init(struct memory_map *map);
+
+/**
+ * Allocates a single physical page in memory
+ * @returns the physical address of the page
+ */
+void *alloc_phys_page(void);
+
+/**
+ * Allocates count physical pages in memory
+ * @returns the physical address of the first page
+ */
+void *alloc_phys_pages(int count);
+
+/**
+ * Frees a single physical page in memory
+ * @param ptr - the physical address of the page
+ */
+void free_phys_page(void *ptr);
+
+/**
+ * Frees count physical pages in memory
+ * @param ptr - the physical address of the first page
+ * @param count - the number of pages to free
+ */
+void free_phys_pages(void *ptr, int count);
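+
+/**
+ * Example usage (a sketch; error handling elided):
+ *
+ *   void *page = alloc_phys_page();   // one physical page
+ *   void *run  = alloc_phys_pages(4); // four contiguous pages
+ *   free_phys_pages(run, 4);
+ *   free_phys_page(page);
+ */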
+
+#endif /* physalloc.h */
diff --git a/kernel/memory/virtalloc.c b/kernel/memory/virtalloc.c
new file mode 100644
index 0000000..a9f25af
--- /dev/null
+++ b/kernel/memory/virtalloc.c
@@ -0,0 +1,200 @@
+#include <lib.h>
+#include <comus/memory.h>
+
+#include "virtalloc.h"
+
+struct addr_node {
+ uintptr_t start;
+ uintptr_t end;
+ struct addr_node *next;
+ struct addr_node *prev;
+ uint8_t is_alloc; // if node is storing allocated data
+ uint8_t is_used; // if node is in use by virtalloc
+};
+
+#define BSS_NODES 64
+static struct addr_node bootstrap_nodes[BSS_NODES];
+static struct addr_node *alloc_nodes = NULL;
+static size_t free_node_start = 0;
+static size_t alloc_node_count = 0;
+static size_t used_node_count = 0;
+static bool is_allocating = false;
+
+static struct addr_node *start_node = NULL;
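+
+// nodes live in two pools: a fixed bootstrap array in .bss and a
+// malloc()ed array that grows on demand; get_node_idx() presents
+// them as one contiguous index space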
+
+static struct addr_node *get_node_idx(int idx)
+{
+ if (idx < BSS_NODES) {
+ return &bootstrap_nodes[idx];
+ } else {
+ return &alloc_nodes[idx - BSS_NODES];
+ }
+}
+
+static void update_node_ptrs(struct addr_node *old, struct addr_node *new,
+ int old_len, int new_len)
+{
+ if (old == NULL)
+ return;
+ int idx = 0;
+ for (int i = 0; i < old_len; i++) {
+		struct addr_node *o = &old[i];
+		if (!o->is_used)
+			continue;
+		struct addr_node *n = &new[idx++];
+		*n = *o;
+		if (n->prev != NULL)
+			n->prev->next = n;
+		if (n->next != NULL)
+			n->next->prev = n;
+		if (start_node == o)
+			start_node = n;
+	}
+	for (int i = idx; i < new_len; i++) {
+		struct addr_node *n = &new[i];
+		n->is_used = false;
+ }
+}
+
+static struct addr_node *get_node(void)
+{
+ size_t count = BSS_NODES + alloc_node_count;
+
+ if (!is_allocating && used_node_count + 16 >= count) {
+ is_allocating = true;
+ int new_alloc = alloc_node_count * 2;
+ if (new_alloc < 8)
+ new_alloc = 8;
+ struct addr_node *new_nodes;
+ new_nodes = malloc(sizeof(struct addr_node) * new_alloc);
+ if (new_nodes == NULL)
+ panic("virt addr alloc nodes is null");
+ update_node_ptrs(alloc_nodes, new_nodes, alloc_node_count, new_alloc);
+ free(alloc_nodes);
+ alloc_nodes = new_nodes;
+ alloc_node_count = new_alloc;
+ is_allocating = false;
+ count = BSS_NODES + alloc_node_count;
+ }
+
+ size_t idx = free_node_start;
+ for (; idx < count; idx++) {
+ struct addr_node *node = get_node_idx(idx);
+ if (!node->is_used) {
+ used_node_count++;
+ return node;
+ }
+ }
+
+ panic("could not get virtaddr node");
+}
+
+static void free_node(struct addr_node *node)
+{
+ node->is_used = false;
+ used_node_count--;
+}
+
+void virtaddr_init(void)
+{
+ struct addr_node init = {
+ .start = 0x400000, // third page table
+ .end = 0x1000000000000, // 48bit memory address max
+ .next = NULL,
+ .prev = NULL,
+ .is_alloc = false,
+ .is_used = true,
+ };
+ memset(bootstrap_nodes, 0, sizeof(bootstrap_nodes));
+ bootstrap_nodes[0] = init;
+ start_node = &bootstrap_nodes[0];
+}
+
+static void merge_back(struct addr_node *node)
+{
+ while (node->prev) {
+ if (node->is_alloc != node->prev->is_alloc)
+ break;
+ struct addr_node *temp = node->prev;
+ node->start = temp->start;
+ node->prev = temp->prev;
+ if (temp->prev)
+ temp->prev->next = node;
+ free_node(temp);
+ }
+ if (node->prev == NULL) {
+ start_node = node;
+ }
+}
+
+static void merge_forward(struct addr_node *node)
+{
+ while (node->next) {
+ if (node->is_alloc != node->next->is_alloc)
+ break;
+ struct addr_node *temp = node->next;
+ node->end = temp->end;
+ node->next = temp->next;
+ if (temp->next)
+ temp->next->prev = node;
+ free_node(temp);
+ }
+}
+
+void *virtaddr_alloc(int n_pages)
+{
+ if (n_pages < 1)
+ return NULL;
+ long n_length = n_pages * PAGE_SIZE;
+ struct addr_node *node = start_node;
+
+ for (; node != NULL; node = node->next) {
+ long length = node->end - node->start;
+ if (node->is_alloc)
+ continue;
+
+ if (length >= n_length) {
+ struct addr_node *new = get_node();
+ if (node->prev != NULL) {
+ node->prev->next = new;
+ } else {
+ start_node = new;
+ }
+ new->next = node;
+ new->prev = node->prev;
+ node->prev = new;
+ new->start = node->start;
+ new->end = new->start + n_length;
+ node->start = new->end;
+			new->is_alloc = true;
+			new->is_used = true;
+			// do NOT merge here: coalescing adjacent allocated
+			// nodes would collapse separate allocations and lose
+			// the boundaries that virtaddr_free() depends on
+			return (void *)new->start;
+ }
+ }
+
+ return NULL;
+}
+
+long virtaddr_free(void *virtaddr)
+{
+ uintptr_t virt = (uintptr_t)virtaddr;
+
+ if (virt % PAGE_SIZE)
+ return -1; // not page aligned, we did not give this out!!!
+
+ struct addr_node *node = start_node;
+
+	for (; node != NULL; node = node->next) {
+		if (node->start == virt && node->is_alloc) {
+			long length = node->end - node->start;
+			long pages = length / PAGE_SIZE;
+			node->is_alloc = false; // mark free before coalescing
+			merge_back(node);
+			merge_forward(node);
+			return pages;
+		}
+	}
+
+ return -1;
+}
diff --git a/kernel/memory/virtalloc.h b/kernel/memory/virtalloc.h
new file mode 100644
index 0000000..a5ca840
--- /dev/null
+++ b/kernel/memory/virtalloc.h
@@ -0,0 +1,31 @@
+/**
+ * @file virtalloc.h
+ *
+ * @author Freya Murphy <freya@freyacat.org>
+ *
+ * Virtual address allocator functions
+ */
+
+#ifndef VIRTALLOC_H_
+#define VIRTALLOC_H_
+
+/**
+ * Initializes the virtual address allocator
+ */
+void virtaddr_init(void);
+
+/**
+ * Allocate a contiguous range of virtual address space
+ * @param pages - the number of pages to reserve
+ * @returns the starting virtual address, or NULL on failure
+ */
+void *virtaddr_alloc(int pages);
+
+/**
+ * Free a virtual address range returned by virtaddr_alloc
+ * @param virtaddr - the address to free
+ * @returns the number of pages freed, or -1 if the address was not allocated
+ */
+long virtaddr_free(void *virtaddr);
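+
+/**
+ * Example usage (a sketch; the range must still be mapped before use):
+ *
+ *   void *virt = virtaddr_alloc(8);   // reserve 8 pages
+ *   long pages = virtaddr_free(virt); // pages == 8
+ */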
+
+#endif /* virtalloc.h */