summaryrefslogtreecommitdiff
path: root/kernel/memory
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/memory')
-rw-r--r--kernel/memory/paging.c150
-rw-r--r--kernel/memory/physalloc.c45
-rw-r--r--kernel/memory/virtalloc.c56
-rw-r--r--kernel/memory/virtalloc.h2
4 files changed, 158 insertions, 95 deletions
diff --git a/kernel/memory/paging.c b/kernel/memory/paging.c
index 763bdce..fc3b256 100644
--- a/kernel/memory/paging.c
+++ b/kernel/memory/paging.c
@@ -5,7 +5,6 @@
#include "physalloc.h"
#include "paging.h"
#include "memory.h"
-#include <stdint.h>
// PAGE MAP LEVEL 4 ENTRY
struct pml4e {
@@ -258,8 +257,10 @@ static volatile struct pml4 *pml4_alloc(void)
volatile struct pml4 *pPML4, *vPML4;
pPML4 = alloc_phys_page();
- if (pPML4 == NULL)
+ if (pPML4 == NULL) {
+ ERROR("Could not allocate PML4");
return NULL;
+ }
vPML4 = PML4_MAP(pPML4);
memsetv(vPML4, 0, sizeof(struct pml4));
@@ -287,8 +288,10 @@ static volatile struct pdpt *pdpt_alloc(volatile struct pml4 *pPML4,
}
pPDPT = alloc_phys_page();
- if (pPML4 == NULL)
+ if (pPDPT == NULL) {
+ ERROR("Could not allocate PDPT");
return NULL;
+ }
vPDPT = PDPT_MAP(pPDPT);
memsetv(vPDPT, 0, sizeof(struct pdpt));
@@ -320,8 +323,10 @@ static volatile struct pd *pd_alloc(volatile struct pdpt *pPDPT, void *vADDR,
}
pPD = alloc_phys_page();
- if (pPDPT == NULL)
+ if (pPD == NULL) {
+ ERROR("Could not allocate PD");
return NULL;
+ }
vPD = PD_MAP(pPD);
memsetv(vPD, 0, sizeof(struct pd));
@@ -353,8 +358,10 @@ static volatile struct pt *pt_alloc(volatile struct pd *pPD, void *vADDR,
}
pPT = alloc_phys_page();
- if (pPD == NULL)
+ if (pPT == NULL) {
+ ERROR("Could not allocate PT");
return NULL;
+ }
vPT = PT_MAP(pPT);
memsetv(vPT, 0, sizeof(struct pt));
@@ -910,11 +917,15 @@ void *mem_mapaddr(mem_ctx_t ctx, void *phys, void *virt, size_t len,
// get page aligned (or allocate) vitural address
if (virt == NULL)
virt = virtaddr_alloc(&ctx->virtctx, pages);
- if (virt == NULL)
+ if (virt == NULL) {
+		ERROR("Could not alloc virtual address for %zu pages", pages);
return NULL;
+ }
- if (virtaddr_take(&ctx->virtctx, virt, pages))
+ if (virtaddr_take(&ctx->virtctx, virt, pages)) {
+		ERROR("Could not take virtual address: %p", virt);
return NULL;
+ }
assert((uint64_t)virt % PAGE_SIZE == 0,
"mem_mapaddr: vitural address not page aligned");
@@ -922,6 +933,7 @@ void *mem_mapaddr(mem_ctx_t ctx, void *phys, void *virt, size_t len,
if (map_pages((volatile struct pml4 *)ctx->pml4, virt, aligned_phys,
F_PRESENT | flags, pages)) {
virtaddr_free(&ctx->virtctx, virt);
+ ERROR("Could not map pages");
return NULL;
}
@@ -1025,64 +1037,90 @@ void *mem_alloc_pages(mem_ctx_t ctx, size_t count, unsigned int flags)
void *mem_alloc_pages_at(mem_ctx_t ctx, size_t count, void *virt,
unsigned int flags)
{
- size_t pages_needed = count;
-
- struct phys_page_slice prev_phys_block = PHYS_PAGE_SLICE_NULL;
- struct phys_page_slice phys_pages;
+ void *phys = NULL;
- if (virtaddr_take(&ctx->virtctx, virt, count))
+ if (virtaddr_take(&ctx->virtctx, virt, count)) {
+		ERROR("Could not take virtual address: %p", virt);
return NULL;
+ }
- while (pages_needed > 0) {
- phys_pages = alloc_phys_page_withextra(pages_needed);
- if (phys_pages.pagestart == NULL) {
- goto mem_alloc_pages_at_fail;
- }
-
- {
- // allocate the first page and store in it the physical address of the
- // previous chunk of pages
- // TODO: skip this if there are already enough pages from first alloc
- void *pageone = kmapaddr(phys_pages.pagestart, NULL, 1,
- F_PRESENT | F_WRITEABLE);
- if (pageone == NULL) {
- panic("kernel out of virtual memory");
- }
- *((struct phys_page_slice *)pageone) = prev_phys_block;
- prev_phys_block = phys_pages;
- kunmapaddr(pageone);
- }
-
- // index into virtual page array at index [count - pages_needed]
- void *vaddr = ((uint8_t *)virt) + ((count - pages_needed) * PAGE_SIZE);
-
- assert(pages_needed >= phys_pages.num_pages, "overflow");
- pages_needed -= phys_pages.num_pages;
+ phys = alloc_phys_pages_exact(count);
+ if (phys == NULL) {
+ ERROR("Could not allocate %zu physical pages", count);
+ goto fail;
+ }
- if (map_pages((volatile struct pml4 *)ctx->pml4, vaddr,
- phys_pages.pagestart, flags, phys_pages.num_pages)) {
- goto mem_alloc_pages_at_fail;
- }
+ if (map_pages((volatile struct pml4 *)ctx->pml4, virt, phys, flags,
+ count)) {
+ ERROR("Could not map pages");
+ goto fail;
}
return virt;
-mem_alloc_pages_at_fail:
- while (prev_phys_block.pagestart) {
- void *virtpage = kmapaddr(prev_phys_block.pagestart, NULL, 1,
- F_PRESENT | F_WRITEABLE);
- if (!virtpage) {
- // memory corruption, most likely a bug
- // could also ERROR here and exit with leak
- panic("unable to free memory from failed mem_alloc_pages_at call");
- }
- struct phys_page_slice prev = *(struct phys_page_slice *)virtpage;
- prev_phys_block = prev;
- free_phys_pages_slice(prev);
- kunmapaddr(virtpage);
- }
-
+fail:
+	if (phys) free_phys_pages(phys, count);
+ virtaddr_free(&ctx->virtctx, virt);
return NULL;
+
+ // size_t pages_needed = count;
+ //
+ // struct phys_page_slice prev_phys_block = PHYS_PAGE_SLICE_NULL;
+ // struct phys_page_slice phys_pages;
+ //
+ // if (virtaddr_take(&ctx->virtctx, virt, count))
+ // return NULL;
+ //
+ // while (pages_needed > 0) {
+ // phys_pages = alloc_phys_page_withextra(pages_needed);
+ // if (phys_pages.pagestart == NULL) {
+ // goto mem_alloc_pages_at_fail;
+ // }
+ //
+ // {
+ // // allocate the first page and store in it the physical address of the
+ // // previous chunk of pages
+ // // TODO: skip this if there are already enough pages from first alloc
+ // void *pageone = kmapaddr(phys_pages.pagestart, NULL, 1,
+ // F_PRESENT | F_WRITEABLE);
+ // if (pageone == NULL) {
+ // panic("kernel out of virtual memory");
+ // }
+ // *((struct phys_page_slice *)pageone) = prev_phys_block;
+ // prev_phys_block = phys_pages;
+ // kunmapaddr(pageone);
+ // }
+ //
+ // // index into virtual page array at index [count - pages_needed]
+ // void *vaddr = ((uint8_t *)virt) + ((count - pages_needed) * PAGE_SIZE);
+ //
+ // assert(pages_needed >= phys_pages.num_pages, "overflow");
+ // pages_needed -= phys_pages.num_pages;
+ //
+ // if (map_pages((volatile struct pml4 *)ctx->pml4, vaddr,
+ // phys_pages.pagestart, flags, phys_pages.num_pages)) {
+ // goto mem_alloc_pages_at_fail;
+ // }
+ // }
+ //
+ // return virt;
+ //
+ //mem_alloc_pages_at_fail:
+ // while (prev_phys_block.pagestart) {
+ // void *virtpage = kmapaddr(prev_phys_block.pagestart, NULL, 1,
+ // F_PRESENT | F_WRITEABLE);
+ // if (!virtpage) {
+ // // memory corruption, most likely a bug
+ // // could also ERROR here and exit with leak
+ // panic("unable to free memory from failed mem_alloc_pages_at call");
+ // }
+ // struct phys_page_slice prev = *(struct phys_page_slice *)virtpage;
+ // prev_phys_block = prev;
+ // free_phys_pages_slice(prev);
+ // kunmapaddr(virtpage);
+ // }
+ //
+ // return NULL;
}
void mem_free_pages(mem_ctx_t ctx, const void *virt)
diff --git a/kernel/memory/physalloc.c b/kernel/memory/physalloc.c
index 8971bcf..7551c75 100644
--- a/kernel/memory/physalloc.c
+++ b/kernel/memory/physalloc.c
@@ -1,11 +1,14 @@
#include <lib.h>
#include <comus/memory.h>
#include <comus/asm.h>
+#include <comus/mboot.h>
+#include <stdint.h>
#include "physalloc.h"
extern char kernel_start[];
extern char kernel_end[];
+static void *kernel_real_end = NULL;
// between memory_start and kernel_start will be the bitmap
static uintptr_t memory_start = 0;
@@ -26,19 +29,20 @@ static const char *segment_type_str[] = {
static int n_pages(const struct memory_segment *m)
{
- return m->len / PAGE_SIZE;
+ return (m->len + PAGE_SIZE - 1) / PAGE_SIZE;
}
-static void *page_at(int i)
+static void *page_at(size_t i)
{
- int cur_page = 0;
+ size_t cur_page = 0;
+ const struct memory_segment *m = page_start;
for (uint64_t idx = 0; idx < segment_count; idx++) {
- const struct memory_segment *m = page_start;
- int pages = n_pages(m);
+ size_t pages = n_pages(m);
if (i - cur_page < pages) {
return (void *)(m->addr + (PAGE_SIZE * (i - cur_page)));
}
cur_page += pages;
+ m++;
}
return NULL;
}
@@ -47,8 +51,8 @@ static long page_idx(void *page)
{
uintptr_t addr = (uintptr_t)page;
int cur_page = 0;
+ const struct memory_segment *m = page_start;
for (uint64_t idx = 0; idx < segment_count; idx++) {
- const struct memory_segment *m = page_start;
if (addr < m->addr) {
return -1;
}
@@ -56,13 +60,14 @@ static long page_idx(void *page)
return cur_page + ((addr - m->addr) / PAGE_SIZE);
}
cur_page += n_pages(m);
+ m++;
}
return -1;
}
static inline bool bitmap_get(size_t i)
{
- return (bitmap[i / 64] >> i % 64) & 1;
+ return (bitmap[i / 64] >> (i % 64)) & 1;
}
static inline void bitmap_set(size_t i, bool v)
@@ -71,9 +76,9 @@ static inline void bitmap_set(size_t i, bool v)
free_memory -= PAGE_SIZE;
else
free_memory += PAGE_SIZE;
- int idx = i / 64;
- bitmap[idx] &= ~(1 << i % 64);
- bitmap[idx] |= (v << i % 64);
+ size_t idx = i / 64;
+	bitmap[idx] &= ~(1ULL << (i % 64));
+	bitmap[idx] |= ((uint64_t)v << (i % 64));
}
void *alloc_phys_page(void)
@@ -105,9 +110,17 @@ void *alloc_phys_pages_exact(size_t pages)
free_region_start = i;
n_contiguous++;
if (n_contiguous == pages) {
+ void *pADDR;
+ pADDR = page_at(free_region_start);
+
+ if (pADDR == NULL) {
+ n_contiguous = 0;
+ continue;
+ }
+
for (size_t j = 0; j < pages; j++)
bitmap_set(free_region_start + j, true);
- return page_at(free_region_start);
+ return pADDR;
}
} else
n_contiguous = 0;
@@ -118,6 +131,7 @@ void *alloc_phys_pages_exact(size_t pages)
struct phys_page_slice alloc_phys_page_withextra(size_t max_pages)
{
+ panic("please dont use this its broken i think?!\n");
if (max_pages == 0)
return PHYS_PAGE_SLICE_NULL;
@@ -160,6 +174,7 @@ void free_phys_page(void *ptr)
void free_phys_pages_slice(struct phys_page_slice slice)
{
+ panic("please dont use this its broken i think?!\n");
free_phys_pages(slice.pagestart, slice.num_pages);
}
@@ -200,7 +215,7 @@ static struct memory_segment clamp_segment(const struct memory_segment *segment)
if (memory_start)
start = memory_start;
else
- start = (uintptr_t)kernel_end;
+ start = (uintptr_t)kernel_real_end;
if (segment->addr < start) {
addr = start;
@@ -232,6 +247,10 @@ void physalloc_init(struct memory_map *map)
segment_count = 0;
+ kernel_real_end = mboot_end();
+ if ((char *)kernel_real_end < kernel_end)
+ kernel_real_end = kernel_end;
+
for (uint32_t i = 0; i < map->entry_count; i++) {
struct memory_segment *segment = &map->entries[i];
@@ -245,7 +264,7 @@ void physalloc_init(struct memory_map *map)
long bitmap_pages = (page_count / 64 / PAGE_SIZE) + 1;
long bitmap_size = bitmap_pages * PAGE_SIZE;
- bitmap = (uint64_t *)page_align((uintptr_t)kernel_end);
+ bitmap = (uint64_t *)page_align((uintptr_t)kernel_real_end);
long page_area_size = segment_count * sizeof(struct memory_segment);
char *page_area_addr = (char *)bitmap + bitmap_size;
diff --git a/kernel/memory/virtalloc.c b/kernel/memory/virtalloc.c
index 0cbba33..3d60a9f 100644
--- a/kernel/memory/virtalloc.c
+++ b/kernel/memory/virtalloc.c
@@ -1,10 +1,12 @@
-#include "lib/kio.h"
#include <lib.h>
#include <comus/memory.h>
#include <stdint.h>
#include "virtalloc.h"
+extern char kernel_start[];
+extern char kernel_end[];
+
static struct virt_addr_node *get_node_idx(struct virt_ctx *ctx, int idx)
{
if (idx < BOOTSTRAP_VIRT_ALLOC_NODES) {
@@ -64,6 +66,7 @@ static struct virt_addr_node *get_node(struct virt_ctx *ctx)
for (; idx < count; idx++) {
struct virt_addr_node *node = get_node_idx(ctx, idx);
if (!node->is_used) {
+ node->is_used = true;
ctx->used_node_count++;
return node;
}
@@ -81,7 +84,7 @@ static void free_node(struct virt_ctx *ctx, struct virt_addr_node *node)
void virtaddr_init(struct virt_ctx *ctx)
{
struct virt_addr_node init = {
- .start = 0x50000000,
+ .start = 0x0,
.end = 0x1000000000000, // 48bit memory address max
.next = NULL,
.prev = NULL,
@@ -96,6 +99,10 @@ void virtaddr_init(struct virt_ctx *ctx)
ctx->alloc_node_count = 0;
ctx->used_node_count = 0;
ctx->is_allocating = false;
+
+	virtaddr_take(ctx, (void *)0,
+		      (int)(((uint64_t)kernel_end + PAGE_SIZE - 1) /
+			    PAGE_SIZE));
}
int virtaddr_clone(struct virt_ctx *old, struct virt_ctx *new)
@@ -138,32 +145,31 @@ int virtaddr_clone(struct virt_ctx *old, struct virt_ctx *new)
static void merge_back(struct virt_ctx *ctx, struct virt_addr_node *node)
{
- while (node->prev) {
- if (node->is_alloc != node->prev->is_alloc)
+ struct virt_addr_node *prev;
+	for (prev = node->prev; prev != NULL; prev = node->prev) {
+ if (prev->is_alloc)
break;
- struct virt_addr_node *temp = node->prev;
- node->start = temp->start;
- node->prev = temp->prev;
- if (temp->prev)
- temp->prev->next = node;
- free_node(ctx, temp);
+ node->start = prev->start;
+ node->prev = prev->prev;
+ if (node->prev)
+ node->prev->next = node;
+ free_node(ctx, prev);
}
- if (node->prev == NULL) {
+ if (node->prev == NULL)
ctx->start_node = node;
- }
}
static void merge_forward(struct virt_ctx *ctx, struct virt_addr_node *node)
{
- while (node->next) {
- if (node->is_alloc != node->next->is_alloc)
+ struct virt_addr_node *next;
+	for (next = node->next; next != NULL; next = node->next) {
+ if (next->is_alloc)
break;
- struct virt_addr_node *temp = node->next;
- node->end = temp->end;
- node->next = temp->next;
- if (temp->next)
- temp->next->prev = node;
- free_node(ctx, temp);
+ node->end = next->end;
+ node->next = next->next;
+ if (node->next)
+ node->next->prev = node;
+ free_node(ctx, next);
}
}
@@ -211,9 +217,9 @@ int virtaddr_take(struct virt_ctx *ctx, const void *virt, int n_pages)
left->prev = node->prev;
left->start = node->start;
left->end = (uintptr_t)virt;
- left->is_used = true;
left->is_alloc = false;
- node->prev->next = left;
+ if (node->prev)
+ node->prev->next = left;
node->prev = left;
}
@@ -224,16 +230,15 @@ int virtaddr_take(struct virt_ctx *ctx, const void *virt, int n_pages)
right->next = node->next;
right->start = (uintptr_t)virt + n_length;
right->end = node->end;
- right->is_used = true;
right->is_alloc = false;
- node->next->prev = right;
+ if (node->next)
+ node->next->prev = right;
node->next = right;
}
node->start = (uintptr_t)virt;
node->end = node->start + n_length;
node->is_alloc = true;
- node->is_used = true;
return 0;
}
@@ -257,6 +262,7 @@ long virtaddr_free(struct virt_ctx *ctx, const void *virtaddr)
if (node->start == virt) {
int length = node->end - node->start;
int pages = length / PAGE_SIZE;
+ node->is_alloc = false;
merge_back(ctx, node);
merge_forward(ctx, node);
return pages;
diff --git a/kernel/memory/virtalloc.h b/kernel/memory/virtalloc.h
index 5033242..44bf8d7 100644
--- a/kernel/memory/virtalloc.h
+++ b/kernel/memory/virtalloc.h
@@ -13,7 +13,7 @@
#include <stdint.h>
#include <stdbool.h>
-#define BOOTSTRAP_VIRT_ALLOC_NODES 64
+#define BOOTSTRAP_VIRT_ALLOC_NODES 256
struct virt_addr_node {
/// first virtural address