author | Freya Murphy <freya@freyacat.org> | 2025-04-15 22:20:59 -0400
committer | Freya Murphy <freya@freyacat.org> | 2025-04-15 22:20:59 -0400
commit | 2dbf529c33aa3e24beff944758d586bb0608c1be (patch)
tree | 5b3381c3c8d9a74ccb988c8945fe0681c3ee7301 /kernel
parent | fix %n (diff)
download | comus-2dbf529c33aa3e24beff944758d586bb0608c1be.tar.gz comus-2dbf529c33aa3e24beff944758d586bb0608c1be.tar.bz2 comus-2dbf529c33aa3e24beff944758d586bb0608c1be.zip
expand memory manager to work with userspace (more than one ctx)
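The core of the change is that every mapping and page-allocation routine now takes a mem_ctx_t (a paging root plus a per-context virtual address allocator) instead of implicitly using the kernel's tables, and the old k* entry points become thin wrappers over kernel_mem_ctx. Below is a minimal sketch of how the new calls are meant to fit together, using only names introduced by this commit; the physical address and the function name example_map are placeholders, and note that alloc_mem_ctx() still panics ("not yet implemented") in this revision, so the non-kernel branch is illustrative only.

#include <comus/memory.h>

/* Hypothetical call site -- 0xfee00000 and example_map() are placeholders,
 * not part of this commit. */
static void example_map(void)
{
	/* Kernel context: kmapaddr() now forwards to
	 * mem_mapaddr(kernel_mem_ctx, ...), with explicit flags and an
	 * optional fixed virtual address (NULL = pick any). */
	void *regs = kmapaddr((void *)0xfee00000, NULL, 256, F_WRITEABLE);
	if (regs == NULL)
		return;

	/* Separate context, e.g. for a future user process. In this commit
	 * alloc_mem_ctx() panics instead of returning, so this path is
	 * aspirational. */
	mem_ctx_t ctx = alloc_mem_ctx();
	if (ctx != NULL) {
		/* Pages are mapped writable; physical backing appears to be
		 * deferred (see mem_load_page). */
		void *buf = mem_alloc_pages(ctx, 4);
		if (buf != NULL)
			mem_free_pages(ctx, buf);
		free_mem_ctx(ctx);
	}

	kunmapaddr(regs);
}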
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cpu/idt.c | 1
-rw-r--r-- | kernel/drivers/acpi.c | 4
-rw-r--r-- | kernel/include/comus/memory.h | 101
-rw-r--r-- | kernel/include/lib/kio.h | 10
-rw-r--r-- | kernel/include/lib/klib.h | 25
-rw-r--r-- | kernel/lib/kprintf.c | 54
-rw-r--r-- | kernel/lib/panic.c | 1
-rw-r--r-- | kernel/memory/memory.c | 48
-rw-r--r-- | kernel/memory/memory.h | 8
-rw-r--r-- | kernel/memory/paging.c | 81
-rw-r--r-- | kernel/memory/paging.h | 10
-rw-r--r-- | kernel/memory/physalloc.c | 5
-rw-r--r-- | kernel/memory/virtalloc.c | 124
-rw-r--r-- | kernel/memory/virtalloc.h | 44
14 files changed, 350 insertions, 166 deletions
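Two smaller changes ride along with the context plumbing: kalloc/krealloc/kfree declarations move from comus/memory.h into lib/klib.h, and kio.h gains a TRACING-gated TRACE macro that the new paging paths use for debug logging. A small sketch of how the macro behaves, assuming the kernel is built with TRACING defined and that the header is reachable as <lib/kio.h>:

#include <stddef.h>
#define TRACING /* normally supplied by the build, e.g. -DTRACING */
#include <lib/kio.h>

static void map_something(void *phys, size_t len)
{
	/* Expands to two kprintf calls, printing something like
	 * "[TRACE] map_something PHYS ... LEN ...". With TRACING undefined
	 * the macro expands to nothing, so call sites are free in normal
	 * builds. */
	TRACE("PHYS %16p LEN %zu\n", phys, len);
}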
diff --git a/kernel/cpu/idt.c b/kernel/cpu/idt.c
index 5b93987..4dbf4ba 100644
--- a/kernel/cpu/idt.c
+++ b/kernel/cpu/idt.c
@@ -1,4 +1,3 @@
-#include "lib/klib.h"
 #include <lib.h>
 #include <comus/memory.h>
 #include <comus/asm.h>
diff --git a/kernel/drivers/acpi.c b/kernel/drivers/acpi.c
index b4bb805..bd9fe54 100644
--- a/kernel/drivers/acpi.c
+++ b/kernel/drivers/acpi.c
@@ -287,10 +287,10 @@ static void acpi_load_table(uint64_t addr)
 	struct acpi_header *temp, *mapped;
 	uint32_t length;
 	temp = (struct acpi_header *)(uintptr_t)addr;
-	mapped = kmapaddr(temp, sizeof(struct acpi_header));
+	mapped = kmapaddr(temp, NULL, sizeof(struct acpi_header), 0);
 	length = mapped->length;
 	kunmapaddr(mapped);
-	mapped = kmapaddr(temp, length);
+	mapped = kmapaddr(temp, NULL, length, 0);
 	if (!checksum((uint8_t *)mapped, mapped->length)) {
 		kunmapaddr(mapped);
 		return;
diff --git a/kernel/include/comus/memory.h b/kernel/include/comus/memory.h
index 77d64ff..ee413e3 100644
--- a/kernel/include/comus/memory.h
+++ b/kernel/include/comus/memory.h
@@ -15,6 +15,16 @@
 #define MMAP_MAX_ENTRY 64
 #define PAGE_SIZE 4096
 
+#define F_PRESENT 0x001
+#define F_WRITEABLE 0x002
+#define F_UNPRIVILEGED 0x004
+#define F_WRITETHROUGH 0x008
+#define F_CACHEDISABLE 0x010
+#define F_ACCESSED 0x020
+#define F_DIRTY 0x040
+#define F_MEGABYTE 0x080
+#define F_GLOBAL 0x100
+
 struct memory_segment {
 	uint64_t addr;
 	uint64_t len;
@@ -25,6 +35,9 @@ struct memory_map {
 	struct memory_segment entries[MMAP_MAX_ENTRY];
 };
 
+typedef struct mem_ctx_s *mem_ctx_t;
+extern mem_ctx_t kernel_mem_ctx;
+
 /**
  * Initalize system memory allocator
  */
@@ -46,62 +59,91 @@ uint64_t memory_free(void);
 uint64_t memory_used(void);
 
 /**
+ * Allocate a new memory context
+ *
+ * @returns pointer context or NULL on failure
+ */
+mem_ctx_t alloc_mem_ctx(void);
+
+/**
+ * Free a memory context
+ *
+ * @param ctx - pointer to the memory context
+ */
+void free_mem_ctx(mem_ctx_t ctx);
+
+/**
  * Allocates at least len bytes of memory starting at
  * physical address addr. Returned address can be
  * any virtural address.
  *
- * @param addr - the physical address to map
- * @param len - the minimum length to map
- * @param writable - if this memory should be writable
- * @param user - if this memory should be user writable
+ * @param phys - the physical address to map
+ * @param virt - the virtural address to map (or NULL for any virt addr)
+ * @param len - the minimum length in bytes to map
+ * @param flags - memory flags (F_PRESENT will always be set)
+ * @returns the mapped vitural address to phys, or NULL on failure
  */
-void *kmapaddr(void *addr, size_t len);
+void *mem_mapaddr(mem_ctx_t ctx, void *phys, void *virt, size_t len,
+				  unsigned int flags);
 
 /**
- * Unmaps mapped address from the mmap function
- * @param addr - the address returned from mmap
- * @param len - the length allocated
+ * Unmaps mapped address from the kmapaddr function
+ * @param virt - the vitural address returned from kmapaddr
  */
-void kunmapaddr(void *addr);
+void mem_unmapaddr(mem_ctx_t ctx, void *virt);
 
 /**
- * Allocates size_t bytes in memory
+ * Allocate a single page of memory with the given paging structure
  *
- * @param size - the amount of bytes to allocate
- * @returns the address allocated or NULL on failure
+ * @returns the vitural address aloocated or NULL on failure
  */
-void *kalloc(size_t size);
+void *mem_alloc_page(mem_ctx_t ctx);
 
 /**
- * Rellocates a given allocated ptr to a new size of bytes in memory.
- * If ptr is NULL it will allocate new memory.
+ * Allocate size_t amount of contiguous virtual pages with the given paging structure
  *
- * @param ptr - the pointer to reallocate
- * @param size - the amount of bytes to reallocate to
+ * @param count - the number of pages to allocate
  * @returns the address allocated or NULL on failure
  */
-void *krealloc(void *ptr, size_t size);
+void *mem_alloc_pages(mem_ctx_t ctx, size_t count);
 
 /**
- * Frees an allocated pointer in memory
+ * Free allocated pages with the given paging structure.
  *
- * @param ptr - the pointer to free
+ * @param ptr - the pointer provided by alloc_page or alloc_pages
 */
-void kfree(void *ptr);
+void mem_free_pages(mem_ctx_t ctx, void *ptr);
 
 /**
- * Attemps to load a mapped but not yet allocated page.
+ * Load a not allocated but properly mapped page
  *
- * @param virt_addr - the virtural address from either page allocation function
+ * @returns 0 on success, negative error code on failure
+ */
+int mem_load_page(mem_ctx_t ctx, void *virt);
+
+/**
+ * Allocates at least len bytes of memory starting at
+ * physical address addr. Returned address can be
+ * any virtural address.
  *
- * @returns 0 on success and a negative error code on failure.
+ * @param phys - the physical address to map
+ * @param virt - the virtural address to map (or NULL for any virt addr)
+ * @param len - the minimum length in bytes to map
+ * @param flags - memory flags (F_PRESENT will always be set)
+ * @returns the mapped vitural address to phys, or NULL on failure
  */
-int kload_page(void *virt_addr);
+void *kmapaddr(void *phys, void *virt, size_t len, unsigned int flags);
+
+/**
+ * Unmaps mapped address from the kmapaddr function
+ * @param virt - the vitural address returned from kmapaddr
+ */
+void kunmapaddr(void *virt);
 
 /**
  * Allocate a single page of memory
  *
- * @returns the address allocated or NULL on failure
+ * @returns the vitural address allocated or NULL on failure
  */
 void *kalloc_page(void);
 
@@ -120,4 +162,11 @@ void *kalloc_pages(size_t count);
  */
 void kfree_pages(void *ptr);
 
+/**
+ * Load a not allocated but properly mapped page
+ *
+ * @returns 0 on success, negative error code on failure
+ */
+int kload_page(void *virt);
+
 #endif /* memory.h */
diff --git a/kernel/include/lib/kio.h b/kernel/include/lib/kio.h
index 66efc7b..1b10a39 100644
--- a/kernel/include/lib/kio.h
+++ b/kernel/include/lib/kio.h
@@ -26,6 +26,16 @@ void kputc(char c);
  */
 void kputs(const char *s);
 
+#ifdef TRACING
+#define TRACE(format, ...) \
+	do { \
+		kprintf("[TRACE] %s ", __FUNCTION__); \
+		kprintf(format, ##__VA_ARGS__); \
+	} while (0)
+#else
+#define TRACE(format, ...)
+#endif
+
 /**
  * prints out a formatted string
  *
diff --git a/kernel/include/lib/klib.h b/kernel/include/lib/klib.h
index 7f66e90..0d9797b 100644
--- a/kernel/include/lib/klib.h
+++ b/kernel/include/lib/klib.h
@@ -231,4 +231,29 @@ void log_backtrace(void);
  */
 void log_backtrace_ex(void *ip, void *bp);
 
+/**
+ * Allocates size_t bytes in memory
+ *
+ * @param size - the amount of bytes to allocate
+ * @returns the address allocated or NULL on failure
+ */
+void *kalloc(size_t size);
+
+/**
+ * Rellocates a given allocated ptr to a new size of bytes in memory.
+ * If ptr is NULL it will allocate new memory.
+ *
+ * @param ptr - the pointer to reallocate
+ * @param size - the amount of bytes to reallocate to
+ * @returns the address allocated or NULL on failure
+ */
+void *krealloc(void *ptr, size_t size);
+
+/**
+ * Frees an allocated pointer in memory
+ *
+ * @param ptr - the pointer to free
+ */
+void kfree(void *ptr);
+
 #endif /* klib.h */
diff --git a/kernel/lib/kprintf.c b/kernel/lib/kprintf.c
index 7b1ed71..c2e25fd 100644
--- a/kernel/lib/kprintf.c
+++ b/kernel/lib/kprintf.c
@@ -262,43 +262,44 @@ static int printf_lltoa(char *buf, options_t *opts, bool is_neg,
 	}
 	precision = 0;
 
-	// sign
-	if (is_neg) {
-		*(buf++) = '-';
-	} else if (opts->sign) {
-		*(buf++) = '+';
-	} else if (opts->space) {
-		*(buf++) = ' ';
+	// write number
+	if (num == 0) {
+		*(buf++) = '0';
+	}
+	while (num) {
+		if (opts->precision_set && precision++ >= opts->precision)
+			break;
+		*(buf++) = printf_itoc(opts->is_uppercase, num % opts->radix);
+		num /= opts->radix;
+	}
+
+	// print zeros if needed
+	if (opts->width_set && len < opts->width && opts->zero) {
+		while (len++ < opts->width)
+			*(buf++) = '0';
 	}
 
 	// radix specifier
 	if (opts->hash) {
 		if (opts->radix == 8) {
-			*(buf++) = '0';
 			*(buf++) = 'o';
+			*(buf++) = '0';
 		}
 		if (opts->radix == 16) {
-			*(buf++) = '0';
 			*(buf++) = 'x';
+			*(buf++) = '0';
 		}
 	}
 
-	// print zeros if needed
-	if (opts->width_set && len < opts->width && opts->zero) {
-		while (len++ < opts->width)
-			*(buf++) = '0';
+	// sign
+	if (is_neg) {
+		*(buf++) = '-';
+	} else if (opts->sign) {
+		*(buf++) = '+';
+	} else if (opts->space) {
+		*(buf++) = ' ';
 	}
 
-	// write number
-	if (num == 0) {
-		*(buf++) = '0';
-	}
-	while (num) {
-		if (opts->precision_set && precision++ >= opts->precision)
-			break;
-		*(buf++) = printf_itoc(opts->is_uppercase, num % opts->radix);
-		num /= opts->radix;
-	}
 	*(buf++) = '\0';
 
 	return buf - start;
@@ -333,8 +334,8 @@ static void handle_int_specifier(context_t *ctx, options_t *const opts,
 			printf_putc(ctx, opts->zero ? '0' : ' ');
 	}
 	// number
-	for (int i = 0; i < buf_len; i++)
-		printf_putc(ctx, buf[i]);
+	for (int i = 1; i <= buf_len; i++)
+		printf_putc(ctx, buf[buf_len - i]);
 	// right padding
 	if (opts->left == 1) {
 		for (int i = 0; i < padding; i++)
@@ -424,7 +425,8 @@ static void do_printf(context_t *ctx, va_list args)
 		switch (spec) {
 		case 'p':
 			opts.len = PRINTF_LEN_SIZE_T;
-			opts.width_set = true;
+			opts.width_set = 1;
+			opts.width = sizeof(void *) * 2;
 			opts.radix = 16;
 			opts.hash = true;
 			opts.zero = true;
diff --git a/kernel/lib/panic.c b/kernel/lib/panic.c
index dc62eb7..72f5c51 100644
--- a/kernel/lib/panic.c
+++ b/kernel/lib/panic.c
@@ -1,4 +1,3 @@
-#include "lib/klib.h"
 #include <lib.h>
 #include <stdarg.h>
 #include <comus/asm.h>
diff --git a/kernel/memory/memory.c b/kernel/memory/memory.c
index 145ce2b..b4ecb0d 100644
--- a/kernel/memory/memory.c
+++ b/kernel/memory/memory.c
@@ -1,21 +1,67 @@
+#include "lib/klib.h"
 #include <comus/memory.h>
 #include <comus/asm.h>
 #include <comus/mboot.h>
 #include <lib.h>
+#include "memory.h"
 #include "paging.h"
 #include "virtalloc.h"
 #include "physalloc.h"
 
+mem_ctx_t kernel_mem_ctx;
+struct mem_ctx_s _kernel_mem_ctx;
+extern volatile char kernel_pml4[];
+extern struct virt_ctx kernel_virt_ctx;
+
+void *kmapaddr(void *phys, void *virt, size_t len, unsigned int flags)
+{
+	return mem_mapaddr(kernel_mem_ctx, phys, virt, len, flags);
+}
+
+void kunmapaddr(void *virt)
+{
+	mem_unmapaddr(kernel_mem_ctx, virt);
+}
+
+void *kalloc_page(void)
+{
+	return mem_alloc_page(kernel_mem_ctx);
+}
+
+void *kalloc_pages(size_t count)
+{
+	return mem_alloc_pages(kernel_mem_ctx, count);
+}
+
+void kfree_pages(void *ptr)
+{
+	mem_free_pages(kernel_mem_ctx, ptr);
+}
+
+int kload_page(void *virt)
+{
+	return mem_load_page(kernel_mem_ctx, virt);
+}
+
+mem_ctx_t alloc_mem_ctx(void)
+{
+	panic("alloc_mem_ctx not yet implemented");
+}
+
 void memory_init(void)
 {
 	struct memory_map mmap;
 	if (mboot_get_mmap(&mmap))
 		panic("failed to load memory map");
 
+	kernel_mem_ctx = &_kernel_mem_ctx;
+	kernel_mem_ctx->pml4 = kernel_pml4;
+	kernel_mem_ctx->virtctx = &kernel_virt_ctx;
+
 	cli();
 	paging_init();
-	virtaddr_init();
+	virtaddr_init(kernel_mem_ctx->virtctx);
 	physalloc_init(&mmap);
 	sti();
 }
diff --git a/kernel/memory/memory.h b/kernel/memory/memory.h
new file mode 100644
index 0000000..c39656d
--- /dev/null
+++ b/kernel/memory/memory.h
@@ -0,0 +1,8 @@
+
+#include <comus/memory.h>
+#include "virtalloc.h"
+
+struct mem_ctx_s {
+	struct virt_ctx *virtctx;
+	volatile char *pml4;
+};
diff --git a/kernel/memory/paging.c b/kernel/memory/paging.c
index 5c4fa5c..2671cc2 100644
--- a/kernel/memory/paging.c
+++ b/kernel/memory/paging.c
@@ -1,9 +1,13 @@
 #include <lib.h>
 #include <comus/memory.h>
+#include <stddef.h>
+#include <stdint.h>
+
 #include "virtalloc.h"
 #include "physalloc.h"
 #include "paging.h"
+#include "memory.h"
 
 // PAGE MAP LEVEL 4 ENTRY
 struct pml4e {
@@ -514,34 +518,56 @@ static inline void *page_align(void *addr)
 	return (void *)a;
 }
 
-void *kmapaddr(void *addr, size_t len)
+void *mem_mapaddr(mem_ctx_t ctx, void *phys, void *virt, size_t len,
+				  unsigned int flags)
 {
-	void *phys = page_align(addr);
-	ptrdiff_t error = (char *)addr - (char *)phys;
+	TRACE("PHYS %16p VIRT %16p LEN %zu FLAGS %08x \n", phys, virt, len, flags);
+
+	long pages;
+	ptrdiff_t error;
+	void *aligned_phys;
+
+	// get length and physical page aligned address
+	aligned_phys = page_align(phys);
+	error = (char *)phys - (char *)aligned_phys;
 	len += error;
-	long pages = len / PAGE_SIZE + 1;
-	void *virt = virtaddr_alloc(pages);
-	if (virt == NULL) {
+	pages = len / PAGE_SIZE + 1;
+
+	// get page aligned (or allocate) vitural address
+	if (virt == NULL)
+		virt = virtaddr_alloc(ctx->virtctx, pages);
+	if (virt == NULL)
 		return NULL;
-	}
-	if (map_pages(kernel_pml4, virt, phys, F_WRITEABLE, pages)) {
-		virtaddr_free(virt);
+
+	if (map_pages((volatile struct pml4e *)ctx->pml4, virt, aligned_phys,
+				  F_WRITEABLE | flags, pages)) {
+		virtaddr_free(ctx->virtctx, virt);
 		return NULL;
 	}
+
 	return (char *)virt + error;
 }
 
-void kunmapaddr(void *addr)
+void mem_unmapaddr(mem_ctx_t ctx, void *virt)
 {
-	long pages = virtaddr_free(addr);
+	TRACE("VIRT %16p\n", virt);
+
+	long pages = virtaddr_free(ctx->virtctx, virt);
 	if (pages < 1)
 		return;
-	unmap_pages(kernel_pml4, addr, pages);
+	unmap_pages(kernel_pml4, virt, pages);
 }
 
-void *kalloc_pages(size_t count)
+void *mem_alloc_page(mem_ctx_t ctx)
 {
-	void *virt = virtaddr_alloc(count);
+	return mem_alloc_pages(ctx, 1);
+}
+
+void *mem_alloc_pages(mem_ctx_t ctx, size_t count)
+{
+	TRACE("COUNT %zu\n", count);
+
+	void *virt = virtaddr_alloc(ctx->virtctx, count);
 	if (virt == NULL)
 		return NULL;
 	//void *phys = alloc_phys_pages(count);
@@ -549,32 +575,33 @@
 	//	virtaddr_free(virt);
 	//	return NULL;
 	//}
-	if (map_pages(kernel_pml4, virt,
+	if (map_pages((volatile struct pml4e *)ctx->pml4, virt,
 				  //phys,
 				  //F_WRITEABLE,
 				  NULL, F_WRITEABLE, count)) {
-		virtaddr_free(virt);
+		virtaddr_free(ctx->virtctx, virt);
 		return NULL;
 	}
 	return virt;
 }
 
-void *kalloc_page(void)
+void mem_free_pages(mem_ctx_t ctx, void *virt)
 {
-	return kalloc_pages(1);
-}
+	TRACE("VIRT %16p\n", virt);
 
-void kfree_pages(void *virt)
-{
-	long pages = virtaddr_free(virt);
-	if (pages < 1)
-		return;
-	unmap_pages(kernel_pml4, virt, pages);
+	long pages = virtaddr_free(ctx->virtctx, virt);
+	if (pages == 1)
+		unmap_page((volatile struct pml4e *)ctx->pml4, virt);
+	else if (pages > 1)
+		unmap_pages((volatile struct pml4e *)ctx->pml4, virt, pages);
 }
 
-int kload_page(void *virt_addr)
+int mem_load_page(mem_ctx_t ctx, void *virt_addr)
 {
-	volatile struct pte *page = get_page(kernel_pml4, virt_addr);
+	TRACE("VIRT %16p\n", virt_addr);
+
+	volatile struct pte *page =
+		get_page((volatile struct pml4e *)ctx->pml4, virt_addr);
 	if (page == NULL)
 		return -1;
 	if (page->loaded)
diff --git a/kernel/memory/paging.h b/kernel/memory/paging.h
index be6fd06..b54d422 100644
--- a/kernel/memory/paging.h
+++ b/kernel/memory/paging.h
@@ -9,16 +9,6 @@
 #ifndef PAGING_H_
 #define PAGING_H_
 
-#define F_PRESENT 0x001
-#define F_WRITEABLE 0x002
-#define F_UNPRIVILEGED 0x004
-#define F_WRITETHROUGH 0x008
-#define F_CACHEDISABLE 0x010
-#define F_ACCESSED 0x020
-#define F_DIRTY 0x040
-#define F_MEGABYTE 0x080
-#define F_GLOBAL 0x100
-
 void paging_init(void);
 
 #endif /* paging.h */
diff --git a/kernel/memory/physalloc.c b/kernel/memory/physalloc.c
index a907077..7083c21 100644
--- a/kernel/memory/physalloc.c
+++ b/kernel/memory/physalloc.c
@@ -185,9 +185,10 @@ void physalloc_init(struct memory_map *map)
 	memory_start = page_align((uintptr_t)page_area_addr + page_area_size);
 
-	bitmap = kmapaddr(bitmap, bitmap_size);
+	bitmap = kmapaddr(bitmap, NULL, bitmap_size, F_WRITEABLE);
 	memset(bitmap, 0, bitmap_size);
-	page_area_addr = kmapaddr(page_area_addr, page_area_size);
+	page_area_addr =
+		kmapaddr(page_area_addr, NULL, page_area_size, F_WRITEABLE);
 	memset(page_area_addr, 0, page_area_size);
 
 	page_start = (struct memory_segment *)page_area_addr;
diff --git a/kernel/memory/virtalloc.c b/kernel/memory/virtalloc.c
index 6b7fd20..1c83427 100644
--- a/kernel/memory/virtalloc.c
+++ b/kernel/memory/virtalloc.c
@@ -3,45 +3,29 @@
 #include "virtalloc.h"
 
-struct addr_node {
-	uintptr_t start;
-	uintptr_t end;
-	struct addr_node *next;
-	struct addr_node *prev;
-	uint8_t is_alloc; // if node is storing allocated data
-	uint8_t is_used; // if node is in use by virtalloc
-};
+struct virt_ctx kernel_virt_ctx;
 
-#define BSS_NODES 64
-static struct addr_node bootstrap_nodes[BSS_NODES];
-static struct addr_node *alloc_nodes = NULL;
-static size_t free_node_start = 0;
-static size_t alloc_node_count = 0;
-static size_t used_node_count = 0;
-static bool is_allocating = false;
-
-static struct addr_node *start_node = NULL;
-
-static struct addr_node *get_node_idx(int idx)
+static struct virt_addr_node *get_node_idx(struct virt_ctx *ctx, int idx)
 {
-	if (idx < BSS_NODES) {
-		return &bootstrap_nodes[idx];
+	if (idx < BOOTSTRAP_VIRT_ALLOC_NODES) {
+		return &ctx->bootstrap_nodes[idx];
 	} else {
-		return &alloc_nodes[idx - BSS_NODES];
+		return &ctx->alloc_nodes[idx - BOOTSTRAP_VIRT_ALLOC_NODES];
 	}
 }
 
-static void update_node_ptrs(struct addr_node *old, struct addr_node *new,
-							 int old_len, int new_len)
+static void update_node_ptrs(struct virt_addr_node *old,
+							 struct virt_addr_node *new, int old_len,
+							 int new_len)
 {
 	if (old == NULL)
 		return;
 	int idx = 0;
 	for (int i = 0; i < old_len; i++) {
-		struct addr_node *o = &old[i];
+		struct virt_addr_node *o = &old[i];
 		if (o && !o->is_used)
 			continue;
-		struct addr_node *n = &new[idx++];
+		struct virt_addr_node *n = &new[idx++];
 		*n = *o;
 		if (n->prev != NULL)
 			n->prev->next = n;
@@ -49,37 +33,38 @@ static void update_node_ptrs(struct addr_node *old, struct addr_node *new,
 			n->next->prev = n;
 	}
 	for (int i = idx; i < new_len; i++) {
-		struct addr_node *n = &new[idx++];
+		struct virt_addr_node *n = &new[idx++];
 		n->is_used = false;
 	}
 }
 
-static struct addr_node *get_node(void)
+static struct virt_addr_node *get_node(struct virt_ctx *ctx)
 {
-	size_t count = BSS_NODES + alloc_node_count;
+	size_t count = BOOTSTRAP_VIRT_ALLOC_NODES + ctx->alloc_node_count;
 
-	if (!is_allocating && used_node_count + 16 >= count) {
-		is_allocating = true;
-		int new_alloc = alloc_node_count * 2;
+	if (!ctx->is_allocating && ctx->used_node_count + 16 >= count) {
+		ctx->is_allocating = true;
+		int new_alloc = ctx->alloc_node_count * 2;
 		if (new_alloc < 8)
 			new_alloc = 8;
-		struct addr_node *new_nodes;
-		new_nodes = kalloc(sizeof(struct addr_node) * new_alloc);
+		struct virt_addr_node *new_nodes;
+		new_nodes = kalloc(sizeof(struct virt_addr_node) * new_alloc);
 		if (new_nodes == NULL)
 			panic("virt addr alloc nodes is null");
-		update_node_ptrs(alloc_nodes, new_nodes, alloc_node_count, new_alloc);
-		kfree(alloc_nodes);
-		alloc_nodes = new_nodes;
-		alloc_node_count = new_alloc;
-		is_allocating = false;
-		count = BSS_NODES + alloc_node_count;
+		update_node_ptrs(ctx->alloc_nodes, new_nodes, ctx->alloc_node_count,
+						 new_alloc);
+		kfree(ctx->alloc_nodes);
+		ctx->alloc_nodes = new_nodes;
+		ctx->alloc_node_count = new_alloc;
+		ctx->is_allocating = false;
+		count = BOOTSTRAP_VIRT_ALLOC_NODES + ctx->alloc_node_count;
 	}
 
-	size_t idx = free_node_start;
+	size_t idx = ctx->free_node_start;
 	for (; idx < count; idx++) {
-		struct addr_node *node = get_node_idx(idx);
+		struct virt_addr_node *node = get_node_idx(ctx, idx);
 		if (!node->is_used) {
-			used_node_count++;
+			ctx->used_node_count++;
 			return node;
 		}
 	}
@@ -87,15 +72,15 @@ static struct addr_node *get_node(void)
 	panic("could not get virtaddr node");
 }
 
-static void free_node(struct addr_node *node)
+static void free_node(struct virt_ctx *ctx, struct virt_addr_node *node)
 {
 	node->is_used = false;
-	used_node_count--;
+	ctx->used_node_count--;
 }
 
-void virtaddr_init(void)
+void virtaddr_init(struct virt_ctx *ctx)
 {
-	struct addr_node init = {
+	struct virt_addr_node init = {
 		.start = 0x400000, // third page table
 		.end = 0x1000000000000, // 48bit memory address max
 		.next = NULL,
@@ -103,48 +88,53 @@
 		.is_alloc = false,
 		.is_used = true,
 	};
-	memsetv(bootstrap_nodes, 0, sizeof(bootstrap_nodes));
-	bootstrap_nodes[0] = init;
-	start_node = &bootstrap_nodes[0];
+	memsetv(ctx->bootstrap_nodes, 0, sizeof(ctx->bootstrap_nodes));
+	ctx->bootstrap_nodes[0] = init;
+	ctx->alloc_nodes = NULL;
+	ctx->start_node = &ctx->bootstrap_nodes[0];
+	ctx->free_node_start = 0;
+	ctx->alloc_node_count = 0;
+	ctx->used_node_count = 0;
+	ctx->is_allocating = false;
 }
 
-static void merge_back(struct addr_node *node)
+static void merge_back(struct virt_ctx *ctx, struct virt_addr_node *node)
 {
 	while (node->prev) {
 		if (node->is_alloc != node->prev->is_alloc)
 			break;
-		struct addr_node *temp = node->prev;
+		struct virt_addr_node *temp = node->prev;
 		node->start = temp->start;
 		node->prev = temp->prev;
 		if (temp->prev)
 			temp->prev->next = node;
-		free_node(temp);
+		free_node(ctx, temp);
 	}
 	if (node->prev == NULL) {
-		start_node = node;
+		ctx->start_node = node;
 	}
 }
 
-static void merge_forward(struct addr_node *node)
+static void merge_forward(struct virt_ctx *ctx, struct virt_addr_node *node)
 {
 	while (node->next) {
 		if (node->is_alloc != node->next->is_alloc)
 			break;
-		struct addr_node *temp = node->next;
+		struct virt_addr_node *temp = node->next;
 		node->end = temp->end;
 		node->next = temp->next;
 		if (temp->next)
 			temp->next->prev = node;
-		free_node(temp);
+		free_node(ctx, temp);
 	}
 }
 
-void *virtaddr_alloc(int n_pages)
+void *virtaddr_alloc(struct virt_ctx *ctx, int n_pages)
 {
 	if (n_pages < 1)
 		return NULL;
 	long n_length = n_pages * PAGE_SIZE;
 
-	struct addr_node *node = start_node;
+	struct virt_addr_node *node = ctx->start_node;
 
 	for (; node != NULL; node = node->next) {
 		long length = node->end - node->start;
@@ -152,11 +142,11 @@
 			continue;
 
 		if (length >= n_length) {
-			struct addr_node *new = get_node();
+			struct virt_addr_node *new = get_node(ctx);
 			if (node->prev != NULL) {
 				node->prev->next = new;
 			} else {
-				start_node = new;
+				ctx->start_node = new;
 			}
 			new->next = node;
 			new->prev = node->prev;
@@ -168,8 +158,8 @@
 			new->is_used = true;
 			new->next = node;
 			void *mem = (void *)new->start;
-			merge_back(new);
-			merge_forward(new);
+			merge_back(ctx, new);
+			merge_forward(ctx, new);
 			return mem;
 		}
 	}
@@ -177,21 +167,21 @@
 	return NULL;
 }
 
-long virtaddr_free(void *virtaddr)
+long virtaddr_free(struct virt_ctx *ctx, void *virtaddr)
 {
 	uintptr_t virt = (uintptr_t)virtaddr;
 	if (virt % PAGE_SIZE)
 		return -1; // not page aligned, we did not give this out!!!
 
-	struct addr_node *node = start_node;
+	struct virt_addr_node *node = ctx->start_node;
 
 	for (; node != NULL; node = node->next) {
 		if (node->start == virt) {
 			int length = node->end - node->start;
 			int pages = length / PAGE_SIZE;
-			merge_back(node);
-			merge_forward(node);
+			merge_back(ctx, node);
+			merge_forward(ctx, node);
 			return pages;
 		}
 	}
diff --git a/kernel/memory/virtalloc.h b/kernel/memory/virtalloc.h
index a5ca840..9f974c5 100644
--- a/kernel/memory/virtalloc.h
+++ b/kernel/memory/virtalloc.h
@@ -9,23 +9,61 @@
 #ifndef VIRTALLOC_H_
 #define VIRTALLOC_H_
 
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#define BOOTSTRAP_VIRT_ALLOC_NODES 64
+
+struct virt_addr_node {
+	/// first virtural address
+	uintptr_t start;
+	/// last virtural address
+	uintptr_t end;
+	/// next node in linked list
+	struct virt_addr_node *next;
+	/// prev node in linked list
+	struct virt_addr_node *prev;
+	/// if this node is storing any allocated data
+	uint8_t is_alloc;
+	/// if this node is in use by virtalloc
+	uint8_t is_used;
+};
+
+struct virt_ctx {
+	/// bootstrap nodes for the context (not in heap)
+	struct virt_addr_node bootstrap_nodes[BOOTSTRAP_VIRT_ALLOC_NODES];
+	/// heap allocated nodes
+	struct virt_addr_node *alloc_nodes;
+	/// start node
+	struct virt_addr_node *start_node;
+	/// index of first free node
+	size_t free_node_start;
+	/// number of heap allocated nodes
+	size_t alloc_node_count;
+	/// number of used nodes
+	size_t used_node_count;
+	/// if we are currently allocating (recursion check)
+	bool is_allocating;
+};
+
 /**
  * Initalizes the virtual address allocator
  */
-void virtaddr_init(void);
+void virtaddr_init(struct virt_ctx *ctx);
 
 /**
  * Allocate a virtual address of length x pages
  * @param pages - x pages
 * @returns virt addr
 */
-void *virtaddr_alloc(int pages);
+void *virtaddr_alloc(struct virt_ctx *ctx, int pages);
 
 /**
  * Free the virtual address from virtaddr_alloc
  * @param virtaddr - the addr to free
  * @returns number of pages used for virtaddr
  */
-long virtaddr_free(void *virtaddr);
+long virtaddr_free(struct virt_ctx *ctx, void *virtaddr);
 
 #endif /* virtalloc.h */