memory allocator/pager, plus other stuff

This commit is contained in:
Murphy 2025-04-03 16:53:51 -04:00
parent 01221a0d10
commit 516e920cd9
Signed by: freya
GPG key ID: 9FBC6FFD6D2DBF17
18 changed files with 1681 additions and 23 deletions

View file

@ -25,9 +25,16 @@ const boot_src = &[_][]const u8{"boot/boot.S"};
const kernel_src = &[_][]const u8{
"kernel/entry.S", // must be first
"kernel/kernel.c",
"kernel/io/io.c",
"kernel/io/panic.c",
"kernel/memory/memory.c",
"kernel/memory/paging.c",
"kernel/memory/physalloc.c",
"kernel/memory/virtalloc.c",
};
const lib_src = &[_][]const u8{
"lib/alloc.c",
"lib/atox.c",
"lib/bound.c",
"lib/btoa.c",
@ -38,8 +45,11 @@ const lib_src = &[_][]const u8{
"lib/itoc.c",
"lib/memcmp.c",
"lib/memcpy.c",
"lib/memcpyv.c",
"lib/memmove.c",
"lib/memmovev.c",
"lib/memset.c",
"lib/memsetv.c",
"lib/printf.c",
"lib/stpcpy.c",
"lib/stpncpy.c",

View file

@ -11,6 +11,8 @@
#include <stddef.h>
#define PAGE_SIZE 4096
/**
* converts single digit int to base 36
* @param i - int
@ -194,4 +196,59 @@ extern unsigned int bound(unsigned int min, unsigned int value,
*/
extern void delay(int count);
/**
 * Allocates size bytes in memory
 *
 * @param size - the amount of bytes to allocate
 * @returns the address allocated or NULL on failure
 */
extern void *malloc(size_t size);
/**
 * Reallocates a given allocated ptr to a new size of bytes in memory.
 * If ptr is NULL it will allocate new memory.
 *
 * @param ptr - the pointer to reallocate
 * @param size - the amount of bytes to reallocate to
 * @returns the address allocated or NULL on failure
 */
extern void *realloc(void *ptr, size_t size);
/**
 * Frees an allocated pointer in memory
 *
 * @param ptr - the pointer to free
 */
extern void free(void *ptr);
/**
 * Allocate a single page of memory
 *
 * @returns the address allocated or NULL on failure
 */
extern void *alloc_page(void);
/**
 * Allocate count contiguous virtual pages
 *
 * @param count - the number of pages to allocate
 * @returns the address allocated or NULL on failure
 */
extern void *alloc_pages(size_t count);
/**
 * Free allocated pages.
 *
 * @param ptr - the pointer provided by alloc_page or alloc_pages
 */
extern void free_pages(void *ptr);
/**
 * Abort the current process with a given message.
 *
 * @param format - the format string
 * @param ... - variable args for the format
 */
__attribute__((noreturn)) extern void panic(const char *format, ...);
#endif /* stlib.h */

View file

@ -0,0 +1,64 @@
#pragma once
#include <stdint.h>
/* x86 port-I/O and interrupt helpers: thin inline wrappers around the
 * corresponding instructions. */
/* read one byte from an I/O port */
static inline uint8_t inb(uint16_t port)
{
	uint8_t ret;
	__asm__ volatile("inb %1, %0" : "=a"(ret) : "Nd"(port));
	return ret;
}
/* write one byte to an I/O port */
static inline void outb(uint16_t port, uint8_t val)
{
	__asm__ volatile("outb %0, %1" : : "a"(val), "Nd"(port));
}
/* read one 16-bit word from an I/O port */
static inline uint16_t inw(uint16_t port)
{
	uint16_t ret;
	__asm__ volatile("inw %1, %0" : "=a"(ret) : "Nd"(port));
	return ret;
}
/* write one 16-bit word to an I/O port */
static inline void outw(uint16_t port, uint16_t val)
{
	__asm__ volatile("outw %0, %1" : : "a"(val), "Nd"(port));
}
/* read one 32-bit dword from an I/O port */
static inline uint32_t inl(uint16_t port)
{
	uint32_t ret;
	__asm__ volatile("inl %1, %0" : "=a"(ret) : "Nd"(port));
	return ret;
}
/* write one 32-bit dword to an I/O port */
static inline void outl(uint16_t port, uint32_t val)
{
	__asm__ volatile("outl %0, %1" : : "a"(val), "Nd"(port));
}
/* dummy write to port 0x80: tiny delay for slow devices */
static inline void io_wait(void)
{
	outb(0x80, 0);
}
/* enable maskable interrupts */
static inline void sti(void)
{
	__asm__ volatile("sti");
}
/* disable maskable interrupts */
static inline void cli(void)
{
	__asm__ volatile("cli");
}
/* enable interrupts, then halt until the next one arrives */
static inline void int_wait(void)
{
	__asm__ volatile("sti; hlt");
}
/* disable interrupts and halt the CPU */
static inline void halt(void)
{
	__asm__ volatile("cli; hlt");
}

View file

@ -1 +1,75 @@
#include <memory/mapping.h>
/**
 * @file memory.h
 *
 * @author Freya Murphy <freya@freyacat.org>
 *
 * Kernel memory functions
 */
#ifndef _MEMORY_H
#define _MEMORY_H
#include <stdint.h>
#include <stddef.h>
#define MMAP_MAX_ENTRY 64
/* one physical memory segment reported by the bootloader */
struct memory_segment {
	uint64_t addr;
	uint64_t len;
};
/* fixed-size copy of the firmware/bootloader memory map */
struct memory_map {
	uint32_t entry_count;
	struct memory_segment entries[MMAP_MAX_ENTRY];
};
/**
 * Initialize the system memory allocator
 */
void memory_init(struct memory_map *map);
/**
 * @returns how much memory the system has
 */
uint64_t memory_total(void);
/**
 * @returns how much memory is free
 */
uint64_t memory_free(void);
/**
 * @returns how much memory is used
 */
uint64_t memory_used(void);
/**
 * Maps at least len bytes of physical memory starting at
 * physical address addr. The returned address can be
 * any virtual address.
 *
 * @param addr - the physical address to map
 * @param len - the minimum length to map
 * @returns a virtual pointer with the same in-page offset as addr,
 *          or NULL on failure
 */
void *mapaddr(void *addr, size_t len);
/**
 * Unmaps a mapping previously returned by mapaddr
 * @param addr - the address returned from mapaddr
 */
void unmapaddr(void *addr);
/**
 * Attempts to load a mapped but not yet allocated page.
 *
 * @param virt_addr - the virtual address from either page allocation function
 *
 * @returns 0 on success and a negative error code on failure.
 *
 * NOTE(review): the paging code implements this as kload_page —
 * confirm which public name is intended.
 */
int load_page(void *virt_addr);
#endif /* memory.h */

View file

@ -1,20 +0,0 @@
/**
* @file memory.h
*
* @author Freya Murphy
*
* Kernel memory declarations
*/
#ifndef _MEMORY_MAPPING_H
#define _MEMORY_MAPPING_H
// paging
#define PAGE_SIZE 4096
#define PAGE_PRESENT 0x1
#define PAGE_WRITE 0x2
#define PAGE_USER 0x4
#define PAGE_HUGE 0x80
#define PAGE_GLOBAL 0x100
#endif

9
kernel/io/io.c Normal file
View file

@ -0,0 +1,9 @@
#include <lib.h>
#include <stdio.h>
/* Write one character to a stream.
 * FIXME: console/stream output is not implemented yet; both
 * parameters are deliberately ignored for now. */
void fputc(FILE *stream, char c) {
	(void)stream;
	(void)c;
}

16
kernel/io/panic.c Normal file
View file

@ -0,0 +1,16 @@
#include <lib.h>
#include <stdarg.h>
#include <comus/asm.h>
/**
 * Abort the kernel with a formatted message.
 *
 * Disables interrupts, prints the panic banner plus the formatted
 * message, then halts forever.
 *
 * @param format - printf-style format string
 * @param ... - arguments for the format
 */
__attribute__((noreturn))
void panic(const char *format, ...) {
	cli();
	va_list list;
	va_start(list, format);
	printf("\n\n!!! PANIC !!!\n");
	vprintf(format, list);
	printf("\n\n");
	va_end(list); /* BUG FIX: every va_start must be paired with va_end */
	while (1)
		halt();
}

View file

@ -1,5 +1,8 @@
#include <comus/memory.h>
void main(void)
{
void main(void) {
while (1) ;
while (1)
;
}

View file

@ -4,6 +4,8 @@ SECTIONS
{
. = 1M;
kernel_start = .;
. = ALIGN(0x1000);
.boot : {
@ -40,4 +42,6 @@ SECTIONS
*(.eh_frame .eh_frame_hdr)
*(.note.GNU-stack .note.gnu.property .comment)
}
kernel_end = .;
}

15
kernel/memory/memory.c Normal file
View file

@ -0,0 +1,15 @@
#include <comus/memory.h>
#include <comus/asm.h>
#include "paging.h"
#include "virtalloc.h"
#include "physalloc.h"
/* Bring up the memory subsystem. Order matters: paging windows first,
 * then the virtual address allocator, then the physical allocator
 * (whose init uses mapaddr and therefore needs paging). Interrupts
 * are held off for the whole sequence. */
void memory_init(struct memory_map *map)
{
	cli();
	paging_init();
	virtaddr_init();
	physalloc_init(map);
	sti();
}

591
kernel/memory/paging.c Normal file
View file

@ -0,0 +1,591 @@
#include <lib.h>
#include <comus/memory.h>
#include "virtalloc.h"
#include "physalloc.h"
#include "paging.h"
// PAGE MAP LEVEL 4 ENTRY
// NOTE(review): these bitfield layouts assume low-to-high bit packing
// within a uint64_t (GCC/Clang on x86-64) — confirm if another
// toolchain is ever used
struct pml4e {
	uint64_t flags : 6;
	uint64_t : 6;
	uint64_t address : 40; // physical page number of the next-level table
	uint64_t : 11;
	uint64_t execute_disable : 1;
};
// PAGE DIRECTORY POINTER TABLE ENTRY
struct pdpte {
	uint64_t flags : 6;
	uint64_t : 1;
	uint64_t page_size : 1; // set = maps a large page instead of a PD
	uint64_t : 4;
	uint64_t address : 40;
	uint64_t : 11;
	uint64_t execute_disable : 1;
};
// PAGE DIRECTORY ENTRY
struct pde {
	uint64_t flags : 6;
	uint64_t : 1;
	uint64_t page_size : 1; // set = maps a large page instead of a PT
	uint64_t : 4;
	uint64_t address : 40;
	uint64_t : 11;
	uint64_t execute_disable : 1;
};
// PAGE TABLE ENTRY
struct pte {
	uint64_t flags : 9;
	uint64_t loaded : 1; // software bit: backing frame actually allocated
	uint64_t : 2;
	uint64_t address : 40;
	uint64_t : 7;
	uint64_t protection_key : 4;
	uint64_t execute_disable : 1;
};
// bss segment, can write to
extern volatile struct pml4e kernel_pml4[512];
extern volatile struct pdpte kernel_pdpt_0[512];
extern volatile struct pde kernel_pd_0[512];
extern volatile struct pte bootstrap_pt[512];
extern volatile struct pte
	paging_pt[512]; // paging_pt should NEVER be outside of this file, NEVER i say
// paged address to read page tables
// the structures are not guaranteed to be ident mapped
// map them here with map_<type>(phys_addr) before using structures
void volatile *addr_mapped = (void *)(uintptr_t)0x204000;
static volatile struct pml4e *pml4_mapped = (void *)(uintptr_t)0x200000;
static volatile struct pdpte *pdpt_mapped = (void *)(uintptr_t)0x201000;
static volatile struct pde *pd_mapped = (void *)(uintptr_t)0x202000;
static volatile struct pte *pt_mapped = (void *)(uintptr_t)0x203000;
// flush the TLB entry covering addr
static inline void invlpg(volatile void *addr)
{
	__asm__ volatile("invlpg (%0)" ::"r"(addr) : "memory");
}
// point the addr_mapped window (paging_pt slot 4) at phys_addr
static void load_addr(volatile void *phys_addr)
{
	static volatile struct pte *pt = &paging_pt[4];
	pt->address = (uint64_t)phys_addr >> 12;
	pt->flags = F_PRESENT | F_WRITEABLE;
	invlpg(addr_mapped);
}
// point the pml4_mapped window (slot 0) at phys, unless already there
static void load_pml4(volatile void *phys)
{
	static volatile struct pte *pt = &paging_pt[0];
	if ((uint64_t)phys >> 12 == pt->address)
		return;
	pt->address = (uint64_t)phys >> 12;
	pt->flags = F_PRESENT | F_WRITEABLE;
	invlpg(pml4_mapped);
}
// point the pdpt_mapped window (slot 1) at phys, unless already there
static void load_pdpt(volatile void *phys)
{
	static volatile struct pte *pt = &paging_pt[1];
	if ((uint64_t)phys >> 12 == pt->address)
		return;
	pt->address = (uint64_t)phys >> 12;
	pt->flags = F_PRESENT | F_WRITEABLE;
	invlpg(pdpt_mapped);
}
// point the pd_mapped window (slot 2) at phys, unless already there
static void load_pd(volatile void *phys)
{
	static volatile struct pte *pt = &paging_pt[2];
	if ((uint64_t)phys >> 12 == pt->address)
		return;
	pt->address = (uint64_t)phys >> 12;
	pt->flags = F_PRESENT | F_WRITEABLE;
	invlpg(pd_mapped);
}
// point the pt_mapped window (slot 3) at phys, unless already there
static void load_pt(volatile void *phys)
{
	static volatile struct pte *pt = &paging_pt[3];
	if ((uint64_t)phys >> 12 == pt->address)
		return;
	pt->address = (uint64_t)phys >> 12;
	pt->flags = F_PRESENT | F_WRITEABLE;
	invlpg(pt_mapped);
}
// result codes for the table-walk helpers below
#define PAG_SUCCESS 0
#define PAG_CANNOT_ALLOC 1
#define PAG_NOT_PRESENT 2
/* Select (and optionally create) the PDPT covering virt under root.
 * On success *res receives the PHYSICAL address of the PDPT (callers
 * pass it through load_pdpt before touching it). Nonzero flags
 * overwrite the PML4 entry's flags. */
static int select_pdpt(void *virt, unsigned int flags,
		       volatile struct pml4e *root, volatile struct pdpte **res,
		       bool create)
{
	load_pml4(root);
	uint64_t offset = (uint64_t)virt >> 39;
	volatile struct pml4e *pml4e = &pml4_mapped[offset];
	if (!(pml4e->flags & F_PRESENT)) {
		if (!create) {
			return PAG_NOT_PRESENT;
		}
		// allocate and zero a fresh table
		void *new_page = alloc_phys_page();
		if (new_page == NULL) {
			return PAG_CANNOT_ALLOC;
		}
		load_addr(new_page);
		memsetv(addr_mapped, 0, PAGE_SIZE);
		pml4e->address = ((uint64_t)new_page) >> 12;
		pml4e->flags = F_PRESENT;
	}
	if (flags)
		pml4e->flags = F_PRESENT | flags;
	*res = (volatile struct pdpte *)(uintptr_t)(pml4e->address << 12);
	return PAG_SUCCESS;
}
/* Select (and optionally create) the page directory covering virt
 * inside pdpt; same contract as select_pdpt. */
static int select_pd(void *virt, unsigned int flags,
		     volatile struct pdpte *pdpt, volatile struct pde **res,
		     bool create)
{
	load_pdpt(pdpt);
	uint64_t offset = ((uint64_t)virt >> 30) & 0x1ff;
	volatile struct pdpte *pdpte = &pdpt_mapped[offset];
	if (!(pdpte->flags & F_PRESENT)) {
		if (!create) {
			return PAG_NOT_PRESENT;
		}
		void *new_page = alloc_phys_page();
		if (new_page == NULL) {
			return PAG_CANNOT_ALLOC;
		}
		load_addr(new_page);
		memsetv(addr_mapped, 0, PAGE_SIZE);
		pdpte->address = ((uint64_t)new_page) >> 12;
		pdpte->flags = F_PRESENT;
	}
	if (flags)
		pdpte->flags = F_PRESENT | flags;
	*res = (volatile struct pde *)(uintptr_t)(pdpte->address << 12);
	return PAG_SUCCESS;
}
/* Select (and optionally create) the page table covering virt inside
 * pd; same contract as select_pdpt. */
static int select_pt(void *virt, unsigned int flags, volatile struct pde *pd,
		     volatile struct pte **res, bool create)
{
	load_pd(pd);
	uint64_t offset = ((uint64_t)virt >> 21) & 0x1ff;
	volatile struct pde *pde = &pd_mapped[offset];
	if (!(pde->flags & F_PRESENT)) {
		if (!create) {
			return PAG_NOT_PRESENT;
		}
		void *new_page = alloc_phys_page();
		if (new_page == NULL) {
			return PAG_CANNOT_ALLOC;
		}
		load_addr(new_page);
		memsetv(addr_mapped, 0, PAGE_SIZE);
		pde->address = ((uint64_t)new_page) >> 12;
		pde->flags = F_PRESENT;
	}
	if (flags)
		pde->flags = F_PRESENT | flags;
	*res = (volatile struct pte *)(uintptr_t)(pde->address << 12);
	return PAG_SUCCESS;
}
/* Select the leaf PTE for virt inside pt (nothing to create at this
 * level; *res points into the pt_mapped window). */
static void select_page(void *virt, volatile struct pte *pt,
			volatile struct pte **res)
{
	load_pt(pt);
	uint64_t offset = ((uint64_t)virt >> 12) & 0x1ff;
	volatile struct pte *page = &pt_mapped[offset];
	*res = page;
	return;
}
/* If no entry in the pml4 window is still present, free every child
 * table whose (stale) address field is recorded.
 * NOTE(review): freed address fields are not cleared afterwards, and
 * the table page itself is not released — confirm this matches the
 * intended table lifetime. */
static inline void try_unmap_pml4(void)
{
	for (int i = 0; i < 512; i++) {
		if (pml4_mapped[i].flags & F_PRESENT)
			return; // table still in use: keep everything
	}
	for (int i = 0; i < 512; i++) {
		if (pml4_mapped[i].address) {
			void *addr = (void *)(uintptr_t)(pml4_mapped[i].address << 12);
			free_phys_page(addr);
		}
	}
}
/* Same sweep for the pdpt window; cascades one level up on success. */
static inline void try_unmap_pdpt(void)
{
	for (int i = 0; i < 512; i++) {
		if (pdpt_mapped[i].flags & F_PRESENT)
			return;
	}
	for (int i = 0; i < 512; i++) {
		if (pdpt_mapped[i].address) {
			void *addr = (void *)(uintptr_t)(pdpt_mapped[i].address << 12);
			free_phys_page(addr);
		}
	}
	try_unmap_pml4();
}
/* Same sweep for the page-directory window; cascades upward. */
static inline void try_unmap_pd(void)
{
	for (int i = 0; i < 512; i++) {
		if (pd_mapped[i].flags & F_PRESENT)
			return;
	}
	for (int i = 0; i < 512; i++) {
		if (pd_mapped[i].address) {
			void *addr = (void *)(uintptr_t)(pd_mapped[i].address << 12);
			free_phys_page(addr);
		}
	}
	try_unmap_pdpt();
}
/* Same sweep for the page-table window; cascades upward. */
static inline void try_unmap_pt(void)
{
	for (int i = 0; i < 512; i++) {
		if (pt_mapped[i].flags & F_PRESENT)
			return;
	}
	for (int i = 0; i < 512; i++) {
		if (pt_mapped[i].address) {
			void *addr = (void *)(uintptr_t)(pt_mapped[i].address << 12);
			free_phys_page(addr);
		}
	}
	try_unmap_pd();
}
/* Remove the mapping for one virtual page under root, release any
 * now-empty tables, and flush its TLB entry. Missing intermediate
 * levels simply end the walk. */
static void unmap_page(volatile struct pml4e *root, void *virt)
{
	volatile struct pdpte *pdpt;
	volatile struct pde *pd;
	volatile struct pte *pt;
	volatile struct pte *page;
	unsigned int df = 0; // leave intermediate-table flags untouched
	if (select_pdpt(virt, df, root, &pdpt, false))
		return;
	if (select_pd(virt, df, pdpt, &pd, false))
		return;
	if (select_pt(virt, df, pd, &pt, false))
		return;
	select_page(virt, pt, &page);
	page->address = 0;
	page->flags = 0;
	page->loaded = 0;
	try_unmap_pt();
	invlpg(virt);
	return;
}
/* Remove the mappings for page_count consecutive pages starting at
 * virt_start. The previously selected tables are cached and only
 * re-walked when a 512-entry table boundary is crossed. */
static void unmap_pages(volatile struct pml4e *root, void *virt_start,
			long page_count)
{
	uint64_t pml4_o = -1, pdpt_o = -1, pd_o = -1; // previous indices
	uint64_t pml4_n, pdpt_n, pd_n; // current indices
	volatile struct pdpte *pdpt = NULL;
	volatile struct pde *pd = NULL;
	volatile struct pte *pt = NULL;
	volatile struct pte *page = NULL;
	unsigned int df = 0;
	void *virt;
	for (long i = 0; i < page_count; i++) {
		virt = (char *)virt_start + (i * PAGE_SIZE);
		pml4_n = (uint64_t)virt >> 39;
		pdpt_n = ((uint64_t)virt >> 30) & 0x1ff;
		pd_n = ((uint64_t)virt >> 21) & 0x1ff;
		if (pdpt == NULL || pml4_o != pml4_n) {
			if (select_pdpt(virt, df, root, &pdpt, false))
				continue;
			pml4_o = pml4_n;
		}
		if (pd == NULL || pdpt_o != pdpt_n) {
			if (select_pd(virt, df, pdpt, &pd, false))
				continue;
			pdpt_o = pdpt_n;
		}
		if (pt == NULL || pd_o != pd_n) {
			if (pt) {
				// done with the previous table: maybe free it
				try_unmap_pt();
			}
			if (select_pt(virt, df, pd, &pt, false))
				continue;
			pd_o = pd_n;
		}
		select_page(virt, pt, &page);
		page->address = 0;
		page->flags = 0;
		page->loaded = 0;
	}
	if (pt != NULL)
		try_unmap_pt();
	return;
}
/* Walk the tables for virt without creating anything.
 * @returns the leaf PTE (via the pt window) or NULL when unmapped */
static volatile struct pte *get_page(volatile struct pml4e *root, void *virt)
{
	volatile struct pdpte *pdpt;
	volatile struct pde *pd;
	volatile struct pte *pt;
	volatile struct pte *page;
	unsigned int df = 0;
	if (select_pdpt(virt, df, root, &pdpt, false))
		return NULL;
	if (select_pd(virt, df, pdpt, &pd, false))
		return NULL;
	if (select_pt(virt, df, pd, &pt, false))
		return NULL;
	select_page(virt, pt, &page);
	return page;
}
/* Map a single virtual page under root. When phys is NULL the page is
 * only reserved (loaded = 0, not present) and a frame is attached
 * later by kload_page on first fault.
 * @returns 0 on success, 1 when a table could not be allocated */
static int map_page(volatile struct pml4e *root, void *virt, void *phys,
		    unsigned int flags)
{
	volatile struct pdpte *pdpt;
	volatile struct pde *pd;
	volatile struct pte *pt;
	volatile struct pte *page;
	unsigned int df = F_WRITEABLE;
	if (phys == NULL)
		df = 0; // alloc page on fault
	if (select_pdpt(virt, df, root, &pdpt, true))
		return 1;
	if (select_pd(virt, df, pdpt, &pd, true))
		return 1;
	if (select_pt(virt, df, pd, &pt, true))
		return 1;
	select_page(virt, pt, &page);
	if (phys) {
		page->flags = F_PRESENT | flags;
		page->address = (uint64_t)phys >> 12;
		page->loaded = 1;
		invlpg(virt);
	} else {
		page->flags = flags;
		page->address = 0;
		page->loaded = 0;
	}
	return 0;
}
/* Map page_count consecutive virtual pages onto consecutive physical
 * pages (or reserve them lazily when phys_start is NULL). On failure
 * everything mapped so far is rolled back. A full TLB flush (CR3
 * reload) runs at the end; global pages are flushed individually.
 * @returns 0 on success, 1 on failure */
static int map_pages(volatile struct pml4e *root, void *virt_start,
		     void *phys_start, unsigned int flags, long page_count)
{
	uint64_t pml4_o = -1, pdpt_o = -1, pd_o = -1;
	uint64_t pml4_n, pdpt_n, pd_n;
	volatile struct pdpte *pdpt = NULL;
	volatile struct pde *pd = NULL;
	volatile struct pte *pt = NULL;
	volatile struct pte *page = NULL;
	void *virt, *phys;
	unsigned int df = F_WRITEABLE;
	if (phys_start == NULL)
		df = 0; // alloc page on fault
	long i;
	for (i = 0; i < page_count; i++) {
		virt = (char *)virt_start + (i * PAGE_SIZE);
		phys = (char *)phys_start + (i * PAGE_SIZE);
		pml4_n = (uint64_t)virt >> 39;
		pdpt_n = ((uint64_t)virt >> 30) & 0x1ff;
		pd_n = ((uint64_t)virt >> 21) & 0x1ff;
		if (pdpt == NULL || pml4_o != pml4_n) {
			if (select_pdpt(virt, df, root, &pdpt, true))
				goto failed;
			pml4_o = pml4_n;
		}
		if (pd == NULL || pdpt_o != pdpt_n) {
			if (select_pd(virt, df, pdpt, &pd, true))
				goto failed;
			pdpt_o = pdpt_n;
		}
		if (pt == NULL || pd_o != pd_n) {
			if (select_pt(virt, df, pd, &pt, true))
				goto failed;
			pd_o = pd_n;
		}
		select_page(virt, pt, &page);
		if (phys_start) {
			page->flags = F_PRESENT | flags;
			page->address = (uint64_t)phys >> 12;
			page->loaded = 1;
		} else {
			page->flags = flags;
			page->address = 0;
			page->loaded = 0;
		}
		if (flags & F_GLOBAL)
			invlpg(virt);
	}
	__asm__ volatile("mov %cr3, %rax; mov %rax, %cr3;");
	return 0;
failed:
	// BUG FIX: roll back from the START of the request; the original
	// passed `virt` (the failing page) and unmapped the wrong range
	unmap_pages(root, virt_start, i);
	return 1;
}
/* Install the bootstrap kernel tables: pml4[0] -> kernel_pdpt_0 ->
 * kernel_pd_0, with pd slot 1 = paging_pt (the table-window page
 * table used by the load_* helpers) and pd slot 2 = bootstrap_pt.
 * Both leaf tables are zeroed. */
void paging_init(void)
{
	kernel_pml4[0].flags = F_PRESENT | F_WRITEABLE;
	kernel_pml4[0].address = (uint64_t)(&kernel_pdpt_0) >> 12;
	kernel_pdpt_0[0].flags = F_PRESENT | F_WRITEABLE;
	kernel_pdpt_0[0].address = (uint64_t)(&kernel_pd_0) >> 12;
	kernel_pd_0[1].flags = F_PRESENT | F_WRITEABLE;
	kernel_pd_0[1].address = (uint64_t)(&paging_pt) >> 12;
	kernel_pd_0[2].flags = F_PRESENT | F_WRITEABLE;
	kernel_pd_0[2].address = (uint64_t)(&bootstrap_pt) >> 12;
	memsetv(&paging_pt, 0, 4096);
	memsetv(&bootstrap_pt, 0, 4096);
}
/* round an address down to the start of its page */
static inline void *page_align(void *addr)
{
	uintptr_t a = (uintptr_t)addr;
	return (void *)(a - (a % PAGE_SIZE));
}
/**
 * Maps at least len bytes of physical memory starting at addr into
 * the kernel's virtual address space.
 *
 * @param addr - the physical address to map (need not be page aligned)
 * @param len - the minimum number of bytes to map
 * @returns a virtual pointer with the same in-page offset as addr,
 *          or NULL on failure
 */
void *mapaddr(void *addr, size_t len)
{
	void *phys = page_align(addr);
	ptrdiff_t error = (char *)addr - (char *)phys;
	len += error; // account for the alignment slack at the front
	// BUG FIX: round up to whole pages; the original computed
	// len / PAGE_SIZE + 1 and always mapped one extra page when
	// len was already an exact multiple of PAGE_SIZE
	long pages = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	if (pages < 1)
		pages = 1; // preserve old behavior for len == 0
	void *virt = virtaddr_alloc(pages);
	if (virt == NULL) {
		return NULL;
	}
	if (map_pages(kernel_pml4, virt, phys, F_WRITEABLE, pages)) {
		virtaddr_free(virt);
		return NULL;
	}
	return (char *)virt + error;
}
/* Tear down a mapping created by mapaddr: release the virtual range,
 * then drop the page-table entries that backed it. */
void unmapaddr(void *addr)
{
	long count = virtaddr_free(addr);
	if (count >= 1)
		unmap_pages(kernel_pml4, addr, count);
}
/**
 * Allocate count contiguous virtual pages.
 *
 * The physical address passed to map_pages is NULL, so backing frames
 * are allocated lazily on first fault (see map_page / kload_page).
 *
 * @param count - the number of pages to allocate
 * @returns the starting virtual address, or NULL on failure
 */
void *alloc_pages(size_t count)
{
	void *virt = virtaddr_alloc(count);
	if (virt == NULL)
		return NULL;
	if (map_pages(kernel_pml4, virt, NULL, F_WRITEABLE, count)) {
		virtaddr_free(virt);
		return NULL;
	}
	return virt;
}
/* Free a single page.
 * NOTE(review): not implemented — any call panics. */
void free_page(void *virt)
{
	(void)virt;
	panic("free_page is not yet implemented");
}
/* Free pages obtained from alloc_page/alloc_pages: release the
 * virtual range, then unmap however many pages it covered. */
void free_pages(void *virt)
{
	long count = virtaddr_free(virt);
	if (count >= 1)
		unmap_pages(kernel_pml4, virt, count);
}
/* Attach a real physical frame to a lazily mapped page.
 * NOTE(review): <comus/memory.h> declares this operation as
 * load_page() — confirm which name is intended.
 * @returns 0 on success, -1 if unmapped or already backed, -2 on OOM */
int kload_page(void *virt_addr)
{
	volatile struct pte *page = get_page(kernel_pml4, virt_addr);
	if (page == NULL)
		return -1; // virt_addr was never mapped
	if (page->loaded)
		return -1; // already has a frame
	void *phys = alloc_phys_page();
	if (phys == NULL)
		return -2;
	page->loaded = 1;
	page->address = (uint64_t)phys >> 12;
	page->flags |= F_PRESENT;
	invlpg(virt_addr);
	return 0;
}

24
kernel/memory/paging.h Normal file
View file

@ -0,0 +1,24 @@
/**
 * @file paging.h
 *
 * @author Freya Murphy <freya@freyacat.org>
 *
 * 64-bit paging functions
 */
#ifndef PAGING_H_
#define PAGING_H_
// x86-64 page table entry flag bits
#define F_PRESENT 0x001
#define F_WRITEABLE 0x002
#define F_UNPRIVILEGED 0x004
#define F_WRITETHROUGH 0x008
#define F_CACHEDISABLE 0x010
#define F_ACCESSED 0x020
#define F_DIRTY 0x040
#define F_MEGABYTE 0x080
#define F_GLOBAL 0x100
/**
 * Initialize the bootstrap kernel page tables
 */
void paging_init(void);
#endif /* paging.h */

242
kernel/memory/physalloc.c Normal file
View file

@ -0,0 +1,242 @@
#include <lib.h>
#include <comus/memory.h>
#include <comus/asm.h>
#include "physalloc.h"
extern char kernel_start;
extern char kernel_end;
#define kaddr(addr) ((uintptr_t)(&addr))
// between memory_start and kernel_start will be the bitmap
static uintptr_t memory_start = 0;
/* one usable run of physical memory, trimmed to the allocator heap */
struct memory_area {
	uint64_t len;
	uintptr_t addr;
};

/* PAGE_SIZE normally comes from <lib.h>; defensive fallback only */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

static uint64_t *bitmap; /* one bit per page: 1 = allocated */
static uint64_t total_memory;
static uint64_t free_memory;
static uint64_t page_count;
static uint64_t segment_count;
struct memory_area *page_start; /* segment descriptor table */

/* number of whole pages covered by a memory area */
static int n_pages(const struct memory_area *m)
{
	return m->len / PAGE_SIZE;
}

/* Map a global page index back to its physical address.
 * @returns NULL when i is past the last page */
static void *page_at(int i)
{
	int cur_page = 0;
	for (uint64_t idx = 0; idx < segment_count; idx++) {
		// BUG FIX: walk every segment; the original re-read
		// page_start[0] on each iteration
		const struct memory_area *m = &page_start[idx];
		int pages = n_pages(m);
		if (i - cur_page < pages) {
			return (void *)(m->addr + (PAGE_SIZE * (i - cur_page)));
		}
		cur_page += pages;
	}
	return NULL;
}

/* Map a physical page address to its global page index.
 * @returns -1 when the address is outside every segment */
static long page_idx(void *page)
{
	uintptr_t addr = (uintptr_t)page;
	int cur_page = 0;
	for (uint64_t idx = 0; idx < segment_count; idx++) {
		const struct memory_area *m = &page_start[idx];
		// BUG FIX: test against the segment's physical range; the
		// original compared against the descriptor's own address
		// and never advanced past the first segment
		if (addr >= m->addr && addr < m->addr + m->len) {
			return cur_page + ((addr - m->addr) / PAGE_SIZE);
		}
		cur_page += n_pages(m);
	}
	return -1;
}

/* read page i's allocated bit */
static inline bool bitmap_get(int i)
{
	return (bitmap[i / 64] >> (i % 64)) & 1;
}

/* write page i's allocated bit and keep the free-byte counter in sync */
static inline void bitmap_set(int i, bool v)
{
	if (v)
		free_memory -= PAGE_SIZE;
	else
		free_memory += PAGE_SIZE;
	int idx = i / 64;
	// BUG FIX: shift 64-bit values; `1 << (i % 64)` is an int shift
	// and undefined for bit positions >= 32
	bitmap[idx] &= ~(1ULL << (i % 64));
	bitmap[idx] |= ((uint64_t)v << (i % 64));
}
/* single-page convenience wrapper around alloc_phys_pages */
void *alloc_phys_page(void)
{
	return alloc_phys_pages(1);
}
/**
 * Allocate `pages` physically contiguous pages.
 *
 * First-fit linear scan of the page bitmap for a run of clear bits.
 *
 * @param pages - number of contiguous pages wanted (>= 1)
 * @returns the physical address of the first page, or NULL
 */
void *alloc_phys_pages(int pages)
{
	if (pages < 1)
		return NULL;
	int n_contiguous = 0;
	int free_region_start = 0;
	for (uint64_t i = 0; i < page_count; i++) {
		bool free = !bitmap_get(i);
		if (free) {
			if (n_contiguous == 0)
				free_region_start = i; // run starts here
			n_contiguous++;
			if (n_contiguous == pages) {
				// reserve the whole run, then hand it out
				for (int j = 0; j < pages; j++)
					bitmap_set(free_region_start + j, true);
				return page_at(free_region_start);
			}
		} else
			n_contiguous = 0;
	}
	return NULL; // no contiguous run large enough
}
/* single-page convenience wrapper around free_phys_pages */
void free_phys_page(void *ptr)
{
	free_phys_pages(ptr, 1);
}
/**
 * Free `pages` physical pages starting at ptr.
 *
 * @param ptr - the physical address of the first page
 * @param pages - the number of pages to free
 */
void free_phys_pages(void *ptr, int pages)
{
	long idx = page_idx(ptr);
	if (idx == -1)
		return; // not an address we handed out
	// BUG FIX: clear each page's own bit; the original cleared
	// bit (idx + pages) `pages` times and leaked the range
	for (int i = 0; i < pages; i++)
		bitmap_set(idx + i, false);
}
/* Reject memory segments the allocator cannot use.
 * NOTE(review): a segment that merely STARTS below kernel_start is
 * rejected outright, while segment_to_area is able to trim such
 * segments — confirm which behavior is intended. */
static bool segment_invalid(const struct memory_segment *segment)
{
	// starts below the kernel image
	if (segment->addr < kaddr(kernel_start))
		return true;
	// ends before the allocator heap begins
	if (segment->addr + segment->len < memory_start)
		return true;
	// ends before the kernel image ends
	if (segment->addr + segment->len < kaddr(kernel_start))
		return true;
	return false;
}
/* Clip a bootloader segment so it begins at or after the reserved
 * region (memory_start once set, otherwise the end of the kernel). */
static struct memory_area segment_to_area(const struct memory_segment *segment)
{
	uint64_t length = segment->len;
	uintptr_t addr = segment->addr;
	uintptr_t start;
	if (memory_start)
		start = memory_start;
	else
		start = kaddr(kernel_end);
	if (segment->addr < start) {
		// segment begins inside the reserved region: trim the front
		addr = start;
		length -= addr - segment->addr;
	} else {
		addr = segment->addr;
	}
	struct memory_area temp;
	temp.len = length;
	temp.addr = addr;
	return temp;
}
/* round an address UP to the next page boundary */
static uintptr_t page_align(uintptr_t ptr)
{
	return (ptr + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
}
/**
 * Initialize the physical page allocator from the bootloader memory
 * map. Places the page bitmap and the segment descriptor table right
 * after the kernel image, then records every usable segment.
 *
 * @param map - the firmware/bootloader memory map
 */
void physalloc_init(struct memory_map *map)
{
	bitmap = NULL;
	total_memory = 0;
	free_memory = 0;
	page_count = 0;
	page_start = NULL;
	segment_count = 0;
	// first pass: count usable pages and segments
	for (uint32_t i = 0; i < map->entry_count; i++) {
		struct memory_segment *segment = &map->entries[i];
		if (segment_invalid(segment))
			continue;
		struct memory_area temp = segment_to_area(segment);
		page_count += n_pages(&temp);
		segment_count++;
	}
	// BUG FIX: the bitmap needs one BIT per page, i.e. page_count / 8
	// bytes; the original divided by 64 * PAGE_SIZE (uint64 words per
	// page) and could under-size the bitmap by a factor of 8
	long bitmap_pages = ((page_count / 8) / PAGE_SIZE) + 1;
	long bitmap_size = bitmap_pages * PAGE_SIZE;
	bitmap = (uint64_t *)page_align(kaddr(kernel_end));
	long page_area_size = segment_count * sizeof(struct memory_area);
	char *page_area_addr = (char *)bitmap + bitmap_size;
	page_area_addr = (char *)page_align((uintptr_t)page_area_addr);
	memory_start = page_align((uintptr_t)page_area_addr + page_area_size);
	// map and zero both allocator structures
	// NOTE(review): mapaddr can return NULL — results are used
	// unchecked here; consider panicking on failure
	bitmap = mapaddr(bitmap, bitmap_size);
	memset(bitmap, 0, bitmap_size);
	page_area_addr = mapaddr(page_area_addr, page_area_size);
	memset(page_area_addr, 0, page_area_size);
	page_start = (struct memory_area *)page_area_addr;
	// second pass: record the usable segments
	struct memory_area *area = page_start;
	for (uint32_t i = 0; i < map->entry_count; i++) {
		struct memory_segment *segment = &map->entries[i];
		if (segment_invalid(segment))
			continue;
		struct memory_area temp = segment_to_area(segment);
		*area = temp;
		area++;
	}
	total_memory = page_count * PAGE_SIZE;
	page_count -= bitmap_pages; // bitmap pages are not allocatable
	free_memory = page_count * PAGE_SIZE;
	char buf[20];
	printf("\nMEMORY USAGE\n");
	printf("mem total: %s\n", btoa(memory_total(), buf));
	printf("mem free: %s\n", btoa(memory_free(), buf));
	printf("mem used: %s\n\n", btoa(memory_used(), buf));
}
/* allocate one (lazily backed) virtual page */
void *alloc_page(void)
{
	return alloc_pages(1);
}
/* total bytes of managed physical memory */
uint64_t memory_total(void)
{
	return total_memory;
}
/* bytes of managed physical memory currently free */
uint64_t memory_free(void)
{
	return free_memory;
}
/* bytes of managed physical memory currently in use */
uint64_t memory_used(void)
{
	return total_memory - free_memory;
}

44
kernel/memory/physalloc.h Normal file
View file

@ -0,0 +1,44 @@
/**
 * @file physalloc.h
 *
 * @author Freya Murphy <freya@freyacat.org>
 *
 * Physical page allocator functions
 */
#ifndef PHYSALLOC_H_
#define PHYSALLOC_H_
#include <comus/memory.h>
/**
 * Initialize the physical page allocator
 */
void physalloc_init(struct memory_map *map);
/**
 * Allocates a single physical page in memory
 * @returns the physical address of the page
 */
void *alloc_phys_page(void);
/**
 * Allocates count contiguous physical pages in memory
 * @returns the physical address of the first page
 */
void *alloc_phys_pages(int count);
/**
 * Frees a single physical page in memory
 * @param ptr - the physical address of the page
 */
void free_phys_page(void *ptr);
/**
 * Frees count physical pages in memory
 * @param ptr - the physical address of the first page
 * @param count - the number of pages in the list
 */
void free_phys_pages(void *ptr, int count);
#endif /* physalloc.h */

200
kernel/memory/virtalloc.c Normal file
View file

@ -0,0 +1,200 @@
#include <lib.h>
#include <comus/memory.h>
#include "virtalloc.h"
/* a node in the doubly linked list of virtual address ranges */
struct addr_node {
	uintptr_t start;
	uintptr_t end;
	struct addr_node *next;
	struct addr_node *prev;
	uint8_t is_alloc; // if node is storing allocated data
	uint8_t is_used; // if node is in use by virtalloc
};
#define BSS_NODES 64
static struct addr_node bootstrap_nodes[BSS_NODES];
static struct addr_node *alloc_nodes = NULL;
static size_t free_node_start = 0;
static size_t alloc_node_count = 0;
static size_t used_node_count = 0;
static bool is_allocating = false;
static struct addr_node *start_node = NULL;
/* Translate a flat node index into either the static bootstrap array
 * or the dynamically grown spill-over array. */
static struct addr_node *get_node_idx(int idx)
{
	return (idx < BSS_NODES) ? &bootstrap_nodes[idx] :
				   &alloc_nodes[idx - BSS_NODES];
}
/* Copy every in-use node from `old` into `new`, compacting them to the
 * front and patching neighbour pointers so the list now references the
 * relocated copies; the remaining tail of `new` is marked unused. */
static void update_node_ptrs(struct addr_node *old, struct addr_node *new,
			     int old_len, int new_len)
{
	if (old == NULL)
		return;
	int moved = 0;
	for (int i = 0; i < old_len; i++) {
		struct addr_node *src = &old[i];
		if (src && !src->is_used)
			continue;
		struct addr_node *dst = &new[moved];
		moved++;
		*dst = *src;
		if (dst->prev != NULL)
			dst->prev->next = dst;
		if (dst->next != NULL)
			dst->next->prev = dst;
	}
	while (moved < new_len)
		new[moved++].is_used = false;
}
/* Grab an unused node slot, growing the spill-over array when fewer
 * than 16 spare slots remain. The is_allocating flag keeps the malloc
 * below (which itself may need nodes) from re-entering the grow path. */
static struct addr_node *get_node(void)
{
	size_t count = BSS_NODES + alloc_node_count;
	if (!is_allocating && used_node_count + 16 >= count) {
		is_allocating = true;
		// grow geometrically, with a floor of 8 nodes
		int new_alloc = alloc_node_count * 2;
		if (new_alloc < 8)
			new_alloc = 8;
		struct addr_node *new_nodes;
		new_nodes = malloc(sizeof(struct addr_node) * new_alloc);
		if (new_nodes == NULL)
			panic("virt addr alloc nodes is null");
		update_node_ptrs(alloc_nodes, new_nodes, alloc_node_count, new_alloc);
		free(alloc_nodes);
		alloc_nodes = new_nodes;
		alloc_node_count = new_alloc;
		is_allocating = false;
		count = BSS_NODES + alloc_node_count;
	}
	// linear scan for a free slot
	// NOTE(review): free_node_start is never advanced anywhere, so the
	// scan always begins at the same index — confirm whether it was
	// meant to be updated as slots are consumed
	size_t idx = free_node_start;
	for (; idx < count; idx++) {
		struct addr_node *node = get_node_idx(idx);
		if (!node->is_used) {
			used_node_count++;
			return node;
		}
	}
	panic("could not get virtaddr node");
}
/* return a node slot to the pool */
static void free_node(struct addr_node *node)
{
	node->is_used = false;
	used_node_count--;
}
/**
 * Initializes the virtual address allocator with a single free node
 * covering the entire allocatable range.
 */
void virtaddr_init(void)
{
	struct addr_node init = {
		.start = 0x400000, // third page table
		.end = 0x1000000000000, // 48bit memory address max
		.next = NULL,
		.prev = NULL,
		.is_alloc = false,
		.is_used = true,
	};
	memset(bootstrap_nodes, 0, sizeof(bootstrap_nodes));
	bootstrap_nodes[0] = init;
	start_node = &bootstrap_nodes[0];
}
/* Coalesce `node` with preceding neighbours that share its is_alloc
 * state, releasing each absorbed node. Updates start_node if the merge
 * reaches the head of the list. */
static void merge_back(struct addr_node *node)
{
	while (node->prev) {
		if (node->is_alloc != node->prev->is_alloc)
			break;
		struct addr_node *temp = node->prev;
		node->start = temp->start; // absorb the neighbour's range
		node->prev = temp->prev;
		if (temp->prev)
			temp->prev->next = node;
		free_node(temp);
	}
	if (node->prev == NULL) {
		start_node = node;
	}
}
/* Coalesce `node` with following neighbours of the same is_alloc state. */
static void merge_forward(struct addr_node *node)
{
	while (node->next) {
		if (node->is_alloc != node->next->is_alloc)
			break;
		struct addr_node *temp = node->next;
		node->end = temp->end;
		node->next = temp->next;
		if (temp->next)
			temp->next->prev = node;
		free_node(temp);
	}
}
/**
 * Allocate a contiguous range of n_pages virtual pages.
 *
 * First-fit scan of the range list; the chosen free node is split into
 * an allocated front part (`new`) and the remaining free tail.
 *
 * @param n_pages - number of pages wanted (>= 1)
 * @returns the starting virtual address, or NULL
 */
void *virtaddr_alloc(int n_pages)
{
	if (n_pages < 1)
		return NULL;
	long n_length = n_pages * PAGE_SIZE;
	struct addr_node *node = start_node;
	for (; node != NULL; node = node->next) {
		long length = node->end - node->start;
		if (node->is_alloc)
			continue;
		if (length >= n_length) {
			// split: `new` becomes the allocated front of `node`
			struct addr_node *new = get_node();
			if (node->prev != NULL) {
				node->prev->next = new;
			} else {
				start_node = new;
			}
			new->next = node;
			new->prev = node->prev;
			node->prev = new;
			new->start = node->start;
			new->end = new->start + n_length;
			node->start = new->end;
			new->is_alloc = true;
			new->is_used = true;
			new->next = node; // NOTE(review): redundant, set above
			void *mem = (void *)new->start;
			// NOTE(review): merging here joins ADJACENT ALLOCATED
			// nodes into one, which prevents freeing them
			// individually via virtaddr_free — confirm intended
			merge_back(new);
			merge_forward(new);
			return mem;
		}
	}
	return NULL;
}
long virtaddr_free(void *virtaddr)
{
uintptr_t virt = (uintptr_t)virtaddr;
if (virt % PAGE_SIZE)
return -1; // not page aligned, we did not give this out!!!
struct addr_node *node = start_node;
for (; node != NULL; node = node->next) {
if (node->start == virt) {
int length = node->end - node->start;
int pages = length / PAGE_SIZE;
merge_back(node);
merge_forward(node);
return pages;
}
}
return -1;
}

31
kernel/memory/virtalloc.h Normal file
View file

@ -0,0 +1,31 @@
/**
 * @file virtalloc.h
 *
 * @author Freya Murphy <freya@freyacat.org>
 *
 * Virtual address allocator functions
 */
#ifndef VIRTALLOC_H_
#define VIRTALLOC_H_
/**
 * Initializes the virtual address allocator
 */
void virtaddr_init(void);
/**
 * Allocate a contiguous virtual address range of length x pages
 * @param pages - x pages
 * @returns virt addr
 */
void *virtaddr_alloc(int pages);
/**
 * Free the virtual address range from virtaddr_alloc
 * @param virtaddr - the addr to free
 * @returns number of pages used for virtaddr
 */
long virtaddr_free(void *virtaddr);
#endif /* virtalloc.h */

View file

@ -0,0 +1,83 @@
/**
** @file offsets.h
**
** GENERATED AUTOMATICALLY - DO NOT EDIT
**
** This header file contains C Preprocessor macros which expand
** into the byte offsets needed to reach fields within structs
** used in the baseline system. Should those struct declarations
** change, the Offsets program should be modified (if needed),
** recompiled, and re-run to recreate this file.
*/
#ifndef OFFSETS_H_
#define OFFSETS_H_
// Sizes of basic types
#define SZ_char 1
#define SZ_short 2
#define SZ_int 4
#define SZ_long 4
#define SZ_long_long 8
#define SZ_pointer 4
// Sizes of our types
#define SZ_int8_t 1
#define SZ_uint8_t 1
#define SZ_int16_t 2
#define SZ_uint16_t 2
#define SZ_int32_t 4
#define SZ_uint32_t 4
#define SZ_int64_t 8
#define SZ_uint64_t 8
#define SZ_bool_t 1
// context_t structure
#define SZ_CTX 72
#define CTX_ss 0
#define CTX_gs 4
#define CTX_fs 8
#define CTX_es 12
#define CTX_ds 16
#define CTX_edi 20
#define CTX_esi 24
#define CTX_ebp 28
#define CTX_esp 32
#define CTX_ebx 36
#define CTX_edx 40
#define CTX_ecx 44
#define CTX_eax 48
#define CTX_vector 52
#define CTX_code 56
#define CTX_eip 60
#define CTX_cs 64
#define CTX_eflags 68
// section_t structure
#define SZ_SCT 8
#define SCT_length 0
#define SCT_addr 4
// pcb_t structure
#define SZ_PCB 72
#define PCB_context 0
#define PCB_pdir 4
#define PCB_sects 8
#define PCB_next 40
#define PCB_parent 44
#define PCB_wakeup 48
#define PCB_exit_status 52
#define PCB_pid 56
#define PCB_state 60
#define PCB_priority 64
#define PCB_ticks 68
#endif

211
lib/alloc.c Normal file
View file

@ -0,0 +1,211 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define MAGIC 0xBEEFCAFE
/* header placed directly before every allocation */
struct page_header {
	struct page_header *next;
	struct page_header *prev;
	size_t
		node_number; // all headers on the same page alloc have the same node number (so they can be merged)
	size_t
		free; // free space after the node (if its the last node in the alloc block)
	size_t used; // how much space this allocation is using
	uint64_t magic;
};
static const size_t header_len = sizeof(struct page_header);
static struct page_header *start_header = NULL;
static struct page_header *end_header = NULL;
/* Recover the header that sits immediately before a user pointer.
 * Returns NULL when the magic tag does not match (invalid pointer).
 * PERF: do we want to make sure this pointer is paged before reading
 * it??? */
static struct page_header *get_header(void *ptr)
{
	struct page_header *header =
		(struct page_header *)((char *)ptr - header_len);
	return (header->magic == MAGIC) ? header : NULL;
}
static void *alloc_new(size_t size)
{
size_t pages = ((size + header_len) / PAGE_SIZE) + 1;
void *addr = alloc_pages(pages);
void *mem = (char *)addr + header_len;
size_t total = pages * PAGE_SIZE;
size_t free = total - (size + header_len);
if (addr == NULL) {
return NULL;
}
size_t node;
if (end_header != NULL) {
node = end_header->node_number + 1;
} else {
node = 0;
}
struct page_header *header = addr;
header->magic = 0xBEEFCAFE;
header->used = size;
header->free = free;
header->prev = end_header;
header->next = NULL;
header->node_number = node;
if (start_header == NULL) {
start_header = header;
}
if (end_header != NULL) {
end_header->next = header;
} else {
end_header = header;
}
return mem;
}
/**
 * Carve a new allocation of `size` bytes out of the free space that
 * trails the existing header `block` (caller guarantees it fits).
 *
 * @param size - usable bytes requested
 * @param block - header whose trailing free space is consumed
 * @returns pointer to the usable memory
 */
static void *alloc_block(size_t size, struct page_header *block)
{
	struct page_header *header =
		(struct page_header *)((char *)block + block->used + header_len);
	size_t free = block->free - (size + header_len);
	block->free = 0; // all of block's slack now belongs to header
	header->magic = MAGIC;
	header->used = size;
	header->free = free;
	header->prev = block;
	header->next = block->next;
	// BUG FIX: keep the list doubly linked — the old successor must
	// point back at the newly inserted header
	if (header->next != NULL)
		header->next->prev = header;
	block->next = header;
	header->node_number = block->node_number;
	void *mem = (char *)header + header_len;
	return mem;
}
void *malloc(size_t size)
{
struct page_header *header = start_header;
for (; header != NULL; header = header->next) {
size_t free = header->free;
if (free < header_len)
continue;
if (size <=
(free - header_len)) { // we must be able to fit data + header
break;
}
}
if (header != NULL) {
return alloc_block(size, header);
} else {
return alloc_new(size);
}
}
/**
 * Reallocates src to hold dst_len bytes.
 *
 * A zero dst_len frees src; a NULL src behaves like malloc. On failure
 * NULL is returned and src is left untouched.
 *
 * @param src - the pointer to reallocate
 * @param dst_len - the new size in bytes
 * @returns the new address, or NULL
 */
void *realloc(void *src, size_t dst_len)
{
	// realloc(ptr, 0) == free(ptr)
	if (dst_len == 0) {
		free(src);
		return NULL;
	}
	// realloc(NULL, n) == malloc(n)
	if (src == NULL)
		return malloc(dst_len);
	struct page_header *header = get_header(src);
	if (header == NULL)
		return NULL; // not one of our pointers
	size_t src_len = header->used;
	if (src_len == 0)
		return NULL;
	void *dst = malloc(dst_len);
	if (dst == NULL)
		return NULL; // allocation failed, src still valid
	memcpy(dst, src, (dst_len < src_len) ? dst_len : src_len);
	free(src);
	return dst;
}
/**
 * Frees an allocated pointer in memory.
 *
 * Turns the block's used bytes into free space, folds that space into
 * neighbouring headers of the same page run, and returns the whole run
 * to the page allocator once it holds a single unused header.
 *
 * @param ptr - the pointer to free (NULL is a no-op)
 */
void free(void *ptr)
{
	struct page_header *header;
	if (ptr == NULL)
		return;
	header = get_header(ptr);
	if (header == NULL)
		return; // bad magic: not one of our pointers
	header->free += header->used;
	header->used = 0;
	struct page_header *neighbor;
	// merge left: fold our (now free) space into the predecessor; the
	// first absorption may target a still-used header (its trailing
	// free space grows), after which both-used stops the loop
	for (neighbor = header->prev; neighbor != NULL; neighbor = neighbor->prev) {
		if (neighbor->node_number != header->node_number)
			break;
		if (neighbor->used && header->used)
			break;
		neighbor->free += header->free + header_len;
		neighbor->next = header->next;
		header = neighbor;
	}
	// merge right: absorb fully-freed successors in the same run
	for (neighbor = header->next; neighbor != NULL; neighbor = neighbor->next) {
		if (neighbor->node_number != header->node_number)
			break;
		if (neighbor->used)
			break;
		header->free += neighbor->free + header_len;
		header->next = neighbor->next;
	}
	// if this header is now the only one left in its page run and is
	// itself unused, give the whole run back to the page allocator
	if ((header->next == NULL ||
	     header->next->node_number != header->node_number) &&
	    (header->prev == NULL ||
	     header->prev->node_number != header->node_number) &&
	    header->used == 0) {
		if (header->next)
			header->next->prev = header->prev;
		if (header->prev)
			header->prev->next = header->next;
		free_pages(header);
	}
}