memory changes

Freya Murphy 2024-01-28 19:22:09 -05:00
parent cfdc91ad07
commit 6957948f3a
Signed by: freya
GPG key ID: 744AB800E383AE52
16 changed files with 943 additions and 215 deletions

Makefile

@@ -9,7 +9,7 @@ CC=cc
LD=ld
CFLAGS=-std=c2x -ffreestanding -fno-stack-protector -g -Wall -Wextra -pedantic -lgcc -isystem $(INCLUDE_DIR)
CFLAGS+= -DPAGE_SIZE=4096
C_SRC=$(shell find $(SRC_DIR) -type f -name "*.c")
C_OBJ=$(patsubst %.c,$(BUILD_DIR)/%.o,$(C_SRC))
@@ -46,5 +46,5 @@ $(BUILD_DIR)/$(ISO_NAME): $(BUILD_DIR)/$(K_BIN_NAME) grub.cfg
cp $(BUILD_DIR)/$(K_BIN_NAME) $(BUILD_DIR)/iso/boot
grub-mkrescue -o $(BUILD_DIR)/$(ISO_NAME) $(BUILD_DIR)/iso
run:
run: all
qemu-system-x86_64 $(BUILD_DIR)/$(ISO_NAME) -serial stdio

include/memory.h (new file, 107 lines)

@@ -0,0 +1,107 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
struct memory_segment {
uint64_t addr;
uint64_t len;
uint32_t type;
uint32_t reserved;
} __attribute__((packed));
struct memory_map {
uint32_t size;
uint32_t version;
struct memory_segment entries[];
} __attribute__((packed));
/**
* Initializes the system memory allocator
*/
extern void memory_init(struct memory_map *map);
/**
* Disables CPU interrupts so they do not interfere with
* in-progress memory allocations.
*/
extern void memory_lock(void);
/**
* Re-enables CPU interrupts
*/
extern void memory_unlock(void);
/**
* @returns how much memory the system has
*/
extern uint64_t memory_total(void);
/**
* @returns how much memory is free
*/
extern uint64_t memory_free(void);
/**
* @returns how much memory is used
*/
extern uint64_t memory_used(void);
/**
* Allocates a single page in memory.
* @returns the page if allocated or NULL on failure
*/
extern void *alloc_page(void);
/**
* Allocates count contiguous pages in memory
* @param count - the number of contiguous pages to allocate
* @returns the pages if allocated or NULL on failure
*/
extern void *alloc_pages(int count);
/**
* Frees a page allocation made by alloc_page or alloc_pages
* @param page - the pointer returned by the allocator
*/
extern void free_page(void *page);
/**
* Maps at least len bytes of memory starting at
* physical address addr. The returned address may be
* any virtual address.
* @param addr - the physical address to map
* @param len - the minimum length to map
* @returns the virtual address of the mapping, or NULL on failure
*/
extern void *mmap(void *addr, size_t len);
/**
* Unmaps an address previously returned by mmap
* @param addr - the address returned from mmap
*/
extern void unmap(void *addr);
/**
* Allocates size bytes of memory
* @param size - the number of bytes to allocate
* @returns the address allocated or NULL on failure
*/
extern void *malloc(size_t size);
/**
* Reallocates a previously allocated ptr to a new size in bytes.
* If ptr is NULL it will allocate new memory.
* @param ptr - the pointer to reallocate
* @param size - the new size in bytes
* @returns the address allocated or NULL on failure
*/
extern void *realloc(void *ptr, size_t size);
/**
* Frees an allocated pointer in memory
* @param ptr - the pointer to free
*/
extern void free(void *ptr);
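Taken together, the intended flow is: hand the firmware memory map to memory_init() once, then use the page and byte allocators. A minimal usage sketch, assuming the boot_info plumbing from shim.h further down (example_kernel_setup is illustrative, not part of the API):
#include <memory.h>
#include <shim.h>

void example_kernel_setup(struct boot_info *info) {
	memory_init(info->map);                 /* bring up the physical/virtual allocators */

	void *page = alloc_page();              /* one 4 KiB page, or NULL on failure */
	void *vga = mmap((void *)0xB8000, 80 * 25 * 2); /* map a physical range somewhere virtual */

	if (vga)
		unmap(vga);
	if (page)
		free_page(page);
}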

include/memory/physalloc.h (new file)

@@ -0,0 +1,30 @@
#pragma once
#ifndef MEMORY_INTERNAL
#error "Do not include headers from <memory/___.h>, only use <memory.h>"
#endif
/**
* Allocates a single physical page in memory
* @returns the physical address of the page, or NULL on failure
*/
extern void *alloc_phys_page(void);
/**
* Allocates count contiguous physical pages in memory
* @param count - the number of contiguous pages to allocate
* @returns the physical address of the first page, or NULL on failure
*/
extern void *alloc_phys_pages(int count);
/**
* Frees a single physical page in memory
* @param ptr - the physical address of the page
*/
extern void free_phys_page(void *ptr);
/**
* Frees count physical pages in memory
* @param ptr - the physical address of the first page
* @param count - the number of pages to free
*/
extern void free_phys_pages(void *ptr, int count);
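These are the raw frame allocators that the paging and heap layers build on; callers get physical addresses back and still have to map them before use. A short hedged sketch (example_frames is illustrative):
#define MEMORY_INTERNAL
#include <memory/physalloc.h>

void example_frames(void) {
	void *frames = alloc_phys_pages(4);     /* 4 contiguous physical frames */
	if (frames == NULL)
		return;                         /* out of physical memory */
	/* ... map the frames into the address space before touching them ... */
	free_phys_pages(frames, 4);             /* release all 4 frames */
}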

include/memory/virtalloc.h (new file)

@@ -0,0 +1,24 @@
#pragma once
#ifndef MEMORY_INTERNAL
#error "Do not include headers from <memory/___.h>, only use <memory.h>"
#endif
/**
* Initializes the virtual address allocator
*/
void virtaddr_init(void);
/**
* Allocates a contiguous range of virtual address space
* @param pages - the number of pages to reserve
* @returns the starting virtual address, or NULL on failure
*/
void *virtaddr_alloc(int pages);
/**
* Frees a virtual address range returned by virtaddr_alloc
* @param virtaddr - the address to free
* @returns the number of pages that were reserved, or -1 on error
*/
long virtaddr_free(void *virtaddr);
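The virtual-address allocator only reserves address ranges; it never touches page tables, so the caller pairs it with map_page()/unmap_page() from the paging code. A hedged usage sketch (example_virt_range is illustrative):
#define MEMORY_INTERNAL
#include <memory/virtalloc.h>

void example_virt_range(void) {
	void *range = virtaddr_alloc(8);        /* reserve 8 pages of address space */
	if (range == NULL)
		return;
	/* ... map each page of the range to a physical frame, use it, unmap ... */
	long pages = virtaddr_free(range);      /* gives the 8 pages back */
	(void) pages;
}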

include/shim.h (new file, 7 lines)

@@ -0,0 +1,7 @@
#pragma once
#include <memory.h>
struct boot_info {
struct memory_map *map;
};

include/term.h (new file, 3 lines)

@@ -0,0 +1,3 @@
#pragma once

boot.S

@@ -1,4 +1,9 @@
global start
global pml4_list
global init_pdpt
global init_pd
global init_pt
global paging_pt
extern kmain
extern amd64_shim
bits 32
@@ -19,6 +24,18 @@ dd 8
mb_end:
section .bss
align 4096
pml4_list: ; reserve memory for initial 512 pml4 entries
resb 4096
init_pdpt: ; reserve memory for initial 512 pdpt entries
resb 4096
init_pd: ; reserve memory for initial 512 pd entries
resb 4096
init_pt: ; reserve memory for initial 512 pt entries
resb 4096
paging_pt: ; reserve memory for 512 pt entries reserved for the paging code
resb 4096
align 16
stack_start:
resb 16384
@@ -76,7 +93,7 @@ start:
mov ecx, 4096
rep stosd
mov edi, cr3
; FIXME: Update boot.S to point base of paging to pml4_list, see above
mov DWORD [edi], 0x2003 ; Set the uint32_t at the destination index to 0x2003.
add edi, 0x1000 ; Add 0x1000 to the destination index.
mov DWORD [edi], 0x3003 ; Set the uint32_t at the destination index to 0x3003.
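For reference, 0x2003 and 0x3003 are x86 page-table entries: bits 12 and up hold the physical frame address and the low bits hold flags, so 0x2003 points at the table at physical 0x2000 with the present and writable bits set. A hedged C sketch of the same encoding (the macro names are illustrative, not from this repo):
#define PTE_PRESENT   0x001ULL
#define PTE_WRITABLE  0x002ULL
/* build an entry from a 4 KiB-aligned physical address plus flag bits */
#define PTE_ENTRY(phys, flags) (((uint64_t)(phys) & ~0xFFFULL) | (flags))
/* 0x2003 == PTE_ENTRY(0x2000, PTE_PRESENT | PTE_WRITABLE)
 * 0x3003 == PTE_ENTRY(0x3000, PTE_PRESENT | PTE_WRITABLE) */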


@@ -3,8 +3,12 @@
#include <stdint.h>
#include <stddef.h>
#define PREFERRED_VY 4096
#define PREFERRED_B 32
#define INDEX 0x1CE
#define DATA 0x1CF
#define FB_ADDR 0xE0000000
#define PREFERRED_VY 4096
#define PREFERRED_B 32
uint16_t fb_res_x = 0;
uint16_t fb_res_y = 0;
@@ -14,36 +18,36 @@ uint8_t *fb_buffer = NULL;
int fb_init(uint16_t res_x, uint16_t res_y) {
outw(0x1CE, 0x00);
uint16_t i = inw(0x1CF);
outw(INDEX, 0x00);
uint16_t i = inw(DATA);
if (i < 0xB0C0 || i > 0xB0C6) {
return -1;
}
outw(0x1CF, 0xB0C4);
i = inw(0x1CF);
outw(DATA, 0xB0C4);
i = inw(DATA);
/* Disable VBE */
outw(0x1CE, 0x04);
outw(0x1CF, 0x00);
outw(INDEX, 0x04);
outw(DATA, 0x00);
/* Set X resolution to 1024 */
outw(0x1CE, 0x01);
outw(0x1CF, res_x);
outw(INDEX, 0x01);
outw(DATA, res_x);
/* Set Y resolution to 768 */
outw(0x1CE, 0x02);
outw(0x1CF, res_y);
outw(INDEX, 0x02);
outw(DATA, res_y);
/* Set bpp to 32 */
outw(0x1CE, 0x03);
outw(0x1CF, PREFERRED_B);
outw(INDEX, 0x03);
outw(DATA, PREFERRED_B);
/* Set virtual height to PREFERRED_VY */
outw(0x1CE, 0x07);
outw(0x1CF, PREFERRED_VY);
outw(INDEX, 0x07);
outw(DATA, PREFERRED_VY);
/* Re-enable VBE */
outw(0x1CE, 0x04);
outw(0x1CF, 0x41);
outw(INDEX, 0x04);
outw(DATA, 0x41);
uint32_t * text_vid_mem = (uint32_t *)0xA0000;
text_vid_mem[0] = 0xA5ADFACE;
for (uintptr_t fb_offset = 0xE0000000; fb_offset < 0xFF000000; fb_offset += 0x01000000) {
for (uintptr_t fb_offset = FB_ADDR; fb_offset < 0xFF000000; fb_offset += 0x01000000) {
/* Enable the higher memory */
for (uintptr_t i = fb_offset; i < fb_offset + 0x01000000; i += 0x1000) {
// todo ident map fb



@@ -2,6 +2,8 @@ ENTRY(start)
SECTIONS {
. = 1M;
kernel_start = .;
.boot BLOCK(4K) : ALIGN(4K)
{
@@ -22,4 +24,6 @@ SECTIONS {
{
*(.bss)
}
kernel_end = .;
}

mboot.h

@@ -1,5 +1,6 @@
#pragma once
#include "memory.h"
#include <stdint.h>
#define CMDLINE_MAX 32
@@ -10,12 +11,14 @@ struct mboot_tag {
uint32_t size;
union {
char cmdline[CMDLINE_MAX + 1];
struct memory_map *memory_map;
void *rootsdp;
} data;
};
enum mboot_tag_type {
MBOOT_CMDLINE = 0,
MBOOT_MEMORYMAP = 6,
MBOOT_XSDP = 14
};

src/arch/amd64/paging.c (new file, 320 lines)

@@ -0,0 +1,320 @@
#include <cpuid.h>
#include <stdint.h>
#include <lib.h>
#include <memory.h>
#define MEMORY_INTERNAL
#include <memory/physalloc.h>
#include <memory/virtalloc.h>
#include "bindings.h"
#define F_PRESENT 0x001
#define F_WRITEABLE 0x002
#define F_UNPRIVILEGED 0x004
#define F_WRITETHROUGH 0x008
#define F_CACHEDISABLE 0x010
#define F_ACCESSED 0x020
#define F_DIRTY 0x040
#define F_MEGABYTE 0x080
#define F_GLOBAL 0x100
// PAGE MAP LEVEL 4 ENTRY
struct pml4e {
uint64_t flags : 6;
uint64_t : 6;
uint64_t address : 40;
uint64_t : 11;
uint64_t execute_disable : 1;
};
// PAGE DIRECTORY POINTER TABLE ENTRY
struct pdpte {
uint64_t flags : 6;
uint64_t : 1;
uint64_t page_size : 1;
uint64_t : 4;
uint64_t address : 40;
uint64_t : 11;
uint64_t execute_disable : 1;
};
// PAGE DIRECTORY ENTRY
struct pde {
uint64_t flags : 6;
uint64_t : 1;
uint64_t page_size : 1;
uint64_t : 4;
uint64_t address : 40;
uint64_t : 11;
uint64_t execute_disable : 1;
};
// PAGE TABLE ENTRY
struct pte {
uint64_t flags : 9;
uint64_t : 3;
uint64_t address : 40;
uint64_t : 7;
uint64_t protection_key : 4;
uint64_t execute_disable : 1;
};
// bss segment, can write to
extern struct pml4e pml4_list[512];
extern struct pdpte init_pdpt[512];
extern struct pde init_pd[512];
extern struct pte init_pt[512];
extern struct pte paging_pt[512]; // paging_pt must NEVER be used outside this file
// fixed virtual addresses used as windows for reading page tables:
// the structures are not guaranteed to be identity mapped, so map
// them here with map_<type>(phys_addr) before using them
static struct pdpte *pdpt_mapped = (void *) (uintptr_t) 0x1000;
static struct pde *pd_mapped = (void *) (uintptr_t) 0x2000;
static struct pte *pt_mapped = (void *) (uintptr_t) 0x3000;
void *addr_mapped = (void *) (uintptr_t) 0x4000;
static inline void invlpg(void *addr) {
__asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
static void map_pdpt(struct pdpte *phys_addr) {
paging_pt[1].address = (((uint64_t)phys_addr) >> 12);
paging_pt[1].flags = F_PRESENT | F_WRITEABLE;
invlpg(pdpt_mapped);
}
static void map_pd(struct pde *phys_addr) {
paging_pt[2].address = (((uint64_t)phys_addr) >> 12);
paging_pt[2].flags = F_PRESENT | F_WRITEABLE;
invlpg(pd_mapped);
}
static void map_pt(struct pte *phys_addr) {
paging_pt[3].address = (((uint64_t)phys_addr) >> 12);
paging_pt[3].flags = F_PRESENT | F_WRITEABLE;
invlpg(pt_mapped);
}
static void map_addr(void *phys_addr) {
paging_pt[4].address = (((uint64_t)phys_addr) >> 12);
paging_pt[4].flags = F_PRESENT | F_WRITEABLE;
invlpg(addr_mapped);
}
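These helpers work because paging_init() below points init_pd[0] at paging_pt, so (assuming that page directory covers the low 2 MiB of virtual memory) slot k of paging_pt backs virtual address k * 0x1000; rewriting a slot and issuing invlpg retargets one of the fixed windows at 0x1000..0x4000. A minimal sketch of the same pattern, with hypothetical names:
/* Sketch only: retarget a fixed virtual "window" page so it shows an
 * arbitrary physical page. window_pt is assumed to be the page table
 * that backs the low 2 MiB, as paging_pt does here. */
#define WINDOW_SLOT  4
#define WINDOW_VADDR ((void *)(uintptr_t)(WINDOW_SLOT * 0x1000))

static void *window_map(struct pte *window_pt, uint64_t phys) {
	window_pt[WINDOW_SLOT].address = phys >> 12;    /* physical frame number */
	window_pt[WINDOW_SLOT].flags = F_PRESENT | F_WRITEABLE;
	invlpg(WINDOW_VADDR);                           /* drop the stale TLB entry */
	return WINDOW_VADDR;
}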
//static int get_maxphysaddr() {
// uint32_t eax, ebx, ecx, edx;
// __cpuid(0x80000008, eax, ebx, ecx, edx);
// return eax & 0xFF;
//}
//int find_phys_addr(struct pml4e *pml4, void *virt_addr, void **phys_addr) {
// uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
// uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
// uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
// uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
// uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
//
// if (!(pml4[pml4_offset].flags & F_PRESENT))
// return -1;
// struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);
// if (!(pdpt[pdpt_offset].flags & F_PRESENT))
// return -1;
// struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);
// if (!(pd[pd_offset].flags & F_PRESENT))
// return -1;
// struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);
// if (!(pt[pt_offset].flags & F_PRESENT))
// return -1;
// *phys_addr = (void *)((pt[pt_offset].address << 12) + page_offset);
// return 0;
//}
char *curr_alloc = (void *) 0x5000;
int map_page(struct pml4e *pml4, void *virt_addr, void *phys_addr, unsigned int flags) {
uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
//uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
if (!(pml4[pml4_offset].flags & F_PRESENT)) {
void *new_page = alloc_phys_page();
map_addr(new_page);
memset(addr_mapped, 0, 4096);
pml4[pml4_offset].address = ((uint64_t)new_page) >> 12;
}
pml4[pml4_offset].flags = F_PRESENT | flags;
struct pdpte *__pdpt = (struct pdpte *)(uintptr_t)(pml4[pml4_offset].address << 12);
map_pdpt(__pdpt);
if (!(pdpt_mapped[pdpt_offset].flags & F_PRESENT)) {
void *new_page = alloc_phys_page();
map_addr(new_page);
memset(addr_mapped, 0, 4096);
pdpt_mapped[pdpt_offset].address = ((uint64_t)new_page) >> 12;
}
pdpt_mapped[pdpt_offset].flags = F_PRESENT | flags;
struct pde *__pd = (struct pde *)(uintptr_t)(pdpt_mapped[pdpt_offset].address << 12);
map_pd(__pd);
if (!(pd_mapped[pd_offset].flags & F_PRESENT)) {
void *new_page = alloc_phys_page();
map_addr(new_page);
memset(addr_mapped, 0, 4096);
pd_mapped[pd_offset].address = ((uint64_t)new_page) >> 12;
}
pd_mapped[pd_offset].flags = F_PRESENT | flags;
struct pte *__pt = (struct pte *)(uintptr_t)(pd_mapped[pd_offset].address << 12);
map_pt(__pt);
pt_mapped[pt_offset].flags = F_PRESENT | flags;
pt_mapped[pt_offset].address = (((uint64_t)phys_addr) >> 12);
invlpg(virt_addr);
return 0;
}
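To make the shift arithmetic above concrete, here is the split for 0x80000000 (2 GiB, the address the old shim experimented with):
/* Worked example of the index split used by map_page/unmap_page:
 *   virt_addr   = 0x80000000
 *   pml4_offset = 0x80000000 >> 39           = 0
 *   pdpt_offset = (0x80000000 >> 30) & 0x1FF = 2
 *   pd_offset   = (0x80000000 >> 21) & 0x1FF = 0
 *   pt_offset   = (0x80000000 >> 12) & 0x1FF = 0
 * so the mapping lives at pml4[0] -> pdpt[2] -> pd[0] -> pt[0]. */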
int unmap_page(struct pml4e *pml4, void *virt_addr) {
uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
//uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
if (!(pml4[pml4_offset].flags & F_PRESENT))
return -1;
struct pdpte *__pdpt = (struct pdpte *)(uintptr_t)(pml4[pml4_offset].address << 12);
map_pdpt(__pdpt);
if (!(pdpt_mapped[pdpt_offset].flags & F_PRESENT))
return -1;
struct pde *__pd = (struct pde *)(uintptr_t)(pdpt_mapped[pdpt_offset].address << 12);
map_pd(__pd);
if (!(pd_mapped[pd_offset].flags & F_PRESENT))
return -1;
struct pte *__pt = (struct pte *)(uintptr_t)(pd_mapped[pd_offset].address << 12);
map_pt(__pt);
if (!(pt_mapped[pt_offset].flags & F_PRESENT))
return -1;
pt_mapped[pt_offset].flags = 0;
int i = 0;
for(; i < 512; i++) {
if (pt_mapped[i].flags & F_PRESENT)
break;
}
if (i == 512)
goto done;
pd_mapped[pd_offset].flags = 0;
for(i = 0; i < 512; i++) {
if (pd_mapped[i].flags & F_PRESENT)
break;
}
if (i == 512)
goto done;
pdpt_mapped[pdpt_offset].flags = 0;
for(i = 0; i < 512; i++) {
if(pdpt_mapped[i].flags & F_PRESENT)
break;
}
if (i == 512)
goto done;
pml4[pml4_offset].flags = 0;
//TODO: Return memory used for page structures
done:
invlpg(virt_addr);
return 0;
}
void paging_init(void) {
memset(pml4_list, 0, 4096);
pml4_list[0].flags = F_PRESENT | F_WRITEABLE;
pml4_list[0].address = (uint64_t)init_pdpt >> 12;
memset(init_pdpt, 0, 4096);
init_pdpt[0].flags = F_PRESENT | F_WRITEABLE;
init_pdpt[0].address = (uint64_t)init_pd >> 12;
memset(init_pd, 0, 4096);
init_pd[0].flags = F_PRESENT | F_WRITEABLE;
init_pd[0].address = (uint64_t)paging_pt >> 12;
init_pd[1].flags = F_PRESENT | F_WRITEABLE;
init_pd[1].address = (uint64_t)init_pt >> 12;
memset(init_pt, 0, 4096);
memset(paging_pt, 0, 4096);
}
static inline void *page_align(void *addr) {
uintptr_t a = (uintptr_t) addr;
a &= ~((uintptr_t) PAGE_SIZE - 1); // round down to a page boundary
return (void *) a;
}
void *mmap(void *addr, size_t len) {
len += (uintptr_t)addr % PAGE_SIZE;
int pages = (len + PAGE_SIZE - 1) / PAGE_SIZE;
void *virt = virtaddr_alloc(pages);
if (virt == NULL)
return NULL;
void *phys = page_align(addr);
for (long i = 0; i < pages; i++) {
void *virt_temp = (char *)virt + (i * PAGE_SIZE);
void *phys_temp = (char *)phys + (i * PAGE_SIZE);
map_page(pml4_list, virt_temp, phys_temp, F_WRITEABLE);
}
return (char *) virt + ((uintptr_t) addr % PAGE_SIZE); // same offset, now at the new virtual mapping
}
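A quick worked example of the offset handling above, assuming a request for 80 bytes at physical 0xB8002:
/* mmap((void *)0xB8002, 80), with PAGE_SIZE = 4096:
 *   offset into the page : 0xB8002 % 4096 = 2
 *   adjusted len         : 80 + 2 = 82          -> pages = 1
 *   physical base mapped : page_align(0xB8002)  =  0xB8000
 *   value returned       : the allocated virtual page + 2 */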
void unmap(void *addr) {
long pages = virtaddr_free(addr);
for (long i = 0; i < pages; i++) {
void *virt = (char *)addr + (i * PAGE_SIZE);
unmap_page(pml4_list, virt);
}
}
void *alloc_pages(int count) {
void *virt = virtaddr_alloc(count);
if (virt == NULL)
return NULL;
void *phys = alloc_phys_pages(count);
if (phys == NULL) {
virtaddr_free(virt);
return NULL;
}
for (int i = 0; i < count; i++) {
void *virt_temp = (char *)virt + (i * PAGE_SIZE);
void *phys_temp = (char *)phys + (i * PAGE_SIZE);
map_page(pml4_list, virt_temp, phys_temp, F_WRITEABLE);
}
return virt;
}
void free_page(void *virt) {
long pages = virtaddr_free(virt);
if (pages < 1)
return;
for (long i = 0; i < pages; i++) {
void *virt_temp = (char *)virt + (i * PAGE_SIZE);
unmap_page(pml4_list, virt_temp);
}
}
void memory_lock(void) {
cli();
}
void memory_unlock(void) {
sti();
}


@@ -1,203 +1,36 @@
#include <stdint.h>
#include <serial.h>
#include <cpuid.h>
#include <lib.h>
#include <shim.h>
#include <memory.h>
#define F_PRESENT 0x001
#define F_WRITEABLE 0x002
#define F_UNPRIVILEGED 0x004
#define F_WRITETHROUGH 0x008
#define F_CACHEDISABLE 0x010
#define F_ACCESSED 0x020
#define F_DIRTY 0x040
#define F_MEGABYTE 0x080
#define F_GLOBAL 0x100
// PAGE MAP LEVEL 4 ENTRY
struct pml4e {
uint64_t flags : 6;
uint64_t : 6;
uint64_t address : 40;
uint64_t : 11;
uint64_t execute_disable : 1;
};
// PAGE DIRECTORY POINTER TABLE ENTRY
struct pdpte {
uint64_t flags : 6;
uint64_t : 1;
uint64_t page_size : 1;
uint64_t : 4;
uint64_t address : 40;
uint64_t : 11;
uint64_t execute_disable : 1;
};
// PAGE DIRECTORY ENTRY
struct pde {
uint64_t flags : 6;
uint64_t : 1;
uint64_t page_size : 1;
uint64_t : 4;
uint64_t address : 40;
uint64_t : 11;
uint64_t execute_disable : 1;
};
// PAGE TABLE ENTRY
struct pte {
uint64_t flags : 9;
uint64_t : 3;
uint64_t address : 40;
uint64_t : 7;
uint64_t protection_key : 4;
uint64_t execute_disable : 1;
};
static inline void invlpg(unsigned long addr) {
__asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
static int get_maxphysaddr() {
uint32_t eax, ebx, ecx, edx;
__cpuid(0x80000008, eax, ebx, ecx, edx);
return eax & 0xFF;
}
int find_phys_addr(struct pml4e *pml4, void *virt_addr, void **phys_addr) {
uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
if (!pml4[pml4_offset].flags & F_PRESENT)
return -1;
struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);
if (!pdpt[pdpt_offset].flags & F_PRESENT)
return -1;
struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);
if (!pd[pd_offset].flags & F_PRESENT)
return -1;
struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);
if (!pt[pt_offset].flags & F_PRESENT)
return -1;
*phys_addr = (void *)((pt[pt_offset].address << 12) + page_offset);
return 0;
}
char *curr_alloc = 0x5000;
void *alloc_phys_page() {
void *ret = curr_alloc;
curr_alloc += 4096;
return ret;
}
int map_page(struct pml4e *pml4, void *virt_addr, void *phys_addr, unsigned int flags) {
uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
if (!pml4[pml4_offset].flags & F_PRESENT) {
void *new_page = alloc_phys_page();
memset(new_page, 0, 4096);
pml4[pml4_offset].address = ((uint64_t)new_page) >> 12;
pml4[pml4_offset].flags = F_PRESENT | flags;
}
struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);
if (!pdpt[pdpt_offset].flags & F_PRESENT) {
void *new_page = alloc_phys_page();
memset(new_page, 0, 4096);
pdpt[pdpt_offset].address = ((uint64_t)new_page) >> 12;
pdpt[pdpt_offset].flags = F_PRESENT | flags;
}
struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);
if (!pd[pd_offset].flags & F_PRESENT) {
void *new_page = alloc_phys_page();
memset(new_page, 0, 4096);
pd[pd_offset].address = ((uint64_t)new_page) >> 12;
pd[pd_offset].flags = F_PRESENT | flags;
}
struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);
if (!pt[pt_offset].flags & F_PRESENT) {
pt[pt_offset].flags |= F_PRESENT | flags;
}
pt[pt_offset].address = (((uint64_t)phys_addr) >> 12);
invlpg(virt_addr);
return 0;
}
int unmap_page(struct pml4e *pml4, void *virt_addr) {
uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
if (!pml4[pml4_offset].flags & F_PRESENT)
return -1;
struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);
if (!pdpt[pdpt_offset].flags & F_PRESENT)
return -1;
struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);
if (!pd[pd_offset].flags & F_PRESENT)
return -1;
struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);
if (!pt[pt_offset].flags & F_PRESENT)
return -1;
pt[pt_offset].flags = 0;
int i = 0;
for(; i < 512; i++) {
if (pt[i].flags & F_PRESENT)
break;
}
if (i == 512)
goto done;
pd[pd_offset].flags = 0;
for(i = 0; i < 512; i++) {
if (pd[i].flags & F_PRESENT)
break;
}
if (i == 512)
goto done;
pdpt[pdpt_offset].flags = 0;
for(i = 0; i < 512; i++) {
if(pdpt[i].flags & F_PRESENT)
break;
}
if (i == 512)
goto done;
pml4[pml4_offset].flags = 0;
//TODO: Return memory used for page structures
done:
invlpg(virt_addr);
return 0;
}
#include "mboot.h"
static struct boot_info boot_info;
// entry point for amd64
void* amd64_shim(void *boot_info) {
struct pml4e *pml4 = (struct pml4e *)0x1000;
struct pdpte *pdpt = (struct pdpte *)0x2000;
struct pde *pd = (struct pde *)0x3000;
struct pte *pt = (struct pte *)0x4000;
void* amd64_shim(void *mboot_data_ptr) {
//struct pml4e *pml4 = (struct pml4e *)0x1000;
//struct pdpte *pdpt = (struct pdpte *)0x2000;
//struct pde *pd = (struct pde *)0x3000;
//struct pte *pt = (struct pte *)0x4000;
//pd[1].flags = F_PRESENT | F_WRITEABLE;
//pd[1].address = ((uint64_t)pt) >> 12;
map_page(pml4, 0x80000000, 0xB8002, F_WRITEABLE);
//map_page(pml4, (void *)0x80000000, (void *)0xB8002, F_WRITEABLE);
//__asm("invlpg 0x200000");
void *ret;
find_phys_addr(pml4, 0x80000000, &ret);
return ret;
//void *ret;
//find_phys_addr(pml4, (void *)0x80000000, &ret);
//return ret;
struct mboot_info mboot_info;
mboot_info = mboot_load_info(mboot_data_ptr);
struct mboot_tag *map_tag;
map_tag = mboot_get_tag(&mboot_info, MBOOT_MEMORYMAP);
boot_info.map = map_tag->data.memory_map;
return &boot_info;
}


@@ -7,6 +7,6 @@ void kmain(void *info) {
*(char*)0xB8000 = 'c';
//*(char*)(0xB8002 + 0x20'0000) = 'd';
itoa((long)info, buf, 16);
fb_init(1024, 768);
//fb_init(1024, 768);
serial_out_str(buf);
}

src/memory/physalloc.c (new file, 250 lines)

@@ -0,0 +1,250 @@
#include <memory.h>
#include <stdint.h>
#include <lib.h>
#include <stddef.h>
#define MEMORY_INTERNAL
#include <memory/physalloc.h>
#include <memory/virtalloc.h>
extern char kernel_start[]; // linker-script symbols: use their addresses, not their values
extern char kernel_end[];
// the page bitmap and the memory_area list live between kernel_end and memory_start
static uintptr_t memory_start = 0;
typedef unsigned char page[4096];
struct memory_area {
uint64_t len;
uintptr_t addr;
};
static uint64_t *bitmap;
static uint64_t total_memory;
static uint64_t free_memory;
static uint64_t page_count;
static uint64_t page_free_start;
static struct memory_area *page_start;
static int area_count;
static int n_pages(const struct memory_area *m) {
return m->len / PAGE_SIZE;
}
static void *page_at(int i) {
int cur_page = 0;
struct memory_area *m = page_start;
for (int k = 0; k < area_count; k++, m++) {
int pages = n_pages(m);
if (i - cur_page < pages) {
return (void *) (m->addr + (PAGE_SIZE * (i - cur_page)));
}
cur_page += pages;
}
return NULL;
}
static long page_idx(page p) {
uintptr_t addr = (uintptr_t) p;
int cur_page = 0;
struct memory_area *m = page_start;
for (int k = 0; k < area_count; k++, m++) {
if (m->addr + m->len > addr) {
return cur_page + ((addr - m->addr) / PAGE_SIZE);
}
cur_page += n_pages(m);
}
return -1;
}
static inline bool bitmap_get(int i) {
return (bitmap[i / 64] >> i % 64) & 1;
}
static inline void bitmap_set(int i, bool v) {
int idx = i / 64;
bitmap[idx] &= ~(1ULL << (i % 64));
bitmap[idx] |= ((uint64_t) v << (i % 64));
}
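For concreteness, the bitmap packs one page per bit into 64-bit words; for page index 70:
/* bitmap_set(70, true):
 *   word index : 70 / 64 = 1   (second uint64_t in the bitmap)
 *   bit  index : 70 % 64 = 6
 * so the call sets bit 6 of bitmap[1]; bitmap_get(70) reads it back. */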
void *alloc_phys_page(void) {
return alloc_phys_pages(1);
}
void *alloc_phys_pages(int pages) {
if (pages < 1) return NULL;
int n_contiguous = 0;
int free_region_start = 0;
bool first = true;
for (uint64_t i = page_free_start; i < page_count; i++) {
bool free = !bitmap_get(i);
if (free && first) {
first = false;
page_free_start = i; // remember the first free page for the next scan
}
if (free) {
if (n_contiguous == 0) free_region_start = i;
n_contiguous++;
if (n_contiguous == pages) {
for (int j = 0; j < pages; j++)
bitmap_set(free_region_start + j, true);
free_memory -= (uint64_t) pages * PAGE_SIZE;
return page_at(free_region_start);
}
} else n_contiguous = 0;
}
return NULL;
}
void free_phys_page(void *ptr) {
free_phys_pages(ptr, 1);
}
void free_phys_pages(void *ptr, int pages) {
long idx = page_idx(ptr);
if (idx == -1) return;
if ((unsigned) idx < page_free_start) page_free_start = idx;
for (int i = 0; i < pages; i++)
bitmap_set(idx + i, false);
free_memory += (uint64_t) pages * PAGE_SIZE;
}
static bool segment_invalid(const struct memory_segment *segment) {
if (segment->type != 1)
return true; // not usable RAM
if (segment->addr + segment->len <= (uintptr_t) kernel_end)
return true; // lies entirely below the end of the kernel image
if (memory_start && segment->addr + segment->len <= memory_start)
return true; // lies entirely below the managed region
return false;
}
static struct memory_area segment_to_area(const struct memory_segment *segment) {
uint64_t length = segment->len;
uintptr_t addr = segment->addr;
uintptr_t start;
if (memory_start)
start = memory_start;
else
start = (uintptr_t) kernel_end;
if (segment->addr < start) {
addr = start;
length -= addr - segment->addr;
} else {
addr = segment->addr;
}
struct memory_area temp;
temp.len = length;
temp.addr = addr;
return temp;
}
static uintptr_t page_align(uintptr_t ptr) {
return (ptr + PAGE_SIZE - 1) & ~((uintptr_t) PAGE_SIZE - 1); // round up to a page boundary
}
void memory_init(struct memory_map *map) {
memory_lock();
virtaddr_init();
bitmap = NULL;
total_memory = 0;
free_memory = 0;
page_count = 0;
page_free_start = 0;
page_start = NULL;
uintptr_t end = (uintptr_t) map;
end += map->size;
struct memory_segment *segment = &map->entries[0];
int segment_count = 0;
for(; (uintptr_t) segment < end; segment++) {
if (segment_invalid(segment))
continue;
struct memory_area temp = segment_to_area(segment);
page_count += n_pages(&temp);
segment_count++;
}
long bitmap_pages = page_count / 8 / PAGE_SIZE + 1; // one bit per page, rounded up to whole pages
total_memory = page_count * PAGE_SIZE;
free_memory = page_count * PAGE_SIZE;
//HACK: terrible hack bad bad bad bad
long bitmap_size = bitmap_pages * PAGE_SIZE;
bitmap = (uint64_t *) page_at(page_count - bitmap_pages);
long page_area_size = segment_count * sizeof(struct memory_area);
char *page_area_addr = (char *)bitmap + bitmap_size;
bitmap = mmap(bitmap, bitmap_size);
memset(bitmap, 0, bitmap_size);
memory_start = page_align((uintptr_t) kernel_end + bitmap_size + page_area_size);
page_area_addr = mmap(page_area_addr, page_area_size);
page_start = (struct memory_area *) page_area_addr;
struct memory_area *area = page_start;
segment = &map->entries[0];
for(; (uintptr_t) segment < end; segment++) {
if (segment_invalid(segment))
continue;
struct memory_area temp = segment_to_area(segment);
*area = temp;
area++;
}
area_count = segment_count;
page_count -= bitmap_pages;
memory_unlock();
}
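As a rough sizing example, assuming 1 GiB of usable RAM and the one-bit-per-page bitmap above:
/* Worked sizing sketch for 1 GiB of usable RAM:
 *   page_count   = 1 GiB / 4 KiB       = 262144 pages
 *   bitmap bytes = 262144 / 8          = 32 KiB
 *   bitmap_pages = 32 KiB / 4 KiB (+1) = 9 pages reserved for the bitmap
 * the memory_area list is then placed directly after those bitmap pages. */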
void *alloc_page(void) {
return alloc_pages(1);
}
uint64_t memory_total(void) {
return total_memory;
}
uint64_t memory_free(void) {
return free_memory;
}
uint64_t memory_used(void) {
return total_memory - free_memory;
}
// stubs
// simon do these ik you want to
// :3
void *malloc(size_t size) {
//TODO: implement
(void)size;
return NULL;
}
void *realloc(void *ptr, size_t size) {
//TODO: implement
(void)ptr;
(void)size;
return NULL;
}
void free(void *ptr) {
//TODO: implement
(void)ptr;
}

src/memory/virtalloc.c (new file, 126 lines)

@@ -0,0 +1,126 @@
#include <stdint.h>
#include <stddef.h>
#include <memory.h>
#define MEMORY_INTERNAL
#include <memory/virtalloc.h>
struct addr_node {
uintptr_t start;
uintptr_t end;
struct addr_node *next;
struct addr_node *prev;
uint8_t is_alloc;
uint8_t is_bss;
};
#define BOOTSTRAP_BSS_NODES 16
static uint8_t bss_nodes = 0;
static struct addr_node nodes[BOOTSTRAP_BSS_NODES];
static struct addr_node *start_node;
static struct addr_node *alloc_node(void) {
if (bss_nodes >= BOOTSTRAP_BSS_NODES) {
//FIXME: alloc on heap
} else {
struct addr_node *node = &nodes[bss_nodes];
bss_nodes += 1;
node->is_bss = true; // bootstrap nodes live in the static array and must never be free()d
return node;
}
return NULL;
}
static void free_node(struct addr_node *node) {
if (!node->is_bss)
free(node);
}
void virtaddr_init(void) {
struct addr_node init = {
.start = 0,
.end = UINT64_MAX,
.next = NULL,
.prev = NULL,
.is_alloc = false,
.is_bss = true,
};
nodes[0] = init;
start_node = &nodes[0];
bss_nodes++;
}
void *virtaddr_alloc(int n_pages) {
long n_length = n_pages * PAGE_SIZE;
struct addr_node *node = start_node;
for (; node != NULL ; node = node->next) {
if (node->is_alloc)
continue;
uint64_t length = node->end - node->start;
if (length >= (uint64_t) n_length) {
struct addr_node *new = alloc_node();
if (new == NULL)
return NULL;
// insert the new (allocated) node directly before the free node being split
new->prev = node->prev;
new->next = node;
if (node->prev != NULL)
node->prev->next = new;
else
start_node = new;
node->prev = new;
new->start = node->start;
new->end = new->start + n_length;
node->start = new->end;
new->is_alloc = true;
return (void *) new->start;
}
}
return NULL;
}
static void merge_back(struct addr_node *node) {
while (node->prev && !node->prev->is_alloc) {
struct addr_node *temp = node->prev;
node->start = temp->start;
node->prev = temp->prev;
free_node(temp);
}
if (node->prev == NULL) {
start_node = node;
} else {
node->prev->next = node;
}
}
static void merge_forward(struct addr_node *node) {
while (node->next && !node->next->is_alloc) {
struct addr_node *temp = node->next;
node->end = temp->end;
node->next = temp->next;
free_node(temp);
}
if (node->next != NULL)
node->next->prev = node;
}
long virtaddr_free(void *virtaddr) {
uintptr_t virt = (uintptr_t) virtaddr;
if (virt % PAGE_SIZE)
return -1; // not page aligned, we did not give this out!!!
struct addr_node *node = start_node;
for (; node != NULL; node = node->next) {
if (node->start == virt && node->is_alloc) {
long length = node->end - node->start;
long pages = length / PAGE_SIZE;
node->is_alloc = false; // mark the range free before merging with neighbors
merge_back(node);
merge_forward(node);
return pages;
}
}
return -1;
}
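For intuition, the allocator keeps one sorted list of address ranges: allocations split a node off the front of a free range and frees merge it back with free neighbors. A hedged trace with illustrative addresses (one page = 0x1000, BASE is wherever the initial free node starts):
/* a = virtaddr_alloc(2); b = virtaddr_alloc(1); virtaddr_free(a);
 *   initial : [BASE .. MAX) free
 *   after a : [BASE .. +0x2000) alloc | [+0x2000 .. MAX) free
 *   after b : [BASE .. +0x2000) alloc | [+0x2000 .. +0x3000) alloc | [+0x3000 .. MAX) free
 *   free(a) : [BASE .. +0x2000) free  | [+0x2000 .. +0x3000) alloc | [+0x3000 .. MAX) free
 * freeing b afterwards merges everything back into a single free node. */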