From e76cbbcb327e0966fff47a645cdbc26e27a4bc8a Mon Sep 17 00:00:00 2001 From: Freya Murphy Date: Tue, 30 Jan 2024 12:16:22 -0500 Subject: [PATCH] make paging more stable --- src/arch/amd64/fb.c | 63 +++-- src/arch/amd64/mboot.c | 7 +- src/arch/amd64/mboot.h | 1 + src/arch/amd64/paging.c | 579 +++++++++++++++++++++++++++++----------- src/arch/amd64/paging.h | 3 +- src/memory/physalloc.c | 6 +- 6 files changed, 474 insertions(+), 185 deletions(-) diff --git a/src/arch/amd64/fb.c b/src/arch/amd64/fb.c index 07847e5..0cd5b4f 100644 --- a/src/arch/amd64/fb.c +++ b/src/arch/amd64/fb.c @@ -1,11 +1,16 @@ -#include "bindings.h" - +#include +#include +#include #include #include +#include "bindings.h" + #define INDEX 0x1CE #define DATA 0x1CF #define FB_ADDR 0xE0000000 +#define FB_MAX 0xFF000000 +#define FB_LEN (FB_MAX - FB_ADDR) #define PREFERRED_VY 4096 #define PREFERRED_B 32 @@ -44,29 +49,35 @@ int fb_init(uint16_t res_x, uint16_t res_y) { outw(INDEX, 0x04); outw(DATA, 0x41); - uint32_t * text_vid_mem = (uint32_t *)0xA0000; - text_vid_mem[0] = 0xA5ADFACE; - - for (uintptr_t fb_offset = FB_ADDR; fb_offset < 0xFF000000; fb_offset += 0x01000000) { - /* Enable the higher memory */ - for (uintptr_t i = fb_offset; i < fb_offset; i += 0x1000) { - // todo ident map fb - } - - /* Go find it */ - for (uintptr_t x = fb_offset; x < fb_offset + 0xFF0000; x += 0x1000) { - if (((uintptr_t *)x)[0] == 0xA5ADFACE) { - fb_buffer = (uint8_t *) x; - goto mem_found; - } - } - } - -mem_found: - - fb_res_x = res_x; - fb_res_y = res_y; - fb_res_b = PREFERRED_B; - +// uint32_t * text_vid_mem = (uint32_t *)0xA0000; +// text_vid_mem[0] = 0xA5ADFACE; +// +// void *temp = mmap((void *) FB_ADDR, 0xFF1000); +// +// for (uintptr_t fb_offset = FB_ADDR; fb_offset < FB_MAX; fb_offset += 0x01000000) { +// /* Enable the higher memory */ +// remap(temp, (void *)fb_offset, 0xFF1000); +// +// /* Go find it */ +// for (uintptr_t offset = 0; offset < 0xFF0000; offset += 0x1000) { +// uintptr_t x = (uintptr_t)temp + offset; +// if (((uintptr_t *)x)[0] == 0xA5ADFACE) { +// fb_buffer = (uint8_t *) (fb_offset + offset); +// goto mem_found; +// } +// } +// } +// +// unmap(temp); +// +//mem_found: +// +// fb_res_x = res_x; +// fb_res_y = res_y; +// fb_res_b = PREFERRED_B; +// +// fb_buffer = mmap(fb_buffer, 0xFF0000); +// memset(fb_buffer, 7, 0xFF0000); +// return 0; } diff --git a/src/arch/amd64/mboot.c b/src/arch/amd64/mboot.c index fbaaa92..9d06fd0 100644 --- a/src/arch/amd64/mboot.c +++ b/src/arch/amd64/mboot.c @@ -35,13 +35,18 @@ static uint32_t *read_tag(struct mboot_info *info, uint32_t *data) { case MBOOT_MEMORYMAP: read_memorymap(&tag, tag.size, data + 2); break; + case MBOOT_SYMBOLS: + // TODO: + goto done; case MBOOT_XSDP: read_xsdp(&tag, (char *) (data + 2)); break; default: - break; + goto done; } +done: + info->tags[tag.type] = tag; if(tag.size % 8 != 0) { diff --git a/src/arch/amd64/mboot.h b/src/arch/amd64/mboot.h index 6e407d5..3be3054 100644 --- a/src/arch/amd64/mboot.h +++ b/src/arch/amd64/mboot.h @@ -19,6 +19,7 @@ struct mboot_tag { enum mboot_tag_type { MBOOT_CMDLINE = 0, MBOOT_MEMORYMAP = 6, + MBOOT_SYMBOLS = 9, MBOOT_XSDP = 14 }; diff --git a/src/arch/amd64/paging.c b/src/arch/amd64/paging.c index 836f602..453af9d 100644 --- a/src/arch/amd64/paging.c +++ b/src/arch/amd64/paging.c @@ -19,7 +19,7 @@ struct pml4e { uint64_t execute_disable : 1; }; -// PAGE DIRECTORY POINTER TABLE ENTRY +// PAGE DIRECTORY POINTER TABLE ENTRY struct pdpte { uint64_t flags : 6; uint64_t : 1; @@ -61,170 +61,435 @@ extern struct pte paging_pt[512]; 
// paging_pt should NEVER be outside of this f
 // paged address to read page tables
 // the structures are not guaranteed to be ident mapped
 // map them here with map_(phys_addr) before using structures
+void *addr_mapped = (void *) (uintptr_t) 0x204000;
+static struct pml4e *pml4_mapped = (void *) (uintptr_t) 0x200000;
 static struct pdpte *pdpt_mapped = (void *) (uintptr_t) 0x201000;
-static struct pdpte *pd_mapped = (void *) (uintptr_t) 0x202000;
-static struct pdpte *pt_mapped = (void *) (uintptr_t) 0x203000;
-void *addr_mapped = (void *) (uintptr_t) 0x204000;
+static struct pde *pd_mapped = (void *) (uintptr_t) 0x202000;
+static struct pte *pt_mapped = (void *) (uintptr_t) 0x203000;

-static inline void invlpg(void *addr) {
+static inline void
+invlpg(void *addr)
+{
 	__asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 }

-static void map_pdpt(struct pdpte *phys_addr) {
-	paging_pt[1].address = (((uint64_t)phys_addr) >> 12);
-	paging_pt[1].flags = F_PRESENT | F_WRITEABLE;
-	invlpg(pdpt_mapped);
-}
-
-static void map_pd(struct pde *phys_addr) {
-	paging_pt[2].address = (((uint64_t)phys_addr) >> 12);
-	paging_pt[2].flags = F_PRESENT | F_WRITEABLE;
-	invlpg(pd_mapped);
-}
-
-static void map_pt(struct pte *phys_addr) {
-	paging_pt[3].address = (((uint64_t)phys_addr) >> 12);
-	paging_pt[3].flags = F_PRESENT | F_WRITEABLE;
-	invlpg(pt_mapped);
-}
-
-static void map_addr(void *phys_addr) {
-	paging_pt[4].address = (((uint64_t)phys_addr) >> 12);
-	paging_pt[4].flags = F_PRESENT | F_WRITEABLE;
+static void
+load_addr(void *phys_addr)
+{
+	static struct pte *pt = &paging_pt[4];
+	pt->address = (uint64_t)phys_addr >> 12;
+	pt->flags = F_PRESENT | F_WRITEABLE;
 	invlpg(addr_mapped);
 }

-//static int get_maxphysaddr() {
-//	uint32_t eax, ebx, ecx, edx;
-//	__cpuid(0x80000008, eax, ebx, ecx, edx);
-//	return eax & 0xFF;
-//}
+static void
+load_pml4(
+	void *phys
+) {
+	static struct pte *pt = &paging_pt[0];
+	pt->address = (uint64_t)phys >> 12;
+	pt->flags = F_PRESENT | F_WRITEABLE;
+	invlpg(pml4_mapped);
+}

-//int find_phys_addr(struct pml4e *pml4, void *virt_addr, void **phys_addr) {
-//	uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
-//	uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
-//	uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF;
-//	uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF;
-//	uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF;
-//
-//	if (!(pml4[pml4_offset].flags & F_PRESENT))
-//		return -1;
-//	struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);
-//	if (!(pdpt[pdpt_offset].flags & F_PRESENT))
-//		return -1;
-//	struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);
-//	if (!(pd[pd_offset].flags & F_PRESENT))
-//		return -1;
-//	struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);
-//	if (!(pt[pt_offset].flags & F_PRESENT))
-//		return -1;
-//	*phys_addr = (void *)((pt[pt_offset].address << 12) + page_offset);
-//	return 0;
-//}
+static void
+load_pdpt(
+	void *phys
+) {
+	static struct pte *pt = &paging_pt[1];
+	pt->address = (uint64_t)phys >> 12;
+	pt->flags = F_PRESENT | F_WRITEABLE;
+	invlpg(pdpt_mapped);
+}

-char *curr_alloc = (void *) 0x5000;
+static void
+load_pd(
+	void *phys
+) {
+	static struct pte *pt = &paging_pt[2];
+	pt->address = (uint64_t)phys >> 12;
+	pt->flags = F_PRESENT | F_WRITEABLE;
+	invlpg(pd_mapped);
+}

-int map_page(struct pml4e *pml4, void *virt_addr, void *phys_addr, unsigned int flags) {
-	uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
-	uint64_t pdpt_offset =
(((uint64_t)virt_addr) >> 30) & 0x1FF; - uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF; - uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF; - //uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF; +static void +load_pt( + void *phys +) { + static struct pte *pt = &paging_pt[3]; + pt->address = (uint64_t)phys >> 12; + pt->flags = F_PRESENT | F_WRITEABLE; + invlpg(pt_mapped); +} - if (!(pml4[pml4_offset].flags & F_PRESENT)) { +#define PAG_SUCCESS 0 +#define PAG_CANNOT_ALLOC 1 +#define PAG_NOT_PRESENT 2 + +static int +select_pdpt( + void *virt, + unsigned int flags, + struct pml4e *root, + struct pdpte **res, + bool create +) { + load_pml4(root); + uint64_t offset = (uint64_t)virt >> 39; + struct pml4e *pml4e = &pml4_mapped[offset]; + if (!(pml4e->flags & F_PRESENT)) { + if (!create) { + return PAG_NOT_PRESENT; + } void *new_page = alloc_phys_page(); - map_addr(new_page); - memset(addr_mapped, 0, 4096); - pml4[pml4_offset].address = ((uint64_t)new_page) >> 12; + if (new_page == NULL) { + return PAG_CANNOT_ALLOC; + } + load_addr(new_page); + memset(addr_mapped, 0, PAGE_SIZE); + pml4e->address = ((uint64_t)new_page) >> 12; + pml4e->flags = F_PRESENT; } - pml4[pml4_offset].flags = F_PRESENT | flags; + if (flags) + pml4e->flags = F_PRESENT | flags; + *res = (struct pdpte *)(uintptr_t)(pml4e->address << 12); + return PAG_SUCCESS; +} - struct pdpte *__pdpt = (struct pdpte *)(uintptr_t)(pml4[pml4_offset].address << 12); - map_pdpt(__pdpt); - if (!(pdpt_mapped[pdpt_offset].flags & F_PRESENT)) { +static int +select_pd( + void *virt, + unsigned int flags, + struct pdpte *pdpt, + struct pde **res, + bool create +) { + load_pdpt(pdpt); + uint64_t offset = ((uint64_t)virt >> 30) & 0x1ff; + struct pdpte *pdpte = &pdpt_mapped[offset]; + if (!(pdpte->flags & F_PRESENT)) { + if (!create) { + return PAG_NOT_PRESENT; + } void *new_page = alloc_phys_page(); - map_addr(new_page); - memset(addr_mapped, 0, 4096); - pdpt_mapped[pdpt_offset].address = ((uint64_t)new_page) >> 12; + if (new_page == NULL) { + return PAG_CANNOT_ALLOC; + } + load_addr(new_page); + memset(addr_mapped, 0, PAGE_SIZE); + pdpte->address = ((uint64_t)new_page) >> 12; + pdpte->flags = F_PRESENT; } - pdpt_mapped[pdpt_offset].flags = F_PRESENT | flags; + if (flags) + pdpte->flags = F_PRESENT | flags; + *res = (struct pde *)(uintptr_t)(pdpte->address << 12); + return PAG_SUCCESS; +} - struct pde *__pd = (struct pde *)(uintptr_t)(pdpt_mapped[pdpt_offset].address << 12); - map_pd(__pd); - if (!(pd_mapped[pd_offset].flags & F_PRESENT)) { +static int +select_pt( + void *virt, + unsigned int flags, + struct pde *pd, + struct pte **res, + bool create +) { + load_pd(pd); + uint64_t offset = ((uint64_t)virt >> 21) & 0x1ff; + struct pde *pde = &pd_mapped[offset]; + if (!(pde->flags & F_PRESENT)) { + if (!create) { + return PAG_NOT_PRESENT; + } void *new_page = alloc_phys_page(); - map_addr(new_page); - memset(addr_mapped, 0, 4096); - pd_mapped[pd_offset].address = ((uint64_t)new_page) >> 12; + if (new_page == NULL) { + return PAG_CANNOT_ALLOC; + } + load_addr(new_page); + memset(addr_mapped, 0, PAGE_SIZE); + pde->address = ((uint64_t)new_page) >> 12; + pde->flags = F_PRESENT; } - pd_mapped[pd_offset].flags = F_PRESENT | flags; + if (flags) + pde->flags = F_PRESENT | flags; + *res = (struct pte *)(uintptr_t)(pde->address << 12); + return PAG_SUCCESS; +} + +static void +select_page( + void *virt, + struct pte *pt, + struct pte **res +) { + load_pt(pt); + uint64_t offset = ((uint64_t)virt >> 12) & 0x1ff; + struct pte *page = 
&pt_mapped[offset]; + *res = page; + return; +} + +static inline void +try_unmap_pml4(void) { + for (int i = 0; i < 512; i++) { + if (pml4_mapped[i].flags & F_PRESENT) + return; + } + for (int i = 0; i < 512; i++) { + if (pml4_mapped[i].address) { + void *addr = (void *)(uintptr_t)(pml4_mapped[i].address << 12); + free_phys_page(addr); + } + } +} + +static inline void +try_unmap_pdpt(void) { + for (int i = 0; i < 512; i++) { + if (pdpt_mapped[i].flags & F_PRESENT) + return; + } + for (int i = 0; i < 512; i++) { + if (pdpt_mapped[i].address) { + void *addr = (void *)(uintptr_t)(pdpt_mapped[i].address << 12); + free_phys_page(addr); + } + } + try_unmap_pml4(); +} + +static inline void +try_unmap_pd(void) { + for (int i = 0; i < 512; i++) { + if (pd_mapped[i].flags & F_PRESENT) + return; + } + for (int i = 0; i < 512; i++) { + if (pd_mapped[i].address) { + void *addr = (void *)(uintptr_t)(pd_mapped[i].address << 12); + free_phys_page(addr); + } + } + try_unmap_pdpt(); +} + +static inline void +try_unmap_pt(void) { + for (int i = 0; i < 512; i++) { + if (pt_mapped[i].flags & F_PRESENT) + return; + } + for (int i = 0; i < 512; i++) { + if (pt_mapped[i].address) { + void *addr = (void *)(uintptr_t)(pt_mapped[i].address << 12); + free_phys_page(addr); + } + } + try_unmap_pd(); +} + +static void +unmap_page( + struct pml4e *root, + void *virt +) { + + struct pdpte *pdpt; + struct pde *pd; + struct pte *pt; + struct pte *page; + + unsigned int df = 0; + + if (select_pdpt(virt, df, root, &pdpt, false)) + return; + + if (select_pd(virt, df, pdpt, &pd, false)) + return; + + if (select_pt(virt, df, pd, &pt, false)) + return; + + select_page(virt, pt, &page); + + page->address = 0; + page->flags = 0; + + try_unmap_pt(); + + invlpg(virt); + + return; +} + +static void +unmap_pages( + struct pml4e *root, + void *virt_start, + long page_count +) { + + uint64_t pml4_o = -1, + pdpt_o = -1, + pd_o = -1; + + uint64_t pml4_n, + pdpt_n, + pd_n; + + struct pdpte *pdpt = NULL; + struct pde *pd = NULL; + struct pte *pt = NULL; + struct pte *page = NULL; + + unsigned int df = 0; + + void *virt; + + for (long i = 0; i < page_count; i++) { + + virt = (char *)virt_start + (i * PAGE_SIZE); + + pml4_n = (uint64_t) virt >> 39; + pdpt_n = ((uint64_t) virt >> 30) & 0x1ff; + pd_n = ((uint64_t) virt >> 21) & 0x1ff; + + if (pdpt == NULL || pml4_o != pml4_n) { + if (select_pdpt(virt, df, root, &pdpt, false)) + continue; + pml4_o = pml4_n; + } + + if (pd == NULL || pdpt_o != pdpt_n) { + if (select_pd(virt, df, pdpt, &pd, false)) + continue; + pdpt_o = pdpt_n; + } + + if (pt == NULL || pd_o != pd_n) { + if (pt) { + try_unmap_pt(); + } + if (select_pt(virt, df, pd, &pt, false)) + continue; + pd_o = pd_n; + } + + select_page(virt, pt, &page); + + page->address = 0; + page->flags = 0; + + } + + if (pt != NULL) + try_unmap_pt(); + + return; +} + +static int +map_page( + struct pml4e *root, + void *virt, + void *phys, + unsigned int flags +) { + + struct pdpte *pdpt; + struct pde *pd; + struct pte *pt; + struct pte *page; + + unsigned int df = F_WRITEABLE; + + if (select_pdpt(virt, df, root, &pdpt, true)) + return 1; + + if (select_pd(virt, df, pdpt, &pd, true)) + return 1; + + if (select_pt(virt, df, pd, &pt, true)) + return 1; + + select_page(virt, pt, &page); + + page->address = (uint64_t)phys >> 12; + page->flags = F_PRESENT | flags; + invlpg(virt); - struct pte *__pt = (struct pte *)(uintptr_t)(pd_mapped[pd_offset].address << 12); - map_pt(__pt); - pt_mapped[pt_offset].flags = F_PRESENT | flags; - pt_mapped[pt_offset].address = 
(((uint64_t)phys_addr) >> 12); - - invlpg(virt_addr); return 0; } -int unmap_page(struct pml4e *pml4, void *virt_addr) { - uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39; - uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF; - uint64_t pd_offset = (((uint64_t)virt_addr) >> 21) & 0x1FF; - uint64_t pt_offset = (((uint64_t)virt_addr) >> 12) & 0x1FF; - //uint64_t page_offset = (((uint64_t)virt_addr) ) & 0xFFF; +static int +map_pages( + struct pml4e *root, + void *virt_start, + void *phys_start, + unsigned int flags, + long page_count +) { - if (!(pml4[pml4_offset].flags & F_PRESENT)) - return -1; - struct pdpte *__pdpt = (struct pdpte *)(uintptr_t)(pml4[pml4_offset].address << 12); - map_pdpt(__pdpt); - if (!(pdpt_mapped[pdpt_offset].flags & F_PRESENT)) - return -1; - struct pde *__pd = (struct pde *)(uintptr_t)(pdpt_mapped[pdpt_offset].address << 12); - map_pd(__pd); - if (!(pd_mapped[pd_offset].flags & F_PRESENT)) - return -1; - struct pte *__pt = (struct pte *)(uintptr_t)(pd_mapped[pd_offset].address << 12); - map_pt(__pt); - if (!(pt_mapped[pt_offset].flags & F_PRESENT)) - return -1; + uint64_t pml4_o = -1, + pdpt_o = -1, + pd_o = -1; - pt_mapped[pt_offset].flags = 0; + uint64_t pml4_n, + pdpt_n, + pd_n; + + struct pdpte *pdpt = NULL; + struct pde *pd = NULL; + struct pte *pt = NULL; + struct pte *page = NULL; + + void *virt, *phys; + + unsigned int df = F_WRITEABLE; + + long i; + for (i = 0; i < page_count; i++) { + + virt = (char *)virt_start + (i * PAGE_SIZE); + phys = (char *)phys_start + (i * PAGE_SIZE); + + pml4_n = (uint64_t) virt >> 39; + pdpt_n = ((uint64_t) virt >> 30) & 0x1ff; + pd_n = ((uint64_t) virt >> 21) & 0x1ff; + + if (pdpt == NULL || pml4_o != pml4_n) { + if (select_pdpt(virt, df, root, &pdpt, true)) + goto failed; + pml4_o = pml4_n; + } + + if (pd == NULL || pdpt_o != pdpt_n) { + if (select_pd(virt, df, pdpt, &pd, true)) + goto failed; + pdpt_o = pdpt_n; + } + + if (pt == NULL || pd_o != pd_n) { + if (select_pt(virt, df, pd, &pt, true)) + goto failed; + pd_o = pd_n; + } + + select_page(virt, pt, &page); + + page->address = (uint64_t)phys >> 12; + page->flags = F_PRESENT | flags; + invlpg(virt); - int i = 0; - for(; i < 512; i++) { - if (pt_mapped[i].flags & F_PRESENT) - break; } - if (i == 512) - goto done; - - pd_mapped[pd_offset].flags = 0; - for(i = 0; i < 512; i++) { - if (pd_mapped[i].flags & F_PRESENT) - break; - } - if (i == 512) - goto done; - - pdpt_mapped[pdpt_offset].flags = 0; - - for(i = 0; i < 512; i++) { - if(pdpt_mapped[i].flags & F_PRESENT) - break; - } - if (i == 512) - goto done; - - pml4[pml4_offset].flags = 0; - - //TODO: Return memory used for page structures - -done: - invlpg(virt_addr); return 0; + +failed: + + unmap_pages(root, virt, i); + + return 1; } void paging_init(void) { @@ -239,7 +504,7 @@ void paging_init(void) { kernel_pd_0[1].address = (uint64_t)(&paging_pt) >> 12; kernel_pd_0[2].flags = F_PRESENT | F_WRITEABLE; kernel_pd_0[2].address = (uint64_t)(&bootstrap_pt) >> 12; - + memset(&paging_pt, 0, 4096); memset(&bootstrap_pt, 0, 4096); } @@ -260,21 +525,24 @@ void *mmap(void *addr, size_t len) { return NULL; } void *phys = page_align(addr); - for (long i = 0; i < pages; i++) { - void *virt_temp = (char *)virt + (i * PAGE_SIZE); - void *phys_temp = (char *)phys + (i * PAGE_SIZE); - map_page(kernel_pml4, virt_temp, phys_temp, F_WRITEABLE); + if (map_pages( + kernel_pml4, + virt, + phys, + F_WRITEABLE, + pages + )) { + virtaddr_free(virt); + return NULL; } - map_page(kernel_pml4, virt, (void*)0x23443, F_WRITEABLE); return 
virt; } void unmap(void *addr) { long pages = virtaddr_free(addr); - for (long i = 0; i < pages; i++) { - void *virt = (char *)addr + (i * PAGE_SIZE); - unmap_page(kernel_pml4, virt); - } + if (pages < 1) + return; + unmap_pages(kernel_pml4, addr, pages); } void *alloc_pages(int count) { @@ -286,10 +554,15 @@ void *alloc_pages(int count) { virtaddr_free(virt); return NULL; } - for (int i = 0; i < count; i++) { - void *virt_temp = (char *)virt + (i * PAGE_SIZE); - void *phys_temp = (char *)phys + (i * PAGE_SIZE); - map_page(kernel_pml4, virt_temp, phys_temp, F_WRITEABLE); + if (map_pages( + kernel_pml4, + virt, + phys, + F_WRITEABLE, + count + )) { + virtaddr_free(virt); + return NULL; } return virt; } @@ -298,10 +571,7 @@ void free_page(void *virt) { long pages = virtaddr_free(virt); if (pages < 1) return; - for (long i = 0; i < pages; i++) { - void *virt_temp = (char *)virt + (i * PAGE_SIZE); - unmap_page(kernel_pml4, virt_temp); - } + unmap_pages(kernel_pml4, virt, pages); } void memory_lock(void) { @@ -317,5 +587,6 @@ int kmap_page(void *virt_addr, void *phys_addr, unsigned int flags) { } int kunmap_page(void *virt_addr) { - return unmap_page(kernel_pml4, virt_addr); + unmap_page(kernel_pml4, virt_addr); + return 0; } diff --git a/src/arch/amd64/paging.h b/src/arch/amd64/paging.h index b32c376..1768106 100644 --- a/src/arch/amd64/paging.h +++ b/src/arch/amd64/paging.h @@ -10,7 +10,6 @@ #define F_MEGABYTE 0x080 #define F_GLOBAL 0x100 +void paging_init(void); int kmap_page(void *virt_addr, void *phys_addr, unsigned int flags); int kunmap_page(void *virt_addr); - -void paging_init(void); diff --git a/src/memory/physalloc.c b/src/memory/physalloc.c index 5be7469..dccd980 100644 --- a/src/memory/physalloc.c +++ b/src/memory/physalloc.c @@ -26,6 +26,7 @@ static uint64_t total_memory; static uint64_t free_memory; static uint64_t page_count; static uint64_t page_free_start; +static uint64_t segment_count; static struct memory_area *page_start; static int n_pages(const struct memory_area *m) { @@ -34,7 +35,8 @@ static int n_pages(const struct memory_area *m) { static void *page_at(int i) { int cur_page = 0; - for (struct memory_area *m = page_start; m != NULL; m++) { + for (uint64_t idx = 0; idx < segment_count; idx++) { + struct memory_area *m = &page_start[idx]; int pages = n_pages(m); if (i - cur_page < pages) { return (void *) (m->addr + (PAGE_SIZE * (i - cur_page))); @@ -165,7 +167,7 @@ void memory_init(struct memory_map *map) { end += map->size; struct memory_segment *segment = &map->entries[0]; - int segment_count = 0; + segment_count = 0; for(; (uintptr_t) segment < end; segment++) { if (segment_invalid(segment))