| author | Freya Murphy <freya@freyacat.org> | 2025-05-01 19:52:25 -0400 |
|---|---|---|
| committer | Freya Murphy <freya@freyacat.org> | 2025-05-01 19:52:25 -0400 |
| commit | 34247153636c4f38116907eb1421868113cae50d (patch) | |
| tree | bcb75f5152ecb1c5d0b5d66895dde34992fce3b4 | |
| parent | finish syscall impls (diff) | |
comment out non-contiguous phys page allocator, it's causing problems and this is due tomorrow, dear god aaaaaaaaa
| -rw-r--r-- | kernel/memory/paging.c | 114 |

1 file changed, 65 insertions(+), 49 deletions(-)
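For readers skimming the patch, this is how the function body reads after the change, reassembled from the diff's `+` lines below. Every identifier is the kernel's own; the comments are editorial:

```c
void *mem_alloc_pages_at(mem_ctx_t ctx, size_t count, void *virt,
			 unsigned int flags)
{
	void *phys = NULL;

	/* claim the requested virtual range; bail if it is already taken */
	if (virtaddr_take(&ctx->virtctx, virt, count))
		return NULL;

	/* one physically contiguous run replaces the chained chunks */
	phys = alloc_phys_pages_exact(count);
	if (phys == NULL)
		return NULL;

	/* on mapping failure the single run is trivially freed --
	 * no chain walk needed */
	if (map_pages((volatile struct pml4 *)ctx->pml4, virt, phys, flags, count)) {
		free_phys_pages(phys, count);
		return NULL;
	}

	return virt;
}
```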
```diff
diff --git a/kernel/memory/paging.c b/kernel/memory/paging.c
index 763bdce..9dfa236 100644
--- a/kernel/memory/paging.c
+++ b/kernel/memory/paging.c
@@ -1025,64 +1025,80 @@ void *mem_alloc_pages(mem_ctx_t ctx, size_t count, unsigned int flags)
 void *mem_alloc_pages_at(mem_ctx_t ctx, size_t count, void *virt,
 			 unsigned int flags)
 {
-	size_t pages_needed = count;
-
-	struct phys_page_slice prev_phys_block = PHYS_PAGE_SLICE_NULL;
-	struct phys_page_slice phys_pages;
+	void *phys = NULL;
 
 	if (virtaddr_take(&ctx->virtctx, virt, count))
 		return NULL;
 
-	while (pages_needed > 0) {
-		phys_pages = alloc_phys_page_withextra(pages_needed);
-		if (phys_pages.pagestart == NULL) {
-			goto mem_alloc_pages_at_fail;
-		}
-
-		{
-			// allocate the first page and store in it the physical address of the
-			// previous chunk of pages
-			// TODO: skip this if there are already enough pages from first alloc
-			void *pageone = kmapaddr(phys_pages.pagestart, NULL, 1,
-						 F_PRESENT | F_WRITEABLE);
-			if (pageone == NULL) {
-				panic("kernel out of virtual memory");
-			}
-			*((struct phys_page_slice *)pageone) = prev_phys_block;
-			prev_phys_block = phys_pages;
-			kunmapaddr(pageone);
-		}
-
-		// index into virtual page array at index [count - pages_needed]
-		void *vaddr = ((uint8_t *)virt) + ((count - pages_needed) * PAGE_SIZE);
-
-		assert(pages_needed >= phys_pages.num_pages, "overflow");
-		pages_needed -= phys_pages.num_pages;
+	phys = alloc_phys_pages_exact(count);
+	if (phys == NULL)
+		return NULL;
 
-		if (map_pages((volatile struct pml4 *)ctx->pml4, vaddr,
-			      phys_pages.pagestart, flags, phys_pages.num_pages)) {
-			goto mem_alloc_pages_at_fail;
-		}
+	if (map_pages((volatile struct pml4 *)ctx->pml4, virt, phys, flags, count)) {
+		free_phys_pages(phys, count);
+		return NULL;
 	}
 
 	return virt;
 
-mem_alloc_pages_at_fail:
-	while (prev_phys_block.pagestart) {
-		void *virtpage = kmapaddr(prev_phys_block.pagestart, NULL, 1,
-					  F_PRESENT | F_WRITEABLE);
-		if (!virtpage) {
-			// memory corruption, most likely a bug
-			// could also ERROR here and exit with leak
-			panic("unable to free memory from failed mem_alloc_pages_at call");
-		}
-		struct phys_page_slice prev = *(struct phys_page_slice *)virtpage;
-		prev_phys_block = prev;
-		free_phys_pages_slice(prev);
-		kunmapaddr(virtpage);
-	}
-
-	return NULL;
+//	size_t pages_needed = count;
+//
+//	struct phys_page_slice prev_phys_block = PHYS_PAGE_SLICE_NULL;
+//	struct phys_page_slice phys_pages;
+//
+//	if (virtaddr_take(&ctx->virtctx, virt, count))
+//		return NULL;
+//
+//	while (pages_needed > 0) {
+//		phys_pages = alloc_phys_page_withextra(pages_needed);
+//		if (phys_pages.pagestart == NULL) {
+//			goto mem_alloc_pages_at_fail;
+//		}
+//
+//		{
+//			// allocate the first page and store in it the physical address of the
+//			// previous chunk of pages
+//			// TODO: skip this if there are already enough pages from first alloc
+//			void *pageone = kmapaddr(phys_pages.pagestart, NULL, 1,
+//						 F_PRESENT | F_WRITEABLE);
+//			if (pageone == NULL) {
+//				panic("kernel out of virtual memory");
+//			}
+//			*((struct phys_page_slice *)pageone) = prev_phys_block;
+//			prev_phys_block = phys_pages;
+//			kunmapaddr(pageone);
+//		}
+//
+//		// index into virtual page array at index [count - pages_needed]
+//		void *vaddr = ((uint8_t *)virt) + ((count - pages_needed) * PAGE_SIZE);
+//
+//		assert(pages_needed >= phys_pages.num_pages, "overflow");
+//		pages_needed -= phys_pages.num_pages;
+//
+//		if (map_pages((volatile struct pml4 *)ctx->pml4, vaddr,
+//			      phys_pages.pagestart, flags, phys_pages.num_pages)) {
+//			goto mem_alloc_pages_at_fail;
+//		}
+//	}
+//
+//	return virt;
+//
+//mem_alloc_pages_at_fail:
+//	while (prev_phys_block.pagestart) {
+//		void *virtpage = kmapaddr(prev_phys_block.pagestart, NULL, 1,
+//					  F_PRESENT | F_WRITEABLE);
+//		if (!virtpage) {
+//			// memory corruption, most likely a bug
+//			// could also ERROR here and exit with leak
+//			panic("unable to free memory from failed mem_alloc_pages_at call");
+//		}
+//		struct phys_page_slice prev = *(struct phys_page_slice *)virtpage;
+//		prev_phys_block = prev;
+//		free_phys_pages_slice(prev);
+//		kunmapaddr(virtpage);
+//	}
+//
+//	return NULL;
 }
 
 void mem_free_pages(mem_ctx_t ctx, const void *virt)
```
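The scheme being commented out is worth a note: each chunk returned by the physical allocator had a `struct phys_page_slice` describing the *previous* chunk written into its first page, so the failure path could walk that chain backwards and free everything. Below is a minimal user-space sketch of that chaining idea; `fake_alloc`, the `max_chunk` cap, and the malloc-backed "pages" are stand-ins invented for illustration, and only `struct phys_page_slice` and the chaining/unwind pattern come from the patch:

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Mirrors the diff's struct: one run of contiguous "physical" pages. */
struct phys_page_slice {
	void *pagestart;
	size_t num_pages;
};

/* Simulated physical allocator: hands back at most max_chunk pages per
 * call to force chaining (stand-in for alloc_phys_page_withextra). */
static struct phys_page_slice fake_alloc(size_t wanted, size_t max_chunk)
{
	struct phys_page_slice s;
	s.num_pages = wanted < max_chunk ? wanted : max_chunk;
	s.pagestart = aligned_alloc(PAGE_SIZE, s.num_pages * PAGE_SIZE);
	return s;
}

int main(void)
{
	struct phys_page_slice prev = { NULL, 0 };
	size_t pages_needed = 10;

	/* Allocate in chunks, threading a linked list of slice headers
	 * through the first page of each chunk, as the old code did. */
	while (pages_needed > 0) {
		struct phys_page_slice cur = fake_alloc(pages_needed, 4);
		if (cur.pagestart == NULL)
			break;
		*(struct phys_page_slice *)cur.pagestart = prev;
		prev = cur;
		assert(pages_needed >= cur.num_pages);
		pages_needed -= cur.num_pages;
	}

	/* Unwind: walk the chain from the newest chunk back, freeing each
	 * one -- this is what the old mem_alloc_pages_at_fail path did. */
	while (prev.pagestart != NULL) {
		struct phys_page_slice next =
			*(struct phys_page_slice *)prev.pagestart;
		printf("freeing chunk of %zu page(s)\n", prev.num_pages);
		free(prev.pagestart);
		prev = next;
	}
	return 0;
}
```

The trade-off the commit makes is visible here: chaining tolerates physical fragmentation but needs a kmapaddr round-trip per chunk and a careful unwind, while the exact allocator it falls back to is far simpler at the cost of failing when no contiguous run of `count` pages exists.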