author    Freya Murphy <freya@freyacat.org>  2025-05-02 12:37:55 -0400
committer Freya Murphy <freya@freyacat.org>  2025-05-02 12:37:55 -0400
commit    e10b750a33bb9b8fb9b5f636c5d937cb54104bd9
tree      140548e1cc6d22f429f41d5f3eb061402edeac3e /kernel/memory/paging.c
parent    temp badness

broken?
Diffstat (limited to 'kernel/memory/paging.c')
-rw-r--r--  kernel/memory/paging.c | 119
1 file changed, 60 insertions(+), 59 deletions(-)
diff --git a/kernel/memory/paging.c b/kernel/memory/paging.c
index 39f7638..e21fd34 100644
--- a/kernel/memory/paging.c
+++ b/kernel/memory/paging.c
@@ -1035,71 +1035,72 @@ void *mem_alloc_pages_at(mem_ctx_t ctx, size_t count, void *virt,
if (phys == NULL)
return NULL;
- if (map_pages((volatile struct pml4 *)ctx->pml4, virt, phys, flags, count)) {
+ if (map_pages((volatile struct pml4 *)ctx->pml4, virt, phys, flags,
+ count)) {
free_phys_pages(phys, count);
return NULL;
}
return virt;
-// size_t pages_needed = count;
-//
-// struct phys_page_slice prev_phys_block = PHYS_PAGE_SLICE_NULL;
-// struct phys_page_slice phys_pages;
-//
-// if (virtaddr_take(&ctx->virtctx, virt, count))
-// return NULL;
-//
-// while (pages_needed > 0) {
-// phys_pages = alloc_phys_page_withextra(pages_needed);
-// if (phys_pages.pagestart == NULL) {
-// goto mem_alloc_pages_at_fail;
-// }
-//
-// {
-// // allocate the first page and store in it the physical address of the
-// // previous chunk of pages
-// // TODO: skip this if there are already enough pages from first alloc
-// void *pageone = kmapaddr(phys_pages.pagestart, NULL, 1,
-// F_PRESENT | F_WRITEABLE);
-// if (pageone == NULL) {
-// panic("kernel out of virtual memory");
-// }
-// *((struct phys_page_slice *)pageone) = prev_phys_block;
-// prev_phys_block = phys_pages;
-// kunmapaddr(pageone);
-// }
-//
-// // index into virtual page array at index [count - pages_needed]
-// void *vaddr = ((uint8_t *)virt) + ((count - pages_needed) * PAGE_SIZE);
-//
-// assert(pages_needed >= phys_pages.num_pages, "overflow");
-// pages_needed -= phys_pages.num_pages;
-//
-// if (map_pages((volatile struct pml4 *)ctx->pml4, vaddr,
-// phys_pages.pagestart, flags, phys_pages.num_pages)) {
-// goto mem_alloc_pages_at_fail;
-// }
-// }
-//
-// return virt;
-//
-//mem_alloc_pages_at_fail:
-// while (prev_phys_block.pagestart) {
-// void *virtpage = kmapaddr(prev_phys_block.pagestart, NULL, 1,
-// F_PRESENT | F_WRITEABLE);
-// if (!virtpage) {
-// // memory corruption, most likely a bug
-// // could also ERROR here and exit with leak
-// panic("unable to free memory from failed mem_alloc_pages_at call");
-// }
-// struct phys_page_slice prev = *(struct phys_page_slice *)virtpage;
-// prev_phys_block = prev;
-// free_phys_pages_slice(prev);
-// kunmapaddr(virtpage);
-// }
-//
-// return NULL;
+ // size_t pages_needed = count;
+ //
+ // struct phys_page_slice prev_phys_block = PHYS_PAGE_SLICE_NULL;
+ // struct phys_page_slice phys_pages;
+ //
+ // if (virtaddr_take(&ctx->virtctx, virt, count))
+ // return NULL;
+ //
+ // while (pages_needed > 0) {
+ // phys_pages = alloc_phys_page_withextra(pages_needed);
+ // if (phys_pages.pagestart == NULL) {
+ // goto mem_alloc_pages_at_fail;
+ // }
+ //
+ // {
+ // // allocate the first page and store in it the physical address of the
+ // // previous chunk of pages
+ // // TODO: skip this if there are already enough pages from first alloc
+ // void *pageone = kmapaddr(phys_pages.pagestart, NULL, 1,
+ // F_PRESENT | F_WRITEABLE);
+ // if (pageone == NULL) {
+ // panic("kernel out of virtual memory");
+ // }
+ // *((struct phys_page_slice *)pageone) = prev_phys_block;
+ // prev_phys_block = phys_pages;
+ // kunmapaddr(pageone);
+ // }
+ //
+ // // index into virtual page array at index [count - pages_needed]
+ // void *vaddr = ((uint8_t *)virt) + ((count - pages_needed) * PAGE_SIZE);
+ //
+ // assert(pages_needed >= phys_pages.num_pages, "overflow");
+ // pages_needed -= phys_pages.num_pages;
+ //
+ // if (map_pages((volatile struct pml4 *)ctx->pml4, vaddr,
+ // phys_pages.pagestart, flags, phys_pages.num_pages)) {
+ // goto mem_alloc_pages_at_fail;
+ // }
+ // }
+ //
+ // return virt;
+ //
+ //mem_alloc_pages_at_fail:
+ // while (prev_phys_block.pagestart) {
+ // void *virtpage = kmapaddr(prev_phys_block.pagestart, NULL, 1,
+ // F_PRESENT | F_WRITEABLE);
+ // if (!virtpage) {
+ // // memory corruption, most likely a bug
+ // // could also ERROR here and exit with leak
+ // panic("unable to free memory from failed mem_alloc_pages_at call");
+ // }
+ // struct phys_page_slice prev = *(struct phys_page_slice *)virtpage;
+ // prev_phys_block = prev;
+ // free_phys_pages_slice(prev);
+ // kunmapaddr(virtpage);
+ // }
+ //
+ // return NULL;
}
void mem_free_pages(mem_ctx_t ctx, const void *virt)
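
For context, the disabled path above chains variably sized physical chunks through an intrusive list kept in each chunk's first page, so a failed allocation can be unwound chunk by chunk. Below is a minimal userspace sketch of that pattern only; malloc stands in for alloc_phys_page_withextra(), free for free_phys_pages_slice(), and struct phys_page_slice is simplified to the two fields the loop uses. None of this is the kernel's real API.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* simplified stand-in for the kernel's struct phys_page_slice */
struct phys_page_slice {
	void *pagestart;  /* first page of the chunk */
	size_t num_pages; /* pages in this chunk */
};

int main(void)
{
	struct phys_page_slice prev = { NULL, 0 };

	/* allocate three chunks, chaining each to the previous through
	 * the chunk's first page (as the disabled loop above does) */
	for (int i = 0; i < 3; i++) {
		struct phys_page_slice cur;
		cur.pagestart = malloc(2 * PAGE_SIZE); /* stand-in for alloc_phys_page_withextra() */
		cur.num_pages = 2;
		if (cur.pagestart == NULL)
			break;
		*(struct phys_page_slice *)cur.pagestart = prev;
		prev = cur;
	}

	/* failure-path unwind: walk the chain newest-to-oldest, loading
	 * each chunk's predecessor before releasing the chunk itself */
	while (prev.pagestart != NULL) {
		struct phys_page_slice next = *(struct phys_page_slice *)prev.pagestart;
		printf("freeing chunk of %zu pages\n", prev.num_pages);
		free(prev.pagestart); /* stand-in for free_phys_pages_slice() */
		prev = next;
	}
	return 0;
}

Storing the link inside the allocated pages themselves means the unwind path needs no auxiliary allocation, at the cost of the kernel temporarily mapping each chunk's first page (kmapaddr/kunmapaddr in the disabled code) to read the predecessor out of it.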