#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <cpuid.h>

#define F_PRESENT      0x001
#define F_WRITEABLE    0x002
#define F_UNPRIVILEGED 0x004
#define F_WRITETHROUGH 0x008
#define F_CACHEDISABLE 0x010
#define F_ACCESSED     0x020
#define F_DIRTY        0x040
#define F_MEGABYTE     0x080
#define F_GLOBAL       0x100

// PAGE MAP LEVEL 4 ENTRY
struct pml4e {
    uint64_t flags : 6;
    uint64_t : 6;
    uint64_t address : 40;
    uint64_t : 11;
    uint64_t execute_disable : 1;
};

// PAGE DIRECTORY POINTER TABLE ENTRY
struct pdpte {
    uint64_t flags : 6;
    uint64_t : 1;
    uint64_t page_size : 1;
    uint64_t : 4;
    uint64_t address : 40;
    uint64_t : 11;
    uint64_t execute_disable : 1;
};

// PAGE DIRECTORY ENTRY
struct pde {
    uint64_t flags : 6;
    uint64_t : 1;
    uint64_t page_size : 1;
    uint64_t : 4;
    uint64_t address : 40;
    uint64_t : 11;
    uint64_t execute_disable : 1;
};

// PAGE TABLE ENTRY
struct pte {
    uint64_t flags : 9;
    uint64_t : 3;
    uint64_t address : 40;
    uint64_t : 7;
    uint64_t protection_key : 4;
    uint64_t execute_disable : 1;
};

// Invalidate the TLB entry for a single virtual address.
static inline void invlpg(unsigned long addr)
{
    __asm volatile("invlpg (%0)" :: "r"(addr) : "memory");
}

// Number of physical address bits supported by the CPU (CPUID leaf 0x80000008).
static int get_maxphysaddr(void)
{
    uint32_t eax, ebx, ecx, edx;
    __cpuid(0x80000008, eax, ebx, ecx, edx);
    return eax & 0xFF;
}

// Walk the page tables for virt_addr and store the physical address it maps
// to in *phys_addr; return -1 if any level of the walk is not present.
// Assumes the page-table pages themselves are identity mapped, so their
// physical addresses can be dereferenced directly.
int find_phys_addr(struct pml4e *pml4, void *virt_addr, void **phys_addr)
{
    uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
    uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
    uint64_t pd_offset   = (((uint64_t)virt_addr) >> 21) & 0x1FF;
    uint64_t pt_offset   = (((uint64_t)virt_addr) >> 12) & 0x1FF;
    uint64_t page_offset = ((uint64_t)virt_addr) & 0xFFF;

    if (!(pml4[pml4_offset].flags & F_PRESENT))
        return -1;
    struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);

    if (!(pdpt[pdpt_offset].flags & F_PRESENT))
        return -1;
    struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);

    if (!(pd[pd_offset].flags & F_PRESENT))
        return -1;
    struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);

    if (!(pt[pt_offset].flags & F_PRESENT))
        return -1;

    *phys_addr = (void *)((pt[pt_offset].address << 12) + page_offset);
    return 0;
}

// Trivial bump allocator for page-table pages; physical memory starting at
// 0x5000 is assumed to be free and identity mapped.
char *curr_alloc = (char *)0x5000;

void *alloc_phys_page(void)
{
    void *ret = curr_alloc;
    curr_alloc += 4096;
    return ret;
}

// Map the 4 KiB page containing virt_addr to phys_addr, allocating any
// missing intermediate tables along the way.
int map_page(struct pml4e *pml4, void *virt_addr, void *phys_addr, unsigned int flags)
{
    uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
    uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
    uint64_t pd_offset   = (((uint64_t)virt_addr) >> 21) & 0x1FF;
    uint64_t pt_offset   = (((uint64_t)virt_addr) >> 12) & 0x1FF;

    if (!(pml4[pml4_offset].flags & F_PRESENT)) {
        void *new_page = alloc_phys_page();
        memset(new_page, 0, 4096);
        pml4[pml4_offset].address = ((uint64_t)new_page) >> 12;
        pml4[pml4_offset].flags = F_PRESENT | flags;
    }
    struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);

    if (!(pdpt[pdpt_offset].flags & F_PRESENT)) {
        void *new_page = alloc_phys_page();
        memset(new_page, 0, 4096);
        pdpt[pdpt_offset].address = ((uint64_t)new_page) >> 12;
        pdpt[pdpt_offset].flags = F_PRESENT | flags;
    }
    struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);

    if (!(pd[pd_offset].flags & F_PRESENT)) {
        void *new_page = alloc_phys_page();
        memset(new_page, 0, 4096);
        pd[pd_offset].address = ((uint64_t)new_page) >> 12;
        pd[pd_offset].flags = F_PRESENT | flags;
    }
    struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);

    if (!(pt[pt_offset].flags & F_PRESENT))
        pt[pt_offset].flags |= F_PRESENT | flags;
    pt[pt_offset].address = ((uint64_t)phys_addr) >> 12;

    invlpg((unsigned long)virt_addr);
    return 0;
}
// Unmap the 4 KiB page containing virt_addr, clearing the higher-level
// entries as the tables beneath them become empty.
int unmap_page(struct pml4e *pml4, void *virt_addr)
{
    uint64_t pml4_offset = ((uint64_t)virt_addr) >> 39;
    uint64_t pdpt_offset = (((uint64_t)virt_addr) >> 30) & 0x1FF;
    uint64_t pd_offset   = (((uint64_t)virt_addr) >> 21) & 0x1FF;
    uint64_t pt_offset   = (((uint64_t)virt_addr) >> 12) & 0x1FF;

    if (!(pml4[pml4_offset].flags & F_PRESENT))
        return -1;
    struct pdpte *pdpt = (struct pdpte *)(pml4[pml4_offset].address << 12);

    if (!(pdpt[pdpt_offset].flags & F_PRESENT))
        return -1;
    struct pde *pd = (struct pde *)(pdpt[pdpt_offset].address << 12);

    if (!(pd[pd_offset].flags & F_PRESENT))
        return -1;
    struct pte *pt = (struct pte *)(pd[pd_offset].address << 12);

    if (!(pt[pt_offset].flags & F_PRESENT))
        return -1;

    pt[pt_offset].flags = 0;

    // If the page table still has present entries, stop here; otherwise
    // clear the entry that points to it and keep walking up.
    int i;
    for (i = 0; i < 512; i++) {
        if (pt[i].flags & F_PRESENT)
            break;
    }
    if (i != 512)
        goto done;
    pd[pd_offset].flags = 0;

    for (i = 0; i < 512; i++) {
        if (pd[i].flags & F_PRESENT)
            break;
    }
    if (i != 512)
        goto done;
    pdpt[pdpt_offset].flags = 0;

    for (i = 0; i < 512; i++) {
        if (pdpt[i].flags & F_PRESENT)
            break;
    }
    if (i != 512)
        goto done;
    pml4[pml4_offset].flags = 0;

    //TODO: Return memory used for page structures
done:
    invlpg((unsigned long)virt_addr);
    return 0;
}

// entry point for amd64
void *amd64_shim(void *boot_info)
{
    // Boot page tables set up by the loader, identity mapped at these addresses.
    struct pml4e *pml4 = (struct pml4e *)0x1000;
    struct pdpte *pdpt = (struct pdpte *)0x2000;
    struct pde *pd = (struct pde *)0x3000;
    struct pte *pt = (struct pte *)0x4000;

    //pd[1].flags = F_PRESENT | F_WRITEABLE;
    //pd[1].address = ((uint64_t)pt) >> 12;

    // Map a test page at 0x80000000 onto the frame containing the VGA text buffer.
    map_page(pml4, (void *)0x80000000, (void *)0xB8002, F_WRITEABLE);
    //__asm("invlpg 0x200000");

    void *ret;
    find_phys_addr(pml4, (void *)0x80000000, &ret);
    return ret;
}
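
// Illustrative sketch only (not part of the original flow): shows the expected
// round trip through the three routines above, assuming the identity-mapped boot
// page tables at 0x1000 are live. The addresses 0x90000000 and 0x200000 are
// placeholder values chosen for this example and would need to be free in a
// real setup.
void example_roundtrip(void)
{
    struct pml4e *pml4 = (struct pml4e *)0x1000;
    void *phys = NULL;

    // Install a writable mapping; intermediate tables come from the bump allocator.
    map_page(pml4, (void *)0x90000000, (void *)0x200000, F_WRITEABLE);

    // The walk should now report the frame we just installed:
    // phys == (void *)0x200000 for a page-aligned physical address.
    find_phys_addr(pml4, (void *)0x90000000, &phys);

    // Tear the mapping down again, which also prunes now-empty tables.
    unmap_page(pml4, (void *)0x90000000);
}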