#pragma once #include "core/types.hpp" #define MAX_TABLES 280 #define PAGING_MAX_PAGE (20*512) /** * Current number of page (from the beginning of the ram) used by the kernel that * should not be used by the paging allocation mechanism and should not be granted * for allocation */ #define PAGING_KERNEL_USED_PAGE (2*512) /// @brief New number of page reachable at the end of the paging_enable() call #define PAGING_KERNEL_SPACE_MAX_PAGE (20*512) #define PAGING_ALLOCATE() paging_allocate_contiguous(1) #define PAGING_OPT_P 1 #define PAGING_OPT_RW (1<<1) #define PAGING_OPT_PCD (1<<3) /// @brief Get page address that contain addr #define PAGE(addr) (addr&(~(0xFFF))) #define PAGING_MAP(addr) paging_allocate_addr(kpages[0],(u64)(addr),(u64)(addr),PAGING_OPT_P|PAGING_OPT_RW,kvar_kernel_vma) #define PAGING_MAP_RANGE(addr, n) { \ for(u64 i=0;i<(n);i++){ \ paging_allocate_addr(kpages[0],((u64)(addr))+i,((u64)(addr))+i,PAGING_OPT_P|PAGING_OPT_RW,kvar_kernel_vma); \ }} #define PAGING_MAP2(addr,phy) paging_allocate_addr(kpages[0],(u64)(addr),(u64)(phy),PAGING_OPT_P|PAGING_OPT_RW,kvar_kernel_vma) #define PAGING_MAP2_RANGE(addr, phy, n) { \ for(u64 i=0;i<(n);i++){ \ paging_allocate_addr(kpages[0],((u64)(addr))+i,((u64)(phy))+i,PAGING_OPT_P|PAGING_OPT_RW,kvar_kernel_vma); \ }} /// @brief All PAE table structures are allocated here extern u64 kpages[MAX_TABLES][512]; /// CF boucane.hpp extern u64 kvar_kernel_vma,kvar_stack_pma,kvar_userspace_pma; extern void (*printk)(char *str,...); /** * Setup and enable PAE paging */ void paging_enable(); /** * Allocate the next available page * and return its physical address */ u64* paging_allocate_contiguous(int npages); /** * Deallocate a page located at addr */ void paging_deallocate(u64 addr); /** * Dump a specific range of bytes in the paging_status */ void paging_dump(int min, int max); /** * Deallocate all the pages linked to a pml4 */ void paging_deallocate_pml4(u64* pml4); /** * Deallocate all the pages related to a pml4 structure */ 
/**
 * Deallocate all the pages linked to a paging table structure
 * (pml4, pdp, etc.)
 */
void paging_deallocate_table(u64* table);

/**
 * Allocate a table structure (pml4, pdp etc..)
 */
u64* paging_allocate_table_local();

/**
 * Map the virtual page associated with virt to the physical page
 * associated with phy, using the given option bits (PAGING_OPT_*).
 *
 * @param pml4_table root of the paging hierarchy to install the mapping in
 * @param virt       virtual address to map
 * @param phy        physical address to map it to
 * @param options    page-table entry flags (PAGING_OPT_P, PAGING_OPT_RW, ...)
 * @param offset     added to pml4_table entry addresses to turn them into
 *                   virtual addresses (physical addresses from the pml4_table
 *                   are not reachable after the trampoline into
 *                   higher-half memory)
 */
void paging_allocate_addr(u64* pml4_table, u64 virt, u64 phy, u16 options, u64 offset);

/**
 * Get the physical address associated with virt
 * under the given pml4 hierarchy
 */
u64 paging_as_phy(u64* pml4_table, u64 virt);