aboutsummaryrefslogtreecommitdiff
path: root/src/core
diff options
context:
space:
mode:
Diffstat (limited to 'src/core')
-rw-r--r--src/core/paging.cc38
-rw-r--r--src/core/paging.hpp51
2 files changed, 38 insertions, 51 deletions
diff --git a/src/core/paging.cc b/src/core/paging.cc
index c283c57..7ae1b25 100644
--- a/src/core/paging.cc
+++ b/src/core/paging.cc
@@ -11,11 +11,14 @@ u64* paging_allocate_table(){
u64 addr=(u64)kpages[kpages_next];
u64* allocated=(u64*)(addr-kvar_kernel_vma);
kpages_next++;
+ if(kpages_next>=PAGING_MAX_PAGE){
+ printk("Could not allocate more page structures. Kernel Panic!");
+ while(1);
+ }
return allocated;
}
void paging_enable() {
-
// Init status
for (int i = 0; i < PAGING_MAX_PAGE / 8; i++) {
paging_status[i] = 0;
@@ -33,10 +36,11 @@ void paging_enable() {
// Setting up new kernel address space
for(u64 i=0;i<=0x10000000;i+=4096){
- paging_allocate_addr(kpages[0],kvar_kernel_vma+i,i, 0x3,0); // Identity map
+ PAGE_MAP(i);
}
+
// 4096 bytes stack
- paging_allocate_addr(kpages[0],kvar_kernel_vma-4096,kvar_stack_pma,0x3,0);
+ PAGE_MAP_PHY(-4096, kvar_stack_pma);
// Load new pml4
u64 kpage_phy=((u64)kpages[0]-kvar_kernel_vma);
@@ -46,18 +50,6 @@ void paging_enable() {
:: "r" (kpage_phy));
}
-u64 paging_as_phy(u64* pml4_table, u64 virt){
- u16 pml4=virt>>39&0x1FF;
- u16 pdp=virt>>30&0x1FF;
- u16 pd=virt>>21&0x1FF;
- u16 pt=(virt>>12)&0x1FF;
-
- u64* pdp_table=(u64*)PAGE(pml4_table[pml4]);
- u64* pd_table=(u64*)PAGE(pdp_table[pdp]);
- u64* pt_table=(u64*)PAGE(pd_table[pd]);
- return((PAGE(pt_table[pt]))|(virt&0xFFF));
-}
-
u64* paging_allocate_contiguous(int npages){
int n_contiguous=0;
for (int i = 0; i < PAGING_MAX_PAGE / 8; i++) {
@@ -94,6 +86,8 @@ void paging_deallocate(u64 addr){
paging_status[page_number/8]=byte&(~(1<<(page_number%8)));
}
+
+/// TODO: Debug address
void paging_deallocate_pml4(u64* pml4){
for(int i=0;i<512;i++){
u64* pdp=(u64*)PAGE(pml4[i]);
@@ -142,7 +136,7 @@ void paging_deallocate_table(u64* table){
}
}
-void paging_allocate_addr(u64* pml4_table, u64 virt, u64 phy, u16 options, u64 offset){
+void paging_allocate_addr(u64* pml4_table, u64 virt, u64 phy, u16 options){
u16 pml4=virt>>39&0x1FF;
u16 pdp=virt>>30&0x1FF;
u16 pd=virt>>21&0x1FF;
@@ -153,29 +147,29 @@ void paging_allocate_addr(u64* pml4_table, u64 virt, u64 phy, u16 options, u64 o
if(pml4_table[pml4] == 0){
pml4_table[pml4]=(u64)paging_allocate_table();
pml4_table[pml4]|=options;
- paging_allocate_addr(pml4_table,virt,phy,options,offset);
+ paging_allocate_addr(pml4_table,virt,phy,options);
return;
}
// Solve pd
- u64* pdp_table=(u64*)(PAGE(pml4_table[pml4])+offset);
+ u64* pdp_table=(u64*)(VIRT(PAGE(pml4_table[pml4])));
if(pdp_table[pdp] == 0){
pdp_table[pdp]=(u64)paging_allocate_table();
pdp_table[pdp]|=options;
- paging_allocate_addr(pml4_table,virt,phy,options,offset);
+ paging_allocate_addr(pml4_table,virt,phy,options);
return;
}
// Solve pt
- u64* pd_table=(u64*)(PAGE(pdp_table[pdp])+offset);
+ u64* pd_table=(u64*)(VIRT(PAGE(pdp_table[pdp])));
if(pd_table[pd] == 0){
pd_table[pd]=(u64)paging_allocate_table();
pd_table[pd]|=options;
- paging_allocate_addr(pml4_table,virt,phy,options,offset);
+ paging_allocate_addr(pml4_table,virt,phy,options);
return;
}
// Solve address
- u64* pt_table=(u64*)(PAGE(pd_table[pd])+offset);
+ u64* pt_table=(u64*)(VIRT(PAGE(pd_table[pd])));
if(pt_table[pt] == 0){
pt_table[pt]=PAGE(phy);
pt_table[pt]|=options;
diff --git a/src/core/paging.hpp b/src/core/paging.hpp
index a3c2fec..1aae24a 100644
--- a/src/core/paging.hpp
+++ b/src/core/paging.hpp
@@ -2,34 +2,40 @@
#include "core/types.hpp"
-#define MAX_TABLES 280
-#define PAGING_MAX_PAGE (20*512)
/**
 * Current number of pages (from the beginning of RAM) used by the kernel that
* should not be used by the paging allocation mechanism and should not be granted
* for allocation
*/
-#define PAGING_KERNEL_USED_PAGE (2*512)
/// @brief New number of pages reachable at the end of the paging_enable() call
-#define PAGING_KERNEL_SPACE_MAX_PAGE (20*512)
-#define PAGING_ALLOCATE() paging_allocate_contiguous(1)
+#define MAX_TABLES 280
+#define PAGING_MAX_PAGE (20*512)
+#define PAGE_ALLOCATE() paging_allocate_contiguous(1)
+
+/// @brief Options
#define PAGING_OPT_P 1
#define PAGING_OPT_RW (1<<1)
#define PAGING_OPT_PCD (1<<3)
/// @brief Get page address that contain addr
#define PAGE(addr) (addr&(~(0xFFF)))
-#define PAGING_MAP(addr) paging_allocate_addr(kpages[0],(u64)(addr),(u64)(addr),PAGING_OPT_P|PAGING_OPT_RW,kvar_kernel_vma)
-#define PAGING_MAP_RANGE(addr, n) { \
- for(u64 i=0;i<(n);i++){ \
- paging_allocate_addr(kpages[0],((u64)(addr))+i,((u64)(addr))+i,PAGING_OPT_P|PAGING_OPT_RW,kvar_kernel_vma); \
-}}
+#define VIRT(addr) (addr+kvar_kernel_vma)
-
-#define PAGING_MAP2(addr,phy) paging_allocate_addr(kpages[0],(u64)(addr),(u64)(phy),PAGING_OPT_P|PAGING_OPT_RW,kvar_kernel_vma)
-#define PAGING_MAP2_RANGE(addr, phy, n) { \
- for(u64 i=0;i<(n);i++){ \
- paging_allocate_addr(kpages[0],((u64)(addr))+i,((u64)(phy))+i,PAGING_OPT_P|PAGING_OPT_RW,kvar_kernel_vma); \
+/// @brief Mapping facilities
+#define PAGE_ID_MAP(addr) paging_allocate_addr(kpages[0],(u64)(addr),(u64)(addr),PAGING_OPT_P|PAGING_OPT_RW)
+#define PAGE_ID_RMAP(addr, n) { \
+ for(u64 i=0;i<(n);i+=4096){ \
+ paging_allocate_addr(kpages[0],((u64)(addr))+i,((u64)(addr))+i,PAGING_OPT_P|PAGING_OPT_RW); \
+}}
+#define PAGE_MAP(addr) paging_allocate_addr(kpages[0],(u64)((addr)+kvar_kernel_vma),(u64)(addr),PAGING_OPT_P|PAGING_OPT_RW)
+#define PAGE_RMAP(addr, n) { \
+ for(u64 i=0;i<(n);i+=4096){ \
+ paging_allocate_addr(kpages[0],(((u64)(addr))+kvar_kernel_vma)+i,((u64)(addr))+i,PAGING_OPT_P|PAGING_OPT_RW); \
+}}
+#define PAGE_MAP_PHY(addr,phy) paging_allocate_addr(kpages[0],(u64)((addr)+kvar_kernel_vma),(u64)(phy),PAGING_OPT_P|PAGING_OPT_RW)
+#define PAGE_RMAP_PHY(addr,phy, n) { \
+ for(u64 i=0;i<(n);i+=4096){ \
+ paging_allocate_addr(kpages[0],(((u64)(addr))+kvar_kernel_vma)+i,((u64)(phy))+i,PAGING_OPT_P|PAGING_OPT_RW); \
}}
/// @brief All PAE table structures are allocated here
@@ -71,20 +77,7 @@ void paging_deallocate_pml4(u64* pml4);
void paging_deallocate_table(u64* table);
/**
- * Allocate table structure (pml4, pdp etc..)
- */
-u64* paging_allocate_table_local();
-
-/**
* Map virtual page associated to virt
* to the physical page associated with phy
- * Offset can be used to convert pml4_table address
- * content to virtual addresses (since physical addresses from the pml4_table are not
- * available after the trampoline into High-Half memory)
- */
-void paging_allocate_addr(u64* pml4_table,u64 virt, u64 phy, u16 options, u64 offset);
-
-/**
- * Get associated physical address
*/
-u64 paging_as_phy(u64* pml4_table, u64 virt); \ No newline at end of file
+void paging_allocate_addr(u64* pml4_table,u64 virt, u64 phy, u16 options);