#include "klibc.h"
#include "mem.h"
#include "stdarg.h"

/*
 * x86 32-bit two-level paging.
 *
 * A virtual address (Vaddr) is decoded as:
 *   - bits 31..22 (10 bits): index into the Page Directory (PD). A PD
 *     entry (PDE) points to a Page Table (PT).
 *   - bits 21..12 (10 bits): index into that PT. A PT entry (PTE) points
 *     to a 4 KiB physical page.
 *   - bits 11..0  (12 bits): offset inside the page.
 *
 * So the PD holds 1024 PDEs, each referencing a PT of 1024 PTEs, and each
 * PTE maps one 4 KiB page.
 *
 * Low physical memory (up to upperKernelAddr, see mem.c) is identity
 * mapped, i.e. Paddr == Vaddr.
 *
 * To keep the PD reachable once the MMU is on, the classic x86
 * "mirroring" (recursive mapping) trick is used: one PD entry N is made
 * to point back at the PD itself (this works because a PDE has the same
 * layout as a PTE). The MMU then interprets the PD as a PT when
 * translating addresses inside the N-th 4 MiB region:
 *   - Vaddr N*4MiB + I*4KiB reaches the PT referenced by PD entry I;
 *   - in particular Vaddr N*4MiB + N*4KiB reaches the PD itself.
 *
 * The PD is thus visible at Vaddr N*4MiB and occupies 4 KiB; PTs are
 * allocated dynamically. N must not collide with the identity-mapped
 * range (here N = 1023, i.e. the top 4 MiB of the address space).
 */

#define PT_SHIFT 12
#define PTE_MASK 0x3ff // 10 bits
#define PD_SHIFT 22
#define PD_MIRROR_PAGE_IDX 1023

// Page Directory Entry: references one Page Table (or a 4 MiB page when
// `size` is set, unused here).
struct pde {
  uint32_t present : 1;
  uint32_t write : 1;         // 0 read-only - 1 read/write
  uint32_t user : 1;          // 0 supervisor - 1 user
  uint32_t write_through : 1; // 0 write-back - 1 write-through
  uint32_t cache_disable : 1;
  uint32_t access : 1; // set by the CPU when the entry is accessed
  uint32_t zero : 1;   // not used
  uint32_t size : 1;   // 0 for 4 KiB pages, 1 for 4 MiB pages
  uint32_t ignored : 1;
  uint32_t available : 3;
  uint32_t pt_addr : 20; // physical frame number of the Page Table
} __attribute__((packed));

// Page Table Entry: references one 4 KiB physical page.
struct pte {
  uint32_t present : 1;
  uint32_t write : 1;         // 0 read-only - 1 read/write
  uint32_t user : 1;          // 0 supervisor - 1 user
  uint32_t write_through : 1; // 0 write-back - 1 write-through
  uint32_t cache_disable : 1;
  uint32_t access : 1; // set by the CPU when the entry is accessed
  uint32_t dirty : 1;  // set by the CPU on write to the page; the CPU
                       // never clears it, software must.
  uint32_t zero : 1;   // memory type if PAT is supported; otherwise must
                       // be 0.
  uint32_t global : 1; // if set, the TLB entry is not invalidated when
                       // CR3 is reloaded. Requires CR4.PGE.
  uint32_t available : 3;
  uint32_t paddr : 20; // physical frame number of the mapped page
} __attribute__((packed));

// Layout of CR3 (the Page Directory Base Register).
struct pdbr {
  uint32_t zero1 : 3;          // reserved
  uint32_t write_through : 1;  // 0 write-back - 1 write-through
  uint32_t cache_disabled : 1; // 1 = cache disabled
  uint32_t zero2 : 7;          // reserved
  uint32_t pd_paddr : 20;      // physical frame number of the PD
} __attribute__((packed));

// Invalidate the TLB entry for a single virtual address.
static inline void __native_flush_tlb_single(unsigned long addr)
{
  asm volatile("invlpg (%0)" ::"r"(addr) : "memory");
}

/*
 * Set up paging and turn the MMU on.
 *
 * Identity-maps [0, upperKernelAddr), installs the mirror entry, loads
 * CR3 and enables CR0.PG (paging) and CR0.WP (write-protect in ring 0).
 *
 * @param upperKernelAddr  first physical address NOT to identity-map.
 * @return 0 on success.
 */
int pagingSetup(paddr_t upperKernelAddr)
{
  struct pdbr cr3;

  // x86 has 1024 PDEs of 4 bytes each: exactly one 4 KiB page.
  struct pde *pd = (struct pde *)allocPhyPage();
  memset(pd, 0, PAGE_SIZE);

  memset(&cr3, 0x0, sizeof(struct pdbr));
  // Was a magic `>> 12`; use PAGE_SHIFT like the rest of this function.
  cr3.pd_paddr = ((paddr_t)pd) >> PAGE_SHIFT;

  // The MMU is not enabled yet, so the PD/PTs can be written through
  // their physical addresses directly — no need for mirroring here.

  // Identity mapping up to upperKernelAddr.
  for (paddr_t i = 0; i < upperKernelAddr; i += PAGE_SIZE) {
    uint pdEntry = i >> (PD_SHIFT);
    uint ptEntry = (i >> PT_SHIFT) & PTE_MASK;

    struct pte *pt;
    if (pd[pdEntry].present) {
      pt = (struct pte *)(pd[pdEntry].pt_addr << PT_SHIFT);
      // One reference per PTE in use, so the PT page can be reclaimed
      // when its last mapping goes away.
      refPhyPage((paddr_t)pt);
    } else {
      pt = (struct pte *)allocPhyPage();
      memset(pt, 0, PAGE_SIZE);
      pd[pdEntry].present = 1;
      pd[pdEntry].write = 1;
      pd[pdEntry].pt_addr = ((paddr_t)pt >> PT_SHIFT);
    }

    pt[ptEntry].present = 1;
    pt[ptEntry].write = 1; // TODO set Kernel code as RO
    pt[ptEntry].paddr = i >> PAGE_SHIFT;
  }

  // Set up mirroring: the last PDE points back at the PD itself.
  pd[PD_MIRROR_PAGE_IDX].present = 1;
  pd[PD_MIRROR_PAGE_IDX].write = 1;
  pd[PD_MIRROR_PAGE_IDX].pt_addr = ((paddr_t)pd >> PT_SHIFT);

  // Load the PDBR into the MMU and enable paging:
  //   CR0 |= PG (bit 31) | WP (bit 16).
  // The jump sequence flushes the prefetch queue so no stale
  // pre-paging instruction bytes are executed.
  asm volatile("movl %0,%%cr3\n\t"
               "movl %%cr0,%%eax\n\t"
               "orl $0x80010000, %%eax\n\t" /* bit 31 | bit 16 */
               "movl %%eax,%%cr0\n\t"
               "jmp 1f\n\t"
               "1:\n\t"
               "movl $2f, %%eax\n\t"
               "jmp *%%eax\n\t"
               "2:\n\t" ::"r"(cr3)
               : "memory", "eax");
  return 0;
}