diff --git a/core/mem.c b/core/mem.c
index 13a402c..b57d432 100644
--- a/core/mem.c
+++ b/core/mem.c
@@ -71,7 +71,7 @@ int unrefPhyPage(paddr_t addr)
 		list_add_tail(free_page, mem);
 	}
 
-	return 0;
+	return mem->ref;
 }
 
 int refPhyPage(paddr_t addr)
diff --git a/core/mem.h b/core/mem.h
index bb58650..900224b 100644
--- a/core/mem.h
+++ b/core/mem.h
@@ -12,7 +12,7 @@ extern uint32_t __ld_kernel_end;
 
 struct mem_desc{
 	paddr_t phy_addr;
-	unsigned long ref;
+	long ref;
 	struct mem_desc *next, *prev;
 };
 
diff --git a/core/paging.c b/core/paging.c
index 210d8b9..169f922 100644
--- a/core/paging.c
+++ b/core/paging.c
@@ -1,6 +1,9 @@
 #include "klibc.h"
+#include "errno.h"
 #include "mem.h"
+#include "paging.h"
 #include "stdarg.h"
+#include "vga.h"
 
 // In a Vaddr, the first 10 bits (MSB) are the index in the Page Directory. A Page Directory Entry points to a Page Table.
 // The next 10 bits are an index in this Page Table. A Page Table Entry then points to a physical address, to which the remaining 12 bits are added as an offset.
@@ -58,6 +61,8 @@ struct pdbr {
 	uint32_t pd_paddr : 20;
 } __attribute__((packed));
 
+
+// Invalidate the TLB entry for the page at the given virtual address
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
 	asm volatile("invlpg (%0)" ::"r"(addr) : "memory");
@@ -123,12 +128,67 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
 	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
 
 	// Thanks to mirroring, we can access the PD
-	struct pde *pd = (struct pde *)(PD_MIRROR_PAGE_IDX * 4 * (1 << 20) +
-					PD_MIRROR_PAGE_IDX * 4 * (1 << 10));
+	struct pde *pd = (struct pde *)(PD_MIRROR_PAGE_IDX * (1U << 22) +
+					PD_MIRROR_PAGE_IDX * (1U << 12));
 
-	struct pte *pt = (struct pte *)(PD_MIRROR_PAGE_IDX * 4 * (1 << 20) +
-					PD_MIRROR_PAGE_IDX * 4 * (1 << 10));
+	struct pte *pt = (struct pte *)(PD_MIRROR_PAGE_IDX * (1U << 22) +
+					pdEntry * (1U << 12));
 
-	if(pd[pdEntry].present){
+	// No page table behind this PD entry yet: allocate and install one
+	if (!pd[pdEntry].present) {
+		paddr_t ptPhy = allocPhyPage();
+		memset((void *)ptPhy, 0, PAGE_SIZE);
+
+		pd[pdEntry].user = (flags & PAGING_MEM_USER) ? 1 : 0;
+		pd[pdEntry].present = 1;
+		pd[pdEntry].write = 1;
+		pd[pdEntry].pt_addr = (ptPhy >> PT_SHIFT);
+
+		__native_flush_tlb_single((vaddr_t)pt);
 	}
+
+	// Already mapped? Drop the reference on the old physical page.
+	if (pt[ptEntry].present) {
+		unrefPhyPage(pt[ptEntry].paddr << PAGE_SHIFT);
+	} // PTE not used until now? We are going to use it, so take a reference on the PT.
+	else {
+		refPhyPage(pd[pdEntry].pt_addr << PAGE_SHIFT);
+	}
+
+	pt[ptEntry].user = (flags & PAGING_MEM_USER) ? 1 : 0;
+	pt[ptEntry].present = 1;
+	pt[ptEntry].write = (flags & PAGING_MEM_WRITE) ? 1 : 0;
+	pt[ptEntry].paddr = paddr >> PAGE_SHIFT;
+	refPhyPage(paddr);
+
+	__native_flush_tlb_single(vaddr);
+	return 0;
+}
+
+int pageUnmap(vaddr_t vaddr)
+{
+	uint pdEntry = vaddr >> (PD_SHIFT);
+	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
+
+	// Thanks to mirroring, we can access the PD
+	struct pde *pd =
+		(struct pde *)(PD_MIRROR_PAGE_IDX * (1U << 22) + PD_MIRROR_PAGE_IDX * (1U << 12));
+
+	struct pte *pt =
+		(struct pte *)(PD_MIRROR_PAGE_IDX * (1U << 22) + pdEntry * (1U << 12));
+	if (!pd[pdEntry].present)
+		return -EINVAL;
+	if (!pt[ptEntry].present)
+		return -EINVAL;
+
+	unrefPhyPage(pt[ptEntry].paddr << PAGE_SHIFT);
+	pt[ptEntry].present = 0;
+
+	// The PTE is gone: drop the reference on the PT. If the PT is no longer used, release its PD entry too.
+	if (unrefPhyPage(pd[pdEntry].pt_addr << PT_SHIFT) == 0) {
+		pd[pdEntry].present = 0;
+		__native_flush_tlb_single((vaddr_t)pt);
+	}
+	__native_flush_tlb_single(vaddr);
+	return 0;
+}
diff --git a/core/paging.h b/core/paging.h
index a5551f6..0aa4b90 100644
--- a/core/paging.h
+++ b/core/paging.h
@@ -1,6 +1,10 @@
 #pragma once
 #include "types.h"
 
+#define PAGING_MEM_USER 1
+#define PAGING_MEM_READ (1 << 1)
+#define PAGING_MEM_WRITE (1 << 2)
+
 int pagingSetup(paddr_t upperKernelAddr);
 int pageMap(vaddr_t vaddr, paddr_t paddr, int flags);
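For reference, here is a small standalone sketch (not part of the patch above) of the arithmetic the new pageMap()/pageUnmap() code relies on: splitting a 32-bit virtual address into its Page Directory index, Page Table index and page offset, and forming the mirror addresses at which the PD and the relevant PT become accessible. The concrete values PD_MIRROR_PAGE_IDX = 1023 and PTE_MASK = 0x3FF are assumptions made for illustration; the real definitions live in headers not shown in this diff.

/* Standalone sketch -- not part of the patch above. */
#include <stdint.h>
#include <stdio.h>

#define PD_SHIFT 22              /* top 10 bits: Page Directory index      */
#define PT_SHIFT 12              /* next 10 bits: Page Table index         */
#define PTE_MASK 0x3FFU          /* assumed value: mask for a 10-bit index */
#define PD_MIRROR_PAGE_IDX 1023U /* assumed value: last PDE mirrors the PD */

int main(void)
{
	uint32_t vaddr = 0x00403A10;

	/* Decompose the virtual address exactly as pageMap()/pageUnmap() do. */
	uint32_t pdEntry = vaddr >> PD_SHIFT;              /* -> 1     */
	uint32_t ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK; /* -> 3     */
	uint32_t offset  = vaddr & 0xFFFU;                 /* -> 0xA10 */

	/* Mirror trick: the last PDE maps the PD onto itself, so the PD is
	 * visible at PD_MIRROR_PAGE_IDX * 4 MiB + PD_MIRROR_PAGE_IDX * 4 KiB,
	 * and the PT covering pdEntry at
	 * PD_MIRROR_PAGE_IDX * 4 MiB + pdEntry * 4 KiB. */
	uint32_t pd_vaddr = PD_MIRROR_PAGE_IDX * (1U << 22) + PD_MIRROR_PAGE_IDX * (1U << 12);
	uint32_t pt_vaddr = PD_MIRROR_PAGE_IDX * (1U << 22) + pdEntry * (1U << 12);

	printf("pdEntry=%u ptEntry=%u offset=0x%03x\n",
	       (unsigned)pdEntry, (unsigned)ptEntry, (unsigned)offset);
	printf("PD mirror at 0x%08x, PT mirror at 0x%08x\n",
	       (unsigned)pd_vaddr, (unsigned)pt_vaddr);
	return 0;
}

Under the assumed mirror index of 1023, this prints pdEntry=1, ptEntry=3, offset=0xa10, with the PD mirror at 0xfffff000 and the PT mirror at 0xffc01000, which matches the pd/pt pointer expressions used in the patch.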