matos/core/paging.c


#include "paging.h"
#include "errno.h"
#include "klibc.h"
#include "mem.h"
#include "stdarg.h"

// In a vaddr, the 10 most significant bits are an index into the Page Directory (PD). A Page
// Directory Entry (PDE) points to a Page Table (PT). The next 10 bits are an index into that
// PT, and a Page Table Entry (PTE) points to a physical page, to which the remaining 12 bits
// are added as an offset. So there are 1024 entries in the PD, each pointing to a PT of 1024
// entries, and each PTE points to a 4 KiB page (a worked decomposition follows the constants
// below). The first addresses (up to page_desc from mem.c) are identity-mapped, i.e.
// paddr == vaddr. To keep the PD always accessible, an x86 trick is used: mirroring. A chosen
// entry N of the PD points back to the PD itself (this works because, on x86, a PDE looks very
// much like a PTE). So the virtual address N << (10 + 12), i.e. N * 4 MiB, maps to the physical
// address of the PD. Accessing N * 4 MiB + i * 4 KiB then accesses the PT of the i-th PD entry
// (the MMU treats the PD pointed to by PDE number N as a PT). In particular, accessing
// N * 4 MiB + N * 4 KiB accesses the PD itself (this arithmetic is spelled out by the helpers
// sketched after the structure definitions below).
//
// The PD is therefore at vaddr N * 4 MiB and occupies 4 KiB. Each PT is allocated dynamically.
// Just make sure that N is not already used by the identity mapping.
#define PT_SHIFT 12
#define PTE_MASK 0x3ff // 10bits
#define PD_SHIFT 22
#define PD_MIRROR_PAGE_IDX 1023U
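
/*
 * Illustrative example (not in the original file): decomposition of a
 * hypothetical virtual address 0xC0103ABC using the constants above:
 *   pdEntry = 0xC0103ABC >> PD_SHIFT              = 0x300 (768)
 *   ptEntry = (0xC0103ABC >> PT_SHIFT) & PTE_MASK = 0x103 (259)
 *   offset  = 0xC0103ABC & 0xFFF                  = 0xABC
 * i.e. PD entry 768 selects a PT, PT entry 259 selects a 4 KiB physical
 * page, and 0xABC is the byte offset inside that page.
 */
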
static unsigned long mappedPage = 0;

struct pde {
	uint32_t present : 1;
	uint32_t write : 1;         // 0 read-only - 1 read/write
	uint32_t user : 1;          // 0 supervisor - 1 user
	uint32_t write_through : 1; // 0 write-back - 1 write-through
	uint32_t cache_disable : 1;
	uint32_t access : 1;        // has been accessed
	uint32_t zero : 1;          // not used
	uint32_t size : 1;          // 0 for 4 KiB - 1 for 4 MiB
	uint32_t ignored : 1;
	uint32_t available : 3;
	uint32_t pt_addr : 20;
} __attribute__((packed));

struct pte {
	uint32_t present : 1;
	uint32_t write : 1;         // 0 read-only - 1 read/write
	uint32_t user : 1;          // 0 supervisor - 1 user
	uint32_t write_through : 1; // 0 write-back - 1 write-through
	uint32_t cache_disable : 1;
	uint32_t access : 1;        // has been accessed
	uint32_t dirty : 1;         // if set, indicates that the page has been written to. This flag
	                            // is not updated by the CPU, and once set will not unset itself.
	uint32_t zero : 1;          // if PAT is supported, indicates the memory type. Otherwise,
	                            // it must be 0.
	uint32_t global : 1;        // if set, prevents the TLB from updating the address in its
	                            // cache when CR3 is reset. Note that the page global enable bit
	                            // in CR4 must be set to enable this feature.
	uint32_t available : 3;
	uint32_t paddr : 20;
} __attribute__((packed));

struct pdbr {
	uint32_t zero1 : 3;          // reserved
	uint32_t write_through : 1;  // 0 write-back - 1 write-through
	uint32_t cache_disabled : 1; // 1 = cache disabled
	uint32_t zero2 : 7;          // reserved
	uint32_t pd_paddr : 20;
} __attribute__((packed));

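/*
 * Illustrative sketch (not part of the original file): with PD_MIRROR_PAGE_IDX == 1023, the
 * mirroring described at the top of this file places the PD at 0xFFFFF000 and the PT of PD
 * entry i at 0xFFC00000 + i * 4 KiB. These hypothetical helpers spell out the address
 * arithmetic that pageMap() and pageUnmap() below perform inline.
 */
static inline struct pde *mirroredPd(void)
{
	// PD seen through the mirror entry: N * 4 MiB + N * 4 KiB
	return (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) +
			      (PD_MIRROR_PAGE_IDX << PT_SHIFT));
}

static inline struct pte *mirroredPt(uint pdEntry)
{
	// PT of PD entry pdEntry: N * 4 MiB + pdEntry * 4 KiB
	return (struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));
}
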
// invalidate the TLB entry for the page located at the given virtual address
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r"(addr) : "memory");
}

int pagingSetup(paddr_t upperKernelAddr)
{
	struct pdbr cr3;

	// x86 has 1024 PDEs of 4 bytes each: 4 KiB!
	struct pde *pd = (struct pde *)allocPhyPage();

	memset(pd, 0, PAGE_SIZE);
	memset(&cr3, 0x0, sizeof(struct pdbr));
	cr3.pd_paddr = ((paddr_t)pd) >> 12;

	// The MMU is not enabled yet, so there is no need to use mirroring.
	// Identity mapping up to upperKernelAddr.
	for (paddr_t i = 0; i < upperKernelAddr; i += PAGE_SIZE) {
		uint pdEntry = i >> PD_SHIFT;
		uint ptEntry = (i >> PT_SHIFT) & PTE_MASK;

		struct pte *pt;
		if (pd[pdEntry].present) {
			pt = (struct pte *)(pd[pdEntry].pt_addr << PT_SHIFT);
			refPhyPage((paddr_t)pt);
		} else {
			pt = (struct pte *)allocPhyPage();
			memset(pt, 0, PAGE_SIZE);
			pd[pdEntry].present = 1;
			pd[pdEntry].write = 1;
			pd[pdEntry].pt_addr = ((paddr_t)pt >> PT_SHIFT);
		}
		pt[ptEntry].present = 1;
		pt[ptEntry].write = 1; // TODO: map kernel code read-only
		pt[ptEntry].paddr = i >> PAGE_SHIFT;
	}

	// Set up mirroring
	pd[PD_MIRROR_PAGE_IDX].present = 1;
	pd[PD_MIRROR_PAGE_IDX].write = 1;
	pd[PD_MIRROR_PAGE_IDX].pt_addr = ((paddr_t)pd >> PT_SHIFT);

	// Load the PDBR into the MMU and enable paging:
	asm volatile("movl %0,%%cr3\n\t"
		     "movl %%cr0,%%eax\n\t"
		     "orl $0x80010000, %%eax\n\t" /* PG (bit 31) | WP (bit 16) */
		     "movl %%eax,%%cr0\n\t"
		     "jmp 1f\n\t"
		     "1:\n\t"
		     "movl $2f, %%eax\n\t"
		     "jmp *%%eax\n\t"
		     "2:\n\t" ::"r"(cr3)
		     : "memory", "eax");

	return 0;
}

int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
{
	uint pdEntry = vaddr >> PD_SHIFT;
	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;

	// Thanks to mirroring, we can access the PD
	struct pde *pd = (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) +
					(PD_MIRROR_PAGE_IDX << PT_SHIFT));

	struct pte *pt =
		(struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));

	if (!pd[pdEntry].present) {
		paddr_t ptPhy = allocPhyPage();

		if (ptPhy == (paddr_t)NULL)
			return -ENOMEM;

		pd[pdEntry].user = (flags & PAGING_MEM_USER) ? 1 : 0;
		pd[pdEntry].present = 1;
		pd[pdEntry].write = 1;
		pd[pdEntry].pt_addr = (ptPhy >> PT_SHIFT);

		__native_flush_tlb_single((vaddr_t)pt);
		memset((void *)pt, 0, PAGE_SIZE);
	} else {
		// Already mapped? Remove the old mapping.
		if (pt[ptEntry].present) {
			unrefPhyPage(pt[ptEntry].paddr << PAGE_SHIFT);
		} else {
			// PTE not used yet? We are going to use it, so increase the PT refcount.
			refPhyPage(pd[pdEntry].pt_addr << PAGE_SHIFT);
		}
	}

	pt[ptEntry].user = (flags & PAGING_MEM_USER) ? 1 : 0;
	pt[ptEntry].present = 1;
	pt[ptEntry].write = (flags & PAGING_MEM_WRITE) ? 1 : 0;
	pt[ptEntry].paddr = paddr >> PAGE_SHIFT;
	refPhyPage(paddr);

	__native_flush_tlb_single(vaddr);
	mappedPage++;

	return 0;
}
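
/*
 * Illustrative usage (not part of the original file): map one freshly allocated physical
 * frame read/write at a caller-chosen kernel virtual address through the pageMap() API
 * above. Note that pageMap() itself calls refPhyPage() on the mapped frame.
 */
static inline int examplePageMapOne(vaddr_t vaddr)
{
	paddr_t page = allocPhyPage();

	if (page == (paddr_t)NULL)
		return -ENOMEM;

	return pageMap(vaddr, page, PAGING_MEM_WRITE);
}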

int pageUnmap(vaddr_t vaddr)
{
	uint pdEntry = vaddr >> PD_SHIFT;
	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;

	// Thanks to mirroring, we can access the PD
	struct pde *pd = (struct pde *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) +
					(PD_MIRROR_PAGE_IDX << PT_SHIFT));

	struct pte *pt =
		(struct pte *)((PD_MIRROR_PAGE_IDX << PD_SHIFT) + (pdEntry << PT_SHIFT));

	if (!pd[pdEntry].present)
		return -EINVAL;
	if (!pt[ptEntry].present)
		return -EINVAL;

	unrefPhyPage(pt[ptEntry].paddr << PAGE_SHIFT);
	pt[ptEntry].present = 0;

	// The PTE is no longer used, so drop one reference on the PT. Is the PT now unused?
	if (unrefPhyPage(pd[pdEntry].pt_addr << PT_SHIFT) == 0) {
		pd[pdEntry].present = 0;
		__native_flush_tlb_single((vaddr_t)pt);
	}

	__native_flush_tlb_single(vaddr);
	mappedPage--;

	return 0;
}

unsigned long getNbMappedPage(void)
{
	return mappedPage;
}