implement paging map and unmap

This commit is contained in:
Mathieu Maret 2018-11-12 23:01:55 +01:00
parent 63ce78205a
commit 4a5f5674ce
4 changed files with 71 additions and 7 deletions

View File

@ -71,7 +71,7 @@ int unrefPhyPage(paddr_t addr)
list_add_tail(free_page, mem);
}
return 0;
return mem->ref;
}
int refPhyPage(paddr_t addr)

View File

@ -12,7 +12,7 @@ extern uint32_t __ld_kernel_end;
// Descriptor for one physical page frame, linked into a doubly-linked
// free/used list (see list_add_tail usage in the allocator).
// This commit changes `ref` from unsigned to signed so that
// unrefPhyPage() can return the remaining count and a negative value
// can flag an underflow (over-release) instead of wrapping around.
struct mem_desc{
	paddr_t phy_addr;             // physical base address of this page frame
	long ref;                     // reference count; signed on purpose (see above)
	struct mem_desc *next, *prev; // intrusive list links
};

View File

@ -1,6 +1,9 @@
#include "klibc.h"
#include "errno.h"
#include "mem.h"
#include "paging.h"
#include "stdarg.h"
#include "vga.h"
// In a vaddr, the 10 most-significant bits are an index into the Page Directory. A Page Directory Entry points to a Page Table.
// The next 10 bits are an index into that Page Table. A Page Table Entry points to a physical page, to which the remaining 12 bits are added as an offset.
@ -58,6 +61,8 @@ struct pdbr {
uint32_t pd_paddr : 20;
} __attribute__((packed));
// invalidate the TLB entry for the page located at the given virtual address
static inline void __native_flush_tlb_single(unsigned long addr)
{
asm volatile("invlpg (%0)" ::"r"(addr) : "memory");
@ -123,12 +128,67 @@ int pageMap(vaddr_t vaddr, paddr_t paddr, int flags)
uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;
// Thank to mirroring, we can access the PD
struct pde *pd = (struct pde *)(PD_MIRROR_PAGE_IDX * 4 * (1 << 20) +
PD_MIRROR_PAGE_IDX * 4 * (1 << 10));
struct pde *pd = (struct pde *)(PD_MIRROR_PAGE_IDX * (1U << 22) +
PD_MIRROR_PAGE_IDX * (1U << 12));
struct pte *pt = (struct pte *)(PD_MIRROR_PAGE_IDX * 4 * (1 << 20) +
PD_MIRROR_PAGE_IDX * 4 * (1 << 10));
struct pte *pt = (struct pte *)(PD_MIRROR_PAGE_IDX * (1U << 22) +
pdEntry * (1U << 12));
if(pd[pdEntry].present){
if(!pd[pdEntry].present){
paddr_t ptPhy = allocPhyPage();
memset((void *)ptPhy, 0, PAGE_SIZE);
pd[pdEntry].user = (flags & PAGING_MEM_USER) ? 0:1;
pd[pdEntry].present = 1;
pd[pdEntry].write = 1;
pd[pdEntry].pt_addr = (ptPhy >> PT_SHIFT);
__native_flush_tlb_single((vaddr_t)pt);
}
// Already mapped ? Remove old mapping
if (pt[ptEntry].present) {
unrefPhyPage(pt[ptEntry].paddr << PAGE_SHIFT);
} // PTE not already used ? We will use it ! So increase the PT ref count
else {
refPhyPage(pd[pdEntry].pt_addr << PAGE_SHIFT);
}
pt[ptEntry].user = (flags & PAGING_MEM_USER) ? 0 : 1;
pt[ptEntry].present = 1;
pt[ptEntry].write = (flags & PAGING_MEM_WRITE) ? 0 : 1;
pt[ptEntry].paddr = paddr >> PAGE_SHIFT;
refPhyPage(paddr);
__native_flush_tlb_single(vaddr);
return 0;
}
/**
 * Remove the mapping of virtual address `vaddr`.
 *
 * @param vaddr  virtual address whose mapping is removed
 * @return 0 on success, -EINVAL if vaddr is not currently mapped
 *
 * Side effects: drops the refcount of the mapped frame, may free the
 * page table (and clear its PDE) when it becomes unused, and
 * invalidates the affected TLB entries.
 */
int pageUnmap(vaddr_t vaddr)
{
	uint pdEntry = vaddr >> (PD_SHIFT);
	uint ptEntry = (vaddr >> PT_SHIFT) & PTE_MASK;

	// Thanks to the PD mirroring trick, the page directory and the page
	// table covering vaddr are reachable at these virtual addresses.
	struct pde *pd =
	    (struct pde *)(PD_MIRROR_PAGE_IDX * (1U << 22) + PD_MIRROR_PAGE_IDX * (1U << 12));
	struct pte *pt =
	    (struct pte *)(PD_MIRROR_PAGE_IDX * (1U << 22) + pdEntry * (1U << 12));

	if (!pd[pdEntry].present)
		return -EINVAL;
	if (!pt[ptEntry].present)
		return -EINVAL;

	// Release the mapped frame and clear the PTE.
	unrefPhyPage(pt[ptEntry].paddr << PAGE_SHIFT);
	pt[ptEntry].present = 0;

	// This PTE no longer uses the page table: drop the PT's refcount.
	// CONSISTENCY: use PAGE_SHIFT here as pageMap does (it previously used
	// PT_SHIFT — numerically identical on x86-32, but misleading).
	// If nobody references the PT anymore, tear it down.
	if (unrefPhyPage(pd[pdEntry].pt_addr << PAGE_SHIFT) == 0) {
		pd[pdEntry].present = 0;
		__native_flush_tlb_single((vaddr_t)pt);
	}
	__native_flush_tlb_single(vaddr);
	return 0;
}

View File

@ -1,6 +1,10 @@
#pragma once
#include "types.h"
// Access flags for pageMap() (`flags` bitmask).
// PAGING_MEM_USER controls the PDE/PTE user bit; PAGING_MEM_WRITE controls
// the PTE write bit. PAGING_MEM_READ is not consulted by the visible
// pageMap() code — presumably reserved for future use; confirm.
#define PAGING_MEM_USER 1
#define PAGING_MEM_READ (1 << 1)
#define PAGING_MEM_WRITE (1 << 2)
// Initialize paging; upperKernelAddr presumably marks the end of the kernel
// image in physical memory — TODO confirm against the caller.
int pagingSetup(paddr_t upperKernelAddr);
// Map physical frame `paddr` at virtual address `vaddr` with the given
// PAGING_MEM_* flags; returns 0 on success.
int pageMap(vaddr_t vaddr, paddr_t paddr, int flags);