//----------------------------------------------------------------------------//
//                                GNU GPL OS/K                                 //
//                                                                             //
//                   Desc: Paging memory related functions                     //
//                                                                             //
//                                                                             //
//                    Copyright © 2018-2019 The OS/K Team                      //
//                                                                             //
//    This file is part of OS/K.                                               //
//                                                                             //
//    OS/K is free software: you can redistribute it and/or modify             //
//    it under the terms of the GNU General Public License as published by     //
//    the Free Software Foundation, either version 3 of the License, or        //
//    any later version.                                                       //
//                                                                             //
//    OS/K is distributed in the hope that it will be useful,                  //
//    but WITHOUT ANY WARRANTY; without even the implied warranty of           //
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the             //
//    GNU General Public License for more details.                             //
//                                                                             //
//    You should have received a copy of the GNU General Public License        //
//    along with OS/K. If not, see <https://www.gnu.org/licenses/>.            //
//----------------------------------------------------------------------------//

#include <kernel.h>
#include <init/boot.h>
#include <ke/idt.h>
#include <ex/malloc.h>
#include <mm/heap.h>
#include <mm/paging.h>
#include <mm/map.h>
#include <lib/buf.h>
#include <io/vga.h>
#include <ke/time.h>

//-----------

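// MmPageMapLevel4 is the kernel's top-level page table (PML4); MmInitPaging()
// fills it and hands it to MmLoadPML4() once the hierarchy is built.
//
// MmPhysicalPageTable is a reverse map with one ulong per physical page frame,
// indexed by (physical address / KPAGESIZE); each entry holds the virtual
// address the frame is currently mapped at (see MmMapPage/MmTransPhyToVirtAddr).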
static pml4_t MmPageMapLevel4[512] __attribute__((__aligned__(KPAGESIZE)));
static ulong *MmPhysicalPageTable __attribute__((__aligned__(KPAGESIZE)));

extern ulong _text;
extern ulong _text_end;
extern ulong _rodata;
extern ulong _rodata_end;
extern ulong _data;
extern ulong _data_end;

extern MemoryMap_t memoryMap;

static ulong MmStackGuards[2] = { 0 };
ulong MmVirtLastAddress = 0;
ulong MmPhysLastKernAddress = 0;

//-----------

//
// Creates our new page table structure and loads it
//
void MmInitPaging(void)
{
    pdpe_t *MmPDP = NULL;
    pde_t *MmPD = NULL;
    pte_t *MmPT = NULL;
    ulong index, xedni;
    ulong curAddrPML4;
    ulong curAddrPDP;
    ulong curAddrPD;
    ulong curAddrPT;

    ulong firstDirectoryAddr = 0;
    ulong lastDirectoryAddr = 0;
    ulong phDirSize = 0;

    KernLog("\tActivating paging...\n");

    // Maximum PHYSICAL address in memory
    ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;

    // Last physical address used by the kernel (end of the kernel heap)
    MmPhysLastKernAddress = (ulong)(_heap_start + _heap_max);

    // Size of the physical (reverse) page table
    phDirSize = (((phRamSize + KPAGESIZE) / KPAGESIZE)*sizeof(ulong));

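    // Assuming KPAGESIZE is the usual 4 KiB and ulong is 8 bytes (KPAGESIZE
    // itself comes from <mm/paging.h>), the reverse map costs 8 bytes per
    // frame, i.e. roughly 2 MiB of table per GiB of physical RAM
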
    // Maximum VIRTUAL address in memory
    MmVirtLastAddress = phRamSize;

    // Alloc structures
    memzero((void *)&MmPageMapLevel4[0], 512*sizeof(ulong));
    KalAllocMemoryEx((void**)&MmPhysicalPageTable, phDirSize, M_ZEROED, KPAGESIZE);

    //DebugLog("\t\t\t\tPhysical map addr : %p\n", MmPhysicalPageTable);

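    // Coverage per level, assuming the usual 4-level x86-64 layout with
    // KPAGESIZE = 4 KiB: one PML4 entry spans 512 GiB (512^3 pages), one PDP
    // entry 1 GiB (512^2 pages), one PD entry 2 MiB (512 pages), and one PT
    // entry a single page.
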
    for (curAddrPML4 = 0;
            curAddrPML4 < 512 * KPAGESIZE * 0x8000000;
            curAddrPML4 += ((ulong)KPAGESIZE * 0x8000000)) {
        // Create an entry in PML4 every 512GB
        // 0x8000000 = 512 ^ 3

        index = (curAddrPML4 / ((ulong)KPAGESIZE * 0x8000000)) % 512;

        if (curAddrPML4 > MmPhysLastKernAddress) {
            MmPageMapLevel4[index] = (pdpe_t *)0;
            ////DebugLog("PML4 %d\n", index);
            continue;
        }

        KalAllocMemoryEx((void**)&MmPDP, 512*sizeof(pde_t), M_ZEROED, KPAGESIZE);

        if (!firstDirectoryAddr) {
            firstDirectoryAddr = (ulong)MmPDP;
        }

        //DebugLog("\t\t\t\tPDP %d : %p\n", index, MmPDP);
        MmPageMapLevel4[index] = (pdpe_t *)((ulong)MmPDP | PRESENT | READWRITE);

        for (curAddrPDP = curAddrPML4;
                curAddrPDP < (curAddrPML4 + ((ulong)KPAGESIZE * 0x8000000));
                curAddrPDP += ((ulong)KPAGESIZE * 0x40000)) {
            // Create an entry in PDP every 1GB
            // 0x40000 = 512 ^ 2

            index = (curAddrPDP / ((ulong)KPAGESIZE * 0x40000)) % 512;

            if (curAddrPDP > MmPhysLastKernAddress) {
                MmPDP[index] = (pde_t *)0;
                //DebugLog("PDP %d\n", index);
                continue;
            }

            KalAllocMemoryEx((void**)&MmPD, 512*sizeof(pde_t), M_ZEROED, KPAGESIZE);

            index = (curAddrPDP / ((ulong)KPAGESIZE * 0x40000)) % 512;

            //DebugLog("\t\t\t\tPD %d : %p\n", index, MmPD);
            MmPDP[index] = (pde_t *)((ulong)MmPD | PRESENT | READWRITE);

            for (curAddrPD = curAddrPDP;
                    curAddrPD < (curAddrPDP + ((ulong)KPAGESIZE * 0x40000));
                    curAddrPD += ((ulong)KPAGESIZE * 0x200)) {
                // Create an entry in PD every 2MB
                // 0x200 = 512

                index = (curAddrPD / ((ulong)KPAGESIZE * 0x200)) % 512;

                if (curAddrPD > MmPhysLastKernAddress) {
                    MmPD[index] = (pte_t *)0;
                    //DebugLog("PD %d\n", index);
                    continue;
                }

                KalAllocMemoryEx((void**)&MmPT, 512*sizeof(pte_t), M_ZEROED, KPAGESIZE);

                //DebugLog("\t\t\t\tPT %d : %p\n", index, MmPT);
                MmPD[index] = (pte_t *)((ulong)MmPT | PRESENT | READWRITE);

                for (curAddrPT = curAddrPD;
                        curAddrPT < (curAddrPD + ((ulong)KPAGESIZE * 0x200));
                        curAddrPT += (ulong)KPAGESIZE) {
                    // Create an entry in PT for each 4KB page

                    index = (curAddrPT / ((ulong)KPAGESIZE)) % 512;
                    xedni = (curAddrPT / ((ulong)KPAGESIZE));

                    // STACK GUARD PAGE
                    if ((ulong)curAddrPT == (ulong)BtLoaderInfo.stackEndAddr) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        MmStackGuards[0] = (ulong)curAddrPT;
                        //DebugLog("\tStack Guard at %p\n", curAddrPT);
                    }
                    else if ((ulong)curAddrPT == (ulong)BtLoaderInfo.kernelEndAddr) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        MmStackGuards[1] = (ulong)curAddrPT;
                        //DebugLog("\tStack Guard at %p\n", curAddrPT);
                    }
                    // SECTION .TEXT PROTECTION
                    else if ((ulong)curAddrPT >= (ulong)&_text && (ulong)curAddrPT <= (ulong)&_text_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        //DebugLog("\tSection .text at %p\n", curAddrPT);
                    }
                    // SECTION .DATA PROTECTION
                    else if ((ulong)curAddrPT >= (ulong)&_data && (ulong)curAddrPT <= (ulong)&_data_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | WRITETHR | READWRITE | NX;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        //DebugLog("\tSection .data at %p\n", curAddrPT);
                    }
                    // SECTION .RODATA PROTECTION
                    else if ((ulong)curAddrPT >= (ulong)&_rodata && (ulong)curAddrPT <= (ulong)&_rodata_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | NX;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        //DebugLog("\tSection .rodata at %p\n", curAddrPT);
                    }
                    // While we're inside the kernel pages
                    else if ((ulong)curAddrPT <= MmPhysLastKernAddress) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | READWRITE;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                    }
                    else {
                        MmPT[index] = (ulong)0;
                        MmPhysicalPageTable[xedni] = (ulong)0;
                    }
                }
            }
        }
    }
    lastDirectoryAddr = (ulong)MmPT;

    MmLoadPML4((void *)MmPageMapLevel4);
    MmEnableWriteProtect();

    DebugLog("\tPage table size : %u MB\n", (lastDirectoryAddr - firstDirectoryAddr + phDirSize)/MB);
}

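// After MmInitPaging() only the physical range up to MmPhysLastKernAddress is
// identity-mapped (with .text/.rodata/.data and the stack guards given tighter
// flags); everything above it is left unmapped until MmMapPage() installs it.
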
//
// Gets the page descriptor (PTE) associated with a virtual address
//
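// (It walks the PML4/PDP/PD hierarchy, allocating any missing intermediate
//  table on the way, and returns a pointer to the raw PT entry, i.e. the
//  physical frame address together with its flag bits.)
//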
static ulong *MmGetPageDescriptorFromVirtual(void *virtualAddr)
{
    volatile ulong virtAddrPage;
    volatile pdpe_t *pdp;
    volatile pde_t *pd;
    volatile pte_t *pt;
    volatile ulong *page;
    volatile ulong index;

    //DebugLog("Get virtual descriptor %p\n", virtualAddr);
    while (virtualAddr) {
        virtAddrPage = (ulong)virtualAddr & ( ~((KPAGESIZE - 1) | NX));

        index = (virtAddrPage / ((ulong)KPAGESIZE * 0x8000000)) % 512;
        pdp = (pdpe_t*)((ulong)MmPageMapLevel4[index] & ( ~(KPAGESIZE - 1)) );
        //DebugLog("pdp at %p\t: %p\n", &pdp, pdp);
        if (!pdp) {
            KalAllocMemoryEx((void**)&pdp, 512*sizeof(pdpe_t), M_ZEROED, KPAGESIZE);
            MmPageMapLevel4[index] = (pdpe_t *)((ulong)pdp | PRESENT | READWRITE);
            //DebugLog("Created pdp\t: %p\n", pdp);
            continue;
        }

        index = (virtAddrPage / ((ulong)KPAGESIZE * 0x40000)) % 512;
        pd = (pde_t*)( (ulong)pdp[index] & ( ~(KPAGESIZE - 1)) );
        //DebugLog("pd at %p\t: %p\n", &pd, pd);
        if (!pd) {
            KalAllocMemoryEx((void**)&pd, 512*sizeof(pde_t), M_ZEROED, KPAGESIZE);
            pdp[index] = (pde_t *)((ulong)pd | PRESENT | READWRITE);
            //DebugLog("Created pd\t: %p\n", pd);
            continue;
        }

        index = (virtAddrPage / ((ulong)KPAGESIZE * 0x200)) % 512;
        pt = (pte_t*)( (ulong)pd[index] & ( ~(KPAGESIZE - 1)) );
        DebugLog("pt at %p\t: %p\n", &pt, pt);
        if (!pt) {
            KalAllocMemoryEx((void**)&pt, 512*sizeof(pte_t), M_ZEROED, KPAGESIZE);
            pd[index] = (pte_t *)((ulong)pt | PRESENT | READWRITE);
            //DebugLog("Created pt\t: %p\n", pt);
            continue;
        }
        break;
    }

    index = ((ulong)virtualAddr / ((ulong)KPAGESIZE)) % 512;
    page = &(pt[index]);
    DebugLog("page (with flags): %p\n", *page);
    return page;
}

//
// Translates a virtual address to its physical equivalent
//
void *MmTransVirtToPhyAddr(void* virtualAddr)
{
    ulong virtAddrPage = (ulong)virtualAddr & ( ~(KPAGESIZE - 1));
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    if (!(*page)) {
        return NULL;
    }

    return (void*)((*page & ~((KPAGESIZE - 1) | NX)) + ((ulong)virtualAddr - (ulong)virtAddrPage));
}

//
// Translates a physical address back to its current virtual mapping
//
void *MmTransPhyToVirtAddr(void* physicalAddr)
{
    ulong phyAddrPage = (ulong)physicalAddr & ( ~((KPAGESIZE - 1) | NX));
    return (void*)( MmPhysicalPageTable[(ulong)physicalAddr / ((ulong)KPAGESIZE)]
                        + ((ulong)physicalAddr - phyAddrPage));
}

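// Example (sketch, the addresses are made up): translating back and forth once
// a page has been mapped by MmInitPaging()/MmMapPage():
//
//      void *phys = MmTransVirtToPhyAddr((void *)someVirtAddr);
//      void *virt = MmTransPhyToVirtAddr(phys);   // == someVirtAddr for
//                                                 //    page-aligned mappings
//
// MmTransVirtToPhyAddr() returns NULL when the address is not mapped.
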
//
// Add flags to a page
//
void MmSetPage(void* virtualAddr, ulong flags)
{
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    *page |= flags;

    KeFlushTlbSingle(*page);
}

//
// Remove flags from a page
//
void MmUnsetPage(void* virtualAddr, ulong flags)
{
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    *page &= (~flags);

    KeFlushTlbSingle(*page);
}

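// Example (sketch): tightening the protection of an already-mapped page, using
// the flag macros presumably defined in <mm/paging.h> (addr is a placeholder):
//
//      MmSetPage(addr, NX);                // forbid instruction fetches
//      MmUnsetPage(addr, READWRITE);       // make the page read-only
//
// Both helpers call KeFlushTlbSingle() themselves afterwards.
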
//
// Map a page in memory
//
void MmMapPage(void* virtualAddr, void* physicalAddr, ulong flags)
{
    //DebugLog("Request %p:%p with %lu\n", virtualAddr, physicalAddr, flags);

    register ulong virtAddrPage;
    volatile pdpe_t *pdp;
    volatile pde_t *pd;
    volatile pte_t *pt;

    virtAddrPage = (ulong)virtualAddr & ( ~((KPAGESIZE - 1) | NX));

    //DebugLog("Get virtual descriptor %p\n", virtualAddr);
    while (virtAddrPage) {

        pdp = (pdpe_t*)((ulong)MmPageMapLevel4[
                (virtAddrPage / ((ulong)KPAGESIZE * 0x8000000)) % 512
            ] & ( ~(KPAGESIZE - 1)) );
        //DebugLog("pdp at %p\t: %p\n", &pdp, pdp);

        if (!pdp) {
            KalAllocMemoryEx((void**)&pdp, 512*sizeof(pdpe_t), M_ZEROED, KPAGESIZE);

            MmPageMapLevel4[
                (virtAddrPage / ((ulong)KPAGESIZE * 0x8000000)) % 512
            ] = (pdpe_t *)((ulong)pdp | PRESENT | READWRITE);

            //DebugLog("Created pdp\t: %p\n", pdp);
            continue;
        }

        pd = (pde_t*)( (ulong)pdp[
                (virtAddrPage / ((ulong)KPAGESIZE * 0x40000)) % 512
            ] & ( ~(KPAGESIZE - 1)) );
        //DebugLog("pd at %p\t: %p\n", &pd, pd);

        if (!pd) {
            KalAllocMemoryEx((void**)&pd, 512*sizeof(pde_t), M_ZEROED, KPAGESIZE);

            pdp[
                (virtAddrPage / ((ulong)KPAGESIZE * 0x40000)) % 512
            ] = (pde_t *)((ulong)pd | PRESENT | READWRITE);
            DebugLog("Created pd\t: %p\n", pd);
            continue;
        }

        pt = (pte_t*)( (ulong)pd[
                (virtAddrPage / ((ulong)KPAGESIZE * 0x200)) % 512
            ] & ( ~(KPAGESIZE - 1)) );
        //DebugLog("pt at %p\t: %p\n", &pt, pt);

        if (!pt) {
            KalAllocMemoryEx((void**)&pt, 512*sizeof(pte_t), M_ZEROED, KPAGESIZE);
            pd[
                (virtAddrPage / ((ulong)KPAGESIZE * 0x200)) % 512
            ] = (pte_t *)((ulong)pt | PRESENT | READWRITE);
            DebugLog("Created pt\t: %p\n", pt);
            continue;
        }
        break;
    }

    pt[
        (virtAddrPage / (ulong)KPAGESIZE) % 512
    ] = (ulong)physicalAddr | flags;

    MmPhysicalPageTable[(ulong)physicalAddr / ((ulong)KPAGESIZE)] = (ulong)virtualAddr;

    KeFlushTlbSingle(
        pt[
            (virtAddrPage / (ulong)KPAGESIZE) % 512
        ] = (ulong)physicalAddr | flags
    );

    //DebugLog("Done %p at page %p\n", *page, page);

    if ((ulong)virtualAddr > MmVirtLastAddress)
        MmVirtLastAddress = (ulong)virtualAddr + KPAGESIZE;
}

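// Example (sketch): mapping then unmapping one page; someVirtAddr and
// somePhysFrame are placeholders for values a real caller would obtain from
// the kernel's allocators:
//
//      MmMapPage((void *)someVirtAddr, (void *)somePhysFrame, PRESENT | READWRITE);
//      ...
//      MmUnmapPage((void *)someVirtAddr);
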
//
// Unmap a page in memory
//
void MmUnmapPage(void* virtualAddr)
{
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    // Clear the reverse-map entry of the frame that was mapped here
    MmPhysicalPageTable[(ulong)(MmTransVirtToPhyAddr(virtualAddr)) / ((ulong)KPAGESIZE)] = 0;

    *page = 0;

    KeFlushTlbSingle(*page);
}

//-----------

//
// Returns the address of the Stack Guard of the given rank
//
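// (Rank 0 is the guard page placed at BtLoaderInfo.stackEndAddr, rank 1 the
//  one placed at BtLoaderInfo.kernelEndAddr; both are set up by MmInitPaging)
//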
void *MmGetStackGuards(char rank)
{
    return (void *)MmStackGuards[(int)rank];
}

//
// Page fault handler
//
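// Reminder -- x86 page fault error code bits: bit 0 = protection violation
// (vs. non-present page), bit 1 = write access, bit 2 = fault from user mode,
// bit 3 = reserved bit set, bit 4 = instruction fetch
//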
static void PagingHandler(ISRFrame_t *regs)
{
    ulong StackGuardOne = (ulong)MmGetStackGuards(0);
    ulong StackGuardTwo = (ulong)MmGetStackGuards(1);

    if ((regs->cr2 >= StackGuardOne) && (regs->cr2 <= StackGuardOne + KPAGESIZE)
            && (regs->rsp <= regs->cr2)) {
        bprintf(BStdOut,
                "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Kernel Stack Underflow\n\n"
                " Page Fault Error code : %#x (%b)\n"
                " Stack Guard bypassed : %#x",

                VGA_COLOR_LIGHT_RED,
                regs->ErrorCode,
                regs->ErrorCode,
                StackGuardOne
        );
    } else if ((regs->cr2 >= StackGuardTwo) && (regs->cr2 <= StackGuardTwo + KPAGESIZE)
            && (regs->rsp >= regs->cr2)) {
        bprintf(BStdOut,
                "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Kernel Stack Overflow\n\n"
                " Page Fault Error code : %#x (%b)\n"
                " Stack Guard bypassed : %#x",

                VGA_COLOR_LIGHT_RED,
                regs->ErrorCode,
                regs->ErrorCode,
                StackGuardTwo
        );
    } else if (regs->cr2 == 0) {
        bprintf(BStdOut,
                "\n\n%CPANIC\n[ISR 0xE] Null vector exception!\n\n"
                " Page Fault Error code : %#x (%b)\n",

                VGA_COLOR_LIGHT_RED,
                regs->ErrorCode,
                regs->ErrorCode
        );
    } else if (regs->cr2 >= MmVirtLastAddress || regs->cr2 <= 0) {
        bprintf(BStdOut,
                "\n\n%CPANIC\n[ISR 0xE] Out of bounds of the address space at %p!\n\n"
                " End of the address space : %p\n"
                " Page Fault Error code : %#x (%b)\n",

                VGA_COLOR_LIGHT_RED,
                regs->cr2,
                MmVirtLastAddress,
                regs->ErrorCode,
                regs->ErrorCode
        );
    } else {
        //XXX page fault
        bprintf(BStdOut, "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Page Fault at %p\n\n"
                " Error code : 0x%x (%b)",

                VGA_COLOR_LIGHT_RED,
                regs->cr2,
                regs->ErrorCode,
                regs->ErrorCode
        );
    }

    bprintf(BStdOut, "\n Description : ");

    if (regs->ErrorCode & PRESENT) {
        bprintf(BStdOut, "Page-protection violation ");
    } else {
        bprintf(BStdOut, "Non present page ");
    }

    if (regs->ErrorCode & READWRITE) {
        bprintf(BStdOut, "during write access ");
    } else {
        bprintf(BStdOut, "during read access ");
    }

    if (regs->ErrorCode & (1 << 2))     // U/S bit: fault came from user mode
        bprintf(BStdOut, "from userspace ");

    if (regs->ErrorCode & (1 << 4))     // I/D bit: fault on instruction fetch
        bprintf(BStdOut, "after instruction fetching ");

    KeBrkDumpRegisters(regs);

    BStdOut->flusher(BStdOut);

    KeHaltCPU();
}

void MmActivatePageHandler(void)
{
    // Vector 0xE is the page fault exception (#PF)
    KeRegisterISR(PagingHandler, 0xe);
    //DebugLog("\tPage handler activated\n");
}