Merge branch 'BetterMemory'

This commit is contained in:
Adrien Bourmault 2020-01-20 19:38:46 +01:00
commit f6f1896cb7
18 changed files with 563 additions and 161 deletions

View File

@ -46,7 +46,7 @@ CINCLUDES=-Iinclude
CFLAGS1=-nostdlib -ffreestanding -mcmodel=large -std=gnu11 -fstack-protector-all -fdump-rtl-expand
CFLAGS2= -c -mno-red-zone -mno-mmx -mno-sse -mno-sse2
CFLAGS= $(CFLAGS1) $(CFLAGS2)
CFLAGS_MATHS= $(CFLAGS1) -c -mno-red-zone -mno-mmx -mno-sse2
CFLAGS_MATHS= $(CFLAGS1) -c -mno-red-zone -mno-mmx
ifeq ($(mode), release)
CFLAGS += -D_NO_DEBUG
@ -115,7 +115,7 @@ KernSources = kernel/ke/cpuid.c kernel/mm/paging.c \
kernel/sh/shell.c kernel/sh/shcmds.c \
kernel/sh/musage.c kernel/io/ata.c \
kernel/sh/argv.c kernel/ke/pit.c \
kernel/sh/testcmds.c
kernel/sh/testcmds.c kernel/mm/palloc.c
KernObj=$(patsubst %.c,$(KOBJDIR)/%.o,$(KernSources))
KernDep=$(patsubst %.c,$(KOBJDIR)/%.d,$(KernSources))

View File

@ -74,7 +74,8 @@
│   │   ├── heap.h
│   │   ├── malloc.h
│   │   ├── map.h
│   │   └── paging.h
│   │   ├── paging.h
│   │   └── palloc.h
│   ├── po
│   │   └── shtdwn.h
│   ├── sh
@ -116,7 +117,8 @@
│   │   │   ├── malloc.c
│   │   │   ├── map.c
│   │   │   ├── paging.asm
│   │   │   └── paging.c
│   │   │   ├── paging.c
│   │   │   └── palloc.c
│   │   ├── po
│   │   │   └── shtdwn.c
│   │   ├── ps
@ -157,4 +159,4 @@
├── ProjectTree
└── README.md
28 directories, 104 files
28 directories, 106 files

View File

@ -23,43 +23,36 @@
; along with OS/K. If not, see <https://www.gnu.org/licenses/>. ;
;=----------------------------------------------------------------------------=;
%define MAX_MEMORY 1 ; GiB
[BITS 32]
[section .text]
; ---------------------------------------------------------------------------- ;
; Constructor for the page tables in protected mode ;
; ---------------------------------------------------------------------------- ;
Setup_paging:
;; Map the first PML4 entry to PDP table
;; Map first PML4 entry to PDP table
mov eax, PDP_table
or eax, 1 << 1 | 1 << 0 ; present + writable
or eax, 0b11 ; Present + writable
mov [PML4_table], eax
;; Map the PDP entries to PD tables
mov ebx, PD_table ; start address
mov ecx, 0x0 ; counter variable
.map_pdp_table:
mov eax, ebx
or eax, 1 << 1 | 1 << 0 ; present + writable
mov [PDP_table + 8 * ecx], eax
inc ecx
add ebx, 4096
cmp ecx, MAX_MEMORY ; PDP table is mapped if MAX_MEMORY
jne .map_pdp_table ; else map the next entry
;; Map first PDP entry to PD table
mov eax, PD_table
or eax, 0b11 ; Present + writable
mov [PDP_table], eax
;; Map each PD entry to a 'huge' 4MiB page
;; Map each PD entry to a huge 2MiB page
mov ecx, 0 ; counter variable
mov ecx, 0x0 ; counter variable
.map_pd_table:
;; map ecx-th PD entry to a huge page that starts at address 4MiB*ecx
mov eax, 0x200000
;; Map ecx-th PD entry to a huge page that starts at address 2MiB*ecx
mov eax, 0x200000 ; 2MiB
mul ecx ; start address of ecx-th page
or eax, 1 << 7 | 1 << 1 | 1 << 0 ; present + writable + huge
mov [PD_table + ecx * 8], eax
inc ecx
cmp ecx, 512 * MAX_MEMORY ; PD table is mapped if 512
or eax, 0b10000011 ; present + writable + huge
mov [PD_table + ecx * 8], eax ; map ecx-th entry
inc ecx ; increase counter
cmp ecx, 512 ; if counter == 512, the whole P2 table is mapped
jne .map_pd_table ; else map the next entry
ret
; ---------------------------------------------------------------------------- ;

View File

@ -31,7 +31,7 @@ global newStackEnd
global GDT64
[section .text]
KERNEL_STACK equ 16 * 1024 ; 16KB of stack
KERNEL_STACK equ 16 * 1024 * 1024 ; 16MB of stack
newKernelEnd dq 0x0
newStackEnd dq 0x0
@ -63,4 +63,4 @@ PML4_table:
PDP_table:
resb 4096
PD_table:
times MAX_MEMORY resb 4096
resb 4096

View File

@ -64,7 +64,7 @@ void KeEnablePIT(void);
void KeSleep(uint);
Timer_t *KeSetTimer(uint delay);
int KeGetTimer(Timer_t*);
ulong KeGetTicks(void);
//----------------------------------------------------------------------------//

View File

@ -168,6 +168,7 @@ unsigned long strtoul(const char *restrict, char **restrict, int);
//----------------------------------------------------------------------------//
void *calloc(size_t, size_t) __attribute__((__malloc__));
void *memalign(size_t n, size_t align) __attribute__((__malloc__));
void *malloc(size_t) __attribute__((__malloc__));
void free(void *);

View File

@ -33,7 +33,7 @@
#define KPAGESIZE (4 * KB)
#define UPAGESIZE (4 * KB)
#define USERSPACE 0x80000000
#define USERSPACE 0x200000000
//----------------------------------------------------------------------------//
@ -49,6 +49,19 @@ typedef pde_t* pdpe_t;
// Page directory L4 pointer offset
typedef pdpe_t* pml4_t;
enum
{
PRESENT = 1 << 0,
READWRITE = 1 << 1,
USERMODE = 1 << 2,
WRITETHR = 1 << 3,
CACHEDIS = 1 << 4,
ACCESSED = 1 << 5,
DIRTY = 1 << 6,
HUGE = 1 << 7,
NX = 1UL << 63
};
//----------------------------------------------------------------------------//
//

View File

@ -31,7 +31,22 @@
//----------------------------------------------------------------------------//
typedef struct AllocatedPage_t{
void *phyAddress;
ulong id;
struct AllocatedPage_t *next;
} AllocatedPage_t;
#define CONTIGUOUS true
#define NORMAL false
//----------------------------------------------------------------------------//
ulong MmAllocPageFrameEx(void ***frameListPtr, size_t *pageNumber, size_t size, bool contiguous);
ulong MmAllocPageFrame(size_t size, bool contiguous);
void MmFreePageFrame(ulong id);
error_t MmTestBusyPage(void);
//----------------------------------------------------------------------------//

View File

@ -84,6 +84,8 @@ noreturn void BtStartKern(multiboot_info_t *mbInfo, uint mbMagic, void *codeSeg)
// Command line (kernel mode)
ShStartShell();
//KeCrashSystem();
// Exit !
PoShutdown();
}

View File

@ -24,7 +24,7 @@
#include <kernel.h>
ulong __stack_chk_guard = 0x447c0ffe4dbf9e55;
ulong __stack_chk_guard = 0xec0ffec0ffec0ffe;
noreturn void __stack_chk_fail(void)
{

View File

@ -219,3 +219,8 @@ ulong KeGetTimeStamp(void)
+ (time->year + time->century * 100)
* dpy * 24 * 60 * 60;
}
// Returns the global tick counter maintained by the timer interrupt.
// Monotonically increasing since boot; used by the palloc benchmark code.
ulong KeGetTicks(void)
{
return Ticks;
}

View File

@ -58,9 +58,9 @@ void MmInitGdt(void)
tssDesc.veryHighBase = ((ulong)&tss >> 32) & 0xFFFFFFFF;
tssDesc.lowLimit = sizeof(tss);
tss.ist1 = (ulong)0x0007FFFF; // ISR RESCUE STACK, GUARANTEED FREE FOR USE BY OSDEV.ORG
tss.ist2 = (ulong)0x00EFFFFF; // ISR STACK, GUARANTEED FREE FOR USE BY OSDEV.ORG
tss.ist3 = (ulong)0x00EF0000; // ISR STACK, GUARANTEED FREE FOR USE BY OSDEV.ORG
tss.ist1 = (ulong)0x00007BFF; // ISR RESCUE STACK, GUARANTEED FREE FOR USE BY OSDEV.ORG
tss.ist2 = (ulong)0x00043F00; // ISR STACK, GUARANTEED FREE FOR USE BY OSDEV.ORG
tss.ist3 = (ulong)0x0007FFFF; // ISR STACK, GUARANTEED FREE FOR USE BY OSDEV.ORG
tss.iomap_base = sizeof(tss);
memmove(&gdt[2], &tssDesc, sizeof(TssDescriptor_t));

View File

@ -59,7 +59,7 @@ error_t KalAllocMemoryEx(void **ptr, size_t req, int flags, size_t align)
KeStartPanic("KalAllocMemory: Out of memory");
}
*ptr = (void *)brk;
*ptr = (void *)_ALIGN_UP(brk, align);
if (flags & M_ZEROED) {
memzero(*ptr, req);

View File

@ -102,14 +102,11 @@ static error_t InitMemoryMap(void)
KernLog("\tAvailable RAM size : %u MB\n",
memoryMap.freeRamSize / MB);
// Magic value in memory to prevent smashing
ulong * heapStart = BtLoaderInfo.stackEndAddr + 8;
*heapStart = 0xbad00badbad00bad;
return EOK;
}
size_t MmGetAvailZoneSize(void *start) {
size_t MmGetAvailZoneSize(void *start)
{
uint i;
// Because the kernel is the kernel
@ -133,7 +130,33 @@ size_t MmGetAvailZoneSize(void *start) {
return 0;
}
void *MmGetFirstAvailZone(void *start) {
bool MmIsFailingZoneSize(void *start)
{
uint i;
// Because the kernel is the kernel
if (start < BtLoaderInfo.stackEndAddr + 16)
return 0;
// Search the zone where the start address is
for (i = 0; i < memoryMap.length; i++) {
// if the address is in a failing zone, we can return 1
if (
(memoryMap.entry[i].type == BADRAM_ZONE) &&
(ulong)start >= (ulong)memoryMap.entry[i].addr &&
(ulong)start < ((ulong)memoryMap.entry[i].addr +
(ulong)memoryMap.entry[i].length)
) {
return 1;
}
}
// If there is no zone, we return a 0 size
return 0;
}
void *MmGetFirstAvailZone(void *start)
{
uint i;
void *current = 0;
@ -174,7 +197,8 @@ void *MmGetFirstAvailZone(void *start) {
return current;
}
void MmPrintMemoryMap(void) {
void MmPrintMemoryMap(void)
{
char avStr[15];
extern int shcol;

View File

@ -31,11 +31,12 @@
#include <mm/map.h>
#include <lib/buf.h>
#include <io/vga.h>
#include <ke/time.h>
//-----------
pml4_t MmPageMapLevel4[512] __attribute__((__aligned__(KPAGESIZE)));
ulong *MmPhysicalPageTable;
static pml4_t MmPageMapLevel4[512] __attribute__((__aligned__(KPAGESIZE)));
static ulong *MmPhysicalPageTable __attribute__((__aligned__(KPAGESIZE)));
extern ulong _text;
extern ulong _text_end;
@ -46,23 +47,10 @@ extern ulong _data_end;
extern MemoryMap_t memoryMap;
ulong MmStackGuards[2] = { 0 };
static ulong MmStackGuards[2] = { 0 };
ulong MmVirtLastAddress = 0;
ulong MmPhysLastKernAddress = 0;
enum
{
PRESENT = 1 << 0,
READWRITE = 1 << 1,
USERMODE = 1 << 2,
WRITETHR = 1 << 3,
CACHEDIS = 1 << 4,
ACCESSED = 1 << 5,
DIRTY = 1 << 6,
HUGE = 1 << 7,
NX = 1UL << 63
};
//-----------
//
@ -74,84 +62,106 @@ void MmInitPaging(void)
pde_t *MmPD = NULL;
pte_t *MmPT = NULL;
ulong index, xedni;
ulong curAddrPML4;
ulong curAddrPDP;
ulong curAddrPD;
ulong curAddrPT;
ulong firstDirectoryAddr = 0;
ulong lastDirectoryAddr = 0;
ulong phDirSize = 0;
KernLog("\tActivating paging...\n");
// Maximum PHYSICAL address in memory
ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;
// Difference between the end of kernel and the begin of userspace
MmPhysLastKernAddress = (ulong)(_heap_start + _heap_max);
ulong diffKernUsr = (ulong)USERSPACE - MmPhysLastKernAddress - KPAGESIZE;
// Size of physical table
phDirSize = (((phRamSize + KPAGESIZE) / KPAGESIZE)*sizeof(ulong));
// Maximum VIRTUAL address in memory
MmVirtLastAddress = phRamSize + diffKernUsr;
MmVirtLastAddress = phRamSize;
//DebugLog("\tPaging gap : %u MB (%p)\n\tLast virtual address %p\n", diffKernUsr / MB, diffKernUsr, MmVirtLastAddress);
// Alloc structures
memzero((void *)&MmPageMapLevel4[0], 512*sizeof(ulong));
MmPhysicalPageTable = memalign(phDirSize, KPAGESIZE);
memzero((void *)&MmPageMapLevel4[0], sizeof(MmPageMapLevel4));
phDirSize = ((phRamSize / KPAGESIZE)*sizeof(ulong) + KPAGESIZE) & ( ~(KPAGESIZE - 1));
//DebugLog("\t\t\t\tPhysical map addr : %p\n", MmPhysicalPageTable);
MmPhysicalPageTable = (ulong*)malloc(phDirSize);
//DebugLog("\t\tRam %u MB, pagesize %u KB, size %u MB\n", phRamSize / MB, KPAGESIZE / KB, phDirSize / MB);
for (ulong curAddrPML4 = 0;
curAddrPML4 < MmVirtLastAddress;
for (curAddrPML4 = 0;
curAddrPML4 < 512 * KPAGESIZE * 0x8000000;
curAddrPML4 += ((ulong)KPAGESIZE * 0x8000000)) {
// Create an entry in PML4 each 512GB
// 0x8000000 = 512 ^ 3
MmPDP = (pdpe_t *)malloc(512*sizeof(pde_t));
index = (curAddrPML4 / ((ulong)KPAGESIZE * 0x8000000)) % 512;
if (curAddrPML4 > MmPhysLastKernAddress) {
MmPageMapLevel4[index] = (pdpe_t *)0;
////DebugLog("PML4 %d\n", index);
continue;
}
MmPDP = memalign(512*sizeof(pde_t), KPAGESIZE);
if (!firstDirectoryAddr) {
firstDirectoryAddr = (ulong)MmPDP;
}
index = (curAddrPML4 / ((ulong)KPAGESIZE * 0x8000000)) % 512;
//DebugLog("\t\t\t\tPDP %d : %p\n", index, MmPDP);
MmPageMapLevel4[index] = (pdpe_t *)((ulong)MmPDP | PRESENT | READWRITE);
for (ulong curAddrPDP = curAddrPML4;
curAddrPDP < (curAddrPML4 + ((ulong)KPAGESIZE * 0x8000000)) &&
curAddrPDP < MmVirtLastAddress;
for (curAddrPDP = curAddrPML4;
curAddrPDP < (curAddrPML4 + ((ulong)KPAGESIZE * 0x8000000));
curAddrPDP += ((ulong)KPAGESIZE * 0x40000)) {
// Create an entry in PDP each 1GB
// 0x40000 = 512 ^ 2
MmPD = (pde_t *)malloc(512*sizeof(pde_t));
index = (curAddrPDP / ((ulong)KPAGESIZE * 0x40000)) % 512;
if (curAddrPDP > MmPhysLastKernAddress) {
MmPDP[index] = (pde_t *)0;
//DebugLog("PDP %d\n", index);
continue;
}
MmPD = memalign(512*sizeof(pde_t), KPAGESIZE);
index = (curAddrPDP / ((ulong)KPAGESIZE * 0x40000)) % 512;
//DebugLog("\t\t\t\tPD %d : %p\n", index, MmPD);
MmPDP[index] = (pde_t *)((ulong)MmPD | PRESENT | READWRITE);
for (ulong curAddrPD = curAddrPDP;
curAddrPD < (curAddrPDP + ((ulong)KPAGESIZE * 0x40000)) &&
curAddrPD < MmVirtLastAddress;
for (curAddrPD = curAddrPDP;
curAddrPD < (curAddrPDP + ((ulong)KPAGESIZE * 0x40000));
curAddrPD += ((ulong)KPAGESIZE * 0x200)) {
// Create an entry in PD each 2MB
// 0x200 = 512
MmPT = (pte_t *)malloc(512*sizeof(pte_t));
index = (curAddrPD / ((ulong)KPAGESIZE * 0x200)) % 512;
if (curAddrPD > MmPhysLastKernAddress) {
MmPD[index] = (pte_t *)0;
//DebugLog("PD %d\n", index);
continue;
}
MmPT = memalign(512*sizeof(pte_t), KPAGESIZE);
//DebugLog("\t\t\t\tPT %d : %p\n", index, MmPT);
MmPD[index] = (pte_t *)((ulong)MmPT | PRESENT | READWRITE);
for (ulong curAddrPT = curAddrPD;
curAddrPT < (curAddrPD + ((ulong)KPAGESIZE * 0x200)) &&
curAddrPT < MmVirtLastAddress;
for (curAddrPT = curAddrPD;
curAddrPT < (curAddrPD + ((ulong)KPAGESIZE * 0x200));
curAddrPT += (ulong)KPAGESIZE) {
// Create an entry in PT each page of 4KB
index = (curAddrPT / ((ulong)KPAGESIZE)) % 512;
xedni = (curAddrPT / ((ulong)KPAGESIZE));
//DebugLog("\t\t\t\tPage %d : %p\n", index, curAddrPT);
// STACK GUARD PAGE */
if ((ulong)curAddrPT == (ulong)BtLoaderInfo.stackEndAddr) {
MmPT[index] = (ulong)curAddrPT | PRESENT;
@ -179,7 +189,7 @@ void MmInitPaging(void)
}
// SECTION .RODATA PROTECTION
else if ((ulong)curAddrPT >= (ulong)&_rodata && (ulong)curAddrPT <= (ulong)&_rodata_end) {
MmPT[index] = (ulong)curAddrPT | PRESENT | WRITETHR | NX;
MmPT[index] = (ulong)curAddrPT | PRESENT | NX;
MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
//DebugLog("\tSection .rodata at %p\n", curAddrPT);
}
@ -187,26 +197,11 @@ void MmInitPaging(void)
else if ((ulong)curAddrPT <= MmPhysLastKernAddress) {
MmPT[index] = (ulong)curAddrPT | PRESENT | READWRITE;
MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
if ((ulong)curAddrPT == MmPhysLastKernAddress) {
//DebugLog("\tLast page of kernel at %p\n", curAddrPT);
}
}
/* // While we're inside the userspace pages */
/* else if ((ulong)curAddrPT >= USERSPACE) { */
/* MmPT[index] = ((ulong)curAddrPT - diffKernUsr) | PRESENT; // Not present for instance */
/* xedni = (((ulong)curAddrPT - diffKernUsr) / ((ulong)KPAGESIZE)); */
/* //MmPhysicalPageTable[xedni] = (ulong)curAddrPT; */
/* if ((ulong)curAddrPT == USERSPACE) { */
/* DebugLog("\tUserspace at %p:%p\n", curAddrPT, curAddrPT - diffKernUsr); */
/* } */
/* } */
else {
MmPT[index] = 0;
MmPT[index] = (ulong)0;
MmPhysicalPageTable[xedni] = (ulong)0;
}
KeFlushTlbSingle(curAddrPT);
}
}
}
@ -214,7 +209,7 @@ void MmInitPaging(void)
lastDirectoryAddr = (ulong)MmPT;
MmLoadPML4((void *)MmPageMapLevel4);
//MmEnableWriteProtect();
MmEnableWriteProtect();
DebugLog("\tPage table size : %u MB\n", (lastDirectoryAddr - firstDirectoryAddr + phDirSize)/MB);
}
@ -222,46 +217,88 @@ void MmInitPaging(void)
//
// Get a page from an address
//
static pte_t *MmGetPageDescriptorFromVirtual(void *virtualAddr)
ulong *MmGetPageDescriptorFromVirtual(void *virtualAddr)
{
ulong virtAddrPage = (ulong)virtualAddr & ( ~(KPAGESIZE - 1));
register ulong pml4Index = ((ulong)virtualAddr & 0xFF8000000000) >> 39; // Select bit from 39 to 48
register ulong pdpIndex = ((ulong)virtualAddr & 0x7FC0000000) >> 30; // Select bit from 39 to 48
register ulong pdIndex = ((ulong)virtualAddr & 0x3FE00000) >> 21; // Select bit from 39 to 48
register ulong ptIndex = ((ulong)virtualAddr & 0x1FF000) >> 12; // Select bit from 39 to 48
pdpe_t *pdp = NULL;
pde_t *pd = NULL;
pte_t *pt = NULL;
if (virtAddrPage > MmVirtLastAddress) {
KeStartPanic("MmSetPage() Out of bound of the address space !");
DebugLog("PML4[%d], PDP[%d], PD[%d], PT[%d]\n", pml4Index, pdpIndex, pdIndex, ptIndex);
if (!((ulong)MmPageMapLevel4[pml4Index] & 0xFFFFFFFFFF000)) { // Select bit from 12 to 51
// Alloc space
MmPageMapLevel4[pml4Index] = memalign(512*sizeof(pdpe_t), KPAGESIZE);
// Set present
MmPageMapLevel4[pml4Index] = (pml4_t)((ulong)MmPageMapLevel4[pml4Index] | PRESENT | READWRITE);
pdp = (pdpe_t *)((ulong)MmPageMapLevel4[pml4Index] & 0xFFFFFFFFFF000);
DebugLog("\tCreate PDP at %p\n", MmPageMapLevel4[pml4Index]);
} else {
pdp = (pdpe_t *)((ulong)MmPageMapLevel4[pml4Index] & 0xFFFFFFFFFF000);
}
pdpe_t *pdp = (pdpe_t*)((ulong)MmPageMapLevel4[(virtAddrPage / ((ulong)KPAGESIZE * 0x8000000)) % 512] & ~(KPAGESIZE - 1));
//DebugLog("pdp\t: %p\n", pdp);
pde_t *pd = (pde_t*)( (ulong)pdp[(virtAddrPage / ((ulong)KPAGESIZE * 0x40000)) % 512] & ~(KPAGESIZE - 1));
//DebugLog("pd\t: %p\n", pd);
pte_t *pt = (pte_t*)( (ulong)pd[(virtAddrPage / ((ulong)KPAGESIZE * 0x200)) % 512] & ~(KPAGESIZE - 1));
//DebugLog("pt\t: %p\n", pt);
DebugLog("\tPDP[%d] = %p\n", pdpIndex, pdp[pdpIndex]);
pte_t *page = &pt[(virtAddrPage / ((ulong)KPAGESIZE)) % 512];
//DebugLog("page (with flags): %p\n", page);
if (!((ulong)pdp[pdpIndex] & 0xFFFFFFFFFF000)) { // Select bit from 12 to 51
return page;
pdp[pdpIndex] = memalign(512*sizeof(pde_t), KPAGESIZE);
pdp[pdpIndex] = (pdpe_t)((ulong)pdp[pdpIndex] | PRESENT | READWRITE);
pd = (pde_t *)((ulong)pdp[pdpIndex] & 0xFFFFFFFFFF000);
DebugLog("\tCreate PD at %p\n", (ulong)pdp[pdpIndex]);
} else {
pd = (pde_t *)((ulong)pdp[pdpIndex] & 0xFFFFFFFFFF000);
}
DebugLog("\tPD[%d] = %p\n", pdIndex, pd[pdIndex]);
if (!((ulong)pd[pdIndex] & 0xFFFFFFFFFF000)) { // Select bit from 12 to 51
pd[pdIndex] = memalign(512*sizeof(pte_t), KPAGESIZE);
pd[pdIndex] = (pde_t)((ulong)pd[pdIndex] | PRESENT | READWRITE);
pt = (pte_t *)((ulong)pd[pdIndex] & 0xFFFFFFFFFF000);
DebugLog("\tCreate PT at %p\n", (ulong)pd[pdIndex]);
} else {
pt = (pte_t *)((ulong)pd[pdIndex] & 0xFFFFFFFFFF000);
}
DebugLog("\tPT[%d] = %p\n", ptIndex, pt[ptIndex]);
MmLoadPML4((void *)MmPageMapLevel4);
return &pt[ptIndex];
}
//
// Translates a virtual address to its physical equivalent
//
void *MmTransVirtToPhyAddr(void* virtualAddr)
{
ulong virtAddrPage = (ulong)virtualAddr & ( ~(KPAGESIZE - 1));
pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);
ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);
if (*page == (*page & ~(KPAGESIZE - 1))) {
if (!(page)) {
return NULL;
}
return (void*)((*page & ~(KPAGESIZE - 1))+ ((ulong)virtualAddr - (ulong)virtAddrPage));
return (void*)(((ulong)*page & 0xFFFFFFFFFF000)+ ((ulong)virtualAddr - (ulong)virtAddrPage));
}
void *MmTransPhyToVirtAddr(void* physicalAddr)
{
ulong phyAddrPage = (ulong)physicalAddr & ( ~(KPAGESIZE - 1));
ulong phyAddrPage = (ulong)physicalAddr & ( ~((KPAGESIZE - 1) | NX));
return (void*)( MmPhysicalPageTable[(ulong)physicalAddr
/ ((ulong)KPAGESIZE)
] + ((ulong)physicalAddr - phyAddrPage));
@ -272,9 +309,7 @@ void *MmTransPhyToVirtAddr(void* physicalAddr)
//
void MmSetPage(void* virtualAddr, ulong flags)
{
pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);
*page |= flags;
ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);
KeFlushTlbSingle(*page);
}
@ -284,9 +319,7 @@ void MmSetPage(void* virtualAddr, ulong flags)
//
void MmUnsetPage(void* virtualAddr, ulong flags)
{
pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);
*page &= (~flags);
ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);
KeFlushTlbSingle(*page);
}
@ -296,11 +329,22 @@ void MmUnsetPage(void* virtualAddr, ulong flags)
//
void MmMapPage(void* virtualAddr, void* physicalAddr, ulong flags)
{
pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);
//DebugLog("Request %p:%p with %lu\n", virtualAddr, physicalAddr, flags);
*page = ((ulong)physicalAddr & ~(KPAGESIZE - 1)) | flags;
ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);
KeFlushTlbSingle(*page);
*page = (ulong)physicalAddr | flags;
MmPhysicalPageTable[(ulong)physicalAddr
/ ((ulong)KPAGESIZE)
] = (ulong)virtualAddr;
KeFlushTlbSingle(virtualAddr);
//DebugLog("Done %p at page %p\n", *page, page);
if ((ulong)virtualAddr > MmVirtLastAddress)
MmVirtLastAddress = (ulong)virtualAddr + KPAGESIZE;
}
//
@ -308,11 +352,17 @@ void MmMapPage(void* virtualAddr, void* physicalAddr, ulong flags)
//
void MmUnmapPage(void* virtualAddr)
{
pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);
ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);
*page = 0;
/* MmPhysicalPageTable[(ulong)(MmTransVirtToPhyAddr(virtualAddr)) */
/* / ((ulong)KPAGESIZE) */
/* ] = 0; */
KeFlushTlbSingle(*page);
/* pt[ */
/* (virtualAddr / (ulong)KPAGESIZE) % 512 */
/* ] = 0; */
KeFlushTlbSingle(virtualAddr);
}
//-----------
@ -415,5 +465,5 @@ static void PagingHandler(ISRFrame_t *regs)
void MmActivatePageHandler(void)
{
KeRegisterISR(PagingHandler, 0xe);
DebugLog("\tPaging activated\n");
//DebugLog("\tPage handler activated\n");
}

View File

@ -27,12 +27,310 @@
#include <ex/malloc.h>
#include <mm/paging.h>
#include <mm/palloc.h>
#include <mm/map.h>
#include <io/vga.h>
#include <lib/buf.h>
#include <ke/time.h>
//---------
enum
{
Whatever = 1UL << 52,
Whatever2 = 1UL << 62
};
static AllocatedPage_t busyPagesList = { (void*)0, 0, (AllocatedPage_t*)0 };
extern MemoryMap_t memoryMap;
extern ulong MmPhysLastKernAddress;
static ulong NSuccessfulAlloc = 0;
static ulong NSuccessfulFree = 0;
//---------
static bool isPageBusy(void *phyPageAddr)
{
AllocatedPage_t *busyPage = &busyPagesList;
bool isBusy = false;
// In case of NVS, ACPI or BADRAM zone, considered busy
if (!MmGetAvailZoneSize(phyPageAddr))
return true;
// Search in the busylist if the phy addr is here
while(busyPage->next) {
busyPage = busyPage->next;
if (phyPageAddr == busyPage->phyAddress) {
isBusy = true;
break;
}
}
return isBusy;
}
static void printBusyPages(void)
{
AllocatedPage_t *busyPage = &busyPagesList;
if (!busyPage->next) {
KernLog("No busy page\n");
} else {
while(busyPage->next) {
busyPage = busyPage->next;
KernLog("Busy page at %p\n", busyPage->phyAddress);
}
}
}
static ulong MmBusyPagesSpace(void)
{
ulong c = 0;
AllocatedPage_t *busyPage = &busyPagesList;
if (!busyPage->next) {
return 0;
} else {
while(busyPage->next) {
busyPage = busyPage->next;
c += 4096;
}
}
return c;
}
//
// Inserts a newly allocated page into the busy list, which is kept sorted
// by ascending physical address. 'id' groups all pages of one allocation
// so they can be freed/mapped together.
//
static void addPageToBusyList(void *phyPageAddr, ulong id)
{
AllocatedPage_t *busyPage = &busyPagesList;
AllocatedPage_t *prevBusyPage = NULL;
// Find the last node whose address is still below phyPageAddr
while(busyPage->next) {
prevBusyPage = busyPage;
busyPage = busyPage->next;
if (busyPage->phyAddress > phyPageAddr) {
// Went one node too far: insert after the predecessor instead
busyPage = prevBusyPage;
break;
}
}
// NOTE(review): malloc() result is not checked -- presumably the kernel
// allocator panics on OOM (KalAllocMemory does); confirm that contract.
AllocatedPage_t *newBusyPage = (AllocatedPage_t*)malloc(sizeof(AllocatedPage_t));
newBusyPage->phyAddress = phyPageAddr;
newBusyPage->id = id;
// Link the new node right after the insertion point
newBusyPage->next = busyPage->next;
busyPage->next = newBusyPage;
}
//
// Unlinks and frees the first busy-list node matching phyPageAddr.
// Silently does nothing when the address is not in the list.
//
static void removePageFromBusyList(void *phyPageAddr)
{
AllocatedPage_t *busyPage = &busyPagesList;
AllocatedPage_t *prevBusyPage = NULL;
// Walk the list keeping the predecessor so the match can be unlinked
while(busyPage->next) {
prevBusyPage = busyPage;
busyPage = busyPage->next;
if (phyPageAddr == busyPage->phyAddress) {
prevBusyPage->next = busyPage->next;
free(busyPage);
// Only the first match is removed; addresses are unique in the list
break;
}
}
}
//
// Allocates 'size' bytes worth of physical page frames (kernel).
// On return *pageNumber holds the frame count and *frameListPtr a
// malloc'd array of their physical addresses. When 'contiguous' is set
// the frames are guaranteed physically consecutive. Returns a non-zero
// allocation id used by MmFreePageFrame()/MmMapPageFrame().
// Panics when not enough free frames exist.
//
ulong MmAllocPageFrameEx(void ***frameListPtr, size_t *pageNumber, size_t size, bool contiguous)
{
static ulong id = 0;
// Round the byte count up to whole page frames
*pageNumber = (((ulong)size - 1) / KPAGESIZE) + 1;
*frameListPtr = (void**)malloc(sizeof(void*)*(*pageNumber));
size_t curNumber = 0;
bool inBlock = false;
// Incrementing id
id++;
// Maximum PHYSICAL address in memory
ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;
////DebugLog("Allocating %d pages...\n", *pageNumber);
// Scan starts one page past the kernel image.
// NOTE(review): 'curPage += KPAGESIZE' is void* arithmetic, a GNU C
// extension -- fine under -std=gnu11 (see Makefile).
if (contiguous) {
for (void *curPage = (void*)(MmPhysLastKernAddress + KPAGESIZE); curPage < (void*)phRamSize; curPage += KPAGESIZE) {
if (!isPageBusy(curPage)) {
(*frameListPtr)[curNumber] = curPage;
inBlock = true;
////DebugLog("Select page : %p\n", curPage);
if (++curNumber >= *pageNumber) {
break;
}
} else {
inBlock = false;
}
// A busy page broke the run: restart the contiguous block search
// (the inner 'if (contiguous)' is redundant inside this branch)
if (contiguous)
if (!inBlock)
curNumber = 0;
}
} else {
// Non-contiguous: take the first free frames found, in address order
for (void *curPage = (void*)(MmPhysLastKernAddress + KPAGESIZE); curPage < (void*)phRamSize; curPage += KPAGESIZE) {
if (!isPageBusy(curPage)) {
(*frameListPtr)[curNumber] = curPage;
////DebugLog("Select page : %p\n", curPage);
if (++curNumber >= *pageNumber) {
break;
}
}
}
}
// Ran off the end of RAM before finding enough frames
if (curNumber != *pageNumber) {
KeStartPanic("MmAllocPageFrameEx() : No more free pages to allocate");
}
// Record every selected frame under the same allocation id
for (size_t i = 0; i < *pageNumber; i++) {
addPageToBusyList((*frameListPtr)[i], id);
////DebugLog("Allocating page : %p\n", *frameListPtr[i]);
}
NSuccessfulAlloc++;
return id;
}
//
// Convenience wrapper around MmAllocPageFrameEx() for callers that only
// need the allocation id, not the frame list. Returns the id.
//
ulong MmAllocPageFrame(size_t size, bool contiguous)
{
    void **frameList = NULL;
    size_t pageCount = 0;

    // The original returned before its '(void)ptr;' statements (dead
    // code) and leaked the frame list; it also passed a ulong* where a
    // size_t* is expected. Capture the id, release the list, return.
    ulong id = MmAllocPageFrameEx(&frameList, &pageCount, size, contiguous);

    free(frameList);   // the busy list keeps track of the frames

    return id;
}
//
// Frees every page frame belonging to allocation 'id'.
// Unknown ids are silently ignored (no free counted).
//
void MmFreePageFrame(ulong id)
{
    AllocatedPage_t *busyPage = busyPagesList.next;
    bool success = false;

    while (busyPage) {
        // Fix: removePageFromBusyList() frees the node, so the next
        // pointer must be saved BEFORE removal -- the original kept
        // iterating through the freed node (use-after-free).
        AllocatedPage_t *next = busyPage->next;

        if (id == busyPage->id) {
            removePageFromBusyList(busyPage->phyAddress);
            success = true;
        }

        busyPage = next;
    }

    if (success)
        NSuccessfulFree++;
}
//
// Maps every page frame of allocation 'id' at consecutive virtual
// addresses starting at virtAddr, with the given page flags.
// Returns EADDRINUSE if one of the allocation's frames is already
// mapped somewhere, EOK otherwise.
//
error_t MmMapPageFrame(ulong id, void *virtAddr, ulong flags)
{
    AllocatedPage_t *busyPage = &busyPagesList;

    while (busyPage->next) {
        busyPage = busyPage->next;

        // Only consider frames of the requested allocation. The original
        // tested MmTransPhyToVirtAddr() on EVERY busy page and bailed out
        // with EADDRINUSE even when an unrelated allocation was mapped.
        if (id != busyPage->id)
            continue;

        if (MmTransPhyToVirtAddr(busyPage->phyAddress)) {
            return EADDRINUSE;
        }

        DebugLog("Map %p at %p\n", busyPage->phyAddress, virtAddr);
        MmMapPage((void*)((ulong)virtAddr), busyPage->phyAddress, flags);
        virtAddr += KPAGESIZE;   // GNU void* arithmetic, as elsewhere here
    }

    return EOK;
}
//
// Unmaps every currently-mapped page frame belonging to allocation 'id'.
// Frames that are not mapped are skipped. Always returns EOK.
//
error_t MmUnmapPageFrame(ulong id)
{
AllocatedPage_t *busyPage = &busyPagesList;
void *actualPhys = 0;
while(busyPage->next) {
busyPage = busyPage->next;
// Non-NULL only when this frame is currently mapped somewhere
actualPhys = MmTransPhyToVirtAddr(busyPage->phyAddress);
////DebugLog("Physical : %p is %p\n", busyPage->phyAddress, actualPhys);
if (actualPhys && id == busyPage->id) {
////DebugLog("Unmap %p from %p\n", busyPage->phyAddress, MmTransPhyToVirtAddr(busyPage->phyAddress));
MmUnmapPage(MmTransPhyToVirtAddr(busyPage->phyAddress));
}
}
return EOK;
}
// Scratch buffer of allocation ids used by the stress/benchmark test below.
ulong tab[4000] = {0};

// Crude manual benchmark of the page-frame allocator: allocates two 5 MB
// frames, logs rough timings via KeGetTicks(), then maps the second
// allocation at USERSPACE. Invoked from the shell (CmdPageBlock).
// Always returns EOK. The /* */ blocks are disabled test variants kept
// for experimentation.
error_t MmTestBusyPage(void)
{
int j = 0;
/* for (int i = 0; i < 2000; i++) { */
/* if (rand() %2) { */
/* if (rand() %2) { */
/* tab[j++] = MmAllocPageFrame(rand()%6553689, NORMAL); */
/* } else { */
/* tab[j++] = MmAllocPageFrame(rand()%6553689, CONTIGUOUS); */
/* } */
/* } else { */
/* MmFreePageFrame(tab[rand() % (j+1)]); */
/* } */
/* //DebugLog("Alloc : %d; Free : %d; Count : %lu Mo\n", NSuccessfulAlloc, NSuccessfulFree, MmBusyPagesSpace() / MB); */
/* } */
// NOTE(review): ticks are divided by 1000 for seconds, which assumes a
// 1 kHz PIT -- confirm against the timer setup in ke/pit.c.
ulong a = KeGetTicks();
DebugLog("Start alloc 30 MB: %lu s\n", a/1000);
tab[j++] = MmAllocPageFrame(5*MB, NORMAL);
tab[j++] = MmAllocPageFrame(5*MB, NORMAL);
ulong b = KeGetTicks();
DebugLog("End alloc : %lu s\n", b/1000);
DebugLog("Alloc time : %lu s\n", (b-a)/1000);
DebugLog("Alloc : %d; Free : %d; Count : %lu Mo\n", NSuccessfulAlloc, NSuccessfulFree, MmBusyPagesSpace() / MB);
/* a = KeGetTicks(); */
/* DebugLog("Start alloc 30MB : %lu s\n", a/1000); */
/* tab[j++] = MmAllocPageFrame(5*MB, NORMAL); */
/* b = KeGetTicks(); */
/* DebugLog("End alloc : %lu s\n", b/1000); */
/* DebugLog("Alloc time : %lu s\n", (b-a)/1000); */
/* DebugLog("Alloc : %d; Free : %d; Count : %lu Mo\n", NSuccessfulAlloc, NSuccessfulFree, MmBusyPagesSpace() / MB); */
/* j = 0; */
/* a = KeGetTicks(); */
/* DebugLog("Start free : %lu ms\n", a); */
/* MmFreePageFrame(tab[j++]); */
/* b = KeGetTicks(); */
/* DebugLog("End free : %lu ms\n", b); */
/* DebugLog("Free time : %lu ms\n", (b-a)); */
/* DebugLog("Alloc : %d; Free : %d; Count : %lu Mo\n", NSuccessfulAlloc, NSuccessfulFree, MmBusyPagesSpace() / MB); */
a = KeGetTicks();
DebugLog("Start map at %p: %lu ms\n", USERSPACE, a);
// Map the second allocation (tab[1]) at the bottom of userspace
MmMapPageFrame(tab[1], (void*)(USERSPACE), PRESENT | READWRITE);
b = KeGetTicks();
DebugLog("End map : %lu ms\n", b);
DebugLog("Map time : %lu ms\n", (b-a));
//printBusyPages();
//DebugLog("Finished !\n");
return EOK;
}

View File

@ -24,6 +24,7 @@
#include <vers.h>
#include <mm/paging.h>
#include <mm/palloc.h>
#include <mm/map.h>
#include <io/ata.h>
#include <io/vga.h>
@ -112,19 +113,19 @@ error_t CmdDumpATASect(int argc, char **argv, char *cmdline)
error_t CmdDumpMem(int argc, char **argv, char *cmdline)
{
char sector[1024] = {0};
char sector[8] = {0};
char *address = (char*)strtoul(argv[1], NULL, 16);
int nb = 1; //atoi(argv[2]);
int x = 0;
int step = 16;
int step = 8;
KernLog("Address begin: %p\n", address);
for (int i = 0; i < 1024*nb; i++) {
for (int i = 0; i < 8*nb; i++) {
sector[i] = *address++;
}
while(x < 1024*nb) {
while(x < 8*nb) {
KernLog("%C", shcol);
for (int i = 0; i < step; i++) {
KernLog("%02x ", (uchar)sector[i+x]);
@ -219,19 +220,6 @@ error_t CmdPageTranslatePhyToVirt(int argc, char **argv, char *cmdline)
return EOK;
}
enum
{
PRESENT = 1 << 0,
READWRITE = 1 << 1,
USERMODE = 1 << 2,
WRITETHR = 1 << 3,
CACHEDIS = 1 << 4,
ACCESSED = 1 << 5,
DIRTY = 1 << 6,
HUGE = 1 << 7,
NX = 1UL << 63
};
error_t CmdPageMap(int argc, char **argv, char *cmdline)
{
void *virtual = (void*)strtoul(argv[1], NULL, 16);
@ -253,12 +241,8 @@ error_t CmdPageUnmap(int argc, char **argv, char *cmdline)
error_t CmdPageBlock(int argc, char **argv, char *cmdline)
{
size_t size = (size_t)atoi(argv[1]);
bool usermode = (bool)atoi(argv[2]);
//MmGetPhyPageBlock(size, usermode);
return EOK;
error_t err = MmTestBusyPage();
return err;
}
error_t CmdPF(int argc, char **argv, char *cmdline)

View File

@ -46,6 +46,21 @@ void *malloc(size_t n)
return ptr;
}
//
// Allocates n bytes aligned on an 'align'-byte boundary via
// KalAllocMemoryEx(). Positive return codes are stored in errno.
// NOTE(review): the #ifndef polarity zeroes the block only in the
// kernel build (M_ZEROED) -- confirm this mirrors malloc()'s flags
// above, as the inverted condition looks suspicious.
//
void *memalign(size_t n, size_t align)
{
void *ptr;
error_t rc;
#ifndef _KALEID_KERNEL
rc = KalAllocMemoryEx(&ptr, n, 0, align);
#else
rc = KalAllocMemoryEx(&ptr, n, M_ZEROED, align);
#endif
if (rc > 0) seterrno(rc);
return ptr;
}
void *calloc(size_t n, size_t m)
{
void *ptr;