Replaced the crappy miniVMM with a kernel scratch thing.

it starts at 0xFFFFFFFF00000000, it's mean, it's unmanaged, but you can make a heap on it later. But it broke everything, so I need to fix that.

One step closer to saner memory management, I guess.
alentours-dev
q3k 2012-10-30 16:35:01 +01:00
parent b5ecbd1a07
commit c93a20ad03
6 changed files with 104 additions and 37 deletions

View File

@ -91,13 +91,18 @@ const inline u64 paging_temp_page_get_virtual(void)
} }
void paging_temp_page_set_physical(u64 Physical); void paging_temp_page_set_physical(u64 Physical);
// The MiniVMM is a mini virtual memory manager that manages, similarly to the // We have to prepare for virtual memory allocation from 0xFFFFFFFF00000000
// basic physical frame manager, allocation of _virtual_ frames, for use by // right from the beginning, because that's the mapping that we will be using
// paging functions. This should be only used by major parts of the kernel, for // in later parts of the code (see Tier1/CKernelML4.h), and there's no sense
// example to map some data structures to physical memory, or if physmem_read // in remapping everything.
// calls would be too slow. // This means we have to set up a page directory for our managed pool, fill
void paging_minivmm_setup(void); // it up with a heap (from Tier0/heap.c), and attach that directory to a DPT.
u64 paging_minivmm_allocate(void); // Then, when we offload page management to CKernelML4, we have to let it know
// about the state of all these things.
void paging_scratch_initialize(void);
// Allocates 4096 bytes of physical and virtual memory in the kernel scratch buffer.
// Warning, this memory cannot be freed.
void *paging_scratch_allocate(void);
// A simple page map call. This does no checks! Triple faults ahoy. // A simple page map call. This does no checks! Triple faults ahoy.
void paging_map_page(u64 Virtual, u64 Physical); void paging_map_page(u64 Virtual, u64 Physical);

View File

@ -64,7 +64,8 @@ void apic_enable_lapic(void)
system_msr_set(0x1B, APICMSR); system_msr_set(0x1B, APICMSR);
} }
u64 Virtual = paging_minivmm_allocate(); //u64 Virtual = paging_minivmm_allocate();
u64 Virtual = 0;
kprintf("[i] LAPIC will be @0x%x.\n", Virtual); kprintf("[i] LAPIC will be @0x%x.\n", Virtual);
paging_map_page(Virtual, 0xFEE00000); paging_map_page(Virtual, 0xFEE00000);

View File

@ -73,10 +73,10 @@ T_HEAP *heap_create(u64 Size)
u64 Start = 0; u64 Start = 0;
for (u32 i = 0; i < NumPages; i++) for (u32 i = 0; i < NumPages; i++)
{ {
if (!Start) // if (!Start)
Start = paging_minivmm_allocate(); // Start = paging_minivmm_allocate();
else // else
paging_minivmm_allocate(); // paging_minivmm_allocate();
} }
kprintf("[i] Heap starts at 0x%x\n", Start); kprintf("[i] Heap starts at 0x%x\n", Start);

View File

@ -66,10 +66,14 @@ void kmain(u32 LoadContextAddress)
paging_temp_page_setup(); paging_temp_page_setup();
physmem_init(); physmem_init();
paging_scratch_initialize();
paging_minivmm_setup(); void *a1 = paging_scratch_allocate();
void *a2 = paging_scratch_allocate();
kprintf("%x %x\n", a1, a2);
for (;;) {}
// Let's create a new kernel stack // Let's create a new kernel stack
u64 StackVirtual = paging_minivmm_allocate(); //u64 StackVirtual = paging_minivmm_allocate();
u64 StackVirtual = 0;
kprintf("[i] New kernel stack 0x%x\n", StackVirtual); kprintf("[i] New kernel stack 0x%x\n", StackVirtual);
// And now let's use it and forget ebp because we can. // And now let's use it and forget ebp because we can.

View File

@ -3,6 +3,7 @@
#include "Tier0/kstdlib.h" #include "Tier0/kstdlib.h"
#include "Tier0/panic.h" #include "Tier0/panic.h"
#include "Tier0/system.h" #include "Tier0/system.h"
#include "Tier0/physmem.h"
#include "types.h" #include "types.h"
struct { struct {
@ -10,13 +11,6 @@ struct {
u64 TempPageVirtual; u64 TempPageVirtual;
} g_KernelPaging; } g_KernelPaging;
struct {
u64 Start;
u64 End;
u64 Top;
} g_MiniVMM;
T_PAGING_ML4 *paging_get_ml4(void) T_PAGING_ML4 *paging_get_ml4(void)
{ {
u64 Address; u64 Address;
@ -100,23 +94,87 @@ void paging_set_ml4(u64 ML4Physical)
__asm volatile ( "mov %%rax, %%cr3\n" :: "a" (ML4Physical)); __asm volatile ( "mov %%rax, %%cr3\n" :: "a" (ML4Physical));
} }
void paging_minivmm_setup(void) struct {
u32 UnmanagedSize;
u8 HeapSetUp;
u64 DirectoryPhysical;
} g_PagingScratch;
void paging_scratch_initialize(void)
{ {
g_MiniVMM.Start = system_get_kernel_virtual_start() + system_get_kernel_size(); // let's first allocate a physical frame for the DIR
g_MiniVMM.End = system_get_kernel_virtual_start() + 511 * 0x1000; u64 DirPhysical = physmem_allocate_page() * 4096;
g_MiniVMM.Top = g_MiniVMM.Start; // map it to our trusty temp page
kprintf("[i] MiniVMM: %x - %x.\n", g_MiniVMM.Start, g_MiniVMM.End); paging_temp_page_set_physical(DirPhysical);
T_PAGING_DIR *Directory = (T_PAGING_DIR *)paging_temp_page_get_virtual();
// zero the entries
for (u16 i = 0; i < 512; i++)
Directory->Entries[i].Present = 0;
// attach the scratch to the DPT. we can do this without using a temp page,
// as the boot paging structures lie in the 2mib identity paged zone
u16 ML4Entry = PAGING_GET_ML4_INDEX(0xFFFFFFFF00000000);
T_PAGING_ML4 *ML4 = paging_get_ml4();
ASSERT(ML4->Entries[ML4Entry].Present);
u64 aDPT = ML4->Entries[ML4Entry].Physical << 12;
T_PAGING_DPT *DPT = (T_PAGING_DPT *)aDPT;
u16 DPTEntry = PAGING_GET_DPT_INDEX(0xFFFFFFFF00000000);
ASSERT(!DPT->Entries[DPTEntry].Present);
DPT->Entries[DPTEntry].Present = 1;
DPT->Entries[DPTEntry].RW = 1;
DPT->Entries[DPTEntry].Physical = DirPhysical >> 12;
g_PagingScratch.UnmanagedSize = 0;
g_PagingScratch.HeapSetUp = 0;
g_PagingScratch.DirectoryPhysical = DirPhysical;
} }
u64 paging_minivmm_allocate(void) void *paging_scratch_allocate(void)
{ {
if (g_MiniVMM.Top + 0x1000 > g_MiniVMM.End) if (g_PagingScratch.HeapSetUp)
PANIC("MiniVMM out of memory!"); PANIC("Trying to allocate unmanaged scratch after heap exists, abort!");
u64 Result = g_MiniVMM.Top; u64 Virtual = 0xFFFFFFFF00000000 + g_PagingScratch.UnmanagedSize;
g_MiniVMM.Top += 4096; u64 Physical = physmem_allocate_page() * 4096;
u16 DirEntry = PAGING_GET_DIR_INDEX(Virtual);
return Result;
paging_temp_page_set_physical(g_PagingScratch.DirectoryPhysical);
T_PAGING_DIR *Directory = (T_PAGING_DIR *)paging_temp_page_get_virtual();
// create table if necessary
u64 TablePhysical;
if (!Directory->Entries[DirEntry].Present)
{
// create a new page table
TablePhysical = physmem_allocate_page() * 4096;
paging_temp_page_set_physical(TablePhysical);
T_PAGING_TAB *Table = (T_PAGING_TAB*)paging_temp_page_get_virtual();
// zero the table
for (u16 i = 0; i < 512; i++)
Table->Entries[i].Present = 0;
// set the directory to point where it should
paging_temp_page_set_physical(g_PagingScratch.DirectoryPhysical);
Directory->Entries[DirEntry].Present = 1;
Directory->Entries[DirEntry].RW = 1;
Directory->Entries[DirEntry].Physical = TablePhysical >> 12;
}
else
TablePhysical = Directory->Entries[DirEntry].Physical << 12;
// set the table entry to point to our new page frame
paging_temp_page_set_physical(TablePhysical);
T_PAGING_TAB *Table = (T_PAGING_TAB*)paging_temp_page_get_virtual();
u16 TabEntry = PAGING_GET_TAB_INDEX(Virtual);
Table->Entries[TabEntry].Present = 1;
Table->Entries[TabEntry].RW = 1;
Table->Entries[TabEntry].Physical = Physical >> 12;
g_PagingScratch.UnmanagedSize += 4096;
__asm__ __volatile__("invlpg %0" :: "m"(Virtual));
return (void *)Virtual;
} }
void paging_map_page(u64 Virtual, u64 Physical) void paging_map_page(u64 Virtual, u64 Physical)
@ -130,5 +188,5 @@ void paging_map_page(u64 Virtual, u64 Physical)
T_PAGING_TAB *Tab = (T_PAGING_TAB *)aTab; T_PAGING_TAB *Tab = (T_PAGING_TAB *)aTab;
Tab->Entries[PAGING_GET_TAB_INDEX(Virtual)].Physical = Physical >> 12; Tab->Entries[PAGING_GET_TAB_INDEX(Virtual)].Physical = Physical >> 12;
__asm__ volatile("invlpg %0" :: "m"(*(u32 *)Virtual)); __asm__ volatile("invlpg %0" :: "m"(Virtual));
} }

View File

@ -106,7 +106,6 @@ u64 physmem_allocate_page(void)
{ {
PHYSMEM_METADATA_SET_BIT(Metadata, i * 64 + j); PHYSMEM_METADATA_SET_BIT(Metadata, i * 64 + j);
g_PhysicalMemory.MemoryFree -= 4096; g_PhysicalMemory.MemoryFree -= 4096;
kprintf("-> %i\n", i * 64 + j);
return i * 64 + j; return i * 64 + j;
} }
} }