Removed the obsolete old memory management system.

master
q3k 2013-07-10 15:17:29 +02:00
parent 347e5d9974
commit c45c4a5f6b
12 changed files with 50 additions and 418 deletions

View File

@ -77,33 +77,4 @@ typedef struct {
T_PAGING_ML4 * paging_get_ml4(void);
void paging_set_ml4(u64 ML4Physical);
u8 paging_get_physical(u64 Virtual, u64 *Physical);
u8 paging_get_physical_ex(u64 Virtual, u64 *Physical,T_PAGING_ML4 *ML4);
void paging_kernel_initialize(u64 KernelVirtualStart, u64 KernelPhysicalStart, u64 KernelSize);
// The temporary page is a page you can use to access some temporary physical
// location. There is only one page, 4096 bytes large. Deal with it.
void paging_temp_page_setup(void);
volatile const u64 paging_temp_page_get_virtual(void);
void paging_temp_page_set_physical(u64 Physical);
// We have to prepare for virtual memory allocation from 0xFFFFFFFF00000000
// right from the beginning, because that's the mapping that we will be using
// in later parts of the code (see Tier1/CKernelML4.h), and there's no sense
// in remapping everything.
// This means we have to set up a page directory for our managed pool, fill
// it up with a heap (from Tier0/heap.c), and attach that directory to a DPT.
// Then, when we offload page management to CKernelML4, we have to let it know
// about the state of all these things.
void paging_scratch_initialize(void);
// Allocates 4096 of physical and virtual memory in the kernel scratch buffer.
// Warning, this memory cannot be freed.
void *paging_scratch_map(u64 Physical);
void *paging_scratch_allocate(void);
u64 paging_scratch_get_physical(void *Virtual);
// A simple page map call. This does no checks! Triple faults ahoy.
void paging_map_page(u64 Virtual, u64 Physical);
#endif

View File

@ -122,9 +122,6 @@ u64 system_msr_get(u32 MSR);
void system_msr_set(u32 MSR, u64 Data);
// kernel load address, size and mapping
u64 system_get_kernel_size(void);
u64 system_get_kernel_physical_start(void);
u64 system_get_kernel_virtual_start(void);
u64 system_get_memory_top(void);
#endif

View File

@ -8,11 +8,9 @@
#include "types.h"
struct S_LOAD_CONTEXT {
u64 KernelPhysicalStart;
u64 KernelPhysicalEnd;
u64 ReservedPhysicalStart;
u64 ReservedPhysicalEnd;
u64 LoaderPhysicalStart;
u64 LoaderPhysicalEnd;
s8 LoaderName[80];
// VGA text mode 0

View File

@ -64,7 +64,8 @@ void apic_enable_lapic(void)
system_msr_set(0x1B, APICMSR);
}
g_APIC.LAPIC = paging_scratch_map(0xFEE00000);
// g_APIC.LAPIC = paging_scratch_map(0xFEE00000);
PANIC("nanananoapic");
kprintf("[i] LAPIC will be @0x%x.\n", g_APIC.LAPIC);
// prepare interrupts ..

View File

@ -71,13 +71,14 @@ T_HEAP *heap_create(u64 Size)
NumPages, Size / 1000);
u64 Start = 0;
for (u32 i = 0; i < NumPages; i++)
{
if (!Start)
Start = (u64)paging_scratch_allocate();
else
paging_scratch_allocate();
}
// for (u32 i = 0; i < NumPages; i++)
// {
// if (!Start)
// Start = (u64)paging_scratch_allocate();
// else
// paging_scratch_allocate();
// }
PANIC("nananananoheap");
T_HEAP* Heap = (T_HEAP *)Start;
u64 DataStart = Start;

View File

@ -25,7 +25,6 @@ void kmain_newstack(void);
// Real kernel entry point, called from loader
void kmain(u32 LoadContextAddress)
{
for(;;){}
T_LOAD_CONTEXT *LoadContext = (T_LOAD_CONTEXT*)(u64)LoadContextAddress;
kstdio_init();
@ -59,30 +58,23 @@ void kmain(u32 LoadContextAddress)
system_parse_load_context(LoadContext);
kprintf("[i] Booting via %s.\n", LoadContext->LoaderName);
kprintf("[i] Memory available: %uk.\n", system_get_memory_upper());
kprintf("[i] Kernel physical: %x-%x.\n", system_get_kernel_physical_start(),
system_get_kernel_physical_start() + system_get_kernel_size());
kprintf("[i] Loader physical: %x-%x.\n", LoadContext->LoaderPhysicalStart, LoadContext->LoaderPhysicalEnd);
kprintf("[i] Kernel virtual: %x-%x.\n", system_get_kernel_virtual_start(),
system_get_kernel_virtual_start() + system_get_kernel_size());
paging_temp_page_setup();
physmem_init();
paging_scratch_initialize();
// Let's create a new kernel stack
u64 StackVirtual = (u64)paging_scratch_allocate();
kprintf("[i] New kernel stack 0x%x\n", StackVirtual);
// And now let's use it and forget ebp because we can.
__asm__ volatile("mov %0, %%rsp" : : "r" (StackVirtual + 4096));
// // Let's create a new kernel stack
// u64 StackVirtual = (u64)paging_scratch_allocate();
// kprintf("[i] New kernel stack 0x%x\n", StackVirtual);
// for (;;) {}
// // And now let's use it and forget ebp because we can.
// __asm__ volatile("mov %0, %%rsp" : : "r" (StackVirtual + 4096));
// And let's create a new stack frame.
// (and prevent gcc from inlining the function call)
void (*kmain_newstack_ptr)() = kmain_newstack;
kmain_newstack_ptr();
}
// // And let's create a new stack frame.
// // (and prevent gcc from inlinin the function call)
// void (*kmain_newstack_ptr)() = kmain_newstack;
// kmain_newstack_ptr();
// }
void kmain_newstack(void)
{
// void kmain_newstack(void)
// {
u64 RSDPAddress = acpi_find_rsdp();
if (RSDPAddress == 0)

View File

@ -18,209 +18,7 @@ T_PAGING_ML4 *paging_get_ml4(void)
return (T_PAGING_ML4*)Address;
}
// Set up the kernel's single temporary mapping page.
// Walks the loader-provided paging hierarchy (ML4 -> DPT -> Dir -> Tab) for
// the 511th (last) page of the kernel's virtual window and caches a pointer
// to that page-table entry in g_KernelPaging.TempPage, so that
// paging_temp_page_set_physical() can later retarget this one virtual page
// at an arbitrary physical frame.
// Every structure touched here must lie inside the loader's identity-mapped
// first 2 MiB, otherwise we cannot reach it yet -- hence the PANICs.
void paging_temp_page_setup(void)
{
    u64 KernelVirtualStart = system_get_kernel_virtual_start();
    u64 KernelSize = system_get_kernel_size();
    // Try using page 511 (last) from kernel table
    u64 PageVirtual = KernelVirtualStart + 511 * 4096;
    u64 MaxMapped = 4096 * 512; // first 2Mib by loader
    if (KernelSize >= MaxMapped)
        PANIC("Cannot set up temp page, kernel > 2Mib!");
    T_PAGING_ML4 *ML4 = paging_get_ml4();
    kprintf("[i] Loader-provided ML4 @0x%x\n", ML4);
    if ((u64)ML4 > MaxMapped)
        PANIC("Cannot set up temp page, ML4 not accessible!");
    // Each level stores the next level's physical frame number; shift left
    // by 12 to recover the physical address, and rely on identity mapping
    // to dereference it directly.
    u64 aDPT = ML4->Entries[PAGING_GET_ML4_INDEX(PageVirtual)].Physical << 12;
    if (aDPT > MaxMapped)
        PANIC("Cannot set up temp page, DPT not accessible!");
    T_PAGING_DPT *DPT = (T_PAGING_DPT *)aDPT;
    u64 aDir = DPT->Entries[PAGING_GET_DPT_INDEX(PageVirtual)].Physical << 12;
    if (aDir > MaxMapped)
        PANIC("Cannot set up temp page, Dir not accessible!");
    T_PAGING_DIR *Dir = (T_PAGING_DIR *)aDir;
    u64 aTab = Dir->Entries[PAGING_GET_DIR_INDEX(PageVirtual)].Physical << 12;
    if (aTab > MaxMapped)
        PANIC("Cannot set up temp page, Tab not accessible!");
    T_PAGING_TAB *Tab = (T_PAGING_TAB *)aTab;
    // Remember both the entry we will rewrite and the virtual address it
    // backs (needed for invlpg on each retarget).
    g_KernelPaging.TempPage = &Tab->Entries[511];
    kprintf("[i] Using paging table entry @0x%x as temporary page.\n", g_KernelPaging.TempPage);
    g_KernelPaging.TempPageVirtual = PageVirtual;
}
// Retarget the temporary page at the given physical frame.
// The frame address must be 4 KiB-aligned; anything else is a caller bug.
void paging_temp_page_set_physical(u64 Physical)
{
    if (Physical & 0xFFF)
        PANIC("Tried to set temp page physical to unaligned address!");
    // TODO: check if smaller than maxphyaddr
    g_KernelPaging.TempPage->Physical = Physical >> 12;
    // Drop the stale TLB translation for the temp page's virtual address.
    __asm__ volatile("invlpg %0" :: "m"(*(u64 *)g_KernelPaging.TempPageVirtual));
}
// Return the fixed virtual address that the temporary page backs
// (page 511 of the kernel's 0xFFFFFFFF80000000 window).
// FIX: the return type was `inline volatile const u64` -- qualifiers on a
// return value are meaningless (GCC: "type qualifiers ignored on function
// return type"), and a non-static `inline` definition in a .c file emits no
// external definition under C99 semantics, risking link failure. A plain
// function keeps every caller working.
u64 paging_temp_page_get_virtual(void)
{
    return 0xFFFFFFFF80000000 + 511 * 0x1000;
}
/*u8 paging_get_physical_ex(u64 Virtual, u64 *Physical, T_PAGING_ML4 *ML4)
{
if (Virtual < g_KernelPaging.KernelVirtualStart || Virtual > g_KernelPaging.KernelVirtualStart + g_KernelPaging.KernelSize)
{
PANIC("not implemented");
return 0;
}
*Physical = Virtual - g_KernelPaging.KernelVirtualStart + g_KernelPaging.KernelPhysicalStart;
return 1;
}
u8 paging_get_physical(u64 Virtual, u64 *Physical)
{
T_PAGING_ML4 *ml4 = paging_get_ml4();
return paging_get_physical_ex(Virtual, Physical, ml4);
}*/
// Activate a new top-level paging structure: loads the ML4's physical
// address into CR3, switching the address space (and flushing the TLB as a
// side effect of the CR3 write).
void paging_set_ml4(u64 ML4Physical)
{
    __asm volatile ( "mov %%rax, %%cr3\n" :: "a" (ML4Physical));
}
// State of the kernel scratch pool (virtual area from 0xFFFFFFFF00000000).
struct {
    u32 UnmanagedSize;     // bytes handed out so far; doubles as the offset of the next free page
    u8 HeapSetUp;          // non-zero once a heap owns the pool; unmanaged mapping is then forbidden
    u64 DirectoryPhysical; // physical address of the pool's page directory
} g_PagingScratch;
// Create the kernel scratch pool: allocate and zero a page directory, then
// hook it into the boot DPT at the slot covering 0xFFFFFFFF00000000, so
// paging_scratch_map()/paging_scratch_allocate() can hand out pages there.
// Requires physmem and the temporary page to be initialized first.
void paging_scratch_initialize(void)
{
    // let's first allocate a physical frame for the DIR
    u64 DirPhysical = physmem_allocate_page() * 4096;
    // map it to our trusty temp page
    paging_temp_page_set_physical(DirPhysical);
    T_PAGING_DIR *Directory = (T_PAGING_DIR *)paging_temp_page_get_virtual();
    // zero the entries
    for (u16 i = 0; i < 512; i++)
        Directory->Entries[i].Present = 0;
    // attach the scratch to the DPT. we can do this without using a temp page,
    // as the boot paging structures lie in the 2mib identity paged zone
    u16 ML4Entry = PAGING_GET_ML4_INDEX(0xFFFFFFFF00000000);
    T_PAGING_ML4 *ML4 = paging_get_ml4();
    // The kernel's ML4 slot must already exist; the scratch DPT slot must
    // still be free -- we refuse to clobber an existing mapping.
    ASSERT(ML4->Entries[ML4Entry].Present);
    u64 aDPT = ML4->Entries[ML4Entry].Physical << 12;
    kprintf("[i] DPT Physical 0x%x, ML4 index %i.\n", aDPT, ML4Entry);
    kprintf("[i] Scratch DIR Physical 0x%x\n", DirPhysical);
    T_PAGING_DPT *DPT = (T_PAGING_DPT *)aDPT;
    u16 DPTEntry = PAGING_GET_DPT_INDEX(0xFFFFFFFF00000000);
    ASSERT(!DPT->Entries[DPTEntry].Present);
    DPT->Entries[DPTEntry].Present = 1;
    DPT->Entries[DPTEntry].RW = 1;
    DPT->Entries[DPTEntry].Physical = DirPhysical >> 12;
    // Pool starts empty, unmanaged, with its directory recorded for later walks.
    g_PagingScratch.UnmanagedSize = 0;
    g_PagingScratch.HeapSetUp = 0;
    g_PagingScratch.DirectoryPhysical = DirPhysical;
}
// Map the given physical frame at the next free virtual page of the kernel
// scratch pool (growing upward from 0xFFFFFFFF00000000) and return its
// virtual address. Page tables are created on demand. Pages mapped this way
// can never be unmapped; calling this after the heap has taken over the
// pool is a fatal error.
void *paging_scratch_map(u64 Physical)
{
    if (g_PagingScratch.HeapSetUp)
        PANIC("Trying to allocate unmanaged scratch after heap exists, abort!");
    u64 Virtual = 0xFFFFFFFF00000000 + g_PagingScratch.UnmanagedSize;
    u16 DirEntry = PAGING_GET_DIR_INDEX(Virtual);
    paging_temp_page_set_physical(g_PagingScratch.DirectoryPhysical);
    T_PAGING_DIR *Directory = (T_PAGING_DIR *)paging_temp_page_get_virtual();
    // create table if necessary
    u64 TablePhysical;
    if (!Directory->Entries[DirEntry].Present)
    {
        // create a new page table
        TablePhysical = physmem_allocate_page() * 4096;
        paging_temp_page_set_physical(TablePhysical);
        T_PAGING_TAB *NewTable = (T_PAGING_TAB*)paging_temp_page_get_virtual();
        // zero the table
        for (u16 i = 0; i < 512; i++)
            NewTable->Entries[i].Present = 0;
        // point the temp page back at the directory before writing to it
        paging_temp_page_set_physical(g_PagingScratch.DirectoryPhysical);
        Directory->Entries[DirEntry].Present = 1;
        Directory->Entries[DirEntry].RW = 1;
        Directory->Entries[DirEntry].Physical = TablePhysical >> 12;
    }
    else
        TablePhysical = Directory->Entries[DirEntry].Physical << 12;
    // set the table entry to point to our new page frame
    paging_temp_page_set_physical(TablePhysical);
    T_PAGING_TAB *Table = (T_PAGING_TAB*)paging_temp_page_get_virtual();
    u16 TabEntry = PAGING_GET_TAB_INDEX(Virtual);
    Table->Entries[TabEntry].Present = 1;
    Table->Entries[TabEntry].RW = 1;
    Table->Entries[TabEntry].Physical = Physical >> 12;
    g_PagingScratch.UnmanagedSize += 4096;
    // BUGFIX: invlpg needs a memory operand *at* the mapped address. The old
    // form "m"(Virtual) invalidated the TLB entry covering the local
    // variable's stack slot, leaving a stale translation for the page we
    // just mapped (compare the correct form in paging_temp_page_set_physical).
    __asm__ __volatile__("invlpg %0" :: "m"(*(u64 *)Virtual));
    return (void *)Virtual;
}
// Allocate a fresh physical frame and map it into the scratch pool,
// returning its virtual address. The page can never be freed.
void *paging_scratch_allocate(void)
{
    return paging_scratch_map(physmem_allocate_page() * 4096);
}
// Point the page-table entry for Virtual at Physical by walking the current
// ML4. Performs no presence checks at any level -- if an intermediate
// structure is missing, this dereferences garbage ("Triple faults ahoy").
// NOTE(review): only the Physical field is written; Present/RW bits are left
// untouched, so writing into a never-used entry will not activate the
// mapping -- confirm all callers only remap already-present entries.
void paging_map_page(u64 Virtual, u64 Physical)
{
    T_PAGING_ML4 *ML4 = paging_get_ml4();
    u64 aDPT = ML4->Entries[PAGING_GET_ML4_INDEX(Virtual)].Physical << 12;
    T_PAGING_DPT *DPT = (T_PAGING_DPT *)aDPT;
    u64 aDir = DPT->Entries[PAGING_GET_DPT_INDEX(Virtual)].Physical << 12;
    T_PAGING_DIR *Dir = (T_PAGING_DIR *)aDir;
    u64 aTab = Dir->Entries[PAGING_GET_DIR_INDEX(Virtual)].Physical << 12;
    T_PAGING_TAB *Tab = (T_PAGING_TAB *)aTab;
    Tab->Entries[PAGING_GET_TAB_INDEX(Virtual)].Physical = Physical >> 12;
    // BUGFIX: flush the translation of the remapped page itself; the old
    // operand "m"(Virtual) invalidated the local variable's stack slot
    // instead, leaving the stale mapping in the TLB.
    __asm__ volatile("invlpg %0" :: "m"(*(u64 *)Virtual));
}
// Translate a scratch-pool virtual address back to its physical address by
// walking the scratch directory through the temporary page. Panics if the
// address was never mapped into the pool.
// NOTE(review): the page-offset bits of Virtual are discarded -- the result
// is the physical base of the containing page, which appears to be what the
// callers expect; confirm.
u64 paging_scratch_get_physical(void* Virtual)
{
    u16 DirEntry = PAGING_GET_DIR_INDEX(Virtual);
    paging_temp_page_set_physical(g_PagingScratch.DirectoryPhysical);
    T_PAGING_DIR *Directory = (T_PAGING_DIR *)paging_temp_page_get_virtual();
    if (!Directory->Entries[DirEntry].Present)
        PANIC("Address not in directory!");
    u64 TablePhysical = Directory->Entries[DirEntry].Physical << 12;
    // Retarget the temp page at the table and read the final entry.
    paging_temp_page_set_physical(TablePhysical);
    T_PAGING_TAB *Table = (T_PAGING_TAB*)paging_temp_page_get_virtual();
    u16 TabEntry = PAGING_GET_TAB_INDEX(Virtual);
    if (!Table->Entries[TabEntry].Present)
        PANIC("Address not in table!");
    return Table->Entries[TabEntry].Physical << 12;
}

View File

@ -47,14 +47,13 @@ u64 __physmem_allocate_first_page(void)
if (NextPageStart > g_PhysicalMemory.MemorySize)
PANIC("Out of memory!");
}
kprintf("%x\n", NextPageStart);
return NextPageStart;
}
void physmem_init(void)
{
g_PhysicalMemory.MemorySize = system_get_memory_top();
g_PhysicalMemory.MemoryFree = system_get_memory_top();
// allocate the first frame, for metadata
u64 MetadataFrame = __physmem_allocate_first_page();
@ -63,14 +62,14 @@ void physmem_init(void)
if (PHYSMEM_ADDRESS_TO_METADATA_NUMBER(MetadataFrame) > 0)
PANIC("Physmem: First allocated address > metadata covering!");
// map it to virtual mem so we can use it
paging_temp_page_set_physical(MetadataFrame);
T_PHYSMEM_METADATA *Metadata = (T_PHYSMEM_METADATA *)paging_temp_page_get_virtual();
// Let's make sure that frame is mapped into our memory...
if (MetadataFrame >= 0xEFFFFF)
PANIC("Physmem: first allocated address > memory mapped by loader!");
T_PHYSMEM_METADATA *Metadata = (T_PHYSMEM_METADATA *)MetadataFrame;
// zero the metadata (the 512th bit overflows into the next-metadata field)
for (u64 i = 0; i < 512; i++)
Metadata->Bitmap[i] = 0;
// mask all the bits up to and including our metadata frame as used
kprintf("[i] Marking physical memory up to 0x%x (bit %i) as used.\n", MetadataFrame, PHYSMEM_ADDRESS_TO_BIT_NUMBER(MetadataFrame));
for (u32 i = 0; i <= PHYSMEM_ADDRESS_TO_BIT_NUMBER(MetadataFrame); i++)
@ -97,8 +96,8 @@ void physmem_init(void)
u64 physmem_allocate_page(void)
{
paging_temp_page_set_physical(g_PhysicalMemory.FirstMetadata);
T_PHYSMEM_METADATA *Metadata = (T_PHYSMEM_METADATA *)paging_temp_page_get_virtual();
ASSERT(g_PhysicalMemory.FirstMetadata <= 0xEFFFFF);
T_PHYSMEM_METADATA *Metadata = (T_PHYSMEM_METADATA *)g_PhysicalMemory.FirstMetadata;
for (u32 i = 0; i < PHYSMEM_METADATA_COVERS_BITS; i++)
{
if (Metadata->Bitmap[i] != 0xFFFFFFFFFFFFFFFF)
@ -122,8 +121,8 @@ void physmem_free_page(u64 Page)
if (Page > PHYSMEM_METADATA_COVERS_BITS)
PANIC("...and where did you get that page index?");
paging_temp_page_set_physical(g_PhysicalMemory.FirstMetadata);
T_PHYSMEM_METADATA *Metadata = (T_PHYSMEM_METADATA *)paging_temp_page_get_virtual();
ASSERT(g_PhysicalMemory.FirstMetadata <= 0xEFFFFF);
T_PHYSMEM_METADATA *Metadata = (T_PHYSMEM_METADATA *)g_PhysicalMemory.FirstMetadata;
// todo: check for double frees
u32 Bit = PHYSMEM_BIT_NUMBER_TO_BIT_IN_METADATA(Page);
@ -143,22 +142,14 @@ u64 physmem_physical_to_page(u64 Physical)
void physmem_read(u64 Base, u64 Size, void *Destination)
{
u8 *DataSource = (u8 *)paging_temp_page_get_virtual();
u64 OffsetInSource = Base & 0xFFF;
u64 PreviousPageBase = Base & ~((u64)0xFFF);
paging_temp_page_set_physical(PreviousPageBase);
for (u64 i = 0; i < Size; i++)
if ((u64)Destination <= 0xEFFFFF)
{
u64 PageBase = (Base + i) & ~((u64)0xFFF);
if (PageBase != PreviousPageBase)
paging_temp_page_set_physical(PageBase);
PreviousPageBase = PageBase;
*((u8 *)Destination + i) = DataSource[OffsetInSource % 4096];
OffsetInSource++;
for (u64 i = 0; i < Size; i++)
((u8 *)Destination)[i] = ((u8 *)Base)[i];
}
else
{
PANIC("physmem_read > extmem not implemented!");
}
}

View File

@ -51,12 +51,6 @@ void system_parse_load_context(T_LOAD_CONTEXT *LoadContext)
g_SystemInfo.MemoryLower = (u64)((u8*)Header)[4];
g_SystemInfo.MemoryUpper = (u64)((u8*)Header)[8];
}
// Kernel location in memory
g_SystemInfo.KernelPhysicalStart = LoadContext->KernelPhysicalStart;
g_SystemInfo.KernelSize = LoadContext->KernelPhysicalEnd - LoadContext->KernelPhysicalStart;
g_SystemInfo.KernelVirtualStart = SYSTEM_KERNEL_VIRTUAL;
ASSERT(SYSTEM_KERNEL_VIRTUAL == (u64)&_start);
// Bootloader name from Multiboot header
if ((Flags >> 9) & 1)
@ -117,16 +111,10 @@ void system_parse_load_context(T_LOAD_CONTEXT *LoadContext)
BIOSArea->Size = 1024 *1024;
g_SystemInfo.NumInvalidAreas++;
// And mark our kernel physical location as unavailable
T_SYSTEM_INVALID_RAM *KernelArea = &g_SystemInfo.InvalidMemoryAreas[g_SystemInfo.NumInvalidAreas];
KernelArea->Base = LoadContext->KernelPhysicalStart;
KernelArea->Size = LoadContext->KernelPhysicalEnd - LoadContext->KernelPhysicalStart;
g_SystemInfo.NumInvalidAreas++;
// ...and the loader physical location.
// Mark the loader physical location as unavailable.
T_SYSTEM_INVALID_RAM *LoaderArea = &g_SystemInfo.InvalidMemoryAreas[g_SystemInfo.NumInvalidAreas];
LoaderArea->Base = LoadContext->LoaderPhysicalStart;
LoaderArea->Size = LoadContext->LoaderPhysicalEnd - LoadContext->LoaderPhysicalStart;
LoaderArea->Base = LoadContext->ReservedPhysicalStart;
LoaderArea->Size = LoadContext->ReservedPhysicalEnd - LoadContext->ReservedPhysicalStart;
g_SystemInfo.NumInvalidAreas++;
// ...and the IOAPIC
@ -214,11 +202,6 @@ u64 system_get_kernel_size(void)
return g_SystemInfo.KernelSize;
}
u64 system_get_kernel_physical_start(void)
{
return g_SystemInfo.KernelPhysicalStart;
}
u64 system_get_kernel_virtual_start(void)
{
return g_SystemInfo.KernelVirtualStart;

View File

@ -20,100 +20,5 @@ extern "C"
// Build the kernel's common paging structures: a fresh ML4 carrying the
// kernel TEXT mapping (ML4 entry 511 / DPT entry 510 / dir entries 0-15)
// and the LOWMEM identity mapping of the first 128 pages (ML4 0 / DPT 0 /
// dir 0 / tab entries 0-127). Cached pointers (m_TEXT, m_LOWMEM) and their
// physical addresses are recorded for later sharing between address spaces.
void CKernelML4::PopulateCommonPointers(void)
{
    T_PAGING_ML4 *ML4 = (T_PAGING_ML4*)kmalloc_aligned(sizeof(T_PAGING_ML4));
    // BUGFIX: x86-64 paging structures hold 512 entries, not 256 -- zero all
    // of them so stale Present bits can't leak random mappings.
    for (u16 i = 0; i < 512; i++)
        ML4->Entries[i].Present = 0;
    // start with TEXT, this is the easiest (16 directory entries)
    // TEXT: ml4 entry 511, dpt entry 510, dir entries 0 - 15
    T_PAGING_DPT *TextDPT = (T_PAGING_DPT*)kmalloc_aligned(sizeof(T_PAGING_DPT));
    for (u16 i = 0; i < 512; i++)
        TextDPT->Entries[i].Present = 0;
    POPULATE_PAGING_ENTRY(ML4->Entries[511], paging_scratch_get_physical(TextDPT));
    ASSERT_ALIGNED(paging_scratch_get_physical(TextDPT));
    T_PAGING_DIR *TextDirectory = (T_PAGING_DIR*)kmalloc_aligned(sizeof(T_PAGING_DIR));
    for (u16 i = 0; i < 512; i++)
        TextDirectory->Entries[i].Present = 0;
    u64 KernelStart = system_get_kernel_physical_start();
    for (u16 i = 0; i < 16; i++)
    {
        T_PAGING_TAB *Table = (T_PAGING_TAB*)kmalloc_aligned(sizeof(T_PAGING_TAB));
        // BUGFIX: the old loop incremented (and indexed with) the outer `i`
        // instead of `j`, corrupting the directory loop and writing the same
        // entries repeatedly; it also covered only half of the 512-entry table.
        for (u16 j = 0; j < 512; j++)
        {
            POPULATE_PAGING_ENTRY(Table->Entries[j], KernelStart);
            KernelStart += 4096;
        }
        u64 TablePhysical = paging_scratch_get_physical(Table);
        ASSERT_ALIGNED(TablePhysical);
        POPULATE_PAGING_ENTRY(TextDirectory->Entries[i], TablePhysical);
    }
    m_TEXT = TextDirectory;
    m_TEXT_Physical = paging_scratch_get_physical(TextDirectory);
    POPULATE_PAGING_ENTRY(TextDPT->Entries[510], paging_scratch_get_physical(TextDirectory));
    ASSERT_ALIGNED(paging_scratch_get_physical(TextDirectory));
    // next let's populate LOWMEM (1/2 of a Table)
    // LOWMEM: ml4 entry 0, dpt entry 0, dir entry 0, table entries 0-127
    T_PAGING_DPT *LowmemDPT = (T_PAGING_DPT*)kmalloc_aligned(sizeof(T_PAGING_DPT));
    for (u16 i = 0; i < 512; i++)
        LowmemDPT->Entries[i].Present = 0;
    POPULATE_PAGING_ENTRY(ML4->Entries[0], paging_scratch_get_physical(LowmemDPT));
    ASSERT_ALIGNED(paging_scratch_get_physical(LowmemDPT));
    T_PAGING_DIR *LowmemDirectory = (T_PAGING_DIR*)kmalloc_aligned(sizeof(T_PAGING_DIR));
    for (u16 i = 0; i < 512; i++)
        LowmemDirectory->Entries[i].Present = 0;
    POPULATE_PAGING_ENTRY(LowmemDPT->Entries[0], paging_scratch_get_physical(LowmemDirectory));
    ASSERT_ALIGNED(paging_scratch_get_physical(LowmemDirectory));
    T_PAGING_TAB *LowmemTable = (T_PAGING_TAB*)kmalloc_aligned(sizeof(T_PAGING_TAB));
    for (u16 i = 0; i < 128; i++)
        POPULATE_PAGING_ENTRY(LowmemTable->Entries[i], 4096 * i);
    // BUGFIX: clear the rest of the whole 512-entry table, not just up to 256.
    for (u16 i = 128; i < 512; i++)
        LowmemTable->Entries[i].Present = 0;
    POPULATE_PAGING_ENTRY(LowmemDirectory->Entries[0], paging_scratch_get_physical(LowmemTable));
    ASSERT_ALIGNED(paging_scratch_get_physical(LowmemTable));
    m_LOWMEM = LowmemTable;
    m_LOWMEM_Physical = paging_scratch_get_physical(LowmemTable);
    // aaand do the SCRATCH (one whole directory of tables)
    // SCRATCH: ml4 entry 511, dpt entry 509, dir entries 0 - 255
    // T_PAGING_DIR *ScratchDirectory = (T_PAGING_DIR*)kmalloc_aligned(sizeof(T_PAGING_DIR));
    // u64 ScratchStart = 0xFFFFFFFF40000000;
    // for (u16 i = 0; i < 256; i++)
    // {
    // T_PAGING_TAB *Table = (T_PAGING_TAB*)kmalloc_aligned(sizeof(T_PAGING_TAB));
    // for (u16 j = 0; j < 256; i++)
    // {
    // Table->Entries[j].Present = 1;
    // Table->Entries[j].RW = 0;
    // Table->Entries[j].User = 0;
    // Table->Entries[j].Misc = 0;
    // Table->Entries[j].Zero = 0;
    // POPULATE_PAGING_ENTRY(Table->Entries[j], ScratchStart);
    // ScratchStart += 4096;
    // }
    // u64 TablePhysical = paging_scratch_get_physical(Table);
    // POPULATE_PAGING_ENTRY(ScratchDirectory->Entries[i], TablePhysical);
    // ASSERT_ALIGNED(TablePhysical);
    // }
    // // SCRATCH and TEXT share the same DPT
    // TextDPT->Entries[509].Present = 0;
    // TextDPT->Entries[509].RW = 0;
    // TextDPT->Entries[509].User = 0;
    // TextDPT->Entries[509].Misc = 0;
    // TextDPT->Entries[509].Zero = 0;
    // TextDPT->Entries[509].Physical = paging_scratch_get_physical(ScratchDirectory) >> 12;
    // ASSERT_ALIGNED(paging_scratch_get_physical(ScratchDirectory));
}

View File

@ -2,11 +2,9 @@
#define __CONTEXT_H__
struct S_LOAD_CONTEXT {
u64 KernelPhysicalStart;
u64 KernelPhysicalEnd;
u64 ReservedPhysicalStart;
u64 ReservedPhysicalEnd;
u64 LoaderPhysicalStart;
u64 LoaderPhysicalEnd;
s8 LoaderName[80];
// VGA text mode 0

View File

@ -122,8 +122,6 @@ u32 load(void *Multiboot, unsigned int Magic)
FreeSpaceStart = KernelEnd;
if (FreeSpaceStart % 0x1000)
FreeSpaceStart = (FreeSpaceStart + 0x1000) & 0xFFFFF000;
g_Context.KernelPhysicalStart = FreeSpaceStart;
u32 KernelApproximateSize = FreeSpaceStart - StartPhysical;
if (FreeSpaceStart + KernelApproximateSize > 0x00EFFFFF)
@ -169,7 +167,8 @@ u32 load(void *Multiboot, unsigned int Magic)
}
}
}
g_Context.KernelPhysicalEnd = paging_get_last_frame();
g_Context.ReservedPhysicalStart = (u32)&_start;
g_Context.ReservedPhysicalEnd = paging_get_last_frame();
s8 *LoaderName = "Cucumber x86-64 loader";
u8 i = 0;
@ -182,8 +181,6 @@ u32 load(void *Multiboot, unsigned int Magic)
g_Context.VGATextModeUsed = 1;
g_Context.MultibootUsed = 1;
g_Context.MultibootHeader = (u32)Multiboot;
g_Context.LoaderPhysicalStart = (u32)&_start;
g_Context.LoaderPhysicalEnd = (u32)&_end;
printf("Load context at 0x%x\n", (u64)&g_Context);