Refactored paging and system code to reflect unified kernel size info.
parent
19c5812c71
commit
d0200d140f
|
@ -84,7 +84,7 @@ void paging_kernel_initialize(u64 KernelVirtualStart, u64 KernelPhysicalStart, u
|
|||
|
||||
// The temporary page is a page you can use to access some temporary physical
|
||||
// location. There is only one page, 4096 bytes large. Deal with it.
|
||||
void paging_temp_page_setup(T_LOAD_CONTEXT *LoadContext);
|
||||
void paging_temp_page_setup(void);
|
||||
const inline u64 paging_temp_page_get_virtual(void)
|
||||
{
|
||||
return 0xFFFFFFFF80000000 + 511 * 0x1000;
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
|
||||
#include "load_context.h"
|
||||
|
||||
#define SYSTEM_KERNEL_VIRTUAL 0xFFFFFFFF80000000
|
||||
|
||||
struct S_SYSTEM_MLTBT_MMAP {
|
||||
u32 Size;
|
||||
u64 Base;
|
||||
|
@ -102,7 +104,12 @@ typedef struct {
|
|||
|
||||
// Just a guess...
|
||||
T_SYSTEM_INVALID_RAM InvalidMemoryAreas[256];
|
||||
u8 NumInvalidAreas;
|
||||
u8 NumInvalidAreas;
|
||||
|
||||
// kernel code size and location
|
||||
u64 KernelSize;
|
||||
u64 KernelPhysicalStart;
|
||||
u64 KernelVirtualStart;
|
||||
} T_SYSTEM_INFO;
|
||||
|
||||
u64 system_cpuid(u32 Code);
|
||||
|
@ -112,4 +119,9 @@ u8 system_msr_available(void);
|
|||
u64 system_msr_get(u32 MSR);
|
||||
void system_msr_set(u32 MSR, u64 Data);
|
||||
|
||||
// kernel load address, size and mapping
|
||||
u64 system_get_kernel_size(void);
|
||||
u64 system_get_kernel_physical_start(void);
|
||||
u64 system_get_kernel_virtual_start(void);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -65,9 +65,8 @@ void kmain(u32 LoadContextAddress)
|
|||
kprintf("[i] Loader physical: %x-%x.\n", LoadContext->LoaderPhysicalStart, LoadContext->LoaderPhysicalEnd);
|
||||
kprintf("[i] Kernel virtual: %x-%x.\n", &_start, &_end);
|
||||
|
||||
paging_kernel_initialize((u64)&_start, LoadContext->KernelPhysicalStart, LoadContext->KernelPhysicalEnd - LoadContext->KernelPhysicalStart);
|
||||
paging_temp_page_setup(LoadContext);
|
||||
paging_minivmm_setup((u64)&_end, 0xFFFFFFFF80000000 + 511 * 0x1000);
|
||||
paging_temp_page_setup();
|
||||
paging_minivmm_setup((u64)&_end, SYSTEM_KERNEL_VIRTUAL + 511 * 0x1000);
|
||||
// Let's create a new kernel stack
|
||||
u64 StackVirtual = paging_minivmm_allocate();
|
||||
kprintf("[i] New kernel stack 0x%x\n", StackVirtual);
|
||||
|
|
|
@ -2,15 +2,12 @@
|
|||
#include "Tier0/kstdio.h"
|
||||
#include "Tier0/kstdlib.h"
|
||||
#include "Tier0/panic.h"
|
||||
#include "Tier0/system.h"
|
||||
#include "types.h"
|
||||
|
||||
struct {
|
||||
T_PAGING_TAB_ENTRY *TempPage; // For temp page mapping.
|
||||
u64 TempPageVirtual;
|
||||
|
||||
u64 KernelVirtualStart;
|
||||
u64 KernelPhysicalStart;
|
||||
u64 KernelSize;
|
||||
} g_KernelPaging;
|
||||
|
||||
struct {
|
||||
|
@ -27,13 +24,15 @@ T_PAGING_ML4 *paging_get_ml4(void)
|
|||
return (T_PAGING_ML4*)Address;
|
||||
}
|
||||
|
||||
void paging_temp_page_setup(T_LOAD_CONTEXT *LoadContext)
|
||||
void paging_temp_page_setup(void)
|
||||
{
|
||||
u64 KernelVirtualStart = system_get_kernel_virtual_start();
|
||||
u64 KernelSize = system_get_kernel_size();
|
||||
|
||||
// Try using page 511 (last) from kernel table
|
||||
u64 PageVirtual = 0xFFFFFFFF80000000 + 511 * 4096;
|
||||
u64 PageVirtual = KernelVirtualStart + 511 * 4096;
|
||||
u64 MaxMapped = 4096 * 512; // first 2Mib by loader
|
||||
|
||||
u64 KernelSize = LoadContext->KernelPhysicalEnd - LoadContext->KernelPhysicalStart;
|
||||
if (KernelSize >= MaxMapped)
|
||||
PANIC("Cannot set up temp page, kernel > 2Mib!");
|
||||
|
||||
|
@ -78,13 +77,6 @@ void paging_temp_page_set_physical(u64 Physical)
|
|||
__asm__ volatile("invlpg %0" :: "m"(*(u64 *)g_KernelPaging.TempPageVirtual));
|
||||
}
|
||||
|
||||
// Record where the kernel lives so later paging code (temp-page setup,
// virtual->physical lookups) can consult a single source of truth.
// Stores the values verbatim into the file-scope g_KernelPaging state;
// performs no mapping or validation itself.
void paging_kernel_initialize(u64 KernelVirtualStart, u64 KernelPhysicalStart, u64 KernelSize)
{
    // The three stores are independent; only g_KernelPaging's kernel
    // location fields are touched (TempPage state is left alone).
    g_KernelPaging.KernelSize = KernelSize;
    g_KernelPaging.KernelPhysicalStart = KernelPhysicalStart;
    g_KernelPaging.KernelVirtualStart = KernelVirtualStart;
}
|
||||
|
||||
/*u8 paging_get_physical_ex(u64 Virtual, u64 *Physical, T_PAGING_ML4 *ML4)
|
||||
{
|
||||
if (Virtual < g_KernelPaging.KernelVirtualStart || Virtual > g_KernelPaging.KernelVirtualStart + g_KernelPaging.KernelSize)
|
||||
|
|
|
@ -51,6 +51,11 @@ void system_parse_load_context(T_LOAD_CONTEXT *LoadContext)
|
|||
g_SystemInfo.MemoryLower = (u64)((u8*)Header)[4];
|
||||
g_SystemInfo.MemoryUpper = (u64)((u8*)Header)[8];
|
||||
}
|
||||
|
||||
// Kernel location in memory
|
||||
g_SystemInfo.KernelPhysicalStart = LoadContext->KernelPhysicalStart;
|
||||
g_SystemInfo.KernelSize = LoadContext->KernelPhysicalEnd - LoadContext->KernelPhysicalStart;
|
||||
g_SystemInfo.KernelVirtualStart = SYSTEM_KERNEL_VIRTUAL;
|
||||
|
||||
// Bootloader name from Multiboot header
|
||||
if ((Flags >> 9) & 1)
|
||||
|
@ -202,3 +207,18 @@ void system_msr_set(u32 MSR, u64 Data)
|
|||
{
|
||||
__asm__ volatile("wrmsr" :: "a"((u32)(Data & 0xFFFFFFFF)), "d"((u32)(Data >> 32)), "c"(MSR));
|
||||
}
|
||||
|
||||
u64 system_get_kernel_size(void)
|
||||
{
|
||||
return g_SystemInfo.KernelSize;
|
||||
}
|
||||
|
||||
u64 system_get_kernel_physical_start(void)
|
||||
{
|
||||
return g_SystemInfo.KernelPhysicalStart;
|
||||
}
|
||||
|
||||
u64 system_get_kernel_virtual_start(void)
|
||||
{
|
||||
return g_SystemInfo.KernelVirtualStart;
|
||||
}
|
Loading…
Reference in New Issue