New paging mechanisms working.

master
q3k 2013-09-02 10:43:50 +02:00
parent 4958d0b6f1
commit a2a6581994
4 changed files with 120 additions and 0 deletions


@@ -121,5 +121,6 @@ void system_msr_set(u32 MSR, u64 Data);
// kernel load address, size and mapping
u64 system_get_memory_top(void);
TELF *system_get_kernel_elf(void);
#endif


@@ -41,6 +41,35 @@ void *_early_alloc(void)
    return (void*)Address;
}

// Walk the four paging levels for Virtual; returns 0 and fills PhysicalOut
// on success, or a negative value identifying the first level that is not present.
s8 _paging_resolve(u64 Virtual, u64 *PhysicalOut)
{
    u64 PML4I = PAGING_GET_PML4_INDEX(Virtual);
    u64 PDPI = PAGING_GET_PDP_INDEX(Virtual);
    u64 DIRI = PAGING_GET_DIR_INDEX(Virtual);
    u64 TABI = PAGING_GET_TAB_INDEX(Virtual);

    T_PAGING_ML4_ENTRY *ML4E = &g_KernelPaging.ML4->Entries[PML4I];
    if (!ML4E->Present)
        return -1;

    T_PAGING_PDP *PDP = (T_PAGING_PDP *)(ML4E->Physical << 12);
    T_PAGING_PDP_ENTRY *PDPE = &PDP->Entries[PDPI];
    if (!PDPE->Present)
        return -2;

    T_PAGING_DIR *Dir = (T_PAGING_DIR *)(PDPE->Physical << 12);
    T_PAGING_DIR_ENTRY *DIRE = &Dir->Entries[DIRI];
    if (!DIRE->Present)
        return -3;

    T_PAGING_TAB *Tab = (T_PAGING_TAB *)(DIRE->Physical << 12);
    T_PAGING_TAB_ENTRY *TABE = &Tab->Entries[TABI];
    if (!TABE->Present)
        return -4;

    if (PhysicalOut)
        *PhysicalOut = TABE->Physical << 12;
    return 0;
}
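
The PAGING_GET_*_INDEX macros used above are defined elsewhere and are not part of this diff. For standard four-level x86-64 paging with 4 KiB pages they would presumably decompose the virtual address as follows (a sketch of the assumed definitions, not the project's actual header):

// Assumed layout: bits 47-39 index the PML4, 38-30 the PDP, 29-21 the
// directory, 20-12 the page table; bits 11-0 are the offset within the page.
#define PAGING_GET_PML4_INDEX(Virtual) (((u64)(Virtual) >> 39) & 0x1FF)
#define PAGING_GET_PDP_INDEX(Virtual)  (((u64)(Virtual) >> 30) & 0x1FF)
#define PAGING_GET_DIR_INDEX(Virtual)  (((u64)(Virtual) >> 21) & 0x1FF)
#define PAGING_GET_TAB_INDEX(Virtual)  (((u64)(Virtual) >> 12) & 0x1FF)

Each entry stores a 4 KiB-aligned frame number, which is why the code shifts Physical left by 12 to form an address and right by 12 to store one.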
// AccessBits is reserved for future use
void paging_map_page(u64 Virtual, u64 Physical, void *AccessBits)
{
@@ -59,6 +88,7 @@ void paging_map_page(u64 Virtual, u64 Physical, void *AccessBits)
        PDP = (T_PAGING_PDP *)_early_alloc();
        _zero_paging_structure(PDP);
        ML4E->Present = 1;
        ML4E->RW = 1;
        ML4E->Physical = (u64)PDP >> 12;
    }
@@ -69,6 +99,7 @@ void paging_map_page(u64 Virtual, u64 Physical, void *AccessBits)
        Dir = (T_PAGING_DIR *)_early_alloc();
        _zero_paging_structure(Dir);
        PDPE->Present = 1;
        PDPE->RW = 1;
        PDPE->Physical = (u64)Dir >> 12;
    }
@@ -80,12 +111,14 @@ void paging_map_page(u64 Virtual, u64 Physical, void *AccessBits)
        Tab = (T_PAGING_TAB *)_early_alloc();
        _zero_paging_structure(Tab);
        DIRE->Present = 1;
        DIRE->RW = 1;
        DIRE->Physical = (u64)Tab >> 12;
    }

    T_PAGING_TAB_ENTRY *TABE = &Tab->Entries[TABI];
    TABE->Physical = Physical >> 12;
    TABE->Present = 1;
    TABE->RW = 1;
}
void paging_map_area(u64 PhysicalStart, u64 VirtualStart, u64 Size, void *AccessBits)
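
The functional change in these hunks is the new RW bits: on x86-64 a write through a mapping is only permitted when RW is set at every level of the walk, so the freshly allocated PDP, directory and page-table entries are marked writable along with the final page. The body of paging_map_area is outside the diff; judging from the signature it presumably walks the range one 4 KiB page at a time, roughly as in this sketch (an assumption, not the committed implementation):

// Sketch only: round the size up to whole pages and map them one by one.
void paging_map_area(u64 PhysicalStart, u64 VirtualStart, u64 Size, void *AccessBits)
{
    u64 Pages = Size / 0x1000;
    if (Size % 0x1000)
        Pages++;
    for (u64 i = 0; i < Pages; i++)
        paging_map_page(VirtualStart + i * 0x1000, PhysicalStart + i * 0x1000, AccessBits);
}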
@@ -103,8 +136,87 @@ void paging_map_area(u64 PhysicalStart, u64 VirtualStart, u64 Size, void *AccessBits)
void paging_kernel_init(void)
{
    g_KernelPaging.ML4 = _early_alloc();
    kprintf("[i] Setting up new paging structures...\n");

    // Identity map all the BIOS EMM (extended memory). This covers a lot of the
    // classic PC memory-mapped I/O (e.g. video RAM) and probably our kernel and
    // loader artifacts.
    paging_map_area(0x0, 0x0, 0x00EFFFFF, 0);

    // Copy all the necessary ELF sections of our kernel image. They have to be
    // copied from the currently running kernel, because it holds .data and .bss
    // contents we need to keep. That forces a two-pass approach: the first pass
    // uses whatever system functions it needs to allocate and map memory for the
    // new kernel image; once it finishes, no further .data or .bss modifications
    // may happen. The second pass then copies the data.

    // First pass:
    kprintf("[i] Setting up paging for kernel ELF:\n");
    TELF *Elf = system_get_kernel_elf();
    for (u64 Section = 0; Section < Elf->SectionCount; Section++)
    {
        u64 Virtual = elf_section_get_virtual_address(Elf, Section);
        // Non-loadable sections have a zero virtual address.
        if (Virtual)
        {
            u64 Size = elf_section_get_size(Elf, Section);
            s8 *Name = elf_section_get_name(Elf, Section);
            kprintf(" - '%s': %x (%i)\n", Name, Virtual, Size);
            ASSERT(!(Virtual % 0x1000));
            u64 NumPages = Size / 0x1000;
            if (Size % 0x1000)
                NumPages++;
            for (u64 i = 0; i < NumPages; i++)
            {
                // Allocate the memory...
                u64 TargetPhysical = physmem_allocate_physical();
                // ...and map the page.
                paging_map_page(Virtual + (i * 0x1000), TargetPhysical, 0);
            }
        }
    }

    // Second pass:
    for (u64 Section = 0; Section < Elf->SectionCount; Section++)
    {
        u64 Virtual = elf_section_get_virtual_address(Elf, Section);
        if (Virtual)
        {
            u64 Size = elf_section_get_size(Elf, Section);
            u64 NumPages = Size / 0x1000;
            if (Size % 0x1000)
                NumPages++;
            for (u64 i = 0; i < NumPages; i++)
            {
                // Get the physical address of the page we copy data to.
                u64 TargetPhysical;
                u64 Source = Virtual + (i * 0x1000);
                ASSERT(_paging_resolve(Source, &TargetPhysical) == 0);
                // Make sure the last page, which might not be fully occupied,
                // is copied properly.
                u64 BytesToCopy = (Size - (i * 0x1000));
                if (BytesToCopy > 0x1000)
                    BytesToCopy = 0x1000;
                // Copy the data.
                kmemcpy((u8 *)TargetPhysical, (u8 *)Source, BytesToCopy);
                // Sanity check: the first quadword must match after the copy.
                ASSERT(((u64 *)Source)[0] == ((u64 *)TargetPhysical)[0]);
            }
        }
    }

    // Immediately apply the new paging structures.
    __asm__ volatile ("movq %%rax, %%cr3" :: "a" (g_KernelPaging.ML4));
    // TODO: release loader-provided paging in order to conserve memory.
}
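
The movq to CR3 takes effect immediately: from the next instruction on, every kernel virtual address is translated through the freshly built hierarchy, so the section copies made above become the live kernel image. Purely as an illustration (not part of the commit), the switch could be sanity-checked at the end of paging_kernel_init by resolving a known kernel data symbol through the new tables:

// Illustrative check only: a kernel data symbol must resolve through the new
// hierarchy once CR3 points at g_KernelPaging.ML4.
u64 Resolved;
ASSERT(_paging_resolve((u64)&g_KernelPaging, &Resolved) == 0);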


@@ -61,6 +61,8 @@ void physmem_init(void)
    if (PHYSMEM_ADDRESS_TO_METADATA_NUMBER(MetadataFrame) > 0)
        PANIC("Physmem: First allocated address > metadata covering!");
    g_PhysicalMemory.MemoryFree = g_PhysicalMemory.MemorySize - MetadataFrame;

    // Let's make sure that frame is mapped into our memory...
    if (MetadataFrame >= 0xEFFFFF)
        PANIC("Physmem: first allocated address > memory mapped by loader!");


@@ -225,4 +225,9 @@ void system_msr_set(u32 MSR, u64 Data)
u64 system_get_memory_top(void)
{
    return g_SystemInfo.MemoryTop;
}

TELF *system_get_kernel_elf(void)
{
    return &g_SystemInfo.KernelELF;
}