diff --git a/Kernel/include/Tier0/paging.h b/Kernel/include/Tier0/paging.h
index d477c21..18d63e8 100644
--- a/Kernel/include/Tier0/paging.h
+++ b/Kernel/include/Tier0/paging.h
@@ -2,6 +2,7 @@
 #define __PAGING_H__
 
 #include "types.h"
+#include "load_context.h"
 
 // Some helpful macros
 #define PAGING_GET_ML4_INDEX(x) (((u64)x >> 39) & 0x1FF)
@@ -73,19 +74,19 @@ typedef struct {
     T_PAGING_ML4_ENTRY Entries[512]; // For use by the CPU
 } __attribute__((packed)) T_PAGING_ML4;
 
-void paging_init_simple(u64 KernelPhysicalStart, u64 KernelPhysicalSize);
-T_PAGING_ML4 * paging_get_ml4(void);
-u8 paging_get_physical(u64 Virtual, u64 *Physical);
-u8 paging_get_physical_ex(u64 Virtual, u64 *Physical,T_PAGING_ML4 *ML4);
+T_PAGING_ML4 * paging_get_ml4(void);
+void paging_set_ml4(u64 ML4Physical);
+u8 paging_get_physical(u64 Virtual, u64 *Physical);
+u8 paging_get_physical_ex(u64 Virtual, u64 *Physical,T_PAGING_ML4 *ML4);
 
-void paging_use_ml4(T_PAGING_ML4 *ML4);
-T_PAGING_ML4 *paging_get_kernel_ml4(void);
-/*void paging_map_kernel_page(u64 Virtual, u64 Physical);
-void paging_map_kernel_table(u64 Virtual, u64 Physical);
-void paging_map_page(u64 Virtual, u64 Physical, T_PAGING_DIRECTORY *Directory,
-    u8 User, u8 RW);
-void paging_use_directory(T_PAGING_DIRECTORY *Directory);
-T_PAGING_DIRECTORY *paging_get_directory(void);*/
+// The temporary page is a page you can use to access some temporary physical
+// location. There is only one page, 4096 bytes large. Deal with it.
+void paging_temp_page_setup(T_LOAD_CONTEXT *LoadContext);
+static inline u64 paging_temp_page_get_virtual(void)
+{
+    return 0xFF000000 + 511 * 0x1000;
+}
+void paging_temp_page_set_physical(u64 Physical);
 
 #endif
diff --git a/Kernel/include/Tier0/physmem.h b/Kernel/include/Tier0/physmem.h
index ee24bdb..9555c89 100644
--- a/Kernel/include/Tier0/physmem.h
+++ b/Kernel/include/Tier0/physmem.h
@@ -5,10 +5,13 @@
 
 #define PHYSMEM_PAGE_SIZE 4096
 
+// Page frame allocation
 void physmem_init(u64 MemorySize);
 u64 physmem_allocate_page(void);
-
 u64 physmem_page_to_physical(u64 Page);
 u64 physmem_physical_to_page(u64 Physical);
 
+// Read physical data
+void physmem_read(u64 Base, u64 Size, void *Destination);
+
 #endif
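The physmem_read() declared above copies Size bytes starting at physical address Base into Destination, going through the kernel's single temporary page mapping. A minimal sketch of a call site, assuming paging_temp_page_setup() has already run; the 0xE0000 source address and the buffer are purely illustrative:

    u8 Buffer[64];
    physmem_read(0xE0000, sizeof(Buffer), Buffer); // hypothetical physical source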
diff --git a/Kernel/src/Tier0/kmain.c b/Kernel/src/Tier0/kmain.c
index 4541668..a40e671 100644
--- a/Kernel/src/Tier0/kmain.c
+++ b/Kernel/src/Tier0/kmain.c
@@ -1,6 +1,7 @@
 #include "types.h"
 #include "load_context.h"
 #include "Tier0/kstdio.h"
+#include "Tier0/kstdlib.h"
 //#include "Tier0/gdt.h"
 #include "Tier0/paging.h"
 //#include "Tier0/acpi.h"
@@ -19,6 +20,9 @@
 extern u64 _start;
 extern u64 _end;
 
+u8 test[4096 * 2];
+u8 test2[4096 * 2];
+
 // Real kernel entry point, called from loader
 void kmain(u32 LoadContextAddress)
 {
@@ -30,7 +34,7 @@ void kmain(u32 LoadContextAddress)
     else
         kclear();
 
-    kprintf("\n _ \n"
+    kprintf(" _ \n"
            " ___ _ _ ___ _ _ _____| |_ ___ ___ \n"
           " | _| | | _| | | | . | -_| _|\n"
           " |___|___|___|___|_|_|_|___|___|_| \n\n");
@@ -48,21 +52,13 @@ void kmain(u32 LoadContextAddress)
     kprintf("[i] Loader physical: %x-%x.\n", LoadContext->LoaderPhysicalStart,
         LoadContext->LoaderPhysicalEnd);
     kprintf("[i] Kernel virtual: %x-%x.\n", &_start, &_end);
 
-    paging_init_simple(LoadContext->KernelPhysicalStart, LoadContext->KernelPhysicalEnd - LoadContext->KernelPhysicalStart);
-
-    //kprintf("physical: %x\n", PhysicalData);
-    //paging_get_physical_ex(0xf0000000, &test, paging_get_kernel_ml4());
-    //kprintf("%x\n", test);
+    paging_temp_page_setup(LoadContext);
+    //gdt_create_flat();
 
     for (;;) {}
 
-    /*gdt_create_flat();
-
-    physmem_init();
-    system_parse_multiboot_header(MultibootHeader);
-    //Add kernel memory as reserved.
-    physmem_mark_as_used(0);
+    /*physmem_mark_as_used(0);
     physmem_mark_as_used(1);
 
     kprintf("[i] Booting via %s.\n", system_get_bootloader_name());
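kmain() now only sets up the temporary page window. Once paging_temp_page_setup(LoadContext) has succeeded, any single 4 KiB physical frame can be inspected by remapping that fixed window; a rough sketch of the intended pattern (the 0x200000 frame address is only an example and must be 4 KiB aligned, otherwise paging_temp_page_set_physical() panics):

    paging_temp_page_set_physical(0x200000);           // map the frame; hypothetical address
    u8 *Window = (u8 *)paging_temp_page_get_virtual();  // fixed virtual address of the window
    kprintf("first byte: %x\n", Window[0]);             // read through the mapping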
diff --git a/Kernel/src/Tier0/paging.c b/Kernel/src/Tier0/paging.c
index a35e69e..6f12eb4 100644
--- a/Kernel/src/Tier0/paging.c
+++ b/Kernel/src/Tier0/paging.c
@@ -4,19 +4,9 @@
 #include "Tier0/panic.h"
 #include "types.h"
 
-// The basic structures for the first kernel thread...
-// Here's an overview of how it will look like:
-//
-// ML4 -> DPT -> - DIR0 -> 512x Tab -> 512x512x Page
-//               - DIR1 -> 512x Tab -> 512x512x Page
-//               - DIR2 -> 512x Tab -> 512x512x Page
-//               - DIR3 -> 512x Tab -> 512x512x Page
-// This lets us have more-or-less dynamic paging of the whole first 32-bit
-// memory space... Since we can't have true dynamic allocation of paging
-// structures (as he wave no kernel heap, as we have no paging), we'll have to
-// settle for this. The size of this whole mess should be around 8mib or so.
 struct {
-    T_PAGING_ML4 *g_KernelML4; // This is allocated semi-dynamically
+    T_PAGING_TAB_ENTRY *TempPage; // For temp page mapping.
+    u64 TempPageVirtual;
 } __attribute__((packed)) g_KernelPaging;
 T_PAGING_ML4 *paging_get_ml4(void)
 {
@@ -26,37 +16,60 @@ T_PAGING_ML4 *paging_get_ml4(void)
     return (T_PAGING_ML4*)Address;
 }
 
-T_PAGING_ML4 *paging_get_kernel_ml4(void)
+void paging_temp_page_setup(T_LOAD_CONTEXT *LoadContext)
 {
-    //return &g_KernelPaging.ML4;
-    return 0;
+    // Try using page 511 (last) from kernel table
+    u64 PageVirtual = 0xFF000000 + 511 * 4096;
+    u64 MaxMapped = 4096 * 512; // first 2Mib by loader
+
+    u64 KernelSize = LoadContext->KernelPhysicalEnd - LoadContext->KernelPhysicalStart;
+    if (KernelSize >= MaxMapped)
+        PANIC("Cannot set up temp page, kernel > 2Mib!");
+
+    T_PAGING_ML4 *ML4 = paging_get_ml4();
+    kprintf("[i] Loader-provided ML4 @0x%x\n", ML4);
+
+    if ((u64)ML4 > MaxMapped)
+        PANIC("Cannot set up temp page, ML4 not accessible!");
+
+    u64 aDPT = ML4->Entries[PAGING_GET_ML4_INDEX(PageVirtual)].Physical << 12;
+
+    if (aDPT > MaxMapped)
+        PANIC("Cannot set up temp page, DPT not accessible!");
+    T_PAGING_DPT *DPT = (T_PAGING_DPT *)aDPT;
+
+    u64 aDir = DPT->Entries[PAGING_GET_DPT_INDEX(PageVirtual)].Physical << 12;
+
+    if (aDir > MaxMapped)
+        PANIC("Cannot set up temp page, Dir not accessible!");
+    T_PAGING_DIR *Dir = (T_PAGING_DIR *)aDir;
+
+    u64 aTab = Dir->Entries[PAGING_GET_DIR_INDEX(PageVirtual)].Physical << 12;
+
+    if (aTab > MaxMapped)
+        PANIC("Cannot set up temp page, Tab not accessible!");
+    T_PAGING_TAB *Tab = (T_PAGING_TAB *)aTab;
+
+    g_KernelPaging.TempPage = &Tab->Entries[511];
+
+    kprintf("[i] Using paging table entry @0x%x as temporary page.\n", g_KernelPaging.TempPage);
+    g_KernelPaging.TempPageVirtual = PageVirtual;
+}
+
+void paging_temp_page_set_physical(u64 Physical)
+{
+    if ((Physical & 0xFFF) != 0)
+        PANIC("Tried to set temp page physical to unaligned address!");
+
+    // TODO: check if smaller than maxphyaddr
+
+    g_KernelPaging.TempPage->Physical = Physical >> 12;
+    __asm__ volatile("invlpg %0" :: "m"(*(u32 *)g_KernelPaging.TempPageVirtual));
 }
 
 u8 paging_get_physical_ex(u64 Virtual, u64 *Physical, T_PAGING_ML4 *ML4)
 {
-    /*u16 ML4Index = PAGING_GET_ML4_INDEX(Virtual);
-    u16 DPTIndex = PAGING_GET_DPT_INDEX(Virtual);
-    u16 DirIndex = PAGING_GET_DIR_INDEX(Virtual);
-    u16 TabIndex = PAGING_GET_TAB_INDEX(Virtual);
-
-
-    if (!ML4->Entries[ML4Index].Present)
-        return 1;
-    T_PAGING_DPT *DPT = ML4->Children[ML4Index];
-
-    if (!DPT->Entries[DPTIndex].Present)
-        return 1;
-    T_PAGING_DIR *Dir = DPT->Children[DPTIndex];
-
-    if (!Dir->Entries[DirIndex].Present)
-        return 1;
-    T_PAGING_TAB *Tab = Dir->Children[DirIndex];
-
-    if (!Tab->Entries[TabIndex].Present)
-        return 1;
-
-    (*Physical) = (Tab->Entries[TabIndex].Physical << 12) + PAGING_GET_PAGE_OFFSET(Virtual);*/
-
+    PANIC("not implemented!");
     return 0;
 }
@@ -66,146 +79,7 @@ u8 paging_get_physical(u64 Virtual, u64 *Physical)
     return paging_get_physical_ex(Virtual, Physical, ml4);
 }
 
-// This initializes the paging structure for the first kernel thread
-void paging_init_simple(u64 KernelPhysicalStart, u64 KernelPhysicalSize)
+void paging_set_ml4(u64 ML4Physical)
 {
-
+    __asm volatile ( "mov %%rax, %%cr3\n" :: "a" (ML4Physical));
 }
-
-void paging_use_ml4(T_PAGING_ML4 *ML4)
-{
-    //__asm volatile ( "mov %%rax, %%cr3\n" :: "a" (ML4->PhysicalAddress));
-}
-
-/*void paging_dump_directory(void)
-{
-    for (u32 i = 0; i < 10; i++)
-    {
-        kprintf("[i] Virtual 0x%X - 0x%X, Table 0x%X.\n", i * 4096 * 1024, \
-            (i + 1) * 4096 * 1024, g_kernel_page_directory.Entries[i]);
-    }
-}
-
-// Hey, Serge, or whoever will read this.
-//
-// Do NOT modify me to be used in user processes. I know it may be tempting to
-// do so, but don't. I'm dead serious.
-//
-// This is strictly (!) kernel-only. This assumes that the tables are already
-// created. If we were to create an empty set of tables, it would mean wasting
-// 1MB of memory for each process - and that's a Bad Thing. However, we can
-// permit ourselves to do this for the kernel. Heck, it's necessary - we need
-// solid paging to make a solid heap which will enable us to create dynamic
-// page tables for user processes. Woo.
-
-// This maps 4KB
-void paging_map_kernel_page(u32 Virtual, u32 Physical)
-{
-    u16 DirectoryIndex = (Virtual >> 22) & 0x3FF;
-
-    // Set directory entry to available
-    u32 *DirectoryEntry = &g_kernel_page_directory.Entries[DirectoryIndex];
-    *DirectoryEntry |= 0x03;
-
-    u16 TableIndex = (Virtual >> 12) & 0x3FF;
-
-    T_PAGING_PAGE *Page = &g_kernel_page_tables[DirectoryIndex].Pages[TableIndex];
-
-    *((u32*)Page) = 0;
-    Page->Present = 1;
-    Page->RW = 1;
-    Page->Physical = (Physical & 0xFFFFF000) >> 12;
-
-    // Flush the TLB
-    __asm__ volatile("invlpg %0" :: "m" (Virtual));
-}
-
-
-void paging_map_page(u32 Virtual, u32 Physical, T_PAGING_DIRECTORY *Directory,
-    u8 User, u8 RW)
-{
-    u16 DirectoryIndex = (Virtual >> 22) & 0x3FF;
-    u32 Entry = Directory->Entries[DirectoryIndex];
-
-    u8 TablePresent = (Entry & 0x01) > 0;
-
-
-    if (!TablePresent)
-    {
-        u32 NewTablePhysical;
-        T_PAGING_TABLE *NewTable = (T_PAGING_TABLE*)kmalloc_p(
-            sizeof(T_PAGING_TABLE), 1, &NewTablePhysical);
-
-        kmemsetw((void*)NewTable, 0, 1024);
-
-        u32 *Entry = &Directory->Entries[DirectoryIndex];
-        *Entry = 1;
-        *Entry |= (RW << 1);
-        *Entry |= (User << 2);
-        *Entry |= ((u32)NewTable);
-
-        Directory->Tables[DirectoryIndex] = NewTable;
-    }
-
-    T_PAGING_TABLE *Table = Directory->Tables[DirectoryIndex];
-
-    u16 TableIndex = (Virtual >> 12) & 0x3FF;
-
-    T_PAGING_PAGE *Page = &Table->Pages[TableIndex];
-
-    *((u32*)Page) = 1;
-    Page->User = User;
-    Page->RW = RW;
-    Page->Physical = (Physical & 0xFFFFF000) >> 12;
-
-    // Flush the TLB
-    __asm__ volatile("invlpg %0" :: "m" (Virtual));
-}
-
-// This maps 4MB
-void paging_map_kernel_table(u32 Virtual, u32 Physical)
-{
-    for (u16 i = 0; i < 1024; i++)
-        paging_map_kernel_page(Virtual + i * 0x1000, Physical + i * 0x1000);
-}
-
-void paging_use_directory(T_PAGING_DIRECTORY *Directory)
-{
-    __asm volatile ( "mov %0, %%eax\n"
-        "mov %%eax, %%cr3\n"
-        "mov %%cr0, %%eax\n"
-        "orl $0x80000000, %%eax\n"
-        "mov %%eax, %%cr0\n" :: "m" (Directory->PhysicalAddress));
-}
-
-T_PAGING_DIRECTORY *paging_get_directory(void)
-{
-    u32 Address;
-    __asm__ volatile("mov %%cr3, %%eax\n"
-        "mov %%eax, %0\n" : "=r"(Address));
-    return (T_PAGING_DIRECTORY *)Address;
-}
-
-void paging_init_simple(void)
-{
-    // Initialize the directory
-    for (u16 i = 0; i < 1024; i++)
-    {
-        g_kernel_page_directory.Entries[i] = (((u32)&g_kernel_page_tables[i])
-            + 0x40000000);
-        g_kernel_page_directory.Tables[i] = &g_kernel_page_tables[i];
-    }
-
-    g_kernel_page_directory.PhysicalAddress =
-        (u32)g_kernel_page_directory.Entries + 0x40000000;
-
-    // Initialize the kernel mappings (0..8MB and 3072..3080MB
-    paging_map_kernel_table(0x00000000, 0x00000000);
-    paging_map_kernel_table(0x00400000, 0x00400000);
-
-    paging_map_kernel_table(0xC0000000, 0x00000000);
-    paging_map_kernel_table(0xC0400000, 0x00400000);
-
-    paging_use_directory(&g_kernel_page_directory);
-}*/
-
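For reference, the table walk in paging_temp_page_setup() ends at entry 511 because of how the chosen virtual address decodes. Assuming the PAGING_GET_*_INDEX macros use the standard x86-64 shifts (39, 30, 21 and 12 bits, as PAGING_GET_ML4_INDEX above does for bit 39), this stand-alone snippet prints the four indices for the temporary page's address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t Virtual = 0xFF000000 + 511 * 0x1000; // 0xFF1FF000
        printf("ML4 %llu, DPT %llu, DIR %llu, TAB %llu\n",
               (unsigned long long)((Virtual >> 39) & 0x1FF),  // 0
               (unsigned long long)((Virtual >> 30) & 0x1FF),  // 3
               (unsigned long long)((Virtual >> 21) & 0x1FF),  // 504
               (unsigned long long)((Virtual >> 12) & 0x1FF)); // 511, hence Tab->Entries[511]
        return 0;
    }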
diff --git a/Kernel/src/Tier0/physmem.c b/Kernel/src/Tier0/physmem.c
index e246b8f..40130d7 100755
--- a/Kernel/src/Tier0/physmem.c
+++ b/Kernel/src/Tier0/physmem.c
@@ -23,6 +23,7 @@
 #include "Tier0/system.h"
 #include "Tier0/kstdio.h"
 #include "Tier0/panic.h"
+#include "Tier0/paging.h"
 
 // The amount of memory in the system, or the top usable pointer.
 u64 g_MemorySize;
@@ -60,3 +61,24 @@ u64 physmem_physical_to_page(u64 Physical)
 {
     return Physical / PHYSMEM_PAGE_SIZE;
 }
+
+void physmem_read(u64 Base, u64 Size, void *Destination)
+{
+    u8 *DataSource = (u8 *)paging_temp_page_get_virtual();
+    u64 OffsetInSource = Base & 0xFFF;
+
+    u64 PreviousPageBase = Base & ~((u64)0xFFF);
+    paging_temp_page_set_physical(PreviousPageBase);
+    for (u64 i = 0; i < Size; i++)
+    {
+        u64 PageBase = (Base + i) & ~((u64)0xFFF);
+
+        if (PageBase != PreviousPageBase)
+            paging_temp_page_set_physical(PageBase);
+
+        PreviousPageBase = PageBase;
+
+        *((u8 *)Destination + i) = DataSource[OffsetInSource % 4096];
+        OffsetInSource++;
+    }
+}
diff --git a/Loader/src/load.c b/Loader/src/load.c
index b7408aa..2c4fa26 100644
--- a/Loader/src/load.c
+++ b/Loader/src/load.c
@@ -306,7 +306,6 @@ u32 create_ia32e_paging(u64 KernelPhysicalStart, u64 KernelVirtualStart, u64 Ker
         page_tab_high[i] = Address | 3;
         Address += 0x1000;
 
-        if (i >= 512) break;
     }
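physmem_read() copies byte by byte and remaps the temporary window whenever the loop crosses into the next 4 KiB frame, so reads that straddle page boundaries work transparently. A hypothetical write counterpart, not part of this patch, could follow the same pattern; a sketch against the Tier0 headers above:

    #include "types.h"
    #include "Tier0/paging.h"

    void physmem_write(u64 Base, u64 Size, const void *Source)
    {
        u8 *Window = (u8 *)paging_temp_page_get_virtual();
        u64 Offset = Base & 0xFFF;

        u64 PreviousPageBase = Base & ~((u64)0xFFF);
        paging_temp_page_set_physical(PreviousPageBase);
        for (u64 i = 0; i < Size; i++)
        {
            u64 PageBase = (Base + i) & ~((u64)0xFFF);

            // Remap the window when the copy crosses into the next frame.
            if (PageBase != PreviousPageBase)
                paging_temp_page_set_physical(PageBase);
            PreviousPageBase = PageBase;

            Window[Offset % 4096] = *((const u8 *)Source + i);
            Offset++;
        }
    }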