From 4958d0b6f1bd2e186db94b8e7802d37ecaf24f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergiusz=20=27q3k=27=20Baza=C5=84ski?= Date: Sun, 1 Sep 2013 23:06:00 +0200 Subject: [PATCH] First steps of new VMM. --- Kernel/include/Tier0/elf.h | 65 +++++++++++++++++++++++ Kernel/include/Tier0/paging.h | 20 ++++--- Kernel/include/Tier0/physmem.h | 4 ++ Kernel/include/Tier0/system.h | 6 +-- Kernel/include/load_context.h | 1 + Kernel/src/Tier0/cpp.c | 3 +- Kernel/src/Tier0/elf.c | 57 ++++++++++++++++++++ Kernel/src/Tier0/kmain.c | 30 ++++++----- Kernel/src/Tier0/paging.c | 96 ++++++++++++++++++++++++++++++++-- Kernel/src/Tier0/physmem.c | 6 ++- Kernel/src/Tier0/system.c | 37 +++++++++---- Kernel/src/Tier1/CKernel.cpp | 2 +- Kernel/src/Tier1/new.cpp | 5 ++ Loader/src/context.h | 1 + Loader/src/load.c | 2 + 15 files changed, 293 insertions(+), 42 deletions(-) create mode 100644 Kernel/include/Tier0/elf.h create mode 100644 Kernel/src/Tier0/elf.c diff --git a/Kernel/include/Tier0/elf.h b/Kernel/include/Tier0/elf.h new file mode 100644 index 0000000..2a6564e --- /dev/null +++ b/Kernel/include/Tier0/elf.h @@ -0,0 +1,65 @@ +#ifndef __ELF_H__ +#define __ELF_H__ + +#include "types.h" + +#define ELF_IDENT_MAGIC 0x464c457f // \x7FELF +typedef struct elf_ident { + u32 Magic; // \x7FELF + u8 Class; + u8 Data; + u8 Version; + u8 Padding[9]; +} __attribute__((packed)) _elf_ident; + +typedef struct { + _elf_ident Identification; + u16 Type; + u16 Machine; + u32 Version; + u64 Entry; + u64 ProgramHeaderOffset; + u64 SectionHeaderOffset; + u32 Flags; + u16 HeaderSize; + u16 ProgramHeaderEntrySize; + u16 NumProgramHeaderEntries; + u16 SectionHeaderEntrySize; + u16 NumSectionHeaderEntries; + u16 SectionEntryStrings; +} __attribute__((packed)) TELFHeader; + +#define SHT_PROGBITS 1 +#define SHT_NOBITS 8 + +typedef struct { + u32 Name; + u32 Type; + u64 Flags; + u64 Address; + u64 Offset; + u64 Size; + u32 Link; + u32 Info; + u64 Alignment; + u64 FixedSize; +} __attribute__((packed)) 
TELFSectionHeader; + +typedef struct { + void *Address; + u64 Size; + TELFHeader *Header; + + u64 SectionCount; + TELFSectionHeader *Sections; +} TELF; + +u8 elf_open(TELF *elf, void *Address, u64 Size); +s8* elf_section_get_name(TELF *elf, u64 Section); +u64 elf_section_get_physical_address(TELF *elf, u64 Section); +u64 elf_section_get_virtual_address(TELF *elf, u64 Section); +u64 elf_section_get_size(TELF *elf, u64 Section); +// Returns whether this section has actual bits to load (is data or text) +u64 elf_section_has_bits(TELF *elf, u64 SectionID); + +#endif \ No newline at end of file diff --git a/Kernel/include/Tier0/paging.h b/Kernel/include/Tier0/paging.h index 05a6ac4..9b8492a 100644 --- a/Kernel/include/Tier0/paging.h +++ b/Kernel/include/Tier0/paging.h @@ -5,8 +5,8 @@ #include "load_context.h" // Some helpful macros -#define PAGING_GET_ML4_INDEX(x) (((u64)x >> 39) & 0x1FF) -#define PAGING_GET_DPT_INDEX(x) (((u64)x >> 30) & 0x1FF) +#define PAGING_GET_PML4_INDEX(x) (((u64)x >> 39) & 0x1FF) +#define PAGING_GET_PDP_INDEX(x) (((u64)x >> 30) & 0x1FF) #define PAGING_GET_DIR_INDEX(x) (((u64)x >> 21) & 0x1FF) #define PAGING_GET_TAB_INDEX(x) (((u64)x >> 12) & 0x1FF) #define PAGING_GET_PAGE_OFFSET(x) (x & 0xFFF) @@ -34,7 +34,7 @@ struct S_PAGING_DIR_ENTRY { } __attribute__((packed)); typedef struct S_PAGING_DIR_ENTRY T_PAGING_DIR_ENTRY; -struct S_PAGING_DPT_ENTRY { +struct S_PAGING_PDP_ENTRY { u8 Present : 1; u8 RW : 1; u8 User : 1; @@ -43,7 +43,7 @@ struct S_PAGING_DPT_ENTRY { u64 Physical : 40; // The physical address is limited by MAXPHYADDR u64 Zero : 12; } __attribute__((packed)); -typedef struct S_PAGING_DPT_ENTRY T_PAGING_DPT_ENTRY; +typedef struct S_PAGING_PDP_ENTRY T_PAGING_PDP_ENTRY; struct S_PAGING_ML4_ENTRY { u8 Present : 1; @@ -67,14 +67,22 @@ typedef struct { } __attribute__((packed)) T_PAGING_DIR; typedef struct { - T_PAGING_DPT_ENTRY Entries[512]; // For use by the CPU -} __attribute__((packed)) T_PAGING_DPT; + T_PAGING_PDP_ENTRY Entries[512]; // 
For use by the CPU +} __attribute__((packed)) T_PAGING_PDP; typedef struct { T_PAGING_ML4_ENTRY Entries[512]; // For use by the CPU } __attribute__((packed)) T_PAGING_ML4; +// Generic functions T_PAGING_ML4 * paging_get_ml4(void); void paging_set_ml4(u64 ML4Physical); +// Management of kernel paging structures +void paging_kernel_init(void); +// Map a 4k page from Physical to Virtual. AccessBits is undefined right now. +void paging_map_page(u64 Virtual, u64 Physical, void *AccessBits); +// Map an arbitrary Size range from Physical to Virtual. Must still be kinda aligned. +void paging_map_area(u64 PhysicalStart, u64 VirtualStart, u64 Size, void *AccessBits); + #endif diff --git a/Kernel/include/Tier0/physmem.h b/Kernel/include/Tier0/physmem.h index 7230034..11b9507 100644 --- a/Kernel/include/Tier0/physmem.h +++ b/Kernel/include/Tier0/physmem.h @@ -7,7 +7,11 @@ // Page frame allocation void physmem_init(void); +// This returns a frame number, not an address! u64 physmem_allocate_page(void); +// But this returns an address: +u64 physmem_allocate_physical(void); + u64 physmem_page_to_physical(u64 Page); u64 physmem_physical_to_page(u64 Physical); diff --git a/Kernel/include/Tier0/system.h b/Kernel/include/Tier0/system.h index 7f160e7..54f8834 100644 --- a/Kernel/include/Tier0/system.h +++ b/Kernel/include/Tier0/system.h @@ -2,6 +2,7 @@ #define __SYSTEM_H__ #include "load_context.h" +#include "Tier0/elf.h" #define SYSTEM_KERNEL_VIRTUAL 0xFFFFFFFF80000000 @@ -108,10 +109,7 @@ typedef struct { T_SYSTEM_INVALID_RAM InvalidMemoryAreas[256]; u8 NumInvalidAreas; - // kernel code size and location - u64 KernelSize; - u64 KernelPhysicalStart; - u64 KernelVirtualStart; + TELF KernelELF; } T_SYSTEM_INFO; u64 system_cpuid(u32 Code); diff --git a/Kernel/include/load_context.h b/Kernel/include/load_context.h index 9855ffc..8524de8 100755 --- a/Kernel/include/load_context.h +++ b/Kernel/include/load_context.h @@ -25,6 +25,7 @@ struct S_LOAD_CONTEXT { // Kernel ELF void *KernelELF; + 
u64 KernelELFSize; } __attribute__((packed)); typedef struct S_LOAD_CONTEXT T_LOAD_CONTEXT; diff --git a/Kernel/src/Tier0/cpp.c b/Kernel/src/Tier0/cpp.c index 0fac052..d2d3fce 100644 --- a/Kernel/src/Tier0/cpp.c +++ b/Kernel/src/Tier0/cpp.c @@ -13,7 +13,8 @@ void cpp_call_ctors(void) kprintf("[i] Calling %i constructors before jumping to Tier1..\n", Number); for(u64 *C = (u64*)&_start_ctors; C < (u64*)&_end_ctors; ++C) { - ((void (*) (void)) (*C)) (); + kprintf("should've called %x\n", *C); + // ((void (*) (void)) (*C)) (); } } diff --git a/Kernel/src/Tier0/elf.c b/Kernel/src/Tier0/elf.c new file mode 100644 index 0000000..447eb98 --- /dev/null +++ b/Kernel/src/Tier0/elf.c @@ -0,0 +1,57 @@ +#include "Tier0/elf.h" +#include "Tier0/kstdio.h" + +u8 elf_open(TELF *elf, void *Address, u64 Size) +{ + elf->Address = Address; + elf->Size = Size; + + TELFHeader *Header = (TELFHeader *)Address; + if (Header->Identification.Magic != ELF_IDENT_MAGIC) + { + kprintf("ELF: bad magic (0x%x)\n", Header->Identification.Magic); + return 1; + } + elf->Header = Header; + elf->Sections = (TELFSectionHeader *)((u64)Header + (u64)Header->SectionHeaderOffset); + elf->SectionCount = Header->NumSectionHeaderEntries; + + if (Header->SectionHeaderOffset + elf->SectionCount * sizeof(TELFSectionHeader) > elf->Size) + { + kprintf("ELF: invalid section count/offset.\n"); + return 1; + } + return 0; +} + +s8* elf_section_get_name(TELF *elf, u64 SectionID) +{ + u64 StringSectionID = elf->Header->SectionEntryStrings; + TELFSectionHeader* StringSection = &elf->Sections[StringSectionID]; + TELFSectionHeader* RequestedSection = &elf->Sections[SectionID]; + return (s8*)elf->Header + StringSection->Offset + RequestedSection->Name; +} + +u64 elf_section_get_physical_address(TELF *elf, u64 SectionID) +{ + TELFSectionHeader* RequestedSection = &elf->Sections[SectionID]; + return (u64)elf->Header + RequestedSection->Offset; +} + +u64 elf_section_get_virtual_address(TELF *elf, u64 SectionID) +{ + 
TELFSectionHeader* RequestedSection = &elf->Sections[SectionID]; + return (u64)RequestedSection->Address; +} + +u64 elf_section_get_size(TELF *elf, u64 SectionID) +{ + TELFSectionHeader* RequestedSection = &elf->Sections[SectionID]; + return (u64)RequestedSection->Size; +} + +u64 elf_section_has_bits(TELF *elf, u64 SectionID) +{ + TELFSectionHeader* RequestedSection = &elf->Sections[SectionID]; + return !(RequestedSection->Type & SHT_NOBITS); +} \ No newline at end of file diff --git a/Kernel/src/Tier0/kmain.c b/Kernel/src/Tier0/kmain.c index 07ba62a..b970911 100644 --- a/Kernel/src/Tier0/kmain.c +++ b/Kernel/src/Tier0/kmain.c @@ -55,10 +55,16 @@ void kmain(u32 LoadContextAddress) if (!CPUID_HAS(APIC)) PANIC("CPU doesn't support APIC!"); + interrupts_init_simple(); + exceptions_init_simple(); + system_parse_load_context(LoadContext); kprintf("[i] Booting via %s.\n", LoadContext->LoaderName); kprintf("[i] Memory available: %uk.\n", system_get_memory_upper()); physmem_init(); + paging_kernel_init(); + for(;;){} + // // Let's create a new kernel stack // u64 StackVirtual = (u64)paging_scratch_allocate(); @@ -80,20 +86,18 @@ void kmain(u32 LoadContextAddress) if (RSDPAddress == 0) PANIC("ACPI not supported! What is this, 1999?"); - //smp_initialize(); - interrupts_init_simple(); - exceptions_init_simple(); - apic_enable_lapic(); + smp_initialize(); + // apic_enable_lapic(); heap_init_simple(); - // enable FPU/SSE... - __asm__ volatile( - "movq %cr0, %rax;" - "and $0xfffb, %ax;" - "or $0x2, %rax;" - "movq %rax, %cr0;" - "movq %cr4, %rax;" - "orq $0x600, %rax;" - "movq %rax, %cr4;"); + // // enable FPU/SSE... 
+ // __asm__ volatile( + // "movq %cr0, %rax;" + // "and $0xfffb, %ax;" + // "or $0x2, %rax;" + // "movq %rax, %cr0;" + // "movq %cr4, %rax;" + // "orq $0x600, %rax;" + // "movq %rax, %cr4;"); cpp_call_ctors(); cpp_start_ckernel(); diff --git a/Kernel/src/Tier0/paging.c b/Kernel/src/Tier0/paging.c index c333636..4eeb238 100644 --- a/Kernel/src/Tier0/paging.c +++ b/Kernel/src/Tier0/paging.c @@ -4,13 +4,9 @@ #include "Tier0/panic.h" #include "Tier0/system.h" #include "Tier0/physmem.h" +#include "Tier0/elf.h" #include "types.h" -struct { - T_PAGING_TAB_ENTRY *TempPage; // For temp page mapping. - u64 TempPageVirtual; -} g_KernelPaging; - T_PAGING_ML4 *paging_get_ml4(void) { u64 Address; @@ -21,4 +17,94 @@ T_PAGING_ML4 *paging_get_ml4(void) void paging_set_ml4(u64 ML4Physical) { __asm volatile ( "mov %%rax, %%cr3\n" :: "a" (ML4Physical)); +} + +void _zero_paging_structure(void *Structure) +{ + for (unsigned i = 0; i < 512; i++) + ((u64 *)Structure)[i] = 0; +} + + +struct { + T_PAGING_ML4 *ML4; +} g_KernelPaging; + +// allocate a page frame and make sure that it is accessible before +// our main paging is running - check whether it fits the extended +// memory area (up to 0xEFFFFF), as this is the area the bootloader +// identity mapped up to from 0x0. 
+void *_early_alloc(void) +{ + u64 Address = physmem_allocate_physical(); + ASSERT(Address < 0x00EFFFFF - 0x1000); + return (void*)Address; +} + +// AccessBits is reserved for future use +void paging_map_page(u64 Virtual, u64 Physical, void *AccessBits) +{ + if (Virtual % 0x1000 || Physical % 0x1000) + PANIC("BUG: Requsted allocation of unaligned address.\n"); + + u64 PML4I = PAGING_GET_PML4_INDEX(Virtual); + u64 PDPI = PAGING_GET_PDP_INDEX(Virtual); + u64 DIRI = PAGING_GET_DIR_INDEX(Virtual); + u64 TABI = PAGING_GET_TAB_INDEX(Virtual); + + T_PAGING_ML4_ENTRY *ML4E = &g_KernelPaging.ML4->Entries[PML4I]; + T_PAGING_PDP *PDP = (T_PAGING_PDP *)(ML4E->Physical << 12); + if (!ML4E->Present) + { + PDP = (T_PAGING_PDP *)_early_alloc(); + _zero_paging_structure(PDP); + ML4E->Present = 1; + ML4E->Physical = (u64)PDP >> 12; + } + + T_PAGING_PDP_ENTRY *PDPE = &PDP->Entries[PDPI]; + T_PAGING_DIR *Dir = (T_PAGING_DIR *)(PDPE->Physical << 12); + if (!PDPE->Present) + { + Dir = (T_PAGING_DIR *)_early_alloc(); + _zero_paging_structure(Dir); + PDPE->Present = 1; + PDPE->Physical = (u64)Dir >> 12; + } + + + T_PAGING_DIR_ENTRY *DIRE = &Dir->Entries[DIRI]; + T_PAGING_TAB *Tab = (T_PAGING_TAB *)(DIRE->Physical << 12); + if (!DIRE->Present) + { + Tab = (T_PAGING_TAB *)_early_alloc(); + _zero_paging_structure(Tab); + DIRE->Present = 1; + DIRE->Physical = (u64)Tab >> 12; + } + + T_PAGING_TAB_ENTRY *TABE = &Tab->Entries[TABI]; + TABE->Physical = Physical >> 12; + TABE->Present = 1; +} + +void paging_map_area(u64 PhysicalStart, u64 VirtualStart, u64 Size, void *AccessBits) +{ + if (VirtualStart % 0x1000 || PhysicalStart % 0x1000) + PANIC("BUG: Requsted allocation of unaligned address.\n"); + + u64 AlignedSize = Size; + if (AlignedSize % 0x1000) + AlignedSize = (AlignedSize + 0x1000) & 0xFFFFF000; + for (u64 i = 0; i < AlignedSize; i += 0x1000) + paging_map_page(VirtualStart + i, PhysicalStart + i, AccessBits); +} + +void paging_kernel_init(void) +{ + g_KernelPaging.ML4 = _early_alloc(); + 
// Identity map all the BIOS EMM (extended memory). This covers a lot of + // classic PC I/O mapped stuff (eg. video RAM) and probably our kernel and + // loader artifacts. + paging_map_area(0x0, 0x0, 0x00EFFFFF, 0); } \ No newline at end of file diff --git a/Kernel/src/Tier0/physmem.c b/Kernel/src/Tier0/physmem.c index a9e762f..822c0f2 100755 --- a/Kernel/src/Tier0/physmem.c +++ b/Kernel/src/Tier0/physmem.c @@ -47,7 +47,6 @@ u64 __physmem_allocate_first_page(void) if (NextPageStart > g_PhysicalMemory.MemorySize) PANIC("Out of memory!"); } - kprintf("%x\n", NextPageStart); return NextPageStart; } @@ -116,6 +115,11 @@ u64 physmem_allocate_page(void) return 0; } +u64 physmem_allocate_physical(void) +{ + return (physmem_allocate_page() * PHYSMEM_PAGE_SIZE); +} + void physmem_free_page(u64 Page) { if (Page > PHYSMEM_METADATA_COVERS_BITS) diff --git a/Kernel/src/Tier0/system.c b/Kernel/src/Tier0/system.c index c53fc64..877f594 100644 --- a/Kernel/src/Tier0/system.c +++ b/Kernel/src/Tier0/system.c @@ -111,7 +111,7 @@ void system_parse_load_context(T_LOAD_CONTEXT *LoadContext) BIOSArea->Size = 1024 *1024; g_SystemInfo.NumInvalidAreas++; - // Mark the loader physical location ad unavailable. + // Mark the loader physical location as unavailable. 
T_SYSTEM_INVALID_RAM *LoaderArea = &g_SystemInfo.InvalidMemoryAreas[g_SystemInfo.NumInvalidAreas]; LoaderArea->Base = LoadContext->ReservedPhysicalStart; LoaderArea->Size = LoadContext->ReservedPhysicalEnd - LoadContext->ReservedPhysicalStart; @@ -128,6 +128,31 @@ void system_parse_load_context(T_LOAD_CONTEXT *LoadContext) LAPICArea->Base = 0xFEE00000; LAPICArea->Size = 0xFEEFFFFF - 0xFEE00000; g_SystemInfo.NumInvalidAreas++; + + // Parse the kernel ELF + TELF *ELF = &g_SystemInfo.KernelELF; + if (elf_open(ELF, LoadContext->KernelELF, LoadContext->KernelELFSize)) + PANIC("Error parsing kernel ELF file.\n"); + else + { + kprintf("[i] Kernel has the following sections:\n"); + for (u32 i = 0; i < ELF->SectionCount; i++) + { + u64 Virtual = elf_section_get_virtual_address(ELF, i); + u64 HasBits = elf_section_has_bits(ELF, i); + if (Virtual && HasBits) + kprintf(" - %s (virt: 0x%X, phys: 0x%x, %i bytes)\n", + elf_section_get_name(ELF, i), + Virtual, + elf_section_get_physical_address(ELF, i), + elf_section_get_size(ELF, i)); + else if (Virtual) + kprintf(" - %s (virt: 0x%X, (zero'd bits), %i bytes)\n", + elf_section_get_name(ELF, i), + Virtual, + elf_section_get_size(ELF, i)); + } + } kprintf("[i] Highest unavailable address is %x.\n", HighestUnavailable); g_SystemInfo.MemoryTop = HighestUnavailable; @@ -197,16 +222,6 @@ void system_msr_set(u32 MSR, u64 Data) __asm__ volatile("wrmsr" :: "a"((u32)(Data & 0xFFFFFFFF)), "d"((u32)(Data >> 32)), "c"(MSR)); } -u64 system_get_kernel_size(void) -{ - return g_SystemInfo.KernelSize; -} - -u64 system_get_kernel_virtual_start(void) -{ - return g_SystemInfo.KernelVirtualStart; -} - u64 system_get_memory_top(void) { return g_SystemInfo.MemoryTop; diff --git a/Kernel/src/Tier1/CKernel.cpp b/Kernel/src/Tier1/CKernel.cpp index 3e339cc..89483fd 100644 --- a/Kernel/src/Tier1/CKernel.cpp +++ b/Kernel/src/Tier1/CKernel.cpp @@ -36,7 +36,7 @@ CLogger &CKernel::Logger(void) void CKernel::Start(void) { kprintf("[i] Hello from C++ land!\n"); - + 
for (;;) {} if (m_dwMagic != CKERNEL_MAGIC) { kprintf("[e] Error! My constructor wasn't called properly.\n"); diff --git a/Kernel/src/Tier1/new.cpp b/Kernel/src/Tier1/new.cpp index 50547a5..d412666 100644 --- a/Kernel/src/Tier1/new.cpp +++ b/Kernel/src/Tier1/new.cpp @@ -1,26 +1,31 @@ #include "types.h" extern "C" { #include "Tier0/heap.h" + #include "Tier0/panic.h" }; typedef long unsigned int size_t; void *operator new(size_t size) { + PANIC("nonew4u"); return kmalloc(size); } void *operator new[](size_t size) { + PANIC("nonew4u"); return kmalloc(size); } void operator delete(void *p) { + PANIC("nodel4u"); kfree(p); } void operator delete[](void *p) { + PANIC("nodel4u"); kfree(p); } diff --git a/Loader/src/context.h b/Loader/src/context.h index e869db0..ae59885 100644 --- a/Loader/src/context.h +++ b/Loader/src/context.h @@ -19,6 +19,7 @@ struct S_LOAD_CONTEXT { // Kernel ELF u64 KernelELF; + u64 KernelELFSize; } __attribute__((packed)); typedef struct S_LOAD_CONTEXT T_LOAD_CONTEXT; diff --git a/Loader/src/load.c b/Loader/src/load.c index 1e03417..18f9fcd 100644 --- a/Loader/src/load.c +++ b/Loader/src/load.c @@ -181,6 +181,8 @@ u32 load(void *Multiboot, unsigned int Magic) g_Context.VGATextModeUsed = 1; g_Context.MultibootUsed = 1; g_Context.MultibootHeader = (u32)Multiboot; + g_Context.KernelELF = (u64)KernelStart; + g_Context.KernelELFSize = (u64)(KernelEnd - KernelStart); printf("Load context at 0x%x\n", (u64)&g_Context);