diff --git a/rmm b/rmm
index 61ba2e6c8e2bba0b6460b681b7f566fa0fe231ca..27bb8e44ddfb962741e7023b35d0e9eafff041be 160000
--- a/rmm
+++ b/rmm
@@ -1 +1 @@
-Subproject commit 61ba2e6c8e2bba0b6460b681b7f566fa0fe231ca
+Subproject commit 27bb8e44ddfb962741e7023b35d0e9eafff041be
diff --git a/src/acpi/hpet.rs b/src/acpi/hpet.rs
index 241bdaad77983c8e70af0051f74a59e28a7f1aeb..e037e5cda4eb30103aa77982d5c720aa26181422 100644
--- a/src/acpi/hpet.rs
+++ b/src/acpi/hpet.rs
@@ -3,7 +3,8 @@ use core::{mem, ptr};
 use core::intrinsics::{volatile_load, volatile_store};
 
 use crate::memory::Frame;
-use crate::paging::{KernelMapper, PhysicalAddress, PageFlags};
+use crate::paging::{KernelMapper, PhysicalAddress, Page, PageFlags, VirtualAddress};
+use crate::paging::entry::EntryFlags;
 
 use super::sdt::Sdt;
 use super::{ACPI_TABLE, find_sdt};
@@ -63,13 +64,38 @@ impl Hpet {
     }
 }
 
+//TODO: x86 use assumes only one HPET and only one GenericAddressStructure
+#[cfg(target_arch = "x86")]
+impl GenericAddressStructure {
+    pub unsafe fn init(&self, mapper: &mut KernelMapper) {
+        let frame = Frame::containing_address(PhysicalAddress::new(self.address as usize));
+        let page = Page::containing_address(VirtualAddress::new(crate::HPET_OFFSET));
+
+        mapper
+            .get_mut()
+            .expect("KernelMapper locked re-entrant while mapping memory for GenericAddressStructure")
+            .map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true))
+            .expect("failed to map memory for GenericAddressStructure")
+            .flush();
+    }
+
+    pub unsafe fn read_u64(&self, offset: usize) -> u64{
+        volatile_load((crate::HPET_OFFSET + offset) as *const u64)
+    }
+
+    pub unsafe fn write_u64(&mut self, offset: usize, value: u64) {
+        volatile_store((crate::HPET_OFFSET + offset) as *mut u64, value);
+    }
+}
+
+#[cfg(not(target_arch = "x86"))]
 impl GenericAddressStructure {
     pub unsafe fn init(&self, mapper: &mut KernelMapper) {
         let frame = Frame::containing_address(PhysicalAddress::new(self.address as usize));
         let (_, result) = mapper
             .get_mut()
             .expect("KernelMapper locked re-entrant while mapping memory for GenericAddressStructure")
-            .map_linearly(frame.start_address(), PageFlags::new().write(true))
+            .map_linearly(frame.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true))
             .expect("failed to map memory for GenericAddressStructure");
         result.flush();
     }
diff --git a/src/arch/x86/consts.rs b/src/arch/x86/consts.rs
index e05b27542c1f3e030d9c23c2edda274cd1b42875..bcd6df173aaa90e1d669f6355677e08773a473a2 100644
--- a/src/arch/x86/consts.rs
+++ b/src/arch/x86/consts.rs
@@ -3,21 +3,28 @@
 // Each PML4 entry references up to 512 GB of memory
 // The second from the top (510) PML4 is reserved for the kernel
-    /// Offset of kernel
+    /// Offset of kernel (256 MiB max)
     pub const KERNEL_OFFSET: usize = 0xC000_0000;
 
-    /// Offset to kernel heap
+    // Framebuffer mapped by bootloader to 0xD000_0000 (128 MiB max)
+
+    // Offset to APIC mappings (optional)
+    pub const LAPIC_OFFSET: usize = 0xD800_0000;
+    pub const IOAPIC_OFFSET: usize = LAPIC_OFFSET + 4096;
+    pub const HPET_OFFSET: usize = IOAPIC_OFFSET + 4096;
+
+    /// Offset to kernel heap (256 MiB max)
     pub const KERNEL_HEAP_OFFSET: usize = 0xE000_0000;
 
     /// Size of kernel heap
    pub const KERNEL_HEAP_SIZE: usize = rmm::MEGABYTE;
 
-    /// Offset to kernel percpu variables
+    /// Offset to kernel percpu variables (256 MiB max)
     pub const KERNEL_PERCPU_OFFSET: usize = 0xF000_0000;
 
     /// Size of kernel percpu variables
     pub const KERNEL_PERCPU_SHIFT: u8 = 16; // 2^16 = 64 KiB
     pub const KERNEL_PERCPU_SIZE: usize = 1_usize << KERNEL_PERCPU_SHIFT;
 
-    /// Offset of physmap
+    /// Offset of physmap (1 GiB max)
     // This needs to match RMM's PHYS_OFFSET
     pub const PHYS_OFFSET: usize = 0x8000_0000;
diff --git a/src/arch/x86/device/ioapic.rs b/src/arch/x86/device/ioapic.rs
index 9fdf416f90cc7a10eff27d32b0f0cb8a275d7dc7..bbd3735864901346bbc9900a95a29863ddecf56a 100644
--- a/src/arch/x86/device/ioapic.rs
+++ b/src/arch/x86/device/ioapic.rs
@@ -8,7 +8,7 @@ use crate::acpi::madt::{self, Madt, MadtEntry, MadtIoApic, MadtIntSrcOverride};
 use crate::arch::interrupt::irq;
 use crate::memory::Frame;
-use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, RmmA, RmmArch};
+use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, RmmA, RmmArch, VirtualAddress};
 use crate::paging::entry::EntryFlags;
 
 use super::super::cpuid::cpuid;
 
@@ -234,7 +234,7 @@ pub unsafe fn handle_ioapic(mapper: &mut KernelMapper, madt_ioapic: &'static Mad
 
     // map the I/O APIC registers
     let frame = Frame::containing_address(PhysicalAddress::new(madt_ioapic.address as usize));
-    let page = Page::containing_address(RmmA::phys_to_virt(frame.start_address()));
+    let page = Page::containing_address(VirtualAddress::new(crate::IOAPIC_OFFSET));
 
     assert!(mapper.translate(page.start_address()).is_none());
 
diff --git a/src/arch/x86/device/local_apic.rs b/src/arch/x86/device/local_apic.rs
index 58745bcb1d535e2229e304f23e1e330cf9fe2f68..9ad4f575e5be457e455d71b2be6f70158c6cc815 100644
--- a/src/arch/x86/device/local_apic.rs
+++ b/src/arch/x86/device/local_apic.rs
@@ -2,7 +2,7 @@ use core::sync::atomic::{self, AtomicU32};
 use core::intrinsics::{volatile_load, volatile_store};
 use x86::msr::*;
 
-use crate::paging::{KernelMapper, PhysicalAddress, PageFlags, RmmA, RmmArch};
+use crate::paging::{KernelMapper, PhysicalAddress, PageFlags, RmmA, RmmArch, VirtualAddress};
 
 use super::super::cpuid::cpuid;
 
@@ -45,7 +45,7 @@ impl LocalApic {
         let mapper = mapper.get_mut().expect("expected KernelMapper not to be locked re-entrant while initializing LAPIC");
 
         let physaddr = PhysicalAddress::new(rdmsr(IA32_APIC_BASE) as usize & 0xFFFF_0000);
-        let virtaddr = RmmA::phys_to_virt(physaddr);
+        let virtaddr = VirtualAddress::new(crate::LAPIC_OFFSET);
 
         self.address = virtaddr.data();
         self.x2 = cpuid().map_or(false, |cpuid| {