diff --git a/src/allocator/frame/mod.rs b/src/allocator/frame/mod.rs
index 5b6898a2368bca492d845e9eb218a4ad1450e1f0..99c9d3215c8f9a6415f943a12fac909232bdb00c 100644
--- a/src/allocator/frame/mod.rs
+++ b/src/allocator/frame/mod.rs
@@ -20,6 +20,7 @@ impl FrameCount {
     }
 }
 
+#[derive(Debug)]
 pub struct FrameUsage {
     used: FrameCount,
     total: FrameCount,
@@ -58,3 +59,21 @@ pub trait FrameAllocator {
 
     unsafe fn usage(&self) -> FrameUsage;
 }
+
+impl<T> FrameAllocator for &mut T where T: FrameAllocator {
+    unsafe fn allocate(&mut self, count: FrameCount) -> Option<PhysicalAddress> {
+        T::allocate(self, count)
+    }
+    unsafe fn free(&mut self, address: PhysicalAddress, count: FrameCount) {
+        T::free(self, address, count)
+    }
+    unsafe fn allocate_one(&mut self) -> Option<PhysicalAddress> {
+        T::allocate_one(self)
+    }
+    unsafe fn free_one(&mut self, address: PhysicalAddress) {
+        T::free_one(self, address)
+    }
+    unsafe fn usage(&self) -> FrameUsage {
+        T::usage(self)
+    }
+}
diff --git a/src/arch/x86_64.rs b/src/arch/x86_64.rs
index 74c37548f59b291b3649968840edd1ea0c1e9da1..1008df8e8f90483ccc4c6b1bc62a040848d4cac6 100644
--- a/src/arch/x86_64.rs
+++ b/src/arch/x86_64.rs
@@ -7,7 +7,7 @@ use crate::{
     VirtualAddress,
 };
 
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct X8664Arch;
 
 impl Arch for X8664Arch {
diff --git a/src/page/entry.rs b/src/page/entry.rs
index 6732da4970834771245f1e6d0c53c15246ab6a85..800482cc2f1e30890e6a3377dfef0ba96ec978e5 100644
--- a/src/page/entry.rs
+++ b/src/page/entry.rs
@@ -2,6 +2,7 @@ use core::marker::PhantomData;
 
 use crate::{
     Arch,
+    PageFlags,
     PhysicalAddress,
 };
 
@@ -23,13 +24,24 @@ impl<A: Arch> PageEntry<A> {
     }
 
     #[inline(always)]
-    pub fn address(&self) -> PhysicalAddress {
-        PhysicalAddress(self.data & A::ENTRY_ADDRESS_MASK)
+    pub fn address(&self) -> Result<PhysicalAddress, PhysicalAddress> {
+        let addr = PhysicalAddress(self.data & A::ENTRY_ADDRESS_MASK);
+
+        if self.present() {
+            Ok(addr)
+        } else {
+            Err(addr)
+        }
     }
 
     #[inline(always)]
-    pub fn flags(&self) -> usize {
-        self.data & A::ENTRY_FLAGS_MASK
+    pub fn flags(&self) -> PageFlags<A> {
+        unsafe { PageFlags::from_data(self.data & A::ENTRY_FLAGS_MASK) }
+    }
+    #[inline(always)]
+    pub fn set_flags(&mut self, flags: PageFlags<A>) {
+        self.data &= !A::ENTRY_FLAGS_MASK;
+        self.data |= flags.data();
     }
 
     #[inline(always)]
diff --git a/src/page/flags.rs b/src/page/flags.rs
index 6058637e659591e75fc129a7212576f3855ff223..22b8780caff8c52d308a017e01067c03fd5b4bf8 100644
--- a/src/page/flags.rs
+++ b/src/page/flags.rs
@@ -111,7 +111,11 @@ impl<A: Arch> PageFlags<A> {
 impl<A: Arch> fmt::Debug for PageFlags<A> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("PageFlags")
-            .field("data", &self.data)
+            .field("present", &self.has_present())
+            .field("write", &self.has_write())
+            .field("executable", &self.has_execute())
+            .field("user", &self.has_user())
+            .field("bits", &format_args!("{:#0x}", self.data))
             .finish()
     }
-}
\ No newline at end of file
+}
diff --git a/src/page/flush.rs b/src/page/flush.rs
index 9aae51f5990e657153dc5606bec05ad42586e80a..1114c33bc8d62fee217e9a5f8c6db3f9f75308c9 100644
--- a/src/page/flush.rs
+++ b/src/page/flush.rs
@@ -8,6 +8,10 @@ use crate::{
     VirtualAddress,
 };
 
+pub trait Flusher<A: Arch> {
+    fn consume(&mut self, flush: PageFlush<A>);
+}
+
 #[must_use = "The page table must be flushed, or the changes unsafely ignored"]
 pub struct PageFlush<A> {
     virt: VirtualAddress,
@@ -31,9 +35,10 @@ impl<A: Arch> PageFlush<A> {
     }
 }
 
-#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
-pub struct PageFlushAll<A> {
-    phantom: PhantomData<A>,
+// TODO: Might remove Drop and add #[must_use] again, but ergonomically I prefer being able to pass
+// a flusher, and have it dropped by the end of the function it is passed to, in order to flush.
+pub struct PageFlushAll<A: Arch> {
+    phantom: PhantomData<fn() -> A>,
 }
 
 impl<A: Arch> PageFlushAll<A> {
@@ -43,15 +48,27 @@ impl<A: Arch> PageFlushAll<A> {
         }
     }
 
-    pub fn consume(&self, flush: PageFlush<A>) {
-        unsafe { flush.ignore(); }
-    }
-
-    pub fn flush(self) {
-        unsafe { A::invalidate_all(); }
-    }
+    pub fn flush(self) {}
 
     pub unsafe fn ignore(self) {
         mem::forget(self);
     }
 }
+impl<A: Arch> Drop for PageFlushAll<A> {
+    fn drop(&mut self) {
+        unsafe { A::invalidate_all(); }
+    }
+}
+impl<A: Arch> Flusher<A> for PageFlushAll<A> {
+    fn consume(&mut self, flush: PageFlush<A>) {
+        unsafe { flush.ignore(); }
+    }
+}
+impl<A: Arch, T: Flusher<A> + ?Sized> Flusher<A> for &mut T {
+    fn consume(&mut self, flush: PageFlush<A>) {
+        <T as Flusher<A>>::consume(self, flush)
+    }
+}
+impl<A: Arch> Flusher<A> for () {
+    fn consume(&mut self, _: PageFlush<A>) {}
+}
diff --git a/src/page/mapper.rs b/src/page/mapper.rs
index 6617547ca77bdfd06019a31e31db4586d99b82c9..17d4b11f80b24a756115d0b7a3b48735348a0d9f 100644
--- a/src/page/mapper.rs
+++ b/src/page/mapper.rs
@@ -8,44 +8,61 @@ use crate::{
     PageFlush,
     PageTable,
     PhysicalAddress,
+    TableKind,
     VirtualAddress,
 };
 
-pub struct PageMapper<'f, A, F> {
+pub struct PageMapper<A, F> {
     table_addr: PhysicalAddress,
-    allocator: &'f mut F,
-    phantom: PhantomData<A>,
+    allocator: F,
+    _phantom: PhantomData<fn() -> A>,
 }
 
-impl<'f, A: Arch, F: FrameAllocator> PageMapper<'f, A, F> {
-    pub unsafe fn new(table_addr: PhysicalAddress, allocator: &'f mut F) -> Self {
+impl<A: Arch, F: FrameAllocator> PageMapper<A, F> {
+    pub unsafe fn new(table_addr: PhysicalAddress, allocator: F) -> Self {
         Self {
             table_addr,
             allocator,
-            phantom: PhantomData,
+            _phantom: PhantomData,
         }
     }
 
-    pub unsafe fn create(allocator: &'f mut F) -> Option<Self> {
+    pub unsafe fn create(mut allocator: F) -> Option<Self> {
         let table_addr = allocator.allocate_one()?;
         Some(Self::new(table_addr, allocator))
     }
 
-    pub unsafe fn current(allocator: &'f mut F) -> Self {
+    pub unsafe fn current(allocator: F) -> Self {
         let table_addr = A::table();
         Self::new(table_addr, allocator)
     }
+    pub fn is_current(&self) -> bool {
+        unsafe { self.table().phys() == A::table() }
+    }
 
-    pub unsafe fn make_current(&mut self) {
+    pub unsafe fn make_current(&self) {
         A::set_table(self.table_addr);
     }
 
-    pub unsafe fn table(&self) -> PageTable<A> {
-        PageTable::new(
-            VirtualAddress::new(0),
-            self.table_addr,
-            A::PAGE_LEVELS - 1
-        )
+    pub fn table(&self) -> PageTable<A> {
+        // SAFETY: The only way to initialize a PageMapper is via new(), and we assume it upholds
+        // all necessary invariants for this to be safe.
+        unsafe {
+            PageTable::new(
+                VirtualAddress::new(0),
+                self.table_addr,
+                A::PAGE_LEVELS - 1
+            )
+        }
+    }
+
+    pub unsafe fn remap(&mut self, virt: VirtualAddress, flags: PageFlags<A>) -> Option<PageFlush<A>> {
+        self.visit(virt, |p1, i| {
+            let mut entry = p1.entry(i)?;
+            entry.set_flags(flags);
+            p1.set_entry(i, entry);
+            Some(PageFlush::new(virt))
+        }).flatten()
     }
 
     pub unsafe fn map(&mut self, virt: VirtualAddress, flags: PageFlags<A>) -> Option<PageFlush<A>> {
@@ -71,7 +88,8 @@ impl<'f, A: Arch, F: FrameAllocator> PageMapper<'f, A, F> {
                 None => {
                     let next_phys = self.allocator.allocate_one()?;
                     //TODO: correct flags?
-                    table.set_entry(i, PageEntry::new(next_phys.data() | A::ENTRY_FLAG_READWRITE | A::ENTRY_FLAG_DEFAULT_TABLE));
+                    let flags = A::ENTRY_FLAG_READWRITE | A::ENTRY_FLAG_DEFAULT_TABLE | if virt.kind() == TableKind::User { A::ENTRY_FLAG_USER } else { 0 };
+                    table.set_entry(i, PageEntry::new(next_phys.data() | flags));
                     table.next(i)?
                 }
             };
@@ -79,27 +97,74 @@ impl<'f, A: Arch, F: FrameAllocator> PageMapper<'f, A, F> {
             }
         }
     }
+    pub unsafe fn map_linearly(&mut self, phys: PhysicalAddress, flags: PageFlags<A>) -> Option<(VirtualAddress, PageFlush<A>)> {
+        let virt = A::phys_to_virt(phys);
+        self.map_phys(virt, phys, flags).map(|flush| (virt, flush))
+    }
+    fn visit<T>(&self, virt: VirtualAddress, f: impl FnOnce(&mut PageTable<A>, usize) -> T) -> Option<T> {
+        let mut table = self.table();
+        unsafe {
+            loop {
+                let i = table.index_of(virt)?;
+                if table.level() == 0 {
+                    return Some(f(&mut table, i));
+                } else {
+                    table = table.next(i)?;
+                }
+            }
+        }
+    }
+    pub fn translate(&self, virt: VirtualAddress) -> Option<(PhysicalAddress, PageFlags<A>)> {
+        let entry = self.visit(virt, |p1, i| unsafe { p1.entry(i) })??;
+        Some((entry.address().ok()?, entry.flags()))
+    }
 
-    pub unsafe fn unmap(&mut self, virt: VirtualAddress) -> Option<PageFlush<A>> {
-        let (old, flush) = self.unmap_phys(virt)?;
-        self.allocator.free_one(old.address());
+    pub unsafe fn unmap(&mut self, virt: VirtualAddress, unmap_parents: bool) -> Option<PageFlush<A>> {
+        let (old, _, flush) = self.unmap_phys(virt, unmap_parents)?;
+        self.allocator.free_one(old);
         Some(flush)
     }
 
-    pub unsafe fn unmap_phys(&mut self, virt: VirtualAddress) -> Option<(PageEntry<A>, PageFlush<A>)> {
+    pub unsafe fn unmap_phys(&mut self, virt: VirtualAddress, unmap_parents: bool) -> Option<(PhysicalAddress, PageFlags<A>, PageFlush<A>)> {
         //TODO: verify virt is aligned
         let mut table = self.table();
-        //TODO: unmap parents
-        loop {
-            let i = table.index_of(virt)?;
-            if table.level() == 0 {
-                let entry_opt = table.entry(i);
+        let level = table.level();
+        unmap_phys_inner(virt, &mut table, level, false, &mut self.allocator).map(|(pa, pf)| (pa, pf, PageFlush::new(virt)))
+    }
+}
+unsafe fn unmap_phys_inner<A: Arch>(virt: VirtualAddress, table: &mut PageTable<A>, initial_level: usize, unmap_parents: bool, allocator: &mut impl FrameAllocator) -> Option<(PhysicalAddress, PageFlags<A>)> {
+    let i = table.index_of(virt)?;
+
+    if table.level() == 0 {
+        let entry_opt = table.entry(i);
+        table.set_entry(i, PageEntry::new(0));
+        let entry = entry_opt?;
+
+        Some((entry.address().ok()?, entry.flags()))
+    } else {
+        let mut subtable = table.next(i)?;
+
+        let res = unmap_phys_inner(virt, &mut subtable, initial_level, unmap_parents, allocator)?;
+
+        if unmap_parents {
+            // TODO: Use a counter? This would reduce the remaining number of available bits, but could be
+            // faster (benchmark is needed).
+            let is_still_populated = (0..A::PAGE_ENTRIES).map(|j| subtable.entry(j).expect("must be within bounds")).any(|e| e.present());
+
+            if !is_still_populated {
+                allocator.free_one(table.phys());
                 table.set_entry(i, PageEntry::new(0));
-                let entry = entry_opt?;
-                return Some((entry, PageFlush::new(virt)));
-            } else {
-                table = table.next(i)?;
             }
         }
+
+        Some(res)
+    }
+}
+impl<A, F: core::fmt::Debug> core::fmt::Debug for PageMapper<A, F> {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.debug_struct("PageMapper")
+            .field("frame", &self.table_addr)
+            .field("allocator", &self.allocator)
+            .finish()
     }
 }
diff --git a/src/page/table.rs b/src/page/table.rs
index 23a8fa129330674ba05a5df9d9efec18f82622ed..0abc78486d791050aa375248c8525a22efe2eff8 100644
--- a/src/page/table.rs
+++ b/src/page/table.rs
@@ -93,16 +93,14 @@ impl<A: Arch> PageTable<A> {
     }
 
     pub unsafe fn next(&self, i: usize) -> Option<Self> {
-        if self.level > 0 {
-            let entry = self.entry(i)?;
-            if entry.present() {
-                return Some(PageTable::new(
-                    self.entry_base(i)?,
-                    entry.address(),
-                    self.level - 1
-                ));
-            }
+        if self.level == 0 {
+            return None;
         }
-        None
+
+        Some(PageTable::new(
+            self.entry_base(i)?,
+            self.entry(i)?.address().ok()?,
+            self.level - 1,
+        ))
     }
 }