From 465c461b603e98885f6f158d338de0f768523361 Mon Sep 17 00:00:00 2001
From: 4lDO2 <4lDO2@protonmail.com>
Date: Tue, 22 Dec 2020 12:34:31 +0100
Subject: [PATCH] WIP: Stop using recursive mapping.

Note that this is very preliminary: so far I have merely gotten my already-freezing
kernel branch not to triple fault, but I would probably apply this patch to
upstream as well.

What is changed here is that, rather than relying on recursive mapping to
access page table frames, the paging code now uses linear translation
(virt = phys + KERNEL_OFFSET). The only problem is that the paging code now
assumes the entire physical address space remains mapped, which is not
architecturally guaranteed on x86_64, even though systems with more RAM than a
single PML4 entry covers (PML4_SIZE, i.e. 512 GiB) are very rare. We would
probably want to lazily (but still linearly) map the physical address space
using huge pages.
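
For reference, the translation itself is trivial; a minimal sketch of the idea
follows (the helper name and raw entry type are illustrative only, not the
kernel's actual API), assuming the frame is a valid, exclusively owned page
table and that the physical range containing it really is linearly mapped at
KERNEL_OFFSET:

    // Sketch of the linear-translation idea used throughout this patch:
    // a page-table frame is reached by adding KERNEL_OFFSET to its physical
    // address instead of going through the recursive PML4 slot.
    const KERNEL_OFFSET: usize = 0xFFFF_8000_0000_0000;

    /// Safety: `frame_phys` must be the physical address of a valid,
    /// exclusively owned page table, and that physical memory must actually
    /// be mapped linearly at KERNEL_OFFSET.
    unsafe fn table_ptr(frame_phys: usize) -> *mut [u64; 512] {
        (frame_phys + KERNEL_OFFSET) as *mut [u64; 512]
    }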
---
 src/arch/x86_64/consts.rs        |   7 +--
 src/arch/x86_64/paging/mapper.rs |  51 ++++++++++-----
 src/arch/x86_64/paging/mod.rs    | 101 +++++++++---------------------
 src/arch/x86_64/paging/table.rs  |  30 +++++----
 src/arch/x86_64/rmm.rs           |   5 --
 src/context/memory.rs            | 103 ++++++++++++++-----------------
 src/ptrace.rs                    |  11 ++--
 src/scheme/user.rs               |  58 ++++++++---------
 src/syscall/process.rs           |  56 +++++++----------
 9 files changed, 183 insertions(+), 239 deletions(-)

diff --git a/src/arch/x86_64/consts.rs b/src/arch/x86_64/consts.rs
index 2d9ac401..2ac10c27 100644
--- a/src/arch/x86_64/consts.rs
+++ b/src/arch/x86_64/consts.rs
@@ -1,22 +1,17 @@
 // Because the memory map is so important to not be aliased, it is defined here, in one place
 // The lower 256 PML4 entries are reserved for userspace
 // Each PML4 entry references up to 512 GB of memory
-// The top (511) PML4 is reserved for recursive mapping
 // The second from the top (510) PML4 is reserved for the kernel
     /// The size of a single PML4
     pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
     pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
 
-    /// Offset of recursive paging
-    pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
-    pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK)/PML4_SIZE;
-
     /// Offset of kernel
     pub const KERNEL_OFFSET: usize = 0xFFFF_8000_0000_0000; //TODO: better calculation
     pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE;
 
     /// Offset to kernel heap
-    pub const KERNEL_HEAP_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
+    pub const KERNEL_HEAP_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
     pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
     /// Size of kernel heap
     pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
diff --git a/src/arch/x86_64/paging/mapper.rs b/src/arch/x86_64/paging/mapper.rs
index da45a06c..a89ea678 100644
--- a/src/arch/x86_64/paging/mapper.rs
+++ b/src/arch/x86_64/paging/mapper.rs
@@ -1,32 +1,55 @@
-use core::ptr::Unique;
-
 use crate::memory::{allocate_frames, deallocate_frames, Frame};
-
 use super::{Page, PAGE_SIZE, PageFlags, PhysicalAddress, VirtualAddress};
-use super::table::{self, Table, Level4};
+
 use super::RmmA;
+use super::table::{Table, Level4};
 
 pub use rmm::{PageFlush, PageFlushAll};
 
-#[derive(Debug)]
-pub struct Mapper {
-    p4: Unique<Table<Level4>>,
+pub struct Mapper<'table> {
+    p4: &'table mut Table<Level4>,
 }
 
-impl Mapper {
-    /// Create a new page table
-    pub unsafe fn new() -> Mapper {
-        Mapper {
-            p4: Unique::new_unchecked(table::P4),
+impl core::fmt::Debug for Mapper<'_> {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "Mapper referencing P4 at {:p}", self.p4)
+    }
+}
+
+impl<'table> Mapper<'table> {
+    /// Wrap the current address space in a mapper.
+    ///
+    /// # Safety
+    ///
+    /// For this to be safe, the caller must have exclusive access to the pointer in the CR3
+    /// register.
+    // TODO: Find some lifetime hack we can use for ensuring exclusive access at compile time?
+    pub unsafe fn current() -> Mapper<'table> {
+        // SAFETY: We know that CR3 must be a valid frame, since the processor would triple fault
+        // otherwise, and the caller has ensured exclusive access to the table at KERNEL_OFFSET + CR3.
+        Self::from_p4_unchecked(&mut Frame::containing_address(PhysicalAddress::new(x86::controlregs::cr3() as usize)))
+    }
+    /// Wrap a top-level page table (an entire address space) in a mapper.
+    ///
+    /// # Safety
+    ///
+    /// For this to be safe, the caller must have exclusive access to the frame argument. The frame
+    /// must also be valid, and must remain valid for the whole 'table lifetime.
+    pub unsafe fn from_p4_unchecked(frame: &mut Frame) -> Self {
+        let phys = frame.start_address();
+        let virt = VirtualAddress::new(phys.data() + crate::KERNEL_OFFSET);
+
+        Self {
+            p4: &mut *(virt.data() as *mut Table<Level4>),
         }
     }
 
     pub fn p4(&self) -> &Table<Level4> {
-        unsafe { self.p4.as_ref() }
+        &*self.p4
     }
 
     pub fn p4_mut(&mut self) -> &mut Table<Level4> {
-        unsafe { self.p4.as_mut() }
+        &mut *self.p4
     }
 
     /// Map a page to a frame
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index ebf1e5f2..c34c0e66 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -9,7 +9,7 @@ use x86::msr;
 use crate::memory::Frame;
 
 use self::mapper::{Mapper, PageFlushAll};
-use self::temporary_page::TemporaryPage;
+use self::table::{Level4, Table};
 
 pub use rmm::{
     Arch as RmmArch,
@@ -204,15 +204,11 @@ pub unsafe fn init_ap(
 
     let mut new_table = InactivePageTable::from_address(bsp_table);
 
-    let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(
-        crate::USER_TMP_MISC_OFFSET,
-    )));
-
-    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
-        let flush_all = map_tss(cpu_id, mapper);
+    {
+        let flush_all = map_tss(cpu_id, &mut new_table.mapper());
         // The flush can be ignored as this is not the active table. See later active_table.switch
         flush_all.ignore();
-    });
+    };
 
     // This switches the active table, which is setup by the bootloader, to a correct table
     // setup by the lambda above. This will also flush the TLB
@@ -223,20 +219,20 @@ pub unsafe fn init_ap(
 
 #[derive(Debug)]
 pub struct ActivePageTable {
-    mapper: Mapper,
+    mapper: Mapper<'static>,
     locked: bool,
 }
 
 impl Deref for ActivePageTable {
-    type Target = Mapper;
+    type Target = Mapper<'static>;
 
-    fn deref(&self) -> &Mapper {
+    fn deref(&self) -> &Mapper<'static> {
         &self.mapper
     }
 }
 
 impl DerefMut for ActivePageTable {
-    fn deref_mut(&mut self) -> &mut Mapper {
+    fn deref_mut(&mut self) -> &mut Mapper<'static> {
         &mut self.mapper
     }
 }
@@ -245,14 +241,14 @@ impl ActivePageTable {
     pub unsafe fn new(_table_kind: TableKind) -> ActivePageTable {
         page_table_lock();
         ActivePageTable {
-            mapper: Mapper::new(),
+            mapper: Mapper::current(),
             locked: true,
         }
     }
 
     pub unsafe fn new_unlocked(_table_kind: TableKind) -> ActivePageTable {
         ActivePageTable {
-            mapper: Mapper::new(),
+            mapper: Mapper::current(),
             locked: false,
         }
     }
@@ -281,47 +277,6 @@ impl ActivePageTable {
         }
     }
 
-    pub fn with<F>(
-        &mut self,
-        table: &mut InactivePageTable,
-        temporary_page: &mut TemporaryPage,
-        f: F,
-    ) where
-        F: FnOnce(&mut Mapper),
-    {
-        {
-            let backup = Frame::containing_address(unsafe {
-                RmmA::table()
-            });
-
-            // map temporary_page to current p4 table
-            let p4_table = temporary_page.map_table_frame(
-                backup.clone(),
-                PageFlags::new_table().write(true), //TODO: RISC-V will not like this
-                self,
-            );
-
-            // overwrite recursive mapping
-            self.p4_mut()[crate::RECURSIVE_PAGE_PML4].set(
-                table.frame.clone(),
-                PageFlags::new_table().write(true), //TODO: RISC-V will not like this
-            );
-            self.flush_all();
-
-            // execute f in the new context
-            f(self);
-
-            // restore recursive mapping to original p4 table
-            p4_table[crate::RECURSIVE_PAGE_PML4].set(
-                backup,
-                PageFlags::new_table().write(true), //TODO: RISC-V will not like this
-            );
-            self.flush_all();
-        }
-
-        temporary_page.unmap(self);
-    }
-
     pub unsafe fn address(&self) -> usize {
         RmmA::table().data()
     }
@@ -341,28 +296,29 @@ pub struct InactivePageTable {
 }
 
 impl InactivePageTable {
-    pub fn new(
+    /// Create a new inactive page table, located at a given frame.
+    ///
+    /// # Safety
+    ///
+    /// For this to be safe, the caller must have exclusive access to the corresponding virtual
+    /// address of the frame.
+    pub unsafe fn new(
+        _active_table: &mut ActivePageTable,
         frame: Frame,
-        active_table: &mut ActivePageTable,
-        temporary_page: &mut TemporaryPage,
     ) -> InactivePageTable {
+        // FIXME: Use active_table to ensure that the newly-allocated frame be linearly mapped, in
+        // case it is outside the pre-mapped physical address range, or if such a range is too
+        // large to fit the whole physical address space in the virtual address space.
         {
-            let table = temporary_page.map_table_frame(
-                frame.clone(),
-                PageFlags::new_table().write(true), //TODO: RISC-V will not like this
-                active_table,
-            );
+            let table = VirtualAddress::new(frame.start_address().data() + crate::KERNEL_OFFSET);
             // now we are able to zero the table
-            table.zero();
-            // set up recursive mapping for the table
-            table[crate::RECURSIVE_PAGE_PML4].set(
-                frame.clone(),
-                PageFlags::new_table().write(true), //TODO: RISC-V will not like this
-            );
+
+            // SAFETY: The caller must ensure exclusive access to the pointed-to virtual address of
+            // the frame.
+            (&mut *(table.data() as *mut Table::<Level4>)).zero();
         }
-        temporary_page.unmap(active_table);
 
-        InactivePageTable { frame: frame }
+        InactivePageTable { frame }
     }
 
     pub unsafe fn from_address(address: usize) -> InactivePageTable {
@@ -371,6 +327,9 @@ impl InactivePageTable {
         }
     }
 
+    pub fn mapper<'inactive_table>(&'inactive_table mut self) -> Mapper<'inactive_table> {
+        unsafe { Mapper::from_p4_unchecked(&mut self.frame) }
+    }
     pub unsafe fn address(&self) -> usize {
         self.frame.start_address().data()
     }
diff --git a/src/arch/x86_64/paging/table.rs b/src/arch/x86_64/paging/table.rs
index 7023b115..ff928225 100644
--- a/src/arch/x86_64/paging/table.rs
+++ b/src/arch/x86_64/paging/table.rs
@@ -5,12 +5,11 @@ use core::marker::PhantomData;
 use core::ops::{Index, IndexMut};
 
 use crate::memory::allocate_frames;
+use crate::paging::VirtualAddress;
 
 use super::{ENTRY_COUNT, PageFlags};
 use super::entry::{Entry, EntryFlags};
 
-pub const P4: *mut Table<Level4> = (crate::RECURSIVE_PAGE_OFFSET | 0x7f_ffff_f000) as *mut _;
-
 pub trait TableLevel {}
 
 pub enum Level4 {}
@@ -39,7 +38,7 @@ impl HierarchicalLevel for Level2 {
     type NextLevel = Level1;
 }
 
-#[repr(packed(4096))]
+#[repr(C, align(4096))]
 pub struct Table<L: TableLevel> {
     entries: [Entry; ENTRY_COUNT],
     level: PhantomData<L>,
@@ -84,11 +83,11 @@ impl<L> Table<L> where L: TableLevel {
 
 impl<L> Table<L> where L: HierarchicalLevel {
     pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
-        self.next_table_address(index).map(|address| unsafe { &*(address as *const _) })
+        self.next_table_address(index).map(|address| unsafe { &*(address.data() as *const _) })
     }
 
     pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
-        self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
+        self.next_table_address(index).map(|address| unsafe { &mut *(address.data() as *mut _) })
     }
 
     pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
@@ -104,14 +103,19 @@ impl<L> Table<L> where L: HierarchicalLevel {
         self.next_table_mut(index).unwrap()
     }
 
-    fn next_table_address(&self, index: usize) -> Option<usize> {
-        let entry_flags = self[index].flags();
-        if entry_flags.has_present() && !entry_flags.has_flag(EntryFlags::HUGE_PAGE.bits()) {
-            let table_address = self as *const _ as usize;
-            Some((table_address << 9) | (index << 12))
-        } else {
-            None
-        }
+    fn next_table_address(&self, index: usize) -> Option<VirtualAddress> {
+        let entry = &self[index];
+        let entry_flags = entry.flags();
+
+        entry.pointed_frame().and_then(|next_table_frame| {
+            if entry_flags.has_flag(EntryFlags::HUGE_PAGE.bits()) {
+                return None;
+            }
+            let next_table_physaddr = next_table_frame.start_address();
+            let next_table_virtaddr = VirtualAddress::new(next_table_physaddr.data() + crate::KERNEL_OFFSET);
+
+            Some(next_table_virtaddr)
+        })
     }
 }
 
diff --git a/src/arch/x86_64/rmm.rs b/src/arch/x86_64/rmm.rs
index 970dbd00..90f0eb4a 100644
--- a/src/arch/x86_64/rmm.rs
+++ b/src/arch/x86_64/rmm.rs
@@ -100,11 +100,6 @@ unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], kernel_base: usize, kerne
             flush.ignore(); // Not the active table
         }
 
-        //TODO: remove backwards compatible recursive mapping
-        mapper.table().set_entry(511, rmm::PageEntry::new(
-            mapper.table().phys().data() | A::ENTRY_FLAG_READWRITE | A::ENTRY_FLAG_PRESENT | A::ENTRY_FLAG_NO_EXEC
-        ));
-
         println!("Table: {:X}", mapper.table().phys().data());
         for i in 0..512 {
             if let Some(entry) = mapper.table().entry(i) {
diff --git a/src/context/memory.rs b/src/context/memory.rs
index 75c5421b..46512997 100644
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@ -1,4 +1,4 @@
-use alloc::collections::{BTreeMap, BTreeSet, VecDeque};
+use alloc::collections::{BTreeMap, BTreeSet};
 use alloc::sync::{Arc, Weak};
 use core::borrow::Borrow;
 use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
@@ -15,9 +15,8 @@ use crate::arch::paging::PAGE_SIZE;
 use crate::context::file::FileDescriptor;
 use crate::ipi::{ipi, IpiKind, IpiTarget};
 use crate::memory::Frame;
-use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, PageIter, PhysicalAddress, RmmA, VirtualAddress};
 use crate::paging::mapper::PageFlushAll;
-use crate::paging::temporary_page::TemporaryPage;
+use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, PageIter, PhysicalAddress, RmmA, VirtualAddress};
 
 /// Round down to the nearest multiple of page size
 pub fn round_down_pages(number: usize) -> usize {
@@ -260,9 +259,9 @@ impl PartialEq for Region {
 impl Eq for Region {}
 
 impl PartialOrd for Region {
-fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-    self.start.partial_cmp(&other.start)
-}
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        self.start.partial_cmp(&other.start)
+    }
 }
 impl Ord for Region {
     fn cmp(&self, other: &Self) -> Ordering {
@@ -358,37 +357,31 @@ impl Grant {
         }
     }
 
-    pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: PageFlags<RmmA>, desc_opt: Option<FileDescriptor>, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant {
-        let active_table = unsafe { ActivePageTable::new(from.kind()) };
+    pub fn map_inactive(src: VirtualAddress, dst: VirtualAddress, size: usize, flags: PageFlags<RmmA>, desc_opt: Option<FileDescriptor>, inactive_table: &mut InactivePageTable) -> Grant {
+        let active_table = unsafe { ActivePageTable::new(src.kind()) };
+        let mut inactive_mapper = inactive_table.mapper();
 
-        //TODO: Do not allocate
-        let mut frames = VecDeque::with_capacity(size/PAGE_SIZE);
+        let src_start_page = Page::containing_address(src);
+        let src_end_page = Page::containing_address(VirtualAddress::new(src.data() + size - 1));
+        let src_range = Page::range_inclusive(src_start_page, src_end_page);
 
-        let start_page = Page::containing_address(from);
-        let end_page = Page::containing_address(VirtualAddress::new(from.data() + size - 1));
-        for page in Page::range_inclusive(start_page, end_page) {
-            let frame = active_table.translate_page(page).expect("grant references unmapped memory");
-            frames.push_back(frame);
-        }
+        let dst_start_page = Page::containing_address(dst);
+        let dst_end_page = Page::containing_address(VirtualAddress::new(dst.data() + size - 1));
+        let dst_range = Page::range_inclusive(dst_start_page, dst_end_page);
 
-        let mut active_table = unsafe { ActivePageTable::new(to.kind()) };
+        for (src_page, dst_page) in src_range.zip(dst_range) {
+            let frame = active_table.translate_page(src_page).expect("grant references unmapped memory");
 
-        active_table.with(new_table, temporary_page, |mapper| {
-            let start_page = Page::containing_address(to);
-            let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
-            for page in Page::range_inclusive(start_page, end_page) {
-                let frame = frames.pop_front().expect("grant did not find enough frames");
-                let result = mapper.map_to(page, frame, flags);
-                // Ignore result due to mapping on inactive table
-                unsafe { result.ignore(); }
-            }
-        });
+            let inactive_flush = inactive_mapper.map_to(dst_page, frame, flags);
+            // Ignore result due to mapping on inactive table
+            unsafe { inactive_flush.ignore(); }
+        }
 
         ipi(IpiKind::Tlb, IpiTarget::Other);
 
         Grant {
             region: Region {
-                start: to,
+                start: dst,
                 size,
             },
             flags,
@@ -456,7 +449,7 @@ impl Grant {
         }
     }
 
-    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
+    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable) {
         assert!(self.mapped);
 
         let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };
@@ -471,12 +464,10 @@ impl Grant {
             let (result, frame) = active_table.unmap_return(page, false);
             flush_all.consume(result);
 
-            active_table.with(new_table, temporary_page, |mapper| {
-                let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
-                let result = mapper.map_to(new_page, frame, flags);
-                // Ignore result due to mapping on inactive table
-                unsafe { result.ignore(); }
-            });
+            let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
+            let result = new_table.mapper().map_to(new_page, frame, flags);
+            // Ignore result due to mapping on inactive table
+            unsafe { result.ignore(); }
         }
 
         flush_all.flush();
@@ -522,24 +513,20 @@ impl Grant {
         self.mapped = false;
     }
 
-    pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
+    pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable) {
         assert!(self.mapped);
 
-        let mut active_table = unsafe { ActivePageTable::new(self.start_address().kind()) };
-
-        active_table.with(new_table, temporary_page, |mapper| {
-            let start_page = Page::containing_address(self.start_address());
-            let end_page = Page::containing_address(self.final_address());
-            for page in Page::range_inclusive(start_page, end_page) {
-                let (result, frame) = mapper.unmap_return(page, false);
-                if self.owned {
-                    //TODO: make sure this frame can be safely freed, physical use counter
-                    crate::memory::deallocate_frames(frame, 1);
-                }
-                // This is not the active table, so the flush can be ignored
-                unsafe { result.ignore(); }
+        let start_page = Page::containing_address(self.start_address());
+        let end_page = Page::containing_address(self.final_address());
+        for page in Page::range_inclusive(start_page, end_page) {
+            let (result, frame) = new_table.mapper().unmap_return(page, false);
+            if self.owned {
+                //TODO: make sure this frame can be safely freed, physical use counter
+                crate::memory::deallocate_frames(frame, 1);
             }
-        });
+            // This is not the active table, so the flush can be ignored
+            unsafe { result.ignore(); }
+        }
 
         ipi(IpiKind::Tlb, IpiTarget::Other);
 
@@ -734,7 +721,9 @@ impl Memory {
 
     /// A complicated operation to move a piece of memory to a new page table
     /// It also allows for changing the address at the same time
-    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
+    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable) {
+        let mut inactive_mapper = new_table.mapper();
+
         let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };
 
         let flush_all = PageFlushAll::new();
@@ -743,12 +732,10 @@ impl Memory {
             let (result, frame) = active_table.unmap_return(page, false);
             flush_all.consume(result);
 
-            active_table.with(new_table, temporary_page, |mapper| {
-                let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
-                let result = mapper.map_to(new_page, frame, self.flags);
-                // This is not the active table, so the flush can be ignored
-                unsafe { result.ignore(); }
-            });
+            let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
+            let result = inactive_mapper.map_to(new_page, frame, self.flags);
+            // This is not the active table, so the flush can be ignored
+            unsafe { result.ignore(); }
         }
 
         flush_all.flush();
@@ -838,6 +825,8 @@ impl Tls {
     }
 }
 
+pub const DANGLING: usize = 1 << (usize::BITS - 2);
+
 #[cfg(tests)]
 mod tests {
     // TODO: Get these tests working
diff --git a/src/ptrace.rs b/src/ptrace.rs
index b4a3be21..7f7711a1 100644
--- a/src/ptrace.rs
+++ b/src/ptrace.rs
@@ -7,7 +7,6 @@ use crate::{
         interrupt::InterruptStack,
         paging::{
             mapper::PageFlushAll,
-            temporary_page::TemporaryPage,
             ActivePageTable, InactivePageTable, Page, PAGE_SIZE, TableKind, VirtualAddress
         }
     },
@@ -465,8 +464,9 @@ where F: FnOnce(*mut u8) -> Result<()>
     // Find the physical frames for all pages
     let mut frames = Vec::new();
 
-    let mut result = None;
-    active_page_table.with(&mut target_page_table, &mut TemporaryPage::new(start), |mapper| {
+    {
+        let mapper = target_page_table.mapper();
+
         let mut inner = || -> Result<()> {
             let start = Page::containing_address(offset);
             let end = Page::containing_address(VirtualAddress::new(offset.data() + len - 1));
@@ -478,9 +478,8 @@ where F: FnOnce(*mut u8) -> Result<()>
             }
             Ok(())
         };
-        result = Some(inner());
-    });
-    result.expect("with(...) callback should always be called")?;
+        inner()?;
+    }
 
     // Map all the physical frames into linear pages
     let pages = frames.len();
diff --git a/src/scheme/user.rs b/src/scheme/user.rs
index 1bbbcf28..5d56472b 100644
--- a/src/scheme/user.rs
+++ b/src/scheme/user.rs
@@ -8,10 +8,9 @@ use spin::{Mutex, RwLock};
 
 use crate::context::{self, Context};
 use crate::context::file::FileDescriptor;
-use crate::context::memory::{page_flags, round_down_pages, Grant, Region};
+use crate::context::memory::{DANGLING, page_flags, round_down_pages, Grant, Region};
 use crate::event;
-use crate::paging::{PAGE_SIZE, InactivePageTable, Page, VirtualAddress};
-use crate::paging::temporary_page::TemporaryPage;
+use crate::paging::{PAGE_SIZE, InactivePageTable, VirtualAddress};
 use crate::scheme::{AtomicSchemeId, SchemeId};
 use crate::sync::{WaitQueue, WaitMap};
 use crate::syscall::data::{Map, OldMap, Packet, Stat, StatVfs, TimeSpec};
@@ -124,7 +123,7 @@ impl UserInner {
         ).map(|addr| addr.data())
     }
 
-    fn capture_inner(context_weak: &Weak<RwLock<Context>>, to_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option<FileDescriptor>)
+    fn capture_inner(context_weak: &Weak<RwLock<Context>>, dst_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option<FileDescriptor>)
                      -> Result<VirtualAddress> {
         // TODO: More abstractions over grant creation!
 
@@ -138,57 +137,50 @@ impl UserInner {
             // if they ever tried to access this dangling address.
 
             // Set the most significant bit.
-            let dangling: usize = 1 << (core::mem::size_of::<usize>() * 8 - 1);
-
-            return Ok(VirtualAddress::new(dangling));
+            return Ok(VirtualAddress::new(DANGLING));
         }
 
         let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?;
         let mut context = context_lock.write();
 
         let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
-        let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
 
         let mut grants = context.grants.write();
 
-        let from_address = round_down_pages(address);
-        let offset = address - from_address;
-        let from_region = Region::new(VirtualAddress::new(from_address), offset + size).round();
-        let to_region = grants.find_free_at(VirtualAddress::new(to_address), from_region.size(), flags)?;
+        let src_address = round_down_pages(address);
+        let offset = address - src_address;
+        let src_region = Region::new(VirtualAddress::new(src_address), offset + size).round();
+        let dst_region = grants.find_free_at(VirtualAddress::new(dst_address), src_region.size(), flags)?;
 
         //TODO: Use syscall_head and syscall_tail to avoid leaking data
         grants.insert(Grant::map_inactive(
-            from_region.start_address(),
-            to_region.start_address(),
-            from_region.size(),
+            src_region.start_address(),
+            dst_region.start_address(),
+            src_region.size(),
             page_flags(flags),
             desc_opt,
             &mut new_table,
-            &mut temporary_page
         ));
 
-        Ok(VirtualAddress::new(to_region.start_address().data() + offset))
+        Ok(VirtualAddress::new(dst_region.start_address().data() + offset))
     }
 
     pub fn release(&self, address: usize) -> Result<()> {
-        if address == 0 {
-            Ok(())
-        } else {
-            let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?;
-            let mut context = context_lock.write();
-
-            let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
-            let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
-
-            let mut grants = context.grants.write();
+        if address == DANGLING {
+            return Ok(());
+        }
+        let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?;
+        let mut context = context_lock.write();
 
-            if let Some(region) = grants.contains(VirtualAddress::new(address)).map(Region::from) {
-                grants.take(&region).unwrap().unmap_inactive(&mut new_table, &mut temporary_page);
-                return Ok(());
-            }
+        let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
+        let mut grants = context.grants.write();
 
-            Err(Error::new(EFAULT))
-        }
+        let region = match grants.contains(VirtualAddress::new(address)).map(Region::from) {
+            Some(region) => region,
+            None => return Err(Error::new(EFAULT)),
+        };
+        grants.take(&region).unwrap().unmap_inactive(&mut new_table);
+        Ok(())
     }
 
     pub fn read(&self, buf: &mut [u8]) -> Result<usize> {
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index 97d613f0..19a3210e 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -20,7 +20,6 @@ use crate::interrupt;
 use crate::ipi::{ipi, IpiKind, IpiTarget};
 use crate::memory::allocate_frames;
 use crate::paging::mapper::PageFlushAll;
-use crate::paging::temporary_page::TemporaryPage;
 use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, TableKind, VirtualAddress, PAGE_SIZE};
 use crate::{ptrace, syscall};
 use crate::scheme::FileHandle;
@@ -332,12 +331,11 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
             let mut active_utable = unsafe { ActivePageTable::new(TableKind::User) };
             let mut active_ktable = unsafe { ActivePageTable::new(TableKind::Kernel) };
 
-            let mut temporary_upage = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));
-            let mut temporary_kpage = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::KERNEL_TMP_MISC_OFFSET)));
-
-            let mut new_utable = {
+            let mut new_utable = unsafe {
                 let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
-                InactivePageTable::new(frame, &mut active_utable, &mut temporary_upage)
+                // SAFETY: This is safe because the frame is exclusive, owned, and valid, as we
+                // have just allocated it.
+                InactivePageTable::new(&mut active_utable, frame)
             };
             context.arch.set_page_utable(unsafe { new_utable.address() });
 
@@ -345,7 +343,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
             let mut new_ktable = {
                 let mut new_ktable = {
                     let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
-                    InactivePageTable::new(frame, &mut active_ktable, &mut temporary_kpage)
+                    unsafe { InactivePageTable::new(&mut active_ktable, frame) }
                 };
                 context.arch.set_page_ktable(unsafe { new_ktable.address() });
                 new_ktable
@@ -360,35 +358,29 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
             {
                 let frame = active_ktable.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
                 let flags = active_ktable.p4()[crate::KERNEL_PML4].flags();
-                active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
-                    mapper.p4_mut()[crate::KERNEL_PML4].set(frame, flags);
-                });
+
+                new_ktable.mapper().p4_mut()[crate::KERNEL_PML4].set(frame, flags);
             }
 
             // Copy kernel heap mapping
             {
                 let frame = active_ktable.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
                 let flags = active_ktable.p4()[crate::KERNEL_HEAP_PML4].flags();
-                active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
-                    mapper.p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
-                });
+
+                new_ktable.mapper().p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
             }
 
             // Copy physmap mapping
             {
                 let frame = active_ktable.p4()[crate::PHYS_PML4].pointed_frame().expect("physmap not mapped");
                 let flags = active_ktable.p4()[crate::PHYS_PML4].flags();
-                active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
-                    mapper.p4_mut()[crate::PHYS_PML4].set(frame, flags);
-                });
+                new_ktable.mapper().p4_mut()[crate::PHYS_PML4].set(frame, flags);
             }
             // Copy kernel percpu (similar to TLS) mapping.
             {
                 let frame = active_ktable.p4()[crate::KERNEL_PERCPU_PML4].pointed_frame().expect("kernel TLS not mapped");
                 let flags = active_ktable.p4()[crate::KERNEL_PERCPU_PML4].flags();
-                active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
-                    mapper.p4_mut()[crate::KERNEL_PERCPU_PML4].set(frame, flags);
-                });
+                new_ktable.mapper().p4_mut()[crate::KERNEL_PERCPU_PML4].set(frame, flags);
             }
 
             if let Some(fx) = kfx_opt.take() {
@@ -414,9 +406,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 if ! image.is_empty() {
                     let frame = active_utable.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped");
                     let flags = active_utable.p4()[crate::USER_PML4].flags();
-                    active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
-                        mapper.p4_mut()[crate::USER_PML4].set(frame, flags);
-                    });
+
+                    new_utable.mapper().p4_mut()[crate::USER_PML4].set(frame, flags);
                 }
                 context.image = image;
 
@@ -424,9 +415,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 if ! grants.read().is_empty() {
                     let frame = active_utable.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped");
                     let flags = active_utable.p4()[crate::USER_GRANT_PML4].flags();
-                    active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
-                        mapper.p4_mut()[crate::USER_GRANT_PML4].set(frame, flags);
-                    });
+
+                    new_utable.mapper().p4_mut()[crate::USER_GRANT_PML4].set(frame, flags);
                 }
                 context.grants = grants;
             } else {
@@ -434,7 +424,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 for memory_shared in image.iter_mut() {
                     memory_shared.with(|memory| {
                         let start = VirtualAddress::new(memory.start_address().data() - crate::USER_TMP_OFFSET + crate::USER_OFFSET);
-                        memory.move_to(start, &mut new_utable, &mut temporary_upage);
+                        memory.move_to(start, &mut new_utable);
                     });
                 }
                 context.image = image;
@@ -446,7 +436,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
 
                     for mut grant in old_grants.inner.into_iter() {
                         let start = VirtualAddress::new(grant.start_address().data() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
-                        grant.move_to(start, &mut new_utable, &mut temporary_upage);
+                        grant.move_to(start, &mut new_utable);
                         grants.insert(grant);
                     }
                 }
@@ -458,12 +448,11 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 if flags.contains(CLONE_STACK) {
                     let frame = active_utable.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped");
                     let flags = active_utable.p4()[crate::USER_STACK_PML4].flags();
-                    active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
-                        mapper.p4_mut()[crate::USER_STACK_PML4].set(frame, flags);
-                    });
+
+                    new_utable.mapper().p4_mut()[crate::USER_STACK_PML4].set(frame, flags);
                 } else {
                     stack_shared.with(|stack| {
-                        stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_utable, &mut temporary_upage);
+                        stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_utable);
                     });
                 }
                 context.stack = Some(stack_shared);
@@ -471,7 +460,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
 
             // Setup user sigstack
             if let Some(mut sigstack) = sigstack_opt {
-                sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_utable, &mut temporary_upage);
+                sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_utable);
                 context.sigstack = Some(sigstack);
             }
 
@@ -563,9 +552,8 @@ fn empty(context: &mut context::Context, reaping: bool) {
                 log::error!("{}: {}: Grant should not exist: {:?}", context.id.into(), *context.name.read(), grant);
 
                 let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
-                let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
 
-                grant.unmap_inactive(&mut new_table, &mut temporary_page);
+                grant.unmap_inactive(&mut new_table);
             } else {
                 grant.unmap();
             }
-- 
GitLab