From 490e1b27775eba4e896202cfc5ce271f98b7fb41 Mon Sep 17 00:00:00 2001
From: 4lDO2 <4lDO2@protonmail.com>
Date: Tue, 20 Jun 2023 20:06:34 +0200
Subject: [PATCH] WIP: Track grant ownership.

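Replace GrantInfo's `owned` flag and `desc_opt` field with a `Provider`
enum that records where each grant's memory comes from (`Allocated`,
`PhysBorrowed`, `External`, or `Fmap`), so cloning, unmapping, and the
lazy page fault path can act on actual ownership. Allocated grants now
track per-page refcounts via `Arc<PageInfo>`, paving the way for CoW
clones; `transfer`, fmap borrowing, and grant extraction are still
`todo!()`.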
---
 src/common/mod.rs         |  16 ++
 src/context/memory.rs     | 324 ++++++++++++++++++++++++--------------
 src/debugger.rs           |   4 +-
 src/scheme/memory.rs      |   8 +-
 src/scheme/proc.rs        |  65 +-------
 src/scheme/sys/context.rs |   3 +-
 src/scheme/user.rs        |  14 +-
 src/syscall/driver.rs     |   2 -
 src/syscall/process.rs    |   6 +-
 9 files changed, 250 insertions(+), 192 deletions(-)

diff --git a/src/common/mod.rs b/src/common/mod.rs
index 9f2a70f7..e52d286e 100644
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -1,3 +1,8 @@
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+
+use crate::memory::Enomem;
+
 pub mod aligned_box;
 #[macro_use]
 pub mod int_like;
@@ -26,3 +31,14 @@ macro_rules! dbg {
         ($($crate::dbg!($val)),+,)
     };
 }
+
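+/// Fallibly allocate a `Vec` with capacity for exactly `len` elements, reporting allocation
+/// failure as `Enomem` instead of aborting.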
+pub fn try_new_vec_with_exact_size<T>(len: usize) -> Result<Vec<T>, Enomem> {
+    let mut vec = Vec::new();
+    vec.try_reserve_exact(len).map_err(|_| Enomem)?;
+    Ok(vec)
+}
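+/// Fallibly allocate a boxed slice containing `len` clones of `value`.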
+pub fn try_box_slice_new<T: Clone>(value: T, len: usize) -> Result<Box<[T]>, Enomem> {
+    let mut vec = try_new_vec_with_exact_size(len)?;
+    vec.resize(len, value);
+    Ok(vec.into())
+}
diff --git a/src/context/memory.rs b/src/context/memory.rs
index 620ba3d1..b26f23f6 100644
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@ -1,9 +1,11 @@
+use alloc::boxed::Box;
 use alloc::collections::BTreeMap;
 use alloc::{sync::Arc, vec::Vec};
 use core::cmp;
 use core::fmt::Debug;
 use core::num::NonZeroUsize;
-use spin::{RwLock, RwLockWriteGuard};
+use core::sync::atomic::AtomicUsize;
+use spin::{RwLock, RwLockWriteGuard, Once};
 use syscall::{
     flag::MapFlags,
     error::*,
@@ -11,6 +13,7 @@ use syscall::{
 use rmm::Arch as _;
 
 use crate::arch::paging::PAGE_SIZE;
+use crate::common::{try_box_slice_new, try_new_vec_with_exact_size};
 use crate::context::file::FileDescriptor;
 use crate::memory::{Enomem, Frame};
 use crate::paging::mapper::{Flusher, InactiveFlusher, PageFlushAll};
@@ -65,7 +68,7 @@ impl AddrSpace {
     }
 
     /// Attempt to clone an existing address space so that all mappings are copied (CoW).
-    pub fn try_clone(&mut self) -> Result<Arc<RwLock<Self>>> {
+    pub fn try_clone(&mut self, self_arc: Arc<RwLock<Self>>) -> Result<Arc<RwLock<Self>>> {
         let mut new = new_addrspace()?;
 
         let new_guard = Arc::get_mut(&mut new)
@@ -74,30 +77,22 @@ impl AddrSpace {
 
         let this_mapper = &mut self.table.utable;
         let new_mapper = &mut new_guard.table.utable;
+        let mut this_flusher = PageFlushAll::new();
 
         for (grant_base, grant_info) in self.grants.iter() {
-            if grant_info.desc_opt.is_some() { continue; }
+            let new_grant = match grant_info.provider {
+                Provider::PhysBorrowed { ref base } => Grant::physmap(base.clone(), PageSpan::new(grant_base, grant_info.page_count), grant_info.flags, new_mapper, ())?,
+                Provider::Allocated { ref pages } => Grant::cow(Arc::clone(&self_arc), grant_base, grant_base, grant_info.page_count, grant_info.flags, this_mapper, new_mapper, &mut this_flusher, (), &pages)?,
 
-            let new_grant;
+                // MAP_SHARED grants are retained by reference across address space clones (what
+                // fork does on monolithic kernels).
+                Provider::External { cow: false, ref address_space, ref src_base } => Grant::borrow_grant(Arc::clone(&address_space), grant_base, grant_base, grant_info, new_mapper, (), false)?,
 
-            // TODO: Replace this with CoW
-            if grant_info.owned {
-                new_grant = Grant::zeroed(grant_base, grant_info.page_count, grant_info.flags, new_mapper, ())?;
+                // MAP_PRIVATE grants, in this case indirect ones, are CoW.
+                Provider::External { cow: true, ref address_space, ref src_base } => todo!(),
 
-                for page in new_grant.span().pages().map(Page::start_address) {
-                    let current_frame = unsafe { RmmA::phys_to_virt(this_mapper.translate(page).expect("grant containing unmapped pages").0) }.data() as *const u8;
-                    let new_frame = unsafe { RmmA::phys_to_virt(new_mapper.translate(page).expect("grant containing unmapped pages").0) }.data() as *mut u8;
-
-                    unsafe {
-                        new_frame.copy_from_nonoverlapping(current_frame, PAGE_SIZE);
-                    }
-                }
-            } else {
-                // TODO: Remove reborrow? In that case, physmapped memory will need to either be
-                // remapped when cloning, or be backed by a file descriptor (like
-                // `memory:physical`).
-                new_grant = Grant::reborrow(grant_base, grant_info, grant_base, this_mapper, new_mapper, ())?;
-            }
+                Provider::Fmap { ref desc } => todo!(),
+            };
 
             new_guard.grants.insert(new_grant);
         }
@@ -169,8 +164,9 @@ impl AddrSpace {
             let (before, mut grant, after) = grant.extract(intersection).expect("conflicting region shared no common parts");
 
             // Notify scheme that holds grant
-            if let Some(file_desc) = grant.info.desc_opt.take() {
-                notify_files.push((file_desc, intersection));
+            if let Provider::Fmap { ref desc } = grant.info.provider {
+                // TODO: Remove clone
+                notify_files.push((desc.clone(), intersection));
             }
 
             // Keep untouched regions
@@ -508,10 +504,37 @@ pub struct GrantInfo {
     flags: PageFlags<RmmA>,
     // TODO: Rename to unmapped?
     mapped: bool,
-    pub(crate) owned: bool,
-    //TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the context?
-    pub desc_opt: Option<GrantFileRef>,
+    pub(crate) provider: Provider,
 }
+
+/// The arch-specific user page tables are throwaway, and this enum contains all required
+/// information to update lazy mappings in the event of page faults.
+#[derive(Debug)]
+pub enum Provider {
+    /// The grant is initialized with (lazy) zeroed memory; any write will back the page with a
+    /// frame owned by the frame allocator.
+    //
+    // TODO: strong-count-only Arc?
+    //
+    // https://internals.rust-lang.org/t/pre-rfc-rc-and-arc-with-only-strong-count/5828
+    Allocated { pages: Box<[Option<Arc<PageInfo>>]> },
+    /// The grant is not owned, but borrowed from physical memory frames that do not belong to the
+    /// frame allocator.
+    PhysBorrowed { base: Frame },
+    /// The memory is borrowed directly from another address space.
+    External { address_space: Arc<RwLock<AddrSpace>>, src_base: Page, cow: bool },
+    /// The memory is borrowed from another address space, but managed by a scheme via fmap.
+    // TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the
+    // ~~context~~ address space?
+    // TODO: mmap CoW
+    Fmap { desc: GrantFileRef },
+}
+#[derive(Debug)]
+pub struct PageInfo {
+    // The refcount is already tracked by the `Arc` wrapping each page's PageInfo.
+    phys: Frame,
+}
+
 #[derive(Debug)]
 pub struct Grant {
     pub(crate) base: Page,
@@ -527,97 +550,143 @@ pub struct GrantFileRef {
     pub flags: MapFlags,
 }
 
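+/// A single shared frame of zeroes, which lazily "zeroed" grant pages can all map read-only
+/// until their first write.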
+static THE_ZEROED_FRAME: Once<Frame> = Once::new();
+
 impl Grant {
     // TODO: PageCount newtype, to avoid confusion between bytes and pages?
 
-    pub fn physmap(phys: Frame, dst: Page, page_count: usize, flags: PageFlags<RmmA>, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> Result<Grant> {
-        for index in 0..page_count {
-            let result = unsafe {
-                mapper
-                    .map_phys(dst.next_by(index).start_address(), phys.next_by(index).start_address(), flags)
-                    .expect("TODO: handle OOM from paging structures in physmap")
-            };
-            flusher.consume(result);
+    pub fn physmap(phys: Frame, span: PageSpan, flags: PageFlags<RmmA>, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> Result<Grant> {
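+        // With lazy mapping, no page-table entries are created eagerly here; the
+        // `PhysBorrowed` provider carries what the page fault handler needs to map on demand.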
+        Ok(Grant {
+            base: span.base,
+            info: GrantInfo {
+                page_count: span.count,
+                flags,
+                mapped: true,
+                provider: Provider::PhysBorrowed { base: phys },
+            },
+        })
+    }
+    pub fn zeroed(span: PageSpan, flags: PageFlags<RmmA>, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
+        let the_frame = THE_ZEROED_FRAME.get().expect("expected the zeroed frame to be available").start_address();
+
+        // TODO: O(n) readonly map with zeroed page, or O(1) no-op and then lazily map?
+        // TODO: Use flush_all after a certain number of pages, otherwise flush per page?
+
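+        // Reserve the per-page tracking slice up front; each slot stays `None` until the
+        // corresponding page is faulted in and backed by an allocated frame.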
+        let pages = try_box_slice_new(None, span.count)?;
+
+        /*
+        for page in span.pages() {
+            // A nice property of lazy page fault handling: if we fail with ENOMEM here, we can
+            // carry on and let the process face the OOM killer later.
+            unsafe {
+                let Some(result) = mapper.map_phys(page.start_address(), the_frame, flags.write(false)) else {
+                    break;
+                };
+                flusher.consume(result);
+            }
         }
+        */
 
         Ok(Grant {
-            base: dst,
+            base: span.base,
             info: GrantInfo {
-                page_count,
+                page_count: span.count,
                 flags,
                 mapped: true,
-                owned: false,
-                desc_opt: None,
+                provider: Provider::Allocated { pages },
             },
         })
     }
-    pub fn zeroed(dst: Page, page_count: usize, flags: PageFlags<RmmA>, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
-        Ok(Grant { base: dst, info: GrantInfo { page_count, flags, mapped: true, owned: true, desc_opt: None } })
-    }
-    pub fn borrow(src_base: Page, dst_base: Page, page_count: usize, flags: PageFlags<RmmA>, desc_opt: Option<GrantFileRef>, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
-        Self::copy_inner(src_base, dst_base, page_count, flags, desc_opt, src_mapper, dst_mapper, (), dst_flusher, false, false)
-    }
-    pub fn reborrow(src_base: Page, src_info: &GrantInfo, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant> {
-        Self::borrow(src_base, dst_base, src_info.page_count, src_info.flags, src_info.desc_opt.clone(), src_mapper, dst_mapper, dst_flusher).map_err(Into::into)
-    }
-    pub fn transfer(mut src_grant: Grant, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, src_flusher: impl Flusher<RmmA>, dst_flusher: impl Flusher<RmmA>) -> Result<Grant> {
-        assert!(core::mem::replace(&mut src_grant.info.mapped, false));
-        let desc_opt = src_grant.info.desc_opt.take();
 
-        Self::copy_inner(src_grant.base, dst_base, src_grant.info.page_count, src_grant.info.flags(), desc_opt, src_mapper, dst_mapper, src_flusher, dst_flusher, src_grant.info.owned, true).map_err(Into::into)
+    // XXX: borrow_grant is needed because of the borrow checker (iterator invalidation), maybe
+    // borrow_grant/borrow can be abstracted somehow?
+    pub fn borrow_grant(src_address_space_lock: Arc<RwLock<AddrSpace>>, src_base: Page, dst_base: Page, src_info: &GrantInfo, mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>, eager: bool) -> Result<Grant, Enomem> {
+        Ok(Grant {
+            base: dst_base,
+            info: GrantInfo {
+                page_count: src_info.page_count,
+                flags: src_info.flags,
+                mapped: true,
+                provider: Provider::External {
+                    src_base,
+                    address_space: src_address_space_lock,
+                    cow: false,
+                }
+            },
+        })
     }
 
-    fn copy_inner(
+    pub fn borrow(
+        src_address_space_lock: Arc<RwLock<AddrSpace>>,
+        src_address_space: &AddrSpace,
+        src_base: Page,
+        dst_base: Page,
+        page_count: usize,
+        flags: PageFlags<RmmA>,
+        dst_mapper: &mut PageMapper,
+        dst_flusher: impl Flusher<RmmA>,
+        eager: bool,
+    ) -> Result<Grant, Enomem> {
+        /*
+        if eager {
+            for page in PageSpan::new(src_base, page_count) {
+                // ...
+            }
+        }
+        */
+
+        Ok(Grant {
+            base: dst_base,
+            info: GrantInfo {
+                page_count,
+                flags,
+                mapped: true,
+                provider: Provider::External {
+                    src_base,
+                    address_space: src_address_space_lock,
+                    cow: false,
+                }
+            },
+        })
+    }
+    // TODO: This is limited to one page. Should it stay that way (e.g. if some magic new
+    // proc: API is introduced)?
+    pub fn cow(
+        src_address_space: Arc<RwLock<AddrSpace>>,
         src_base: Page,
         dst_base: Page,
         page_count: usize,
         flags: PageFlags<RmmA>,
-        desc_opt: Option<GrantFileRef>,
         src_mapper: &mut PageMapper,
         dst_mapper: &mut PageMapper,
         mut src_flusher: impl Flusher<RmmA>,
         mut dst_flusher: impl Flusher<RmmA>,
-        owned: bool,
-        unmap: bool,
+        src_pages: &[Option<Arc<PageInfo>>],
     ) -> Result<Grant, Enomem> {
-        let mut successful_count = 0;
+        let mut pages = try_new_vec_with_exact_size(page_count)?;
 
-        for index in 0..page_count {
-            let src_page = src_base.next_by(index);
-            let (address, _entry_flags) = if unmap {
-                let (entry, entry_flags, flush) = unsafe { src_mapper.unmap_phys(src_page.start_address(), true).expect("grant references unmapped memory") };
-                src_flusher.consume(flush);
+        for page_idx in 0..page_count {
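+            // Clone the Arc so both address spaces share (and refcount) the same backing frame.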
+            let src_page_info = src_pages[page_idx].as_ref().map(Arc::clone);
+            let phys = src_page_info.as_ref().map(|pg| pg.phys.start_address());
+            pages.push(src_page_info);
 
-                (entry, entry_flags)
-            } else {
-                src_mapper.translate(src_page.start_address()).unwrap_or_else(|| panic!("grant at {:p} references unmapped memory", src_page.start_address().data() as *const u8))
+            let Some(frame) = phys else {
+                continue;
             };
 
-            let flush = match unsafe { dst_mapper.map_phys(dst_base.next_by(index).start_address(), address, flags) } {
-                Some(f) => f,
-                // ENOMEM
-                None => break,
-            };
-
-            dst_flusher.consume(flush);
+            let src_page = src_base.next_by(page_idx);
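+            // Write-protect the source page so a later write faults, letting CoW break the
+            // sharing.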
+            if flags.has_write() {
+                unsafe {
+                    src_flusher.consume(src_mapper.remap(src_page.start_address(), flags.write(false)).expect("page table grant inconsistency"));
+                }
+            }
 
-            successful_count = index + 1;
-        }
+            let dst_page = dst_base.next_by(page_idx).start_address();
 
-        if successful_count != page_count {
-            // TODO: The grant will be lost in case of ENOMEM. Allow putting it back in source?
-            for index in 0..successful_count {
-                let (frame, _, flush) = match unsafe { dst_mapper.unmap_phys(dst_base.next_by(index).start_address(), true) } {
-                    Some(f) => f,
-                    None => unreachable!("grant unmapped by someone else in the meantime despite having a &mut PageMapper"),
-                };
-                dst_flusher.consume(flush);
+            let Some(map_result) = (unsafe { dst_mapper.map_phys(dst_page, frame, flags.write(false)) }) else {
+                break;
+            };
 
-                if owned {
-                    crate::memory::deallocate_frames(Frame::containing_address(frame), 1);
-                }
-            }
-            return Err(Enomem);
+            dst_flusher.consume(map_result);
         }
 
         Ok(Grant {
@@ -626,11 +695,19 @@ impl Grant {
                 page_count,
                 flags,
                 mapped: true,
-                owned,
-                desc_opt,
+                provider: Provider::External { src_base, address_space: src_address_space, cow: true }
             },
         })
     }
+    pub fn transfer(mut src_grant: Grant, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, src_flusher: impl Flusher<RmmA>, dst_flusher: impl Flusher<RmmA>) -> Result<Grant> {
+        todo!()
+        /*
+        assert!(core::mem::replace(&mut src_grant.info.mapped, false));
+        let desc_opt = src_grant.info.desc_opt.take();
+
+        Self::copy_inner(src_grant.base, dst_base, src_grant.info.page_count, src_grant.info.flags(), desc_opt, src_mapper, dst_mapper, src_flusher, dst_flusher, src_grant.info.owned, true).map_err(Into::into)
+            */
+    }
 
     pub fn remap(&mut self, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>, flags: PageFlags<RmmA>) {
         assert!(self.info.mapped);
@@ -639,7 +716,10 @@ impl Grant {
             // TODO: PageMapper is unsafe because it can be used to modify kernel memory. Add a
             // subset/wrapper that is safe but only for user mappings.
             unsafe {
-                let result = mapper.remap(page.start_address(), flags).expect("grant contained unmap address");
+                // Lazy mappings don't require remapping, as info.flags will be updated.
+                let Some(result) = mapper.remap(page.start_address(), flags) else {
+                    continue;
+                };
                 flusher.consume(result);
             }
         }
@@ -649,34 +729,35 @@ impl Grant {
     pub fn unmap(mut self, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> UnmapResult {
         assert!(self.info.mapped);
 
-        for page in self.span().pages() {
-            let (entry, _, flush) = unsafe { mapper.unmap_phys(page.start_address(), true) }
-                .unwrap_or_else(|| panic!("missing page at {:#0x} for grant {:?}", page.start_address().data(), self));
-
-            if self.info.owned {
-                // TODO: make sure this frame can be safely freed, physical use counter.
-                //
-                // Namely, we can either have MAP_PRIVATE or MAP_SHARED-style mappings. The former
-                // maps the source memory read-only and then (not yet) implements CoW on top (as of
-                // now the kernel does not yet support this distinction), while the latter simply
-                // means the memory is shared. We can in addition to the desc_opt also include an
-                // address space and region within, indicating borrowed memory. The source grant
-                // will have a refcount, and if it is unmapped, it will be transferred to a
-                // borrower. Only if this refcount becomes zero when decremented, will it be
-                // possible to unmap.
-                //
-                // So currently, it is technically possible to get double frees if the scheme
-                // "hosting" the memory of an fmap call, decides to funmap its memory before the
-                // fmapper does.
-                crate::memory::deallocate_frames(Frame::containing_address(entry), 1);
+        for (page_idx, page) in self.span().pages().enumerate() {
+            // Lazy mappings do not need to be unmapped.
+            let Some((entry, _, flush)) = (unsafe { mapper.unmap_phys(page.start_address(), true) }) else {
+                continue;
+            };
+
+            match self.info.provider {
+                Provider::Allocated { ref mut pages } => {
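+                    // `Arc::try_unwrap` succeeds only for the last reference, so the frame is
+                    // freed exactly once, when no other grant still shares it.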
+                    if let Some(_page_info) = pages[page_idx].take().and_then(|arc| Arc::try_unwrap(arc).ok()) {
+                        crate::memory::deallocate_frames(Frame::containing_address(entry), 1);
+                    }
+                }
+                _ => (),
             }
+
             flusher.consume(flush);
         }
 
         self.info.mapped = false;
 
-        // TODO: This imposes a large cost on unmapping, but that cost cannot be avoided without modifying fmap and funmap
-        UnmapResult { file_desc: self.info.desc_opt.take() }
+        UnmapResult {
+            file_desc: if let Provider::Fmap { ref desc } = self.info.provider {
+                // TODO: Don't clone
+                Some(desc.clone())
+            } else {
+                None
+            }
+        }
     }
 
     /// Extract out a region into a separate grant. The return value is as
@@ -703,9 +784,8 @@ impl Grant {
             info: GrantInfo {
                 flags: self.info.flags,
                 mapped: self.info.mapped,
-                owned: self.info.owned,
-                desc_opt: self.info.desc_opt.clone(),
                 page_count: span.count,
+                provider: todo!(),
             },
         });
         let after_grant = after_span.map(|span| Grant {
@@ -713,9 +793,8 @@ impl Grant {
             info: GrantInfo {
                 flags: self.info.flags,
                 mapped: self.info.mapped,
-                owned: self.info.owned,
-                desc_opt: self.info.desc_opt.clone(),
                 page_count: span.count,
+                provider: todo!(),
             },
         });
         self.base = this_span.base;
@@ -728,17 +807,22 @@ impl GrantInfo {
     pub fn flags(&self) -> PageFlags<RmmA> {
         self.flags
     }
-    pub fn is_owned(&self) -> bool {
-        self.owned
-    }
     pub fn page_count(&self) -> usize {
         self.page_count
     }
     pub fn can_have_flags(&self, flags: MapFlags) -> bool {
-        self.owned || ((self.flags.has_write() || !flags.contains(MapFlags::PROT_WRITE)) && (self.flags.has_execute() || !flags.contains(MapFlags::PROT_EXEC)))
+        // TODO: read
+        let is_downgrade = (self.flags.has_write() || !flags.contains(MapFlags::PROT_WRITE)) && (self.flags.has_execute() || !flags.contains(MapFlags::PROT_EXEC));
+
+        match self.provider {
+            Provider::Allocated { .. } | Provider::External { cow: true, .. } => true,
+            Provider::PhysBorrowed { .. } | Provider::External { cow: false, .. } => is_downgrade,
+            Provider::Fmap { .. } => is_downgrade,
+        }
     }
 
     pub fn can_be_merged_if_adjacent(&self, with: &Self) -> bool {
+        /*
         match (&self.desc_opt, &with.desc_opt) {
             (None, None) => (),
             (Some(ref a), Some(ref b)) if Arc::ptr_eq(&a.desc.description, &b.desc.description) => (),
@@ -746,6 +830,8 @@ impl GrantInfo {
             _ => return false,
         }
         self.owned == with.owned && self.mapped == with.mapped && self.flags.data() == with.flags.data()
+        */
+        todo!()
     }
 }
 
diff --git a/src/debugger.rs b/src/debugger.rs
index d96bdf43..e26b8c4a 100644
--- a/src/debugger.rs
+++ b/src/debugger.rs
@@ -187,7 +187,9 @@ pub unsafe fn debugger(target_id: Option<crate::context::ContextId>) {
                     println!(
                         "    virt 0x{:016x}:0x{:016x} size 0x{:08x} {}",
                         base.start_address().data(), base.start_address().data() + size - 1, size,
-                        if info.is_owned() { "owned" } else { "borrowed" },
+                        //if info.is_owned() { "owned" } else { "borrowed" },
+                        // TODO: display the grant's Provider here
+                        "",
                     );
                 }
             }
diff --git a/src/scheme/memory.rs b/src/scheme/memory.rs
index c071add5..2bc6658c 100644
--- a/src/scheme/memory.rs
+++ b/src/scheme/memory.rs
@@ -62,7 +62,7 @@ impl MemoryScheme {
         let page = addr_space
             .write()
             .mmap((map.address != 0).then_some(span.base), page_count, map.flags, |page, flags, mapper, flusher| {
-                Ok(Grant::zeroed(page, page_count.get(), flags, mapper, flusher)?)
+                Ok(Grant::zeroed(PageSpan::new(page, page_count.get()), flags, mapper, flusher)?)
             })?;
 
         Ok(page.start_address().data())
@@ -99,8 +99,10 @@ impl MemoryScheme {
 
             Grant::physmap(
                 Frame::containing_address(PhysicalAddress::new(physical_address)),
-                dst_page,
-                page_count.get(),
+                PageSpan::new(
+                    dst_page,
+                    page_count.get(),
+                ),
                 page_flags,
                 dst_mapper,
                 dst_flusher,
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
index 1d8fc9f9..4a2a3801 100644
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -135,11 +135,6 @@ enum Operation {
     // TODO: Remove this once openat is implemented, or allow openat-via-dup via e.g. the top-level
     // directory.
     OpenViaDup,
-    // Allows calling fmap directly on a FileDescriptor (as opposed to a FileDescriptor).
-    //
-    // TODO: Remove this once cross-scheme links are merged. That would allow acquiring a new
-    // FD to access the file descriptor behind grants.
-    GrantHandle { description: Arc<RwLock<FileDescription>> },
 
     SchedAffinity,
     Sigactions(Arc<RwLock<Vec<(SigAction, usize)>>>),
@@ -717,28 +712,11 @@ impl KernelScheme for ProcScheme {
         let info = self.handles.read().get(&id).ok_or(Error::new(EBADF))?.info.clone();
 
         match info.operation {
-            Operation::GrantHandle { ref description } => {
-                // The map struct will probably reside in kernel memory, on the stack, and for that
-                // it would be very insecure not to use the pinned head/tail buffer.
-                let mut buf = BorrowedHtBuf::head()?;
-                // TODO: This can be safe
-                let map_dst = unsafe { buf.use_for_struct()? };
-                *map_dst = *map;
-
-                let (scheme_id, number) = {
-                    let description = description.read();
-
-                    (description.scheme, description.number)
-                };
-                let scheme = Arc::clone(scheme::schemes().get(scheme_id).ok_or(Error::new(EBADFD))?);
-                let res = scheme.fmap(number, map_dst);
-
-                res
-            }
             Operation::AddrSpace { ref addrspace } => {
                 if Arc::ptr_eq(addrspace, dst_addr_space) {
                     return Err(Error::new(EBUSY));
                 }
+                /*
                 // Limit to transferring/borrowing at most one grant, or part of a grant (splitting
                 // will be mandatory if grants are coalesced).
 
@@ -780,8 +758,10 @@ impl KernelScheme for ProcScheme {
                 } else {
                     dst_addr_space.mmap(requested_dst_page, src_page_count, map.flags, |dst_page, flags, dst_mapper, flusher| Ok(Grant::borrow(src_grant_span.base, dst_page, src_grant_span.count, flags, None, src_mapper, dst_mapper, flusher)?))?
                 };
+                */
 
-                Ok(result_page.start_address().data())
+                //Ok(result_page.start_address().data())
+                todo!()
             }
             _ => Err(Error::new(EBADF)),
         }
@@ -828,32 +808,6 @@ impl KernelScheme for ProcScheme {
                 data.offset = VirtualAddress::new(data.offset.data() + bytes_read);
                 Ok(bytes_read)
             },
-            // TODO: Support reading only a specific address range. Maybe using seek?
-            Operation::AddrSpace { addrspace } => {
-                let mut handles = self.handles.write();
-                let OperationData::Offset(ref mut offset) = handles.get_mut(&id).ok_or(Error::new(EBADF))?.data else {
-                    return Err(Error::new(EBADFD));
-                };
-
-                // TODO: Define a struct somewhere?
-                const RECORD_SIZE: usize = mem::size_of::<usize>() * 4;
-                let records = buf.in_exact_chunks(mem::size_of::<usize>()).array_chunks::<4>();
-
-                let addrspace = addrspace.read();
-                let mut bytes_read = 0;
-
-                for ([r1, r2, r3, r4], (base, info)) in records.zip(addrspace.grants.iter()).skip(*offset / RECORD_SIZE) {
-                    r1.write_usize(base.start_address().data())?;
-                    r2.write_usize(info.page_count() * PAGE_SIZE)?;
-                    r3.write_usize(map_flags(info.flags()).bits() | if info.desc_opt.is_some() { 0x8000_0000 } else { 0 })?;
-                    r4.write_usize(info.desc_opt.as_ref().map_or(0, |d| d.offset))?;
-                    bytes_read += RECORD_SIZE;
-                }
-
-                *offset += bytes_read;
-                Ok(bytes_read)
-            }
-
             Operation::Regs(kind) => {
                 union Output {
                     float: FloatRegisters,
@@ -1284,21 +1238,16 @@ impl KernelScheme for ProcScheme {
                 handle(Operation::Filetable { filetable: new_filetable }, OperationData::Other)
             }
             Operation::AddrSpace { ref addrspace } => {
+                let addrspace_clone = Arc::clone(addrspace);
+
                 let (operation, is_mem) = match buf {
                     // TODO: Better way to obtain new empty address spaces, perhaps using SYS_OPEN. But
                     // in that case, what scheme?
                     b"empty" => (Operation::AddrSpace { addrspace: new_addrspace()? }, false),
-                    b"exclusive" => (Operation::AddrSpace { addrspace: addrspace.write().try_clone()? }, false),
+                    b"exclusive" => (Operation::AddrSpace { addrspace: addrspace.write().try_clone(addrspace_clone)? }, false),
                     b"mem" => (Operation::Memory { addrspace: Arc::clone(addrspace) }, true),
                     b"mmap-min-addr" => (Operation::MmapMinAddr(Arc::clone(addrspace)), false),
 
-                    grant_handle if grant_handle.starts_with(b"grant-") => {
-                        let start_addr = usize::from_str_radix(core::str::from_utf8(&grant_handle[6..]).map_err(|_| Error::new(EINVAL))?, 16).map_err(|_| Error::new(EINVAL))?;
-                        (Operation::GrantHandle {
-                            description: Arc::clone(&addrspace.read().grants.contains(Page::containing_address(VirtualAddress::new(start_addr))).ok_or(Error::new(EINVAL))?.1.desc_opt.as_ref().ok_or(Error::new(EINVAL))?.desc.description)
-                        }, false)
-                    }
-
                     _ => return Err(Error::new(EINVAL)),
                 };
 
diff --git a/src/scheme/sys/context.rs b/src/scheme/sys/context.rs
index c3b235ec..2a5aaca7 100644
--- a/src/scheme/sys/context.rs
+++ b/src/scheme/sys/context.rs
@@ -86,7 +86,8 @@ pub fn resource() -> Result<Vec<u8>> {
             }
             if let Ok(addr_space) = context.addr_space() {
                 for (_base, info) in addr_space.read().grants.iter() {
-                    if info.is_owned() {
+                    // TODO: method
+                    if matches!(info.provider, context::memory::Provider::Allocated { .. }) {
                         memory += info.page_count() * PAGE_SIZE;
                     }
                 }
diff --git a/src/scheme/user.rs b/src/scheme/user.rs
index eef1839c..ba81f345 100644
--- a/src/scheme/user.rs
+++ b/src/scheme/user.rs
@@ -142,7 +142,7 @@ impl UserInner {
 
         let src_page = Page::containing_address(VirtualAddress::new(tail.buf_mut().as_ptr() as usize));
 
-        let dst_page = dst_addr_space.write().mmap(None, ONE, PROT_READ, |dst_page, flags, mapper, flusher| Ok(Grant::borrow(src_page, dst_page, 1, flags, None, &mut KernelMapper::lock(), mapper, flusher)?))?;
+        let dst_page = dst_addr_space.write().mmap(None, ONE, PROT_READ, |dst_page, flags, mapper, flusher| Ok(Grant::physmap(todo!(), PageSpan::new(dst_page, 1), flags, mapper, flusher)?))?;
 
         Ok(CaptureGuard {
             destroyed: false,
@@ -237,7 +237,8 @@ impl UserInner {
             let head_buf_page = Page::containing_address(VirtualAddress::new(array.buf_mut().as_mut_ptr() as usize));
 
             dst_space.mmap(Some(free_span.base), ONE, map_flags, move |dst_page, page_flags, mapper, flusher| {
-                Ok(Grant::borrow(head_buf_page, dst_page, 1, page_flags, None, &mut KernelMapper::lock(), mapper, flusher)?)
+                //Ok(Grant::borrow(head_buf_page, dst_page, 1, page_flags, None, &mut KernelMapper::lock(), mapper, flusher)?)
+                todo!()
             })?;
 
             let head = CopyInfo {
@@ -261,8 +262,7 @@ impl UserInner {
 
         if let Some(middle_page_count) = NonZeroUsize::new(middle_page_count) {
             dst_space.mmap(Some(first_middle_dst_page), middle_page_count, map_flags, move |dst_page, page_flags, mapper, flusher| {
-                let mut cur_space = cur_space_lock.write();
-                Ok(Grant::borrow(first_middle_src_page, dst_page, middle_page_count.get(), page_flags, None, &mut cur_space.table.utable, mapper, flusher)?)
+                Ok(Grant::borrow(Arc::clone(&cur_space_lock), &mut *cur_space_lock.write(), first_middle_src_page, dst_page, middle_page_count.get(), page_flags, mapper, flusher, true)?)
             })?;
         }
 
@@ -289,7 +289,8 @@ impl UserInner {
             }
 
             dst_space.mmap(Some(tail_dst_page), ONE, map_flags, move |dst_page, page_flags, mapper, flusher| {
-                Ok(Grant::borrow(tail_buf_page, dst_page, 1, page_flags, None, &mut KernelMapper::lock(), mapper, flusher)?)
+                todo!();
+                //Ok(Grant::borrow(tail_buf_page, dst_page, 1, page_flags, None, &mut KernelMapper::lock(), mapper, flusher)?)
             })?;
 
             CopyInfo {
@@ -393,7 +394,8 @@ impl UserInner {
                         let nz_page_count = NonZeroUsize::new(page_count).ok_or(Error::new(EINVAL));
 
                         let res = nz_page_count.and_then(|page_count| addr_space.mmap(dst_page, page_count, map.flags, move |dst_page, flags, mapper, flusher| {
-                            Ok(Grant::borrow(src_page, dst_page, page_count.get(), flags, Some(file_ref), &mut AddrSpace::current()?.write().table.utable, mapper, flusher)?)
+                            todo!()
+                            //Ok(Grant::borrow(src_page, dst_page, page_count.get(), flags, Some(file_ref), &mut AddrSpace::current()?.write().table.utable, mapper, flusher)?)
                         }));
                         retcode = Error::mux(res.map(|grant_start_page| {
                             addr_space.grants.funmap.insert(
diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs
index c5f6e663..cdc17b52 100644
--- a/src/syscall/driver.rs
+++ b/src/syscall/driver.rs
@@ -1,5 +1,3 @@
-use core::num::NonZeroUsize;
-
 use crate::interrupt::InterruptStack;
 use crate::memory::{allocate_frames_complex, deallocate_frames, Frame, PAGE_SIZE};
 use crate::paging::{PhysicalAddress, VirtualAddress};
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index c228fcb2..8d08d5e9 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -595,8 +595,10 @@ pub unsafe fn usermode_bootstrap(bootstrap: &Bootstrap) -> ! {
         // deallocated?
         addr_space.grants.insert(context::memory::Grant::physmap(
             bootstrap.base.clone(),
-            Page::containing_address(VirtualAddress::new(0)),
-            bootstrap.page_count,
+            PageSpan::new(
+                Page::containing_address(VirtualAddress::new(0)),
+                bootstrap.page_count,
+            ),
             PageFlags::new().user(true).write(true).execute(true),
             &mut addr_space.table.utable,
             PageFlushAll::new(),
-- 
GitLab