diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index f9726fbc10be6266e962099be05c547607e04467..357f47e8e33a5a8bfa5d6bfc8815a73928968401 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -98,6 +98,9 @@ impl Page {
             number: self.number + n,
         }
     }
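+    /// The number of pages from `other` up to `self`; `other` must not be above
+    /// `self`, or the subtraction underflows.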
+    pub fn offset_from(self, other: Self) -> usize {
+        self.number - other.number
+    }
 }
 
 pub struct PageIter {
diff --git a/src/context/memory.rs b/src/context/memory.rs
index f21d37e1106495e23b179d2b72db43ace541d1e2..7f6410b9c9af57d6dbcc3317c2f4b8ffa3d4a015 100644
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@ -1,9 +1,8 @@
-use alloc::collections::{BTreeMap, BTreeSet};
+use alloc::collections::BTreeMap;
 use alloc::{sync::Arc, vec::Vec};
-use core::borrow::Borrow;
-use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
-use core::fmt::{self, Debug};
-use core::ops::Deref;
+use core::cmp;
+use core::fmt::Debug;
+use core::num::NonZeroUsize;
 use spin::{RwLock, RwLockWriteGuard};
 use syscall::{
     flag::MapFlags,
@@ -15,7 +14,7 @@ use crate::arch::paging::PAGE_SIZE;
 use crate::context::file::FileDescriptor;
 use crate::memory::{Enomem, Frame};
 use crate::paging::mapper::{Flusher, InactiveFlusher, PageFlushAll};
-use crate::paging::{KernelMapper, Page, PageFlags, PageIter, PageMapper, RmmA, round_up_pages, TableKind, VirtualAddress};
+use crate::paging::{KernelMapper, Page, PageFlags, PageMapper, RmmA, TableKind, VirtualAddress};
 
 pub const MMAP_MIN_DEFAULT: usize = PAGE_SIZE;
 
@@ -76,16 +75,16 @@ impl AddrSpace {
         let this_mapper = &mut self.table.utable;
         let new_mapper = &mut new_guard.table.utable;
 
-        for grant in self.grants.iter() {
-            if grant.desc_opt.is_some() { continue; }
+        for (grant_base, grant_info) in self.grants.iter() {
+            if grant_info.desc_opt.is_some() { continue; }
 
             let new_grant;
 
             // TODO: Replace this with CoW
-            if grant.owned {
-                new_grant = Grant::zeroed(Page::containing_address(grant.start_address()), grant.size() / PAGE_SIZE, grant.flags(), new_mapper, ())?;
+            if grant_info.owned {
+                new_grant = Grant::zeroed(grant_base, grant_info.page_count, grant_info.flags, new_mapper, ())?;
 
-                for page in new_grant.pages().map(Page::start_address) {
+                for page in new_grant.span().pages().map(Page::start_address) {
                     let current_frame = unsafe { RmmA::phys_to_virt(this_mapper.translate(page).expect("grant containing unmapped pages").0) }.data() as *const u8;
                     let new_frame = unsafe { RmmA::phys_to_virt(new_mapper.translate(page).expect("grant containing unmapped pages").0) }.data() as *mut u8;
 
@@ -97,7 +96,7 @@ impl AddrSpace {
                 // TODO: Remove reborrow? In that case, physmapped memory will need to either be
                 // remapped when cloning, or be backed by a file descriptor (like
                 // `memory:physical`).
-                new_grant = Grant::reborrow(grant, Page::containing_address(grant.start_address()), this_mapper, new_mapper, ())?;
+                new_grant = Grant::reborrow(grant_base, grant_info, grant_base, this_mapper, new_mapper, ())?;
             }
 
             new_guard.grants.insert(new_grant);
@@ -114,7 +113,7 @@ impl AddrSpace {
     pub fn is_current(&self) -> bool {
         self.table.utable.is_current()
     }
-    pub fn mprotect(&mut self, base: Page, page_count: usize, flags: MapFlags) -> Result<()> {
+    pub fn mprotect(&mut self, requested_span: PageSpan, flags: MapFlags) -> Result<()> {
         let (mut active, mut inactive);
         let mut flusher = if self.is_current() {
             active = PageFlushAll::new();
@@ -125,26 +124,24 @@ impl AddrSpace {
         };
         let mapper = &mut self.table.utable;
 
-        let region = Region::new(base.start_address(), page_count * PAGE_SIZE);
+        // TODO: Remove allocation (might require BTreeMap::set_key or interior mutability).
+        let conflicting = self.grants.conflicts(requested_span).map(|(base, info)| PageSpan::new(base, info.page_count)).collect::<Vec<_>>();
 
-        // TODO: Remove allocation
-        let regions = self.grants.conflicts(region).map(|g| *g.region()).collect::<Vec<_>>();
-
-        for grant_region in regions {
-            let grant = self.grants.take(&grant_region).expect("grant cannot magically disappear while we hold the lock!");
-            let intersection = grant_region.intersect(region);
+        for grant_span in conflicting {
+            let grant = self.grants.remove(grant_span.base).expect("grant cannot magically disappear while we hold the lock!");
+            let intersection = grant_span.intersection(requested_span);
 
             let (before, mut grant, after) = grant.extract(intersection).expect("failed to extract grant");
 
             if let Some(before) = before { self.grants.insert(before); }
             if let Some(after) = after { self.grants.insert(after); }
 
-            if !grant.can_have_flags(flags) {
+            if !grant.info.can_have_flags(flags) {
                 self.grants.insert(grant);
                 return Err(Error::new(EACCES));
             }
 
-            let new_flags = grant.flags()
+            let new_flags = grant.info.flags()
                 // TODO: Require a capability in order to map executable memory?
                 .execute(flags.contains(MapFlags::PROT_EXEC))
                 .write(flags.contains(MapFlags::PROT_WRITE));
@@ -158,36 +155,36 @@ impl AddrSpace {
         }
         Ok(())
     }
-    pub fn munmap(mut this: RwLockWriteGuard<'_, Self>, page: Page, page_count: usize) {
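+    /// Unmap all pages in `requested_span`, splitting any grant that only partially
+    /// overlaps it. The write guard is taken by value (via `arbitrary_self_types`) so
+    /// that the address-space lock can be dropped before schemes are notified through
+    /// `funmap`.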
+    pub fn munmap(mut self: RwLockWriteGuard<'_, Self>, requested_span: PageSpan) {
         let mut notify_files = Vec::new();
 
-        let requested = Region::new(page.start_address(), page_count * PAGE_SIZE);
         let mut flusher = PageFlushAll::new();
 
-        let conflicting: Vec<Region> = this.grants.conflicts(requested).map(Region::from).collect();
+        // TODO: Allocating may even be wrong!
+        let conflicting: Vec<PageSpan> = self.grants.conflicts(requested_span).map(|(base, info)| PageSpan::new(base, info.page_count)).collect();
 
         for conflict in conflicting {
-            let grant = this.grants.take(&conflict).expect("conflicting region didn't exist");
-            let intersection = grant.intersect(requested);
-            let (before, mut grant, after) = grant.extract(intersection.round()).expect("conflicting region shared no common parts");
+            let grant = self.grants.remove(conflict.base).expect("conflicting region didn't exist");
+            let intersection = conflict.intersection(requested_span);
+            let (before, mut grant, after) = grant.extract(intersection).expect("conflicting region shared no common parts");
 
             // Notify scheme that holds grant
-            if let Some(file_desc) = grant.desc_opt.take() {
+            if let Some(file_desc) = grant.info.desc_opt.take() {
                 notify_files.push((file_desc, intersection));
             }
 
             // Keep untouched regions
             if let Some(before) = before {
-                this.grants.insert(before);
+                self.grants.insert(before);
             }
             if let Some(after) = after {
-                this.grants.insert(after);
+                self.grants.insert(after);
             }
 
             // Remove irrelevant region
-            grant.unmap(&mut this.table.utable, &mut flusher);
+            grant.unmap(&mut self.table.utable, &mut flusher);
         }
-        drop(this);
+        drop(self);
 
         for (file_ref, intersection) in notify_files {
             let scheme_id = { file_ref.desc.description.read().scheme };
@@ -201,23 +198,17 @@ impl AddrSpace {
             // Same here, we don't really care about errors when schemes respond to unmap events.
             // The caller wants the memory to be unmapped, period. When already unmapped, what
             // would we do with error codes anyway?
-            let _ = scheme.funmap(intersection.start_address().data(), intersection.size());
+            let _ = scheme.funmap(intersection.base.start_address().data(), intersection.count * PAGE_SIZE);
 
             let _ = file_ref.desc.close();
         }
     }
-    pub fn mmap(&mut self, page: Option<Page>, page_count: usize, flags: MapFlags, map: impl FnOnce(Page, PageFlags<RmmA>, &mut PageMapper, &mut dyn Flusher<RmmA>) -> Result<Grant>) -> Result<Page> {
+    pub fn mmap(&mut self, page: Option<Page>, page_count: NonZeroUsize, flags: MapFlags, map: impl FnOnce(Page, PageFlags<RmmA>, &mut PageMapper, &mut dyn Flusher<RmmA>) -> Result<Grant>) -> Result<Page> {
         // Finally, the end of all "T0DO: Abstract with other grant creation"!
-        if page_count == 0 {
-            return Err(Error::new(EINVAL));
-        }
-
-        let region = match page {
-            Some(page) => self.grants.find_free_at(self.mmap_min, page.start_address(), page_count * PAGE_SIZE, flags)?,
-            None => self.grants.find_free(self.mmap_min, page_count * PAGE_SIZE).ok_or(Error::new(ENOMEM))?,
-        };
-        let page = Page::containing_address(region.start_address());
+        let selected_span = self.grants.find_free_at(self.mmap_min, page, page_count.get(), flags)?;
 
+        // TODO: Threads share address spaces, so the inactive flusher should not be the
+        // only one sending out IPIs.
         let (mut active, mut inactive);
         let flusher = if self.is_current() {
             active = PageFlushAll::new();
@@ -227,20 +218,96 @@ impl AddrSpace {
             &mut inactive as &mut dyn Flusher<RmmA>
         };
 
-        self.grants.insert(map(page, page_flags(flags), &mut self.table.utable, flusher)?);
-        Ok(page)
+        self.grants.insert(map(selected_span.base, page_flags(flags), &mut self.table.utable, flusher)?);
+
+        Ok(selected_span.base)
     }
 }
 
 #[derive(Debug)]
 pub struct UserGrants {
-    inner: BTreeSet<Grant>,
+    inner: BTreeMap<Page, GrantInfo>,
     holes: BTreeMap<VirtualAddress, usize>,
     // TODO: Would an additional map ordered by (size,start) to allow for O(log n) allocations be
     // beneficial?
 
     //TODO: technically VirtualAddress is from a scheme's context!
-    pub funmap: BTreeMap<Region, VirtualAddress>,
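+    // Base page of the mapped-to grant => (page count, base page on the scheme's side).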
+    pub funmap: BTreeMap<Page, (usize, Page)>,
+}
+
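+/// A half-open span of contiguous virtual pages: `[base, base + count)`.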
+#[derive(Clone, Copy)]
+pub struct PageSpan {
+    pub base: Page,
+    pub count: usize,
+}
+impl PageSpan {
+    pub fn new(base: Page, count: usize) -> Self {
+        Self { base, count }
+    }
+    pub fn validate_nonempty(address: VirtualAddress, size: usize) -> Option<Self> {
+        Self::validate(address, size).filter(|this| !this.is_empty())
+    }
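+    /// Checks that `address` and `size` are page-aligned and that the whole range fits
+    /// below `USER_END_OFFSET`, i.e. entirely within user space.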
+    pub fn validate(address: VirtualAddress, size: usize) -> Option<Self> {
+        if address.data() % PAGE_SIZE != 0 || size % PAGE_SIZE != 0 { return None; }
+        if address.data().saturating_add(size) > crate::USER_END_OFFSET { return None; }
+
+        Some(Self::new(Page::containing_address(address), size / PAGE_SIZE))
+    }
+    pub fn is_empty(&self) -> bool {
+        self.count == 0
+    }
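+    /// The pages shared by `self` and `with`; empty (`count == 0`) if they are disjoint.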
+    pub fn intersection(&self, with: PageSpan) -> PageSpan {
+        Self::between(
+            cmp::max(self.base, with.base),
+            cmp::min(self.end(), with.end()),
+        )
+    }
+    pub fn intersects(&self, with: PageSpan) -> bool {
+        !self.intersection(with).is_empty()
+    }
+    pub fn contains(&self, page: Page) -> bool {
+        self.intersects(Self::new(page, 1))
+    }
+    /// Split `self` into `(before, inner, after)` around `inner_span`. Panics (via the
+    /// asserts in `before` and `after`) if `inner_span` is not fully contained in `self`.
+    pub fn slice(&self, inner_span: PageSpan) -> (Option<PageSpan>, PageSpan, Option<PageSpan>) {
+        (self.before(inner_span), inner_span, self.after(inner_span))
+    }
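+    /// Iterate over every page in the span, in address order.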
+    pub fn pages(self) -> impl Iterator<Item = Page> {
+        (0..self.count).map(move |i| self.base.next_by(i))
+    }
+
+    pub fn end(&self) -> Page {
+        self.base.next_by(self.count)
+    }
+
+    /// Returns the span from the start of self until the start of the specified span.
+    pub fn before(self, span: Self) -> Option<Self> {
+        assert!(self.base <= span.base);
+        Some(Self::between(
+            self.base,
+            span.base,
+        )).filter(|reg| !reg.is_empty())
+    }
+
+    /// Returns the span from the end of the given span until the end of self.
+    pub fn after(self, span: Self) -> Option<Self> {
+        assert!(span.end() <= self.end());
+        Some(Self::between(
+            span.end(),
+            self.end(),
+        )).filter(|reg| !reg.is_empty())
+    }
+    /// Returns the span between two pages, `[start, end)`, truncating to zero if end < start.
+    pub fn between(start: Page, end: Page) -> Self {
+        Self::new(
+            start,
+            end.start_address().data().saturating_sub(start.start_address().data()) / PAGE_SIZE,
+        )
+    }
+
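+    /// Map `page`, which must lie within `self`, to the page at the same offset within
+    /// the span `new_base`. Sketch: with `self` starting at page 10, `new_base` at page
+    /// 100, and `page` = 12, the result is page 102.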
+    pub fn rebase(self, new_base: Self, page: Page) -> Page {
+        let offset = page.offset_from(self.base);
+        new_base.base.next_by(offset)
+    }
 }
 
 impl Default for UserGrants {
@@ -248,41 +315,53 @@ impl Default for UserGrants {
         Self::new()
     }
 }
+impl Debug for PageSpan {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "[{:p}:{:p}, {} pages]", self.base.start_address().data() as *const u8, self.base.start_address().add(self.count * PAGE_SIZE - 1).data() as *const u8, self.count)
+    }
+}
 
 impl UserGrants {
     pub fn new() -> Self {
         Self {
-            inner: BTreeSet::new(),
+            inner: BTreeMap::new(),
             holes: core::iter::once((VirtualAddress::new(0), crate::USER_END_OFFSET)).collect::<BTreeMap<_, _>>(),
             funmap: BTreeMap::new(),
         }
     }
-    /// Returns the grant, if any, which occupies the specified address
-    pub fn contains(&self, address: VirtualAddress) -> Option<&Grant> {
-        let byte = Region::byte(address);
+    /// Returns the grant, if any, which occupies the specified page
+    pub fn contains(&self, page: Page) -> Option<(Page, &GrantInfo)> {
         self.inner
-            .range(..=byte)
+            .range(..=page)
             .next_back()
-            .filter(|existing| existing.occupies(byte))
+            .filter(|(base, info)| (**base..base.next_by(info.page_count)).contains(&page))
+            .map(|(base, info)| (*base, info))
     }
     /// Returns an iterator over all grants that occupy some part of the
     /// requested region
-    pub fn conflicts<'a>(&'a self, requested: Region) -> impl Iterator<Item = &'a Grant> + 'a {
-        let start = self.contains(requested.start_address());
-        let start_region = start.map(Region::from).unwrap_or(requested);
+    pub fn conflicts(&self, span: PageSpan) -> impl Iterator<Item = (Page, &'_ GrantInfo)> + '_ {
+        let start = self.contains(span.base);
+
+        // If there is a grant that contains the base page, start searching at the base of that
+        // grant, rather than the requested base here.
+        let start_span = start.map(|(base, info)| PageSpan::new(base, info.page_count)).unwrap_or(span);
+
         self
             .inner
-            .range(start_region..)
-            .take_while(move |region| !region.intersect(requested).is_empty())
+            .range(start_span.base..)
+            .take_while(move |(base, info)| PageSpan::new(**base, info.page_count).intersects(span))
+            .map(|(base, info)| (*base, info))
     }
     /// Return a free region with the specified size
     // TODO: Alignment (x86_64: 4 KiB, 2 MiB, or 1 GiB).
-    pub fn find_free(&self, min: usize, size: usize) -> Option<Region> {
+    // TODO: size => page_count
+    pub fn find_free(&self, min: usize, page_count: usize) -> Option<PageSpan> {
         // Get first available hole, but do reserve the page starting from zero as most compiled
         // languages cannot handle null pointers safely even if they point to valid memory. If an
         // application absolutely needs to map the 0th page, they will have to do so explicitly via
         // MAP_FIXED/MAP_FIXED_NOREPLACE.
-        // TODO: Allow explicitly allocating guard pages?
+        // TODO: Allow explicitly allocating guard pages? Perhaps using mprotect or mmap with
+        // PROT_NONE?
 
         let (hole_start, _hole_size) = self.holes.iter()
             .skip_while(|(hole_offset, hole_size)| hole_offset.data() + **hole_size <= min)
@@ -292,31 +371,22 @@ impl UserGrants {
                 } else {
                     **hole_size
                 };
-                size <= avail_size
+                page_count * PAGE_SIZE <= avail_size
             })?;
         // Create new region
-        Some(Region::new(VirtualAddress::new(cmp::max(hole_start.data(), min)), size))
+        Some(PageSpan::new(Page::containing_address(VirtualAddress::new(cmp::max(hole_start.data(), min))), page_count))
     }
     /// Return a free region, respecting the user's hinted address and flags. Address may be null.
-    pub fn find_free_at(&mut self, min: usize, address: VirtualAddress, size: usize, flags: MapFlags) -> Result<Region> {
-        if address == VirtualAddress::new(0) {
+    pub fn find_free_at(&mut self, min: usize, base: Option<Page>, page_count: usize, flags: MapFlags) -> Result<PageSpan> {
+        let Some(requested_base) = base else {
             // Free hands!
-            return self.find_free(min, size).ok_or(Error::new(ENOMEM));
-        }
+            return self.find_free(min, page_count).ok_or(Error::new(ENOMEM));
+        };
 
         // The user wished to have this region...
-        let mut requested = Region::new(address, size);
-
-        if
-            requested.end_address().data() > crate::USER_END_OFFSET
-            || address.data() % PAGE_SIZE != 0
-        {
-            // ... but it was invalid
-            return Err(Error::new(EINVAL));
-        }
-
+        let requested_span = PageSpan::new(requested_base, page_count);
 
-        if let Some(_grant) = self.conflicts(requested).next() {
+        if let Some(_grant) = self.conflicts(requested_span).next() {
             // ... but it already exists
 
             if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
@@ -326,61 +396,70 @@ impl UserGrants {
                 return Err(Error::new(EOPNOTSUPP));
             } else {
                 // TODO: Find grant close to requested address?
-                requested = self.find_free(min, requested.size()).ok_or(Error::new(ENOMEM))?;
+                return self.find_free(min, page_count).ok_or(Error::new(ENOMEM));
             }
         }
 
-        Ok(requested)
+        Ok(requested_span)
     }
-    fn reserve(&mut self, grant: &Region) {
-        let previous_hole = self.holes.range_mut(..grant.start_address()).next_back();
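+    /// Carve `[base, base + page_count)` out of the free-hole map, shrinking or
+    /// splitting any hole that overlaps the span.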
+    fn reserve(&mut self, base: Page, page_count: usize) {
+        let start_address = base.start_address();
+        let size = page_count * PAGE_SIZE;
+        let end_address = base.start_address().add(size);
+
+        let previous_hole = self.holes.range_mut(..start_address).next_back();
 
         if let Some((hole_offset, hole_size)) = previous_hole {
             let prev_hole_end = hole_offset.data() + *hole_size;
 
-            // Note that prev_hole_end cannot exactly equal grant.start_address, since that would
-            // imply there is another grant at that position already, as it would otherwise have
-            // been larger.
+            // Note that prev_hole_end cannot exactly equal start_address, since that would imply
+            // there is another grant at that position already, as it would otherwise have been
+            // larger.
 
-            if prev_hole_end > grant.start_address().data() {
+            if prev_hole_end > start_address.data() {
                 // hole_offset must be below (but never equal to) the start address due to the
-                // `..grant.start_address()` limit; hence, all we have to do is to shrink the
+                // `..start_address` limit; hence, all we have to do is to shrink the
                 // previous offset.
-                *hole_size = grant.start_address().data() - hole_offset.data();
+                *hole_size = start_address.data() - hole_offset.data();
             }
-            if prev_hole_end > grant.end_address().data() {
+            if prev_hole_end > end_address.data() {
                 // The grant is splitting this hole in two, so insert the new one at the end.
-                self.holes.insert(grant.end_address(), prev_hole_end - grant.end_address().data());
+                self.holes.insert(end_address, prev_hole_end - end_address.data());
             }
         }
 
         // Next hole
-        if let Some(hole_size) = self.holes.remove(&grant.start_address()) {
-            let remainder = hole_size - grant.size();
+        if let Some(hole_size) = self.holes.remove(&start_address) {
+            let remainder = hole_size - size;
             if remainder > 0 {
-                self.holes.insert(grant.end_address(), remainder);
+                self.holes.insert(end_address, remainder);
             }
         }
     }
-    fn unreserve(holes: &mut BTreeMap<VirtualAddress, usize>, grant: &Region) {
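+    /// Give `[base, base + page_count)` back to the free-hole map, merging it with any
+    /// directly adjacent holes.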
+    fn unreserve(holes: &mut BTreeMap<VirtualAddress, usize>, base: Page, page_count: usize) {
+        // TODO
+        let start_address = base.start_address();
+        let size = page_count * PAGE_SIZE;
+        let end_address = base.start_address().add(size);
+
         // The size of any possible hole directly after the to-be-freed region.
-        let exactly_after_size = holes.remove(&grant.end_address());
+        let exactly_after_size = holes.remove(&end_address);
 
         // There was a range that began exactly prior to the to-be-freed region, so simply
         // increment the size such that it occupies the grant too. If in addition there was a grant
         // directly after the grant, include it too in the size.
-        if let Some((hole_offset, hole_size)) = holes.range_mut(..grant.start_address()).next_back().filter(|(offset, size)| offset.data() + **size == grant.start_address().data()) {
-            *hole_size = grant.end_address().data() - hole_offset.data() + exactly_after_size.unwrap_or(0);
+        if let Some((hole_offset, hole_size)) = holes.range_mut(..start_address).next_back().filter(|(offset, size)| offset.data() + **size == start_address.data()) {
+            *hole_size = end_address.data() - hole_offset.data() + exactly_after_size.unwrap_or(0);
         } else {
             // There was no free region directly before the to-be-freed region, however will
             // now unconditionally insert a new free region where the grant was, and add that extra
             // size if there was something after it.
-            holes.insert(grant.start_address(), grant.size() + exactly_after_size.unwrap_or(0));
+            holes.insert(start_address, size + exactly_after_size.unwrap_or(0));
         }
     }
     pub fn insert(&mut self, grant: Grant) {
-        assert!(self.conflicts(*grant).next().is_none());
-        self.reserve(&grant);
+        assert!(self.conflicts(PageSpan::new(grant.base, grant.info.page_count)).next().is_none());
+        self.reserve(grant.base, grant.info.page_count);
 
         // FIXME: This currently causes issues, mostly caused by old code that unmaps only based on
         // offsets. For instance, the scheme code does not specify any length, and would thus unmap
@@ -388,219 +467,57 @@ impl UserGrants {
 
         /*
         let before_region = self.inner
-            .range(..grant.region).next_back()
-            .filter(|b| b.end_address() == grant.start_address() && b.can_be_merged_if_adjacent(&grant)).map(|g| g.region);
+            .range(..grant.base).next_back()
+            .filter(|(base, info)| base.next_by(info.page_count) == grant.base && info.can_be_merged_if_adjacent(&grant.info)).map(|(base, info)| (*base, info.page_count));
 
         let after_region = self.inner
-            .range(Region::new(grant.end_address(), 1)..).next()
-            .filter(|a| a.start_address() == grant.end_address() && a.can_be_merged_if_adjacent(&grant)).map(|g| g.region);
+            .range(grant.span().end()..).next()
+            .filter(|(base, info)| **base == grant.base.next_by(grant.info.page_count) && info.can_be_merged_if_adjacent(&grant.info)).map(|(base, info)| (*base, info.page_count));
 
-        if let Some(before) = before_region {
-            grant.region.start = before.start;
-            grant.region.size += before.size;
+        if let Some((before_base, before_page_count)) = before_region {
+            grant.base = before_base;
+            grant.info.page_count += before_page_count;
 
-            core::mem::forget(self.inner.take(&before));
+            core::mem::forget(self.inner.remove(&before_base));
         }
-        if let Some(after) = after_region {
-            grant.region.size += after.size;
+        if let Some((after_base, after_page_count)) = after_region {
+            grant.info.page_count += after_page_count;
 
-            core::mem::forget(self.inner.take(&after));
+            core::mem::forget(self.inner.remove(&after_base));
         }
         */
 
-        self.inner.insert(grant);
-    }
-    pub fn remove(&mut self, region: &Region) -> bool {
-        self.take(region).is_some()
+        self.inner.insert(grant.base, grant.info);
     }
-    pub fn take(&mut self, region: &Region) -> Option<Grant> {
-        let grant = self.inner.take(region)?;
-        Self::unreserve(&mut self.holes, grant.region());
-        Some(grant)
+    pub fn remove(&mut self, base: Page) -> Option<Grant> {
+        let info = self.inner.remove(&base)?;
+        Self::unreserve(&mut self.holes, base, info.page_count);
+        Some(Grant { base, info })
     }
-    pub fn iter(&self) -> impl Iterator<Item = &Grant> + '_ {
-        self.inner.iter()
+    pub fn iter(&self) -> impl Iterator<Item = (Page, &GrantInfo)> + '_ {
+        self.inner.iter().map(|(base, info)| (*base, info))
     }
     pub fn is_empty(&self) -> bool { self.inner.is_empty() }
     pub fn into_iter(self) -> impl Iterator<Item = Grant> {
-        self.inner.into_iter()
+        self.inner.into_iter().map(|(base, info)| Grant { base, info })
     }
 }
 
-#[derive(Clone, Copy)]
-pub struct Region {
-    start: VirtualAddress,
-    size: usize,
-}
-impl Region {
-    /// Create a new region with the given size
-    pub fn new(start: VirtualAddress, size: usize) -> Self {
-        Self { start, size }
-    }
-
-    /// Create a new region spanning exactly one byte
-    pub fn byte(address: VirtualAddress) -> Self {
-        Self::new(address, 1)
-    }
-
-    /// Create a new region spanning between the start and end address
-    /// (exclusive end)
-    pub fn between(start: VirtualAddress, end: VirtualAddress) -> Self {
-        Self::new(
-            start,
-            end.data().saturating_sub(start.data()),
-        )
-    }
-
-    /// Return the part of the specified region that intersects with self.
-    pub fn intersect(&self, other: Self) -> Self {
-        Self::between(
-            cmp::max(self.start_address(), other.start_address()),
-            cmp::min(self.end_address(), other.end_address()),
-        )
-    }
-
-    /// Get the start address of the region
-    pub fn start_address(&self) -> VirtualAddress {
-        self.start
-    }
-    /// Set the start address of the region
-    pub fn set_start_address(&mut self, start: VirtualAddress) {
-        self.start = start;
-    }
-
-    /// Get the last address in the region (inclusive end)
-    pub fn final_address(&self) -> VirtualAddress {
-        VirtualAddress::new(self.start.data() + self.size - 1)
-    }
-
-    /// Get the start address of the next region (exclusive end)
-    pub fn end_address(&self) -> VirtualAddress {
-        VirtualAddress::new(self.start.data() + self.size)
-    }
-
-    /// Return the exact size of the region
-    pub fn size(&self) -> usize {
-        self.size
-    }
-
-    /// Return true if the size of this region is zero. Grants with such a
-    /// region should never exist.
-    pub fn is_empty(&self) -> bool {
-        self.size == 0
-    }
-
-    /// Set the exact size of the region
-    pub fn set_size(&mut self, size: usize) {
-        self.size = size;
-    }
-
-    /// Round region up to nearest page size
-    pub fn round(self) -> Self {
-        Self {
-            size: round_up_pages(self.size),
-            ..self
-        }
-    }
-
-    /// Return the size of the grant in multiples of the page size
-    pub fn full_size(&self) -> usize {
-        self.round().size()
-    }
-
-    /// Returns true if the address is within the regions's requested range
-    pub fn collides(&self, other: Self) -> bool {
-        self.start_address() <= other.start_address() && other.end_address().data() - self.start_address().data() < self.size()
-    }
-    /// Returns true if the address is within the regions's actual range (so,
-    /// rounded up to the page size)
-    pub fn occupies(&self, other: Self) -> bool {
-        self.round().collides(other)
-    }
-
-    /// Return all pages containing a chunk of the region
-    pub fn pages(&self) -> PageIter {
-        Page::range_exclusive(
-            Page::containing_address(self.start_address()),
-            Page::containing_address(self.end_address())
-        )
-    }
-
-    /// Returns the region from the start of self until the start of the specified region.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the given region starts before self
-    pub fn before(self, region: Self) -> Option<Self> {
-        assert!(self.start_address() <= region.start_address());
-        Some(Self::between(
-            self.start_address(),
-            region.start_address(),
-        )).filter(|reg| !reg.is_empty())
-    }
-
-    /// Returns the region from the end of the given region until the end of self.
-    ///
-    /// # Panics
-    ///
-    /// Panics if self ends before the given region
-    pub fn after(self, region: Self) -> Option<Self> {
-        assert!(region.end_address() <= self.end_address());
-        Some(Self::between(
-            region.end_address(),
-            self.end_address(),
-        )).filter(|reg| !reg.is_empty())
-    }
-
-    /// Re-base address that lives inside this region, onto a new base region
-    pub fn rebase(self, new_base: Self, address: VirtualAddress) -> VirtualAddress {
-        let offset = address.data() - self.start_address().data();
-        let new_start = new_base.start_address().data() + offset;
-        VirtualAddress::new(new_start)
-    }
-}
-
-impl PartialEq for Region {
-    fn eq(&self, other: &Self) -> bool {
-        self.start.eq(&other.start)
-    }
-}
-impl Eq for Region {}
-
-impl PartialOrd for Region {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        self.start.partial_cmp(&other.start)
-    }
-}
-impl Ord for Region {
-    fn cmp(&self, other: &Self) -> Ordering {
-        self.start.cmp(&other.start)
-    }
-}
-
-impl Debug for Region {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{:#x}..{:#x} ({:#x} long)", self.start_address().data(), self.end_address().data(), self.size())
-    }
-}
-
-
-impl<'a> From<&'a Grant> for Region {
-    fn from(source: &'a Grant) -> Self {
-        source.region
-    }
-}
-
-
 #[derive(Debug)]
-pub struct Grant {
-    region: Region,
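+/// Everything about a grant except its base page, which is the `UserGrants` map key.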
+pub struct GrantInfo {
+    page_count: usize,
     flags: PageFlags<RmmA>,
     mapped: bool,
     pub(crate) owned: bool,
     //TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the context?
     pub desc_opt: Option<GrantFileRef>,
 }
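+/// A base page together with its `GrantInfo`; reassembled from the map on
+/// `UserGrants::remove` and `UserGrants::into_iter`.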
+#[derive(Debug)]
+pub struct Grant {
+    pub(crate) base: Page,
+    pub(crate) info: GrantInfo,
+}
+
 #[derive(Clone, Debug)]
 pub struct GrantFileRef {
     pub desc: FileDescriptor,
@@ -611,19 +528,7 @@ pub struct GrantFileRef {
 }
 
 impl Grant {
-    pub fn is_owned(&self) -> bool {
-        self.owned
-    }
-
-    pub fn region(&self) -> &Region {
-        &self.region
-    }
-
-    /// Get a mutable reference to the region. This is unsafe, because a bad
-    /// region could lead to the wrong addresses being unmapped.
-    unsafe fn region_mut(&mut self) -> &mut Region {
-        &mut self.region
-    }
+    // TODO: PageCount newtype, to avoid confusion between bytes and pages?
 
     pub fn physmap(phys: Frame, dst: Page, page_count: usize, flags: PageFlags<RmmA>, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> Result<Grant> {
         for index in 0..page_count {
@@ -636,14 +541,14 @@ impl Grant {
         }
 
         Ok(Grant {
-            region: Region {
-                start: dst.start_address(),
-                size: page_count * PAGE_SIZE,
+            base: dst,
+            info: GrantInfo {
+                page_count,
+                flags,
+                mapped: true,
+                owned: false,
+                desc_opt: None,
             },
-            flags,
-            mapped: true,
-            owned: false,
-            desc_opt: None,
         })
     }
     pub fn zeroed(dst: Page, page_count: usize, flags: PageFlags<RmmA>, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
@@ -652,19 +557,19 @@ impl Grant {
             let flush = unsafe { mapper.map(page.start_address(), flags) }.ok_or(Enomem)?;
             flusher.consume(flush);
         }
-        Ok(Grant { region: Region { start: dst.start_address(), size: page_count * PAGE_SIZE }, flags, mapped: true, owned: true, desc_opt: None })
+        Ok(Grant { base: dst, info: GrantInfo { page_count, flags, mapped: true, owned: true, desc_opt: None } })
     }
     pub fn borrow(src_base: Page, dst_base: Page, page_count: usize, flags: PageFlags<RmmA>, desc_opt: Option<GrantFileRef>, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
         Self::copy_inner(src_base, dst_base, page_count, flags, desc_opt, src_mapper, dst_mapper, (), dst_flusher, false, false)
     }
-    pub fn reborrow(src_grant: &Grant, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant> {
-        Self::borrow(Page::containing_address(src_grant.start_address()), dst_base, src_grant.size() / PAGE_SIZE, src_grant.flags(), src_grant.desc_opt.clone(), src_mapper, dst_mapper, dst_flusher).map_err(Into::into)
+    pub fn reborrow(src_base: Page, src_info: &GrantInfo, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant> {
+        Self::borrow(src_base, dst_base, src_info.page_count, src_info.flags, src_info.desc_opt.clone(), src_mapper, dst_mapper, dst_flusher).map_err(Into::into)
     }
     pub fn transfer(mut src_grant: Grant, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, src_flusher: impl Flusher<RmmA>, dst_flusher: impl Flusher<RmmA>) -> Result<Grant> {
-        assert!(core::mem::replace(&mut src_grant.mapped, false));
-        let desc_opt = src_grant.desc_opt.take();
+        assert!(core::mem::replace(&mut src_grant.info.mapped, false));
+        let desc_opt = src_grant.info.desc_opt.take();
 
-        Self::copy_inner(Page::containing_address(src_grant.start_address()), dst_base, src_grant.size() / PAGE_SIZE, src_grant.flags(), desc_opt, src_mapper, dst_mapper, src_flusher, dst_flusher, src_grant.owned, true).map_err(Into::into)
+        Self::copy_inner(src_grant.base, dst_base, src_grant.info.page_count, src_grant.info.flags(), desc_opt, src_mapper, dst_mapper, src_flusher, dst_flusher, src_grant.info.owned, true).map_err(Into::into)
     }
 
     fn copy_inner(
@@ -721,45 +626,39 @@ impl Grant {
         }
 
         Ok(Grant {
-            region: Region {
-                start: dst_base.start_address(),
-                size: page_count * PAGE_SIZE,
+            base: dst_base,
+            info: GrantInfo {
+                page_count,
+                flags,
+                mapped: true,
+                owned,
+                desc_opt,
             },
-            flags,
-            mapped: true,
-            owned,
-            desc_opt,
         })
     }
 
-    pub fn flags(&self) -> PageFlags<RmmA> {
-        self.flags
-    }
-
     pub fn remap(&mut self, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>, flags: PageFlags<RmmA>) {
-        assert!(self.mapped);
+        assert!(self.info.mapped);
 
-        for page in self.pages() {
+        for page in self.span().pages() {
+            // TODO: PageMapper is unsafe because it can be used to modify kernel memory. Add a
+            // subset/wrapper that is safe but only for user mappings.
             unsafe {
                 let result = mapper.remap(page.start_address(), flags).expect("grant contained unmap address");
                 flusher.consume(result);
             }
         }
 
-        self.flags = flags;
+        self.info.flags = flags;
     }
-    pub fn can_have_flags(&self, flags: MapFlags) -> bool {
-        self.owned || ((self.flags.has_write() || !flags.contains(MapFlags::PROT_WRITE)) && (self.flags.has_execute() || !flags.contains(MapFlags::PROT_EXEC)))
-    }
-
     pub fn unmap(mut self, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> UnmapResult {
-        assert!(self.mapped);
+        assert!(self.info.mapped);
 
-        for page in self.pages() {
+        for page in self.span().pages() {
             let (entry, _, flush) = unsafe { mapper.unmap_phys(page.start_address(), true) }
                 .unwrap_or_else(|| panic!("missing page at {:#0x} for grant {:?}", page.start_address().data(), self));
 
-            if self.owned {
+            if self.info.owned {
                 // TODO: make sure this frame can be safely freed, physical use counter.
                 //
                 // Namely, we can either have MAP_PRIVATE or MAP_SHARED-style mappings. The former
@@ -779,10 +678,10 @@ impl Grant {
             flusher.consume(flush);
         }
 
-        self.mapped = false;
+        self.info.mapped = false;
 
         // TODO: This imposes a large cost on unmapping, but that cost cannot be avoided without modifying fmap and funmap
-        UnmapResult { file_desc: self.desc_opt.take() }
+        UnmapResult { file_desc: self.info.desc_opt.take() }
     }
 
     /// Extract out a region into a separate grant. The return value is as
@@ -798,31 +697,54 @@ impl Grant {
     ///
     /// Also panics if the given region isn't completely contained within the
     /// grant. Use `grant.intersect` to find a sub-region that works.
-    pub fn extract(mut self, region: Region) -> Option<(Option<Grant>, Grant, Option<Grant>)> {
-        assert_eq!(region.start_address().data() % PAGE_SIZE, 0, "split_out must be called on page-size aligned start address");
-        assert_eq!(region.size() % PAGE_SIZE, 0, "split_out must be called on page-size aligned end address");
-
-        let before_grant = self.before(region).map(|region| Grant {
-            region,
-            flags: self.flags,
-            mapped: self.mapped,
-            owned: self.owned,
-            desc_opt: self.desc_opt.clone(),
+    pub fn span(&self) -> PageSpan {
+        PageSpan::new(self.base, self.info.page_count)
+    }
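+    /// Split the grant around `span`, returning `(before, middle, after)` where
+    /// `middle` covers exactly `span`. Pure bookkeeping; no pages are mapped or
+    /// unmapped here.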
+    pub fn extract(mut self, span: PageSpan) -> Option<(Option<Grant>, Grant, Option<Grant>)> {
+        let (before_span, this_span, after_span) = self.span().slice(span);
+
+        let before_grant = before_span.map(|span| Grant {
+            base: span.base,
+            info: GrantInfo {
+                flags: self.info.flags,
+                mapped: self.info.mapped,
+                owned: self.info.owned,
+                desc_opt: self.info.desc_opt.clone(),
+                page_count: span.count,
+            },
         });
-        let after_grant = self.after(region).map(|region| Grant {
-            region,
-            flags: self.flags,
-            mapped: self.mapped,
-            owned: self.owned,
-            desc_opt: self.desc_opt.clone(),
+        let after_grant = after_span.map(|span| Grant {
+            base: span.base,
+            info: GrantInfo {
+                flags: self.info.flags,
+                mapped: self.info.mapped,
+                owned: self.info.owned,
+                desc_opt: self.info.desc_opt.clone(),
+                page_count: span.count,
+            },
         });
-
-        unsafe {
-            *self.region_mut() = region;
-        }
+        self.base = this_span.base;
+        self.info.page_count = this_span.count;
 
         Some((before_grant, self, after_grant))
     }
+}
+impl GrantInfo {
+    pub fn flags(&self) -> PageFlags<RmmA> {
+        self.flags
+    }
+    pub fn is_owned(&self) -> bool {
+        self.owned
+    }
+    pub fn page_count(&self) -> usize {
+        self.page_count
+    }
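+    /// Whether the grant's protection can be changed to `flags`: owned (anonymous)
+    /// memory can take any protection, while borrowed memory can never gain write or
+    /// execute permission it does not already have.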
+    pub fn can_have_flags(&self, flags: MapFlags) -> bool {
+        self.owned || ((self.flags.has_write() || !flags.contains(MapFlags::PROT_WRITE)) && (self.flags.has_execute() || !flags.contains(MapFlags::PROT_EXEC)))
+    }
+
     pub fn can_be_merged_if_adjacent(&self, with: &Self) -> bool {
         match (&self.desc_opt, &with.desc_opt) {
             (None, None) => (),
@@ -834,38 +756,9 @@ impl Grant {
     }
 }
 
-impl Deref for Grant {
-    type Target = Region;
-    fn deref(&self) -> &Self::Target {
-        &self.region
-    }
-}
-
-impl PartialOrd for Grant {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        self.region.partial_cmp(&other.region)
-    }
-}
-impl Ord for Grant {
-    fn cmp(&self, other: &Self) -> Ordering {
-        self.region.cmp(&other.region)
-    }
-}
-impl PartialEq for Grant {
-    fn eq(&self, other: &Self) -> bool {
-        self.region.eq(&other.region)
-    }
-}
-impl Eq for Grant {}
-
-impl Borrow<Region> for Grant {
-    fn borrow(&self) -> &Region {
-        &self.region
-    }
-}
-
-impl Drop for Grant {
+impl Drop for GrantInfo {
     fn drop(&mut self) {
+        // XXX: This will not show the address...
         assert!(!self.mapped, "Grant dropped while still mapped: {:#x?}", self);
     }
 }
diff --git a/src/debugger.rs b/src/debugger.rs
index 4a97f7f5604956b0aeefbd5ee20bed3c39f8c10a..d96bdf43935647d8a63dddaf6da5df1db2a3ef49 100644
--- a/src/debugger.rs
+++ b/src/debugger.rs
@@ -1,4 +1,4 @@
-use crate::paging::{RmmA, RmmArch, TableKind};
+use crate::paging::{RmmA, RmmArch, TableKind, PAGE_SIZE};
 
 //TODO: combine arches into one function (aarch64 one is newest)
 
@@ -182,12 +182,12 @@ pub unsafe fn debugger(target_id: Option<crate::context::ContextId>) {
             let addr_space = addr_space.read();
             if ! addr_space.grants.is_empty() {
                 println!("grants:");
-                for grant in addr_space.grants.iter() {
-                    let region = grant.region();
+                for (base, info) in addr_space.grants.iter() {
+                    let size = info.page_count() * PAGE_SIZE;
                     println!(
                         "    virt 0x{:016x}:0x{:016x} size 0x{:08x} {}",
-                        region.start_address().data(), region.final_address().data(), region.size(),
-                        if grant.is_owned() { "owned" } else { "borrowed" },
+                        base.start_address().data(), base.start_address().data() + size - 1, size,
+                        if info.is_owned() { "owned" } else { "borrowed" },
                     );
                 }
             }
@@ -228,6 +228,7 @@ pub unsafe fn debugger(target_id: Option<crate::context::ContextId>) {
 
 #[cfg(target_arch = "x86_64")]
 pub unsafe fn check_consistency(addr_space: &mut crate::context::memory::AddrSpace) {
+    use crate::context::memory::PageSpan;
     use crate::paging::*;
 
     let p4 = addr_space.table.utable.table();
@@ -261,7 +262,7 @@ pub unsafe fn check_consistency(addr_space: &mut crate::context::memory::AddrSpa
                     };
                     let address = VirtualAddress::new((p1i << 12) | (p2i << 21) | (p3i << 30) | (p4i << 39));
 
-                    let grant = match addr_space.grants.contains(address) {
+                    let (base, grant) = match addr_space.grants.contains(Page::containing_address(address)) {
                         Some(g) => g,
                         None => {
                             log::error!("ADDRESS {:p} LACKING GRANT BUT MAPPED TO {:#0x} FLAGS {:?}!", address.data() as *const u8, physaddr.data(), flags);
@@ -270,19 +271,20 @@ pub unsafe fn check_consistency(addr_space: &mut crate::context::memory::AddrSpa
                     };
                     const STICKY: usize = (1 << 5) | (1 << 6); // accessed+dirty
                     if grant.flags().data() & !STICKY != flags.data() & !STICKY {
-                        log::error!("FLAG MISMATCH: {:?} != {:?}, address {:p} in grant at {:?}", grant.flags(), flags, address.data() as *const u8, grant.region());
+                        log::error!("FLAG MISMATCH: {:?} != {:?}, address {:p} in grant at {:?}", grant.flags(), flags, address.data() as *const u8, PageSpan::new(base, grant.page_count()));
                     }
                 }
             }
         }
     }
 
-    for grant in addr_space.grants.iter() {
-        for page in grant.pages() {
+    for (base, info) in addr_space.grants.iter() {
+        let span = PageSpan::new(base, info.page_count());
+        for page in span.pages() {
             let _entry = match addr_space.table.utable.translate(page.start_address()) {
                 Some(e) => e,
                 None => {
-                    log::error!("GRANT AT {:?} LACKING MAPPING AT PAGE {:p}", grant.region(), page.start_address().data() as *const u8);
+                    log::error!("GRANT AT {:?} LACKING MAPPING AT PAGE {:p}", span, page.start_address().data() as *const u8);
                     continue;
                 }
             };
diff --git a/src/lib.rs b/src/lib.rs
index 3be0afc1132c271abe0a65e8dc2b09f9eed088e3..94be64242ff23d7a9372b993c081e9a3cb3b1aca 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -47,6 +47,8 @@
 #![feature(array_chunks)]
 #![feature(iter_array_chunks)]
 #![feature(asm_const)] // TODO: Relax requirements of most asm invocations
+#![feature(const_option)]
+#![feature(arbitrary_self_types)]
 #![feature(int_roundings)]
 #![feature(naked_functions)]
 #![feature(slice_ptr_get, slice_ptr_len)]
diff --git a/src/scheme/memory.rs b/src/scheme/memory.rs
index 211af45b6999aa8497ec78d15e9f979dfafa6667..56caf972691f00f4b7b8dbbc3aa69ebc82711608 100644
--- a/src/scheme/memory.rs
+++ b/src/scheme/memory.rs
@@ -1,3 +1,5 @@
+use core::num::NonZeroUsize;
+
 use alloc::sync::Arc;
 use rmm::PhysicalAddress;
 use spin::RwLock;
@@ -54,11 +56,12 @@ impl MemoryScheme {
 
     pub fn fmap_anonymous(addr_space: &Arc<RwLock<AddrSpace>>, map: &Map) -> Result<usize> {
         let (requested_page, page_count) = crate::syscall::usercopy::validate_region(map.address, map.size)?;
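+        // A zero-length map is rejected up front; `mmap` now takes `NonZeroUsize`, so
+        // the check can no longer be forgotten at any call site.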
+        let page_count = NonZeroUsize::new(page_count).ok_or(Error::new(EINVAL))?;
 
         let page = addr_space
             .write()
             .mmap((map.address != 0).then_some(requested_page), page_count, map.flags, |page, flags, mapper, flusher| {
-                Ok(Grant::zeroed(page, page_count, flags, mapper, flusher)?)
+                Ok(Grant::zeroed(page, page_count.get(), flags, mapper, flusher)?)
             })?;
 
         Ok(page.start_address().data())
@@ -76,7 +79,7 @@ impl MemoryScheme {
             log::warn!("physmap size {} is not multiple of PAGE_SIZE {}", size, PAGE_SIZE);
             return Err(Error::new(EINVAL));
         }
-        let page_count = size.div_ceil(PAGE_SIZE);
+        let page_count = NonZeroUsize::new(size.div_ceil(PAGE_SIZE)).ok_or(Error::new(EINVAL))?;
 
         AddrSpace::current()?.write().mmap(None, page_count, flags, |dst_page, mut page_flags, dst_mapper, dst_flusher| {
             match memory_type {
@@ -96,7 +99,7 @@ impl MemoryScheme {
             Grant::physmap(
                 Frame::containing_address(PhysicalAddress::new(physical_address)),
                 dst_page,
-                page_count,
+                page_count.get(),
                 page_flags,
                 dst_mapper,
                 dst_flusher,
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
index e57894df20bd79a69cfb71cb52853367bc3ed000..1d8fc9f944b46ce8f5e791dba7c07714f49aef78 100644
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -1,6 +1,6 @@
 use crate::{
     arch::paging::{mapper::InactiveFlusher, Page, RmmA, RmmArch, VirtualAddress},
-    context::{self, Context, ContextId, Status, file::{FileDescription, FileDescriptor}, memory::{AddrSpace, Grant, new_addrspace, map_flags, Region}, BorrowedHtBuf},
+    context::{self, Context, ContextId, Status, file::{FileDescription, FileDescriptor}, memory::{AddrSpace, Grant, new_addrspace, map_flags, PageSpan}, BorrowedHtBuf},
     memory::PAGE_SIZE,
     ptrace,
     scheme::{self, FileHandle, KernelScheme, SchemeId},
@@ -28,7 +28,7 @@ use core::{
     mem,
     slice,
     str,
-    sync::atomic::{AtomicUsize, Ordering},
+    num::NonZeroUsize,
+    sync::atomic::{AtomicUsize, Ordering},
 };
 use spin::{Once, RwLock};
 
@@ -751,35 +751,34 @@ impl KernelScheme for ProcScheme {
                 let src_addr_space = &mut *src_addr_space;
                 let mut dst_addr_space = dst_addr_space.write();
 
-                let src_grant_region = {
-                    let src_region = Region::new(src_page.start_address(), page_count * PAGE_SIZE);
-                    let mut conflicts = src_addr_space.grants.conflicts(src_region);
-                    let first = conflicts.next().ok_or(Error::new(EINVAL))?;
+                let src_grant_span = {
+                    let src_span = PageSpan::new(src_page, page_count);
+                    let mut conflicts = src_addr_space.grants.conflicts(src_span);
+                    let (first_base, first_info) = conflicts.next().ok_or(Error::new(EINVAL))?;
                     if conflicts.next().is_some() {
                         return Err(Error::new(EINVAL));
                     }
 
-                    if !first.can_have_flags(map.flags) {
+                    if !first_info.can_have_flags(map.flags) {
                         return Err(Error::new(EACCES));
                     }
 
-                    first.region().intersect(src_region)
+                    PageSpan::new(first_base, first_info.page_count()).intersection(src_span)
                 };
 
-                let grant_page_count = src_grant_region.size() / PAGE_SIZE;
-
                 let src_mapper = &mut src_addr_space.table.utable;
+                let src_page_count = NonZeroUsize::new(src_grant_span.count).ok_or(Error::new(EINVAL))?;
 
                 let result_page = if consume {
-                    let grant = src_addr_space.grants.take(&src_grant_region).expect("grant cannot disappear");
-                    let (before, middle, after) = grant.extract(src_grant_region).expect("called intersect(), must succeed");
+                    let grant = src_addr_space.grants.remove(src_grant_span.base).expect("grant cannot disappear");
+                    let (before, middle, after) = grant.extract(src_grant_span).expect("called intersection(), must succeed");
 
                     if let Some(before) = before { src_addr_space.grants.insert(before); }
                     if let Some(after) = after { src_addr_space.grants.insert(after); }
 
-                    dst_addr_space.mmap(requested_dst_page, grant_page_count, map.flags, |dst_page, _flags, dst_mapper, dst_flusher| Grant::transfer(middle, dst_page, src_mapper, dst_mapper, InactiveFlusher::new(), dst_flusher))?
+                    dst_addr_space.mmap(requested_dst_page, src_page_count, map.flags, |dst_page, _flags, dst_mapper, dst_flusher| Grant::transfer(middle, dst_page, src_mapper, dst_mapper, InactiveFlusher::new(), dst_flusher))?
                 } else {
-                    dst_addr_space.mmap(requested_dst_page, grant_page_count, map.flags, |dst_page, flags, dst_mapper, flusher| Ok(Grant::borrow(Page::containing_address(src_grant_region.start_address()), dst_page, grant_page_count, flags, None, src_mapper, dst_mapper, flusher)?))?
+                    dst_addr_space.mmap(requested_dst_page, src_page_count, map.flags, |dst_page, flags, dst_mapper, flusher| Ok(Grant::borrow(src_grant_span.base, dst_page, src_grant_span.count, flags, None, src_mapper, dst_mapper, flusher)?))?
                 };
 
                 Ok(result_page.start_address().data())
@@ -843,11 +842,11 @@ impl KernelScheme for ProcScheme {
                 let addrspace = addrspace.read();
                 let mut bytes_read = 0;
 
-                for ([r1, r2, r3, r4], grant) in records.zip(addrspace.grants.iter()).skip(*offset / RECORD_SIZE) {
-                    r1.write_usize(grant.start_address().data())?;
-                    r2.write_usize(grant.size())?;
-                    r3.write_usize(map_flags(grant.flags()).bits() | if grant.desc_opt.is_some() { 0x8000_0000 } else { 0 })?;
-                    r4.write_usize(grant.desc_opt.as_ref().map_or(0, |d| d.offset))?;
+                for ([r1, r2, r3, r4], (base, info)) in records.zip(addrspace.grants.iter()).skip(*offset / RECORD_SIZE) {
+                    r1.write_usize(base.start_address().data())?;
+                    r2.write_usize(info.page_count() * PAGE_SIZE)?;
+                    r3.write_usize(map_flags(info.flags()).bits() | if info.desc_opt.is_some() { 0x8000_0000 } else { 0 })?;
+                    r4.write_usize(info.desc_opt.as_ref().map_or(0, |d| d.offset))?;
                     bytes_read += RECORD_SIZE;
                 }
 
@@ -1030,13 +1029,13 @@ impl KernelScheme for ProcScheme {
                     ADDRSPACE_OP_MUNMAP => {
                         let (page, page_count) = crate::syscall::validate_region(next()??, next()??)?;
 
-                        AddrSpace::munmap(addrspace.write(), page, page_count);
+                        addrspace.write().munmap(PageSpan::new(page, page_count));
                     }
                     ADDRSPACE_OP_MPROTECT => {
                         let (page, page_count) = crate::syscall::validate_region(next()??, next()??)?;
                         let flags = MapFlags::from_bits(next()??).ok_or(Error::new(EINVAL))?;
 
-                        addrspace.write().mprotect(page, page_count, flags)?;
+                        addrspace.write().mprotect(PageSpan::new(page, page_count), flags)?;
                     }
                     _ => return Err(Error::new(EINVAL)),
                 }
@@ -1296,7 +1295,7 @@ impl KernelScheme for ProcScheme {
                     grant_handle if grant_handle.starts_with(b"grant-") => {
                         let start_addr = usize::from_str_radix(core::str::from_utf8(&grant_handle[6..]).map_err(|_| Error::new(EINVAL))?, 16).map_err(|_| Error::new(EINVAL))?;
                         (Operation::GrantHandle {
-                            description: Arc::clone(&addrspace.read().grants.contains(VirtualAddress::new(start_addr)).ok_or(Error::new(EINVAL))?.desc_opt.as_ref().ok_or(Error::new(EINVAL))?.desc.description)
+                            description: Arc::clone(&addrspace.read().grants.contains(Page::containing_address(VirtualAddress::new(start_addr))).ok_or(Error::new(EINVAL))?.1.desc_opt.as_ref().ok_or(Error::new(EINVAL))?.desc.description)
                         }, false)
                     }
 
diff --git a/src/scheme/sys/context.rs b/src/scheme/sys/context.rs
index cb3d92796341cc06528ab04d5e05509ee8af70dc..9909f7a8788a19841410b1f1b7348c90dae0da26 100644
--- a/src/scheme/sys/context.rs
+++ b/src/scheme/sys/context.rs
@@ -2,6 +2,7 @@ use alloc::string::String;
 use alloc::vec::Vec;
 
 use crate::context;
+use crate::paging::PAGE_SIZE;
 use crate::syscall::error::Result;
 
 pub fn resource() -> Result<Vec<u8>> {
@@ -84,9 +85,9 @@ pub fn resource() -> Result<Vec<u8>> {
                 memory += kstack.len();
             }
             if let Ok(addr_space) = context.addr_space() {
-                for grant in addr_space.read().grants.iter() {
-                    if grant.is_owned() {
-                        memory += grant.size();
+                for (base, info) in addr_space.read().grants.iter() {
+                    if info.is_owned() {
+                        memory += info.page_count() * PAGE_SIZE;
                     }
                 }
             }
diff --git a/src/scheme/user.rs b/src/scheme/user.rs
index 7b0725228f80adee2032da8c53b45b18cc4cad8c..eef1839cc83d890d76d83919eac39a2c56129cbd 100644
--- a/src/scheme/user.rs
+++ b/src/scheme/user.rs
@@ -2,6 +2,7 @@ use alloc::sync::{Arc, Weak};
 use alloc::boxed::Box;
 use alloc::collections::BTreeMap;
 use syscall::{SKMSG_FRETURNFD, CallerCtx};
+use core::num::NonZeroUsize;
 use core::sync::atomic::{AtomicBool, Ordering};
 use core::{mem, usize};
 use core::convert::TryFrom;
@@ -9,7 +10,7 @@ use spin::{Mutex, RwLock};
 
 use crate::context::{self, Context, BorrowedHtBuf};
 use crate::context::file::{FileDescriptor, FileDescription};
-use crate::context::memory::{AddrSpace, DANGLING, Grant, Region, GrantFileRef};
+use crate::context::memory::{AddrSpace, DANGLING, Grant, GrantFileRef, PageSpan};
 use crate::event;
 use crate::paging::KernelMapper;
 use crate::paging::{PAGE_SIZE, Page, VirtualAddress};
@@ -42,6 +43,8 @@ pub enum Response {
     Fd(Arc<RwLock<FileDescription>>),
 }
 
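+// `AddrSpace::mmap` takes its page count as a `NonZeroUsize`; this shared
+// constant covers the common single-page case without a per-call check.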
+const ONE: NonZeroUsize = NonZeroUsize::new(1).unwrap();
+
 impl UserInner {
     pub fn new(root_id: SchemeId, handle_id: usize, name: Box<str>, flags: usize, context: Weak<RwLock<Context>>) -> UserInner {
         UserInner {
@@ -139,7 +142,7 @@ impl UserInner {
 
         let src_page = Page::containing_address(VirtualAddress::new(tail.buf_mut().as_ptr() as usize));
 
-        let dst_page = dst_addr_space.write().mmap(None, 1, PROT_READ, |dst_page, flags, mapper, flusher| Ok(Grant::borrow(src_page, dst_page, 1, flags, None, &mut KernelMapper::lock(), mapper, flusher)?))?;
+        let dst_page = dst_addr_space.write().mmap(None, ONE, PROT_READ, |dst_page, flags, mapper, flusher| Ok(Grant::borrow(src_page, dst_page, 1, flags, None, &mut KernelMapper::lock(), mapper, flusher)?))?;
 
         Ok(CaptureGuard {
             destroyed: false,
@@ -209,9 +212,7 @@ impl UserInner {
 
         let mut dst_space = dst_space_lock.write();
 
-        let free_region = dst_space.grants.find_free(dst_space.mmap_min, page_count * PAGE_SIZE).ok_or(Error::new(ENOMEM))?;
-
-        let first_dst_page = Page::containing_address(free_region.start_address());
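+        // `find_free` is page-granular: given a page count, it returns a free
+        // `PageSpan` starting at or above `mmap_min`.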
+        let free_span = dst_space.grants.find_free(dst_space.mmap_min, page_count).ok_or(Error::new(ENOMEM))?;
 
         let head = if !head_part_of_buf.is_empty() {
             // FIXME: Signal context can probably recursively use head/tail.
@@ -235,7 +236,7 @@ impl UserInner {
             }
             let head_buf_page = Page::containing_address(VirtualAddress::new(array.buf_mut().as_mut_ptr() as usize));
 
-            dst_space.mmap(Some(first_dst_page), 1, map_flags, move |dst_page, page_flags, mapper, flusher| {
+            dst_space.mmap(Some(free_span.base), ONE, map_flags, move |dst_page, page_flags, mapper, flusher| {
                 Ok(Grant::borrow(head_buf_page, dst_page, 1, page_flags, None, &mut KernelMapper::lock(), mapper, flusher)?)
             })?;
 
@@ -251,17 +252,17 @@ impl UserInner {
                 dst: None,
             }
         };
-        let (first_middle_dst_page, first_middle_src_page) = if !head_part_of_buf.is_empty() { (first_dst_page.next(), src_page.next()) } else { (first_dst_page, src_page) };
+        let (first_middle_dst_page, first_middle_src_page) = if !head_part_of_buf.is_empty() { (free_span.base.next(), src_page.next()) } else { (free_span.base, src_page) };
 
         let middle_page_count = middle_tail_part_of_buf.len() / PAGE_SIZE;
         let tail_size = middle_tail_part_of_buf.len() % PAGE_SIZE;
 
         let (_middle_part_of_buf, tail_part_of_buf) = middle_tail_part_of_buf.split_at(middle_page_count * PAGE_SIZE).expect("split must succeed");
 
-        if middle_page_count > 0 {
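+        // `NonZeroUsize::new` doubles as the zero check: an empty middle
+        // section yields `None` and no mapping is attempted.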
+        if let Some(middle_page_count) = NonZeroUsize::new(middle_page_count) {
             dst_space.mmap(Some(first_middle_dst_page), middle_page_count, map_flags, move |dst_page, page_flags, mapper, flusher| {
                 let mut cur_space = cur_space_lock.write();
-                Ok(Grant::borrow(first_middle_src_page, dst_page, middle_page_count, page_flags, None, &mut cur_space.table.utable, mapper, flusher)?)
+                Ok(Grant::borrow(first_middle_src_page, dst_page, middle_page_count.get(), page_flags, None, &mut cur_space.table.utable, mapper, flusher)?)
             })?;
         }
 
@@ -287,7 +288,7 @@ impl UserInner {
                 }
             }
 
-            dst_space.mmap(Some(tail_dst_page), 1, map_flags, move |dst_page, page_flags, mapper, flusher| {
+            dst_space.mmap(Some(tail_dst_page), ONE, map_flags, move |dst_page, page_flags, mapper, flusher| {
                 Ok(Grant::borrow(tail_buf_page, dst_page, 1, page_flags, None, &mut KernelMapper::lock(), mapper, flusher)?)
             })?;
 
@@ -306,7 +307,7 @@ impl UserInner {
 
         Ok(CaptureGuard {
             destroyed: false,
-            base: free_region.start_address().data() + offset,
+            base: free_span.base.start_address().data() + offset,
             len: user_buf.len(),
             space: Some(dst_space_lock),
             head,
@@ -389,14 +390,15 @@ impl UserInner {
 
                         // TODO: ensure all mappings are aligned!
                         let page_count = map.size.div_ceil(PAGE_SIZE);
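+                        // mmap requires a nonzero page count, so a zero-sized
+                        // map is rejected with EINVAL before mapping anything.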
+                        let nz_page_count = NonZeroUsize::new(page_count).ok_or(Error::new(EINVAL));
 
-                        let res = addr_space.mmap(dst_page, page_count, map.flags, move |dst_page, flags, mapper, flusher| {
-                            Ok(Grant::borrow(src_page, dst_page, page_count, flags, Some(file_ref), &mut AddrSpace::current()?.write().table.utable, mapper, flusher)?)
-                        });
+                        let res = nz_page_count.and_then(|page_count| addr_space.mmap(dst_page, page_count, map.flags, move |dst_page, flags, mapper, flusher| {
+                            Ok(Grant::borrow(src_page, dst_page, page_count.get(), flags, Some(file_ref), &mut AddrSpace::current()?.write().table.utable, mapper, flusher)?)
+                        }));
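+                        // funmap records, per grant base page, the page count
+                        // and the original source page, so a later funmap call
+                        // can translate the grant address back to the caller's.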
                         retcode = Error::mux(res.map(|grant_start_page| {
                             addr_space.grants.funmap.insert(
-                                Region::new(grant_start_page.start_address(), page_count * PAGE_SIZE),
-                                VirtualAddress::new(address),
+                                grant_start_page,
+                                (page_count, src_page),
                             );
                             grant_start_page.start_address().data()
                         }));
@@ -529,7 +531,7 @@ impl<const READ: bool, const WRITE: bool> CaptureGuard<READ, WRITE> {
 
         let (first_page, page_count, _offset) = page_range_containing(self.base, self.len);
 
-        AddrSpace::munmap(space.write(), first_page, page_count);
+        space.write().munmap(PageSpan::new(first_page, page_count));
 
         result
     }
@@ -632,40 +634,40 @@ impl Scheme for UserScheme {
     }
 
     fn funmap(&self, grant_address: usize, size: usize) -> Result<usize> {
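+        // `validate_nonempty` presumably mirrors the explicit alignment and
+        // bounds checks it replaces elsewhere (cf. mprotect), yielding `None`
+        // for unaligned, empty, or out-of-range spans.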
+        let requested_span = PageSpan::validate_nonempty(VirtualAddress::new(grant_address), size).ok_or(Error::new(EINVAL))?;
+
         let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
+
         let address_opt = {
-            let contexts = context::contexts();
-            let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+            let context_lock = context::current()?;
             let context = context_lock.read();
+
             let mut addr_space = context.addr_space()?.write();
             let funmap = &mut addr_space.grants.funmap;
-            let entry = funmap.range(..=Region::byte(VirtualAddress::new(grant_address))).next_back();
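+            // funmap is keyed by base page: the range query locates the last
+            // entry whose base does not exceed the requested address.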
+            let entry = funmap.range(..=Page::containing_address(VirtualAddress::new(grant_address))).next_back();
 
-            let grant_address = VirtualAddress::new(grant_address);
-
-            if let Some((&grant, &user_base)) = entry {
-                let grant_requested = Region::new(grant_address, size);
-                if grant_requested.end_address() > grant.end_address() {
+            if let Some((&grant_page, &(page_count, user_page))) = entry {
+                if requested_span.base.next_by(requested_span.count) > grant_page.next_by(page_count) {
                     return Err(Error::new(EINVAL));
                 }
 
-                funmap.remove(&grant);
+                funmap.remove(&grant_page);
 
-                let user = Region::new(user_base, grant.size());
+                let grant_span = PageSpan::new(grant_page, page_count);
+                let user_span = PageSpan::new(user_page, page_count);
 
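+                // A partial unmap splits the remembered mapping: pages before
+                // or after the requested span are re-inserted, rebasing the
+                // user page where needed.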
-                if let Some(before) = grant.before(grant_requested) {
-                    funmap.insert(before, user_base);
+                if let Some(before) = grant_span.before(requested_span) {
+                    funmap.insert(before.base, (before.count, user_page));
                 }
-                if let Some(after) = grant.after(grant_requested) {
-                    let start = grant.rebase(user, after.start_address());
-                    funmap.insert(after, start);
+                if let Some(after) = grant_span.after(requested_span) {
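+                    // `rebase` maps a page within `grant_span` to the page at
+                    // the same offset within `user_span`.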
+                    let start = grant_span.rebase(user_span, after.base);
+                    funmap.insert(after.base, (after.count, start));
                 }
 
-                Some(grant.rebase(user, grant_address).data())
+                Some(grant_span.rebase(user_span, requested_span.base).start_address().data())
             } else {
                 None
             }
-
         };
         if let Some(user_address) = address_opt {
             inner.call(SYS_FUNMAP, user_address, size, 0)
diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs
index cdc17b52123391ca4f7b41a72bb85515bc9aa8ee..c5f6e663c40be4332cb4401e587b3c1481048b68 100644
--- a/src/syscall/driver.rs
+++ b/src/syscall/driver.rs
@@ -1,3 +1,5 @@
+use core::num::NonZeroUsize;
+
 use crate::interrupt::InterruptStack;
 use crate::memory::{allocate_frames_complex, deallocate_frames, Frame, PAGE_SIZE};
 use crate::paging::{PhysicalAddress, VirtualAddress};
diff --git a/src/syscall/fs.rs b/src/syscall/fs.rs
index 2f6e185fb69e42ca45410d94f715afd115b3e5ba..29cb1280262e213c7e3fd6b5ce91d2cfe723d046 100644
--- a/src/syscall/fs.rs
+++ b/src/syscall/fs.rs
@@ -3,9 +3,9 @@ use alloc::sync::Arc;
 use spin::RwLock;
 
 use crate::context::file::{FileDescriptor, FileDescription};
-use crate::context::memory::AddrSpace;
 use crate::context;
-use crate::memory::PAGE_SIZE;
+use crate::context::memory::PageSpan;
+use crate::paging::{PAGE_SIZE, VirtualAddress};
 use crate::scheme::{self, FileHandle, OpenResult, current_caller_ctx, KernelScheme, SchemeId};
 use crate::syscall::data::Stat;
 use crate::syscall::error::*;
@@ -392,10 +392,9 @@ pub fn funmap(virtual_address: usize, length: usize) -> Result<usize> {
         log::warn!("funmap passed length {:#x} instead of {:#x}", length, length_aligned);
     }
 
-    let (page, page_count) = crate::syscall::validate_region(virtual_address, length_aligned)?;
-
     let addr_space = Arc::clone(context::current()?.read().addr_space()?);
-    AddrSpace::munmap(addr_space.write(), page, page_count);
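+    // The span is validated against the page-aligned length computed above.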
+    let span = PageSpan::validate_nonempty(VirtualAddress::new(virtual_address), length_aligned).ok_or(Error::new(EINVAL))?;
+    addr_space.write().munmap(span);
 
     Ok(0)
 }
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index 1625b9e1735b9c51ce66896d75b4c74db090c2ae..02dd36c4f7c70eaf59912f2993f3a8460bd46818 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -6,6 +6,7 @@ use core::mem;
 
 use spin::{RwLock, RwLockWriteGuard};
 
+use crate::context::memory::PageSpan;
 use crate::context::{Context, ContextId, memory::AddrSpace, WaitpidKey};
 
 use crate::Bootstrap;
@@ -290,10 +291,9 @@ pub fn kill(pid: ContextId, sig: usize) -> Result<usize> {
 pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result<usize> {
     // println!("mprotect {:#X}, {}, {:#X}", address, size, flags);
 
-    if address % PAGE_SIZE != 0 || size % PAGE_SIZE != 0 { return Err(Error::new(EINVAL)); }
-    if address.saturating_add(size) > crate::USER_END_OFFSET { return Err(Error::new(EFAULT)); }
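+    // `validate_nonempty` subsumes the explicit alignment and user-space
+    // bounds checks; failures all map to EINVAL.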
+    let span = PageSpan::validate_nonempty(VirtualAddress::new(address), size).ok_or(Error::new(EINVAL))?;
 
-    AddrSpace::current()?.write().mprotect(Page::containing_address(VirtualAddress::new(address)), size / PAGE_SIZE, flags).map(|()| 0)
+    AddrSpace::current()?.write().mprotect(span, flags).map(|()| 0)
 }
 
 pub fn setpgid(pid: ContextId, pgid: ContextId) -> Result<usize> {
@@ -587,6 +587,7 @@ pub unsafe fn usermode_bootstrap(bootstrap: &Bootstrap) -> ! {
             .read().addr_space()
             .expect("expected bootstrap context to have an address space"));
 
+        // TODO: Use AddrSpace::mmap.
         let mut addr_space = addr_space.write();
         let addr_space = &mut *addr_space;