diff --git a/src/arch/x86_64/interrupt/exception.rs b/src/arch/x86_64/interrupt/exception.rs
index d67ef9fc025a41e095c1a7c5b63471a53148a41c..8f7837b0a15103144cba8992394461f571a1113c 100644
--- a/src/arch/x86_64/interrupt/exception.rs
+++ b/src/arch/x86_64/interrupt/exception.rs
@@ -135,42 +135,22 @@ interrupt_error!(protection, |stack| {
 });
 
 interrupt_error!(page, |stack| {
-    let cr2 = unsafe { x86::controlregs::cr2() };
+    let cr2 = VirtualAddress::new(unsafe { x86::controlregs::cr2() });
     let flags = PageFaultError::from_bits_truncate(stack.code as u32);
 
-    extern "C" {
-        static __usercopy_start: u8;
-        static __usercopy_end: u8;
-    }
-    let usercopy_region = (&__usercopy_start as *const u8 as usize)..(&__usercopy_end as *const u8 as usize);
-
-    // TODO: Most likely not necessary, but maybe also check that cr2 is not too close to USER_END.
-    let address_is_user = VirtualAddress::new(cr2).kind() == TableKind::User;
-
-    let invalid_page_tables = flags.contains(PageFaultError::RSVD);
-    let caused_by_user = flags.contains(PageFaultError::US);
-    let caused_by_instr_fetch = flags.contains(PageFaultError::ID);
-
-    if address_is_user && !caused_by_user && !caused_by_instr_fetch && !invalid_page_tables && usercopy_region.contains(&{ stack.inner.iret.rip }) {
-        // We were inside a usercopy function that failed. This is handled by setting rax to a
-        // nonzero value, and emulating the ret instruction.
-        stack.inner.scratch.rax = 1;
-        let ret_addr = unsafe { (stack.inner.iret.rsp as *const usize).read() };
-        stack.inner.iret.rsp += 8;
-        stack.inner.iret.rip = ret_addr;
-        stack.inner.iret.rflags &= !(1 << 18);
-        return;
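+    // Let the paging code try to resolve the fault (e.g. a usercopy failure or a
+    // not-yet-mapped grant); if it cannot, dump diagnostics and raise SIGSEGV.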
+    if crate::paging::page_fault_handler(&mut stack.inner, flags, cr2).is_err() {
+        println!("Page fault: {:>016X}", cr2.data());
+        println!("  Present: {}", flags.contains(PageFaultError::P));
+        println!("  Write: {}", flags.contains(PageFaultError::WR));
+        println!("  User: {}", flags.contains(PageFaultError::US));
+        println!("  Reserved write: {}", flags.contains(PageFaultError::RSVD));
+        println!("  Instruction fetch: {}", flags.contains(PageFaultError::ID));
+        stack.dump();
+        stack_trace();
+        ksignal(SIGSEGV);
     }
-
-    println!("Page fault: {:>016X}", cr2);
-    println!("  Present: {}", flags.contains(PageFaultError::P));
-    println!("  Write: {}", flags.contains(PageFaultError::WR));
-    println!("  User: {}", flags.contains(PageFaultError::US));
-    println!("  Reserved write: {}", flags.contains(PageFaultError::RSVD));
-    println!("  Instruction fetch: {}", flags.contains(PageFaultError::ID));
-    stack.dump();
-    stack_trace();
-    ksignal(SIGSEGV);
 });
 
 interrupt_stack!(fpu_fault, |stack| {
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index d523dd0a78a115fe4b7c44f47b5ae78b1511f7df..96cc31b65b16e92853ae13b558e73b1b73a625f7 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -3,6 +3,7 @@
 
 use core::fmt::Debug;
 
+use x86::irq::PageFaultError;
 use x86::msr;
 
 pub use rmm::{
@@ -16,6 +17,8 @@ pub use rmm::{
 pub use super::CurrentRmmArch as RmmA;
 
 pub type PageMapper = rmm::PageMapper<RmmA, crate::arch::rmm::LockedAllocator>;
+use crate::context::memory::AddrSpace;
+use crate::interrupt::InterruptStack;
 pub use crate::rmm::KernelMapper;
 
 pub mod entry;
@@ -137,3 +140,109 @@ pub fn round_down_pages(number: usize) -> usize {
 pub fn round_up_pages(number: usize) -> usize {
     number.next_multiple_of(PAGE_SIZE)
 }
+pub struct Segv;
+
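+/// Try to handle a page fault. Returns `Ok(())` if the fault was handled and execution
+/// can resume, or `Err(Segv)` if it could not be resolved.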
+pub fn page_fault_handler(stack: &mut InterruptStack, code: PageFaultError, faulting_address: VirtualAddress) -> Result<(), Segv> {
+    let faulting_page = Page::containing_address(faulting_address);
+
+    extern "C" {
+        static __usercopy_start: u8;
+        static __usercopy_end: u8;
+    }
+    let usercopy_region = unsafe { (&__usercopy_start as *const u8 as usize)..(&__usercopy_end as *const u8 as usize) };
+
+    // TODO: Most likely not necessary, but maybe also check that cr2 is not too close to USER_END.
+    let address_is_user = faulting_address.kind() == TableKind::User;
+
+    let invalid_page_tables = code.contains(PageFaultError::RSVD);
+    let caused_by_user = code.contains(PageFaultError::US);
+    let caused_by_kernel = !caused_by_user;
+    let caused_by_write = code.contains(PageFaultError::WR);
+    let caused_by_instr_fetch = code.contains(PageFaultError::ID);
+
+    let mode = match (caused_by_write, caused_by_instr_fetch) {
+        (true, false) => AccessMode::Write,
+        (false, false) => AccessMode::Read,
+        (false, true) => AccessMode::InstrFetch,
+        (true, true) => unreachable!("page fault cannot be caused by both instruction fetch and write"),
+    };
+
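+    // A reserved bit was set in a page-table entry, meaning the tables themselves are
+    // corrupt; there is nothing sensible to recover here.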
+    if invalid_page_tables {
+        // TODO: Better error code than Segv?
+        return Err(Segv);
+    }
+
+    if address_is_user && caused_by_kernel && mode != AccessMode::InstrFetch && usercopy_region.contains(&{ stack.iret.rip }) {
+        // We were inside a usercopy function that failed. This is handled by setting rax to a
+        // nonzero value, and emulating the ret instruction.
+        stack.scratch.rax = 1;
+        let ret_addr = unsafe { (stack.iret.rsp as *const usize).read() };
+        stack.iret.rsp += 8;
+        stack.iret.rip = ret_addr;
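+        // Clear the AC flag (RFLAGS bit 18), which the usercopy routines presumably set
+        // with `stac` to temporarily bypass SMAP while accessing userspace.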
+        stack.iret.rflags &= !(1 << 18);
+        return Ok(());
+    }
+
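+    // A userspace access faulted on a userspace address; it may have hit a grant that is
+    // valid but not yet mapped, so give demand paging a chance before signalling.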
+    if address_is_user && caused_by_user {
+        if try_correcting_page_tables(faulting_page, mode) {
+            return Ok(());
+        }
+    }
+
+    Err(Segv)
+}
+
+#[derive(PartialEq)]
+enum AccessMode {
+    Read,
+    Write,
+    InstrFetch,
+}
+
+fn try_correcting_page_tables(faulting_page: Page, access: AccessMode) -> bool {
+    let Ok(addr_space) = AddrSpace::current() else {
+        log::warn!("User page fault without address space being set.");
+        return false;
+    };
+
+    let mut addr_space = addr_space.write();
+
+    let Some((_, grant_info)) = addr_space.grants.contains(faulting_page) else {
+        return false;
+    };
+    let grant_flags = grant_info.flags();
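+    // Refuse the access outright if the grant's flags do not permit it.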
+    match access {
+        // TODO: has_read
+        AccessMode::Read => (),
+
+        AccessMode::Write if !grant_flags.has_write() => return false,
+        AccessMode::InstrFetch if !grant_flags.has_execute() => return false,
+
+        _ => (),
+    }
+
+    // By now, the memory at the faulting page is actually valid, but simply not yet mapped.
+    // TODO: Readahead
+
+    let Some(flush) = (unsafe { addr_space.table.utable.map(faulting_page.start_address(), grant_flags) }) else {
+        // TODO
+        return false;
+    };
+
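+    // Flush the TLB entry for the page we just mapped.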
+    flush.flush();
+
+    true
+}
diff --git a/src/context/memory.rs b/src/context/memory.rs
index a9a224118a0905fa750beccf1349fda688a2953f..620ba3d1cd4840278fb04b6d94e0a8f59efbbf1f 100644
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@ -506,6 +506,7 @@ impl UserGrants {
 pub struct GrantInfo {
     page_count: usize,
     flags: PageFlags<RmmA>,
+    // TODO: Rename to unmapped?
     mapped: bool,
     pub(crate) owned: bool,
     //TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the context?
@@ -551,11 +552,8 @@ impl Grant {
         })
     }
     pub fn zeroed(dst: Page, page_count: usize, flags: PageFlags<RmmA>, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
-        // TODO: Unmap partially in case of ENOMEM
-        for page in Page::range_exclusive(dst, dst.next_by(page_count)) {
-            let flush = unsafe { mapper.map(page.start_address(), flags) }.ok_or(Enomem)?;
-            flusher.consume(flush);
-        }
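+        // The pages are no longer mapped eagerly here; they are faulted in on first
+        // access by the page fault handler instead.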
         Ok(Grant { base: dst, info: GrantInfo { page_count, flags, mapped: true, owned: true, desc_opt: None } })
     }
     pub fn borrow(src_base: Page, dst_base: Page, page_count: usize, flags: PageFlags<RmmA>, desc_opt: Option<GrantFileRef>, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {