diff --git a/rmm b/rmm
index c66956ca2ac1b4df3904afbafe53321a4b6c50af..32caee3095e54a8d9181841fe48d9ea8d43f4e6c 160000
--- a/rmm
+++ b/rmm
@@ -1 +1 @@
-Subproject commit c66956ca2ac1b4df3904afbafe53321a4b6c50af
+Subproject commit 32caee3095e54a8d9181841fe48d9ea8d43f4e6c
diff --git a/src/arch/x86_64/mod.rs b/src/arch/x86_64/mod.rs
index 7343d45ae96b045dae7adb042dbf183f664d1253..88e50734d8491511a6bf11eba7774281f9a4b46b 100644
--- a/src/arch/x86_64/mod.rs
+++ b/src/arch/x86_64/mod.rs
@@ -41,6 +41,9 @@ pub mod start;
 /// Stop function
 pub mod stop;
 
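+/// The rmm `Arch` implementation for x86_64, referenced elsewhere as `crate::CurrentRmmArch`.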
+pub use ::rmm::X8664Arch as CurrentRmmArch;
+
 // Flags
 pub mod flags {
     pub const FLAG_SINGLESTEP: usize = 1 << 8;
diff --git a/src/syscall/futex.rs b/src/syscall/futex.rs
index 8d3582e5fb99c713ca46be971a46fdc365d8ea19..a130e12b00f6622666fb5300acb1addb80bd522b 100644
--- a/src/syscall/futex.rs
+++ b/src/syscall/futex.rs
@@ -7,14 +7,16 @@ use alloc::collections::VecDeque;
 use core::intrinsics;
 use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
 
+use rmm::Arch;
+
 use crate::context::{self, Context};
 use crate::time;
 use crate::memory::PhysicalAddress;
 use crate::paging::{ActivePageTable, TableKind, VirtualAddress};
 use crate::syscall::data::TimeSpec;
 use crate::syscall::error::{Error, Result, ESRCH, EAGAIN, EFAULT, EINVAL};
-use crate::syscall::flag::{FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE};
-use crate::syscall::validate::{validate_slice, validate_slice_mut};
+use crate::syscall::flag::{FUTEX_WAIT, FUTEX_WAIT64, FUTEX_WAKE, FUTEX_REQUEUE};
+use crate::syscall::validate::{validate_array, validate_slice, validate_slice_mut};
 
 type FutexList = VecDeque<FutexEntry>;
 
@@ -41,23 +43,30 @@ pub fn futexes_mut() -> RwLockWriteGuard<'static, FutexList> {
     FUTEXES.call_once(init_futexes).write()
 }
 
-// FIXME: Don't take a mutable reference to the addr, since rustc can make assumptions that the
-// pointee cannot be changed by another thread, which could make atomic ops useless.
-pub fn futex(addr: &mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32) -> Result<usize> {
+pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> Result<usize> {
     let target_physaddr = unsafe {
         let active_table = ActivePageTable::new(TableKind::User);
-        let virtual_address = VirtualAddress::new(addr as *mut i32 as usize);
+        let virtual_address = VirtualAddress::new(addr);
+
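+        // Reject invalid (e.g. non-canonical) and kernel-space addresses before translating.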
+        if !crate::CurrentRmmArch::virt_is_valid(virtual_address) || crate::CurrentRmmArch::virt_kind(virtual_address) == TableKind::Kernel {
+            return Err(Error::new(EFAULT));
+        }
 
-        // FIXME: Already validated in syscall/mod.rs
         active_table.translate(virtual_address).ok_or(Error::new(EFAULT))?
     };
 
     match op {
-        FUTEX_WAIT => {
-            let timeout_opt = if val2 != 0 {
-                Some(validate_slice(val2 as *const TimeSpec, 1).map(|req| &req[0])?)
-            } else {
+        // TODO: FUTEX_WAIT_MULTIPLE?
+        FUTEX_WAIT | FUTEX_WAIT64 => {
+            let timeout_ptr = val2 as *const TimeSpec;
+
+            let timeout_opt = if timeout_ptr.is_null() {
                 None
+            } else {
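+                // The array length (N = 1) is inferred from the destructuring pattern.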
+                let [timeout] = unsafe { *validate_array(timeout_ptr)? };
+                Some(timeout)
             };
 
             {
@@ -69,7 +78,23 @@ pub fn futex(addr: &mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32)
                     Arc::clone(&context_lock)
                 };
 
-                if unsafe { intrinsics::atomic_load(addr) != val } {
+                // TODO: Is the implicit SeqCst ordering too strong here?
+                let (fetched, expected) = if op == FUTEX_WAIT {
+                    // The address must be naturally aligned; an unaligned load could cross a page
+                    // boundary and bypass the (simpler) validation we did in the first place.
+                    if addr % 4 != 0 {
+                        return Err(Error::new(EINVAL));
+                    }
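+                    // Widen both sides to u64 so the two futex sizes share one comparison below.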
+                    (u64::from(unsafe { intrinsics::atomic_load::<u32>(addr as *const u32) }), u64::from(val as u32))
+                } else {
+                    // op == FUTEX_WAIT64
+                    if addr % 8 != 0 {
+                        return Err(Error::new(EINVAL));
+                    }
+                    (unsafe { intrinsics::atomic_load::<u64>(addr as *const u64) }, val as u64)
+                };
+                if fetched != expected {
                     return Err(Error::new(EAGAIN));
                 }
 
@@ -116,15 +141,18 @@ pub fn futex(addr: &mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32)
                 let mut futexes = futexes_mut();
 
                 let mut i = 0;
-                while i < futexes.len() && (woken as i32) < val {
-                    if futexes[i].target_physaddr == target_physaddr {
-                        if let Some(futex) = futexes.swap_remove_back(i) {
-                            let mut context_guard = futex.context_lock.write();
-                            context_guard.unblock();
-                            woken += 1;
-                        }
-                    } else {
+
+                // TODO: Use retain, once it allows the closure to tell it to stop iterating...
+                while i < futexes.len() && woken < val {
+                    if futexes[i].target_physaddr != target_physaddr {
                         i += 1;
+                        continue;
                     }
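+                    // swap_remove_back is O(1) but does not preserve the order of waiters.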
+                    if let Some(futex) = futexes.swap_remove_back(i) {
+                        let mut context_guard = futex.context_lock.write();
+                        context_guard.unblock();
+                        woken += 1;
                     }
                 }
             }
@@ -133,12 +161,13 @@ pub fn futex(addr: &mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32)
         },
         FUTEX_REQUEUE => {
             let addr2_physaddr = unsafe {
-                let active_table = ActivePageTable::new(TableKind::User);
+                let addr2_virt = VirtualAddress::new(addr2);
 
-                let addr2_safe = validate_slice_mut(addr2, 1).map(|addr2_safe| &mut addr2_safe[0])?;
-                let addr2_virt = VirtualAddress::new(addr2_safe as *mut i32 as usize);
+                if !crate::CurrentRmmArch::virt_is_valid(addr2_virt) || crate::CurrentRmmArch::virt_kind(addr2_virt) == TableKind::Kernel {
+                    return Err(Error::new(EFAULT));
+                }
 
-                // FIXME: Already validated.
+                let active_table = ActivePageTable::new(TableKind::User);
                 active_table.translate(addr2_virt).ok_or(Error::new(EFAULT))?
             };
 
@@ -149,22 +178,25 @@ pub fn futex(addr: &mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32)
                 let mut futexes = futexes_mut();
 
                 let mut i = 0;
-                while i < futexes.len() && (woken as i32) < val {
-                    if futexes[i].target_physaddr == target_physaddr {
-                        if let Some(futex) = futexes.swap_remove_back(i) {
-                            futex.context_lock.write().unblock();
-                            woken += 1;
-                        }
-                    } else {
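+                // First wake up to val waiters, then requeue up to val2 of the remaining ones.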
+                while i < futexes.len() && woken < val {
+                    if futexes[i].target_physaddr != target_physaddr {
                         i += 1;
+                        continue;
                     }
+                    if let Some(futex) = futexes.swap_remove_back(i) {
+                        futex.context_lock.write().unblock();
+                        woken += 1;
+                    }
                 }
                 while i < futexes.len() && requeued < val2 {
-                    if futexes[i].target_physaddr == target_physaddr {
-                        futexes[i].target_physaddr = addr2_physaddr;
-                        requeued += 1;
+                    if futexes[i].target_physaddr != target_physaddr {
+                        i += 1;
+                        continue;
                     }
-                    i += 1;
+                    futexes[i].target_physaddr = addr2_physaddr;
+                    requeued += 1;
+                    i += 1;
                 }
             }
 
diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs
index 5792cd8851be55b3a0376df6c90f323d070dbe40..b90dd54d6e6dc0d9b97c686e8f675ed1bfcf74a1 100644
--- a/src/syscall/mod.rs
+++ b/src/syscall/mod.rs
@@ -117,7 +117,8 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
                     }
                 ),
                 SYS_CLOCK_GETTIME => clock_gettime(b, validate_slice_mut(c as *mut TimeSpec, 1).map(|time| &mut time[0])?),
-                SYS_FUTEX => futex(validate_slice_mut(b as *mut i32, 1).map(|uaddr| &mut uaddr[0])?, c, d as i32, e, f as *mut i32),
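+                // futex() validates the pointer itself; the width to check depends on the op.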
+                SYS_FUTEX => futex(b, c, d, e, f),
                 SYS_GETPID => getpid().map(ContextId::into),
                 SYS_GETPGID => getpgid(ContextId::from(b)).map(ContextId::into),
                 SYS_GETPPID => getppid().map(ContextId::into),
diff --git a/src/syscall/validate.rs b/src/syscall/validate.rs
index ae99a9503618e52aac60fbc3d479327ebd6a7e65..883f01c3770648af03866fcf1d8840ad658605f1 100644
--- a/src/syscall/validate.rs
+++ b/src/syscall/validate.rs
@@ -41,9 +41,27 @@ pub fn validate_slice<T>(ptr: *const T, len: usize) -> Result<&'static [T]> {
         Ok(unsafe { slice::from_raw_parts(ptr, len) })
     }
 }
+/// Convert a pointer into a reference to an array of compile-time-known length, if valid.
+// TODO: This is probably also quite unsafe, mainly because without very careful checking we
+// cannot know that this upholds the rules LLVM relies on for shared references, namely that
+// the pointee cannot be changed by others. The eventual fix is likely atomic volatile access.
+pub unsafe fn validate_array<'a, T, const N: usize>(ptr: *const T) -> Result<&'a [T; N]> {
+    validate(ptr as usize, mem::size_of::<T>() * N, false)?;
+    Ok(&*ptr.cast::<[T; N]>())
+}
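+/// Convert a pointer into a mutable reference to an array of compile-time-known length, if valid.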
+pub unsafe fn validate_array_mut<'a, T, const N: usize>(ptr: *mut T) -> Result<&'a mut [T; N]> {
+    validate(ptr as usize, mem::size_of::<T>() * N, true)?;
+    Ok(&mut *ptr.cast::<[T; N]>())
+}
 
 /// Convert a pointer and length to slice, if valid
-//TODO: Mark unsafe
+// TODO: Mark unsafe
+//
+// FIXME: This is probably never ever safe, except under very special circumstances. Any &mut
+// reference will allow LLVM to assume that nobody else will ever modify this value, which is
+// certainly not the case for multithreaded userspace programs. Instead, we will want something
+// like atomic volatile.
 pub fn validate_slice_mut<T>(ptr: *mut T, len: usize) -> Result<&'static mut [T]> {
     if len == 0 {
         Ok(&mut [])
diff --git a/syscall b/syscall
index 52fcd238db87354b6556eac9b05bb8ab2658426a..fa5bc90eadda43d72d3fe8e00415ec63a7cfc2ea 160000
--- a/syscall
+++ b/syscall
@@ -1 +1 @@
-Subproject commit 52fcd238db87354b6556eac9b05bb8ab2658426a
+Subproject commit fa5bc90eadda43d72d3fe8e00415ec63a7cfc2ea