diff --git a/src/context/arch/x86_64.rs b/src/context/arch/x86_64.rs
index fa108a0a7165f6f089cd0e15b12f3af3503c83d7..2517d26890c9b9f565bd9045b9735b32a79434e9 100644
--- a/src/context/arch/x86_64.rs
+++ b/src/context/arch/x86_64.rs
@@ -1,5 +1,5 @@
 use core::mem;
-use core::sync::atomic::AtomicBool;
+use core::sync::atomic::{AtomicBool, Ordering};
 use syscall::data::FloatRegisters;
 
 /// This must be used by the kernel to ensure that context switches are done atomically
@@ -127,6 +127,7 @@ impl Context {
     }
 
     /// Switch to the next context by restoring its stack and registers
+    /// NOTE: Check the generated disassembly after modifying this function; it is naked, so the compiler emits no prologue or epilogue.
     #[cold]
     #[inline(never)]
     #[naked]
@@ -167,6 +168,9 @@ impl Context {
 
         asm!("mov $0, rbp" : "=r"(self.rbp) : : "memory" : "intel", "volatile");
         asm!("mov rbp, $0" : : "r"(next.rbp) : "memory" : "intel", "volatile");
+
+        // Unset the global lock after the next context's registers are loaded, but before execution switches into it
+        CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
     }
 }
 
diff --git a/src/context/switch.rs b/src/context/switch.rs
index 34fdd23a7f72e20cbc1ed35f817ae72733d45fa2..6e10825fcab4326d10a02bbf88d758b35740b773 100644
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -156,9 +156,6 @@ pub unsafe fn switch() -> bool {
 
         (*from_ptr).arch.switch_to(&mut (*to_ptr).arch);
 
-        // Unset global lock after switch
-        arch::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
-
         true
     }
 }
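
Note on the change (the sketch below uses illustrative names, not the kernel's actual scheduler code): since switch_to restores the next context's stack, anything placed after the switch_to call in switch() runs in whichever context resumes there, and presumably a context entering its kernel stack for the first time would never reach that unlock. Releasing CONTEXT_SWITCH_LOCK at the tail of switch_to itself, once the next context's registers are loaded, drops the lock on every path. A minimal, self-contained sketch of the pattern, assuming a compare_exchange-style acquire on the scheduler side:

    use core::sync::atomic::{AtomicBool, Ordering};

    // Illustrative stand-in for the kernel's CONTEXT_SWITCH_LOCK.
    static CONTEXT_SWITCH_LOCK: AtomicBool = AtomicBool::new(false);

    // Hypothetical acquire side, as switch() might take the lock:
    // spin until the flag atomically flips from false to true.
    fn lock_context_switch() {
        while CONTEXT_SWITCH_LOCK
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {}
    }

    // After this patch, the release lives at the tail of switch_to():
    // every context that is switched into starts with the lock free.
    unsafe fn switch_to_tail() {
        // ... the next context's registers have been loaded above ...
        CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
    }

    fn main() {
        lock_context_switch();
        unsafe { switch_to_tail() };
        assert!(!CONTEXT_SWITCH_LOCK.load(Ordering::SeqCst));
    }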