diff --git a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S
index c368bae58b0349bb85b950bc1f7aee1dc77b6b67..2241348397ad149e2b5cea03057881b1727075be 100644
--- a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S
+++ b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S
@@ -10,92 +10,92 @@ exception_vector_base:
     .align 7
 __vec_00:
     mov     x18, #0xb0b0
-    wfi
-    b __vec_00
+    b       do_exception_synchronous
+    b       __vec_00
 
     .align 7
 __vec_01:
     mov     x18, #0xb0b1
-    wfi
-    b __vec_01
+    b       do_exception_irq
+    b       __vec_01
 
     .align 7
 __vec_02:
     mov     x18, #0xb0b2
-    wfi
-    b __vec_02
+    b       do_exception_unhandled
+    b       __vec_02
 
     .align 7
 __vec_03:
     mov     x18, #0xb0b3
-    wfi
-    b __vec_03
+    b       do_exception_unhandled
+    b       __vec_03
 
     .align 7
 __vec_04:
-    b       do_report_exception
-    wfi
-    b __vec_04
+    mov     x18, #0xb0b4
+    b       do_exception_synchronous
+    b       __vec_04
 
     .align 7
 __vec_05:
-    b       do_irq                              // First level interrupt handler
-    wfi
-    b __vec_05
+    mov     x18, #0xb0b5
+    b       do_exception_irq
+    b       __vec_05
 
     .align 7
 __vec_06:
     mov     x18, #0xb0b6
-    wfi
-    b __vec_06
+    b       do_exception_unhandled
+    b       __vec_06
 
     .align 7
 __vec_07:
     mov     x18, #0xb0b7
-    wfi
-    b __vec_07
+    b       do_exception_unhandled
+    b       __vec_07
 
     .align 7
 __vec_08:
-    b       do_syscall                          // Syscall handler
-    wfi
-    b __vec_08
+    mov     x18, #0xb0b8
+    b       do_exception_synchronous
+    b       __vec_08
 
     .align 7
 __vec_09:
-    b       do_irq                              // First level interrupt handler
-    wfi
-    b __vec_09
+    mov     x18, #0xb0b9
+    b       do_exception_irq
+    b       __vec_09
 
     .align 7
 __vec_10:
-    mov     x18, #0xb0bb
-    wfi
-    b __vec_10
+    mov     x18, #0xb0ba
+    b       do_exception_unhandled
+    b       __vec_10
 
     .align 7
 __vec_11:
-    mov     x18, #0xb0bc
-    wfi
-    b __vec_11
+    mov     x18, #0xb0bb
+    b       do_exception_unhandled
+    b       __vec_11
 
     .align 7
 __vec_12:
-    mov     x18, #0xb0bd
-    wfi
-    b __vec_12
+    mov     x18, #0xb0bc
+    b       do_exception_unhandled
+    b       __vec_12
 
     .align 7
 __vec_13:
-    mov     x18, #0xb0be
-    wfi
-    b __vec_13
+    mov     x18, #0xb0bd
+    b       do_exception_unhandled
+    b       __vec_13
 
     .align 7
 __vec_14:
-    mov     x18, #0xb0bf
-    wfi
-    b __vec_14
+    mov     x18, #0xb0be
+    b       do_exception_unhandled
+    b       __vec_14
 
     .align 7
 exception_vector_end:
diff --git a/src/arch/aarch64/interrupt/irq.rs b/src/arch/aarch64/interrupt/irq.rs
index 72de8c1278c21c7dee72e635fe66c5f3e62cb9a8..1ad13f6f28b2b6a495218ba5bef936986495e0c2 100644
--- a/src/arch/aarch64/interrupt/irq.rs
+++ b/src/arch/aarch64/interrupt/irq.rs
@@ -12,7 +12,7 @@ pub static PIT_TICKS: AtomicUsize = ATOMIC_USIZE_INIT;
 
 #[naked]
 #[no_mangle]
-pub unsafe extern fn do_irq() {
+pub unsafe extern fn do_exception_irq() {
     #[inline(never)]
     unsafe fn inner() {
         irq_demux();
diff --git a/src/arch/aarch64/interrupt/syscall.rs b/src/arch/aarch64/interrupt/syscall.rs
index 26f47a0051e2c2f0dc225b933013c0715b04c64f..42b64ffa0c1ca2209185dbe699d2ccc34da69732 100644
--- a/src/arch/aarch64/interrupt/syscall.rs
+++ b/src/arch/aarch64/interrupt/syscall.rs
@@ -3,9 +3,91 @@ use crate::syscall;
 
 #[naked]
 #[no_mangle]
-pub unsafe extern fn do_syscall() {
+pub unsafe extern fn do_exception_unhandled() {
     #[inline(never)]
     unsafe fn inner(stack: &mut InterruptStack) -> usize {
+        println!("do_exception_unhandled: ELR: 0x{:016x}", stack.elr_el1);
+        loop {}
+    }
+
+    llvm_asm!("str	    x0, [sp, #-8]!
+          str	    x1, [sp, #-8]!
+          str	    x2, [sp, #-8]!
+          str	    x3, [sp, #-8]!
+          str	    x4, [sp, #-8]!
+          str	    x5, [sp, #-8]!
+          str	    x6, [sp, #-8]!
+          str	    x7, [sp, #-8]!
+          str	    x8, [sp, #-8]!
+          str	    x9, [sp, #-8]!
+          str	    x10, [sp, #-8]!
+          str	    x11, [sp, #-8]!
+          str	    x12, [sp, #-8]!
+          str	    x13, [sp, #-8]!
+          str	    x14, [sp, #-8]!
+          str	    x15, [sp, #-8]!
+          str	    x16, [sp, #-8]!
+          str	    x17, [sp, #-8]!
+          str	    x18, [sp, #-8]!
+          str	    x19, [sp, #-8]!
+          str	    x20, [sp, #-8]!
+          str	    x21, [sp, #-8]!
+          str	    x22, [sp, #-8]!
+          str	    x23, [sp, #-8]!
+          str	    x24, [sp, #-8]!
+          str	    x25, [sp, #-8]!
+          str	    x26, [sp, #-8]!
+          str	    x27, [sp, #-8]!
+          str	    x28, [sp, #-8]!
+          str	    x29, [sp, #-8]!
+          str	    x30, [sp, #-8]!
+
+          mrs       x18, sp_el0
+          str       x18, [sp, #-8]!
+
+          mrs       x18, esr_el1
+          str       x18, [sp, #-8]!
+
+          mrs       x18, spsr_el1
+          str       x18, [sp, #-8]!
+
+          mrs       x18, tpidrro_el0
+          str       x18, [sp, #-8]!
+
+          mrs       x18, tpidr_el0
+          str       x18, [sp, #-8]!
+
+          str       x18, [sp, #-8]!
+
+          mrs       x18, elr_el1
+          str       x18, [sp, #-8]!"
+    : : : : "volatile");
+
+    let sp: usize;
+    llvm_asm!("" : "={sp}"(sp) : : : "volatile");
+    llvm_asm!("mov x29, sp" : : : : "volatile");
+
+    let _ = inner(&mut *(sp as *mut InterruptStack));
+}
+
+#[naked]
+#[no_mangle]
+pub unsafe extern fn do_exception_synchronous() {
+    #[inline(never)]
+    unsafe fn inner(stack: &mut InterruptStack) -> usize {
+        let exception_code = (stack.esr_el1 & (0x3f << 26)) >> 26;
+        if exception_code != 0b010101 {
+            println!("do_exception_synchronous: Non-SVC!!!");
+            loop {}
+        } else {
+            println!("do_exception_synchronous: SVC: x8: 0x{:016x}", stack.scratch.x8);
+        }
+
+        llvm_asm!("nop": : : : "volatile");
+        llvm_asm!("nop": : : : "volatile");
+        llvm_asm!("nop": : : : "volatile");
+        llvm_asm!("nop": : : : "volatile");
+
         let fp;
         llvm_asm!("" : "={fp}"(fp) : : : "volatile");
 
@@ -183,15 +265,6 @@ pub struct SyscallStack {
 
 #[naked]
 pub unsafe extern fn clone_ret() {
-    llvm_asm!("ldp x29, x30, [sp], #16");
-    llvm_asm!("mov x0, 0");
-}
-
-/*
-#[naked]
-pub unsafe extern fn clone_ret() {
-    llvm_asm!("add sp, sp, #16");
-    llvm_asm!("ldp x29, x30, [sp], #16");
+    llvm_asm!("ldp x29, x30, [sp], #0x60");
     llvm_asm!("mov x0, 0");
 }
-*/
diff --git a/src/arch/aarch64/start.rs b/src/arch/aarch64/start.rs
index e4d3530f1043e544b008762fba51142f49a85258..13d1b2da7693464d02d76d7f33963e9b158fb60d 100644
--- a/src/arch/aarch64/start.rs
+++ b/src/arch/aarch64/start.rs
@@ -176,10 +176,8 @@ pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! {
 #[naked]
 pub unsafe fn usermode(ip: usize, sp: usize, arg: usize, singlestep: bool) -> ! {
     let cpu_id: usize = 0;
-    let uspace_tls_start = (crate::USER_TLS_OFFSET + crate::USER_TLS_SIZE * cpu_id);
     let spsr: u32 = 0;
 
-    llvm_asm!("msr   tpidr_el0, $0" : : "r"(uspace_tls_start) : : "volatile");
     llvm_asm!("msr   spsr_el1, $0" : : "r"(spsr) : : "volatile");
     llvm_asm!("msr   elr_el1, $0" : : "r"(ip) : : "volatile");
     llvm_asm!("msr   sp_el0, $0" : : "r"(sp) : : "volatile");
diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs
index ac74e220803f5f9d04d22fb985f133be4bbde60f..e759f0cef04f9c8bb07935c9099f53afae9d7331 100644
--- a/src/context/arch/aarch64.rs
+++ b/src/context/arch/aarch64.rs
@@ -14,6 +14,7 @@ pub struct Context {
     elr_el1: usize,
     sp_el0: usize,
     ttbr0_el1: usize,   /* Pointer to U4 translation table for this Context     */
+    ttbr1_el1: usize,   /* Pointer to P4 translation table for this Context     */
     tpidr_el0: usize,   /* Pointer to TLS region for this Context               */
     tpidrro_el0: usize, /* Pointer to TLS (read-only) region for this Context   */
     rflags: usize,
@@ -53,6 +54,7 @@ impl Context {
             elr_el1: 0,
             sp_el0: 0,
             ttbr0_el1: 0,
+            ttbr1_el1: 0,
             tpidr_el0: 0,
             tpidrro_el0: 0,
             rflags: 0,          /* spsr_el1 */
@@ -92,10 +94,14 @@ impl Context {
     pub fn set_fx(&mut self, _address: usize) {
     }
 
-    pub fn set_page_table(&mut self, address: usize) {
+    pub fn set_page_utable(&mut self, address: usize) {
         self.ttbr0_el1 = address;
     }
 
+    pub fn set_page_ktable(&mut self, address: usize) {
+        self.ttbr1_el1 = address;
+    }
+
     pub fn set_stack(&mut self, address: usize) {
         self.sp = address;
     }
diff --git a/src/context/list.rs b/src/context/list.rs
index 2e85ae8e150daa3df8f2e26e509231ff6ccc2432..47c6c9fe49950b86135d74fd1a8f65b9e8f4bdcd 100644
--- a/src/context/list.rs
+++ b/src/context/list.rs
@@ -92,12 +92,12 @@ impl ContextList {
             #[cfg(target_arch = "aarch64")]
             {
                 let context_id = context.id.into();
-                context.arch.set_tcb(context_id);
                 context.arch.set_lr(func as usize);
                 context.arch.set_context_handle();
             }
 
-            context.arch.set_page_table(unsafe { ActivePageTable::new(PageTableType::User).address() });
+            context.arch.set_page_utable(unsafe { ActivePageTable::new(PageTableType::User).address() });
+            context.arch.set_page_ktable(unsafe { ActivePageTable::new(PageTableType::Kernel).address() });
             context.arch.set_fx(fx.as_ptr() as usize);
             context.arch.set_stack(stack.as_ptr() as usize + offset);
             context.kfx = Some(fx);
diff --git a/src/context/switch.rs b/src/context/switch.rs
index 2e17b42821bfa510802bb622c3657567830fa137..634aff32c2aded32d00a2844aeb173ed71d2cfd3 100644
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -151,6 +151,11 @@ pub unsafe fn switch() -> bool {
             }
             gdt::set_tcb((*to_ptr).id.into());
         }
+        #[cfg(target_arch = "aarch64")]
+        {
+            let pid = (*to_ptr).id.into();
+            (*to_ptr).arch.set_tcb(pid);
+        }
         CONTEXT_ID.store((*to_ptr).id, Ordering::SeqCst);
     }
 
diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs
index d5e1485e6695e2ead5bba049e07b99260c5fdb52..779ac68f416c266d3e784147fc0dd9b2fae2d9f7 100644
--- a/src/syscall/mod.rs
+++ b/src/syscall/mod.rs
@@ -129,7 +129,8 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
                     #[cfg(target_arch = "aarch64")]
                     {
                         //TODO: CLONE_STACK
-                        clone(b, bp).map(ContextId::into)
+                        let ret = clone(b, bp).map(ContextId::into);
+                        ret
                     }
 
                     #[cfg(target_arch = "x86_64")]
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index 535f68d3072542733f1188a71306c101bd0a57f5..8518f99b90cc106e4750e84f0eccf914b0cb90dc 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -94,30 +94,42 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 kfx_opt = Some(new_fx);
             }
 
-            if let Some(ref stack) = context.kstack {
-                // Get the relative offset to the return address of the function
-                // obtaining `stack_base`.
-                //
-                // (base pointer - start of stack) - one
-                offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Add clone ret
-                let mut new_stack = stack.clone();
+            #[cfg(target_arch = "x86_64")]
+            {
+                if let Some(ref stack) = context.kstack {
+                    // Get the relative offset to the return address of the function
+                    // obtaining `stack_base`.
+                    //
+                    // (base pointer - start of stack) - one
+                    offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Add clone ret
+                    let mut new_stack = stack.clone();
 
-                unsafe {
-                    // Set clone's return value to zero. This is done because
-                    // the clone won't return like normal, which means the value
-                    // would otherwise never get set.
-                    #[cfg(target_arch = "x86_64")] // TODO
-                    if let Some(regs) = ptrace::rebase_regs_ptr_mut(context.regs, Some(&mut new_stack)) {
-                        (*regs).scratch.rax = 0;
+                    unsafe {
+                        // Set clone's return value to zero. This is done because
+                        // the clone won't return like normal, which means the value
+                        // would otherwise never get set.
+                        if let Some(regs) = ptrace::rebase_regs_ptr_mut(context.regs, Some(&mut new_stack)) {
+                            (*regs).scratch.rax = 0;
+                        }
+
+                        // Change the return address of the child (previously
+                        // syscall) to the arch-specific clone_ret callback
+                        let func_ptr = new_stack.as_mut_ptr().add(offset);
+                        *(func_ptr as *mut usize) = interrupt::syscall::clone_ret as usize;
                     }
 
-                    // Change the return address of the child (previously
-                    // syscall) to the arch-specific clone_ret callback
-                    let func_ptr = new_stack.as_mut_ptr().add(offset);
-                    *(func_ptr as *mut usize) = interrupt::syscall::clone_ret as usize;
+                    kstack_opt = Some(new_stack);
                 }
+            }
 
-                kstack_opt = Some(new_stack);
+            #[cfg(target_arch = "aarch64")]
+            {
+                if let Some(ref stack) = context.kstack {
+                    offset = stack_base - stack.as_ptr() as usize;
+                    let mut new_stack = stack.clone();
+
+                    kstack_opt = Some(new_stack);
+                }
             }
 
             if flags.contains(CLONE_VM) {
@@ -338,31 +350,39 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
 
             context.arch = arch;
 
-            let mut active_table = unsafe { ActivePageTable::new(PageTableType::User) };
+            let mut active_utable = unsafe { ActivePageTable::new(PageTableType::User) };
+            let mut active_ktable = unsafe { ActivePageTable::new(PageTableType::Kernel) };
 
-            let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));
+            let mut temporary_upage = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));
+            let mut temporary_kpage = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::KERNEL_TMP_MISC_OFFSET)));
 
-            let mut new_table = {
+            let mut new_utable = {
                 let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
-                InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+                InactivePageTable::new(frame, &mut active_utable, &mut temporary_upage)
             };
 
-            context.arch.set_page_table(unsafe { new_table.address() });
+            let mut new_ktable = {
+                let frame = allocate_frames(1).expect("no more frames in syscall::clone new_ktable")
+                InactivePageTable::new(frame, &mut active_ktable, &mut temporary_kpage)
+            };
+
+            context.arch.set_page_utable(unsafe { new_utable.address() });
+            context.arch.set_page_ktable(unsafe { new_ktable.address() });
 
             // Copy kernel image mapping
             {
-                let frame = active_table.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
-                let flags = active_table.p4()[crate::KERNEL_PML4].flags();
-                active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                let frame = active_ktable.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
+                let flags = active_ktable.p4()[crate::KERNEL_PML4].flags();
+                active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
                     mapper.p4_mut()[crate::KERNEL_PML4].set(frame, flags);
                 });
             }
 
             // Copy kernel heap mapping
             {
-                let frame = active_table.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
-                let flags = active_table.p4()[crate::KERNEL_HEAP_PML4].flags();
-                active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                let frame = active_ktable.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
+                let flags = active_ktable.p4()[crate::KERNEL_HEAP_PML4].flags();
+                active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
                     mapper.p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
                 });
             }
@@ -376,6 +396,10 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
             if let Some(stack) = kstack_opt.take() {
                 context.arch.set_stack(stack.as_ptr() as usize + offset);
                 context.kstack = Some(stack);
+                #[cfg(target_arch = "aarch64")]
+                {
+                    context.arch.set_lr(interrupt::syscall::clone_ret as usize);
+                }
             }
 
             // TODO: Clone ksig?
@@ -384,9 +408,9 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
             if flags.contains(CLONE_VM) {
                 // Copy user image mapping, if found
                 if ! image.is_empty() {
-                    let frame = active_table.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped");
-                    let flags = active_table.p4()[crate::USER_PML4].flags();
-                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                    let frame = active_utable.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped");
+                    let flags = active_utable.p4()[crate::USER_PML4].flags();
+                    active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
                         mapper.p4_mut()[crate::USER_PML4].set(frame, flags);
                     });
                 }
@@ -394,9 +418,9 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
 
                 // Copy grant mapping
                 if ! grants.lock().is_empty() {
-                    let frame = active_table.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped");
-                    let flags = active_table.p4()[crate::USER_GRANT_PML4].flags();
-                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                    let frame = active_utable.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped");
+                    let flags = active_utable.p4()[crate::USER_GRANT_PML4].flags();
+                    active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
                         mapper.p4_mut()[crate::USER_GRANT_PML4].set(frame, flags);
                     });
                 }
@@ -419,8 +443,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                     let start_page = Page::containing_address(VirtualAddress::new(start));
                     let end_page = Page::containing_address(VirtualAddress::new(end - 1));
                     for page in Page::range_inclusive(start_page, end_page) {
-                        let frame = active_table.translate_page(page).expect("kernel percpu not mapped");
-                        active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                        let frame = active_ktable.translate_page(page).expect("kernel percpu not mapped");
+                        active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
                             let result = mapper.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE);
                             // Ignore result due to operating on inactive table
                             unsafe { result.ignore(); }
@@ -432,7 +456,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 for memory_shared in image.iter_mut() {
                     memory_shared.with(|memory| {
                         let start = VirtualAddress::new(memory.start_address().data() - crate::USER_TMP_OFFSET + crate::USER_OFFSET);
-                        memory.move_to(start, &mut new_table, &mut temporary_page);
+                        memory.move_to(start, &mut new_utable, &mut temporary_upage);
                     });
                 }
                 context.image = image;
@@ -444,7 +468,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
 
                     for mut grant in old_grants.inner.into_iter() {
                         let start = VirtualAddress::new(grant.start_address().data() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
-                        grant.move_to(start, &mut new_table, &mut temporary_page);
+                        grant.move_to(start, &mut new_utable, &mut temporary_upage);
                         grants.insert(grant);
                     }
                 }
@@ -454,14 +478,14 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
             // Setup user stack
             if let Some(stack_shared) = stack_opt {
                 if flags.contains(CLONE_STACK) {
-                    let frame = active_table.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped");
-                    let flags = active_table.p4()[crate::USER_STACK_PML4].flags();
-                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                    let frame = active_utable.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped");
+                    let flags = active_utable.p4()[crate::USER_STACK_PML4].flags();
+                    active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
                         mapper.p4_mut()[crate::USER_STACK_PML4].set(frame, flags);
                     });
                 } else {
                     stack_shared.with(|stack| {
-                        stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
+                        stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_utable, &mut temporary_upage);
                     });
                 }
                 context.stack = Some(stack_shared);
@@ -469,7 +493,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
 
             // Setup user sigstack
             if let Some(mut sigstack) = sigstack_opt {
-                sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_table, &mut temporary_page);
+                sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_utable, &mut temporary_upage);
                 context.sigstack = Some(sigstack);
             }
 
@@ -482,13 +506,25 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 true
             );
 
+            #[cfg(target_arch = "aarch64")]
+            {
+                if let Some(stack) = &mut context.kstack {
+                    unsafe {
+                        let interrupt_stack_offset_from_stack_base = *(stack_base as *const u64) - stack_base as u64;
+                        let interrupt_stack = &mut *(stack.as_mut_ptr().add(offset + interrupt_stack_offset_from_stack_base as usize) as *mut crate::arch::interrupt::InterruptStack);
+                        interrupt_stack.tpidr_el0 = tcb_addr;
+                    }
+                }
+            }
+
+
             // Setup user TLS
             if let Some(mut tls) = tls_opt {
                 // Copy TLS mapping
                 {
-                    let frame = active_table.p4()[crate::USER_TLS_PML4].pointed_frame().expect("user tls not mapped");
-                    let flags = active_table.p4()[crate::USER_TLS_PML4].flags();
-                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                    let frame = active_utable.p4()[crate::USER_TLS_PML4].pointed_frame().expect("user tls not mapped");
+                    let flags = active_utable.p4()[crate::USER_TLS_PML4].flags();
+                    active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
                         mapper.p4_mut()[crate::USER_TLS_PML4].set(frame, flags);
                     });
                 }
@@ -496,7 +532,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 // TODO: Make sure size is not greater than USER_TLS_SIZE
                 let tls_addr = crate::USER_TLS_OFFSET + context.id.into() * crate::USER_TLS_SIZE;
                 //println!("{}: Copy TLS: address 0x{:x}, size 0x{:x}", context.id.into(), tls_addr, tls.mem.size());
-                tls.mem.move_to(VirtualAddress::new(tls_addr), &mut new_table, &mut temporary_page);
+                tls.mem.move_to(VirtualAddress::new(tls_addr), &mut new_utable, &mut temporary_upage);
                 unsafe {
                     *(tcb_addr as *mut usize) = tls.mem.start_address().data() + tls.mem.size();
                 }
@@ -511,7 +547,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
                 }
             }
 
-            tcb.move_to(VirtualAddress::new(tcb_addr), &mut new_table, &mut temporary_page);
+            tcb.move_to(VirtualAddress::new(tcb_addr), &mut new_utable, &mut temporary_upage);
             context.image.push(tcb.to_shared());
 
             context.name = name;