diff --git a/rmm b/rmm
index 7aeb9f0ac812266aa3ec9a018eebb49854658ee4..e24e8485ba59befe91620164e7ea6d09d170514f 160000
--- a/rmm
+++ b/rmm
@@ -1 +1 @@
-Subproject commit 7aeb9f0ac812266aa3ec9a018eebb49854658ee4
+Subproject commit e24e8485ba59befe91620164e7ea6d09d170514f
diff --git a/src/allocator/mod.rs b/src/allocator/mod.rs
index f8be01794c7f1dbafe4fdd77d984996e05efa304..4da36eb3003a5968d53eae8495b3d8bd652cfd69 100644
--- a/src/allocator/mod.rs
+++ b/src/allocator/mod.rs
@@ -1,5 +1,5 @@
 use rmm::Flusher;
-use crate::paging::{KernelMapper, Page, PageFlags, VirtualAddress, mapper::PageFlushAll, entry::EntryFlags};
+use crate::paging::{KernelMapper, Page, PageFlags, VirtualAddress, mapper::PageFlushAll};
 
 #[cfg(not(feature="slab"))]
 pub use self::linked_list::Allocator;
@@ -20,7 +20,7 @@ unsafe fn map_heap(mapper: &mut KernelMapper, offset: usize, size: usize) {
     let heap_start_page = Page::containing_address(VirtualAddress::new(offset));
     let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size-1));
     for page in Page::range_inclusive(heap_start_page, heap_end_page) {
-        let result = mapper.map(page.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))))
+        let result = mapper.map(page.start_address(), PageFlags::new().write(true).global(cfg!(not(feature = "pti"))))
             .expect("failed to map kernel heap");
         flush_all.consume(result);
     }
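
This hunk, and the matching per-arch hunks in the `paging/mod.rs` files further down, replaces the arch-specific `custom_flag(EntryFlags::GLOBAL.bits(), ...)` escape hatch with rmm's portable `.global(bool)` builder method; that is what allows the aarch64 `entry.rs` module to be deleted below. A minimal sketch of the builder shape involved, using a hypothetical `PageFlags` stand-in rather than rmm's real type:

```rust
/// Hypothetical stand-in for rmm's PageFlags builder, to illustrate why a
/// named `.global(bool)` method beats a raw `.custom_flag(bits, bool)`:
/// the arch-specific bit position stays encapsulated in one place.
#[derive(Clone, Copy)]
struct PageFlags(usize);

impl PageFlags {
    // Assumed bit positions, for this sketch only.
    const WRITE: usize = 1 << 1;
    const GLOBAL: usize = 1 << 2;

    fn new() -> Self { PageFlags(0) }

    fn flag(self, bit: usize, on: bool) -> Self {
        if on { PageFlags(self.0 | bit) } else { PageFlags(self.0 & !bit) }
    }

    fn write(self, on: bool) -> Self { self.flag(Self::WRITE, on) }

    /// Callers no longer import an arch-specific EntryFlags type.
    fn global(self, on: bool) -> Self { self.flag(Self::GLOBAL, on) }
}

fn main() {
    // Mirrors the call sites in this patch.
    let flags = PageFlags::new().write(true).global(cfg!(not(feature = "pti")));
    assert_ne!(flags.0 & PageFlags::WRITE, 0);
}
```
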
diff --git a/src/arch/aarch64/consts.rs b/src/arch/aarch64/consts.rs
index 1656c8c26f3e5cc404f377ba5e828da323b802a6..22ad2f0a5997d8f860fed6641a7137c0f2d95696 100644
--- a/src/arch/aarch64/consts.rs
+++ b/src/arch/aarch64/consts.rs
@@ -2,41 +2,41 @@
 // The lower 256 PML4 entries are reserved for userspace
 // Each PML4 entry references up to 512 GB of memory
 // The second from the top (510) PML4 is reserved for the kernel
-    /// The size of a single PML4
-    pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
-    pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
-
-    /// Offset of recursive paging (deprecated, but still reserved)
-    pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
-    pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK)/PML4_SIZE;
-
-    /// Offset of kernel
-    pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
-    pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE;
-
-    /// Offset to kernel heap
-    pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
-    pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
-    /// Size of kernel heap
-    pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
-
-    /// Offset of temporary mapping for misc kernel bring-up actions
-    pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
-
-    /// Offset to kernel percpu variables
-    pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_TMP_MISC_OFFSET - PML4_SIZE;
-    pub const KERNEL_PERCPU_PML4: usize = (KERNEL_PERCPU_OFFSET & PML4_MASK)/PML4_SIZE;
-    /// Size of kernel percpu variables
-    pub const KERNEL_PERCPU_SHIFT: u8 = 16; // 2^16 = 64 KiB
-    pub const KERNEL_PERCPU_SIZE: usize = 1_usize << KERNEL_PERCPU_SHIFT;
-
-    /// Offset of physmap
-    // This needs to match RMM's PHYS_OFFSET
-    pub const PHYS_OFFSET: usize = 0xFFFF_8000_0000_0000;
-    pub const PHYS_PML4: usize = (PHYS_OFFSET & PML4_MASK)/PML4_SIZE;
-
-    /// Offset to user image
-    pub const USER_OFFSET: usize = 0;
-
-    /// End offset of the user image, i.e. kernel start
-    pub const USER_END_OFFSET: usize = 256 * PML4_SIZE;
+/// The size of a single PML4
+pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
+pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
+
+/// Offset of recursive paging (deprecated, but still reserved)
+pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
+pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK)/PML4_SIZE;
+
+/// Offset of kernel
+pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
+pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE;
+
+/// Offset to kernel heap
+pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
+pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
+/// Size of kernel heap
+pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
+
+/// Offset of temporary mapping for misc kernel bring-up actions
+pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
+
+/// Offset to kernel percpu variables
+pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_TMP_MISC_OFFSET - PML4_SIZE;
+pub const KERNEL_PERCPU_PML4: usize = (KERNEL_PERCPU_OFFSET & PML4_MASK)/PML4_SIZE;
+/// Size of kernel percpu variables
+pub const KERNEL_PERCPU_SHIFT: u8 = 16; // 2^16 = 64 KiB
+pub const KERNEL_PERCPU_SIZE: usize = 1_usize << KERNEL_PERCPU_SHIFT;
+
+/// Offset of physmap
+// This needs to match RMM's PHYS_OFFSET
+pub const PHYS_OFFSET: usize = 0xFFFF_8000_0000_0000;
+pub const PHYS_PML4: usize = (PHYS_OFFSET & PML4_MASK)/PML4_SIZE;
+
+/// Offset to user image
+pub const USER_OFFSET: usize = 0;
+
+/// End offset of the user image, i.e. kernel start
+pub const USER_END_OFFSET: usize = 256 * PML4_SIZE;
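
Beyond the de-indentation, the constants are unchanged; the layout is easy to sanity-check, since each PML4 slot spans `PML4_SIZE` (512 GiB) and `(offset & PML4_MASK) / PML4_SIZE` extracts the slot index. A standalone check using the same expressions and values, assuming a 64-bit `usize`:

```rust
// Standalone check of the index arithmetic above.
const PML4_SIZE: usize = 0x0000_0080_0000_0000;
const PML4_MASK: usize = 0x0000_ff80_0000_0000;

const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
const PHYS_OFFSET: usize = 0xFFFF_8000_0000_0000;

const fn pml4_index(offset: usize) -> usize {
    (offset & PML4_MASK) / PML4_SIZE
}

fn main() {
    assert_eq!(pml4_index(RECURSIVE_PAGE_OFFSET), 511); // top slot, reserved
    assert_eq!(pml4_index(KERNEL_OFFSET), 510);         // "second from the top"
    assert_eq!(pml4_index(KERNEL_HEAP_OFFSET), 509);
    assert_eq!(pml4_index(PHYS_OFFSET), 256);           // first slot above userspace
}
```
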
diff --git a/src/arch/aarch64/device/cpu/registers/control_regs.rs b/src/arch/aarch64/device/cpu/registers/control_regs.rs
index 8a74c29a00d2c4db54d2e2b085be4a350251f587..0272fae8773c7c2e9e93104530fb2dd8c689068b 100644
--- a/src/arch/aarch64/device/cpu/registers/control_regs.rs
+++ b/src/arch/aarch64/device/cpu/registers/control_regs.rs
@@ -4,9 +4,9 @@ use core::arch::asm;
 
 bitflags! {
     pub struct MairEl1: u64 {
-        const DEVICE_MEMORY = 0x00;
+        const DEVICE_MEMORY = 0x00 << 16;
         const NORMAL_UNCACHED_MEMORY = 0x44 << 8;
-        const NORMAL_WRITEBACK_MEMORY = 0xff << 16;
+        const NORMAL_WRITEBACK_MEMORY = 0xff;
     }
 }
 
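MAIR_EL1 holds eight 8-bit attribute fields, Attr0 in bits 7:0 up to Attr7 in bits 63:56, and each page table entry selects one field by index. This change moves normal write-back memory (encoding 0xff) into Attr0 and device memory (encoding 0x00) into Attr2, presumably so that the attribute index the paging code emits by default resolves to write-back rather than device memory; the diff itself does not state the motivation. A small sketch of the field packing, with helper names that are illustrative only:

```rust
// Illustrative helpers (not kernel API): MAIR_EL1 packs eight 8-bit
// attribute encodings; AttrN occupies bits (8*N + 7):(8*N).
const fn mair_attr(index: u32, encoding: u64) -> u64 {
    encoding << (8 * index)
}

const fn mair_field(mair: u64, index: u32) -> u64 {
    (mair >> (8 * index)) & 0xff
}

fn main() {
    // The layout after this patch:
    let mair = mair_attr(0, 0xff)   // Attr0: normal, write-back
             | mair_attr(1, 0x44)   // Attr1: normal, non-cacheable
             | mair_attr(2, 0x00);  // Attr2: device memory

    assert_eq!(mair_field(mair, 0), 0xff);
    assert_eq!(mair_field(mair, 1), 0x44);
    assert_eq!(mair_field(mair, 2), 0x00);
}
```
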
diff --git a/src/arch/aarch64/interrupt/handler.rs b/src/arch/aarch64/interrupt/handler.rs
index cf743c22557a0a96bdf3a0ecd23c3fee14a4496c..e670d45cfebb8054216d3f09e791095903c41285 100644
--- a/src/arch/aarch64/interrupt/handler.rs
+++ b/src/arch/aarch64/interrupt/handler.rs
@@ -240,7 +240,7 @@ macro_rules! function {
 macro_rules! push_scratch {
     () => { "
         // Push scratch registers
-        stp     x18, x18, [sp, #-16]!
+        str     x18,      [sp, #-16]!
         stp     x16, x17, [sp, #-16]!
         stp     x14, x15, [sp, #-16]!
         stp     x12, x13, [sp, #-16]!
@@ -266,7 +266,7 @@ macro_rules! pop_scratch {
         ldp     x12, x13, [sp], #16
         ldp     x14, x15, [sp], #16
         ldp     x16, x17, [sp], #16
-        ldp     x18, x18, [sp], #16
+        ldr     x18,      [sp], #16
     " };
 }
 
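The old `stp x18, x18` pushed the same register twice into one 16-byte slot, and its counterpart `ldp x18, x18` names the same destination register twice, which the ARM ARM classifies as CONSTRAINED UNPREDICTABLE for LDP. The fix spends a full pre-/post-indexed 16-byte slot on a single `str`/`ldr`, preserving the mandatory 16-byte SP alignment. The same idiom in a standalone sketch, using ordinary scratch registers since x18 is reserved on some targets:

```rust
// Sketch of the idiom: an odd register count still consumes a whole
// 16-byte stack slot, because AArch64 keeps SP 16-byte aligned.
#[cfg(target_arch = "aarch64")]
fn odd_register_push_pop() {
    use core::arch::asm;
    unsafe {
        asm!(
            "str x9,       [sp, #-16]!", // one register, one full slot
            "stp x10, x11, [sp, #-16]!", // pairs pack two per slot
            "ldp x10, x11, [sp], #16",
            "ldr x9,       [sp], #16",   // never `ldp x9, x9`: loading a
                                         // pair into the same destination
                                         // is CONSTRAINED UNPREDICTABLE
            out("x9") _, out("x10") _, out("x11") _,
        );
    }
}
```
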
diff --git a/src/arch/aarch64/paging/entry.rs b/src/arch/aarch64/paging/entry.rs
deleted file mode 100644
index d326780bd72bce2bfbc32589d0bd0a2f1cabb66f..0000000000000000000000000000000000000000
--- a/src/arch/aarch64/paging/entry.rs
+++ /dev/null
@@ -1,155 +0,0 @@
-//! # Page table entry
-//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
-
-use crate::memory::Frame;
-
-use super::{PageFlags, PhysicalAddress, RmmA, RmmArch};
-
-/// A page table entry
-#[derive(Debug)]
-pub struct Entry(u64);
-
-/// A page descriptor
-#[derive(Debug)]
-pub struct PageDescriptor(u64);
-
-bitflags! {
-    pub struct TableDescriptorFlags: u64 {
-        const PRESENT =                     1 << 0;
-        const VALID =                       1 << 0;
-        const TABLE =                       1 << 1;
-        const AF =                          1 << 10;    /* NOTE: TableDescriptors don't actually have an AF bit! */
-        const PXNTABLE =                    1 << 59;
-        const UXNTABLE =                    1 << 60;
-        const APTABLE_0 =                   1 << 61;
-        const APTABLE_1 =                   1 << 62;
-        const SUBLEVEL_NO_EL0_ACCESS =      (0 << 62) | (1 << 61);
-        const SUBLEVEL_NO_WANY_ACCESS =     (1 << 62) | (0 << 61);
-        const SUBLEVEL_NO_WANY_NO_REL0 =    (1 << 62) | (1 << 61);
-        const NSTABLE =                     1 << 63;
-    }
-}
-
-bitflags! {
-    pub struct PageDescriptorFlags: u64 {
-        const PRESENT =             1 << 0;
-        const VALID =               1 << 0;
-        const PAGE =                1 << 1;
-        const ATTR_INDEX_0 =        1 << 2;
-        const ATTR_INDEX_1 =        1 << 3;
-        const ATTR_INDEX_2 =        1 << 4;
-        const NS =                  1 << 5;
-        const AP_1 =                1 << 6;
-        const AP_2 =                1 << 7;
-        const SH_0 =                1 << 8;
-        const SH_1 =                1 << 9;
-        const AF =                  1 << 10;
-        const NG =                  1 << 11;
-        const DBM =                 1 << 51;
-        const CONTIGUOUS =          1 << 52;
-        const PXN =                 1 << 53;
-        const UXN =                 1 << 54;
-    }
-}
-
-// These are 'virtual' flags that are used to minimise changes to the generic paging code.
-// These are translated to AArch64 specific Page and Table descriptors as and when needed.
-bitflags! {
-    #[derive(Default)]
-    pub struct EntryFlags: usize {
-        const PRESENT =             1 << 0;
-        const HUGE_PAGE =           1 << 1;
-        const GLOBAL =              1 << 2;
-        const NO_EXECUTE =          1 << 3;
-        const USER_ACCESSIBLE =     1 << 4;
-        const WRITABLE =            1 << 5;
-        const TLS =                 1 << 6;
-        const AF =                  1 << 10;
-    }
-}
-
-pub const ADDRESS_MASK: usize = 0x0000_ffff_ffff_f000;
-pub const COUNTER_MASK: u64 = 0x0008_0000_0000_0000;
-
-impl Entry {
-    /// Clear entry
-    pub fn set_zero(&mut self) {
-        self.0 = 0;
-    }
-
-    /// Is the entry unused?
-    pub fn is_unused(&self) -> bool {
-        self.0 == (self.0 & COUNTER_MASK)
-    }
-
-    /// Make the entry unused
-    pub fn set_unused(&mut self) {
-        self.0 &= COUNTER_MASK;
-    }
-
-    /// Get the address this page references
-    pub fn address(&self) -> PhysicalAddress {
-        PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
-    }
-
-    /// Get the current entry flags
-    pub fn page_table_entry_flags(&self) -> TableDescriptorFlags {
-        TableDescriptorFlags::from_bits_truncate(self.0)
-    }
-
-    pub fn page_descriptor_entry_flags(&self) -> PageDescriptorFlags {
-        PageDescriptorFlags::from_bits_truncate(self.0)
-    }
-
-    /// Get the current entry flags
-    pub fn flags(&self) -> PageFlags<RmmA> {
-        unsafe { PageFlags::from_data((self.0 as usize & RmmA::ENTRY_FLAGS_MASK) & !(COUNTER_MASK as usize)) }
-    }
-
-    /// Get the associated frame, if available, for a level 4, 3, or 2 page
-    pub fn pointed_frame(&self) -> Option<Frame> {
-        if self.page_table_entry_flags().contains(TableDescriptorFlags::VALID) {
-            Some(Frame::containing_address(self.address()))
-        } else {
-            None
-        }
-    }
-
-    /// Get the associated frame, if available, for a level 1 page
-    pub fn pointed_frame_at_l1(&self) -> Option<Frame> {
-        if self.page_descriptor_entry_flags().contains(PageDescriptorFlags::VALID) {
-            Some(Frame::containing_address(self.address()))
-        } else {
-            None
-        }
-    }
-
-    pub fn page_table_entry_set(&mut self, frame: Frame, flags: TableDescriptorFlags) {
-        debug_assert!(frame.start_address().data() & !ADDRESS_MASK == 0);
-        // ODDNESS Alert: We need to set the AF bit - despite this being a TableDescriptor!!!
-        // The Arm ARM says this bit (bit 10) is IGNORED in Table Descriptors so hopefully this is OK
-        let access_flag = TableDescriptorFlags::AF;
-        self.0 = (frame.start_address().data() as u64) | flags.bits() | access_flag.bits() | (self.0 & COUNTER_MASK);
-    }
-
-    pub fn page_descriptor_entry_set(&mut self, frame: Frame, flags: PageDescriptorFlags) {
-        debug_assert!(frame.start_address().data() & !ADDRESS_MASK == 0);
-        let access_flag = PageDescriptorFlags::AF;
-        self.0 = (frame.start_address().data() as u64) | flags.bits() | access_flag.bits() | (self.0 & COUNTER_MASK);
-    }
-
-    pub fn set(&mut self, frame: Frame, flags: PageFlags<RmmA>) {
-        debug_assert!(frame.start_address().data() & !ADDRESS_MASK == 0);
-        self.0 = (frame.start_address().data() as u64) | (flags.data() as u64) | (self.0 & COUNTER_MASK);
-    }
-
-    /// Get bit 51 in entry, used as 1 of 9 bits (in 9 entries) used as a counter for the page table
-    pub fn counter_bits(&self) -> u64 {
-        (self.0 & COUNTER_MASK) >> 51
-    }
-
-    /// Set bit 51 in entry, used as 1 of 9 bits (in 9 entries) used as a counter for the page table
-    pub fn set_counter_bits(&mut self, count: u64) {
-        self.0 = (self.0 & !COUNTER_MASK) | ((count & 0x1) << 51);
-    }
-}
diff --git a/src/arch/aarch64/paging/mod.rs b/src/arch/aarch64/paging/mod.rs
index b1e3aed7f847be4c3ce466a1bc048e582f688846..f3c5e7ba6209a92f568eda183fba43ac602b83c6 100644
--- a/src/arch/aarch64/paging/mod.rs
+++ b/src/arch/aarch64/paging/mod.rs
@@ -5,7 +5,6 @@ use core::{mem, ptr};
 
 use crate::device::cpu::registers::{control_regs, tlb};
 
-use self::entry::EntryFlags;
 use self::mapper::PageFlushAll;
 
 pub use rmm::{
@@ -21,7 +20,6 @@ pub use super::CurrentRmmArch as RmmA;
 pub type PageMapper = rmm::PageMapper<RmmA, crate::arch::rmm::LockedAllocator>;
 pub use crate::rmm::KernelMapper;
 
-pub mod entry;
 pub mod mapper;
 
 /// Number of entries per page table
@@ -64,7 +62,7 @@ unsafe fn map_percpu(cpu_id: usize, mapper: &mut PageMapper) -> PageFlushAll<Rmm
     for page in Page::range_inclusive(start_page, end_page) {
         let result = mapper.map(
             page.start_address(),
-            PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))),
+            PageFlags::new().write(true).global(cfg!(not(feature = "pti"))),
         )
         .expect("failed to allocate page table frames while mapping percpu");
         flush_all.consume(result);
diff --git a/src/arch/aarch64/rmm.rs b/src/arch/aarch64/rmm.rs
index bc38a05d3807970fd385db209d853a0e571d9c70..5fed354e88f678bd45735cb52c0a085d9bbed180 100644
--- a/src/arch/aarch64/rmm.rs
+++ b/src/arch/aarch64/rmm.rs
@@ -178,7 +178,6 @@ unsafe fn inner<A: Arch>(
         #[cfg(feature = "graphical_debug")]
         {
             use crate::devices::graphical_debug::FRAMEBUFFER;
-            use super::paging::entry::EntryFlags;
 
             let (phys, virt, size) = *FRAMEBUFFER.lock();
 
diff --git a/src/arch/aarch64/start.rs b/src/arch/aarch64/start.rs
index 71f65f8625410973463837a74256dda7552973b0..cd127f16e71dbf2952f8db4ce41eb15cf715d719 100644
--- a/src/arch/aarch64/start.rs
+++ b/src/arch/aarch64/start.rs
@@ -58,7 +58,7 @@ pub struct KernelArgs {
 
 /// The entry to Rust, all things must be initialized
 #[no_mangle]
-pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
+pub unsafe extern "C" fn kstart(args_ptr: *const KernelArgs) -> ! {
     let bootstrap = {
         let args = &*args_ptr;
 
@@ -113,15 +113,12 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
         info!("Bootstrap entry point: {:X}", {args.bootstrap_entry});
 
         // Setup interrupt handlers
-        extern "C" {
-            fn exception_vector_base();
-        }
         core::arch::asm!(
             "
-            ldr x0, =exception_vector_base
-            msr vbar_el1, x0
+            ldr {tmp}, =exception_vector_base
+            msr vbar_el1, {tmp}
             ",
-            out("x0") _,
+            tmp = out(reg) _,
         );
 
         /* NOT USED WITH UEFI
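
Two improvements here: the `extern "C"` declaration of `exception_vector_base` was never needed, since the `ldr {tmp}, =symbol` literal-pool form is resolved at assembly/link time, and the hard-coded `x0` clobber becomes a named template operand, so the register allocator can pick any free register. A minimal standalone example of the same operand style, reading a register that EL0 can access:

```rust
/// Read the generic timer frequency (readable from EL0) through a named
/// template operand: the compiler picks whichever register backs `{f}`,
/// instead of the old style that pinned and clobbered x0 by name.
#[cfg(target_arch = "aarch64")]
fn counter_frequency() -> u64 {
    use core::arch::asm;
    let freq: u64;
    unsafe {
        asm!("mrs {f}, cntfrq_el0", f = out(reg) freq);
    }
    freq
}
```
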
diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs
index 31a74e219716a3ff1b58ae36562f4897bdc0dd5e..336469bfa16a07697b1601c66a2976c8c138480c 100644
--- a/src/arch/x86/paging/mod.rs
+++ b/src/arch/x86/paging/mod.rs
@@ -4,7 +4,6 @@
 use core::{mem, ptr};
 use x86::msr;
 
-use self::entry::EntryFlags;
 use self::mapper::PageFlushAll;
 
 pub use rmm::{
@@ -84,7 +83,7 @@ unsafe fn map_percpu(cpu_id: usize, mapper: &mut PageMapper) -> PageFlushAll<Rmm
     for page in Page::range_inclusive(start_page, end_page) {
         let result = mapper.map(
             page.start_address(),
-            PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))),
+            PageFlags::new().write(true).global(cfg!(not(feature = "pti"))),
         )
         .expect("failed to allocate page table frames while mapping percpu");
         flush_all.consume(result);
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index 31a74e219716a3ff1b58ae36562f4897bdc0dd5e..336469bfa16a07697b1601c66a2976c8c138480c 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -4,7 +4,6 @@
 use core::{mem, ptr};
 use x86::msr;
 
-use self::entry::EntryFlags;
 use self::mapper::PageFlushAll;
 
 pub use rmm::{
@@ -84,7 +83,7 @@ unsafe fn map_percpu(cpu_id: usize, mapper: &mut PageMapper) -> PageFlushAll<Rmm
     for page in Page::range_inclusive(start_page, end_page) {
         let result = mapper.map(
             page.start_address(),
-            PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))),
+            PageFlags::new().write(true).global(cfg!(not(feature = "pti"))),
         )
         .expect("failed to allocate page table frames while mapping percpu");
         flush_all.consume(result);
diff --git a/src/context/list.rs b/src/context/list.rs
index efca76da2409fa11934723124bb3e5fa2a5a2199..2944d0e914a5dcb9c743b7ca87b3ae3dc6535896 100644
--- a/src/context/list.rs
+++ b/src/context/list.rs
@@ -91,11 +91,12 @@ impl ContextList {
             let _ = context.set_addr_space(super::memory::new_addrspace()?);
 
             let mut stack = vec![0; 65_536].into_boxed_slice();
-            let offset = stack.len() - mem::size_of::<usize>();
+            let mut offset = stack.len();
 
             #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
             unsafe {
-                let offset = stack.len() - mem::size_of::<usize>();
+                // Space for return address on stack
+                offset -= mem::size_of::<usize>();
                 let func_ptr = stack.as_mut_ptr().add(offset);
                 *(func_ptr as *mut usize) = func as usize;
             }
@@ -105,6 +106,8 @@ impl ContextList {
                 let context_id = context.id.into();
                 context.arch.set_lr(func as usize);
                 context.arch.set_context_handle();
+                // Stack should be 16 byte aligned
+                offset -= (stack.as_ptr() as usize + offset) % 16;
             }
 
             context.arch.set_stack(stack.as_ptr() as usize + offset);
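
On x86 the new stack now explicitly reserves one `usize` for the pushed return address; on AArch64 nothing is pushed (the entry point goes in `lr` instead), but AAPCS64 requires `sp` to stay 16-byte aligned, hence the final `offset -= (base + offset) % 16`. The rounding step in isolation, with a made-up base address:

```rust
// Standalone check of the alignment step: round the stack top down to a
// 16-byte boundary by shrinking the offset, as the aarch64 branch does.
fn align_stack_top(base: usize, mut offset: usize) -> usize {
    offset -= (base + offset) % 16;
    base + offset
}

fn main() {
    // Hypothetical base; 65_536 matches the boxed slice in the patch.
    let base = 0x7f00_0000_1008_usize;
    let top = align_stack_top(base, 65_536);
    assert_eq!(top % 16, 0);
    assert!(top <= base + 65_536);
}
```
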
diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs
index a371d5dee0c79f03f202f7ea206b1545c717650a..170580bed676202a28dadada190955c2e8654532 100644
--- a/src/syscall/driver.rs
+++ b/src/syscall/driver.rs
@@ -1,12 +1,14 @@
 use crate::interrupt::InterruptStack;
 use crate::memory::{allocate_frames_complex, deallocate_frames, Frame, PAGE_SIZE};
 use crate::paging::{PageFlags, PhysicalAddress, VirtualAddress, mapper::PageFlushAll};
-use crate::paging::entry::EntryFlags;
 use crate::context;
 use crate::context::memory::{Grant, Region};
 use crate::syscall::error::{Error, EFAULT, EINVAL, ENOMEM, EPERM, ESRCH, Result};
 use crate::syscall::flag::{PhysallocFlags, PartialAllocStrategy, PhysmapFlags, PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE, PHYSMAP_NO_CACHE};
 
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+use crate::paging::entry::EntryFlags;
+
 use alloc::sync::Arc;
 
 fn enforce_root() -> Result<()> {
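
With the aarch64 `entry.rs` deleted above, the unconditional `use crate::paging::entry::EntryFlags` would no longer resolve on that target, so the import is gated to the architectures that still provide the module. The pattern in a self-contained form, with hypothetical module and flag names:

```rust
// Hypothetical stand-ins, for illustration only: gate the import and all
// of its use sites behind the same cfg, so targets without the module
// never see the name at all.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod entry {
    pub struct EntryFlags(pub usize);
    pub const HUGE_PAGE: EntryFlags = EntryFlags(1 << 1);
}

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use entry::{EntryFlags, HUGE_PAGE};

fn main() {
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    {
        let EntryFlags(bits) = HUGE_PAGE;
        println!("huge page bit: {bits:#x}");
    }
    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
    println!("EntryFlags is not compiled on this target");
}
```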