From d23773d323a7dbfd419daa0b2584aef6c08da347 Mon Sep 17 00:00:00 2001
From: jD91mZM2 <me@krake.one>
Date: Fri, 7 Aug 2020 11:51:29 +0200
Subject: [PATCH] Statically ensure page-aligned addresses

---
 src/arch/x86_64/paging/mod.rs | 10 +++++
 src/common/mod.rs             |  3 ++
 src/common/page_aligned.rs    | 73 +++++++++++++++++++++++++++++++++++
 src/scheme/user.rs            | 12 +++---
 src/syscall/fs.rs             |  9 ++---
 src/syscall/process.rs        | 11 +++---
 6 files changed, 101 insertions(+), 17 deletions(-)
 create mode 100644 src/common/page_aligned.rs

diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index f748d97..2aa2e89 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -555,6 +555,16 @@ impl VirtualAddress {
         self.0
     }
 }
+impl From<VirtualAddress> for usize {
+    fn from(address: VirtualAddress) -> usize {
+        address.get()
+    }
+}
+impl From<usize> for VirtualAddress {
+    fn from(address: usize) -> Self {
+        VirtualAddress(address)
+    }
+}
 
 /// Page
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
diff --git a/src/common/mod.rs b/src/common/mod.rs
index 4bdafba..af0b269 100644
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -1,7 +1,10 @@
 #[macro_use]
 pub mod int_like;
+pub mod page_aligned;
 pub mod unique;
 
+pub use page_aligned::PageAligned;
+
 /// Debug macro, lifted from the std
 #[macro_export]
 macro_rules! dbg {
diff --git a/src/common/page_aligned.rs b/src/common/page_aligned.rs
new file mode 100644
index 0000000..cd4ef90
--- /dev/null
+++ b/src/common/page_aligned.rs
@@ -0,0 +1,73 @@
+use crate::memory::PAGE_SIZE;
+
+use core::ops::{Add, Sub};
+use syscall::error::*;
+
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub struct PageAligned<T: Into<usize> + From<usize> + Copy> {
+    number: T,
+}
+impl<T: Into<usize> + From<usize> + Copy> PageAligned<T> {
+    /// Create a new page-aligned number by rounding the input down to the
+    /// nearest page boundary. Returns the page-aligned number together with
+    /// the offset needed to reach the original input. An offset of 0 means
+    /// the input was already page-aligned.
+    #[inline(always)]
+    pub fn new(number: T) -> (Self, usize) {
+        let mut number = number.into();
+        let offset = number % PAGE_SIZE;
+        number -= offset;
+
+        (Self {
+            number: T::from(number),
+        }, offset)
+    }
+
+    /// Ensure that a number is already page-aligned, or return EINVAL
+    #[inline(always)]
+    pub fn try_new(number: T) -> Result<Self> {
+        if number.into() % PAGE_SIZE == 0 {
+            Ok(Self { number, })
+        } else {
+            Err(Error::new(EINVAL))
+        }
+    }
+
+    /// Round the input address down to the nearest page boundary. round_down(5000) = 4096
+    #[inline(always)]
+    pub fn round_down(address: T) -> Self {
+        Self::new(address).0
+    }
+
+    /// Round the input address up to the next page boundary. round_up(5000) = 8192
+    #[inline(always)]
+    pub fn round_up(address: T) -> Self {
+        Self::new(T::from(address.into() + (PAGE_SIZE - 1))).0
+    }
+
+    /// Return the address
+    #[inline(always)]
+    pub fn get(self) -> T {
+        self.number
+    }
+}
+
+impl<T: Into<usize> + From<usize> + Copy> Add<Self> for PageAligned<T> {
+    type Output = Self;
+
+    fn add(self, rhs: Self) -> Self::Output {
+        Self {
+            number: T::from(self.number.into() + rhs.number.into())
+        }
+    }
+}
+
+impl<T: Into<usize> + From<usize> + Copy> Sub<Self> for PageAligned<T> {
+    type Output = Self;
+
+    fn sub(self, rhs: Self) -> Self::Output {
+        Self {
+            number: T::from(self.number.into() - rhs.number.into())
+        }
+    }
+}
diff --git a/src/scheme/user.rs b/src/scheme/user.rs
index df6c436..0c5406e 100644
--- a/src/scheme/user.rs
+++ b/src/scheme/user.rs
@@ -8,9 +8,10 @@ use spin::{Mutex, RwLock};
 
 use crate::context::{self, Context};
 use crate::context::file::FileDescriptor;
-use crate::context::memory::{entry_flags, round_down_pages, Grant, Region};
+use crate::common::PageAligned;
+use crate::context::memory::{entry_flags, Grant, Region};
 use crate::event;
-use crate::paging::{PAGE_SIZE, InactivePageTable, Page, VirtualAddress};
+use crate::paging::{InactivePageTable, Page, VirtualAddress};
 use crate::paging::temporary_page::TemporaryPage;
 use crate::scheme::{AtomicSchemeId, SchemeId};
 use crate::sync::{WaitQueue, WaitMap};
@@ -128,9 +129,8 @@ impl UserInner {
 
         let mut grants = context.grants.lock();
 
-        let from_address = round_down_pages(address);
-        let offset = address - from_address;
-        let from_region = Region::new(VirtualAddress::new(from_address), offset + size).round();
+        let (from_address, offset) = PageAligned::new(VirtualAddress::new(address));
+        let from_region = Region::new(from_address.get(), offset + size).round();
         let to_region = grants.find_free_at(VirtualAddress::new(to_address), from_region.size(), flags)?;
 
         //TODO: Use syscall_head and syscall_tail to avoid leaking data
@@ -215,7 +215,7 @@ impl UserInner {
             } else {
                 if let Some((context_weak, desc, map)) = self.fmap.lock().remove(&packet.id) {
                     if let Ok(address) = Error::demux(packet.a) {
-                        if address % PAGE_SIZE > 0 {
+                        if PageAligned::try_new(address).is_err() {
                             println!("scheme returned unaligned address, causing extra frame to be allocated");
                         }
                         let res = UserInner::capture_inner(&context_weak, map.address, address, map.size, map.flags, Some(desc));
diff --git a/src/syscall/fs.rs b/src/syscall/fs.rs
index 3ae8e06..e9ad001 100644
--- a/src/syscall/fs.rs
+++ b/src/syscall/fs.rs
@@ -4,10 +4,10 @@ use alloc::vec::Vec;
 use core::sync::atomic::Ordering;
 use spin::RwLock;
 
+use crate::common::PageAligned;
 use crate::context::file::{FileDescriptor, FileDescription};
 use crate::context::memory::Region;
 use crate::context;
-use crate::memory::PAGE_SIZE;
 use crate::paging::VirtualAddress;
 use crate::scheme::{self, FileHandle};
 use crate::syscall::data::{Packet, Stat};
@@ -489,14 +489,13 @@ pub fn funmap(virtual_address: usize) -> Result<usize> {
 pub fn funmap2(virtual_address: usize, length: usize) -> Result<usize> {
     if virtual_address == 0 || length == 0 {
         return Ok(0);
-    } else if virtual_address % PAGE_SIZE != 0 {
-        return Err(Error::new(EINVAL));
     }
 
+    let virtual_address = PageAligned::try_new(VirtualAddress::new(virtual_address))?;
+
     let mut notify_files = Vec::new();
 
-    let virtual_address = VirtualAddress::new(virtual_address);
-    let requested = Region::new(virtual_address, length);
+    let requested = Region::new(virtual_address.get(), length);
 
     {
         let contexts = context::contexts();
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index e051197..28ef038 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -9,6 +9,7 @@ use spin::Mutex;
 
 use crate::context::file::FileDescriptor;
 use crate::context::{ContextId, WaitpidKey};
+use crate::common::PageAligned;
 use crate::context::memory::{UserGrants, Region};
 use crate::context;
 #[cfg(not(feature="doc"))]
@@ -720,11 +721,10 @@ fn fexec_noreturn(
                 for segment in elf.segments() {
                     match segment.p_type {
                         program_header::PT_LOAD => {
-                            let voff = segment.p_vaddr as usize % PAGE_SIZE;
-                            let vaddr = segment.p_vaddr as usize - voff;
+                            let (vaddr, voff) = PageAligned::new(segment.p_vaddr as usize);
 
                             let mut memory = context::memory::Memory::new(
-                                VirtualAddress::new(vaddr),
+                                VirtualAddress::new(vaddr.get()),
                                 segment.p_memsz as usize + voff,
                                 EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                                 true
@@ -1078,13 +1078,12 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]>
                 );
             },
             program_header::PT_LOAD => {
-                let voff = segment.p_vaddr as usize % PAGE_SIZE;
-                let vaddr = segment.p_vaddr as usize - voff;
+                let vaddr = PageAligned::round_down(segment.p_vaddr as usize);
 
                 // Due to the Userspace and kernel TLS bases being located right above 2GB,
                 // limit any loadable sections to lower than that. Eventually we will need
                 // to replace this with a more intelligent TLS address
-                if vaddr >= 0x8000_0000 {
+                if vaddr.get() >= 0x8000_0000 {
                     println!("exec: invalid section address {:X}", segment.p_vaddr);
                     return Err(Error::new(ENOEXEC));
                 }
-- 
GitLab