diff --git a/src/common/page_aligned.rs b/src/common/page_aligned.rs index cd4ef9086bcfa521e641afbd59639fd36eb2454c..e0c07a4410eea3613849ddf163b6024ebc5e4f8f 100644 --- a/src/common/page_aligned.rs +++ b/src/common/page_aligned.rs @@ -1,8 +1,23 @@ -use crate::memory::PAGE_SIZE; +use crate::{ + memory::PAGE_SIZE, + paging::VirtualAddress, +}; use core::ops::{Add, Sub}; use syscall::error::*; +/// This data type will only ever hold page-aligned numbers (numbers that are +/// divisible by `PAGE_SIZE`). The runtime check/rounding is executed *once*, +/// and after that the type-checker takes over to ensure we only perform safe +/// actions. +/// +/// This type offers a way to add/subtract page-aligned values while retaining +/// the page-aligned type, since the sum of two multiples of `PAGE_SIZE` is +/// itself a multiple (`kx + ky = kz`). More safe operations should be implemented as needed. +/// +/// The goal for this type is to slowly consume more parts of Redox OS's memory +/// logic, eventually making it easy to verify with a simple glance that code is +/// correct. 
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct PageAligned<T: Into<usize> + From<usize> + Copy> { number: T, @@ -52,22 +67,36 @@ impl<T: Into<usize> + From<usize> + Copy> PageAligned<T> { } } -impl<T: Into<usize> + From<usize> + Copy> Add<Self> for PageAligned<T> { +impl<X, Y> Add<PageAligned<Y>> for PageAligned<X> +where + X: Into<usize> + From<usize> + Copy, + Y: Into<usize> + From<usize> + Copy, +{ type Output = Self; - fn add(self, rhs: Self) -> Self::Output { + fn add(self, rhs: PageAligned<Y>) -> Self::Output { Self { - number: T::from(self.number.into() + rhs.number.into()) + number: X::from(self.number.into() + rhs.number.into()) } } } -impl<T: Into<usize> + From<usize> + Copy> Sub<Self> for PageAligned<T> { +impl<X, Y> Sub<PageAligned<Y>> for PageAligned<X> +where + X: Into<usize> + From<usize> + Copy, + Y: Into<usize> + From<usize> + Copy, +{ type Output = Self; - fn sub(self, rhs: Self) -> Self::Output { + fn sub(self, rhs: PageAligned<Y>) -> Self::Output { Self { - number: T::from(self.number.into() - rhs.number.into()) + number: X::from(self.number.into() - rhs.number.into()) } } } + +impl From<PageAligned<VirtualAddress>> for usize { + fn from(number: PageAligned<VirtualAddress>) -> Self { + number.get().get() + } +} diff --git a/src/context/memory.rs b/src/context/memory.rs index ab4dc9ce02929d55899b2877cacb24e4a93cc09b..faaabf5dfc521c40ce031e38f75b394a687fbf9d 100644 --- a/src/context/memory.rs +++ b/src/context/memory.rs @@ -12,6 +12,7 @@ use syscall::{ }; use crate::arch::paging::PAGE_SIZE; +use crate::common::PageAligned; use crate::context::file::FileDescriptor; use crate::ipi::{ipi, IpiKind, IpiTarget}; use crate::memory::Frame; @@ -125,21 +126,24 @@ impl DerefMut for UserGrants { } } +/// A region of memory whose start address and size are both always page-aligned. +/// `Region::new(start, size)` transparently rounds `start` down and `size` up, +/// so that the region begins and ends exactly on page boundaries. 
#[derive(Clone, Copy)] pub struct Region { - start: VirtualAddress, - size: usize, + start: PageAligned<VirtualAddress>, + size: PageAligned<usize>, } impl Region { /// Create a new region with the given size pub fn new(start: VirtualAddress, size: usize) -> Self { Self { - start: VirtualAddress::new(round_down_pages(start.get())), - size: round_up_pages(size), + start: PageAligned::round_down(start), + size: PageAligned::round_up(size), } } - /// Create a new region spanning exactly one byte + /// Create a new region spanning exactly one page pub fn page(address: VirtualAddress) -> Self { Self::new(address, 1) } @@ -163,36 +167,36 @@ impl Region { /// Get the start address of the region pub fn start_address(&self) -> VirtualAddress { - self.start + self.start.get() } /// Set the start address of the region - pub fn set_start_address(&mut self, start: VirtualAddress) { + pub fn set_start_address(&mut self, start: PageAligned<VirtualAddress>) { self.start = start; } /// Get the last address in the region (inclusive end) pub fn final_address(&self) -> VirtualAddress { - VirtualAddress::new(self.start.get() + self.size - 1) + VirtualAddress::new(usize::from(self.start) + (self.size.get() - 1)) } /// Get the start address of the next region (exclusive end) pub fn end_address(&self) -> VirtualAddress { - VirtualAddress::new(self.start.get() + self.size) + (self.start + self.size).get() } /// Return the exact size of the region pub fn size(&self) -> usize { - self.size + self.size.get() } /// Return true if the size of this region is zero. Grants with such a /// region should never exist. 
pub fn is_empty(&self) -> bool { - self.size == 0 + self.size.get() == 0 } /// Set the exact size of the region - pub fn set_size(&mut self, size: usize) { + pub fn set_size(&mut self, size: PageAligned<usize>) { self.size = size; } @@ -308,10 +312,7 @@ impl Grant { flush_all.flush(&mut active_table); Grant { - region: Region { - start: to, - size, - }, + region: Region::new(to, size), flags, mapped: true, owned: false, @@ -334,10 +335,7 @@ impl Grant { flush_all.flush(&mut active_table); Grant { - region: Region { - start: to, - size, - }, + region: Region::new(to, size), flags, mapped: true, owned: true, @@ -372,10 +370,7 @@ impl Grant { ipi(IpiKind::Tlb, IpiTarget::Other); Grant { - region: Region { - start: to, - size, - }, + region: Region::new(to, size), flags, mapped: true, owned: false, @@ -430,10 +425,7 @@ impl Grant { } Grant { - region: Region { - start: new_start, - size: self.size(), - }, + region: Region::new(new_start, self.size()), flags: self.flags, mapped: true, owned: self.owned, @@ -467,7 +459,7 @@ impl Grant { flush_all.flush(&mut active_table); unsafe { - self.region_mut().set_start_address(new_start); + self.region_mut().set_start_address(PageAligned::round_down(new_start)); } }