diff --git a/context/context.rs b/context/context.rs
index 4b4cc3a7d568b176153a3c5f8189561d193ba1fa..73f31f417313f7dee206668d8bd53facccd04804 100644
--- a/context/context.rs
+++ b/context/context.rs
@@ -5,7 +5,7 @@ use spin::Mutex;
 
 use arch;
 use super::file::File;
-use super::memory::{Memory, SharedMemory};
+use super::memory::{Grant, Memory, SharedMemory};
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum Status {
@@ -33,6 +33,8 @@ pub struct Context {
     pub heap: Option<SharedMemory>,
     /// User stack
     pub stack: Option<Memory>,
+    /// User grants
+    pub grants: Arc<Mutex<Vec<Grant>>>,
     /// The current working directory
     pub cwd: Arc<Mutex<Vec<u8>>>,
     /// The open files in the scheme
@@ -51,6 +53,7 @@ impl Context {
             image: Vec::new(),
             heap: None,
             stack: None,
+            grants: Arc::new(Mutex::new(Vec::new())),
             cwd: Arc::new(Mutex::new(Vec::new())),
             files: Arc::new(Mutex::new(Vec::new()))
         }
diff --git a/context/list.rs b/context/list.rs
index 7e60e767604fa7f394b061446108c9ef62c84151..22ebbe96213fb4ddda57d03708086ac688ebda6a 100644
--- a/context/list.rs
+++ b/context/list.rs
@@ -1,3 +1,4 @@
+use alloc::arc::Arc;
 use collections::BTreeMap;
 use core::mem;
 use core::sync::atomic::Ordering;
@@ -9,7 +10,7 @@ use super::context::Context;
 
 /// Context list type
 pub struct ContextList {
-    map: BTreeMap<usize, RwLock<Context>>,
+    map: BTreeMap<usize, Arc<RwLock<Context>>>,
     next_id: usize
 }
 
@@ -23,21 +24,21 @@ impl ContextList {
     }
 
     /// Get the nth context.
-    pub fn get(&self, id: usize) -> Option<&RwLock<Context>> {
+    pub fn get(&self, id: usize) -> Option<&Arc<RwLock<Context>>> {
         self.map.get(&id)
     }
 
     /// Get the current context.
-    pub fn current(&self) -> Option<&RwLock<Context>> {
+    pub fn current(&self) -> Option<&Arc<RwLock<Context>>> {
         self.map.get(&super::CONTEXT_ID.load(Ordering::SeqCst))
     }
 
-    pub fn iter(&self) -> ::collections::btree_map::Iter<usize, RwLock<Context>> {
+    pub fn iter(&self) -> ::collections::btree_map::Iter<usize, Arc<RwLock<Context>>> {
         self.map.iter()
     }
 
     /// Create a new context.
-    pub fn new_context(&mut self) -> Result<&RwLock<Context>> {
+    pub fn new_context(&mut self) -> Result<&Arc<RwLock<Context>>> {
         if self.next_id >= super::CONTEXT_MAX_CONTEXTS {
             self.next_id = 1;
         }
@@ -53,13 +54,13 @@ impl ContextList {
         let id = self.next_id;
         self.next_id += 1;
 
-        assert!(self.map.insert(id, RwLock::new(Context::new(id))).is_none());
+        assert!(self.map.insert(id, Arc::new(RwLock::new(Context::new(id)))).is_none());
 
         Ok(self.map.get(&id).expect("Failed to insert new context. ID is out of bounds."))
     }
 
     /// Spawn a context from a function.
-    pub fn spawn(&mut self, func: extern fn()) -> Result<&RwLock<Context>> {
+    pub fn spawn(&mut self, func: extern fn()) -> Result<&Arc<RwLock<Context>>> {
         let context_lock = self.new_context()?;
         {
             let mut context = context_lock.write();
@@ -77,7 +78,7 @@
         Ok(context_lock)
     }
 
-    pub fn remove(&mut self, id: usize) -> Option<RwLock<Context>> {
+    pub fn remove(&mut self, id: usize) -> Option<Arc<RwLock<Context>>> {
         self.map.remove(&id)
     }
 }
diff --git a/context/memory.rs b/context/memory.rs
index 8f1fecbb61b5c789cd4870e4a8bb1b715c75e225..3121794dc5706034e34bc82e03d33d06a1ec9a96 100644
--- a/context/memory.rs
+++ b/context/memory.rs
@@ -1,4 +1,5 @@
 use alloc::arc::{Arc, Weak};
+use collections::VecDeque;
 use spin::Mutex;
 
 use arch::externs::memset;
@@ -7,12 +8,66 @@ use arch::paging::entry::{self, EntryFlags};
 use arch::paging::temporary_page::TemporaryPage;
 
 #[derive(Debug)]
-pub struct Memory {
+pub struct Grant {
     start: VirtualAddress,
     size: usize,
     flags: EntryFlags
 }
 
+impl Grant {
+    pub fn new(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant {
+        let mut active_table = unsafe { ActivePageTable::new() };
+
+        let mut frames = VecDeque::new();
+
+        let start_page = Page::containing_address(from);
+        let end_page = Page::containing_address(VirtualAddress::new(from.get() + size - 1));
+        for page in Page::range_inclusive(start_page, end_page) {
+            let frame = active_table.translate_page(page).expect("grant references unmapped memory");
+            frames.push_back(frame);
+        }
+
+        active_table.with(new_table, temporary_page, |mapper| {
+            let start_page = Page::containing_address(to);
+            let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                let frame = frames.pop_front().expect("grant did not find enough frames");
+                mapper.map_to(page, frame, flags);
+            }
+        });
+
+        Grant {
+            start: to,
+            size: size,
+            flags: flags
+        }
+    }
+
+    pub fn destroy(self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
+        let mut active_table = unsafe { ActivePageTable::new() };
+
+        active_table.with(new_table, temporary_page, |mapper| {
+            let start_page = Page::containing_address(self.start);
+            let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                mapper.unmap_return(page);
+            }
+        });
+    }
+
+    pub fn start_address(&self) -> VirtualAddress {
+        self.start
+    }
+
+    pub fn size(&self) -> usize {
+        self.size
+    }
+
+    pub fn flags(&self) -> EntryFlags {
+        self.flags
+    }
+}
+
 #[derive(Clone, Debug)]
 pub enum SharedMemory {
     Owned(Arc<Mutex<Memory>>),
@@ -42,6 +97,13 @@ impl SharedMemory {
     }
 }
 
+#[derive(Debug)]
+pub struct Memory {
+    start: VirtualAddress,
+    size: usize,
+    flags: EntryFlags
+}
+
 impl Memory {
     pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, flush: bool, clear: bool) -> Self {
         let mut memory = Memory {
diff --git a/scheme/root.rs b/scheme/root.rs
index 23a9e57f859064e148de71903b17967e97b7c709..2264adea572ebb3667c9c89f798e3f02541717d7 100644
--- a/scheme/root.rs
+++ b/scheme/root.rs
@@ -4,6 +4,7 @@ use collections::BTreeMap;
 use core::sync::atomic::{AtomicUsize, Ordering};
 use spin::RwLock;
 
+use context;
 use syscall::{Error, Result};
 use scheme::{self, Scheme};
 use scheme::user::{UserInner, UserScheme};
@@ -24,12 +25,18 @@ impl RootScheme {
 
 impl Scheme for RootScheme {
     fn open(&self, path: &[u8], _flags: usize) -> Result<usize> {
+        let context = {
+            let contexts = context::contexts();
+            let context = contexts.current().ok_or(Error::NoProcess)?;
+            Arc::downgrade(&context)
+        };
+
         let inner = {
             let mut schemes = scheme::schemes_mut();
             if schemes.get_name(path).is_some() {
                 return Err(Error::FileExists);
             }
-            let inner = Arc::new(UserInner::new());
+            let inner = Arc::new(UserInner::new(context));
             schemes.insert(path.to_vec().into_boxed_slice(), Arc::new(Box::new(UserScheme::new(Arc::downgrade(&inner))))).expect("failed to insert user scheme");
             inner
         };
diff --git a/scheme/user.rs b/scheme/user.rs
index 7da21d47f0c3908c84ed86c7526b8cab1f5c1179..9825f9dcc2706cbd76f67196e3c386415f04ae1c 100644
--- a/scheme/user.rs
+++ b/scheme/user.rs
@@ -2,9 +2,13 @@ use alloc::arc::Weak;
 use collections::{BTreeMap, VecDeque};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::{mem, usize};
-use spin::Mutex;
+use spin::{Mutex, RwLock};
 
-use context;
+use arch;
+use arch::paging::{InactivePageTable, Page, VirtualAddress, entry};
+use arch::paging::temporary_page::TemporaryPage;
+use context::{self, Context};
+use context::memory::Grant;
 use syscall::{convert_to_result, Call, Error, Result};
 
 use super::Scheme;
@@ -21,14 +25,16 @@ pub struct Packet {
 
 pub struct UserInner {
     next_id: AtomicUsize,
+    context: Weak<RwLock<Context>>,
     todo: Mutex<VecDeque<Packet>>,
     done: Mutex<BTreeMap<usize, usize>>
 }
 
 impl UserInner {
-    pub fn new() -> UserInner {
+    pub fn new(context: Weak<RwLock<Context>>) -> UserInner {
         UserInner {
             next_id: AtomicUsize::new(0),
+            context: context,
             todo: Mutex::new(VecDeque::new()),
             done: Mutex::new(BTreeMap::new())
         }
@@ -59,6 +65,87 @@ impl UserInner {
         }
     }
 
+    pub fn capture(&self, buf: &[u8]) -> Result<usize> {
+        self.capture_inner(buf.as_ptr() as usize, buf.len(), false)
+    }
+
+    pub fn capture_mut(&self, buf: &mut [u8]) -> Result<usize> {
+        self.capture_inner(buf.as_mut_ptr() as usize, buf.len(), true)
+    }
+
+    fn capture_inner(&self, address: usize, size: usize, writable: bool) -> Result<usize> {
+        let context_lock = self.context.upgrade().ok_or(Error::NoProcess)?;
+        let context = context_lock.read();
+
+        let mut grants = context.grants.lock();
+
+        let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
+        let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
+
+        let from_address = (address/4096) * 4096;
+        let offset = address - from_address;
+        let full_size = ((offset + size + 4095)/4096) * 4096;
+        let mut to_address = arch::USER_GRANT_OFFSET;
+
+        let mut flags = entry::PRESENT | entry::NO_EXECUTE;
+        if writable {
+            flags |= entry::WRITABLE;
+        }
+
+        for i in 0 .. grants.len() {
+            let start = grants[i].start_address().get();
+            if to_address + full_size < start {
+                grants.insert(i, Grant::new(
+                    VirtualAddress::new(from_address),
+                    VirtualAddress::new(to_address),
+                    full_size,
+                    flags,
+                    &mut new_table,
+                    &mut temporary_page
+                ));
+
+                return Ok(to_address + offset);
+            } else {
+                let pages = (grants[i].size() + 4095) / 4096;
+                let end = start + pages * 4096;
+                to_address = end;
+            }
+        }
+
+        grants.push(Grant::new(
+            VirtualAddress::new(from_address),
+            VirtualAddress::new(to_address),
+            full_size,
+            flags,
+            &mut new_table,
+            &mut temporary_page
+        ));
+
+        return Ok(to_address + offset);
+    }
+
+    pub fn release(&self, address: usize) -> Result<()> {
+        let context_lock = self.context.upgrade().ok_or(Error::NoProcess)?;
+        let context = context_lock.read();
+
+        let mut grants = context.grants.lock();
+
+        let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
+        let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
+
+        for i in 0 .. grants.len() {
+            let start = grants[i].start_address().get();
+            let end = start + grants[i].size();
+            if address >= start && address < end {
+                grants.remove(i).destroy(&mut new_table, &mut temporary_page);
+
+                return Ok(());
+            }
+        }
+
+        Err(Error::Fault)
+    }
+
     pub fn read(&self, buf: &mut [u8]) -> Result<usize> {
         let packet_size = mem::size_of::<Packet>();
         let len = buf.len()/packet_size;
@@ -115,7 +202,10 @@ impl UserScheme {
 impl Scheme for UserScheme {
     fn open(&self, path: &[u8], flags: usize) -> Result<usize> {
         let inner = self.inner.upgrade().ok_or(Error::NoDevice)?;
-        inner.call(Call::Open, path.as_ptr() as usize, path.len(), flags)
+        let address = inner.capture(path)?;
+        let result = inner.call(Call::Open, address, path.len(), flags);
+        let _ = inner.release(address);
+        result
    }
 
     fn dup(&self, file: usize) -> Result<usize> {
@@ -125,12 +215,18 @@ impl Scheme for UserScheme {
 
     fn read(&self, file: usize, buf: &mut [u8]) -> Result<usize> {
         let inner = self.inner.upgrade().ok_or(Error::NoDevice)?;
-        inner.call(Call::Read, file, buf.as_mut_ptr() as usize, buf.len())
+        let address = inner.capture_mut(buf)?;
+        let result = inner.call(Call::Read, file, address, buf.len());
+        let _ = inner.release(address);
+        result
     }
 
     fn write(&self, file: usize, buf: &[u8]) -> Result<usize> {
         let inner = self.inner.upgrade().ok_or(Error::NoDevice)?;
-        inner.call(Call::Write, file, buf.as_ptr() as usize, buf.len())
+        let address = inner.capture(buf)?;
+        let result = inner.call(Call::Write, file, address, buf.len());
+        let _ = inner.release(address);
+        result
     }
 
     fn fsync(&self, file: usize) -> Result<()> {
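
The UserScheme wrappers above all follow the same capture/call/release pattern, and the placement done in capture_inner reduces to page arithmetic: round the caller's buffer out to whole 4096-byte pages, map those pages at a free slot at or above USER_GRANT_OFFSET in the handler's address space, and hand the handler the grant address plus the buffer's offset into its first page. A minimal, kernel-independent sketch of that arithmetic, assuming 4 KiB pages; PAGE_SIZE, grant_layout, and the example addresses are illustrative only and not part of the kernel's API:

// Sketch of the page rounding performed by capture_inner (assumed 4 KiB pages).
const PAGE_SIZE: usize = 4096;

// Returns (page-aligned source address, offset of the data within its first page,
// page-rounded length that has to be granted).
fn grant_layout(address: usize, size: usize) -> (usize, usize, usize) {
    let from_address = (address / PAGE_SIZE) * PAGE_SIZE;
    let offset = address - from_address;
    let full_size = ((offset + size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
    (from_address, offset, full_size)
}

fn main() {
    // A buffer that starts 16 bytes into a page and crosses one page boundary
    // needs two pages granted; the handler sees it at to_address + offset.
    let (from, offset, full) = grant_layout(0x7000_0010, 8000);
    assert_eq!((from, offset, full), (0x7000_0000, 0x10, 2 * PAGE_SIZE));
}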