Commit 22aa5f48 authored by Jeremy Soller's avatar Jeremy Soller

Merge branch 'master' into 'master'

Implement fmap for memory:

See merge request !93
parents a2ce83f6 55f2303d
......@@ -22,7 +22,6 @@
#![feature(const_max_value)]
#![feature(const_size_of)]
#![feature(core_intrinsics)]
#![feature(global_allocator)]
#![feature(integer_atomics)]
#![feature(lang_items)]
#![feature(naked_functions)]
......
use alloc::{BTreeMap, Vec};
use core::sync::atomic::{AtomicUsize, Ordering};
use memory::{free_frames, used_frames};
use spin::Mutex;
use syscall::data::StatVfs;
use syscall::error::*;
use syscall::scheme::Scheme;
use syscall;
/// A physical/virtual mapping created through `fmap`, remembered so that
/// `close` can unmap and free it again.
struct Address {
    phys: usize,
    len: usize,
    virt: usize
}

/// Scheme backing `memory:`: reports frame usage via stat and hands out
/// anonymous physical memory allocations via `fmap`.
pub struct MemoryScheme {
    // Per-handle list of physical regions allocated through fmap.
    handles: Mutex<BTreeMap<usize, Vec<Address>>>,
    // Monotonically increasing id for newly opened handles.
    next_id: AtomicUsize
}
impl MemoryScheme {
pub fn new() -> Self {
Self {
handles: Mutex::new(BTreeMap::new()),
next_id: AtomicUsize::new(0)
}
}
}
impl Scheme for MemoryScheme {
fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
    // Allocate a fresh handle id and register an empty allocation list for it.
    let id = self.next_id.fetch_add(1, Ordering::SeqCst);
    self.handles.lock().insert(id, Vec::new());
    // The id must be returned to the caller: it is the file number passed
    // back into fmap/close. Returning a hard-coded 0 here (as before) made
    // every handle after the first unreachable and leaked its map entry.
    Ok(id)
}
......@@ -23,6 +45,26 @@ impl Scheme for MemoryScheme {
Ok(0)
}
fn fmap(&self, id: usize, _offset: usize, len: usize) -> Result<usize> {
    // Look up the allocation list for this handle.
    let mut handles = self.handles.lock();
    let handle = handles.get_mut(&id).ok_or(Error::new(ENOENT))?;

    // Warning: these inner_* functions bypass the root check.
    let phys = syscall::inner_physalloc(len)?;
    let virt = match syscall::inner_physmap(phys, len, syscall::flag::MAP_WRITE) {
        Ok(virt) => virt,
        Err(err) => {
            // Mapping failed: give the freshly allocated frames back
            // before propagating the error.
            syscall::inner_physfree(phys, len).expect("newly allocated region failed to free");
            return Err(err);
        }
    };

    // Remember the region so close() can unmap and free it later.
    handle.push(Address { phys, len, virt });

    Ok(virt)
}
// No fcntl commands are meaningful for memory handles; always report
// success so callers treating fcntl as best-effort keep working.
fn fcntl(&self, _id: usize, _cmd: usize, _arg: usize) -> Result<usize> {
Ok(0)
}
......@@ -37,8 +79,23 @@ impl Scheme for MemoryScheme {
Ok(i)
}
/// Close the file `number`
fn close(&self, _file: usize) -> Result<usize> {
fn close(&self, id: usize) -> Result<usize> {
let allocations = self.handles.lock()
.remove(&id)
.ok_or(Error::new(ENOENT))?;
for addr in allocations {
// physunmap fails if already unmapped
// physfree can't currently fail
//
// What if somebody with root already freed the physical address?
// (But left the mapping, which means we attempt to free it again)
// I'd rather not think about it.
// (Still, that requires root)
let _ = syscall::inner_physunmap(addr.virt)
.and_then(|_| syscall::inner_physfree(addr.phys, addr.len));
}
Ok(0)
}
}
......@@ -120,7 +120,7 @@ impl SchemeList {
self.insert(ns, Box::new(*b""), |scheme_id| Arc::new(Box::new(RootScheme::new(ns, scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"event"), |_| Arc::new(Box::new(EventScheme))).unwrap();
self.insert(ns, Box::new(*b"env"), |_| Arc::new(Box::new(EnvScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"memory"), |_| Arc::new(Box::new(MemoryScheme))).unwrap();
self.insert(ns, Box::new(*b"memory"), |_| Arc::new(Box::new(MemoryScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"sys"), |_| Arc::new(Box::new(SysScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"time"), |scheme_id| Arc::new(Box::new(TimeScheme::new(scheme_id)))).unwrap();
......
......@@ -30,24 +30,27 @@ pub fn iopl(level: usize, stack: &mut SyscallStack) -> Result<usize> {
Ok(0)
}
pub fn physalloc(size: usize) -> Result<usize> {
enforce_root()?;
pub fn inner_physalloc(size: usize) -> Result<usize> {
allocate_frames((size + 4095)/4096).ok_or(Error::new(ENOMEM)).map(|frame| frame.start_address().get())
}
pub fn physfree(physical_address: usize, size: usize) -> Result<usize> {
pub fn physalloc(size: usize) -> Result<usize> {
enforce_root()?;
inner_physalloc(size)
}
pub fn inner_physfree(physical_address: usize, size: usize) -> Result<usize> {
deallocate_frames(Frame::containing_address(PhysicalAddress::new(physical_address)), (size + 4095)/4096);
//TODO: Check that no double free occured
Ok(0)
}
//TODO: verify exlusive access to physical memory
pub fn physmap(physical_address: usize, size: usize, flags: usize) -> Result<usize> {
pub fn physfree(physical_address: usize, size: usize) -> Result<usize> {
enforce_root()?;
inner_physfree(physical_address, size)
}
//TODO: verify exclusive access to physical memory
pub fn inner_physmap(physical_address: usize, size: usize, flags: usize) -> Result<usize> {
if size == 0 {
Ok(0)
} else {
......@@ -98,10 +101,12 @@ pub fn physmap(physical_address: usize, size: usize, flags: usize) -> Result<usi
Ok(to_address + offset)
}
}
pub fn physunmap(virtual_address: usize) -> Result<usize> {
pub fn physmap(physical_address: usize, size: usize, flags: usize) -> Result<usize> {
enforce_root()?;
inner_physmap(physical_address, size, flags)
}
pub fn inner_physunmap(virtual_address: usize) -> Result<usize> {
if virtual_address == 0 {
Ok(0)
} else {
......@@ -124,6 +129,10 @@ pub fn physunmap(virtual_address: usize) -> Result<usize> {
Err(Error::new(EFAULT))
}
}
/// Root-only syscall entry point wrapping `inner_physunmap`.
pub fn physunmap(virtual_address: usize) -> Result<usize> {
enforce_root()?;
inner_physunmap(virtual_address)
}
pub fn virttophys(virtual_address: usize) -> Result<usize> {
enforce_root()?;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment