Commit a31af81f authored by Jeremy Soller

Update fmap support

parent ef919f3d
This diff is collapsed.
......@@ -42,6 +42,28 @@ impl Grant {
}
}
/// Map a span of `size` bytes starting at virtual address `to` in the
/// currently active page table with the given entry `flags`, and return
/// a `Grant` recording the mapping.
///
/// NOTE(review): assumes `size > 0` — for `size == 0` the end-address
/// computation `to.get() + size - 1` underflows `usize`; presumably all
/// callers guard against a zero size before calling this — TODO confirm.
pub fn map(to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
// NOTE(review): unsafe constructor — presumably only one ActivePageTable
// handle may exist at a time; confirm the caller upholds that invariant.
let mut active_table = unsafe { ActivePageTable::new() };
// Collects the per-page flush results so the TLB work is applied once below.
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(to);
// Inclusive end page is determined by the region's last byte (to + size - 1).
let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = active_table.map(page, flags);
// Defer this page's flush into the batched flush instead of flushing now.
flush_all.consume(result);
}
// Perform all queued TLB flushes in one pass against the active table.
flush_all.flush(&mut active_table);
// The grant is marked mapped: it owns live pages in the active table.
Grant {
start: to,
size: size,
flags: flags,
mapped: true
}
}
pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant {
let mut active_table = unsafe { ActivePageTable::new() };
......
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use context;
use context::memory::Grant;
use memory::{free_frames, used_frames};
use spin::Mutex;
use syscall::data::StatVfs;
use paging::VirtualAddress;
use paging::entry::EntryFlags;
use syscall::data::{Map, StatVfs};
use syscall::error::*;
use syscall::flag::{PROT_EXEC, PROT_READ, PROT_WRITE};
use syscall::scheme::Scheme;
use syscall;
struct Address {
phys: usize,
len: usize,
virt: usize
}
pub struct MemoryScheme {
handles: Mutex<BTreeMap<usize, Vec<Address>>>,
next_id: AtomicUsize
}
pub struct MemoryScheme;
impl MemoryScheme {
pub fn new() -> Self {
Self {
handles: Mutex::new(BTreeMap::new()),
next_id: AtomicUsize::new(0)
}
MemoryScheme
}
}
impl Scheme for MemoryScheme {
fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.lock().insert(id, Vec::new());
Ok(0)
}
......@@ -46,24 +32,52 @@ impl Scheme for MemoryScheme {
Ok(0)
}
fn fmap(&self, id: usize, _offset: usize, len: usize) -> Result<usize> {
let mut handles = self.handles.lock();
let handle = handles.get_mut(&id).ok_or(Error::new(ENOENT))?;
fn fmap(&self, _id: usize, map: &Map) -> Result<usize> {
//TODO: Abstract with other grant creation
if map.size == 0 {
Ok(0)
} else {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut grants = context.grants.lock();
let full_size = ((map.size + 4095)/4096) * 4096;
let mut to_address = ::USER_GRANT_OFFSET;
// Warning: These functions are bypassing the root check.
let phys = syscall::inner_physalloc(len)?;
let virt = syscall::inner_physmap(phys, len, syscall::flag::MAP_WRITE).map_err(|err| {
syscall::inner_physfree(phys, len).expect("newly allocated region failed to free");
err
})?;
let mut entry_flags = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;
if map.flags & PROT_EXEC == 0 {
entry_flags |= EntryFlags::NO_EXECUTE;
}
if map.flags & PROT_READ > 0 {
//TODO: PROT_READ
}
if map.flags & PROT_WRITE > 0 {
entry_flags |= EntryFlags::WRITABLE;
}
handle.push(Address {
phys,
len,
virt
});
let mut i = 0;
while i < grants.len() {
let start = grants[i].start_address().get();
if to_address + full_size < start {
break;
}
Ok(virt)
let pages = (grants[i].size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
i += 1;
}
grants.insert(i, Grant::map(
VirtualAddress::new(to_address),
full_size,
entry_flags
));
Ok(to_address)
}
}
fn fcntl(&self, _id: usize, _cmd: usize, _arg: usize) -> Result<usize> {
......@@ -80,23 +94,7 @@ impl Scheme for MemoryScheme {
Ok(i)
}
fn close(&self, id: usize) -> Result<usize> {
let allocations = self.handles.lock()
.remove(&id)
.ok_or(Error::new(ENOENT))?;
for addr in allocations {
// physunmap fails if already unmapped
// physfree can't currently fail
//
// What if somebody with root already freed the physical address?
// (But left the mapping, which means we attempt to free it again)
// I'd rather not think about it.
// (Still, that requires root)
let _ = syscall::inner_physunmap(addr.virt)
.and_then(|_| syscall::inner_physfree(addr.phys, addr.len));
}
fn close(&self, _id: usize) -> Result<usize> {
Ok(0)
}
}
......@@ -13,9 +13,9 @@ use paging::entry::EntryFlags;
use paging::temporary_page::TemporaryPage;
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use sync::{WaitQueue, WaitMap};
use syscall::data::{Packet, Stat, StatVfs, TimeSpec};
use syscall::data::{Map, Packet, Stat, StatVfs, TimeSpec};
use syscall::error::*;
use syscall::flag::{EVENT_READ, O_NONBLOCK};
use syscall::flag::{EVENT_READ, O_NONBLOCK, PROT_EXEC, PROT_READ, PROT_WRITE};
use syscall::number::*;
use syscall::scheme::Scheme;
......@@ -28,7 +28,7 @@ pub struct UserInner {
next_id: AtomicU64,
context: Weak<RwLock<Context>>,
todo: WaitQueue<Packet>,
fmap: Mutex<BTreeMap<u64, (Weak<RwLock<Context>>, usize)>>,
fmap: Mutex<BTreeMap<u64, (Weak<RwLock<Context>>, Map)>>,
done: WaitMap<u64, usize>
}
......@@ -78,14 +78,15 @@ impl UserInner {
}
pub fn capture(&self, buf: &[u8]) -> Result<usize> {
UserInner::capture_inner(&self.context, buf.as_ptr() as usize, buf.len(), false)
UserInner::capture_inner(&self.context, buf.as_ptr() as usize, buf.len(), PROT_READ)
}
pub fn capture_mut(&self, buf: &mut [u8]) -> Result<usize> {
UserInner::capture_inner(&self.context, buf.as_mut_ptr() as usize, buf.len(), true)
UserInner::capture_inner(&self.context, buf.as_mut_ptr() as usize, buf.len(), PROT_WRITE)
}
fn capture_inner(context_weak: &Weak<RwLock<Context>>, address: usize, size: usize, writable: bool) -> Result<usize> {
fn capture_inner(context_weak: &Weak<RwLock<Context>>, address: usize, size: usize, flags: usize) -> Result<usize> {
//TODO: Abstract with other grant creation
if size == 0 {
Ok(0)
} else {
......@@ -102,36 +103,35 @@ impl UserInner {
let full_size = ((offset + size + 4095)/4096) * 4096;
let mut to_address = ::USER_GRANT_OFFSET;
let mut flags = EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE;
if writable {
flags |= EntryFlags::WRITABLE;
let mut entry_flags = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;
if flags & PROT_EXEC == 0 {
entry_flags |= EntryFlags::NO_EXECUTE;
}
if flags & PROT_READ > 0 {
//TODO: PROT_READ
}
if flags & PROT_WRITE > 0 {
entry_flags |= EntryFlags::WRITABLE;
}
for i in 0 .. grants.len() {
let mut i = 0;
while i < grants.len() {
let start = grants[i].start_address().get();
if to_address + full_size < start {
grants.insert(i, Grant::map_inactive(
VirtualAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
flags,
&mut new_table,
&mut temporary_page
));
return Ok(to_address + offset);
} else {
let pages = (grants[i].size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
break;
}
let pages = (grants[i].size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
i += 1;
}
grants.push(Grant::map_inactive(
grants.insert(i, Grant::map_inactive(
VirtualAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
flags,
entry_flags,
&mut new_table,
&mut temporary_page
));
......@@ -186,9 +186,9 @@ impl UserInner {
_ => println!("Unknown scheme -> kernel message {}", packet.a)
}
} else {
if let Some((context_weak, size)) = self.fmap.lock().remove(&packet.id) {
if let Some((context_weak, map)) = self.fmap.lock().remove(&packet.id) {
if let Ok(address) = Error::demux(packet.a) {
packet.a = Error::mux(UserInner::capture_inner(&context_weak, address, size, true));
packet.a = Error::mux(UserInner::capture_inner(&context_weak, address, map.size, map.flags));
}
}
......@@ -304,9 +304,11 @@ impl Scheme for UserScheme {
inner.call(SYS_FEVENT, file, flags, 0)
}
fn fmap(&self, file: usize, offset: usize, size: usize) -> Result<usize> {
fn fmap(&self, file: usize, map: &Map) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture(map)?;
let (pid, uid, gid, context_lock) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
......@@ -316,18 +318,22 @@ impl Scheme for UserScheme {
let id = inner.next_id.fetch_add(1, Ordering::SeqCst);
inner.fmap.lock().insert(id, (context_lock, size));
inner.fmap.lock().insert(id, (context_lock, *map));
inner.call_inner(Packet {
let result = inner.call_inner(Packet {
id: id,
pid: pid.into(),
uid: uid,
gid: gid,
a: SYS_FMAP,
b: file,
c: offset,
d: size
})
c: address,
d: mem::size_of::<Map>()
});
let _ = inner.release(address);
result
}
fn fpath(&self, file: usize, buf: &mut [u8]) -> Result<usize> {
......
......@@ -3,7 +3,7 @@ use core::ops::Range;
use alloc::string::String;
use alloc::vec::Vec;
use super::data::{Stat, TimeSpec};
use super::data::{Map, Stat, TimeSpec};
use super::flag::*;
use super::number::*;
use super::validate::*;
......@@ -131,10 +131,12 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
d
),
SYS_FMAP => format!(
"fmap({}, {:#X}, {})",
"fmap({}, {:?})",
b,
c,
d
validate_slice(
c as *const Map,
d/mem::size_of::<Map>()
),
),
SYS_FUNMAP => format!(
"funmap({:#X})",
......
......@@ -5,7 +5,7 @@ use paging::entry::EntryFlags;
use context;
use context::memory::Grant;
use syscall::error::{Error, EFAULT, EINVAL, ENOMEM, EPERM, ESRCH, Result};
use syscall::flag::{MAP_WRITE, MAP_WRITE_COMBINE};
use syscall::flag::{PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE};
fn enforce_root() -> Result<()> {
let contexts = context::contexts();
......@@ -51,6 +51,7 @@ pub fn physfree(physical_address: usize, size: usize) -> Result<usize> {
//TODO: verify exlusive access to physical memory
pub fn inner_physmap(physical_address: usize, size: usize, flags: usize) -> Result<usize> {
//TODO: Abstract with other grant creation
if size == 0 {
Ok(0)
} else {
......@@ -66,32 +67,27 @@ pub fn inner_physmap(physical_address: usize, size: usize, flags: usize) -> Resu
let mut to_address = ::USER_GRANT_OFFSET;
let mut entry_flags = EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE;
if flags & MAP_WRITE == MAP_WRITE {
if flags & PHYSMAP_WRITE == PHYSMAP_WRITE {
entry_flags |= EntryFlags::WRITABLE;
}
if flags & MAP_WRITE_COMBINE == MAP_WRITE_COMBINE {
if flags & PHYSMAP_WRITE_COMBINE == PHYSMAP_WRITE_COMBINE {
entry_flags |= EntryFlags::HUGE_PAGE;
}
for i in 0 .. grants.len() {
let mut i = 0;
while i < grants.len() {
let start = grants[i].start_address().get();
if to_address + full_size < start {
grants.insert(i, Grant::physmap(
PhysicalAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
entry_flags
));
return Ok(to_address + offset);
} else {
let pages = (grants[i].size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
break;
}
let pages = (grants[i].size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
i += 1;
}
grants.push(Grant::physmap(
grants.insert(i, Grant::physmap(
PhysicalAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
......
Subproject commit f479a66884984a8efd7520bd9767e745730db953
Subproject commit 31b7ae8eef2b108e453c2f9b154ca516be900c11
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment