From 70b4d99c96ecb148de23e47ea9b819d2797b3197 Mon Sep 17 00:00:00 2001 From: 4lDO2 <4lDO2@protonmail.com> Date: Mon, 3 Jul 2023 18:33:16 +0200 Subject: [PATCH] Implement MAP_FIXED without MAP_FIXED_NOREPLACE. --- src/context/memory.rs | 89 ++++++++++++++++++++++++------------------- src/scheme/memory.rs | 11 ++++-- src/scheme/proc.rs | 10 +++-- src/scheme/user.rs | 15 ++++---- src/syscall/driver.rs | 1 + src/syscall/mod.rs | 4 +- 6 files changed, 76 insertions(+), 54 deletions(-) diff --git a/src/context/memory.rs b/src/context/memory.rs index ad29f58a..f0fd4f73 100644 --- a/src/context/memory.rs +++ b/src/context/memory.rs @@ -203,7 +203,8 @@ impl AddrSpace { } Ok(()) } - pub fn munmap(mut self: RwLockWriteGuard<'_, Self>, mut requested_span: PageSpan, unpin: bool) -> Result<()> { + #[must_use = "needs to notify files"] + pub fn munmap(&mut self, mut requested_span: PageSpan, unpin: bool) -> Result<Vec<UnmapResult>> { let mut notify_files = Vec::new(); let mut flusher = PageFlushAll::new(); @@ -249,17 +250,44 @@ impl AddrSpace { notify_files.push(unmap_result); } } - drop(self); - for unmap_result in notify_files { - let _ = unmap_result.unmap(); - } + Ok(notify_files) + } + pub fn mmap_anywhere(&mut self, page_count: NonZeroUsize, flags: MapFlags, map: impl FnOnce(Page, PageFlags<RmmA>, &mut PageMapper, &mut dyn Flusher<RmmA>) -> Result<Grant>) -> Result<Page> { + self.mmap(None, page_count, flags, &mut Vec::new(), map) + } + pub fn mmap( + &mut self, + requested_base_opt: Option<Page>, + page_count: NonZeroUsize, + flags: MapFlags, + notify_files_out: &mut Vec<UnmapResult>, + map: impl FnOnce(Page, PageFlags<RmmA>, &mut PageMapper, &mut dyn Flusher<RmmA>) -> Result<Grant>, + ) -> Result<Page> { + let selected_span = match requested_base_opt { + Some(requested_base) => { + let requested_span = PageSpan::new(requested_base, page_count.get()); + + if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) && self.grants.conflicts(requested_span).next().is_some() { + return Err(Error::new(EEXIST)); + } - Ok(()) - } - pub fn mmap(&mut self, page: Option<Page>, page_count: NonZeroUsize, flags: MapFlags, map: impl FnOnce(Page, PageFlags<RmmA>, &mut PageMapper, &mut dyn Flusher<RmmA>) -> Result<Grant>) -> Result<Page> { - // Finally, the end of all "T0DO: Abstract with other grant creation"! - let selected_span = self.grants.find_free_at(self.mmap_min, page, page_count.get(), flags)?; + // TODO: Rename MAP_FIXED+MAP_FIXED_NOREPLACE to MAP_FIXED and + // MAP_FIXED_REPLACE/MAP_REPLACE? + let map_fixed_replace = flags.contains(MapFlags::MAP_FIXED); + + if map_fixed_replace { + let unpin = false; + let mut notify_files = self.munmap(requested_span, unpin)?; + notify_files_out.append(&mut notify_files); + + requested_span + } else { + self.grants.find_free_near(self.mmap_min, page_count.get(), Some(requested_base)).ok_or(Error::new(ENOMEM))? + } + } + None => self.grants.find_free(self.mmap_min, page_count.get()).ok_or(Error::new(ENOMEM))?, + }; // TODO: Threads share address spaces, so not only the inactive flusher should be sending // out IPIs. IPIs will only be sent when downgrading mappings (i.e. 
when a stale TLB entry @@ -279,7 +307,7 @@ impl AddrSpace { Ok(selected_span.base) } - pub fn r#move(dst: &mut AddrSpace, src: &mut AddrSpace, src_span: PageSpan, requested_dst_base: Option<Page>, new_flags: MapFlags) -> Result<Page> { + pub fn r#move(dst: &mut AddrSpace, src: &mut AddrSpace, src_span: PageSpan, requested_dst_base: Option<Page>, new_flags: MapFlags, notify_files: &mut Vec<UnmapResult>) -> Result<Page> { let nz_count = NonZeroUsize::new(src_span.count).ok_or(Error::new(EINVAL))?; let grant_base = { @@ -305,7 +333,7 @@ impl AddrSpace { let src_flusher = PageFlushAll::new(); - dst.mmap(requested_dst_base, nz_count, new_flags, |dst_page, flags, dst_mapper, dst_flusher| middle.transfer(dst_page, flags, &mut src.table.utable, dst_mapper, src_flusher, dst_flusher)) + dst.mmap(requested_dst_base, nz_count, new_flags, notify_files, |dst_page, flags, dst_mapper, dst_flusher| middle.transfer(dst_page, flags, &mut src.table.utable, dst_mapper, src_flusher, dst_flusher)) } } @@ -447,7 +475,8 @@ impl UserGrants { } /// Return a free region with the specified size // TODO: Alignment (x86_64: 4 KiB, 2 MiB, or 1 GiB). - pub fn find_free(&self, min: usize, page_count: usize) -> Option<PageSpan> { + // TODO: Support finding grant close to a requested address? + pub fn find_free_near(&self, min: usize, page_count: usize, _near: Option<Page>) -> Option<PageSpan> { // Get first available hole, but do reserve the page starting from zero as most compiled // languages cannot handle null pointers safely even if they point to valid memory. If an // application absolutely needs to map the 0th page, they will have to do so explicitly via @@ -468,32 +497,8 @@ impl UserGrants { // Create new region Some(PageSpan::new(Page::containing_address(VirtualAddress::new(cmp::max(hole_start.data(), min))), page_count)) } - /// Return a free region, respecting the user's hinted address and flags. Address may be null. - pub fn find_free_at(&mut self, min: usize, base: Option<Page>, page_count: usize, flags: MapFlags) -> Result<PageSpan> { - let Some(requested_base) = base else { - // Free hands! - return self.find_free(min, page_count).ok_or(Error::new(ENOMEM)); - }; - - // The user wished to have this region... - let requested_span = PageSpan::new(requested_base, page_count); - - if let Some(_grant) = self.conflicts(requested_span).next() { - // ... but it already exists - - if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) { - return Err(Error::new(EEXIST)); - } - if flags.contains(MapFlags::MAP_FIXED) { - // TODO: find_free_at -> Result<(PageSpan, needs_to_unmap: PageSpan)> - return Err(Error::new(EOPNOTSUPP)); - } else { - // TODO: Find grant close to requested address? 
- return self.find_free(min, page_count).ok_or(Error::new(ENOMEM)); - } - } - - Ok(requested_span) + pub fn find_free(&self, min: usize, page_count: usize) -> Option<PageSpan> { + self.find_free_near(min, page_count, None) } fn reserve(&mut self, base: Page, page_count: usize) { let start_address = base.start_address(); @@ -1484,3 +1489,9 @@ pub struct BorrowedFmapSource<'a> { pub addr_space_lock: &'a RwLock<AddrSpace>, pub addr_space_guard: RwLockWriteGuard<'a, AddrSpace>, } + +pub fn handle_notify_files(notify_files: Vec<UnmapResult>) { + for file in notify_files { + let _ = file.unmap(); + } +} diff --git a/src/scheme/memory.rs b/src/scheme/memory.rs index a4f3ac08..a76cc4e5 100644 --- a/src/scheme/memory.rs +++ b/src/scheme/memory.rs @@ -2,11 +2,12 @@ use core::num::NonZeroUsize; use alloc::sync::Arc; use rmm::PhysicalAddress; +use alloc::vec::Vec; use spin::RwLock; use syscall::MapFlags; -use crate::context::memory::{AddrSpace, Grant, PageSpan}; use crate::memory::{free_frames, used_frames, PAGE_SIZE, Frame}; +use crate::context::memory::{AddrSpace, Grant, PageSpan, handle_notify_files}; use crate::paging::VirtualAddress; use crate::paging::entry::EntryFlags; @@ -59,12 +60,16 @@ impl MemoryScheme { let span = PageSpan::validate_nonempty(VirtualAddress::new(map.address), map.size).ok_or(Error::new(EINVAL))?; let page_count = NonZeroUsize::new(span.count).ok_or(Error::new(EINVAL))?; + let mut notify_files = Vec::new(); + let page = addr_space .write() - .mmap((map.address != 0).then_some(span.base), page_count, map.flags, |dst_page, flags, mapper, flusher| { + .mmap((map.address != 0).then_some(span.base), page_count, map.flags, &mut notify_files, |dst_page, flags, mapper, flusher| { Ok(Grant::zeroed(PageSpan::new(dst_page, page_count.get()), flags, mapper, flusher)?) })?; + handle_notify_files(notify_files); + Ok(page.start_address().data()) } pub fn physmap(physical_address: usize, size: usize, flags: MapFlags, memory_type: MemoryType) -> Result<usize> { @@ -82,7 +87,7 @@ impl MemoryScheme { } let page_count = NonZeroUsize::new(size.div_ceil(PAGE_SIZE)).ok_or(Error::new(EINVAL))?; - AddrSpace::current()?.write().mmap(None, page_count, flags, |dst_page, mut page_flags, dst_mapper, dst_flusher| { + AddrSpace::current()?.write().mmap_anywhere(page_count, flags, |dst_page, mut page_flags, dst_mapper, dst_flusher| { match memory_type { // Default MemoryType::Writeback => (), diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs index 2939db90..41537b5a 100644 --- a/src/scheme/proc.rs +++ b/src/scheme/proc.rs @@ -1,6 +1,6 @@ use crate::{ arch::paging::{mapper::InactiveFlusher, Page, RmmA, RmmArch, VirtualAddress}, - context::{self, Context, ContextId, Status, file::{FileDescription, FileDescriptor}, memory::{AddrSpace, Grant, new_addrspace, map_flags, PageSpan}, BorrowedHtBuf}, + context::{self, Context, ContextId, Status, file::{FileDescription, FileDescriptor}, memory::{AddrSpace, Grant, new_addrspace, map_flags, PageSpan, handle_notify_files}, BorrowedHtBuf}, memory::PAGE_SIZE, ptrace, scheme::{self, FileHandle, KernelScheme, SchemeId}, @@ -690,13 +690,17 @@ impl KernelScheme for ProcScheme { let src_page_count = NonZeroUsize::new(src_span.count).ok_or(Error::new(EINVAL))?; + let mut notify_files = Vec::new(); + // TODO: Validate flags let result_base = if consume { - AddrSpace::r#move(&mut *dst_addr_space, &mut *src_addr_space, src_span, requested_dst_base, map.flags)? 
+ AddrSpace::r#move(&mut *dst_addr_space, &mut *src_addr_space, src_span, requested_dst_base, map.flags, &mut notify_files)? } else { - dst_addr_space.mmap(requested_dst_base, src_page_count, map.flags, |dst_page, flags, dst_mapper, flusher| Ok(Grant::borrow(Arc::clone(addrspace), &*src_addr_space, src_span.base, dst_page, src_span.count, flags, dst_mapper, flusher, true, true, false)?))? + dst_addr_space.mmap(requested_dst_base, src_page_count, map.flags, &mut notify_files, |dst_page, flags, dst_mapper, flusher| Ok(Grant::borrow(Arc::clone(addrspace), &*src_addr_space, src_span.base, dst_page, src_span.count, flags, dst_mapper, flusher, true, true, false)?))? }; + handle_notify_files(notify_files); + Ok(result_base.start_address().data()) } _ => Err(Error::new(EBADF)), diff --git a/src/scheme/user.rs b/src/scheme/user.rs index 78993876..4026b810 100644 --- a/src/scheme/user.rs +++ b/src/scheme/user.rs @@ -1,7 +1,8 @@ use alloc::sync::{Arc, Weak}; use alloc::boxed::Box; use alloc::collections::BTreeMap; -use syscall::{SKMSG_FRETURNFD, CallerCtx, SKMSG_PROVIDE_MMAP}; +use alloc::vec::Vec; +use syscall::{SKMSG_FRETURNFD, CallerCtx, SKMSG_PROVIDE_MMAP, MAP_FIXED_NOREPLACE}; use core::num::NonZeroUsize; use core::sync::atomic::{AtomicBool, Ordering}; use core::{mem, usize}; @@ -11,7 +12,7 @@ use spin::{Mutex, RwLock}; use crate::context::context::HardBlockedReason; use crate::context::{self, Context, BorrowedHtBuf, Status}; use crate::context::file::FileDescription; -use crate::context::memory::{AddrSpace, DANGLING, Grant, GrantFileRef, PageSpan, MmapMode, page_flags, BorrowedFmapSource}; +use crate::context::memory::{AddrSpace, DANGLING, Grant, GrantFileRef, PageSpan, MmapMode, page_flags, BorrowedFmapSource, handle_notify_files}; use crate::event; use crate::memory::Frame; use crate::paging::{PAGE_SIZE, Page, VirtualAddress}; @@ -146,7 +147,7 @@ impl UserInner { let src_page = Page::containing_address(VirtualAddress::new(tail.buf_mut().as_ptr() as usize)); let is_pinned = true; - let dst_page = dst_addr_space.write().mmap(None, ONE, PROT_READ, |dst_page, flags, mapper, flusher| Ok(Grant::physmap(tail_frame, PageSpan::new(dst_page, 1), flags, mapper, flusher, is_pinned)?))?; + let dst_page = dst_addr_space.write().mmap_anywhere(ONE, PROT_READ, |dst_page, flags, mapper, flusher| Ok(Grant::physmap(tail_frame, PageSpan::new(dst_page, 1), flags, mapper, flusher, is_pinned)?))?; Ok(CaptureGuard { destroyed: false, @@ -240,7 +241,7 @@ impl UserInner { } } - dst_space.mmap(Some(free_span.base), ONE, map_flags, move |dst_page, page_flags, mapper, flusher| { + dst_space.mmap(Some(free_span.base), ONE, map_flags | MAP_FIXED_NOREPLACE, &mut Vec::new(), move |dst_page, page_flags, mapper, flusher| { let is_pinned = true; Ok(Grant::physmap(frame, PageSpan::new(dst_page, 1), page_flags, mapper, flusher, is_pinned)?) 
})?; @@ -265,7 +266,7 @@ impl UserInner { let (_middle_part_of_buf, tail_part_of_buf) = middle_tail_part_of_buf.split_at(middle_page_count * PAGE_SIZE).expect("split must succeed"); if let Some(middle_page_count) = NonZeroUsize::new(middle_page_count) { - dst_space.mmap(Some(first_middle_dst_page), middle_page_count, map_flags, move |dst_page, page_flags, mapper, flusher| { + dst_space.mmap(Some(first_middle_dst_page), middle_page_count, map_flags | MAP_FIXED_NOREPLACE, &mut Vec::new(), move |dst_page, page_flags, mapper, flusher| { let eager = true; // It doesn't make sense to allow a context, that has borrowed non-RAM physical @@ -304,7 +305,7 @@ impl UserInner { } } - dst_space.mmap(Some(tail_dst_page), ONE, map_flags, move |dst_page, page_flags, mapper, flusher| { + dst_space.mmap(Some(tail_dst_page), ONE, map_flags | MAP_FIXED_NOREPLACE, &mut Vec::new(), move |dst_page, page_flags, mapper, flusher| { let is_pinned = true; Ok(Grant::physmap(frame, PageSpan::new(dst_page, 1), page_flags, mapper, flusher, is_pinned)?) })?; @@ -545,7 +546,7 @@ impl UserInner { }; let page_count_nz = NonZeroUsize::new(page_count).expect("already validated map.size != 0"); - let dst_base = dst_addr_space.write().mmap(dst_base, page_count_nz, map.flags, |dst_base, flags, mapper, flusher| { + let dst_base = dst_addr_space.write().mmap(dst_base, page_count_nz, map.flags | MAP_FIXED_NOREPLACE, &mut Vec::new(), |dst_base, flags, mapper, flusher| { Grant::borrow_fmap(PageSpan::new(dst_base, page_count), page_flags(map.flags), file_ref, src, mapper, flusher) })?; diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs index cdc17b52..608d5f62 100644 --- a/src/syscall/driver.rs +++ b/src/syscall/driver.rs @@ -9,6 +9,7 @@ use crate::syscall::flag::{MapFlags, PhysallocFlags, PartialAllocStrategy, Physm #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use alloc::sync::Arc; +use alloc::vec::Vec; use super::usercopy::UserSliceRw; diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs index fd0c3d9b..45b240b1 100644 --- a/src/syscall/mod.rs +++ b/src/syscall/mod.rs @@ -174,13 +174,13 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack } } - let mut debug = false; + let mut debug = true; debug = debug && { let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - if context.name.contains("orb") { + if context.name.contains("dash") { if a == SYS_CLOCK_GETTIME || a == SYS_YIELD { false } else if (a == SYS_WRITE || a == SYS_FSYNC) && (b == 1 || b == 2) { -- GitLab
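
Note on the first half of the change: AddrSpace::munmap no longer consumes the RwLock write guard and notifies the backing files itself; it is now #[must_use] and returns the UnmapResult values, and the callers in scheme/memory.rs and scheme/proc.rs collect them and pass them to the new handle_notify_files() once the address-space lock has been released. The following is a minimal standalone sketch of that "collect while locked, notify after unlocking" pattern; the types and names (DeferredNotify, AddrSpaceModel) are illustrative stand-ins, not the kernel's actual API.

    use std::sync::{Mutex, MutexGuard};

    // Stand-in for the kernel's UnmapResult: work that must only happen after
    // the address-space lock has been released (notifying a grant's backing file).
    struct DeferredNotify(&'static str);

    impl DeferredNotify {
        fn notify(self) {
            println!("notifying file backing the '{}' grant", self.0);
        }
    }

    struct AddrSpaceModel {
        grants: Vec<&'static str>,
    }

    impl AddrSpaceModel {
        // Mirrors the reworked munmap: instead of consuming the write guard and
        // notifying files itself, it hands the deferred work back to the caller.
        #[must_use = "needs to notify files"]
        fn munmap(&mut self, name: &'static str) -> Vec<DeferredNotify> {
            self.grants.retain(|g| *g != name);
            vec![DeferredNotify(name)]
        }
    }

    fn main() {
        let space = Mutex::new(AddrSpaceModel { grants: vec!["stack", "heap"] });

        let notify_files = {
            let mut guard: MutexGuard<'_, AddrSpaceModel> = space.lock().unwrap();
            // Only collect the deferred notifications while the lock is held.
            guard.munmap("heap")
        }; // guard dropped here, releasing the lock

        // Counterpart of handle_notify_files(): run the notifications without
        // holding the address-space lock.
        for n in notify_files {
            n.notify();
        }
    }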
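Note on the second half of the change: AddrSpace::mmap now takes the requested base directly (find_free_at is removed) and decides placement itself. MAP_FIXED_NOREPLACE fails with EEXIST on any conflicting grant; MAP_FIXED unmaps the requested span (appending the resulting UnmapResults to notify_files_out) and then maps at exactly that base; with neither flag, a free span near the hint is searched via find_free_near, which currently ignores the hint. The toy model below illustrates that decision order; MapFlags, Grants and select_base are hypothetical names for illustration only, not the kernel's types.

    use std::collections::BTreeSet;

    // Hypothetical flags mirroring the two syscall flags involved in the patch.
    #[derive(Clone, Copy)]
    struct MapFlags { fixed: bool, fixed_noreplace: bool }

    #[derive(Debug, PartialEq)]
    enum MapError { Exist, NoMem }

    // A toy "address space" tracking which page numbers are already granted.
    struct Grants(BTreeSet<usize>);

    impl Grants {
        fn conflicts(&self, base: usize, count: usize) -> bool {
            (base..base + count).any(|p| self.0.contains(&p))
        }

        // Placement policy introduced by the patch:
        // * MAP_FIXED_NOREPLACE: fail with EEXIST on any conflict.
        // * MAP_FIXED: unmap the conflicting pages first, then use the requested base.
        // * neither flag: fall back to searching for a free range near the hint.
        fn select_base(&mut self, requested: usize, count: usize, flags: MapFlags)
            -> Result<usize, MapError>
        {
            if flags.fixed_noreplace && self.conflicts(requested, count) {
                return Err(MapError::Exist);
            }
            if flags.fixed {
                for p in requested..requested + count {
                    self.0.remove(&p); // stands in for munmap of the requested span
                }
                return Ok(requested);
            }
            // Toy search: first gap of `count` pages at or above the hint.
            let mut base = requested;
            while self.conflicts(base, count) {
                base += 1;
                if base > 1 << 20 { return Err(MapError::NoMem); }
            }
            Ok(base)
        }
    }

    fn main() {
        let mut grants = Grants([10, 11].into_iter().collect());
        let noreplace = MapFlags { fixed: false, fixed_noreplace: true };
        let fixed = MapFlags { fixed: true, fixed_noreplace: false };

        assert_eq!(grants.select_base(10, 2, noreplace), Err(MapError::Exist));
        assert_eq!(grants.select_base(10, 2, fixed), Ok(10)); // replaces pages 10..12
        println!("ok");
    }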