diff --git a/src/arch/x86_64/interrupt/exception.rs b/src/arch/x86_64/interrupt/exception.rs
index 9fbd3780f8cdc2f0bc9d3553cb13079fe6c356f2..c98e579433a63195c83e16d152fbfb4744d7beb5 100644
--- a/src/arch/x86_64/interrupt/exception.rs
+++ b/src/arch/x86_64/interrupt/exception.rs
@@ -1,6 +1,4 @@
 use crate::{
-    common::unique::Unique,
-    context,
     interrupt::stack_trace,
     ptrace,
     syscall::flag::*
@@ -20,15 +18,7 @@ interrupt_stack!(divide_by_zero, stack, {
 interrupt_stack!(debug, stack, {
     let mut handled = false;
 
-    {
-        let contexts = context::contexts();
-        if let Some(context) = contexts.current() {
-            let mut context = context.write();
-            if let Some(ref mut kstack) = context.kstack {
-                context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
-            }
-        }
-    }
+    let guard = ptrace::set_process_regs(stack);
 
     // Disable singlestep before their is a breakpoint, since the
     // breakpoint handler might end up setting it again but unless it
@@ -36,20 +26,14 @@ interrupt_stack!(debug, stack, {
     let had_singlestep = stack.iret.rflags & (1 << 8) == 1 << 8;
     stack.set_singlestep(false);
 
-    if ptrace::breakpoint_callback(true).is_some() {
+    if ptrace::breakpoint_callback(syscall::PTRACE_SINGLESTEP).is_some() {
         handled = true;
     } else {
         // There was no breakpoint, restore original value
         stack.set_singlestep(had_singlestep);
     }
 
-    {
-        let contexts = context::contexts();
-        if let Some(context) = contexts.current() {
-            let mut context = context.write();
-            context.regs = None;
-        }
-    }
+    drop(guard);
 
     if !handled {
         println!("Debug trap");
diff --git a/src/arch/x86_64/interrupt/irq.rs b/src/arch/x86_64/interrupt/irq.rs
index fbc5bb87159574d344a5c9f9faee516aa2830f9d..8c755baa31dd4974fab94ad9c74786c6f2806bf0 100644
--- a/src/arch/x86_64/interrupt/irq.rs
+++ b/src/arch/x86_64/interrupt/irq.rs
@@ -1,13 +1,11 @@
 use core::sync::atomic::{AtomicUsize, Ordering};
 
-use crate::common::unique::Unique;
-use crate::context;
 use crate::context::timeout;
 use crate::device::pic;
 use crate::device::serial::{COM1, COM2};
 use crate::ipi::{ipi, IpiKind, IpiTarget};
 use crate::scheme::debug::debug_input;
-use crate::time;
+use crate::{context, ptrace, time};
 
 //resets to 0 in context::switch()
 pub static PIT_TICKS: AtomicUsize = AtomicUsize::new(0);
@@ -62,25 +60,8 @@ interrupt_stack!(pit, stack, {
     timeout::trigger();
 
     if PIT_TICKS.fetch_add(1, Ordering::SeqCst) >= 10 {
-        {
-            let contexts = crate::context::contexts();
-            if let Some(context) = contexts.current() {
-                let mut context = context.write();
-                // Make all registers available to e.g. the proc:
-                // scheme
-                if let Some(ref mut kstack) = context.kstack {
-                    context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
-                }
-            }
-        }
+        let _guard = ptrace::set_process_regs(stack);
         let _ = context::switch();
-        {
-            let contexts = crate::context::contexts();
-            if let Some(context) = contexts.current() {
-                let mut context = context.write();
-                context.regs = None;
-            }
-        }
     }
 });
diff --git a/src/arch/x86_64/interrupt/syscall.rs b/src/arch/x86_64/interrupt/syscall.rs
index 6c845cc405a79dbfde23d8e48de7684ebac048e5..6b9dfe615db217d6f51180a3dc9f23d84f1998fd 100644
--- a/src/arch/x86_64/interrupt/syscall.rs
+++ b/src/arch/x86_64/interrupt/syscall.rs
@@ -1,7 +1,6 @@
 use crate::arch::macros::InterruptStack;
 use crate::arch::{gdt, pti};
-use crate::common::unique::Unique;
-use crate::{context, ptrace, syscall};
+use crate::{ptrace, syscall};
 use x86::shared::msr;
 
 pub unsafe fn init() {
@@ -20,18 +19,10 @@ macro_rules! with_interrupt_stack {
     (unsafe fn $wrapped:ident($stack:ident) -> usize $code:block) => {
         #[inline(never)]
         unsafe fn $wrapped(stack: *mut InterruptStack) {
-            let stack = &mut *stack;
-            {
-                let contexts = context::contexts();
-                if let Some(context) = contexts.current() {
-                    let mut context = context.write();
-                    if let Some(ref mut kstack) = context.kstack {
-                        context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(&mut *stack)));
-                    }
-                }
-            }
+            let _guard = ptrace::set_process_regs(stack);
 
-            let is_sysemu = ptrace::breakpoint_callback(false);
+            let is_sysemu = ptrace::breakpoint_callback(syscall::flag::PTRACE_SYSCALL)
+                .map(|fl| fl & syscall::flag::PTRACE_SYSEMU == syscall::flag::PTRACE_SYSEMU);
             if !is_sysemu.unwrap_or(false) {
                 // If not on a sysemu breakpoint
                 let $stack = &mut *stack;
@@ -40,15 +31,7 @@ macro_rules! with_interrupt_stack {
                 if is_sysemu.is_some() {
                     // Only callback if there was a pre-syscall
                     // callback too.
-                    ptrace::breakpoint_callback(false);
-                }
-            }
-
-            {
-                let contexts = context::contexts();
-                if let Some(context) = contexts.current() {
-                    let mut context = context.write();
-                    context.regs = None;
+                    ptrace::breakpoint_callback(::syscall::PTRACE_SYSCALL);
                 }
             }
         }
diff --git a/src/arch/x86_64/macros.rs b/src/arch/x86_64/macros.rs
index 6a406c5a3cd8ac1463f67d5d990ed61a3806c3a9..5ad34d8d4ce6d57d75f7bc599ecbb5d2a2eac6e7 100644
--- a/src/arch/x86_64/macros.rs
+++ b/src/arch/x86_64/macros.rs
@@ -280,8 +280,8 @@ impl InterruptStack {
         self.scratch.rdx = all.rdx;
         self.scratch.rcx = all.rcx;
         self.scratch.rax = all.rax;
-        // self.iret.rip = all.rip;
-        // self.iret.cs = all.cs;
+        self.iret.rip = all.rip;
+        self.iret.cs = all.cs;
         // self.iret.rflags = all.eflags;
     }
     /// Enables the "Trap Flag" in the FLAGS register, causing the CPU
diff --git a/src/common/mod.rs b/src/common/mod.rs
index 7ad826b3c8b793cd4872df57e6e4ceaea44e6d9d..4bdafba1f5234701e7f4b3c0aec3654f516766ac 100644
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -1,3 +1,27 @@
 #[macro_use]
 pub mod int_like;
 pub mod unique;
+
+/// Debug macro, lifted from the std
+#[macro_export]
+macro_rules! dbg {
+    () => {
+        $crate::println!("[{}:{}]", file!(), line!());
+    };
+    ($val:expr) => {
+        // Use of `match` here is intentional because it affects the lifetimes
+        // of temporaries - https://stackoverflow.com/a/48732525/1063961
+        match $val {
+            tmp => {
+                $crate::println!("[{}:{}] {} = {:#?}",
+                    file!(), line!(), stringify!($val), &tmp);
+                tmp
+            }
+        }
+    };
+    // Trailing comma with single argument is ignored
+    ($val:expr,) => { $crate::dbg!($val) };
+    ($($val:expr),+ $(,)?) => {
+        ($($crate::dbg!($val)),+,)
+    };
+}
diff --git a/src/common/unique.rs b/src/common/unique.rs
index 214f5b53a3a02dd1ab23f2e3f9e6d79e0f5a9d71..f426c525a0902ca8aa12245c7b9388350d17dc09 100644
--- a/src/common/unique.rs
+++ b/src/common/unique.rs
@@ -17,7 +17,7 @@ unsafe impl<T> Sync for Unique<T> {}
 impl<T> Unique<T> {
     pub fn new(ptr: *mut T) -> Self {
-        Self(NonNull::new(ptr).unwrap())
+        Self(NonNull::new(ptr).expect("Did not expect pointer to be null"))
     }
     pub unsafe fn new_unchecked(ptr: *mut T) -> Self {
         Self(NonNull::new_unchecked(ptr))
diff --git a/src/context/context.rs b/src/context/context.rs
index 269b4ee4095953e4fa4302edac360205d76cfe67..0a0f1869b9c6ad635e9d2ba16a2284ff31550993 100644
--- a/src/context/context.rs
+++ b/src/context/context.rs
@@ -142,8 +142,8 @@ pub struct Context {
     pub kfx: Option<Box<[u8]>>,
     /// Kernel stack
     pub kstack: Option<Box<[u8]>>,
-    /// Kernel signal backup
-    pub ksig: Option<(arch::Context, Option<Box<[u8]>>, Option<Box<[u8]>>)>,
+    /// Kernel signal backup: Registers, Kernel FX, Kernel Stack, Signal number
+    pub ksig: Option<(arch::Context, Option<Box<[u8]>>, Option<Box<[u8]>>, u8)>,
     /// Restore ksig context on next switch
     pub ksig_restore: bool,
     /// Executable image
diff --git a/src/context/list.rs b/src/context/list.rs
index 138acecbe575a87ce2af099c0cdf8adb3043bcfe..ef92c9ceaf991d199cb0bde4cbfff9dd19feb09d 100644
--- a/src/context/list.rs
+++ b/src/context/list.rs
@@ -2,7 +2,7 @@ use alloc::sync::Arc;
 use alloc::boxed::Box;
 use alloc::collections::BTreeMap;
 use core::alloc::{GlobalAlloc, Layout};
-use core::mem;
+use core::{iter, mem};
 use core::sync::atomic::Ordering;
 use crate::paging;
 use spin::RwLock;
@@ -30,6 +30,15 @@ impl ContextList {
         self.map.get(&id)
     }
 
+    /// Get an iterator of all parents
+    pub fn anchestors(&'_ self, id: ContextId) -> impl Iterator<Item = (ContextId, &Arc<RwLock<Context>>)> + '_ {
+        iter::successors(self.get(id).map(|context| (id, context)), move |(_id, context)| {
+            let context = context.read();
+            let id = context.ppid;
+            self.get(id).map(|context| (id, context))
+        })
+    }
+
     /// Get the current context.
     pub fn current(&self) -> Option<&Arc<RwLock<Context>>> {
         self.map.get(&super::CONTEXT_ID.load(Ordering::SeqCst))
diff --git a/src/context/signal.rs b/src/context/signal.rs
index fc8e462698406bdda6a8f295a449bde7a700e64c..d5d2ff18cb3e48918ca16cb2532148fa6856b648 100644
--- a/src/context/signal.rs
+++ b/src/context/signal.rs
@@ -3,8 +3,14 @@ use core::mem;
 use crate::context::{contexts, switch, Status, WaitpidKey};
 use crate::start::usermode;
-use crate::syscall;
-use crate::syscall::flag::{SIG_DFL, SIG_IGN, SIGCHLD, SIGCONT, SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU};
+use crate::{ptrace, syscall};
+use crate::syscall::flag::{PTRACE_EVENT_SIGNAL, PTRACE_SIGNAL, SIG_DFL, SIG_IGN, SIGCHLD, SIGCONT, SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU};
+use crate::syscall::data::{PtraceEvent, PtraceEventData};
+
+pub fn is_user_handled(handler: Option<extern "C" fn(usize)>) -> bool {
+    let handler = handler.map(|ptr| ptr as usize).unwrap_or(0);
+    handler != SIG_DFL && handler != SIG_IGN
+}
 
 pub extern "C" fn signal_handler(sig: usize) {
     let (action, restorer) = {
@@ -15,7 +21,12 @@ pub extern "C" fn signal_handler(sig: usize) {
         actions[sig]
     };
 
-    let handler = action.sa_handler as usize;
+    ptrace::send_event(PtraceEvent {
+        tag: PTRACE_EVENT_SIGNAL,
+        data: PtraceEventData { signal: sig }
+    });
+
+    let handler = action.sa_handler.map(|ptr| ptr as usize).unwrap_or(0);
     if handler == SIG_DFL {
         match sig {
             SIGCHLD => {
@@ -89,6 +100,8 @@ pub extern "C" fn signal_handler(sig: usize) {
     } else {
         // println!("Call {:X}", handler);
 
+        ptrace::breakpoint_callback(PTRACE_SIGNAL);
+
         unsafe {
             let mut sp = crate::USER_SIGSTACK_OFFSET + crate::USER_SIGSTACK_SIZE - 256;
diff --git a/src/context/switch.rs b/src/context/switch.rs
index cf55220fc802dbbcbda6456318df42a4fcf20efa..2a5f6c51aed09eb27f63562faf68cfbc4c47dc73 100644
--- a/src/context/switch.rs
+++ b/src/context/switch.rs
@@ -151,7 +151,7 @@ pub unsafe fn switch() -> bool {
             let arch = (&mut *to_ptr).arch.clone();
             let kfx = (&mut *to_ptr).kfx.clone();
             let kstack = (&mut *to_ptr).kstack.clone();
-            (&mut *to_ptr).ksig = Some((arch, kfx, kstack));
+            (&mut *to_ptr).ksig = Some((arch, kfx, kstack, sig));
             (&mut *to_ptr).arch.signal_stack(signal_handler, sig);
         }
diff --git a/src/ptrace.rs b/src/ptrace.rs
index 5a88d3a7902746ebb351d1a0047657e9dfd7999b..3f3390f492b7a6ee1b4bdb32737a3723f35c44dc 100644
--- a/src/ptrace.rs
+++ b/src/ptrace.rs
@@ -9,18 +9,151 @@ use crate::{
         }
     },
     common::unique::Unique,
-    context::{self, Context, ContextId, Status},
+    context::{self, signal, Context, ContextId, Status},
+    event,
+    scheme::proc,
     sync::WaitCondition
 };
 use alloc::{
     boxed::Box,
-    collections::BTreeMap,
+    collections::{
+        BTreeMap,
+        VecDeque,
+        btree_map::Entry
+    },
     sync::Arc,
     vec::Vec
 };
+use core::{
+    cmp,
+    sync::atomic::Ordering
+};
 use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
-use syscall::error::*;
+use syscall::{
+    data::PtraceEvent,
+    error::*,
+    flag::*
+};
+
+//  ____                _
+// / ___|  ___  ___ ___(_) ___  _ __  ___
+// \___ \ / _ \/ __/ __| |/ _ \| '_ \/ __|
+//  ___) |  __/\__ \__ \ | (_) | | | \__ \
+// |____/ \___||___/___/_|\___/|_| |_|___/
+
+#[derive(Debug)]
+struct Session {
+    file_id: usize,
+    events: VecDeque<PtraceEvent>,
+    breakpoint: Option<Breakpoint>,
+    tracer: Arc<WaitCondition>
+}
+
+type SessionMap = BTreeMap<ContextId, Session>;
+
+static SESSIONS: Once<RwLock<SessionMap>> = Once::new();
+
+fn init_sessions() -> RwLock<SessionMap> {
+    RwLock::new(BTreeMap::new())
+}
+fn sessions() -> RwLockReadGuard<'static, SessionMap> {
+    SESSIONS.call_once(init_sessions).read()
+}
+fn sessions_mut() -> RwLockWriteGuard<'static, SessionMap> {
+    SESSIONS.call_once(init_sessions).write()
+}
+
+/// Try to create a new session, but fail if one already exists for
+/// this process
+pub fn try_new_session(pid: ContextId, file_id: usize) -> bool {
+    let mut sessions = sessions_mut();
+
+    match sessions.entry(pid) {
+        Entry::Occupied(_) => false,
+        Entry::Vacant(vacant) => {
+            vacant.insert(Session {
+                file_id,
+                events: VecDeque::new(),
+                breakpoint: None,
+                tracer: Arc::new(WaitCondition::new())
+            });
+            true
+        }
+    }
+}
+
+/// Returns true if a session is attached to this process
+pub fn is_traced(pid: ContextId) -> bool {
+    sessions().contains_key(&pid)
+}
+
+/// Used for getting the flags in fevent
+pub fn session_fevent_flags(pid: ContextId) -> Option<usize> {
+    let sessions = sessions();
+    let session = sessions.get(&pid)?;
+    let mut flags = 0;
+    if !session.events.is_empty() {
+        flags |= EVENT_READ;
+    }
+    if session.breakpoint.as_ref().map(|b| b.reached).unwrap_or(true) {
+        flags |= EVENT_WRITE;
+    }
+    Some(flags)
+}
+
+/// Remove the session from the list of open sessions and notify any
+/// waiting processes
+pub fn close_session(pid: ContextId) {
+    if let Some(session) = sessions_mut().remove(&pid) {
+        session.tracer.notify();
+        if let Some(breakpoint) = session.breakpoint {
+            breakpoint.tracee.notify();
+        }
+    }
+}
+
+/// Trigger a notification to the event: scheme
+fn proc_trigger_event(file_id: usize, flags: usize) {
+    event::trigger(proc::PROC_SCHEME_ID.load(Ordering::SeqCst), file_id, flags);
+}
+
+/// Dispatch an event to any tracer tracing `self`. This will cause
+/// the tracer to wake up and poll for events. Returns Some(()) if an
+/// event was sent.
+pub fn send_event(event: PtraceEvent) -> Option<()> {
+    let contexts = context::contexts();
+    let context = contexts.current()?;
+    let context = context.read();
+
+    let mut sessions = sessions_mut();
+    let session = sessions.get_mut(&context.id)?;
+
+    session.events.push_back(event);
+
+    // Notify nonblocking tracers
+    if session.events.len() == 1 {
+        // If the list of events was previously empty, alert now
+        proc_trigger_event(session.file_id, EVENT_READ);
+    }
+
+    // Alert blocking tracers
+    session.tracer.notify();
+
+    Some(())
+}
+
+/// Poll events, return the number of events read
+pub fn recv_events(pid: ContextId, out: &mut [PtraceEvent]) -> Option<usize> {
+    let mut sessions = sessions_mut();
+    let session = sessions.get_mut(&pid)?;
+
+    let len = cmp::min(out.len(), session.events.len());
+    for (dst, src) in out.iter_mut().zip(session.events.drain(..len)) {
+        *dst = src;
+    }
+    Some(len)
+}
 
 //  ____                 _                _       _
 // | __ ) _ __ ___  __ _| | ___ __  ___  (_)_ __ | |_ ___
 // |  _ \| '__/ _ \/ _` | |/ / '_ \/ _ \ | | '_ \| __/ __|
@@ -29,33 +162,23 @@ use syscall::error::*;
 // |____/|_|  \___|\__,_|_|\_\ .__/ \___/ |_|_| |_|\__|___/
 //                           |_|
 
-struct Handle {
+#[derive(Debug)]
+struct Breakpoint {
     tracee: Arc<WaitCondition>,
-    tracer: Arc<WaitCondition>,
     reached: bool,
-
-    sysemu: bool,
-    singlestep: bool
-}
-
-static BREAKPOINTS: Once<RwLock<BTreeMap<ContextId, Handle>>> = Once::new();
-
-fn init_breakpoints() -> RwLock<BTreeMap<ContextId, Handle>> {
-    RwLock::new(BTreeMap::new())
-}
-fn breakpoints() -> RwLockReadGuard<'static, BTreeMap<ContextId, Handle>> {
-    BREAKPOINTS.call_once(init_breakpoints).read()
-}
-fn breakpoints_mut() -> RwLockWriteGuard<'static, BTreeMap<ContextId, Handle>> {
-    BREAKPOINTS.call_once(init_breakpoints).write()
+    flags: u8
 }
 
-fn inner_cont(pid: ContextId) -> Option<Handle> {
+fn inner_cont(pid: ContextId) -> Option<Breakpoint> {
     // Remove the breakpoint to both save space and also make sure any
     // yet unreached but obsolete breakpoints don't stop the program.
-    let handle = breakpoints_mut().remove(&pid)?;
-    handle.tracee.notify();
-    Some(handle)
+    let mut sessions = sessions_mut();
+    let session = sessions.get_mut(&pid)?;
+    let breakpoint = session.breakpoint.take()?;
+
+    breakpoint.tracee.notify();
+
+    Some(breakpoint)
 }
 
 /// Continue the process with the specified ID
@@ -63,92 +186,108 @@ pub fn cont(pid: ContextId) {
     inner_cont(pid);
 }
 
-/// Create a new breakpoint for the specified tracee, optionally with a sysemu flag
-pub fn set_breakpoint(pid: ContextId, sysemu: bool, singlestep: bool) {
-    let (tracee, tracer) = match inner_cont(pid) {
-        Some(breakpoint) => (breakpoint.tracee, breakpoint.tracer),
-        None => (
-            Arc::new(WaitCondition::new()),
-            Arc::new(WaitCondition::new())
-        )
-    };
+/// Create a new breakpoint for the specified tracee, optionally with
+/// a sysemu flag. Panics if the session is invalid.
+pub fn set_breakpoint(pid: ContextId, flags: u8) {
+    let tracee = inner_cont(pid)
+        .map(|b| b.tracee)
+        .unwrap_or_else(|| Arc::new(WaitCondition::new()));
 
-    breakpoints_mut().insert(pid, Handle {
+    let mut sessions = sessions_mut();
+    let session = sessions.get_mut(&pid).expect("proc (set_breakpoint): invalid session");
+    session.breakpoint = Some(Breakpoint {
         tracee,
-        tracer,
         reached: false,
-        sysemu,
-        singlestep
+        flags
     });
 }
 
-/// Wait for the tracee to stop.
-/// Note: Don't call while holding any locks, this will switch contexts
-pub fn wait_breakpoint(pid: ContextId) -> Result<()> {
-    let tracer = {
-        let breakpoints = breakpoints();
-        match breakpoints.get(&pid) {
-            Some(breakpoint) if !breakpoint.reached => Arc::clone(&breakpoint.tracer),
-            _ => return Ok(())
+/// Wait for the tracee to stop. If an event occurs, it returns a copy
+/// of that. It will still be available for read using recv_events.
+///
+/// Note: Don't call while holding any locks, this will switch
+/// contexts
+pub fn wait(pid: ContextId) -> Result<Option<PtraceEvent>> {
+    let tracer: Arc<WaitCondition> = {
+        let sessions = sessions();
+        match sessions.get(&pid) {
+            Some(session) if session.breakpoint.as_ref().map(|b| !b.reached).unwrap_or(true) => {
+                if let Some(event) = session.events.front() {
+                    return Ok(Some(event.clone()));
+                }
+                Arc::clone(&session.tracer)
+            },
+            _ => return Ok(None)
         }
     };
+
     while !tracer.wait() {}
 
+    {
+        let sessions = sessions();
+        if let Some(session) = sessions.get(&pid) {
+            if let Some(event) = session.events.front() {
+                return Ok(Some(event.clone()));
+            }
+        }
+    }
+
     let contexts = context::contexts();
     let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
     let context = context.read();
     if let Status::Exited(_) = context.status {
         return Err(Error::new(ESRCH));
     }
-    Ok(())
+
+    Ok(None)
 }
 
 /// Notify the tracer and await green flag to continue.
 /// Note: Don't call while holding any locks, this will switch contexts
-pub fn breakpoint_callback(singlestep: bool) -> Option<bool> {
+pub fn breakpoint_callback(match_flags: u8) -> Option<u8> {
     // Can't hold any locks when executing wait()
-    let (tracee, sysemu) = {
+    let (tracee, flags) = {
        let contexts = context::contexts();
        let context = contexts.current()?;
        let context = context.read();
 
-        let mut breakpoints = breakpoints_mut();
-        let breakpoint = breakpoints.get_mut(&context.id)?;
+        let mut sessions = sessions_mut();
+        let session = sessions.get_mut(&context.id)?;
+        let breakpoint = session.breakpoint.as_mut()?;
 
        // TODO: How should singlesteps interact with syscalls? How
        // does Linux handle this?
 
-        // if singlestep && !breakpoint.singlestep {
-        if breakpoint.singlestep != singlestep {
+        if breakpoint.flags & PTRACE_OPERATIONMASK != match_flags & PTRACE_OPERATIONMASK {
            return None;
        }
-        breakpoint.tracer.notify();
 
        // In case no tracer is waiting, make sure the next one gets
        // the memo
        breakpoint.reached = true;
 
+        session.tracer.notify();
+        proc_trigger_event(session.file_id, EVENT_WRITE);
+
        (
            Arc::clone(&breakpoint.tracee),
-            breakpoint.sysemu
+            breakpoint.flags
        )
    };
 
    while !tracee.wait() {}
 
-    Some(sysemu)
+    Some(flags)
 }
 
 /// Call when a context is closed to alert any tracers
-pub fn close(pid: ContextId) {
-    {
-        let breakpoints = breakpoints();
-        if let Some(breakpoint) = breakpoints.get(&pid) {
-            breakpoint.tracer.notify();
-        }
-    }
+pub fn close_tracee(pid: ContextId) -> Option<()> {
+    let mut sessions = sessions_mut();
+    let session = sessions.get_mut(&pid)?;
 
-    breakpoints_mut().remove(&pid);
+    session.breakpoint = None;
+    session.tracer.notify();
+    Some(())
 }
 
 //  ____            _     _
 // |  _ \ ___  __ _(_)___| |_ ___ _ __ ___
 // | |_) / _ \/ _` | / __| __/ _ \ '__/ __|
@@ -158,6 +297,43 @@ pub fn close(pid: ContextId) {
 // |_| \_\___|\__, |_|___/\__\___|_|  |___/
 //            |___/
 
+pub struct ProcessRegsGuard;
+
+/// Make all registers available to e.g. the proc: scheme
+/// ---
+/// For use inside arch-specific code to assign the pointer of the
+/// interrupt stack to the current process. Meant to reduce the amount
+/// of ptrace-related code that has to lie in arch-specific bits.
+/// ```rust,ignore
+/// let _guard = ptrace::set_process_regs(pointer);
+/// ...
+/// // (_guard implicitly dropped)
+/// ```
+pub fn set_process_regs(pointer: *mut InterruptStack) -> Option<ProcessRegsGuard> {
+    let contexts = context::contexts();
+    let context = contexts.current()?;
+    let mut context = context.write();
+
+    let kstack = context.kstack.as_mut()?;
+
+    context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new(pointer)));
+    Some(ProcessRegsGuard)
+}
+
+impl Drop for ProcessRegsGuard {
+    fn drop(&mut self) {
+        fn clear_process_regs() -> Option<()> {
+            let contexts = context::contexts();
+            let context = contexts.current()?;
+            let mut context = context.write();
+
+            context.regs = None;
+            Some(())
+        }
+        clear_process_regs();
+    }
+}
+
 /// Return the InterruptStack pointer, but relative to the specified
 /// stack instead of the original.
 pub unsafe fn rebase_regs_ptr(
@@ -185,18 +361,44 @@ pub unsafe fn rebase_regs_ptr_mut(
 /// restored and otherwise undo all your changes. See `update(...)` in
 /// context/switch.rs.
 pub unsafe fn regs_for(context: &Context) -> Option<&InterruptStack> {
-    Some(&*match context.ksig {
-        Some((_, _, ref kstack)) => rebase_regs_ptr(context.regs, kstack.as_ref())?,
-        None => context.regs?.1.as_ptr()
-    })
+    let signal_backup_regs = match context.ksig {
+        None => None,
+        Some((_, _, ref kstack, signum)) => {
+            let is_user_handled = {
+                let actions = context.actions.lock();
+                signal::is_user_handled(actions[signum as usize].0.sa_handler)
+            };
+            if is_user_handled {
+                None
+            } else {
+                Some(rebase_regs_ptr(context.regs, kstack.as_ref())?)
+            }
+        }
+    };
+    signal_backup_regs
+        .or_else(|| context.regs.map(|regs| regs.1.as_ptr() as *const _))
+        .map(|ptr| &*ptr)
 }
 
 /// Mutable version of `regs_for`
 pub unsafe fn regs_for_mut(context: &mut Context) -> Option<&mut InterruptStack> {
-    Some(&mut *match context.ksig {
-        Some((_, _, ref mut kstack)) => rebase_regs_ptr_mut(context.regs, kstack.as_mut())?,
-        None => context.regs?.1.as_ptr()
-    })
+    let signal_backup_regs = match context.ksig {
+        None => None,
+        Some((_, _, ref mut kstack, signum)) => {
+            let is_user_handled = {
+                let actions = context.actions.lock();
+                signal::is_user_handled(actions[signum as usize].0.sa_handler)
+            };
+            if is_user_handled {
+                None
+            } else {
+                Some(rebase_regs_ptr_mut(context.regs, kstack.as_mut())?)
+            }
+        }
+    };
+    signal_backup_regs
+        .or_else(|| context.regs.map(|regs| regs.1.as_ptr()))
+        .map(|ptr| &mut *ptr)
 }
 
 // __  __
diff --git a/src/scheme/mod.rs b/src/scheme/mod.rs
index 83becb8fe07c6f80272fb645773f990dcf6c08d5..8d2e345ec789baf461ee660f931615507a120377 100644
--- a/src/scheme/mod.rs
+++ b/src/scheme/mod.rs
@@ -96,7 +96,7 @@ impl<'a> Iterator for SchemeIter<'a> {
 /// Scheme list type
 pub struct SchemeList {
-    map: BTreeMap<SchemeId, Arc<Box<Scheme + Send + Sync>>>,
+    map: BTreeMap<SchemeId, Arc<Box<dyn Scheme + Send + Sync>>>,
     names: BTreeMap<SchemeNamespace, BTreeMap<Box<[u8]>, SchemeId>>,
     next_ns: usize,
     next_id: usize
@@ -141,7 +141,7 @@ impl SchemeList {
         self.insert(ns, Box::new(*b"debug"), |scheme_id| Arc::new(Box::new(DebugScheme::new(scheme_id)))).unwrap();
         self.insert(ns, Box::new(*b"initfs"), |_| Arc::new(Box::new(InitFsScheme::new()))).unwrap();
         self.insert(ns, Box::new(*b"irq"), |scheme_id| Arc::new(Box::new(IrqScheme::new(scheme_id)))).unwrap();
-        self.insert(ns, Box::new(*b"proc"), |_| Arc::new(Box::new(ProcScheme::new()))).unwrap();
+        self.insert(ns, Box::new(*b"proc"), |scheme_id| Arc::new(Box::new(ProcScheme::new(scheme_id)))).unwrap();
         #[cfg(feature = "live")] {
             self.insert(ns, Box::new(*b"disk/live"), |_| Arc::new(Box::new(self::live::DiskScheme::new()))).unwrap();
@@ -184,7 +184,7 @@ impl SchemeList {
     }
 
     /// Get the nth scheme.
-    pub fn get(&self, id: SchemeId) -> Option<&Arc<Box<Scheme + Send + Sync>>> {
+    pub fn get(&self, id: SchemeId) -> Option<&Arc<Box<dyn Scheme + Send + Sync>>> {
         self.map.get(&id)
     }
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
index f1694fc76e269e381c164367869663f2a23495ca..5a04f9d55479a2b881ae11307eefb059c93d5ff4 100644
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -1,12 +1,13 @@
 use crate::{
     arch::paging::VirtualAddress,
     context::{self, ContextId, Status},
-    syscall::validate,
-    ptrace
+    ptrace,
+    scheme::{ATOMIC_SCHEMEID_INIT, AtomicSchemeId, SchemeId},
+    syscall::validate
 };
 use alloc::{
-    collections::{BTreeMap, BTreeSet},
+    collections::BTreeMap,
     sync::Arc
 };
 use core::{
@@ -17,7 +18,7 @@ use core::{
 };
 use spin::{Mutex, RwLock};
 use syscall::{
-    data::{IntRegisters, FloatRegisters},
+    data::{FloatRegisters, IntRegisters, PtraceEvent},
     error::*,
     flag::*,
     scheme::Scheme
@@ -32,7 +33,9 @@ enum RegsKind {
 enum Operation {
     Memory(VirtualAddress),
     Regs(RegsKind),
-    Trace
+    Trace {
+        new_child: Option<ContextId>
+    }
 }
 
 #[derive(Clone, Copy)]
@@ -41,19 +44,37 @@ struct Handle {
     pid: ContextId,
     operation: Operation
 }
+impl Handle {
+    fn continue_ignored_child(&mut self) -> Option<()> {
+        let pid = match self.operation {
+            Operation::Trace { ref mut new_child } => new_child.take()?,
+            _ => return None
+        };
+        if ptrace::is_traced(pid) {
+            return None;
+        }
+        let contexts = context::contexts();
+        let context = contexts.get(pid)?;
+        let mut context = context.write();
+        context.ptrace_stop = false;
+        Some(())
+    }
+}
+
+pub static PROC_SCHEME_ID: AtomicSchemeId = ATOMIC_SCHEMEID_INIT;
 
 pub struct ProcScheme {
     next_id: AtomicUsize,
-    handles: RwLock<BTreeMap<usize, Arc<Mutex<Handle>>>>,
-    traced: Mutex<BTreeSet<ContextId>>
+    handles: RwLock<BTreeMap<usize, Arc<Mutex<Handle>>>>
 }
 
 impl ProcScheme {
-    pub fn new() -> Self {
+    pub fn new(scheme_id: SchemeId) -> Self {
+        PROC_SCHEME_ID.store(scheme_id, Ordering::SeqCst);
+
         Self {
             next_id: AtomicUsize::new(0),
             handles: RwLock::new(BTreeMap::new()),
-            traced: Mutex::new(BTreeSet::new())
         }
     }
 }
@@ -70,36 +91,59 @@ impl Scheme for ProcScheme {
             Some("mem") => Operation::Memory(VirtualAddress::new(0)),
             Some("regs/float") => Operation::Regs(RegsKind::Float),
             Some("regs/int") => Operation::Regs(RegsKind::Int),
-            Some("trace") => Operation::Trace,
+            Some("trace") => Operation::Trace {
+                new_child: None
+            },
             _ => return Err(Error::new(EINVAL))
         };
 
         let contexts = context::contexts();
-        let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
+        let target = contexts.get(pid).ok_or(Error::new(ESRCH))?;
 
         {
-            // TODO: Put better security here?
+            let target = target.read();
 
-            let context = context.read();
-            if uid != 0 && gid != 0
-            && uid != context.euid && gid != context.egid {
-                return Err(Error::new(EPERM));
+            if let Status::Exited(_) = target.status {
+                return Err(Error::new(ESRCH));
+            }
+
+            // Unless root, check security
+            if uid != 0 && gid != 0 {
+                let current = contexts.current().ok_or(Error::new(ESRCH))?;
+                let current = current.read();
+
+                // Do we own the process?
+                if uid != target.euid && gid != target.egid {
+                    return Err(Error::new(EPERM));
+                }
+
+                // Is it a subprocess of us? In the future, a capability
+                // could bypass this check.
+                match contexts.anchestors(target.ppid).find(|&(id, _context)| id == current.id) {
+                    Some((id, context)) => {
+                        // Paranoid sanity check, as ptrace security holes
+                        // wouldn't be fun
+                        assert_eq!(id, current.id);
+                        assert_eq!(id, context.read().id);
+                    },
+                    None => return Err(Error::new(EPERM))
+                }
             }
         }
 
-        if let Operation::Trace = operation {
-            let mut traced = self.traced.lock();
+        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
 
-            if traced.contains(&pid) {
+        if let Operation::Trace { .. } = operation {
+            if !ptrace::try_new_session(pid, id) {
+                // There is no good way to handle id being occupied
+                // for nothing here, is there?
                 return Err(Error::new(EBUSY));
             }
-            traced.insert(pid);
 
-            let mut context = context.write();
-            context.ptrace_stop = true;
+            let mut target = target.write();
+            target.ptrace_stop = true;
         }
 
-        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
         self.handles.write().insert(id, Arc::new(Mutex::new(Handle {
             flags,
             pid,
@@ -230,7 +274,16 @@ impl Scheme for ProcScheme {
                     Ok(len)
                 },
-                Operation::Trace => Err(Error::new(EBADF))
+                Operation::Trace { .. } => {
+                    let read = ptrace::recv_events(handle.pid, unsafe {
+                        slice::from_raw_parts_mut(
+                            buf.as_mut_ptr() as *mut PtraceEvent,
+                            buf.len() / mem::size_of::<PtraceEvent>()
+                        )
+                    }).unwrap_or(0);
+
+                    Ok(read * mem::size_of::<PtraceEvent>())
+                }
             }
         }
 
@@ -241,7 +294,11 @@ impl Scheme for ProcScheme {
             Arc::clone(handles.get(&id).ok_or(Error::new(EBADF))?)
         };
         let mut handle = handle.lock();
+        handle.continue_ignored_child();
+
+        // Some operations borrow Operation:: mutably
         let pid = handle.pid;
+        let flags = handle.flags;
 
         let mut first = true;
         match handle.operation {
@@ -306,28 +363,22 @@ impl Scheme for ProcScheme {
                     }
                 };
             },
-            Operation::Trace => {
+            Operation::Trace { ref mut new_child } => {
                 if buf.len() < 1 {
                     return Ok(0);
                 }
                 let op = buf[0];
-                let sysemu = op & PTRACE_SYSEMU == PTRACE_SYSEMU;
-                let mut blocking = handle.flags & O_NONBLOCK != O_NONBLOCK;
-                let mut wait_breakpoint = false;
+                let mut blocking = flags & O_NONBLOCK != O_NONBLOCK;
                 let mut singlestep = false;
 
                 match op & PTRACE_OPERATIONMASK {
-                    PTRACE_CONT => { ptrace::cont(handle.pid); },
-                    PTRACE_SYSCALL | PTRACE_SINGLESTEP => { // <- not a bitwise OR
+                    PTRACE_CONT => { ptrace::cont(pid); },
+                    PTRACE_SYSCALL | PTRACE_SINGLESTEP | PTRACE_SIGNAL => { // <- not a bitwise OR
                         singlestep = op & PTRACE_OPERATIONMASK == PTRACE_SINGLESTEP;
-                        ptrace::set_breakpoint(handle.pid, sysemu, singlestep);
-                        wait_breakpoint = true;
-                    },
-                    PTRACE_WAIT => {
-                        wait_breakpoint = true;
-                        blocking = true;
+                        ptrace::set_breakpoint(pid, op);
                     },
+                    PTRACE_WAIT => blocking = true,
                     _ => return Err(Error::new(EINVAL))
                 }
 
@@ -340,7 +391,7 @@ impl Scheme for ProcScheme {
                     first = false;
 
                     let contexts = context::contexts();
-                    let context = contexts.get(handle.pid).ok_or(Error::new(ESRCH))?;
+                    let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
                     let mut context = context.write();
                     if let Status::Exited(_) = context.status {
                         return Err(Error::new(ESRCH));
@@ -357,8 +408,13 @@ impl Scheme for ProcScheme {
                         break;
                     }
 
-                if wait_breakpoint && blocking {
-                    ptrace::wait_breakpoint(handle.pid)?;
+                if blocking {
+                    if let Some(event) = ptrace::wait(pid)? {
+                        if event.tag == PTRACE_EVENT_CLONE {
+                            *new_child = Some(ContextId::from(unsafe { event.data.clone }));
+                        }
+                        return Ok(0);
+                    }
                 }
 
                 Ok(1)
@@ -378,6 +434,14 @@ impl Scheme for ProcScheme {
         }
     }
 
+    fn fevent(&self, id: usize, _flags: usize) -> Result<usize> {
+        let handles = self.handles.read();
+        let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
+        let handle = handle.lock();
+
+        Ok(ptrace::session_fevent_flags(handle.pid).expect("proc (fevent): invalid session"))
+    }
+
     fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
         let handles = self.handles.read();
         let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
@@ -387,7 +451,7 @@ impl Scheme for ProcScheme {
             Operation::Memory(_) => "mem",
             Operation::Regs(RegsKind::Float) => "regs/float",
             Operation::Regs(RegsKind::Int) => "regs/int",
-            Operation::Trace => "trace"
+            Operation::Trace { .. } => "trace"
         });
 
         let len = cmp::min(path.len(), buf.len());
@@ -398,11 +462,11 @@ impl Scheme for ProcScheme {
 
     fn close(&self, id: usize) -> Result<usize> {
         let handle = self.handles.write().remove(&id).ok_or(Error::new(EBADF))?;
-        let handle = handle.lock();
+        let mut handle = handle.lock();
+        handle.continue_ignored_child();
 
-        if let Operation::Trace = handle.operation {
-            ptrace::cont(handle.pid);
-            self.traced.lock().remove(&handle.pid);
+        if let Operation::Trace { .. } = handle.operation {
+            ptrace::close_session(handle.pid);
         }
 
         let contexts = context::contexts();
diff --git a/src/syscall/debug.rs b/src/syscall/debug.rs
index 853380f1dd236ff667201fc1cb56a557460fbfc0..94f4552867d76bf43c456411a667824351b229aa 100644
--- a/src/syscall/debug.rs
+++ b/src/syscall/debug.rs
@@ -81,6 +81,17 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
            },
            d
        ),
+        SYS_FCHMOD => format!(
+            "fchmod({}, {:#o})",
+            b,
+            c
+        ),
+        SYS_FCHOWN => format!(
+            "fchown({}, {}, {})",
+            b,
+            c,
+            d
+        ),
        SYS_FCNTL => format!(
            "fcntl({}, {} ({}), {:#X})",
            b,
@@ -113,6 +124,11 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
            c,
            d
        ),
+        SYS_FRENAME => format!(
+            "frename({}, {:?})",
+            b,
+            validate_slice(c as *const u8, d).map(ByteStr),
+        ),
        SYS_FSTAT => format!(
            "fstat({}, {:?})",
            b,
@@ -136,6 +152,14 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
            b,
            c
        ),
+        SYS_FUTIMENS => format!(
+            "futimens({}, {:?})",
+            b,
+            validate_slice(
+                c as *const TimeSpec,
+                d/mem::size_of::<TimeSpec>()
+            ),
+        ),
 
        SYS_BRK => format!(
            "brk({:#X})",
@@ -200,7 +224,9 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
        SYS_GETEUID => format!("geteuid()"),
        SYS_GETGID => format!("getgid()"),
        SYS_GETNS => format!("getns()"),
+        SYS_GETPGID => format!("getpgid()"),
        SYS_GETPID => format!("getpid()"),
+        SYS_GETPPID => format!("getppid()"),
        SYS_GETUID => format!("getuid()"),
        SYS_IOPL => format!(
            "iopl({})",
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index 7d07c4191ad00baf1750211421a76451c0ab7fc7..e683b4cfc644beeaff9d50765ad20fd31fcbbbd7 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -21,12 +21,12 @@ use crate::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PA
 use crate::ptrace;
 use crate::scheme::FileHandle;
 use crate::start::usermode;
-use crate::syscall::data::{SigAction, Stat};
+use crate::syscall::data::{PtraceEvent, PtraceEventData, SigAction, Stat};
 use crate::syscall::error::*;
 use crate::syscall::flag::{CLONE_VFORK, CLONE_VM, CLONE_FS, CLONE_FILES, CLONE_SIGHAND, CLONE_STACK,
-                           PROT_EXEC, PROT_READ, PROT_WRITE,
-                           SIG_DFL, SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK, SIGCONT, SIGTERM,
-                           WCONTINUED, WNOHANG, WUNTRACED, wifcontinued, wifstopped};
+                           PROT_EXEC, PROT_READ, PROT_WRITE, PTRACE_EVENT_CLONE,
+                           SIG_DFL, SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK, SIGCONT, SIGTERM,
+                           WCONTINUED, WNOHANG, WUNTRACED, wifcontinued, wifstopped};
 use crate::syscall::validate::{validate_slice, validate_slice_mut};
 use crate::syscall;
@@ -585,6 +585,22 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
         }
     }
 
+    let ptrace_event = PtraceEvent {
+        tag: PTRACE_EVENT_CLONE,
+        data: PtraceEventData {
+            clone: pid.into()
+        }
+    };
+
+    if ptrace::send_event(ptrace_event).is_some() {
+        // Freeze the clone, allow ptrace to put breakpoints
+        // to it before it starts
+        let contexts = context::contexts();
+        let context = contexts.get(pid).expect("Newly created context doesn't exist??");
+        let mut context = context.write();
+        context.ptrace_stop = true;
+    }
+
     // Race to pick up the new process!
     ipi(IpiKind::Switch, IpiTarget::Other);
 
@@ -1068,8 +1084,6 @@ pub fn exit(status: usize) -> ! {
             context.id
         };
 
-        ptrace::close(pid);
-
         // Files must be closed while context is valid so that messages can be passed
         for (_fd, file_option) in close_files.drain(..).enumerate() {
             if let Some(file) = file_option {
@@ -1136,6 +1150,9 @@ pub fn exit(status: usize) -> ! {
             }
         }
 
+        // Alert any tracers waiting for process (important: AFTER sending waitpid event)
+        ptrace::close_tracee(pid);
+
         if pid == ContextId::from(1) {
             println!("Main kernel thread exited with status {:X}", status);
diff --git a/syscall b/syscall
index 49dd22260bd8bada8b835d12ee8e460a5a1c4af4..844650c4fb9725cd9029de6277826bfe0fb19909 160000
--- a/syscall
+++ b/syscall
@@ -1 +1 @@
-Subproject commit 49dd22260bd8bada8b835d12ee8e460a5a1c4af4
+Subproject commit 844650c4fb9725cd9029de6277826bfe0fb19909
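
Editorial note (not part of the patch): the diff above lets a tracer open `proc:<pid>/trace`, write a single operation byte such as `PTRACE_SYSCALL`, and read back queued `PtraceEvent` structures. The sketch below is a minimal, hypothetical userspace illustration of that flow. It assumes the Redox `syscall` crate at the revision the submodule bump points to; the path format, flag names, and the byte-per-operation write protocol are taken from the diff, while the helper name `trace_next_syscall` and the buffer size are made up for the example.

```rust
// Hypothetical tracer sketch; not part of this patch.
use std::mem;

use syscall::data::PtraceEvent;
use syscall::flag::{O_RDWR, PTRACE_SYSCALL};

fn trace_next_syscall(pid: usize) -> syscall::Result<()> {
    // Opening the trace handle attaches to the process and creates the
    // ptrace session (fails with EBUSY if one already exists).
    let fd = syscall::open(format!("proc:{}/trace", pid), O_RDWR)?;

    // Write one operation byte: set a syscall breakpoint. With a blocking
    // handle this also waits until the tracee reaches it.
    syscall::write(fd, &[PTRACE_SYSCALL])?;

    // Reading from the handle drains queued events (e.g. PTRACE_EVENT_CLONE
    // or PTRACE_EVENT_SIGNAL), returned as packed PtraceEvent structs.
    let mut buf = [0u8; 16 * mem::size_of::<PtraceEvent>()];
    let bytes = syscall::read(fd, &mut buf)?;
    println!("received {} ptrace event(s)", bytes / mem::size_of::<PtraceEvent>());

    // Closing the handle ends the session and lets the tracee continue.
    syscall::close(fd)?;
    Ok(())
}
```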