//! The backend of the "proc:" scheme. Most internal breakpoint
//! handling should go here, unless it closely depends on the design
//! of the scheme.
use crate::{
    arch::{
        macros::InterruptStack,
        paging::{
            entry::EntryFlags,
            mapper::MapperFlushAll,
            temporary_page::TemporaryPage,
            ActivePageTable, InactivePageTable, Page, PAGE_SIZE, VirtualAddress
        }
    },
    common::unique::Unique,
    context::{self, signal, Context, ContextId, Status},
    event,
    scheme::proc,
    sync::WaitCondition
};

use alloc::{
    boxed::Box,
    collections::{
        BTreeMap,
        VecDeque,
        btree_map::Entry
    },
    sync::Arc,
    vec::Vec
};
use core::{
    cmp,
    sync::atomic::Ordering
};
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use syscall::{
    data::PtraceEvent,
    error::*,
    flag::*
};
// ____ _
// / ___| ___ ___ ___(_) ___ _ __ ___
// \___ \ / _ \/ __/ __| |/ _ \| '_ \/ __|
// ___) | __/\__ \__ \ | (_) | | | \__ \
// |____/ \___||___/___/_|\___/|_| |_|___/
#[derive(Debug)]
struct Session {
breakpoint: Option<Breakpoint>,
events: VecDeque<PtraceEvent>,
file_id: usize,
tracee: Arc<WaitCondition>,
tracer: Arc<WaitCondition>,
}
type SessionMap = BTreeMap<ContextId, Session>;
static SESSIONS: Once<RwLock<SessionMap>> = Once::new();
fn init_sessions() -> RwLock<SessionMap> {
RwLock::new(BTreeMap::new())
}
fn sessions() -> RwLockReadGuard<'static, SessionMap> {
SESSIONS.call_once(init_sessions).read()
}
fn sessions_mut() -> RwLockWriteGuard<'static, SessionMap> {
SESSIONS.call_once(init_sessions).write()
}
/// Try to create a new session, but fail if one already exists for
/// this process
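/// A minimal caller sketch (hypothetical call site; the error choice is
/// illustrative, not taken from the scheme code):
/// ```rust,ignore
/// if !ptrace::try_new_session(pid, file_id) {
///     return Err(Error::new(EBUSY));
/// }
/// ```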
pub fn try_new_session(pid: ContextId, file_id: usize) -> bool {
let mut sessions = sessions_mut();
match sessions.entry(pid) {
Entry::Occupied(_) => false,
Entry::Vacant(vacant) => {
vacant.insert(Session {
breakpoint: None,
events: VecDeque::new(),
file_id,
tracee: Arc::new(WaitCondition::new()),
tracer: Arc::new(WaitCondition::new()),
});
true
}
}
}
/// Returns true if a session is attached to this process
pub fn is_traced(pid: ContextId) -> bool {
sessions().contains_key(&pid)
}
/// Used for getting the flags in fevent
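/// Sketch of how a hypothetical `fevent` handler might consume this:
/// ```rust,ignore
/// let flags = ptrace::session_fevent_flags(pid).ok_or(Error::new(EBADF))?;
/// Ok(flags)
/// ```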
pub fn session_fevent_flags(pid: ContextId) -> Option<usize> {
let sessions = sessions();
let session = sessions.get(&pid)?;
let mut flags = 0;
if !session.events.is_empty() {
flags |= EVENT_READ;
}
if session.breakpoint.as_ref().map(|b| b.reached).unwrap_or(true) {
flags |= EVENT_WRITE;
}
Some(flags)
}
/// Remove the session from the list of open sessions and notify any
/// waiting processes
pub fn close_session(pid: ContextId) {
if let Some(session) = sessions_mut().remove(&pid) {
session.tracer.notify();
}
}
/// Trigger a notification to the event: scheme
fn proc_trigger_event(file_id: usize, flags: usize) {
event::trigger(proc::PROC_SCHEME_ID.load(Ordering::SeqCst), file_id, flags);
}
/// Dispatch an event to any tracer tracing `self`. This will cause
/// the tracer to wake up and poll for events. Returns Some(()) if an
/// event was sent.
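/// Illustrative sketch (hypothetical caller; `cause` stands in for whichever
/// PTRACE_* flag describes the stop):
/// ```rust,ignore
/// ptrace::send_event(ptrace_event!(cause));
/// ```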
pub fn send_event(event: PtraceEvent) -> Option<()> {
let contexts = context::contexts();
let context = contexts.current()?;
let context = context.read();
let mut sessions = sessions_mut();
let session = sessions.get_mut(&context.id)?;
let breakpoint = session.breakpoint.as_ref()?;
if event.cause & breakpoint.flags != event.cause {
return None;
}
session.events.push_back(event);
// Notify nonblocking tracers
if session.events.len() == 1 {
// If the list of events was previously empty, alert now
proc_trigger_event(session.file_id, EVENT_READ);
}
// Alert blocking tracers
session.tracer.notify();
Some(())
}
/// Poll events, return the amount read
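/// Rough tracer-side sketch (assumes `PtraceEvent` implements `Default` and
/// `Copy`, like the other syscall data structs):
/// ```rust,ignore
/// let mut buf = [PtraceEvent::default(); 4];
/// let read = ptrace::recv_events(pid, &mut buf).unwrap_or(0);
/// ```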
pub fn recv_events(pid: ContextId, out: &mut [PtraceEvent]) -> Option<usize> {
let mut sessions = sessions_mut();
let session = sessions.get_mut(&pid)?;
let len = cmp::min(out.len(), session.events.len());
for (dst, src) in out.iter_mut().zip(session.events.drain(..len)) {
*dst = src;
}
Some(len)
}
// ____ _ _ _
// | __ ) _ __ ___ __ _| | ___ __ ___ (_)_ __ | |_ ___
// | _ \| '__/ _ \/ _` | |/ / '_ \ / _ \| | '_ \| __/ __|
// | |_) | | | __/ (_| | <| |_) | (_) | | | | | |_\__ \
// |____/|_| \___|\__,_|_|\_\ .__/ \___/|_|_| |_|\__|___/
// |_|
#[derive(Debug)]
struct Breakpoint {
    reached: bool,
    flags: u64
}
/// Continue the process with the specified ID
pub fn cont(pid: ContextId) {
let mut sessions = sessions_mut();
let session = match sessions.get_mut(&pid) {
Some(session) => session,
None => return
};
// Remove the breakpoint to make sure any yet unreached but
// obsolete breakpoints don't stop the program.
    session.breakpoint = None;

    // Wake the tracee so it can resume execution.
    session.tracee.notify();
}
/// Create a new breakpoint for the specified tracee, optionally with
/// a sysemu flag. Panics if the session is invalid.
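/// Hypothetical tracer-side sequence (the flag name is illustrative only):
/// ```rust,ignore
/// ptrace::set_breakpoint(pid, PTRACE_SYSCALL);
/// let event = ptrace::wait(pid)?;
/// ```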
pub fn set_breakpoint(pid: ContextId, flags: u64) {
let mut sessions = sessions_mut();
let session = sessions.get_mut(&pid).expect("proc (set_breakpoint): invalid session");
    session.breakpoint = Some(Breakpoint {
        reached: false,
        flags
    });
}
/// Wait for the tracee to stop. If an event occurs, it returns a copy
/// of that. It will still be available for read using recv_event.
///
/// Note: Don't call while holding any locks, this will switch
/// contexts
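/// Sketch of the intended call pattern (hypothetical blocking read path):
/// ```rust,ignore
/// match ptrace::wait(pid)? {
///     Some(event) => { /* an event is already queued, report it */ },
///     None => { /* the tracee stopped at the breakpoint, or no session exists */ }
/// }
/// ```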
pub fn wait(pid: ContextId) -> Result<Option<PtraceEvent>> {
let tracer: Arc<WaitCondition> = {
let sessions = sessions();
match sessions.get(&pid) {
            Some(session) if session.breakpoint.as_ref().map(|b| !b.reached).unwrap_or(true) => {
                // Return any event that is already queued before blocking.
                if let Some(event) = session.events.front() {
                    return Ok(Some(event.clone()));
                }
                Arc::clone(&session.tracer)
            },
            _ => return Ok(None)
        }
    };

    // Block until the tracee reaches the breakpoint, exits, or the session
    // is closed; all of those notify the tracer condition.
    while !tracer.wait() {}

    {
let sessions = sessions();
if let Some(session) = sessions.get(&pid) {
if let Some(event) = session.events.front() {
return Ok(Some(event.clone()));
}
}
}
let contexts = context::contexts();
let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
let context = context.read();
if let Status::Exited(_) = context.status {
return Err(Error::new(ESRCH));
}
    Ok(None)
}
/// Notify the tracer and await green flag to continue.
/// Note: Don't call while holding any locks, this will switch contexts
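/// Illustrative tracee-side call (hypothetical syscall-entry hook; the flag
/// name is a stand-in):
/// ```rust,ignore
/// ptrace::breakpoint_callback(PTRACE_SYSCALL, None);
/// ```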
pub fn breakpoint_callback(match_flags: u64, event: Option<PtraceEvent>) -> Option<u64> {
    // Can't hold any locks when executing wait()
    let (tracee, flags) = {
let contexts = context::contexts();
let context = contexts.current()?;
let context = context.read();
let mut sessions = sessions_mut();
let session = sessions.get_mut(&context.id)?;
let breakpoint = session.breakpoint.as_mut()?;
        if breakpoint.flags & match_flags != match_flags {
            return None;
        }
session.events.push_back(event.unwrap_or(ptrace_event!(match_flags)));
// Notify nonblocking tracers
if session.events.len() == 1 {
// If the list of events was previously empty, alert now
proc_trigger_event(session.file_id, EVENT_READ);
}
// In case no tracer is waiting, make sure the next one gets
// the memo
breakpoint.reached = true;
        // Alert blocking tracers
        session.tracer.notify();

        // Hand the tracee's wait condition and the breakpoint flags out of
        // this scope so all locks are released before blocking.
        (
            Arc::clone(&session.tracee),
            breakpoint.flags
        )
};
    while !tracee.wait() {}

    Some(flags)
}
/// Obtain the next breakpoint flags for the current process. This is
/// used for detecting whether or not the tracer decided to use sysemu
/// mode.
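/// Sketch (hypothetical check in the syscall path; the flag name is a stand-in):
/// ```rust,ignore
/// let sysemu = ptrace::next_breakpoint()
///     .map(|flags| flags & PTRACE_SYSEMU == PTRACE_SYSEMU)
///     .unwrap_or(false);
/// ```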
pub fn next_breakpoint() -> Option<u64> {
let contexts = context::contexts();
let context = contexts.current()?;
let context = context.read();
let sessions = sessions();
let session = sessions.get(&context.id)?;
let breakpoint = session.breakpoint.as_ref()?;
Some(breakpoint.flags)
}
/// Call when a context is closed to alert any tracers
pub fn close_tracee(pid: ContextId) -> Option<()> {
let mut sessions = sessions_mut();
let session = sessions.get_mut(&pid)?;
session.breakpoint = None;
session.tracer.notify();
Some(())
}
// ____ _ _
// | _ \ ___ __ _(_)___| |_ ___ _ __ ___
// | |_) / _ \/ _` | / __| __/ _ \ '__/ __|
// | _ < __/ (_| | \__ \ || __/ | \__ \
// |_| \_\___|\__, |_|___/\__\___|_| |___/
// |___/
pub struct ProcessRegsGuard;
/// Make all registers available to e.g. the proc: scheme
/// ---
/// For use inside arch-specific code to assign the pointer of the
/// interrupt stack to the current process. Meant to reduce the amount
/// of ptrace-related code that has to lie in arch-specific bits.
/// ```rust,ignore
/// let _guard = ptrace::set_process_regs(pointer);
/// ...
/// // (_guard implicitly dropped)
/// ```
pub fn set_process_regs(pointer: *mut InterruptStack) -> Option<ProcessRegsGuard> {
let contexts = context::contexts();
let context = contexts.current()?;
let mut context = context.write();
let kstack = context.kstack.as_mut()?;
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new(pointer)));
Some(ProcessRegsGuard)
}
impl Drop for ProcessRegsGuard {
fn drop(&mut self) {
fn clear_process_regs() -> Option<()> {
let contexts = context::contexts();
let context = contexts.current()?;
let mut context = context.write();
context.regs = None;
Some(())
}
clear_process_regs();
}
}
/// Return the InterruptStack pointer, but relative to the specified
/// stack instead of the original.
pub unsafe fn rebase_regs_ptr(
regs: Option<(usize, Unique<InterruptStack>)>,
kstack: Option<&Box<[u8]>>
) -> Option<*const InterruptStack> {
let (old_base, ptr) = regs?;
let new_base = kstack?.as_ptr() as usize;
Some((ptr.as_ptr() as usize - old_base + new_base) as *const _)
}
/// Return the InterruptStack pointer, but relative to the specified
/// stack instead of the original.
pub unsafe fn rebase_regs_ptr_mut(
regs: Option<(usize, Unique<InterruptStack>)>,
kstack: Option<&mut Box<[u8]>>
) -> Option<*mut InterruptStack> {
let (old_base, ptr) = regs?;
let new_base = kstack?.as_mut_ptr() as usize;
Some((ptr.as_ptr() as usize - old_base + new_base) as *mut _)
}
/// Return a reference to the InterruptStack struct in memory. If the
/// kernel stack has been backed up by a signal handler, this instead
/// returns the struct inside that memory, as that will later be
/// restored and would otherwise undo all your changes. See `update(...)` in
/// context/switch.rs.
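/// Usage sketch (hypothetical reader; the `iret.rip` field assumes the
/// x86_64 `InterruptStack` layout):
/// ```rust,ignore
/// if let Some(stack) = unsafe { ptrace::regs_for(&context) } {
///     let rip = stack.iret.rip;
/// }
/// ```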
pub unsafe fn regs_for(context: &Context) -> Option<&InterruptStack> {
let signal_backup_regs = match context.ksig {
None => None,
Some((_, _, ref kstack, signum)) => {
let is_user_handled = {
let actions = context.actions.lock();
signal::is_user_handled(actions[signum as usize].0.sa_handler)
};
if is_user_handled {
None
} else {
Some(rebase_regs_ptr(context.regs, kstack.as_ref())?)
}
}
};
signal_backup_regs
.or_else(|| context.regs.map(|regs| regs.1.as_ptr() as *const _))
.map(|ptr| &*ptr)
}
/// Mutable version of `regs_for`
pub unsafe fn regs_for_mut(context: &mut Context) -> Option<&mut InterruptStack> {
let signal_backup_regs = match context.ksig {
None => None,
Some((_, _, ref mut kstack, signum)) => {
let is_user_handled = {
let actions = context.actions.lock();
signal::is_user_handled(actions[signum as usize].0.sa_handler)
};
if is_user_handled {
None
} else {
Some(rebase_regs_ptr_mut(context.regs, kstack.as_mut())?)
}
}
};
signal_backup_regs
.or_else(|| context.regs.map(|regs| regs.1.as_ptr()))
        .map(|ptr| &mut *ptr)
}
// __ __
// | \/ | ___ _ __ ___ ___ _ __ _ _
// | |\/| |/ _ \ '_ ` _ \ / _ \| '__| | | |
// | | | | __/ | | | | | (_) | | | |_| |
// |_| |_|\___|_| |_| |_|\___/|_| \__, |
// |___/
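/// Hypothetical sketch of copying memory out of another address space with
/// this helper (buffer handling simplified; not the scheme's actual code):
/// ```rust,ignore
/// with_context_memory(&context, VirtualAddress::new(address), buf.len(), |ptr| {
///     buf.copy_from_slice(unsafe { core::slice::from_raw_parts(ptr, buf.len()) });
///     Ok(())
/// })?;
/// ```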
pub fn with_context_memory<F>(context: &Context, offset: VirtualAddress, len: usize, f: F) -> Result<()>
where F: FnOnce(*mut u8) -> Result<()>
{
// TODO: Is using USER_TMP_MISC_OFFSET safe? I guess make sure
// it's not too large.
let start = Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET));
let mut active_page_table = unsafe { ActivePageTable::new() };
let mut target_page_table = unsafe {
InactivePageTable::from_address(context.arch.get_page_table())
};
// Find the physical frames for all pages
let mut frames = Vec::new();
let mut result = None;
active_page_table.with(&mut target_page_table, &mut TemporaryPage::new(start), |mapper| {
let mut inner = || -> Result<()> {
let start = Page::containing_address(offset);
let end = Page::containing_address(VirtualAddress::new(offset.get() + len - 1));
for page in Page::range_inclusive(start, end) {
frames.push((
mapper.translate_page(page).ok_or(Error::new(EFAULT))?,
mapper.translate_page_flags(page).ok_or(Error::new(EFAULT))?
));
}
Ok(())
};
result = Some(inner());
});
result.expect("with(...) callback should always be called")?;
// Map all the physical frames into linear pages
let pages = frames.len();
let mut page = start;
let mut flusher = MapperFlushAll::new();
for (frame, mut flags) in frames {
flags |= EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE;
flusher.consume(active_page_table.map_to(page, frame, flags));
page = page.next();
}
flusher.flush(&mut active_page_table);
let res = f((start.start_address().get() + offset.get() % PAGE_SIZE) as *mut u8);
// Unmap all the pages (but allow no deallocation!)
let mut page = start;
let mut flusher = MapperFlushAll::new();
for _ in 0..pages {
flusher.consume(active_page_table.unmap_return(page, true).0);
page = page.next();
}
flusher.flush(&mut active_page_table);
res
}