Verified Commit dcad3bbe authored by jD91mZM2

WIP(ptrace): Singlestepping support

parent 49d8c788
use crate::interrupt::stack_trace;
use crate::syscall::flag::*;
use crate::{
common::unique::Unique,
context,
interrupt::stack_trace,
ptrace,
syscall::flag::*
};
extern {
fn ksignal(signal: usize);
@@ -13,9 +18,41 @@ interrupt_stack_p!(divide_by_zero, stack, {
});
interrupt_stack!(debug, stack, {
println!("Debug trap");
stack.dump();
ksignal(SIGTRAP);
match ptrace::breakpoint_callback_dryrun(true) {
Some(_) => {
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
}
}
}
let had_singlestep = stack.iret.rflags & (1 << 8) == 1 << 8;
stack.set_singlestep(false);
if ptrace::breakpoint_callback(true).is_none() {
// There is no guarantee that this is Some(_) just
// because the dryrun is Some(_). So, if there wasn't
// *actually* any breakpoint, restore the trap flag.
stack.set_singlestep(had_singlestep);
}
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.regs = None;
}
}
},
None => {
println!("Debug trap");
stack.dump();
ksignal(SIGTRAP);
}
}
});
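As an aside, bit 8 of RFLAGS is the trap flag (TF), which makes the CPU raise a debug exception after the next instruction. Below is a minimal sketch of the check-clear-restore pattern used in the handler above, with a plain `rflags: u64` standing in for `stack.iret.rflags` and `breakpoint_fired` standing in for `ptrace::breakpoint_callback(true).is_some()` (names are illustrative):

```rust
const TRAP_FLAG: u64 = 1 << 8; // TF, bit 8 of RFLAGS

/// Clear TF, and restore it only if no breakpoint actually fired,
/// mirroring the dryrun-vs-real-callback logic in the debug handler.
fn handle_debug_trap(mut rflags: u64, breakpoint_fired: bool) -> u64 {
    let had_singlestep = rflags & TRAP_FLAG == TRAP_FLAG;
    rflags &= !TRAP_FLAG;
    if !breakpoint_fired && had_singlestep {
        // The dryrun said Some(_) but the real callback didn't fire:
        // put the trap flag back so the original state is preserved.
        rflags |= TRAP_FLAG;
    }
    rflags
}

fn main() {
    // TF was set and a breakpoint consumed the step: TF stays cleared.
    assert_eq!(handle_debug_trap(1 << 8, true), 0);
    // TF was set but no breakpoint fired: TF is restored.
    assert_eq!(handle_debug_trap(1 << 8, false), 1 << 8);
}
```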
interrupt_stack!(non_maskable, stack, {
@@ -68,7 +68,9 @@ interrupt_stack!(pit, stack, {
let mut context = context.write();
// Make all registers available to e.g. the proc:
// scheme
context.interrupt_stack = Some(Unique::new_unchecked(stack));
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
}
}
}
let _ = context::switch();
@@ -76,7 +78,7 @@ interrupt_stack!(pit, stack, {
let contexts = crate::context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.interrupt_stack = None;
context.regs = None;
}
}
}
@@ -7,7 +7,7 @@ use x86::shared::msr;
pub unsafe fn init() {
msr::wrmsr(msr::IA32_STAR, ((gdt::GDT_KERNEL_CODE as u64) << 3) << 32);
msr::wrmsr(msr::IA32_LSTAR, syscall_instruction as u64);
msr::wrmsr(msr::IA32_FMASK, 1 << 9);
msr::wrmsr(msr::IA32_FMASK, 0x0300); // Clear trap flag and interrupt enable
msr::wrmsr(msr::IA32_KERNEL_GS_BASE, &gdt::TSS as *const _ as u64);
let efer = msr::rdmsr(msr::IA32_EFER);
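For context, IA32_FMASK holds the RFLAGS bits that the CPU clears on every `syscall` entry. The old mask `1 << 9` only cleared IF; `0x0300` also covers TF (bit 8), so a single-stepped tracee cannot keep trapping inside the kernel. A quick sanity check of the constant (illustrative, not kernel code):

```rust
const TF: u64 = 1 << 8; // trap flag
const IF: u64 = 1 << 9; // interrupt enable flag

fn main() {
    // 0x0300 is exactly TF | IF.
    assert_eq!(TF | IF, 0x0300);
    // On `syscall` the CPU computes rflags &= !IA32_FMASK, so a user
    // RFLAGS with TF and IF set enters the kernel with both cleared.
    let user_rflags: u64 = 0x0346;
    assert_eq!(user_rflags & !(TF | IF), 0x0046);
}
```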
@@ -29,11 +29,13 @@ macro_rules! with_interrupt_stack {
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.interrupt_stack = Some(Unique::new_unchecked(&mut stack.interrupt_stack));
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(&mut stack.interrupt_stack)));
}
}
}
let is_sysemu = ptrace::syscall_callback();
let is_sysemu = ptrace::breakpoint_callback(false);
if !is_sysemu.unwrap_or(false) {
// If not on a sysemu breakpoint
let $stack = &mut *stack;
@@ -42,7 +44,7 @@ macro_rules! with_interrupt_stack {
if is_sysemu.is_some() {
// Only callback if there was a pre-syscall
// callback too.
ptrace::syscall_callback();
ptrace::breakpoint_callback(false);
}
}
@@ -50,7 +52,7 @@ macro_rules! with_interrupt_stack {
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.interrupt_stack = None;
context.regs = None;
}
}
}
@@ -167,8 +169,16 @@ pub struct SyscallStack {
}
#[naked]
pub unsafe extern fn clone_ret() {
asm!("pop rbp
xor rax, rax"
: : : : "intel", "volatile");
pub unsafe extern "C" fn clone_ret() {
// The System V x86_64 ABI specifies that rbp is pushed to save the
// old call frame. Popping rbp means we're using the parent's call
// frame, so the return that follows exits not only this function
// but also the one above it.
// When this is called, the stack should have been
// interrupt->inner->syscall->clone
// then changed to
// interrupt->inner->clone_ret->clone
// so this will return from "inner".
asm!("pop rbp" : : : : "intel", "volatile");
}
@@ -251,6 +251,16 @@ impl InterruptStack {
self.iret.cs = all.cs;
self.iret.rflags = all.eflags;
}
/// Enables the "Trap Flag" in the FLAGS register, causing the CPU
/// to send a Debug exception after the next instruction. This is
/// used for singlestep in the proc: scheme.
pub fn set_singlestep(&mut self, enabled: bool) {
if enabled {
self.iret.rflags |= 1 << 8;
} else {
self.iret.rflags &= !(1 << 8);
}
}
}
#[macro_export]
@@ -166,10 +166,13 @@ pub struct Context {
pub files: Arc<Mutex<Vec<Option<FileDescriptor>>>>,
/// Signal actions
pub actions: Arc<Mutex<Vec<(SigAction, usize)>>>,
/// The interrupt stack which holds all the context's registers
pub interrupt_stack: Option<Unique<InterruptStack>>,
/// The pointer to the user-space registers, saved after certain
/// interrupts. This pointer is somewhere inside kstack, and the
/// kstack address at the time of creation is the first element in
/// this tuple.
pub regs: Option<(usize, Unique<InterruptStack>)>,
/// A somewhat hacky way to initially stop a context when creating
/// a new instance of the proc: scheme, entirely separate from any
/// a new instance of the proc: scheme, entirely separate from
/// signals or any other way to restart a process.
pub ptrace_stop: bool
}
@@ -223,7 +226,7 @@ impl Context {
},
0
); 128])),
interrupt_stack: None,
regs: None,
ptrace_stop: false
}
}
use crate::{
context::{self, ContextId, Status},
arch::macros::InterruptStack,
common::unique::Unique,
context::{self, Context, ContextId, Status},
sync::WaitCondition
};
use alloc::{
boxed::Box,
collections::BTreeMap,
sync::Arc
};
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use syscall::error::*;
// ____ _ _ _
// | __ ) _ __ ___ __ _| | ___ __ ___ (_)_ __ | |_ ___
// | _ \| '__/ _ \/ _` | |/ / '_ \ / _ \| | '_ \| __/ __|
// | |_) | | | __/ (_| | <| |_) | (_) | | | | | |_\__ \
// |____/|_| \___|\__,_|_|\_\ .__/ \___/|_|_| |_|\__|___/
// |_|
struct Handle {
tracee: Arc<WaitCondition>,
tracer: Arc<WaitCondition>,
sysemu: bool
sysemu: bool,
singlestep: bool
}
static SYSCALL_BREAKPOINTS: Once<RwLock<BTreeMap<ContextId, Handle>>> = Once::new();
static BREAKPOINTS: Once<RwLock<BTreeMap<ContextId, Handle>>> = Once::new();
fn init_breakpoints() -> RwLock<BTreeMap<ContextId, Handle>> {
RwLock::new(BTreeMap::new())
}
fn breakpoints() -> RwLockReadGuard<'static, BTreeMap<ContextId, Handle>> {
SYSCALL_BREAKPOINTS.call_once(init_breakpoints).read()
BREAKPOINTS.call_once(init_breakpoints).read()
}
fn breakpoints_mut() -> RwLockWriteGuard<'static, BTreeMap<ContextId, Handle>> {
SYSCALL_BREAKPOINTS.call_once(init_breakpoints).write()
BREAKPOINTS.call_once(init_breakpoints).write()
}
/// Continue the process with the specified ID
@@ -37,7 +48,7 @@ pub fn cont(pid: ContextId) {
}
/// Create a new breakpoint for the specified tracee, optionally with a sysemu flag
pub fn break_syscall(pid: ContextId, sysemu: bool) {
pub fn set_breakpoint(pid: ContextId, sysemu: bool, singlestep: bool) {
// Continue execution of the tracee and therefore also release
// locks on breakpoints(). This has to be done before trying a
// mutable lock.
@@ -54,7 +65,8 @@ pub fn break_syscall(pid: ContextId, sysemu: bool) {
breakpoints_mut().insert(pid, Handle {
tracee,
tracer,
sysemu
sysemu,
singlestep
});
}
@@ -79,9 +91,26 @@ pub fn wait_breakpoint(pid: ContextId) -> Result<()> {
Ok(())
}
/// Returns the same value as breakpoint_callback would, but doesn't
/// actually perform the action. Don't rely too heavily on this value:
/// the breakpoints lock *is* released between this dry run and any
/// later real call.
pub fn breakpoint_callback_dryrun(singlestep: bool) -> Option<bool> {
let contexts = context::contexts();
let context = contexts.current()?;
let context = context.read();
let breakpoints = breakpoints();
let breakpoint = breakpoints.get(&context.id)?;
if breakpoint.singlestep != singlestep {
return None;
}
Some(breakpoint.sysemu)
}
/// Notify the tracer and await green flag to continue.
/// Note: Don't call while holding any locks, this will switch contexts
pub fn syscall_callback() -> Option<bool> {
pub fn breakpoint_callback(singlestep: bool) -> Option<bool> {
// Can't hold any locks when executing wait()
let (tracee, sysemu) = {
let contexts = context::contexts();
@@ -90,6 +119,15 @@ pub fn syscall_callback() -> Option<bool> {
let breakpoints = breakpoints();
let breakpoint = breakpoints.get(&context.id)?;
// TODO: How should singlesteps interact with syscalls? How
// does Linux handle this?
// if singlestep && !breakpoint.singlestep {
if breakpoint.singlestep != singlestep {
return None;
}
breakpoint.tracer.notify();
(
Arc::clone(&breakpoint.tracee),
@@ -113,3 +151,51 @@ pub fn close(pid: ContextId) {
breakpoints_mut().remove(&pid);
}
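breakpoint_callback above is a notify-then-wait handshake: the tracee wakes the tracer, then blocks on its own condition until cont() releases it. A rough userspace analogue of that pattern sketched with std primitives (the WaitCondition here is an illustrative stand-in, not the kernel's API):

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Illustrative stand-in for the kernel's WaitCondition.
#[derive(Default)]
struct WaitCondition {
    woken: Mutex<bool>,
    cvar: Condvar,
}

impl WaitCondition {
    fn notify(&self) {
        *self.woken.lock().unwrap() = true;
        self.cvar.notify_all();
    }
    fn wait(&self) {
        let mut woken = self.woken.lock().unwrap();
        while !*woken {
            woken = self.cvar.wait(woken).unwrap();
        }
        *woken = false;
    }
}

fn main() {
    let tracee = Arc::new(WaitCondition::default());
    let tracer = Arc::new(WaitCondition::default());

    let (te, tr) = (Arc::clone(&tracee), Arc::clone(&tracer));
    let child = thread::spawn(move || {
        tr.notify(); // "breakpoint hit": wake the tracer...
        te.wait();   // ...then block until the tracer continues us
    });

    tracer.wait();   // tracer side: wait for the breakpoint notification
    tracee.notify(); // tracer side: cont() releases the tracee
    child.join().unwrap();
}
```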
// ____ _ _
// | _ \ ___ __ _(_)___| |_ ___ _ __ ___
// | |_) / _ \/ _` | / __| __/ _ \ '__/ __|
// | _ < __/ (_| | \__ \ || __/ | \__ \
// |_| \_\___|\__, |_|___/\__\___|_| |___/
// |___/
/// Return the InterruptStack pointer, but relative to the specified
/// stack instead of the original.
pub unsafe fn rebase_regs_ptr(
regs: Option<(usize, Unique<InterruptStack>)>,
kstack: Option<&Box<[u8]>>
) -> Option<*const InterruptStack> {
let (old_base, ptr) = regs?;
let new_base = kstack?.as_ptr() as usize;
Some((ptr.as_ptr() as usize - old_base + new_base) as *const _)
}
/// Return the InterruptStack pointer, but relative to the specified
/// stack instead of the original.
pub unsafe fn rebase_regs_ptr_mut(
regs: Option<(usize, Unique<InterruptStack>)>,
kstack: Option<&mut Box<[u8]>>
) -> Option<*mut InterruptStack> {
let (old_base, ptr) = regs?;
let new_base = kstack?.as_mut_ptr() as usize;
Some((ptr.as_ptr() as usize - old_base + new_base) as *mut _)
}
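The rebase helpers above only translate an offset from one stack allocation to another: keep the offset, swap the base. A worked example with made-up addresses (illustrative only):

```rust
fn main() {
    // Hypothetical addresses, purely for illustration.
    let old_base = 0x1000_usize; // kstack base recorded when regs was saved
    let ptr      = 0x1a40_usize; // InterruptStack pointer inside the old kstack
    let new_base = 0x8000_usize; // base of the backup stack (e.g. from ksig)

    // Same arithmetic as rebase_regs_ptr.
    let rebased = ptr - old_base + new_base;
    assert_eq!(ptr - old_base, 0xa40); // offset is preserved...
    assert_eq!(rebased, 0x8a40);       // ...relative to the new base
}
```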
/// Return a reference to the InterruptStack struct in memory. If the
/// kernel stack has been backed up by a signal handler, this instead
/// returns the struct inside that backup, since the backup will later
/// be restored and would otherwise undo all your changes. See
/// `update(...)` in context/switch.rs.
pub unsafe fn regs_for(context: &Context) -> Option<&InterruptStack> {
Some(&*match context.ksig {
Some((_, _, ref kstack)) => rebase_regs_ptr(context.regs, kstack.as_ref())?,
None => context.regs?.1.as_ptr()
})
}
/// Mutable version of `regs_for`
pub unsafe fn regs_for_mut(context: &mut Context) -> Option<&mut InterruptStack> {
Some(&mut *match context.ksig {
Some((_, _, ref mut kstack)) => rebase_regs_ptr_mut(context.regs, kstack.as_mut())?,
None => context.regs?.1.as_ptr()
})
}
@@ -156,7 +156,7 @@ impl Scheme for ProcScheme {
let mut first = true;
let (output, size) = loop {
if !first {
// `continue` = Delay and repeat
// We've tried this before, so let's wait before retrying
unsafe { context::switch(); }
}
first = false;
@@ -171,7 +171,7 @@ impl Scheme for ProcScheme {
// (Output { float: FloatRegisters::default() }, mem::size_of::<FloatRegisters>())
return Err(Error::new(EBADF));
},
RegsKind::Int => match context.interrupt_stack {
RegsKind::Int => match unsafe { ptrace::regs_for(&context) } {
None => {
// Another CPU is running this process, wait until it's stopped.
continue;
......@@ -179,9 +179,7 @@ impl Scheme for ProcScheme {
Some(stack) => {
let mut regs = IntRegisters::default();
unsafe {
(&*stack.as_ptr()).save(&mut regs);
}
stack.save(&mut regs);
(Output { int: regs }, mem::size_of::<IntRegisters>())
}
@@ -217,20 +215,21 @@ impl Scheme for ProcScheme {
},
Operation::Regs(kind) => loop {
if !first {
// We've tried this before, so let's wait before retrying
unsafe { context::switch(); }
}
first = false;
let contexts = context::contexts();
let context = contexts.get(handle.pid).ok_or(Error::new(ESRCH))?;
let context = context.write();
let mut context = context.write();
break match kind {
RegsKind::Float => {
// TODO!!
unimplemented!();
},
RegsKind::Int => match context.interrupt_stack {
RegsKind::Int => match unsafe { ptrace::regs_for_mut(&mut context) } {
None => {
// Another CPU is running this process, wait until it's stopped.
continue;
@@ -243,9 +242,8 @@
*(buf as *const _ as *const IntRegisters)
};
unsafe {
(&mut *stack.as_ptr()).load(&regs);
}
stack.load(&regs);
Ok(mem::size_of::<IntRegisters>())
}
}
@@ -259,19 +257,27 @@
let sysemu = op & PTRACE_SYSEMU == PTRACE_SYSEMU;
let mut wait_breakpoint = false;
let mut singlestep = false;
match op & PTRACE_OPERATIONMASK {
PTRACE_CONT => { ptrace::cont(handle.pid); },
// PTRACE_SINGLESTEP => unimplemented!(),
PTRACE_SYSCALL => {
ptrace::break_syscall(handle.pid, sysemu);
PTRACE_SYSCALL | PTRACE_SINGLESTEP => { // <- not a bitwise OR
singlestep = op & PTRACE_OPERATIONMASK == PTRACE_SINGLESTEP;
ptrace::set_breakpoint(handle.pid, sysemu, singlestep);
wait_breakpoint = true;
},
// PTRACE_WAIT => {},
_ => return Err(Error::new(EINVAL))
}
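Note that `PTRACE_SYSCALL | PTRACE_SINGLESTEP` in the match is an or-pattern (either constant), as the inline comment stresses, not a bitwise OR of the two values. A small decoding sketch with assumed flag values (the real constants live in the syscall crate and may differ):

```rust
// Assumed, illustrative values only.
const PTRACE_OPERATIONMASK: u64 = 0x0f;
const PTRACE_SYSCALL: u64 = 0x02;
const PTRACE_SINGLESTEP: u64 = 0x03;
const PTRACE_SYSEMU: u64 = 0x10;

fn main() {
    let op = PTRACE_SINGLESTEP | PTRACE_SYSEMU; // here | *is* bitwise OR
    let sysemu = op & PTRACE_SYSEMU == PTRACE_SYSEMU;
    let singlestep = match op & PTRACE_OPERATIONMASK {
        // An or-pattern: matches either constant, no bits are combined.
        PTRACE_SYSCALL | PTRACE_SINGLESTEP => {
            op & PTRACE_OPERATIONMASK == PTRACE_SINGLESTEP
        }
        _ => false,
    };
    assert!(sysemu && singlestep);
}
```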
{
let mut first = true;
loop {
if !first {
// We've tried this before, so let's wait before retrying
unsafe { context::switch(); }
}
first = false;
let contexts = context::contexts();
let context = contexts.get(handle.pid).ok_or(Error::new(ESRCH))?;
let mut context = context.write();
@@ -279,7 +285,15 @@
return Err(Error::new(ESRCH));
}
if singlestep {
match unsafe { ptrace::regs_for_mut(&mut context) } {
None => continue,
Some(stack) => stack.set_singlestep(true)
}
}
context.ptrace_stop = false;
break;
}
if wait_breakpoint && handle.flags & O_NONBLOCK != O_NONBLOCK {
@@ -6,7 +6,6 @@ use core::{intrinsics, mem};
use core::ops::DerefMut;
use spin::Mutex;
use crate::arch::macros::InterruptStack;
use crate::context::file::FileDescriptor;
use crate::context::{ContextId, WaitpidKey};
use crate::context;
@@ -130,18 +129,25 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
}
if let Some(ref stack) = context.kstack {
// Get the relative offset to the return address of this function
// (base pointer - start of stack) - one usize (the return address slot)
offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Add clone ret
let mut new_stack = stack.clone();
unsafe {
if let Some(old_ptr) = context.interrupt_stack {
let new_ptr = (
old_ptr.as_ptr() as usize - stack.as_ptr() as usize + new_stack.as_ptr() as usize
) as *mut InterruptStack;
(&mut *new_ptr).scratch.rax = 0;
if let Some(regs) = ptrace::rebase_regs_ptr_mut(context.regs, Some(&mut new_stack)) {
// We'll need to tell the clone that it should
// return 0, but that's it. We don't actually
// clone the registers, because it will then
// become None and be exempt from all kinds of
// ptracing until the current syscall has
// completed.
(*regs).scratch.rax = 0;
}
// Change the return address of the child
// (previously syscall) to the arch-specific
// clone_ret callback
let func_ptr = new_stack.as_mut_ptr().offset(offset as isize);
*(func_ptr as *mut usize) = interrupt::syscall::clone_ret as usize;
}
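The offset computed near the top of this hunk locates the saved return address relative to the start of the kernel stack; the same slot in the cloned stack is then overwritten with clone_ret's address. A worked sketch with made-up numbers (illustrative only):

```rust
use std::mem;

fn main() {
    // Hypothetical addresses for illustration.
    let stack_start = 0x7000_usize; // stack.as_ptr()
    let stack_base  = 0x7f80_usize; // stack_base argument to clone()

    // One usize below the base pointer sits the return address slot.
    let offset = stack_base - stack_start - mem::size_of::<usize>();
    assert_eq!(offset, 0xf78);
    // clone() writes interrupt::syscall::clone_ret's address into
    // new_stack at this offset, redirecting the child's return.
}
```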