Commit 788526a3 authored by Jeremy Soller's avatar Jeremy Soller

Bare-bones ptracing functionality

Since even a very basic ptrace can be nice to have, I thought I would split
the (perhaps rather big) ptrace project up into multiple PRs to make as few
changes as necessary in each. This PR contains the initial register-modifying
bits and only a very basic security measure. Letting this out to the community
should be good for spotting bugs and maybe getting some hype ;)
parent 45ea6347
use crate::interrupt::stack_trace; use crate::{
use crate::syscall::flag::*; common::unique::Unique,
context,
interrupt::stack_trace,
ptrace,
syscall::flag::*
};
extern { extern {
fn ksignal(signal: usize); fn ksignal(signal: usize);
...@@ -13,9 +18,41 @@ interrupt_stack_p!(divide_by_zero, stack, { ...@@ -13,9 +18,41 @@ interrupt_stack_p!(divide_by_zero, stack, {
}); });
interrupt_stack!(debug, stack, { interrupt_stack!(debug, stack, {
println!("Debug trap"); match ptrace::breakpoint_callback_dryrun(true) {
stack.dump(); Some(_) => {
ksignal(SIGTRAP); {
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
}
}
}
let had_singlestep = stack.iret.rflags & (1 << 8) == 1 << 8;
stack.set_singlestep(false);
if ptrace::breakpoint_callback(true).is_none() {
// There is no guarantee that this is Some(_) just
// because the dryrun is Some(_). So, if there wasn't
// *actually* any breakpoint, restore the trap flag.
stack.set_singlestep(had_singlestep);
}
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.regs = None;
}
}
},
None => {
println!("Debug trap");
stack.dump();
ksignal(SIGTRAP);
}
}
}); });
interrupt_stack!(non_maskable, stack, { interrupt_stack!(non_maskable, stack, {
......
use core::sync::atomic::{AtomicUsize, Ordering}; use core::sync::atomic::{AtomicUsize, Ordering};
use crate::common::unique::Unique;
use crate::context; use crate::context;
use crate::context::timeout; use crate::context::timeout;
use crate::device::pic; use crate::device::pic;
...@@ -40,7 +41,7 @@ pub unsafe fn acknowledge(irq: usize) { ...@@ -40,7 +41,7 @@ pub unsafe fn acknowledge(irq: usize) {
} }
} }
interrupt!(pit, { interrupt_stack!(pit, stack, {
// Saves CPU time by not sending IRQ event irq_trigger(0); // Saves CPU time by not sending IRQ event irq_trigger(0);
const PIT_RATE: u64 = 2_250_286; const PIT_RATE: u64 = 2_250_286;
...@@ -61,7 +62,25 @@ interrupt!(pit, { ...@@ -61,7 +62,25 @@ interrupt!(pit, {
timeout::trigger(); timeout::trigger();
if PIT_TICKS.fetch_add(1, Ordering::SeqCst) >= 10 { if PIT_TICKS.fetch_add(1, Ordering::SeqCst) >= 10 {
{
let contexts = crate::context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
// Make all registers available to e.g. the proc:
// scheme
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
}
}
}
let _ = context::switch(); let _ = context::switch();
{
let contexts = crate::context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.regs = None;
}
}
} }
}); });
......
use crate::arch::macros::InterruptStack;
use crate::arch::{gdt, pti}; use crate::arch::{gdt, pti};
use crate::syscall; use crate::common::unique::Unique;
use crate::{context, ptrace, syscall};
use x86::shared::msr; use x86::shared::msr;
pub unsafe fn init() { pub unsafe fn init() {
msr::wrmsr(msr::IA32_STAR, ((gdt::GDT_KERNEL_CODE as u64) << 3) << 32); msr::wrmsr(msr::IA32_STAR, ((gdt::GDT_KERNEL_CODE as u64) << 3) << 32);
msr::wrmsr(msr::IA32_LSTAR, syscall_instruction as u64); msr::wrmsr(msr::IA32_LSTAR, syscall_instruction as u64);
msr::wrmsr(msr::IA32_FMASK, 1 << 9); msr::wrmsr(msr::IA32_FMASK, 0x0300); // Clear trap flag and interrupt enable
msr::wrmsr(msr::IA32_KERNEL_GS_BASE, &gdt::TSS as *const _ as u64); msr::wrmsr(msr::IA32_KERNEL_GS_BASE, &gdt::TSS as *const _ as u64);
let efer = msr::rdmsr(msr::IA32_EFER); let efer = msr::rdmsr(msr::IA32_EFER);
msr::wrmsr(msr::IA32_EFER, efer | 1); msr::wrmsr(msr::IA32_EFER, efer | 1);
} }
// Not a function pointer because it somehow messes up the returning
// from clone() (via clone_ret()). Not sure what the problem is.
macro_rules! with_interrupt_stack {
    (unsafe fn $wrapped:ident($stack:ident) -> usize $code:block) => {
        /// Runs the syscall body `$code` with ptrace bookkeeping around it.
        ///
        /// `$code` evaluates to a usize which is written into the saved
        /// rax slot of the interrupt stack (the syscall return value)
        /// rather than returned from this function.
        ///
        /// NOTE(review): the original doc said this function "returns a
        /// usize" and that a cloned child "terminates this function with a
        /// 0 return value", but the generated signature returns `()` —
        /// presumably clone()/clone_ret() return through here in a
        /// non-obvious way; confirm against clone_ret().
        #[inline(never)]
        unsafe fn $wrapped(stack: *mut SyscallStack) {
            let stack = &mut *stack;
            // Publish a pointer to the saved user-space registers so the
            // proc: scheme can read/write them while this context is
            // stopped. The kstack base address is stored alongside it
            // (see Context::regs) — presumably so the pointer can be
            // fixed up if the kernel stack is moved; TODO confirm.
            {
                let contexts = context::contexts();
                if let Some(context) = contexts.current() {
                    let mut context = context.write();
                    if let Some(ref mut kstack) = context.kstack {
                        context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(&mut stack.interrupt_stack)));
                    }
                }
            }

            // Pre-syscall ptrace stop. Some(true) means a sysemu
            // breakpoint was hit: skip executing the actual syscall so
            // the tracer can emulate it.
            let is_sysemu = ptrace::breakpoint_callback(false);
            if !is_sysemu.unwrap_or(false) {
                // If not on a sysemu breakpoint
                let $stack = &mut *stack;
                $stack.interrupt_stack.scratch.rax = $code;

                if is_sysemu.is_some() {
                    // Only callback if there was a pre-syscall
                    // callback too.
                    ptrace::breakpoint_callback(false);
                }
            }

            // The stack frame is about to become invalid; stop exposing
            // the register pointer to the proc: scheme.
            {
                let contexts = context::contexts();
                if let Some(context) = contexts.current() {
                    let mut context = context.write();
                    context.regs = None;
                }
            }
        }
    }
}
#[naked] #[naked]
pub unsafe extern fn syscall_instruction() { pub unsafe extern fn syscall_instruction() {
#[inline(never)] with_interrupt_stack! {
unsafe fn inner(stack: &mut SyscallStack) -> usize { unsafe fn inner(stack) -> usize {
let rbp; let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile"); asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
syscall::syscall(stack.rax, stack.rdi, stack.rsi, stack.rdx, stack.r10, stack.r8, rbp, stack) let scratch = &stack.interrupt_stack.scratch;
syscall::syscall(scratch.rax, scratch.rdi, scratch.rsi, scratch.rdx, scratch.r10, scratch.r8, rbp, stack)
}
} }
// Yes, this is magic. No, you don't need to understand // Yes, this is magic. No, you don't need to understand
...@@ -40,76 +89,52 @@ pub unsafe extern fn syscall_instruction() { ...@@ -40,76 +89,52 @@ pub unsafe extern fn syscall_instruction() {
: :
: "intel", "volatile"); : "intel", "volatile");
// Push scratch registers // Push scratch registers
asm!("push rax scratch_push!();
push rbx asm!("push fs
push rcx mov r11, 0x18
push rdx mov fs, r11
push rdi push rbx"
push rsi : : : : "intel", "volatile");
push r8
push r9 // Get reference to stack variables
push r10 let rsp: usize;
push r11 asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
push fs
mov r11, 0x18 // Map kernel
mov fs, r11" pti::map();
: : : : "intel", "volatile");
inner(rsp as *mut SyscallStack);
// Get reference to stack variables
let rsp: usize; // Unmap kernel
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile"); pti::unmap();
// Map kernel // Interrupt return
pti::map(); asm!("pop rbx
pop fs"
let a = inner(&mut *(rsp as *mut SyscallStack)); : : : : "intel", "volatile");
scratch_pop!();
// Unmap kernel asm!("iretq" : : : : "intel", "volatile");
pti::unmap();
asm!("" : : "{rax}"(a) : : "intel", "volatile");
// Interrupt return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rbx
add rsp, 8
iretq"
: : : : "intel", "volatile");
} }
#[naked] #[naked]
pub unsafe extern fn syscall() { pub unsafe extern fn syscall() {
#[inline(never)] with_interrupt_stack! {
unsafe fn inner(stack: &mut SyscallStack) -> usize { unsafe fn inner(stack) -> usize {
let rbp; let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile"); asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
syscall::syscall(stack.rax, stack.rbx, stack.rcx, stack.rdx, stack.rsi, stack.rdi, rbp, stack) let scratch = &stack.interrupt_stack.scratch;
syscall::syscall(scratch.rax, stack.rbx, scratch.rcx, scratch.rdx, scratch.rsi, scratch.rdi, rbp, stack)
}
} }
// Push scratch registers // Push scratch registers
asm!("push rax scratch_push!();
push rbx asm!("push fs
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov r11, 0x18 mov r11, 0x18
mov fs, r11" mov fs, r11
push rbx"
: : : : "intel", "volatile"); : : : : "intel", "volatile");
// Get reference to stack variables // Get reference to stack variables
...@@ -119,56 +144,41 @@ pub unsafe extern fn syscall() { ...@@ -119,56 +144,41 @@ pub unsafe extern fn syscall() {
// Map kernel // Map kernel
pti::map(); pti::map();
let a = inner(&mut *(rsp as *mut SyscallStack)); inner(rsp as *mut SyscallStack);
// Unmap kernel // Unmap kernel
pti::unmap(); pti::unmap();
asm!("" : : "{rax}"(a) : : "intel", "volatile");
// Interrupt return // Interrupt return
asm!("pop fs asm!("pop rbx
pop r11 pop fs"
pop r10 : : : : "intel", "volatile");
pop r9 scratch_pop!();
pop r8 asm!("iretq" : : : : "intel", "volatile");
pop rsi
pop rdi
pop rdx
pop rcx
pop rbx
add rsp, 8
iretq"
: : : : "intel", "volatile");
} }
#[allow(dead_code)] #[allow(dead_code)]
#[repr(packed)] #[repr(packed)]
pub struct SyscallStack { pub struct SyscallStack {
pub fs: usize,
pub r11: usize,
pub r10: usize,
pub r9: usize,
pub r8: usize,
pub rsi: usize,
pub rdi: usize,
pub rdx: usize,
pub rcx: usize,
pub rbx: usize, pub rbx: usize,
pub rax: usize, pub interrupt_stack: InterruptStack,
pub rip: usize,
pub cs: usize,
pub rflags: usize,
// Will only be present if syscall is called from another ring // Will only be present if syscall is called from another ring
pub rsp: usize, pub rsp: usize,
pub ss: usize, pub ss: usize,
} }
#[naked] #[naked]
pub unsafe extern fn clone_ret() { pub unsafe extern "C" fn clone_ret() {
asm!(" // The C x86_64 ABI specifies that rbp is pushed to save the old
pop rbp // call frame. Popping rbp means we're using the parent's call
xor rax, rax // frame and thus will not only return from this function but also
" // from the function above this one.
: : : : "intel", "volatile"); // When this is called, the stack should have been
// interrupt->inner->syscall->clone
// then changed to
// interrupt->inner->clone_ret->clone
// so this will return from "inner".
asm!("pop rbp" : : : : "intel", "volatile");
} }
use syscall::data::IntRegisters;
/// Print to console /// Print to console
#[macro_export] #[macro_export]
macro_rules! print { macro_rules! print {
...@@ -204,6 +206,50 @@ impl InterruptStack { ...@@ -204,6 +206,50 @@ impl InterruptStack {
self.scratch.dump(); self.scratch.dump();
println!("FS: {:>016X}", { self.fs }); println!("FS: {:>016X}", { self.fs });
} }
/// Saves all registers to a struct used by the proc:
/// scheme to read/write registers.
///
/// Copies fs, the scratch (caller-clobbered) registers and the iret
/// frame (rip/cs/rflags) into `all`. Registers that are not part of
/// this stack frame are not written — TODO confirm consumers of
/// `IntRegisters` tolerate those fields being left stale.
pub fn save(&self, all: &mut IntRegisters) {
    all.fs = self.fs;
    all.r11 = self.scratch.r11;
    all.r10 = self.scratch.r10;
    all.r9 = self.scratch.r9;
    all.r8 = self.scratch.r8;
    all.rsi = self.scratch.rsi;
    all.rdi = self.scratch.rdi;
    all.rdx = self.scratch.rdx;
    all.rcx = self.scratch.rcx;
    all.rax = self.scratch.rax;
    all.rip = self.iret.rip;
    all.cs = self.iret.cs;
    // `IntRegisters` uses the legacy name `eflags`, but this is the
    // full RFLAGS value from the iret frame.
    all.eflags = self.iret.rflags;
}
/// Loads all registers from a struct used by the proc:
/// scheme to read/write registers.
///
/// Inverse of `save`: only the fields stored in this frame (fs, the
/// scratch registers, and the iret rip/cs/rflags) are restored.
///
/// NOTE(review): `cs` and `rflags` are written back verbatim from the
/// tracer-supplied struct — confirm the caller sanitizes these, since
/// a tracer must not be able to hand the tracee a kernel code segment
/// or arbitrary flag bits.
pub fn load(&mut self, all: &IntRegisters) {
    self.fs = all.fs;
    self.scratch.r11 = all.r11;
    self.scratch.r10 = all.r10;
    self.scratch.r9 = all.r9;
    self.scratch.r8 = all.r8;
    self.scratch.rsi = all.rsi;
    self.scratch.rdi = all.rdi;
    self.scratch.rdx = all.rdx;
    self.scratch.rcx = all.rcx;
    self.scratch.rax = all.rax;
    self.iret.rip = all.rip;
    self.iret.cs = all.cs;
    self.iret.rflags = all.eflags;
}
/// Turns the x86 Trap Flag (bit 8 of RFLAGS in the iret frame) on or
/// off. While the flag is set, the CPU raises a Debug exception after
/// every instruction, which is how the proc: scheme implements
/// single-stepping.
pub fn set_singlestep(&mut self, enabled: bool) {
    let trap_flag = 1 << 8;
    if enabled {
        self.iret.rflags |= trap_flag;
    } else {
        self.iret.rflags &= !trap_flag;
    }
}
} }
#[macro_export] #[macro_export]
......
#[macro_use] #[macro_use]
pub mod int_like; pub mod int_like;
pub mod unique;
use core::{fmt, ptr::NonNull};

/// A thin wrapper around `NonNull<T>` that is unconditionally
/// `Send + Sync`. That is only sound if the pointer is never accessed
/// from multiple locations across threads at the same time — which
/// always holds when the pointer really is unique.
pub struct Unique<T>(NonNull<T>);

// Implemented by hand: deriving would add `T: Copy` / `T: Clone`
// bounds, but copying the wrapper never touches the pointee.
impl<T> Copy for Unique<T> {}
impl<T> Clone for Unique<T> {
    fn clone(&self) -> Self {
        *self
    }
}

// Sound under the uniqueness contract documented on the type.
unsafe impl<T> Send for Unique<T> {}
unsafe impl<T> Sync for Unique<T> {}

impl<T> Unique<T> {
    /// Wraps `raw`, panicking if it is null.
    pub fn new(raw: *mut T) -> Self {
        Self(NonNull::new(raw).unwrap())
    }

    /// Wraps `raw` without a null check; the caller must guarantee the
    /// pointer is non-null.
    pub unsafe fn new_unchecked(raw: *mut T) -> Self {
        Self(NonNull::new_unchecked(raw))
    }

    /// Returns the wrapped raw pointer.
    pub fn as_ptr(&self) -> *mut T {
        self.0.as_ptr()
    }
}

impl<T> fmt::Debug for Unique<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.0)
    }
}
...@@ -7,15 +7,16 @@ use core::cmp::Ordering; ...@@ -7,15 +7,16 @@ use core::cmp::Ordering;
use core::mem; use core::mem;
use spin::Mutex; use spin::Mutex;
use crate::arch::paging::PAGE_SIZE; use crate::arch::{macros::InterruptStack, paging::PAGE_SIZE};
use crate::common::unique::Unique;
use crate::context::arch; use crate::context::arch;
use crate::context::file::FileDescriptor; use crate::context::file::FileDescriptor;
use crate::context::memory::{Grant, Memory, SharedMemory, Tls}; use crate::context::memory::{Grant, Memory, SharedMemory, Tls};
use crate::ipi::{ipi, IpiKind, IpiTarget}; use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::scheme::{SchemeNamespace, FileHandle}; use crate::scheme::{SchemeNamespace, FileHandle};
use crate::sync::WaitMap;
use crate::syscall::data::SigAction; use crate::syscall::data::SigAction;
use crate::syscall::flag::SIG_DFL; use crate::syscall::flag::SIG_DFL;
use crate::sync::WaitMap;
/// Unique identifier for a context (i.e. `pid`). /// Unique identifier for a context (i.e. `pid`).
use ::core::sync::atomic::AtomicUsize; use ::core::sync::atomic::AtomicUsize;
...@@ -165,6 +166,15 @@ pub struct Context { ...@@ -165,6 +166,15 @@ pub struct Context {
pub files: Arc<Mutex<Vec<Option<FileDescriptor>>>>, pub files: Arc<Mutex<Vec<Option<FileDescriptor>>>>,
/// Signal actions /// Signal actions
pub actions: Arc<Mutex<Vec<(SigAction, usize)>>>, pub actions: Arc<Mutex<Vec<(SigAction, usize)>>>,
/// The pointer to the user-space registers, saved after certain
/// interrupts. This pointer is somewhere inside kstack, and the
/// kstack address at the time of creation is the first element in
/// this tuple.
pub regs: Option<(usize, Unique<InterruptStack>)>,
/// A somewhat hacky way to initially stop a context when creating
/// a new instance of the proc: scheme, entirely separate from
/// signals or any other way to restart a process.
pub ptrace_stop: bool
} }
impl Context { impl Context {
...@@ -216,6 +226,8 @@ impl Context { ...@@ -216,6 +226,8 @@ impl Context {
}, },
0 0
); 128])), ); 128])),
regs: None,
ptrace_stop: false
} }
} }
......
...@@ -55,7 +55,7 @@ unsafe fn update(context: &mut Context, cpu_id: usize) { ...@@ -55,7 +55,7 @@ unsafe fn update(context: &mut Context, cpu_id: usize) {
unsafe fn runnable(context: &Context, cpu_id: usize) -> bool { unsafe fn runnable(context: &Context, cpu_id: usize) -> bool {
// Switch to context if it needs to run, is not currently running, and is owned by the current CPU // Switch to context if it needs to run, is not currently running, and is owned by the current CPU
!context.running && context.status == Status::Runnable && context.cpu_id == Some(cpu_id) !context.running && !context.ptrace_stop && context.status == Status::Runnable && context.cpu_id == Some(cpu_id)
} }
/// Switch to the next context /// Switch to the next context
......
...@@ -93,6 +93,9 @@ pub mod memory; ...@@ -93,6 +93,9 @@ pub mod memory;
#[cfg(not(any(feature="doc", test)))] #[cfg(not(any(feature="doc", test)))]
pub mod panic; pub mod panic;
/// Process tracing
pub mod ptrace;
/// Schemes, filesystem handlers /// Schemes, filesystem handlers
pub mod scheme; pub mod scheme;
......
use crate::{
arch::macros::InterruptStack,
common::unique::Unique,
context::{self, Context, ContextId, Status},
sync::WaitCondition
};
use alloc::{
boxed::Box,
collections::BTreeMap,
sync::Arc
};
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use syscall::error::*;
// ____ _ _ _
// | __ ) _ __ ___ __ _| | ___ __ ___ (_)_ __ | |_ ___
// | _ \| '__/ _ \/ _` | |/ / '_ \ / _ \| | '_ \| __/ __|
// | |_) | | | __/ (_| | <| |_) | (_) | | | | | |_\__ \
// |____/|_| \___|\__,_|_|\_\ .__/ \___/|_|_| |_|\__|___/
// |_|
/// Per-tracee breakpoint state, keyed by context id in `BREAKPOINTS`.
struct Handle {
    /// Condition the stopped tracee sleeps on; notified by
    /// `inner_cont` when the tracer lets it resume.
    tracee: Arc<WaitCondition>,
    /// Condition the tracer sleeps on in `wait_breakpoint` —
    /// presumably notified from breakpoint_callback (not shown here)
    /// when the tracee stops.
    tracer: Arc<WaitCondition>,
    /// Whether the tracee has already reached this breakpoint
    /// (checked by `wait_breakpoint` to avoid sleeping needlessly).
    reached: bool,
    /// Sysemu mode: the actual syscall is skipped so the tracer can
    /// emulate it (see `with_interrupt_stack!`).
    sysemu: bool,
    /// Break after a single instruction (Debug exception) instead of
    /// at a syscall boundary.
    singlestep: bool
}
/// All active breakpoints, keyed by tracee context id. Lazily
/// initialized on first access via `Once`.
static BREAKPOINTS: Once<RwLock<BTreeMap<ContextId, Handle>>> = Once::new();

// One-time initializer passed to `call_once` by the accessors below.
fn init_breakpoints() -> RwLock<BTreeMap<ContextId, Handle>> {
    RwLock::new(BTreeMap::new())
}

/// Acquires a read lock on the global breakpoint map.
fn breakpoints() -> RwLockReadGuard<'static, BTreeMap<ContextId, Handle>> {
    BREAKPOINTS.call_once(init_breakpoints).read()
}

/// Acquires a write lock on the global breakpoint map.
fn breakpoints_mut() -> RwLockWriteGuard<'static, BTreeMap<ContextId, Handle>> {
    BREAKPOINTS.call_once(init_breakpoints).write()
}
/// Removes the breakpoint registered for `pid` (if any) and wakes the
/// tracee so it can resume. Returns the removed handle so callers can
/// reuse its wait conditions.
fn inner_cont(pid: ContextId) -> Option<Handle> {
    // Remove the breakpoint to both save space and also make sure any
    // yet unreached but obsolete breakpoints don't stop the program.
    let handle = breakpoints_mut().remove(&pid)?;
    handle.tracee.notify();
    Some(handle)
}
/// Continue the process with the specified ID, discarding any pending
/// breakpoint state (the removed handle is intentionally dropped).
pub fn cont(pid: ContextId) {
    inner_cont(pid);
}
/// Create a new breakpoint for the specified tracee, optionally with a
/// sysemu flag. Any previous breakpoint for the same tracee is
/// continued (its tracee is woken) and its wait conditions are reused;
/// otherwise a fresh pair of conditions is allocated.
pub fn set_breakpoint(pid: ContextId, sysemu: bool, singlestep: bool) {
    let (tracee, tracer) = inner_cont(pid)
        .map(|old| (old.tracee, old.tracer))
        .unwrap_or_else(|| (
            Arc::new(WaitCondition::new()),
            Arc::new(WaitCondition::new()),
        ));

    breakpoints_mut().insert(pid, Handle {
        tracee,
        tracer,
        reached: false,
        sysemu,
        singlestep
    });
}
/// Wait for the tracee to stop.
/// Note: Don't call while holding any locks, this will switch contexts
pub fn wait_breakpoint(pid: ContextId) -> Result<()> {
    // Clone only the tracer condition out of the map so the breakpoint
    // lock is released before sleeping — the tracee needs that lock to
    // report that it stopped.
    let tracer = {
        let breakpoints = breakpoints();
        match breakpoints.get(&pid) {
            Some(breakpoint) if !breakpoint.reached => Arc::clone(&breakpoint.tracer),
            // No breakpoint, or the tracee already reached it: nothing
            // to wait for.
            _ => return Ok(())
        }
    };
    // NOTE(review): assumes `wait()` returning false means a spurious
    // wakeup and retrying is correct — confirm against WaitCondition.
    while !tracer.wait() {}

    // The tracee may have died while we slept; report it as gone
    // instead of pretending it stopped at the breakpoint.
    let contexts = context::contexts();
    let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
    let context = context.read();
    if let Status::Exited(_) = context.status {
        return Err(Error::new(ESRCH));
    }
    Ok(())
}
/// Returns the same value as breakpoint_callback would do, but
/// doesn't actually perform the action. You should not rely too
/// heavily on this value, as the lock *is* released between this call
/// and another.
pub fn breakpoint_callback_dryrun(singlestep: bool) -> Option<bool> {
let contexts = context::contexts();
let context = contexts.current()?;
let context = context.read();
let breakpoints = breakpoints();
let breakpoint = breakpoints.get(&context.id)?;
if breakpoint.singlestep != singlestep {
return None;
}
Some(breakpoint.sysemu)