Verified Commit ccaaf08e authored by jD91mZM2's avatar jD91mZM2

WIP(ptrace): Initial register reading support

parent 78e79fc4
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::common::unique::Unique;
use crate::context;
use crate::context::timeout;
use crate::device::pic;
......@@ -40,7 +41,7 @@ pub unsafe fn acknowledge(irq: usize) {
}
}
interrupt!(pit, {
interrupt_stack!(pit, stack, {
// Saves CPU time by not sending IRQ event irq_trigger(0);
const PIT_RATE: u64 = 2_250_286;
......@@ -61,7 +62,23 @@ interrupt!(pit, {
timeout::trigger();
if PIT_TICKS.fetch_add(1, Ordering::SeqCst) >= 10 {
{
let contexts = crate::context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
// Make all registers available to e.g. the proc:
// scheme
context.interrupt_stack = Some(Unique::new_unchecked(stack));
}
}
let _ = context::switch();
{
let contexts = crate::context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.interrupt_stack = None;
}
}
}
});
......
use crate::arch::macros::InterruptStack;
use crate::arch::{gdt, pti};
use crate::syscall;
use crate::common::unique::Unique;
use crate::{context, syscall};
use x86::shared::msr;
pub unsafe fn init() {
......@@ -12,14 +14,52 @@ pub unsafe fn init() {
msr::wrmsr(msr::IA32_EFER, efer | 1);
}
// Not a function pointer because it seemed unreliable even with
// #[inline(never)]. Not sure what the problem is.
/// Wraps a syscall handler body so that, for the whole duration of the
/// syscall, the current context publishes a pointer to the on-stack
/// register dump (`interrupt_stack`). This is what allows the `proc:`
/// scheme to read a stopped process's registers.
macro_rules! with_interrupt_stack {
    (unsafe fn $wrapped:ident($stack:ident) -> usize $code:block) => {
        /// Because of how clones work, we need a function that returns a
        /// usize. Here, `inner` will be this function. The child process in a
        /// clone will terminate this function with a 0 return value, and it
        /// might also have updated the interrupt_stack pointer.
        #[inline(never)]
        unsafe fn $wrapped(stack: *mut SyscallStack) {
            let stack = &mut *stack;
            // Publish the register dump before running the syscall, so the
            // proc: scheme can find it while this context is stopped.
            {
                let contexts = context::contexts();
                if let Some(context) = contexts.current() {
                    let mut context = context.write();
                    context.interrupt_stack = Some(Unique::new_unchecked(&mut stack.interrupt_stack));
                }
            }

            // Run the wrapped body with `$stack` bound to a fresh reborrow
            // of the stack frame; its value is the syscall return value.
            let ret = {
                let $stack = &mut *stack;
                $code
            };

            // Unpublish the pointer before returning, and place the return
            // value in rax, which the exit path restores before iretq.
            //
            // NOTE(review): if there is no current context here, `ret` is
            // silently dropped and rax is left unchanged — confirm this is
            // intended (WIP commit).
            {
                let contexts = context::contexts();
                if let Some(context) = contexts.current() {
                    let mut context = context.write();
                    context.interrupt_stack = None;
                    stack.interrupt_stack.scratch.rax = ret;
                }
            }
        }
    }
}
#[naked]
pub unsafe extern fn syscall_instruction() {
#[inline(never)]
unsafe fn inner(stack: &mut SyscallStack) -> usize {
let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
syscall::syscall(stack.rax, stack.rdi, stack.rsi, stack.rdx, stack.r10, stack.r8, rbp, stack)
with_interrupt_stack! {
unsafe fn inner(stack) -> usize {
let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
let scratch = &stack.interrupt_stack.scratch;
syscall::syscall(scratch.rax, scratch.rdi, scratch.rsi, scratch.rdx, scratch.r10, scratch.r8, rbp, stack)
}
}
// Yes, this is magic. No, you don't need to understand
......@@ -40,76 +80,52 @@ pub unsafe extern fn syscall_instruction() {
:
: "intel", "volatile");
// Push scratch registers
asm!("push rax
push rbx
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov r11, 0x18
mov fs, r11"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Map kernel
pti::map();
let a = inner(&mut *(rsp as *mut SyscallStack));
// Unmap kernel
pti::unmap();
asm!("" : : "{rax}"(a) : : "intel", "volatile");
// Interrupt return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rbx
add rsp, 8
iretq"
: : : : "intel", "volatile");
// Push scratch registers
scratch_push!();
asm!("push fs
mov r11, 0x18
mov fs, r11
push rbx"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Map kernel
pti::map();
inner(rsp as *mut SyscallStack);
// Unmap kernel
pti::unmap();
// Interrupt return
asm!("pop rbx
pop fs"
: : : : "intel", "volatile");
scratch_pop!();
asm!("iretq" : : : : "intel", "volatile");
}
#[naked]
pub unsafe extern fn syscall() {
#[inline(never)]
unsafe fn inner(stack: &mut SyscallStack) -> usize {
let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
syscall::syscall(stack.rax, stack.rbx, stack.rcx, stack.rdx, stack.rsi, stack.rdi, rbp, stack)
with_interrupt_stack! {
unsafe fn inner(stack) -> usize {
let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
let scratch = &stack.interrupt_stack.scratch;
syscall::syscall(scratch.rax, stack.rbx, scratch.rcx, scratch.rdx, scratch.rsi, scratch.rdi, rbp, stack)
}
}
// Push scratch registers
asm!("push rax
push rbx
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
scratch_push!();
asm!("push fs
mov r11, 0x18
mov fs, r11"
mov fs, r11
push rbx"
: : : : "intel", "volatile");
// Get reference to stack variables
......@@ -119,46 +135,25 @@ pub unsafe extern fn syscall() {
// Map kernel
pti::map();
let a = inner(&mut *(rsp as *mut SyscallStack));
inner(rsp as *mut SyscallStack);
// Unmap kernel
pti::unmap();
asm!("" : : "{rax}"(a) : : "intel", "volatile");
// Interrupt return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rbx
add rsp, 8
iretq"
: : : : "intel", "volatile");
asm!("pop rbx
pop fs"
: : : : "intel", "volatile");
scratch_pop!();
asm!("iretq" : : : : "intel", "volatile");
}
#[allow(dead_code)]
#[repr(packed)]
pub struct SyscallStack {
pub fs: usize,
pub r11: usize,
pub r10: usize,
pub r9: usize,
pub r8: usize,
pub rsi: usize,
pub rdi: usize,
pub rdx: usize,
pub rcx: usize,
pub rbx: usize,
pub rax: usize,
pub rip: usize,
pub cs: usize,
pub rflags: usize,
pub interrupt_stack: InterruptStack,
// Will only be present if syscall is called from another ring
pub rsp: usize,
pub ss: usize,
......@@ -166,9 +161,7 @@ pub struct SyscallStack {
#[naked]
pub unsafe extern fn clone_ret() {
asm!("
pop rbp
xor rax, rax
"
: : : : "intel", "volatile");
asm!("pop rbp
xor rax, rax"
: : : : "intel", "volatile");
}
use syscall::data::IntRegisters;
/// Print to console
#[macro_export]
macro_rules! print {
......@@ -95,6 +97,17 @@ impl PreservedRegisters {
}
}
/// Callee-saved ("preserved") integer registers as laid out on the
/// stack, in reverse push order.
///
/// NOTE(review): the name says `Fs` but the fields are the preserved
/// registers (r15..rbx) — confirm the name matches its intended use.
#[allow(dead_code)]
#[repr(packed)]
pub struct FsRegisters {
    pub r15: usize,
    pub r14: usize,
    pub r13: usize,
    pub r12: usize,
    pub rbp: usize,
    pub rbx: usize,
}
macro_rules! preserved_push {
() => (asm!(
"push rbx
......@@ -204,6 +217,40 @@ impl InterruptStack {
self.scratch.dump();
println!("FS: {:>016X}", { self.fs });
}
/// Saves all registers to a struct used by the proc:
/// scheme to read/write registers.
///
/// Copies only what this stack captures: `fs`, the caller-saved
/// scratch registers, and the iret frame (`rip`, `cs`, and `rflags`,
/// the last stored into `all.eflags`). Fields of `all` with no
/// counterpart here are left untouched.
pub fn save(&self, all: &mut IntRegisters) {
    all.fs = self.fs;
    all.r11 = self.scratch.r11;
    all.r10 = self.scratch.r10;
    all.r9 = self.scratch.r9;
    all.r8 = self.scratch.r8;
    all.rsi = self.scratch.rsi;
    all.rdi = self.scratch.rdi;
    all.rdx = self.scratch.rdx;
    all.rcx = self.scratch.rcx;
    all.rax = self.scratch.rax;
    all.rip = self.iret.rip;
    all.cs = self.iret.cs;
    all.eflags = self.iret.rflags;
}
/// Loads all registers from a struct used by the proc:
/// scheme to read/write registers.
///
/// The inverse of `save`: overwrites `fs`, the scratch registers and
/// the iret frame on this stack, so the new values take effect when
/// the interrupt returns.
///
/// NOTE(review): `rflags` is restored verbatim from tracer-supplied
/// data, which would let a tracer set sensitive flags (e.g. IF/IOPL)
/// — confirm whether masking is needed here.
pub fn load(&mut self, all: &IntRegisters) {
    self.fs = all.fs;
    self.scratch.r11 = all.r11;
    self.scratch.r10 = all.r10;
    self.scratch.r9 = all.r9;
    self.scratch.r8 = all.r8;
    self.scratch.rsi = all.rsi;
    self.scratch.rdi = all.rdi;
    self.scratch.rdx = all.rdx;
    self.scratch.rcx = all.rcx;
    self.scratch.rax = all.rax;
    self.iret.rip = all.rip;
    self.iret.cs = all.cs;
    self.iret.rflags = all.eflags;
}
}
#[macro_export]
......
#[macro_use]
pub mod int_like;
pub mod unique;
use core::{fmt, ptr::NonNull};

/// A small wrapper around NonNull<T> that is Send + Sync, which is
/// only correct if the pointer is never accessed from multiple
/// locations across threads. Which is always, if the pointer is
/// unique.
pub struct Unique<T>(NonNull<T>);

impl<T> Copy for Unique<T> {}
impl<T> Clone for Unique<T> {
    fn clone(&self) -> Self {
        *self
    }
}

// SAFETY: sound only under the uniqueness contract documented on the
// type: the pointer must never be accessed concurrently from several
// threads.
unsafe impl<T> Send for Unique<T> {}
unsafe impl<T> Sync for Unique<T> {}

impl<T> Unique<T> {
    /// Wraps `ptr`, checking that it is non-null.
    ///
    /// # Panics
    /// Panics if `ptr` is null.
    pub fn new(ptr: *mut T) -> Self {
        // `expect` instead of a bare `unwrap` so a null pointer at a call
        // site produces a self-explanatory panic message.
        Self(NonNull::new(ptr).expect("Unique::new called with a null pointer"))
    }
    /// Wraps `ptr` without checking for null.
    ///
    /// # Safety
    /// `ptr` must be non-null.
    pub unsafe fn new_unchecked(ptr: *mut T) -> Self {
        Self(NonNull::new_unchecked(ptr))
    }
    /// Returns the wrapped raw pointer.
    pub fn as_ptr(&self) -> *mut T {
        self.0.as_ptr()
    }
}

impl<T> fmt::Debug for Unique<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.0)
    }
}
......@@ -7,15 +7,16 @@ use core::cmp::Ordering;
use core::mem;
use spin::Mutex;
use crate::arch::paging::PAGE_SIZE;
use crate::arch::{macros::InterruptStack, paging::PAGE_SIZE};
use crate::common::unique::Unique;
use crate::context::arch;
use crate::context::file::FileDescriptor;
use crate::context::memory::{Grant, Memory, SharedMemory, Tls};
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::scheme::{SchemeNamespace, FileHandle};
use crate::sync::WaitMap;
use crate::syscall::data::SigAction;
use crate::syscall::flag::SIG_DFL;
use crate::sync::WaitMap;
/// Unique identifier for a context (i.e. `pid`).
use ::core::sync::atomic::AtomicUsize;
......@@ -165,6 +166,8 @@ pub struct Context {
pub files: Arc<Mutex<Vec<Option<FileDescriptor>>>>,
/// Signal actions
pub actions: Arc<Mutex<Vec<(SigAction, usize)>>>,
/// The interrupt stack which holds all the context's registers
pub interrupt_stack: Option<Unique<InterruptStack>>,
}
impl Context {
......@@ -216,6 +219,7 @@ impl Context {
},
0
); 128])),
interrupt_stack: None
}
}
......
......@@ -14,8 +14,8 @@ use crate::paging::temporary_page::TemporaryPage;
#[derive(Debug)]
pub struct Grant {
start: VirtualAddress,
size: usize,
pub start: VirtualAddress,
pub size: usize,
flags: EntryFlags,
mapped: bool,
owned: bool,
......
......@@ -93,6 +93,9 @@ pub mod memory;
#[cfg(not(any(feature="doc", test)))]
pub mod panic;
/// Process tracing
pub mod ptrace;
/// Schemes, filesystem handlers
pub mod scheme;
......
use crate::{
context::{self, ContextId},
sync::WaitCondition
};
use alloc::{
collections::BTreeMap,
sync::Arc
};
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use syscall::data::IntRegisters;
/// Per-process syscall breakpoint state.
struct Handle {
    // The tracee blocks on this; the tracer notifies it to resume.
    condition: Arc<WaitCondition>,
    // Returned to the syscall path by ptrace_syscall_callback;
    // presumably mirrors PTRACE_SYSEMU (skip the real syscall) —
    // TODO confirm against the caller.
    sysemu: bool
}
// Lazily-initialized map of per-process syscall breakpoints.
static SYSCALL_BREAKPOINTS: Once<RwLock<BTreeMap<ContextId, Handle>>> = Once::new();

/// Builds the empty breakpoint map; used only to initialize
/// `SYSCALL_BREAKPOINTS` on first access.
fn init_breakpoints() -> RwLock<BTreeMap<ContextId, Handle>> {
    RwLock::new(BTreeMap::new())
}

/// Read-locked view of all registered syscall breakpoints.
fn breakpoints() -> RwLockReadGuard<'static, BTreeMap<ContextId, Handle>> {
    SYSCALL_BREAKPOINTS.call_once(init_breakpoints).read()
}

/// Write-locked view of all registered syscall breakpoints.
fn breakpoints_mut() -> RwLockWriteGuard<'static, BTreeMap<ContextId, Handle>> {
    SYSCALL_BREAKPOINTS.call_once(init_breakpoints).write()
}
/// Resumes `pid` if it is blocked on a syscall breakpoint.
///
/// A no-op when no breakpoint is registered for the process.
pub fn ptrace_cont(pid: ContextId) {
    // The read guard returned by breakpoints() lives for the whole
    // `if let`, exactly as in a separate binding.
    if let Some(breakpoint) = breakpoints().get(&pid) {
        breakpoint.condition.notify();
    }
}
/// Registers (or replaces) a syscall breakpoint for `pid`.
///
/// `sysemu` is handed back to the syscall path by
/// `ptrace_syscall_callback`; presumably it requests syscall
/// emulation (PTRACE_SYSEMU-style) — TODO confirm.
pub fn ptrace_break_syscall(pid: ContextId, sysemu: bool) {
    // Continue execution of the tracee and therefore also release
    // locks on breakpoints(). This has to be done before trying a
    // mutable lock.
    ptrace_cont(pid);

    // TODO: reuse WaitConditions?
    breakpoints_mut().insert(pid, Handle {
        condition: Arc::new(WaitCondition::new()),
        sysemu
    });
}
/// Blocks the current context on its syscall breakpoint, if any.
///
/// Returns `None` when there is no current context or no breakpoint is
/// registered for it; otherwise blocks until the tracer continues the
/// process and returns `Some(sysemu)`.
///
/// Note: Don't call while holding any locks, this will switch contexts
pub fn ptrace_syscall_callback() -> Option<bool> {
    // Can't hold any locks when executing wait(): clone the condition
    // and copy the sysemu flag, then let every guard drop at the end
    // of this block before blocking.
    let (condition, sysemu) = {
        let contexts = context::contexts();
        let context = contexts.current()?;
        let context = context.read();
        let breakpoints = breakpoints();
        let breakpoint = breakpoints.get(&context.id)?;
        (
            Arc::clone(&breakpoint.condition),
            breakpoint.sysemu
        )
    };

    // TODO: How should signals affect the wait?
    // Presumably wait() returns false on a spurious wakeup, hence the
    // retry loop — confirm WaitCondition's contract.
    while !condition.wait() {}
    Some(sysemu)
}
......@@ -22,6 +22,7 @@ use self::irq::IrqScheme;
use self::itimer::ITimerScheme;
use self::memory::MemoryScheme;
use self::pipe::PipeScheme;
use self::proc::ProcScheme;
use self::root::RootScheme;
use self::sys::SysScheme;
use self::time::TimeScheme;
......@@ -51,6 +52,9 @@ pub mod memory;
/// `pipe:` - used internally by the kernel to implement `pipe`
pub mod pipe;
/// `proc:` - allows tracing processes and reading/writing their memory
pub mod proc;
/// `:` - allows the creation of userspace schemes, tightly dependent on `user`
pub mod root;
......@@ -128,29 +132,21 @@ impl SchemeList {
}
/// Initialize the root namespace
#[cfg(not(feature="live"))]
fn new_root(&mut self) {
// Do common namespace initialization
let ns = self.new_ns();
// Debug, Initfs and IRQ are only available in the root namespace. Pipe is special
// These schemes should only be available on the root
self.insert(ns, Box::new(*b"debug"), |scheme_id| Arc::new(Box::new(DebugScheme::new(scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"initfs"), |_| Arc::new(Box::new(InitFsScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"irq"), |scheme_id| Arc::new(Box::new(IrqScheme::new(scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"pipe"), |scheme_id| Arc::new(Box::new(PipeScheme::new(scheme_id)))).unwrap();
}
self.insert(ns, Box::new(*b"proc"), |_| Arc::new(Box::new(ProcScheme::new()))).unwrap();
/// Initialize the root namespace - with live disk
#[cfg(feature="live")]
fn new_root(&mut self) {
// Do common namespace initialization
let ns = self.new_ns();
#[cfg(feature = "live")] {
self.insert(ns, Box::new(*b"disk/live"), |_| Arc::new(Box::new(self::live::DiskScheme::new()))).unwrap();
}
// Debug, Disk, Initfs and IRQ are only available in the root namespace. Pipe is special
self.insert(ns, Box::new(*b"debug"), |scheme_id| Arc::new(Box::new(DebugScheme::new(scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"disk/live"), |_| Arc::new(Box::new(self::live::DiskScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"initfs"), |_| Arc::new(Box::new(InitFsScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"irq"), |scheme_id| Arc::new(Box::new(IrqScheme::new(scheme_id)))).unwrap();
// Pipe is special and needs to be in the root namespace
self.insert(ns, Box::new(*b"pipe"), |scheme_id| Arc::new(Box::new(PipeScheme::new(scheme_id)))).unwrap();
}
......
use crate::{
context::{self, ContextId},
syscall
};
use alloc::collections::BTreeMap;
use core::{
cmp,
mem,
slice,
sync::atomic::{AtomicUsize, Ordering}
};
use spin::RwLock;
use ::syscall::{
data::{IntRegisters, FloatRegisters},
error::*,
flag::*,
scheme::Scheme
};
/// Which register set a `regs/...` handle refers to.
#[derive(Clone, Copy)]
enum RegsKind {
    // Opened via "<pid>/regs/float".
    Float,
    // Opened via "<pid>/regs/int".
    Int
}
/// What a `proc:` handle operates on, chosen by the path suffix in `open`.
#[derive(Clone, Copy)]
enum Operation {
    // "<pid>/mem": the process's memory.
    Memory,
    // "<pid>/regs/...": one of the register sets.
    Regs(RegsKind),
    // "<pid>/trace": process tracing.
    Trace
}
/// State behind one open `proc:` file id.
#[derive(Clone, Copy)]
struct Handle {
    // The target process.
    pid: ContextId,
    // The operation this handle was opened for.
    operation: Operation
}
/// The `proc:` scheme: allows tracing processes and reading/writing
/// their registers and memory.
pub struct ProcScheme {
    // Source of unique file ids; incremented on every open().
    next_id: AtomicUsize,
    // All currently open handles, keyed by file id.
    handles: RwLock<BTreeMap<usize, Handle>>
}
impl ProcScheme {
pub fn new() -> Self {
Self {
next_id: AtomicUsize::new(0),
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for ProcScheme {
fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let path = core::str::from_utf8(path).map_err(|_| Error::new(EINVAL))?;
let mut parts = path.splitn(2, '/');
let pid = parts.next()
.and_then(|s| s.parse().ok())
.map(ContextId::from)
.ok_or(Error::new(EINVAL))?;
let operation = match parts.next() {
Some("mem") => Operation::Memory,
Some("regs/float") => Operation::Regs(RegsKind::Float),
Some("regs/int") => Operation::Regs(RegsKind::Int),
Some("trace") => Operation::Trace,
_ => return Err(Error::new(EINVAL))
};
// TODO: Put security here!!! Maybe check if user/group owns the process?
if let Operation::Trace = operation {
syscall::kill(pid, SIGSTOP)?;
}
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
pid,
operation
});
Ok(id)
}
/// Using dup for `proc:` simply opens another operation on the same PID
/// ```rust,ignore
/// let trace = syscall::open("proc:1234/trace")?;
///
/// // let regs = syscall::open("proc:1234/regs/int")?;
/// let regs = syscall::dup(trace, "regs/int")?;
/// ```
fn dup(&self, old_id: usize, buf: &[u8]) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&old_id).ok_or(Error::new(EBADF))?;
let mut path = format!("proc:{}/", handle.pid.into()).into_bytes();
path.extend_from_slice(buf);
// NOTE: If security relies on uid or gid, DO NOT ZERO THEM (0=root)
self.open(&path, 0, 0, 0)
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
// Can't hold locks during the context switch later when
// waiting for a process to stop running.
let (operation, pid) = {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
(handle.operation, handle.pid)
};
match operation {
Operation::Memory => {
let contexts = context::contexts();
let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
let context = context.read();
for grant in &*context.grants.lock() {
println!("Grant: {} -> {}", grant.start.get(), grant.size);
}
unimplemented!();
},
Operation::Regs(kind) => {
union Output {
float: FloatRegisters,
int: IntRegisters
}
let mut first = true;
let (output, size) = loop {
if !first {
// `continue` = Delay and repeat
unsafe { context::switch(); }
}
first = false;
let contexts = context::contexts();
let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
let context = context.read();
break match kind {
RegsKind::Float => {
// TODO!!
(Output { float: FloatRegisters::default() }, mem::size_of::<FloatRegisters>())
},
RegsKind::Int => match context.interrupt_stack {
None => {
println!("No interrupt_stack");
// Another CPU is running this process, wait until it's stopped.
continue;
},
Some(stack) => {
let mut regs = IntRegisters::default();
unsafe {
(&*stack.as_ptr()).save(&mut regs);
}