Commit cbc892d1 authored by Jeremy Soller

Merge branch 'ptrace-3' into 'master'

Ptrace memory reading and floating point registers support

See merge request !104
parents 788526a3 6fbb4fba
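
The interfaces this merge adds appear further down in the diff: InterruptStack::save/load copy the integer registers to and from the IntRegisters struct used by the proc: scheme, and Context::get_fx_regs/set_fx_regs expose the FXSAVE area as FloatRegisters. A minimal kernel-side sketch of how they fit together (the variable names are assumptions for illustration, not code from this commit):

// Hedged sketch: `interrupt_stack: &mut InterruptStack` and `context: &mut Context`
// stand for whatever the proc: scheme handler has in scope for the traced process.
let mut int_regs = IntRegisters::default();   // assumes the derived Default impl
interrupt_stack.save(&mut int_regs);          // copy CPU state out for the tracer
// ... a real tracer would inspect or modify int_regs here ...
interrupt_stack.load(&int_regs);              // write the permitted registers back

if let Some(float_regs) = context.get_fx_regs() {
    // None until the context has been switched out once and its FXSAVE area is loadable;
    // st_space holds the 80-bit x87 values with the reserved high bits masked off.
    let _ok = context.set_fx_regs(float_regs); // returns false if the FX area is not loadable
}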
......@@ -180,7 +180,7 @@ dependencies = [
"goblin 0.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
"linked_list_allocator 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-cpuid 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.54",
"redox_syscall 0.1.56",
"rustc-demangle 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
"slab_allocator 0.3.1",
"spin 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
......@@ -277,7 +277,7 @@ dependencies = [
[[package]]
name = "redox_syscall"
version = "0.1.54"
version = "0.1.56"
[[package]]
name = "regex"
......
......@@ -30,6 +30,7 @@ pub unsafe fn init_paging() {
IDT[1].set_func(exception::debug);
IDT[2].set_func(exception::non_maskable);
IDT[3].set_func(exception::breakpoint);
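// Allow the breakpoint gate to be invoked from ring 3, so a userspace `int3`
// traps into the kernel instead of raising a general protection fault.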
IDT[3].set_flags(IdtFlags::PRESENT | IdtFlags::RING_3 | IdtFlags::INTERRUPT);
IDT[4].set_func(exception::overflow);
IDT[5].set_func(exception::bound_range);
IDT[6].set_func(exception::invalid_opcode);
......
......@@ -10,7 +10,7 @@ extern {
fn ksignal(signal: usize);
}
interrupt_stack_p!(divide_by_zero, stack, {
interrupt_stack!(divide_by_zero, stack, {
println!("Divide by zero");
stack.dump();
stack_trace();
......@@ -18,41 +18,44 @@ interrupt_stack_p!(divide_by_zero, stack, {
});
interrupt_stack!(debug, stack, {
match ptrace::breakpoint_callback_dryrun(true) {
Some(_) => {
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
}
}
let mut handled = false;
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
}
}
}
let had_singlestep = stack.iret.rflags & (1 << 8) == 1 << 8;
stack.set_singlestep(false);
if ptrace::breakpoint_callback(true).is_none() {
// There is no guarantee that this is Some(_) just
// because the dryrun is Some(_). So, if there wasn't
// *actually* any breakpoint, restore the trap flag.
stack.set_singlestep(had_singlestep);
}
// Disable singlestep before there is a breakpoint, since the
// breakpoint handler might end up setting it again, but unless it
// does we want the default to be false.
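// Bit 8 of RFLAGS is the trap flag (TF); while it is set the CPU raises a
// debug exception after each instruction, which is what single-stepping
// relies on.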
let had_singlestep = stack.iret.rflags & (1 << 8) == 1 << 8;
stack.set_singlestep(false);
if ptrace::breakpoint_callback(true).is_some() {
handled = true;
} else {
// There was no breakpoint, restore original value
stack.set_singlestep(had_singlestep);
}
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.regs = None;
}
}
},
None => {
println!("Debug trap");
stack.dump();
ksignal(SIGTRAP);
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.regs = None;
}
}
if !handled {
println!("Debug trap");
stack.dump();
ksignal(SIGTRAP);
}
});
interrupt_stack!(non_maskable, stack, {
......@@ -66,70 +69,70 @@ interrupt_stack!(breakpoint, stack, {
ksignal(SIGTRAP);
});
interrupt_stack_p!(overflow, stack, {
interrupt_stack!(overflow, stack, {
println!("Overflow trap");
stack.dump();
stack_trace();
ksignal(SIGFPE);
});
interrupt_stack_p!(bound_range, stack, {
interrupt_stack!(bound_range, stack, {
println!("Bound range exceeded fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_stack_p!(invalid_opcode, stack, {
interrupt_stack!(invalid_opcode, stack, {
println!("Invalid opcode fault");
stack.dump();
stack_trace();
ksignal(SIGILL);
});
interrupt_stack_p!(device_not_available, stack, {
interrupt_stack!(device_not_available, stack, {
println!("Device not available fault");
stack.dump();
stack_trace();
ksignal(SIGILL);
});
interrupt_error_p!(double_fault, stack, {
interrupt_error!(double_fault, stack, {
println!("Double fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(invalid_tss, stack, {
interrupt_error!(invalid_tss, stack, {
println!("Invalid TSS fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(segment_not_present, stack, {
interrupt_error!(segment_not_present, stack, {
println!("Segment not present fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(stack_segment, stack, {
interrupt_error!(stack_segment, stack, {
println!("Stack segment fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(protection, stack, {
interrupt_error!(protection, stack, {
println!("Protection fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(page, stack, {
interrupt_error!(page, stack, {
let cr2: usize;
asm!("mov rax, cr2" : "={rax}"(cr2) : : : "intel", "volatile");
println!("Page fault: {:>016X}", cr2);
......@@ -138,42 +141,42 @@ interrupt_error_p!(page, stack, {
ksignal(SIGSEGV);
});
interrupt_stack_p!(fpu, stack, {
interrupt_stack!(fpu, stack, {
println!("FPU floating point fault");
stack.dump();
stack_trace();
ksignal(SIGFPE);
});
interrupt_error_p!(alignment_check, stack, {
interrupt_error!(alignment_check, stack, {
println!("Alignment check fault");
stack.dump();
stack_trace();
ksignal(SIGBUS);
});
interrupt_stack_p!(machine_check, stack, {
interrupt_stack!(machine_check, stack, {
println!("Machine check fault");
stack.dump();
stack_trace();
ksignal(SIGBUS);
});
interrupt_stack_p!(simd, stack, {
interrupt_stack!(simd, stack, {
println!("SIMD floating point fault");
stack.dump();
stack_trace();
ksignal(SIGFPE);
});
interrupt_stack_p!(virtualization, stack, {
interrupt_stack!(virtualization, stack, {
println!("Virtualization fault");
stack.dump();
stack_trace();
ksignal(SIGBUS);
});
interrupt_error_p!(security, stack, {
interrupt_error!(security, stack, {
println!("Security exception");
stack.dump();
stack_trace();
......
......@@ -18,19 +18,15 @@ pub unsafe fn init() {
// from clone() (via clone_ret()). Not sure what the problem is.
macro_rules! with_interrupt_stack {
(unsafe fn $wrapped:ident($stack:ident) -> usize $code:block) => {
/// Because of how clones work, we need a function that returns a
/// usize. Here, `inner` will be this function. The child process in a
/// clone will terminate this function with a 0 return value, and it
/// might also have updated the interrupt_stack pointer.
#[inline(never)]
unsafe fn $wrapped(stack: *mut SyscallStack) {
unsafe fn $wrapped(stack: *mut InterruptStack) {
let stack = &mut *stack;
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(&mut stack.interrupt_stack)));
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(&mut *stack)));
}
}
}
......@@ -39,7 +35,7 @@ macro_rules! with_interrupt_stack {
if !is_sysemu.unwrap_or(false) {
// If not on a sysemu breakpoint
let $stack = &mut *stack;
$stack.interrupt_stack.scratch.rax = $code;
$stack.scratch.rax = $code;
if is_sysemu.is_some() {
// Only callback if there was a pre-syscall
......@@ -66,7 +62,7 @@ pub unsafe extern fn syscall_instruction() {
let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
let scratch = &stack.interrupt_stack.scratch;
let scratch = &stack.scratch;
syscall::syscall(scratch.rax, scratch.rdi, scratch.rsi, scratch.rdx, scratch.r10, scratch.r8, rbp, stack)
}
}
......@@ -91,10 +87,10 @@ pub unsafe extern fn syscall_instruction() {
// Push scratch registers
scratch_push!();
preserved_push!();
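// Save the userspace fs selector and switch fs to a kernel segment
// (selector 0x18 here) so kernel-side per-CPU/TLS data can be reached.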
asm!("push fs
mov r11, 0x18
mov fs, r11
push rbx"
mov fs, r11"
: : : : "intel", "volatile");
// Get reference to stack variables
......@@ -104,15 +100,14 @@ pub unsafe extern fn syscall_instruction() {
// Map kernel
pti::map();
inner(rsp as *mut SyscallStack);
inner(rsp as *mut InterruptStack);
// Unmap kernel
pti::unmap();
// Interrupt return
asm!("pop rbx
pop fs"
: : : : "intel", "volatile");
asm!("pop fs" : : : : "intel", "volatile");
preserved_pop!();
scratch_pop!();
asm!("iretq" : : : : "intel", "volatile");
}
......@@ -124,17 +119,17 @@ pub unsafe extern fn syscall() {
let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
let scratch = &stack.interrupt_stack.scratch;
syscall::syscall(scratch.rax, stack.rbx, scratch.rcx, scratch.rdx, scratch.rsi, scratch.rdi, rbp, stack)
let scratch = &stack.scratch;
syscall::syscall(scratch.rax, stack.preserved.rbx, scratch.rcx, scratch.rdx, scratch.rsi, scratch.rdi, rbp, stack)
}
}
// Push scratch registers
scratch_push!();
preserved_push!();
asm!("push fs
mov r11, 0x18
mov fs, r11
push rbx"
mov fs, r11"
: : : : "intel", "volatile");
// Get reference to stack variables
......@@ -144,30 +139,18 @@ pub unsafe extern fn syscall() {
// Map kernel
pti::map();
inner(rsp as *mut SyscallStack);
inner(rsp as *mut InterruptStack);
// Unmap kernel
pti::unmap();
// Interrupt return
asm!("pop rbx
pop fs"
: : : : "intel", "volatile");
asm!("pop fs" : : : : "intel", "volatile");
preserved_pop!();
scratch_pop!();
asm!("iretq" : : : : "intel", "volatile");
}
#[allow(dead_code)]
#[repr(packed)]
pub struct SyscallStack {
pub rbx: usize,
pub interrupt_stack: InterruptStack,
// Will only be present if syscall is called from another ring
pub rsp: usize,
pub ss: usize,
}
#[naked]
pub unsafe extern "C" fn clone_ret() {
// The C x86_64 ABI specifies that rbp is pushed to save the old
......
use core::mem;
use syscall::data::IntRegisters;
/// Print to console
......@@ -143,6 +144,10 @@ pub struct IretRegisters {
pub rip: usize,
pub cs: usize,
pub rflags: usize,
// Will only be present if the interrupt was raised from another
// privilege ring
pub rsp: usize,
pub ss: usize
}
impl IretRegisters {
......@@ -196,6 +201,7 @@ macro_rules! interrupt {
#[repr(packed)]
pub struct InterruptStack {
pub fs: usize,
pub preserved: PreservedRegisters,
pub scratch: ScratchRegisters,
pub iret: IretRegisters,
}
......@@ -204,12 +210,20 @@ impl InterruptStack {
pub fn dump(&self) {
self.iret.dump();
self.scratch.dump();
self.preserved.dump();
println!("FS: {:>016X}", { self.fs });
}
/// Saves all registers to a struct used by the proc:
/// scheme to read/write registers.
pub fn save(&self, all: &mut IntRegisters) {
all.fs = self.fs;
all.r15 = self.preserved.r15;
all.r14 = self.preserved.r14;
all.r13 = self.preserved.r13;
all.r12 = self.preserved.r12;
all.rbp = self.preserved.rbp;
all.rbx = self.preserved.rbx;
all.r11 = self.scratch.r11;
all.r10 = self.scratch.r10;
all.r9 = self.scratch.r9;
......@@ -221,12 +235,42 @@ impl InterruptStack {
all.rax = self.scratch.rax;
all.rip = self.iret.rip;
all.cs = self.iret.cs;
all.eflags = self.iret.rflags;
all.rflags = self.iret.rflags;
// Set rsp and ss:
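// The low two bits of CS are the CPL; if the interrupted code ran at the same
// privilege level as the kernel, rsp/ss are not taken from the iret frame but
// reconstructed from this struct's own address below.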
const CPL_MASK: usize = 0b11;
let cs: usize;
unsafe {
asm!("mov $0, cs" : "=r"(cs) ::: "intel");
}
if self.iret.cs & CPL_MASK == cs & CPL_MASK {
// Privilege ring didn't change, so neither did the stack
all.rsp = self as *const Self as usize // rsp after Self was pushed to the stack
+ mem::size_of::<Self>() // disregard Self
- mem::size_of::<usize>() * 2; // well, almost: rsp and ss need to be excluded as they aren't present
unsafe {
asm!("mov $0, ss" : "=r"(all.ss) ::: "intel");
}
} else {
all.rsp = self.iret.rsp;
all.ss = self.iret.ss;
}
}
/// Loads all registers from a struct used by the proc:
/// scheme to read/write registers.
pub fn load(&mut self, all: &IntRegisters) {
self.fs = all.fs;
// TODO: Which of these should be allowed to change?
// self.fs = all.fs;
self.preserved.r15 = all.r15;
self.preserved.r14 = all.r14;
self.preserved.r13 = all.r13;
self.preserved.r12 = all.r12;
self.preserved.rbp = all.rbp;
self.preserved.rbx = all.rbx;
self.scratch.r11 = all.r11;
self.scratch.r10 = all.r10;
self.scratch.r9 = all.r9;
......@@ -236,9 +280,9 @@ impl InterruptStack {
self.scratch.rdx = all.rdx;
self.scratch.rcx = all.rcx;
self.scratch.rax = all.rax;
self.iret.rip = all.rip;
self.iret.cs = all.cs;
self.iret.rflags = all.eflags;
// self.iret.rip = all.rip;
// self.iret.cs = all.cs;
// self.iret.rflags = all.eflags;
}
/// Enables the "Trap Flag" in the FLAGS register, causing the CPU
/// to send a Debug exception after the next instruction. This is
......@@ -264,6 +308,7 @@ macro_rules! interrupt_stack {
// Push scratch registers
scratch_push!();
preserved_push!();
fs_push!();
// Get reference to stack variables
......@@ -281,6 +326,7 @@ macro_rules! interrupt_stack {
// Pop scratch registers and return
fs_pop!();
preserved_pop!();
scratch_pop!();
iret!();
}
......@@ -291,6 +337,7 @@ macro_rules! interrupt_stack {
#[repr(packed)]
pub struct InterruptErrorStack {
pub fs: usize,
pub preserved: PreservedRegisters,
pub scratch: ScratchRegisters,
pub code: usize,
pub iret: IretRegisters,
......@@ -301,6 +348,7 @@ impl InterruptErrorStack {
self.iret.dump();
println!("CODE: {:>016X}", { self.code });
self.scratch.dump();
self.preserved.dump();
println!("FS: {:>016X}", { self.fs });
}
}
......@@ -315,60 +363,6 @@ macro_rules! interrupt_error {
$func
}
// Push scratch registers
scratch_push!();
fs_push!();
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Map kernel
$crate::arch::x86_64::pti::map();
// Call inner rust function
inner(&*(rsp as *const $crate::arch::x86_64::macros::InterruptErrorStack));
// Unmap kernel
$crate::arch::x86_64::pti::unmap();
// Pop scratch registers, error code, and return
fs_pop!();
scratch_pop!();
asm!("add rsp, 8" : : : : "intel", "volatile");
iret!();
}
};
}
#[allow(dead_code)]
#[repr(packed)]
pub struct InterruptStackP {
pub fs: usize,
pub preserved: PreservedRegisters,
pub scratch: ScratchRegisters,
pub iret: IretRegisters,
}
impl InterruptStackP {
pub fn dump(&self) {
self.iret.dump();
self.scratch.dump();
self.preserved.dump();
println!("FS: {:>016X}", { self.fs });
}
}
#[macro_export]
macro_rules! interrupt_stack_p {
($name:ident, $stack: ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &mut $crate::arch::x86_64::macros::InterruptStackP) {
$func
}
// Push scratch registers
scratch_push!();
preserved_push!();
......@@ -382,64 +376,7 @@ macro_rules! interrupt_stack_p {
$crate::arch::x86_64::pti::map();
// Call inner rust function
inner(&mut *(rsp as *mut $crate::arch::x86_64::macros::InterruptStackP));
// Unmap kernel
$crate::arch::x86_64::pti::unmap();
// Pop scratch registers and return
fs_pop!();
preserved_pop!();
scratch_pop!();
iret!();
}
};
}
#[allow(dead_code)]
#[repr(packed)]
pub struct InterruptErrorStackP {
pub fs: usize,
pub preserved: PreservedRegisters,
pub scratch: ScratchRegisters,
pub code: usize,
pub iret: IretRegisters,
}
impl InterruptErrorStackP {
pub fn dump(&self) {
self.iret.dump();
println!("CODE: {:>016X}", { self.code });
self.scratch.dump();
self.preserved.dump();
println!("FS: {:>016X}", { self.fs });
}
}
#[macro_export]
macro_rules! interrupt_error_p {
($name:ident, $stack:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::arch::x86_64::macros::InterruptErrorStackP) {
$func
}
// Push scratch registers
scratch_push!();
preserved_push!();
fs_push!();
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Map kernel
$crate::arch::x86_64::pti::map();
// Call inner rust function
inner(&*(rsp as *const $crate::arch::x86_64::macros::InterruptErrorStackP));
inner(&*(rsp as *const $crate::arch::x86_64::macros::InterruptErrorStack));
// Unmap kernel
$crate::arch::x86_64::pti::unmap();
......
......@@ -425,6 +425,10 @@ impl Page {
end: end,
}
}
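/// The page immediately following this one in the virtual address space.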
pub fn next(self) -> Page {
Self { number: self.number + 1 }
}
}
pub struct PageIter {
......@@ -438,7 +442,7 @@ impl Iterator for PageIter {
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
self.start = self.start.next();
Some(page)
} else {
None
......
use core::mem;
use core::sync::atomic::AtomicBool;
use syscall::data::FloatRegisters;
/// This must be used by the kernel to ensure that context switches are done atomically
/// Compare and exchange this to true when beginning a context switch on any CPU
......@@ -7,6 +8,8 @@ use core::sync::atomic::AtomicBool;
/// This must be done, as no locks can be held on the stack during switch
pub static CONTEXT_SWITCH_LOCK: AtomicBool = AtomicBool::new(false);
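// Each x87/MMX ST register occupies a 128-bit slot in the FXSAVE area, but only
// the low 80 bits hold the actual value; the high 48 bits are reserved. This mask
// lets ptrace strip them on read and preserve them on write.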
const ST_RESERVED: u128 = 0xFFFF_FFFF_FFFF_0000_0000_0000_0000_0000;
#[derive(Clone, Debug)]
pub struct Context {
/// FX valid?
......@@ -54,6 +57,44 @@ impl Context {
self.cr3
}
pub fn get_fx_regs(&self) -> Option<FloatRegisters> {
if !self.loadable {
return None;
}
let mut regs = unsafe { *(self.fx as *const FloatRegisters) };
regs._reserved = 0;
let mut new_st = regs.st_space;
for st in &mut new_st {
// Only allow access to the 80 lowest bits
*st &= !ST_RESERVED;
}
regs.st_space = new_st;
Some(regs)
}
pub fn set_fx_regs(&mut self, mut new: FloatRegisters) -> bool {
if !self.loadable {
return false;
}
let old = unsafe { &*(self.fx as *const FloatRegisters) };
new._reserved = old._reserved;
let old_st = new.st_space;
let mut new_st = new.st_space;
for (new_st, old_st) in new_st.iter_mut().zip(&old_st) {
*new_st &= !ST_RESERVED;
*new_st |= old_st & ST_RESERVED;
}
new.st_space = new_st;
// Make sure we don't use `old` from now on
drop(old);
unsafe {
*(self.fx as *mut FloatRegisters) = new;
}
true
}
pub fn set_fx(&mut self, address: usize) {
self.fx = address;
}
......@@ -88,10 +129,10 @@ impl Context {
#[inline(never)]
#[naked]
pub unsafe fn switch_to(&mut self, next: &mut Context) {
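// fxsave64/fxrstor64 use the 64-bit form of the FXSAVE area, so the full 64-bit
// FPU instruction and data pointers are saved rather than truncated to 32 bits.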
asm!("fxsave [$0]" : : "r"(self.fx) : "memory" : "intel", "volatile");
asm!("fxsave64 [$0]" : : "r"(self.fx) : "memory" : "intel", "volatile");
self.loadable = true;
if next.loadable {
asm!("fxrstor [$0]" : : "r"(next.fx) : "memory" : "intel", "volatile");
asm!("fxrstor64 [$0]" : : "r"(next.fx) : "memory" : "intel", "volatile");
}else{
asm!("fninit" : : : "memory" : "intel", "volatile");
}
......
use crate::{
arch::macros::InterruptStack,
arch::{
macros::InterruptStack,
paging::{
entry::EntryFlags,
mapper::MapperFlushAll,
temporary_page::TemporaryPage,
ActivePageTable, InactivePageTable, Page, PAGE_SIZE, VirtualAddress
}
},
common::unique::Unique,
context::{self, Context, ContextId, Status},
sync::WaitCondition
......@@ -8,7 +16,8 @@ use crate::{
use alloc::{
boxed::Box,
collections::BTreeMap,
sync::Arc
sync::Arc,
vec::Vec