@@ -180,7 +180,7 @@ dependencies = [
"goblin 0.0.21 (registry+https://github.com/rust-lang/crates.io-index)",
"linked_list_allocator 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-cpuid 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.54",
"redox_syscall 0.1.56",
"rustc-demangle 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
"slab_allocator 0.3.1",
"spin 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -277,7 +277,7 @@ dependencies = [
[[package]]
name = "redox_syscall"
version = "0.1.54"
version = "0.1.56"
[[package]]
name = "regex"
......
@@ -14,6 +14,7 @@ fn cvt_bcd(value: usize) -> usize {
pub struct Rtc {
addr: Pio<u8>,
data: Pio<u8>,
nmi: bool,
}
impl Rtc {
@@ -22,54 +23,59 @@ impl Rtc {
Rtc {
addr: Pio::<u8>::new(0x70),
data: Pio::<u8>::new(0x71),
nmi: false,
}
}
/// Read
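/// a CMOS register. Bit 7 of the index port (0x70) is the NMI-disable
/// bit, so it is kept set unless `nmi` (NMIs enabled) is true.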
unsafe fn read(&mut self, reg: u8) -> u8 {
self.addr.write(reg);
if self.nmi {
self.addr.write(reg & 0x7F);
} else {
self.addr.write(reg | 0x80);
}
self.data.read()
}
/// Wait
unsafe fn wait(&mut self) {
while self.read(0xA) & 0x80 != 0x80 {}
while self.read(0xA) & 0x80 == 0x80 {}
}
/// Write
unsafe fn write(&mut self, reg: u8, value: u8) {
if self.nmi {
self.addr.write(reg & 0x7F);
} else {
self.addr.write(reg | 0x80);
}
self.data.write(value);
}
/// Get time
pub fn time(&mut self) -> u64 {
let mut second;
let mut minute;
let mut hour;
let mut day;
let mut month;
let mut year;
let mut century;
let register_b;
/// Wait for an update; this can take up to one second if `full` is specified!
unsafe fn wait(&mut self, full: bool) {
if full {
while self.read(0xA) & 0x80 != 0x80 {}
}
while self.read(0xA) & 0x80 == 0x80 {}
}
/// Get time without waiting
pub unsafe fn time_no_wait(&mut self) -> u64 {
/*let century_register = if let Some(ref fadt) = acpi::ACPI_TABLE.lock().fadt {
Some(fadt.century)
} else {
None
};*/
unsafe {
self.wait();
second = self.read(0) as usize;
minute = self.read(2) as usize;
hour = self.read(4) as usize;
day = self.read(7) as usize;
month = self.read(8) as usize;
year = self.read(9) as usize;
century = /* TODO: Fix invalid value from VirtualBox
if let Some(century_reg) = century_register {
self.read(century_reg) as usize
} else */ {
20
};
register_b = self.read(0xB);
}
let mut second = self.read(0) as usize;
let mut minute = self.read(2) as usize;
let mut hour = self.read(4) as usize;
let mut day = self.read(7) as usize;
let mut month = self.read(8) as usize;
let mut year = self.read(9) as usize;
let mut century = /* TODO: Fix invalid value from VirtualBox
if let Some(century_reg) = century_register {
self.read(century_reg) as usize
} else */ {
20
};
let register_b = self.read(0xB);
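// Bit 2 of register B selects binary mode; if it is clear, the
// values above are BCD and need conversion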
if register_b & 4 != 4 {
second = cvt_bcd(second);
@@ -123,4 +129,19 @@ impl Rtc {
secs
}
/// Get time
pub fn time(&mut self) -> u64 {
loop {
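// Read the clock twice; if an RTC update happened in between,
// the two values differ and we retry until they match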
unsafe {
self.wait(false);
let time = self.time_no_wait();
self.wait(false);
let next_time = self.time_no_wait();
if time == next_time {
return time;
}
}
}
}
}
@@ -30,6 +30,7 @@ pub unsafe fn init_paging() {
IDT[1].set_func(exception::debug);
IDT[2].set_func(exception::non_maskable);
IDT[3].set_func(exception::breakpoint);
IDT[3].set_flags(IdtFlags::PRESENT | IdtFlags::RING_3 | IdtFlags::INTERRUPT);
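// RING_3 lets userspace raise the breakpoint exception with int3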
IDT[4].set_func(exception::overflow);
IDT[5].set_func(exception::bound_range);
IDT[6].set_func(exception::invalid_opcode);
......
@@ -10,7 +10,7 @@ extern {
fn ksignal(signal: usize);
}
interrupt_stack_p!(divide_by_zero, stack, {
interrupt_stack!(divide_by_zero, stack, {
println!("Divide by zero");
stack.dump();
stack_trace();
@@ -18,41 +18,44 @@ interrupt_stack_p!(divide_by_zero, stack, {
});
interrupt_stack!(debug, stack, {
match ptrace::breakpoint_callback_dryrun(true) {
Some(_) => {
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
}
}
let mut handled = false;
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(stack)));
}
}
}
let had_singlestep = stack.iret.rflags & (1 << 8) == 1 << 8;
stack.set_singlestep(false);
if ptrace::breakpoint_callback(true).is_none() {
// There is no guarantee that this is Some(_) just
// because the dryrun is Some(_). So, if there wasn't
// *actually* any breakpoint, restore the trap flag.
stack.set_singlestep(had_singlestep);
}
// Disable singlestep before there is a breakpoint, since the
// breakpoint handler might end up setting it again, but unless it
// does we want the default to be false.
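// (bit 8 of RFLAGS is the Trap Flag, which makes the CPU raise a
// debug exception after each instruction)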
let had_singlestep = stack.iret.rflags & (1 << 8) == 1 << 8;
stack.set_singlestep(false);
if ptrace::breakpoint_callback(true).is_some() {
handled = true;
} else {
// There was no breakpoint, restore original value
stack.set_singlestep(had_singlestep);
}
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.regs = None;
}
}
},
None => {
println!("Debug trap");
stack.dump();
ksignal(SIGTRAP);
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
context.regs = None;
}
}
if !handled {
println!("Debug trap");
stack.dump();
ksignal(SIGTRAP);
}
});
interrupt_stack!(non_maskable, stack, {
@@ -66,70 +69,70 @@ interrupt_stack!(breakpoint, stack, {
ksignal(SIGTRAP);
});
interrupt_stack_p!(overflow, stack, {
interrupt_stack!(overflow, stack, {
println!("Overflow trap");
stack.dump();
stack_trace();
ksignal(SIGFPE);
});
interrupt_stack_p!(bound_range, stack, {
interrupt_stack!(bound_range, stack, {
println!("Bound range exceeded fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_stack_p!(invalid_opcode, stack, {
interrupt_stack!(invalid_opcode, stack, {
println!("Invalid opcode fault");
stack.dump();
stack_trace();
ksignal(SIGILL);
});
interrupt_stack_p!(device_not_available, stack, {
interrupt_stack!(device_not_available, stack, {
println!("Device not available fault");
stack.dump();
stack_trace();
ksignal(SIGILL);
});
interrupt_error_p!(double_fault, stack, {
interrupt_error!(double_fault, stack, {
println!("Double fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(invalid_tss, stack, {
interrupt_error!(invalid_tss, stack, {
println!("Invalid TSS fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(segment_not_present, stack, {
interrupt_error!(segment_not_present, stack, {
println!("Segment not present fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(stack_segment, stack, {
interrupt_error!(stack_segment, stack, {
println!("Stack segment fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(protection, stack, {
interrupt_error!(protection, stack, {
println!("Protection fault");
stack.dump();
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error_p!(page, stack, {
interrupt_error!(page, stack, {
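// CR2 holds the linear address that caused the page fault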
let cr2: usize;
asm!("mov rax, cr2" : "={rax}"(cr2) : : : "intel", "volatile");
println!("Page fault: {:>016X}", cr2);
@@ -138,42 +141,42 @@ interrupt_error_p!(page, stack, {
ksignal(SIGSEGV);
});
interrupt_stack_p!(fpu, stack, {
interrupt_stack!(fpu, stack, {
println!("FPU floating point fault");
stack.dump();
stack_trace();
ksignal(SIGFPE);
});
interrupt_error_p!(alignment_check, stack, {
interrupt_error!(alignment_check, stack, {
println!("Alignment check fault");
stack.dump();
stack_trace();
ksignal(SIGBUS);
});
interrupt_stack_p!(machine_check, stack, {
interrupt_stack!(machine_check, stack, {
println!("Machine check fault");
stack.dump();
stack_trace();
ksignal(SIGBUS);
});
interrupt_stack_p!(simd, stack, {
interrupt_stack!(simd, stack, {
println!("SIMD floating point fault");
stack.dump();
stack_trace();
ksignal(SIGFPE);
});
interrupt_stack_p!(virtualization, stack, {
interrupt_stack!(virtualization, stack, {
println!("Virtualization fault");
stack.dump();
stack_trace();
ksignal(SIGBUS);
});
interrupt_error_p!(security, stack, {
interrupt_error!(security, stack, {
println!("Security exception");
stack.dump();
stack_trace();
......
@@ -18,19 +18,15 @@ pub unsafe fn init() {
// from clone() (via clone_ret()). Not sure what the problem is.
macro_rules! with_interrupt_stack {
(unsafe fn $wrapped:ident($stack:ident) -> usize $code:block) => {
/// Because of how clones work, we need a function that returns a
/// usize. Here, `inner` will be this function. The child process in a
/// clone will terminate this function with a 0 return value, and it
/// might also have updated the interrupt_stack pointer.
#[inline(never)]
unsafe fn $wrapped(stack: *mut SyscallStack) {
unsafe fn $wrapped(stack: *mut InterruptStack) {
let stack = &mut *stack;
{
let contexts = context::contexts();
if let Some(context) = contexts.current() {
let mut context = context.write();
if let Some(ref mut kstack) = context.kstack {
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(&mut stack.interrupt_stack)));
context.regs = Some((kstack.as_mut_ptr() as usize, Unique::new_unchecked(&mut *stack)));
}
}
}
@@ -39,7 +35,7 @@ macro_rules! with_interrupt_stack {
if !is_sysemu.unwrap_or(false) {
// If not on a sysemu breakpoint
let $stack = &mut *stack;
$stack.interrupt_stack.scratch.rax = $code;
$stack.scratch.rax = $code;
if is_sysemu.is_some() {
// Only callback if there was a pre-syscall
@@ -66,7 +62,7 @@ pub unsafe extern fn syscall_instruction() {
let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
let scratch = &stack.interrupt_stack.scratch;
let scratch = &stack.scratch;
syscall::syscall(scratch.rax, scratch.rdi, scratch.rsi, scratch.rdx, scratch.r10, scratch.r8, rbp, stack)
}
}
@@ -91,10 +87,10 @@ pub unsafe extern fn syscall_instruction() {
// Push scratch registers
scratch_push!();
preserved_push!();
asm!("push fs
mov r11, 0x18
mov fs, r11
push rbx"
mov fs, r11"
: : : : "intel", "volatile");
// Get reference to stack variables
@@ -104,15 +100,14 @@ pub unsafe extern fn syscall_instruction() {
// Map kernel
pti::map();
inner(rsp as *mut SyscallStack);
inner(rsp as *mut InterruptStack);
// Unmap kernel
pti::unmap();
// Interrupt return
asm!("pop rbx
pop fs"
: : : : "intel", "volatile");
asm!("pop fs" : : : : "intel", "volatile");
preserved_pop!();
scratch_pop!();
asm!("iretq" : : : : "intel", "volatile");
}
@@ -124,17 +119,17 @@ pub unsafe extern fn syscall() {
let rbp;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
let scratch = &stack.interrupt_stack.scratch;
syscall::syscall(scratch.rax, stack.rbx, scratch.rcx, scratch.rdx, scratch.rsi, scratch.rdi, rbp, stack)
let scratch = &stack.scratch;
syscall::syscall(scratch.rax, stack.preserved.rbx, scratch.rcx, scratch.rdx, scratch.rsi, scratch.rdi, rbp, stack)
}
}
// Push scratch registers
scratch_push!();
preserved_push!();
asm!("push fs
mov r11, 0x18
mov fs, r11
push rbx"
mov fs, r11"
: : : : "intel", "volatile");
// Get reference to stack variables
@@ -144,30 +139,18 @@ pub unsafe extern fn syscall() {
// Map kernel
pti::map();
inner(rsp as *mut SyscallStack);
inner(rsp as *mut InterruptStack);
// Unmap kernel
pti::unmap();
// Interrupt return
asm!("pop rbx
pop fs"
: : : : "intel", "volatile");
asm!("pop fs" : : : : "intel", "volatile");
preserved_pop!();
scratch_pop!();
asm!("iretq" : : : : "intel", "volatile");
}
#[allow(dead_code)]
#[repr(packed)]
pub struct SyscallStack {
pub rbx: usize,
pub interrupt_stack: InterruptStack,
// Will only be present if syscall is called from another ring
pub rsp: usize,
pub ss: usize,
}
#[naked]
pub unsafe extern "C" fn clone_ret() {
// The C x86_64 ABI specifies that rbp is pushed to save the old
......
use core::mem;
use syscall::data::IntRegisters;
/// Print to console
@@ -143,6 +144,10 @@ pub struct IretRegisters {
pub rip: usize,
pub cs: usize,
pub rflags: usize,
// Will only be present if interrupt is raised from another
// privilege ring
pub rsp: usize,
pub ss: usize
}
impl IretRegisters {
@@ -196,6 +201,7 @@ macro_rules! interrupt {
#[repr(packed)]
pub struct InterruptStack {
pub fs: usize,
pub preserved: PreservedRegisters,
pub scratch: ScratchRegisters,
pub iret: IretRegisters,
}
@@ -204,12 +210,20 @@ impl InterruptStack {
pub fn dump(&self) {
self.iret.dump();
self.scratch.dump();
self.preserved.dump();
println!("FS: {:>016X}", { self.fs });
}
/// Saves all registers to a struct used by the proc:
/// scheme to read/write registers.
pub fn save(&self, all: &mut IntRegisters) {
all.fs = self.fs;
all.r15 = self.preserved.r15;
all.r14 = self.preserved.r14;
all.r13 = self.preserved.r13;
all.r12 = self.preserved.r12;
all.rbp = self.preserved.rbp;
all.rbx = self.preserved.rbx;
all.r11 = self.scratch.r11;
all.r10 = self.scratch.r10;
all.r9 = self.scratch.r9;
@@ -221,12 +235,42 @@ impl InterruptStack {
all.rax = self.scratch.rax;
all.rip = self.iret.rip;
all.cs = self.iret.cs;
all.eflags = self.iret.rflags;
all.rflags = self.iret.rflags;
// Set rsp and ss:
const CPL_MASK: usize = 0b11;
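// The two low bits of a segment selector hold its privilege level
// (the CPL, in the case of CS)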
let cs: usize;
unsafe {
asm!("mov $0, cs" : "=r"(cs) ::: "intel");
}
if self.iret.cs & CPL_MASK == cs & CPL_MASK {
// Privilege ring didn't change, so neither did the stack
all.rsp = self as *const Self as usize // rsp after Self was pushed to the stack
+ mem::size_of::<Self>() // disregard Self
- mem::size_of::<usize>() * 2; // well, almost: rsp and ss need to be excluded as they aren't present
unsafe {
asm!("mov $0, ss" : "=r"(all.ss) ::: "intel");
}
} else {
all.rsp = self.iret.rsp;
all.ss = self.iret.ss;
}
}
/// Loads all registers from a struct used by the proc:
/// scheme to read/write registers.
pub fn load(&mut self, all: &IntRegisters) {
self.fs = all.fs;
// TODO: Which of these should be allowed to change?
// self.fs = all.fs;
self.preserved.r15 = all.r15;
self.preserved.r14 = all.r14;
self.preserved.r13 = all.r13;
self.preserved.r12 = all.r12;
self.preserved.rbp = all.rbp;
self.preserved.rbx = all.rbx;
self.scratch.r11 = all.r11;
self.scratch.r10 = all.r10;
self.scratch.r9 = all.r9;
@@ -236,9 +280,9 @@ impl InterruptStack {
self.scratch.rdx = all.rdx;
self.scratch.rcx = all.rcx;
self.scratch.rax = all.rax;
self.iret.rip = all.rip;
self.iret.cs = all.cs;
self.iret.rflags = all.eflags;
// self.iret.rip = all.rip;
// self.iret.cs = all.cs;
// self.iret.rflags = all.eflags;
}
/// Enables the "Trap Flag" in the FLAGS register, causing the CPU
/// to send a Debug exception after the next instruction. This is
@@ -264,6 +308,7 @@ macro_rules! interrupt_stack {
// Push scratch registers
scratch_push!();
preserved_push!();
fs_push!();
// Get reference to stack variables
@@ -281,6 +326,7 @@ macro_rules! interrupt_stack {
// Pop scratch registers and return
fs_pop!();
preserved_pop!();
scratch_pop!();
iret!();
}
@@ -291,6 +337,7 @@ macro_rules! interrupt_stack {
#[repr(packed)]
pub struct InterruptErrorStack {
pub fs: usize,
pub preserved: PreservedRegisters,
pub scratch: ScratchRegisters,
pub code: usize,
pub iret: IretRegisters,
@@ -301,6 +348,7 @@ impl InterruptErrorStack {
self.iret.dump();
println!("CODE: {:>016X}", { self.code });
self.scratch.dump();
self.preserved.dump();
println!("FS: {:>016X}", { self.fs });
}
}
@@ -315,60 +363,6 @@ macro_rules! interrupt_error {
$func
}
// Push scratch registers
scratch_push!();
fs_push!();
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Map kernel
$crate::arch::x86_64::pti::map();
// Call inner rust function
inner(&*(rsp as *const $crate::arch::x86_64::macros::InterruptErrorStack));
// Unmap kernel
$crate::arch::x86_64::pti::unmap();
// Pop scratch registers, error code, and return
fs_pop!();
scratch_pop!();
asm!("add rsp, 8" : : : : "intel", "volatile");
iret!();
}
};
}
#[allow(dead_code)]
#[repr(packed)]
pub struct InterruptStackP {
pub fs: usize,
pub preserved: PreservedRegisters,
pub scratch: ScratchRegisters,
pub iret: IretRegisters,
}
impl InterruptStackP {
pub fn dump(&self) {
self.iret.dump();
self.scratch.dump();
self.preserved.dump();
println!("FS: {:>016X}", { self.fs });
}
}
#[macro_export]
macro_rules! interrupt_stack_p {
($name:ident, $stack: ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &mut $crate::arch::x86_64::macros::InterruptStackP) {
$func
}
// Push scratch registers
scratch_push!();
preserved_push!();
@@ -382,64 +376,7 @@ macro_rules! interrupt_stack_p {
$crate::arch::x86_64::pti::map();
// Call inner rust function
inner(&mut *(rsp as *mut $crate::arch::x86_64::macros::InterruptStackP));
// Unmap kernel
$crate::arch::x86_64::pti::unmap();
// Pop scratch registers and return
fs_pop!();
preserved_pop!();
scratch_pop!();
iret!();
}
};
}
#[allow(dead_code)]
#[repr(packed)]
pub struct InterruptErrorStackP {
pub fs: usize,
pub preserved: PreservedRegisters,
pub scratch: ScratchRegisters,
pub code: usize,
pub iret: IretRegisters,
}
impl InterruptErrorStackP {
pub fn dump(&self) {
self.iret.dump();
println!("CODE: {:>016X}", { self.code });
self.scratch.dump();
self.preserved.dump();
println!("FS: {:>016X}", { self.fs });
}
}
#[macro_export]
macro_rules! interrupt_error_p {
($name:ident, $stack:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::arch::x86_64::macros::InterruptErrorStackP) {
$func
}
// Push scratch registers
scratch_push!();
preserved_push!();
fs_push!();
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Map kernel
$crate::arch::x86_64::pti::map();
// Call inner rust function
inner(&*(rsp as *const $crate::arch::x86_64::macros::InterruptErrorStackP));
inner(&*(rsp as *const $crate::arch::x86_64::macros::InterruptErrorStack));
// Unmap kernel
$crate::arch::x86_64::pti::unmap();
......
@@ -425,6 +425,10 @@ impl Page {
end: end,
}
}
pub fn next(self) -> Page {
Self { number: self.number + 1 }
}
}
pub struct PageIter {
@@ -438,7 +442,7 @@ impl Iterator for PageIter {
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
self.start = self.start.next();
Some(page)
} else {
None
......
use core::mem;
use core::sync::atomic::AtomicBool;
use syscall::data::FloatRegisters;
/// This must be used by the kernel to ensure that context switches are done atomically
/// Compare and exchange this to true when beginning a context switch on any CPU
@@ -7,6 +8,8 @@ use core::sync::atomic::AtomicBool;
/// This must be done, as no locks can be held on the stack during switch
pub static CONTEXT_SWITCH_LOCK: AtomicBool = AtomicBool::new(false);
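/// Each x87 `st` slot in the FXSAVE area is 128 bits wide, but only the low
/// 80 bits hold the extended-precision value; the high 48 bits are reserved.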
const ST_RESERVED: u128 = 0xFFFF_FFFF_FFFF_0000_0000_0000_0000_0000;
#[derive(Clone, Debug)]
pub struct Context {
/// FX valid?
@@ -54,6 +57,44 @@ impl Context {
self.cr3
}
pub fn get_fx_regs(&self) -> Option<FloatRegisters> {
if !self.loadable {
return None;
}
let mut regs = unsafe { *(self.fx as *const FloatRegisters) };
regs._reserved = 0;
let mut new_st = regs.st_space;
for st in &mut new_st {
// Only allow access to the 80 lowest bits
*st &= !ST_RESERVED;
}
regs.st_space = new_st;
Some(regs)
}
pub fn set_fx_regs(&mut self, mut new: FloatRegisters) -> bool {
if !self.loadable {
return false;
}
let old = unsafe { &*(self.fx as *const FloatRegisters) };
new._reserved = old._reserved;
let old_st = new.st_space;
let mut new_st = new.st_space;
for (new_st, old_st) in new_st.iter_mut().zip(&old_st) {
*new_st &= !ST_RESERVED;
*new_st |= old_st & ST_RESERVED;
}
new.st_space = new_st;
// Make sure we don't use `old` from now on
drop(old);
unsafe {
*(self.fx as *mut FloatRegisters) = new;
}
true
}
pub fn set_fx(&mut self, address: usize) {
self.fx = address;
}
@@ -88,10 +129,10 @@ impl Context {
#[inline(never)]
#[naked]
pub unsafe fn switch_to(&mut self, next: &mut Context) {
asm!("fxsave [$0]" : : "r"(self.fx) : "memory" : "intel", "volatile");
asm!("fxsave64 [$0]" : : "r"(self.fx) : "memory" : "intel", "volatile");
self.loadable = true;
if next.loadable {
asm!("fxrstor [$0]" : : "r"(next.fx) : "memory" : "intel", "volatile");
asm!("fxrstor64 [$0]" : : "r"(next.fx) : "memory" : "intel", "volatile");
}else{
asm!("fninit" : : : "memory" : "intel", "volatile");
}
......
use crate::{
arch::macros::InterruptStack,
arch::{
macros::InterruptStack,
paging::{
entry::EntryFlags,
mapper::MapperFlushAll,
temporary_page::TemporaryPage,
ActivePageTable, InactivePageTable, Page, PAGE_SIZE, VirtualAddress
}
},
common::unique::Unique,
context::{self, Context, ContextId, Status},
sync::WaitCondition
@@ -8,7 +16,8 @@ use crate::{
use alloc::{
boxed::Box,
collections::BTreeMap,
sync::Arc
sync::Arc,
vec::Vec
};
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use syscall::error::*;
@@ -94,23 +103,6 @@ pub fn wait_breakpoint(pid: ContextId) -> Result<()> {
Ok(())
}
/// Returns the same value as breakpoint_callback would do, but
/// doesn't actually perform the action. You should not rely too
/// heavily on this value, as the lock *is* released between this call
/// and another.
pub fn breakpoint_callback_dryrun(singlestep: bool) -> Option<bool> {
let contexts = context::contexts();
let context = contexts.current()?;
let context = context.read();
let breakpoints = breakpoints();
let breakpoint = breakpoints.get(&context.id)?;
if breakpoint.singlestep != singlestep {
return None;
}
Some(breakpoint.sysemu)
}
/// Notify the tracer and await green flag to continue.
/// Note: Don't call while holding any locks, this will switch contexts
pub fn breakpoint_callback(singlestep: bool) -> Option<bool> {
@@ -206,3 +198,70 @@ pub unsafe fn regs_for_mut(context: &mut Context) -> Option<&mut InterruptStack>
None => context.regs?.1.as_ptr()
})
}
// __ __
// | \/ | ___ _ __ ___ ___ _ __ _ _
// | |\/| |/ _ \ '_ ` _ \ / _ \| '__| | | |
// | | | | __/ | | | | | (_) | | | |_| |
// |_| |_|\___|_| |_| |_|\___/|_| \__, |
// |___/
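/// Temporarily map `len` bytes of another context's memory, starting at
/// `offset`, into the current address space, and run `f` on the mapping.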
pub fn with_context_memory<F>(context: &Context, offset: VirtualAddress, len: usize, f: F) -> Result<()>
where F: FnOnce(*mut u8) -> Result<()>
{
// TODO: Is using USER_TMP_MISC_OFFSET safe? I guess make sure
// it's not too large.
let start = Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET));
let mut active_page_table = unsafe { ActivePageTable::new() };
let mut target_page_table = unsafe {
InactivePageTable::from_address(context.arch.get_page_table())
};
// Find the physical frames for all pages
let mut frames = Vec::new();
let mut result = None;
active_page_table.with(&mut target_page_table, &mut TemporaryPage::new(start), |mapper| {
let mut inner = || -> Result<()> {
let start = Page::containing_address(offset);
let end = Page::containing_address(VirtualAddress::new(offset.get() + len - 1));
for page in Page::range_inclusive(start, end) {
frames.push((
mapper.translate_page(page).ok_or(Error::new(EFAULT))?,
mapper.translate_page_flags(page).ok_or(Error::new(EFAULT))?
));
}
Ok(())
};
result = Some(inner());
});
result.expect("with(...) callback should always be called")?;
// Map all the physical frames into linear pages
let pages = frames.len();
let mut page = start;
let mut flusher = MapperFlushAll::new();
for (frame, mut flags) in frames {
flags |= EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE;
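// The temporary mapping must be writable so the callback can modify
// the target's memory, and should never be executable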
flusher.consume(active_page_table.map_to(page, frame, flags));
page = page.next();
}
flusher.flush(&mut active_page_table);
let res = f((start.start_address().get() + offset.get() % PAGE_SIZE) as *mut u8);
// Unmap all the pages (but allow no deallocation!)
let mut page = start;
let mut flusher = MapperFlushAll::new();
for _ in 0..pages {
flusher.consume(active_page_table.unmap_return(page, true).0);
page = page.next();
}
flusher.flush(&mut active_page_table);
res
}
use crate::{
arch::paging::VirtualAddress,
context::{self, ContextId, Status},
syscall::validate,
ptrace
};
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::{
collections::{BTreeMap, BTreeSet},
sync::Arc
};
use core::{
cmp,
mem,
@@ -25,7 +30,7 @@ enum RegsKind {
}
#[derive(Clone, Copy)]
enum Operation {
Memory,
Memory(VirtualAddress),
Regs(RegsKind),
Trace
}
@@ -39,7 +44,7 @@ struct Handle {
pub struct ProcScheme {
next_id: AtomicUsize,
handles: RwLock<BTreeMap<usize, Handle>>,
handles: RwLock<BTreeMap<usize, Arc<Mutex<Handle>>>>,
traced: Mutex<BTreeSet<ContextId>>
}
@@ -62,7 +67,7 @@ impl Scheme for ProcScheme {
.map(ContextId::from)
.ok_or(Error::new(EINVAL))?;
let operation = match parts.next() {
Some("mem") => Operation::Memory,
Some("mem") => Operation::Memory(VirtualAddress::new(0)),
Some("regs/float") => Operation::Regs(RegsKind::Float),
Some("regs/int") => Operation::Regs(RegsKind::Int),
Some("trace") => Operation::Trace,
@@ -95,11 +100,11 @@ impl Scheme for ProcScheme {
}
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
self.handles.write().insert(id, Arc::new(Mutex::new(Handle {
flags,
pid,
operation
});
})));
Ok(id)
}
@@ -113,8 +118,11 @@ impl Scheme for ProcScheme {
fn dup(&self, old_id: usize, buf: &[u8]) -> Result<usize> {
let handle = {
let handles = self.handles.read();
*handles.get(&old_id).ok_or(Error::new(EBADF))?
let handle = handles.get(&old_id).ok_or(Error::new(EBADF))?;
let handle = handle.lock();
*handle
};
let mut path = format!("{}/", handle.pid.into()).into_bytes();
path.extend_from_slice(buf);
@@ -128,29 +136,52 @@ impl Scheme for ProcScheme {
self.open(&path, handle.flags, uid, gid)
}
fn seek(&self, id: usize, pos: usize, whence: usize) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let mut handle = handle.lock();
match handle.operation {
Operation::Memory(ref mut offset) => Ok({
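// Seeking a memory handle just moves the virtual address cursor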
*offset = VirtualAddress::new(match whence {
SEEK_SET => pos,
SEEK_CUR => cmp::max(0, offset.get() as isize + pos as isize) as usize,
SEEK_END => cmp::max(0, isize::max_value() + pos as isize) as usize,
_ => return Err(Error::new(EBADF))
});
offset.get()
}),
_ => Err(Error::new(EBADF))
}
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
// Can't hold locks during the context switch later when
// waiting for a process to stop running.
// Don't hold a global lock during the context switch later on
let handle = {
let handles = self.handles.read();
*handles.get(&id).ok_or(Error::new(EBADF))?
Arc::clone(handles.get(&id).ok_or(Error::new(EBADF))?)
};
// TODO: Make sure handle can't deadlock
let mut handle = handle.lock();
let pid = handle.pid;
match handle.operation {
Operation::Memory => {
// let contexts = context::contexts();
// let context = contexts.get(handle.pid).ok_or(Error::new(ESRCH))?;
// let context = context.read();
// for grant in &*context.grants.lock() {
// println!("Grant: {} -> {}", grant.start.get(), grant.size);
// }
// unimplemented!();
return Err(Error::new(EBADF));
Operation::Memory(ref mut offset) => {
let contexts = context::contexts();
let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
let context = context.read();
ptrace::with_context_memory(&context, *offset, buf.len(), |ptr| {
buf.copy_from_slice(validate::validate_slice(ptr, buf.len())?);
Ok(())
})?;
*offset = VirtualAddress::new(offset.get() + buf.len());
Ok(buf.len())
},
Operation::Regs(kind) => {
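// A union lets either register layout be copied out through
// the same output buffer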
union Output {
_float: FloatRegisters,
float: FloatRegisters,
int: IntRegisters
}
let mut first = true;
@@ -167,9 +198,13 @@ impl Scheme for ProcScheme {
break match kind {
RegsKind::Float => {
// TODO!!
// (Output { float: FloatRegisters::default() }, mem::size_of::<FloatRegisters>())
return Err(Error::new(EBADF));
// NOTE: The kernel will never touch floats
// In the rare case that the floating
// point registers are uninitialized,
// return an all-zero default.
let fx = context.arch.get_fx_regs().unwrap_or_default();
(Output { float: fx }, mem::size_of::<FloatRegisters>())
},
RegsKind::Int => match unsafe { ptrace::regs_for(&context) } {
None => {
@@ -200,18 +235,28 @@ impl Scheme for ProcScheme {
}
fn write(&self, id: usize, buf: &[u8]) -> Result<usize> {
// Can't hold locks during the context switch later when
// waiting for a process to stop running.
// Don't hold a global lock during the context switch later on
let handle = {
let handles = self.handles.read();
*handles.get(&id).ok_or(Error::new(EBADF))?
Arc::clone(handles.get(&id).ok_or(Error::new(EBADF))?)
};
let mut handle = handle.lock();
let pid = handle.pid;
let mut first = true;
match handle.operation {
Operation::Memory => {
// unimplemented!()
return Err(Error::new(EBADF));
Operation::Memory(ref mut offset) => {
let contexts = context::contexts();
let context = contexts.get(pid).ok_or(Error::new(ESRCH))?;
let context = context.read();
ptrace::with_context_memory(&context, *offset, buf.len(), |ptr| {
validate::validate_slice_mut(ptr, buf.len())?.copy_from_slice(buf);
Ok(())
})?;
*offset = VirtualAddress::new(offset.get() + buf.len());
Ok(buf.len())
},
Operation::Regs(kind) => loop {
if !first {
@@ -226,8 +271,20 @@ impl Scheme for ProcScheme {
break match kind {
RegsKind::Float => {
// TODO!!
unimplemented!();
if buf.len() < mem::size_of::<FloatRegisters>() {
return Ok(0);
}
let regs = unsafe {
*(buf as *const _ as *const FloatRegisters)
};
// NOTE: The kernel will never touch floats
// Ignore the rare case of the floating
// point registers being uninitialized
let _ = context.arch.set_fx_regs(regs);
Ok(mem::size_of::<FloatRegisters>())
},
RegsKind::Int => match unsafe { ptrace::regs_for_mut(&mut context) } {
None => {
@@ -310,8 +367,9 @@ impl Scheme for ProcScheme {
}
fn fcntl(&self, id: usize, cmd: usize, arg: usize) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let mut handle = handle.lock();
match cmd {
F_SETFL => { handle.flags = arg; Ok(0) },
@@ -323,9 +381,10 @@ impl Scheme for ProcScheme {
fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let handle = handle.lock();
let path = format!("proc:{}/{}", handle.pid.into(), match handle.operation {
Operation::Memory => "mem",
Operation::Memory(_) => "mem",
Operation::Regs(RegsKind::Float) => "regs/float",
Operation::Regs(RegsKind::Int) => "regs/int",
Operation::Trace => "trace"
@@ -339,7 +398,12 @@ impl Scheme for ProcScheme {
fn close(&self, id: usize) -> Result<usize> {
let handle = self.handles.write().remove(&id).ok_or(Error::new(EBADF))?;
ptrace::cont(handle.pid);
let handle = handle.lock();
if let Operation::Trace = handle.operation {
ptrace::cont(handle.pid);
self.traced.lock().remove(&handle.pid);
}
let contexts = context::contexts();
if let Some(context) = contexts.get(handle.pid) {
......
use crate::interrupt::syscall::SyscallStack;
use crate::macros::InterruptStack;
use crate::memory::{allocate_frames, deallocate_frames, Frame};
use crate::paging::{ActivePageTable, PhysicalAddress, VirtualAddress};
use crate::paging::entry::EntryFlags;
@@ -18,15 +18,14 @@ fn enforce_root() -> Result<()> {
}
}
pub fn iopl(level: usize, stack: &mut SyscallStack) -> Result<usize> {
pub fn iopl(level: usize, stack: &mut InterruptStack) -> Result<usize> {
enforce_root()?;
if level > 3 {
return Err(Error::new(EINVAL));
}
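// Bits 12-13 of RFLAGS form the IOPL field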
let iret = &mut stack.interrupt_stack.iret;
iret.rflags = (iret.rflags & !(3 << 12)) | ((level & 3) << 12);
stack.iret.rflags = (stack.iret.rflags & !(3 << 12)) | ((level & 3) << 12);
Ok(0)
}
......
@@ -19,7 +19,7 @@ use self::error::{Error, Result, ENOSYS};
use self::number::*;
use crate::context::ContextId;
use crate::interrupt::syscall::SyscallStack;
use crate::macros::InterruptStack;
use crate::scheme::{FileHandle, SchemeNamespace};
/// Debug
@@ -48,9 +48,9 @@ pub mod validate;
/// This function is the syscall handler of the kernel; it is composed of an inner function that returns a `Result<usize>`. After the inner function runs, the syscall
/// function calls [`Error::mux`] on it.
pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: usize, stack: &mut SyscallStack) -> usize {
pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: usize, stack: &mut InterruptStack) -> usize {
#[inline(always)]
fn inner(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: usize, stack: &mut SyscallStack) -> Result<usize> {
fn inner(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: usize, stack: &mut InterruptStack) -> Result<usize> {
//SYS_* is declared in kernel/syscall/src/number.rs
match a & SYS_CLASS {
SYS_CLASS_FILE => {
@@ -94,12 +94,12 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
SYS_GETPGID => getpgid(ContextId::from(b)).map(ContextId::into),
SYS_GETPPID => getppid().map(ContextId::into),
SYS_CLONE => {
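// Point the saved userspace rsp at the new stack so the clone
// starts there, then restore the parent's rsp afterwards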
let old_rsp = stack.rsp;
let old_rsp = stack.iret.rsp;
if b & flag::CLONE_STACK == flag::CLONE_STACK {
stack.rsp = c;
stack.iret.rsp = c;
}
let ret = clone(b, bp).map(ContextId::into);
stack.rsp = old_rsp;
stack.iret.rsp = old_rsp;
ret
},
SYS_EXIT => exit((b & 0xFF) << 8),
......
Subproject commit f8eda5ce1bd6fe7f276302493ec54a75a7335fd0
Subproject commit 49dd22260bd8bada8b835d12ee8e460a5a1c4af4