diff --git a/redox-rt/src/arch/x86_64.rs b/redox-rt/src/arch/x86_64.rs
index 71ca1ce271b364751c02ad91df8e77eb261a5042..bd823c981137922b8a74e0812d0cf0b13b0f9f3f 100644
--- a/redox-rt/src/arch/x86_64.rs
+++ b/redox-rt/src/arch/x86_64.rs
@@ -14,9 +14,9 @@ pub(crate) const STACK_SIZE: usize = 1024 * 1024;
 
 #[derive(Debug, Default)]
 pub struct SigArea {
-    altstack_top: usize,
-    altstack_bottom: usize,
-    tmp: usize,
+    pub altstack_top: usize,
+    pub altstack_bottom: usize,
+    pub tmp: usize,
     pub onstack: u64,
     pub disable_signals_depth: u64,
 }
diff --git a/redox-rt/src/signal.rs b/redox-rt/src/signal.rs
index 03da284d261ab277e644e47e9240b8d032e451ef..d90c2c89cb208c11134855bcc2d3b0e53fb4539a 100644
--- a/redox-rt/src/signal.rs
+++ b/redox-rt/src/signal.rs
@@ -1,4 +1,4 @@
-use core::cell::Cell;
+use core::cell::{Cell, UnsafeCell};
 use core::ffi::c_int;
 use core::sync::atomic::{AtomicU64, Ordering};
 
@@ -197,10 +197,12 @@ pub struct TmpDisableSignalsGuard { _inner: () }
 
 pub fn tmp_disable_signals() -> TmpDisableSignalsGuard {
     unsafe {
-        let ctl = current_sigctl().control_flags.get();
-        ctl.write_volatile(ctl.read_volatile() | syscall::flag::INHIBIT_DELIVERY);
+        let ctl = &current_sigctl().control_flags;
+        ctl.store(ctl.load(Ordering::Relaxed) | syscall::flag::INHIBIT_DELIVERY.bits(), Ordering::Release);
+        core::sync::atomic::compiler_fence(Ordering::Acquire);
+        // TODO: fence?
-        Tcb::current().unwrap().os_specific.arch.disable_signals_depth += 1;
+        (*Tcb::current().unwrap().os_specific.arch.get()).disable_signals_depth += 1;
     }
 
     TmpDisableSignalsGuard { _inner: () }
 }
@@ -208,12 +210,13 @@ pub fn tmp_disable_signals() -> TmpDisableSignalsGuard {
 impl Drop for TmpDisableSignalsGuard {
     fn drop(&mut self) {
         unsafe {
-            let depth = &mut Tcb::current().unwrap().os_specific.arch.disable_signals_depth;
+            let depth = &mut (*Tcb::current().unwrap().os_specific.arch.get()).disable_signals_depth;
             *depth -= 1;
 
             if *depth == 0 {
-                let ctl = current_sigctl().control_flags.get();
-                ctl.write_volatile(ctl.read_volatile() & !syscall::flag::INHIBIT_DELIVERY);
+                let ctl = &current_sigctl().control_flags;
+                ctl.store(ctl.load(Ordering::Relaxed) & !syscall::flag::INHIBIT_DELIVERY.bits(), Ordering::Release);
+                core::sync::atomic::compiler_fence(Ordering::Acquire);
             }
         }
     }
@@ -278,13 +281,20 @@ const fn sig_bit(sig: usize) -> u64 {
     1 << (sig - 1)
 }
 
-pub fn setup_sighandler(control: &Sigcontrol) {
+pub fn setup_sighandler(area: &RtSigarea) {
     {
         let mut sigactions = SIGACTIONS.lock();
     }
 
+    let arch = unsafe { &mut *area.arch.get() };
     #[cfg(target_arch = "x86_64")]
     {
+        // The asm decides whether to use the altstack, based on whether the saved stack pointer
+        // was already on that stack. Thus, setting the altstack to the entire address space, is
+        // equivalent to not using any altstack at all (the default).
+        arch.altstack_top = usize::MAX;
+        arch.altstack_bottom = 0;
+
         let cpuid_eax1_ecx = unsafe { core::arch::x86_64::__cpuid(1) }.ecx;
         CPUID_EAX1_ECX.store(cpuid_eax1_ecx, core::sync::atomic::Ordering::Relaxed);
     }
@@ -302,7 +312,7 @@ pub fn setup_sighandler(control: &Sigcontrol) {
 #[derive(Debug, Default)]
 pub struct RtSigarea {
     pub control: Sigcontrol,
-    pub arch: crate::arch::SigArea,
+    pub arch: UnsafeCell<crate::arch::SigArea>,
 }
 pub fn current_setsighandler_struct() -> SetSighandlerData {
     SetSighandlerData {
diff --git a/src/start.rs b/src/start.rs
index 9bf2f8da5b51f852cc50a3b2018d15119f902d27..f17bd48723756ebf889b5f391a6b9e75e04c88df 100644
--- a/src/start.rs
+++ b/src/start.rs
@@ -158,7 +158,7 @@ pub unsafe extern "C" fn relibc_start(sp: &'static Stack) -> ! {
         tcb.linker_ptr = Box::into_raw(Box::new(Mutex::new(linker)));
     }
     #[cfg(target_os = "redox")]
-    redox_rt::signal::setup_sighandler(&tcb.os_specific.control);
+    redox_rt::signal::setup_sighandler(&tcb.os_specific);
 }
 
     // Set up argc and argv