diff --git a/src/arch/x86/idt.rs b/src/arch/x86/idt.rs
index bfca4d01d413fbc5853c40ff5d7e52a5f02965ef..45a5e9e84e3c74d49dc46ee2f56a5aad57bb64fd 100644
--- a/src/arch/x86/idt.rs
+++ b/src/arch/x86/idt.rs
@@ -27,7 +27,7 @@ pub static mut IDTR: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointe
 pub type IdtEntries = [IdtEntry; 256];
 pub type IdtReservations = [AtomicU32; 8];
 
-#[repr(packed)]
+#[repr(C)]
 pub struct Idt {
     entries: IdtEntries,
     reservations: IdtReservations,
diff --git a/src/arch/x86_64/idt.rs b/src/arch/x86_64/idt.rs
index 1377ff75feda98ace3d3b6dd3a5c7ad55ba641fd..6063f7d2be20ad5bbafe07fe8553f4e46cf4b810 100644
--- a/src/arch/x86_64/idt.rs
+++ b/src/arch/x86_64/idt.rs
@@ -27,7 +27,7 @@ pub static mut IDTR: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointe
 pub type IdtEntries = [IdtEntry; 256];
 pub type IdtReservations = [AtomicU64; 4];
 
-#[repr(packed)]
+#[repr(C)]
 pub struct Idt {
     entries: IdtEntries,
     reservations: IdtReservations,
diff --git a/src/devices/uart_16550.rs b/src/devices/uart_16550.rs
index 36f09bd5ddcddb60f2f652aecb505108519e00d8..51a0b09d5cce39d33abdfd0632ac5842f63e763c 100644
--- a/src/devices/uart_16550.rs
+++ b/src/devices/uart_16550.rs
@@ -1,4 +1,5 @@
 use core::convert::TryInto;
+use core::ptr::{addr_of, addr_of_mut};
 
 use crate::syscall::io::{Io, Mmio, ReadOnly};
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
@@ -70,20 +71,23 @@ where
     T::Value: From<u8> + TryInto<u8>,
 {
     pub fn init(&mut self) {
-        //TODO: Cleanup
-        self.int_en.write(0x00.into());
-        self.line_ctrl.write(0x80.into());
-        self.data.write(0x01.into());
-        self.int_en.write(0x00.into());
-        self.line_ctrl.write(0x03.into());
-        self.fifo_ctrl.write(0xC7.into());
-        self.modem_ctrl.write(0x0B.into());
-        self.int_en.write(0x01.into());
+        unsafe {
+            //TODO: Cleanup
+            // FIXME: Fix UB if unaligned
+            (&mut *addr_of_mut!(self.int_en)).write(0x00.into());
+            (&mut *addr_of_mut!(self.line_ctrl)).write(0x80.into());
+            (&mut *addr_of_mut!(self.data)).write(0x01.into());
+            (&mut *addr_of_mut!(self.int_en)).write(0x00.into());
+            (&mut *addr_of_mut!(self.line_ctrl)).write(0x03.into());
+            (&mut *addr_of_mut!(self.fifo_ctrl)).write(0xC7.into());
+            (&mut *addr_of_mut!(self.modem_ctrl)).write(0x0B.into());
+            (&mut *addr_of_mut!(self.int_en)).write(0x01.into());
+        }
     }
 
     fn line_sts(&self) -> LineStsFlags {
         LineStsFlags::from_bits_truncate(
-            (self.line_sts.read() & 0xFF.into())
+            (unsafe { &*addr_of!(self.line_sts) }.read() & 0xFF.into())
                 .try_into()
                 .unwrap_or(0),
         )
@@ -92,7 +96,7 @@ where
     pub fn receive(&mut self) -> Option<u8> {
         if self.line_sts().contains(LineStsFlags::INPUT_FULL) {
             Some(
-                (self.data.read() & 0xFF.into())
+                (unsafe { &*addr_of!(self.data) }.read() & 0xFF.into())
                     .try_into()
                     .unwrap_or(0),
             )
@@ -103,7 +107,7 @@ where
    pub fn send(&mut self, data: u8) {
         while !self.line_sts().contains(LineStsFlags::OUTPUT_EMPTY) {}
 
-        self.data.write(data.into())
+        unsafe { &mut *addr_of_mut!(self.data) }.write(data.into())
     }
 
     pub fn write(&mut self, buf: &[u8]) {
diff --git a/src/lib.rs b/src/lib.rs
index 4b47c1cee573ddbc9daa886bf90c2d85ba496bd1..3c03ae532b39c79d035ca33232258bbc4a50a60a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -3,8 +3,6 @@
 //! The Redox OS Kernel is a microkernel that supports `x86_64` systems and
 //! provides Unix-like syscalls for primarily Rust applications
 
-//TODO: fix the need to generate references to packed fields
-#![allow(unaligned_references)]
 // Useful for adding comments about different branches
 #![allow(clippy::if_same_then_else)]
 // Useful in the syscall function
diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs
index b90b5d9e78fe71ec653f6ad91e403d940caebe27..176eac07df06a5a0eedcd09eaea0ee570a0b8895 100644
--- a/src/scheme/proc.rs
+++ b/src/scheme/proc.rs
@@ -1358,11 +1358,16 @@ impl KernelScheme for ProcScheme {
 extern "C" fn clone_handler() {
     let context_lock = Arc::clone(context::contexts().current().expect("expected the current context to be set in a spawn closure"));
 
-    unsafe {
-        let [ip, sp] = context_lock.read().clone_entry.expect("clone_entry must be set");
-        let [arg, is_singlestep] = [0; 2];
+    loop {
+        unsafe {
+            let Some([ip, sp]) = ({ context_lock.read().clone_entry }) else {
+                context_lock.write().status = Status::Stopped(SIGSTOP);
+                continue;
+            };
+            let [arg, is_singlestep] = [0; 2];
 
-        crate::start::usermode(ip, sp, arg, is_singlestep);
+            crate::start::usermode(ip, sp, arg, is_singlestep);
+        }
     }
 }
 
diff --git a/src/syscall/futex.rs b/src/syscall/futex.rs
index 2c195582fc866290e7232ed1f90a3e9147536c3c..8f7ca2fc8f36d965bc497c55583ec7624fb6e48a 100644
--- a/src/syscall/futex.rs
+++ b/src/syscall/futex.rs
@@ -2,20 +2,19 @@
 //! Futex or Fast Userspace Mutex is "a method for waiting until a certain condition becomes true."
 //!
 //! For more information about futexes, please read [this](https://eli.thegreenplace.net/2018/basics-of-futexes/) blog post, and the [futex(2)](http://man7.org/linux/man-pages/man2/futex.2.html) man page
-use alloc::sync::Arc;
 use alloc::collections::VecDeque;
+use alloc::sync::Arc;
 use core::intrinsics;
-use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
+use spin::RwLock;
 
-use rmm::Arch;
-
-use crate::context::{self, Context};
-use crate::time;
+use crate::context::{self, memory::AddrSpace, Context};
 use crate::memory::PhysicalAddress;
-use crate::paging::VirtualAddress;
+use crate::paging::{Page, VirtualAddress};
+use crate::time;
+
 use crate::syscall::data::TimeSpec;
-use crate::syscall::error::{Error, Result, ESRCH, EAGAIN, EFAULT, EINVAL};
-use crate::syscall::flag::{FUTEX_WAIT, FUTEX_WAIT64, FUTEX_WAKE, FUTEX_REQUEUE};
+use crate::syscall::error::{Error, Result, EAGAIN, EFAULT, EINVAL, ESRCH};
+use crate::syscall::flag::{FUTEX_REQUEUE, FUTEX_WAIT, FUTEX_WAIT64, FUTEX_WAKE};
 use crate::syscall::validate::validate_array;
 
 type FutexList = VecDeque<FutexEntry>;
@@ -25,42 +24,30 @@ pub struct FutexEntry {
     context_lock: Arc<RwLock<Context>>,
 }
 
-/// Fast userspace mutex list
-static FUTEXES: Once<RwLock<FutexList>> = Once::new();
+// TODO: Process-private futexes? In that case, put the futex table in each AddrSpace.
+// TODO: Hash table?
+static FUTEXES: RwLock<FutexList> = RwLock::new(FutexList::new());
 
-/// Initialize futexes, called if needed
-fn init_futexes() -> RwLock<FutexList> {
-    RwLock::new(VecDeque::new())
-}
+fn validate_and_translate_virt(space: &AddrSpace, addr: VirtualAddress) -> Option<PhysicalAddress> {
+    // TODO: Move this elsewhere!
+    if addr.data().saturating_add(core::mem::size_of::<usize>()) >= crate::USER_END_OFFSET {
+        return None;
+    }
 
-/// Get the global futexes list, const
-pub fn futexes() -> RwLockReadGuard<'static, FutexList> {
-    FUTEXES.call_once(init_futexes).read()
-}
+    let page = Page::containing_address(addr);
+    let off = addr.data() - page.start_address().data();
+
+    let (frame, _) = space.table.utable.translate(page.start_address())?;
 
-/// Get the global futexes list, mutable
-pub fn futexes_mut() -> RwLockWriteGuard<'static, FutexList> {
-    FUTEXES.call_once(init_futexes).write()
+    Some(frame.add(off))
 }
 
 pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> Result<usize> {
     let addr_space = Arc::clone(context::current()?.read().addr_space()?);
 
-    let (target_physaddr, _) = {
-        let virtual_address = VirtualAddress::new(addr);
-
-        if !crate::CurrentRmmArch::virt_is_valid(virtual_address) {
-            return Err(Error::new(EFAULT));
-        }
-        // TODO: Use this all over the code, making sure that no user pointers that are higher half
-        // can get to the page table walking procedure.
-        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
-        if virtual_address.data() & (1 << 63) == (1 << 63) {
-            return Err(Error::new(EFAULT));
-        }
-
-        addr_space.read().table.utable.translate(virtual_address).ok_or(Error::new(EFAULT))?
-    };
+    let target_physaddr =
+        validate_and_translate_virt(&*addr_space.read(), VirtualAddress::new(addr))
+            .ok_or(Error::new(EFAULT))?;
 
     match op {
         // TODO: FUTEX_WAIT_MULTIPLE?
@@ -75,28 +62,31 @@ pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> R
             };
 
             {
-                let mut futexes = futexes_mut();
+                let mut futexes = FUTEXES.write();
 
-                let context_lock = {
-                    let contexts = context::contexts();
-                    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-                    Arc::clone(context_lock)
-                };
+                let context_lock = context::current()?;
 
-                // TODO: Is the implicit SeqCst ordering too strong here?
                 let (fetched, expected) = if op == FUTEX_WAIT {
                     // Must be aligned, otherwise it could cross a page boundary and mess up the
                     // (simpler) validation we did in the first place.
                     if addr % 4 != 0 {
                         return Err(Error::new(EINVAL));
                     }
-                    (u64::from(unsafe { intrinsics::atomic_load_seqcst::<u32>(addr as *const u32) }), u64::from(val as u32))
+                    (
+                        u64::from(unsafe {
+                            intrinsics::atomic_load_seqcst::<u32>(addr as *const u32)
+                        }),
+                        u64::from(val as u32),
+                    )
                 } else {
                     // op == FUTEX_WAIT64
                     if addr % 8 != 0 {
                         return Err(Error::new(EINVAL));
                     }
-                    (unsafe { intrinsics::atomic_load_seqcst::<u64>(addr as *const u64) }, val as u64)
+                    (
+                        unsafe { intrinsics::atomic_load_seqcst::<u64>(addr as *const u64) },
+                        val as u64,
+                    )
                 };
                 if fetched != expected {
                     return Err(Error::new(EAGAIN));
@@ -107,7 +97,9 @@ pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> R
 
                 if let Some(timeout) = timeout_opt {
                     let start = time::monotonic();
-                    let end = start + (timeout.tv_sec as u128 * time::NANOS_PER_SEC) + (timeout.tv_nsec as u128);
+                    let end = start
+                        + (timeout.tv_sec as u128 * time::NANOS_PER_SEC)
+                        + (timeout.tv_nsec as u128);
                     context.wake = Some(end);
                 }
 
@@ -120,7 +112,9 @@ pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> R
                 });
             }
 
-            unsafe { context::switch(); }
+            unsafe {
+                context::switch();
+            }
 
             if timeout_opt.is_some() {
                 let context_lock = {
@@ -136,12 +130,12 @@ pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> R
             }
 
             Ok(0)
-        },
+        }
         FUTEX_WAKE => {
             let mut woken = 0;
 
             {
-                let mut futexes = futexes_mut();
+                let mut futexes = FUTEXES.write();
 
                 let mut i = 0;
 
@@ -160,29 +154,17 @@ pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> R
             }
 
             Ok(woken)
-        },
+        }
         FUTEX_REQUEUE => {
-            let (addr2_physaddr, _) = {
-                let addr2_virt = VirtualAddress::new(addr2);
-
-                if !crate::CurrentRmmArch::virt_is_valid(addr2_virt) {
-                    return Err(Error::new(EFAULT));
-                }
-
-                // TODO
-                #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
-                if addr2_virt.data() & (1 << 63) == (1 << 63) {
-                    return Err(Error::new(EFAULT));
-                }
-
-                addr_space.read().table.utable.translate(addr2_virt).ok_or(Error::new(EFAULT))?
-            };
+            let addr2_physaddr =
+                validate_and_translate_virt(&*addr_space.read(), VirtualAddress::new(addr2))
+                    .ok_or(Error::new(EFAULT))?;
 
             let mut woken = 0;
             let mut requeued = 0;
 
             {
-                let mut futexes = futexes_mut();
+                let mut futexes = FUTEXES.write();
 
                 let mut i = 0;
                 while i < futexes.len() && woken < val {
@@ -204,7 +186,7 @@ pub fn futex(addr: usize, op: usize, val: usize, val2: usize, addr2: usize) -> R
             }
 
             Ok(woken)
-        },
-        _ => Err(Error::new(EINVAL))
+        }
+        _ => Err(Error::new(EINVAL)),
     }
 }
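
Note on the #[repr(packed)] and addr_of!/addr_of_mut! changes above (illustration only; `PackedRegs` and `bump` are made-up names, not kernel types): taking `&` or `&mut` to a field of a packed struct is undefined behavior whenever the field is unaligned, which is why the IDT switches to #[repr(C)] and the UART register accesses go through raw pointers (the FIXME marks the re-borrow that is still unsound if a field really is unaligned). A minimal sketch of the fully sound pattern, using unaligned reads and writes instead of re-borrowing:

use core::ptr::{addr_of, addr_of_mut};

#[repr(C, packed)]
struct PackedRegs {
    tag: u8,
    value: u32, // typically misaligned because of `packed`
}

fn bump(regs: &mut PackedRegs) -> u32 {
    // `&regs.value` would create an unaligned reference (UB); addr_of!/addr_of_mut!
    // produce raw pointers without an intermediate reference, and the *_unaligned
    // accessors handle the misalignment explicitly.
    unsafe {
        let old = addr_of!(regs.value).read_unaligned();
        addr_of_mut!(regs.value).write_unaligned(old + 1);
        old
    }
}

fn main() {
    let mut regs = PackedRegs { tag: 0, value: 7 };
    assert_eq!(bump(&mut regs), 7);
}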
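
Note on the FUTEXES change (a standalone sketch, not the kernel's code; `WAITERS` is a hypothetical name, the element type is simplified to usize, and std is used here where the kernel uses alloc): spin::RwLock::new and VecDeque::new are both const fns on a recent toolchain, so the wait list can be a plain static initialized at compile time, dropping the Once/call_once indirection that the old futexes()/futexes_mut() helpers wrapped.

use std::collections::VecDeque;

use spin::RwLock;

// Const-initialized static: no lazy init, no call_once on every access.
static WAITERS: RwLock<VecDeque<usize>> = RwLock::new(VecDeque::new());

fn enqueue(id: usize) {
    WAITERS.write().push_back(id);
}

fn count() -> usize {
    WAITERS.read().len()
}

fn main() {
    enqueue(1);
    enqueue(2);
    assert_eq!(count(), 2);
}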