Verified Commit d704a35b authored by jD91mZM2
Untested: Remove duplicate Mutex efforts in pte.rs

See #151
parent 2f4e57f8
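The diff below touches two files: mutex.rs gains an AtomicI32-backed lock with a `const fn new` and a new `locked()` constructor, and pte.rs drops its hand-rolled futex loops and raw i32 locks in favour of that shared `Mutex<()>`. Both the old open-coded locks and the new type rely on the same three-state futex protocol (0 = unlocked, 1 = locked, 2 = locked with possible waiters). A minimal sketch of that protocol, using std atomics and stubbed-out futex calls standing in for relibc's `Sys::futex` (the stubs are assumptions, not the real syscall wrappers):

```rust
use std::sync::atomic::{AtomicI32, Ordering::SeqCst};

// Placeholder for Sys::futex(addr, FUTEX_WAIT, 2): sleep while *addr == 2.
fn futex_wait(_addr: &AtomicI32, _expected: i32) { std::thread::yield_now(); }
// Placeholder for Sys::futex(addr, FUTEX_WAKE, 1): wake one waiter.
fn futex_wake(_addr: &AtomicI32, _count: i32) {}

// 0 = unlocked, 1 = locked with no waiters, 2 = locked with possible waiters.
fn lock(state: &AtomicI32) {
    // Fast path: grab a free lock (0 -> 1).
    if state.compare_exchange(0, 1, SeqCst, SeqCst).is_ok() {
        return;
    }
    loop {
        // Mark the lock as contended (-> 2) and sleep while it stays 2.
        if state.swap(2, SeqCst) == 0 {
            return; // It was released between the checks; we now own it.
        }
        futex_wait(state, 2);
    }
}

fn unlock(state: &AtomicI32) {
    // Release the lock; only issue a wake if there may be waiters (old value 2).
    if state.swap(0, SeqCst) == 2 {
        futex_wake(state, 1);
    }
}

fn main() {
    let state = AtomicI32::new(0);
    lock(&state);
    unlock(&state);
}
```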
use core::cell::UnsafeCell;
use core::intrinsics;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::AtomicI32 as AtomicInt;
use core::sync::atomic::Ordering::SeqCst;
use core::sync::atomic;
use platform::types::*;
use platform::{Pal, Sys};
@@ -9,30 +10,43 @@ pub const FUTEX_WAIT: c_int = 0;
pub const FUTEX_WAKE: c_int = 1;
pub struct Mutex<T> {
lock: UnsafeCell<c_int>,
lock: UnsafeCell<AtomicInt>,
content: UnsafeCell<T>,
}
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
impl<T> Mutex<T> {
/// Create a new mutex
pub fn new(content: T) -> Self {
pub const fn new(content: T) -> Self {
Self {
lock: UnsafeCell::new(0),
lock: UnsafeCell::new(AtomicInt::new(0)),
content: UnsafeCell::new(content),
}
}
/// Create a new mutex that is already locked. This is a more
/// efficient way to do the following:
/// ```rust
/// let mut mutex = Mutex::new(());
/// mutex.manual_lock();
/// ```
pub unsafe fn locked(content: T) -> Self {
Self {
lock: UnsafeCell::new(AtomicInt::new(1)),
content: UnsafeCell::new(content),
}
}
unsafe fn atomic(&self) -> &mut AtomicInt {
&mut *self.lock.get()
}
/// Tries to lock the mutex, fails if it's already locked. Manual means
/// it's up to you to unlock it after use. Returns the last atomic value
/// on failure. You should probably not worry about this, it's used for
/// internal optimizations.
pub unsafe fn manual_try_lock(&self) -> Result<&mut T, c_int> {
let value = intrinsics::atomic_cxchg(self.lock.get(), 0, 1).0;
if value == 0 {
return Ok(&mut *self.content.get());
}
Err(value)
self.atomic().compare_exchange(0, 1, SeqCst, SeqCst)
.map(|_| &mut *self.content.get())
}
/// Lock the mutex, returning the inner content. After doing this, it's
/// your responsibility to unlock it after usage. Mostly useful for FFI:
@@ -56,8 +70,8 @@ impl<T> Mutex<T> {
//
// - Skip the atomic operation if the last value was 2, since it most likely hasn't changed.
// - Skip the futex wait if the atomic operation says the mutex is unlocked.
if last == 2 || intrinsics::atomic_cxchg(self.lock.get(), 1, 2).0 != 0 {
Sys::futex(self.lock.get(), FUTEX_WAIT, 2);
if last == 2 || self.atomic().compare_exchange(1, 2, SeqCst, SeqCst).unwrap_or_else(|err| err) == 2 {
Sys::futex(self.atomic().get_mut(), FUTEX_WAIT, 2);
}
last = match self.manual_try_lock() {
@@ -68,9 +82,9 @@ impl<T> Mutex<T> {
}
/// Unlock the mutex, if it's locked.
pub unsafe fn manual_unlock(&self) {
if intrinsics::atomic_xchg(self.lock.get(), 0) == 2 {
if self.atomic().swap(0, SeqCst) == 2 {
// At least one futex is up, so let's notify it
Sys::futex(self.lock.get(), FUTEX_WAKE, 1);
Sys::futex(self.atomic().get_mut(), FUTEX_WAKE, 1);
}
}
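That is the whole of the mutex.rs change. For reference, a hypothetical usage sketch of the API it settles on; it assumes relibc's `mutex` module is in scope (as pte.rs imports it below) and will not build outside that tree, and names such as `DATA` and `handoff` are illustrative only:

```rust
use mutex::Mutex;

// `const fn new` is what lets pte.rs turn its raw `static mut ...: i32 = 0`
// locks into `static mut ...: Mutex<()> = Mutex::new(())`.
static DATA: Mutex<u32> = Mutex::new(0);

unsafe fn ffi_style() {
    // Manual locking across an FFI boundary: the caller is responsible for
    // unlocking, which is why these methods are unsafe.
    if let Ok(value) = DATA.manual_try_lock() {
        *value += 1;
        DATA.manual_unlock();
    }

    // A mutex that starts out locked, to be released by another code path,
    // the way pte_osThreadCreate hands a locked mutex to pte_osThreadStart.
    let handoff = Mutex::locked(());
    handoff.manual_unlock();
}

fn guarded() {
    // RAII-style locking, the same call the semaphore code uses below.
    let _guard = DATA.lock();
}
```

The second file, pte.rs, replaces its open-coded futex locks with this type: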
@@ -8,17 +8,17 @@ use core::{intrinsics, ptr};
use header::sys_mman;
use header::time::timespec;
use ld_so::tcb::{Master, Tcb};
use mutex::{FUTEX_WAIT, FUTEX_WAKE};
use mutex::Mutex;
use platform::types::{c_int, c_uint, c_void, pid_t, size_t};
use platform::{Pal, Sys};
pub struct Semaphore {
lock: i32,
lock: Mutex<()>,
count: i32,
}
type pte_osThreadHandle = pid_t;
type pte_osMutexHandle = *mut i32;
type pte_osMutexHandle = *mut Mutex<()>;
type pte_osSemaphoreHandle = *mut Semaphore;
type pte_osThreadEntryPoint = unsafe extern "C" fn(params: *mut c_void) -> c_int;
@@ -37,10 +37,10 @@ pub enum pte_osResult {
use self::pte_osResult::*;
static mut pid_mutexes: Option<BTreeMap<pte_osThreadHandle, pte_osMutexHandle>> = None;
static mut pid_mutexes_lock: i32 = 0;
static mut pid_mutexes_lock: Mutex<()> = Mutex::new(());
static mut pid_stacks: Option<BTreeMap<pte_osThreadHandle, (*mut c_void, size_t)>> = None;
static mut pid_stacks_lock: i32 = 0;
static mut pid_stacks_lock: Mutex<()> = Mutex::new(());
#[thread_local]
static mut LOCALS: *mut BTreeMap<c_uint, *mut c_void> = ptr::null_mut();
@@ -94,7 +94,7 @@ pub unsafe extern "C" fn pte_osThreadCreate(
ppte_osThreadHandle: *mut pte_osThreadHandle,
) -> pte_osResult {
// Create a locked mutex, unlocked by pte_osThreadStart
let mutex: pte_osMutexHandle = Box::into_raw(Box::new(2));
let mutex: pte_osMutexHandle = Box::into_raw(Box::new(Mutex::locked(())));
let stack_size = if stackSize == 0 {
1024 * 1024
@@ -278,7 +278,7 @@ pub unsafe extern "C" fn pte_osThreadGetDefaultPriority() -> c_int {
#[no_mangle]
pub unsafe extern "C" fn pte_osMutexCreate(pHandle: *mut pte_osMutexHandle) -> pte_osResult {
*pHandle = Box::into_raw(Box::new(0));
*pHandle = Box::into_raw(Box::new(Mutex::new(())));
PTE_OS_OK
}
@@ -290,38 +290,13 @@ pub unsafe extern "C" fn pte_osMutexDelete(handle: pte_osMutexHandle) -> pte_osR
#[no_mangle]
pub unsafe extern "C" fn pte_osMutexLock(handle: pte_osMutexHandle) -> pte_osResult {
let mut c = 0;
for _i in 0..100 {
c = intrinsics::atomic_cxchg(handle, 0, 1).0;
if c == 0 {
break;
}
}
if c == 1 {
c = intrinsics::atomic_xchg(handle, 2);
}
while c != 0 {
Sys::futex(handle, FUTEX_WAIT, 2);
c = intrinsics::atomic_xchg(handle, 2);
}
(*handle).manual_lock();
PTE_OS_OK
}
#[no_mangle]
pub unsafe extern "C" fn pte_osMutexUnlock(handle: pte_osMutexHandle) -> pte_osResult {
if *handle == 2 {
*handle = 0;
} else if intrinsics::atomic_xchg(handle, 0) == 1 {
return PTE_OS_OK;
}
for _i in 0..100 {
if *handle != 0 && intrinsics::atomic_cxchg(handle, 1, 2).0 != 0 {
return PTE_OS_OK;
}
}
Sys::futex(handle, FUTEX_WAKE, 1);
(*handle).manual_unlock();
PTE_OS_OK
}
@@ -331,7 +306,7 @@ pub unsafe extern "C" fn pte_osSemaphoreCreate(
pHandle: *mut pte_osSemaphoreHandle,
) -> pte_osResult {
*pHandle = Box::into_raw(Box::new(Semaphore {
lock: 0,
lock: Mutex::new(()),
count: initialValue,
}));
PTE_OS_OK
@@ -349,9 +324,8 @@ pub unsafe extern "C" fn pte_osSemaphorePost(
count: c_int,
) -> pte_osResult {
let semaphore = &mut *handle;
pte_osMutexLock(&mut semaphore.lock);
let _guard = semaphore.lock.lock();
intrinsics::atomic_xadd(&mut semaphore.count, 1);
pte_osMutexUnlock(&mut semaphore.lock);
PTE_OS_OK
}
@@ -364,12 +338,11 @@ pub unsafe extern "C" fn pte_osSemaphorePend(
let semaphore = &mut *handle;
let mut acquired = false;
while !acquired {
pte_osMutexLock(&mut semaphore.lock);
let _guard = semaphore.lock.lock();
if intrinsics::atomic_load(&semaphore.count) > 0 {
intrinsics::atomic_xsub(&mut semaphore.count, 1);
acquired = true;
}
pte_osMutexUnlock(&mut semaphore.lock);
Sys::sched_yield();
}
PTE_OS_OK
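A minimal, self-contained sketch of the post/pend pattern the last two hunks arrive at, using std's Mutex and AtomicI32 in place of relibc's `Mutex<()>` and the intrinsics-based i32 count, and `std::thread::yield_now` in place of `Sys::sched_yield`:

```rust
use std::sync::atomic::{AtomicI32, Ordering::SeqCst};
use std::sync::Mutex;

struct Semaphore {
    lock: Mutex<()>,   // serializes post/pend, as in the diff
    count: AtomicI32,  // available permits
}

impl Semaphore {
    fn post(&self) {
        // Increment the count while holding the guard, mirroring
        // pte_osSemaphorePost above.
        let _guard = self.lock.lock().unwrap();
        self.count.fetch_add(1, SeqCst);
    }

    fn pend(&self) {
        loop {
            {
                let _guard = self.lock.lock().unwrap();
                if self.count.load(SeqCst) > 0 {
                    self.count.fetch_sub(1, SeqCst);
                    return;
                }
            }
            // Busy-wait politely between attempts, as the sched_yield
            // call does in pte_osSemaphorePend.
            std::thread::yield_now();
        }
    }
}

fn main() {
    let sem = Semaphore { lock: Mutex::new(()), count: AtomicI32::new(1) };
    sem.post();
    sem.pend();
    sem.pend();
}
```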