use core::cell::UnsafeCell;
use core::convert::TryFrom;
use core::intrinsics::unlikely;
use core::mem::ManuallyDrop;
use core::pin::Pin;
use core::sync::atomic::{self, AtomicBool, AtomicU64, AtomicUsize, Ordering};
use core::{cmp, fmt, mem, ptr, task};

use alloc::boxed::Box;
use alloc::collections::{BinaryHeap, BTreeMap};
use alloc::sync::{Arc, Weak};
use alloc::vec::Vec;

use either::*;
use spin::{Mutex, Once};

use crate::event;
use crate::io_uring::{HandleSubmissionFuture, RingInfo};
use crate::io_uring::scheme::{Handle, IoUringScheme};
use crate::memory::{self, PhysicalAddress};
use crate::paging::{ActivePageTable, VirtualAddress};
use crate::scheme::ContextOrKernel;

use crate::syscall::error::{Error, Result};
use crate::syscall::error::{EBADF, EBADFD, EBUSY, EINVAL};
use crate::syscall::flag::MapFlags;
use crate::syscall::io_uring::v1::{
    CachePadded, CqEntry32, CqEntry64, IoUringCreateFlags, Priority, SqEntry32, SqEntry64,
    CQ_ENTRIES_MMAP_OFFSET, CQ_HEADER_MMAP_OFFSET, SQ_ENTRIES_MMAP_OFFSET, SQ_HEADER_MMAP_OFFSET,
};
use crate::syscall::io_uring::{IoUringCreateInfo, IoUringVersion};

#[derive(Debug)]
pub struct RingHandle {
    pub(crate) rings: Once<Rings>,
    pub(crate) consumer_state: Once<RingHandleConsumerState>,
    pub(crate) state: Mutex<RingHandleState>,
    pub(crate) refcount: AtomicUsize,

    pub(crate) owner_context: Once<ContextOrKernel>,
    pub(crate) attached_context: Once<ContextOrKernel>,

    pub(crate) pool: Once<Weak<Handle>>,
}
impl Rings {
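    // Each ring is stored as an `Either`, with `Left` holding the 32-bit entry layout and
    // `Right` the 64-bit one. The generic helpers below are therefore passed to both arms of
    // `.either(..)`, which collapses the two cases into the same address pair.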
    fn inner_map_phys<T>(info: &RingInfo<T>) -> (PhysicalAddress, PhysicalAddress) {
        (info.ring_physaddr, info.entries_physaddr)
    }
    fn inner_map_virt<T>(info: &RingInfo<T>) -> (VirtualAddress, VirtualAddress) {
        (
            VirtualAddress::new(info.ring.as_ptr() as usize),
            VirtualAddress::new(info.entries.as_ptr() as usize),
        )
    }
    pub fn submission_ring_physaddr(&self) -> PhysicalAddress {
        let (ring_physaddr, _) = self
            .submission_ring
            .as_ref()
            .either(Self::inner_map_phys, Self::inner_map_phys);
        ring_physaddr
    }
    pub fn submission_entries_physaddr(&self) -> PhysicalAddress {
        let (_, entries_physaddr) = self
            .submission_ring
            .as_ref()
            .either(Self::inner_map_phys, Self::inner_map_phys);
        entries_physaddr
    }
    pub fn completion_ring_physaddr(&self) -> PhysicalAddress {
        let (ring_physaddr, _) = self
            .completion_ring
            .as_ref()
            .map_left(|(i, _)| i)
            .map_right(|(i, _)| i)
            .either(Self::inner_map_phys, Self::inner_map_phys);
        ring_physaddr
    }
    pub fn completion_entries_physaddr(&self) -> PhysicalAddress {
        let (_, entries_physaddr) = self
            .completion_ring
            .as_ref()
            .map_left(|(i, _)| i)
            .map_right(|(i, _)| i)
            .either(Self::inner_map_phys, Self::inner_map_phys);
        entries_physaddr
    }
    pub fn submission_ring_virtaddr(&self) -> VirtualAddress {
        let (ring_virtaddr, _) = self
            .submission_ring
            .as_ref()
            .either(Self::inner_map_virt, Self::inner_map_virt);
        ring_virtaddr
    }
    pub fn submission_entries_virtaddr(&self) -> VirtualAddress {
        let (_, entries_virtaddr) = self
            .submission_ring
            .as_ref()
            .either(Self::inner_map_virt, Self::inner_map_virt);
        entries_virtaddr
    }
    pub fn completion_ring_virtaddr(&self) -> VirtualAddress {
        let (ring_virtaddr, _) = self
            .completion_ring
            .as_ref()
            .map_left(|(i, _)| i)
            .map_right(|(i, _)| i)
            .either(Self::inner_map_virt, Self::inner_map_virt);
        ring_virtaddr
    }
    pub fn completion_entries_virtaddr(&self) -> VirtualAddress {
        let (_, entries_virtaddr) = self
            .completion_ring
            .as_ref()
            .map_left(|(i, _)| i)
            .map_right(|(i, _)| i)
            .either(Self::inner_map_virt, Self::inner_map_virt);
        entries_virtaddr
    }
}
impl RingHandle {
    pub fn runtime_state(&self) -> Option<&RingHandleRuntimeState> {
        Some(&self.consumer_state.r#try()?.runtime_state)
    }
    pub fn submission_entry_count(&self) -> Option<usize> {
        Some(self.runtime_state()?.sq_entry_count)
    }
    pub fn completion_entry_count(&self) -> Option<usize> {
        Some(self.runtime_state()?.cq_entry_count)
    }
    pub fn sq_entry_size(&self) -> Option<usize> {
        Some(
            if self
                .consumer_state.r#try()?
                .flags
                .contains(IoUringCreateFlags::BITS_32)
            {
                mem::size_of::<SqEntry32>()
            } else {
                mem::size_of::<SqEntry64>()
            },
        )
    }
    pub fn cq_entry_size(&self) -> Option<usize> {
        Some(
            if self
                .consumer_state.r#try()?
                .flags
                .contains(IoUringCreateFlags::BITS_32)
            {
                mem::size_of::<CqEntry32>()
            } else {
                mem::size_of::<CqEntry64>()
            },
        )
    }
    pub fn version(&self) -> Option<IoUringVersion> {
        Some(self.consumer_state.r#try()?.version)
    }
    pub fn flags(&self) -> Option<IoUringCreateFlags> {
        Some(self.consumer_state.r#try()?.flags)
    }
    pub fn state(&self) -> RingHandleState {
        *self.state.lock()
    }
    pub fn transition_into_initialized_state(&self, init: &IoUringCreateInfo) -> Result<()> {
        let flags = IoUringCreateFlags::from_bits(init.flags).ok_or(Error::new(EINVAL))?;
        let mut state_lock = self.state.lock();

        if let &RingHandleState::Initialized { .. } = &*state_lock {
            return Err(Error::new(EINVAL));
        }

        *state_lock = RingHandleState::Initialized;

        self.consumer_state.call_once(move || RingHandleConsumerState {
            runtime_state: RingHandleRuntimeState {
                sq_entry_count: init.sq_entry_count,
                cq_entry_count: init.cq_entry_count,
                last_sq_push_epoch: CachePadded(AtomicUsize::new(0)),
                last_sq_pop_epoch: CachePadded(AtomicUsize::new(0)),
                last_cq_push_epoch: CachePadded(AtomicUsize::new(0)),
                last_cq_pop_epoch: CachePadded(AtomicUsize::new(0)),
            },
            version: init.version,
            flags,
            is_owner: true,
            event_queue: Once::new(),
            event_seq: AtomicU64::new(0),
            secondary_rings: Mutex::new(Vec::new()),
        });
        self.refcount.fetch_add(1, Ordering::Relaxed);

        Ok(())
    }
    pub fn transition_into_attached_state(&self, attached_context: ContextOrKernel) {
        {
            let mut state_lock = self.state.lock();
            assert_eq!(*state_lock, RingHandleState::Initialized);
            *state_lock = RingHandleState::Attached;
        }
        self.refcount.fetch_add(1, Ordering::Relaxed);
        assert!(self.attached_context.r#try().is_none());
        self.attached_context.call_once(|| attached_context);
    }
    pub fn map_mem_kernel(
        &self,
        map_offset: usize,
        map_flags: MapFlags,
        map_size: usize,
    ) -> Result<(
        PhysicalAddress,
        VirtualAddress,
        usize,
        usize,
        bool,
        bool,
        usize,
    )> {
        if map_flags.contains(MapFlags::PROT_EXEC) || map_flags.contains(MapFlags::MAP_PRIVATE) {
            return Err(Error::new(EINVAL));
        }

        let state_lock;
        let consumer_state: &RingHandleConsumerState = {
            state_lock = self.state.lock();
            match &*state_lock {
                &RingHandleState::Initialized => {
                    self.consumer_state.r#try().ok_or(Error::new(EBADFD))?
                }
                _ => return Err(Error::new(EINVAL)),
            }
        };
        let (sq_entry_count, cq_entry_count, flags) = (
            consumer_state.runtime_state.sq_entry_count,
            consumer_state.runtime_state.cq_entry_count,
            consumer_state.flags,
        );

        let mut active_table = unsafe { ActivePageTable::new() };

        fn get_ring_addr_pair<T>(info: &RingInfo<T>) -> (PhysicalAddress, *mut u8, bool, usize) {
            (
                info.ring_physaddr,
                info.ring.as_ptr() as *mut u8,
                true,
                mem::size_of::<T>(),
            )
        }
        fn get_entries_addr_pair<T>(info: &RingInfo<T>) -> (PhysicalAddress, *mut u8, bool, usize) {
            (
                info.entries_physaddr,
                info.entries.as_ptr() as *mut u8,
                false,
                mem::size_of::<T>(),
            )
        }

        fn get_or_init<'a>(
            rings_once: &'a Once<Rings>,
            active_table: &mut ActivePageTable,
            sq_entry_count: usize,
            cq_entry_count: usize,
            flags: IoUringCreateFlags,
        ) -> Result<&'a Rings> {
            Ok(match rings_once.r#try() {
                Some(r) => r,
                None => {
                    // TODO: Use a better spinlock crate (maybe my own, `spinning`), which supports
                    // fallible initialization. Currently, if some other thread were to initialize
                    // the rings simultaneously, one of them would be leaked, since the other
                    // thread wouldn't know that we were initializing it.

                    let rings = IoUringScheme::init_rings(
                        active_table,
                        sq_entry_count,
                        cq_entry_count,
                        flags,
                    )?;
                    rings_once
                        .call_once(|| rings)
                }
            })
        }

        let is_sq;

        let (kernel_physaddr, kernel_virtaddr, is_ring_header, entry_size) = match map_offset {
            SQ_HEADER_MMAP_OFFSET => {
                is_sq = true;
                get_or_init(&self.rings, &mut active_table, sq_entry_count, cq_entry_count, flags)?
                    .submission_ring
                    .as_ref()
                    .either(get_ring_addr_pair, get_ring_addr_pair)
            }

            SQ_ENTRIES_MMAP_OFFSET => {
                is_sq = true;
                get_or_init(&self.rings, &mut active_table, sq_entry_count, cq_entry_count, flags)?
                    .submission_ring
                    .as_ref()
                    .either(get_entries_addr_pair, get_entries_addr_pair)
            }

            CQ_HEADER_MMAP_OFFSET => {
                is_sq = false;

                get_or_init(&self.rings, &mut active_table, sq_entry_count, cq_entry_count, flags)?
                    .completion_ring
                    .as_ref()
                    .map_left(|(i, _)| i)
                    .map_right(|(i, _)| i)
                    .either(get_ring_addr_pair, get_ring_addr_pair)
            }

            CQ_ENTRIES_MMAP_OFFSET => {
                is_sq = false;
                get_or_init(&self.rings, &mut active_table, sq_entry_count, cq_entry_count, flags)?
                    .completion_ring
                    .as_ref()
                    .map_left(|(i, _)| i)
                    .map_right(|(i, _)| i)
                    .either(get_entries_addr_pair, get_entries_addr_pair)
            }

            _ => return Err(Error::new(EINVAL)),
        };
        if unlikely(is_ring_header && map_size != memory::PAGE_SIZE) {
            return Err(Error::new(EINVAL));
        }
        // TODO: Validate entries as well.
        Ok((
            kernel_physaddr,
            VirtualAddress::new(kernel_virtaddr as usize),
            sq_entry_count,
            cq_entry_count,
            is_ring_header,
            is_sq,
            entry_size,
        ))
    }
}
impl RingHandleRuntimeState {
    /// Returns four booleans indicating whether the rings have been updated.
    ///
    /// Order: ((SQ push, SQ pop), (CQ push, CQ pop)).
    pub fn check_for_update(&self, rings: &Rings) -> ((bool, bool), (bool, bool)) {
        fn get_ring_epochs<T>(info: &RingInfo<T>) -> (usize, usize) {
            let ring_ref = unsafe { info.ring.as_ref() };

            let push_epoch = ring_ref.push_epoch.load(Ordering::Relaxed);
            let pull_epoch = ring_ref.pop_epoch.load(Ordering::Relaxed);
            (push_epoch, pull_epoch)
        }
        let (current_sq_push_epoch, current_sq_pop_epoch) =
            match rings.submission_ring {
                Either::Left(ref ring) => get_ring_epochs(ring),
                Either::Right(ref ring) => get_ring_epochs(ring),
            };
        let (current_cq_push_epoch, current_cq_pop_epoch) =
            match rings.completion_ring {
                Either::Left((ref ring, _)) => get_ring_epochs(ring),
                Either::Right((ref ring, _)) => get_ring_epochs(ring),
            };

        // Even though it is slower, we use SeqCst here since there is no synchronization
        // between different atomic variables otherwise.
        //
        // Makes sure that every store to the last_[sc]q-epoch counters happens after the loads
        // from the rings.
        //
        // TODO: Are Acquire-Release fences sufficient here?
        atomic::fence(Ordering::SeqCst);

        let prev_sq_push_epoch = self
            .last_sq_push_epoch
            .swap(current_sq_push_epoch, Ordering::Relaxed);
        let prev_sq_pop_epoch = self
            .last_sq_pop_epoch
            .swap(current_sq_pop_epoch, Ordering::Relaxed);
        let prev_cq_push_epoch = self
            .last_cq_push_epoch
            .swap(current_cq_push_epoch, Ordering::Relaxed);
        let prev_cq_pop_epoch = self
            .last_cq_pop_epoch
            .swap(current_cq_pop_epoch, Ordering::Relaxed);

        // TODO: Is this fence also necessary?
        atomic::fence(Ordering::SeqCst);

        (
            (
                prev_sq_push_epoch != current_sq_push_epoch,
                prev_sq_pop_epoch != current_sq_pop_epoch,
            ),
            (
                prev_cq_push_epoch != current_cq_push_epoch,
                prev_cq_pop_epoch != current_cq_pop_epoch,
            ),
        )
    }
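    // A rough sketch of how a caller might consume the result (hypothetical call site; the
    // actual polling code lives elsewhere in the io_uring scheme):
    //
    //     let ((sq_pushed, sq_popped), (cq_pushed, cq_popped)) =
    //         runtime_state.check_for_update(rings);
    //     if sq_pushed { /* new submission entries may be available to handle */ }
    //     if cq_popped { /* the consumer has popped completions, freeing CQ space */ }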
}
#[derive(Debug)]
pub struct Rings {
    pub(crate) submission_ring: Either<RingInfo<SqEntry32>, RingInfo<SqEntry64>>,
    // TODO: Since we no longer type-erase the submission entries (which are indeed needed by
    // the futures), the only currently valid combinations of submission and completion types
    // are 32+32 and 64+64. Maybe this restriction should be loosened (even though that is not
    // possible yet at the interface level).
    pub(crate) completion_ring: Either<
        (RingInfo<CqEntry32>, Runqueue<SqEntry32, CqEntry32>),
        (RingInfo<CqEntry64>, Runqueue<SqEntry64, CqEntry64>),
    >,
}

#[cfg(target_pointer_width = "64")]
mod types {
    use core::sync::atomic::AtomicU32;

    pub type PendingTag = u32;
    pub type AtomicPendingTag = AtomicU32;

    pub type FastHandleIdx = u32;
}
#[cfg(target_pointer_width = "32")]
mod types {
    use core::sync::atomic::AtomicU16;

    pub type PendingTag = u16;
    pub type AtomicPendingTag = AtomicU16;

    pub type FastHandleIdx = u16;
}

pub use types::*;

#[derive(Debug)]
pub struct Runqueue<S: 'static, C: 'static> {
    pub(crate) first_vacant_slot: AtomicUsize,
    pub(crate) last_vacant_slot: AtomicUsize,
    pub(crate) tasks: Pin<Box<[UnsafeCell<TaskSlot<S, C>>]>>,
    pub(crate) task_locks: Box<[AtomicUsize]>,

    pub(crate) ready: Mutex<BinaryHeap<TaskRef>>,
    pub(crate) pending: Mutex<BTreeMap<PendingTag, TaskRef>>,
    pub(crate) next_pending_tag: AtomicPendingTag,
    pub(crate) tag_overflow: AtomicBool,
}
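// SAFETY: the `UnsafeCell`s in `tasks` suppress the automatic `Sync` impl; access to each
// `TaskSlot` is serialized by the corresponding lock bit in `task_locks` (refer to
// `try_lock_task_raw`/`unlock_task_raw` below), which is what these impls rely on.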
unsafe impl<S: Send + Sync + 'static, C: Send + Sync + 'static> Send for Runqueue<S, C> {}
unsafe impl<S: Send + Sync + 'static, C: Send + Sync + 'static> Sync for Runqueue<S, C> {}

pub struct TaskGuard<'runqueue, S: 'static, C: 'static> {
    index: usize,
    runqueue: &'runqueue Runqueue<S, C>,
}
impl<'runqueue, S, C> TaskGuard<'runqueue, S, C>
where
    S: 'static,
    C: 'static,
{
    pub fn task_ref(&mut self) -> Option<TaskRef> {
        Some(TaskRef {
            index: self.index,
            priority: self.as_mut().as_task_mut()?.priority(),
        })
    }
    pub fn as_mut<'guard>(&'guard mut self) -> Pin<&'guard mut TaskSlot<S, C>> {
        unsafe {
            let pinned_task_cell = self.runqueue.tasks.as_ref().map_unchecked(|tasks| &tasks[self.index]);
            let task_cell = pinned_task_cell.get_ref();
            let task: &mut TaskSlot<S, C> = &mut *task_cell.get();
            Pin::new_unchecked(task)
        }
    }
}
impl<'runqueue, S: 'static, C: 'static> Drop for TaskGuard<'runqueue, S, C> {
    fn drop(&mut self) {
        unsafe { self.runqueue.unlock_task_raw(self.index) }
    }
}

#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct TaskRef {
    pub index: usize,
    pub priority: Priority,
}

#[derive(Debug)]
pub enum TaskSlot<S: 'static, C: 'static> {
    Occupied(Task<S, C>),
    Vacant { next: usize },
}
impl<S: 'static, C: 'static> TaskSlot<S, C> {
    pub fn as_task_mut(self: Pin<&mut Self>) -> Option<Pin<&mut Task<S, C>>> {
        unsafe {
            match self.get_unchecked_mut() {
                &mut Self::Occupied(ref mut task) => Some(Pin::new_unchecked(task)),
                &mut Self::Vacant { .. } => None,
            }
        }
    }
    pub fn as_vacant_next(self: Pin<&mut Self>) -> Option<&mut usize>
    where
        S: Unpin,
        C: Unpin,
    {
        match self.get_mut() {
            &mut Self::Vacant { ref mut next } => Some(next),
            &mut Self::Occupied(_) => None,
        }
    }
    pub fn get_ref_if_vacant(self: Pin<&mut Self>) -> Option<&mut Self>
    where
        S: Unpin,
        C: Unpin,
    {
        match self.get_mut() {
            // Don't permit occupied entries to be accessed mutably without Pin...
            &mut Self::Occupied(_) => None,

            // ... but do allow vacant entries to be accessed, so that they can be occupied
            this @ &mut Self::Vacant { .. } => Some(this),
        }
    }
    /// Replaces an occupied task with a new orphan vacant entry (not yet linked into the free
    /// list), safely dropping the future inside.
    pub fn free(self: Pin<&mut Self>) {
        unsafe {
            let this = self.get_unchecked_mut();
            let this_ptr = this as *mut Self;

            {
                let pointer = Pin::new_unchecked(this)
                    .as_task_mut()
                    .expect("expected Task::free to free an occupied entry")
                    .get_unchecked_mut() as *mut Task<S, C>;

                // Since the task and the future inside it are pinned, we need to drop them
                // manually with drop_in_place.
                ptr::drop_in_place(pointer);
            }

            // And, now that it's dropped, we cannot in any way use self again by reference, so we
            // need to ptr::write the new value.
            ptr::write(this_ptr, Self::Vacant { next: usize::MAX });
        }
    }
}

pub struct Task<S: 'static, C: 'static> {
    // TODO: Array of structures, or structure of arrays? In other words, should the priority be
    // separated from the future?
    pub(crate) priority: Priority,
    pub(crate) future: HandleSubmissionFuture<S, C>,
}
impl<S, C> Task<S, C>
where
    S: 'static,
    C: 'static,
{
    pub fn as_future_mut(self: Pin<&mut Self>) -> Pin<&mut HandleSubmissionFuture<S, C>> {
        unsafe { self.map_unchecked_mut(|this| &mut this.future) }
    }
    pub fn as_priority_mut(self: Pin<&mut Self>) -> &mut Priority {
        unsafe { self.map_unchecked_mut(|this| &mut this.priority).get_mut() }
    }
    pub fn priority(self: Pin<&mut Self>) -> Priority {
        *self.as_priority_mut()
    }
}

impl<S, C> fmt::Debug for Task<S, C>
where
    S: 'static,
    C: 'static,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct OpaqueStr;
        impl fmt::Debug for OpaqueStr {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(f, "[opaque future of type `HandleSubmissionFuture`]")
            }
        }

        f.debug_struct("QueueItem")
            .field("priority", &self.priority)
            .field("future", &OpaqueStr)
            .finish()
    }
}

impl<S, C> PartialEq for Task<S, C> {
    fn eq(&self, other: &Self) -> bool {
        self.priority == other.priority
    }
}
impl<S, C> Eq for Task<S, C> {}

impl<S, C> Ord for Task<S, C> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        Ord::cmp(&self.priority, &other.priority)
    }
}
impl<S, C> PartialOrd for Task<S, C> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}

// The "slow" waking procedure, that works for all handle sizes, but requires storing the waker in
// an Arc.
pub(crate) mod slow_vtable {
    use super::*;

    pub(crate) struct Wrapper<F>(F);

    impl<F> Wrapper<F>
    where
        F: Fn(),
    {
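        // The `RawWaker` data pointer is an `Arc<F>` turned into a raw pointer via
        // `Arc::into_raw`: `clone` bumps the reference count without consuming the original
        // (hence `ManuallyDrop`), `wake` consumes one reference, `wake_by_ref` only borrows
        // it, and `drop` releases one reference.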
        unsafe fn vtable_clone(pointer: *const ()) -> task::RawWaker {
            let arc_orig = ManuallyDrop::new(Arc::from_raw(pointer as *const F));
            let arc_clone = Arc::clone(&arc_orig);
            mem::forget(arc_orig);
            task::RawWaker::new(Arc::into_raw(arc_clone) as *const (), &Self::VTABLE)
        }
        unsafe fn vtable_wake(pointer: *const ()) {
            let arc = Arc::from_raw(pointer as *const F);
            (arc)();
        }
        unsafe fn vtable_wake_by_ref(pointer: *const ()) {
            let arc_orig = ManuallyDrop::new(Arc::from_raw(pointer as *const F));
            (arc_orig)();
            mem::forget(arc_orig);
        }
        unsafe fn vtable_drop(pointer: *const ()) {
            let arc = ManuallyDrop::new(Arc::from_raw(pointer as *const F));
            drop(ManuallyDrop::into_inner(arc));
        }
        pub(crate) const VTABLE: task::RawWakerVTable = task::RawWakerVTable::new(
            Self::vtable_clone,
            Self::vtable_wake,
            Self::vtable_wake_by_ref,
            Self::vtable_drop,
        );

        pub(crate) fn new(f: F) -> task::Waker {
            unsafe {
                task::Waker::from_raw(task::RawWaker::new(
                    Arc::into_raw(Arc::new(f)) as *const (),
                    &Self::VTABLE,
                ))
            }
        }
    }

}
// The "fast" waking producedure, that takes the io_uring handle index as the higher bits, and the
// number of the task (TODO: Maybe introduce task groups if waking up multiple futures at once,
// where some tasks in the group some may not be able to make progress).
//
// On a 64-bit system, which the Redox kernel basically only supports, this will result in 32 bits
// of the handle number (so 4 billion io_urings in total), and 32 bits for the tag.
//
// At the moment there is no limit on how many io_urings can be present; therefore, if it were to
// exceed 2^32, we'll simply use the slow waker instead.
//
// As tags may easily overflow if a process reaches 2^32 pending futures, this may allow spurious
// wakeup if it were to overflow before a waker gets dropped. Since this doesn't really violate any
// logical contracts for futures, this is okay.
//
// This also limits the number of possible tags to 2^32, but it wouldn't really make sense to have
// more pending tasks than that.
//
// On a 32-bit system however, these numbers would instead be 16-bit each, or 65536 pending tasks
// and io_uring handles, respectively.
pub(crate) mod fast_vtable {
    use super::*;

    #[cfg(target_pointer_width = "64")]
    pub(crate) fn disassemble_pointer(pointer: usize) -> (u32, u32) {
        let pointer = pointer as u64;
        let lo = (pointer & 0xFFFF_FFFF) as u32;
        let hi = ((pointer >> 32) & 0xFFFF_FFFF) as u32;

        (lo, hi)
    }
    #[cfg(target_pointer_width = "32")]
    pub(crate) fn disassemble_pointer(pointer: usize) -> (u16, u16) {
        let pointer = pointer as u32;
        let lo = (pointer & 0xFFFF) as u16;
        let hi = ((pointer >> 16) & 0xFFFF) as u16;

        (lo, hi)
    }

    #[cfg(target_pointer_width = "64")]
    pub(crate) fn assemble_pointer((lo, hi): (u32, u32)) -> usize {
        let pointer = (u64::from(hi) << 32) | u64::from(lo);
        pointer as usize
    }
    #[cfg(target_pointer_width = "32")]
    pub(crate) fn assemble_pointer((lo, hi): (u16, u16)) -> usize {
        let pointer = (u32::from(hi) << 16) | u32::from(lo);
        pointer as usize
    }
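    // Illustrative round trip (the concrete values are arbitrary): packing a handle index and
    // a tag and unpacking them again is lossless, e.g. on a 64-bit target:
    //
    //     assert_eq!(disassemble_pointer(assemble_pointer((3, 7))), (3, 7));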

    #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
    compile_error!("expected the target pointer width to be either 32 or 64 bits");

    unsafe fn vtable_clone(pointer: *const ()) -> task::RawWaker {
        task::RawWaker::new(pointer, &VTABLE)
    }
    unsafe fn vtable_wake(pointer: *const ()) {
        vtable_wake_by_ref(pointer)
    }
    unsafe fn vtable_wake_by_ref(pointer: *const ()) {
        let (handle_idx, tag) = disassemble_pointer(pointer as usize);

        let handle_idx = usize::try_from(handle_idx)
            .expect("expected handle_idx type to at least be smaller than usize");

        let handle = {
            let handles = crate::io_uring::scheme::handles();

            // Note that cloning this may impose higher overhead, but since it releases the
            // handles lock early, it reduces lock contention, which helps even more.
            match handles.get(&handle_idx) {
                Some(h) => Arc::clone(h),
                None => return,
            }
        };

        let rh = match *handle {
            Handle::Ring(ref rh) => rh,
            _ => return,
        };
        let rings = match rh.rings.r#try() {
            Some(r) => r,
            None => return,
        };
        match rings.completion_ring {
            Left((_, ref runqueue32)) => waker::handle_either(runqueue32, tag),
            Right((_, ref runqueue64)) => waker::handle_either(runqueue64, tag),
        }
    }
    unsafe fn vtable_drop(_pointer: *const ()) {}

    pub(crate) const VTABLE: task::RawWakerVTable =
        task::RawWakerVTable::new(vtable_clone, vtable_wake, vtable_wake_by_ref, vtable_drop);
}

pub mod waker {
    use super::*;

    pub(crate) fn handle_either<S, C>(runqueue: &Runqueue<S, C>, tag: PendingTag) {
        if let Some(task_ref) = runqueue.pending.lock().remove(&tag) {
            runqueue.ready.lock().push(task_ref);
        }
    }
    /// Create a waker that uses an Arc under the hood.
    pub fn slow_waker(handle: Arc<Handle>, tag: PendingTag) -> task::Waker {
        slow_vtable::Wrapper::new(move || {
            let ring_handle = match *handle {
                Handle::Ring(ref rh) => rh,
                _ => unreachable!(),
            };

            let rings = ring_handle
                .rings
                .r#try()
                .expect("expected a ring handle that managed to create a waker, to at least have initialized its rings");
            match rings.completion_ring {
                Left((_, ref runqueue)) => handle_either(runqueue, tag),
                Right((_, ref runqueue)) => handle_either(runqueue, tag),
            }
            let owner = &ring_handle
                .owner_context
                .r#try()
                .expect("expected all rings which submissions are being handled by the kernel, to actually be initialized");

            match owner {
                ContextOrKernel::Kernel => (), // TODO: Actually wake up kernel contexts
                ContextOrKernel::Context(ref context) => {
                    let arc = match context.upgrade() {
                        Some(c) => c,
                        None => return,
                    };

                    let guard = arc.upgradeable_read();

                    if guard.ioring_completions_left.load(Ordering::Acquire) > 0 {
                        guard.upgrade().unblock();
                    }
                }
            }
        })
    }

    pub fn fast_waker(handle_index: usize, tag: PendingTag) -> Option<task::Waker> {
        let handle = FastHandleIdx::try_from(handle_index).ok()?;
        let pointer = fast_vtable::assemble_pointer((handle, tag)) as *const ();
        Some(unsafe { task::Waker::from_raw(task::RawWaker::new(pointer, &fast_vtable::VTABLE)) })
    }
    pub fn default_waker(handle_index: usize, tag: PendingTag) -> Result<task::Waker> {
        fast_waker(handle_index, tag)
            .or_else(move || {
                let handles = crate::io_uring::scheme::handles();
                let handle = Arc::clone(handles.get(&handle_index)?);
                Some(slow_waker(handle, tag))
            })
            .ok_or(Error::new(EBADF))
    }
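    // Sketch of the intended call pattern (hypothetical caller; the actual polling of
    // `HandleSubmissionFuture`s happens elsewhere in the io_uring code):
    //
    //     let waker = waker::default_waker(handle_index, tag)?;
    //     let mut cx = core::task::Context::from_waker(&waker);
    //     // ...poll the pinned future with `cx`; a later wake() requeues it via `handle_either`.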
}

pub const TASK_QUOT_MUL: usize = 1;
pub const TASK_QUOT_DIV: usize = 4;

impl<S, C> Runqueue<S, C> {
    pub fn new(pending_task_count: usize) -> Self
    where
        S: Unpin,
        C: Unpin,
    {
        assert_ne!(pending_task_count, usize::MAX);
        let usize_size = mem::size_of::<usize>();
        let lock_word_count = (pending_task_count + usize_size - 1) / usize_size * usize_size;

        Self {
            first_vacant_slot: AtomicUsize::new(0),
            last_vacant_slot: AtomicUsize::new(pending_task_count - 1),
            next_pending_tag: Default::default(),
            tag_overflow: AtomicBool::new(false),

            pending: Mutex::new(BTreeMap::new()),
            ready: Mutex::new(BinaryHeap::new()),

            // UNOPTIMIZED: Maybe an unsafe cast from Box<[usize]> to Box<[AtomicUsize]>?
            task_locks: (0..lock_word_count).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>().into_boxed_slice(),
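            // The vacant slots form an embedded free list: slot i initially points to slot
            // i + 1, and the final slot stores usize::MAX, the "no next slot" sentinel that
            // `index_from_raw`/`index_to_raw` translate to and from `None`.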
            tasks: Pin::new((1..=pending_task_count).map(|next_index| {
                let next = if next_index == pending_task_count {
                    usize::MAX
                } else {
                    next_index
                };
                TaskSlot::Vacant { next }
            }).map(UnsafeCell::new).collect::<Vec<_>>().into_boxed_slice()),
        }
    }
    pub fn next_tag(&self) -> Result<PendingTag> {
        let has_overflown = self.tag_overflow.load(Ordering::Acquire);

        if !has_overflown {
            let next_tag = self.next_pending_tag.fetch_add(1, Ordering::Acquire);
            if next_tag == PendingTag::MAX {
                self.tag_overflow.store(true, Ordering::Release);
            }
            Ok(next_tag)
        } else {
            let guard = self.pending.lock();

            let mut last_tag_opt = None;

            // This is the most likely scenario: the tags have overflown, but the initial
            // tag was dropped a long time ago.
            if !guard.contains_key(&0) {
                return Ok(0);
            }

            // Otherwise, we try to find a tag that isn't already occupied, checking at most 64
            // keys before failing with EBUSY.

            const MAX_MISSES: usize = 64;

            for tag in guard.keys().copied().take(MAX_MISSES) {
                let last_tag = match last_tag_opt {
                    Some(t) => t,
                    None => {
                        last_tag_opt = Some(tag);
                        continue;
                    }
                };
                if tag == last_tag + 1 {
                    // The next tag is adjacent, there is no hole that we can use.
                    last_tag_opt = Some(tag);
                    continue;
                } else {
                    return Ok(last_tag + 1)
                }
            }
            return Err(Error::new(EBUSY));
        }
    }
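    // Per-task spinlock bits: `try_lock_task_raw` sets a task's bit with `fetch_or(Acquire)`
    // and reports whether it was previously clear, while `unlock_task_raw` clears it again
    // with `fetch_and(Release)`. `TaskGuard` releases the bit automatically on drop.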
    #[inline]
    pub fn try_lock_task_raw(&self, at: usize) -> bool {
        assert_ne!(at, usize::MAX);

        let byte_index = at / mem::size_of::<usize>();
        let bit_pos = at % mem::size_of::<usize>();
        let bit = 1 << bit_pos;

        let prev = self.task_locks[byte_index].fetch_or(bit, Ordering::Acquire);
        let already_locked = prev & bit == bit;
        !already_locked
    }
    #[inline]
    pub fn lock_task_raw(&self, at: usize) {
        assert_ne!(at, usize::MAX);

        while !self.try_lock_task_raw(at) {
            core::sync::atomic::spin_loop_hint();
        }
    }
    #[inline]
    pub unsafe fn unlock_task_raw(&self, at: usize) {
        assert_ne!(at, usize::MAX);

        let byte_index = at / mem::size_of::<usize>();
        let bit_pos = at % mem::size_of::<usize>();
        let bit = 1 << bit_pos;

        let prev = self.task_locks[byte_index].fetch_and(!bit, Ordering::Release);
        assert_eq!(prev & bit, bit, "attempting to release a task lock at {} that wasn't locked", at);
    }
    #[inline]
    pub fn try_lock_task<'runqueue>(&'runqueue self, at: usize) -> Option<TaskGuard<'runqueue, S, C>> {
        assert_ne!(at, usize::MAX);
        assert!(at < self.tasks.as_ref().get_ref().len());

        if self.try_lock_task_raw(at) {
            Some(TaskGuard {
                index: at,
                runqueue: self,
            })
        } else {
            None
        }
    }
    #[inline]
    pub fn lock_task<'runqueue>(&'runqueue self, at: usize) -> TaskGuard<'runqueue, S, C> {
        assert_ne!(at, usize::MAX);
        assert!(at < self.tasks.as_ref().get_ref().len());

        self.lock_task_raw(at);

        TaskGuard {
            index: at,
            runqueue: self,
        }
    }
    #[inline]
    pub fn lock_task_ref<'runqueue>(&'runqueue self, task_ref: TaskRef) -> TaskGuard<'runqueue, S, C> {
        self.lock_task(task_ref.index)
    }
    fn index_from_raw(index: usize) -> Option<usize> {
        if index == usize::MAX {
            None
        } else {
            Some(index)
        }
    }
    fn index_to_raw(index: Option<usize>) -> usize {
        match index {
            Some(i) => {
                assert_ne!(i, usize::MAX);
                i
            }
            None => usize::MAX,
        }
    }
    fn allocate_new_task_slot_inner(&self, current: usize) -> bool
    where
        S: Unpin,
        C: Unpin,
    {
        let mut current_slot = self.lock_task(current);
        let current_slot_next_raw = current_slot
            .as_mut()
            .as_vacant_next()
            .expect("expected the embedded free list to only point to vacant entries");
        let current_slot_next = match Self::index_from_raw(*current_slot_next_raw) {
            Some(c) => c,
            None => return false,
        };

        debug_assert!(
            self.lock_task(current_slot_next)
                .as_mut()
                .as_vacant_next()
                .is_some()
        );

        match self.first_vacant_slot.compare_exchange_weak(current, current_slot_next, Ordering::Acquire, Ordering::Relaxed) {
            Ok(_) => {
                *current_slot_next_raw = Self::index_to_raw(None);
                return true;
            }
            Err(newer) => {
                drop(current_slot);
                return self.allocate_new_task_slot_inner(newer);
            }
        }
    }
    pub fn allocate_new_task_slot<'runqueue>(&'runqueue self) -> Option<TaskGuard<'runqueue, S, C>>
    where
        S: Unpin,
        C: Unpin,
    {
        let initial = Self::index_from_raw(self.first_vacant_slot.load(Ordering::Acquire))?;
        let index = match self.allocate_new_task_slot_inner(initial) {
            true => initial,
            false => return None,
        };
        Some(self.lock_task(index))
    }
    pub fn free_task_slot<'runqueue>(&'runqueue self, at: usize)
    where
        S: Unpin,
        C: Unpin,
    {
        debug_assert!(
            self.lock_task(at)
                .as_mut()
                .as_vacant_next()
                .is_some()
        );
        let last_index = self.last_vacant_slot.load(Ordering::Acquire);

        let mut last_task_slot = self.lock_task(last_index);

        *last_task_slot
            .as_mut()
            .as_vacant_next()
            .expect("expected last vacant entry not to be occupied") = at;

        match self.last_vacant_slot.compare_exchange_weak(last_index, at, Ordering::Acquire, Ordering::Relaxed) {
            Ok(_) => return,
            Err(newer) => {
                assert_ne!(newer, at);
                self.free_task_slot(at);
            }
        }
    }
}

#[derive(Debug)]
pub(crate) struct SecondaryRingRef {
    pub(crate) ring_handle: Weak<Handle>,
    pub(crate) read: bool,
    pub(crate) write: bool,
    pub(crate) fd_for_consumer: usize,
    pub(crate) user_data: u64,
}

#[derive(Debug)]
pub struct RingHandleRuntimeState {
    pub(crate) sq_entry_count: usize,
    pub(crate) cq_entry_count: usize,

    pub(crate) last_sq_push_epoch: CachePadded<AtomicUsize>,
    pub(crate) last_sq_pop_epoch: CachePadded<AtomicUsize>,
    pub(crate) last_cq_push_epoch: CachePadded<AtomicUsize>,
    pub(crate) last_cq_pop_epoch: CachePadded<AtomicUsize>,
}
#[derive(Debug)]
pub struct RingHandleConsumerState {
    pub(crate) version: IoUringVersion,
    pub(crate) flags: IoUringCreateFlags,
    pub(crate) is_owner: bool,

    pub(crate) event_queue: Once<event::EventQueueId>,
    pub(crate) event_seq: AtomicU64,

    pub(crate) secondary_rings: Mutex<Vec<SecondaryRingRef>>,

    pub(crate) runtime_state: RingHandleRuntimeState,
}

impl RingHandleConsumerState {
    pub fn event_queue_or_init(&self) -> event::EventQueueId {
        fn init_event_queue() -> event::EventQueueId {
            let qid = event::next_queue_id();
            event::queues_mut().insert(qid, Arc::new(event::EventQueue::new(qid)));
            qid
        }
        *self.event_queue.call_once(init_event_queue)
    }
}

#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum RingHandleState {
    /// The io_uring has been opened as a file, and is in its initial state.
    Start,

    /// The io_uring has been initialized and is ready to be attached.
    Initialized,

    /// The io_uring has been attached to a scheme, and is now ready to be mmapped and used.
    Attached,
}