Verified Commit 5fd71ea9 authored by 4lDO2 🖖

Replace Option with Result<(), Eintr> for interruptible waits, and fix warnings.

parent c57f693c
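The pattern this commit applies throughout: waiting primitives that previously reported signal interruption by returning None now return Err(Eintr), and Eintr converts into the existing EINTR error, so call sites can drop .ok_or(Error::new(EINTR)) in favour of ? or .map_err(Eintr::into). A minimal sketch of the idea, not part of the diff itself; Eintr, Error and EINTR mirror the kernel types touched below, while Slot and recv are hypothetical stand-ins for WaitMap/WaitQueue:

pub const EINTR: i32 = 4; // conventional errno value, used here for illustration

#[derive(Debug)]
pub struct Error { pub errno: i32 }

impl Error {
    pub fn new(errno: i32) -> Self { Error { errno } }
}

/// Marker error: the wait was interrupted by a signal.
#[derive(Debug)]
pub struct Eintr;

impl From<Eintr> for Error {
    fn from(_: Eintr) -> Self { Error::new(EINTR) }
}

/// Hypothetical stand-in for a blocking receive on WaitMap/WaitQueue.
struct Slot<V> { value: Option<V>, interrupted: bool }

impl<V> Slot<V> {
    // Before this commit: `fn recv(&mut self) -> Option<V>`, with callers
    // writing `.ok_or(Error::new(EINTR))`. After: interruption is its own
    // error type, so callers just use `?`.
    fn recv(&mut self) -> Result<V, Eintr> {
        if self.interrupted {
            return Err(Eintr);
        }
        // The real code blocks on a WaitCondition here; this sketch assumes
        // a value is already present.
        Ok(self.value.take().expect("sketch: value already sent"))
    }
}

fn read(slot: &mut Slot<u8>) -> Result<u8, Error> {
    // Eintr -> Error::new(EINTR) happens implicitly through the From impl.
    Ok(slot.recv()?)
}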
@@ -7,10 +7,7 @@ use spin::Mutex;
use crate::acpi::madt::{self, Madt, MadtEntry, MadtIoApic, MadtIntSrcOverride};
use crate::arch::interrupt::irq;
use crate::memory::Frame;
use crate::paging::{ActivePageTable, entry::EntryFlags, Page, PhysicalAddress, VirtualAddress};
use super::pic;
use crate::paging::ActivePageTable;
pub struct IoApicRegs {
pointer: *const u32,
......
mod private {
pub trait Integer: Sized + Copy {
fn one() -> Self;
fn checked_add_inner(self, rhs: Self) -> Option<Self>;
fn checked_sub_inner(self, rhs: Self) -> Option<Self>;
fn checked_mul_inner(self, rhs: Self) -> Option<Self>;
fn checked_div_inner(self, rhs: Self) -> Option<Self>;
}
macro_rules! implement(
($ty:ident) => {
impl Integer for $ty {
fn one() -> Self {
1
}
fn checked_add_inner(self, rhs: Self) -> Option<Self> {
Self::checked_add(self, rhs)
}
fn checked_sub_inner(self, rhs: Self) -> Option<Self> {
Self::checked_sub(self, rhs)
}
fn checked_mul_inner(self, rhs: Self) -> Option<Self> {
Self::checked_mul(self, rhs)
}
fn checked_div_inner(self, rhs: Self) -> Option<Self> {
Self::checked_div(self, rhs)
}
}
}
);
implement!(u8);
implement!(u16);
implement!(u32);
implement!(u64);
implement!(u128);
implement!(usize);
implement!(i8);
implement!(i16);
implement!(i32);
implement!(i64);
implement!(i128);
implement!(isize);
}
#[inline]
pub fn checked_div_round_up<T>(dividend: T, divisor: T) -> Option<T>
where
T: private::Integer,
{
dividend.checked_add_inner(divisor.checked_sub_inner(T::one())?)?.checked_div_inner(divisor)
}
#[inline]
pub fn checked_round_up<T>(number: T, to: T) -> Option<T>
where
T: private::Integer,
{
checked_div_round_up(number, to)?.checked_mul_inner(to)
}
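A brief usage sketch of the helpers defined above, as they are applied further down in the io_uring scheme to replace open-coded page rounding; PAGE_SIZE = 4096 is illustrative (the kernel uses memory::PAGE_SIZE):

const PAGE_SIZE: usize = 4096; // illustrative; the kernel value comes from memory::PAGE_SIZE

// Overflow-checked equivalent of the old
// ((count * entry_size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE.
fn ring_bytesize(entry_count: usize, entry_size: usize) -> Option<usize> {
    let raw = entry_count.checked_mul(entry_size)?;
    checked_round_up(raw, PAGE_SIZE)
}

// ring_bytesize(256, 64) == Some(16384); ring_bytesize(usize::MAX, 2) == None.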
pub mod div_round_up;
#[macro_use]
pub mod int_like;
pub mod unique;
pub use div_round_up::{checked_div_round_up, checked_round_up};
/// Debug macro, lifted from the std
#[macro_export]
macro_rules! dbg {
@@ -26,24 +29,15 @@ macro_rules! dbg {
};
}
#[allow(unused_unsafe)]
pub(crate) fn unlikely_inner(cond: bool) -> bool {
unsafe { ::core::intrinsics::unlikely(cond) }
}
#[allow(unused_unsafe)]
pub(crate) fn likely_inner(cond: bool) -> bool {
unsafe { ::core::intrinsics::likely(cond) }
}
#[macro_export]
macro_rules! unlikely(
($cond:expr) => {
crate::common::unlikely_inner($cond)
::core::intrinsics::unlikely($cond)
}
);
#[macro_export]
macro_rules! likely(
($cond:expr) => {
crate::common::likely_inner($cond)
::core::intrinsics::likely($cond)
}
);
@@ -23,7 +23,7 @@ use crate::syscall::error::{Error, Result};
use crate::syscall::error::{EBADFD, EMFILE, ENODEV, ENOMEM, ESRCH};
use crate::syscall::flag::{SIG_DFL, SigActionFlags};
use crate::syscall::io_uring::{IoUringCreateFlags, IoUringCreateInfo, IoUringVersion, IoUringRecvInfo, IoUringRecvFlags};
use crate::syscall::scheme::{async_scheme::AsyncScheme, Scheme};
use crate::syscall::scheme::async_scheme::AsyncScheme;
/// Unique identifier for a context (i.e. `pid`).
use ::core::sync::atomic::AtomicUsize;
......
@@ -14,7 +14,7 @@ use syscall::{
use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::{self, Frame};
use crate::memory::Frame;
use crate::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
......
@@ -83,7 +83,8 @@ pub extern "C" fn signal_handler(sig: usize) {
Arc::clone(&parent.waitpid)
};
waitpid.send(WaitpidKey {
// TODO: Handle signal interruption error.
let _eintr = waitpid.send_sync(WaitpidKey {
pid: Some(pid),
pgid: Some(pgid)
}, (pid, 0xFFFF));
@@ -111,7 +112,8 @@ pub extern "C" fn signal_handler(sig: usize) {
Arc::clone(&parent.waitpid)
};
waitpid.send(WaitpidKey {
// TODO: Handle signal interruption error.
let _eintr = waitpid.send_sync(WaitpidKey {
pid: Some(pid),
pgid: Some(pgid)
}, (pid, (sig << 8) | 0x7F));
......
@@ -11,10 +11,10 @@ use crate::context::{self, Context};
use crate::context::signal::SignalEpoch;
use crate::scheme::{self, SchemeId};
use crate::sync::wait_queue::Op as WaitQueueOp;
use crate::sync::WaitQueue;
use crate::sync::{Eintr, WaitQueue};
use crate::syscall::data::Event;
use crate::syscall::error::{Error, Result, EBADF, EINTR, ESRCH};
use crate::syscall::error::{Error, Result, EBADF, ESRCH};
use crate::syscall::flag::EventFlags;
int_like!(EventQueueId, AtomicEventQueueId, usize, AtomicUsize);
@@ -36,17 +36,18 @@ impl EventQueue {
}
pub fn read_sync(&self, events: &mut [Event]) -> Result<usize> {
self.queue.receive_into_sync(events, true, "EventQueue::read").ok_or(Error::new(EINTR))
self.queue.receive_into_sync(events, true, "EventQueue::read").map_err(Eintr::into)
}
pub fn read_nonblocking(&self, events: &mut [Event]) -> Result<usize> {
let blocking = false;
self.queue.receive_into_sync(events, blocking, "EventQueue::read_nonblocking").ok_or(Error::new(EINTR))
self.queue.receive_into_sync(events, blocking, "EventQueue::read_nonblocking").map_err(Eintr::into)
}
pub fn poll_read(&self, events: &mut [Event], signal_epoch: SignalEpoch, cx: &mut task::Context<'_>) -> task::Poll<Option<usize>> {
pub fn poll_read(&self, events: &mut [Event], signal_epoch: SignalEpoch, cx: &mut task::Context<'_>) -> task::Poll<Result<usize>> {
const FAIL_ON_SIGNAL: bool = true;
self.queue
.poll_receive_into(events, FAIL_ON_SIGNAL, signal_epoch, cx)
.map_err(Eintr::into)
}
pub fn write<I, T>(&self, events: I) -> Result<usize>
@@ -202,18 +203,22 @@ pub fn unregister_file(scheme: SchemeId, number: usize) {
pub fn trigger(scheme: SchemeId, number: usize, flags: EventFlags) {
let registry = registry();
if let Some(queue_list) = registry.get(&RegKey { scheme, number }) {
for (queue_key, &queue_flags) in queue_list.iter() {
let common_flags = flags & queue_flags;
if !common_flags.is_empty() {
let queues = queues();
if let Some(queue) = queues.get(&queue_key.queue) {
queue.queue.send_sync(Event {
id: queue_key.id,
flags: common_flags,
data: queue_key.data
}, "event::trigger");
}
let queue_list = match registry.get(&RegKey { scheme, number }) {
Some(queue_list) => queue_list,
// TODO: Handle error?
None => return,
};
for (queue_key, &queue_flags) in queue_list.iter() {
let common_flags = flags & queue_flags;
if !common_flags.is_empty() {
let queues = queues();
if let Some(queue) = queues.get(&queue_key.queue) {
queue.queue.send_sync(Event {
id: queue_key.id,
flags: common_flags,
data: queue_key.data
}, "event::trigger");
}
}
}
......
use core::convert::TryFrom;
use core::future::Future;
use core::intrinsics::unlikely;
use core::mem::ManuallyDrop;
use core::pin::Pin;
use core::sync::atomic::{self, AtomicBool, AtomicU64, AtomicUsize, Ordering};
@@ -443,8 +442,6 @@ impl<C> PartialOrd for QueueItem<C> {
#[cfg(target_pointer_width = "64")]
mod types {
use super::*;
use core::sync::atomic::AtomicU32;
pub type PendingTag = u32;
@@ -454,8 +451,6 @@ mod types {
}
#[cfg(target_pointer_width = "32")]
mod types {
use super::*;
use core::sync::atomic::AtomicU16;
pub type PendingTag = u16;
......
@@ -11,7 +11,7 @@ use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use crate::context::Context;
use crate::context::memory::Grant;
use crate::io_uring::{init_ring, RingInfo};
use crate::io_uring::handle::{AtomicPendingTag, PendingTag, RingHandle, RingHandleRuntimeState, RingHandleState, Rings, Runqueue};
use crate::io_uring::handle::{AtomicPendingTag, RingHandle, RingHandleRuntimeState, RingHandleState, Rings, Runqueue};
use crate::memory::Frame;
use crate::paging::ActivePageTable;
use crate::paging::entry::EntryFlags as PtEntryFlags;
@@ -21,7 +21,7 @@ use crate::{context, memory};
use crate::syscall::data::{Map, Stat};
use crate::syscall::flag::{MapFlags, MODE_CHR};
use crate::syscall::error::{Error, Result};
use crate::syscall::error::{EBADF, EBADFD, EEXIST, EFAULT, EINVAL, ENOENT, ENOMEM, ENOSYS, EOPNOTSUPP, EPERM, EPROTONOSUPPORT, ESRCH};
use crate::syscall::error::{EBADF, EBADFD, EFAULT, EINVAL, ENOENT, ENOMEM, ENOSYS, EOPNOTSUPP, EPERM, EPROTONOSUPPORT, ESRCH};
use crate::syscall::io_uring::IoUringCreateInfo;
use crate::syscall::io_uring::v1::{CachePadded, CqEntry32, CqEntry64, IoUringCreateFlags, PoolFdEntry, SqEntry32, SqEntry64};
use crate::syscall::scheme::Scheme;
......
@@ -85,9 +85,9 @@ impl Scheme for DebugScheme {
*handles.get(&id).ok_or(Error::new(EBADF))?
};
INPUT.call_once(init_input)
.receive_into_sync(buf, handle.flags & O_NONBLOCK == 0, "DebugScheme::read")
.ok_or(Error::new(EINTR))
let count = INPUT.call_once(init_input)
.receive_into_sync(buf, handle.flags & O_NONBLOCK == 0, "DebugScheme::read")?;
Ok(count)
}
/// Write the `buffer` to the `file`
......
@@ -147,8 +147,8 @@ impl EventScheme {
let event_buf = unsafe { slice::from_raw_parts_mut(slice.as_ptr() as *mut Event, slice.len() / mem::size_of::<Event>()) };
match queue.poll_read(event_buf, current_signal_epoch, cx) {
task::Poll::Ready(Some(bytes_read)) => total_bytes_read = total_bytes_read.checked_add(bytes_read).ok_or(Error::new(EOVERFLOW))?,
task::Poll::Ready(None) => {
task::Poll::Ready(Ok(bytes_read)) => total_bytes_read = total_bytes_read.checked_add(bytes_read).ok_or(Error::new(EOVERFLOW))?,
task::Poll::Ready(Err(_eintr)) => {
signal = true;
break;
},
......
use core::sync::atomic::{AtomicUsize, Ordering};
use core::{iter, mem, slice, str, task};
use core::{iter, slice, str, task};
use alloc::sync::{Arc, Weak};
use alloc::sync::Arc;
use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
......
@@ -94,9 +94,9 @@ impl Scheme for SerioScheme {
*handles.get(&id).ok_or(Error::new(EBADF))?
};
INPUT[handle.index].call_once(init_input)
.receive_into_sync(buf, handle.flags & O_NONBLOCK != O_NONBLOCK, "SerioScheme::read")
.ok_or(Error::new(EINTR))
let count = INPUT[handle.index].call_once(init_input)
.receive_into_sync(buf, handle.flags & O_NONBLOCK != O_NONBLOCK, "SerioScheme::read")?;
Ok(count)
}
fn fcntl(&self, id: usize, cmd: usize, arg: usize) -> Result<usize> {
......
@@ -14,14 +14,14 @@ use crate::context::memory::{Grant, entry_flags, Region, round_down_pages};
use crate::paging::{InactivePageTable, Page, VirtualAddress};
use crate::paging::temporary_page::TemporaryPage;
use crate::scheme::{AtomicSchemeId, ContextOrKernel, FileHandle, SchemeId};
use crate::sync::{WaitQueue, WaitMap};
use crate::sync::{Eintr, WaitQueue, WaitMap};
use crate::syscall::data::{Map, OldMap, Packet, Stat, StatVfs, TimeSpec, Tuple};
use crate::syscall::error::*;
use crate::syscall::flag::{EventFlags, EVENT_READ, O_NONBLOCK, MapFlags, PROT_READ, PROT_WRITE};
use crate::syscall::number::*;
use crate::syscall::scheme::{self, async_scheme::AsyncScheme, Scheme};
use crate::syscall::io_uring::{CqEntry32, CqEntry64, SqEntry32, SqEntry64, IoUringRecvFlags, IoUringRecvInfo, IoUringCreateFlags};
use crate::{event, memory};
use crate::syscall::io_uring::{IoUringRecvFlags, IoUringRecvInfo, IoUringCreateFlags};
use crate::{common, event, memory};
#[derive(Debug)]
pub struct UserInner {
@@ -84,7 +84,8 @@ impl UserInner {
let id = packet.id;
self.submit_sync(packet)?;
self.done.receive_sync(&id, "call_inner_sync").ok_or(Error::new(EINTR))
let retval = self.done.receive_sync(&id, "call_inner_sync")?;
Ok(retval)
}
pub fn next_id(&self) -> u64 {
@@ -117,7 +118,8 @@ impl UserInner {
};
match self.done.poll_receive(&syscall_id, fail_on_signal, current_signal_epoch, cx) {
task::Poll::Ready(return_value) => task::Poll::Ready(return_value.ok_or(Error::new(EINTR))),
task::Poll::Ready(Ok(return_value)) => task::Poll::Ready(Ok(return_value)),
task::Poll::Ready(Err(Eintr)) => task::Poll::Ready(Err(Error::new(EINTR))),
task::Poll::Pending => task::Poll::Pending,
}
}
@@ -229,8 +231,7 @@ impl UserInner {
};
match self.todo.poll_receive_into(packet_buf, fail_on_signal, current_signal_epoch, cx_unwrapped) {
task::Poll::Ready(Some(c)) => Some(c),
task::Poll::Ready(None) => return task::Poll::Ready(Err(Error::new(EINTR))),
task::Poll::Ready(result) => result,
// The logical contract to register a waker, by returning Pending, is upheld in
// poll_receive_into.
task::Poll::Pending => return task::Poll::Pending,
@@ -243,26 +244,30 @@ impl UserInner {
};
task::Poll::Ready(if let Some(count) = count_opt {
if count > 0 {
log::trace!("packets read to scheme handler in context `{}`: {:?}", alloc::string::String::from_utf8_lossy(&*context::contexts().current().unwrap().read().name.lock()), &packet_buf[..count]);
// If we received requests, return them to the scheme handler
Ok(count * mem::size_of::<Packet>())
} else if unmounting {
// If there were no requests and we were unmounting, return EOF
task::Poll::Ready(match count_opt {
Ok(count) => {
if count > 0 {
log::trace!("packets read to scheme handler in context `{}`: {:?}", alloc::string::String::from_utf8_lossy(&*context::contexts().current().unwrap().read().name.lock()), &packet_buf[..count]);
// If we received requests, return them to the scheme handler
Ok(count * mem::size_of::<Packet>())
} else if unmounting {
// If there were no requests and we were unmounting, return EOF
Ok(0)
} else {
// If there were no requests and O_NONBLOCK was used, return EAGAIN
Err(Error::new(EAGAIN))
}
}
Err(Eintr) if self.unmounting.load(Ordering::SeqCst) => {
// If we are unmounting and there are no pending requests, return EOF
// Unmounting is read again because the previous value
// may have changed since we first blocked for packets
Ok(0)
} else {
// If there were no requests and O_NONBLOCK was used, return EAGAIN
Err(Error::new(EAGAIN))
}
} else if self.unmounting.load(Ordering::SeqCst) {
// If we are unmounting and there are no pending requests, return EOF
// Unmounting is read again because the previous value
// may have changed since we first blocked for packets
Ok(0)
} else {
// A signal was received, return EINTR
Err(Error::new(EINTR))
Err(Eintr) => {
// A signal was received, return EINTR
Err(Error::new(EINTR))
}
})
}
pub fn poll_read(&self, buf: &mut [u8], fail_on_signal: bool, cx: &mut task::Context<'_>) -> task::Poll<Result<usize>> {
@@ -326,7 +331,7 @@ impl UserInner {
}
}
self.done.send(packet.id, packet.a);
self.done.send_sync(packet.id, packet.a)?;
}
i += 1;
}
@@ -770,19 +775,11 @@ impl Scheme for UserScheme {
recv_flags |= IoUringRecvFlags::BITS_32;
}
let submission_entry_size = if recv_flags.contains(IoUringRecvFlags::BITS_32) {
mem::size_of::<SqEntry32>()
} else {
mem::size_of::<SqEntry64>()
};
let completion_entry_size = if recv_flags.contains(IoUringRecvFlags::BITS_32) {
mem::size_of::<CqEntry32>()
} else {
mem::size_of::<CqEntry64>()
};
let raw_se_bytesize = sq_entry_count.checked_mul(sq_entry_size).ok_or(Error::new(EINVAL))?;
let raw_ce_bytesize = cq_entry_count.checked_mul(cq_entry_size).ok_or(Error::new(EINVAL))?;
let se_bytesize = ((sq_entry_count * submission_entry_size + memory::PAGE_SIZE - 1) / memory::PAGE_SIZE) * memory::PAGE_SIZE;
let ce_bytesize = ((cq_entry_count * completion_entry_size + memory::PAGE_SIZE - 1) / memory::PAGE_SIZE) * memory::PAGE_SIZE;
let se_bytesize = common::checked_round_up(raw_se_bytesize, memory::PAGE_SIZE).ok_or(Error::new(EOVERFLOW))?;
let ce_bytesize = common::checked_round_up(raw_ce_bytesize, memory::PAGE_SIZE).ok_or(Error::new(EOVERFLOW))?;
let producer_weak = Weak::clone(&inner.context);
let producer_arc = producer_weak.upgrade().ok_or(Error::new(ESRCH))?;
......
pub use self::wait_condition::WaitCondition;
pub use self::wait_condition::{Eintr, WaitCondition};
pub use self::wait_queue::WaitQueue;
pub use self::wait_map::WaitMap;
......
@@ -7,6 +7,8 @@ use spin::{Mutex, MutexGuard, RwLock};
use crate::context::{self, Context};
use crate::context::signal::SignalEpoch;
use crate::syscall::error::Error;
use crate::syscall::error::EINTR;
#[derive(Debug)]
struct WakerWrapper {
@@ -19,6 +21,15 @@ pub struct WaitCondition {
wakers: Mutex<Vec<WakerWrapper>>,
}
#[derive(Debug)]
pub struct Eintr;
impl From<Eintr> for Error {
fn from(eintr: Eintr) -> Self {
Self::new(EINTR)
}
}
impl WaitCondition {
pub fn new() -> WaitCondition {
WaitCondition {
......
@@ -5,21 +5,32 @@ use alloc::collections::BTreeMap;
use spin::Mutex;
use crate::context::signal::SignalEpoch;
use crate::sync::WaitCondition;
use crate::sync::{Eintr, WaitCondition};
#[derive(Debug)]
pub struct WaitMap<K, V> {
inner: Mutex<BTreeMap<K, V>>,
condition: WaitCondition,
max_capacity: Option<usize>,
not_empty_condition: WaitCondition,
blocking_send_component: Option<BlockingSendComponent>,
}
#[derive(Debug)]
struct BlockingSendComponent {
max_capacity: usize,
not_full_condition: WaitCondition,
}
impl<K, V> WaitMap<K, V> where K: Clone + Ord {
pub fn new(max_capacity: Option<usize>) -> WaitMap<K, V> {
WaitMap {
inner: Mutex::new(BTreeMap::new()),
condition: WaitCondition::new(),
max_capacity,
not_empty_condition: WaitCondition::new(),
blocking_send_component: max_capacity.map(|max_capacity| {
BlockingSendComponent {
max_capacity,
not_full_condition: WaitCondition::new(),
}
}),
}
}
@@ -27,26 +38,26 @@ impl<K, V> WaitMap<K, V> where K: Clone + Ord {
self.inner.lock().remove(key)
}
pub fn receive_sync(&self, key: &K, reason: &'static str) -> Option<V> {
pub fn receive_sync(&self, key: &K, reason: &'static str) -> Result<V, Eintr> {
loop {
let mut inner = self.inner.lock();
if let Some(value) = inner.remove(key) {
return Some(value);
return Ok(value);
}
if ! self.condition.wait(inner, reason) {
return None;
if ! self.not_empty_condition.wait(inner, reason) {
return Err(Eintr);
}
}
}
pub fn poll_receive(&self, key: &K, fail_on_signal: bool, current_signal_epoch: SignalEpoch, cx: &mut task::Context<'_>) -> task::Poll<Option<V>> {
pub fn poll_receive(&self, key: &K, fail_on_signal: bool, current_signal_epoch: SignalEpoch, cx: &mut task::Context<'_>) -> task::Poll<Result<V, Eintr>> {
if let Some(value) = self.inner.lock().remove(key) {
task::Poll::Ready(Some(value))
task::Poll::Ready(Ok(value))
} else {
if fail_on_signal && self.condition.signal_has_occurred(cx.waker(), current_signal_epoch) {
return task::Poll::Ready(None);
if fail_on_signal && self.not_empty_condition.signal_has_occurred(cx.waker(), current_signal_epoch) {
return task::Poll::Ready(Err(Eintr));
}
self.condition.add_waker(cx.waker(), current_signal_epoch);
self.not_empty_condition.add_waker(cx.waker(), current_signal_epoch);
task::Poll::Pending
}
}
@@ -60,16 +71,16 @@ impl<K, V> WaitMap<K, V> where K: Clone + Ord {
}
}
pub fn receive_any_sync(&self, reason: &'static str) -> Option<(K, V)> {
pub fn receive_any_sync(&self, reason: &'static str) -> Result<(K, V), Eintr> {
loop {
let mut inner = self.inner.lock();
if let Some(key) = inner.keys().next().cloned() {
if let Some(entry) = inner.remove(&key).map(|value| (key, value)) {
return Some(entry);
return Ok(entry);
}
}
if ! self.condition.wait(inner, reason) {
return None;
if ! self.not_empty_condition.wait(inner, reason) {
return Err(Eintr);
}
}
}
@@ -79,9 +90,44 @@ impl<K, V> WaitMap<K, V> where K: Clone + Ord {
ret
}
pub fn send(&self, key: K, value: V) {
self.inner.lock().insert(key, value);
self.condition.notify();
pub fn send_sync(&self, key: K, value: V) -> Result<(), Eintr> {
loop {
let mut inner = self.inner.lock();
if let Some(blocking_send_component) = self.blocking_send_component.as_ref() {
let is_full = blocking_send_component.max_capacity == inner.len();
if is_full {
if ! blocking_send_component.not_full_condition.wait(inner, "WaitMap::send_sync") {
return Err(Eintr);
}
continue;
}
}
inner.insert(key, value);
self.not_empty_condition.notify();
return Ok(());
}
}
pub fn poll_send(&self, key: K, value: V, fail_on_signal: bool, current_signal_epoch: SignalEpoch, cx: &mut task::Context<'_>) -> task::Poll<Result<(), Eintr>> {
let mut inner = self.inner.lock();
if let Some(blocking_send_component) = self.blocking_send_component.as_ref() {
let is_full = blocking_send_component.max_capacity == inner.len();
if is_full {
if blocking_send_component.not_full_condition.signal_has_occurred(cx.waker(), current_signal_epoch) {
return task::Poll::Ready(Err(Eintr));
}
return task::Poll::Pending;
}
}
inner.insert(key, value);
self.not_empty_condition.notify();
return task::Poll::Ready(Ok(()));
}
}
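With blocking_send_component set, send_sync gives WaitMap bounded-capacity, interruptible semantics: a sender blocks on not_full_condition while the map is full, resumes once a receiver drains an entry, and a signal during that wait surfaces as Err(Eintr). A rough userspace analogue of the blocking part, using std primitives instead of the kernel's spin::Mutex and WaitCondition; everything below is illustrative, not kernel API:

use std::collections::BTreeMap;
use std::sync::{Condvar, Mutex};

struct BoundedMap<K: Ord, V> {
    inner: Mutex<BTreeMap<K, V>>,
    not_full: Condvar,
    max_capacity: usize,
}

impl<K: Ord, V> BoundedMap<K, V> {
    // Analogue of WaitMap::send_sync with blocking_send_component = Some(..):
    // block while the map is at capacity, then insert.
    fn send_sync(&self, key: K, value: V) {
        let mut inner = self.inner.lock().unwrap();
        while inner.len() == self.max_capacity {
            // The kernel version can be interrupted by a signal here, in
            // which case it returns Err(Eintr); std's Condvar has no signal
            // concept, so that branch is omitted in this sketch.
            inner = self.not_full.wait(inner).unwrap();
        }
        inner.insert(key, value);
        // The kernel code also notifies not_empty_condition at this point so
        // that blocked receivers wake up.
    }
}

Compared to the old send, which always inserted immediately, the bounded variant applies backpressure so the map never grows past max_capacity, and signal interruption is made visible to the caller instead of being swallowed.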
impl<K, V> Clone for WaitMap<K, V>
@@ -92,8 +138,11 @@ where
fn clone(&self) -> Self {
Self {
inner: Mutex::new(self.inner.lock().clone()),
condition: WaitCondition::new(),
max_capacity: self.max_capacity,
not_empty_condition: WaitCondition::new(),