From 11687827f8d8e3f98af1b14f44423f985b7b0a21 Mon Sep 17 00:00:00 2001
From: korken89
Date: Thu, 26 Aug 2021 13:49:42 +0000
Subject: [PATCH] deploy: 59bc89f2973040d84fca6872dc7bf30c6343a1c2

---
 crates.js | 2 +-
 implementors/core/convert/trait.From.js | 2 +
 implementors/core/default/trait.Default.js | 2 +
 implementors/core/fmt/trait.Debug.js | 3 +
 implementors/core/fmt/trait.Display.js | 2 +
 implementors/core/marker/trait.Freeze.js | 3 +
 implementors/core/marker/trait.Send.js | 3 +
 implementors/core/marker/trait.Sync.js | 3 +
 implementors/core/marker/trait.Unpin.js | 3 +
 implementors/core/ops/deref/trait.Deref.js | 3 +
 implementors/core/ops/deref/trait.DerefMut.js | 3 +
 implementors/core/ops/drop/trait.Drop.js | 3 +
 implementors/lock_api/mutex/trait.RawMutex.js | 3 +
 .../lock_api/rwlock/trait.RawRwLock.js | 3 +
 .../rwlock/trait.RawRwLockDowngrade.js | 3 +
 .../lock_api/rwlock/trait.RawRwLockUpgrade.js | 3 +
 implementors/scopeguard/trait.Strategy.js | 3 +
 .../spin/relax/trait.RelaxStrategy.js | 3 +
 lock_api/all.html | 5 +
 lock_api/index.html | 120 +
 lock_api/mutex/struct.MappedMutexGuard.html | 11 +
 lock_api/mutex/struct.Mutex.html | 11 +
 lock_api/mutex/struct.MutexGuard.html | 11 +
 lock_api/mutex/trait.RawMutex.html | 11 +
 lock_api/mutex/trait.RawMutexFair.html | 11 +
 lock_api/mutex/trait.RawMutexTimed.html | 11 +
 .../struct.MappedReentrantMutexGuard.html | 11 +
 .../remutex/struct.RawReentrantMutex.html | 11 +
 lock_api/remutex/struct.ReentrantMutex.html | 11 +
 .../remutex/struct.ReentrantMutexGuard.html | 11 +
 lock_api/remutex/trait.GetThreadId.html | 11 +
 .../rwlock/struct.MappedRwLockReadGuard.html | 11 +
 .../rwlock/struct.MappedRwLockWriteGuard.html | 11 +
 lock_api/rwlock/struct.RwLock.html | 11 +
 lock_api/rwlock/struct.RwLockReadGuard.html | 11 +
 .../struct.RwLockUpgradableReadGuard.html | 11 +
 lock_api/rwlock/struct.RwLockWriteGuard.html | 11 +
 lock_api/rwlock/trait.RawRwLock.html | 11 +
 lock_api/rwlock/trait.RawRwLockDowngrade.html | 11 +
 lock_api/rwlock/trait.RawRwLockFair.html | 11 +
 lock_api/rwlock/trait.RawRwLockRecursive.html | 11 +
 .../rwlock/trait.RawRwLockRecursiveTimed.html | 11 +
 lock_api/rwlock/trait.RawRwLockTimed.html | 11 +
 lock_api/rwlock/trait.RawRwLockUpgrade.html | 11 +
 .../trait.RawRwLockUpgradeDowngrade.html | 11 +
 .../rwlock/trait.RawRwLockUpgradeFair.html | 11 +
 .../rwlock/trait.RawRwLockUpgradeTimed.html | 11 +
 lock_api/sidebar-items.js | 1 +
 lock_api/struct.GuardNoSend.html | 13 +
 lock_api/struct.GuardSend.html | 13 +
 lock_api/struct.MappedMutexGuard.html | 47 +
 .../struct.MappedReentrantMutexGuard.html | 46 +
 lock_api/struct.MappedRwLockReadGuard.html | 46 +
 lock_api/struct.MappedRwLockWriteGuard.html | 47 +
 lock_api/struct.Mutex.html | 81 +
 lock_api/struct.MutexGuard.html | 57 +
 lock_api/struct.RawReentrantMutex.html | 41 +
 lock_api/struct.ReentrantMutex.html | 90 +
 lock_api/struct.ReentrantMutexGuard.html | 56 +
 lock_api/struct.RwLock.html | 183 +
 lock_api/struct.RwLockReadGuard.html | 55 +
 .../struct.RwLockUpgradableReadGuard.html | 59 +
 lock_api/struct.RwLockWriteGuard.html | 65 +
 lock_api/trait.GetThreadId.html | 17 +
 lock_api/trait.RawMutex.html | 30 +
 lock_api/trait.RawMutexFair.html | 23 +
 lock_api/trait.RawMutexTimed.html | 15 +
 lock_api/trait.RawRwLock.html | 38 +
 lock_api/trait.RawRwLockDowngrade.html | 11 +
 lock_api/trait.RawRwLockFair.html | 32 +
 lock_api/trait.RawRwLockRecursive.html | 14 +
 lock_api/trait.RawRwLockRecursiveTimed.html | 11 +
 lock_api/trait.RawRwLockTimed.html | 19 +
 lock_api/trait.RawRwLockUpgrade.html | 26 +
 lock_api/trait.RawRwLockUpgradeDowngrade.html | 14 +
 lock_api/trait.RawRwLockUpgradeFair.html | 18 +
 lock_api/trait.RawRwLockUpgradeTimed.html | 20 +
 scopeguard/all.html | 5 +
 scopeguard/enum.Always.html | 19 +
 scopeguard/fn.guard.html | 4 +
 scopeguard/index.html | 171 +
 scopeguard/macro.defer!.html | 11 +
 scopeguard/macro.defer.html | 10 +
 scopeguard/sidebar-items.js | 1 +
 scopeguard/struct.ScopeGuard.html | 50 +
 scopeguard/trait.Strategy.html | 8 +
 search-index.js | 5 +-
 source-files.js | 3 +
 spin/all.html | 5 +
 spin/barrier/index.html | 19 +
 spin/barrier/sidebar-items.js | 1 +
 spin/barrier/struct.Barrier.html | 73 +
 spin/barrier/struct.BarrierWaitResult.html | 30 +
 spin/index.html | 92 +
 spin/lazy/index.html | 8 +
 spin/lazy/sidebar-items.js | 1 +
 spin/lazy/struct.Lazy.html | 61 +
 spin/lock_api/index.html | 11 +
 spin/lock_api/sidebar-items.js | 1 +
 spin/lock_api/type.Mutex.html | 4 +
 spin/lock_api/type.MutexGuard.html | 4 +
 spin/lock_api/type.RwLock.html | 4 +
 spin/lock_api/type.RwLockReadGuard.html | 4 +
 .../type.RwLockUpgradableReadGuard.html | 4 +
 spin/lock_api/type.RwLockWriteGuard.html | 4 +
 spin/mutex/index.html | 15 +
 spin/mutex/sidebar-items.js | 1 +
 spin/mutex/spin/index.html | 9 +
 spin/mutex/spin/sidebar-items.js | 1 +
 spin/mutex/spin/struct.SpinMutex.html | 141 +
 spin/mutex/spin/struct.SpinMutexGuard.html | 30 +
 spin/mutex/struct.Mutex.html | 125 +
 spin/mutex/struct.MutexGuard.html | 30 +
 spin/once/index.html | 6 +
 spin/once/sidebar-items.js | 1 +
 spin/once/struct.Once.html | 93 +
 spin/relax/index.html | 10 +
 spin/relax/sidebar-items.js | 1 +
 spin/relax/struct.Loop.html | 17 +
 spin/relax/struct.Spin.html | 23 +
 spin/relax/trait.RelaxStrategy.html | 7 +
 spin/rwlock/index.html | 9 +
 spin/rwlock/sidebar-items.js | 1 +
 spin/rwlock/struct.RwLock.html | 215 +
 spin/rwlock/struct.RwLockReadGuard.html | 29 +
 spin/rwlock/struct.RwLockUpgradableGuard.html | 60 +
 spin/rwlock/struct.RwLockWriteGuard.html | 50 +
 spin/sidebar-items.js | 1 +
 spin/type.Barrier.html | 6 +
 spin/type.Lazy.html | 6 +
 spin/type.Mutex.html | 6 +
 spin/type.Once.html | 6 +
 spin/type.RwLock.html | 6 +
 spin/type.RwLockUpgradableGuard.html | 7 +
 spin/type.RwLockWriteGuard.html | 6 +
 src/heapless/pool/cas.rs.html | 44 +-
 src/lock_api/lib.rs.html | 227 ++
 src/lock_api/mutex.rs.html | 1453 +++++++
 src/lock_api/remutex.rs.html | 1709 ++++++++
 src/lock_api/rwlock.rs.html | 3557 +++++++++++++++++
 src/scopeguard/lib.rs.html | 1161 ++++++
 src/spin/barrier.rs.html | 471 +++
 src/spin/lazy.rs.html | 229 ++
 src/spin/lib.rs.html | 377 ++
 src/spin/mutex.rs.html | 653 +++
 src/spin/mutex/spin.rs.html | 1031 +++++
 src/spin/once.rs.html | 1269 ++++++
 src/spin/relax.rs.html | 121 +
 src/spin/rwlock.rs.html | 2245 +++++++++++
 149 files changed, 17731 insertions(+), 7 deletions(-)
 create mode 100644 implementors/lock_api/mutex/trait.RawMutex.js
 create mode 100644 implementors/lock_api/rwlock/trait.RawRwLock.js
 create mode 100644 implementors/lock_api/rwlock/trait.RawRwLockDowngrade.js
 create mode 100644 implementors/lock_api/rwlock/trait.RawRwLockUpgrade.js
 create mode 100644 implementors/scopeguard/trait.Strategy.js
 create mode 100644 implementors/spin/relax/trait.RelaxStrategy.js
 create mode 100644 lock_api/all.html
 create mode 100644 lock_api/index.html
 create mode 100644 lock_api/mutex/struct.MappedMutexGuard.html
 create mode 100644 lock_api/mutex/struct.Mutex.html
 create mode 100644 lock_api/mutex/struct.MutexGuard.html
 create mode 100644 lock_api/mutex/trait.RawMutex.html
 create mode 100644 lock_api/mutex/trait.RawMutexFair.html
 create mode 100644 lock_api/mutex/trait.RawMutexTimed.html
 create mode 100644 lock_api/remutex/struct.MappedReentrantMutexGuard.html
 create mode 100644 lock_api/remutex/struct.RawReentrantMutex.html
 create mode 100644 lock_api/remutex/struct.ReentrantMutex.html
 create mode 100644 lock_api/remutex/struct.ReentrantMutexGuard.html
 create mode 100644 lock_api/remutex/trait.GetThreadId.html
 create mode 100644 lock_api/rwlock/struct.MappedRwLockReadGuard.html
 create mode 100644 lock_api/rwlock/struct.MappedRwLockWriteGuard.html
 create mode 100644 lock_api/rwlock/struct.RwLock.html
 create mode 100644 lock_api/rwlock/struct.RwLockReadGuard.html
 create mode 100644 lock_api/rwlock/struct.RwLockUpgradableReadGuard.html
 create mode 100644 lock_api/rwlock/struct.RwLockWriteGuard.html
 create mode 100644 lock_api/rwlock/trait.RawRwLock.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockDowngrade.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockFair.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockRecursive.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockRecursiveTimed.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockTimed.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockUpgrade.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockUpgradeDowngrade.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockUpgradeFair.html
 create mode 100644 lock_api/rwlock/trait.RawRwLockUpgradeTimed.html
 create mode 100644 lock_api/sidebar-items.js
 create mode 100644 lock_api/struct.GuardNoSend.html
 create mode 100644 lock_api/struct.GuardSend.html
 create mode 100644 lock_api/struct.MappedMutexGuard.html
 create mode 100644 lock_api/struct.MappedReentrantMutexGuard.html
 create mode 100644 lock_api/struct.MappedRwLockReadGuard.html
 create mode 100644 lock_api/struct.MappedRwLockWriteGuard.html
 create mode 100644 lock_api/struct.Mutex.html
 create mode 100644 lock_api/struct.MutexGuard.html
 create mode 100644 lock_api/struct.RawReentrantMutex.html
 create mode 100644 lock_api/struct.ReentrantMutex.html
 create mode 100644 lock_api/struct.ReentrantMutexGuard.html
 create mode 100644 lock_api/struct.RwLock.html
 create mode 100644 lock_api/struct.RwLockReadGuard.html
 create mode 100644 lock_api/struct.RwLockUpgradableReadGuard.html
 create mode 100644 lock_api/struct.RwLockWriteGuard.html
 create mode 100644 lock_api/trait.GetThreadId.html
 create mode 100644 lock_api/trait.RawMutex.html
 create mode 100644 lock_api/trait.RawMutexFair.html
 create mode 100644 lock_api/trait.RawMutexTimed.html
 create mode 100644 lock_api/trait.RawRwLock.html
 create mode 100644 lock_api/trait.RawRwLockDowngrade.html
 create mode 100644 lock_api/trait.RawRwLockFair.html
 create mode 100644 lock_api/trait.RawRwLockRecursive.html
 create mode 100644 lock_api/trait.RawRwLockRecursiveTimed.html
 create mode 100644 lock_api/trait.RawRwLockTimed.html
 create mode 100644 lock_api/trait.RawRwLockUpgrade.html
 create mode 100644 lock_api/trait.RawRwLockUpgradeDowngrade.html
 create mode 100644 lock_api/trait.RawRwLockUpgradeFair.html
 create mode 100644 lock_api/trait.RawRwLockUpgradeTimed.html
 create mode 100644 scopeguard/all.html
 create mode 100644 scopeguard/enum.Always.html
 create mode 100644 scopeguard/fn.guard.html
 create mode 100644 scopeguard/index.html
 create mode 100644 scopeguard/macro.defer!.html
 create mode 100644 scopeguard/macro.defer.html
 create mode 100644 scopeguard/sidebar-items.js
 create mode 100644 scopeguard/struct.ScopeGuard.html
 create mode 100644 scopeguard/trait.Strategy.html
 create mode 100644 spin/all.html
 create mode 100644 spin/barrier/index.html
 create mode 100644 spin/barrier/sidebar-items.js
 create mode 100644 spin/barrier/struct.Barrier.html
 create mode 100644 spin/barrier/struct.BarrierWaitResult.html
 create mode 100644 spin/index.html
 create mode 100644 spin/lazy/index.html
 create mode 100644 spin/lazy/sidebar-items.js
 create mode 100644 spin/lazy/struct.Lazy.html
 create mode 100644 spin/lock_api/index.html
 create mode 100644 spin/lock_api/sidebar-items.js
 create mode 100644 spin/lock_api/type.Mutex.html
 create mode 100644 spin/lock_api/type.MutexGuard.html
 create mode 100644 spin/lock_api/type.RwLock.html
 create mode 100644 spin/lock_api/type.RwLockReadGuard.html
 create mode 100644 spin/lock_api/type.RwLockUpgradableReadGuard.html
 create mode 100644 spin/lock_api/type.RwLockWriteGuard.html
 create mode 100644 spin/mutex/index.html
 create mode 100644 spin/mutex/sidebar-items.js
 create mode 100644 spin/mutex/spin/index.html
 create mode 100644 spin/mutex/spin/sidebar-items.js
 create mode 100644 spin/mutex/spin/struct.SpinMutex.html
 create mode 100644 spin/mutex/spin/struct.SpinMutexGuard.html
 create mode 100644 spin/mutex/struct.Mutex.html
 create mode 100644 spin/mutex/struct.MutexGuard.html
 create mode 100644 spin/once/index.html
 create mode 100644 spin/once/sidebar-items.js
 create mode 100644 spin/once/struct.Once.html
 create mode 100644 spin/relax/index.html
 create mode 100644 spin/relax/sidebar-items.js
 create mode 100644 spin/relax/struct.Loop.html
 create mode 100644 spin/relax/struct.Spin.html
 create mode 100644 spin/relax/trait.RelaxStrategy.html
 create mode 100644 spin/rwlock/index.html
 create mode 100644 spin/rwlock/sidebar-items.js
 create mode 100644 spin/rwlock/struct.RwLock.html
 create mode 100644 spin/rwlock/struct.RwLockReadGuard.html
 create mode 100644 spin/rwlock/struct.RwLockUpgradableGuard.html
 create mode 100644 spin/rwlock/struct.RwLockWriteGuard.html
 create mode 100644 spin/sidebar-items.js
 create mode 100644 spin/type.Barrier.html
 create mode 100644 spin/type.Lazy.html
 create mode 100644 spin/type.Mutex.html
 create mode 100644 spin/type.Once.html
 create mode 100644 spin/type.RwLock.html
 create mode 100644 spin/type.RwLockUpgradableGuard.html
 create mode 100644 spin/type.RwLockWriteGuard.html
 create mode 100644 src/lock_api/lib.rs.html
 create mode 100644 src/lock_api/mutex.rs.html
 create mode 100644 src/lock_api/remutex.rs.html
 create mode 100644 src/lock_api/rwlock.rs.html
 create mode 100644 src/scopeguard/lib.rs.html
 create mode 100644 src/spin/barrier.rs.html
 create mode 100644 src/spin/lazy.rs.html
 create mode 100644 src/spin/lib.rs.html
 create mode 100644 src/spin/mutex.rs.html
 create mode 100644 src/spin/mutex/spin.rs.html
 create mode 100644 src/spin/once.rs.html
 create mode 100644 src/spin/relax.rs.html
 create mode 100644 src/spin/rwlock.rs.html

diff --git a/crates.js b/crates.js
index af1c5b4f..2a3a563e 100644
--- a/crates.js
+++ b/crates.js
@@ -1 +1 @@
-window.ALL_CRATES = ["byteorder","hash32","heapless","stable_deref_trait"];
\ No newline at end of file
+window.ALL_CRATES = ["byteorder","hash32","heapless","lock_api","scopeguard","spin","stable_deref_trait"];
\ No newline at end of file
diff --git a/implementors/core/convert/trait.From.js b/implementors/core/convert/trait.From.js
index 8dac861c..f3e19c1e 100644
--- a/implementors/core/convert/trait.From.js
+++ b/implementors/core/convert/trait.From.js
@@ -1,3 +1,5 @@
 (function() {var implementors = {};
 implementors["heapless"] = [{"text":"impl<'a, const N: usize> From<&'a str> for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<const N: usize> From<i8> for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<const N: usize> From<i16> for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<const N: usize> From<i32> for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<const N: usize> From<i64> for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<const N: usize> From<u8> for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<const N: usize> From<u16> for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<const N: usize> From<u32> for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<const N: usize> From<u64> for String<N>","synthetic":false,"types":["heapless::string::String"]}];
+implementors["lock_api"] = [{"text":"impl<R: RawMutex, T> From<T> for Mutex<R, T>","synthetic":false,"types":["lock_api::mutex::Mutex"]},{"text":"impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutex"]},{"text":"impl<R: RawRwLock, T> From<T> for RwLock<R, T>","synthetic":false,"types":["lock_api::rwlock::RwLock"]}];
+implementors["spin"] = [{"text":"impl<T, R> From<T> for SpinMutex<T, R>","synthetic":false,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<T, R> From<T> for Mutex<T, R>","synthetic":false,"types":["spin::mutex::Mutex"]},{"text":"impl<T, R> From<T> for Once<T, R>","synthetic":false,"types":["spin::once::Once"]},{"text":"impl<T, R> From<T> for RwLock<T, R>","synthetic":false,"types":["spin::rwlock::RwLock"]}];
 if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})()
\ No newline at end of file
diff --git a/implementors/core/default/trait.Default.js b/implementors/core/default/trait.Default.js
index ff165254..f471296f 100644
--- a/implementors/core/default/trait.Default.js
+++ b/implementors/core/default/trait.Default.js
@@ -2,4 +2,6 @@
 implementors["byteorder"] = [{"text":"impl Default for BigEndian","synthetic":false,"types":["byteorder::BigEndian"]},{"text":"impl Default for LittleEndian","synthetic":false,"types":["byteorder::LittleEndian"]}];
 implementors["hash32"] = [{"text":"impl Default for Hasher","synthetic":false,"types":["hash32::fnv::Hasher"]},{"text":"impl Default for Hasher","synthetic":false,"types":["hash32::murmur3::Hasher"]},{"text":"impl<H> Default for BuildHasherDefault<H> where
    H: Default + Hasher
","synthetic":false,"types":["hash32::BuildHasherDefault"]}]; implementors["heapless"] = [{"text":"impl<T, const N: usize> Default for Deque<T, N>","synthetic":false,"types":["heapless::deque::Deque"]},{"text":"impl<T, const N: usize> Default for HistoryBuffer<T, N>","synthetic":false,"types":["heapless::histbuf::HistoryBuffer"]},{"text":"impl<K, V, S, const N: usize> Default for IndexMap<K, V, S, N> where
    K: Eq + Hash,
    S: BuildHasher + Default
","synthetic":false,"types":["heapless::indexmap::IndexMap"]},{"text":"impl<T, S, const N: usize> Default for IndexSet<T, S, N> where
    T: Eq + Hash,
    S: BuildHasher + Default
","synthetic":false,"types":["heapless::indexset::IndexSet"]},{"text":"impl<K, V, const N: usize> Default for LinearMap<K, V, N> where
    K: Eq
","synthetic":false,"types":["heapless::linear_map::LinearMap"]},{"text":"impl<const N: usize> Default for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<T, const N: usize> Default for Vec<T, N>","synthetic":false,"types":["heapless::vec::Vec"]},{"text":"impl<T, K, const N: usize> Default for BinaryHeap<T, K, N> where
    T: Ord,
    K: Kind, 
","synthetic":false,"types":["heapless::binary_heap::BinaryHeap"]},{"text":"impl<T, const N: usize> Default for MpMcQueue<T, N>","synthetic":false,"types":["heapless::mpmc::MpMcQueue"]},{"text":"impl<T, const N: usize> Default for Queue<T, N>","synthetic":false,"types":["heapless::spsc::Queue"]}]; +implementors["lock_api"] = [{"text":"impl<R: RawMutex, T: ?Sized + Default> Default for Mutex<R, T>","synthetic":false,"types":["lock_api::mutex::Mutex"]},{"text":"impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutex"]},{"text":"impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T>","synthetic":false,"types":["lock_api::rwlock::RwLock"]}]; +implementors["spin"] = [{"text":"impl<T: Default, R> Default for Lazy<T, fn() -> T, R>","synthetic":false,"types":["spin::lazy::Lazy"]},{"text":"impl<T: ?Sized + Default, R> Default for SpinMutex<T, R>","synthetic":false,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<T: ?Sized + Default, R> Default for Mutex<T, R>","synthetic":false,"types":["spin::mutex::Mutex"]},{"text":"impl<T: ?Sized + Default, R> Default for RwLock<T, R>","synthetic":false,"types":["spin::rwlock::RwLock"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/fmt/trait.Debug.js b/implementors/core/fmt/trait.Debug.js index e1cc9734..3d4d9be0 100644 --- a/implementors/core/fmt/trait.Debug.js +++ b/implementors/core/fmt/trait.Debug.js @@ -2,4 +2,7 @@ implementors["byteorder"] = [{"text":"impl Debug for BigEndian","synthetic":false,"types":["byteorder::BigEndian"]},{"text":"impl Debug for LittleEndian","synthetic":false,"types":["byteorder::LittleEndian"]}]; implementors["hash32"] = [{"text":"impl<H: Default + Hasher> Debug for BuildHasherDefault<H>","synthetic":false,"types":["hash32::BuildHasherDefault"]}]; implementors["heapless"] = [{"text":"impl<T, const N: usize> Debug for HistoryBuffer<T, N> where
    T: Debug
","synthetic":false,"types":["heapless::histbuf::HistoryBuffer"]},{"text":"impl<K, V, S, const N: usize> Debug for IndexMap<K, V, S, N> where
    K: Eq + Hash + Debug,
    V: Debug,
    S: BuildHasher
","synthetic":false,"types":["heapless::indexmap::IndexMap"]},{"text":"impl<T, S, const N: usize> Debug for IndexSet<T, S, N> where
    T: Eq + Hash + Debug,
    S: BuildHasher
","synthetic":false,"types":["heapless::indexset::IndexSet"]},{"text":"impl<K, V, const N: usize> Debug for LinearMap<K, V, N> where
    K: Eq + Debug,
    V: Debug
","synthetic":false,"types":["heapless::linear_map::LinearMap"]},{"text":"impl<const N: usize> Debug for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<T, const N: usize> Debug for Vec<T, N> where
    T: Debug
","synthetic":false,"types":["heapless::vec::Vec"]},{"text":"impl<T, K, const N: usize> Debug for BinaryHeap<T, K, N> where
    K: Kind,
    T: Ord + Debug
","synthetic":false,"types":["heapless::binary_heap::BinaryHeap"]},{"text":"impl<P> Debug for Box<P> where
    P: Pool,
    P::Data: Debug
","synthetic":false,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T> Debug for Box<T> where
    T: Debug
","synthetic":false,"types":["heapless::pool::Box"]},{"text":"impl Debug for LinkedIndexU8","synthetic":false,"types":["heapless::sorted_linked_list::LinkedIndexU8"]},{"text":"impl Debug for LinkedIndexU16","synthetic":false,"types":["heapless::sorted_linked_list::LinkedIndexU16"]},{"text":"impl Debug for LinkedIndexUsize","synthetic":false,"types":["heapless::sorted_linked_list::LinkedIndexUsize"]},{"text":"impl<T, Idx, Kind, const N: usize> Debug for SortedLinkedList<T, Idx, Kind, N> where
    T: Ord + Debug,
    Idx: SortedLinkedListIndex,
    Kind: LLKind, 
","synthetic":false,"types":["heapless::sorted_linked_list::SortedLinkedList"]},{"text":"impl<T, const N: usize> Debug for Queue<T, N> where
    T: Debug
","synthetic":false,"types":["heapless::spsc::Queue"]}]; +implementors["lock_api"] = [{"text":"impl<R: RawMutex, T: ?Sized + Debug> Debug for Mutex<R, T>","synthetic":false,"types":["lock_api::mutex::Mutex"]},{"text":"impl<'a, R: RawMutex + 'a, T: Debug + ?Sized + 'a> Debug for MutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, T: Debug + ?Sized + 'a> Debug for MappedMutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<R: RawMutex, G: GetThreadId, T: ?Sized + Debug> Debug for ReentrantMutex<R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutex"]},{"text":"impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: Debug + ?Sized + 'a> Debug for ReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: Debug + ?Sized + 'a> Debug for MappedReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::MappedReentrantMutexGuard"]},{"text":"impl<R: RawRwLock, T: ?Sized + Debug> Debug for RwLock<R, T>","synthetic":false,"types":["lock_api::rwlock::RwLock"]},{"text":"impl<'a, R: RawRwLock + 'a, T: Debug + ?Sized + 'a> Debug for RwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: Debug + ?Sized + 'a> Debug for RwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, R: RawRwLockUpgrade + 'a, T: Debug + ?Sized + 'a> Debug for RwLockUpgradableReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockUpgradableReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: Debug + ?Sized + 'a> Debug for MappedRwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: Debug + ?Sized + 'a> Debug for MappedRwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]}]; +implementors["scopeguard"] = [{"text":"impl Debug for Always","synthetic":false,"types":["scopeguard::Always"]},{"text":"impl<T, F, S> Debug for ScopeGuard<T, F, S> where
    T: Debug,
    F: FnOnce(T),
    S: Strategy
","synthetic":false,"types":["scopeguard::ScopeGuard"]}]; +implementors["spin"] = [{"text":"impl<T: Debug, F, R> Debug for Lazy<T, F, R>","synthetic":false,"types":["spin::lazy::Lazy"]},{"text":"impl<T: ?Sized + Debug, R> Debug for SpinMutex<T, R>","synthetic":false,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<'a, T: ?Sized + Debug> Debug for SpinMutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<T: ?Sized + Debug, R> Debug for Mutex<T, R>","synthetic":false,"types":["spin::mutex::Mutex"]},{"text":"impl<'a, T: ?Sized + Debug> Debug for MutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::MutexGuard"]},{"text":"impl<T: Debug, R> Debug for Once<T, R>","synthetic":false,"types":["spin::once::Once"]},{"text":"impl<T: ?Sized + Debug, R> Debug for RwLock<T, R>","synthetic":false,"types":["spin::rwlock::RwLock"]},{"text":"impl<'rwlock, T: ?Sized + Debug> Debug for RwLockReadGuard<'rwlock, T>","synthetic":false,"types":["spin::rwlock::RwLockReadGuard"]},{"text":"impl<'rwlock, T: ?Sized + Debug, R> Debug for RwLockUpgradableGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockUpgradableGuard"]},{"text":"impl<'rwlock, T: ?Sized + Debug, R> Debug for RwLockWriteGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockWriteGuard"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/fmt/trait.Display.js b/implementors/core/fmt/trait.Display.js index 3112799b..349e65e7 100644 --- a/implementors/core/fmt/trait.Display.js +++ b/implementors/core/fmt/trait.Display.js @@ -1,3 +1,5 @@ (function() {var implementors = {}; implementors["heapless"] = [{"text":"impl<const N: usize> Display for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<P> Display for Box<P> where
    P: Pool,
    P::Data: Display
","synthetic":false,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T> Display for Box<T> where
    T: Display
","synthetic":false,"types":["heapless::pool::Box"]}]; +implementors["lock_api"] = [{"text":"impl<'a, R: RawMutex + 'a, T: Display + ?Sized + 'a> Display for MutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, T: Display + ?Sized + 'a> Display for MappedMutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: Display + ?Sized + 'a> Display for ReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: Display + ?Sized + 'a> Display for MappedReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::MappedReentrantMutexGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: Display + ?Sized + 'a> Display for RwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: Display + ?Sized + 'a> Display for RwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, R: RawRwLockUpgrade + 'a, T: Display + ?Sized + 'a> Display for RwLockUpgradableReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockUpgradableReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: Display + ?Sized + 'a> Display for MappedRwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: Display + ?Sized + 'a> Display for MappedRwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]}]; +implementors["spin"] = [{"text":"impl<'a, T: ?Sized + Display> Display for SpinMutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<'a, T: ?Sized + Display> Display for MutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::MutexGuard"]},{"text":"impl<'rwlock, T: ?Sized + Display> Display for RwLockReadGuard<'rwlock, T>","synthetic":false,"types":["spin::rwlock::RwLockReadGuard"]},{"text":"impl<'rwlock, T: ?Sized + Display, R> Display for RwLockUpgradableGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockUpgradableGuard"]},{"text":"impl<'rwlock, T: ?Sized + Display, R> Display for RwLockWriteGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockWriteGuard"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Freeze.js b/implementors/core/marker/trait.Freeze.js index 4c5ff066..db21d7aa 100644 --- a/implementors/core/marker/trait.Freeze.js +++ b/implementors/core/marker/trait.Freeze.js @@ -2,4 +2,7 @@ implementors["byteorder"] = [{"text":"impl Freeze for BigEndian","synthetic":true,"types":["byteorder::BigEndian"]},{"text":"impl Freeze for LittleEndian","synthetic":true,"types":["byteorder::LittleEndian"]}]; implementors["hash32"] = [{"text":"impl Freeze for Hasher","synthetic":true,"types":["hash32::fnv::Hasher"]},{"text":"impl Freeze for Hasher","synthetic":true,"types":["hash32::murmur3::Hasher"]},{"text":"impl<H> Freeze for BuildHasherDefault<H>","synthetic":true,"types":["hash32::BuildHasherDefault"]}]; implementors["heapless"] = [{"text":"impl<T, const N: usize> Freeze for Deque<T, N> where
    T: Freeze, 
","synthetic":true,"types":["heapless::deque::Deque"]},{"text":"impl<T, const N: usize> Freeze for HistoryBuffer<T, N> where
    T: Freeze, 
","synthetic":true,"types":["heapless::histbuf::HistoryBuffer"]},{"text":"impl<K, V, S, const N: usize> Freeze for IndexMap<K, V, S, N> where
    K: Freeze,
    S: Freeze,
    V: Freeze, 
","synthetic":true,"types":["heapless::indexmap::IndexMap"]},{"text":"impl<T, S, const N: usize> Freeze for IndexSet<T, S, N> where
    S: Freeze,
    T: Freeze, 
","synthetic":true,"types":["heapless::indexset::IndexSet"]},{"text":"impl<K, V, const N: usize> Freeze for LinearMap<K, V, N> where
    K: Freeze,
    V: Freeze, 
","synthetic":true,"types":["heapless::linear_map::LinearMap"]},{"text":"impl<const N: usize> Freeze for String<N>","synthetic":true,"types":["heapless::string::String"]},{"text":"impl<T, const N: usize> Freeze for Vec<T, N> where
    T: Freeze, 
","synthetic":true,"types":["heapless::vec::Vec"]},{"text":"impl Freeze for Min","synthetic":true,"types":["heapless::binary_heap::Min"]},{"text":"impl Freeze for Max","synthetic":true,"types":["heapless::binary_heap::Max"]},{"text":"impl<T, K, const N: usize> Freeze for BinaryHeap<T, K, N> where
    T: Freeze, 
","synthetic":true,"types":["heapless::binary_heap::BinaryHeap"]},{"text":"impl<'a, T, K, const N: usize> Freeze for PeekMut<'a, T, K, N>","synthetic":true,"types":["heapless::binary_heap::PeekMut"]},{"text":"impl<T, const N: usize> !Freeze for MpMcQueue<T, N>","synthetic":true,"types":["heapless::mpmc::MpMcQueue"]},{"text":"impl<POOL, STATE> Freeze for Box<POOL, STATE>","synthetic":true,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T> !Freeze for Node<T>","synthetic":true,"types":["heapless::pool::stack::Node"]},{"text":"impl<T> !Freeze for Pool<T>","synthetic":true,"types":["heapless::pool::Pool"]},{"text":"impl<T, STATE> Freeze for Box<T, STATE>","synthetic":true,"types":["heapless::pool::Box"]},{"text":"impl Freeze for Uninit","synthetic":true,"types":["heapless::pool::Uninit"]},{"text":"impl Freeze for Init","synthetic":true,"types":["heapless::pool::Init"]},{"text":"impl Freeze for Min","synthetic":true,"types":["heapless::sorted_linked_list::Min"]},{"text":"impl Freeze for Max","synthetic":true,"types":["heapless::sorted_linked_list::Max"]},{"text":"impl<T, Idx> Freeze for Node<T, Idx> where
    Idx: Freeze,
    T: Freeze, 
","synthetic":true,"types":["heapless::sorted_linked_list::Node"]},{"text":"impl<T, Idx, Kind, const N: usize> Freeze for SortedLinkedList<T, Idx, Kind, N> where
    Idx: Freeze,
    T: Freeze, 
","synthetic":true,"types":["heapless::sorted_linked_list::SortedLinkedList"]},{"text":"impl Freeze for LinkedIndexU8","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexU8"]},{"text":"impl Freeze for LinkedIndexU16","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexU16"]},{"text":"impl Freeze for LinkedIndexUsize","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexUsize"]},{"text":"impl<'a, T, Idx, Kind, const N: usize> Freeze for Iter<'a, T, Idx, Kind, N> where
    Idx: Freeze, 
","synthetic":true,"types":["heapless::sorted_linked_list::Iter"]},{"text":"impl<'a, T, Idx, Kind, const N: usize> Freeze for FindMut<'a, T, Idx, Kind, N> where
    Idx: Freeze, 
","synthetic":true,"types":["heapless::sorted_linked_list::FindMut"]},{"text":"impl<T, const N: usize> !Freeze for Queue<T, N>","synthetic":true,"types":["heapless::spsc::Queue"]},{"text":"impl<'a, T, const N: usize> Freeze for Iter<'a, T, N>","synthetic":true,"types":["heapless::spsc::Iter"]},{"text":"impl<'a, T, const N: usize> Freeze for IterMut<'a, T, N>","synthetic":true,"types":["heapless::spsc::IterMut"]},{"text":"impl<'a, T, const N: usize> Freeze for Consumer<'a, T, N>","synthetic":true,"types":["heapless::spsc::Consumer"]},{"text":"impl<'a, T, const N: usize> Freeze for Producer<'a, T, N>","synthetic":true,"types":["heapless::spsc::Producer"]}]; +implementors["lock_api"] = [{"text":"impl<R, T> !Freeze for Mutex<R, T>","synthetic":true,"types":["lock_api::mutex::Mutex"]},{"text":"impl<'a, R, T: ?Sized> Freeze for MutexGuard<'a, R, T>","synthetic":true,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R, T: ?Sized> Freeze for MappedMutexGuard<'a, R, T>","synthetic":true,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<R, G> !Freeze for RawReentrantMutex<R, G>","synthetic":true,"types":["lock_api::remutex::RawReentrantMutex"]},{"text":"impl<R, G, T> !Freeze for ReentrantMutex<R, G, T>","synthetic":true,"types":["lock_api::remutex::ReentrantMutex"]},{"text":"impl<'a, R, G, T: ?Sized> Freeze for ReentrantMutexGuard<'a, R, G, T>","synthetic":true,"types":["lock_api::remutex::ReentrantMutexGuard"]},{"text":"impl<'a, R, G, T: ?Sized> Freeze for MappedReentrantMutexGuard<'a, R, G, T>","synthetic":true,"types":["lock_api::remutex::MappedReentrantMutexGuard"]},{"text":"impl<R, T> !Freeze for RwLock<R, T>","synthetic":true,"types":["lock_api::rwlock::RwLock"]},{"text":"impl<'a, R, T: ?Sized> Freeze for RwLockReadGuard<'a, R, T>","synthetic":true,"types":["lock_api::rwlock::RwLockReadGuard"]},{"text":"impl<'a, R, T: ?Sized> Freeze for RwLockWriteGuard<'a, R, T>","synthetic":true,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, R, T: ?Sized> Freeze for RwLockUpgradableReadGuard<'a, R, T>","synthetic":true,"types":["lock_api::rwlock::RwLockUpgradableReadGuard"]},{"text":"impl<'a, R, T: ?Sized> Freeze for MappedRwLockReadGuard<'a, R, T>","synthetic":true,"types":["lock_api::rwlock::MappedRwLockReadGuard"]},{"text":"impl<'a, R, T: ?Sized> Freeze for MappedRwLockWriteGuard<'a, R, T>","synthetic":true,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]},{"text":"impl Freeze for GuardSend","synthetic":true,"types":["lock_api::GuardSend"]},{"text":"impl Freeze for GuardNoSend","synthetic":true,"types":["lock_api::GuardNoSend"]}]; +implementors["scopeguard"] = [{"text":"impl Freeze for Always","synthetic":true,"types":["scopeguard::Always"]},{"text":"impl<T, F, S> Freeze for ScopeGuard<T, F, S> where
    F: Freeze,
    T: Freeze, 
","synthetic":true,"types":["scopeguard::ScopeGuard"]}]; +implementors["spin"] = [{"text":"impl<R = Spin> !Freeze for Barrier<R>","synthetic":true,"types":["spin::barrier::Barrier"]},{"text":"impl Freeze for BarrierWaitResult","synthetic":true,"types":["spin::barrier::BarrierWaitResult"]},{"text":"impl<T, F = fn() -> T, R = Spin> !Freeze for Lazy<T, F, R>","synthetic":true,"types":["spin::lazy::Lazy"]},{"text":"impl<T, R = Spin> !Freeze for SpinMutex<T, R>","synthetic":true,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<'a, T: ?Sized> Freeze for SpinMutexGuard<'a, T>","synthetic":true,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<T, R = Spin> !Freeze for Mutex<T, R>","synthetic":true,"types":["spin::mutex::Mutex"]},{"text":"impl<'a, T: ?Sized> Freeze for MutexGuard<'a, T>","synthetic":true,"types":["spin::mutex::MutexGuard"]},{"text":"impl<T = (), R = Spin> !Freeze for Once<T, R>","synthetic":true,"types":["spin::once::Once"]},{"text":"impl<T, R = Spin> !Freeze for RwLock<T, R>","synthetic":true,"types":["spin::rwlock::RwLock"]},{"text":"impl<'a, T: ?Sized> Freeze for RwLockReadGuard<'a, T>","synthetic":true,"types":["spin::rwlock::RwLockReadGuard"]},{"text":"impl<'a, T: ?Sized, R> Freeze for RwLockWriteGuard<'a, T, R>","synthetic":true,"types":["spin::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, T: ?Sized, R> Freeze for RwLockUpgradableGuard<'a, T, R>","synthetic":true,"types":["spin::rwlock::RwLockUpgradableGuard"]},{"text":"impl Freeze for Spin","synthetic":true,"types":["spin::relax::Spin"]},{"text":"impl Freeze for Loop","synthetic":true,"types":["spin::relax::Loop"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Send.js b/implementors/core/marker/trait.Send.js index 87501a3a..f279ccbe 100644 --- a/implementors/core/marker/trait.Send.js +++ b/implementors/core/marker/trait.Send.js @@ -2,4 +2,7 @@ implementors["byteorder"] = [{"text":"impl Send for BigEndian","synthetic":true,"types":["byteorder::BigEndian"]},{"text":"impl Send for LittleEndian","synthetic":true,"types":["byteorder::LittleEndian"]}]; implementors["hash32"] = [{"text":"impl Send for Hasher","synthetic":true,"types":["hash32::fnv::Hasher"]},{"text":"impl Send for Hasher","synthetic":true,"types":["hash32::murmur3::Hasher"]},{"text":"impl<H> Send for BuildHasherDefault<H> where
    H: Send
","synthetic":true,"types":["hash32::BuildHasherDefault"]}]; implementors["heapless"] = [{"text":"impl<T, const N: usize> Send for Deque<T, N> where
    T: Send
","synthetic":true,"types":["heapless::deque::Deque"]},{"text":"impl<T, const N: usize> Send for HistoryBuffer<T, N> where
    T: Send
","synthetic":true,"types":["heapless::histbuf::HistoryBuffer"]},{"text":"impl<K, V, S, const N: usize> Send for IndexMap<K, V, S, N> where
    K: Send,
    S: Send,
    V: Send
","synthetic":true,"types":["heapless::indexmap::IndexMap"]},{"text":"impl<T, S, const N: usize> Send for IndexSet<T, S, N> where
    S: Send,
    T: Send
","synthetic":true,"types":["heapless::indexset::IndexSet"]},{"text":"impl<K, V, const N: usize> Send for LinearMap<K, V, N> where
    K: Send,
    V: Send
","synthetic":true,"types":["heapless::linear_map::LinearMap"]},{"text":"impl<const N: usize> Send for String<N>","synthetic":true,"types":["heapless::string::String"]},{"text":"impl<T, const N: usize> Send for Vec<T, N> where
    T: Send
","synthetic":true,"types":["heapless::vec::Vec"]},{"text":"impl Send for Min","synthetic":true,"types":["heapless::binary_heap::Min"]},{"text":"impl Send for Max","synthetic":true,"types":["heapless::binary_heap::Max"]},{"text":"impl<T, K, const N: usize> Send for BinaryHeap<T, K, N> where
    K: Send,
    T: Send
","synthetic":true,"types":["heapless::binary_heap::BinaryHeap"]},{"text":"impl<'a, T, K, const N: usize> Send for PeekMut<'a, T, K, N> where
    K: Send,
    T: Send
","synthetic":true,"types":["heapless::binary_heap::PeekMut"]},{"text":"impl<T, const N: usize> Send for MpMcQueue<T, N> where
    T: Send
","synthetic":true,"types":["heapless::mpmc::MpMcQueue"]},{"text":"impl<T> !Send for Node<T>","synthetic":true,"types":["heapless::pool::stack::Node"]},{"text":"impl Send for Uninit","synthetic":true,"types":["heapless::pool::Uninit"]},{"text":"impl Send for Init","synthetic":true,"types":["heapless::pool::Init"]},{"text":"impl Send for Min","synthetic":true,"types":["heapless::sorted_linked_list::Min"]},{"text":"impl Send for Max","synthetic":true,"types":["heapless::sorted_linked_list::Max"]},{"text":"impl<T, Idx> Send for Node<T, Idx> where
    Idx: Send,
    T: Send
","synthetic":true,"types":["heapless::sorted_linked_list::Node"]},{"text":"impl<T, Idx, Kind, const N: usize> Send for SortedLinkedList<T, Idx, Kind, N> where
    Idx: Send,
    Kind: Send,
    T: Send
","synthetic":true,"types":["heapless::sorted_linked_list::SortedLinkedList"]},{"text":"impl Send for LinkedIndexU8","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexU8"]},{"text":"impl Send for LinkedIndexU16","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexU16"]},{"text":"impl Send for LinkedIndexUsize","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexUsize"]},{"text":"impl<'a, T, Idx, Kind, const N: usize> Send for Iter<'a, T, Idx, Kind, N> where
    Idx: Send + Sync,
    Kind: Sync,
    T: Sync
","synthetic":true,"types":["heapless::sorted_linked_list::Iter"]},{"text":"impl<'a, T, Idx, Kind, const N: usize> Send for FindMut<'a, T, Idx, Kind, N> where
    Idx: Send,
    Kind: Send,
    T: Send
","synthetic":true,"types":["heapless::sorted_linked_list::FindMut"]},{"text":"impl<T, const N: usize> Send for Queue<T, N> where
    T: Send
","synthetic":true,"types":["heapless::spsc::Queue"]},{"text":"impl<'a, T, const N: usize> !Send for Iter<'a, T, N>","synthetic":true,"types":["heapless::spsc::Iter"]},{"text":"impl<'a, T, const N: usize> Send for IterMut<'a, T, N> where
    T: Send
","synthetic":true,"types":["heapless::spsc::IterMut"]},{"text":"impl<P, S> Send for Box<P, S> where
    P: Pool,
    P::Data: Send
","synthetic":false,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T> Send for Pool<T>","synthetic":false,"types":["heapless::pool::Pool"]},{"text":"impl<T, S> Send for Box<T, S> where
    T: Send
","synthetic":false,"types":["heapless::pool::Box"]},{"text":"impl<'a, T, const N: usize> Send for Consumer<'a, T, N> where
    T: Send
","synthetic":false,"types":["heapless::spsc::Consumer"]},{"text":"impl<'a, T, const N: usize> Send for Producer<'a, T, N> where
    T: Send
","synthetic":false,"types":["heapless::spsc::Producer"]}]; +implementors["lock_api"] = [{"text":"impl<'a, R, T: ?Sized> Send for MutexGuard<'a, R, T> where
    R: Sync,
    T: Send,
    <R as RawMutex>::GuardMarker: Send
","synthetic":true,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R, G, T> !Send for ReentrantMutexGuard<'a, R, G, T>","synthetic":true,"types":["lock_api::remutex::ReentrantMutexGuard"]},{"text":"impl<'a, R, G, T> !Send for MappedReentrantMutexGuard<'a, R, G, T>","synthetic":true,"types":["lock_api::remutex::MappedReentrantMutexGuard"]},{"text":"impl<'a, R, T: ?Sized> Send for RwLockReadGuard<'a, R, T> where
    R: Sync,
    T: Send + Sync,
    <R as RawRwLock>::GuardMarker: Send
","synthetic":true,"types":["lock_api::rwlock::RwLockReadGuard"]},{"text":"impl<'a, R, T: ?Sized> Send for RwLockWriteGuard<'a, R, T> where
    R: Sync,
    T: Send + Sync,
    <R as RawRwLock>::GuardMarker: Send
","synthetic":true,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, R, T: ?Sized> Send for RwLockUpgradableReadGuard<'a, R, T> where
    R: Sync,
    T: Send + Sync,
    <R as RawRwLock>::GuardMarker: Send
","synthetic":true,"types":["lock_api::rwlock::RwLockUpgradableReadGuard"]},{"text":"impl Send for GuardSend","synthetic":true,"types":["lock_api::GuardSend"]},{"text":"impl !Send for GuardNoSend","synthetic":true,"types":["lock_api::GuardNoSend"]},{"text":"impl<R: RawMutex + Send, T: ?Sized + Send> Send for Mutex<R, T>","synthetic":false,"types":["lock_api::mutex::Mutex"]},{"text":"impl<'a, R: RawMutex + 'a, T: ?Sized + Send + 'a> Send for MappedMutexGuard<'a, R, T> where
    R::GuardMarker: Send
","synthetic":false,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G>","synthetic":false,"types":["lock_api::remutex::RawReentrantMutex"]},{"text":"impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send for ReentrantMutex<R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutex"]},{"text":"impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T>","synthetic":false,"types":["lock_api::rwlock::RwLock"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
    R::GuardMarker: Send
","synthetic":false,"types":["lock_api::rwlock::MappedRwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
    R::GuardMarker: Send
","synthetic":false,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]}]; +implementors["scopeguard"] = [{"text":"impl Send for Always","synthetic":true,"types":["scopeguard::Always"]},{"text":"impl<T, F, S> Send for ScopeGuard<T, F, S> where
    F: Send,
    T: Send
","synthetic":true,"types":["scopeguard::ScopeGuard"]}]; +implementors["spin"] = [{"text":"impl<R> Send for Barrier<R>","synthetic":true,"types":["spin::barrier::Barrier"]},{"text":"impl Send for BarrierWaitResult","synthetic":true,"types":["spin::barrier::BarrierWaitResult"]},{"text":"impl<T, F, R> Send for Lazy<T, F, R> where
    F: Send,
    T: Send
","synthetic":true,"types":["spin::lazy::Lazy"]},{"text":"impl<T: ?Sized, R> Send for SpinMutex<T, R> where
    R: Send,
    T: Send
","synthetic":true,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<'a, T: ?Sized> Send for SpinMutexGuard<'a, T> where
    T: Send
","synthetic":true,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<'a, T: ?Sized> Send for MutexGuard<'a, T> where
    T: Send
","synthetic":true,"types":["spin::mutex::MutexGuard"]},{"text":"impl<'a, T: ?Sized> Send for RwLockReadGuard<'a, T> where
    T: Sync
","synthetic":true,"types":["spin::rwlock::RwLockReadGuard"]},{"text":"impl<'a, T: ?Sized, R> Send for RwLockWriteGuard<'a, T, R> where
    R: Send,
    T: Send + Sync
","synthetic":true,"types":["spin::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, T: ?Sized, R> Send for RwLockUpgradableGuard<'a, T, R> where
    R: Send,
    T: Send + Sync
","synthetic":true,"types":["spin::rwlock::RwLockUpgradableGuard"]},{"text":"impl Send for Spin","synthetic":true,"types":["spin::relax::Spin"]},{"text":"impl Send for Loop","synthetic":true,"types":["spin::relax::Loop"]},{"text":"impl<T: ?Sized + Send> Send for SpinMutex<T>","synthetic":false,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<T: ?Sized + Send, R> Send for Mutex<T, R>","synthetic":false,"types":["spin::mutex::Mutex"]},{"text":"impl<T: Send, R> Send for Once<T, R>","synthetic":false,"types":["spin::once::Once"]},{"text":"impl<T: ?Sized + Send, R> Send for RwLock<T, R>","synthetic":false,"types":["spin::rwlock::RwLock"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Sync.js b/implementors/core/marker/trait.Sync.js index 0f62b018..82448d89 100644 --- a/implementors/core/marker/trait.Sync.js +++ b/implementors/core/marker/trait.Sync.js @@ -2,4 +2,7 @@ implementors["byteorder"] = [{"text":"impl Sync for BigEndian","synthetic":true,"types":["byteorder::BigEndian"]},{"text":"impl Sync for LittleEndian","synthetic":true,"types":["byteorder::LittleEndian"]}]; implementors["hash32"] = [{"text":"impl Sync for Hasher","synthetic":true,"types":["hash32::fnv::Hasher"]},{"text":"impl Sync for Hasher","synthetic":true,"types":["hash32::murmur3::Hasher"]},{"text":"impl<H> Sync for BuildHasherDefault<H> where
    H: Sync
","synthetic":true,"types":["hash32::BuildHasherDefault"]}]; implementors["heapless"] = [{"text":"impl<T, const N: usize> Sync for Deque<T, N> where
    T: Sync
","synthetic":true,"types":["heapless::deque::Deque"]},{"text":"impl<T, const N: usize> Sync for HistoryBuffer<T, N> where
    T: Sync
","synthetic":true,"types":["heapless::histbuf::HistoryBuffer"]},{"text":"impl<K, V, S, const N: usize> Sync for IndexMap<K, V, S, N> where
    K: Sync,
    S: Sync,
    V: Sync
","synthetic":true,"types":["heapless::indexmap::IndexMap"]},{"text":"impl<T, S, const N: usize> Sync for IndexSet<T, S, N> where
    S: Sync,
    T: Sync
","synthetic":true,"types":["heapless::indexset::IndexSet"]},{"text":"impl<K, V, const N: usize> Sync for LinearMap<K, V, N> where
    K: Sync,
    V: Sync
","synthetic":true,"types":["heapless::linear_map::LinearMap"]},{"text":"impl<const N: usize> Sync for String<N>","synthetic":true,"types":["heapless::string::String"]},{"text":"impl<T, const N: usize> Sync for Vec<T, N> where
    T: Sync
","synthetic":true,"types":["heapless::vec::Vec"]},{"text":"impl Sync for Min","synthetic":true,"types":["heapless::binary_heap::Min"]},{"text":"impl Sync for Max","synthetic":true,"types":["heapless::binary_heap::Max"]},{"text":"impl<T, K, const N: usize> Sync for BinaryHeap<T, K, N> where
    K: Sync,
    T: Sync
","synthetic":true,"types":["heapless::binary_heap::BinaryHeap"]},{"text":"impl<'a, T, K, const N: usize> Sync for PeekMut<'a, T, K, N> where
    K: Sync,
    T: Sync
","synthetic":true,"types":["heapless::binary_heap::PeekMut"]},{"text":"impl<T> !Sync for Node<T>","synthetic":true,"types":["heapless::pool::stack::Node"]},{"text":"impl<T> !Sync for Pool<T>","synthetic":true,"types":["heapless::pool::Pool"]},{"text":"impl Sync for Uninit","synthetic":true,"types":["heapless::pool::Uninit"]},{"text":"impl Sync for Init","synthetic":true,"types":["heapless::pool::Init"]},{"text":"impl Sync for Min","synthetic":true,"types":["heapless::sorted_linked_list::Min"]},{"text":"impl Sync for Max","synthetic":true,"types":["heapless::sorted_linked_list::Max"]},{"text":"impl<T, Idx> Sync for Node<T, Idx> where
    Idx: Sync,
    T: Sync
","synthetic":true,"types":["heapless::sorted_linked_list::Node"]},{"text":"impl<T, Idx, Kind, const N: usize> Sync for SortedLinkedList<T, Idx, Kind, N> where
    Idx: Sync,
    Kind: Sync,
    T: Sync
","synthetic":true,"types":["heapless::sorted_linked_list::SortedLinkedList"]},{"text":"impl Sync for LinkedIndexU8","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexU8"]},{"text":"impl Sync for LinkedIndexU16","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexU16"]},{"text":"impl Sync for LinkedIndexUsize","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexUsize"]},{"text":"impl<'a, T, Idx, Kind, const N: usize> Sync for Iter<'a, T, Idx, Kind, N> where
    Idx: Sync,
    Kind: Sync,
    T: Sync
","synthetic":true,"types":["heapless::sorted_linked_list::Iter"]},{"text":"impl<'a, T, Idx, Kind, const N: usize> Sync for FindMut<'a, T, Idx, Kind, N> where
    Idx: Sync,
    Kind: Sync,
    T: Sync
","synthetic":true,"types":["heapless::sorted_linked_list::FindMut"]},{"text":"impl<T, const N: usize> !Sync for Queue<T, N>","synthetic":true,"types":["heapless::spsc::Queue"]},{"text":"impl<'a, T, const N: usize> !Sync for Iter<'a, T, N>","synthetic":true,"types":["heapless::spsc::Iter"]},{"text":"impl<'a, T, const N: usize> !Sync for IterMut<'a, T, N>","synthetic":true,"types":["heapless::spsc::IterMut"]},{"text":"impl<'a, T, const N: usize> !Sync for Consumer<'a, T, N>","synthetic":true,"types":["heapless::spsc::Consumer"]},{"text":"impl<'a, T, const N: usize> !Sync for Producer<'a, T, N>","synthetic":true,"types":["heapless::spsc::Producer"]},{"text":"impl<T, const N: usize> Sync for MpMcQueue<T, N> where
    T: Send
","synthetic":false,"types":["heapless::mpmc::MpMcQueue"]},{"text":"impl<P, S> Sync for Box<P, S> where
    P: Pool,
    P::Data: Sync
","synthetic":false,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T, S> Sync for Box<T, S> where
    T: Sync
","synthetic":false,"types":["heapless::pool::Box"]}]; +implementors["lock_api"] = [{"text":"impl<'a, R, T: ?Sized> Sync for RwLockReadGuard<'a, R, T> where
    R: Sync,
    T: Send + Sync,
    <R as RawRwLock>::GuardMarker: Sync
","synthetic":true,"types":["lock_api::rwlock::RwLockReadGuard"]},{"text":"impl<'a, R, T: ?Sized> Sync for RwLockWriteGuard<'a, R, T> where
    R: Sync,
    T: Send + Sync,
    <R as RawRwLock>::GuardMarker: Sync
","synthetic":true,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl Sync for GuardSend","synthetic":true,"types":["lock_api::GuardSend"]},{"text":"impl Sync for GuardNoSend","synthetic":false,"types":["lock_api::GuardNoSend"]},{"text":"impl<R: RawMutex + Sync, T: ?Sized + Send> Sync for Mutex<R, T>","synthetic":false,"types":["lock_api::mutex::Mutex"]},{"text":"impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MappedMutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G>","synthetic":false,"types":["lock_api::remutex::RawReentrantMutex"]},{"text":"impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync for ReentrantMutex<R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutex"]},{"text":"impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync for ReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutexGuard"]},{"text":"impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MappedReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::MappedReentrantMutexGuard"]},{"text":"impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T>","synthetic":false,"types":["lock_api::rwlock::RwLock"]},{"text":"impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync for RwLockUpgradableReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockUpgradableReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]}]; +implementors["scopeguard"] = [{"text":"impl Sync for Always","synthetic":true,"types":["scopeguard::Always"]},{"text":"impl<T, F, S> Sync for ScopeGuard<T, F, S> where
    T: Sync,
    F: FnOnce(T),
    S: Strategy
","synthetic":false,"types":["scopeguard::ScopeGuard"]}]; +implementors["spin"] = [{"text":"impl<R> Sync for Barrier<R>","synthetic":true,"types":["spin::barrier::Barrier"]},{"text":"impl Sync for BarrierWaitResult","synthetic":true,"types":["spin::barrier::BarrierWaitResult"]},{"text":"impl<T, F = fn() -> T, R = Spin> !Sync for Lazy<T, F, R>","synthetic":true,"types":["spin::lazy::Lazy"]},{"text":"impl<T, R = Spin> !Sync for SpinMutex<T, R>","synthetic":true,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<'a, T: ?Sized> Sync for SpinMutexGuard<'a, T> where
    T: Sync
","synthetic":true,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<'a, T: ?Sized> Sync for MutexGuard<'a, T> where
    T: Sync
","synthetic":true,"types":["spin::mutex::MutexGuard"]},{"text":"impl<'a, T: ?Sized> Sync for RwLockReadGuard<'a, T> where
    T: Sync
","synthetic":true,"types":["spin::rwlock::RwLockReadGuard"]},{"text":"impl<'a, T: ?Sized, R> Sync for RwLockWriteGuard<'a, T, R> where
    R: Sync,
    T: Send + Sync
","synthetic":true,"types":["spin::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, T: ?Sized, R> Sync for RwLockUpgradableGuard<'a, T, R> where
    R: Sync,
    T: Send + Sync
","synthetic":true,"types":["spin::rwlock::RwLockUpgradableGuard"]},{"text":"impl Sync for Spin","synthetic":true,"types":["spin::relax::Spin"]},{"text":"impl Sync for Loop","synthetic":true,"types":["spin::relax::Loop"]},{"text":"impl<T, F: Send> Sync for Lazy<T, F> where
    Once<T>: Sync
","synthetic":false,"types":["spin::lazy::Lazy"]},{"text":"impl<T: ?Sized + Send> Sync for SpinMutex<T>","synthetic":false,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<T: ?Sized + Send, R> Sync for Mutex<T, R>","synthetic":false,"types":["spin::mutex::Mutex"]},{"text":"impl<T: Send + Sync, R> Sync for Once<T, R>","synthetic":false,"types":["spin::once::Once"]},{"text":"impl<T: ?Sized + Send + Sync, R> Sync for RwLock<T, R>","synthetic":false,"types":["spin::rwlock::RwLock"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Unpin.js b/implementors/core/marker/trait.Unpin.js index 653f42ab..da2d47d4 100644 --- a/implementors/core/marker/trait.Unpin.js +++ b/implementors/core/marker/trait.Unpin.js @@ -2,4 +2,7 @@ implementors["byteorder"] = [{"text":"impl Unpin for BigEndian","synthetic":true,"types":["byteorder::BigEndian"]},{"text":"impl Unpin for LittleEndian","synthetic":true,"types":["byteorder::LittleEndian"]}]; implementors["hash32"] = [{"text":"impl Unpin for Hasher","synthetic":true,"types":["hash32::fnv::Hasher"]},{"text":"impl Unpin for Hasher","synthetic":true,"types":["hash32::murmur3::Hasher"]},{"text":"impl<H> Unpin for BuildHasherDefault<H> where
    H: Unpin
","synthetic":true,"types":["hash32::BuildHasherDefault"]}]; implementors["heapless"] = [{"text":"impl<T, const N: usize> Unpin for Deque<T, N> where
    T: Unpin
","synthetic":true,"types":["heapless::deque::Deque"]},{"text":"impl<T, const N: usize> Unpin for HistoryBuffer<T, N> where
    T: Unpin
","synthetic":true,"types":["heapless::histbuf::HistoryBuffer"]},{"text":"impl<K, V, S, const N: usize> Unpin for IndexMap<K, V, S, N> where
    K: Unpin,
    S: Unpin,
    V: Unpin
","synthetic":true,"types":["heapless::indexmap::IndexMap"]},{"text":"impl<T, S, const N: usize> Unpin for IndexSet<T, S, N> where
    S: Unpin,
    T: Unpin
","synthetic":true,"types":["heapless::indexset::IndexSet"]},{"text":"impl<K, V, const N: usize> Unpin for LinearMap<K, V, N> where
    K: Unpin,
    V: Unpin
","synthetic":true,"types":["heapless::linear_map::LinearMap"]},{"text":"impl<const N: usize> Unpin for String<N>","synthetic":true,"types":["heapless::string::String"]},{"text":"impl<T, const N: usize> Unpin for Vec<T, N> where
    T: Unpin
","synthetic":true,"types":["heapless::vec::Vec"]},{"text":"impl Unpin for Min","synthetic":true,"types":["heapless::binary_heap::Min"]},{"text":"impl Unpin for Max","synthetic":true,"types":["heapless::binary_heap::Max"]},{"text":"impl<T, K, const N: usize> Unpin for BinaryHeap<T, K, N> where
    K: Unpin,
    T: Unpin
","synthetic":true,"types":["heapless::binary_heap::BinaryHeap"]},{"text":"impl<'a, T, K, const N: usize> Unpin for PeekMut<'a, T, K, N>","synthetic":true,"types":["heapless::binary_heap::PeekMut"]},{"text":"impl<T, const N: usize> Unpin for MpMcQueue<T, N> where
    T: Unpin
","synthetic":true,"types":["heapless::mpmc::MpMcQueue"]},{"text":"impl<POOL, STATE> Unpin for Box<POOL, STATE> where
    POOL: Unpin,
    STATE: Unpin
","synthetic":true,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T> Unpin for Node<T> where
    T: Unpin
","synthetic":true,"types":["heapless::pool::stack::Node"]},{"text":"impl<T> Unpin for Pool<T>","synthetic":true,"types":["heapless::pool::Pool"]},{"text":"impl<T, STATE> Unpin for Box<T, STATE> where
    STATE: Unpin
","synthetic":true,"types":["heapless::pool::Box"]},{"text":"impl Unpin for Uninit","synthetic":true,"types":["heapless::pool::Uninit"]},{"text":"impl Unpin for Init","synthetic":true,"types":["heapless::pool::Init"]},{"text":"impl Unpin for Min","synthetic":true,"types":["heapless::sorted_linked_list::Min"]},{"text":"impl Unpin for Max","synthetic":true,"types":["heapless::sorted_linked_list::Max"]},{"text":"impl<T, Idx> Unpin for Node<T, Idx> where
    Idx: Unpin,
    T: Unpin
","synthetic":true,"types":["heapless::sorted_linked_list::Node"]},{"text":"impl<T, Idx, Kind, const N: usize> Unpin for SortedLinkedList<T, Idx, Kind, N> where
    Idx: Unpin,
    Kind: Unpin,
    T: Unpin
","synthetic":true,"types":["heapless::sorted_linked_list::SortedLinkedList"]},{"text":"impl Unpin for LinkedIndexU8","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexU8"]},{"text":"impl Unpin for LinkedIndexU16","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexU16"]},{"text":"impl Unpin for LinkedIndexUsize","synthetic":true,"types":["heapless::sorted_linked_list::LinkedIndexUsize"]},{"text":"impl<'a, T, Idx, Kind, const N: usize> Unpin for Iter<'a, T, Idx, Kind, N> where
    Idx: Unpin
","synthetic":true,"types":["heapless::sorted_linked_list::Iter"]},{"text":"impl<'a, T, Idx, Kind, const N: usize> Unpin for FindMut<'a, T, Idx, Kind, N> where
    Idx: Unpin
","synthetic":true,"types":["heapless::sorted_linked_list::FindMut"]},{"text":"impl<T, const N: usize> Unpin for Queue<T, N> where
    T: Unpin
","synthetic":true,"types":["heapless::spsc::Queue"]},{"text":"impl<'a, T, const N: usize> Unpin for Iter<'a, T, N>","synthetic":true,"types":["heapless::spsc::Iter"]},{"text":"impl<'a, T, const N: usize> Unpin for IterMut<'a, T, N>","synthetic":true,"types":["heapless::spsc::IterMut"]},{"text":"impl<'a, T, const N: usize> Unpin for Consumer<'a, T, N>","synthetic":true,"types":["heapless::spsc::Consumer"]},{"text":"impl<'a, T, const N: usize> Unpin for Producer<'a, T, N>","synthetic":true,"types":["heapless::spsc::Producer"]}]; +implementors["lock_api"] = [{"text":"impl<R, T: ?Sized> Unpin for Mutex<R, T> where
    R: Unpin,
    T: Unpin
","synthetic":true,"types":["lock_api::mutex::Mutex"]},{"text":"impl<'a, R, T: ?Sized> Unpin for MutexGuard<'a, R, T> where
    <R as RawMutex>::GuardMarker: Unpin
","synthetic":true,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R, T: ?Sized> Unpin for MappedMutexGuard<'a, R, T>","synthetic":true,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<R, G> Unpin for RawReentrantMutex<R, G> where
    G: Unpin,
    R: Unpin
","synthetic":true,"types":["lock_api::remutex::RawReentrantMutex"]},{"text":"impl<R, G, T: ?Sized> Unpin for ReentrantMutex<R, G, T> where
    G: Unpin,
    R: Unpin,
    T: Unpin
","synthetic":true,"types":["lock_api::remutex::ReentrantMutex"]},{"text":"impl<'a, R, G, T: ?Sized> Unpin for ReentrantMutexGuard<'a, R, G, T>","synthetic":true,"types":["lock_api::remutex::ReentrantMutexGuard"]},{"text":"impl<'a, R, G, T: ?Sized> Unpin for MappedReentrantMutexGuard<'a, R, G, T>","synthetic":true,"types":["lock_api::remutex::MappedReentrantMutexGuard"]},{"text":"impl<R, T: ?Sized> Unpin for RwLock<R, T> where
    R: Unpin,
    T: Unpin
","synthetic":true,"types":["lock_api::rwlock::RwLock"]},{"text":"impl<'a, R, T: ?Sized> Unpin for RwLockReadGuard<'a, R, T> where
    <R as RawRwLock>::GuardMarker: Unpin
","synthetic":true,"types":["lock_api::rwlock::RwLockReadGuard"]},{"text":"impl<'a, R, T: ?Sized> Unpin for RwLockWriteGuard<'a, R, T> where
    <R as RawRwLock>::GuardMarker: Unpin
","synthetic":true,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, R, T: ?Sized> Unpin for RwLockUpgradableReadGuard<'a, R, T> where
    <R as RawRwLock>::GuardMarker: Unpin
","synthetic":true,"types":["lock_api::rwlock::RwLockUpgradableReadGuard"]},{"text":"impl<'a, R, T: ?Sized> Unpin for MappedRwLockReadGuard<'a, R, T>","synthetic":true,"types":["lock_api::rwlock::MappedRwLockReadGuard"]},{"text":"impl<'a, R, T: ?Sized> Unpin for MappedRwLockWriteGuard<'a, R, T>","synthetic":true,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]},{"text":"impl Unpin for GuardSend","synthetic":true,"types":["lock_api::GuardSend"]},{"text":"impl Unpin for GuardNoSend","synthetic":true,"types":["lock_api::GuardNoSend"]}]; +implementors["scopeguard"] = [{"text":"impl Unpin for Always","synthetic":true,"types":["scopeguard::Always"]},{"text":"impl<T, F, S> Unpin for ScopeGuard<T, F, S> where
    F: Unpin,
    T: Unpin
","synthetic":true,"types":["scopeguard::ScopeGuard"]}]; +implementors["spin"] = [{"text":"impl<R> Unpin for Barrier<R> where
    R: Unpin
","synthetic":true,"types":["spin::barrier::Barrier"]},{"text":"impl Unpin for BarrierWaitResult","synthetic":true,"types":["spin::barrier::BarrierWaitResult"]},{"text":"impl<T, F, R> Unpin for Lazy<T, F, R> where
    F: Unpin,
    R: Unpin,
    T: Unpin
","synthetic":true,"types":["spin::lazy::Lazy"]},{"text":"impl<T: ?Sized, R> Unpin for SpinMutex<T, R> where
    R: Unpin,
    T: Unpin
","synthetic":true,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<'a, T: ?Sized> Unpin for SpinMutexGuard<'a, T>","synthetic":true,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<T: ?Sized, R> Unpin for Mutex<T, R> where
    R: Unpin,
    T: Unpin
","synthetic":true,"types":["spin::mutex::Mutex"]},{"text":"impl<'a, T: ?Sized> Unpin for MutexGuard<'a, T>","synthetic":true,"types":["spin::mutex::MutexGuard"]},{"text":"impl<T, R> Unpin for Once<T, R> where
    R: Unpin,
    T: Unpin
","synthetic":true,"types":["spin::once::Once"]},{"text":"impl<T: ?Sized, R> Unpin for RwLock<T, R> where
    R: Unpin,
    T: Unpin
","synthetic":true,"types":["spin::rwlock::RwLock"]},{"text":"impl<'a, T: ?Sized> Unpin for RwLockReadGuard<'a, T>","synthetic":true,"types":["spin::rwlock::RwLockReadGuard"]},{"text":"impl<'a, T: ?Sized, R> Unpin for RwLockWriteGuard<'a, T, R> where
    R: Unpin
","synthetic":true,"types":["spin::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, T: ?Sized, R> Unpin for RwLockUpgradableGuard<'a, T, R> where
    R: Unpin
","synthetic":true,"types":["spin::rwlock::RwLockUpgradableGuard"]},{"text":"impl Unpin for Spin","synthetic":true,"types":["spin::relax::Spin"]},{"text":"impl Unpin for Loop","synthetic":true,"types":["spin::relax::Loop"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/deref/trait.Deref.js b/implementors/core/ops/deref/trait.Deref.js index ee5c06f9..b7de2dd4 100644 --- a/implementors/core/ops/deref/trait.Deref.js +++ b/implementors/core/ops/deref/trait.Deref.js @@ -1,3 +1,6 @@ (function() {var implementors = {}; implementors["heapless"] = [{"text":"impl<T, const N: usize> Deref for HistoryBuffer<T, N>","synthetic":false,"types":["heapless::histbuf::HistoryBuffer"]},{"text":"impl<const N: usize> Deref for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<T, const N: usize> Deref for Vec<T, N>","synthetic":false,"types":["heapless::vec::Vec"]},{"text":"impl<T, K, const N: usize> Deref for PeekMut<'_, T, K, N> where
    T: Ord,
    K: Kind, 
","synthetic":false,"types":["heapless::binary_heap::PeekMut"]},{"text":"impl<P> Deref for Box<P> where
    P: Pool
","synthetic":false,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T> Deref for Box<T>","synthetic":false,"types":["heapless::pool::Box"]},{"text":"impl<T, Idx, Kind, const N: usize> Deref for FindMut<'_, T, Idx, Kind, N> where
    T: Ord,
    Idx: SortedLinkedListIndex,
    Kind: LLKind, 
","synthetic":false,"types":["heapless::sorted_linked_list::FindMut"]}]; +implementors["lock_api"] = [{"text":"impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MappedMutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref for ReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref for MappedReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::MappedReentrantMutexGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockUpgradableReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]}]; +implementors["scopeguard"] = [{"text":"impl<T, F, S> Deref for ScopeGuard<T, F, S> where
    F: FnOnce(T),
    S: Strategy
","synthetic":false,"types":["scopeguard::ScopeGuard"]}]; +implementors["spin"] = [{"text":"impl<T, F: FnOnce() -> T, R: RelaxStrategy> Deref for Lazy<T, F, R>","synthetic":false,"types":["spin::lazy::Lazy"]},{"text":"impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<'a, T: ?Sized> Deref for MutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::MutexGuard"]},{"text":"impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T>","synthetic":false,"types":["spin::rwlock::RwLockReadGuard"]},{"text":"impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockUpgradableGuard"]},{"text":"impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockWriteGuard"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/deref/trait.DerefMut.js b/implementors/core/ops/deref/trait.DerefMut.js index 15a5d4d4..e1f13bfa 100644 --- a/implementors/core/ops/deref/trait.DerefMut.js +++ b/implementors/core/ops/deref/trait.DerefMut.js @@ -1,3 +1,6 @@ (function() {var implementors = {}; implementors["heapless"] = [{"text":"impl<const N: usize> DerefMut for String<N>","synthetic":false,"types":["heapless::string::String"]},{"text":"impl<T, const N: usize> DerefMut for Vec<T, N>","synthetic":false,"types":["heapless::vec::Vec"]},{"text":"impl<T, K, const N: usize> DerefMut for PeekMut<'_, T, K, N> where
    T: Ord,
    K: Kind, 
","synthetic":false,"types":["heapless::binary_heap::PeekMut"]},{"text":"impl<P> DerefMut for Box<P> where
    P: Pool
","synthetic":false,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T> DerefMut for Box<T>","synthetic":false,"types":["heapless::pool::Box"]},{"text":"impl<T, Idx, Kind, const N: usize> DerefMut for FindMut<'_, T, Idx, Kind, N> where
    T: Ord,
    Idx: SortedLinkedListIndex,
    Kind: LLKind, 
","synthetic":false,"types":["heapless::sorted_linked_list::FindMut"]}]; +implementors["lock_api"] = [{"text":"impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MappedMutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]}]; +implementors["scopeguard"] = [{"text":"impl<T, F, S> DerefMut for ScopeGuard<T, F, S> where
    F: FnOnce(T),
    S: Strategy
","synthetic":false,"types":["scopeguard::ScopeGuard"]}]; +implementors["spin"] = [{"text":"impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::MutexGuard"]},{"text":"impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockWriteGuard"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/drop/trait.Drop.js b/implementors/core/ops/drop/trait.Drop.js index d5950f8e..0082c321 100644 --- a/implementors/core/ops/drop/trait.Drop.js +++ b/implementors/core/ops/drop/trait.Drop.js @@ -1,3 +1,6 @@ (function() {var implementors = {}; implementors["heapless"] = [{"text":"impl<T, const N: usize> Drop for Deque<T, N>","synthetic":false,"types":["heapless::deque::Deque"]},{"text":"impl<T, const N: usize> Drop for HistoryBuffer<T, N>","synthetic":false,"types":["heapless::histbuf::HistoryBuffer"]},{"text":"impl<K, V, const N: usize> Drop for LinearMap<K, V, N>","synthetic":false,"types":["heapless::linear_map::LinearMap"]},{"text":"impl<T, const N: usize> Drop for Vec<T, N>","synthetic":false,"types":["heapless::vec::Vec"]},{"text":"impl<T, K, const N: usize> Drop for PeekMut<'_, T, K, N> where
    T: Ord,
    K: Kind, 
","synthetic":false,"types":["heapless::binary_heap::PeekMut"]},{"text":"impl<T, K, const N: usize> Drop for BinaryHeap<T, K, N>","synthetic":false,"types":["heapless::binary_heap::BinaryHeap"]},{"text":"impl<P, S> Drop for Box<P, S> where
    P: Pool,
    S: 'static, 
","synthetic":false,"types":["heapless::pool::singleton::Box"]},{"text":"impl<T, Idx, Kind, const N: usize> Drop for FindMut<'_, T, Idx, Kind, N> where
    T: Ord,
    Idx: SortedLinkedListIndex,
    Kind: LLKind, 
","synthetic":false,"types":["heapless::sorted_linked_list::FindMut"]},{"text":"impl<T, Idx, Kind, const N: usize> Drop for SortedLinkedList<T, Idx, Kind, N> where
    Idx: SortedLinkedListIndex
","synthetic":false,"types":["heapless::sorted_linked_list::SortedLinkedList"]},{"text":"impl<T, const N: usize> Drop for Queue<T, N>","synthetic":false,"types":["heapless::spsc::Queue"]}]; +implementors["lock_api"] = [{"text":"impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T>","synthetic":false,"types":["lock_api::mutex::MappedMutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop for ReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::ReentrantMutexGuard"]},{"text":"impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop for MappedReentrantMutexGuard<'a, R, G, T>","synthetic":false,"types":["lock_api::remutex::MappedReentrantMutexGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockWriteGuard"]},{"text":"impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::RwLockUpgradableReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockReadGuard"]},{"text":"impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T>","synthetic":false,"types":["lock_api::rwlock::MappedRwLockWriteGuard"]}]; +implementors["scopeguard"] = [{"text":"impl<T, F, S> Drop for ScopeGuard<T, F, S> where
    F: FnOnce(T),
    S: Strategy
","synthetic":false,"types":["scopeguard::ScopeGuard"]}]; +implementors["spin"] = [{"text":"impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T>","synthetic":false,"types":["spin::mutex::spin::SpinMutexGuard"]},{"text":"impl<T, R> Drop for Once<T, R>","synthetic":false,"types":["spin::once::Once"]},{"text":"impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T>","synthetic":false,"types":["spin::rwlock::RwLockReadGuard"]},{"text":"impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockUpgradableGuard"]},{"text":"impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R>","synthetic":false,"types":["spin::rwlock::RwLockWriteGuard"]}]; if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/lock_api/mutex/trait.RawMutex.js b/implementors/lock_api/mutex/trait.RawMutex.js new file mode 100644 index 00000000..650b7101 --- /dev/null +++ b/implementors/lock_api/mutex/trait.RawMutex.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["spin"] = [{"text":"impl<R: RelaxStrategy> RawMutex for SpinMutex<(), R>","synthetic":false,"types":["spin::mutex::spin::SpinMutex"]},{"text":"impl<R: RelaxStrategy> RawMutex for Mutex<(), R>","synthetic":false,"types":["spin::mutex::Mutex"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/lock_api/rwlock/trait.RawRwLock.js b/implementors/lock_api/rwlock/trait.RawRwLock.js new file mode 100644 index 00000000..d5387e15 --- /dev/null +++ b/implementors/lock_api/rwlock/trait.RawRwLock.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["spin"] = [{"text":"impl<R: RelaxStrategy> RawRwLock for RwLock<(), R>","synthetic":false,"types":["spin::rwlock::RwLock"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/lock_api/rwlock/trait.RawRwLockDowngrade.js b/implementors/lock_api/rwlock/trait.RawRwLockDowngrade.js new file mode 100644 index 00000000..80843515 --- /dev/null +++ b/implementors/lock_api/rwlock/trait.RawRwLockDowngrade.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["spin"] = [{"text":"impl<R: RelaxStrategy> RawRwLockDowngrade for RwLock<(), R>","synthetic":false,"types":["spin::rwlock::RwLock"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/lock_api/rwlock/trait.RawRwLockUpgrade.js b/implementors/lock_api/rwlock/trait.RawRwLockUpgrade.js new file mode 100644 index 00000000..29df437a --- /dev/null +++ b/implementors/lock_api/rwlock/trait.RawRwLockUpgrade.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["spin"] = [{"text":"impl<R: RelaxStrategy> RawRwLockUpgrade for RwLock<(), R>","synthetic":false,"types":["spin::rwlock::RwLock"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/scopeguard/trait.Strategy.js b/implementors/scopeguard/trait.Strategy.js new file mode 100644 index 00000000..fa3c2f5f --- /dev/null 
+++ b/implementors/scopeguard/trait.Strategy.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["scopeguard"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/spin/relax/trait.RelaxStrategy.js b/implementors/spin/relax/trait.RelaxStrategy.js new file mode 100644 index 00000000..dd397323 --- /dev/null +++ b/implementors/spin/relax/trait.RelaxStrategy.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["spin"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/lock_api/all.html b/lock_api/all.html new file mode 100644 index 00000000..cd9b85e3 --- /dev/null +++ b/lock_api/all.html @@ -0,0 +1,5 @@ +List of all items in this crate + +

List of all items + +

Structs

Traits

\ No newline at end of file diff --git a/lock_api/index.html b/lock_api/index.html new file mode 100644 index 00000000..2ea888e8 --- /dev/null +++ b/lock_api/index.html @@ -0,0 +1,120 @@ +lock_api - Rust + +

Crate lock_api

This library provides type-safe and fully-featured Mutex and RwLock +types which wrap a simple raw mutex or rwlock type. This has several +benefits: not only does it eliminate a large portion of the work in +implementing custom lock types, it also allows users to write code which is +generic with regards to different lock implementations.

+

Basic usage of this crate is very straightforward:

+
    +
  1. Create a raw lock type. This should only contain the lock state, not any +data protected by the lock.
  2. Implement the RawMutex trait for your custom lock type.
  3. Export your mutex as a type alias for lock_api::Mutex, and +your mutex guard as a type alias for lock_api::MutexGuard. +See the example below for details.
+

This process is similar for RwLocks, except that two guards need to be +exported instead of one (or three, if your type supports upgradable read +locks; see the extension traits below for details). A sketch of the RwLock aliases follows the example.

+

Example

+
+use lock_api::{RawMutex, Mutex, GuardSend};
+use std::sync::atomic::{AtomicBool, Ordering};
+
+// 1. Define our raw lock type
+pub struct RawSpinlock(AtomicBool);
+
+// 2. Implement RawMutex for this type
+unsafe impl RawMutex for RawSpinlock {
+    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
+
+    // A spinlock guard can be sent to another thread and unlocked there
+    type GuardMarker = GuardSend;
+
+    fn lock(&self) {
+        // Note: This isn't the best way of implementing a spinlock, but it
+        // suffices for the sake of this example.
+        while !self.try_lock() {}
+    }
+
+    fn try_lock(&self) -> bool {
+        self.0
+            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+    }
+
+    unsafe fn unlock(&self) {
+        self.0.store(false, Ordering::Release);
+    }
+}
+
+// 3. Export the wrappers. These are the types that your users will actually use.
+pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;
+pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>;
+
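For the RwLock case mentioned above, the same export step produces two (or three) guard aliases. A hedged sketch, assuming a hypothetical RawSpinRwLock type that implements lock_api::RawRwLock:

+// RawSpinRwLock is a hypothetical raw lock implementing lock_api::RawRwLock.
+pub type SpinRwLock<T> = lock_api::RwLock<RawSpinRwLock, T>;
+pub type SpinRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawSpinRwLock, T>;
+pub type SpinRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawSpinRwLock, T>;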

Extension traits

+

In addition to basic locking & unlocking functionality, you have the option +of exposing additional functionality in your lock types by implementing +additional traits for them. Examples of extension features include:

+
    +
  • Fair unlocking (RawMutexFair, RawRwLockFair)
  • Lock timeouts (RawMutexTimed, RawRwLockTimed)
  • Downgradable write locks (RawRwLockDowngrade)
  • Recursive read locks (RawRwLockRecursive)
  • Upgradable read locks (RawRwLockUpgrade)
+

The Mutex and RwLock wrappers will automatically expose this additional +functionality if the raw lock type implements these extension traits.

+
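For instance, a hedged sketch of opting the RawSpinlock example into fair unlocking (a bare spinlock has no wait queue, so its fair unlock can only fall back to a plain unlock; a real implementation would hand the lock directly to a waiting thread):

+use lock_api::{RawMutex, RawMutexFair};
+
+// With this impl in place, MutexGuard::unlock_fair(guard) becomes
+// available on Spinlock guards automatically.
+unsafe impl RawMutexFair for RawSpinlock {
+    unsafe fn unlock_fair(&self) {
+        // No wait queue to hand off to, so this degenerates to a plain unlock.
+        self.unlock();
+    }
+}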

Cargo features

+

This crate supports two cargo features:

+
    +
  • owning_ref: Allows your lock types to be used with the owning_ref crate.
  • nightly: Enables nightly-only features. At the moment the only such +feature is const fn constructors for lock types.
+

Structs

+
GuardNoSend

Marker type which indicates that the Guard type for a lock is not Send.

+
GuardSend

Marker type which indicates that the Guard type for a lock is Send.

+
MappedMutexGuard

An RAII mutex guard returned by MutexGuard::map, which can point to a +subfield of the protected data.

+
MappedReentrantMutexGuard

An RAII mutex guard returned by ReentrantMutexGuard::map, which can point to a +subfield of the protected data.

+
MappedRwLockReadGuard

An RAII read lock guard returned by RwLockReadGuard::map, which can point to a +subfield of the protected data.

+
MappedRwLockWriteGuard

An RAII write lock guard returned by RwLockWriteGuard::map, which can point to a +subfield of the protected data.

+
Mutex

A mutual exclusion primitive useful for protecting shared data

+
MutexGuard

An RAII implementation of a “scoped lock” of a mutex. When this structure is +dropped (falls out of scope), the lock will be unlocked.

+
RawReentrantMutex

A raw mutex type that wraps another raw mutex to provide reentrancy.

+
ReentrantMutex

A mutex which can be recursively locked by a single thread.

+
ReentrantMutexGuard

An RAII implementation of a “scoped lock” of a reentrant mutex. When this structure +is dropped (falls out of scope), the lock will be unlocked.

+
RwLock

A reader-writer lock

+
RwLockReadGuard

RAII structure used to release the shared read access of a lock when +dropped.

+
RwLockUpgradableReadGuard

RAII structure used to release the upgradable read access of a lock when +dropped.

+
RwLockWriteGuard

RAII structure used to release the exclusive write access of a lock when +dropped.

+

Traits

+
GetThreadId

Helper trait which returns a non-zero thread ID.

+
RawMutex

Basic operations for a mutex.

+
RawMutexFair

Additional methods for mutexes which support fair unlocking.

+
RawMutexTimed

Additional methods for mutexes which support locking with timeouts.

+
RawRwLock

Basic operations for a reader-writer lock.

+
RawRwLockDowngrade

Additional methods for RwLocks which support atomically downgrading an +exclusive lock to a shared lock.

+
RawRwLockFair

Additional methods for RwLocks which support fair unlocking.

+
RawRwLockRecursive

Additional methods for RwLocks which support recursive read locks.

+
RawRwLockRecursiveTimed

Additional methods for RwLocks which support recursive read locks and timeouts.

+
RawRwLockTimed

Additional methods for RwLocks which support locking with timeouts.

+
RawRwLockUpgrade

Additional methods for RwLocks which support atomically upgrading a shared +lock to an exclusive lock.

+
RawRwLockUpgradeDowngrade

Additional methods for RwLocks which support upgradable locks and lock +downgrading.

+
RawRwLockUpgradeFair

Additional methods for RwLocks which support upgradable locks and fair +unlocking.

+
RawRwLockUpgradeTimed

Additional methods for RwLocks which support upgradable locks and locking +with timeouts.

+
\ No newline at end of file diff --git a/lock_api/mutex/struct.MappedMutexGuard.html b/lock_api/mutex/struct.MappedMutexGuard.html new file mode 100644 index 00000000..1974c0fd --- /dev/null +++ b/lock_api/mutex/struct.MappedMutexGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.MappedMutexGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/mutex/struct.Mutex.html b/lock_api/mutex/struct.Mutex.html new file mode 100644 index 00000000..cf26d92c --- /dev/null +++ b/lock_api/mutex/struct.Mutex.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.Mutex.html...

+ + + \ No newline at end of file diff --git a/lock_api/mutex/struct.MutexGuard.html b/lock_api/mutex/struct.MutexGuard.html new file mode 100644 index 00000000..7d036672 --- /dev/null +++ b/lock_api/mutex/struct.MutexGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.MutexGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/mutex/trait.RawMutex.html b/lock_api/mutex/trait.RawMutex.html new file mode 100644 index 00000000..3649fc19 --- /dev/null +++ b/lock_api/mutex/trait.RawMutex.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawMutex.html...

+ + + \ No newline at end of file diff --git a/lock_api/mutex/trait.RawMutexFair.html b/lock_api/mutex/trait.RawMutexFair.html new file mode 100644 index 00000000..63ce4229 --- /dev/null +++ b/lock_api/mutex/trait.RawMutexFair.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawMutexFair.html...

+ + + \ No newline at end of file diff --git a/lock_api/mutex/trait.RawMutexTimed.html b/lock_api/mutex/trait.RawMutexTimed.html new file mode 100644 index 00000000..8d5764af --- /dev/null +++ b/lock_api/mutex/trait.RawMutexTimed.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawMutexTimed.html...

+ + + \ No newline at end of file diff --git a/lock_api/remutex/struct.MappedReentrantMutexGuard.html b/lock_api/remutex/struct.MappedReentrantMutexGuard.html new file mode 100644 index 00000000..0bf91ff3 --- /dev/null +++ b/lock_api/remutex/struct.MappedReentrantMutexGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.MappedReentrantMutexGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/remutex/struct.RawReentrantMutex.html b/lock_api/remutex/struct.RawReentrantMutex.html new file mode 100644 index 00000000..655a7289 --- /dev/null +++ b/lock_api/remutex/struct.RawReentrantMutex.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.RawReentrantMutex.html...

+ + + \ No newline at end of file diff --git a/lock_api/remutex/struct.ReentrantMutex.html b/lock_api/remutex/struct.ReentrantMutex.html new file mode 100644 index 00000000..f8613f1f --- /dev/null +++ b/lock_api/remutex/struct.ReentrantMutex.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.ReentrantMutex.html...

+ + + \ No newline at end of file diff --git a/lock_api/remutex/struct.ReentrantMutexGuard.html b/lock_api/remutex/struct.ReentrantMutexGuard.html new file mode 100644 index 00000000..793f2001 --- /dev/null +++ b/lock_api/remutex/struct.ReentrantMutexGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.ReentrantMutexGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/remutex/trait.GetThreadId.html b/lock_api/remutex/trait.GetThreadId.html new file mode 100644 index 00000000..1c47b277 --- /dev/null +++ b/lock_api/remutex/trait.GetThreadId.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.GetThreadId.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/struct.MappedRwLockReadGuard.html b/lock_api/rwlock/struct.MappedRwLockReadGuard.html new file mode 100644 index 00000000..a9c72250 --- /dev/null +++ b/lock_api/rwlock/struct.MappedRwLockReadGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.MappedRwLockReadGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/struct.MappedRwLockWriteGuard.html b/lock_api/rwlock/struct.MappedRwLockWriteGuard.html new file mode 100644 index 00000000..858ce9b6 --- /dev/null +++ b/lock_api/rwlock/struct.MappedRwLockWriteGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.MappedRwLockWriteGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/struct.RwLock.html b/lock_api/rwlock/struct.RwLock.html new file mode 100644 index 00000000..6df1cad3 --- /dev/null +++ b/lock_api/rwlock/struct.RwLock.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.RwLock.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/struct.RwLockReadGuard.html b/lock_api/rwlock/struct.RwLockReadGuard.html new file mode 100644 index 00000000..c3009cf3 --- /dev/null +++ b/lock_api/rwlock/struct.RwLockReadGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.RwLockReadGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/struct.RwLockUpgradableReadGuard.html b/lock_api/rwlock/struct.RwLockUpgradableReadGuard.html new file mode 100644 index 00000000..4beb9912 --- /dev/null +++ b/lock_api/rwlock/struct.RwLockUpgradableReadGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.RwLockUpgradableReadGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/struct.RwLockWriteGuard.html b/lock_api/rwlock/struct.RwLockWriteGuard.html new file mode 100644 index 00000000..ab64740f --- /dev/null +++ b/lock_api/rwlock/struct.RwLockWriteGuard.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/struct.RwLockWriteGuard.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLock.html b/lock_api/rwlock/trait.RawRwLock.html new file mode 100644 index 00000000..87e78d8f --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLock.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLock.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockDowngrade.html b/lock_api/rwlock/trait.RawRwLockDowngrade.html new file mode 100644 index 00000000..b1e7b7b3 --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockDowngrade.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockDowngrade.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockFair.html b/lock_api/rwlock/trait.RawRwLockFair.html new file mode 100644 index 00000000..7c03e137 --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockFair.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockFair.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockRecursive.html b/lock_api/rwlock/trait.RawRwLockRecursive.html new file mode 100644 index 00000000..112471dd --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockRecursive.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockRecursive.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockRecursiveTimed.html b/lock_api/rwlock/trait.RawRwLockRecursiveTimed.html new file mode 100644 index 00000000..44736ba3 --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockRecursiveTimed.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockRecursiveTimed.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockTimed.html b/lock_api/rwlock/trait.RawRwLockTimed.html new file mode 100644 index 00000000..9e594a6f --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockTimed.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockTimed.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockUpgrade.html b/lock_api/rwlock/trait.RawRwLockUpgrade.html new file mode 100644 index 00000000..d5c7ff93 --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockUpgrade.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockUpgrade.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockUpgradeDowngrade.html b/lock_api/rwlock/trait.RawRwLockUpgradeDowngrade.html new file mode 100644 index 00000000..d0dc758d --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockUpgradeDowngrade.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockUpgradeDowngrade.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockUpgradeFair.html b/lock_api/rwlock/trait.RawRwLockUpgradeFair.html new file mode 100644 index 00000000..c4aa7ce5 --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockUpgradeFair.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockUpgradeFair.html...

+ + + \ No newline at end of file diff --git a/lock_api/rwlock/trait.RawRwLockUpgradeTimed.html b/lock_api/rwlock/trait.RawRwLockUpgradeTimed.html new file mode 100644 index 00000000..b4ecfa4c --- /dev/null +++ b/lock_api/rwlock/trait.RawRwLockUpgradeTimed.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to ../../lock_api/trait.RawRwLockUpgradeTimed.html...

+ + + \ No newline at end of file diff --git a/lock_api/sidebar-items.js b/lock_api/sidebar-items.js new file mode 100644 index 00000000..680c9484 --- /dev/null +++ b/lock_api/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"struct":[["GuardNoSend","Marker type which indicates that the Guard type for a lock is not `Send`."],["GuardSend","Marker type which indicates that the Guard type for a lock is `Send`."],["MappedMutexGuard","An RAII mutex guard returned by `MutexGuard::map`, which can point to a subfield of the protected data."],["MappedReentrantMutexGuard","An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a subfield of the protected data."],["MappedRwLockReadGuard","An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a subfield of the protected data."],["MappedRwLockWriteGuard","An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a subfield of the protected data."],["Mutex","A mutual exclusion primitive useful for protecting shared data"],["MutexGuard","An RAII implementation of a “scoped lock” of a mutex. When this structure is dropped (falls out of scope), the lock will be unlocked."],["RawReentrantMutex","A raw mutex type that wraps another raw mutex to provide reentrancy."],["ReentrantMutex","A mutex which can be recursively locked by a single thread."],["ReentrantMutexGuard","An RAII implementation of a “scoped lock” of a reentrant mutex. When this structure is dropped (falls out of scope), the lock will be unlocked."],["RwLock","A reader-writer lock"],["RwLockReadGuard","RAII structure used to release the shared read access of a lock when dropped."],["RwLockUpgradableReadGuard","RAII structure used to release the upgradable read access of a lock when dropped."],["RwLockWriteGuard","RAII structure used to release the exclusive write access of a lock when dropped."]],"trait":[["GetThreadId","Helper trait which returns a non-zero thread ID."],["RawMutex","Basic operations for a mutex."],["RawMutexFair","Additional methods for mutexes which support fair unlocking."],["RawMutexTimed","Additional methods for mutexes which support locking with timeouts."],["RawRwLock","Basic operations for a reader-writer lock."],["RawRwLockDowngrade","Additional methods for RwLocks which support atomically downgrading an exclusive lock to a shared lock."],["RawRwLockFair","Additional methods for RwLocks which support fair unlocking."],["RawRwLockRecursive","Additional methods for RwLocks which support recursive read locks."],["RawRwLockRecursiveTimed","Additional methods for RwLocks which support recursive read locks and timeouts."],["RawRwLockTimed","Additional methods for RwLocks which support locking with timeouts."],["RawRwLockUpgrade","Additional methods for RwLocks which support atomically upgrading a shared lock to an exclusive lock."],["RawRwLockUpgradeDowngrade","Additional methods for RwLocks which support upgradable locks and lock downgrading."],["RawRwLockUpgradeFair","Additional methods for RwLocks which support upgradable locks and fair unlocking."],["RawRwLockUpgradeTimed","Additional methods for RwLocks which support upgradable locks and locking with timeouts."]]}); \ No newline at end of file diff --git a/lock_api/struct.GuardNoSend.html b/lock_api/struct.GuardNoSend.html new file mode 100644 index 00000000..300a58ab --- /dev/null +++ b/lock_api/struct.GuardNoSend.html @@ -0,0 +1,13 @@ +GuardNoSend in lock_api - Rust + +

Struct lock_api::GuardNoSend

pub struct GuardNoSend(_);

Marker type which indicates that the Guard type for a lock is not Send.

+

Trait Implementations

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/lock_api/struct.GuardSend.html b/lock_api/struct.GuardSend.html new file mode 100644 index 00000000..dd60892f --- /dev/null +++ b/lock_api/struct.GuardSend.html @@ -0,0 +1,13 @@ +GuardSend in lock_api - Rust + +

Struct lock_api::GuardSend

pub struct GuardSend(_);

Marker type which indicates that the Guard type for a lock is Send.

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/lock_api/struct.MappedMutexGuard.html b/lock_api/struct.MappedMutexGuard.html new file mode 100644 index 00000000..ecb20504 --- /dev/null +++ b/lock_api/struct.MappedMutexGuard.html @@ -0,0 +1,47 @@ +MappedMutexGuard in lock_api - Rust + +

Struct lock_api::MappedMutexGuard

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MappedMutexGuard<'a, R: RawMutex, T: ?Sized> { /* fields omitted */ }

An RAII mutex guard returned by MutexGuard::map, which can point to a +subfield of the protected data.

+

The main difference between MappedMutexGuard and MutexGuard is that the +former doesn’t support temporarily unlocking and re-locking, since that +could introduce soundness issues if the locked object is modified by another +thread.

+

Implementations

Makes a new MappedMutexGuard for a component of the locked data.

+

This operation cannot fail as the MappedMutexGuard passed +in already locked the mutex.

+

This is an associated function that needs to be +used as MappedMutexGuard::map(...). A method would interfere with methods of +the same name on the contents of the locked data.

+
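A minimal sketch of how such a guard is produced via MutexGuard::map (the Point type and its fields are hypothetical):

+use lock_api::{MappedMutexGuard, Mutex, MutexGuard, RawMutex};
+
+struct Point { x: i32, y: i32 }
+
+// Narrow a whole-struct guard to a single field; the mutex is still
+// unlocked when the mapped guard is dropped.
+fn lock_x<R: RawMutex>(m: &Mutex<R, Point>) -> MappedMutexGuard<'_, R, i32> {
+    MutexGuard::map(m.lock(), |p| &mut p.x)
+}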

Attempts to make a new MappedMutexGuard for a component of the +locked data. The original guard is returned if the closure returns None.

+

This operation cannot fail as the MappedMutexGuard passed +in already locked the mutex.

+

This is an associated function that needs to be +used as MappedMutexGuard::try_map(...). A method would interfere with methods of +the same name on the contents of the locked data.

+

Unlocks the mutex using a fair unlock protocol.

+

By default, mutexes are unfair and allow the current thread to re-lock +the mutex before another has the chance to acquire the lock, even if +that thread has been blocked on the mutex for a long time. This is the +default because it allows much higher throughput as it avoids forcing a +context switch on every mutex unlock. This can result in one thread +acquiring a mutex many more times than other threads.

+

However in some cases it can be beneficial to ensure fairness by forcing +the lock to pass on to a waiting thread if there is one. This is done by +using this method instead of dropping the MutexGuard normally.

+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Mutably dereferences the value.

+

Formats the value using the given formatter. Read more

+

Executes the destructor for this type. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/lock_api/struct.MappedReentrantMutexGuard.html b/lock_api/struct.MappedReentrantMutexGuard.html new file mode 100644 index 00000000..024d27db --- /dev/null +++ b/lock_api/struct.MappedReentrantMutexGuard.html @@ -0,0 +1,46 @@ +MappedReentrantMutexGuard in lock_api - Rust + +

Struct lock_api::MappedReentrantMutexGuard

#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> { /* fields omitted */ }

An RAII mutex guard returned by ReentrantMutexGuard::map, which can point to a +subfield of the protected data.

+

The main difference between MappedReentrantMutexGuard and ReentrantMutexGuard is that the +former doesn’t support temporarily unlocking and re-locking, since that +could introduce soundness issues if the locked object is modified by another +thread.

+

Implementations

Makes a new MappedReentrantMutexGuard for a component of the locked data.

+

This operation cannot fail as the MappedReentrantMutexGuard passed +in already locked the mutex.

+

This is an associated function that needs to be +used as MappedReentrantMutexGuard::map(...). A method would interfere with methods of +the same name on the contents of the locked data.

+

Attempts to make a new MappedReentrantMutexGuard for a component of the +locked data. The original guard is returned if the closure returns None.

+

This operation cannot fail as the MappedReentrantMutexGuard passed +in already locked the mutex.

+

This is an associated function that needs to be +used as MappedReentrantMutexGuard::try_map(...). A method would interfere with methods of +the same name on the contents of the locked data.

+

Unlocks the mutex using a fair unlock protocol.

+

By default, mutexes are unfair and allow the current thread to re-lock +the mutex before another has the chance to acquire the lock, even if +that thread has been blocked on the mutex for a long time. This is the +default because it allows much higher throughput as it avoids forcing a +context switch on every mutex unlock. This can result in one thread +acquiring a mutex many more times than other threads.

+

However in some cases it can be beneficial to ensure fairness by forcing +the lock to pass on to a waiting thread if there is one. This is done by +using this method instead of dropping the ReentrantMutexGuard normally.

+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Formats the value using the given formatter. Read more

+

Executes the destructor for this type. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/lock_api/struct.MappedRwLockReadGuard.html b/lock_api/struct.MappedRwLockReadGuard.html new file mode 100644 index 00000000..39c53c39 --- /dev/null +++ b/lock_api/struct.MappedRwLockReadGuard.html @@ -0,0 +1,46 @@ +MappedRwLockReadGuard in lock_api - Rust + +

Struct lock_api::MappedRwLockReadGuard

#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { /* fields omitted */ }

An RAII read lock guard returned by RwLockReadGuard::map, which can point to a +subfield of the protected data.

+

The main difference between MappedRwLockReadGuard and RwLockReadGuard is that the +former doesn’t support temporarily unlocking and re-locking, since that +could introduce soundness issues if the locked object is modified by another +thread.

+

Implementations

Makes a new MappedRwLockReadGuard for a component of the locked data.

+

This operation cannot fail as the MappedRwLockReadGuard passed +in already locked the data.

+

This is an associated function that needs to be +used as MappedRwLockReadGuard::map(...). A method would interfere with methods of +the same name on the contents of the locked data.

+

Attempts to make a new MappedRwLockReadGuard for a component of the +locked data. The original guard is returned if the closure returns None.

+

This operation cannot fail as the MappedRwLockReadGuard passed +in already locked the data.

+

This is an associated function that needs to be +used as MappedRwLockReadGuard::try_map(...). A method would interfere with methods of +the same name on the contents of the locked data.

+

Unlocks the RwLock using a fair unlock protocol.

+

By default, RwLock is unfair and allows the current thread to re-lock +the RwLock before another has the chance to acquire the lock, even if +that thread has been blocked on the RwLock for a long time. This is +the default because it allows much higher throughput as it avoids +forcing a context switch on every RwLock unlock. This can result in one +thread acquiring a RwLock many more times than other threads.

+

However in some cases it can be beneficial to ensure fairness by forcing +the lock to pass on to a waiting thread if there is one. This is done by +using this method instead of dropping the MappedRwLockReadGuard normally.

+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Formats the value using the given formatter. Read more

+

Executes the destructor for this type. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/lock_api/struct.MappedRwLockWriteGuard.html b/lock_api/struct.MappedRwLockWriteGuard.html new file mode 100644 index 00000000..c9590484 --- /dev/null +++ b/lock_api/struct.MappedRwLockWriteGuard.html @@ -0,0 +1,47 @@ +MappedRwLockWriteGuard in lock_api - Rust + +

Struct lock_api::MappedRwLockWriteGuard

#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> { /* fields omitted */ }

An RAII write lock guard returned by RwLockWriteGuard::map, which can point to a +subfield of the protected data.

+

The main difference between MappedRwLockWriteGuard and RwLockWriteGuard is that the +former doesn’t support temporarily unlocking and re-locking, since that +could introduce soundness issues if the locked object is modified by another +thread.

+

Implementations

Makes a new MappedRwLockWriteGuard for a component of the locked data.

+

This operation cannot fail as the MappedRwLockWriteGuard passed +in already locked the data.

+

This is an associated function that needs to be +used as MappedRwLockWriteGuard::map(...). A method would interfere with methods of +the same name on the contents of the locked data.

+

Attempts to make a new MappedRwLockWriteGuard for a component of the +locked data. The original guard is returned if the closure returns None.

+

This operation cannot fail as the MappedRwLockWriteGuard passed +in already locked the data.

+

This is an associated function that needs to be +used as MappedRwLockWriteGuard::try_map(...). A method would interfere with methods of +the same name on the contents of the locked data.

+

Unlocks the RwLock using a fair unlock protocol.

+

By default, RwLock is unfair and allows the current thread to re-lock +the RwLock before another has the chance to acquire the lock, even if +that thread has been blocked on the RwLock for a long time. This is +the default because it allows much higher throughput as it avoids +forcing a context switch on every RwLock unlock. This can result in one +thread acquiring a RwLock many more times than other threads.

+

However in some cases it can be beneficial to ensure fairness by forcing +the lock to pass on to a waiting thread if there is one. This is done by +using this method instead of dropping the MappedRwLockWriteGuard normally.

+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Mutably dereferences the value.

+

Formats the value using the given formatter. Read more

+

Executes the destructor for this type. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/lock_api/struct.Mutex.html b/lock_api/struct.Mutex.html new file mode 100644 index 00000000..3ae039cf --- /dev/null +++ b/lock_api/struct.Mutex.html @@ -0,0 +1,81 @@ +Mutex in lock_api - Rust + +

Struct lock_api::Mutex

pub struct Mutex<R, T: ?Sized> { /* fields omitted */ }

A mutual exclusion primitive useful for protecting shared data

+

This mutex will block threads waiting for the lock to become available. The +mutex can also be statically initialized or created via a new +constructor. Each mutex has a type parameter which represents the data that +it is protecting. The data can only be accessed through the RAII guards +returned from lock and try_lock, which guarantees that the data is only +ever accessed when the mutex is locked.

+
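For orientation, here is a minimal usage sketch. It is not from the crate docs: it assumes the spin crate built with its lock_api feature, whose spin::lock_api::Mutex instantiates this type with a spinlock-based raw mutex; any other RawMutex implementation works the same way.

use spin::lock_api::Mutex;

fn main() {
    // Hypothetical setup: `spin::lock_api::Mutex` is assumed here.
    let counter: Mutex<u32> = Mutex::new(0);

    {
        // `lock` blocks (with a spinlock: spins) until acquired and
        // returns an RAII guard; the data is only reachable through it.
        let mut guard = counter.lock();
        *guard += 1;
    } // guard dropped here => mutex unlocked

    // `try_lock` never blocks; it returns None while the lock is held.
    if let Some(guard) = counter.try_lock() {
        assert_eq!(*guard, 1);
    }
}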

Implementations

Creates a new mutex in an unlocked state ready for use.

Consumes this mutex, returning the underlying data.

Creates a new mutex based on a pre-existing raw mutex.

This allows creating a mutex in a constant context on stable Rust.

Acquires a mutex, blocking the current thread until it is able to do so.

This function will block the local thread until it is available to acquire the mutex. Upon returning, the thread is the only thread with the mutex held. An RAII guard is returned to allow scoped unlock of the lock. When the guard goes out of scope, the mutex will be unlocked.

Attempts to lock a mutex in the thread which already holds the lock will result in a deadlock.

Attempts to acquire this lock.

If the lock could not be acquired at this time, then None is returned. Otherwise, an RAII guard is returned. The lock will be unlocked when the guard is dropped.

This function does not block.

Returns a mutable reference to the underlying data.

Since this call borrows the Mutex mutably, no actual locking needs to take place—the mutable borrow statically guarantees no locks exist.

Checks whether the mutex is currently locked.

Forcibly unlocks the mutex.

This is useful when combined with mem::forget to hold a lock without the need to maintain a MutexGuard object alive, for example when dealing with FFI.

Safety

This method must only be called if the current thread logically owns a MutexGuard but that guard has been discarded using mem::forget. Behavior is undefined if a mutex is unlocked when not locked.

Returns the underlying raw mutex object.

Note that you will most likely need to import the RawMutex trait from lock_api to be able to call functions on the raw mutex.

Safety

This method is unsafe because it allows unlocking a mutex while still holding a reference to a MutexGuard.

Returns a raw pointer to the underlying data.

This is useful when combined with mem::forget to hold a lock without the need to maintain a MutexGuard object alive, for example when dealing with FFI.

Safety

You must ensure that there are no data races when dereferencing the returned pointer, for example if the current thread logically owns a MutexGuard but that guard has been discarded using mem::forget.

Forcibly unlocks the mutex using a fair unlock protocol.

This is useful when combined with mem::forget to hold a lock without the need to maintain a MutexGuard object alive, for example when dealing with FFI.

Safety

This method must only be called if the current thread logically owns a MutexGuard but that guard has been discarded using mem::forget. Behavior is undefined if a mutex is unlocked when not locked.

Attempts to acquire this lock until a timeout is reached.

If the lock could not be acquired before the timeout expired, then None is returned. Otherwise, an RAII guard is returned. The lock will be unlocked when the guard is dropped.

Attempts to acquire this lock until a timeout is reached.

If the lock could not be acquired before the timeout expired, then None is returned. Otherwise, an RAII guard is returned. The lock will be unlocked when the guard is dropped.

\ No newline at end of file
diff --git a/lock_api/struct.MutexGuard.html b/lock_api/struct.MutexGuard.html
new file mode 100644
index 00000000..4470f019
--- /dev/null
+++ b/lock_api/struct.MutexGuard.html
@@ -0,0 +1,57 @@

MutexGuard in lock_api - Rust

Struct lock_api::MutexGuard

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, R: RawMutex, T: ?Sized> { /* fields omitted */ }

An RAII implementation of a “scoped lock” of a mutex. When this structure is dropped (falls out of scope), the lock will be unlocked.

The data protected by the mutex can be accessed through this guard via its Deref and DerefMut implementations.

Implementations

Returns a reference to the original Mutex object.

Makes a new MappedMutexGuard for a component of the locked data.

This operation cannot fail as the MutexGuard passed in already locked the mutex.

This is an associated function that needs to be used as MutexGuard::map(...). A method would interfere with methods of the same name on the contents of the locked data.

Attempts to make a new MappedMutexGuard for a component of the locked data. The original guard is returned if the closure returns None.

This operation cannot fail as the MutexGuard passed in already locked the mutex.

This is an associated function that needs to be used as MutexGuard::try_map(...). A method would interfere with methods of the same name on the contents of the locked data.

Temporarily unlocks the mutex to execute the given function.

This is safe because &mut guarantees that there exist no other references to the data protected by the mutex.

Unlocks the mutex using a fair unlock protocol.

By default, mutexes are unfair and allow the current thread to re-lock the mutex before another has the chance to acquire the lock, even if that thread has been blocked on the mutex for a long time. This is the default because it allows much higher throughput as it avoids forcing a context switch on every mutex unlock. This can result in one thread acquiring a mutex many more times than other threads.

However in some cases it can be beneficial to ensure fairness by forcing the lock to pass on to a waiting thread if there is one. This is done by using this method instead of dropping the MutexGuard normally.

Temporarily unlocks the mutex to execute the given function.

The mutex is unlocked using a fair unlock protocol.

This is safe because &mut guarantees that there exist no other references to the data protected by the mutex.

Temporarily yields the mutex to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_fair followed by lock, however it can be much more efficient in the case where there are no waiting threads.
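A hedged sketch of map in action, again assuming spin::lock_api::Mutex as the concrete mutex type and the lock_api crate in scope:

use lock_api::MutexGuard;
use spin::lock_api::Mutex;

struct Point { x: i32, y: i32 }

fn main() {
    let point = Mutex::new(Point { x: 1, y: 2 });

    let guard = point.lock();
    // Narrow the guard to a single field, using the associated-function
    // call syntax described above.
    let mut x = MutexGuard::map(guard, |p| &mut p.x);
    *x += 10;
    drop(x); // the MappedMutexGuard unlocks the mutex on drop

    assert_eq!(point.lock().x, 11);
}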

\ No newline at end of file
diff --git a/lock_api/struct.RawReentrantMutex.html b/lock_api/struct.RawReentrantMutex.html
new file mode 100644
index 00000000..cb2c9e95
--- /dev/null
+++ b/lock_api/struct.RawReentrantMutex.html
@@ -0,0 +1,41 @@

RawReentrantMutex in lock_api - Rust

Struct lock_api::RawReentrantMutex

pub struct RawReentrantMutex<R, G> { /* fields omitted */ }

A raw mutex type that wraps another raw mutex to provide reentrancy.

Although this has the same methods as the RawMutex trait, it does not implement it, and should not be used in the same way, since this mutex can successfully acquire a lock multiple times in the same thread. Only use this when you know you want a raw mutex that can be locked reentrantly; you probably want ReentrantMutex instead.

Implementations

Initial value for an unlocked mutex.

Acquires this mutex, blocking if it’s held by another thread.

Attempts to acquire this mutex without blocking. Returns true if the lock was successfully acquired and false otherwise.

Unlocks this mutex. The inner mutex may not be unlocked if this mutex was acquired previously in the current thread.

Safety

This method may only be called if the mutex is held by the current thread.

Checks whether the mutex is currently locked.

Checks whether the mutex is currently held by the current thread.

Unlocks this mutex using a fair unlock protocol. The inner mutex may not be unlocked if this mutex was acquired previously in the current thread.

Safety

This method may only be called if the mutex is held by the current thread.

Temporarily yields the mutex to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_fair followed by lock, however it can be much more efficient in the case where there are no waiting threads.

Safety

This method may only be called if the mutex is held by the current thread.

Attempts to acquire this lock until a timeout is reached.

Attempts to acquire this lock until a timeout is reached.

\ No newline at end of file
diff --git a/lock_api/struct.ReentrantMutex.html b/lock_api/struct.ReentrantMutex.html
new file mode 100644
index 00000000..c9bab4aa
--- /dev/null
+++ b/lock_api/struct.ReentrantMutex.html
@@ -0,0 +1,90 @@

ReentrantMutex in lock_api - Rust

Struct lock_api::ReentrantMutex

pub struct ReentrantMutex<R, G, T: ?Sized> { /* fields omitted */ }

A mutex which can be recursively locked by a single thread.

This type is identical to Mutex except for the following points:

  • Locking multiple times from the same thread will work correctly instead of deadlocking.
  • ReentrantMutexGuard does not give mutable references to the locked data. Use a RefCell if you need this.

See Mutex for more details about the underlying mutex primitive.
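A short sketch of recursive locking; it assumes the parking_lot crate, which builds its ReentrantMutex from this type (any RawMutex plus GetThreadId pair behaves the same way):

use parking_lot::ReentrantMutex;
use std::cell::Cell;

fn main() {
    // Cell provides the interior mutability, since the guard only
    // hands out shared references (see the second point above).
    let m = ReentrantMutex::new(Cell::new(0));

    let first = m.lock();
    let second = m.lock(); // same thread: succeeds instead of deadlocking
    second.set(second.get() + 1);
    drop(second);
    drop(first); // unlocked only once every guard is gone

    assert_eq!(m.lock().get(), 1);
}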

Implementations

Creates a new reentrant mutex in an unlocked state ready for use.

Consumes this mutex, returning the underlying data.

Creates a new reentrant mutex based on a pre-existing raw mutex and a helper to get the thread ID.

This allows creating a reentrant mutex in a constant context on stable Rust.

Acquires a reentrant mutex, blocking the current thread until it is able to do so.

If the mutex is held by another thread then this function will block the local thread until it is available to acquire the mutex. If the mutex is already held by the current thread then this function will increment the lock reference count and return immediately. Upon returning, the thread is the only thread with the mutex held. An RAII guard is returned to allow scoped unlock of the lock. When the guard goes out of scope, the mutex will be unlocked.

Attempts to acquire this lock.

If the lock could not be acquired at this time, then None is returned. Otherwise, an RAII guard is returned. The lock will be unlocked when the guard is dropped.

This function does not block.

Returns a mutable reference to the underlying data.

Since this call borrows the ReentrantMutex mutably, no actual locking needs to take place—the mutable borrow statically guarantees no locks exist.

Checks whether the mutex is currently locked.

Checks whether the mutex is currently held by the current thread.

Forcibly unlocks the mutex.

This is useful when combined with mem::forget to hold a lock without the need to maintain a ReentrantMutexGuard object alive, for example when dealing with FFI.

Safety

This method must only be called if the current thread logically owns a ReentrantMutexGuard but that guard has been discarded using mem::forget. Behavior is undefined if a mutex is unlocked when not locked.

Returns the underlying raw mutex object.

Note that you will most likely need to import the RawMutex trait from lock_api to be able to call functions on the raw mutex.

Safety

This method is unsafe because it allows unlocking a mutex while still holding a reference to a ReentrantMutexGuard.

Returns a raw pointer to the underlying data.

This is useful when combined with mem::forget to hold a lock without the need to maintain a ReentrantMutexGuard object alive, for example when dealing with FFI.

Safety

You must ensure that there are no data races when dereferencing the returned pointer, for example if the current thread logically owns a ReentrantMutexGuard but that guard has been discarded using mem::forget.

Forcibly unlocks the mutex using a fair unlock protocol.

This is useful when combined with mem::forget to hold a lock without the need to maintain a ReentrantMutexGuard object alive, for example when dealing with FFI.

Safety

This method must only be called if the current thread logically owns a ReentrantMutexGuard but that guard has been discarded using mem::forget. Behavior is undefined if a mutex is unlocked when not locked.

Attempts to acquire this lock until a timeout is reached.

If the lock could not be acquired before the timeout expired, then None is returned. Otherwise, an RAII guard is returned. The lock will be unlocked when the guard is dropped.

Attempts to acquire this lock until a timeout is reached.

If the lock could not be acquired before the timeout expired, then None is returned. Otherwise, an RAII guard is returned. The lock will be unlocked when the guard is dropped.

\ No newline at end of file
diff --git a/lock_api/struct.ReentrantMutexGuard.html b/lock_api/struct.ReentrantMutexGuard.html
new file mode 100644
index 00000000..195eeca0
--- /dev/null
+++ b/lock_api/struct.ReentrantMutexGuard.html
@@ -0,0 +1,56 @@

ReentrantMutexGuard in lock_api - Rust

Struct lock_api::ReentrantMutexGuard

#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> { /* fields omitted */ }

An RAII implementation of a “scoped lock” of a reentrant mutex. When this structure is dropped (falls out of scope), the lock will be unlocked.

The data protected by the mutex can be accessed through this guard via its Deref implementation.

Implementations

Returns a reference to the original ReentrantMutex object.

Makes a new MappedReentrantMutexGuard for a component of the locked data.

This operation cannot fail as the ReentrantMutexGuard passed in already locked the mutex.

This is an associated function that needs to be used as ReentrantMutexGuard::map(...). A method would interfere with methods of the same name on the contents of the locked data.

Attempts to make a new MappedReentrantMutexGuard for a component of the locked data. The original guard is returned if the closure returns None.

This operation cannot fail as the ReentrantMutexGuard passed in already locked the mutex.

This is an associated function that needs to be used as ReentrantMutexGuard::try_map(...). A method would interfere with methods of the same name on the contents of the locked data.

Temporarily unlocks the mutex to execute the given function.

This is safe because &mut guarantees that there exist no other references to the data protected by the mutex.

Unlocks the mutex using a fair unlock protocol.

By default, mutexes are unfair and allow the current thread to re-lock the mutex before another has the chance to acquire the lock, even if that thread has been blocked on the mutex for a long time. This is the default because it allows much higher throughput as it avoids forcing a context switch on every mutex unlock. This can result in one thread acquiring a mutex many more times than other threads.

However in some cases it can be beneficial to ensure fairness by forcing the lock to pass on to a waiting thread if there is one. This is done by using this method instead of dropping the ReentrantMutexGuard normally.

Temporarily unlocks the mutex to execute the given function.

The mutex is unlocked using a fair unlock protocol.

This is safe because &mut guarantees that there exist no other references to the data protected by the mutex.

Temporarily yields the mutex to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_fair followed by lock, however it can be much more efficient in the case where there are no waiting threads.

\ No newline at end of file
diff --git a/lock_api/struct.RwLock.html b/lock_api/struct.RwLock.html
new file mode 100644
index 00000000..c4c47c1f
--- /dev/null
+++ b/lock_api/struct.RwLock.html
@@ -0,0 +1,183 @@

RwLock in lock_api - Rust

Struct lock_api::RwLock

pub struct RwLock<R, T: ?Sized> { /* fields omitted */ }

A reader-writer lock.

This type of lock allows a number of readers or at most one writer at any point in time. The write portion of this lock typically allows modification of the underlying data (exclusive access) and the read portion of this lock typically allows for read-only access (shared access).

The type parameter T represents the data that this lock protects. It is required that T satisfies Send to be shared across threads and Sync to allow concurrent access through readers. The RAII guards returned from the locking methods implement Deref (and DerefMut for the write methods) to allow access to the contents of the lock.
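A minimal usage sketch follows; it assumes spin::lock_api::RwLock (the spin crate with its lock_api feature) as the concrete lock type, but any RawRwLock implementation works the same way.

use spin::lock_api::RwLock;

fn main() {
    // Hypothetical setup: `spin::lock_api::RwLock` is assumed here.
    let lock = RwLock::new(5);

    {
        // Any number of read guards may coexist.
        let r1 = lock.read();
        let r2 = lock.read();
        assert_eq!(*r1 + *r2, 10);
    } // both read guards dropped here

    {
        // The write guard is exclusive and implements DerefMut.
        let mut w = lock.write();
        *w += 1;
        assert_eq!(*w, 6);
    }
}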

Implementations

Creates a new instance of an RwLock<T> which is unlocked.

Consumes this RwLock, returning the underlying data.

Creates a new instance of an RwLock<T> based on a pre-existing RawRwLock<T>.

This allows creating a RwLock<T> in a constant context on stable Rust.

Locks this RwLock with shared read access, blocking the current thread until it can be acquired.

The calling thread will be blocked until there are no more writers which hold the lock. There may be other readers currently inside the lock when this method returns.

Note that attempts to recursively acquire a read lock on a RwLock when the current thread already holds one may result in a deadlock.

Returns an RAII guard which will release this thread’s shared access once it is dropped.

Attempts to acquire this RwLock with shared read access.

If the access could not be granted at this time, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.

This function does not block.

Locks this RwLock with exclusive write access, blocking the current thread until it can be acquired.

This function will not return while other writers or other readers currently have access to the lock.

Returns an RAII guard which will drop the write access of this RwLock when dropped.

Attempts to lock this RwLock with exclusive write access.

If the lock could not be acquired at this time, then None is returned. Otherwise, an RAII guard is returned which will release the lock when it is dropped.

This function does not block.

Returns a mutable reference to the underlying data.

Since this call borrows the RwLock mutably, no actual locking needs to take place—the mutable borrow statically guarantees no locks exist.

Checks whether this RwLock is currently locked in any way.

Forcibly unlocks a read lock.

This is useful when combined with mem::forget to hold a lock without the need to maintain a RwLockReadGuard object alive, for example when dealing with FFI.

Safety

This method must only be called if the current thread logically owns a RwLockReadGuard but that guard has been discarded using mem::forget. Behavior is undefined if a rwlock is read-unlocked when not read-locked.

Forcibly unlocks a write lock.

This is useful when combined with mem::forget to hold a lock without the need to maintain a RwLockWriteGuard object alive, for example when dealing with FFI.

Safety

This method must only be called if the current thread logically owns a RwLockWriteGuard but that guard has been discarded using mem::forget. Behavior is undefined if a rwlock is write-unlocked when not write-locked.

Returns the underlying raw reader-writer lock object.

Note that you will most likely need to import the RawRwLock trait from lock_api to be able to call functions on the raw reader-writer lock.

Safety

This method is unsafe because it allows unlocking a mutex while still holding a reference to a lock guard.

Returns a raw pointer to the underlying data.

This is useful when combined with mem::forget to hold a lock without the need to maintain a RwLockReadGuard or RwLockWriteGuard object alive, for example when dealing with FFI.

Safety

You must ensure that there are no data races when dereferencing the returned pointer, for example if the current thread logically owns a RwLockReadGuard or RwLockWriteGuard but that guard has been discarded using mem::forget.

Forcibly unlocks a read lock using a fair unlock protocol.

This is useful when combined with mem::forget to hold a lock without the need to maintain a RwLockReadGuard object alive, for example when dealing with FFI.

Safety

This method must only be called if the current thread logically owns a RwLockReadGuard but that guard has been discarded using mem::forget. Behavior is undefined if a rwlock is read-unlocked when not read-locked.

Forcibly unlocks a write lock using a fair unlock protocol.

This is useful when combined with mem::forget to hold a lock without the need to maintain a RwLockWriteGuard object alive, for example when dealing with FFI.

Safety

This method must only be called if the current thread logically owns a RwLockWriteGuard but that guard has been discarded using mem::forget. Behavior is undefined if a rwlock is write-unlocked when not write-locked.

Attempts to acquire this RwLock with shared read access until a timeout is reached.

If the access could not be granted before the timeout expires, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.

Attempts to acquire this RwLock with shared read access until a timeout is reached.

If the access could not be granted before the timeout expires, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.

Attempts to acquire this RwLock with exclusive write access until a timeout is reached.

If the access could not be granted before the timeout expires, then None is returned. Otherwise, an RAII guard is returned which will release the exclusive access when it is dropped.

Attempts to acquire this RwLock with exclusive write access until a timeout is reached.

If the access could not be granted before the timeout expires, then None is returned. Otherwise, an RAII guard is returned which will release the exclusive access when it is dropped.

Locks this RwLock with shared read access, blocking the current thread until it can be acquired.

The calling thread will be blocked until there are no more writers which hold the lock. There may be other readers currently inside the lock when this method returns.

Unlike read, this method is guaranteed to succeed without blocking if another read lock is held at the time of the call. This allows a thread to recursively lock a RwLock. However using this method can cause writers to starve since readers no longer block if a writer is waiting for the lock.

Returns an RAII guard which will release this thread’s shared access once it is dropped.

Attempts to acquire this RwLock with shared read access.

If the access could not be granted at this time, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.

This method is guaranteed to succeed if another read lock is held at the time of the call. See the documentation for read_recursive for details.

This function does not block.

Attempts to acquire this RwLock with shared read access until a timeout is reached.

If the access could not be granted before the timeout expires, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.

This method is guaranteed to succeed without blocking if another read lock is held at the time of the call. See the documentation for read_recursive for details.

Attempts to acquire this RwLock with shared read access until a timeout is reached.

If the access could not be granted before the timeout expires, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.

Locks this RwLock with upgradable read access, blocking the current thread until it can be acquired.

The calling thread will be blocked until there are no more writers or other upgradable reads which hold the lock. There may be other readers currently inside the lock when this method returns.

Returns an RAII guard which will release this thread’s shared access once it is dropped.

Attempts to acquire this RwLock with upgradable read access.

If the access could not be granted at this time, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.

This function does not block.

Attempts to acquire this RwLock with upgradable read access until a timeout is reached.

If the access could not be granted before the timeout expires, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.

Attempts to acquire this RwLock with upgradable read access until a timeout is reached.

If the access could not be granted before the timeout expires, then None is returned. Otherwise, an RAII guard is returned which will release the shared access when it is dropped.
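The upgradable methods compose as in this hedged sketch (again assuming spin::lock_api::RwLock, whose raw lock implements RawRwLockUpgrade):

use lock_api::RwLockUpgradableReadGuard;
use spin::lock_api::RwLock;

fn main() {
    let lock = RwLock::new(Vec::<u8>::new());

    // Only one upgradable reader can exist at a time, but it may
    // coexist with plain readers.
    let upgradable = lock.upgradable_read();
    if upgradable.is_empty() {
        // Atomically trade shared access for exclusive access.
        let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
        writer.push(42);
    }
}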

\ No newline at end of file
diff --git a/lock_api/struct.RwLockReadGuard.html b/lock_api/struct.RwLockReadGuard.html
new file mode 100644
index 00000000..2d65697f
--- /dev/null
+++ b/lock_api/struct.RwLockReadGuard.html
@@ -0,0 +1,55 @@

RwLockReadGuard in lock_api - Rust

Struct lock_api::RwLockReadGuard

#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { /* fields omitted */ }

RAII structure used to release the shared read access of a lock when dropped.

Implementations

Returns a reference to the original reader-writer lock object.

Makes a new MappedRwLockReadGuard for a component of the locked data.

This operation cannot fail as the RwLockReadGuard passed in already locked the data.

This is an associated function that needs to be used as RwLockReadGuard::map(...). A method would interfere with methods of the same name on the contents of the locked data.

Attempts to make a new MappedRwLockReadGuard for a component of the locked data. The original guard is returned if the closure returns None.

This operation cannot fail as the RwLockReadGuard passed in already locked the data.

This is an associated function that needs to be used as RwLockReadGuard::try_map(...). A method would interfere with methods of the same name on the contents of the locked data.

Temporarily unlocks the RwLock to execute the given function.

The RwLock is unlocked using a fair unlock protocol.

This is safe because &mut guarantees that there exist no other references to the data protected by the RwLock.

Unlocks the RwLock using a fair unlock protocol.

By default, RwLock is unfair and allows the current thread to re-lock the RwLock before another has the chance to acquire the lock, even if that thread has been blocked on the RwLock for a long time. This is the default because it allows much higher throughput as it avoids forcing a context switch on every RwLock unlock. This can result in one thread acquiring a RwLock many more times than other threads.

However in some cases it can be beneficial to ensure fairness by forcing the lock to pass on to a waiting thread if there is one. This is done by using this method instead of dropping the RwLockReadGuard normally.

Temporarily unlocks the RwLock to execute the given function.

The RwLock is unlocked using a fair unlock protocol.

This is safe because &mut guarantees that there exist no other references to the data protected by the RwLock.

Temporarily yields the RwLock to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_fair followed by read, however it can be much more efficient in the case where there are no waiting threads.

\ No newline at end of file
diff --git a/lock_api/struct.RwLockUpgradableReadGuard.html b/lock_api/struct.RwLockUpgradableReadGuard.html
new file mode 100644
index 00000000..b4eebf78
--- /dev/null
+++ b/lock_api/struct.RwLockUpgradableReadGuard.html
@@ -0,0 +1,59 @@

RwLockUpgradableReadGuard in lock_api - Rust

Struct lock_api::RwLockUpgradableReadGuard

#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> { /* fields omitted */ }

RAII structure used to release the upgradable read access of a lock when dropped.

Implementations

Returns a reference to the original reader-writer lock object.

Temporarily unlocks the RwLock to execute the given function.

This is safe because &mut guarantees that there exist no other references to the data protected by the RwLock.

Atomically upgrades an upgradable read lock into an exclusive write lock, blocking the current thread until it can be acquired.

Tries to atomically upgrade an upgradable read lock into an exclusive write lock.

If the access could not be granted at this time, then the current guard is returned.

Unlocks the RwLock using a fair unlock protocol.

By default, RwLock is unfair and allows the current thread to re-lock the RwLock before another has the chance to acquire the lock, even if that thread has been blocked on the RwLock for a long time. This is the default because it allows much higher throughput as it avoids forcing a context switch on every RwLock unlock. This can result in one thread acquiring a RwLock many more times than other threads.

However in some cases it can be beneficial to ensure fairness by forcing the lock to pass on to a waiting thread if there is one. This is done by using this method instead of dropping the RwLockUpgradableReadGuard normally.

Temporarily unlocks the RwLock to execute the given function.

The RwLock is unlocked using a fair unlock protocol.

This is safe because &mut guarantees that there exist no other references to the data protected by the RwLock.

Temporarily yields the RwLock to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_fair followed by upgradable_read, however it can be much more efficient in the case where there are no waiting threads.

Atomically downgrades an upgradable read lock into a shared read lock without allowing any writers to take exclusive access of the lock in the meantime.

Note that if there are any writers currently waiting to take the lock then other readers may not be able to acquire the lock even if it was downgraded.

Tries to atomically upgrade an upgradable read lock into an exclusive write lock, until a timeout is reached.

If the access could not be granted before the timeout expires, then the current guard is returned.

Tries to atomically upgrade an upgradable read lock into an exclusive write lock, until a timeout is reached.

If the access could not be granted before the timeout expires, then the current guard is returned.

\ No newline at end of file
diff --git a/lock_api/struct.RwLockWriteGuard.html b/lock_api/struct.RwLockWriteGuard.html
new file mode 100644
index 00000000..e252b556
--- /dev/null
+++ b/lock_api/struct.RwLockWriteGuard.html
@@ -0,0 +1,65 @@

RwLockWriteGuard in lock_api - Rust

Struct lock_api::RwLockWriteGuard

#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> { /* fields omitted */ }

RAII structure used to release the exclusive write access of a lock when dropped.

Implementations

Returns a reference to the original reader-writer lock object.

Makes a new MappedRwLockWriteGuard for a component of the locked data.

This operation cannot fail as the RwLockWriteGuard passed in already locked the data.

This is an associated function that needs to be used as RwLockWriteGuard::map(...). A method would interfere with methods of the same name on the contents of the locked data.

Attempts to make a new MappedRwLockWriteGuard for a component of the locked data. The original guard is returned if the closure returns None.

This operation cannot fail as the RwLockWriteGuard passed in already locked the data.

This is an associated function that needs to be used as RwLockWriteGuard::try_map(...). A method would interfere with methods of the same name on the contents of the locked data.

Temporarily unlocks the RwLock to execute the given function.

This is safe because &mut guarantees that there exist no other references to the data protected by the RwLock.

Atomically downgrades a write lock into a read lock without allowing any writers to take exclusive access of the lock in the meantime.

Note that if there are any writers currently waiting to take the lock then other readers may not be able to acquire the lock even if it was downgraded.

Atomically downgrades a write lock into an upgradable read lock without allowing any writers to take exclusive access of the lock in the meantime.

Note that if there are any writers currently waiting to take the lock then other readers may not be able to acquire the lock even if it was downgraded.

Unlocks the RwLock using a fair unlock protocol.

By default, RwLock is unfair and allows the current thread to re-lock the RwLock before another has the chance to acquire the lock, even if that thread has been blocked on the RwLock for a long time. This is the default because it allows much higher throughput as it avoids forcing a context switch on every RwLock unlock. This can result in one thread acquiring a RwLock many more times than other threads.

However in some cases it can be beneficial to ensure fairness by forcing the lock to pass on to a waiting thread if there is one. This is done by using this method instead of dropping the RwLockWriteGuard normally.

Temporarily unlocks the RwLock to execute the given function.

The RwLock is unlocked using a fair unlock protocol.

This is safe because &mut guarantees that there exist no other references to the data protected by the RwLock.

Temporarily yields the RwLock to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_fair followed by write, however it can be much more efficient in the case where there are no waiting threads.
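A hedged sketch of downgrade (assuming spin::lock_api::RwLock; downgrading needs a raw lock that implements RawRwLockDowngrade):

use lock_api::RwLockWriteGuard;
use spin::lock_api::RwLock;

fn main() {
    let lock = RwLock::new(String::new());

    let mut writer = lock.write();
    writer.push_str("initialized");

    // Atomically trade exclusive for shared access; no other writer
    // can slip in between the two states.
    let reader = RwLockWriteGuard::downgrade(writer);
    assert_eq!(*reader, "initialized");
}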

\ No newline at end of file
diff --git a/lock_api/trait.GetThreadId.html b/lock_api/trait.GetThreadId.html
new file mode 100644
index 00000000..bcf70e95
--- /dev/null
+++ b/lock_api/trait.GetThreadId.html
@@ -0,0 +1,17 @@

GetThreadId in lock_api - Rust

Trait lock_api::GetThreadId

pub unsafe trait GetThreadId {
    const INIT: Self;

    fn nonzero_thread_id(&self) -> NonZeroUsize;
}

Helper trait which returns a non-zero thread ID.

The simplest way to implement this trait is to return the address of a thread-local variable.

Safety

Implementations of this trait must ensure that no two active threads share the same thread ID. However the ID of a thread that has exited can be re-used since that thread is no longer active.
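A minimal sketch of the thread-local-address approach described above; the implementor type StdThreadId is illustrative, not part of the crate:

use lock_api::GetThreadId;
use std::num::NonZeroUsize;

pub struct StdThreadId;

unsafe impl GetThreadId for StdThreadId {
    const INIT: StdThreadId = StdThreadId;

    fn nonzero_thread_id(&self) -> NonZeroUsize {
        // Every active thread sees a distinct address for its copy of
        // the thread-local, which satisfies the safety contract above.
        thread_local!(static ID: u8 = 0);
        ID.with(|id| {
            NonZeroUsize::new(id as *const u8 as usize)
                .expect("a reference is never null")
        })
    }
}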

Associated Constants

Initial value.

Required methods

Returns a non-zero thread ID which identifies the current thread of execution.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawMutex.html b/lock_api/trait.RawMutex.html
new file mode 100644
index 00000000..ee0b849a
--- /dev/null
+++ b/lock_api/trait.RawMutex.html
@@ -0,0 +1,30 @@

RawMutex in lock_api - Rust

Trait lock_api::RawMutex

pub unsafe trait RawMutex {
    type GuardMarker;

    const INIT: Self;

    fn lock(&self);

    fn try_lock(&self) -> bool;

    unsafe fn unlock(&self);

    fn is_locked(&self) -> bool { ... }
}

Basic operations for a mutex.

Types implementing this trait can be used by Mutex to form a safe and fully-functioning mutex type.

Safety

Implementations of this trait must ensure that the mutex is actually exclusive: a lock can’t be acquired while the mutex is already locked.
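To make this concrete, here is the kind of minimal test-and-set spinlock the trait is designed around (a sketch: production locks add backoff and fairness):

use core::sync::atomic::{AtomicBool, Ordering};
use lock_api::{GuardSend, RawMutex};

pub struct RawSpinlock(AtomicBool);

unsafe impl RawMutex for RawSpinlock {
    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));

    // A guard of this lock may be sent to (and unlocked from) another thread.
    type GuardMarker = GuardSend;

    fn lock(&self) {
        while !self.try_lock() {
            core::hint::spin_loop();
        }
    }

    fn try_lock(&self) -> bool {
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

// `lock_api::Mutex` then turns the raw lock into a full mutex type.
pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;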

Associated Types

Marker type which determines whether a lock guard should be Send. Use one of the GuardSend or GuardNoSend helper types here.

Associated Constants

Initial value for an unlocked mutex.

Required methods

Acquires this mutex, blocking the current thread until it is able to do so.

Attempts to acquire this mutex without blocking. Returns true if the lock was successfully acquired and false otherwise.

Unlocks this mutex.

Safety

This method may only be called if the mutex is held in the current context, i.e. it must be paired with a successful call to lock, try_lock, try_lock_for or try_lock_until.

Provided methods

Checks whether the mutex is currently locked.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawMutexFair.html b/lock_api/trait.RawMutexFair.html
new file mode 100644
index 00000000..d61e8e98
--- /dev/null
+++ b/lock_api/trait.RawMutexFair.html
@@ -0,0 +1,23 @@

RawMutexFair in lock_api - Rust

Trait lock_api::RawMutexFair

pub unsafe trait RawMutexFair: RawMutex {
    unsafe fn unlock_fair(&self);

    unsafe fn bump(&self) { ... }
}

Additional methods for mutexes which support fair unlocking.

Fair unlocking means that a lock is handed directly over to the next waiting thread if there is one, without giving other threads the opportunity to “steal” the lock in the meantime. This is typically slower than unfair unlocking, but may be necessary in certain circumstances.

Required methods

Unlocks this mutex using a fair unlock protocol.

Safety

This method may only be called if the mutex is held in the current context, see the documentation of unlock.

Provided methods

Temporarily yields the mutex to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_fair followed by lock, however it can be much more efficient in the case where there are no waiting threads.

Safety

This method may only be called if the mutex is held in the current context, see the documentation of unlock.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawMutexTimed.html b/lock_api/trait.RawMutexTimed.html
new file mode 100644
index 00000000..273931d6
--- /dev/null
+++ b/lock_api/trait.RawMutexTimed.html
@@ -0,0 +1,15 @@

RawMutexTimed in lock_api - Rust

Trait lock_api::RawMutexTimed

pub unsafe trait RawMutexTimed: RawMutex {
    type Duration;
    type Instant;

    fn try_lock_for(&self, timeout: Self::Duration) -> bool;

    fn try_lock_until(&self, timeout: Self::Instant) -> bool;
}

Additional methods for mutexes which support locking with timeouts.

The Duration and Instant types are specified as associated types so that this trait is usable even in no_std environments.
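On the Mutex wrapper these surface as Option-returning methods; a hedged sketch assuming the parking_lot crate, whose raw mutex implements this trait with std’s Duration and Instant:

use parking_lot::Mutex;
use std::time::Duration;

fn main() {
    let m = Mutex::new(0u32);

    // Built on `try_lock_for`: give up after 50 ms instead of blocking.
    if let Some(mut guard) = m.try_lock_for(Duration::from_millis(50)) {
        *guard += 1;
    } else {
        eprintln!("timed out waiting for the lock");
    }
}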

Associated Types

Duration type used for try_lock_for.

Instant type used for try_lock_until.

Required methods

Attempts to acquire this lock until a timeout is reached.

Attempts to acquire this lock until a timeout is reached.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLock.html b/lock_api/trait.RawRwLock.html
new file mode 100644
index 00000000..735d00bc
--- /dev/null
+++ b/lock_api/trait.RawRwLock.html
@@ -0,0 +1,38 @@

RawRwLock in lock_api - Rust

Trait lock_api::RawRwLock

pub unsafe trait RawRwLock {
    type GuardMarker;

    const INIT: Self;

    fn lock_shared(&self);

    fn try_lock_shared(&self) -> bool;

    unsafe fn unlock_shared(&self);

    fn lock_exclusive(&self);

    fn try_lock_exclusive(&self) -> bool;

    unsafe fn unlock_exclusive(&self);

    fn is_locked(&self) -> bool { ... }
}

Basic operations for a reader-writer lock.

Types implementing this trait can be used by RwLock to form a safe and fully-functioning RwLock type.

Safety

Implementations of this trait must ensure that the RwLock is actually exclusive: an exclusive lock can’t be acquired while an exclusive or shared lock exists, and a shared lock can’t be acquired while an exclusive lock exists.
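For concreteness, a minimal spin-based sketch of this contract (ours, not the crate’s; it ignores reader-count overflow and writer starvation, which real implementations such as spin or parking_lot handle):

use core::sync::atomic::{AtomicUsize, Ordering};
use lock_api::{GuardSend, RawRwLock};

// Reader count lives in the low bits; the top bit marks a writer.
const WRITER: usize = usize::MAX / 2 + 1;

pub struct RawSpinRwLock(AtomicUsize);

unsafe impl RawRwLock for RawSpinRwLock {
    const INIT: RawSpinRwLock = RawSpinRwLock(AtomicUsize::new(0));

    type GuardMarker = GuardSend;

    fn lock_shared(&self) {
        while !self.try_lock_shared() {
            core::hint::spin_loop();
        }
    }

    fn try_lock_shared(&self) -> bool {
        // Optimistically register as a reader, then back out if a
        // writer currently holds the lock.
        let state = self.0.fetch_add(1, Ordering::Acquire);
        if state & WRITER != 0 {
            self.0.fetch_sub(1, Ordering::Release);
            false
        } else {
            true
        }
    }

    unsafe fn unlock_shared(&self) {
        self.0.fetch_sub(1, Ordering::Release);
    }

    fn lock_exclusive(&self) {
        while !self.try_lock_exclusive() {
            core::hint::spin_loop();
        }
    }

    fn try_lock_exclusive(&self) -> bool {
        // Succeeds only when no reader or writer is registered.
        self.0
            .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock_exclusive(&self) {
        self.0.fetch_sub(WRITER, Ordering::Release);
    }
}

pub type SpinRwLock<T> = lock_api::RwLock<RawSpinRwLock, T>;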

Associated Types

Marker type which determines whether a lock guard should be Send. Use one of the GuardSend or GuardNoSend helper types here.

Associated Constants

Initial value for an unlocked RwLock.

Required methods

Acquires a shared lock, blocking the current thread until it is able to do so.

Attempts to acquire a shared lock without blocking.

Releases a shared lock.

Safety

This method may only be called if a shared lock is held in the current context.

Acquires an exclusive lock, blocking the current thread until it is able to do so.

Attempts to acquire an exclusive lock without blocking.

Releases an exclusive lock.

Safety

This method may only be called if an exclusive lock is held in the current context.

Provided methods

Checks if this RwLock is currently locked in any way.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLockDowngrade.html b/lock_api/trait.RawRwLockDowngrade.html
new file mode 100644
index 00000000..2e805372
--- /dev/null
+++ b/lock_api/trait.RawRwLockDowngrade.html
@@ -0,0 +1,11 @@

RawRwLockDowngrade in lock_api - Rust

Trait lock_api::RawRwLockDowngrade

pub unsafe trait RawRwLockDowngrade: RawRwLock {
    unsafe fn downgrade(&self);
}

Additional methods for RwLocks which support atomically downgrading an exclusive lock to a shared lock.

Required methods

Atomically downgrades an exclusive lock into a shared lock without allowing any thread to take an exclusive lock in the meantime.

Safety

This method may only be called if an exclusive lock is held in the current context.
Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLockFair.html b/lock_api/trait.RawRwLockFair.html
new file mode 100644
index 00000000..9cbe108f
--- /dev/null
+++ b/lock_api/trait.RawRwLockFair.html
@@ -0,0 +1,32 @@

RawRwLockFair in lock_api - Rust

Trait lock_api::RawRwLockFair

pub unsafe trait RawRwLockFair: RawRwLock {
    unsafe fn unlock_shared_fair(&self);

    unsafe fn unlock_exclusive_fair(&self);

    unsafe fn bump_shared(&self) { ... }

    unsafe fn bump_exclusive(&self) { ... }
}

Additional methods for RwLocks which support fair unlocking.

Fair unlocking means that a lock is handed directly over to the next waiting thread if there is one, without giving other threads the opportunity to “steal” the lock in the meantime. This is typically slower than unfair unlocking, but may be necessary in certain circumstances.

Required methods

Releases a shared lock using a fair unlock protocol.

Safety

This method may only be called if a shared lock is held in the current context.

Releases an exclusive lock using a fair unlock protocol.

Safety

This method may only be called if an exclusive lock is held in the current context.

Provided methods

Temporarily yields a shared lock to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_shared_fair followed by lock_shared, however it can be much more efficient in the case where there are no waiting threads.

Safety

This method may only be called if a shared lock is held in the current context.

Temporarily yields an exclusive lock to a waiting thread if there is one.

This method is functionally equivalent to calling unlock_exclusive_fair followed by lock_exclusive, however it can be much more efficient in the case where there are no waiting threads.

Safety

This method may only be called if an exclusive lock is held in the current context.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLockRecursive.html b/lock_api/trait.RawRwLockRecursive.html
new file mode 100644
index 00000000..853371f2
--- /dev/null
+++ b/lock_api/trait.RawRwLockRecursive.html
@@ -0,0 +1,14 @@

RawRwLockRecursive in lock_api - Rust

Trait lock_api::RawRwLockRecursive

pub unsafe trait RawRwLockRecursive: RawRwLock {
    fn lock_shared_recursive(&self);

    fn try_lock_shared_recursive(&self) -> bool;
}

Additional methods for RwLocks which support recursive read locks.

These are guaranteed to succeed without blocking if another read lock is held at the time of the call. This allows a thread to recursively lock a RwLock. However using this method can cause writers to starve since readers no longer block if a writer is waiting for the lock.

Required methods

Acquires a shared lock without deadlocking in case of a recursive lock.

Attempts to acquire a shared lock without deadlocking in case of a recursive lock.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLockRecursiveTimed.html b/lock_api/trait.RawRwLockRecursiveTimed.html
new file mode 100644
index 00000000..2e9eaab0
--- /dev/null
+++ b/lock_api/trait.RawRwLockRecursiveTimed.html
@@ -0,0 +1,11 @@

RawRwLockRecursiveTimed in lock_api - Rust

Trait lock_api::RawRwLockRecursiveTimed

pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;

    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
}

Additional methods for RwLocks which support recursive read locks and timeouts.

Required methods

Attempts to acquire a shared lock until a timeout is reached, without deadlocking in case of a recursive lock.

Attempts to acquire a shared lock until a timeout is reached, without deadlocking in case of a recursive lock.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLockTimed.html b/lock_api/trait.RawRwLockTimed.html
new file mode 100644
index 00000000..5f49aa1c
--- /dev/null
+++ b/lock_api/trait.RawRwLockTimed.html
@@ -0,0 +1,19 @@

RawRwLockTimed in lock_api - Rust

Trait lock_api::RawRwLockTimed

pub unsafe trait RawRwLockTimed: RawRwLock {
    type Duration;
    type Instant;

    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;

    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;

    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;

    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
}

Additional methods for RwLocks which support locking with timeouts.

The Duration and Instant types are specified as associated types so that this trait is usable even in no_std environments.

Associated Types

Duration type used for try_lock_for.

Instant type used for try_lock_until.

Required methods

Attempts to acquire a shared lock until a timeout is reached.

Attempts to acquire a shared lock until a timeout is reached.

Attempts to acquire an exclusive lock until a timeout is reached.

Attempts to acquire an exclusive lock until a timeout is reached.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLockUpgrade.html b/lock_api/trait.RawRwLockUpgrade.html
new file mode 100644
index 00000000..00133579
--- /dev/null
+++ b/lock_api/trait.RawRwLockUpgrade.html
@@ -0,0 +1,26 @@

RawRwLockUpgrade in lock_api - Rust

Trait lock_api::RawRwLockUpgrade

pub unsafe trait RawRwLockUpgrade: RawRwLock {
    fn lock_upgradable(&self);

    fn try_lock_upgradable(&self) -> bool;

    unsafe fn unlock_upgradable(&self);

    unsafe fn upgrade(&self);

    unsafe fn try_upgrade(&self) -> bool;
}

Additional methods for RwLocks which support atomically upgrading a shared lock to an exclusive lock.

This requires acquiring a special “upgradable read lock” instead of a normal shared lock. There may only be one upgradable lock at any time, otherwise deadlocks could occur when upgrading.

Required methods

Acquires an upgradable lock, blocking the current thread until it is able to do so.

Attempts to acquire an upgradable lock without blocking.

Releases an upgradable lock.

Safety

This method may only be called if an upgradable lock is held in the current context.

Upgrades an upgradable lock to an exclusive lock.

Safety

This method may only be called if an upgradable lock is held in the current context.

Attempts to upgrade an upgradable lock to an exclusive lock without blocking.

Safety

This method may only be called if an upgradable lock is held in the current context.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLockUpgradeDowngrade.html b/lock_api/trait.RawRwLockUpgradeDowngrade.html
new file mode 100644
index 00000000..dacf4378
--- /dev/null
+++ b/lock_api/trait.RawRwLockUpgradeDowngrade.html
@@ -0,0 +1,14 @@

RawRwLockUpgradeDowngrade in lock_api - Rust

Trait lock_api::RawRwLockUpgradeDowngrade

pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
    unsafe fn downgrade_upgradable(&self);

    unsafe fn downgrade_to_upgradable(&self);
}

Additional methods for RwLocks which support upgradable locks and lock downgrading.

Required methods

Downgrades an upgradable lock to a shared lock.

Safety

This method may only be called if an upgradable lock is held in the current context.

Downgrades an exclusive lock to an upgradable lock.

Safety

This method may only be called if an exclusive lock is held in the current context.

Implementors

\ No newline at end of file
diff --git a/lock_api/trait.RawRwLockUpgradeFair.html b/lock_api/trait.RawRwLockUpgradeFair.html
new file mode 100644
index 00000000..a0613ce7
--- /dev/null
+++ b/lock_api/trait.RawRwLockUpgradeFair.html
@@ -0,0 +1,18 @@

RawRwLockUpgradeFair in lock_api - Rust

Trait lock_api::RawRwLockUpgradeFair[][src]

pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
+    unsafe fn unlock_upgradable_fair(&self);
+
+    unsafe fn bump_upgradable(&self) { ... }
+}
Expand description

Additional methods for RwLocks which support upgradable locks and fair +unlocking.

+

Required methods

Releases an upgradable lock using a fair unlock protocol.

+

Safety

+

This method may only be called if an upgradable lock is held in the current context.

+

Provided methods

Temporarily yields an upgradable lock to a waiting thread if there is one.

+

This method is functionally equivalent to calling unlock_upgradable_fair followed +by lock_upgradable, however it can be much more efficient in the case where there +are no waiting threads.

+

Safety

+

This method may only be called if an upgradable lock is held in the current context.

+
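As a sketch, the provided method amounts to the following (the real default may short-circuit when no thread is waiting; the free-standing helper here is illustrative, not part of the crate):

use lock_api::RawRwLockUpgradeFair;

// Safety contract as above: the caller must hold the upgradable lock.
unsafe fn bump_upgradable_sketch<R: RawRwLockUpgradeFair>(raw: &R) {
    raw.unlock_upgradable_fair(); // hand the lock over fairly, if anyone waits
    raw.lock_upgradable();        // then queue up to re-acquire it
}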

Implementors

\ No newline at end of file diff --git a/lock_api/trait.RawRwLockUpgradeTimed.html b/lock_api/trait.RawRwLockUpgradeTimed.html new file mode 100644 index 00000000..92b13758 --- /dev/null +++ b/lock_api/trait.RawRwLockUpgradeTimed.html @@ -0,0 +1,20 @@ +RawRwLockUpgradeTimed in lock_api - Rust + +

Trait lock_api::RawRwLockUpgradeTimed[][src]

pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
+    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;
+
+    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;
+    unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
+    unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
+}
Expand description

Additional methods for RwLocks which support upgradable locks and locking with timeouts.

+
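For illustration, a sketch generic over any raw lock implementing this trait (the spin lock in this build has no timed variant; parking_lot’s raw lock is a typical implementor):

use lock_api::{RawRwLockUpgradeTimed, RwLock, RwLockUpgradableReadGuard};

fn set_if_zero_within<R>(lock: &RwLock<R, u32>, timeout: R::Duration) -> bool
where
    R: RawRwLockUpgradeTimed,
{
    // Calls `try_lock_upgradable_for` on the raw lock.
    if let Some(guard) = lock.try_upgradable_read_for(timeout) {
        if *guard == 0 {
            // Calls `try_upgrade`; `RwLockUpgradableReadGuard::try_upgrade_for`
            // is the timed counterpart.
            if let Ok(mut writer) = RwLockUpgradableReadGuard::try_upgrade(guard) {
                *writer = 1;
                return true;
            }
        }
    }
    false
}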

Required methods

Attempts to acquire an upgradable lock, giving up once the timeout duration elapses.

+

Attempts to acquire an upgradable lock, giving up once the timeout instant is reached.

+

Attempts to upgrade an upgradable lock to an exclusive lock, giving up once the timeout duration elapses.

+

Safety

+

This method may only be called if an upgradable lock is held in the current context.

+

Attempts to upgrade an upgradable lock to an exclusive lock, giving up once the timeout instant is reached.

+

Safety

+

This method may only be called if an upgradable lock is held in the current context.

+

Implementors

\ No newline at end of file diff --git a/scopeguard/all.html b/scopeguard/all.html new file mode 100644 index 00000000..25378139 --- /dev/null +++ b/scopeguard/all.html @@ -0,0 +1,5 @@ +List of all items in this crate + +

List of all items[]

Structs

Enums

Traits

Macros

Functions

\ No newline at end of file diff --git a/scopeguard/enum.Always.html b/scopeguard/enum.Always.html new file mode 100644 index 00000000..b5cc36ff --- /dev/null +++ b/scopeguard/enum.Always.html @@ -0,0 +1,19 @@ +Always in scopeguard - Rust + +

Enum scopeguard::Always[][src]

pub enum Always {}
Expand description

Always run on scope exit.

+

“Always” run: on regular exit from a scope or on unwinding from a panic. Cannot run on abort, process exit, or other catastrophic events where destructors don’t run.

+
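For instance, a small sketch (Always is the default strategy used by guard and defer!):

extern crate scopeguard;

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _guard = scopeguard::guard((), |_| {
            println!("runs during unwinding too");
        });
        panic!("boom");
    });
    assert!(result.is_err());
}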

Trait Implementations

Formats the value using the given formatter. Read more

+

Return true if the guard’s associated code should run (in the context where this method is called). Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/scopeguard/fn.guard.html b/scopeguard/fn.guard.html new file mode 100644 index 00000000..cca937a6 --- /dev/null +++ b/scopeguard/fn.guard.html @@ -0,0 +1,4 @@ +guard in scopeguard - Rust + +

Function scopeguard::guard[][src]

pub fn guard<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, Always> where
    F: FnOnce(T), 
Expand description

Create a new ScopeGuard owning v and with deferred closure dropfn.

+
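For example, a minimal sketch:

extern crate scopeguard;

fn main() {
    // The guard owns the String and derefs to it; `dropfn` receives
    // ownership back when the guard is dropped.
    let mut log = scopeguard::guard(String::new(), |s| {
        println!("at scope exit: {}", s);
    });
    log.push_str("hello");
}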
\ No newline at end of file diff --git a/scopeguard/index.html b/scopeguard/index.html new file mode 100644 index 00000000..642de53d --- /dev/null +++ b/scopeguard/index.html @@ -0,0 +1,171 @@ +scopeguard - Rust + +

Crate scopeguard[][src]

Expand description

A scope guard will run a given closure when it goes out of scope, even if the code in between panics (as long as the panic doesn’t abort).

+

Examples

Hello World

+

This example creates a scope guard with an example function:

+ +
+extern crate scopeguard;
+
+fn f() {
+    let _guard = scopeguard::guard((), |_| {
+        println!("Hello Scope Exit!");
+    });
+
+    // rest of the code here.
+
+    // Here, at the end of `_guard`'s scope, the guard's closure is called.
+    // It is also called if we exit this scope through unwinding instead.
+}
+

defer!

+

Use the defer macro to run an operation at scope exit, whether on regular scope exit or during unwinding from a panic.

+ +
+#[macro_use(defer)] extern crate scopeguard;
+
+use std::cell::Cell;
+
+fn main() {
+    // use a cell to observe drops during and after the scope guard is active
+    let drop_counter = Cell::new(0);
+    {
+        // Create a scope guard using `defer!` for the current scope
+        defer! {
+            drop_counter.set(1 + drop_counter.get());
+        }
+
+        // Do regular operations here in the meantime.
+
+        // Just before scope exit: it hasn't run yet.
+        assert_eq!(drop_counter.get(), 0);
+
+        // The following scope end is where the defer closure is called
+    }
+    assert_eq!(drop_counter.get(), 1);
+}
+

Scope Guard with Value

+

If the scope guard closure needs to access an outer value that is also mutated outside of the scope guard, then you may want to use the scope guard with a value. The guard works like a smart pointer, so the inner value can be accessed by reference or by mutable reference.

+

1. The guard owns a file

+

In this example, the scope guard owns a file and ensures pending writes are synced at scope exit.

+ +
+extern crate scopeguard;
+
+use std::fs::*;
+use std::io::{self, Write};
+
+fn try_main() -> io::Result<()> {
+    let f = File::create("newfile.txt")?;
+    let mut file = scopeguard::guard(f, |f| {
+        // ensure we flush file at return or panic
+        let _ = f.sync_all();
+    });
+    // Access the file through the scope guard itself
+    file.write_all(b"test me\n").map(|_| ())
+}
+
+fn main() {
+    try_main().unwrap();
+}
+
+

2. The guard restores an invariant on scope exit

+
+extern crate scopeguard;
+
+use std::mem::ManuallyDrop;
+use std::ptr;
+
+// This function, just for this example, takes the first element
+// and inserts it into the assumed sorted tail of the vector.
+//
+// For optimization purposes we temporarily violate an invariant of the
+// Vec, that it owns all of its elements.
+//
+// The safe approach is to use swap, which means two writes to memory,
+// the optimization is to use a “hole” which uses only one write of memory
+// for each position it moves.
+//
+// We *must* use a scope guard to run this code safely. We
+// are running arbitrary user code (comparison operators) that may panic.
+// The scope guard ensures we restore the invariant after successful
+// exit or during unwinding from panic.
+fn insertion_sort_first<T>(v: &mut Vec<T>)
+    where T: PartialOrd
+{
+    struct Hole<'a, T: 'a> {
+        v: &'a mut Vec<T>,
+        index: usize,
+        value: ManuallyDrop<T>,
+    }
+
+    unsafe {
+        // Create a moved-from location in the vector, a “hole”.
+        let value = ptr::read(&v[0]);
+        let hole = Hole { v, index: 0, value: ManuallyDrop::new(value) };
+
+        // Use a scope guard with a value.
+        // At scope exit, plug the hole so that the vector is fully
+        // initialized again.
+        // The scope guard owns the hole, but we can access it through the guard.
+        let mut hole_guard = scopeguard::guard(hole, |hole| {
+            // plug the hole in the vector with the value that was taken out
+            let index = hole.index;
+            ptr::copy_nonoverlapping(&*hole.value, &mut hole.v[index], 1);
+        });
+
+        // run algorithm that moves the hole in the vector here
+        // move the hole until it's in a sorted position
+        for i in 1..hole_guard.v.len() {
+            if *hole_guard.value >= hole_guard.v[i] {
+                // move the element back and the hole forward
+                let index = hole_guard.index;
+                ptr::copy_nonoverlapping(&hole_guard.v[index + 1], &mut hole_guard.v[index], 1);
+                hole_guard.index += 1;
+            } else {
+                break;
+            }
+        }
+
+        // When the scope exits here, the Vec becomes whole again!
+    }
+}
+
+fn main() {
+    let string = String::from;
+    let mut data = vec![string("c"), string("a"), string("b"), string("d")];
+    insertion_sort_first(&mut data);
+    assert_eq!(data, vec!["a", "b", "c", "d"]);
+}
+
+

Crate Features

+
  • use_std
    • Enabled by default. Enables the OnUnwind and OnSuccess strategies.
    • Disable to use no_std.

Rust Version

+

This version of the crate requires Rust 1.20 or later.

+

The scopeguard 1.x release series will use a carefully considered version upgrade policy, where in a later 1.x version, we will raise the minimum required Rust version.

+

Macros

+
defer

Macro to create a ScopeGuard (always run).

+

Structs

+
ScopeGuard

ScopeGuard is a scope guard that may own a protected value.

+

Enums

+
Always

Always run on scope exit.

+

Traits

+
Strategy

Controls in which cases the associated code should be run

+

Functions

+
guard

Create a new ScopeGuard owning v and with deferred closure dropfn.

+
\ No newline at end of file diff --git a/scopeguard/macro.defer!.html b/scopeguard/macro.defer!.html new file mode 100644 index 00000000..b96a94c2 --- /dev/null +++ b/scopeguard/macro.defer!.html @@ -0,0 +1,11 @@ + + + + + Redirection + + +

Redirecting to macro.defer.html...

+ + + \ No newline at end of file diff --git a/scopeguard/macro.defer.html b/scopeguard/macro.defer.html new file mode 100644 index 00000000..7cb7dbd8 --- /dev/null +++ b/scopeguard/macro.defer.html @@ -0,0 +1,10 @@ +defer in scopeguard - Rust + +

Macro scopeguard::defer[][src]

+macro_rules! defer {
+    ($($t:tt)*) => { ... };
+}
+
Expand description

Macro to create a ScopeGuard (always run).

+

The macro takes statements, which are the body of a closure that will run when the scope is exited.

+
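For example:

#[macro_use(defer)] extern crate scopeguard;

fn main() {
    defer! {
        println!("printed last, at scope exit");
    }
    println!("printed first");
}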
\ No newline at end of file diff --git a/scopeguard/sidebar-items.js b/scopeguard/sidebar-items.js new file mode 100644 index 00000000..2a9cf7dd --- /dev/null +++ b/scopeguard/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"enum":[["Always","Always run on scope exit."]],"fn":[["guard","Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`."]],"macro":[["defer","Macro to create a `ScopeGuard` (always run)."]],"struct":[["ScopeGuard","`ScopeGuard` is a scope guard that may own a protected value."]],"trait":[["Strategy","Controls in which cases the associated code should be run"]]}); \ No newline at end of file diff --git a/scopeguard/struct.ScopeGuard.html b/scopeguard/struct.ScopeGuard.html new file mode 100644 index 00000000..54c00cb5 --- /dev/null +++ b/scopeguard/struct.ScopeGuard.html @@ -0,0 +1,50 @@ +ScopeGuard in scopeguard - Rust + +

Struct scopeguard::ScopeGuard[][src]

pub struct ScopeGuard<T, F, S = Always> where
    F: FnOnce(T),
    S: Strategy
{ /* fields omitted */ }
Expand description

ScopeGuard is a scope guard that may own a protected value.

+

If you place a guard in a local variable, the closure can run regardless of how you leave the scope: through regular return or panic (except if panic or other code aborts, so it runs as long as destructors run). It is run only once.

+

The S parameter for Strategy determines whether the closure actually runs.

+

The guard’s closure will be called with the held value in the destructor.

+

The ScopeGuard implements Deref so that you can access the inner value.

+

Implementations

Create a ScopeGuard that owns v (accessible through deref) and calls dropfn when its destructor runs.

+

The Strategy decides whether the scope guard’s closure should run.

+

“Defuse” the guard and extract the value without calling the closure.

+ +
+extern crate scopeguard;
+
+use scopeguard::{guard, ScopeGuard};
+
+fn conditional() -> bool { true }
+
+fn main() {
+    let mut guard = guard(Vec::new(), |mut v| v.clear());
+    guard.push(1);
+     
+    if conditional() {
+        // a condition may make us decide to
+        // “defuse” the guard and get back its inner parts
+        let value = ScopeGuard::into_inner(guard);
+    } else {
+        // guard still exists in this branch
+    }
+}
+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Mutably dereferences the value.

+

Executes the destructor for this type. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/scopeguard/trait.Strategy.html b/scopeguard/trait.Strategy.html new file mode 100644 index 00000000..d12d08f3 --- /dev/null +++ b/scopeguard/trait.Strategy.html @@ -0,0 +1,8 @@ +Strategy in scopeguard - Rust + +

Trait scopeguard::Strategy[][src]

pub trait Strategy {
+    fn should_run() -> bool;
+}
Expand description

Controls in which cases the associated code should be run

+

Required methods

Return true if the guard’s associated code should run (in the context where this method is called).

+
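As a sketch, a custom strategy can be defined and used through ScopeGuard::with_strategy (the OnDebug name is illustrative, not part of the crate):

extern crate scopeguard;

use scopeguard::{ScopeGuard, Strategy};

enum OnDebug {}

impl Strategy for OnDebug {
    fn should_run() -> bool {
        // Run the guard’s closure only in debug builds.
        cfg!(debug_assertions)
    }
}

fn main() {
    let _guard = ScopeGuard::<_, _, OnDebug>::with_strategy((), |_| {
        println!("debug-only cleanup");
    });
}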

Implementors

\ No newline at end of file diff --git a/search-index.js b/search-index.js index 7227c786..a642cbc0 100644 --- a/search-index.js +++ b/search-index.js @@ -1,7 +1,10 @@ var searchIndex = JSON.parse('{\ "byteorder":{"doc":"This crate provides convenience methods for encoding and …","t":[6,4,8,6,4,6,6,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,11,10,11,11,11,11,11,11,10,11,11,10,11,11,10,11,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,11,10,11,11,10,11,11,10,11,11,11,10,11,11,10,11,11,11,10,11,11,10,11,11,10,11,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,11,10,11,11,10,11,11,10,11,11,11,10,11,11,10,11,11,11,10,11,11,10,11,11,10,11,11,10,11,11],"n":["BE","BigEndian","ByteOrder","LE","LittleEndian","NativeEndian","NetworkEndian","borrow","borrow","borrow_mut","borrow_mut","clone","clone","cmp","cmp","default","default","eq","eq","fmt","fmt","from","from","from_slice_f32","from_slice_f32","from_slice_f32","from_slice_f64","from_slice_f64","from_slice_f64","from_slice_i128","from_slice_i16","from_slice_i32","from_slice_i64","from_slice_u128","from_slice_u128","from_slice_u128","from_slice_u16","from_slice_u16","from_slice_u16","from_slice_u32","from_slice_u32","from_slice_u32","from_slice_u64","from_slice_u64","from_slice_u64","hash","hash","into","into","partial_cmp","partial_cmp","read_f32","read_f32_into","read_f32_into_unchecked","read_f64","read_f64_into","read_f64_into_unchecked","read_i128","read_i128_into","read_i16","read_i16_into","read_i24","read_i32","read_i32_into","read_i48","read_i64","read_i64_into","read_int","read_int128","read_u128","read_u128","read_u128","read_u128_into","read_u128_into","read_u128_into","read_u16","read_u16","read_u16","read_u16_into","read_u16_into","read_u16_into","read_u24","read_u32","read_u32","read_u32","read_u32_into","read_u32_into","read_u32_into","read_u48","read_u64","read_u64","read_u64","read_u64_into","read_u64_into","read_u64_into","read_uint","read_uint","read_uint","read_uint128","read_uint128","read_uint128","try_from","try_from","try_into","try_into","type_id","type_id","write_f32","write_f32_into","write_f64","write_f64_into","write_i128","write_i128_into","write_i16","write_i16_into","write_i24","write_i32","write_i32_into","write_i48","write_i64","write_i64_into","write_i8_into","write_int","write_int128","write_u128","write_u128","write_u128","write_u128_into","write_u128_into","write_u128_into","write_u16","write_u16","write_u16","write_u16_into","write_u16_into","write_u16_into","write_u24","write_u32","write_u32","write_u32","write_u32_into","write_u32_into","write_u32_into","write_u48","write_u64","write_u64","write_u64","write_u64_into","write_u64_into","write_u64_into","write_uint","write_uint","write_uint","write_uint128","write_uint128","write_uint128"],"q":["byteorder","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""],"d":["A type alias for BigEndian.","Defines big-endian serialization.","ByteOrder describes types that can serialize integers as …","A type alias for LittleEndian.","Defines little-endian 
serialization.","Defines system native-endian serialization.","Defines network byte order serialization.","","","","","","","","","","","","","","","","","Converts the given slice of IEEE754 single-precision (4 …","","","Converts the given slice of IEEE754 double-precision (8 …","","","Converts the given slice of signed 128 bit integers to a …","Converts the given slice of signed 16 bit integers to a …","Converts the given slice of signed 32 bit integers to a …","Converts the given slice of signed 64 bit integers to a …","Converts the given slice of unsigned 128 bit integers to …","","","Converts the given slice of unsigned 16 bit integers to a …","","","Converts the given slice of unsigned 32 bit integers to a …","","","Converts the given slice of unsigned 64 bit integers to a …","","","","","","","","","Reads a IEEE754 single-precision (4 bytes) floating point …","Reads IEEE754 single-precision (4 bytes) floating point …","DEPRECATED.","Reads a IEEE754 double-precision (8 bytes) floating point …","Reads IEEE754 single-precision (4 bytes) floating point …","DEPRECATED.","Reads a signed 128 bit integer from buf.","Reads signed 128 bit integers from src into dst.","Reads a signed 16 bit integer from buf.","Reads signed 16 bit integers from src to dst.","Reads a signed 24 bit integer from buf, stored in i32.","Reads a signed 32 bit integer from buf.","Reads signed 32 bit integers from src into dst.","Reads a signed 48 bit integer from buf, stored in i64.","Reads a signed 64 bit integer from buf.","Reads signed 64 bit integers from src into dst.","Reads a signed n-bytes integer from buf.","Reads a signed n-bytes integer from buf.","Reads an unsigned 128 bit integer from buf.","","","Reads unsigned 128 bit integers from src into dst.","","","Reads an unsigned 16 bit integer from buf.","","","Reads unsigned 16 bit integers from src into dst.","","","Reads an unsigned 24 bit integer from buf, stored in u32.","Reads an unsigned 32 bit integer from buf.","","","Reads unsigned 32 bit integers from src into dst.","","","Reads an unsigned 48 bit integer from buf, stored in u64.","Reads an unsigned 64 bit integer from buf.","","","Reads unsigned 64 bit integers from src into dst.","","","Reads an unsigned n-bytes integer from buf.","","","Reads an unsigned n-bytes integer from buf.","","","","","","","","","Writes a IEEE754 single-precision (4 bytes) floating …","Writes IEEE754 single-precision (4 bytes) floating point …","Writes a IEEE754 double-precision (8 bytes) floating …","Writes IEEE754 double-precision (8 bytes) floating point …","Writes a signed 128 bit integer n to buf.","Writes signed 128 bit integers from src into dst.","Writes a signed 16 bit integer n to buf.","Writes signed 16 bit integers from src into dst.","Writes a signed 24 bit integer n to buf, stored in i32.","Writes a signed 32 bit integer n to buf.","Writes signed 32 bit integers from src into dst.","Writes a signed 48 bit integer n to buf, stored in i64.","Writes a signed 64 bit integer n to buf.","Writes signed 64 bit integers from src into dst.","Writes signed 8 bit integers from src into dst.","Writes a signed integer n to buf using only nbytes.","Writes a signed integer n to buf using only nbytes.","Writes an unsigned 128 bit integer n to buf.","","","Writes unsigned 128 bit integers from src into dst.","","","Writes an unsigned 16 bit integer n to buf.","","","Writes unsigned 16 bit integers from src into dst.","","","Writes an unsigned 24 bit integer n to buf, stored in u32.","Writes an unsigned 32 bit integer n to 
buf.","","","Writes unsigned 32 bit integers from src into dst.","","","Writes an unsigned 48 bit integer n to buf, stored in u64.","Writes an unsigned 64 bit integer n to buf.","","","Writes unsigned 64 bit integers from src into dst.","","","Writes an unsigned integer n to buf using only nbytes.","","","Writes an unsigned integer n to buf using only nbytes.","",""],"i":[0,0,0,0,0,0,0,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,3,1,2,3,1,2,3,3,3,3,3,1,2,3,1,2,3,1,2,3,1,2,1,2,1,2,1,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,3,1,2,3,1,2,3,1,2,3,3,1,2,3,1,2,3,3,1,2,3,1,2,3,1,2,3,1,2,1,2,1,2,1,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,3,1,2,3,1,2,3,1,2,3,3,1,2,3,1,2,3,3,1,2,3,1,2,3,1,2,3,1,2],"f":[null,null,null,null,null,null,null,[[]],[[]],[[]],[[]],[[],["bigendian",4]],[[],["littleendian",4]],[[["bigendian",4]],["ordering",4]],[[["littleendian",4]],["ordering",4]],[[],["bigendian",4]],[[],["littleendian",4]],[[["bigendian",4]],["bool",15]],[[["littleendian",4]],["bool",15]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["bigendian",4]],[["ordering",4],["option",4]]],[[["littleendian",4]],[["ordering",4],["option",4]]],[[],["f32",15]],[[]],[[]],[[],["f64",15]],[[]],[[]],[[],["i128",15]],[[]],[[],["i16",15]],[[]],[[],["i32",15]],[[],["i32",15]],[[]],[[],["i64",15]],[[],["i64",15]],[[]],[[["usize",15]],["i64",15]],[[["usize",15]],["i128",15]],[[],["u128",15]],[[],["u128",15]],[[],["u128",15]],[[]],[[]],[[]],[[],["u16",15]],[[],["u16",15]],[[],["u16",15]],[[]],[[]],[[]],[[],["u32",15]],[[],["u32",15]],[[],["u32",15]],[[],["u32",15]],[[]],[[]],[[]],[[],["u64",15]],[[],["u64",15]],[[],["u64",15]],[[],["u64",15]],[[]],[[]],[[]],[[["usize",15]],["u64",15]],[[["usize",15]],["u64",15]],[[["usize",15]],["u64",15]],[[["usize",15]],["u128",15]],[[["usize",15]],["u128",15]],[[["usize",15]],["u128",15]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[["f32",15]]],[[]],[[["f64",15]]],[[]],[[["i128",15]]],[[]],[[["i16",15]]],[[]],[[["i32",15]]],[[["i32",15]]],[[]],[[["i64",15]]],[[["i64",15]]],[[]],[[]],[[["i64",15],["usize",15]]],[[["usize",15],["i128",15]]],[[["u128",15]]],[[["u128",15]]],[[["u128",15]]],[[]],[[]],[[]],[[["u16",15]]],[[["u16",15]]],[[["u16",15]]],[[]],[[]],[[]],[[["u32",15]]],[[["u32",15]]],[[["u32",15]]],[[["u32",15]]],[[]],[[]],[[]],[[["u64",15]]],[[["u64",15]]],[[["u64",15]]],[[["u64",15]]],[[]],[[]],[[]],[[["usize",15],["u64",15]]],[[["usize",15],["u64",15]]],[[["usize",15],["u64",15]]],[[["u128",15],["usize",15]]],[[["u128",15],["usize",15]]],[[["u128",15],["usize",15]]]],"p":[[4,"BigEndian"],[4,"LittleEndian"],[8,"ByteOrder"]]},\ "hash32":{"doc":"32-bit hashing 
machinery","t":[8,3,3,8,8,16,3,11,11,11,11,11,11,10,11,11,11,11,11,11,10,11,11,11,11,11,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,11],"n":["BuildHasher","BuildHasherDefault","FnvHasher","Hash","Hasher","Hasher","Murmur3Hasher","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","build_hasher","build_hasher","clone","default","default","default","eq","finish","finish","finish","fmt","from","from","from","hash","hash_slice","into","into","into","new","try_from","try_from","try_from","try_into","try_into","try_into","type_id","type_id","type_id","write","write","write"],"q":["hash32","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""],"d":["See core::hash::BuildHasher for details","See core::hash::BuildHasherDefault for details","32-bit Fowler-Noll-Vo hasher","See core::hash::Hash for details","See core::hash::Hasher for details","See core::hash::BuildHasher::Hasher","32-bit MurmurHash3 hasher","","","","","","","See core::hash::BuildHasher.build_hasher","","","","","","","See core::hash::Hasher.finish","","","","","","","Feeds this value into the given Hasher.","Feeds a slice of this type into the given Hasher.","","","","const constructor","","","","","","","","","","See core::hash::Hasher.write","",""],"i":[0,0,0,0,0,1,0,2,3,4,2,3,4,1,4,4,2,3,4,4,5,2,3,4,2,3,4,6,6,2,3,4,4,2,3,4,2,3,4,2,3,4,5,2,3],"f":[null,null,null,null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["buildhasherdefault",3]],["bool",15]],[[],["u32",15]],[[],["u32",15]],[[],["u32",15]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[]],[[]],[[]]],"p":[[8,"BuildHasher"],[3,"FnvHasher"],[3,"Murmur3Hasher"],[3,"BuildHasherDefault"],[8,"Hasher"],[8,"Hash"]]},\ -"heapless":{"doc":"static friendly data structures that don’t require 
…","t":[3,6,6,3,3,3,3,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,0,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,4,4,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,6,6,6,6,6,6,11,11,11,11,11,11,11,11,11,11,11,3,4,3,3,4,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,16,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,3,3,3,3,3,3,3,3,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,3,3,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11],"n":["Deque","FnvIndexMap","FnvIndexSet","HistoryBuffer","IndexMap","IndexSet","LinearMap","String","Vec","as_mut","as_mut","as_mut_slices","as_mut_str","as_mut_vec","as_ref","as_ref","as_ref","as_ref","as_ref","as_slice","as_slice","as_slices","as_str","back","back_mut","binary_heap","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","capacity","capacity","capacity","capacity","capacity","capacity","capacity","clear","clear","clear","clear","clear","clear","clear","clear_with","clone","clone","clone","clone","clone","clone","cmp","cmp","contains","contains_key","contains_key","default","default","default","default","default","default","default","deref","deref","deref","deref_mut","deref_mut","difference","drop","drop","drop","drop","ends_with","eq","eq","eq","eq","eq","eq","eq","eq","eq","eq","eq","eq","extend","extend","extend","extend","extend","extend","extend","extend","extend","extend_from_slice","extend_from_slice","fmt","fmt","fmt","fmt","fmt","fmt","fmt","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from_iter","from_iter","from_iter","from_iter","from_slice","from_str","front","front_mut","get","get","get_mut","get_mut","hash","hash","hash","hash","index","index","index_mut","index_mut","insert","insert","insert","intersection","into","into","into","into","into","into","into","into_array","into_bytes","into_iter","into_iter","is_disjoint","is_empty","is_empty","is_empty","is_empty","is_empty","is_full","is_full","is_subset","is_superset","iter","iter","iter","iter","iter_mut","iter_mut","iter_mut","keys","keys","len","len","len",
"len","len","mpmc","ne","ne","ne","new","new","new","new","new","new","new","new_with","partial_cmp","partial_cmp","pool","pop","pop","pop_back","pop_front","pop_unchecked","push","push","push_back","push_back_unchecked","push_front","push_front_unchecked","push_str","push_unchecked","recent","remove","remove","remove","resize","resize_default","set_len","sorted_linked_list","spsc","starts_with","swap_remove","swap_remove","swap_remove_unchecked","symmetric_difference","truncate","truncate","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","type_id","type_id","type_id","union","values","values","values_mut","values_mut","write","write_char","write_str","write_str","BinaryHeap","Max","Min","PeekMut","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","capacity","clear","clone","default","deref","deref_mut","drop","drop","fmt","from","from","from","from","into","into","into","into","is_empty","iter","iter_mut","len","new","peek","peek_mut","pop","pop","pop_unchecked","push","push_unchecked","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","MpMcQueue","Q16","Q2","Q32","Q4","Q64","Q8","borrow","borrow_mut","default","dequeue","enqueue","from","into","new","try_from","try_into","type_id","Box","Init","Node","Pool","Uninit","alloc","as_mut","as_ref","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","cmp","deref","deref_mut","eq","fmt","fmt","free","from","from","from","from","from","grow","grow_exact","hash","init","into","into","into","into","into","new","partial_cmp","singleton","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","type_id","Box","Data","Pool","alloc","as_mut","as_ref","borrow","borrow_mut","cmp","deref","deref_mut","drop","eq","fmt","fmt","forget","freeze","from","grow","grow_exact","hash","init","into","partial_cmp","try_from","try_into","type_id","FindMut","Iter","LinkedIndexU16","LinkedIndexU8","LinkedIndexUsize","Max","Min","Node","SortedLinkedList","SortedLinkedListIndex","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","clone","clone","clone","cmp","cmp","cmp","deref","deref_mut","drop","drop","eq","eq","eq","find_mut","finish","fmt","fmt","fmt","fmt","from","from","from","from","from","from","from","from","from","get_unchecked","get_unchecked","get_unchecked","into","into","into","into","into","into","into","into","into","into_iter","is_empty","is_full","iter","ne","ne","ne","new_u16","new_u8","new_unchecked","new_unchecked","new_unchecked","new_usize","next","none","none","none","option","option","option","partial_cmp","partial_cmp","partial_cmp","peek","pop","pop","pop_unchecked","push","push_unchecked","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","Consumer","Iter","IterMut","Producer","Queue","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut"
,"borrow_mut","borrow_mut","capacity","capacity","capacity","clone","clone","default","dequeue","dequeue","dequeue_unchecked","dequeue_unchecked","drop","enqueue","enqueue","enqueue_unchecked","enqueue_unchecked","eq","fmt","from","from","from","from","from","hash","hash","into","into","into","into","into","into_iter","into_iter","is_empty","is_full","iter","iter_mut","len","len","len","new","next","next","next_back","next_back","peek","peek","ready","ready","split","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","type_id"],"q":["heapless","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::binary_heap","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::mpmc","","","","","","","","","","","","","","","","","","heapless::pool","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::pool::singleton","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::sorted_linked_list","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::spsc","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""],"d":["A fixed capacity double-ended queue.","A heapless::IndexMap using the default FNV hasher","A heapless::IndexSet using the default FNV hasher. 
A list …","A “history buffer”, similar to a write-only ring …","Fixed capacity IndexMap","Fixed capacity IndexSet.","A fixed capacity map / dictionary that performs lookups …","A fixed capacity String","A fixed capacity Vec","","","Returns a pair of mutable slices which contain, in order, …","Converts a String into a mutable string slice.","Returns a mutable reference to the contents of this String…","","","","","","Returns the array slice backing the buffer, without …","Extracts a slice containing the entire vector.","Returns a pair of slices which contain, in order, the …","Extracts a string slice containing the entire string.","Provides a reference to the back element, or None if the …","Provides a mutable reference to the back element, or None …","A priority queue implemented with a binary heap.","","","","","","","","","","","","","","","Returns the maximum number of elements the deque can hold.","Returns the capacity of the buffer, which is the length …","Returns the number of elements the map can hold","Returns the number of elements the set can hold","Returns the number of elements that the map can hold","Returns the maximum number of elements the String can hold","Returns the maximum number of elements the vector can …","Clears the deque, removing all values.","Clears the buffer, replacing every element with the …","Remove all key-value pairs in the map, while preserving …","Clears the set, removing all values.","Clears the map, removing all key-value pairs","Truncates this String, removing all contents.","Clears the vector, removing all values.","Clears the buffer, replacing every element with the given …","","","","","","","","","Returns true if the set contains a value.","Returns true if the map contains a value for the …","Returns true if the map contains a value for the …","","","","","","","","","","","","","Visits the values representing the difference, i.e. the …","","","","","Returns true if needle is a suffix of the Vec.","","","","","","","","","","","","","","","","","","","Extends the vec from an iterator.","","","Clones and writes all elements in a slice to the buffer.","Clones and appends all elements in a slice to the Vec.","","","","","","","","","","","","","","","","","","","","","","","","","","","","Constructs a new vector with a fixed capacity of N and …","","Provides a reference to the front element, or None if the …","Provides a mutable reference to the front element, or …","Returns a reference to the value corresponding to the key.","Returns a reference to the value corresponding to the key","Returns a mutable reference to the value corresponding to …","Returns a mutable reference to the value corresponding to …","","","","","","","","","Inserts a key-value pair into the map.","Adds a value to the set.","Inserts a key-value pair into the map.","Visits the values representing the intersection, i.e. the …","","","","","","","","Returns the contents of the vector as an array of length M…","Converts a String into a byte vector.","","","Returns true if self has no elements in common with other…","Returns whether the deque is empty.","Returns true if the map contains no elements.","Returns true if the set contains no elements.","Returns true if the map contains no elements","Returns true if the vec is empty","Returns whether the deque is full (i.e. if …","Returns true if the vec is full","Returns true if the set is a subset of another, i.e. 
other…","Examples","Returns an iterator over the deque.","Return an iterator over the key-value pairs of the map, …","Return an iterator over the values of the set, in their …","An iterator visiting all key-value pairs in arbitrary …","Returns an iterator that allows modifying each value.","Return an iterator over the key-value pairs of the map, …","An iterator visiting all key-value pairs in arbitrary …","Return an iterator over the keys of the map, in their …","An iterator visiting all keys in arbitrary order","Returns the number of elements currently in the deque.","Returns the current fill level of the buffer.","Return the number of key-value pairs in the map.","Returns the number of elements in the set.","Returns the number of elements in this map","A fixed capacity Multiple-Producer Multiple-Consumer …","","","","Constructs a new, empty deque with a fixed capacity of N","Constructs a new history buffer.","Creates an empty IndexMap.","Creates an empty IndexSet","Creates an empty LinearMap","Constructs a new, empty String with a fixed capacity of N","Constructs a new, empty vector with a fixed capacity of N","Constructs a new history buffer, where every element is …","","","A heap-less, interrupt-safe, lock-free memory pool (*)","Removes the last character from the string buffer and …","Removes the last element from a vector and returns it, or …","Removes the item from the back of the deque and returns …","Removes the item from the front of the deque and returns …","Removes the last element from a vector and returns it","Appends the given char to the end of this String.","Appends an item to the back of the collection","Appends an item to the back of the deque","Appends an item to the back of the deque","Appends an item to the front of the deque","Appends an item to the front of the deque","Appends a given string slice onto the end of this String.","Appends an item to the back of the collection","Returns a reference to the most recently written value.","Same as swap_remove","Removes a value from the set. Returns true if the value …","Removes a key from the map, returning the value at the …","Resizes the Vec in-place so that len is equal to new_len.","Resizes the Vec in-place so that len is equal to new_len.","Forces the length of the vector to new_len.","A fixed sorted priority linked list, similar to BinaryHeap…","Fixed capacity Single Producer Single Consumer (SPSC) …","Returns true if needle is a prefix of the Vec.","Remove the key-value pair equivalent to key and return …","Removes an element from the vector and returns it.","Removes an element from the vector and returns it.","Visits the values representing the symmetric difference, …","Shortens this String to the specified length.","Shortens the vector, keeping the first len elements and …","","","","","","","","","","","","","","","","","","","","","","Visits the values representing the union, i.e. 
all the …","Return an iterator over the values of the map, in their …","An iterator visiting all values in arbitrary order","Return an iterator over mutable references to the the …","An iterator visiting all values mutably in arbitrary order","Writes an element to the buffer, overwriting the oldest …","","","","A priority queue implemented with a binary heap.","Max-heap","Min-heap","Structure wrapping a mutable reference to the greatest …","","","","","","","","","Returns the capacity of the binary heap.","Drops all items from the binary heap.","","","","","","","","","","","","","","","","Checks if the binary heap is empty.","Returns an iterator visiting all values in the underlying …","Returns a mutable iterator visiting all values in the …","Returns the length of the binary heap.","Creates an empty BinaryHeap as a $K-heap.","Returns the top (greatest if max-heap, smallest if …","Returns a mutable reference to the greatest item in the …","Removes the top (greatest if max-heap, smallest if …","Removes the peeked value from the heap and returns it.","Removes the top (greatest if max-heap, smallest if …","Pushes an item onto the binary heap.","Pushes an item onto the binary heap without first …","","","","","","","","","","","","","MPMC queue with a capacity for N elements The max value …","MPMC queue with a capability for 16 elements.","MPMC queue with a capability for 2 elements.","MPMC queue with a capability for 32 elements.","MPMC queue with a capability for 4 elements.","MPMC queue with a capability for 64 elements.","MPMC queue with a capability for 8 elements.","","","","Returns the item in the front of the queue, or None if …","Adds an item to the end of the queue","","","Creates an empty queue","","","","A memory block","Initialized type state","Unfortunate implementation detail required to use the …","A lock-free memory pool","Uninitialized type state","Claims a memory block from the pool","","","","","","","","","","","","","","","","","","","Returns a memory block to the pool","","","","","","Increases the capacity of the pool","Increases the capacity of the pool","","Initializes this memory block","","","","","","Creates a new empty pool","","Pool as a global singleton","","","","","","","","","","","","","","","","A memory block that belongs to the global memory pool, …","The type of data that can be allocated on this pool","A global singleton memory pool","Claims a memory block from the pool","","","","","","","","","","","","Forgets the contents of this memory block without running …","(DO NOT USE, SEE DEPRECATION) Freezes the contents of …","","Increases the capacity of the pool","Increases the capacity of the pool","","Initializes this memory block","","","","","","Comes from [SortedLinkedList::find_mut].","Iterator for the linked list.","Index for the [SortedLinkedList] with specific backing …","Index for the [SortedLinkedList] with specific backing …","Index for the [SortedLinkedList] with specific backing …","Marker for Max sorted [SortedLinkedList].","Marker for Min sorted [SortedLinkedList].","A node in the [SortedLinkedList].","The linked list.","Trait for defining an index for the linked list, never …","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","Find an element in the list that can be changed and …","This will resort the element into the correct position in …","","","","","","","","","","","","","","This is only valid if self.option() is not None.","This is only valid if self.option() is not None.","This is only 
valid if self.option() is not None.","","","","","","","","","","","Checks if the linked list is empty.","Checks if the linked list is full.","Get an iterator over the sorted list.","","","","Create a new linked list.","Create a new linked list.","","","","Create a new linked list.","","","","","","","","","","","Peek at the first element.","Pops the first element in the list.","This will pop the element from the list.","Pop an element from the list without checking so the list …","Pushes an element to the linked list and sorts it into …","Pushes a value onto the list without checking if the list …","","","","","","","","","","","","","","","","","","","","","","","","","","","","A queue “consumer”; it can dequeue items from the …","An iterator over the items of a queue","A mutable iterator over the items of a queue","A queue “producer”; it can enqueue items into the …","A statically allocated single producer single consumer …","","","","","","","","","","","Returns the maximum number of elements the queue can hold","Returns the maximum number of elements the queue can hold","Returns the maximum number of elements the queue can hold","","","","Returns the item in the front of the queue, or None if …","Returns the item in the front of the queue, or None if …","Returns the item in the front of the queue, without …","Returns the item in the front of the queue, without …","","Adds an item to the end of the queue","Adds an item to the end of the queue, returns back the …","Adds an item to the end of the queue, without checking if …","Adds an item to the end of the queue, without checking if …","","","","","","","","","","","","","","","","","Returns true if the queue is empty","Returns true if the queue is full","Iterates from the front of the queue to the back","Returns an iterator that allows modifying each value","Returns the number of elements in the queue","Returns the number of elements in the queue","Returns the number of elements in the queue","Creates an empty queue with a fixed capacity of N - 1","","","","","Returns a reference to the item in the front of the queue …","Returns the item in the front of the queue without …","Returns if there are any items to dequeue. When this …","Returns if there is any space to enqueue a new item. 
When …","Splits a queue into producer and consumer endpoints","","","","","","","","","","","","","","",""],"i":[0,0,0,0,0,0,0,0,0,1,1,2,3,3,4,3,3,1,1,4,1,2,3,2,2,0,2,4,5,6,7,3,1,2,4,5,6,7,3,1,2,4,5,6,7,3,1,2,4,5,6,7,3,1,4,2,5,6,7,3,1,3,1,6,5,7,2,4,5,6,7,3,1,4,3,1,3,1,6,2,4,7,1,1,5,6,7,3,3,3,1,1,1,1,1,1,4,4,5,5,6,6,1,1,1,4,1,4,5,6,7,3,3,1,2,4,5,6,7,3,3,3,3,3,3,3,3,3,3,1,5,6,7,1,1,3,2,2,5,7,5,7,3,3,1,1,5,7,5,7,5,6,7,6,2,4,5,6,7,3,1,1,3,2,1,6,2,5,6,7,1,2,1,6,6,2,5,6,7,2,5,7,5,7,2,4,5,6,7,0,3,3,3,2,4,5,6,7,3,1,4,3,1,0,3,1,2,2,1,3,1,2,2,2,2,3,1,4,5,6,7,1,1,1,0,0,1,5,1,1,6,3,1,2,4,5,6,7,3,1,2,4,5,6,7,3,1,2,4,5,6,7,3,1,6,5,7,5,7,4,3,3,1,0,0,0,0,8,9,10,11,8,9,10,11,10,10,10,10,11,11,10,11,10,8,9,10,11,8,9,10,11,10,10,10,10,10,10,10,10,11,10,10,10,8,9,10,11,8,9,10,11,8,9,10,11,0,0,0,0,0,0,0,12,12,12,12,12,12,12,12,12,12,12,0,0,0,0,0,13,14,14,15,13,14,16,17,15,13,14,16,17,14,14,14,14,14,14,13,15,13,14,16,17,13,13,14,14,15,13,14,16,17,13,14,0,15,13,14,16,17,15,13,14,16,17,15,13,14,16,17,0,18,0,18,19,19,19,19,19,19,19,19,19,19,19,19,19,19,18,18,19,19,19,19,19,19,19,0,0,0,0,0,0,0,0,0,0,20,21,22,23,24,25,26,27,28,20,21,22,23,24,25,26,27,28,26,27,28,26,27,28,25,25,23,25,26,27,28,23,25,23,26,27,28,20,21,22,23,24,25,26,27,28,26,27,28,20,21,22,23,24,25,26,27,28,24,23,23,23,26,27,28,23,23,26,27,28,23,24,26,27,28,26,27,28,26,27,28,23,23,25,23,23,23,20,21,22,23,24,25,26,27,28,20,21,22,23,24,25,26,27,28,20,21,22,23,24,25,26,27,28,0,0,0,0,0,29,30,31,32,33,29,30,31,32,33,29,32,33,29,30,29,29,32,29,32,29,29,33,29,33,29,29,29,30,31,32,33,29,29,29,30,31,32,33,30,31,29,29,29,29,29,32,33,29,30,31,30,31,29,32,32,33,29,29,30,31,32,33,29,30,31,32,33,29,30,31,32,33],"f":[null,null,null,null,null,null,null,null,null,[[]],[[]],[[]],[[],["str",15]],[[],["vec",3]],[[]],[[],["str",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["str",15]],[[],["option",4]],[[],["option",4]],null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["ordering",4]],[[],["ordering",4]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["str",15]],[[]],[[],["str",15]],[[]],[[["indexset",3]],["difference",3]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[["indexmap",3]],["bool",15]],[[["indexset",3]],["bool",15]],[[["linearmap",3]],["bool",15]],[[["string",3]],["bool",15]],[[],["bool",15]],[[["str",15]],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[["vec",3]],["bool",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["result",4]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[["u32",15]]],[[["u64",15]]],[[["i16",15]]],[[["i32",15]]],[[["i64",15]]],[[]],[[["i8",15]]],[[["u8",15]]],[[["u16",15]]],[[["str",15]]],[[]],[[]],[[]],[[]],[[]],[[],["result",4]],[[["str",15]],["result",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],[["result",4],["option",4]]],[[],[["bool",15],["result",4]]],[[],[["result",4],["option",4]]],[[["indexset",3]],["intersection",3]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["result",4]],[[],[["u8",15],["vec",3]]],[
[]],[[]],[[["indexset",3]],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[["indexset",3]],["bool",15]],[[["indexset",3]],["bool",15]],[[],["iter",3]],[[],["iter",3]],[[],["iter",3]],[[],["iter",3]],[[],["itermut",3]],[[],["itermut",3]],[[],["itermut",3]],[[]],[[]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],null,[[["str",15]],["bool",15]],[[["string",3]],["bool",15]],[[],["bool",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["string",3]],[["option",4],["ordering",4]]],[[["vec",3]],[["option",4],["ordering",4]]],null,[[],[["option",4],["char",15]]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[]],[[["char",15]],["result",4]],[[],["result",4]],[[],["result",4]],[[]],[[],["result",4]],[[]],[[["str",15]],["result",4]],[[]],[[],["option",4]],[[],["option",4]],[[],["bool",15]],[[],["option",4]],[[["usize",15]],["result",4]],[[["usize",15]],["result",4]],[[["usize",15]]],null,null,[[],["bool",15]],[[],["option",4]],[[["usize",15]]],[[["usize",15]]],[[["indexset",3]]],[[["usize",15]]],[[["usize",15]]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[["indexset",3]]],[[]],[[]],[[]],[[]],[[]],[[["char",15]],[["error",3],["result",4]]],[[["str",15]],[["error",3],["result",4]]],[[["str",15]],["result",6]],null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["iter",3]],[[],["itermut",3]],[[],["usize",15]],[[]],[[],["option",4]],[[],[["option",4],["peekmut",3]]],[[],["option",4]],[[["peekmut",3]]],[[]],[[],["result",4]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],null,null,null,null,null,null,null,[[]],[[]],[[]],[[],["option",4]],[[],["result",4]],[[]],[[]],[[]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],null,null,null,null,null,[[],[["option",4],["box",3]]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["box",3]],["ordering",4]],[[]],[[]],[[["box",3]],["bool",15]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["box",3]]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[["maybeuninit",19]],["usize",15]],[[]],[[],[["box",3],["init",4]]],[[]],[[]],[[]],[[]],[[]],[[]],[[["box",3]],[["ordering",4],["option",4]]],null,[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],null,null,null,[[],[["option",4],["box",3]]],[[]],[[]],[[]],[[]],[[["box",3]],["ordering",4]],[[]],[[]],[[]],[[["box",3]],["bool",15]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[],[["box",3],["uninit",4]]],[[],[["init",4],["box",3]]],[[]],[[],["usize",15]],[[["maybeuninit",19]],["usize",15]],[[]],[[],[["init",4],["box",3]]],[[]],[[["box",3]],[["ordering",4],["option",4]]
],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],null,null,null,null,null,null,null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["linkedindexu8",3]],[[],["linkedindexu16",3]],[[],["linkedindexusize",3]],[[["linkedindexu8",3]],["ordering",4]],[[["linkedindexu16",3]],["ordering",4]],[[["linkedindexusize",3]],["ordering",4]],[[]],[[]],[[]],[[]],[[["linkedindexu8",3]],["bool",15]],[[["linkedindexu16",3]],["bool",15]],[[["linkedindexusize",3]],["bool",15]],[[],[["findmut",3],["option",4]]],[[]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["bool",15]],[[],["iter",3]],[[["linkedindexu8",3]],["bool",15]],[[["linkedindexu16",3]],["bool",15]],[[["linkedindexusize",3]],["bool",15]],[[]],[[]],[[["usize",15]]],[[["usize",15]]],[[["usize",15]]],[[]],[[],["option",4]],[[]],[[]],[[]],[[],[["option",4],["usize",15]]],[[],[["option",4],["usize",15]]],[[],[["option",4],["usize",15]]],[[["linkedindexu8",3]],[["ordering",4],["option",4]]],[[["linkedindexu16",3]],[["ordering",4],["option",4]]],[[["linkedindexusize",3]],[["ordering",4],["option",4]]],[[],["option",4]],[[],["result",4]],[[]],[[]],[[],["result",4]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],null,null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[]],[[]],[[]],[[],["option",4]],[[],["option",4]],[[]],[[]],[[]],[[],["result",4]],[[],["result",4]],[[]],[[]],[[["queue",3]],["bool",15]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["bool",15]],[[],["iter",3]],[[],["itermut",3]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["bool",15]],[[],["bool",15]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]]],"p":[[3,"Vec"],[3,"Deque"],[3,"String"],[3,"HistoryBuffer"],[3,"IndexMap"],[3,"IndexSet"],[3,"LinearMap"],[4,"Min"],[4,"Max"],[3,"BinaryHeap"],[3,"PeekMut"],[3,"MpMcQueue"],[3,"Pool"],[3,"Box"],[3,"Node"],[4,"Uninit"],[4,"Init"],[8,"Pool"],[3,"Box"],[3,"Min"],[3,"Max"],[3,"Node"],[3,"SortedLinkedList"],[3,"Iter"],[3,"FindMut"],[3,"LinkedIndexU8"],[3,"LinkedIndexU16"],[3,"LinkedIndexUsize"],[3,"Queue"],[3,"Iter"],[3,"IterMut"],[3,"Consumer"],[3,"Producer"]]},\ +"heapless":{"doc":"static friendly data structures that don’t require 
…","t":[3,6,6,3,3,3,3,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,0,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,4,4,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,6,6,6,6,6,6,11,11,11,11,11,11,11,11,11,11,11,3,4,3,3,4,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,16,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,3,3,3,3,3,3,3,3,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,3,3,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11],"n":["Deque","FnvIndexMap","FnvIndexSet","HistoryBuffer","IndexMap","IndexSet","LinearMap","String","Vec","as_mut","as_mut","as_mut_slices","as_mut_str","as_mut_vec","as_ref","as_ref","as_ref","as_ref","as_ref","as_slice","as_slice","as_slices","as_str","back","back_mut","binary_heap","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","capacity","capacity","capacity","capacity","capacity","capacity","capacity","clear","clear","clear","clear","clear","clear","clear","clear_with","clone","clone","clone","clone","clone","clone","cmp","cmp","contains","contains_key","contains_key","default","default","default","default","default","default","default","deref","deref","deref","deref_mut","deref_mut","difference","drop","drop","drop","drop","ends_with","eq","eq","eq","eq","eq","eq","eq","eq","eq","eq","eq","eq","extend","extend","extend","extend","extend","extend","extend","extend","extend","extend_from_slice","extend_from_slice","fmt","fmt","fmt","fmt","fmt","fmt","fmt","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from_iter","from_iter","from_iter","from_iter","from_slice","from_str","front","front_mut","get","get","get_mut","get_mut","hash","hash","hash","hash","index","index","index_mut","index_mut","insert","insert","insert","intersection","into","into","into","into","into","into","into","into_array","into_bytes","into_iter","into_iter","is_disjoint","is_empty","is_empty","is_empty","is_empty","is_empty","is_full","is_full","is_subset","is_superset","iter","iter","iter","iter","iter_mut","iter_mut","iter_mut","keys","keys","len","len","len",
"len","len","mpmc","ne","ne","ne","new","new","new","new","new","new","new","new_with","partial_cmp","partial_cmp","pool","pop","pop","pop_back","pop_front","pop_unchecked","push","push","push_back","push_back_unchecked","push_front","push_front_unchecked","push_str","push_unchecked","recent","remove","remove","remove","resize","resize_default","set_len","sorted_linked_list","spsc","starts_with","swap_remove","swap_remove","swap_remove_unchecked","symmetric_difference","truncate","truncate","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","type_id","type_id","type_id","union","values","values","values_mut","values_mut","write","write_char","write_str","write_str","BinaryHeap","Max","Min","PeekMut","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","capacity","clear","clone","default","deref","deref_mut","drop","drop","fmt","from","from","from","from","into","into","into","into","is_empty","iter","iter_mut","len","new","peek","peek_mut","pop","pop","pop_unchecked","push","push_unchecked","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","MpMcQueue","Q16","Q2","Q32","Q4","Q64","Q8","borrow","borrow_mut","default","dequeue","enqueue","from","into","new","try_from","try_into","type_id","Box","Init","Node","Pool","Uninit","alloc","as_mut","as_ref","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","cmp","deref","deref_mut","eq","fmt","fmt","free","from","from","from","from","from","grow","grow_exact","hash","init","into","into","into","into","into","new","partial_cmp","singleton","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","type_id","Box","Data","Pool","alloc","as_mut","as_ref","borrow","borrow_mut","cmp","deref","deref_mut","drop","eq","fmt","fmt","forget","freeze","from","grow","grow_exact","hash","init","into","partial_cmp","try_from","try_into","type_id","FindMut","Iter","LinkedIndexU16","LinkedIndexU8","LinkedIndexUsize","Max","Min","Node","SortedLinkedList","SortedLinkedListIndex","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","clone","clone","clone","cmp","cmp","cmp","deref","deref_mut","drop","drop","eq","eq","eq","find_mut","finish","fmt","fmt","fmt","fmt","from","from","from","from","from","from","from","from","from","get_unchecked","get_unchecked","get_unchecked","into","into","into","into","into","into","into","into","into","into_iter","is_empty","is_full","iter","ne","ne","ne","new_u16","new_u8","new_unchecked","new_unchecked","new_unchecked","new_usize","next","none","none","none","option","option","option","partial_cmp","partial_cmp","partial_cmp","peek","pop","pop","pop_unchecked","push","push_unchecked","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","Consumer","Iter","IterMut","Producer","Queue","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut"
,"borrow_mut","borrow_mut","capacity","capacity","capacity","clone","clone","default","dequeue","dequeue","dequeue_unchecked","dequeue_unchecked","drop","enqueue","enqueue","enqueue_unchecked","enqueue_unchecked","eq","fmt","from","from","from","from","from","hash","hash","into","into","into","into","into","into_iter","into_iter","is_empty","is_full","iter","iter_mut","len","len","len","new","next","next","next_back","next_back","peek","peek","ready","ready","split","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","type_id","type_id","type_id","type_id","type_id"],"q":["heapless","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::binary_heap","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::mpmc","","","","","","","","","","","","","","","","","","heapless::pool","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::pool::singleton","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::sorted_linked_list","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","heapless::spsc","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""],"d":["A fixed capacity double-ended queue.","A heapless::IndexMap using the default FNV hasher","A heapless::IndexSet using the default FNV hasher. 
A list …","A “history buffer”, similar to a write-only ring …","Fixed capacity IndexMap","Fixed capacity IndexSet.","A fixed capacity map / dictionary that performs lookups …","A fixed capacity String","A fixed capacity Vec","","","Returns a pair of mutable slices which contain, in order, …","Converts a String into a mutable string slice.","Returns a mutable reference to the contents of this String…","","","","","","Returns the array slice backing the buffer, without …","Extracts a slice containing the entire vector.","Returns a pair of slices which contain, in order, the …","Extracts a string slice containing the entire string.","Provides a reference to the back element, or None if the …","Provides a mutable reference to the back element, or None …","A priority queue implemented with a binary heap.","","","","","","","","","","","","","","","Returns the maximum number of elements the deque can hold.","Returns the capacity of the buffer, which is the length …","Returns the number of elements the map can hold","Returns the number of elements the set can hold","Returns the number of elements that the map can hold","Returns the maximum number of elements the String can hold","Returns the maximum number of elements the vector can …","Clears the deque, removing all values.","Clears the buffer, replacing every element with the …","Remove all key-value pairs in the map, while preserving …","Clears the set, removing all values.","Clears the map, removing all key-value pairs","Truncates this String, removing all contents.","Clears the vector, removing all values.","Clears the buffer, replacing every element with the given …","","","","","","","","","Returns true if the set contains a value.","Returns true if the map contains a value for the …","Returns true if the map contains a value for the …","","","","","","","","","","","","","Visits the values representing the difference, i.e. the …","","","","","Returns true if needle is a suffix of the Vec.","","","","","","","","","","","","","","","","","","","Extends the vec from an iterator.","","","Clones and writes all elements in a slice to the buffer.","Clones and appends all elements in a slice to the Vec.","","","","","","","","","","","","","","","","","","","","","","","","","","","","Constructs a new vector with a fixed capacity of N and …","","Provides a reference to the front element, or None if the …","Provides a mutable reference to the front element, or …","Returns a reference to the value corresponding to the key.","Returns a reference to the value corresponding to the key","Returns a mutable reference to the value corresponding to …","Returns a mutable reference to the value corresponding to …","","","","","","","","","Inserts a key-value pair into the map.","Adds a value to the set.","Inserts a key-value pair into the map.","Visits the values representing the intersection, i.e. the …","","","","","","","","Returns the contents of the vector as an array of length M…","Converts a String into a byte vector.","","","Returns true if self has no elements in common with other…","Returns whether the deque is empty.","Returns true if the map contains no elements.","Returns true if the set contains no elements.","Returns true if the map contains no elements","Returns true if the vec is empty","Returns whether the deque is full (i.e. if …","Returns true if the vec is full","Returns true if the set is a subset of another, i.e. 
other…","Examples","Returns an iterator over the deque.","Return an iterator over the key-value pairs of the map, …","Return an iterator over the values of the set, in their …","An iterator visiting all key-value pairs in arbitrary …","Returns an iterator that allows modifying each value.","Return an iterator over the key-value pairs of the map, …","An iterator visiting all key-value pairs in arbitrary …","Return an iterator over the keys of the map, in their …","An iterator visiting all keys in arbitrary order","Returns the number of elements currently in the deque.","Returns the current fill level of the buffer.","Return the number of key-value pairs in the map.","Returns the number of elements in the set.","Returns the number of elements in this map","A fixed capacity Multiple-Producer Multiple-Consumer …","","","","Constructs a new, empty deque with a fixed capacity of N","Constructs a new history buffer.","Creates an empty IndexMap.","Creates an empty IndexSet","Creates an empty LinearMap","Constructs a new, empty String with a fixed capacity of N","Constructs a new, empty vector with a fixed capacity of N","Constructs a new history buffer, where every element is …","","","A heap-less, interrupt-safe, lock-free memory pool (*)","Removes the last character from the string buffer and …","Removes the last element from a vector and returns it, or …","Removes the item from the back of the deque and returns …","Removes the item from the front of the deque and returns …","Removes the last element from a vector and returns it","Appends the given char to the end of this String.","Appends an item to the back of the collection","Appends an item to the back of the deque","Appends an item to the back of the deque","Appends an item to the front of the deque","Appends an item to the front of the deque","Appends a given string slice onto the end of this String.","Appends an item to the back of the collection","Returns a reference to the most recently written value.","Same as swap_remove","Removes a value from the set. Returns true if the value …","Removes a key from the map, returning the value at the …","Resizes the Vec in-place so that len is equal to new_len.","Resizes the Vec in-place so that len is equal to new_len.","Forces the length of the vector to new_len.","A fixed sorted priority linked list, similar to BinaryHeap…","Fixed capacity Single Producer Single Consumer (SPSC) …","Returns true if needle is a prefix of the Vec.","Remove the key-value pair equivalent to key and return …","Removes an element from the vector and returns it.","Removes an element from the vector and returns it.","Visits the values representing the symmetric difference, …","Shortens this String to the specified length.","Shortens the vector, keeping the first len elements and …","","","","","","","","","","","","","","","","","","","","","","Visits the values representing the union, i.e. 
all the …","Return an iterator over the values of the map, in their …","An iterator visiting all values in arbitrary order","Return an iterator over mutable references to the the …","An iterator visiting all values mutably in arbitrary order","Writes an element to the buffer, overwriting the oldest …","","","","A priority queue implemented with a binary heap.","Max-heap","Min-heap","Structure wrapping a mutable reference to the greatest …","","","","","","","","","Returns the capacity of the binary heap.","Drops all items from the binary heap.","","","","","","","","","","","","","","","","Checks if the binary heap is empty.","Returns an iterator visiting all values in the underlying …","Returns a mutable iterator visiting all values in the …","Returns the length of the binary heap.","Creates an empty BinaryHeap as a $K-heap.","Returns the top (greatest if max-heap, smallest if …","Returns a mutable reference to the greatest item in the …","Removes the top (greatest if max-heap, smallest if …","Removes the peeked value from the heap and returns it.","Removes the top (greatest if max-heap, smallest if …","Pushes an item onto the binary heap.","Pushes an item onto the binary heap without first …","","","","","","","","","","","","","MPMC queue with a capacity for N elements The max value …","MPMC queue with a capability for 16 elements.","MPMC queue with a capability for 2 elements.","MPMC queue with a capability for 32 elements.","MPMC queue with a capability for 4 elements.","MPMC queue with a capability for 64 elements.","MPMC queue with a capability for 8 elements.","","","","Returns the item in the front of the queue, or None if …","Adds an item to the end of the queue","","","Creates an empty queue","","","","A memory block","Initialized type state","Unfortunate implementation detail required to use the …","A lock-free memory pool","Uninitialized type state","Claims a memory block from the pool","","","","","","","","","","","","","","","","","","","Returns a memory block to the pool","","","","","","Increases the capacity of the pool","Increases the capacity of the pool","","Initializes this memory block","","","","","","Creates a new empty pool","","Pool as a global singleton","","","","","","","","","","","","","","","","A memory block that belongs to the global memory pool, …","The type of data that can be allocated on this pool","A global singleton memory pool","Claims a memory block from the pool","","","","","","","","","","","","Forgets the contents of this memory block without running …","(DO NOT USE, SEE DEPRECATION) Freezes the contents of …","","Increases the capacity of the pool","Increases the capacity of the pool","","Initializes this memory block","","","","","","Comes from [SortedLinkedList::find_mut].","Iterator for the linked list.","Index for the [SortedLinkedList] with specific backing …","Index for the [SortedLinkedList] with specific backing …","Index for the [SortedLinkedList] with specific backing …","Marker for Max sorted [SortedLinkedList].","Marker for Min sorted [SortedLinkedList].","A node in the [SortedLinkedList].","The linked list.","Trait for defining an index for the linked list, never …","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","Find an element in the list that can be changed and …","This will resort the element into the correct position in …","","","","","","","","","","","","","","This is only valid if self.option() is not None.","This is only valid if self.option() is not None.","This is only 
valid if self.option() is not None.","","","","","","","","","","","Checks if the linked list is empty.","Checks if the linked list is full.","Get an iterator over the sorted list.","","","","Create a new linked list.","Create a new linked list.","","","","Create a new linked list.","","","","","","","","","","","Peek at the first element.","Pops the first element in the list.","This will pop the element from the list.","Pop an element from the list without checking so the list …","Pushes an element to the linked list and sorts it into …","Pushes a value onto the list without checking if the list …","","","","","","","","","","","","","","","","","","","","","","","","","","","","A queue “consumer”; it can dequeue items from the …","An iterator over the items of a queue","A mutable iterator over the items of a queue","A queue “producer”; it can enqueue items into the …","A statically allocated single producer single consumer …","","","","","","","","","","","Returns the maximum number of elements the queue can hold","Returns the maximum number of elements the queue can hold","Returns the maximum number of elements the queue can hold","","","","Returns the item in the front of the queue, or None if …","Returns the item in the front of the queue, or None if …","Returns the item in the front of the queue, without …","Returns the item in the front of the queue, without …","","Adds an item to the end of the queue","Adds an item to the end of the queue, returns back the …","Adds an item to the end of the queue, without checking if …","Adds an item to the end of the queue, without checking if …","","","","","","","","","","","","","","","","","Returns true if the queue is empty","Returns true if the queue is full","Iterates from the front of the queue to the back","Returns an iterator that allows modifying each value","Returns the number of elements in the queue","Returns the number of elements in the queue","Returns the number of elements in the queue","Creates an empty queue with a fixed capacity of N - 1","","","","","Returns a reference to the item in the front of the queue …","Returns the item in the front of the queue without …","Returns if there are any items to dequeue. When this …","Returns if there is any space to enqueue a new item. 
When …","Splits a queue into producer and consumer endpoints","","","","","","","","","","","","","","",""],"i":[0,0,0,0,0,0,0,0,0,1,1,2,3,3,4,3,3,1,1,4,1,2,3,2,2,0,2,4,5,6,7,3,1,2,4,5,6,7,3,1,2,4,5,6,7,3,1,2,4,5,6,7,3,1,4,2,5,6,7,3,1,3,1,6,5,7,2,4,5,6,7,3,1,4,3,1,3,1,6,2,4,7,1,1,5,6,7,3,3,3,1,1,1,1,1,1,4,4,5,5,6,6,1,1,1,4,1,4,5,6,7,3,3,1,2,4,5,6,7,3,3,3,3,3,3,3,3,3,3,1,5,6,7,1,1,3,2,2,5,7,5,7,3,3,1,1,5,7,5,7,5,6,7,6,2,4,5,6,7,3,1,1,3,2,1,6,2,5,6,7,1,2,1,6,6,2,5,6,7,2,5,7,5,7,2,4,5,6,7,0,3,3,3,2,4,5,6,7,3,1,4,3,1,0,3,1,2,2,1,3,1,2,2,2,2,3,1,4,5,6,7,1,1,1,0,0,1,5,1,1,6,3,1,2,4,5,6,7,3,1,2,4,5,6,7,3,1,2,4,5,6,7,3,1,6,5,7,5,7,4,3,3,1,0,0,0,0,8,9,10,11,8,9,10,11,10,10,10,10,11,11,10,11,10,8,9,10,11,8,9,10,11,10,10,10,10,10,10,10,10,11,10,10,10,8,9,10,11,8,9,10,11,8,9,10,11,0,0,0,0,0,0,0,12,12,12,12,12,12,12,12,12,12,12,0,0,0,0,0,13,14,14,15,13,14,16,17,15,13,14,16,17,14,14,14,14,14,14,13,15,13,14,16,17,13,13,14,14,15,13,14,16,17,13,14,0,15,13,14,16,17,15,13,14,16,17,15,13,14,16,17,0,18,0,18,19,19,19,19,19,19,19,19,19,19,19,19,19,19,18,18,19,19,19,19,19,19,19,0,0,0,0,0,0,0,0,0,0,20,21,22,23,24,25,26,27,28,20,21,22,23,24,25,26,27,28,26,27,28,26,27,28,25,25,23,25,26,27,28,23,25,23,26,27,28,20,21,22,23,24,25,26,27,28,26,27,28,20,21,22,23,24,25,26,27,28,24,23,23,23,26,27,28,23,23,26,27,28,23,24,26,27,28,26,27,28,26,27,28,23,23,25,23,23,23,20,21,22,23,24,25,26,27,28,20,21,22,23,24,25,26,27,28,20,21,22,23,24,25,26,27,28,0,0,0,0,0,29,30,31,32,33,29,30,31,32,33,29,32,33,29,30,29,29,32,29,32,29,29,33,29,33,29,29,29,30,31,32,33,29,29,29,30,31,32,33,30,31,29,29,29,29,29,32,33,29,30,31,30,31,29,32,32,33,29,29,30,31,32,33,29,30,31,32,33,29,30,31,32,33],"f":[null,null,null,null,null,null,null,null,null,[[]],[[]],[[]],[[],["str",15]],[[],["vec",3]],[[]],[[],["str",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["str",15]],[[],["option",4]],[[],["option",4]],null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["ordering",4]],[[],["ordering",4]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["str",15]],[[]],[[],["str",15]],[[]],[[["indexset",3]],["difference",3]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[["indexmap",3]],["bool",15]],[[["indexset",3]],["bool",15]],[[["linearmap",3]],["bool",15]],[[["string",3]],["bool",15]],[[],["bool",15]],[[["str",15]],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[["vec",3]],["bool",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["result",4]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[["u32",15]]],[[["u64",15]]],[[["i16",15]]],[[["i32",15]]],[[["i64",15]]],[[]],[[["i8",15]]],[[["u8",15]]],[[["u16",15]]],[[["str",15]]],[[]],[[]],[[]],[[]],[[]],[[],["result",4]],[[["str",15]],["result",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],[["option",4],["result",4]]],[[],[["bool",15],["result",4]]],[[],[["option",4],["result",4]]],[[["indexset",3]],["intersection",3]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["result",4]],[[],[["u8",15],["vec",3]]],[
[]],[[]],[[["indexset",3]],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[["indexset",3]],["bool",15]],[[["indexset",3]],["bool",15]],[[],["iter",3]],[[],["iter",3]],[[],["iter",3]],[[],["iter",3]],[[],["itermut",3]],[[],["itermut",3]],[[],["itermut",3]],[[]],[[]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],null,[[["str",15]],["bool",15]],[[["string",3]],["bool",15]],[[],["bool",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["string",3]],[["option",4],["ordering",4]]],[[["vec",3]],[["option",4],["ordering",4]]],null,[[],[["option",4],["char",15]]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[]],[[["char",15]],["result",4]],[[],["result",4]],[[],["result",4]],[[]],[[],["result",4]],[[]],[[["str",15]],["result",4]],[[]],[[],["option",4]],[[],["option",4]],[[],["bool",15]],[[],["option",4]],[[["usize",15]],["result",4]],[[["usize",15]],["result",4]],[[["usize",15]]],null,null,[[],["bool",15]],[[],["option",4]],[[["usize",15]]],[[["usize",15]]],[[["indexset",3]]],[[["usize",15]]],[[["usize",15]]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[["indexset",3]]],[[]],[[]],[[]],[[]],[[]],[[["char",15]],[["error",3],["result",4]]],[[["str",15]],[["error",3],["result",4]]],[[["str",15]],["result",6]],null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["iter",3]],[[],["itermut",3]],[[],["usize",15]],[[]],[[],["option",4]],[[],[["option",4],["peekmut",3]]],[[],["option",4]],[[["peekmut",3]]],[[]],[[],["result",4]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],null,null,null,null,null,null,null,[[]],[[]],[[]],[[],["option",4]],[[],["result",4]],[[]],[[]],[[]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],null,null,null,null,null,[[],[["box",3],["option",4]]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["box",3]],["ordering",4]],[[]],[[]],[[["box",3]],["bool",15]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["box",3]]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[["maybeuninit",19]],["usize",15]],[[]],[[],[["box",3],["init",4]]],[[]],[[]],[[]],[[]],[[]],[[]],[[["box",3]],[["ordering",4],["option",4]]],null,[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],null,null,null,[[],[["option",4],["box",3]]],[[]],[[]],[[]],[[]],[[["box",3]],["ordering",4]],[[]],[[]],[[]],[[["box",3]],["bool",15]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[],[["box",3],["uninit",4]]],[[],[["init",4],["box",3]]],[[]],[[],["usize",15]],[[["maybeuninit",19]],["usize",15]],[[]],[[],[["init",4],["box",3]]],[[]],[[["box",3]],[["ordering",4],["option",4]]
],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],null,null,null,null,null,null,null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["linkedindexu8",3]],[[],["linkedindexu16",3]],[[],["linkedindexusize",3]],[[["linkedindexu8",3]],["ordering",4]],[[["linkedindexu16",3]],["ordering",4]],[[["linkedindexusize",3]],["ordering",4]],[[]],[[]],[[]],[[]],[[["linkedindexu8",3]],["bool",15]],[[["linkedindexu16",3]],["bool",15]],[[["linkedindexusize",3]],["bool",15]],[[],[["option",4],["findmut",3]]],[[]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["bool",15]],[[],["iter",3]],[[["linkedindexu8",3]],["bool",15]],[[["linkedindexu16",3]],["bool",15]],[[["linkedindexusize",3]],["bool",15]],[[]],[[]],[[["usize",15]]],[[["usize",15]]],[[["usize",15]]],[[]],[[],["option",4]],[[]],[[]],[[]],[[],[["option",4],["usize",15]]],[[],[["option",4],["usize",15]]],[[],[["option",4],["usize",15]]],[[["linkedindexu8",3]],[["ordering",4],["option",4]]],[[["linkedindexu16",3]],[["ordering",4],["option",4]]],[[["linkedindexusize",3]],[["ordering",4],["option",4]]],[[],["option",4]],[[],["result",4]],[[]],[[]],[[],["result",4]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],null,null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[]],[[]],[[]],[[],["option",4]],[[],["option",4]],[[]],[[]],[[]],[[],["result",4]],[[],["result",4]],[[]],[[]],[[["queue",3]],["bool",15]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["bool",15]],[[],["iter",3]],[[],["itermut",3]],[[],["usize",15]],[[],["usize",15]],[[],["usize",15]],[[]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["option",4]],[[],["bool",15]],[[],["bool",15]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]]],"p":[[3,"Vec"],[3,"Deque"],[3,"String"],[3,"HistoryBuffer"],[3,"IndexMap"],[3,"IndexSet"],[3,"LinearMap"],[4,"Min"],[4,"Max"],[3,"BinaryHeap"],[3,"PeekMut"],[3,"MpMcQueue"],[3,"Pool"],[3,"Box"],[3,"Node"],[4,"Uninit"],[4,"Init"],[8,"Pool"],[3,"Box"],[3,"Min"],[3,"Max"],[3,"Node"],[3,"SortedLinkedList"],[3,"Iter"],[3,"FindMut"],[3,"LinkedIndexU8"],[3,"LinkedIndexU16"],[3,"LinkedIndexUsize"],[3,"Queue"],[3,"Iter"],[3,"IterMut"],[3,"Consumer"],[3,"Producer"]]},\ +"lock_api":{"doc":"This library provides type-safe and fully-featured Mutex 
…","t":[16,16,8,16,16,3,3,18,18,18,18,18,18,18,16,16,3,3,3,3,3,3,8,8,8,3,8,8,8,8,8,8,8,8,8,8,3,3,3,3,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,11,10,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,11,11,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,11,11,10,10,10,10,11,11,11,10,10,10,10,10,10,10,11,11,11,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,10,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,10,10,10,11,11,11,11,11,11,11,11,11,11,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,10,11,11],"n":["Duration","Duration","GetThreadId","GuardMarker","GuardMarker","GuardNoSend","GuardSend","INIT","INIT","INIT","INIT","INIT","INIT","INIT","Instant","Instant","MappedMutexGuard","MappedReentrantMutexGuard","MappedRwLockReadGuard","MappedRwLockWriteGuard","Mutex","MutexGuard","RawMutex","RawMutexFair","RawMutexTimed","RawReentrantMutex","RawRwLock","RawRwLockDowngrade","RawRwLockFair","RawRwLockRecursive","RawRwLockRecursiveTimed","RawRwLockTimed","RawRwLockUpgrade","RawRwLockUpgradeDowngrade","RawRwLockUpgradeFair","RawRwLockUpgradeTimed","ReentrantMutex","ReentrantMutexGuard","RwLock","RwLockReadGuard","RwLockUpgradableReadGuard","RwLockWriteGuard","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","borrow_mut","bump","bump","bump","bump","bump","bump","bump","bump","bump_exclusive","bump_exclusive","bump_shared","bump_shared","bump_upgradable","bump_upgradable","const_new","const_new","const_new","data_ptr","data_ptr","data_ptr","default","default","default","deref","deref","deref","deref","deref","deref","deref","deref","deref","deref_mut","deref_mut","deref_mut","deref_mut","downgrade","downgrade","downgrade","downgrade_to_upgradable","downgrade_to_upgradable","downgrade_upgradable","drop","drop","drop","drop","drop","drop","drop","drop","drop","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","fmt","force_unlock","force_unlock","force_unlock_fair","force_unlock_fair","force_unlock_read","force_unlock_read_fair","force_unlock_write","force_unlock_write_fair","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","from","get_mut","get_mut","get_mut","into","into","into","into","into","into","into","into","into","into","into","into","into","into","into","into_inner","into_inner","into_inner","is_locked","is_locked","is_locked","is_locked","is_locked","is_locked","is_locked","is_locked","is_owned_by_current_thread","is_owned_by_current_thread","lock","lock","lock","lock","lock_exclusive","lock_shared","lock_shared_recursive","lock_upgradable","map","map","map","map","map","map","map","map","mutex","new","new","new","nonzero_th
read_id","raw","raw","raw","read","read_recursive","remutex","rwlock","rwlock","rwlock","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_into","try_lock","try_lock","try_lock","try_lock","try_lock_exclusive","try_lock_exclusive_for","try_lock_exclusive_until","try_lock_for","try_lock_for","try_lock_for","try_lock_for","try_lock_shared","try_lock_shared_for","try_lock_shared_recursive","try_lock_shared_recursive_for","try_lock_shared_recursive_until","try_lock_shared_until","try_lock_until","try_lock_until","try_lock_until","try_lock_until","try_lock_upgradable","try_lock_upgradable_for","try_lock_upgradable_until","try_map","try_map","try_map","try_map","try_map","try_map","try_map","try_map","try_read","try_read_for","try_read_recursive","try_read_recursive_for","try_read_recursive_until","try_read_until","try_upgradable_read","try_upgradable_read_for","try_upgradable_read_until","try_upgrade","try_upgrade","try_upgrade_for","try_upgrade_for","try_upgrade_until","try_upgrade_until","try_write","try_write_for","try_write_until","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","type_id","unlock","unlock","unlock_exclusive","unlock_exclusive_fair","unlock_fair","unlock_fair","unlock_fair","unlock_fair","unlock_fair","unlock_fair","unlock_fair","unlock_fair","unlock_fair","unlock_fair","unlock_fair","unlock_shared","unlock_shared_fair","unlock_upgradable","unlock_upgradable_fair","unlocked","unlocked","unlocked","unlocked","unlocked","unlocked_fair","unlocked_fair","unlocked_fair","unlocked_fair","unlocked_fair","upgradable_read","upgrade","upgrade","write"],"q":["lock_api","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""],"d":["Duration type used for try_lock_for.","Duration type used for try_lock_for.","Helper trait which returns a non-zero thread ID.","Marker type which determines whether a lock guard should …","Marker type which determines whether a lock guard should …","Marker type which indicates that the Guard type for a …","Marker type which indicates that the Guard type for a …","Initial value for an unlocked mutex.","Initial value for an unlocked mutex.","Initial value.","Initial value.","Initial value for an 
unlocked mutex.","Initial value for an unlocked RwLock.","Initial value for an unlocked RwLock.","Instant type used for try_lock_until.","Instant type used for try_lock_until.","An RAII mutex guard returned by MutexGuard::map, which …","An RAII mutex guard returned by ReentrantMutexGuard::map, …","An RAII read lock guard returned by RwLockReadGuard::map, …","An RAII write lock guard returned by RwLockWriteGuard::map…","A mutual exclusion primitive useful for protecting shared …","An RAII implementation of a “scoped lock” of a mutex. …","Basic operations for a mutex.","Additional methods for mutexes which support fair …","Additional methods for mutexes which support locking with …","A raw mutex type that wraps another raw mutex to provide …","Basic operations for a reader-writer lock.","Additional methods for RwLocks which support atomically …","Additional methods for RwLocks which support fair …","Additional methods for RwLocks which support recursive …","Additional methods for RwLocks which support recursive …","Additional methods for RwLocks which support locking with …","Additional methods for RwLocks which support atomically …","Additional methods for RwLocks which support upgradable …","Additional methods for RwLocks which support upgradable …","Additional methods for RwLocks which support upgradable …","A mutex which can be recursively locked by a single …","An RAII implementation of a “scoped lock” of a …","A reader-writer lock","RAII structure used to release the shared read access of …","RAII structure used to release the upgradable read access …","RAII structure used to release the exclusive write access …","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","Temporarily yields the mutex to a waiting thread if there …","Temporarily yields the mutex to a waiting thread if there …","Temporarily yields the mutex to a waiting thread if there …","Temporarily yields the mutex to a waiting thread if there …","Temporarily yields the mutex to a waiting thread if there …","Temporarily yields the RwLock to a waiting thread if …","Temporarily yields the RwLock to a waiting thread if …","Temporarily yields the RwLock to a waiting thread if …","Temporarily yields an exclusive lock to a waiting thread …","Temporarily yields an exclusive lock to a waiting thread …","Temporarily yields a shared lock to a waiting thread if …","Temporarily yields a shared lock to a waiting thread if …","Temporarily yields an upgradable lock to a waiting thread …","Temporarily yields an upgradable lock to a waiting thread …","Creates a new mutex based on a pre-existing raw mutex.","Creates a new reentrant mutex based on a pre-existing raw …","Creates a new new instance of an RwLock based on a …","Returns a raw pointer to the underlying data.","Returns a raw pointer to the underlying data.","Returns a raw pointer to the underlying data.","","","","","","","","","","","","","","","","","Atomically downgrades an exclusive lock into a shared …","Atomically downgrades a write lock into a read lock …","Atomically downgrades an upgradable read lock lock into a …","Downgrades an exclusive lock to an upgradable lock.","Atomically downgrades a write lock into an upgradable …","Downgrades an upgradable lock to a shared lock.","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","Forcibly unlocks the mutex.","Forcibly unlocks the mutex.","Forcibly unlocks the mutex using a fair unlock procotol.","Forcibly unlocks the mutex using a fair unlock 
protocol.","Forcibly unlocks a read lock.","Forcibly unlocks a read lock using a fair unlock procotol.","Forcibly unlocks a write lock.","Forcibly unlocks a write lock using a fair unlock …","","","","","","","","","","","","","","","","","","","","","","Returns a mutable reference to the underlying data.","Returns a mutable reference to the underlying data.","Returns a mutable reference to the underlying data.","","","","","","","","","","","","","","","","Consumes this mutex, returning the underlying data.","Consumes this mutex, returning the underlying data.","Consumes this RwLock, returning the underlying data.","Checks whether the mutex is currently locked.","Checks whether the mutex is currently locked.","Checks whether the mutex is currently locked.","Checks whether the mutex is currently locked.","Checks whether the mutex is currently locked.","Checks if this RwLock is currently locked in any way.","Checks if this RwLock is currently locked in any way.","Checks whether this RwLock is currently locked in any way.","Checks whether the mutex is currently held by the current …","Checks whether the mutex is currently held by the current …","Acquires this mutex, blocking the current thread until it …","Acquires a mutex, blocking the current thread until it is …","Acquires this mutex, blocking if it’s held by another …","Acquires a reentrant mutex, blocking the current thread …","Acquires an exclusive lock, blocking the current thread …","Acquires a shared lock, blocking the current thread until …","Acquires a shared lock without deadlocking in case of a …","Acquires an upgradable lock, blocking the current thread …","Makes a new MappedMutexGuard for a component of the …","Makes a new MappedMutexGuard for a component of the …","Makes a new MappedReentrantMutexGuard for a component of …","Makes a new MappedReentrantMutexGuard for a component of …","Make a new MappedRwLockReadGuard for a component of the …","Make a new MappedRwLockWriteGuard for a component of the …","Make a new MappedRwLockReadGuard for a component of the …","Make a new MappedRwLockWriteGuard for a component of the …","Returns a reference to the original Mutex object.","Creates a new mutex in an unlocked state ready for use.","Creates a new reentrant mutex in an unlocked state ready …","Creates a new instance of an RwLock which is unlocked.","Returns a non-zero thread ID which identifies the current …","Returns the underlying raw mutex object.","Returns the underlying raw mutex object.","Returns the underlying raw reader-writer lock object.","Locks this RwLock with shared read access, blocking the …","Locks this RwLock with shared read access, blocking the …","Returns a reference to the original ReentrantMutex object.","Returns a reference to the original reader-writer lock …","Returns a reference to the original reader-writer lock …","Returns a reference to the original reader-writer lock …","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","Attempts to acquire this mutex without blocking. Returns …","Attempts to acquire this lock.","Attempts to acquire this mutex without blocking. 
Returns …","Attempts to acquire this lock.","Attempts to acquire an exclusive lock without blocking.","Attempts to acquire an exclusive lock until a timeout is …","Attempts to acquire an exclusive lock until a timeout is …","Attempts to acquire this lock until a timeout is reached.","Attempts to acquire this lock until a timeout is reached.","Attempts to acquire this lock until a timeout is reached.","Attempts to acquire this lock until a timeout is reached.","Attempts to acquire a shared lock without blocking.","Attempts to acquire a shared lock until a timeout is …","Attempts to acquire a shared lock without deadlocking in …","Attempts to acquire a shared lock until a timeout is …","Attempts to acquire a shared lock until a timeout is …","Attempts to acquire a shared lock until a timeout is …","Attempts to acquire this lock until a timeout is reached.","Attempts to acquire this lock until a timeout is reached.","Attempts to acquire this lock until a timeout is reached.","Attempts to acquire this lock until a timeout is reached.","Attempts to acquire an upgradable lock without blocking.","Attempts to acquire an upgradable lock until a timeout is …","Attempts to acquire an upgradable lock until a timeout is …","Attempts to make a new MappedMutexGuard for a component …","Attempts to make a new MappedMutexGuard for a component …","Attempts to make a new MappedReentrantMutexGuard for a …","Attempts to make a new MappedReentrantMutexGuard for a …","Attempts to make a new MappedRwLockReadGuard for a …","Attempts to make a new MappedRwLockWriteGuard for a …","Attempts to make a new MappedRwLockReadGuard for a …","Attempts to make a new MappedRwLockWriteGuard for a …","Attempts to acquire this RwLock with shared read access.","Attempts to acquire this RwLock with shared read access …","Attempts to acquire this RwLock with shared read access.","Attempts to acquire this RwLock with shared read access …","Attempts to acquire this RwLock with shared read access …","Attempts to acquire this RwLock with shared read access …","Attempts to acquire this RwLock with upgradable read …","Attempts to acquire this RwLock with upgradable read …","Attempts to acquire this RwLock with upgradable read …","Attempts to upgrade an upgradable lock to an exclusive …","Tries to atomically upgrade an upgradable read lock into …","Attempts to upgrade an upgradable lock to an exclusive …","Tries to atomically upgrade an upgradable read lock into …","Attempts to upgrade an upgradable lock to an exclusive …","Tries to atomically upgrade an upgradable read lock into …","Attempts to lock this RwLock with exclusive write access.","Attempts to acquire this RwLock with exclusive write …","Attempts to acquire this RwLock with exclusive write …","","","","","","","","","","","","","","","","Unlocks this mutex.","Unlocks this mutex. The inner mutex may not be unlocked if…","Releases an exclusive lock.","Releases an exclusive lock using a fair unlock protocol.","Unlocks this mutex using a fair unlock protocol.","Unlocks the mutex using a fair unlock protocol.","Unlocks the mutex using a fair unlock protocol.","Unlocks this mutex using a fair unlock protocol. 
The …","Unlocks the mutex using a fair unlock protocol.","Unlocks the mutex using a fair unlock protocol.","Unlocks the RwLock using a fair unlock protocol.","Unlocks the RwLock using a fair unlock protocol.","Unlocks the RwLock using a fair unlock protocol.","Unlocks the RwLock using a fair unlock protocol.","Unlocks the RwLock using a fair unlock protocol.","Releases a shared lock.","Releases a shared lock using a fair unlock protocol.","Releases an upgradable lock.","Releases an upgradable lock using a fair unlock protocol.","Temporarily unlocks the mutex to execute the given …","Temporarily unlocks the mutex to execute the given …","Temporarily unlocks the RwLock to execute the given …","Temporarily unlocks the RwLock to execute the given …","Temporarily unlocks the RwLock to execute the given …","Temporarily unlocks the mutex to execute the given …","Temporarily unlocks the mutex to execute the given …","Temporarily unlocks the RwLock to execute the given …","Temporarily unlocks the RwLock to execute the given …","Temporarily unlocks the RwLock to execute the given …","Locks this RwLock with upgradable read access, blocking …","Upgrades an upgradable lock to an exclusive lock.","Atomically upgrades an upgradable read lock lock into a …","Locks this RwLock with exclusive write access, blocking …"],"i":[1,2,0,3,4,0,0,3,3,5,5,6,4,4,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,8,9,10,11,6,12,13,14,15,16,17,18,19,20,7,8,9,10,11,6,12,13,14,15,16,17,18,19,20,21,21,10,6,13,16,17,18,22,22,22,22,23,23,9,12,15,9,12,15,9,12,15,10,11,13,14,16,17,18,19,20,10,11,17,20,24,17,18,25,17,25,10,11,13,14,16,17,18,19,20,9,10,10,11,11,12,13,13,14,14,15,16,16,17,17,18,18,19,19,20,20,9,12,9,12,15,15,15,15,7,8,9,9,9,10,11,6,12,12,12,13,14,15,15,15,16,17,18,19,20,9,12,15,7,8,9,10,11,6,12,13,14,15,16,17,18,19,20,9,12,15,3,3,9,6,12,4,4,15,6,12,3,9,6,12,4,4,26,27,10,11,13,14,16,17,19,20,10,9,12,15,5,9,12,15,15,15,13,16,17,18,7,8,9,10,11,6,12,13,14,15,16,17,18,19,20,7,8,9,10,11,6,12,13,14,15,16,17,18,19,20,3,9,6,12,4,2,2,1,9,6,12,4,2,26,28,28,2,1,9,6,12,27,29,29,10,11,13,14,16,17,19,20,15,15,15,15,15,15,15,15,15,27,18,29,18,29,18,15,15,15,7,8,9,10,11,6,12,13,14,15,16,17,18,19,20,3,6,4,22,21,10,11,6,13,14,16,17,18,19,20,4,22,27,23,10,13,16,17,18,10,13,16,17,18,15,27,18,15],"f":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["mutex",3]],[[],["reentrantmutex",3]],[[],["rwlock",3]],[[]],[[]],[[]],[[],["mutex",3]],[[],["reentrantmutex",3]],[[],["rwlock",3]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["rwlockreadguard",3]],[[],["rwlockreadguard",3]],[[]],[[],["rwlockupgradablereadguard",3]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["fo
rmatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["mutex",3]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["reentrantmutex",3]],[[]],[[]],[[],["rwlock",3]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[]],[[],["mutexguard",3]],[[]],[[],["reentrantmutexguard",3]],[[]],[[]],[[]],[[]],[[],[["sized",8],["mappedmutexguard",3]]],[[],[["sized",8],["mappedmutexguard",3]]],[[],[["sized",8],["mappedreentrantmutexguard",3]]],[[],[["sized",8],["mappedreentrantmutexguard",3]]],[[],[["sized",8],["mappedrwlockreadguard",3]]],[[],[["sized",8],["mappedrwlockwriteguard",3]]],[[],[["sized",8],["mappedrwlockreadguard",3]]],[[],[["sized",8],["mappedrwlockwriteguard",3]]],[[],["mutex",3]],[[],["mutex",3]],[[],["reentrantmutex",3]],[[],["rwlock",3]],[[],["nonzerousize",3]],[[]],[[]],[[]],[[],["rwlockreadguard",3]],[[],["rwlockreadguard",3]],[[],["reentrantmutex",3]],[[],["rwlock",3]],[[],["rwlock",3]],[[],["rwlock",3]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["bool",15]],[[],[["mutexguard",3],["option",4]]],[[],["bool",15]],[[],[["reentrantmutexguard",3],["option",4]]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],[["mutexguard",3],["option",4]]],[[],["bool",15]],[[],[["reentrantmutexguard",3],["option",4]]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],[["mutexguard",3],["option",4]]],[[],["bool",15]],[[],[["reentrantmutexguard",3],["option",4]]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],[["result",4],["mappedmutexguard",3]]],[[],[["result",4],["mappedmutexguard",3]]],[[],[["result",4],["mappedreentrantmutexguard",3]]],[[],[["result",4],["mappedreentrantmutexguard",3]]],[[],[["result",4],["mappedrwlockreadguard",3]]],[[],[["result",4],["mappedrwlockwriteguard",3]]],[[],[["result",4],["mappedrwlockreadguard",3]]],[[],[["result",4],["mappedrwlockwriteguard",3]]],[[],[["option",4],["rwlockreadguard",3]]],[[],[["option",4],["rwlockreadguard",3]]],[[],[["option",4],["rwlockreadguard",3]]],[[],[["option",4],["rwlockreadguard",3]]],[[],[["option",4],["rwlockreadguard",3]]],[[],[["option",4],["rwlockreadguard",3]]],[[],[["option",4],["rwlockupgradablereadguard",3]]],[[],[["option",4],["rwlockupgradablereadguard",3]]],[[],[["option",4],["rwlockupgradablereadguard",3]]],[[],["bool",15]],[[],[["rwlockwriteguard",3],["result",4]]],[[],["bool",15]],[[],[["rwlockwriteguard",3],["result",4]]],[[],["bool",15]],[[],[["rwlockwriteguard",3],["result",4]]],[[],[["option",4],["rwlockwriteguard",3]]],[[],[["option",4],["rwlockwriteguard",3]]],[
[],[["option",4],["rwlockwriteguard",3]]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["rwlockupgradablereadguard",3]],[[]],[[],["rwlockwriteguard",3]],[[],["rwlockwriteguard",3]]],"p":[[8,"RawMutexTimed"],[8,"RawRwLockTimed"],[8,"RawMutex"],[8,"RawRwLock"],[8,"GetThreadId"],[3,"RawReentrantMutex"],[3,"GuardSend"],[3,"GuardNoSend"],[3,"Mutex"],[3,"MutexGuard"],[3,"MappedMutexGuard"],[3,"ReentrantMutex"],[3,"ReentrantMutexGuard"],[3,"MappedReentrantMutexGuard"],[3,"RwLock"],[3,"RwLockReadGuard"],[3,"RwLockWriteGuard"],[3,"RwLockUpgradableReadGuard"],[3,"MappedRwLockReadGuard"],[3,"MappedRwLockWriteGuard"],[8,"RawMutexFair"],[8,"RawRwLockFair"],[8,"RawRwLockUpgradeFair"],[8,"RawRwLockDowngrade"],[8,"RawRwLockUpgradeDowngrade"],[8,"RawRwLockRecursive"],[8,"RawRwLockUpgrade"],[8,"RawRwLockRecursiveTimed"],[8,"RawRwLockUpgradeTimed"]]},\ +"scopeguard":{"doc":"A scope guard will run a given closure when it goes out …","t":[4,3,8,11,11,11,11,14,11,11,11,11,11,11,11,5,11,11,11,10,11,11,11,11,11,11,11,11],"n":["Always","ScopeGuard","Strategy","borrow","borrow","borrow_mut","borrow_mut","defer","deref","deref_mut","drop","fmt","fmt","from","from","guard","into","into","into_inner","should_run","should_run","try_from","try_from","try_into","try_into","type_id","type_id","with_strategy"],"q":["scopeguard","","","","","","","","","","","","","","","","","","","","","","","","","","",""],"d":["Always run on scope exit.","ScopeGuard is a scope guard that may own a protected …","Controls in which cases the associated code should be run","","","","","Macro to create a ScopeGuard (always run).","","","","","","","","Create a new ScopeGuard owning v and with deferred …","","","“Defuse” the guard and extract the value without …","Return true if the guard’s associated code should run …","","","","","","","","Create a ScopeGuard that owns v (accessible through …"],"i":[0,0,0,1,2,1,2,0,1,1,1,1,2,1,2,0,1,2,1,3,2,1,2,1,2,1,2,1],"f":[null,null,null,[[]],[[]],[[]],[[]],null,[[]],[[]],[[]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[],[["always",4],["scopeguard",3]]],[[]],[[]],[[]],[[],["bool",15]],[[],["bool",15]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["scopeguard",3]]],"p":[[3,"ScopeGuard"],[4,"Always"],[8,"Strategy"]]},\ +"spin":{"doc":"This crate provides spin-based versions of the primitives 
…","t":[6,6,6,6,6,6,6,0,0,0,0,0,0,0,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,11,11,11,11,11,11,11,11,11,11,11,11,11,6,6,6,6,6,6,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,0,11,11,11,11,11,11,11,11,11,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,18,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,3,8,3,11,11,11,11,11,11,11,11,10,11,11,11,11,11,11,11,11,3,3,3,3,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11],"n":["Barrier","Lazy","Mutex","Once","RwLock","RwLockUpgradableGuard","RwLockWriteGuard","barrier","lazy","lock_api","mutex","once","relax","rwlock","Barrier","BarrierWaitResult","borrow","borrow","borrow_mut","borrow_mut","from","from","into","into","is_leader","new","try_from","try_from","try_into","try_into","type_id","type_id","wait","Lazy","as_mut_ptr","borrow","borrow_mut","default","deref","fmt","force","from","into","new","try_from","try_into","type_id","Mutex","MutexGuard","RwLock","RwLockReadGuard","RwLockUpgradableReadGuard","RwLockWriteGuard","Mutex","MutexGuard","borrow","borrow","borrow_mut","borrow_mut","default","deref","deref_mut","fmt","fmt","fmt","force_unlock","from","from","from","from","get_mut","into","into","into_inner","is_locked","is_locked","leak","lock","lock","new","spin","try_from","try_from","try_into","try_into","try_lock","try_lock","type_id","type_id","unlock","SpinMutex","SpinMutexGuard","as_mut_ptr","borrow","borrow","borrow_mut","borrow_mut","default","deref","deref_mut","drop","fmt","fmt","fmt","force_unlock","from","from","from","from","get_mut","into","into","into_inner","is_locked","is_locked","leak","lock","lock","new","try_from","try_from","try_into","try_into","try_lock","try_lock","type_id","type_id","unlock","INIT","Once","as_mut_ptr","borrow","borrow_mut","call_once","drop","fmt","from","from","from","get","get_mut","get_unchecked","initialized","into","is_completed","new","poll","try_from","try_into","try_into_inner","type_id","wait","Loop","RelaxStrategy","Spin","borrow","borrow","borrow_mut","borrow_mut","from","from","into","into","relax","relax","relax","try_from","try_from","try_into","try_into","type_id","type_id","RwLock","RwLockReadGuard","RwLockUpgradableGuard","RwLockWriteGuard","as_mut_ptr","borrow","borrow","borrow","borrow","borrow_mut","borrow_mut","borrow_mut","borrow_mut","default","deref","deref","deref","deref_mut","downgrade","downgrade","downgrade","downgrade_to_upgradeable","drop","drop","drop","fmt","fmt","fmt","fmt","fmt","fmt","fmt","force_read_decrement","force_write_unlock","from","from","from","from","from","from","get_mut","into","into","into","into","into_inner","is_locked","leak","leak","leak","lock_exclusive","lock_shared","lock_upgradable","new","read","reader_count","try_from","try_from","try_from","try_from","try_into","try_into","try_into","try_into","try_lock_exclusive","try_lock_shared","try_lock_upgradable","try_read","try_upgrade","try_upgrade","try_upgradeable_read","try_write","type_id","type_id","type_id","type_id","unlock_exclusive","unlock_shared","unlock_upgradable","upgrade","upgrade","upgradeable_read","write","writer_count"],"q":["spin","","","","","","","","","","","","","","spin::barrier","","","","","","","","","","","","","","","","","","","spin::lazy","","","","","
","","","","","","","","","spin::lock_api","","","","","","spin::mutex","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","spin::mutex::spin","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","spin::once","","","","","","","","","","","","","","","","","","","","","","","","spin::relax","","","","","","","","","","","","","","","","","","","","spin::rwlock","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","",""],"d":["A primitive that synchronizes the execution of multiple …","A value which is initialized on the first access. See […","A primitive that synchronizes the execution of multiple …","A primitive that provides lazy one-time initialization. …","A lock that provides data access to either one writer or …","A guard that provides immutable data access but can be …","A guard that provides mutable data access. See […","Synchronization primitive allowing multiple threads to …","Synchronization primitives for lazy evaluation.","Spin synchronisation primitives, but compatible with …","Locks that have the same behaviour as a mutex.","Synchronization primitives for one-time evaluation.","Strategies that determine the behaviour of locks when …","A lock that provides data access to either one writer or …","A primitive that synchronizes the execution of multiple …","A BarrierWaitResult is returned by wait when all threads …","","","","","","","","","Returns whether this thread from wait is the “leader …","Creates a new barrier that can block a given number of …","","","","","","","Blocks the current thread until all threads have …","A value which is initialized on the first access.","Retrieves a mutable pointer to the inner data.","","","Creates a new lazy value using Default as the …","","","Forces the evaluation of this lazy value and returns a …","","","Creates a new lazy value with the given initializing …","","","","A lock that provides mutually exclusive data access …","A guard that provides mutable data access (compatible …","A lock that provides data access to either one writer or …","A guard that provides immutable data access (compatible …","A guard that provides immutable data access but can be …","A guard that provides mutable data access (compatible …","A spin-based lock providing mutually exclusive access to …","A generic guard that will protect some data access and …","","","","","","","","","","","Force unlock this [Mutex].","","","","","Returns a mutable reference to the underlying data.","","","Consumes this [Mutex] and unwraps the underlying data.","Returns true if the lock is currently held.","","Leak the lock guard, yielding a mutable reference to the …","","Locks the [Mutex] and returns a guard that permits access …","Creates a new [Mutex] wrapping the supplied data.","A naïve spinning mutex.","","","","","Try to lock this [Mutex], returning a lock guard if …","","","","","A spin lock providing mutually exclusive access to data.","A guard that provides mutable data access.","Returns a mutable pointer to the underlying data.","","","","","","","","The dropping of the MutexGuard will release the lock it …","","","","Force unlock this [SpinMutex].","","","","","Returns a mutable reference to the underlying data.","","","Consumes this [SpinMutex] and unwraps the 
underlying data.","","Returns true if the lock is currently held.","Leak the lock guard, yielding a mutable reference to the …","","Locks the [SpinMutex] and returns a guard that permits …","Creates a new [SpinMutex] wrapping the supplied data.","","","","","","Try to lock this [SpinMutex], returning a lock guard if …","","","","Initialization constant of [Once].","A primitive that provides lazy one-time initialization.","Retrieve a pointer to the inner data.","","","Performs an initialization routine once and only once. …","","","","","","Returns a reference to the inner value if the [Once] has …","Returns a mutable reference to the inner value if the […","Returns a reference to the inner value on the unchecked …","Creates a new initialized [Once].","","Checks whether the value has been initialized.","Creates a new [Once].","Like [Once::get], but will spin if the [Once] is in the …","","","Returns a the inner value if the [Once] has been …","","Spins until the [Once] contains a value.","A strategy that rapidly spins, without telling the CPU to …","A trait implemented by spinning relax strategies.","A strategy that rapidly spins while informing the CPU …","","","","","","","","","Perform the relaxing operation during a period of …","","","","","","","","","A lock that provides data access to either one writer or …","A guard that provides immutable data access.","A guard that provides immutable data access but can be …","A guard that provides mutable data access.","Returns a mutable pointer to the underying data.","","","","","","","","","","","","","","","Downgrades the writable lock guard to a readable, shared …","Downgrades the upgradeable lock guard to a readable, …","Downgrades the writable lock guard to an upgradable, …","","","","","","","","","","","Force decrement the reader count.","Force unlock exclusive write access.","","","","","","","Returns a mutable reference to the underlying data.","","","","","Consumes this RwLock, returning the underlying data.","","Leak the lock guard, yielding a reference to the …","Leak the lock guard, yielding a mutable reference to the …","Leak the lock guard, yielding a reference to the …","","","","Creates a new spinlock wrapping the supplied data.","Locks this rwlock with shared read access, blocking the …","Return the number of readers that currently hold the lock …","","","","","","","","","","","","Attempt to acquire this lock with shared read access.","","Tries to upgrade an upgradeable lock guard to a writable …","Tries to obtain an upgradeable lock guard.","Attempt to lock this rwlock with exclusive write access.","","","","","","","","","Upgrades an upgradeable lock guard to a writable lock …","Obtain a readable lock guard that can later be upgraded …","Lock this rwlock with exclusive write access, blocking …","Return the number of writers that currently hold the 
lock."],"i":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,1,2,1,2,1,2,2,1,1,2,1,2,1,2,1,0,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,0,0,0,0,0,0,4,5,4,5,4,5,5,4,5,5,4,4,4,4,5,4,4,5,4,4,4,5,4,4,4,0,4,5,4,5,4,4,4,5,4,0,0,6,6,7,6,7,6,7,7,7,6,7,7,6,6,6,6,7,6,6,7,6,6,6,7,6,6,6,6,7,6,7,6,6,6,7,6,8,0,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,0,0,0,9,10,9,10,9,10,9,10,11,9,10,9,10,9,10,9,10,0,0,0,0,12,12,13,14,15,12,13,14,15,12,13,14,15,14,12,14,15,14,13,14,15,12,13,13,14,14,15,15,12,12,12,12,12,13,14,15,12,12,13,14,15,12,12,13,14,15,12,12,12,12,12,12,12,13,14,15,12,13,14,15,12,12,12,12,12,15,12,12,12,13,14,15,12,12,12,12,15,12,12,12],"f":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[["usize",15]]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],[[],["barrierwaitresult",3]],null,[[]],[[]],[[]],[[]],[[]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],null,null,null,null,null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["bool",15]],[[]],[[]],[[],["mutexguard",3]],[[]],null,[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],[["option",4],["mutexguard",3]]],[[],["bool",15]],[[],["typeid",3]],[[],["typeid",3]],[[]],null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[],["bool",15]],[[]],[[]],[[],["spinmutexguard",3]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["bool",15]],[[],[["option",4],["spinmutexguard",3]]],[[],["typeid",3]],[[],["typeid",3]],[[]],null,null,[[]],[[]],[[]],[[["fnonce",8]]],[[]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[],["option",4]],[[],["option",4]],[[]],[[]],[[]],[[],["bool",15]],[[]],[[],["option",4]],[[],["result",4]],[[],["result",4]],[[],["option",4]],[[],["typeid",3]],[[]],null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["typeid",3]],[[],["typeid",3]],null,null,null,null,[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["rwlockreadguard",3]],[[],["rwlockreadguard",3]],[[],["rwlockupgradableguard",3]],[[]],[[]],[[]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[["formatter",3]],["result",6]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["bool",15]],[[]],[[]],[[]],[[]],[[]],[[]],[[]],[[],["rwlockreadguard",3]],[[],["usize",15]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["result",4]],[[],["bool",15]],[[],["bool",15]],[[],["bool",15]],[[],[["option",4],["rwlockreadguard",3]]],[[],["bool",15]],[[],[["result",4],["rwlockwriteguard",3]]],[[],[["rwlockupgradableguard",3],["option",4]]],[[],[["option",4],["rwlockwriteguard",3]]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[],["typeid",3]],[[]],[[]],[[]],[[]],[[],["rwlockwriteguard",3]],[[],["rwlockupgradableguard",3]],[[],["rwlockwriteguard"
,3]],[[],["usize",15]]],"p":[[3,"Barrier"],[3,"BarrierWaitResult"],[3,"Lazy"],[3,"Mutex"],[3,"MutexGuard"],[3,"SpinMutex"],[3,"SpinMutexGuard"],[3,"Once"],[3,"Spin"],[3,"Loop"],[8,"RelaxStrategy"],[3,"RwLock"],[3,"RwLockReadGuard"],[3,"RwLockWriteGuard"],[3,"RwLockUpgradableGuard"]]},\ "stable_deref_trait":{"doc":"This module defines an unsafe marker trait, StableDeref, …","t":[8,8],"n":["CloneStableDeref","StableDeref"],"q":["stable_deref_trait",""],"d":["An unsafe marker trait for types where clones deref to …","An unsafe marker trait for types that deref to a stable …"],"i":[0,0],"f":[null,null],"p":[]}\ }'); if (window.initSearch) {window.initSearch(searchIndex)}; \ No newline at end of file diff --git a/source-files.js b/source-files.js index 5d19be2a..52b26c89 100644 --- a/source-files.js +++ b/source-files.js @@ -2,5 +2,8 @@ var N = null;var sourcesIndex = {}; sourcesIndex["byteorder"] = {"name":"","files":["lib.rs"]}; sourcesIndex["hash32"] = {"name":"","files":["fnv.rs","lib.rs","murmur3.rs"]}; sourcesIndex["heapless"] = {"name":"","dirs":[{"name":"pool","files":["cas.rs","mod.rs","singleton.rs"]}],"files":["binary_heap.rs","deque.rs","histbuf.rs","indexmap.rs","indexset.rs","lib.rs","linear_map.rs","mpmc.rs","sealed.rs","sorted_linked_list.rs","spsc.rs","string.rs","vec.rs"]}; +sourcesIndex["lock_api"] = {"name":"","files":["lib.rs","mutex.rs","remutex.rs","rwlock.rs"]}; +sourcesIndex["scopeguard"] = {"name":"","files":["lib.rs"]}; +sourcesIndex["spin"] = {"name":"","dirs":[{"name":"mutex","files":["spin.rs"]}],"files":["barrier.rs","lazy.rs","lib.rs","mutex.rs","once.rs","relax.rs","rwlock.rs"]}; sourcesIndex["stable_deref_trait"] = {"name":"","files":["lib.rs"]}; createSourceSidebar(); diff --git a/spin/all.html b/spin/all.html new file mode 100644 index 00000000..c9b3d6ca --- /dev/null +++ b/spin/all.html @@ -0,0 +1,5 @@ +List of all items in this crate + +

List of all items[] + +

Structs

Traits

Typedefs

\ No newline at end of file diff --git a/spin/barrier/index.html b/spin/barrier/index.html new file mode 100644 index 00000000..e2d6260a --- /dev/null +++ b/spin/barrier/index.html @@ -0,0 +1,19 @@ +spin::barrier - Rust + +

Module spin::barrier[][src]

Expand description

Synchronization primitive allowing multiple threads to synchronize the +beginning of some computation.

+

Implementation adapted from the ‘Barrier’ type of the standard library. See: +https://doc.rust-lang.org/std/sync/struct.Barrier.html

+

Copyright 2014 The Rust Project Developers. See the COPYRIGHT +file at the top-level directory of this distribution and at +http://rust-lang.org/COPYRIGHT.

+

Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +<LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +option. This file may not be copied, modified, or distributed +except according to those terms.

+

Structs

+
Barrier

A primitive that synchronizes the execution of multiple threads.

+
BarrierWaitResult

A BarrierWaitResult is returned by wait when all threads in the Barrier +have rendezvoused.

+
\ No newline at end of file diff --git a/spin/barrier/sidebar-items.js b/spin/barrier/sidebar-items.js new file mode 100644 index 00000000..08dc32c8 --- /dev/null +++ b/spin/barrier/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"struct":[["Barrier","A primitive that synchronizes the execution of multiple threads."],["BarrierWaitResult","A `BarrierWaitResult` is returned by `wait` when all threads in the `Barrier` have rendezvoused."]]}); \ No newline at end of file diff --git a/spin/barrier/struct.Barrier.html b/spin/barrier/struct.Barrier.html new file mode 100644 index 00000000..146d8626 --- /dev/null +++ b/spin/barrier/struct.Barrier.html @@ -0,0 +1,73 @@ +Barrier in spin::barrier - Rust + +

Struct spin::barrier::Barrier[][src]

pub struct Barrier<R = Spin> { /* fields omitted */ }
Expand description

A primitive that synchronizes the execution of multiple threads.

+

Example

+
+use spin;
+use std::sync::Arc;
+use std::thread;
+
+let mut handles = Vec::with_capacity(10);
+let barrier = Arc::new(spin::Barrier::new(10));
+for _ in 0..10 {
+    let c = barrier.clone();
+    // The same messages will be printed together.
+    // You will NOT see any interleaving.
+    handles.push(thread::spawn(move|| {
+        println!("before wait");
+        c.wait();
+        println!("after wait");
+    }));
+}
+// Wait for other threads to finish.
+for handle in handles {
+    handle.join().unwrap();
+}
+

Implementations

Blocks the current thread until all threads have rendezvoused here.

+

Barriers are re-usable after all threads have rendezvoused once, and can +be used continuously.

+

A single (arbitrary) thread will receive a BarrierWaitResult that +returns true from is_leader when returning from this function, and +all other threads will receive a result that will return false from +is_leader.

+

Examples

+
+use spin;
+use std::sync::Arc;
+use std::thread;
+
+let mut handles = Vec::with_capacity(10);
+let barrier = Arc::new(spin::Barrier::new(10));
+for _ in 0..10 {
+    let c = barrier.clone();
+    // The same messages will be printed together.
+    // You will NOT see any interleaving.
+    handles.push(thread::spawn(move|| {
+        println!("before wait");
+        c.wait();
+        println!("after wait");
+    }));
+}
+// Wait for other threads to finish.
+for handle in handles {
+    handle.join().unwrap();
+}
+

Creates a new barrier that can block a given number of threads.

+

A barrier will block n-1 threads which call wait and then wake up +all threads at once when the nth thread calls wait. A Barrier created +with n = 0 will behave identically to one created with n = 1.

+

Examples

+
+use spin;
+
+let barrier = spin::Barrier::new(10);
+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/barrier/struct.BarrierWaitResult.html b/spin/barrier/struct.BarrierWaitResult.html new file mode 100644 index 00000000..307e5290 --- /dev/null +++ b/spin/barrier/struct.BarrierWaitResult.html @@ -0,0 +1,30 @@ +BarrierWaitResult in spin::barrier - Rust + +

Struct spin::barrier::BarrierWaitResult[][src]

pub struct BarrierWaitResult(_);
Expand description

A BarrierWaitResult is returned by wait when all threads in the Barrier +have rendezvoused.

+

Examples

+
+use spin;
+
+let barrier = spin::Barrier::new(1);
+let barrier_wait_result = barrier.wait();
+

Implementations

Returns whether this thread from wait is the “leader thread”.

+

Only one thread will have true returned from their result, all other +threads will have false returned.

+

Examples

+
+use spin;
+
+let barrier = spin::Barrier::new(1);
+let barrier_wait_result = barrier.wait();
+println!("{:?}", barrier_wait_result.is_leader());
+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/index.html b/spin/index.html new file mode 100644 index 00000000..324b3923 --- /dev/null +++ b/spin/index.html @@ -0,0 +1,92 @@ +spin - Rust + +

Crate spin[][src]

Expand description

This crate provides spin-based versions of the +primitives in std::sync and std::lazy. Because synchronization is done through spinning, +the primitives are suitable for use in no_std environments.

+

Features

+
  • Mutex, RwLock, Once/SyncOnceCell, and SyncLazy equivalents
  • Support for no_std environments
  • lock_api compatibility
  • Upgradeable RwLock guards
  • Guards can be sent and shared between threads
  • Guard leaking
  • Ticket locks
  • Different strategies for dealing with contention

Relationship with std::sync

+

While spin is not a drop-in replacement for std::sync (and +should not be considered as such), an effort is made to keep this crate reasonably consistent with std::sync.

+

Many of the types defined in this crate have ‘additional capabilities’ when compared to std::sync:

+ +

Conversely, the types in this crate do not have some of the features std::sync has:

+ +

Feature flags

+

The crate comes with a few feature flags that you may wish to use.

+
  • lock_api enables support for lock_api
  • ticket_mutex uses a ticket lock for the implementation of Mutex
  • std enables support for thread yielding instead of spinning

Re-exports

+
pub use mutex::MutexGuard;
pub use rwlock::RwLockReadGuard;
pub use relax::Spin;
pub use relax::RelaxStrategy;

Modules

+
barrier

Synchronization primitive allowing multiple threads to synchronize the +beginning of some computation.

+
lazy

Synchronization primitives for lazy evaluation.

+
lock_api

Spin synchronisation primitives, but compatible with lock_api.

+
mutex

Locks that have the same behaviour as a mutex.

+
once

Synchronization primitives for one-time evaluation.

+
relax

Strategies that determine the behaviour of locks when encountering contention.

+
rwlock

A lock that provides data access to either one writer or many readers.

+

Type Definitions

+
Barrier

A primitive that synchronizes the execution of multiple threads. See barrier::Barrier for documentation.

+
Lazy

A value which is initialized on the first access. See lazy::Lazy for documentation.

+
Mutex

A primitive that synchronizes the execution of multiple threads. See mutex::Mutex for documentation.

+
Once

A primitive that provides lazy one-time initialization. See once::Once for documentation.

+
RwLock

A lock that provides data access to either one writer or many readers. See rwlock::RwLock for documentation.

+
RwLockUpgradableGuard

A guard that provides immutable data access but can be upgraded to RwLockWriteGuard. See +rwlock::RwLockUpgradableGuard for documentation.

+
RwLockWriteGuard

A guard that provides mutable data access. See rwlock::RwLockWriteGuard for documentation.

+
\ No newline at end of file diff --git a/spin/lazy/index.html b/spin/lazy/index.html new file mode 100644 index 00000000..2f605e32 --- /dev/null +++ b/spin/lazy/index.html @@ -0,0 +1,8 @@ +spin::lazy - Rust + +

Module spin::lazy[][src]

Expand description

Synchronization primitives for lazy evaluation.

+

Implementation adapted from the SyncLazy type of the standard library. See: +https://doc.rust-lang.org/std/lazy/struct.SyncLazy.html

+

Structs

+
Lazy

A value which is initialized on the first access.

+
\ No newline at end of file diff --git a/spin/lazy/sidebar-items.js b/spin/lazy/sidebar-items.js new file mode 100644 index 00000000..4388dc28 --- /dev/null +++ b/spin/lazy/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"struct":[["Lazy","A value which is initialized on the first access."]]}); \ No newline at end of file diff --git a/spin/lazy/struct.Lazy.html b/spin/lazy/struct.Lazy.html new file mode 100644 index 00000000..aefa90ad --- /dev/null +++ b/spin/lazy/struct.Lazy.html @@ -0,0 +1,61 @@ +Lazy in spin::lazy - Rust + +

Struct spin::lazy::Lazy[][src]

pub struct Lazy<T, F = fn() -> T, R = Spin> { /* fields omitted */ }
Expand description

A value which is initialized on the first access.

+

This type is a thread-safe Lazy, and can be used in statics.

+

Examples

+
+use std::collections::HashMap;
+use spin::Lazy;
+
+static HASHMAP: Lazy<HashMap<i32, String>> = Lazy::new(|| {
+    println!("initializing");
+    let mut m = HashMap::new();
+    m.insert(13, "Spica".to_string());
+    m.insert(74, "Hoyten".to_string());
+    m
+});
+
+fn main() {
+    println!("ready");
+    std::thread::spawn(|| {
+        println!("{:?}", HASHMAP.get(&13));
+    }).join().unwrap();
+    println!("{:?}", HASHMAP.get(&74));
+
+    // Prints:
+    //   ready
+    //   initializing
+    //   Some("Spica")
+    //   Some("Hoyten")
+}
+

Implementations

Creates a new lazy value with the given initializing +function.

+

Retrieves a mutable pointer to the inner data.

+

This is especially useful when interfacing with low-level code or FFI where the caller +explicitly knows that it has exclusive access to the inner data. Note that reading from +this pointer is UB until the value is initialized or has been directly written to.

+
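As an illustrative sketch (the static name and initial value below are hypothetical), the pointer is only read after initialization has been forced:

+use spin::Lazy;
+
+static VALUE: Lazy<u32> = Lazy::new(|| 42);
+
+// Force initialization first; reading through the raw pointer
+// before this point would be UB.
+Lazy::force(&VALUE);
+unsafe {
+    assert_eq!(Lazy::as_mut_ptr(&VALUE).read(), 42);
+}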

Forces the evaluation of this lazy value and +returns a reference to result. This is equivalent +to the Deref impl, but is explicit.

+

Examples

+
+use spin::Lazy;
+
+let lazy = Lazy::new(|| 92);
+
+assert_eq!(Lazy::force(&lazy), &92);
+assert_eq!(&*lazy, &92);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

Creates a new lazy value using Default as the initializing function.

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/lock_api/index.html b/spin/lock_api/index.html new file mode 100644 index 00000000..919a11f1 --- /dev/null +++ b/spin/lock_api/index.html @@ -0,0 +1,11 @@ +spin::lock_api - Rust + +

Module spin::lock_api[][src]

Expand description

Spin synchronisation primitives, but compatible with lock_api.

+

Type Definitions

+
Mutex

A lock that provides mutually exclusive data access (compatible with lock_api).

+
MutexGuard

A guard that provides mutable data access (compatible with lock_api).

+
RwLock

A lock that provides data access to either one writer or many readers (compatible with lock_api).

+
RwLockReadGuard

A guard that provides immutable data access (compatible with lock_api).

+
RwLockUpgradableReadGuard

A guard that provides immutable data access but can be upgraded to RwLockWriteGuard (compatible with lock_api).

+
RwLockWriteGuard

A guard that provides mutable data access (compatible with lock_api).

+
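A minimal usage sketch, assuming the crate is built with the lock_api feature (the variable names are illustrative); the aliases behave like ordinary lock_api locks backed by the spinning primitives:

+let lock = spin::lock_api::Mutex::new(0);
+
+{
+    // The guard is a regular lock_api guard backed by the spin mutex.
+    let mut guard = lock.lock();
+    *guard += 1;
+}
+assert_eq!(*lock.lock(), 1);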
\ No newline at end of file diff --git a/spin/lock_api/sidebar-items.js b/spin/lock_api/sidebar-items.js new file mode 100644 index 00000000..952392e8 --- /dev/null +++ b/spin/lock_api/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"type":[["Mutex","A lock that provides mutually exclusive data access (compatible with `lock_api`)."],["MutexGuard","A guard that provides mutable data access (compatible with `lock_api`)."],["RwLock","A lock that provides data access to either one writer or many readers (compatible with `lock_api`)."],["RwLockReadGuard","A guard that provides immutable data access (compatible with `lock_api`)."],["RwLockUpgradableReadGuard","A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`] (compatible with `lock_api`)."],["RwLockWriteGuard","A guard that provides mutable data access (compatible with `lock_api`)."]]}); \ No newline at end of file diff --git a/spin/lock_api/type.Mutex.html b/spin/lock_api/type.Mutex.html new file mode 100644 index 00000000..b297f798 --- /dev/null +++ b/spin/lock_api/type.Mutex.html @@ -0,0 +1,4 @@ +Mutex in spin::lock_api - Rust + +

Type Definition spin::lock_api::Mutex[][src]

type Mutex<T> = Mutex<Mutex<()>, T>;
Expand description

A lock that provides mutually exclusive data access (compatible with lock_api).

+
\ No newline at end of file diff --git a/spin/lock_api/type.MutexGuard.html b/spin/lock_api/type.MutexGuard.html new file mode 100644 index 00000000..91464ae9 --- /dev/null +++ b/spin/lock_api/type.MutexGuard.html @@ -0,0 +1,4 @@ +MutexGuard in spin::lock_api - Rust + +

Type Definition spin::lock_api::MutexGuard[][src]

type MutexGuard<'a, T> = MutexGuard<'a, Mutex<()>, T>;
Expand description

A guard that provides mutable data access (compatible with lock_api).

+
\ No newline at end of file diff --git a/spin/lock_api/type.RwLock.html b/spin/lock_api/type.RwLock.html new file mode 100644 index 00000000..ae883d59 --- /dev/null +++ b/spin/lock_api/type.RwLock.html @@ -0,0 +1,4 @@ +RwLock in spin::lock_api - Rust + +

Type Definition spin::lock_api::RwLock[][src]

type RwLock<T> = RwLock<RwLock<()>, T>;
Expand description

A lock that provides data access to either one writer or many readers (compatible with lock_api).

+
\ No newline at end of file diff --git a/spin/lock_api/type.RwLockReadGuard.html b/spin/lock_api/type.RwLockReadGuard.html new file mode 100644 index 00000000..6d0e8c66 --- /dev/null +++ b/spin/lock_api/type.RwLockReadGuard.html @@ -0,0 +1,4 @@ +RwLockReadGuard in spin::lock_api - Rust + +

Type Definition spin::lock_api::RwLockReadGuard[][src]

type RwLockReadGuard<'a, T> = RwLockReadGuard<'a, RwLock<()>, T>;
Expand description

A guard that provides immutable data access (compatible with lock_api).

+
\ No newline at end of file diff --git a/spin/lock_api/type.RwLockUpgradableReadGuard.html b/spin/lock_api/type.RwLockUpgradableReadGuard.html new file mode 100644 index 00000000..ac548058 --- /dev/null +++ b/spin/lock_api/type.RwLockUpgradableReadGuard.html @@ -0,0 +1,4 @@ +RwLockUpgradableReadGuard in spin::lock_api - Rust + +

Type Definition spin::lock_api::RwLockUpgradableReadGuard[][src]

type RwLockUpgradableReadGuard<'a, T> = RwLockUpgradableReadGuard<'a, RwLock<()>, T>;
Expand description

A guard that provides immutable data access but can be upgraded to RwLockWriteGuard (compatible with lock_api).

+
\ No newline at end of file diff --git a/spin/lock_api/type.RwLockWriteGuard.html b/spin/lock_api/type.RwLockWriteGuard.html new file mode 100644 index 00000000..f8b03653 --- /dev/null +++ b/spin/lock_api/type.RwLockWriteGuard.html @@ -0,0 +1,4 @@ +RwLockWriteGuard in spin::lock_api - Rust + +

Type Definition spin::lock_api::RwLockWriteGuard[][src]

type RwLockWriteGuard<'a, T> = RwLockWriteGuard<'a, RwLock<()>, T>;
Expand description

A guard that provides mutable data access (compatible with lock_api).

+
\ No newline at end of file diff --git a/spin/mutex/index.html b/spin/mutex/index.html new file mode 100644 index 00000000..1f43381f --- /dev/null +++ b/spin/mutex/index.html @@ -0,0 +1,15 @@ +spin::mutex - Rust + +

Module spin::mutex[][src]

Expand description

Locks that have the same behaviour as a mutex.

+

The Mutex in the root of the crate can be configured using the ticket_mutex feature. +If it’s enabled, TicketMutex and TicketMutexGuard will be re-exported as Mutex +and MutexGuard; otherwise, the SpinMutex and its guard will be re-exported.

+

ticket_mutex is disabled by default.

+
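As a sketch of what this means in practice (assuming default features, i.e. ticket_mutex disabled), the root Mutex and the SpinMutex in the spin submodule behave identically:

+// With default features, spin::mutex::Mutex is backed by the
+// SpinMutex from the `spin` submodule.
+let a = spin::mutex::Mutex::new(1);
+let b = spin::mutex::spin::SpinMutex::<_>::new(1);
+assert_eq!(*a.lock() + *b.lock(), 2);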

Re-exports

+
pub use self::spin::SpinMutex;
pub use self::spin::SpinMutexGuard;

Modules

+
spin

A naïve spinning mutex.

+

Structs

+
Mutex

A spin-based lock providing mutually exclusive access to data.

+
MutexGuard

A generic guard that will protect some data access and +uses either a ticket lock or a normal spin mutex.

+
\ No newline at end of file diff --git a/spin/mutex/sidebar-items.js b/spin/mutex/sidebar-items.js new file mode 100644 index 00000000..61a289bb --- /dev/null +++ b/spin/mutex/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"mod":[["spin","A naïve spinning mutex."]],"struct":[["Mutex","A spin-based lock providing mutually exclusive access to data."],["MutexGuard","A generic guard that will protect some data access and uses either a ticket lock or a normal spin mutex."]]}); \ No newline at end of file diff --git a/spin/mutex/spin/index.html b/spin/mutex/spin/index.html new file mode 100644 index 00000000..5aeace88 --- /dev/null +++ b/spin/mutex/spin/index.html @@ -0,0 +1,9 @@ +spin::mutex::spin - Rust + +

Module spin::mutex::spin[][src]

Expand description

A naïve spinning mutex.

+

Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case +latency is theoretically infinite.

+

Structs

+
SpinMutex

A spin lock providing mutually exclusive access to data.

+
SpinMutexGuard

A guard that provides mutable data access.

+
\ No newline at end of file diff --git a/spin/mutex/spin/sidebar-items.js b/spin/mutex/spin/sidebar-items.js new file mode 100644 index 00000000..2674abd5 --- /dev/null +++ b/spin/mutex/spin/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"struct":[["SpinMutex","A spin lock providing mutually exclusive access to data."],["SpinMutexGuard","A guard that provides mutable data access."]]}); \ No newline at end of file diff --git a/spin/mutex/spin/struct.SpinMutex.html b/spin/mutex/spin/struct.SpinMutex.html new file mode 100644 index 00000000..4703e7fd --- /dev/null +++ b/spin/mutex/spin/struct.SpinMutex.html @@ -0,0 +1,141 @@ +SpinMutex in spin::mutex::spin - Rust + +

Struct spin::mutex::spin::SpinMutex[][src]

pub struct SpinMutex<T: ?Sized, R = Spin> { /* fields omitted */ }
Expand description

A spin lock providing mutually exclusive access to data.

+

Example

+
+use spin;
+
+let lock = spin::mutex::SpinMutex::<_>::new(0);
+
+// Modify the data
+*lock.lock() = 2;
+
+// Read the data
+let answer = *lock.lock();
+assert_eq!(answer, 2);
+

Thread safety example

+
+use spin;
+use std::sync::{Arc, Barrier};
+
+let thread_count = 1000;
+let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
+
+// We use a barrier to ensure the readout happens after all writing
+let barrier = Arc::new(Barrier::new(thread_count + 1));
+
+for _ in (0..thread_count) {
+    let my_barrier = barrier.clone();
+    let my_lock = spin_mutex.clone();
+    std::thread::spawn(move || {
+        let mut guard = my_lock.lock();
+        *guard += 1;
+
+        // Release the lock to prevent a deadlock
+        drop(guard);
+        my_barrier.wait();
+    });
+}
+
+barrier.wait();
+
+let answer = { *spin_mutex.lock() };
+assert_eq!(answer, thread_count);
+

Implementations

Creates a new SpinMutex wrapping the supplied data.

+

Example

+
+use spin::mutex::SpinMutex;
+
+static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
+
+fn demo() {
+    let lock = MUTEX.lock();
+    // do something with lock
+    drop(lock);
+}
+

Consumes this SpinMutex and unwraps the underlying data.

+

Example

+
+let lock = spin::mutex::SpinMutex::<_>::new(42);
+assert_eq!(42, lock.into_inner());
+

Returns a mutable pointer to the underlying data.

+

This is mostly meant to be used for applications which require manual unlocking, but where +storing both the lock and the pointer to the inner data gets inefficient.

+

Example

+
+let lock = spin::mutex::SpinMutex::<_>::new(42);
+
+unsafe {
+    core::mem::forget(lock.lock());
+     
+    assert_eq!(lock.as_mut_ptr().read(), 42);
+    lock.as_mut_ptr().write(58);
+
+    lock.force_unlock();
+}
+
+assert_eq!(*lock.lock(), 58);
+
+

Locks the SpinMutex and returns a guard that permits access to the inner data.

+

The returned value may be dereferenced for data access +and the lock will be dropped when the guard falls out of scope.

+ +
+let lock = spin::mutex::SpinMutex::<_>::new(0);
+{
+    let mut data = lock.lock();
+    // The lock is now locked and the data can be accessed
+    *data += 1;
+    // The lock is implicitly dropped at the end of the scope
+}
+

Returns true if the lock is currently held.

+

Safety

+

This function provides no synchronization guarantees and so its result should be considered ‘out of date’ +the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.

+
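A small single-threaded illustration of the heuristic (the result can be stale the moment another thread is involved):

+let lock = spin::mutex::SpinMutex::<_>::new(0);
+assert!(!lock.is_locked());
+
+let guard = lock.lock();
+assert!(lock.is_locked());
+
+// Dropping the guard releases the lock again.
+drop(guard);
+assert!(!lock.is_locked());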

Force unlock this SpinMutex.

+

Safety

+

This is extremely unsafe if the lock is not held by the current +thread. However, this can be useful in some instances for exposing the +lock to FFI that doesn’t know how to deal with RAII.

+

Try to lock this SpinMutex, returning a lock guard if successful.

+

Example

+
+let lock = spin::mutex::SpinMutex::<_>::new(42);
+
+let maybe_guard = lock.try_lock();
+assert!(maybe_guard.is_some());
+
+// `maybe_guard` is still held, so the second call fails
+let maybe_guard2 = lock.try_lock();
+assert!(maybe_guard2.is_none());
+

Returns a mutable reference to the underlying data.

+

Since this call borrows the SpinMutex mutably, and a mutable reference is guaranteed to be exclusive in +Rust, no actual locking needs to take place – the mutable borrow statically guarantees no locks exist. As +such, this is a ‘zero-cost’ operation.

+

Example

+
+let mut lock = spin::mutex::SpinMutex::<_>::new(0);
+*lock.get_mut() = 10;
+assert_eq!(*lock.lock(), 10);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

Returns the “default value” for a type. Read more

+

Performs the conversion.

+

Marker type which determines whether a lock guard should be Send. Use +one of the GuardSend or GuardNoSend helper types here. Read more

+

Initial value for an unlocked mutex.

+

Acquires this mutex, blocking the current thread until it is able to do so.

+

Attempts to acquire this mutex without blocking. Returns true +if the lock was successfully acquired and false otherwise. Read more

+

Unlocks this mutex. Read more

+

Checks whether the mutex is currently locked.

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/mutex/spin/struct.SpinMutexGuard.html b/spin/mutex/spin/struct.SpinMutexGuard.html new file mode 100644 index 00000000..900442f3 --- /dev/null +++ b/spin/mutex/spin/struct.SpinMutexGuard.html @@ -0,0 +1,30 @@ +SpinMutexGuard in spin::mutex::spin - Rust + +

Struct spin::mutex::spin::SpinMutexGuard[][src]

pub struct SpinMutexGuard<'a, T: ?Sized + 'a> { /* fields omitted */ }
Expand description

A guard that provides mutable data access.

+

When the guard falls out of scope it will release the lock.

+

Implementations

Leak the lock guard, yielding a mutable reference to the underlying data.

+

Note that this function will permanently lock the original SpinMutex.

+ +
+let mylock = spin::mutex::SpinMutex::<_>::new(0);
+
+let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
+
+*data = 1;
+assert_eq!(*data, 1);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Mutably dereferences the value.

+

Formats the value using the given formatter. Read more

+

The dropping of the MutexGuard will release the lock it was created from.

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/mutex/struct.Mutex.html b/spin/mutex/struct.Mutex.html new file mode 100644 index 00000000..5a5f4b0e --- /dev/null +++ b/spin/mutex/struct.Mutex.html @@ -0,0 +1,125 @@ +Mutex in spin::mutex - Rust + +

Struct spin::mutex::Mutex[][src]

pub struct Mutex<T: ?Sized, R = Spin> { /* fields omitted */ }
Expand description

A spin-based lock providing mutually exclusive access to data.

+

The implementation uses either a ticket mutex or a regular spin mutex depending on whether the spin_mutex or +ticket_mutex feature flag is enabled.

+

Example

+
+use spin;
+
+let lock = spin::Mutex::new(0);
+
+// Modify the data
+*lock.lock() = 2;
+
+// Read the data
+let answer = *lock.lock();
+assert_eq!(answer, 2);
+

Thread safety example

+
+use spin;
+use std::sync::{Arc, Barrier};
+
+let thread_count = 1000;
+let spin_mutex = Arc::new(spin::Mutex::new(0));
+
+// We use a barrier to ensure the readout happens after all writing
+let barrier = Arc::new(Barrier::new(thread_count + 1));
+
+for _ in (0..thread_count) {
+    let my_barrier = barrier.clone();
+    let my_lock = spin_mutex.clone();
+    std::thread::spawn(move || {
+        let mut guard = my_lock.lock();
+        *guard += 1;
+
+        // Release the lock to prevent a deadlock
+        drop(guard);
+        my_barrier.wait();
+    });
+}
+
+barrier.wait();
+
+let answer = { *spin_mutex.lock() };
+assert_eq!(answer, thread_count);
+

Implementations

Creates a new Mutex wrapping the supplied data.

+

Example

+
+use spin::Mutex;
+
+static MUTEX: Mutex<()> = Mutex::new(());
+
+fn demo() {
+    let lock = MUTEX.lock();
+    // do something with lock
+    drop(lock);
+}
+

Consumes this Mutex and unwraps the underlying data.

+

Example

+
+let lock = spin::Mutex::new(42);
+assert_eq!(42, lock.into_inner());
+

Locks the Mutex and returns a guard that permits access to the inner data.

+

The returned value may be dereferenced for data access +and the lock will be dropped when the guard falls out of scope.

+ +
+let lock = spin::Mutex::new(0);
+{
+    let mut data = lock.lock();
+    // The lock is now locked and the data can be accessed
+    *data += 1;
+    // The lock is implicitly dropped at the end of the scope
+}
+

Returns true if the lock is currently held.

+

Safety

+

This function provides no synchronization guarantees and so its result should be considered ‘out of date’ +the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.

+

Force unlock this Mutex.

+

Safety

+

This is extremely unsafe if the lock is not held by the current +thread. However, this can be useful in some instances for exposing the +lock to FFI that doesn’t know how to deal with RAII.

+
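A hedged sketch of the FFI-style pattern described above: the guard is deliberately forgotten so the RAII release never runs, and the lock is released by hand later:

+let lock = spin::Mutex::new(42);
+
+// Take the lock, then forget the guard so it is never dropped.
+core::mem::forget(lock.lock());
+
+// The lock is still held, so it must be released manually.
+unsafe { lock.force_unlock(); }
+assert!(lock.try_lock().is_some());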

Try to lock this Mutex, returning a lock guard if successful.

+

Example

+
+let lock = spin::Mutex::new(42);
+
+let maybe_guard = lock.try_lock();
+assert!(maybe_guard.is_some());
+
+// `maybe_guard` is still held, so the second call fails
+let maybe_guard2 = lock.try_lock();
+assert!(maybe_guard2.is_none());
+

Returns a mutable reference to the underlying data.

+

Since this call borrows the Mutex mutably, and a mutable reference is guaranteed to be exclusive in Rust, +no actual locking needs to take place – the mutable borrow statically guarantees no locks exist. As such, +this is a ‘zero-cost’ operation.

+

Example

+
+let mut lock = spin::Mutex::new(0);
+*lock.get_mut() = 10;
+assert_eq!(*lock.lock(), 10);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

Returns the “default value” for a type. Read more

+

Performs the conversion.

+

Marker type which determines whether a lock guard should be Send. Use +one of the GuardSend or GuardNoSend helper types here. Read more

+

Initial value for an unlocked mutex.

+

Acquires this mutex, blocking the current thread until it is able to do so.

+

Attempts to acquire this mutex without blocking. Returns true +if the lock was successfully acquired and false otherwise. Read more

+

Unlocks this mutex. Read more

+

Checks whether the mutex is currently locked.

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/mutex/struct.MutexGuard.html b/spin/mutex/struct.MutexGuard.html new file mode 100644 index 00000000..b80ec0e0 --- /dev/null +++ b/spin/mutex/struct.MutexGuard.html @@ -0,0 +1,30 @@ +MutexGuard in spin::mutex - Rust + +

Struct spin::mutex::MutexGuard[][src]

pub struct MutexGuard<'a, T: 'a + ?Sized> { /* fields omitted */ }
Expand description

A generic guard that will protect some data access and +uses either a ticket lock or a normal spin mutex.

+

For more info see TicketMutexGuard or SpinMutexGuard.

+

Implementations

Leak the lock guard, yielding a mutable reference to the underlying data.

+

Note that this function will permanently lock the original Mutex.

+ +
+let mylock = spin::Mutex::new(0);
+
+let data: &mut i32 = spin::MutexGuard::leak(mylock.lock());
+
+*data = 1;
+assert_eq!(*data, 1);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Mutably dereferences the value.

+

Formats the value using the given formatter. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/once/index.html b/spin/once/index.html new file mode 100644 index 00000000..8664e689 --- /dev/null +++ b/spin/once/index.html @@ -0,0 +1,6 @@ +spin::once - Rust + +

Module spin::once[][src]

Expand description

Synchronization primitives for one-time evaluation.

+

Structs

+
Once

A primitive that provides lazy one-time initialization.

+
\ No newline at end of file diff --git a/spin/once/sidebar-items.js b/spin/once/sidebar-items.js new file mode 100644 index 00000000..439b0e46 --- /dev/null +++ b/spin/once/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"struct":[["Once","A primitive that provides lazy one-time initialization."]]}); \ No newline at end of file diff --git a/spin/once/struct.Once.html b/spin/once/struct.Once.html new file mode 100644 index 00000000..7e23de63 --- /dev/null +++ b/spin/once/struct.Once.html @@ -0,0 +1,93 @@ +Once in spin::once - Rust + +

Struct spin::once::Once[][src]

pub struct Once<T = (), R = Spin> { /* fields omitted */ }
Expand description

A primitive that provides lazy one-time initialization.

+

Unlike its std::sync equivalent, this is generalized such that the closure returns a +value to be stored by the Once (std::sync::Once can be trivially emulated with +Once).

+

Because Once::new is const, this primitive may be used to safely initialize statics.

+

Examples

+
+use spin;
+
+static START: spin::Once = spin::Once::new();
+
+START.call_once(|| {
+    // run initialization here
+});
+

Implementations

Performs an initialization routine once and only once. The given closure +will be executed if this is the first time call_once has been called, +and otherwise the routine will not be invoked.

+

This method will block the calling thread if another initialization +routine is currently running.

+

When this function returns, it is guaranteed that some initialization +has run and completed (it may not be the closure specified). The +returned reference points to the result from the closure that was +run.

+

Panics

+

This function will panic if the Once previously panicked while attempting +to initialize. This is similar to the poisoning behaviour of std::sync’s +primitives.

+

Examples

+
+use spin;
+
+static INIT: spin::Once<usize> = spin::Once::new();
+
+fn get_cached_val() -> usize {
+    *INIT.call_once(expensive_computation)
+}
+
+fn expensive_computation() -> usize {
+    // ... (some expensive computation; stubbed here so the example compiles)
+    42
+}
+

Spins until the Once contains a value.

+

Note that in releases prior to 0.7, this function had the behaviour of Once::poll.

+

Panics

+

This function will panic if the Once previously panicked while attempting +to initialize. This is similar to the poisoning behaviour of std::sync’s +primitives.

+
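A sketch of the intended cross-thread usage (the thread handling is illustrative and assumes std):

+static INIT: spin::Once<usize> = spin::Once::new();
+
+let waiter = std::thread::spawn(|| {
+    // Spins until some other thread has initialized the Once.
+    assert_eq!(*INIT.wait(), 42);
+});
+
+INIT.call_once(|| 42);
+waiter.join().unwrap();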

Like Once::get, but will spin if the Once is in the process of being +initialized. If initialization has not even begun, None will be returned.

+

Note that in releases prior to 0.7, this function was named wait.

+

Panics

+

This function will panic if the Once previously panicked while attempting +to initialize. This is similar to the poisoning behaviour of std::sync’s +primitives.

+

Initialization constant of Once.

+

Creates a new Once.

+

Creates a new initialized Once.

+

Retrieve a pointer to the inner data.

+

While this method itself is safe, accessing the pointer before the Once has been +initialized is UB, unless the data has already been written to through a pointer +obtained from this method.

+

Returns a reference to the inner value if the Once has been initialized.

+

Returns a reference to the inner value on the unchecked assumption that the Once has been initialized.

+

Safety

+

This is extremely unsafe if the Once has not already been initialized because a reference to uninitialized +memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused). +However, this can be useful in some instances for exposing the Once to FFI or when the overhead of atomically +checking initialization is unacceptable and the Once has already been initialized.

+

Returns a mutable reference to the inner value if the Once has been initialized.

+

Because this method requires a mutable reference to the Once, no synchronization +overhead is required to access the inner value. In effect, it is zero-cost.

+

Returns the inner value if the Once has been initialized.

+

Because this method requires ownership of the Once, no synchronization overhead +is required to access the inner value. In effect, it is zero-cost.

+

Checks whether the value has been initialized.

+

This is done using Acquire ordering, and +therefore it is safe to access the value directly via +get_unchecked if this returns true.

+
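The accessors above can be illustrated together; this sketch uses a locally created Once so that ownership is available for try_into_inner:

+let once = spin::Once::<u32>::new();
+assert!(once.get().is_none());
+assert!(!once.is_completed());
+
+once.call_once(|| 7);
+assert_eq!(once.get(), Some(&7));
+assert!(once.is_completed());
+
+// Consuming the Once yields the value without synchronization overhead.
+assert_eq!(once.try_into_inner(), Some(7));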

Trait Implementations

Formats the value using the given formatter. Read more

+

Executes the destructor for this type. Read more

+

Performs the conversion.

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/relax/index.html b/spin/relax/index.html new file mode 100644 index 00000000..5a2cf699 --- /dev/null +++ b/spin/relax/index.html @@ -0,0 +1,10 @@ +spin::relax - Rust + +

Module spin::relax[][src]

Expand description

Strategies that determine the behaviour of locks when encountering contention.

+

Structs

+
Loop

A strategy that rapidly spins, without telling the CPU to do any powering down.

+
Spin

A strategy that rapidly spins while informing the CPU that it should power down non-essential components via +core::hint::spin_loop.

+

Traits

+
RelaxStrategy

A trait implemented by spinning relax strategies.

+
\ No newline at end of file diff --git a/spin/relax/sidebar-items.js b/spin/relax/sidebar-items.js new file mode 100644 index 00000000..af2f4a94 --- /dev/null +++ b/spin/relax/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"struct":[["Loop","A strategy that rapidly spins, without telling the CPU to do any powering down."],["Spin","A strategy that rapidly spins while informing the CPU that it should power down non-essential components via [`core::hint::spin_loop`]."]],"trait":[["RelaxStrategy","A trait implemented by spinning relax strategies."]]}); \ No newline at end of file diff --git a/spin/relax/struct.Loop.html b/spin/relax/struct.Loop.html new file mode 100644 index 00000000..324d4a1d --- /dev/null +++ b/spin/relax/struct.Loop.html @@ -0,0 +1,17 @@ +Loop in spin::relax - Rust + +

Struct spin::relax::Loop[][src]

pub struct Loop;
Expand description

A strategy that rapidly spins, without telling the CPU to do any powering down.

+

You almost certainly do not want to use this. Use Spin instead. It exists for completeness and for targets +that, for some reason, miscompile or do not support spin hint intrinsics despite attempting to generate code for +them (i.e. this is a workaround for possible compiler bugs).

+
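Opting into this strategy means naming it in a lock’s type parameter; the alias below is a hypothetical example:

+// A mutex that uses the plain `Loop` strategy instead of the default `Spin`.
+type LoopMutex<T> = spin::mutex::SpinMutex<T, spin::relax::Loop>;
+
+let lock: LoopMutex<u32> = LoopMutex::new(1);
+assert_eq!(*lock.lock(), 1);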

Trait Implementations

Perform the relaxing operation during a period of contention.

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/relax/struct.Spin.html b/spin/relax/struct.Spin.html new file mode 100644 index 00000000..f8b5cfaa --- /dev/null +++ b/spin/relax/struct.Spin.html @@ -0,0 +1,23 @@ +Spin in spin::relax - Rust + +

Struct spin::relax::Spin[][src]

pub struct Spin;
Expand description

A strategy that rapidly spins while informing the CPU that it should power down non-essential components via +core::hint::spin_loop.

+

Note that spinning is a ‘dumb’ strategy and most schedulers cannot correctly differentiate it from useful work, +thereby misallocating even more CPU time to the spinning process. This is known as +‘priority inversion’.

+

If you see signs that priority inversion is occurring, consider switching to [Yield] or, even better, not using a +spinlock at all and opting for a proper scheduler-aware lock. Remember also that different targets, operating +systems, schedulers, and even the same scheduler with different workloads will exhibit different behaviour. Just +because priority inversion isn’t occurring in your tests does not mean that it will not occur. Use a +scheduler-aware lock if at all possible.

+

Trait Implementations

Perform the relaxing operation during a period of contention.

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/relax/trait.RelaxStrategy.html b/spin/relax/trait.RelaxStrategy.html new file mode 100644 index 00000000..49d23b7d --- /dev/null +++ b/spin/relax/trait.RelaxStrategy.html @@ -0,0 +1,7 @@ +RelaxStrategy in spin::relax - Rust + +

Trait spin::relax::RelaxStrategy

pub trait RelaxStrategy {
+    fn relax();
+}

A trait implemented by spinning relax strategies.

+

Required methods

Perform the relaxing operation during a period of contention.

+
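
A sketch of a custom implementor (NoOp is a hypothetical type used only for illustration):

+use spin::relax::RelaxStrategy;
+
+struct NoOp;
+
+impl RelaxStrategy for NoOp {
+    fn relax() {
+        // Deliberately do nothing between lock attempts.
+    }
+}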

Implementors

\ No newline at end of file diff --git a/spin/rwlock/index.html b/spin/rwlock/index.html new file mode 100644 index 00000000..b28fd890 --- /dev/null +++ b/spin/rwlock/index.html @@ -0,0 +1,9 @@ +spin::rwlock - Rust + +

Module spin::rwlock

A lock that provides data access to either one writer or many readers.

+

Structs

+
RwLock

A lock that provides data access to either one writer or many readers.

+
RwLockReadGuard

A guard that provides immutable data access.

+
RwLockUpgradableGuard

A guard that provides immutable data access but can be upgraded to RwLockWriteGuard.

+
RwLockWriteGuard

A guard that provides mutable data access.

+
\ No newline at end of file diff --git a/spin/rwlock/sidebar-items.js b/spin/rwlock/sidebar-items.js new file mode 100644 index 00000000..52dc9613 --- /dev/null +++ b/spin/rwlock/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"struct":[["RwLock","A lock that provides data access to either one writer or many readers."],["RwLockReadGuard","A guard that provides immutable data access."],["RwLockUpgradableGuard","A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]."],["RwLockWriteGuard","A guard that provides mutable data access."]]}); \ No newline at end of file diff --git a/spin/rwlock/struct.RwLock.html b/spin/rwlock/struct.RwLock.html new file mode 100644 index 00000000..9e745ec9 --- /dev/null +++ b/spin/rwlock/struct.RwLock.html @@ -0,0 +1,215 @@ +RwLock in spin::rwlock - Rust + +

Struct spin::rwlock::RwLock

pub struct RwLock<T: ?Sized, R = Spin> { /* fields omitted */ }

A lock that provides data access to either one writer or many readers.

+

This lock behaves in a similar manner to its namesake std::sync::RwLock but uses spinning for synchronisation instead. Unlike its namesake, this lock does not track lock poisoning.

+

This type of lock allows a number of readers or at most one writer at any point in time. The write portion of this lock typically allows modification of the underlying data (exclusive access) and the read portion of this lock typically allows for read-only access (shared access).

+

The type parameter T represents the data that this lock protects. It is required that T satisfies Send to be shared across tasks and Sync to allow concurrent access through readers. The RAII guards returned from the locking methods implement Deref (and DerefMut for the write methods) to allow access to the contents of the lock.

+

An RwLockUpgradableGuard can be upgraded to a writable guard through the RwLockUpgradableGuard::upgrade and RwLockUpgradableGuard::try_upgrade functions. Writable or upgradeable guards can be downgraded through their respective downgrade functions.

+

Based on Facebook’s folly/RWSpinLock.h. This implementation is unfair to writers - if the lock always has readers, then no writers will ever get a chance. Using an upgradeable lock guard can somewhat alleviate this issue as no new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken when there are existing readers. However, if the lock is highly contended and writes are crucial, then this implementation may be a poor choice.

+

Examples

+
+use spin;
+
+let lock = spin::RwLock::new(5);
+
+// many reader locks can be held at once
+{
+    let r1 = lock.read();
+    let r2 = lock.read();
+    assert_eq!(*r1, 5);
+    assert_eq!(*r2, 5);
+} // read locks are dropped at this point
+
+// only one write lock may be held, however
+{
+    let mut w = lock.write();
+    *w += 1;
+    assert_eq!(*w, 6);
+} // write lock is dropped here
+

Implementations

Creates a new spinlock wrapping the supplied data.

+

May be used statically:

+ +
+use spin;
+
+static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
+
+fn demo() {
+    let lock = RW_LOCK.read();
+    // do something with lock
+    drop(lock);
+}
+

Consumes this RwLock, returning the underlying data.

+

Returns a mutable pointer to the underlying data.

+

This is mostly meant to be used for applications which require manual unlocking, but where storing both the lock and the pointer to the inner data gets inefficient.

+

While this is safe, writing to the data is undefined behavior unless the current thread has acquired a write lock, and reading requires either a read or write lock.

+

Example

+
+let lock = spin::RwLock::new(42);
+
+unsafe {
+    core::mem::forget(lock.write());
+     
+    assert_eq!(lock.as_mut_ptr().read(), 42);
+    lock.as_mut_ptr().write(58);
+
+    lock.force_write_unlock();
+}
+
+assert_eq!(*lock.read(), 58);
+
+

Locks this rwlock with shared read access, blocking the current thread until it can be acquired.

+

The calling thread will be blocked until there are no more writers which hold the lock. There may be other readers currently inside the lock when this method returns. This method does not provide any guarantees with respect to the ordering of whether contentious readers or writers will acquire the lock first.

+

Returns an RAII guard which will release this thread’s shared access once it is dropped.

+ +
+let mylock = spin::RwLock::new(0);
+{
+    let mut data = mylock.read();
+    // The lock is now locked and the data can be read
+    println!("{}", *data);
+    // The lock is dropped
+}
+

Lock this rwlock with exclusive write access, blocking the current thread until it can be acquired.

+

This function will not return while other writers or other readers currently have access to the lock.

+

Returns an RAII guard which will drop the write access of this rwlock when dropped.

+ +
+let mylock = spin::RwLock::new(0);
+{
+    let mut data = mylock.write();
+    // The lock is now locked and the data can be written
+    *data += 1;
+    // The lock is dropped
+}
+

Obtain a readable lock guard that can later be upgraded to a writable lock guard. Upgrades can be done through the RwLockUpgradableGuard::upgrade method.

+
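
A sketch of typical usage, following the naming of the other examples on this page:

+let mylock = spin::RwLock::new(0);
+
+let upgradeable = mylock.upgradeable_read();
+// Existing readers may remain, but no writers or other upgradeable guards.
+let mut writable = upgradeable.upgrade();
+*writable += 1;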

Attempt to acquire this lock with shared read access.

+

This function will never block and will return immediately if read would otherwise succeed. Returns Some of an RAII guard which will release the shared access of this thread when dropped, or None if the access could not be granted. This method does not provide any guarantees with respect to the ordering of whether contentious readers or writers will acquire the lock first.

+ +
+let mylock = spin::RwLock::new(0);
+{
+    match mylock.try_read() {
+        Some(data) => {
+            // The lock is now locked and the data can be read
+            println!("{}", *data);
+            // The lock is dropped
+        },
+        None => (), // no cigar
+    };
+}
+

Return the number of readers that currently hold the lock (including upgradable readers).

+

Safety

+

This function provides no synchronization guarantees and so its result should be considered ‘out of date’ the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.

+
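
A sketch; the count is deterministic here only because no other threads are involved:

+let mylock = spin::RwLock::new(0);
+
+let r1 = mylock.read();
+let r2 = mylock.read();
+assert_eq!(mylock.reader_count(), 2); // a heuristic, not a synchronisation primitive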

Return the number of writers that currently hold the lock.

+

Because RwLock guarantees exclusive mutable access, this function may only return either 0 or 1.

+

Safety

+

This function provides no synchronization guarantees and so its result should be considered ‘out of date’ the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.

+

Force decrement the reader count.

+

Safety

+

This is extremely unsafe if there are outstanding RwLockReadGuards live, or if called more times than read has been called, but can be useful in FFI contexts where the caller doesn’t know how to deal with RAII. The underlying atomic operation uses Ordering::Release.

+

Force unlock exclusive write access.

+

Safety

+

This is extremely unsafe if there are outstanding RwLockWriteGuards live, or if called when there are current readers, but can be useful in FFI contexts where the caller doesn’t know how to deal with RAII. The underlying atomic operation uses Ordering::Release.

+

Attempt to lock this rwlock with exclusive write access.

+

This function does not ever block, and it will return None if a call to write would otherwise block. If successful, an RAII guard is returned.

+ +
+let mylock = spin::RwLock::new(0);
+{
+    match mylock.try_write() {
+        Some(mut data) => {
+            // The lock is now locked and the data can be written
+            *data += 1;
+            // The lock is implicitly dropped
+        },
+        None => (), // no cigar
+    };
+}
+

Tries to obtain an upgradeable lock guard.

+
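
A sketch mirroring the try_read example above:

+let mylock = spin::RwLock::new(0);
+
+match mylock.try_upgradeable_read() {
+    Some(data) => println!("{}", *data), // no other upgradeable or write guard is held
+    None => (), // no cigar
+};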

Returns a mutable reference to the underlying data.

+

Since this call borrows the RwLock mutably, no actual locking needs to take place – the mutable borrow statically guarantees no locks exist.

+

Examples

+
+let mut lock = spin::RwLock::new(0);
+*lock.get_mut() = 10;
+assert_eq!(*lock.read(), 10);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

Returns the “default value” for a type. Read more

+

Performs the conversion.

+

Marker type which determines whether a lock guard should be Send. Use one of the GuardSend or GuardNoSend helper types here. Read more

+

Initial value for an unlocked RwLock.

+

Acquires an exclusive lock, blocking the current thread until it is able to do so.

+

Attempts to acquire an exclusive lock without blocking.

+

Releases an exclusive lock. Read more

+

Acquires a shared lock, blocking the current thread until it is able to do so.

+

Attempts to acquire a shared lock without blocking.

+

Releases a shared lock. Read more

+

Checks if this RwLock is currently locked in any way.

+

Atomically downgrades an exclusive lock into a shared lock without allowing any thread to take an exclusive lock in the meantime. Read more

+

Acquires an upgradable lock, blocking the current thread until it is able to do so.

+

Attempts to acquire an upgradable lock without blocking.

+

Releases an upgradable lock. Read more

+

Upgrades an upgradable lock to an exclusive lock. Read more

+

Attempts to upgrade an upgradable lock to an exclusive lock without blocking. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/rwlock/struct.RwLockReadGuard.html b/spin/rwlock/struct.RwLockReadGuard.html new file mode 100644 index 00000000..466ff6e0 --- /dev/null +++ b/spin/rwlock/struct.RwLockReadGuard.html @@ -0,0 +1,29 @@ +RwLockReadGuard in spin::rwlock - Rust + +

Struct spin::rwlock::RwLockReadGuard

pub struct RwLockReadGuard<'a, T: 'a + ?Sized> { /* fields omitted */ }

A guard that provides immutable data access.

+

When the guard falls out of scope it will decrement the read count, potentially releasing the lock.

+

Implementations

Leak the lock guard, yielding a reference to the underlying data.

+

Note that this function will permanently lock the original lock for all but reading locks.

+ +
+let mylock = spin::RwLock::new(0);
+
+let data: &i32 = spin::RwLockReadGuard::leak(mylock.read());
+
+assert_eq!(*data, 0);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Formats the value using the given formatter. Read more

+

Executes the destructor for this type. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/rwlock/struct.RwLockUpgradableGuard.html b/spin/rwlock/struct.RwLockUpgradableGuard.html new file mode 100644 index 00000000..a4dba844 --- /dev/null +++ b/spin/rwlock/struct.RwLockUpgradableGuard.html @@ -0,0 +1,60 @@ +RwLockUpgradableGuard in spin::rwlock - Rust + +

Struct spin::rwlock::RwLockUpgradableGuard

pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> { /* fields omitted */ }

A guard that provides immutable data access but can be upgraded to RwLockWriteGuard.

+

No writers or other upgradeable guards can exist while this is in scope. New reader creation is prevented (to alleviate writer starvation) but there may be existing readers when the lock is acquired.

+

When the guard falls out of scope it will release the lock.

+

Implementations

Upgrades an upgradeable lock guard to a writable lock guard.

+ +
+let mylock = spin::RwLock::new(0);
+
+let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
+let writable = upgradeable.upgrade();
+

Tries to upgrade an upgradeable lock guard to a writable lock guard.

+ +
+let mylock = spin::RwLock::new(0);
+let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
+
+match upgradeable.try_upgrade() {
+    Ok(writable) => /* upgrade successful - use writable lock guard */ (),
+    Err(upgradeable) => /* upgrade unsuccessful */ (),
+};
+

Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.

+ +
+let mylock = spin::RwLock::new(1);
+
+let upgradeable = mylock.upgradeable_read();
+assert!(mylock.try_read().is_none());
+assert_eq!(*upgradeable, 1);
+
+let readable = upgradeable.downgrade(); // This is guaranteed not to spin
+assert!(mylock.try_read().is_some());
+assert_eq!(*readable, 1);
+

Leak the lock guard, yielding a reference to the underlying data.

+

Note that this function will permanently lock the original lock.

+ +
+let mylock = spin::RwLock::new(0);
+
+let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read());
+
+assert_eq!(*data, 0);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Formats the value using the given formatter. Read more

+

Executes the destructor for this type. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/rwlock/struct.RwLockWriteGuard.html b/spin/rwlock/struct.RwLockWriteGuard.html new file mode 100644 index 00000000..2491526e --- /dev/null +++ b/spin/rwlock/struct.RwLockWriteGuard.html @@ -0,0 +1,50 @@ +RwLockWriteGuard in spin::rwlock - Rust + +

Struct spin::rwlock::RwLockWriteGuard

pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> { /* fields omitted */ }

A guard that provides mutable data access.

+

When the guard falls out of scope it will release the lock.

+

Implementations

Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.

+ +
+let mylock = spin::RwLock::new(0);
+
+let mut writable = mylock.write();
+*writable = 1;
+
+let readable = writable.downgrade(); // This is guaranteed not to spin
+assert_eq!(*readable, 1);
+

Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin.

+ +
+let mylock = spin::RwLock::new(0);
+
+let mut writable = mylock.write();
+*writable = 1;
+
+let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin
+assert_eq!(*readable, 1);
+

Leak the lock guard, yielding a mutable reference to the underlying data.

+

Note that this function will permanently lock the original lock.

+ +
+let mylock = spin::RwLock::new(0);
+
+let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write());
+
+*data = 1;
+assert_eq!(*data, 1);
+

Trait Implementations

Formats the value using the given formatter. Read more

+

The resulting type after dereferencing.

+

Dereferences the value.

+

Mutably dereferences the value.

+

Formats the value using the given formatter. Read more

+

Executes the destructor for this type. Read more

+

Auto Trait Implementations

Blanket Implementations

Gets the TypeId of self. Read more

+

Immutably borrows from an owned value. Read more

+

Mutably borrows from an owned value. Read more

+

Performs the conversion.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+

The type returned in the event of a conversion error.

+

Performs the conversion.

+
\ No newline at end of file diff --git a/spin/sidebar-items.js b/spin/sidebar-items.js new file mode 100644 index 00000000..a1e2d6fa --- /dev/null +++ b/spin/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"mod":[["barrier","Synchronization primitive allowing multiple threads to synchronize the beginning of some computation."],["lazy","Synchronization primitives for lazy evaluation."],["lock_api","Spin synchronisation primitives, but compatible with `lock_api`."],["mutex","Locks that have the same behaviour as a mutex."],["once","Synchronization primitives for one-time evaluation."],["relax","Strategies that determine the behaviour of locks when encountering contention."],["rwlock","A lock that provides data access to either one writer or many readers."]],"type":[["Barrier","A primitive that synchronizes the execution of multiple threads. See [`barrier::Barrier`] for documentation."],["Lazy","A value which is initialized on the first access. See [`lazy::Lazy`] for documentation."],["Mutex","A primitive that synchronizes the execution of multiple threads. See [`mutex::Mutex`] for documentation."],["Once","A primitive that provides lazy one-time initialization. See [`once::Once`] for documentation."],["RwLock","A lock that provides data access to either one writer or many readers. See [`rwlock::RwLock`] for documentation."],["RwLockUpgradableGuard","A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. See [`rwlock::RwLockUpgradableGuard`] for documentation."],["RwLockWriteGuard","A guard that provides mutable data access. See [`rwlock::RwLockWriteGuard`] for documentation."]]}); \ No newline at end of file diff --git a/spin/type.Barrier.html b/spin/type.Barrier.html new file mode 100644 index 00000000..1d6e2cbd --- /dev/null +++ b/spin/type.Barrier.html @@ -0,0 +1,6 @@ +Barrier in spin - Rust + +

Type Definition spin::Barrier

type Barrier = Barrier;

A primitive that synchronizes the execution of multiple threads. See barrier::Barrier for documentation.

+

A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.

+
\ No newline at end of file diff --git a/spin/type.Lazy.html b/spin/type.Lazy.html new file mode 100644 index 00000000..b14a1375 --- /dev/null +++ b/spin/type.Lazy.html @@ -0,0 +1,6 @@ +Lazy in spin - Rust + +

Type Definition spin::Lazy

type Lazy<T, F = fn() -> T> = Lazy<T, F>;

A value which is initialized on the first access. See lazy::Lazy for documentation.

+

A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.

+
\ No newline at end of file diff --git a/spin/type.Mutex.html b/spin/type.Mutex.html new file mode 100644 index 00000000..20955743 --- /dev/null +++ b/spin/type.Mutex.html @@ -0,0 +1,6 @@ +Mutex in spin - Rust + +

Type Definition spin::Mutex

type Mutex<T> = Mutex<T>;

A primitive that synchronizes the execution of multiple threads. See mutex::Mutex for documentation.

+

A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.

+
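
A sketch of naming a non-default relax strategy via the fully-qualified path (Loop is chosen purely for illustration):

+use spin::{mutex::Mutex, relax::Loop};
+
+let lock: Mutex<u32, Loop> = Mutex::new(0);
+assert_eq!(*lock.lock(), 0);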
\ No newline at end of file diff --git a/spin/type.Once.html b/spin/type.Once.html new file mode 100644 index 00000000..430ac66a --- /dev/null +++ b/spin/type.Once.html @@ -0,0 +1,6 @@ +Once in spin - Rust + +

Type Definition spin::Once

type Once<T = ()> = Once<T>;

A primitive that provides lazy one-time initialization. See once::Once for documentation.

+

A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.

+
\ No newline at end of file diff --git a/spin/type.RwLock.html b/spin/type.RwLock.html new file mode 100644 index 00000000..6eb48c0f --- /dev/null +++ b/spin/type.RwLock.html @@ -0,0 +1,6 @@ +RwLock in spin - Rust + +

Type Definition spin::RwLock

type RwLock<T> = RwLock<T>;

A lock that provides data access to either one writer or many readers. See rwlock::RwLock for documentation.

+

A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.

+
\ No newline at end of file diff --git a/spin/type.RwLockUpgradableGuard.html b/spin/type.RwLockUpgradableGuard.html new file mode 100644 index 00000000..31ea6e35 --- /dev/null +++ b/spin/type.RwLockUpgradableGuard.html @@ -0,0 +1,7 @@ +RwLockUpgradableGuard in spin - Rust + +

Type Definition spin::RwLockUpgradableGuard

type RwLockUpgradableGuard<'a, T> = RwLockUpgradableGuard<'a, T>;

A guard that provides immutable data access but can be upgraded to RwLockWriteGuard. See rwlock::RwLockUpgradableGuard for documentation.

+

A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.

+
\ No newline at end of file diff --git a/spin/type.RwLockWriteGuard.html b/spin/type.RwLockWriteGuard.html new file mode 100644 index 00000000..8c0ccc7e --- /dev/null +++ b/spin/type.RwLockWriteGuard.html @@ -0,0 +1,6 @@ +RwLockWriteGuard in spin - Rust + +

Type Definition spin::RwLockWriteGuard

type RwLockWriteGuard<'a, T> = RwLockWriteGuard<'a, T>;

A guard that provides mutable data access. See rwlock::RwLockWriteGuard for documentation.

+

A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.

+
\ No newline at end of file diff --git a/src/heapless/pool/cas.rs.html b/src/heapless/pool/cas.rs.html index 74cee40d..d9455094 100644 --- a/src/heapless/pool/cas.rs.html +++ b/src/heapless/pool/cas.rs.html @@ -231,6 +231,23 @@
 //! Stack based on CAS atomics
 //!
@@ -321,9 +338,26 @@
 }
 
 #[cfg(target_arch = "x86_64")]
-fn anchor<T>() -> *mut T {
-    static mut ANCHOR: u8 = 0;
-    (unsafe { &mut ANCHOR } as *mut u8 as usize & !(core::mem::align_of::<T>() - 1)) as *mut T
+fn anchor<T>(init: Option<*mut T>) -> *mut T {
+    use core::sync::atomic::AtomicU8;
+
+    use spin::Once;
+
+    static LAZY_ANCHOR: Once<usize> = Once::new();
+
+    let likely_unaligned_address = if let Some(init) = init {
+        *LAZY_ANCHOR.call_once(|| init as usize)
+    } else {
+        LAZY_ANCHOR.get().copied().unwrap_or_else(|| {
+            // we may hit this branch with Pool of ZSTs where `grow` does not need to be called
+            static BSS_ANCHOR: AtomicU8 = AtomicU8::new(0);
+            &BSS_ANCHOR as *const _ as usize
+        })
+    };
+
+    let alignment_mask = !(core::mem::align_of::<T>() - 1);
+    let well_aligned_address = likely_unaligned_address & alignment_mask;
+    well_aligned_address as *mut T
 }
 
 /// On x86_64, anchored pointer. This is a (signed) 32-bit offset from `anchor` plus a 32-bit tag
@@ -350,7 +384,7 @@
     pub fn new(p: *mut T) -> Option<Self> {
         use core::convert::TryFrom;
 
-        i32::try_from((p as isize).wrapping_sub(anchor::<T>() as isize))
+        i32::try_from((p as isize).wrapping_sub(anchor::<T>(Some(p)) as isize))
             .ok()
             .map(|offset| unsafe { Ptr::from_parts(initial_tag_value(), offset) })
     }
@@ -400,7 +434,7 @@
     fn as_raw(&self) -> NonNull<T> {
         unsafe {
             NonNull::new_unchecked(
-                (anchor::<T>() as *mut u8).offset(self.offset() as isize) as *mut T
+                (anchor::<T>(None) as *mut u8).offset(self.offset() as isize) as *mut T,
             )
         }
     }
diff --git a/src/lock_api/lib.rs.html b/src/lock_api/lib.rs.html
new file mode 100644
index 00000000..5cd99493
--- /dev/null
+++ b/src/lock_api/lib.rs.html
@@ -0,0 +1,227 @@
+lib.rs - source
+
+
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! This library provides type-safe and fully-featured `Mutex` and `RwLock`
+//! types which wrap a simple raw mutex or rwlock type. This has several
+//! benefits: not only does it eliminate a large portion of the work in
+//! implementing custom lock types, it also allows users to write code which is
+//! generic with regards to different lock implementations.
+//!
+//! Basic usage of this crate is very straightforward:
+//!
+//! 1. Create a raw lock type. This should only contain the lock state, not any
+//!    data protected by the lock.
+//! 2. Implement the `RawMutex` trait for your custom lock type.
+//! 3. Export your mutex as a type alias for `lock_api::Mutex`, and
+//!    your mutex guard as a type alias for `lock_api::MutexGuard`.
+//!    See the [example](#example) below for details.
+//!
+//! This process is similar for RwLocks, except that two guards need to be
+//! exported instead of one. (Or 3 guards if your type supports upgradable read
+//! locks, see [extension traits](#extension-traits) below for details)
+//!
+//! # Example
+//!
+//! ```
+//! use lock_api::{RawMutex, Mutex, GuardSend};
+//! use std::sync::atomic::{AtomicBool, Ordering};
+//!
+//! // 1. Define our raw lock type
+//! pub struct RawSpinlock(AtomicBool);
+//!
+//! // 2. Implement RawMutex for this type
+//! unsafe impl RawMutex for RawSpinlock {
+//!     const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
+//!
+//!     // A spinlock guard can be sent to another thread and unlocked there
+//!     type GuardMarker = GuardSend;
+//!
+//!     fn lock(&self) {
+//!         // Note: This isn't the best way of implementing a spinlock, but it
+//!         // suffices for the sake of this example.
+//!         while !self.try_lock() {}
+//!     }
+//!
+//!     fn try_lock(&self) -> bool {
+//!         self.0
+//!             .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+//!             .is_ok()
+//!     }
+//!
+//!     unsafe fn unlock(&self) {
+//!         self.0.store(false, Ordering::Release);
+//!     }
+//! }
+//!
+//! // 3. Export the wrappers. These are the types that your users will actually use.
+//! pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;
+//! pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>;
+//! ```
+//!
+//! # Extension traits
+//!
+//! In addition to basic locking & unlocking functionality, you have the option
+//! of exposing additional functionality in your lock types by implementing
+//! additional traits for it. Examples of extension features include:
+//!
+//! - Fair unlocking (`RawMutexFair`, `RawRwLockFair`)
+//! - Lock timeouts (`RawMutexTimed`, `RawRwLockTimed`)
+//! - Downgradable write locks (`RawRwLockDowngradable`)
+//! - Recursive read locks (`RawRwLockRecursive`)
+//! - Upgradable read locks (`RawRwLockUpgrade`)
+//!
+//! The `Mutex` and `RwLock` wrappers will automatically expose this additional
+//! functionality if the raw lock type implements these extension traits.
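+//!
+//! For instance, extending the example above (an editor's sketch, not part of
+//! the original source):
+//!
+//! ```ignore
+//! unsafe impl lock_api::RawMutexFair for RawSpinlock {
+//!     unsafe fn unlock_fair(&self) {
+//!         // A bare spinlock keeps no wait queue, so fair and unfair
+//!         // unlocking coincide here.
+//!         self.unlock();
+//!     }
+//! }
+//! // `SpinlockGuard` now also offers `unlock_fair`, `unlocked_fair` and `bump`.
+//! ```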
+//!
+//! # Cargo features
+//!
+//! This crate supports two cargo features:
+//!
+//! - `owning_ref`: Allows your lock types to be used with the `owning_ref` crate.
+//! - `nightly`: Enables nightly-only features. At the moment the only such
+//!   feature is `const fn` constructors for lock types.
+
+#![no_std]
+#![warn(missing_docs)]
+#![warn(rust_2018_idioms)]
+#![cfg_attr(feature = "nightly", feature(const_fn_trait_bound))]
+
+#[macro_use]
+extern crate scopeguard;
+
+/// Marker type which indicates that the Guard type for a lock is `Send`.
+pub struct GuardSend(());
+
+/// Marker type which indicates that the Guard type for a lock is not `Send`.
+pub struct GuardNoSend(*mut ());
+
+unsafe impl Sync for GuardNoSend {}
+
+mod mutex;
+pub use crate::mutex::*;
+
+mod remutex;
+pub use crate::remutex::*;
+
+mod rwlock;
+pub use crate::rwlock::*;
+
+
\ No newline at end of file diff --git a/src/lock_api/mutex.rs.html b/src/lock_api/mutex.rs.html new file mode 100644 index 00000000..69fe667e --- /dev/null +++ b/src/lock_api/mutex.rs.html @@ -0,0 +1,1453 @@ +mutex.rs - source + +
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+/// Basic operations for a mutex.
+///
+/// Types implementing this trait can be used by `Mutex` to form a safe and
+/// fully-functioning mutex type.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that the mutex is actually
+/// exclusive: a lock can't be acquired while the mutex is already locked.
+pub unsafe trait RawMutex {
+    /// Initial value for an unlocked mutex.
+    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
+    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
+    #[allow(clippy::declare_interior_mutable_const)]
+    const INIT: Self;
+
+    /// Marker type which determines whether a lock guard should be `Send`. Use
+    /// one of the `GuardSend` or `GuardNoSend` helper types here.
+    type GuardMarker;
+
+    /// Acquires this mutex, blocking the current thread until it is able to do so.
+    fn lock(&self);
+
+    /// Attempts to acquire this mutex without blocking. Returns `true`
+    /// if the lock was successfully acquired and `false` otherwise.
+    fn try_lock(&self) -> bool;
+
+    /// Unlocks this mutex.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if the mutex is held in the current context, i.e. it must
+    /// be paired with a successful call to [`lock`], [`try_lock`], [`try_lock_for`] or [`try_lock_until`].
+    ///
+    /// [`lock`]: #tymethod.lock
+    /// [`try_lock`]: #tymethod.try_lock
+    /// [`try_lock_for`]: trait.RawMutexTimed.html#tymethod.try_lock_for
+    /// [`try_lock_until`]: trait.RawMutexTimed.html#tymethod.try_lock_until
+    unsafe fn unlock(&self);
+
+    /// Checks whether the mutex is currently locked.
+    #[inline]
+    fn is_locked(&self) -> bool {
+        let acquired_lock = self.try_lock();
+        if acquired_lock {
+            // Safety: The lock has been successfully acquired above.
+            unsafe {
+                self.unlock();
+            }
+        }
+        !acquired_lock
+    }
+}
+
+/// Additional methods for mutexes which support fair unlocking.
+///
+/// Fair unlocking means that a lock is handed directly over to the next waiting
+/// thread if there is one, without giving other threads the opportunity to
+/// "steal" the lock in the meantime. This is typically slower than unfair
+/// unlocking, but may be necessary in certain circumstances.
+pub unsafe trait RawMutexFair: RawMutex {
+    /// Unlocks this mutex using a fair unlock protocol.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if the mutex is held in the current context, see
+    /// the documentation of [`unlock`].
+    ///
+    /// [`unlock`]: trait.RawMutex.html#tymethod.unlock
+    unsafe fn unlock_fair(&self);
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if the mutex is held in the current context, see
+    /// the documentation of [`unlock`].
+    ///
+    /// [`unlock`]: trait.RawMutex.html#tymethod.unlock
+    unsafe fn bump(&self) {
+        self.unlock_fair();
+        self.lock();
+    }
+}
+
+/// Additional methods for mutexes which support locking with timeouts.
+///
+/// The `Duration` and `Instant` types are specified as associated types so that
+/// this trait is usable even in `no_std` environments.
+pub unsafe trait RawMutexTimed: RawMutex {
+    /// Duration type used for `try_lock_for`.
+    type Duration;
+
+    /// Instant type used for `try_lock_until`.
+    type Instant;
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    fn try_lock_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    fn try_lock_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// A mutual exclusion primitive useful for protecting shared data
+///
+/// This mutex will block threads waiting for the lock to become available. The
+/// mutex can also be statically initialized or created via a `new`
+/// constructor. Each mutex has a type parameter which represents the data that
+/// it is protecting. The data can only be accessed through the RAII guards
+/// returned from `lock` and `try_lock`, which guarantees that the data is only
+/// ever accessed when the mutex is locked.
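+///
+/// # Example
+///
+/// (An editor's sketch, not part of the original source; `Spinlock` is the
+/// alias defined in the crate-level example.)
+///
+/// ```ignore
+/// let mutex = Spinlock::new(5);
+/// {
+///     let guard = mutex.lock();
+///     assert_eq!(*guard, 5);
+/// } // the guard is dropped here and the mutex is unlocked again
+/// ```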
+pub struct Mutex<R, T: ?Sized> {
+    raw: R,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<R: RawMutex + Send, T: ?Sized + Send> Send for Mutex<R, T> {}
+unsafe impl<R: RawMutex + Sync, T: ?Sized + Send> Sync for Mutex<R, T> {}
+
+impl<R: RawMutex, T> Mutex<R, T> {
+    /// Creates a new mutex in an unlocked state ready for use.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> Mutex<R, T> {
+        Mutex {
+            raw: R::INIT,
+            data: UnsafeCell::new(val),
+        }
+    }
+
+    /// Creates a new mutex in an unlocked state ready for use.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> Mutex<R, T> {
+        Mutex {
+            raw: R::INIT,
+            data: UnsafeCell::new(val),
+        }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    #[inline]
+    pub fn into_inner(self) -> T {
+        self.data.into_inner()
+    }
+}
+
+impl<R, T> Mutex<R, T> {
+    /// Creates a new mutex based on a pre-existing raw mutex.
+    ///
+    /// This allows creating a mutex in a constant context on stable Rust.
+    #[inline]
+    pub const fn const_new(raw_mutex: R, val: T) -> Mutex<R, T> {
+        Mutex {
+            raw: raw_mutex,
+            data: UnsafeCell::new(val),
+        }
+    }
+}
+
+impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
+    /// # Safety
+    ///
+    /// The lock must be held when calling this method.
+    #[inline]
+    unsafe fn guard(&self) -> MutexGuard<'_, R, T> {
+        MutexGuard {
+            mutex: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Acquires a mutex, blocking the current thread until it is able to do so.
+    ///
+    /// This function will block the local thread until it is available to acquire
+    /// the mutex. Upon returning, the thread is the only thread with the mutex
+    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
+    /// the guard goes out of scope, the mutex will be unlocked.
+    ///
+    /// Attempts to lock a mutex in the thread which already holds the lock will
+    /// result in a deadlock.
+    #[inline]
+    pub fn lock(&self) -> MutexGuard<'_, R, T> {
+        self.raw.lock();
+        // SAFETY: The lock is held, as required.
+        unsafe { self.guard() }
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+    /// guard is dropped.
+    ///
+    /// This function does not block.
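+    ///
+    /// (An editor's sketch, not part of the original source; `Spinlock` is
+    /// the alias defined in the crate-level example.)
+    ///
+    /// ```ignore
+    /// let mutex = Spinlock::new(0);
+    /// let guard = mutex.lock();
+    /// // A second attempt on the same mutex fails instead of deadlocking.
+    /// assert!(mutex.try_lock().is_none());
+    /// drop(guard);
+    /// assert!(mutex.try_lock().is_some());
+    /// ```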
+    #[inline]
+    pub fn try_lock(&self) -> Option<MutexGuard<'_, R, T>> {
+        if self.raw.try_lock() {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
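+    ///
+    /// (An editor's sketch, not part of the original source; `Spinlock` is
+    /// the alias defined in the crate-level example.)
+    ///
+    /// ```ignore
+    /// let mut mutex = Spinlock::new(0);
+    /// *mutex.get_mut() = 10; // no locking required through `&mut`
+    /// assert_eq!(*mutex.lock(), 10);
+    /// ```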
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Checks whether the mutex is currently locked.
+    #[inline]
+    pub fn is_locked(&self) -> bool {
+        self.raw.is_locked()
+    }
+
+    /// Forcibly unlocks the mutex.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `MutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `MutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
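+    ///
+    /// (An editor's sketch, not part of the original source; `Spinlock` is
+    /// the alias defined in the crate-level example.)
+    ///
+    /// ```ignore
+    /// let mutex = Spinlock::new(0);
+    /// core::mem::forget(mutex.lock()); // stay locked without keeping a guard
+    /// // ... e.g. hand the raw lock across an FFI boundary ...
+    /// unsafe { mutex.force_unlock() }; // balances the forgotten guard
+    /// ```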
+    #[inline]
+    pub unsafe fn force_unlock(&self) {
+        self.raw.unlock();
+    }
+
+    /// Returns the underlying raw mutex object.
+    ///
+    /// Note that you will most likely need to import the `RawMutex` trait from
+    /// `lock_api` to be able to call functions on the raw mutex.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking a mutex while
+    /// still holding a reference to a `MutexGuard`.
+    #[inline]
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw
+    }
+
+    /// Returns a raw pointer to the underlying data.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `MutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// You must ensure that there are no data races when dereferencing the
+    /// returned pointer, for example if the current thread logically owns
+    /// a `MutexGuard` but that guard has been discarded using `mem::forget`.
+    #[inline]
+    pub fn data_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<R: RawMutexFair, T: ?Sized> Mutex<R, T> {
+    /// Forcibly unlocks the mutex using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `MutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `MutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock_fair(&self) {
+        self.raw.unlock_fair();
+    }
+}
+
+impl<R: RawMutexTimed, T: ?Sized> Mutex<R, T> {
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<MutexGuard<'_, R, T>> {
+        if self.raw.try_lock_for(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<MutexGuard<'_, R, T>> {
+        if self.raw.try_lock_until(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.guard() })
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawMutex, T: ?Sized + Default> Default for Mutex<R, T> {
+    #[inline]
+    fn default() -> Mutex<R, T> {
+        Mutex::new(Default::default())
+    }
+}
+
+impl<R: RawMutex, T> From<T> for Mutex<R, T> {
+    #[inline]
+    fn from(t: T) -> Mutex<R, T> {
+        Mutex::new(t)
+    }
+}
+
+impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("Mutex")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
+        }
+    }
+}
+
+// Copied and modified from serde
+#[cfg(feature = "serde")]
+impl<R, T> Serialize for Mutex<R, T>
+where
+    R: RawMutex,
+    T: Serialize + ?Sized,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        self.lock().serialize(serializer)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, R, T> Deserialize<'de> for Mutex<R, T>
+where
+    R: RawMutex,
+    T: Deserialize<'de> + ?Sized,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Deserialize::deserialize(deserializer).map(Mutex::new)
+    }
+}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// `Deref` and `DerefMut` implementations.
+#[must_use = "if unused the Mutex will immediately unlock"]
+pub struct MutexGuard<'a, R: RawMutex, T: ?Sized> {
+    mutex: &'a Mutex<R, T>,
+    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, R, T> {}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
+    /// Returns a reference to the original `Mutex` object.
+    pub fn mutex(s: &Self) -> &'a Mutex<R, T> {
+        s.mutex
+    }
+
+    /// Makes a new `MappedMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
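+    ///
+    /// (An editor's sketch, not part of the original source; `Spinlock` is
+    /// the alias defined in the crate-level example.)
+    ///
+    /// ```ignore
+    /// let mutex = Spinlock::new((1, 2));
+    /// let guard = mutex.lock();
+    /// // Narrow the guard so it only exposes the first field of the tuple.
+    /// let mut first = lock_api::MutexGuard::map(guard, |pair| &mut pair.0);
+    /// *first += 10;
+    /// ```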
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedMutexGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = &s.mutex.raw;
+        let data = f(unsafe { &mut *s.mutex.data.get() });
+        mem::forget(s);
+        MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedMutexGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `MutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MutexGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedMutexGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+    {
+        let raw = &s.mutex.raw;
+        let data = match f(unsafe { &mut *s.mutex.data.get() }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
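+    ///
+    /// (An editor's sketch, not part of the original source; `Spinlock` is
+    /// the alias defined in the crate-level example.)
+    ///
+    /// ```ignore
+    /// let mutex = Spinlock::new(0);
+    /// let mut guard = mutex.lock();
+    /// lock_api::MutexGuard::unlocked(&mut guard, || {
+    ///     // The mutex is released for the duration of this closure.
+    ///     assert!(!mutex.is_locked());
+    /// });
+    /// // The lock has been re-acquired and `guard` is usable again here.
+    /// ```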
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.mutex.raw.unlock();
+        }
+        defer!(s.mutex.raw.lock());
+        f()
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MutexGuard` normally.
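+    ///
+    /// (An editor's sketch, not part of the original source; `fair_mutex` is
+    /// a hypothetical `Mutex` whose raw type also implements `RawMutexFair`,
+    /// which the crate-level `RawSpinlock` example does not.)
+    ///
+    /// ```ignore
+    /// let guard = fair_mutex.lock();
+    /// // Hand the lock directly to a waiting thread, if any, instead of
+    /// // letting this thread race to re-acquire it.
+    /// lock_api::MutexGuard::unlock_fair(guard);
+    /// ```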
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.mutex.raw.unlock_fair();
+        }
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// The mutex is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.mutex.raw.unlock_fair();
+        }
+        defer!(s.mutex.raw.lock());
+        f()
+    }
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.mutex.raw.bump();
+        }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MutexGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.mutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.mutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            self.mutex.raw.unlock();
+        }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'a, R, T> {}
+
+/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use = "if unused the Mutex will immediately unlock"]
+pub struct MappedMutexGuard<'a, R: RawMutex, T: ?Sized> {
+    raw: &'a R,
+    data: *mut T,
+    marker: PhantomData<&'a mut T>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedMutexGuard<'a, R, T>
+{
+}
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + Send + 'a> Send for MappedMutexGuard<'a, R, T> where
+    R::GuardMarker: Send
+{
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
+    /// Makes a new `MappedMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedMutexGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &mut *s.data });
+        mem::forget(s);
+        MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedMutexGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `MappedMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedMutexGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedMutexGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+    {
+        let raw = s.raw;
+        let data = match f(unsafe { &mut *s.data }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.raw.unlock_fair();
+        }
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MappedMutexGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MappedMutexGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: A MappedMutexGuard always holds the lock.
+        unsafe {
+            self.raw.unlock();
+        }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MappedMutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedMutexGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MappedMutexGuard<'a, R, T> {}
+
+
\ No newline at end of file
diff --git a/src/lock_api/remutex.rs.html b/src/lock_api/remutex.rs.html
new file mode 100644
index 00000000..bd685f61
--- /dev/null
+++ b/src/lock_api/remutex.rs.html
@@ -0,0 +1,1709 @@
+remutex.rs - source
+
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use crate::{
+    mutex::{RawMutex, RawMutexFair, RawMutexTimed},
+    GuardNoSend,
+};
+use core::{
+    cell::{Cell, UnsafeCell},
+    fmt,
+    marker::PhantomData,
+    mem,
+    num::NonZeroUsize,
+    ops::Deref,
+    sync::atomic::{AtomicUsize, Ordering},
+};
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+/// Helper trait which returns a non-zero thread ID.
+///
+/// The simplest way to implement this trait is to return the address of a
+/// thread-local variable.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that no two active threads share
+/// the same thread ID. However the ID of a thread that has exited can be
+/// re-used since that thread is no longer active.
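+///
+/// # Example
+///
+/// A sketch of an implementation backed by `std` (this crate itself is
+/// `no_std`-compatible; the type name here is made up):
+///
+/// ```ignore
+/// use core::num::NonZeroUsize;
+///
+/// struct StdThreadId;
+///
+/// unsafe impl GetThreadId for StdThreadId {
+///     const INIT: Self = StdThreadId;
+///
+///     fn nonzero_thread_id(&self) -> NonZeroUsize {
+///         // The address of a thread-local is stable and unique for each
+///         // active thread, and references are never null.
+///         thread_local!(static KEY: u8 = 0);
+///         KEY.with(|k| NonZeroUsize::new(k as *const u8 as usize).unwrap())
+///     }
+/// }
+/// ```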
+pub unsafe trait GetThreadId {
+    /// Initial value.
+    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
+    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
+    #[allow(clippy::declare_interior_mutable_const)]
+    const INIT: Self;
+
+    /// Returns a non-zero thread ID which identifies the current thread of
+    /// execution.
+    fn nonzero_thread_id(&self) -> NonZeroUsize;
+}
+
+/// A raw mutex type that wraps another raw mutex to provide reentrancy.
+///
+/// Although this has the same methods as the [`RawMutex`] trait, it does
+/// not implement it, and should not be used in the same way, since this
+/// mutex can successfully acquire a lock multiple times in the same thread.
+/// Only use this when you know you want a raw mutex that can be locked
+/// reentrantly; you probably want [`ReentrantMutex`] instead.
+///
+/// [`RawMutex`]: trait.RawMutex.html
+/// [`ReentrantMutex`]: struct.ReentrantMutex.html
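+///
+/// # Example
+///
+/// A sketch of placing one in a `static`, assuming `RawSpinlock: RawMutex`
+/// and `ThreadId: GetThreadId` implementations (hypothetical names):
+///
+/// ```ignore
+/// static LOCK: RawReentrantMutex<RawSpinlock, ThreadId> = RawReentrantMutex::INIT;
+///
+/// LOCK.lock();
+/// LOCK.lock();              // same thread: just bumps the lock count
+/// unsafe { LOCK.unlock() }; // still held by this thread
+/// unsafe { LOCK.unlock() }; // now released
+/// ```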
+pub struct RawReentrantMutex<R, G> {
+    owner: AtomicUsize,
+    lock_count: Cell<usize>,
+    mutex: R,
+    get_thread_id: G,
+}
+
+unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
+unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}
+
+impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
+    /// Initial value for an unlocked mutex.
+    #[allow(clippy::declare_interior_mutable_const)]
+    pub const INIT: Self = RawReentrantMutex {
+        owner: AtomicUsize::new(0),
+        lock_count: Cell::new(0),
+        mutex: R::INIT,
+        get_thread_id: G::INIT,
+    };
+
+    #[inline]
+    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
+        let id = self.get_thread_id.nonzero_thread_id().get();
+        if self.owner.load(Ordering::Relaxed) == id {
+            self.lock_count.set(
+                self.lock_count
+                    .get()
+                    .checked_add(1)
+                    .expect("ReentrantMutex lock count overflow"),
+            );
+        } else {
+            if !try_lock() {
+                return false;
+            }
+            self.owner.store(id, Ordering::Relaxed);
+            debug_assert_eq!(self.lock_count.get(), 0);
+            self.lock_count.set(1);
+        }
+        true
+    }
+
+    /// Acquires this mutex, blocking if it's held by another thread.
+    #[inline]
+    pub fn lock(&self) {
+        self.lock_internal(|| {
+            self.mutex.lock();
+            true
+        });
+    }
+
+    /// Attempts to acquire this mutex without blocking. Returns `true`
+    /// if the lock was successfully acquired and `false` otherwise.
+    #[inline]
+    pub fn try_lock(&self) -> bool {
+        self.lock_internal(|| self.mutex.try_lock())
+    }
+
+    /// Unlocks this mutex. The inner mutex may not be unlocked if
+    /// this mutex was acquired previously in the current thread.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if the mutex is held by the current thread.
+    #[inline]
+    pub unsafe fn unlock(&self) {
+        let lock_count = self.lock_count.get() - 1;
+        self.lock_count.set(lock_count);
+        if lock_count == 0 {
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.unlock();
+        }
+    }
+
+    /// Checks whether the mutex is currently locked.
+    #[inline]
+    pub fn is_locked(&self) -> bool {
+        self.mutex.is_locked()
+    }
+
+    /// Checks whether the mutex is currently held by the current thread.
+    #[inline]
+    pub fn is_owned_by_current_thread(&self) -> bool {
+        let id = self.get_thread_id.nonzero_thread_id().get();
+        self.owner.load(Ordering::Relaxed) == id
+    }
+}
+
+impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
+    /// Unlocks this mutex using a fair unlock protocol. The inner mutex
+    /// may not be unlocked if this mutex was acquired previously in the
+    /// current thread.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if the mutex is held by the current thread.
+    #[inline]
+    pub unsafe fn unlock_fair(&self) {
+        let lock_count = self.lock_count.get() - 1;
+        self.lock_count.set(lock_count);
+        if lock_count == 0 {
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.unlock_fair();
+        }
+    }
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if the mutex is held by the current thread.
+    #[inline]
+    pub unsafe fn bump(&self) {
+        if self.lock_count.get() == 1 {
+            let id = self.owner.load(Ordering::Relaxed);
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.bump();
+            self.owner.store(id, Ordering::Relaxed);
+        }
+    }
+}
+
+impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
+    /// Attempts to acquire this lock until a timeout is reached.
+    #[inline]
+    pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
+        self.lock_internal(|| self.mutex.try_lock_until(timeout))
+    }
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    #[inline]
+    pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
+        self.lock_internal(|| self.mutex.try_lock_for(timeout))
+    }
+}
+
+/// A mutex which can be recursively locked by a single thread.
+///
+/// This type is identical to `Mutex` except for the following points:
+///
+/// - Locking multiple times from the same thread will work correctly instead of
+///   deadlocking.
+/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
+///   Use a `RefCell` if you need this.
+///
+/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
+/// primitive.
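+///
+/// # Example
+///
+/// A sketch, again assuming hypothetical `RawSpinlock` and `ThreadId`
+/// implementations of the required traits:
+///
+/// ```ignore
+/// type Remutex<T> = ReentrantMutex<RawSpinlock, ThreadId, T>;
+///
+/// let m: Remutex<i32> = Remutex::new(0);
+/// let a = m.lock();
+/// let b = m.lock(); // same thread: does not deadlock
+/// assert_eq!(*a, *b);
+/// ```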
+pub struct ReentrantMutex<R, G, T: ?Sized> {
+    raw: RawReentrantMutex<R, G>,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
+    for ReentrantMutex<R, G, T>
+{
+}
+unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
+    for ReentrantMutex<R, G, T>
+{
+}
+
+impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
+    /// Creates a new reentrant mutex in an unlocked state ready for use.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex {
+            data: UnsafeCell::new(val),
+            raw: RawReentrantMutex {
+                owner: AtomicUsize::new(0),
+                lock_count: Cell::new(0),
+                mutex: R::INIT,
+                get_thread_id: G::INIT,
+            },
+        }
+    }
+
+    /// Creates a new reentrant mutex in an unlocked state ready for use.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex {
+            data: UnsafeCell::new(val),
+            raw: RawReentrantMutex {
+                owner: AtomicUsize::new(0),
+                lock_count: Cell::new(0),
+                mutex: R::INIT,
+                get_thread_id: G::INIT,
+            },
+        }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    #[inline]
+    pub fn into_inner(self) -> T {
+        self.data.into_inner()
+    }
+}
+
+impl<R, G, T> ReentrantMutex<R, G, T> {
+    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
+    /// helper to get the thread ID.
+    ///
+    /// This allows creating a reentrant mutex in a constant context on stable
+    /// Rust.
+    #[inline]
+    pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex {
+            data: UnsafeCell::new(val),
+            raw: RawReentrantMutex {
+                owner: AtomicUsize::new(0),
+                lock_count: Cell::new(0),
+                mutex: raw_mutex,
+                get_thread_id,
+            },
+        }
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    /// # Safety
+    ///
+    /// The lock must be held when calling this method.
+    #[inline]
+    unsafe fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> {
+        ReentrantMutexGuard {
+            remutex: &self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Acquires a reentrant mutex, blocking the current thread until it is able
+    /// to do so.
+    ///
+    /// If the mutex is held by another thread then this function will block the
+    /// local thread until it is available to acquire the mutex. If the mutex is
+    /// already held by the current thread then this function will increment the
+    /// lock reference count and return immediately. Upon returning,
+    /// the thread is the only thread with the mutex held. An RAII guard is
+    /// returned to allow scoped unlock of the lock. When the guard goes out of
+    /// scope, the mutex will be unlocked.
+    #[inline]
+    pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
+        self.raw.lock();
+        // SAFETY: The lock is held, as required.
+        unsafe { self.guard() }
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+    /// guard is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
+        if self.raw.try_lock() {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Checks whether the mutex is currently locked.
+    #[inline]
+    pub fn is_locked(&self) -> bool {
+        self.raw.is_locked()
+    }
+
+    /// Checks whether the mutex is currently held by the current thread.
+    #[inline]
+    pub fn is_owned_by_current_thread(&self) -> bool {
+        self.raw.is_owned_by_current_thread()
+    }
+
+    /// Forcibly unlocks the mutex.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock(&self) {
+        self.raw.unlock();
+    }
+
+    /// Returns the underlying raw mutex object.
+    ///
+    /// Note that you will most likely need to import the `RawMutex` trait from
+    /// `lock_api` to be able to call functions on the raw mutex.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking a mutex while
+    /// still holding a reference to a `ReentrantMutexGuard`.
+    #[inline]
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw.mutex
+    }
+
+    /// Returns a raw pointer to the underlying data.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `ReentrantMutexGuard` object alive, for example
+    /// when dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// You must ensure that there are no data races when dereferencing the
+    /// returned pointer, for example if the current thread logically owns a
+    /// `ReentrantMutexGuard` but that guard has been discarded using
+    /// `mem::forget`.
+    #[inline]
+    pub fn data_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    /// Forcibly unlocks the mutex using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock_fair(&self) {
+        self.raw.unlock_fair();
+    }
+}
+
+impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
+        if self.raw.try_lock_for(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
+        if self.raw.try_lock_until(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.guard() })
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
+    #[inline]
+    fn default() -> ReentrantMutex<R, G, T> {
+        ReentrantMutex::new(Default::default())
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
+    #[inline]
+    fn from(t: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex::new(t)
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f
+                .debug_struct("ReentrantMutex")
+                .field("data", &&*guard)
+                .finish(),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("ReentrantMutex")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
+        }
+    }
+}
+
+// Copied and modified from serde
+#[cfg(feature = "serde")]
+impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
+where
+    R: RawMutex,
+    G: GetThreadId,
+    T: Serialize + ?Sized,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        self.lock().serialize(serializer)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
+where
+    R: RawMutex,
+    G: GetThreadId,
+    T: Deserialize<'de> + ?Sized,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
+    }
+}
+
+/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
+/// is dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// `Deref` implementation.
+#[must_use = "if unused the ReentrantMutex will immediately unlock"]
+pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
+    remutex: &'a ReentrantMutex<R, G, T>,
+    marker: PhantomData<(&'a T, GuardNoSend)>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
+    /// Returns a reference to the original `ReentrantMutex` object.
+    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
+        s.remutex
+    }
+
+    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `ReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = &s.remutex.raw;
+        let data = f(unsafe { &*s.remutex.data.get() });
+        mem::forget(s);
+        MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `ReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(
+        s: Self,
+        f: F,
+    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+    {
+        let raw = &s.remutex.raw;
+        let data = match f(unsafe { &*s.remutex.data.get() }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: A ReentrantMutexGuard always holds the lock.
+        unsafe {
+            s.remutex.raw.unlock();
+        }
+        defer!(s.remutex.raw.lock());
+        f()
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    ReentrantMutexGuard<'a, R, G, T>
+{
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: A ReentrantMutexGuard always holds the lock
+        unsafe {
+            s.remutex.raw.unlock_fair();
+        }
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// The mutex is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: A ReentrantMutexGuard always holds the lock
+        unsafe {
+            s.remutex.raw.unlock_fair();
+        }
+        defer!(s.remutex.raw.lock());
+        f()
+    }
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        // Safety: A ReentrantMutexGuard always holds the lock
+        unsafe {
+            s.remutex.raw.bump();
+        }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.remutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: A ReentrantMutexGuard always holds the lock.
+        unsafe {
+            self.remutex.raw.unlock();
+        }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+}
+
+/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use = "if unused the ReentrantMutex will immediately unlock"]
+pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
+    raw: &'a RawReentrantMutex<R, G>,
+    data: *const T,
+    marker: PhantomData<&'a T>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    MappedReentrantMutexGuard<'a, R, G, T>
+{
+    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &*s.data });
+        mem::forget(s);
+        MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(
+        s: Self,
+        f: F,
+    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+    {
+        let raw = s.raw;
+        let data = match f(unsafe { &*s.data }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    MappedReentrantMutexGuard<'a, R, G, T>
+{
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: A MappedReentrantMutexGuard always holds the lock
+        unsafe {
+            s.raw.unlock_fair();
+        }
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: A MappedReentrantMutexGuard always holds the lock.
+        unsafe {
+            self.raw.unlock();
+        }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+}
+
+
\ No newline at end of file
diff --git a/src/lock_api/rwlock.rs.html b/src/lock_api/rwlock.rs.html
new file mode 100644
index 00000000..6dca05bd
--- /dev/null
+++ b/src/lock_api/rwlock.rs.html
@@ -0,0 +1,3557 @@
+rwlock.rs - source
+
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+/// Basic operations for a reader-writer lock.
+///
+/// Types implementing this trait can be used by `RwLock` to form a safe and
+/// fully-functioning `RwLock` type.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that the `RwLock` is actually
+/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
+/// lock exists, and a shared lock can't be acquired while an exclusive lock
+/// exists.
+pub unsafe trait RawRwLock {
+    /// Initial value for an unlocked `RwLock`.
+    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
+    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
+    #[allow(clippy::declare_interior_mutable_const)]
+    const INIT: Self;
+
+    /// Marker type which determines whether a lock guard should be `Send`. Use
+    /// one of the `GuardSend` or `GuardNoSend` helper types here.
+    type GuardMarker;
+
+    /// Acquires a shared lock, blocking the current thread until it is able to do so.
+    fn lock_shared(&self);
+
+    /// Attempts to acquire a shared lock without blocking.
+    fn try_lock_shared(&self) -> bool;
+
+    /// Releases a shared lock.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if a shared lock is held in the current context.
+    unsafe fn unlock_shared(&self);
+
+    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
+    fn lock_exclusive(&self);
+
+    /// Attempts to acquire an exclusive lock without blocking.
+    fn try_lock_exclusive(&self) -> bool;
+
+    /// Releases an exclusive lock.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an exclusive lock is held in the current context.
+    unsafe fn unlock_exclusive(&self);
+
+    /// Checks if this `RwLock` is currently locked in any way.
+    #[inline]
+    fn is_locked(&self) -> bool {
+        let acquired_lock = self.try_lock_exclusive();
+        if acquired_lock {
+            // Safety: A lock was successfully acquired above.
+            unsafe {
+                self.unlock_exclusive();
+            }
+        }
+        !acquired_lock
+    }
+}
+
+/// Additional methods for RwLocks which support fair unlocking.
+///
+/// Fair unlocking means that a lock is handed directly over to the next waiting
+/// thread if there is one, without giving other threads the opportunity to
+/// "steal" the lock in the meantime. This is typically slower than unfair
+/// unlocking, but may be necessary in certain circumstances.
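+///
+/// # Example
+///
+/// A sketch, assuming a raw lock `RAW` implementing this trait:
+///
+/// ```ignore
+/// RAW.lock_exclusive();
+/// // ... modify the protected data ...
+/// // Hand the lock directly to the next waiter instead of letting it
+/// // be stolen by another thread.
+/// unsafe { RAW.unlock_exclusive_fair() };
+/// ```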
+pub unsafe trait RawRwLockFair: RawRwLock {
+    /// Releases a shared lock using a fair unlock protocol.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if a shared lock is held in the current context.
+    unsafe fn unlock_shared_fair(&self);
+
+    /// Releases an exclusive lock using a fair unlock protocol.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an exclusive lock is held in the current context.
+    unsafe fn unlock_exclusive_fair(&self);
+
+    /// Temporarily yields a shared lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
+    /// by `lock_shared`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if a shared lock is held in the current context.
+    unsafe fn bump_shared(&self) {
+        self.unlock_shared_fair();
+        self.lock_shared();
+    }
+
+    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
+    /// by `lock_exclusive`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an exclusive lock is held in the current context.
+    unsafe fn bump_exclusive(&self) {
+        self.unlock_exclusive_fair();
+        self.lock_exclusive();
+    }
+}
+
+/// Additional methods for RwLocks which support atomically downgrading an
+/// exclusive lock to a shared lock.
+pub unsafe trait RawRwLockDowngrade: RawRwLock {
+    /// Atomically downgrades an exclusive lock into a shared lock without
+    /// allowing any thread to take an exclusive lock in the meantime.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an exclusive lock is held in the current context.
+    unsafe fn downgrade(&self);
+}
+
+/// Additional methods for RwLocks which support locking with timeouts.
+///
+/// The `Duration` and `Instant` types are specified as associated types so that
+/// this trait is usable even in `no_std` environments.
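+///
+/// # Example
+///
+/// A sketch, assuming a raw lock `RAW` implementing this trait with
+/// `Duration = core::time::Duration`:
+///
+/// ```ignore
+/// if RAW.try_lock_shared_for(core::time::Duration::from_millis(10)) {
+///     // ... read the protected data ...
+///     unsafe { RAW.unlock_shared() };
+/// }
+/// ```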
+pub unsafe trait RawRwLockTimed: RawRwLock {
+    /// Duration type used for `try_lock_for`.
+    type Duration;
+
+    /// Instant type used for `try_lock_until`.
+    type Instant;
+
+    /// Attempts to acquire a shared lock until a timeout is reached.
+    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire a shared lock until a timeout is reached.
+    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;
+
+    /// Attempts to acquire an exclusive lock until a timeout is reached.
+    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire an exclusive lock until a timeout is reached.
+    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// Additional methods for RwLocks which support recursive read locks.
+///
+/// These are guaranteed to succeed without blocking if
+/// another read lock is held at the time of the call. This allows a thread
+/// to recursively lock a `RwLock`. However using this method can cause
+/// writers to starve since readers no longer block if a writer is waiting
+/// for the lock.
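+///
+/// # Example
+///
+/// A sketch, assuming a raw lock `RAW` implementing this trait:
+///
+/// ```ignore
+/// RAW.lock_shared_recursive();
+/// RAW.lock_shared_recursive(); // same thread: guaranteed not to block
+/// unsafe { RAW.unlock_shared() };
+/// unsafe { RAW.unlock_shared() };
+/// ```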
+pub unsafe trait RawRwLockRecursive: RawRwLock {
+    /// Acquires a shared lock without deadlocking in case of a recursive lock.
+    fn lock_shared_recursive(&self);
+
+    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive(&self) -> bool;
+}
+
+/// Additional methods for RwLocks which support recursive read locks and timeouts.
+pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
+    /// Attempts to acquire a shared lock until a timeout is reached, without
+    /// deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire a shared lock until a timeout is reached, without
+    /// deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// Additional methods for RwLocks which support atomically upgrading a shared
+/// lock to an exclusive lock.
+///
+/// This requires acquiring a special "upgradable read lock" instead of a
+/// normal shared lock. There may only be one upgradable lock at any time,
+/// otherwise deadlocks could occur when upgrading.
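+///
+/// # Example
+///
+/// A sketch of the pattern this enables, shown with the `spin` crate's
+/// `RwLock` (spin implements this trait for its lock):
+///
+/// ```ignore
+/// let lock = spin::RwLock::new(1);
+/// let upgradable = lock.upgradeable_read(); // at most one at a time
+/// let mut writer = upgradable.upgrade();    // atomically become exclusive
+/// *writer += 1;
+/// ```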
+pub unsafe trait RawRwLockUpgrade: RawRwLock {
+    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
+    fn lock_upgradable(&self);
+
+    /// Attempts to acquire an upgradable lock without blocking.
+    fn try_lock_upgradable(&self) -> bool;
+
+    /// Releases an upgradable lock.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an upgradable lock is held in the current context.
+    unsafe fn unlock_upgradable(&self);
+
+    /// Upgrades an upgradable lock to an exclusive lock.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an upgradable lock is held in the current context.
+    unsafe fn upgrade(&self);
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock without
+    /// blocking.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an upgradable lock is held in the current context.
+    unsafe fn try_upgrade(&self) -> bool;
+}
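+
+// A minimal sketch of the upgradable flow through the safe guard API,
+// assuming `spin`'s `lock_api` wrappers as the concrete implementation:
+//
+// ```
+// use spin::lock_api::{RwLock, RwLockUpgradableReadGuard};
+//
+// let lock: RwLock<u32> = RwLock::new(1);
+// // Shared access, with the exclusive right to upgrade later.
+// let upgradable = lock.upgradable_read();
+// assert_eq!(*upgradable, 1);
+// // Atomically trade the upgradable lock for an exclusive one.
+// let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
+// *writer = 2;
+// ```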
+
+/// Additional methods for RwLocks which support upgradable locks and fair
+/// unlocking.
+pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
+    /// Releases an upgradable lock using a fair unlock protocol.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an upgradable lock is held in the current context.
+    unsafe fn unlock_upgradable_fair(&self);
+
+    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
+    /// by `lock_upgradable`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an upgradable lock is held in the current context.
+    unsafe fn bump_upgradable(&self) {
+        self.unlock_upgradable_fair();
+        self.lock_upgradable();
+    }
+}
+
+/// Additional methods for RwLocks which support upgradable locks and lock
+/// downgrading.
+pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
+    /// Downgrades an upgradable lock to a shared lock.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an upgradable lock is held in the current context.
+    unsafe fn downgrade_upgradable(&self);
+
+    /// Downgrades an exclusive lock to an upgradable lock.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an exclusive lock is held in the current context.
+    unsafe fn downgrade_to_upgradable(&self);
+}
+
+/// Additional methods for RwLocks which support upgradable locks and locking
+/// with timeouts.
+pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
+    /// Attempts to acquire an upgradable lock until a timeout is reached.
+    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire an upgradable lock until a timeout is reached.
+    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
+    /// timeout is reached.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an upgradable lock is held in the current context.
+    unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
+    /// timeout is reached.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if an upgradable lock is held in the current context.
+    unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// A reader-writer lock
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
+/// allow concurrent access through readers. The RAII guards returned from the
+/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
+/// to allow access to the contents of the lock.
+pub struct RwLock<R, T: ?Sized> {
+    raw: R,
+    data: UnsafeCell<T>,
+}
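+
+// A minimal usage sketch, assuming `spin`'s `lock_api` wrapper as the
+// concrete `RawRwLock` implementation:
+//
+// ```
+// use spin::lock_api::RwLock;
+//
+// let lock = RwLock::new(5u32);
+// {
+//     // Any number of readers may coexist.
+//     let r1 = lock.read();
+//     let r2 = lock.read();
+//     assert_eq!(*r1 + *r2, 10);
+// } // both read guards dropped here
+// {
+//     // Exactly one writer, and no readers, while this guard lives.
+//     let mut w = lock.write();
+//     *w += 1;
+// }
+// assert_eq!(*lock.read(), 6);
+// ```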
+
+// Copied and modified from serde
+#[cfg(feature = "serde")]
+impl<R, T> Serialize for RwLock<R, T>
+where
+    R: RawRwLock,
+    T: Serialize + ?Sized,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        self.read().serialize(serializer)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
+where
+    R: RawRwLock,
+    T: Deserialize<'de> + ?Sized,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Deserialize::deserialize(deserializer).map(RwLock::new)
+    }
+}
+
+unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
+unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}
+
+impl<R: RawRwLock, T> RwLock<R, T> {
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> RwLock<R, T> {
+        RwLock {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> RwLock<R, T> {
+        RwLock {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Consumes this `RwLock`, returning the underlying data.
+    #[inline]
+    #[allow(unused_unsafe)]
+    pub fn into_inner(self) -> T {
+        unsafe { self.data.into_inner() }
+    }
+}
+
+impl<R, T> RwLock<R, T> {
+    /// Creates a new instance of an `RwLock<T>` based on a pre-existing
+    /// raw rwlock.
+    ///
+    /// This allows creating a `RwLock<T>` in a constant context on stable
+    /// Rust.
+    #[inline]
+    pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> {
+        RwLock {
+            data: UnsafeCell::new(val),
+            raw: raw_rwlock,
+        }
+    }
+}
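+
+// A minimal sketch of `const_new` in a `static`; `RawLock` is a hypothetical
+// stand-in for a concrete `RawRwLock` implementation:
+//
+// ```ignore
+// static GLOBAL: lock_api::RwLock<RawLock, u32> =
+//     lock_api::RwLock::const_new(<RawLock as lock_api::RawRwLock>::INIT, 42);
+// ```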
+
+impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
+    /// # Safety
+    ///
+    /// The lock must be held when calling this method.
+    #[inline]
+    unsafe fn read_guard(&self) -> RwLockReadGuard<'_, R, T> {
+        RwLockReadGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// # Safety
+    ///
+    /// The lock must be held when calling this method.
+    #[inline]
+    unsafe fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> {
+        RwLockWriteGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Locks this `RwLock` with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns.
+    ///
+    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
+    /// the current thread already holds one may result in a deadlock.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    #[inline]
+    pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
+        self.raw.lock_shared();
+        // SAFETY: The lock is held, as required.
+        unsafe { self.read_guard() }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
+        if self.raw.try_lock_shared() {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.read_guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Locks this `RwLock` with exclusive write access, blocking the current
+    /// thread until it can be acquired.
+    ///
+    /// This function will not return while other writers or other readers
+    /// currently have access to the lock.
+    ///
+    /// Returns an RAII guard which will drop the write access of this `RwLock`
+    /// when dropped.
+    #[inline]
+    pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
+        self.raw.lock_exclusive();
+        // SAFETY: The lock is held, as required.
+        unsafe { self.write_guard() }
+    }
+
+    /// Attempts to lock this `RwLock` with exclusive write access.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the lock when
+    /// it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
+        if self.raw.try_lock_exclusive() {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.write_guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
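+
+    // A minimal sketch of `get_mut`: with `&mut self` no guard is needed,
+    // since the borrow checker proves no other references exist (assuming
+    // `spin`'s wrapper again):
+    //
+    // ```
+    // use spin::lock_api::RwLock;
+    //
+    // let mut lock = RwLock::new(0u32);
+    // *lock.get_mut() = 10; // no locking, no atomic operations
+    // assert_eq!(*lock.read(), 10);
+    // ```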
+
+    /// Checks whether this `RwLock` is currently locked in any way.
+    #[inline]
+    pub fn is_locked(&self) -> bool {
+        self.raw.is_locked()
+    }
+
+    /// Forcibly unlocks a read lock.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockReadGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
+    #[inline]
+    pub unsafe fn force_unlock_read(&self) {
+        self.raw.unlock_shared();
+    }
+
+    /// Forcibly unlocks a write lock.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
+    #[inline]
+    pub unsafe fn force_unlock_write(&self) {
+        self.raw.unlock_exclusive();
+    }
+
+    /// Returns the underlying raw reader-writer lock object.
+    ///
+    /// Note that you will most likely need to import the `RawRwLock` trait from
+    /// `lock_api` to be able to call functions on the raw
+    /// reader-writer lock.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking the lock while
+    /// still holding a reference to a lock guard.
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw
+    }
+
+    /// Returns a raw pointer to the underlying data.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object
+    /// alive, for example when dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// You must ensure that there are no data races when dereferencing the
+    /// returned pointer, for example if the current thread logically owns a
+    /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded
+    /// using `mem::forget`.
+    #[inline]
+    pub fn data_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
+    /// Forcibly unlocks a read lock using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockReadGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
+    #[inline]
+    pub unsafe fn force_unlock_read_fair(&self) {
+        self.raw.unlock_shared_fair();
+    }
+
+    /// Forcibly unlocks a write lock using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
+    #[inline]
+    pub unsafe fn force_unlock_write_fair(&self) {
+        self.raw.unlock_exclusive_fair();
+    }
+}
+
+impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> {
+        if self.raw.try_lock_shared_for(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.read_guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> {
+        if self.raw.try_lock_shared_until(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.read_guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with exclusive write access until a
+    /// timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the exclusive access when it is dropped.
+    #[inline]
+    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
+        if self.raw.try_lock_exclusive_for(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.write_guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with exclusive write access until a
+    /// timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the exclusive access when it is dropped.
+    #[inline]
+    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
+        if self.raw.try_lock_exclusive_until(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.write_guard() })
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
+    /// Locks this `RwLock` with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns.
+    ///
+    /// Unlike `read`, this method is guaranteed to succeed without blocking if
+    /// another read lock is held at the time of the call. This allows a thread
+    /// to recursively lock a `RwLock`. However, using this method can cause
+    /// writers to starve since readers no longer block if a writer is waiting
+    /// for the lock.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    #[inline]
+    pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
+        self.raw.lock_shared_recursive();
+        // SAFETY: The lock is held, as required.
+        unsafe { self.read_guard() }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This method is guaranteed to succeed if another read lock is held at the
+    /// time of the call. See the documentation for `read_recursive` for details.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
+        if self.raw.try_lock_shared_recursive() {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.read_guard() })
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    ///
+    /// This method is guaranteed to succeed without blocking if another read
+    /// lock is held at the time of the call. See the documentation for
+    /// `read_recursive` for details.
+    #[inline]
+    pub fn try_read_recursive_for(
+        &self,
+        timeout: R::Duration,
+    ) -> Option<RwLockReadGuard<'_, R, T>> {
+        if self.raw.try_lock_shared_recursive_for(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.read_guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_read_recursive_until(
+        &self,
+        timeout: R::Instant,
+    ) -> Option<RwLockReadGuard<'_, R, T>> {
+        if self.raw.try_lock_shared_recursive_until(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.read_guard() })
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
+    /// # Safety
+    ///
+    /// The lock must be held when calling this method.
+    #[inline]
+    unsafe fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
+        RwLockUpgradableReadGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Locks this `RwLock` with upgradable read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers or other
+    /// upgradable reads which hold the lock. There may be other readers currently
+    /// inside the lock when this method returns.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    #[inline]
+    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
+        self.raw.lock_upgradable();
+        // SAFETY: The lock is held, as required.
+        unsafe { self.upgradable_guard() }
+    }
+
+    /// Attempts to acquire this `RwLock` with upgradable read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
+        if self.raw.try_lock_upgradable() {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.upgradable_guard() })
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_upgradable_read_for(
+        &self,
+        timeout: R::Duration,
+    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
+        if self.raw.try_lock_upgradable_for(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.upgradable_guard() })
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_upgradable_read_until(
+        &self,
+        timeout: R::Instant,
+    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
+        if self.raw.try_lock_upgradable_until(timeout) {
+            // SAFETY: The lock is held, as required.
+            Some(unsafe { self.upgradable_guard() })
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
+    #[inline]
+    fn default() -> RwLock<R, T> {
+        RwLock::new(Default::default())
+    }
+}
+
+impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
+    #[inline]
+    fn from(t: T) -> RwLock<R, T> {
+        RwLock::new(t)
+    }
+}
+
+impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_read() {
+            Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("RwLock")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
+        }
+    }
+}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a T, R::GuardMarker)>,
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `RwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = &s.rwlock.raw;
+        let data = f(unsafe { &*s.rwlock.data.get() });
+        mem::forget(s);
+        MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `RwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+    {
+        let raw = &s.rwlock.raw;
+        let data = match f(unsafe { &*s.rwlock.data.get() }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: An RwLockReadGuard always holds a shared lock.
+        unsafe {
+            s.rwlock.raw.unlock_shared();
+        }
+        defer!(s.rwlock.raw.lock_shared());
+        f()
+    }
+}
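+
+// A minimal sketch of `RwLockReadGuard::map`, projecting a guard onto one
+// field of the protected data (assuming `spin`'s wrappers):
+//
+// ```
+// use spin::lock_api::{RwLock, RwLockReadGuard};
+//
+// struct Point { x: u32, y: u32 }
+//
+// let lock = RwLock::new(Point { x: 1, y: 2 });
+// let guard = lock.read();
+// // The shared lock stays held, but the new guard exposes only `x`.
+// let x = RwLockReadGuard::map(guard, |p| &p.x);
+// assert_eq!(*x, 1);
+// ```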
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: An RwLockReadGuard always holds a shared lock.
+        unsafe {
+            s.rwlock.raw.unlock_shared_fair();
+        }
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: An RwLockReadGuard always holds a shared lock.
+        unsafe {
+            s.rwlock.raw.unlock_shared_fair();
+        }
+        defer!(s.rwlock.raw.lock_shared());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `read`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        // Safety: An RwLockReadGuard always holds a shared lock.
+        unsafe {
+            s.rwlock.raw.bump_shared();
+        }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: An RwLockReadGuard always holds a shared lock.
+        unsafe {
+            self.rwlock.raw.unlock_shared();
+        }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `RwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = &s.rwlock.raw;
+        let data = f(unsafe { &mut *s.rwlock.data.get() });
+        mem::forget(s);
+        MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `RwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+    {
+        let raw = &s.rwlock.raw;
+        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: An RwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            s.rwlock.raw.unlock_exclusive();
+        }
+        defer!(s.rwlock.raw.lock_exclusive());
+        f()
+    }
+}
+
+impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Atomically downgrades a write lock into a read lock without allowing any
+    /// writers to take exclusive access of the lock in the meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
+        // Safety: An RwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            s.rwlock.raw.downgrade();
+        }
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
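+
+// A minimal sketch of downgrading: swap exclusive access for shared access
+// without letting another writer in between (assuming `spin`'s wrappers,
+// whose raw lock implements `RawRwLockDowngrade`):
+//
+// ```
+// use spin::lock_api::{RwLock, RwLockWriteGuard};
+//
+// let lock = RwLock::new(0u32);
+// let mut w = lock.write();
+// *w = 7;
+// let r = RwLockWriteGuard::downgrade(w);
+// assert_eq!(*r, 7);
+// ```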
+
+impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
+    /// writers to take exclusive access of the lock in the meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
+        // Safety: An RwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            s.rwlock.raw.downgrade_to_upgradable();
+        }
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockUpgradableReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockWriteGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: An RwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            s.rwlock.raw.unlock_exclusive_fair();
+        }
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: An RwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            s.rwlock.raw.unlock_exclusive_fair();
+        }
+        defer!(s.rwlock.raw.lock_exclusive());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `write`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        // Safety: An RwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            s.rwlock.raw.bump_exclusive();
+        }
+    }
+}
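+
+// A minimal sketch of fair unlocking, assuming an implementation with
+// `RawRwLockFair` support such as `parking_lot` (the `spin` raw lock does
+// not implement the fair traits):
+//
+// ```
+// use parking_lot::{RwLock, RwLockWriteGuard};
+//
+// let lock = RwLock::new(0u32);
+// let mut w = lock.write();
+// *w += 1;
+// // Hand the lock directly to a waiting thread, if any, instead of
+// // letting this thread race to re-acquire it.
+// RwLockWriteGuard::unlock_fair(w);
+// ```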
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: An RwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            self.rwlock.raw.unlock_exclusive();
+        }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
+
+/// RAII structure used to release the upgradable read access of a lock when
+/// dropped.
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
+    for RwLockUpgradableReadGuard<'a, R, T>
+{
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        unsafe {
+            s.rwlock.raw.unlock_upgradable();
+        }
+        defer!(s.rwlock.raw.lock_upgradable());
+        f()
+    }
+
+    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
+    /// blocking the current thread until it can be acquired.
+    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        unsafe {
+            s.rwlock.raw.upgrade();
+        }
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockWriteGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
+    ///
+    /// If the access could not be granted at this time, then the current guard is returned.
+    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        if unsafe { s.rwlock.raw.try_upgrade() } {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        unsafe {
+            s.rwlock.raw.unlock_upgradable_fair();
+        }
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        unsafe {
+            s.rwlock.raw.unlock_upgradable_fair();
+        }
+        defer!(s.rwlock.raw.lock_upgradable());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `upgradable_read`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        unsafe {
+            s.rwlock.raw.bump_upgradable();
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Atomically downgrades an upgradable read lock into a shared read lock
+    /// without allowing any writers to take exclusive access of the lock in the
+    /// meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        unsafe {
+            s.rwlock.raw.downgrade_upgradable();
+        }
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive
+    /// write lock, until a timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// the current guard is returned.
+    pub fn try_upgrade_for(
+        s: Self,
+        timeout: R::Duration,
+    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive
+    /// write lock, until a timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// the current guard is returned.
+    #[inline]
+    pub fn try_upgrade_until(
+        s: Self,
+        timeout: R::Instant,
+    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+        unsafe {
+            self.rwlock.raw.unlock_upgradable();
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for RwLockUpgradableReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockUpgradableReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
+    for RwLockUpgradableReadGuard<'a, R, T>
+{
+}
+
+/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
+    raw: &'a R,
+    data: *const T,
+    marker: PhantomData<&'a T>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
+    R::GuardMarker: Send
+{
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
+    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &*s.data });
+        mem::forget(s);
+        MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&T) -> Option<&U>,
+    {
+        let raw = s.raw;
+        let data = match f(unsafe { &*s.data }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: A MappedRwLockReadGuard always holds a shared lock.
+        unsafe {
+            s.raw.unlock_shared_fair();
+        }
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: A MappedRwLockReadGuard always holds a shared lock.
+        unsafe {
+            self.raw.unlock_shared();
+        }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedRwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedRwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
+    for MappedRwLockReadGuard<'a, R, T>
+{
+}
+
+/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
+    raw: &'a R,
+    data: *mut T,
+    marker: PhantomData<&'a mut T>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedRwLockWriteGuard<'a, R, T>
+{
+}
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
+    R::GuardMarker: Send
+{
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
+    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &mut *s.data });
+        mem::forget(s);
+        MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
+    /// locked data. The original guard is returned if the closure returns `None`.
+    ///
+    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
+    where
+        F: FnOnce(&mut T) -> Option<&mut U>,
+    {
+        let raw = s.raw;
+        let data = match f(unsafe { &mut *s.data }) {
+            Some(data) => data,
+            None => return Err(s),
+        };
+        mem::forget(s);
+        Ok(MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        })
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            s.raw.unlock_exclusive_fair();
+        }
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
+        unsafe {
+            self.raw.unlock_exclusive();
+        }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedRwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedRwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
+    for MappedRwLockWriteGuard<'a, R, T>
+{
+}
+
+
\ No newline at end of file diff --git a/src/scopeguard/lib.rs.html b/src/scopeguard/lib.rs.html new file mode 100644 index 00000000..227b3772 --- /dev/null +++ b/src/scopeguard/lib.rs.html @@ -0,0 +1,1161 @@ +lib.rs - source + +
+#![cfg_attr(not(any(test, feature = "use_std")), no_std)]
+#![doc(html_root_url = "https://docs.rs/scopeguard/1/")]
+
+//! A scope guard will run a given closure when it goes out of scope,
+//! even if the code in between panics
+//! (as long as the panic doesn't abort).
+//!
+//! # Examples
+//!
+//! ## Hello World
+//!
+//! This example creates a scope guard with an example function:
+//!
+//! ```
+//! extern crate scopeguard;
+//!
+//! fn f() {
+//!     let _guard = scopeguard::guard((), |_| {
+//!         println!("Hello Scope Exit!");
+//!     });
+//!
+//!     // rest of the code here.
+//!
+//!     // Here, at the end of `_guard`'s scope, the guard's closure is called.
+//!     // It is also called if we exit this scope through unwinding instead.
+//! }
+//! # fn main() {
+//! #    f();
+//! # }
+//! ```
+//!
+//! ## `defer!`
+//!
+//! Use the `defer` macro to run an operation at scope exit,
+//! either regular scope exit or during unwinding from a panic.
+//!
+//! ```
+//! #[macro_use(defer)] extern crate scopeguard;
+//!
+//! use std::cell::Cell;
+//!
+//! fn main() {
+//!     // use a cell to observe drops during and after the scope guard is active
+//!     let drop_counter = Cell::new(0);
+//!     {
+//!         // Create a scope guard using `defer!` for the current scope
+//!         defer! {
+//!             drop_counter.set(1 + drop_counter.get());
+//!         }
+//!
+//!         // Do regular operations here in the meantime.
+//!
+//!         // Just before scope exit: it hasn't run yet.
+//!         assert_eq!(drop_counter.get(), 0);
+//!
+//!         // The following scope end is where the defer closure is called
+//!     }
+//!     assert_eq!(drop_counter.get(), 1);
+//! }
+//! ```
+//!
+//! ## Scope Guard with Value
+//!
+//! If the scope guard closure needs to access an outer value that is also
+//! mutated outside of the scope guard, then you may want to use the scope guard
+//! with a value. The guard works like a smart pointer, so the inner value can
+//! be accessed by reference or by mutable reference.
+//!
+//! ### 1. The guard owns a file
+//!
+//! In this example, the scope guard owns a file and ensures pending writes are
+//! synced at scope exit.
+//!
+//! ```
+//! extern crate scopeguard;
+//!
+//! use std::fs::*;
+//! use std::io::{self, Write};
+//! # // Mock file so that we don't actually write a file
+//! # struct MockFile;
+//! # impl MockFile {
+//! #     fn create(_s: &str) -> io::Result<Self> { Ok(MockFile) }
+//! #     fn write_all(&self, _b: &[u8]) -> io::Result<()> { Ok(()) }
+//! #     fn sync_all(&self) -> io::Result<()> { Ok(()) }
+//! # }
+//! # use self::MockFile as File;
+//!
+//! fn try_main() -> io::Result<()> {
+//!     let f = File::create("newfile.txt")?;
+//!     let mut file = scopeguard::guard(f, |f| {
+//!         // ensure we flush file at return or panic
+//!         let _ = f.sync_all();
+//!     });
+//!     // Access the file through the scope guard itself
+//!     file.write_all(b"test me\n").map(|_| ())
+//! }
+//!
+//! fn main() {
+//!     try_main().unwrap();
+//! }
+//!
+//! ```
+//!
+//! ### 2. The guard restores an invariant on scope exit
+//!
+//! ```
+//! extern crate scopeguard;
+//!
+//! use std::mem::ManuallyDrop;
+//! use std::ptr;
+//!
+//! // This function, just for this example, takes the first element
+//! // and inserts it into the assumed sorted tail of the vector.
+//! //
+//! // For optimization purposes we temporarily violate an invariant of the
+//! // Vec, that it owns all of its elements.
+//! //
+//! // The safe approach would be to use swap, which costs two memory writes
+//! // per position; the optimization is to use a “hole”, which needs only one
+//! // memory write for each position it moves.
+//! //
+//! // We *must* use a scope guard to run this code safely. We
+//! // are running arbitrary user code (comparison operators) that may panic.
+//! // The scope guard ensures we restore the invariant after successful
+//! // exit or during unwinding from panic.
+//! fn insertion_sort_first<T>(v: &mut Vec<T>)
+//!     where T: PartialOrd
+//! {
+//!     struct Hole<'a, T: 'a> {
+//!         v: &'a mut Vec<T>,
+//!         index: usize,
+//!         value: ManuallyDrop<T>,
+//!     }
+//!
+//!     unsafe {
+//!         // Create a moved-from location in the vector, a “hole”.
+//!         let value = ptr::read(&v[0]);
+//!         let mut hole = Hole { v: v, index: 0, value: ManuallyDrop::new(value) };
+//!
+//!         // Use a scope guard with a value.
+//!         // At scope exit, plug the hole so that the vector is fully
+//!         // initialized again.
+//!         // The scope guard owns the hole, but we can access it through the guard.
+//!         let mut hole_guard = scopeguard::guard(hole, |hole| {
+//!             // plug the hole in the vector with the value that was taken out
+//!             let index = hole.index;
+//!             ptr::copy_nonoverlapping(&*hole.value, &mut hole.v[index], 1);
+//!         });
+//!
+//!         // run algorithm that moves the hole in the vector here
+//!         // move the hole until it's in a sorted position
+//!         for i in 1..hole_guard.v.len() {
+//!             if *hole_guard.value >= hole_guard.v[i] {
+//!                 // move the element back and the hole forward
+//!                 let index = hole_guard.index;
+//!                 ptr::copy_nonoverlapping(&hole_guard.v[index + 1], &mut hole_guard.v[index], 1);
+//!                 hole_guard.index += 1;
+//!             } else {
+//!                 break;
+//!             }
+//!         }
+//!
+//!         // When the scope exits here, the Vec becomes whole again!
+//!     }
+//! }
+//!
+//! fn main() {
+//!     let string = String::from;
+//!     let mut data = vec![string("c"), string("a"), string("b"), string("d")];
+//!     insertion_sort_first(&mut data);
+//!     assert_eq!(data, vec!["a", "b", "c", "d"]);
+//! }
+//!
+//! ```
+//!
+//!
+//! # Crate Features
+//!
+//! - `use_std`
+//!   + Enabled by default. Enables the `OnUnwind` and `OnSuccess` strategies.
+//!   + Disable to use `no_std`.
+//!
+//! # Rust Version
+//!
+//! This version of the crate requires Rust 1.20 or later.
+//!
+//! The scopeguard 1.x release series will use a carefully considered version
+//! upgrade policy, where in a later 1.x version, we will raise the minimum
+//! required Rust version.
+
+#[cfg(not(any(test, feature = "use_std")))]
+extern crate core as std;
+
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem::{self, ManuallyDrop};
+use std::ops::{Deref, DerefMut};
+use std::ptr;
+
+/// Controls in which cases the associated code should be run
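+///
+/// A sketch of a custom strategy (hypothetical, not part of this crate)
+/// that runs the guard's code only in debug builds:
+///
+/// ```
+/// use scopeguard::Strategy;
+///
+/// enum OnDebug {}
+///
+/// impl Strategy for OnDebug {
+///     // Run the guard only when debug assertions are compiled in.
+///     fn should_run() -> bool { cfg!(debug_assertions) }
+/// }
+/// ```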
+pub trait Strategy {
+    /// Return `true` if the guard’s associated code should run
+    /// (in the context where this method is called).
+    fn should_run() -> bool;
+}
+
+/// Always run on scope exit.
+///
+/// “Always” run: on regular exit from a scope or on unwinding from a panic.
+/// It cannot run on abort, process exit, or other catastrophic events where
+/// destructors don’t run.
+#[derive(Debug)]
+pub enum Always {}
+
+/// Run on scope exit through unwinding.
+///
+/// Requires crate feature `use_std`.
+#[cfg(feature = "use_std")]
+#[derive(Debug)]
+pub enum OnUnwind {}
+
+/// Run on regular scope exit, when not unwinding.
+///
+/// Requires crate feature `use_std`.
+#[cfg(feature = "use_std")]
+#[derive(Debug)]
+pub enum OnSuccess {}
+
+impl Strategy for Always {
+    #[inline(always)]
+    fn should_run() -> bool { true }
+}
+
+#[cfg(feature = "use_std")]
+impl Strategy for OnUnwind {
+    #[inline]
+    fn should_run() -> bool { std::thread::panicking() }
+}
+
+#[cfg(feature = "use_std")]
+impl Strategy for OnSuccess {
+    #[inline]
+    fn should_run() -> bool { !std::thread::panicking() }
+}
+
+/// Macro to create a `ScopeGuard` (always run).
+///
+/// The macro takes statements, which are the body of a closure
+/// that will run when the scope is exited.
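+///
+/// A minimal sketch:
+///
+/// ```
+/// #[macro_use(defer)] extern crate scopeguard;
+///
+/// fn main() {
+///     defer! {
+///         println!("runs at scope exit, even on unwind");
+///     }
+///     println!("runs first");
+/// }
+/// ```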
+#[macro_export]
+macro_rules! defer {
+    ($($t:tt)*) => {
+        let _guard = $crate::guard((), |()| { $($t)* });
+    };
+}
+
+/// Macro to create a `ScopeGuard` (run on successful scope exit).
+///
+/// The macro takes statements, which are the body of a closure
+/// that will run when the scope is exited.
+///
+/// Requires crate feature `use_std`.
+#[cfg(feature = "use_std")]
+#[macro_export]
+macro_rules! defer_on_success {
+    ($($t:tt)*) => {
+        let _guard = $crate::guard_on_success((), |()| { $($t)* });
+    };
+}
+
+/// Macro to create a `ScopeGuard` (run on unwinding from panic).
+///
+/// The macro takes statements, which are the body of a closure
+/// that will run when the scope is exited.
+///
+/// Requires crate feature `use_std`.
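+///
+/// A minimal sketch; the statements run only if the scope is left by
+/// unwinding:
+///
+/// ```
+/// #[macro_use(defer_on_unwind)] extern crate scopeguard;
+///
+/// fn main() {
+///     let _ = std::panic::catch_unwind(|| {
+///         defer_on_unwind! {
+///             eprintln!("cleaning up after a panic");
+///         }
+///         panic!("failure");
+///     });
+/// }
+/// ```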
+#[cfg(feature = "use_std")]
+#[macro_export]
+macro_rules! defer_on_unwind {
+    ($($t:tt)*) => {
+        let _guard = $crate::guard_on_unwind((), |()| { $($t)* });
+    };
+}
+
+/// `ScopeGuard` is a scope guard that may own a protected value.
+///
+/// If you place a guard in a local variable, the closure can
+/// run regardless of how you leave the scope, whether through regular return
+/// or panic (except if the panic or other code aborts, i.e. as long as
+/// destructors run). It is run only once.
+///
+/// The `S` parameter for [`Strategy`](trait.Strategy.html) determines if
+/// the closure actually runs.
+///
+/// The guard's closure will be called with the held value in the destructor.
+///
+/// The `ScopeGuard` implements `Deref` so that you can access the inner value.
+pub struct ScopeGuard<T, F, S = Always>
+    where F: FnOnce(T),
+          S: Strategy,
+{
+    value: ManuallyDrop<T>,
+    dropfn: ManuallyDrop<F>,
+    // fn(S) -> S is used, so that the S is not taken into account for auto traits.
+    strategy: PhantomData<fn(S) -> S>,
+}
+
+impl<T, F, S> ScopeGuard<T, F, S>
+    where F: FnOnce(T),
+          S: Strategy,
+{
+    /// Create a `ScopeGuard` that owns `v` (accessible through deref) and calls
+    /// `dropfn` when its destructor runs.
+    ///
+    /// The `Strategy` decides whether the scope guard's closure should run.
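+    ///
+    /// A minimal sketch with an explicit strategy (requires crate feature
+    /// `use_std` for `OnUnwind`):
+    ///
+    /// ```
+    /// use scopeguard::{OnUnwind, ScopeGuard};
+    ///
+    /// // Clear the vector only if the scope unwinds.
+    /// let guard: ScopeGuard<_, _, OnUnwind> =
+    ///     ScopeGuard::with_strategy(vec![1, 2, 3], |mut v| v.clear());
+    /// assert_eq!(guard.len(), 3);
+    /// ```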
+    #[inline]
+    pub fn with_strategy(v: T, dropfn: F) -> ScopeGuard<T, F, S> {
+        ScopeGuard {
+            value: ManuallyDrop::new(v),
+            dropfn: ManuallyDrop::new(dropfn),
+            strategy: PhantomData,
+        }
+    }
+
+    /// “Defuse” the guard and extract the value without calling the closure.
+    ///
+    /// ```
+    /// extern crate scopeguard;
+    ///
+    /// use scopeguard::{guard, ScopeGuard};
+    ///
+    /// fn conditional() -> bool { true }
+    ///
+    /// fn main() {
+    ///     let mut guard = guard(Vec::new(), |mut v| v.clear());
+    ///     guard.push(1);
+    ///     
+    ///     if conditional() {
+    ///         // depending on a condition, we may decide to
+    ///         // “defuse” the guard and get back its inner parts
+    ///         let value = ScopeGuard::into_inner(guard);
+    ///     } else {
+    ///         // guard still exists in this branch
+    ///     }
+    /// }
+    /// ```
+    #[inline]
+    pub fn into_inner(guard: Self) -> T {
+        // Cannot move out of Drop-implementing types, so
+        // ptr::read the value and forget the guard.
+        unsafe {
+            let value = ptr::read(&*guard.value);
+            // read the closure so that it is dropped, and assign it to a local
+            // variable to ensure that it is only dropped after the guard has
+            // been forgotten. (In case the Drop impl of the closure, or that
+            // of any consumed captured variable, panics).
+            let _dropfn = ptr::read(&*guard.dropfn);
+            mem::forget(guard);
+            value
+        }
+    }
+}
+
+
+/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
+#[inline]
+pub fn guard<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, Always>
+    where F: FnOnce(T)
+{
+    ScopeGuard::with_strategy(v, dropfn)
+}
+
+/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
+///
+/// Requires crate feature `use_std`.
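+///
+/// ## Examples
+///
+/// A minimal sketch; the closure runs only when the scope exits without
+/// panicking:
+///
+/// ```
+/// extern crate scopeguard;
+///
+/// let _g = scopeguard::guard_on_success((), |()| {
+///     println!("left the scope without panicking");
+/// });
+/// ```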
+#[cfg(feature = "use_std")]
+#[inline]
+pub fn guard_on_success<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnSuccess>
+    where F: FnOnce(T)
+{
+    ScopeGuard::with_strategy(v, dropfn)
+}
+
+/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
+///
+/// Requires crate feature `use_std`.
+///
+/// ## Examples
+///
+/// For performance reasons, or to emulate “only run guard on unwind” in
+/// no-std environments, we can also use the default guard and manually
+/// defuse it at the end of the scope, as in the following example. (The
+/// performance concern applies if [`OnUnwind`]'s call to
+/// [std::thread::panicking()] is an issue.)
+///
+/// ```
+/// extern crate scopeguard;
+///
+/// use scopeguard::ScopeGuard;
+/// # fn main() {
+/// {
+///     let guard = scopeguard::guard((), |_| {});
+///
+///     // rest of the code here
+///
+///     // we reached the end of scope without unwinding - defuse it
+///     ScopeGuard::into_inner(guard);
+/// }
+/// # }
+/// ```
+#[cfg(feature = "use_std")]
+#[inline]
+pub fn guard_on_unwind<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnUnwind>
+    where F: FnOnce(T)
+{
+    ScopeGuard::with_strategy(v, dropfn)
+}
+
+// ScopeGuard can be Sync even if F isn't because the closure is
+// not accessible from references.
+// The guard does not store any instance of S, so it is also irrelevant.
+unsafe impl<T, F, S> Sync for ScopeGuard<T, F, S>
+    where T: Sync,
+          F: FnOnce(T),
+          S: Strategy
+{}
+
+impl<T, F, S> Deref for ScopeGuard<T, F, S>
+    where F: FnOnce(T),
+          S: Strategy
+{
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        &*self.value
+    }
+}
+
+impl<T, F, S> DerefMut for ScopeGuard<T, F, S>
+    where F: FnOnce(T),
+          S: Strategy
+{
+    fn deref_mut(&mut self) -> &mut T {
+        &mut *self.value
+    }
+}
+
+impl<T, F, S> Drop for ScopeGuard<T, F, S>
+    where F: FnOnce(T),
+          S: Strategy
+{
+    fn drop(&mut self) {
+        // This is OK because the fields are `ManuallyDrop`s
+        // which will not be dropped by the compiler.
+        let (value, dropfn) = unsafe {
+            (ptr::read(&*self.value), ptr::read(&*self.dropfn))
+        };
+        if S::should_run() {
+            dropfn(value);
+        }
+    }
+}
+
+impl<T, F, S> fmt::Debug for ScopeGuard<T, F, S>
+    where T: fmt::Debug,
+          F: FnOnce(T),
+          S: Strategy
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct(stringify!(ScopeGuard))
+         .field("value", &*self.value)
+         .finish()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::cell::Cell;
+    use std::panic::catch_unwind;
+    use std::panic::AssertUnwindSafe;
+
+    #[test]
+    fn test_defer() {
+        let drops = Cell::new(0);
+        defer!(drops.set(1000));
+        assert_eq!(drops.get(), 0);
+    }
+
+    #[cfg(feature = "use_std")]
+    #[test]
+    fn test_defer_success_1() {
+        let drops = Cell::new(0);
+        {
+            defer_on_success!(drops.set(1));
+            assert_eq!(drops.get(), 0);
+        }
+        assert_eq!(drops.get(), 1);
+    }
+
+    #[cfg(feature = "use_std")]
+    #[test]
+    fn test_defer_success_2() {
+        let drops = Cell::new(0);
+        let _ = catch_unwind(AssertUnwindSafe(|| {
+            defer_on_success!(drops.set(1));
+            panic!("failure")
+        }));
+        assert_eq!(drops.get(), 0);
+    }
+
+    #[cfg(feature = "use_std")]
+    #[test]
+    fn test_defer_unwind_1() {
+        let drops = Cell::new(0);
+        let _ = catch_unwind(AssertUnwindSafe(|| {
+            defer_on_unwind!(drops.set(1));
+            assert_eq!(drops.get(), 0);
+            panic!("failure")
+        }));
+        assert_eq!(drops.get(), 1);
+    }
+
+    #[cfg(feature = "use_std")]
+    #[test]
+    fn test_defer_unwind_2() {
+        let drops = Cell::new(0);
+        {
+            defer_on_unwind!(drops.set(1));
+        }
+        assert_eq!(drops.get(), 0);
+    }
+
+    #[test]
+    fn test_only_dropped_by_closure_when_run() {
+        let value_drops = Cell::new(0);
+        let value = guard((), |()| value_drops.set(1 + value_drops.get()));
+        let closure_drops = Cell::new(0);
+        let guard = guard(value, |_| closure_drops.set(1 + closure_drops.get()));
+        assert_eq!(value_drops.get(), 0);
+        assert_eq!(closure_drops.get(), 0);
+        drop(guard);
+        assert_eq!(value_drops.get(), 1);
+        assert_eq!(closure_drops.get(), 1);
+    }
+
+    #[cfg(feature = "use_std")]
+    #[test]
+    fn test_dropped_once_when_not_run() {
+        let value_drops = Cell::new(0);
+        let value = guard((), |()| value_drops.set(1 + value_drops.get()));
+        let captured_drops = Cell::new(0);
+        let captured = guard((), |()| captured_drops.set(1 + captured_drops.get()));
+        let closure_drops = Cell::new(0);
+        let guard = guard_on_unwind(value, |value| {
+            drop(value);
+            drop(captured);
+            closure_drops.set(1 + closure_drops.get())
+        });
+        assert_eq!(value_drops.get(), 0);
+        assert_eq!(captured_drops.get(), 0);
+        assert_eq!(closure_drops.get(), 0);
+        drop(guard);
+        assert_eq!(value_drops.get(), 1);
+        assert_eq!(captured_drops.get(), 1);
+        assert_eq!(closure_drops.get(), 0);
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let dropped = Cell::new(false);
+        let value = guard(42, |_| dropped.set(true));
+        let guard = guard(value, |_| dropped.set(true));
+        let inner = ScopeGuard::into_inner(guard);
+        assert_eq!(dropped.get(), false);
+        assert_eq!(*inner, 42);
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/spin/barrier.rs.html b/src/spin/barrier.rs.html
new file mode 100644
index 00000000..8e69a17f
--- /dev/null
+++ b/src/spin/barrier.rs.html
@@ -0,0 +1,471 @@
+barrier.rs - source
+
+
+//! Synchronization primitive allowing multiple threads to synchronize the
+//! beginning of some computation.
+//!
+//! Implementation adapted from the 'Barrier' type of the standard library. See:
+//! <https://doc.rust-lang.org/std/sync/struct.Barrier.html>
+//!
+//! Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+//! file at the top-level directory of this distribution and at
+//! <http://rust-lang.org/COPYRIGHT>.
+//!
+//! Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+//! <http://www.apache.org/licenses/LICENSE-2.0>> or the MIT license
+//! <LICENSE-MIT or <http://opensource.org/licenses/MIT>>, at your
+//! option. This file may not be copied, modified, or distributed
+//! except according to those terms.
+
+use crate::{mutex::Mutex, RelaxStrategy, Spin};
+
+/// A primitive that synchronizes the execution of multiple threads.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+/// use std::sync::Arc;
+/// use std::thread;
+///
+/// let mut handles = Vec::with_capacity(10);
+/// let barrier = Arc::new(spin::Barrier::new(10));
+/// for _ in 0..10 {
+///     let c = barrier.clone();
+///     // The same messages will be printed together.
+///     // You will NOT see any interleaving.
+///     handles.push(thread::spawn(move|| {
+///         println!("before wait");
+///         c.wait();
+///         println!("after wait");
+///     }));
+/// }
+/// // Wait for other threads to finish.
+/// for handle in handles {
+///     handle.join().unwrap();
+/// }
+/// ```
+pub struct Barrier<R = Spin> {
+    lock: Mutex<BarrierState, R>,
+    num_threads: usize,
+}
+
+// The inner state of a double barrier
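+// `count` tracks how many threads have arrived in the current generation;
+// `generation_id` is bumped by the leader on each reset so that waiting
+// threads can tell a reused barrier apart from the one they arrived at.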
+struct BarrierState {
+    count: usize,
+    generation_id: usize,
+}
+
+/// A `BarrierWaitResult` is returned by [`wait`] when all threads in the [`Barrier`]
+/// have rendezvoused.
+///
+/// [`wait`]: struct.Barrier.html#method.wait
+/// [`Barrier`]: struct.Barrier.html
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// let barrier = spin::Barrier::new(1);
+/// let barrier_wait_result = barrier.wait();
+/// ```
+pub struct BarrierWaitResult(bool);
+
+impl<R: RelaxStrategy> Barrier<R> {
+    /// Blocks the current thread until all threads have rendezvoused here.
+    ///
+    /// Barriers are re-usable after all threads have rendezvoused once, and can
+    /// be used continuously.
+    ///
+    /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
+    /// returns `true` from [`is_leader`] when returning from this function, and
+    /// all other threads will receive a result that will return `false` from
+    /// [`is_leader`].
+    ///
+    /// [`BarrierWaitResult`]: struct.BarrierWaitResult.html
+    /// [`is_leader`]: struct.BarrierWaitResult.html#method.is_leader
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    /// use std::sync::Arc;
+    /// use std::thread;
+    ///
+    /// let mut handles = Vec::with_capacity(10);
+    /// let barrier = Arc::new(spin::Barrier::new(10));
+    /// for _ in 0..10 {
+    ///     let c = barrier.clone();
+    ///     // The same messages will be printed together.
+    ///     // You will NOT see any interleaving.
+    ///     handles.push(thread::spawn(move|| {
+    ///         println!("before wait");
+    ///         c.wait();
+    ///         println!("after wait");
+    ///     }));
+    /// }
+    /// // Wait for other threads to finish.
+    /// for handle in handles {
+    ///     handle.join().unwrap();
+    /// }
+    /// ```
+    pub fn wait(&self) -> BarrierWaitResult {
+        let mut lock = self.lock.lock();
+        lock.count += 1;
+
+        if lock.count < self.num_threads {
+            // not the leader
+            let local_gen = lock.generation_id;
+
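+            // Spin until the leader resets the barrier and bumps the
+            // generation, releasing the lock between checks so the other
+            // threads can make progress.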
+            while local_gen == lock.generation_id &&
+                lock.count < self.num_threads {
+                drop(lock);
+                R::relax();
+                lock = self.lock.lock();
+            }
+            BarrierWaitResult(false)
+        } else {
+            // this thread is the leader,
+            //   and is responsible for incrementing the generation
+            lock.count = 0;
+            lock.generation_id = lock.generation_id.wrapping_add(1);
+            BarrierWaitResult(true)
+        }
+    }
+}
+
+impl<R> Barrier<R> {
+    /// Creates a new barrier that can block a given number of threads.
+    ///
+    /// A barrier will block `n`-1 threads which call [`wait`] and then wake up
+    /// all threads at once when the `n`th thread calls [`wait`]. A Barrier created
+    /// with n = 0 will behave identically to one created with n = 1.
+    ///
+    /// [`wait`]: #method.wait
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// let barrier = spin::Barrier::new(10);
+    /// ```
+    pub const fn new(n: usize) -> Self {
+        Self {
+            lock: Mutex::new(BarrierState {
+                count: 0,
+                generation_id: 0,
+            }),
+            num_threads: n,
+        }
+    }
+}
+
+impl BarrierWaitResult {
+    /// Returns `true` if this thread returning from [`wait`] is the "leader thread".
+    ///
+    /// Only one thread will have `true` returned from its result; all other
+    /// threads will have `false` returned.
+    ///
+    /// [`wait`]: struct.Barrier.html#method.wait
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// let barrier = spin::Barrier::new(1);
+    /// let barrier_wait_result = barrier.wait();
+    /// println!("{:?}", barrier_wait_result.is_leader());
+    /// ```
+    pub fn is_leader(&self) -> bool { self.0 }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::mpsc::{channel, TryRecvError};
+    use std::sync::Arc;
+    use std::thread;
+
+    type Barrier = super::Barrier;
+
+    fn use_barrier(n: usize, barrier: Arc<Barrier>) {
+        let (tx, rx) = channel();
+
+        for _ in 0..n - 1 {
+            let c = barrier.clone();
+            let tx = tx.clone();
+            thread::spawn(move|| {
+                tx.send(c.wait().is_leader()).unwrap();
+            });
+        }
+
+        // At this point, all spawned threads should be blocked,
+        // so we shouldn't get anything from the port
+        assert!(match rx.try_recv() {
+            Err(TryRecvError::Empty) => true,
+            _ => false,
+        });
+
+        let mut leader_found = barrier.wait().is_leader();
+
+        // Now, the barrier is cleared and we should get data.
+        for _ in 0..n - 1 {
+            if rx.recv().unwrap() {
+                assert!(!leader_found);
+                leader_found = true;
+            }
+        }
+        assert!(leader_found);
+    }
+
+    #[test]
+    fn test_barrier() {
+        const N: usize = 10;
+
+        let barrier = Arc::new(Barrier::new(N));
+
+        use_barrier(N, barrier.clone());
+
+        // use barrier twice to ensure it is reusable
+        use_barrier(N, barrier.clone());
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/spin/lazy.rs.html b/src/spin/lazy.rs.html
new file mode 100644
index 00000000..615fd34b
--- /dev/null
+++ b/src/spin/lazy.rs.html
@@ -0,0 +1,229 @@
+lazy.rs - source
+
+
+//! Synchronization primitives for lazy evaluation.
+//!
+//! Implementation adapted from the `SyncLazy` type of the standard library. See:
+//! <https://doc.rust-lang.org/std/lazy/struct.SyncLazy.html>
+
+use core::{cell::Cell, fmt, ops::Deref};
+use crate::{once::Once, RelaxStrategy, Spin};
+
+/// A value which is initialized on the first access.
+///
+/// This type is a thread-safe `Lazy`, and can be used in statics.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashMap;
+/// use spin::Lazy;
+///
+/// static HASHMAP: Lazy<HashMap<i32, String>> = Lazy::new(|| {
+///     println!("initializing");
+///     let mut m = HashMap::new();
+///     m.insert(13, "Spica".to_string());
+///     m.insert(74, "Hoyten".to_string());
+///     m
+/// });
+///
+/// fn main() {
+///     println!("ready");
+///     std::thread::spawn(|| {
+///         println!("{:?}", HASHMAP.get(&13));
+///     }).join().unwrap();
+///     println!("{:?}", HASHMAP.get(&74));
+///
+///     // Prints:
+///     //   ready
+///     //   initializing
+///     //   Some("Spica")
+///     //   Some("Hoyten")
+/// }
+/// ```
+pub struct Lazy<T, F = fn() -> T, R = Spin> {
+    cell: Once<T, R>,
+    init: Cell<Option<F>>,
+}
+
+impl<T: fmt::Debug, F, R> fmt::Debug for Lazy<T, F, R> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Lazy").field("cell", &self.cell).field("init", &"..").finish()
+    }
+}
+
+// We never create a `&F` from a `&Lazy<T, F>`, so it is fine
+// not to require `Sync` for `F`.
+// We do create a `&mut Option<F>` in `force`, but this is
+// properly synchronized and only happens once,
+// so it does not affect this impl either.
+unsafe impl<T, F: Send> Sync for Lazy<T, F> where Once<T>: Sync {}
+// auto-derived `Send` impl is OK.
+
+impl<T, F, R> Lazy<T, F, R> {
+    /// Creates a new lazy value with the given initializing
+    /// function.
+    pub const fn new(f: F) -> Self {
+        Self { cell: Once::new(), init: Cell::new(Some(f)) }
+    }
+    /// Retrieves a mutable pointer to the inner data.
+    ///
+    /// This is especially useful when interfacing with low-level code or FFI where the caller
+    /// explicitly knows that it has exclusive access to the inner data. Note that reading through
+    /// this pointer is undefined behaviour until the value has been initialized or directly written to.
+    pub fn as_mut_ptr(&self) -> *mut T {
+        self.cell.as_mut_ptr()
+    }
+}
+
+impl<T, F: FnOnce() -> T, R: RelaxStrategy> Lazy<T, F, R> {
+    /// Forces the evaluation of this lazy value and
+    /// returns a reference to the result. This is equivalent
+    /// to the `Deref` impl, but is explicit.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin::Lazy;
+    ///
+    /// let lazy = Lazy::new(|| 92);
+    ///
+    /// assert_eq!(Lazy::force(&lazy), &92);
+    /// assert_eq!(&*lazy, &92);
+    /// ```
+    pub fn force(this: &Self) -> &T {
+        this.cell.call_once(|| match this.init.take() {
+            Some(f) => f(),
+            None => panic!("Lazy instance has previously been poisoned"),
+        })
+    }
+}
+
+impl<T, F: FnOnce() -> T, R: RelaxStrategy> Deref for Lazy<T, F, R> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        Self::force(self)
+    }
+}
+
+impl<T: Default, R> Default for Lazy<T, fn() -> T, R> {
+    /// Creates a new lazy value using `Default` as the initializing function.
+    fn default() -> Self {
+        Self::new(T::default)
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/spin/lib.rs.html b/src/spin/lib.rs.html
new file mode 100644
index 00000000..a1d03814
--- /dev/null
+++ b/src/spin/lib.rs.html
@@ -0,0 +1,377 @@
+lib.rs - source
+
+
+#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![deny(missing_docs)]
+
+//! This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock) versions of the
+//! primitives in `std::sync` and `std::lazy`. Because synchronization is done through spinning,
+//! the primitives are suitable for use in `no_std` environments.
+//!
+//! # Features
+//!
+//! - `Mutex`, `RwLock`, `Once`/`SyncOnceCell`, and `SyncLazy` equivalents
+//!
+//! - Support for `no_std` environments
+//!
+//! - [`lock_api`](https://crates.io/crates/lock_api) compatibility
+//!
+//! - Upgradeable `RwLock` guards
+//!
+//! - Guards can be sent and shared between threads
+//!
+//! - Guard leaking
+//!
+//! - Ticket locks
+//!
+//! - Different strategies for dealing with contention
+//!
+//! # Relationship with `std::sync`
+//!
+//! While `spin` is not a drop-in replacement for `std::sync` (and
+//! [should not be considered as such](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html)),
+//! an effort is made to keep this crate reasonably consistent with `std::sync`.
+//!
+//! Many of the types defined in this crate have 'additional capabilities' when compared to `std::sync`:
+//!
+//! - Because spinning does not depend on the thread-driven model of `std::sync`, guards ([`MutexGuard`],
+//!   [`RwLockReadGuard`], [`RwLockWriteGuard`], etc.) may be sent and shared between threads.
+//!
+//! - [`RwLockUpgradableGuard`] supports being upgraded into a [`RwLockWriteGuard`].
+//!
+//! - Guards support [leaking](https://doc.rust-lang.org/nomicon/leaking.html).
+//!
+//! - [`Once`] owns the value returned by its `call_once` initializer.
+//!
+//! - [`RwLock`] supports counting readers and writers.
+//!
+//! Conversely, the types in this crate do not have some of the features `std::sync` has:
+//!
+//! - Locks do not track [panic poisoning](https://doc.rust-lang.org/nomicon/poisoning.html).
+//!
+//! ## Feature flags
+//!
+//! The crate comes with a few feature flags that you may wish to use.
+//!
+//! - `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
+//!
+//! - `ticket_mutex` uses a ticket lock for the implementation of `Mutex`
+//!
+//! - `std` enables support for thread yielding instead of spinning
+
+#[cfg(any(test, feature = "std"))]
+extern crate core;
+
+#[cfg(feature = "barrier")]
+#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
+pub mod barrier;
+#[cfg(feature = "lazy")]
+#[cfg_attr(docsrs, doc(cfg(feature = "lazy")))]
+pub mod lazy;
+#[cfg(feature = "mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+pub mod mutex;
+#[cfg(feature = "once")]
+#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
+pub mod once;
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub mod rwlock;
+pub mod relax;
+
+#[cfg(feature = "mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+pub use mutex::MutexGuard;
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub use rwlock::RwLockReadGuard;
+pub use relax::{Spin, RelaxStrategy};
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+pub use relax::Yield;
+
+// Avoid confusing inference errors by aliasing away the relax strategy parameter. Users that need to use a different
+// relax strategy can do so by accessing the types through their fully-qualified path. This is a little bit horrible
+// but sadly adding a default type parameter is *still* a breaking change in Rust (for understandable reasons).
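+//
+// For example (a sketch, assuming the `mutex` feature is enabled), a user who
+// wants the `Loop` relax strategy can name it explicitly:
+//
+//     let lock: spin::mutex::Mutex<u32, spin::relax::Loop> = spin::mutex::Mutex::new(0);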
+
+/// A primitive that synchronizes the execution of multiple threads. See [`barrier::Barrier`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "barrier")]
+#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
+pub type Barrier = crate::barrier::Barrier;
+
+/// A value which is initialized on the first access. See [`lazy::Lazy`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "lazy")]
+#[cfg_attr(docsrs, doc(cfg(feature = "lazy")))]
+pub type Lazy<T, F = fn() -> T> = crate::lazy::Lazy<T, F>;
+
+/// A primitive that synchronizes the execution of multiple threads. See [`mutex::Mutex`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+pub type Mutex<T> = crate::mutex::Mutex<T>;
+
+/// A primitive that provides lazy one-time initialization. See [`once::Once`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "once")]
+#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
+pub type Once<T = ()> = crate::once::Once<T>;
+
+/// A lock that provides data access to either one writer or many readers. See [`rwlock::RwLock`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub type RwLock<T> = crate::rwlock::RwLock<T>;
+
+/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`]. See
+/// [`rwlock::RwLockUpgradableGuard`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub type RwLockUpgradableGuard<'a, T> = crate::rwlock::RwLockUpgradableGuard<'a, T>;
+
+/// A guard that provides mutable data access. See [`rwlock::RwLockWriteGuard`] for documentation.
+///
+/// A note for advanced users: this alias exists to avoid subtle type inference errors due to the default relax
+/// strategy type parameter. If you need a non-default relax strategy, use the fully-qualified path.
+#[cfg(feature = "rwlock")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+pub type RwLockWriteGuard<'a, T> = crate::rwlock::RwLockWriteGuard<'a, T>;
+
+/// Spin synchronisation primitives, but compatible with [`lock_api`](https://crates.io/crates/lock_api).
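+///
+/// A usage sketch (assuming the `lock_api` and `mutex` features are enabled):
+///
+/// ```ignore
+/// let data = spin::lock_api::Mutex::new(0u32);
+/// *data.lock() += 1;
+/// assert_eq!(*data.lock(), 1);
+/// ```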
+#[cfg(feature = "lock_api")]
+#[cfg_attr(docsrs, doc(cfg(feature = "lock_api")))]
+pub mod lock_api {
+    /// A lock that provides mutually exclusive data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "mutex")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+    pub type Mutex<T> = lock_api_crate::Mutex<crate::Mutex<()>, T>;
+
+    /// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "mutex")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
+    pub type MutexGuard<'a, T> = lock_api_crate::MutexGuard<'a, crate::Mutex<()>, T>;
+
+    /// A lock that provides data access to either one writer or many readers (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "rwlock")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+    pub type RwLock<T> = lock_api_crate::RwLock<crate::RwLock<()>, T>;
+
+    /// A guard that provides immutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "rwlock")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+    pub type RwLockReadGuard<'a, T> = lock_api_crate::RwLockReadGuard<'a, crate::RwLock<()>, T>;
+
+    /// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "rwlock")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+    pub type RwLockWriteGuard<'a, T> = lock_api_crate::RwLockWriteGuard<'a, crate::RwLock<()>, T>;
+
+    /// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`] (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+    #[cfg(feature = "rwlock")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+    pub type RwLockUpgradableReadGuard<'a, T> =
+        lock_api_crate::RwLockUpgradableReadGuard<'a, crate::RwLock<()>, T>;
+}
+
+
\ No newline at end of file
diff --git a/src/spin/mutex.rs.html b/src/spin/mutex.rs.html
new file mode 100644
index 00000000..d9c14964
--- /dev/null
+++ b/src/spin/mutex.rs.html
@@ -0,0 +1,653 @@
+mutex.rs - source
+
+
+//! Locks that have the same behaviour as a mutex.
+//!
+//! The [`Mutex`] in the root of the crate can be configured using the `ticket_mutex` feature.
+//! If it's enabled, [`TicketMutex`] and [`TicketMutexGuard`] will be re-exported as [`Mutex`]
+//! and [`MutexGuard`]; otherwise, [`SpinMutex`] and its guard will be re-exported.
+//!
+//! `ticket_mutex` is disabled by default.
+//!
+//! [`Mutex`]: ../struct.Mutex.html
+//! [`MutexGuard`]: ../struct.MutexGuard.html
+//! [`TicketMutex`]: ./struct.TicketMutex.html
+//! [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
+//! [`SpinMutex`]: ./struct.SpinMutex.html
+//! [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
+
+#[cfg(feature = "spin_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
+pub mod spin;
+#[cfg(feature = "spin_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "spin_mutex")))]
+pub use self::spin::{SpinMutex, SpinMutexGuard};
+
+#[cfg(feature = "ticket_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
+pub mod ticket;
+#[cfg(feature = "ticket_mutex")]
+#[cfg_attr(docsrs, doc(cfg(feature = "ticket_mutex")))]
+pub use self::ticket::{TicketMutex, TicketMutexGuard};
+
+use core::{
+    fmt,
+    ops::{Deref, DerefMut},
+};
+use crate::{RelaxStrategy, Spin};
+
+#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))]
+compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. One of these is required.");
+
+#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
+type InnerMutex<T, R> = self::spin::SpinMutex<T, R>;
+#[cfg(all(not(feature = "use_ticket_mutex"), feature = "spin_mutex"))]
+type InnerMutexGuard<'a, T> = self::spin::SpinMutexGuard<'a, T>;
+
+#[cfg(feature = "use_ticket_mutex")]
+type InnerMutex<T, R> = self::ticket::TicketMutex<T, R>;
+#[cfg(feature = "use_ticket_mutex")]
+type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>;
+
+/// A spin-based lock providing mutually exclusive access to data.
+///
+/// The implementation uses either a ticket mutex or a regular spin mutex depending on whether the `spin_mutex` or
+/// `ticket_mutex` feature flag is enabled.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::Mutex::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::Mutex::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// for _ in 0..thread_count {
+///     let my_barrier = barrier.clone();
+///     let my_lock = spin_mutex.clone();
+///     std::thread::spawn(move || {
+///         let mut guard = my_lock.lock();
+///         *guard += 1;
+///
+///         // Release the lock to prevent a deadlock
+///         drop(guard);
+///         my_barrier.wait();
+///     });
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+/// ```
+pub struct Mutex<T: ?Sized, R = Spin> {
+    inner: InnerMutex<T, R>,
+}
+
+unsafe impl<T: ?Sized + Send, R> Sync for Mutex<T, R> {}
+unsafe impl<T: ?Sized + Send, R> Send for Mutex<T, R> {}
+
+/// A generic guard that protects some data access and
+/// uses either a ticket lock or a normal spin mutex.
+///
+/// For more info see [`TicketMutexGuard`] or [`SpinMutexGuard`].
+///
+/// [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
+/// [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
+pub struct MutexGuard<'a, T: 'a + ?Sized> {
+    inner: InnerMutexGuard<'a, T>,
+}
+
+impl<T, R> Mutex<T, R> {
+    /// Creates a new [`Mutex`] wrapping the supplied data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use spin::Mutex;
+    ///
+    /// static MUTEX: Mutex<()> = Mutex::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = MUTEX.lock();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline(always)]
+    pub const fn new(value: T) -> Self {
+        Self { inner: InnerMutex::new(value) }
+    }
+
+    /// Consumes this [`Mutex`] and unwraps the underlying data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(42);
+    /// assert_eq!(42, lock.into_inner());
+    /// ```
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        self.inner.into_inner()
+    }
+}
+
+impl<T: ?Sized, R: RelaxStrategy> Mutex<T, R> {
+    /// Locks the [`Mutex`] and returns a guard that permits access to the inner data.
+    ///
+    /// The returned value may be dereferenced for data access,
+    /// and the lock will be released when the guard falls out of scope.
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(0);
+    /// {
+    ///     let mut data = lock.lock();
+    ///     // The lock is now locked and the data can be accessed
+    ///     *data += 1;
+    ///     // The lock is implicitly dropped at the end of the scope
+    /// }
+    /// ```
+    #[inline(always)]
+    pub fn lock(&self) -> MutexGuard<T> {
+        MutexGuard {
+            inner: self.inner.lock(),
+        }
+    }
+}
+
+impl<T: ?Sized, R> Mutex<T, R> {
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+    #[inline(always)]
+    pub fn is_locked(&self) -> bool {
+        self.inner.is_locked()
+    }
+
+    /// Force unlock this [`Mutex`].
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the lock is not held by the current
+    /// thread. However, this can be useful in some instances for exposing the
+    /// lock to FFI that doesn't know how to deal with RAII.
+    #[inline(always)]
+    pub unsafe fn force_unlock(&self) {
+        self.inner.force_unlock()
+    }
+
+    /// Try to lock this [`Mutex`], returning a lock guard if successful.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::Mutex::new(42);
+    ///
+    /// let maybe_guard = lock.try_lock();
+    /// assert!(maybe_guard.is_some());
+    ///
+    /// // `maybe_guard` is still held, so the second call fails
+    /// let maybe_guard2 = lock.try_lock();
+    /// assert!(maybe_guard2.is_none());
+    /// ```
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<MutexGuard<T>> {
+        self.inner
+            .try_lock()
+            .map(|guard| MutexGuard { inner: guard })
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the [`Mutex`] mutably, and a mutable reference is guaranteed to be exclusive in Rust,
+    /// no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As such,
+    /// this is a 'zero-cost' operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut lock = spin::Mutex::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.lock(), 10);
+    /// ```
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.inner.get_mut()
+    }
+}
+
+impl<T: ?Sized + fmt::Debug, R> fmt::Debug for Mutex<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&self.inner, f)
+    }
+}
+
+impl<T: ?Sized + Default, R> Default for Mutex<T, R> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T, R> From<T> for Mutex<T, R> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'a, T: ?Sized> MutexGuard<'a, T> {
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original [`Mutex`].
+    ///
+    /// ```
+    /// let mylock = spin::Mutex::new(0);
+    ///
+    /// let data: &mut i32 = spin::MutexGuard::leak(mylock.lock());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline(always)]
+    pub fn leak(this: Self) -> &'a mut T {
+        InnerMutexGuard::leak(this.inner)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        &*self.inner
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        &mut *self.inner
+    }
+}
+
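+// Bridge to the `lock_api` crate: a zero-sized `Mutex<()>` provides the raw
+// lock state, and `lock_api::Mutex` layers the user's data on top of it (see
+// the `lock_api` type aliases in lib.rs).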
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for Mutex<(), R> {
+    type GuardMarker = lock_api_crate::GuardSend;
+
+    const INIT: Self = Self::new(());
+
+    fn lock(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(Self::lock(self));
+    }
+
+    fn try_lock(&self) -> bool {
+        // Prevent guard destructor running
+        Self::try_lock(self).map(core::mem::forget).is_some()
+    }
+
+    unsafe fn unlock(&self) {
+        self.force_unlock();
+    }
+
+    fn is_locked(&self) -> bool {
+        self.inner.is_locked()
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/spin/mutex/spin.rs.html b/src/spin/mutex/spin.rs.html
new file mode 100644
index 00000000..1859c1b4
--- /dev/null
+++ b/src/spin/mutex/spin.rs.html
@@ -0,0 +1,1031 @@
+spin.rs - source
+
+
+//! A naïve spinning mutex.
+//!
+//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case
+//! latency is theoretically infinite.
+
+use core::{
+    cell::UnsafeCell,
+    fmt,
+    ops::{Deref, DerefMut},
+    sync::atomic::{AtomicBool, Ordering},
+    marker::PhantomData,
+};
+use crate::{RelaxStrategy, Spin};
+
+/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::mutex::SpinMutex::<_>::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// for _ in 0..thread_count {
+///     let my_barrier = barrier.clone();
+///     let my_lock = spin_mutex.clone();
+///     std::thread::spawn(move || {
+///         let mut guard = my_lock.lock();
+///         *guard += 1;
+///
+///         // Release the lock to prevent a deadlock
+///         drop(guard);
+///         my_barrier.wait();
+///     });
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+/// ```
+pub struct SpinMutex<T: ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    pub(crate) lock: AtomicBool,
+    data: UnsafeCell<T>,
+}
+
+/// A guard that provides mutable data access.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
+    lock: &'a AtomicBool,
+    data: &'a mut T,
+}
+
+// Same unsafe impls as `std::sync::Mutex`
+unsafe impl<T: ?Sized + Send> Sync for SpinMutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for SpinMutex<T> {}
+
+impl<T, R> SpinMutex<T, R> {
+    /// Creates a new [`SpinMutex`] wrapping the supplied data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use spin::mutex::SpinMutex;
+    ///
+    /// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = MUTEX.lock();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline(always)]
+    pub const fn new(data: T) -> Self {
+        SpinMutex {
+            lock: AtomicBool::new(false),
+            data: UnsafeCell::new(data),
+            phantom: PhantomData,
+        }
+    }
+
+    /// Consumes this [`SpinMutex`] and unwraps the underlying data.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
+    /// assert_eq!(42, lock.into_inner());
+    /// ```
+    #[inline(always)]
+    pub fn into_inner(self) -> T {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock.
+        let SpinMutex { data, .. } = self;
+        data.into_inner()
+    }
+
+    /// Returns a mutable pointer to the underlying data.
+    ///
+    /// This is mostly meant to be used for applications that require manual unlocking, but where
+    /// storing both the lock and a pointer to the inner data would be inefficient.
+    ///
+    /// # Example
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
+    ///
+    /// unsafe {
+    ///     core::mem::forget(lock.lock());
+    ///     
+    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
+    ///     lock.as_mut_ptr().write(58);
+    ///
+    ///     lock.force_unlock();
+    /// }
+    ///
+    /// assert_eq!(*lock.lock(), 58);
+    ///
+    /// ```
+    #[inline(always)]
+    pub fn as_mut_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<T: ?Sized, R: RelaxStrategy> SpinMutex<T, R> {
+    /// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
+    ///
+    /// The returned value may be dereferenced for data access
+    /// and the lock will be dropped when the guard falls out of scope.
+    ///
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(0);
+    /// {
+    ///     let mut data = lock.lock();
+    ///     // The lock is now locked and the data can be accessed
+    ///     *data += 1;
+    ///     // The lock is implicitly dropped at the end of the scope
+    /// }
+    /// ```
+    #[inline(always)]
+    pub fn lock(&self) -> SpinMutexGuard<T> {
+        // `compare_exchange_weak` can fail spuriously even when the lock is free, but it may
+        // compile to more efficient code than the strong variant when retried in a loop, as here.
+        while self.lock.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
+            // Wait until the lock looks unlocked before retrying
+            while self.is_locked() {
+                R::relax();
+            }
+        }
+
+        SpinMutexGuard {
+            lock: &self.lock,
+            data: unsafe { &mut *self.data.get() },
+        }
+    }
+}
+
+impl<T: ?Sized, R> SpinMutex<T, R> {
+    /// Returns `true` if the lock is currently held.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+    #[inline(always)]
+    pub fn is_locked(&self) -> bool {
+        self.lock.load(Ordering::Relaxed)
+    }
+
+    /// Force unlock this [`SpinMutex`].
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the lock is not held by the current
+    /// thread. However, this can be useful in some instances for exposing the
+    /// lock to FFI that doesn't know how to deal with RAII.
+    #[inline(always)]
+    pub unsafe fn force_unlock(&self) {
+        self.lock.store(false, Ordering::Release);
+    }
+
+    /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
+    ///
+    /// let maybe_guard = lock.try_lock();
+    /// assert!(maybe_guard.is_some());
+    ///
+    /// // `maybe_guard` is still held, so the second call fails
+    /// let maybe_guard2 = lock.try_lock();
+    /// assert!(maybe_guard2.is_none());
+    /// ```
+    #[inline(always)]
+    pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
+        // The reason for using a strong compare_exchange is explained here:
+        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
+        if self.lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok() {
+            Some(SpinMutexGuard {
+                lock: &self.lock,
+                data: unsafe { &mut *self.data.get() },
+            })
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
+    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
+    /// such, this is a 'zero-cost' operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut lock = spin::mutex::SpinMutex::<_>::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.lock(), 10);
+    /// ```
+    #[inline(always)]
+    pub fn get_mut(&mut self) -> &mut T {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner mutex.
+        unsafe { &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug, R> fmt::Debug for SpinMutex<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => write!(f, "Mutex {{ data: ")
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Mutex {{ <locked> }}"),
+        }
+    }
+}
+
+impl<T: ?Sized + Default, R> Default for SpinMutex<T, R> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T, R> From<T> for SpinMutex<T, R> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original [`SpinMutex`].
+    ///
+    /// ```
+    /// let mylock = spin::mutex::SpinMutex::<_>::new(0);
+    ///
+    /// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline(always)]
+    pub fn leak(this: Self) -> &'a mut T {
+        let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
+        core::mem::forget(this);
+        unsafe { &mut *data }
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        self.data
+    }
+}
+
+impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        self.data
+    }
+}
+
+impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
+    /// Dropping the [`SpinMutexGuard`] releases the lock it was created from.
+    fn drop(&mut self) {
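+        // This `Release` store pairs with the `Acquire` orderings in `lock`/`try_lock`,
+        // making writes performed while the guard was held visible to the next acquirer.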
+        self.lock.store(false, Ordering::Release);
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for SpinMutex<(), R> {
+    type GuardMarker = lock_api_crate::GuardSend;
+
+    const INIT: Self = Self::new(());
+
+    fn lock(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(Self::lock(self));
+    }
+
+    fn try_lock(&self) -> bool {
+        // Prevent guard destructor running
+        Self::try_lock(self).map(core::mem::forget).is_some()
+    }
+
+    unsafe fn unlock(&self) {
+        self.force_unlock();
+    }
+
+    fn is_locked(&self) -> bool {
+        Self::is_locked(self)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
+
+    type SpinMutex<T> = super::SpinMutex<T>;
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
+    #[test]
+    fn smoke() {
+        let m = SpinMutex::<_>::new(());
+        drop(m.lock());
+        drop(m.lock());
+    }
+
+    #[test]
+    fn lots_and_lots() {
+        static M: SpinMutex<()> = SpinMutex::<_>::new(());
+        static mut CNT: u32 = 0;
+        const J: u32 = 1000;
+        const K: u32 = 3;
+
+        fn inc() {
+            for _ in 0..J {
+                unsafe {
+                    let _g = M.lock();
+                    CNT += 1;
+                }
+            }
+        }
+
+        let (tx, rx) = channel();
+        for _ in 0..K {
+            let tx2 = tx.clone();
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
+            let tx2 = tx.clone();
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
+        }
+
+        drop(tx);
+        for _ in 0..2 * K {
+            rx.recv().unwrap();
+        }
+        assert_eq!(unsafe { CNT }, J * K * 2);
+    }
+
+    #[test]
+    fn try_lock() {
+        let mutex = SpinMutex::<_>::new(42);
+
+        // First lock succeeds
+        let a = mutex.try_lock();
+        assert_eq!(a.as_ref().map(|r| **r), Some(42));
+
+        // An additional lock attempt fails while the first guard is held
+        let b = mutex.try_lock();
+        assert!(b.is_none());
+
+        // After dropping lock, it succeeds again
+        ::core::mem::drop(a);
+        let c = mutex.try_lock();
+        assert_eq!(c.as_ref().map(|r| **r), Some(42));
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let m = SpinMutex::<_>::new(NonCopy(10));
+        assert_eq!(m.into_inner(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_mutex_arc_nested() {
+        // Tests nested mutexes and access
+        // to underlying data.
+        let arc = Arc::new(SpinMutex::<_>::new(1));
+        let arc2 = Arc::new(SpinMutex::<_>::new(arc));
+        let (tx, rx) = channel();
+        let _t = thread::spawn(move || {
+            let lock = arc2.lock();
+            let lock2 = lock.lock();
+            assert_eq!(*lock2, 1);
+            tx.send(()).unwrap();
+        });
+        rx.recv().unwrap();
+    }
+
+    #[test]
+    fn test_mutex_arc_access_in_unwind() {
+        let arc = Arc::new(SpinMutex::<_>::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<SpinMutex<i32>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    *self.i.lock() += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = arc.lock();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_mutex_unsized() {
+        let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
+        {
+            let b = &mut *mutex.lock();
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*mutex.lock(), comp);
+    }
+
+    #[test]
+    fn test_mutex_force_lock() {
+        let lock = SpinMutex::<_>::new(());
+        ::std::mem::forget(lock.lock());
+        unsafe {
+            lock.force_unlock();
+        }
+        assert!(lock.try_lock().is_some());
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/spin/once.rs.html b/src/spin/once.rs.html
new file mode 100644
index 00000000..c4ca4258
--- /dev/null
+++ b/src/spin/once.rs.html
@@ -0,0 +1,1269 @@
+once.rs - source
+
+//! Synchronization primitives for one-time evaluation.
+
+use core::{
+    cell::UnsafeCell,
+    mem::MaybeUninit,
+    sync::atomic::{AtomicU8, Ordering},
+    marker::PhantomData,
+    fmt,
+};
+use crate::{RelaxStrategy, Spin};
+
+/// A primitive that provides lazy one-time initialization.
+///
+/// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
+/// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
+/// `Once`).
+///
+/// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// static START: spin::Once = spin::Once::new();
+///
+/// START.call_once(|| {
+///     // run initialization here
+/// });
+/// ```
+pub struct Once<T = (), R = Spin> {
+    phantom: PhantomData<R>,
+    status: AtomicStatus,
+    data: UnsafeCell<MaybeUninit<T>>,
+}
+
+impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.get() {
+            Some(s) => write!(f, "Once {{ data: ")
+                .and_then(|()| s.fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Once {{ <uninitialized> }}")
+        }
+    }
+}
+
+// Same unsafe impls as `std::sync::RwLock`, because this also allows for
+// concurrent reads.
+unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
+unsafe impl<T: Send, R> Send for Once<T, R> {}
+
+mod status {
+    use super::*;
+
+    // SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
+    // a value for which there exists a valid Status. This means that users of this API must only
+    // be allowed to load and store `Status`es.
+    #[repr(transparent)]
+    pub struct AtomicStatus(AtomicU8);
+
+    // Four states that a Once can be in, encoded into the lower bits of `status` in
+    // the Once structure.
+    #[repr(u8)]
+    #[derive(Clone, Copy, Debug, PartialEq)]
+    pub enum Status {
+        Incomplete = 0x00,
+        Running = 0x01,
+        Complete = 0x02,
+        Panicked = 0x03,
+    }
+    impl Status {
+        // Construct a status from an inner u8 integer.
+        //
+        // # Safety
+        //
+        // For this to be safe, the inner number must have a valid corresponding enum variant.
+        unsafe fn new_unchecked(inner: u8) -> Self {
+            core::mem::transmute(inner)
+        }
+    }
+
+    impl AtomicStatus {
+        #[inline(always)]
+        pub const fn new(status: Status) -> Self {
+            // SAFETY: We got the value directly from status, so transmuting back is fine.
+            Self(AtomicU8::new(status as u8))
+        }
+        #[inline(always)]
+        pub fn load(&self, ordering: Ordering) -> Status {
+            // SAFETY: We know that the inner integer must have been constructed from a Status in
+            // the first place.
+            unsafe { Status::new_unchecked(self.0.load(ordering)) }
+        }
+        #[inline(always)]
+        pub fn store(&self, status: Status, ordering: Ordering) {
+            // SAFETY: While not directly unsafe, this is safe because the value was retrieved from
+            // a status, thus making transmutation safe.
+            self.0.store(status as u8, ordering);
+        }
+        #[inline(always)]
+        pub fn compare_exchange(&self, old: Status, new: Status, success: Ordering, failure: Ordering) -> Result<Status, Status> {
+            match self.0.compare_exchange(old as u8, new as u8, success, failure) {
+                // SAFETY: A compare exchange will always return a value that was later stored into
+                // the atomic u8, but due to the invariant that it must be a valid Status, we know
+                // that both Ok(_) and Err(_) will be safely transmutable.
+
+                Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
+            Err(err) => Err(unsafe { Status::new_unchecked(err) }),
+            }
+        }
+        #[inline(always)]
+        pub fn get_mut(&mut self) -> &mut Status {
+            // SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
+            // it to a &mut Status.
+            unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
+        }
+    }
+}
+use self::status::{Status, AtomicStatus};
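+
+// The status transitions are monotonic: a `Once` only ever moves forward through these
+// states and never returns to `Incomplete`:
+//
+//     Incomplete --(call_once)--> Running --(closure returns)--> Complete
+//                                    \-----(closure panics)----> Panicked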
+
+use core::hint::unreachable_unchecked as unreachable;
+
+impl<T, R: RelaxStrategy> Once<T, R> {
+    /// Performs an initialization routine once and only once. The given closure
+    /// will be executed if this is the first time `call_once` has been called,
+    /// and otherwise the routine will *not* be invoked.
+    ///
+    /// This method will block the calling thread if another initialization
+    /// routine is currently running.
+    ///
+    /// When this function returns, it is guaranteed that some initialization
+    /// has run and completed (it may not be the closure specified). The
+    /// returned pointer will point to the result from the closure that was
+    /// run.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the [`Once`] previously panicked while attempting
+    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+    /// primitives.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// static INIT: spin::Once<usize> = spin::Once::new();
+    ///
+    /// fn get_cached_val() -> usize {
+    ///     *INIT.call_once(expensive_computation)
+    /// }
+    ///
+    /// fn expensive_computation() -> usize {
+    ///     // ...
+    /// # 2
+    /// }
+    /// ```
+    pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
+        // SAFETY: We perform an Acquire load because if this were to return COMPLETE, then we need
+        // the preceding stores done while initializing, to become visible after this load.
+        let mut status = self.status.load(Ordering::Acquire);
+
+        if status == Status::Incomplete {
+            match self.status.compare_exchange(
+                Status::Incomplete,
+                Status::Running,
+                // SAFETY: Success ordering: We do not have to synchronize any data at all, as the
+                // value is at this point uninitialized, so Relaxed is technically sufficient. We
+                // will however have to do a Release store later. However, the success ordering
+                // must always be at least as strong as the failure ordering, so we choose Acquire
+                // here anyway.
+                Ordering::Acquire,
+                // SAFETY: Failure ordering: While we have already loaded the status initially, we
+                // know that if some other thread would have fully initialized this in between,
+                // then there will be new not-yet-synchronized accesses done during that
+                // initialization that would not have been synchronized by the earlier load. Thus
+                // we use Acquire to ensure when we later call force_get() in the last match
+                // statement, if the status was changed to COMPLETE, that those accesses will become
+                // visible to us.
+                Ordering::Acquire,
+            ) {
+                Ok(_must_be_state_incomplete) => {
+                    // The compare-exchange succeeded, so we shall initialize it.
+
+                    // We use a guard (Finish) to catch panics caused by builder
+                    let finish = Finish { status: &self.status };
+                    unsafe {
+                        // SAFETY:
+                        // `UnsafeCell`/deref: currently the only accessor, mutably
+                        // and immutably by cas exclusion.
+                        // `write`: pointer comes from `MaybeUninit`.
+                        (*self.data.get()).as_mut_ptr().write(f())
+                    };
+                    // If there were to be a panic with unwind enabled, the code would
+                    // short-circuit and never reach the point where it writes the inner data.
+                    // The destructor for Finish will run, and poison the Once to ensure that other
+                    // threads accessing it do not exhibit unwanted behavior, if there were to be
+                    // any inconsistency in data structures caused by the panicking thread.
+                    //
+                    // However, f() is expected in the general case not to panic. In that case, we
+                    // simply forget the guard, bypassing its destructor. We could theoretically
+                    // clear a flag instead, but this eliminates the call to the destructor at
+                    // compile time, and unconditionally poisons during an eventual panic, if
+                    // unwinding is enabled.
+                    core::mem::forget(finish);
+
+                    // SAFETY: Release is required here, so that all memory accesses done in the
+                    // closure when initializing, become visible to other threads that perform Acquire
+                    // loads.
+                    //
+                    // And, we also know that the changes this thread has done will not magically
+                    // disappear from our cache, so it does not need to be AcqRel.
+                    self.status.store(Status::Complete, Ordering::Release);
+
+                    // This next line is mainly an optimization.
+                    return unsafe { self.force_get() };
+                }
+                // The compare-exchange failed, so we know for a fact that the status cannot be
+                // INCOMPLETE, or it would have succeeded.
+                Err(other_status) => status = other_status,
+            }
+        }
+
+        match status {
+            // SAFETY: We have either checked with an Acquire load, that the status is COMPLETE, or
+            // initialized it ourselves, in which case no additional synchronization is needed.
+            Status::Complete => unsafe { self.force_get() },
+            Status::Panicked => panic!("Once panicked"),
+            Status::Running => self
+                .poll()
+                .unwrap_or_else(|| {
+                    if cfg!(debug_assertions) {
+                        unreachable!("Encountered INCOMPLETE when polling Once")
+                    } else {
+                        // SAFETY: This poll is guaranteed never to fail because the API of poll
+                        // promises spinning if initialization is in progress. We've already
+                        // checked that initialisation is in progress, and initialisation is
+                        // monotonic: once done, it cannot be undone. We also fetched the status
+                        // with Acquire semantics, thereby guaranteeing that the later-executed
+                        // poll will also agree with us that initialization is in progress. Ergo,
+                        // this poll cannot fail.
+                        unsafe {
+                            unreachable();
+                        }
+                    }
+                }),
+
+            // SAFETY: The only invariant possible in addition to the aforementioned ones at the
+            // moment, is INCOMPLETE. However, the only way for this match statement to be
+            // reached, is if we lost the CAS (otherwise we would have returned early), in
+            // which case we know for a fact that the state cannot be changed back to INCOMPLETE as
+            // `Once`s are monotonic.
+            Status::Incomplete => unsafe { unreachable() },
+        }
+
+    }
+
+    /// Spins until the [`Once`] contains a value.
+    ///
+    /// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the [`Once`] previously panicked while attempting
+    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+    /// primitives.
+    pub fn wait(&self) -> &T {
+        loop {
+            match self.poll() {
+                Some(x) => break x,
+                None => R::relax(),
+            }
+        }
+    }
+
+    /// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
+    /// initialized. If initialization has not even begun, `None` will be returned.
+    ///
+    /// Note that in releases prior to `0.7`, this function was named `wait`.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the [`Once`] previously panicked while attempting
+    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+    /// primitives.
+    pub fn poll(&self) -> Option<&T> {
+        loop {
+            // SAFETY: Acquire is safe here, because if the status is COMPLETE, then we want to make
+            // sure that all memory accessed done while initializing that value, are visible when
+            // we return a reference to the inner data after this load.
+            match self.status.load(Ordering::Acquire) {
+                Status::Incomplete => return None,
+                Status::Running => R::relax(), // We spin
+                Status::Complete => return Some(unsafe { self.force_get() }),
+                Status::Panicked => panic!("Once previously poisoned by a panicked"),
+            }
+        }
+    }
+}
+
+impl<T, R> Once<T, R> {
+    /// Initialization constant of [`Once`].
+    #[allow(clippy::declare_interior_mutable_const)]
+    pub const INIT: Self = Self {
+        phantom: PhantomData,
+        status: AtomicStatus::new(Status::Incomplete),
+        data: UnsafeCell::new(MaybeUninit::uninit()),
+    };
+
+    /// Creates a new [`Once`].
+    pub const fn new() -> Self {
+        Self::INIT
+    }
+
+    /// Creates a new initialized [`Once`].
+    pub const fn initialized(data: T) -> Self {
+        Self {
+            phantom: PhantomData,
+            status: AtomicStatus::new(Status::Complete),
+            data: UnsafeCell::new(MaybeUninit::new(data)),
+        }
+    }
+
+    /// Retrieve a pointer to the inner data.
+    ///
+    /// While this method itself is safe, accessing the pointer before the [`Once`] has been
+    /// initialized is UB, unless the data has already been initialized by writing through a
+    /// pointer previously returned by this method.
+    pub fn as_mut_ptr(&self) -> *mut T {
+        // SAFETY:
+        // * MaybeUninit<T> always has exactly the same layout as T
+        self.data.get().cast::<T>()
+    }
+
+    /// Get a reference to the initialized instance. Must only be called once COMPLETE.
+    unsafe fn force_get(&self) -> &T {
+        // SAFETY:
+        // * `UnsafeCell`/inner deref: data never changes again
+        // * `MaybeUninit`/outer deref: data was initialized
+        &*(*self.data.get()).as_ptr()
+    }
+
+    /// Get a reference to the initialized instance. Must only be called once COMPLETE.
+    unsafe fn force_get_mut(&mut self) -> &mut T {
+        // SAFETY:
+        // * `UnsafeCell`/inner deref: data never changes again
+        // * `MaybeUninit`/outer deref: data was initialized
+        &mut *(*self.data.get()).as_mut_ptr()
+    }
+
+    /// Get a reference to the initialized instance. Must only be called once COMPLETE.
+    unsafe fn force_into_inner(self) -> T {
+        // SAFETY:
+        // * `UnsafeCell`/inner deref: data never changes again
+        // * `MaybeUninit`/outer deref: data was initialized
+        (*self.data.get()).as_ptr().read()
+    }
+
+    /// Returns a reference to the inner value if the [`Once`] has been initialized.
+    pub fn get(&self) -> Option<&T> {
+        // SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
+        // nonatomic stores done when initializing, once we have loaded and checked the status.
+        match self.status.load(Ordering::Acquire) {
+            Status::Complete => Some(unsafe { self.force_get() }),
+            _ => None,
+        }
+    }
+
+    /// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
+    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
+    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
+    /// checking initialization is unacceptable and the `Once` has already been initialized.
+    pub unsafe fn get_unchecked(&self) -> &T {
+        debug_assert_eq!(
+            self.status.load(Ordering::SeqCst),
+            Status::Complete,
+            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
+        );
+        self.force_get()
+    }
+
+    /// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
+    ///
+    /// Because this method requires a mutable reference to the [`Once`], no synchronization
+    /// overhead is required to access the inner value. In effect, it is zero-cost.
+    pub fn get_mut(&mut self) -> Option<&mut T> {
+        match *self.status.get_mut() {
+            Status::Complete => Some(unsafe { self.force_get_mut() }),
+            _ => None,
+        }
+    }
+
+    /// Returns the inner value if the [`Once`] has been initialized.
+    ///
+    /// Because this method requires ownership of the [`Once`], no synchronization overhead
+    /// is required to access the inner value. In effect, it is zero-cost.
+    pub fn try_into_inner(mut self) -> Option<T> {
+        match *self.status.get_mut() {
+            Status::Complete => Some(unsafe { self.force_into_inner() }),
+            _ => None,
+        }
+    }
+
+    /// Checks whether the value has been initialized.
+    ///
+    /// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
+    /// therefore it is safe to access the value directly via
+    /// [`get_unchecked`](Self::get_unchecked) if this returns true.
+    pub fn is_completed(&self) -> bool {
+        // TODO: Add a similar variant for Relaxed?
+        self.status.load(Ordering::Acquire) == Status::Complete
+    }
+}
+
+impl<T, R> From<T> for Once<T, R> {
+    fn from(data: T) -> Self {
+        Self::initialized(data)
+    }
+}
+
+impl<T, R> Drop for Once<T, R> {
+    fn drop(&mut self) {
+        // No need to do any atomic access here, we have &mut!
+        if *self.status.get_mut() == Status::Complete {
+            unsafe {
+                //TODO: Use MaybeUninit::assume_init_drop once stabilised
+                core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
+            }
+        }
+    }
+}
+
+struct Finish<'a> {
+    status: &'a AtomicStatus,
+}
+
+impl<'a> Drop for Finish<'a> {
+    fn drop(&mut self) {
+        // While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
+        // This is mainly because panics are not meant to be fast at all, but also because if
+        // there were to be a compiler bug which reorders accesses within the same thread,
+        // where it should not, we want to be sure that the panic really is handled, and does
+        // not cause additional problems. SeqCst will therefore help guarding against such
+        // bugs.
+        self.status.store(Status::Panicked, Ordering::SeqCst);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::mpsc::channel;
+    use std::thread;
+
+    use super::*;
+
+    #[test]
+    fn smoke_once() {
+        static O: Once = Once::new();
+        let mut a = 0;
+        O.call_once(|| a += 1);
+        assert_eq!(a, 1);
+        O.call_once(|| a += 1);
+        assert_eq!(a, 1);
+    }
+
+    #[test]
+    fn smoke_once_value() {
+        static O: Once<usize> = Once::new();
+        let a = O.call_once(|| 1);
+        assert_eq!(*a, 1);
+        let b = O.call_once(|| 2);
+        assert_eq!(*b, 1);
+    }
+
+    #[test]
+    fn stampede_once() {
+        static O: Once = Once::new();
+        static mut RUN: bool = false;
+
+        let (tx, rx) = channel();
+        for _ in 0..10 {
+            let tx = tx.clone();
+            thread::spawn(move || {
+                for _ in 0..4 { thread::yield_now() }
+                unsafe {
+                    O.call_once(|| {
+                        assert!(!RUN);
+                        RUN = true;
+                    });
+                    assert!(RUN);
+                }
+                tx.send(()).unwrap();
+            });
+        }
+
+        unsafe {
+            O.call_once(|| {
+                assert!(!RUN);
+                RUN = true;
+            });
+            assert!(RUN);
+        }
+
+        for _ in 0..10 {
+            rx.recv().unwrap();
+        }
+    }
+
+    #[test]
+    fn get() {
+        static INIT: Once<usize> = Once::new();
+
+        assert!(INIT.get().is_none());
+        INIT.call_once(|| 2);
+        assert_eq!(INIT.get().map(|r| *r), Some(2));
+    }
+
+    #[test]
+    fn get_no_wait() {
+        static INIT: Once<usize> = Once::new();
+
+        assert!(INIT.get().is_none());
+        thread::spawn(move || {
+            INIT.call_once(|| loop { });
+        });
+        assert!(INIT.get().is_none());
+    }
+
+
+    #[test]
+    fn poll() {
+        static INIT: Once<usize> = Once::new();
+
+        assert!(INIT.poll().is_none());
+        INIT.call_once(|| 3);
+        assert_eq!(INIT.poll().map(|r| *r), Some(3));
+    }
+
+
+    #[test]
+    fn wait() {
+        static INIT: Once<usize> = Once::new();
+
+        std::thread::spawn(|| {
+            assert_eq!(*INIT.wait(), 3);
+            assert!(INIT.is_completed());
+        });
+
+        for _ in 0..4 { thread::yield_now() }
+
+        assert!(INIT.poll().is_none());
+        INIT.call_once(|| 3);
+    }
+
+    #[test]
+    fn panic() {
+        use ::std::panic;
+
+        static INIT: Once = Once::new();
+
+        // poison the once
+        let t = panic::catch_unwind(|| {
+            INIT.call_once(|| panic!());
+        });
+        assert!(t.is_err());
+
+        // poisoning propagates
+        let t = panic::catch_unwind(|| {
+            INIT.call_once(|| {});
+        });
+        assert!(t.is_err());
+    }
+
+    #[test]
+    fn init_constant() {
+        static O: Once = Once::INIT;
+        let mut a = 0;
+        O.call_once(|| a += 1);
+        assert_eq!(a, 1);
+        O.call_once(|| a += 1);
+        assert_eq!(a, 1);
+    }
+
+    static mut CALLED: bool = false;
+
+    struct DropTest {}
+
+    impl Drop for DropTest {
+        fn drop(&mut self) {
+            unsafe {
+                CALLED = true;
+            }
+        }
+    }
+
+    #[test]
+    fn drop_occurs() {
+        unsafe {
+            CALLED = false;
+        }
+
+        {
+            let once = Once::<_>::new();
+            once.call_once(|| DropTest {});
+        }
+
+        assert!(unsafe {
+            CALLED
+        });
+    }
+
+    #[test]
+    fn skip_uninit_drop() {
+        unsafe {
+            CALLED = false;
+        }
+
+        let once = Once::<DropTest>::new();
+        drop(once);
+
+        assert!(unsafe {
+            !CALLED
+        });
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/spin/relax.rs.html b/src/spin/relax.rs.html
new file mode 100644
index 00000000..5900bcc9
--- /dev/null
+++ b/src/spin/relax.rs.html
@@ -0,0 +1,121 @@
+relax.rs - source
+
+//! Strategies that determine the behaviour of locks when encountering contention.
+
+/// A trait implemented by spinning relax strategies.
+pub trait RelaxStrategy {
+    /// Perform the relaxing operation during a period of contention.
+    fn relax();
+}
+
+/// A strategy that rapidly spins while informing the CPU that it should power down non-essential components via
+/// [`core::hint::spin_loop`].
+///
+/// Note that spinning is a 'dumb' strategy and most schedulers cannot correctly differentiate it from useful work,
+/// thereby misallocating even more CPU time to the spinning process. This is known as
+/// ['priority inversion'](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html).
+///
+/// If you see signs that priority inversion is occurring, consider switching to [`Yield`] or, even better, not using a
+/// spinlock at all and opting for a proper scheduler-aware lock. Remember also that different targets, operating
+/// systems, schedulers, and even the same scheduler with different workloads will exhibit different behaviour. Just
+/// because priority inversion isn't occurring in your tests does not mean that it will not occur. Use a scheduler-
+/// aware lock if at all possible.
+pub struct Spin;
+
+impl RelaxStrategy for Spin {
+    #[inline(always)]
+    fn relax() {
+        core::hint::spin_loop();
+    }
+}
+
+/// A strategy that yields the current time slice to the scheduler in favour of other threads or processes.
+///
+/// This is generally used as a strategy for minimising power consumption and priority inversion on targets that have a
+/// standard library available. Note that such targets have scheduler-integrated concurrency primitives available, and
+/// you should generally use these instead, except in rare circumstances.
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+pub struct Yield;
+
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+impl RelaxStrategy for Yield {
+    #[inline(always)]
+    fn relax() {
+        std::thread::yield_now();
+    }
+}
+
+/// A strategy that rapidly spins, without telling the CPU to do any powering down.
+///
+/// You almost certainly do not want to use this. Use [`Spin`] instead. It exists for completeness and for targets
+/// that, for some reason, miscompile or do not support spin hint intrinsics despite attempting to generate code for
+/// them (i.e., this is a workaround for possible compiler bugs).
+pub struct Loop;
+
+impl RelaxStrategy for Loop {
+    #[inline(always)]
+    fn relax() {}
+}
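+
+// Illustrative sketch (not part of the original file): the crate's locks are generic over a
+// `RelaxStrategy`, so a strategy is selected with a type parameter rather than at runtime.
+// This assumes the `spin_mutex` feature (enabled by default), which provides `SpinMutex`.
+#[cfg(all(test, feature = "spin_mutex"))]
+mod example {
+    use super::Loop;
+
+    #[test]
+    fn strategy_is_a_type_parameter() {
+        // A spinlock that busy-waits without issuing spin-loop hints.
+        let lock = crate::mutex::SpinMutex::<u32, Loop>::new(0);
+        *lock.lock() += 1;
+        assert_eq!(*lock.lock(), 1);
+    }
+}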
+
+
\ No newline at end of file
diff --git a/src/spin/rwlock.rs.html b/src/spin/rwlock.rs.html
new file mode 100644
index 00000000..ad2d191a
--- /dev/null
+++ b/src/spin/rwlock.rs.html
@@ -0,0 +1,2245 @@
+rwlock.rs - source
+
+//! A lock that provides data access to either one writer or many readers.
+
+use core::{
+    cell::UnsafeCell,
+    ops::{Deref, DerefMut},
+    sync::atomic::{AtomicUsize, Ordering},
+    marker::PhantomData,
+    fmt,
+    mem,
+};
+use crate::{RelaxStrategy, Spin};
+
+/// A lock that provides data access to either one writer or many readers.
+///
+/// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses
+/// spinning for synchronisation instead. Unlike its namesake, this lock does not
+/// track lock poisoning.
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
+/// allow concurrent access through readers. The RAII guards returned from the
+/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
+/// to allow access to the contents of the lock.
+///
+/// An [`RwLockUpgradableGuard`](RwLockUpgradableGuard) can be upgraded to a
+/// writable guard through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) or
+/// [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions.
+/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
+/// functions.
+///
+/// Based on Facebook's
+/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
+/// This implementation is unfair to writers - if the lock always has readers, then no writers will
+/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no
+/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
+/// when there are existing readers. However, if the lock is highly contended and writes are
+/// crucial, then this implementation may be a poor choice.
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::RwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+///     let r1 = lock.read();
+///     let r2 = lock.read();
+///     assert_eq!(*r1, 5);
+///     assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+///     let mut w = lock.write();
+///     *w += 1;
+///     assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// ```
+pub struct RwLock<T: ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    lock: AtomicUsize,
+    data: UnsafeCell<T>,
+}
+
+const READER: usize = 1 << 2;
+const UPGRADED: usize = 1 << 1;
+const WRITER: usize = 1;
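+
+// Layout of the lock word: bit 0 flags a held writer, bit 1 flags a held upgradeable guard,
+// and the remaining upper bits count active readers (each reader adds `READER`, i.e. 1 << 2).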
+
+/// A guard that provides immutable data access.
+///
+/// When the guard falls out of scope it will decrement the read count,
+/// potentially releasing the lock.
+pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
+    lock: &'a AtomicUsize,
+    data: &'a T,
+}
+
+/// A guard that provides mutable data access.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    inner: &'a RwLock<T, R>,
+    data: &'a mut T,
+}
+
+/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`].
+///
+/// No writers or other upgradeable guards can exist while this is in scope. New reader
+/// creation is prevented (to alleviate writer starvation) but there may be existing readers
+/// when the lock is acquired.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> {
+    phantom: PhantomData<R>,
+    inner: &'a RwLock<T, R>,
+    data: &'a T,
+}
+
+// Same unsafe impls as `std::sync::RwLock`
+unsafe impl<T: ?Sized + Send, R> Send for RwLock<T, R> {}
+unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLock<T, R> {}
+
+impl<T, R> RwLock<T, R> {
+    /// Creates a new spinlock wrapping the supplied data.
+    ///
+    /// May be used statically:
+    ///
+    /// ```
+    /// use spin;
+    ///
+    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
+    ///
+    /// fn demo() {
+    ///     let lock = RW_LOCK.read();
+    ///     // do something with lock
+    ///     drop(lock);
+    /// }
+    /// ```
+    #[inline]
+    pub const fn new(data: T) -> Self {
+        RwLock {
+            phantom: PhantomData,
+            lock: AtomicUsize::new(0),
+            data: UnsafeCell::new(data),
+        }
+    }
+
+    /// Consumes this `RwLock`, returning the underlying data.
+    #[inline]
+    pub fn into_inner(self) -> T {
+        // We know statically that there are no outstanding references to
+        // `self` so there's no need to lock.
+        let RwLock { data, .. } = self;
+        data.into_inner()
+    }
+
+    /// Returns a mutable pointer to the underlying data.
+    ///
+    /// This is mostly meant to be used for applications which require manual unlocking, but where
+    /// storing both the lock and the pointer to the inner data would be inefficient.
+    ///
+    /// While this is safe, writing to the data is undefined behavior unless the current thread has
+    /// acquired a write lock, and reading requires either a read or write lock.
+    ///
+    /// # Example
+    /// ```
+    /// let lock = spin::RwLock::new(42);
+    ///
+    /// unsafe {
+    ///     core::mem::forget(lock.write());
+    ///
+    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
+    ///     lock.as_mut_ptr().write(58);
+    ///
+    ///     lock.force_write_unlock();
+    /// }
+    ///
+    /// assert_eq!(*lock.read(), 58);
+    ///
+    /// ```
+    #[inline(always)]
+    pub fn as_mut_ptr(&self) -> *mut T {
+        self.data.get()
+    }
+}
+
+impl<T: ?Sized, R: RelaxStrategy> RwLock<T, R> {
+    /// Locks this rwlock with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns. This method does not provide any guarantees with
+    /// respect to the ordering of whether contending readers or writers will
+    /// acquire the lock first.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// {
+    ///     let mut data = mylock.read();
+    ///     // The lock is now locked and the data can be read
+    ///     println!("{}", *data);
+    ///     // The lock is dropped
+    /// }
+    /// ```
+    #[inline]
+    pub fn read(&self) -> RwLockReadGuard<T> {
+        loop {
+            match self.try_read() {
+                Some(guard) => return guard,
+                None => R::relax(),
+            }
+        }
+    }
+
+    /// Locks this rwlock with exclusive write access, blocking the current
+    /// thread until it can be acquired.
+    ///
+    /// This function will not return while other writers or other readers
+    /// currently have access to the lock.
+    ///
+    /// Returns an RAII guard which will drop the write access of this rwlock
+    /// when dropped.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// {
+    ///     let mut data = mylock.write();
+    ///     // The lock is now locked and the data can be written
+    ///     *data += 1;
+    ///     // The lock is dropped
+    /// }
+    /// ```
+    #[inline]
+    pub fn write(&self) -> RwLockWriteGuard<T, R> {
+        loop {
+            match self.try_write_internal(false) {
+                Some(guard) => return guard,
+                None => R::relax(),
+            }
+        }
+    }
+
+    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
+    /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method.
+    #[inline]
+    pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<T, R> {
+        loop {
+            match self.try_upgradeable_read() {
+                Some(guard) => return guard,
+                None => R::relax(),
+            }
+        }
+    }
+}
+
+impl<T: ?Sized, R> RwLock<T, R> {
+    /// Attempt to acquire this lock with shared read access.
+    ///
+    /// This function never blocks and returns immediately in all cases.
+    /// It returns `Some` of an RAII guard which will
+    /// release the shared access of this thread when dropped, or `None` if the
+    /// access could not be granted. This method does not provide any
+    /// guarantees with respect to the ordering of whether contending readers
+    /// or writers will acquire the lock first.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// {
+    ///     match mylock.try_read() {
+    ///         Some(data) => {
+    ///             // The lock is now locked and the data can be read
+    ///             println!("{}", *data);
+    ///             // The lock is dropped
+    ///         },
+    ///         None => (), // no cigar
+    ///     };
+    /// }
+    /// ```
+    #[inline]
+    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
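+        // Optimistically register as a reader first; if a writer or an
+        // upgradeable guard turns out to hold the lock, the increment is
+        // rolled back below.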
+        let value = self.lock.fetch_add(READER, Ordering::Acquire);
+
+        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
+        // This helps reduce writer starvation.
+        if value & (WRITER | UPGRADED) != 0 {
+            // Lock is taken, undo.
+            self.lock.fetch_sub(READER, Ordering::Release);
+            None
+        } else {
+            Some(RwLockReadGuard {
+                lock: &self.lock,
+                data: unsafe { &*self.data.get() },
+            })
+        }
+    }
+
+    /// Return the number of readers that currently hold the lock (including upgradable readers).
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
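+    ///
+    /// For example:
+    ///
+    /// ```
+    /// let lock = spin::RwLock::new(0);
+    /// let _r1 = lock.read();
+    /// let _r2 = lock.read();
+    /// assert_eq!(lock.reader_count(), 2);
+    /// ```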
+    pub fn reader_count(&self) -> usize {
+        let state = self.lock.load(Ordering::Relaxed);
+        state / READER + (state & UPGRADED) / UPGRADED
+    }
+
+    /// Return the number of writers that currently hold the lock.
+    ///
+    /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`.
+    ///
+    /// # Safety
+    ///
+    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
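+    ///
+    /// For example:
+    ///
+    /// ```
+    /// let lock = spin::RwLock::new(0);
+    /// assert_eq!(lock.writer_count(), 0);
+    ///
+    /// let _w = lock.write();
+    /// assert_eq!(lock.writer_count(), 1);
+    /// ```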
+    pub fn writer_count(&self) -> usize {
+        (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER
+    }
+
+    /// Force decrement the reader count.
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
+    /// live, or if called more times than `read` has been called, but can be
+    /// useful in FFI contexts where the caller doesn't know how to deal with
+    /// RAII. The underlying atomic operation uses `Ordering::Release`.
+    #[inline]
+    pub unsafe fn force_read_decrement(&self) {
+        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
+        self.lock.fetch_sub(READER, Ordering::Release);
+    }
+
+    /// Force unlock exclusive write access.
+    ///
+    /// # Safety
+    ///
+    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
+    /// live, or if called when there are current readers, but can be useful in
+    /// FFI contexts where the caller doesn't know how to deal with RAII. The
+    /// underlying atomic operation uses `Ordering::Release`.
+    #[inline]
+    pub unsafe fn force_write_unlock(&self) {
+        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
+        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
+    }
+
+    #[inline(always)]
+    fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T, R>> {
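+        // `strong` selects `compare_exchange` over `compare_exchange_weak`:
+        // `try_write` uses the strong variant so it cannot fail spuriously,
+        // while the spinning `write` loop tolerates the weak variant.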
+        if compare_exchange(
+            &self.lock,
+            0,
+            WRITER,
+            Ordering::Acquire,
+            Ordering::Relaxed,
+            strong,
+        )
+        .is_ok()
+        {
+            Some(RwLockWriteGuard {
+                phantom: PhantomData,
+                inner: self,
+                data: unsafe { &mut *self.data.get() },
+            })
+        } else {
+            None
+        }
+    }
+
+    /// Attempt to lock this rwlock with exclusive write access.
+    ///
+    /// This function never blocks, and it will return `None` if a call
+    /// to `write` would otherwise block. If successful, an RAII guard is
+    /// returned.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// {
+    ///     match mylock.try_write() {
+    ///         Some(mut data) => {
+    ///             // The lock is now locked and the data can be written
+    ///             *data += 1;
+    ///             // The lock is implicitly dropped
+    ///         },
+    ///         None => (), // no cigar
+    ///     };
+    /// }
+    /// ```
+    #[inline]
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<T, R>> {
+        self.try_write_internal(true)
+    }
+
+    /// Tries to obtain an upgradeable lock guard.
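+    ///
+    /// Returns `None` if a writer or another upgradeable guard currently holds the
+    /// lock; existing plain readers do not prevent success.
+    ///
+    /// ```
+    /// let lock = spin::RwLock::new(0);
+    ///
+    /// let upgradeable = lock.try_upgradeable_read().unwrap();
+    /// // Only one upgradeable guard may be held at a time.
+    /// assert!(lock.try_upgradeable_read().is_none());
+    /// ```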
+    #[inline]
+    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<T, R>> {
+        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
+            Some(RwLockUpgradableGuard {
+                phantom: PhantomData,
+                inner: self,
+                data: unsafe { &*self.data.get() },
+            })
+        } else {
+            // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock.
+            // When they unlock, they will clear the bit.
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+    /// take place -- the mutable borrow statically guarantees no locks exist.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut lock = spin::RwLock::new(0);
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.read(), 10);
+    /// ```
+    pub fn get_mut(&mut self) -> &mut T {
+        // We know statically that there are no other references to `self`, so
+        // there's no need to lock the inner lock.
+        unsafe { &mut *self.data.get() }
+    }
+}
+
+impl<T: ?Sized + fmt::Debug, R> fmt::Debug for RwLock<T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_read() {
+            Some(guard) => write!(f, "RwLock {{ data: ")
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, " }}")),
+            None => write!(f, "RwLock {{ <locked> }}"),
+        }
+    }
+}
+
+impl<T: ?Sized + Default, R> Default for RwLock<T, R> {
+    fn default() -> Self {
+        Self::new(Default::default())
+    }
+}
+
+impl<T, R> From<T> for RwLock<T, R> {
+    fn from(data: T) -> Self {
+        Self::new(data)
+    }
+}
+
+impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
+    /// Leak the lock guard, yielding a reference to the underlying data.
+    ///
+    /// Note that this function leaves the original lock read-locked forever: writers
+    /// will never be able to acquire it, though other readers still can.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read());
+    ///
+    /// assert_eq!(*data, 0);
+    /// ```
+    #[inline]
+    pub fn leak(this: Self) -> &'rwlock T {
+        let Self { data, .. } = this;
+        data
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized, R: RelaxStrategy> RwLockUpgradableGuard<'rwlock, T, R> {
+    /// Upgrades an upgradeable lock guard to a writable lock guard.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
+    /// let writable = upgradeable.upgrade();
+    /// ```
+    #[inline]
+    pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> {
+        loop {
+            self = match self.try_upgrade_internal(false) {
+                Ok(guard) => return guard,
+                Err(e) => e,
+            };
+
+            R::relax();
+        }
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> {
+    #[inline(always)]
+    fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
+        if compare_exchange(
+            &self.inner.lock,
+            UPGRADED,
+            WRITER,
+            Ordering::Acquire,
+            Ordering::Relaxed,
+            strong,
+        )
+        .is_ok()
+        {
+            let inner = self.inner;
+
+            // Forget the old guard so its destructor doesn't run (before mutably aliasing data below)
+            mem::forget(self);
+
+            // Upgrade successful
+            Ok(RwLockWriteGuard {
+                phantom: PhantomData,
+                inner,
+                data: unsafe { &mut *inner.data.get() },
+            })
+        } else {
+            Err(self)
+        }
+    }
+
+    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
+    ///
+    /// match upgradeable.try_upgrade() {
+    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
+    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
+    /// };
+    /// ```
+    #[inline]
+    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
+        self.try_upgrade_internal(true)
+    }
+
+    /// Downgrades the upgradeable lock guard to a readable, shared lock guard.
+    /// Cannot fail and is guaranteed not to spin.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(1);
+    ///
+    /// let upgradeable = mylock.upgradeable_read();
+    /// assert!(mylock.try_read().is_none());
+    /// assert_eq!(*upgradeable, 1);
+    ///
+    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
+    /// assert!(mylock.try_read().is_some());
+    /// assert_eq!(*readable, 1);
+    /// ```
+    #[inline]
+    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
+        // Reserve the read guard for ourselves
+        self.inner.lock.fetch_add(READER, Ordering::Acquire);
+
+        let inner = self.inner;
+
+        // Dropping self removes the UPGRADED bit
+        mem::drop(self);
+
+        RwLockReadGuard {
+            lock: &inner.lock,
+            data: unsafe { &*inner.data.get() },
+        }
+    }
+
+    /// Leak the lock guard, yielding a reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original lock.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read());
+    ///
+    /// assert_eq!(*data, 0);
+    /// ```
+    #[inline]
+    pub fn leak(this: Self) -> &'rwlock T {
+        let Self { data, .. } = this;
+        data
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> {
+    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let mut writable = mylock.write();
+    /// *writable = 1;
+    ///
+    /// let readable = writable.downgrade(); // This is guaranteed not to spin
+    /// # let readable_2 = mylock.try_read().unwrap();
+    /// assert_eq!(*readable, 1);
+    /// ```
+    #[inline]
+    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
+        // Reserve the read guard for ourselves
+        self.inner.lock.fetch_add(READER, Ordering::Acquire);
+
+        let inner = self.inner;
+
+        // Dropping self clears the WRITER (and any pending UPGRADED) bits
+        mem::drop(self);
+
+        RwLockReadGuard {
+            lock: &inner.lock,
+            data: unsafe { &*inner.data.get() },
+        }
+    }
+
+    /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let mut writable = mylock.write();
+    /// *writable = 1;
+    ///
+    /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin
+    /// assert_eq!(*readable, 1);
+    /// ```
+    #[inline]
+    pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> {
+        debug_assert_eq!(self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED), WRITER);
+
+        // Swap exclusive access (WRITER) for upgradeable access (UPGRADED)
+        self.inner.lock.store(UPGRADED, Ordering::Release);
+
+        let inner = self.inner;
+
+        // Forget self so its destructor doesn't run and clear the bits just set
+        mem::forget(self);
+
+        RwLockUpgradableGuard {
+            phantom: PhantomData,
+            inner,
+            data: unsafe { &*inner.data.get() },
+        }
+    }
+
+    /// Leak the lock guard, yielding a mutable reference to the underlying data.
+    ///
+    /// Note that this function will permanently lock the original lock.
+    ///
+    /// ```
+    /// let mylock = spin::RwLock::new(0);
+    ///
+    /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write());
+    ///
+    /// *data = 1;
+    /// assert_eq!(*data, 1);
+    /// ```
+    #[inline]
+    pub fn leak(this: Self) -> &'rwlock mut T {
+        let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
+        core::mem::forget(this);
+        unsafe { &mut *data }
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        self.data
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        self.data
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        self.data
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> {
+    fn deref_mut(&mut self) -> &mut T {
+        self.data
+    }
+}
+
+impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
+    fn drop(&mut self) {
+        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
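+        // Release one reader slot; writers may acquire the lock once the
+        // count reaches zero and no WRITER/UPGRADED bits remain set.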
+        self.lock.fetch_sub(READER, Ordering::Release);
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> {
+    fn drop(&mut self) {
+        debug_assert_eq!(
+            self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
+            UPGRADED
+        );
+        self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
+    }
+}
+
+impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> {
+    fn drop(&mut self) {
+        debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER);
+
+        // Writer is responsible for clearing both WRITER and UPGRADED bits.
+        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
+        self.inner.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
+    }
+}
+
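+// Helper that dispatches to either `compare_exchange` or `compare_exchange_weak`.
+// The weak variant may fail spuriously but can compile to cheaper code inside
+// retry loops on some architectures (e.g. LL/SC based ones).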
+#[inline(always)]
+fn compare_exchange(
+    atomic: &AtomicUsize,
+    current: usize,
+    new: usize,
+    success: Ordering,
+    failure: Ordering,
+    strong: bool,
+) -> Result<usize, usize> {
+    if strong {
+        atomic.compare_exchange(current, new, success, failure)
+    } else {
+        atomic.compare_exchange_weak(current, new, success, failure)
+    }
+}
+
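+// With the `lock_api` feature enabled, `RwLock<()>` acts as a raw lock backing
+// the `lock_api` wrapper types (exposed as `spin::lock_api::RwLock`). The
+// methods below either acquire real guards and `forget` them, or forge
+// temporary guards whose destructors perform the unlock.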
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLock for RwLock<(), R> {
+    type GuardMarker = lock_api_crate::GuardSend;
+
+    const INIT: Self = Self::new(());
+
+    #[inline(always)]
+    fn lock_exclusive(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(self.write());
+    }
+
+    #[inline(always)]
+    fn try_lock_exclusive(&self) -> bool {
+        // Prevent guard destructor running
+        self.try_write().map(|g| core::mem::forget(g)).is_some()
+    }
+
+    #[inline(always)]
+    unsafe fn unlock_exclusive(&self) {
+        drop(RwLockWriteGuard {
+            inner: self,
+            data: &mut (),
+            phantom: PhantomData,
+        });
+    }
+
+    #[inline(always)]
+    fn lock_shared(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(self.read());
+    }
+
+    #[inline(always)]
+    fn try_lock_shared(&self) -> bool {
+        // Prevent guard destructor running
+        self.try_read().map(|g| core::mem::forget(g)).is_some()
+    }
+
+    #[inline(always)]
+    unsafe fn unlock_shared(&self) {
+        drop(RwLockReadGuard {
+            lock: &self.lock,
+            data: &(),
+        });
+    }
+
+    #[inline(always)]
+    fn is_locked(&self) -> bool {
+        self.lock.load(Ordering::Relaxed) != 0
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgrade for RwLock<(), R> {
+    #[inline(always)]
+    fn lock_upgradable(&self) {
+        // Prevent guard destructor running
+        core::mem::forget(self.upgradeable_read());
+    }
+
+    #[inline(always)]
+    fn try_lock_upgradable(&self) -> bool {
+        // Prevent guard destructor running
+        self.try_upgradeable_read().map(|g| core::mem::forget(g)).is_some()
+    }
+
+    #[inline(always)]
+    unsafe fn unlock_upgradable(&self) {
+        drop(RwLockUpgradableGuard {
+            inner: self,
+            data: &(),
+            phantom: PhantomData,
+        });
+    }
+
+    #[inline(always)]
+    unsafe fn upgrade(&self) {
+        let tmp_guard = RwLockUpgradableGuard {
+            inner: self,
+            data: &(),
+            phantom: PhantomData,
+        };
+        core::mem::forget(tmp_guard.upgrade());
+    }
+
+    #[inline(always)]
+    unsafe fn try_upgrade(&self) -> bool {
+        let tmp_guard = RwLockUpgradableGuard {
+            inner: self,
+            data: &(),
+            phantom: PhantomData,
+        };
+        tmp_guard.try_upgrade().map(|g| core::mem::forget(g)).is_ok()
+    }
+}
+
+#[cfg(feature = "lock_api")]
+unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockDowngrade for RwLock<(), R> {
+    unsafe fn downgrade(&self) {
+        let tmp_guard = RwLockWriteGuard {
+            inner: self,
+            data: &mut (),
+            phantom: PhantomData,
+        };
+        core::mem::forget(tmp_guard.downgrade());
+    }
+}
+
+#[cfg(feature = "lock_api1")]
+unsafe impl lock_api::RawRwLockUpgradeDowngrade for RwLock<()> {
+    unsafe fn downgrade_upgradable(&self) {
+        let tmp_guard = RwLockUpgradableGuard {
+            inner: self,
+            data: &(),
+            phantom: PhantomData,
+        };
+        core::mem::forget(tmp_guard.downgrade());
+    }
+
+    unsafe fn downgrade_to_upgradable(&self) {
+        let tmp_guard = RwLockWriteGuard {
+            inner: self,
+            data: &mut (),
+            phantom: PhantomData,
+        };
+        core::mem::forget(tmp_guard.downgrade_to_upgradeable());
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::prelude::v1::*;
+
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
+    use std::thread;
+
+    type RwLock<T> = super::RwLock<T>;
+
+    #[derive(Eq, PartialEq, Debug)]
+    struct NonCopy(i32);
+
+    #[test]
+    fn smoke() {
+        let l = RwLock::new(());
+        drop(l.read());
+        drop(l.write());
+        drop((l.read(), l.read()));
+        drop(l.write());
+    }
+
+    // TODO: needs RNG
+    //#[test]
+    //fn frob() {
+    //    static R: RwLock = RwLock::new();
+    //    const N: usize = 10;
+    //    const M: usize = 1000;
+    //
+    //    let (tx, rx) = channel::<()>();
+    //    for _ in 0..N {
+    //        let tx = tx.clone();
+    //        thread::spawn(move|| {
+    //            let mut rng = rand::thread_rng();
+    //            for _ in 0..M {
+    //                if rng.gen_weighted_bool(N) {
+    //                    drop(R.write());
+    //                } else {
+    //                    drop(R.read());
+    //                }
+    //            }
+    //            drop(tx);
+    //        });
+    //    }
+    //    drop(tx);
+    //    let _ = rx.recv();
+    //    unsafe { R.destroy(); }
+    //}
+
+    #[test]
+    fn test_rw_arc() {
+        let arc = Arc::new(RwLock::new(0));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+
+        thread::spawn(move || {
+            let mut lock = arc2.write();
+            for _ in 0..10 {
+                let tmp = *lock;
+                *lock = -1;
+                thread::yield_now();
+                *lock = tmp + 1;
+            }
+            tx.send(()).unwrap();
+        });
+
+        // Readers try to catch the writer in the act
+        let mut children = Vec::new();
+        for _ in 0..5 {
+            let arc3 = arc.clone();
+            children.push(thread::spawn(move || {
+                let lock = arc3.read();
+                assert!(*lock >= 0);
+            }));
+        }
+
+        // Wait for children to pass their asserts
+        for r in children {
+            assert!(r.join().is_ok());
+        }
+
+        // Wait for writer to finish
+        rx.recv().unwrap();
+        let lock = arc.read();
+        assert_eq!(*lock, 10);
+    }
+
+    #[test]
+    fn test_rw_access_in_unwind() {
+        let arc = Arc::new(RwLock::new(1));
+        let arc2 = arc.clone();
+        let _ = thread::spawn(move || -> () {
+            struct Unwinder {
+                i: Arc<RwLock<isize>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    let mut lock = self.i.write();
+                    *lock += 1;
+                }
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        })
+        .join();
+        let lock = arc.read();
+        assert_eq!(*lock, 2);
+    }
+
+    #[test]
+    fn test_rwlock_unsized() {
+        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
+        {
+            let b = &mut *rw.write();
+            b[0] = 4;
+            b[2] = 5;
+        }
+        let comp: &[i32] = &[4, 2, 5];
+        assert_eq!(&*rw.read(), comp);
+    }
+
+    #[test]
+    fn test_rwlock_try_write() {
+        use std::mem::drop;
+
+        let lock = RwLock::new(0isize);
+        let read_guard = lock.read();
+
+        let write_result = lock.try_write();
+        match write_result {
+            None => (),
+            Some(_) => assert!(
+                false,
+                "try_write should not succeed while read_guard is in scope"
+            ),
+        }
+
+        drop(read_guard);
+    }
+
+    #[test]
+    fn test_rw_try_read() {
+        let m = RwLock::new(0);
+        ::std::mem::forget(m.write());
+        assert!(m.try_read().is_none());
+    }
+
+    #[test]
+    fn test_into_inner() {
+        let m = RwLock::new(NonCopy(10));
+        assert_eq!(m.into_inner(), NonCopy(10));
+    }
+
+    #[test]
+    fn test_into_inner_drop() {
+        struct Foo(Arc<AtomicUsize>);
+        impl Drop for Foo {
+            fn drop(&mut self) {
+                self.0.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+        let num_drops = Arc::new(AtomicUsize::new(0));
+        let m = RwLock::new(Foo(num_drops.clone()));
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        {
+            let _inner = m.into_inner();
+            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+        }
+        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+    }
+
+    #[test]
+    fn test_force_read_decrement() {
+        let m = RwLock::new(());
+        ::std::mem::forget(m.read());
+        ::std::mem::forget(m.read());
+        ::std::mem::forget(m.read());
+        assert!(m.try_write().is_none());
+        unsafe {
+            m.force_read_decrement();
+            m.force_read_decrement();
+        }
+        assert!(m.try_write().is_none());
+        unsafe {
+            m.force_read_decrement();
+        }
+        assert!(m.try_write().is_some());
+    }
+
+    #[test]
+    fn test_force_write_unlock() {
+        let m = RwLock::new(());
+        ::std::mem::forget(m.write());
+        assert!(m.try_read().is_none());
+        unsafe {
+            m.force_write_unlock();
+        }
+        assert!(m.try_read().is_some());
+    }
+
+    #[test]
+    fn test_upgrade_downgrade() {
+        let m = RwLock::new(());
+        {
+            let _r = m.read();
+            let upg = m.try_upgradeable_read().unwrap();
+            assert!(m.try_read().is_none());
+            assert!(m.try_write().is_none());
+            assert!(upg.try_upgrade().is_err());
+        }
+        {
+            let w = m.write();
+            assert!(m.try_upgradeable_read().is_none());
+            let _r = w.downgrade();
+            assert!(m.try_upgradeable_read().is_some());
+            assert!(m.try_read().is_some());
+            assert!(m.try_write().is_none());
+        }
+        {
+            let _u = m.upgradeable_read();
+            assert!(m.try_upgradeable_read().is_none());
+        }
+
+        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
+    }
+}