Merge remote-tracking branch 'origin/master' into const-fn-feature

Felix Stegmaier 2018-07-13 11:25:56 +02:00
commit 40133fea0f
4 changed files with 88 additions and 3 deletions


@@ -84,7 +84,6 @@
 #![deny(warnings)]
 #![cfg_attr(feature = "const-fn", feature(const_fn))]
 #![feature(core_intrinsics)]
-#![feature(nonzero)]
 #![feature(untagged_unions)]
 #![no_std]
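
The `cfg_attr` line above is the point of the const-fn-feature branch: the unstable `const_fn` gate is enabled only when the `const-fn` Cargo feature is active. A minimal sketch of the usual companion pattern under that assumption (the `const_fn!` macro name is illustrative, not necessarily what the crate uses):

// Sketch: emit a `const fn` only when the "const-fn" Cargo feature is on;
// otherwise the same body compiles as a plain `fn` (2018-era nightly Rust).
macro_rules! const_fn {
    ($(#[$attr:meta])* pub const fn $f:ident($($arg:tt)*) -> $ret:ty $body:block) => {
        #[cfg(feature = "const-fn")]
        $(#[$attr])*
        pub const fn $f($($arg)*) -> $ret $body

        #[cfg(not(feature = "const-fn"))]
        $(#[$attr])*
        pub fn $f($($arg)*) -> $ret $body
    };
}

// Usage (hypothetical): const_fn!(pub const fn new() -> u32 { 0 });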


@@ -212,7 +212,7 @@ where
         let tail = self.tail.load_relaxed().into();
         if head > tail {
-            head - tail
+            self.capacity().into() + 1 - head + tail
         } else {
             tail - head
         }
@@ -343,7 +343,7 @@ macro_rules! impl_ {
                let tail = self.tail.load_relaxed();
                if head > tail {
-                   head - tail
+                   self.capacity() + 1 - head + tail
                } else {
                    tail - head
                }
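
Both hunks fix the same arithmetic: the buffer reserves `capacity() + 1` slots, and when the write index has wrapped past the end while the read index has not (`head > tail`), the live region spans the wrap point. A standalone sketch of the corrected math with plain integers (the real code reads `head` and `tail` from atomics):

fn len(head: usize, tail: usize, capacity: usize) -> usize {
    let n = capacity + 1; // slots in the backing array
    if head > tail {
        // live items run from head to the end, then from 0 to tail
        n - head + tail
    } else {
        tail - head
    }
}

fn main() {
    // capacity 3 => 4 slots; after one dequeue and four enqueues the
    // indices wrap to head = 1, tail = 0, and the buffer holds 3 items,
    // matching the len_properly_wraps test added below.
    assert_eq!(len(1, 0, 3), 3);
    assert_eq!(len(0, 0, 3), 0);
}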
@@ -573,4 +573,32 @@ mod tests {
         assert_eq!(rb.len(), 2);
     }
+
+    #[test]
+    fn ready_flag() {
+        let mut rb: RingBuffer<i32, U2> = RingBuffer::new();
+        let (mut p, mut c) = rb.split();
+        assert_eq!(c.ready(), false);
+        assert_eq!(p.ready(), true);
+
+        p.enqueue(0).unwrap();
+        assert_eq!(c.ready(), true);
+        assert_eq!(p.ready(), true);
+
+        p.enqueue(1).unwrap();
+        assert_eq!(c.ready(), true);
+        assert_eq!(p.ready(), false);
+
+        c.dequeue().unwrap();
+        assert_eq!(c.ready(), true);
+        assert_eq!(p.ready(), true);
+
+        c.dequeue().unwrap();
+        assert_eq!(c.ready(), false);
+        assert_eq!(p.ready(), true);
+    }
 }


@@ -79,6 +79,14 @@ macro_rules! impl_ {
             N: Add<U1> + Unsigned,
             Sum<N, U1>: ArrayLength<T>,
         {
+            /// Returns `true` if there are items to dequeue. When this returns `true`,
+            /// at least the first subsequent dequeue will succeed.
+            pub fn ready(&self) -> bool {
+                let tail = unsafe { self.rb.as_ref().tail.load_acquire() };
+                let head = unsafe { self.rb.as_ref().head.load_relaxed() };
+                head != tail
+            }
+
             /// Returns the item in the front of the queue, or `None` if the queue is empty
             pub fn dequeue(&mut self) -> Option<T> {
                 let tail = unsafe { self.rb.as_ref().tail.load_acquire() };
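
A hypothetical usage sketch for the new `Consumer::ready`, reusing the `RingBuffer`/`split` API that the `ready_flag` test above exercises (the import paths assume the crate layout of this era):

use heapless::RingBuffer;
use heapless::consts::U2;

fn main() {
    let mut rb: RingBuffer<i32, U2> = RingBuffer::new();
    let (mut p, mut c) = rb.split();

    p.enqueue(42).unwrap();
    if c.ready() {
        // Per the doc comment: once ready() returns true, the first dequeue
        // on this single consumer is guaranteed to succeed.
        assert_eq!(c.dequeue().unwrap(), 42);
    }
}

The point of `ready` over a bare `dequeue` is that it takes `&self`, so a caller such as an interrupt handler can check for data without committing to consume it.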
@@ -119,6 +127,21 @@ macro_rules! impl_ {
             N: Add<U1> + Unsigned,
             Sum<N, U1>: ArrayLength<T>,
         {
+            /// Returns `true` if there is space to enqueue a new item. When this
+            /// returns `true`, at least the first subsequent enqueue will succeed.
+            pub fn ready(&self) -> bool {
+                let n = unsafe { self.rb.as_ref().capacity() + 1 };
+                let tail = unsafe { self.rb.as_ref().tail.load_relaxed() };
+                // NOTE we could replace this `load_acquire` with a `load_relaxed` and
+                // this method would be sound on most architectures, but that change
+                // would result in UB according to the C++ memory model, which is what
+                // Rust currently uses, so we err on the side of caution and stick to
+                // `load_acquire`. See google/sanitizers#882 for more details.
+                let head = unsafe { self.rb.as_ref().head.load_acquire() };
+                let next_tail = (tail + 1) % n;
+                next_tail != head
+            }
+
             /// Adds an `item` to the end of the queue
             ///
             /// Returns back the `item` if the queue is full
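
The NOTE in this hunk is the subtle part. A sketch of the same check written against the standard atomics (the `Indices` struct is illustrative, not the crate's internal layout), to show which side of the acquire/release pairing each load sits on:

use std::sync::atomic::{AtomicUsize, Ordering};

struct Indices {
    head: AtomicUsize, // advanced by the consumer with a Release store
    tail: AtomicUsize, // advanced by the producer with a Release store
}

// Producer-side readiness over a buffer with `n` slots (capacity + 1).
fn producer_ready(ix: &Indices, n: usize) -> bool {
    // Relaxed is fine for `tail`: only the producer thread ever writes it.
    let tail = ix.tail.load(Ordering::Relaxed);
    // Acquire pairs with the consumer's Release store to `head`. With a
    // relaxed load, the producer's later non-atomic write into the freed
    // slot could race with the consumer's read of it, which the C++/Rust
    // memory model treats as undefined behavior even on hardware that
    // happens to be forgiving.
    let head = ix.head.load(Ordering::Acquire);
    (tail + 1) % n != head
}

fn main() {
    let ix = Indices { head: AtomicUsize::new(0), tail: AtomicUsize::new(0) };
    assert!(producer_ready(&ix, 4)); // an empty 4-slot buffer has room
}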

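The enqueue contract in the doc comment above, where a rejected item is handed back rather than dropped, combines with `ready` into a lossless non-blocking producer. A small sketch via the owning `RingBuffer`, assuming the full-queue case returns `Err(item)` as that comment describes:

use heapless::RingBuffer;
use heapless::consts::U1;

fn main() {
    let mut rb: RingBuffer<i32, U1> = RingBuffer::new();
    rb.enqueue(1).unwrap();
    // Queue full: the value comes back in the Err variant, so the caller
    // can retry later or drop it deliberately.
    assert_eq!(rb.enqueue(2), Err(2));
}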

@@ -157,3 +157,38 @@ fn unchecked() {
     assert_eq!(rb.len(), N::to_usize() / 2);
 }
+
+#[test]
+fn len_properly_wraps() {
+    type N = U3;
+    let mut rb: RingBuffer<u8, N> = RingBuffer::new();
+
+    rb.enqueue(1).unwrap();
+    assert_eq!(rb.len(), 1);
+    rb.dequeue();
+    assert_eq!(rb.len(), 0);
+    rb.enqueue(2).unwrap();
+    assert_eq!(rb.len(), 1);
+    rb.enqueue(3).unwrap();
+    assert_eq!(rb.len(), 2);
+    rb.enqueue(4).unwrap();
+    assert_eq!(rb.len(), 3);
+}
+
+#[test]
+fn iterator_properly_wraps() {
+    type N = U3;
+    let mut rb: RingBuffer<u8, N> = RingBuffer::new();
+    rb.enqueue(1).unwrap();
+    rb.dequeue();
+    rb.enqueue(2).unwrap();
+    rb.enqueue(3).unwrap();
+    rb.enqueue(4).unwrap();
+
+    let expected = [2, 3, 4];
+    let mut actual = [0, 0, 0];
+    for (idx, el) in rb.iter().enumerate() {
+        actual[idx] = *el;
+    }
+
+    assert_eq!(expected, actual)
+}