Mirror of https://github.com/rust-lang/rust.git
Rewrite std::comm
* Streams are now ~3x faster than before (fewer allocations and more optimized)
* Based on a single-producer single-consumer lock-free queue that doesn't always have to allocate on every send.
* Blocking via mutexes/cond vars outside the runtime
* Streams work in/out of the runtime seamlessly
* Select now works in/out of the runtime seamlessly
* Streams will now fail!() on send() if the other end has hung up
* try_send() will not fail
* PortOne/ChanOne removed
* SharedPort removed
* MegaPipe removed
* Generic select removed (only one kind of port now)
* API redesign
  * try_recv == never block
  * recv_opt == block, don't fail
  * iter() == Iterator<T> for Port<T>
  * removed peek
  * Type::new
* Removed rt::comm
This commit is contained in:
parent
000cda611f
commit
bfa9064ba2
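As a rough sketch of the redesigned API (based on the tests added later in this diff; the authoritative signatures live in src/libstd/comm/mod.rs, whose diff is suppressed below):

```rust
// Illustration only, in the pre-1.0 syntax used throughout this commit.
let (mut port, chan) = Chan::<int>::new();   // a single port/chan pair type now

do spawn {
    chan.send(1);         // fail!()s if the port has been dropped
    // chan.try_send(2);  // would return a bool instead of failing
}

assert_eq!(port.recv(), 1);          // blocks; fail!()s on disconnect
assert_eq!(port.recv_opt(), None);   // blocks, but returns None once chan is gone
// port.try_recv()                   // never blocks
```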
@ -1,311 +0,0 @@
|
||||
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
|
||||
// file at the top-level directory of this distribution and at
|
||||
// http://rust-lang.org/COPYRIGHT.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
/*!
|
||||
Message passing
|
||||
*/
|
||||
|
||||
#[allow(missing_doc)];
|
||||
|
||||
use clone::Clone;
|
||||
use iter::Iterator;
|
||||
use kinds::Send;
|
||||
use option::Option;
|
||||
use rtcomm = rt::comm;
|
||||
|
||||
/// A trait for things that can send multiple messages.
|
||||
pub trait GenericChan<T> {
|
||||
/// Sends a message.
|
||||
fn send(&self, x: T);
|
||||
}
|
||||
|
||||
/// Things that can send multiple messages and can detect when the receiver
|
||||
/// is closed
|
||||
pub trait GenericSmartChan<T> {
|
||||
/// Sends a message, or reports if the receiver has closed the connection.
|
||||
fn try_send(&self, x: T) -> bool;
|
||||
}
|
||||
|
||||
/// Trait for non-rescheduling send operations, similar to `send_deferred` on ChanOne.
|
||||
pub trait SendDeferred<T> {
|
||||
fn send_deferred(&self, val: T);
|
||||
fn try_send_deferred(&self, val: T) -> bool;
|
||||
}
|
||||
|
||||
/// A trait for things that can receive multiple messages.
|
||||
pub trait GenericPort<T> {
|
||||
/// Receives a message, or fails if the connection closes.
|
||||
fn recv(&self) -> T;
|
||||
|
||||
/// Receives a message, or returns `None` if
|
||||
/// the connection is closed or closes.
|
||||
fn try_recv(&self) -> Option<T>;
|
||||
|
||||
/// Returns an iterator that breaks once the connection closes.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ~~~rust
|
||||
/// do spawn {
|
||||
/// for x in port.recv_iter() {
|
||||
/// if pred(x) { break; }
|
||||
/// println!("{}", x);
|
||||
/// }
|
||||
/// }
|
||||
/// ~~~
|
||||
fn recv_iter<'a>(&'a self) -> RecvIterator<'a, Self> {
|
||||
RecvIterator { port: self }
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RecvIterator<'a, P> {
|
||||
priv port: &'a P,
|
||||
}
|
||||
|
||||
impl<'a, T, P: GenericPort<T>> Iterator<T> for RecvIterator<'a, P> {
|
||||
fn next(&mut self) -> Option<T> {
|
||||
self.port.try_recv()
|
||||
}
|
||||
}
|
||||
|
||||
/// Ports that can `peek`
|
||||
pub trait Peekable<T> {
|
||||
/// Returns true if a message is available
|
||||
fn peek(&self) -> bool;
|
||||
}
|
||||
|
||||
/* priv is disabled to allow users to get at traits like Select. */
|
||||
pub struct PortOne<T> { /* priv */ x: rtcomm::PortOne<T> }
|
||||
pub struct ChanOne<T> { /* priv */ x: rtcomm::ChanOne<T> }
|
||||
|
||||
pub fn oneshot<T: Send>() -> (PortOne<T>, ChanOne<T>) {
|
||||
let (p, c) = rtcomm::oneshot();
|
||||
(PortOne { x: p }, ChanOne { x: c })
|
||||
}
|
||||
|
||||
pub struct Port<T> { /* priv */ x: rtcomm::Port<T> }
|
||||
pub struct Chan<T> { /* priv */ x: rtcomm::Chan<T> }
|
||||
|
||||
pub fn stream<T: Send>() -> (Port<T>, Chan<T>) {
|
||||
let (p, c) = rtcomm::stream();
|
||||
(Port { x: p }, Chan { x: c })
|
||||
}
|
||||
|
||||
impl<T: Send> ChanOne<T> {
|
||||
pub fn send(self, val: T) {
|
||||
let ChanOne { x: c } = self;
|
||||
c.send(val)
|
||||
}
|
||||
|
||||
pub fn try_send(self, val: T) -> bool {
|
||||
let ChanOne { x: c } = self;
|
||||
c.try_send(val)
|
||||
}
|
||||
|
||||
pub fn send_deferred(self, val: T) {
|
||||
let ChanOne { x: c } = self;
|
||||
c.send_deferred(val)
|
||||
}
|
||||
|
||||
pub fn try_send_deferred(self, val: T) -> bool {
|
||||
let ChanOne{ x: c } = self;
|
||||
c.try_send_deferred(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> PortOne<T> {
|
||||
pub fn recv(self) -> T {
|
||||
let PortOne { x: p } = self;
|
||||
p.recv()
|
||||
}
|
||||
|
||||
pub fn try_recv(self) -> Option<T> {
|
||||
let PortOne { x: p } = self;
|
||||
p.try_recv()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> Peekable<T> for PortOne<T> {
|
||||
fn peek(&self) -> bool {
|
||||
let &PortOne { x: ref p } = self;
|
||||
p.peek()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> GenericChan<T> for Chan<T> {
|
||||
fn send(&self, val: T) {
|
||||
let &Chan { x: ref c } = self;
|
||||
c.send(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> GenericSmartChan<T> for Chan<T> {
|
||||
fn try_send(&self, val: T) -> bool {
|
||||
let &Chan { x: ref c } = self;
|
||||
c.try_send(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> SendDeferred<T> for Chan<T> {
|
||||
fn send_deferred(&self, val: T) {
|
||||
let &Chan { x: ref c } = self;
|
||||
c.send_deferred(val)
|
||||
}
|
||||
|
||||
fn try_send_deferred(&self, val: T) -> bool {
|
||||
let &Chan { x: ref c } = self;
|
||||
c.try_send_deferred(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> GenericPort<T> for Port<T> {
|
||||
fn recv(&self) -> T {
|
||||
let &Port { x: ref p } = self;
|
||||
p.recv()
|
||||
}
|
||||
|
||||
fn try_recv(&self) -> Option<T> {
|
||||
let &Port { x: ref p } = self;
|
||||
p.try_recv()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> Peekable<T> for Port<T> {
|
||||
fn peek(&self) -> bool {
|
||||
let &Port { x: ref p } = self;
|
||||
p.peek()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub struct SharedChan<T> { /* priv */ x: rtcomm::SharedChan<T> }
|
||||
|
||||
impl<T: Send> SharedChan<T> {
|
||||
pub fn new(c: Chan<T>) -> SharedChan<T> {
|
||||
let Chan { x: c } = c;
|
||||
SharedChan { x: rtcomm::SharedChan::new(c) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> GenericChan<T> for SharedChan<T> {
|
||||
fn send(&self, val: T) {
|
||||
let &SharedChan { x: ref c } = self;
|
||||
c.send(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> GenericSmartChan<T> for SharedChan<T> {
|
||||
fn try_send(&self, val: T) -> bool {
|
||||
let &SharedChan { x: ref c } = self;
|
||||
c.try_send(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> SendDeferred<T> for SharedChan<T> {
|
||||
fn send_deferred(&self, val: T) {
|
||||
let &SharedChan { x: ref c } = self;
|
||||
c.send_deferred(val)
|
||||
}
|
||||
|
||||
fn try_send_deferred(&self, val: T) -> bool {
|
||||
let &SharedChan { x: ref c } = self;
|
||||
c.try_send_deferred(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> Clone for SharedChan<T> {
|
||||
fn clone(&self) -> SharedChan<T> {
|
||||
let &SharedChan { x: ref c } = self;
|
||||
SharedChan { x: c.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SharedPort<T> { /* priv */ x: rtcomm::SharedPort<T> }
|
||||
|
||||
impl<T: Send> SharedPort<T> {
|
||||
pub fn new(p: Port<T>) -> SharedPort<T> {
|
||||
let Port { x: p } = p;
|
||||
SharedPort { x: rtcomm::SharedPort::new(p) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> GenericPort<T> for SharedPort<T> {
|
||||
fn recv(&self) -> T {
|
||||
let &SharedPort { x: ref p } = self;
|
||||
p.recv()
|
||||
}
|
||||
|
||||
fn try_recv(&self) -> Option<T> {
|
||||
let &SharedPort { x: ref p } = self;
|
||||
p.try_recv()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> Clone for SharedPort<T> {
|
||||
fn clone(&self) -> SharedPort<T> {
|
||||
let &SharedPort { x: ref p } = self;
|
||||
SharedPort { x: p.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use comm::*;
|
||||
use prelude::*;
|
||||
|
||||
#[test]
|
||||
fn test_nested_recv_iter() {
|
||||
let (port, chan) = stream::<int>();
|
||||
let (total_port, total_chan) = oneshot::<int>();
|
||||
|
||||
do spawn {
|
||||
let mut acc = 0;
|
||||
for x in port.recv_iter() {
|
||||
acc += x;
|
||||
for x in port.recv_iter() {
|
||||
acc += x;
|
||||
for x in port.try_recv().move_iter() {
|
||||
acc += x;
|
||||
total_chan.send(acc);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
chan.send(3);
|
||||
chan.send(1);
|
||||
chan.send(2);
|
||||
assert_eq!(total_port.recv(), 6);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_recv_iter_break() {
|
||||
let (port, chan) = stream::<int>();
|
||||
let (count_port, count_chan) = oneshot::<int>();
|
||||
|
||||
do spawn {
|
||||
let mut count = 0;
|
||||
for x in port.recv_iter() {
|
||||
if count >= 3 {
|
||||
count_chan.send(count);
|
||||
break;
|
||||
} else {
|
||||
count += x;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
chan.send(2);
|
||||
chan.send(2);
|
||||
chan.send(2);
|
||||
chan.send(2);
|
||||
assert_eq!(count_port.recv(), 4);
|
||||
}
|
||||
}
|
337  src/libstd/comm/imp.rs  Normal file
@ -0,0 +1,337 @@
|
||||
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
|
||||
// file at the top-level directory of this distribution and at
|
||||
// http://rust-lang.org/COPYRIGHT.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
//! One of the major goals behind this channel implementation is to work
//! seamlessly on and off the runtime. This also means that the code isn't
//! littered with "if is_green() { ... } else { ... }". Right now, the rest of
//! the runtime isn't quite ready for this abstraction to be done very
//! nicely, so the conditional "if green" blocks are all contained in this inner
//! module.
//!
//! The goal of this module is to mirror what the runtime "should be", not the
//! state that it is currently in today. You'll notice that there is no mention
//! of schedulers or is_green inside any of the channel code; that logic is
//! currently entirely contained in this one module.
//!
//! In the ideal world, nothing in this module exists and it is all implemented
//! elsewhere in the runtime (in the proper location). All of this code is
//! structured in order to easily refactor this to the correct location whenever
//! we have the trait objects in place to serve as the boundary of the
//! abstraction.
|
||||
|
||||
use iter::{range, Iterator};
|
||||
use ops::Drop;
|
||||
use option::{Some, None, Option};
|
||||
use rt::local::Local;
|
||||
use rt::sched::{SchedHandle, Scheduler, TaskFromFriend};
|
||||
use rt::thread::Thread;
|
||||
use rt;
|
||||
use unstable::mutex::Mutex;
|
||||
use unstable::sync::UnsafeArc;
|
||||
|
||||
// A task handle is a method of waking up a blocked task. The handle itself
// is completely opaque and only has a wake() method defined on it. This
// method will wake the task regardless of the context of the thread which
// is currently calling wake().
//
// This abstraction should be able to be created when putting a task to
// sleep. This should basically be a method on whatever the local Task is,
// consuming the local Task.
|
||||
|
||||
pub struct TaskHandle {
|
||||
priv inner: TaskRepr
|
||||
}
|
||||
enum TaskRepr {
|
||||
Green(rt::BlockedTask, *mut SchedHandle),
|
||||
Native(NativeWakeupStyle),
|
||||
}
|
||||
enum NativeWakeupStyle {
|
||||
ArcWakeup(UnsafeArc<Mutex>), // shared mutex to synchronize on
|
||||
LocalWakeup(*mut Mutex), // synchronize on the task-local mutex
|
||||
}
|
||||
|
||||
impl TaskHandle {
|
||||
// Signal that this handle should be woken up. The `can_resched`
|
||||
// argument indicates whether the current task could possibly be
|
||||
// rescheduled or not. This does not have a lot of meaning for the
|
||||
// native case, but for an M:N case it indicates whether a context
|
||||
// switch can happen or not.
|
||||
pub fn wake(self, can_resched: bool) {
|
||||
match self.inner {
|
||||
Green(task, handle) => {
|
||||
// If we have a local scheduler, then use that to run the
|
||||
// blocked task, otherwise we can use the handle to send the
|
||||
// task back to its home.
|
||||
if rt::in_green_task_context() {
|
||||
if can_resched {
|
||||
task.wake().map(Scheduler::run_task);
|
||||
} else {
|
||||
let mut s: ~Scheduler = Local::take();
|
||||
s.enqueue_blocked_task(task);
|
||||
Local::put(s);
|
||||
}
|
||||
} else {
|
||||
let task = match task.wake() {
|
||||
Some(task) => task, None => return
|
||||
};
|
||||
// XXX: this is not an easy section of code to refactor.
|
||||
// If this handle is owned by the Task (which it
|
||||
// should be), then this would be a use-after-free
|
||||
// because once the task is pushed onto the message
|
||||
// queue, the handle is gone.
|
||||
//
|
||||
// Currently the handle is instead owned by the
|
||||
// Port/Chan pair, which means that because a
|
||||
// channel is invoking this method the handle will
|
||||
// continue to stay alive for the entire duration
|
||||
// of this method. This will require thought when
|
||||
// moving the handle into the task.
|
||||
unsafe { (*handle).send(TaskFromFriend(task)) }
|
||||
}
|
||||
}
|
||||
|
||||
// Note that there are no use-after-free races in this code. In
// the arc-case, we own the lock, and in the local case, we're
// using a lock so it's guaranteed that they aren't running while
// we hold the lock.
|
||||
Native(ArcWakeup(lock)) => {
|
||||
unsafe {
|
||||
let lock = lock.get();
|
||||
(*lock).lock();
|
||||
(*lock).signal();
|
||||
(*lock).unlock();
|
||||
}
|
||||
}
|
||||
Native(LocalWakeup(lock)) => {
|
||||
unsafe {
|
||||
(*lock).lock();
|
||||
(*lock).signal();
|
||||
(*lock).unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Trashes handle to this task. This ensures that necessary memory is
|
||||
// deallocated, and there may be some extra assertions as well.
|
||||
pub fn trash(self) {
|
||||
match self.inner {
|
||||
Green(task, _) => task.assert_already_awake(),
|
||||
Native(..) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This structure is an abstraction of what should be stored in the local
|
||||
// task itself. This data is currently stored inside of each channel, but
|
||||
// this should rather be stored in each task (and channels will still
|
||||
// continue to lazily initialize this data).
|
||||
|
||||
pub struct TaskData {
|
||||
priv handle: Option<SchedHandle>,
|
||||
priv lock: Mutex,
|
||||
}
|
||||
|
||||
impl TaskData {
|
||||
pub fn new() -> TaskData {
|
||||
TaskData {
|
||||
handle: None,
|
||||
lock: unsafe { Mutex::empty() },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TaskData {
|
||||
fn drop(&mut self) {
|
||||
unsafe { self.lock.destroy() }
|
||||
}
|
||||
}
|
||||
|
||||
// Now this is the really fun part. This is where all of the M:N/1:1-agnostic
// and recv/select-agnostic blocking information goes. A "blocking
|
||||
// context" is really just a stack-allocated structure (which is probably
|
||||
// fine to be a stack-trait-object).
|
||||
//
|
||||
// This has some particularly strange interfaces, but the reason for all
|
||||
// this is to support selection/recv/1:1/M:N all in one bundle.
|
||||
|
||||
pub struct BlockingContext<'a> {
|
||||
priv inner: BlockingRepr<'a>
|
||||
}
|
||||
|
||||
enum BlockingRepr<'a> {
|
||||
GreenBlock(rt::BlockedTask, &'a mut Scheduler),
|
||||
NativeBlock(Option<UnsafeArc<Mutex>>),
|
||||
}
|
||||
|
||||
impl<'a> BlockingContext<'a> {
|
||||
// Creates one blocking context. The data provided should in theory be
|
||||
// acquired from the local task, but it is instead acquired from the
|
||||
// channel currently.
|
||||
//
|
||||
// This function will call `f` with a blocking context, plus the data
|
||||
// that it is given. This function will then return whether this task
|
||||
// should actually go to sleep or not. If `true` is returned, then this
|
||||
// function does not return until someone calls `wake()` on the task.
|
||||
// If `false` is returned, then this function immediately returns.
|
||||
//
|
||||
// # Safety note
|
||||
//
|
||||
// Note that this stack closure may not be run on the same stack as when
|
||||
// this function was called. This means that the environment of this
|
||||
// stack closure could be unsafely aliased. This is currently prevented
|
||||
// through the guarantee that this function will never return before `f`
|
||||
// finishes executing.
|
||||
pub fn one(data: &mut TaskData,
|
||||
f: |BlockingContext, &mut TaskData| -> bool) {
|
||||
if rt::in_green_task_context() {
|
||||
let sched: ~Scheduler = Local::take();
|
||||
sched.deschedule_running_task_and_then(|sched, task| {
|
||||
let ctx = BlockingContext { inner: GreenBlock(task, sched) };
|
||||
// no need to do something on success/failure other than
|
||||
// returning because the `block` function for a BlockingContext
|
||||
// takes care of reawakening itself if the blocking procedure
|
||||
// fails. If this function is successful, then we're already
|
||||
// blocked, and if it fails, the task will already be
|
||||
// rescheduled.
|
||||
f(ctx, data);
|
||||
});
|
||||
} else {
|
||||
unsafe { data.lock.lock(); }
|
||||
let ctx = BlockingContext { inner: NativeBlock(None) };
|
||||
if f(ctx, data) {
|
||||
unsafe { data.lock.wait(); }
|
||||
}
|
||||
unsafe { data.lock.unlock(); }
|
||||
}
|
||||
}
|
||||
|
||||
// Creates many blocking contexts. The intended use case for this
|
||||
// function is selection over a number of ports. This will create `amt`
|
||||
// blocking contexts, yielding them to `f` in turn. If `f` returns
|
||||
// false, then this function aborts and returns immediately. If `f`
|
||||
// repeatedly returns `true` `amt` times, then this function will block.
|
||||
pub fn many(amt: uint, f: |BlockingContext| -> bool) {
|
||||
if rt::in_green_task_context() {
|
||||
let sched: ~Scheduler = Local::take();
|
||||
sched.deschedule_running_task_and_then(|sched, task| {
|
||||
for handle in task.make_selectable(amt) {
|
||||
let ctx = BlockingContext {
|
||||
inner: GreenBlock(handle, sched)
|
||||
};
|
||||
// see comment above in `one` for why no further action is
|
||||
// necessary here
|
||||
if !f(ctx) { break }
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// In the native case, our decision to block must be shared
|
||||
// amongst all of the channels. It may be possible to
|
||||
// stack-allocate this mutex (instead of putting it in an
|
||||
// UnsafeArc box), but for now in order to prevent
|
||||
// use-after-free trivially we place this into a box and then
|
||||
// pass that around.
|
||||
unsafe {
|
||||
let mtx = UnsafeArc::new(Mutex::new());
|
||||
(*mtx.get()).lock();
|
||||
let success = range(0, amt).all(|_| {
|
||||
f(BlockingContext {
|
||||
inner: NativeBlock(Some(mtx.clone()))
|
||||
})
|
||||
});
|
||||
if success {
|
||||
(*mtx.get()).wait();
|
||||
}
|
||||
(*mtx.get()).unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This function will consume this BlockingContext, and optionally block
// according to the atomic `decision` function. The semantics of this
// function are:
//
// * `slot` is required to be a `None`-slot (which is owned by the
//   channel)
// * The `slot` will be filled in with a blocked version of the current
//   task (with `wake`-ability if this function is successful).
// * If the `decision` function returns true, then this function
//   immediately returns having relinquished ownership of the task.
// * If the `decision` function returns false, then the `slot` is reset
//   to `None` and the task is re-scheduled if necessary (remember that
//   the task will not resume executing before the outer `one` or
//   `many` function has returned). This function is expected to have a
//   release memory fence in order for the modifications of `to_wake` to be
//   visible to other tasks. Code which attempts to read `to_wake` should
//   have an acquiring memory fence to guarantee that this write is
//   visible.
//
// This function will return whether the blocking occurred or not.
|
||||
pub fn block(self,
|
||||
data: &mut TaskData,
|
||||
slot: &mut Option<TaskHandle>,
|
||||
decision: || -> bool) -> bool {
|
||||
assert!(slot.is_none());
|
||||
match self.inner {
|
||||
GreenBlock(task, sched) => {
|
||||
if data.handle.is_none() {
|
||||
data.handle = Some(sched.make_handle());
|
||||
}
|
||||
let handle = data.handle.get_mut_ref() as *mut SchedHandle;
|
||||
*slot = Some(TaskHandle { inner: Green(task, handle) });
|
||||
|
||||
if !decision() {
|
||||
match slot.take_unwrap().inner {
|
||||
Green(task, _) => sched.enqueue_blocked_task(task),
|
||||
Native(..) => unreachable!()
|
||||
}
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
NativeBlock(shared) => {
|
||||
*slot = Some(TaskHandle {
|
||||
inner: Native(match shared {
|
||||
Some(arc) => ArcWakeup(arc),
|
||||
None => LocalWakeup(&mut data.lock as *mut Mutex),
|
||||
})
|
||||
});
|
||||
|
||||
if !decision() {
|
||||
*slot = None;
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Agnostic method of forcing a yield of the current task
|
||||
pub fn yield_now() {
|
||||
if rt::in_green_task_context() {
|
||||
let sched: ~Scheduler = Local::take();
|
||||
sched.yield_now();
|
||||
} else {
|
||||
Thread::yield_now();
|
||||
}
|
||||
}
|
||||
|
||||
// Agnostic method of "maybe yielding" in order to provide fairness
|
||||
pub fn maybe_yield() {
|
||||
if rt::in_green_task_context() {
|
||||
let sched: ~Scheduler = Local::take();
|
||||
sched.maybe_yield();
|
||||
} else {
|
||||
// the OS decides fairness, nothing for us to do.
|
||||
}
|
||||
}
|
1371  src/libstd/comm/mod.rs  Normal file (file diff suppressed because it is too large)
498  src/libstd/comm/select.rs  Normal file
@ -0,0 +1,498 @@
|
||||
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
|
||||
// file at the top-level directory of this distribution and at
|
||||
// http://rust-lang.org/COPYRIGHT.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
//! Selection over an array of ports
//!
//! This module contains the implementation machinery necessary for selecting
//! over a number of ports. One large goal of this module is to provide an
//! efficient interface to selecting over any port of any type.
//!
//! This is achieved through an architecture of a "port set" in which ports are
//! added to a set and then the entire set is waited on at once. The set can be
//! waited on multiple times to prevent re-adding each port to the set.
//!
//! Usage of this module is currently encouraged to go through the use of the
//! `select!` macro. This macro allows natural binding of variables to the
//! received values of ports in a much more natural syntax than using the
//! `Select` structure directly.
//!
//! # Example
//!
//! ```rust
//! let (mut p1, c1) = Chan::new();
//! let (mut p2, c2) = Chan::new();
//!
//! c1.send(1);
//! c2.send(2);
//!
//! select! (
//!     val = p1.recv() => {
//!         assert_eq!(val, 1);
//!     }
//!     val = p2.recv() => {
//!         assert_eq!(val, 2);
//!     }
//! )
//! ```
|
||||
|
||||
use cast;
|
||||
use iter::Iterator;
|
||||
use kinds::Send;
|
||||
use ops::Drop;
|
||||
use option::{Some, None, Option};
|
||||
use ptr::RawPtr;
|
||||
use super::imp::BlockingContext;
|
||||
use super::{Packet, Port, imp};
|
||||
use uint;
|
||||
use unstable::atomics::{Relaxed, SeqCst};
|
||||
|
||||
macro_rules! select {
|
||||
(
|
||||
$name1:pat = $port1:ident.$meth1:ident() => $code1:expr,
|
||||
$($name:pat = $port:ident.$meth:ident() => $code:expr),*
|
||||
) => ({
|
||||
use std::comm::Select;
|
||||
let sel = Select::new();
|
||||
let mut $port1 = sel.add(&mut $port1);
|
||||
$( let mut $port = sel.add(&mut $port); )*
|
||||
let ret = sel.wait();
|
||||
if ret == $port1.id { let $name1 = $port1.$meth1(); $code1 }
|
||||
$( else if ret == $port.id { let $name = $port.$meth(); $code } )*
|
||||
else { unreachable!() }
|
||||
})
|
||||
}
|
||||
|
||||
/// The "port set" of the select interface. This structure is used to manage a
|
||||
/// set of ports which are being selected over.
|
||||
#[no_freeze]
|
||||
#[no_send]
|
||||
pub struct Select {
|
||||
priv head: *mut Packet,
|
||||
priv tail: *mut Packet,
|
||||
priv next_id: uint,
|
||||
}
|
||||
|
||||
/// A handle to a port which is currently a member of a `Select` set of ports.
|
||||
/// This handle is used to keep the port in the set as well as interact with the
|
||||
/// underlying port.
|
||||
pub struct Handle<'self, T> {
|
||||
id: uint,
|
||||
priv selector: &'self Select,
|
||||
priv port: &'self mut Port<T>,
|
||||
}
|
||||
|
||||
struct PacketIterator { priv cur: *mut Packet }
|
||||
|
||||
impl Select {
|
||||
/// Creates a new selection structure. This set is initially empty and
|
||||
/// `wait` will fail!() if called.
|
||||
///
|
||||
/// Usage of this struct directly can sometimes be burdensome, and usage is
/// often much easier through the `select!` macro.
|
||||
pub fn new() -> Select {
|
||||
Select {
|
||||
head: 0 as *mut Packet,
|
||||
tail: 0 as *mut Packet,
|
||||
next_id: 1,
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a new port to this set, returning a handle which is then used to
|
||||
/// receive on the port.
|
||||
///
|
||||
/// Note that this port parameter takes `&mut Port` instead of `&Port`. None
|
||||
/// of the methods of receiving on a port require `&mut self`, but `&mut` is
|
||||
/// used here in order to have the compiler guarantee that the same port is
|
||||
/// not added to this set more than once.
|
||||
///
|
||||
/// When the returned handle falls out of scope, the port will be removed
|
||||
/// from this set. While the handle is in this set, usage of the port can be
|
||||
/// done through the `Handle`'s receiving methods.
|
||||
pub fn add<'a, T: Send>(&'a self, port: &'a mut Port<T>) -> Handle<'a, T> {
|
||||
let this = unsafe { cast::transmute_mut(self) };
|
||||
let id = this.next_id;
|
||||
this.next_id += 1;
|
||||
unsafe {
|
||||
let packet = port.queue.packet();
|
||||
assert!(!(*packet).selecting.load(Relaxed));
|
||||
assert_eq!((*packet).selection_id, 0);
|
||||
(*packet).selection_id = id;
|
||||
if this.head.is_null() {
|
||||
this.head = packet;
|
||||
this.tail = packet;
|
||||
} else {
|
||||
(*packet).select_prev = this.tail;
|
||||
assert!((*packet).select_next.is_null());
|
||||
(*this.tail).select_next = packet;
|
||||
this.tail = packet;
|
||||
}
|
||||
}
|
||||
Handle { id: id, selector: this, port: port }
|
||||
}
|
||||
|
||||
/// Waits for an event on this port set. The returned value is *not* an
/// index, but rather an id. This id can be queried against any active
|
||||
/// `Handle` structures (each one has a public `id` field). The handle with
|
||||
/// the matching `id` will have some sort of event available on it. The
|
||||
/// event could either be that data is available or the corresponding
|
||||
/// channel has been closed.
|
||||
pub fn wait(&self) -> uint {
|
||||
// Note that this is currently an inefficient implementation. We in
|
||||
// theory have knowledge about all ports in the set ahead of time, so
|
||||
// this method shouldn't really have to iterate over all of them yet
|
||||
// again. The idea with this "port set" interface is to get the
|
||||
// interface right this time around, and later this implementation can
|
||||
// be optimized.
|
||||
//
|
||||
// This implementation can be summarized by:
|
||||
//
|
||||
// fn select(ports) {
|
||||
// if any port ready { return ready index }
|
||||
// deschedule {
|
||||
// block on all ports
|
||||
// }
|
||||
// unblock on all ports
|
||||
// return ready index
|
||||
// }
|
||||
//
|
||||
// Most notably, the iterations over all of the ports shouldn't be
|
||||
// necessary.
|
||||
unsafe {
|
||||
let mut amt = 0;
|
||||
for p in self.iter() {
|
||||
assert!(!(*p).selecting.load(Relaxed));
|
||||
amt += 1;
|
||||
if (*p).can_recv() {
|
||||
return (*p).selection_id;
|
||||
}
|
||||
}
|
||||
assert!(amt > 0);
|
||||
|
||||
let mut ready_index = amt;
|
||||
let mut ready_id = uint::max_value;
|
||||
let mut iter = self.iter().enumerate();
|
||||
|
||||
// Acquire a number of blocking contexts, and block on each one
|
||||
// sequentially until one fails. If one fails, then abort
|
||||
// immediately so we can go unblock on all the other ports.
|
||||
BlockingContext::many(amt, |ctx| {
|
||||
let (i, packet) = iter.next().unwrap();
|
||||
(*packet).selecting.store(true, SeqCst);
|
||||
if !ctx.block(&mut (*packet).data,
|
||||
&mut (*packet).to_wake,
|
||||
|| (*packet).decrement()) {
|
||||
(*packet).abort_selection(false);
|
||||
(*packet).selecting.store(false, SeqCst);
|
||||
ready_index = i;
|
||||
ready_id = (*packet).selection_id;
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
|
||||
// Abort the selection process on each port. If the abort process
|
||||
// returns `true`, then that means that the port is ready to receive
|
||||
// some data. Note that this also means that the port may have yet
|
||||
// to have fully read the `to_wake` field and woken us up (although
|
||||
// the wakeup is guaranteed to fail).
|
||||
//
|
||||
// This situation happens in the window of where a sender invokes
|
||||
// increment(), sees -1, and then decides to wake up the task. After
|
||||
// all this is done, the sending thread will set `selecting` to
|
||||
// `false`. Until this is done, we cannot return. If we were to
|
||||
// return, then a sender could wake up a port which has gone back to
|
||||
// sleep after this call to `select`.
|
||||
//
|
||||
// Note that there is only a "fairly small window" between when an
// increment() decides that it should wake a thread up and when the
// `selecting` bit is set to false. For now, the implementation just spins
// in a yield loop. This is very distasteful, but this
// implementation is already nowhere near what it should ideally be.
// A rewrite should focus on avoiding a yield loop, and for now this
// implementation is tiding us over to a more efficient "don't
// iterate over everything every time" implementation.
|
||||
for packet in self.iter().take(ready_index) {
|
||||
if (*packet).abort_selection(true) {
|
||||
ready_id = (*packet).selection_id;
|
||||
while (*packet).selecting.load(Relaxed) {
|
||||
imp::yield_now();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sanity check for now to make sure that everyone is turned off.
|
||||
for packet in self.iter() {
|
||||
assert!(!(*packet).selecting.load(Relaxed));
|
||||
}
|
||||
|
||||
return ready_id;
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn remove(&self, packet: *mut Packet) {
|
||||
let this = cast::transmute_mut(self);
|
||||
assert!(!(*packet).selecting.load(Relaxed));
|
||||
if (*packet).select_prev.is_null() {
|
||||
assert_eq!(packet, this.head);
|
||||
this.head = (*packet).select_next;
|
||||
} else {
|
||||
(*(*packet).select_prev).select_next = (*packet).select_next;
|
||||
}
|
||||
if (*packet).select_next.is_null() {
|
||||
assert_eq!(packet, this.tail);
|
||||
this.tail = (*packet).select_prev;
|
||||
} else {
|
||||
(*(*packet).select_next).select_prev = (*packet).select_prev;
|
||||
}
|
||||
(*packet).select_next = 0 as *mut Packet;
|
||||
(*packet).select_prev = 0 as *mut Packet;
|
||||
(*packet).selection_id = 0;
|
||||
}
|
||||
|
||||
fn iter(&self) -> PacketIterator { PacketIterator { cur: self.head } }
|
||||
}
|
||||
|
||||
impl<'self, T: Send> Handle<'self, T> {
|
||||
/// Receive a value on the underlying port. Has the same semantics as
|
||||
/// `Port.recv`
|
||||
pub fn recv(&mut self) -> T { self.port.recv() }
|
||||
/// Block to receive a value on the underlying port, returning `Some` on
|
||||
/// success or `None` if the channel disconnects. This function has the same
|
||||
/// semantics as `Port.recv_opt`
|
||||
pub fn recv_opt(&mut self) -> Option<T> { self.port.recv_opt() }
|
||||
/// Immediately attempt to receive a value on a port, this function will
|
||||
/// never block. Has the same semantics as `Port.try_recv`.
|
||||
pub fn try_recv(&mut self) -> Option<T> { self.port.try_recv() }
|
||||
}
|
||||
|
||||
#[unsafe_destructor]
|
||||
impl Drop for Select {
|
||||
fn drop(&mut self) {
|
||||
assert!(self.head.is_null());
|
||||
assert!(self.tail.is_null());
|
||||
}
|
||||
}
|
||||
|
||||
#[unsafe_destructor]
|
||||
impl<'self, T: Send> Drop for Handle<'self, T> {
|
||||
fn drop(&mut self) {
|
||||
unsafe { self.selector.remove(self.port.queue.packet()) }
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator<*mut Packet> for PacketIterator {
|
||||
fn next(&mut self) -> Option<*mut Packet> {
|
||||
if self.cur.is_null() {
|
||||
None
|
||||
} else {
|
||||
let ret = Some(self.cur);
|
||||
unsafe { self.cur = (*self.cur).select_next; }
|
||||
ret
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::super::*;
|
||||
use prelude::*;
|
||||
|
||||
test!(fn smoke() {
|
||||
let (mut p1, c1) = Chan::<int>::new();
|
||||
let (mut p2, c2) = Chan::<int>::new();
|
||||
c1.send(1);
|
||||
select! (
|
||||
foo = p1.recv() => { assert_eq!(foo, 1); },
|
||||
_bar = p2.recv() => { fail!() }
|
||||
)
|
||||
c2.send(2);
|
||||
select! (
|
||||
_foo = p1.recv() => { fail!() },
|
||||
bar = p2.recv() => { assert_eq!(bar, 2) }
|
||||
)
|
||||
drop(c1);
|
||||
select! (
|
||||
foo = p1.recv_opt() => { assert_eq!(foo, None); },
|
||||
_bar = p2.recv() => { fail!() }
|
||||
)
|
||||
drop(c2);
|
||||
select! (
|
||||
bar = p2.recv_opt() => { assert_eq!(bar, None); },
|
||||
)
|
||||
})
|
||||
|
||||
test!(fn smoke2() {
|
||||
let (mut p1, _c1) = Chan::<int>::new();
|
||||
let (mut p2, _c2) = Chan::<int>::new();
|
||||
let (mut p3, _c3) = Chan::<int>::new();
|
||||
let (mut p4, _c4) = Chan::<int>::new();
|
||||
let (mut p5, c5) = Chan::<int>::new();
|
||||
c5.send(4);
|
||||
select! (
|
||||
_foo = p1.recv() => { fail!("1") },
|
||||
_foo = p2.recv() => { fail!("2") },
|
||||
_foo = p3.recv() => { fail!("3") },
|
||||
_foo = p4.recv() => { fail!("4") },
|
||||
foo = p5.recv() => { assert_eq!(foo, 4); }
|
||||
)
|
||||
})
|
||||
|
||||
test!(fn closed() {
|
||||
let (mut p1, _c1) = Chan::<int>::new();
|
||||
let (mut p2, c2) = Chan::<int>::new();
|
||||
drop(c2);
|
||||
|
||||
select! (
|
||||
_a1 = p1.recv_opt() => { fail!() },
|
||||
a2 = p2.recv_opt() => { assert_eq!(a2, None); }
|
||||
)
|
||||
})
|
||||
|
||||
#[test]
|
||||
fn unblocks() {
|
||||
use std::io::timer;
|
||||
|
||||
let (mut p1, c1) = Chan::<int>::new();
|
||||
let (mut p2, _c2) = Chan::<int>::new();
|
||||
let (p3, c3) = Chan::<int>::new();
|
||||
|
||||
do spawn {
|
||||
timer::sleep(3);
|
||||
c1.send(1);
|
||||
p3.recv();
|
||||
timer::sleep(3);
|
||||
}
|
||||
|
||||
select! (
|
||||
a = p1.recv() => { assert_eq!(a, 1); },
|
||||
_b = p2.recv() => { fail!() }
|
||||
)
|
||||
c3.send(1);
|
||||
select! (
|
||||
a = p1.recv_opt() => { assert_eq!(a, None); },
|
||||
_b = p2.recv() => { fail!() }
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn both_ready() {
|
||||
use std::io::timer;
|
||||
|
||||
let (mut p1, c1) = Chan::<int>::new();
|
||||
let (mut p2, c2) = Chan::<int>::new();
|
||||
let (p3, c3) = Chan::<()>::new();
|
||||
|
||||
do spawn {
|
||||
timer::sleep(3);
|
||||
c1.send(1);
|
||||
c2.send(2);
|
||||
p3.recv();
|
||||
}
|
||||
|
||||
select! (
|
||||
a = p1.recv() => { assert_eq!(a, 1); },
|
||||
a = p2.recv() => { assert_eq!(a, 2); }
|
||||
)
|
||||
select! (
|
||||
a = p1.recv() => { assert_eq!(a, 1); },
|
||||
a = p2.recv() => { assert_eq!(a, 2); }
|
||||
)
|
||||
c3.send(());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stress() {
|
||||
static AMT: int = 10000;
|
||||
let (mut p1, c1) = Chan::<int>::new();
|
||||
let (mut p2, c2) = Chan::<int>::new();
|
||||
let (p3, c3) = Chan::<()>::new();
|
||||
|
||||
do spawn {
|
||||
for i in range(0, AMT) {
|
||||
if i % 2 == 0 {
|
||||
c1.send(i);
|
||||
} else {
|
||||
c2.send(i);
|
||||
}
|
||||
p3.recv();
|
||||
}
|
||||
}
|
||||
|
||||
for i in range(0, AMT) {
|
||||
select! (
|
||||
i1 = p1.recv() => { assert!(i % 2 == 0 && i == i1); },
|
||||
i2 = p2.recv() => { assert!(i % 2 == 1 && i == i2); }
|
||||
)
|
||||
c3.send(());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stress_native() {
|
||||
use std::rt::thread::Thread;
|
||||
use std::unstable::run_in_bare_thread;
|
||||
static AMT: int = 10000;
|
||||
|
||||
do run_in_bare_thread {
|
||||
let (mut p1, c1) = Chan::<int>::new();
|
||||
let (mut p2, c2) = Chan::<int>::new();
|
||||
let (p3, c3) = Chan::<()>::new();
|
||||
|
||||
let t = do Thread::start {
|
||||
for i in range(0, AMT) {
|
||||
if i % 2 == 0 {
|
||||
c1.send(i);
|
||||
} else {
|
||||
c2.send(i);
|
||||
}
|
||||
p3.recv();
|
||||
}
|
||||
};
|
||||
|
||||
for i in range(0, AMT) {
|
||||
select! (
|
||||
i1 = p1.recv() => { assert!(i % 2 == 0 && i == i1); },
|
||||
i2 = p2.recv() => { assert!(i % 2 == 1 && i == i2); }
|
||||
)
|
||||
c3.send(());
|
||||
}
|
||||
t.join();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn native_both_ready() {
|
||||
use std::rt::thread::Thread;
|
||||
use std::unstable::run_in_bare_thread;
|
||||
|
||||
do run_in_bare_thread {
|
||||
let (mut p1, c1) = Chan::<int>::new();
|
||||
let (mut p2, c2) = Chan::<int>::new();
|
||||
let (p3, c3) = Chan::<()>::new();
|
||||
|
||||
let t = do Thread::start {
|
||||
c1.send(1);
|
||||
c2.send(2);
|
||||
p3.recv();
|
||||
};
|
||||
|
||||
select! (
|
||||
a = p1.recv() => { assert_eq!(a, 1); },
|
||||
b = p2.recv() => { assert_eq!(b, 2); }
|
||||
)
|
||||
select! (
|
||||
a = p1.recv() => { assert_eq!(a, 1); },
|
||||
b = p2.recv() => { assert_eq!(b, 2); }
|
||||
)
|
||||
c3.send(());
|
||||
t.join();
|
||||
}
|
||||
}
|
||||
}
|
@ -203,15 +203,16 @@ pub mod rt;
|
||||
mod std {
|
||||
pub use clone;
|
||||
pub use cmp;
|
||||
pub use comm;
|
||||
pub use condition;
|
||||
pub use fmt;
|
||||
pub use io;
|
||||
pub use kinds;
|
||||
pub use local_data;
|
||||
pub use logging;
|
||||
pub use logging;
|
||||
pub use option;
|
||||
pub use os;
|
||||
pub use io;
|
||||
pub use rt;
|
||||
pub use str;
|
||||
pub use to_bytes;
|
||||
|
@ -1,5 +1,4 @@
|
||||
/* Multi-producer/multi-consumer bounded queue
|
||||
* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
|
||||
/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
@ -163,7 +162,6 @@ mod tests {
|
||||
use prelude::*;
|
||||
use option::*;
|
||||
use task;
|
||||
use comm;
|
||||
use super::Queue;
|
||||
|
||||
#[test]
|
||||
@ -174,10 +172,9 @@ mod tests {
|
||||
assert_eq!(None, q.pop());
|
||||
|
||||
for _ in range(0, nthreads) {
|
||||
let (port, chan) = comm::stream();
|
||||
chan.send(q.clone());
|
||||
let q = q.clone();
|
||||
do task::spawn_sched(task::SingleThreaded) {
|
||||
let mut q = port.recv();
|
||||
let mut q = q;
|
||||
for i in range(0, nmsgs) {
|
||||
assert!(q.push(i));
|
||||
}
|
||||
@ -186,12 +183,11 @@ mod tests {
|
||||
|
||||
let mut completion_ports = ~[];
|
||||
for _ in range(0, nthreads) {
|
||||
let (completion_port, completion_chan) = comm::stream();
|
||||
let (completion_port, completion_chan) = Chan::new();
|
||||
completion_ports.push(completion_port);
|
||||
let (port, chan) = comm::stream();
|
||||
chan.send(q.clone());
|
||||
let q = q.clone();
|
||||
do task::spawn_sched(task::SingleThreaded) {
|
||||
let mut q = port.recv();
|
||||
let mut q = q;
|
||||
let mut i = 0u;
|
||||
loop {
|
||||
match q.pop() {
|
||||
@ -206,7 +202,7 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
for completion_port in completion_ports.iter() {
|
||||
for completion_port in completion_ports.mut_iter() {
|
||||
assert_eq!(nmsgs, completion_port.recv());
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,4 @@
|
||||
/* Multi-producer/single-consumer queue
|
||||
* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
|
||||
/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
@ -27,163 +26,177 @@
|
||||
*/
|
||||
|
||||
//! A mostly lock-free multi-producer, single-consumer queue.
|
||||
// http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue
|
||||
|
||||
use unstable::sync::UnsafeArc;
|
||||
use unstable::atomics::{AtomicPtr,Relaxed,Release,Acquire};
|
||||
use ptr::{mut_null, to_mut_unsafe_ptr};
|
||||
// http://www.1024cores.net/home/lock-free-algorithms
|
||||
// /queues/non-intrusive-mpsc-node-based-queue
|
||||
|
||||
use cast;
|
||||
use option::*;
|
||||
use clone::Clone;
|
||||
use kinds::Send;
|
||||
use ops::Drop;
|
||||
use option::{Option, None, Some};
|
||||
use unstable::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
|
||||
use unstable::sync::UnsafeArc;
|
||||
|
||||
pub enum PopResult<T> {
    /// Some data has been popped
    Data(T),
    /// The queue is empty
    Empty,
    /// The queue is in an inconsistent state. Popping data should succeed, but
    /// some pushers have yet to make enough progress in order to allow a pop
    /// to succeed. It is recommended that a pop() occur "in the near future"
    /// in order to see if the sender has made progress or not.
    Inconsistent,
}
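As a hedged illustration of how a caller is expected to treat `Inconsistent` (modeled on the test at the bottom of this file; `drain_count` is a hypothetical helper, not part of this commit):

```rust
// Count everything currently in the queue, retrying while a producer is
// mid-push (the Inconsistent case) and stopping once the queue is truly Empty.
fn drain_count<T: Send, P: Send>(c: &mut Consumer<T, P>) -> uint {
    let mut n = 0u;
    loop {
        match c.pop() {
            Data(_) => n += 1,
            Empty => return n,
            // A push is in flight; the data will show up "in the near
            // future", so simply try again (spinning or yielding here).
            Inconsistent => (),
        }
    }
}
```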
|
||||
|
||||
struct Node<T> {
|
||||
next: AtomicPtr<Node<T>>,
|
||||
value: Option<T>,
|
||||
}
|
||||
|
||||
impl<T> Node<T> {
|
||||
fn empty() -> Node<T> {
|
||||
Node{next: AtomicPtr::new(mut_null()), value: None}
|
||||
}
|
||||
|
||||
fn with_value(value: T) -> Node<T> {
|
||||
Node{next: AtomicPtr::new(mut_null()), value: Some(value)}
|
||||
}
|
||||
}
|
||||
|
||||
struct State<T> {
|
||||
pad0: [u8, ..64],
|
||||
struct State<T, P> {
|
||||
head: AtomicPtr<Node<T>>,
|
||||
pad1: [u8, ..64],
|
||||
stub: Node<T>,
|
||||
pad2: [u8, ..64],
|
||||
tail: *mut Node<T>,
|
||||
pad3: [u8, ..64],
|
||||
packet: P,
|
||||
}
|
||||
|
||||
struct Queue<T> {
|
||||
priv state: UnsafeArc<State<T>>,
|
||||
pub struct Consumer<T, P> {
|
||||
priv state: UnsafeArc<State<T, P>>,
|
||||
}
|
||||
|
||||
impl<T: Send> Clone for Queue<T> {
|
||||
fn clone(&self) -> Queue<T> {
|
||||
Queue {
|
||||
state: self.state.clone()
|
||||
pub struct Producer<T, P> {
|
||||
priv state: UnsafeArc<State<T, P>>,
|
||||
}
|
||||
|
||||
impl<T: Send, P: Send> Clone for Producer<T, P> {
|
||||
fn clone(&self) -> Producer<T, P> {
|
||||
Producer { state: self.state.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
pub fn queue<T: Send, P: Send>(p: P) -> (Consumer<T, P>, Producer<T, P>) {
|
||||
unsafe {
|
||||
let (a, b) = UnsafeArc::new2(State::new(p));
|
||||
(Consumer { state: a }, Producer { state: b })
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Node<T> {
|
||||
unsafe fn new(v: Option<T>) -> *mut Node<T> {
|
||||
cast::transmute(~Node {
|
||||
next: AtomicPtr::new(0 as *mut Node<T>),
|
||||
value: v,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send, P: Send> State<T, P> {
|
||||
pub unsafe fn new(p: P) -> State<T, P> {
|
||||
let stub = Node::new(None);
|
||||
State {
|
||||
head: AtomicPtr::new(stub),
|
||||
tail: stub,
|
||||
packet: p,
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn push(&mut self, t: T) {
|
||||
let n = Node::new(Some(t));
|
||||
let prev = self.head.swap(n, AcqRel);
|
||||
(*prev).next.store(n, Release);
|
||||
}
|
||||
|
||||
unsafe fn pop(&mut self) -> PopResult<T> {
|
||||
let tail = self.tail;
|
||||
let next = (*tail).next.load(Acquire);
|
||||
|
||||
if !next.is_null() {
|
||||
self.tail = next;
|
||||
assert!((*tail).value.is_none());
|
||||
assert!((*next).value.is_some());
|
||||
let ret = (*next).value.take_unwrap();
|
||||
let _: ~Node<T> = cast::transmute(tail);
|
||||
return Data(ret);
|
||||
}
|
||||
|
||||
if self.head.load(Acquire) == tail {Empty} else {Inconsistent}
|
||||
}
|
||||
|
||||
unsafe fn is_empty(&mut self) -> bool {
|
||||
return (*self.tail).next.load(Acquire).is_null();
|
||||
}
|
||||
}
|
||||
|
||||
#[unsafe_destructor]
|
||||
impl<T: Send, P: Send> Drop for State<T, P> {
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
let mut cur = self.tail;
|
||||
while !cur.is_null() {
|
||||
let next = (*cur).next.load(Relaxed);
|
||||
let _: ~Node<T> = cast::transmute(cur);
|
||||
cur = next;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> State<T> {
|
||||
pub fn new() -> State<T> {
|
||||
State{
|
||||
pad0: [0, ..64],
|
||||
head: AtomicPtr::new(mut_null()),
|
||||
pad1: [0, ..64],
|
||||
stub: Node::<T>::empty(),
|
||||
pad2: [0, ..64],
|
||||
tail: mut_null(),
|
||||
pad3: [0, ..64],
|
||||
}
|
||||
}
|
||||
|
||||
fn init(&mut self) {
|
||||
let stub = self.get_stub_unsafe();
|
||||
self.head.store(stub, Relaxed);
|
||||
self.tail = stub;
|
||||
}
|
||||
|
||||
fn get_stub_unsafe(&mut self) -> *mut Node<T> {
|
||||
to_mut_unsafe_ptr(&mut self.stub)
|
||||
}
|
||||
|
||||
fn push(&mut self, value: T) {
|
||||
unsafe {
|
||||
let node = cast::transmute(~Node::with_value(value));
|
||||
self.push_node(node);
|
||||
}
|
||||
}
|
||||
|
||||
fn push_node(&mut self, node: *mut Node<T>) {
|
||||
unsafe {
|
||||
(*node).next.store(mut_null(), Release);
|
||||
let prev = self.head.swap(node, Relaxed);
|
||||
(*prev).next.store(node, Release);
|
||||
}
|
||||
}
|
||||
|
||||
fn pop(&mut self) -> Option<T> {
|
||||
unsafe {
|
||||
let mut tail = self.tail;
|
||||
let mut next = (*tail).next.load(Acquire);
|
||||
let stub = self.get_stub_unsafe();
|
||||
if tail == stub {
|
||||
if mut_null() == next {
|
||||
return None
|
||||
}
|
||||
self.tail = next;
|
||||
tail = next;
|
||||
next = (*next).next.load(Acquire);
|
||||
}
|
||||
if next != mut_null() {
|
||||
let tail: ~Node<T> = cast::transmute(tail);
|
||||
self.tail = next;
|
||||
return tail.value
|
||||
}
|
||||
let head = self.head.load(Relaxed);
|
||||
if tail != head {
|
||||
return None
|
||||
}
|
||||
self.push_node(stub);
|
||||
next = (*tail).next.load(Acquire);
|
||||
if next != mut_null() {
|
||||
let tail: ~Node<T> = cast::transmute(tail);
|
||||
self.tail = next;
|
||||
return tail.value
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> Queue<T> {
|
||||
pub fn new() -> Queue<T> {
|
||||
unsafe {
|
||||
let q = Queue{state: UnsafeArc::new(State::new())};
|
||||
(*q.state.get()).init();
|
||||
q
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send, P: Send> Producer<T, P> {
|
||||
pub fn push(&mut self, value: T) {
|
||||
unsafe { (*self.state.get()).push(value) }
|
||||
}
|
||||
pub fn is_empty(&self) -> bool {
|
||||
unsafe{ (*self.state.get()).is_empty() }
|
||||
}
|
||||
pub unsafe fn packet(&self) -> *mut P {
|
||||
&mut (*self.state.get()).packet as *mut P
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pop(&mut self) -> Option<T> {
|
||||
unsafe{ (*self.state.get()).pop() }
|
||||
impl<T: Send, P: Send> Consumer<T, P> {
|
||||
pub fn pop(&mut self) -> PopResult<T> {
|
||||
unsafe { (*self.state.get()).pop() }
|
||||
}
|
||||
pub fn casual_pop(&mut self) -> Option<T> {
|
||||
match self.pop() {
|
||||
Data(t) => Some(t),
|
||||
Empty | Inconsistent => None,
|
||||
}
|
||||
}
|
||||
pub unsafe fn packet(&self) -> *mut P {
|
||||
&mut (*self.state.get()).packet as *mut P
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use prelude::*;
|
||||
use option::*;
|
||||
|
||||
use task;
|
||||
use comm;
|
||||
use super::Queue;
|
||||
use super::{queue, Data, Empty, Inconsistent};
|
||||
|
||||
#[test]
|
||||
fn test_full() {
|
||||
let (_, mut p) = queue(());
|
||||
p.push(~1);
|
||||
p.push(~2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test() {
|
||||
let nthreads = 8u;
|
||||
let nmsgs = 1000u;
|
||||
let mut q = Queue::new();
|
||||
assert_eq!(None, q.pop());
|
||||
let (mut c, p) = queue(());
|
||||
match c.pop() {
|
||||
Empty => {}
|
||||
Inconsistent | Data(..) => fail!()
|
||||
}
|
||||
|
||||
for _ in range(0, nthreads) {
|
||||
let (port, chan) = comm::stream();
|
||||
chan.send(q.clone());
|
||||
let q = p.clone();
|
||||
do task::spawn_sched(task::SingleThreaded) {
|
||||
let mut q = port.recv();
|
||||
let mut q = q;
|
||||
for i in range(0, nmsgs) {
|
||||
q.push(i);
|
||||
}
|
||||
@ -191,13 +204,10 @@ mod tests {
|
||||
}
|
||||
|
||||
let mut i = 0u;
|
||||
loop {
|
||||
match q.pop() {
|
||||
None => {},
|
||||
Some(_) => {
|
||||
i += 1;
|
||||
if i == nthreads*nmsgs { break }
|
||||
}
|
||||
while i < nthreads * nmsgs {
|
||||
match c.pop() {
|
||||
Empty | Inconsistent => {},
|
||||
Data(_) => { i += 1 }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
296  src/libstd/rt/spsc_queue.rs  Normal file
@ -0,0 +1,296 @@
|
||||
/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* The views and conclusions contained in the software and documentation are
|
||||
* those of the authors and should not be interpreted as representing official
|
||||
* policies, either expressed or implied, of Dmitry Vyukov.
|
||||
*/
|
||||
|
||||
// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
|
||||
use cast;
|
||||
use kinds::Send;
|
||||
use ops::Drop;
|
||||
use option::{Some, None, Option};
|
||||
use unstable::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
|
||||
use unstable::sync::UnsafeArc;
|
||||
|
||||
// Node within the linked list queue of messages to send
|
||||
struct Node<T> {
|
||||
// XXX: this could be an uninitialized T if we're careful enough, and
|
||||
// that would reduce memory usage (and be a bit faster).
|
||||
// is it worth it?
|
||||
value: Option<T>, // nullable for re-use of nodes
|
||||
next: AtomicPtr<Node<T>>, // next node in the queue
|
||||
}
|
||||
|
||||
// The producer/consumer halves both need access to the `tail` field, and if
|
||||
// they both have access to that we may as well just give them both access
|
||||
// to this whole structure.
|
||||
struct State<T, P> {
|
||||
// consumer fields
|
||||
tail: *mut Node<T>, // where to pop from
|
||||
tail_prev: AtomicPtr<Node<T>>, // where to pop from
|
||||
|
||||
// producer fields
|
||||
head: *mut Node<T>, // where to push to
|
||||
first: *mut Node<T>, // where to get new nodes from
|
||||
tail_copy: *mut Node<T>, // between first/tail
|
||||
|
||||
// Cache maintenance fields. Additions and subtractions are stored
|
||||
// separately in order to allow them to use nonatomic addition/subtraction.
|
||||
cache_bound: uint,
|
||||
cache_additions: AtomicUint,
|
||||
cache_subtractions: AtomicUint,
|
||||
|
||||
packet: P,
|
||||
}
|
||||
|
||||
pub struct Producer<T, P> {
|
||||
priv state: UnsafeArc<State<T, P>>,
|
||||
}
|
||||
|
||||
pub struct Consumer<T, P> {
|
||||
priv state: UnsafeArc<State<T, P>>,
|
||||
}
|
||||
|
||||
pub fn queue<T: Send, P: Send>(bound: uint,
|
||||
p: P) -> (Consumer<T, P>, Producer<T, P>)
|
||||
{
|
||||
let n1 = Node::new();
|
||||
let n2 = Node::new();
|
||||
unsafe { (*n1).next.store(n2, Relaxed) }
|
||||
let state = State {
|
||||
tail: n2,
|
||||
tail_prev: AtomicPtr::new(n1),
|
||||
head: n2,
|
||||
first: n1,
|
||||
tail_copy: n1,
|
||||
cache_bound: bound,
|
||||
cache_additions: AtomicUint::new(0),
|
||||
cache_subtractions: AtomicUint::new(0),
|
||||
packet: p,
|
||||
};
|
||||
let (arc1, arc2) = UnsafeArc::new2(state);
|
||||
(Consumer { state: arc1 }, Producer { state: arc2 })
|
||||
}
|
||||
|
||||
impl<T: Send> Node<T> {
|
||||
fn new() -> *mut Node<T> {
|
||||
unsafe {
|
||||
cast::transmute(~Node {
|
||||
value: None,
|
||||
next: AtomicPtr::new(0 as *mut Node<T>),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send, P: Send> Producer<T, P> {
|
||||
pub fn push(&mut self, t: T) {
|
||||
unsafe { (*self.state.get()).push(t) }
|
||||
}
|
||||
pub fn is_empty(&self) -> bool {
|
||||
unsafe { (*self.state.get()).is_empty() }
|
||||
}
|
||||
pub unsafe fn packet(&self) -> *mut P {
|
||||
&mut (*self.state.get()).packet as *mut P
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send, P: Send> Consumer<T, P> {
|
||||
pub fn pop(&mut self) -> Option<T> {
|
||||
unsafe { (*self.state.get()).pop() }
|
||||
}
|
||||
pub unsafe fn packet(&self) -> *mut P {
|
||||
&mut (*self.state.get()).packet as *mut P
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send, P: Send> State<T, P> {
|
||||
// remember that there is only one thread executing `push` (and only one
|
||||
// thread executing `pop`)
|
||||
unsafe fn push(&mut self, t: T) {
|
||||
// Acquire a node (which either uses a cached one or allocates a new
|
||||
// one), and then append this to the 'head' node.
|
||||
let n = self.alloc();
|
||||
assert!((*n).value.is_none());
|
||||
(*n).value = Some(t);
|
||||
(*n).next.store(0 as *mut Node<T>, Relaxed);
|
||||
(*self.head).next.store(n, Release);
|
||||
self.head = n;
|
||||
}
|
||||
|
||||
unsafe fn alloc(&mut self) -> *mut Node<T> {
|
||||
// First try to see if we can consume the 'first' node for our uses.
|
||||
// We try to avoid as many atomic instructions as possible here, so
|
||||
// the addition to cache_subtractions is not atomic (plus we're the
|
||||
// only one subtracting from the cache).
|
||||
if self.first != self.tail_copy {
|
||||
if self.cache_bound > 0 {
|
||||
let b = self.cache_subtractions.load(Relaxed);
|
||||
self.cache_subtractions.store(b + 1, Relaxed);
|
||||
}
|
||||
let ret = self.first;
|
||||
self.first = (*ret).next.load(Relaxed);
|
||||
return ret;
|
||||
}
|
||||
// If the above fails, then update our copy of the tail and try
|
||||
// again.
|
||||
self.tail_copy = self.tail_prev.load(Acquire);
|
||||
if self.first != self.tail_copy {
|
||||
if self.cache_bound > 0 {
|
||||
let b = self.cache_subtractions.load(Relaxed);
|
||||
self.cache_subtractions.store(b + 1, Relaxed);
|
||||
}
|
||||
let ret = self.first;
|
||||
self.first = (*ret).next.load(Relaxed);
|
||||
return ret;
|
||||
}
|
||||
// If all of that fails, then we have to allocate a new node
|
||||
// (there's nothing in the node cache).
|
||||
Node::new()
|
||||
}
|
||||
|
||||
// remember that there is only one thread executing `pop` (and only one
|
||||
// thread executing `push`)
|
||||
unsafe fn pop(&mut self) -> Option<T> {
|
||||
// The `tail` node is not actually a used node, but rather a
|
||||
// sentinel from which we should start popping. Hence, look at
|
||||
// tail's next field and see if we can use it. If we do a pop, then
|
||||
// the current tail node is a candidate for going into the cache.
|
||||
let tail = self.tail;
|
||||
let next = (*tail).next.load(Acquire);
|
||||
if next.is_null() { return None }
|
||||
assert!((*next).value.is_some());
|
||||
let ret = (*next).value.take();
|
||||
|
||||
self.tail = next;
|
||||
if self.cache_bound == 0 {
|
||||
self.tail_prev.store(tail, Release);
|
||||
} else {
|
||||
// XXX: this is dubious with overflow.
|
||||
let additions = self.cache_additions.load(Relaxed);
|
||||
let subtractions = self.cache_subtractions.load(Relaxed);
|
||||
let size = additions - subtractions;
|
||||
|
||||
if size < self.cache_bound {
|
||||
self.tail_prev.store(tail, Release);
|
||||
self.cache_additions.store(additions + 1, Relaxed);
|
||||
} else {
|
||||
(*self.tail_prev.load(Relaxed)).next.store(next, Relaxed);
|
||||
// We have successfully erased all references to 'tail', so
|
||||
// now we can safely drop it.
|
||||
let _: ~Node<T> = cast::transmute(tail);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsafe fn is_empty(&self) -> bool {
|
||||
let tail = self.tail;
|
||||
let next = (*tail).next.load(Acquire);
|
||||
return next.is_null();
|
||||
}
|
||||
}
|
||||
|
||||
#[unsafe_destructor]
|
||||
impl<T: Send, P: Send> Drop for State<T, P> {
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
let mut cur = self.first;
|
||||
while !cur.is_null() {
|
||||
let next = (*cur).next.load(Relaxed);
|
||||
let _n: ~Node<T> = cast::transmute(cur);
|
||||
cur = next;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use prelude::*;
|
||||
use super::queue;
|
||||
use task;
|
||||
|
||||
#[test]
|
||||
fn smoke() {
|
||||
let (mut c, mut p) = queue(0, ());
|
||||
p.push(1);
|
||||
p.push(2);
|
||||
assert_eq!(c.pop(), Some(1));
|
||||
assert_eq!(c.pop(), Some(2));
|
||||
assert_eq!(c.pop(), None);
|
||||
p.push(3);
|
||||
p.push(4);
|
||||
assert_eq!(c.pop(), Some(3));
|
||||
assert_eq!(c.pop(), Some(4));
|
||||
assert_eq!(c.pop(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn drop_full() {
|
||||
let (_, mut p) = queue(0, ());
|
||||
p.push(~1);
|
||||
p.push(~2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn smoke_bound() {
|
||||
let (mut c, mut p) = queue(1, ());
|
||||
p.push(1);
|
||||
p.push(2);
|
||||
assert_eq!(c.pop(), Some(1));
|
||||
assert_eq!(c.pop(), Some(2));
|
||||
assert_eq!(c.pop(), None);
|
||||
p.push(3);
|
||||
p.push(4);
|
||||
assert_eq!(c.pop(), Some(3));
|
||||
assert_eq!(c.pop(), Some(4));
|
||||
assert_eq!(c.pop(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stress() {
|
||||
stress_bound(0);
|
||||
stress_bound(1);
|
||||
|
||||
fn stress_bound(bound: uint) {
|
||||
let (c, mut p) = queue(bound, ());
|
||||
do task::spawn_sched(task::SingleThreaded) {
|
||||
let mut c = c;
|
||||
for _ in range(0, 100000) {
|
||||
loop {
|
||||
match c.pop() {
|
||||
Some(1) => break,
|
||||
Some(_) => fail!(),
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _ in range(0, 100000) {
|
||||
p.push(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -26,7 +26,6 @@ use option::{Option, Some, None};
|
||||
use rt::borrowck::BorrowRecord;
|
||||
use rt::borrowck;
|
||||
use rt::context::Context;
|
||||
use rt::context;
|
||||
use rt::env;
|
||||
use io::Writer;
|
||||
use rt::kill::Death;