Reserve 'yield' keyword

Rename task::yield() to task::deschedule().

Fixes #8494.
Kevin Ballard 2013-08-16 12:49:40 -07:00
parent 680eb71564
commit 418e1ebae6
23 changed files with 75 additions and 72 deletions
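For callers the change is mechanical: every `task::yield()` becomes `task::deschedule()`, with the same behavior (the running task steps aside and the scheduler picks the next runnable task). A minimal before/after sketch in the pre-1.0 dialect this tree uses; it only compiles against a rustc of this era:

~~~
use std::task;

fn main() {
    do task::spawn {
        // was: task::yield();
        task::deschedule(); // step aside so another task may run
    }
    // Give the child a turn before main exits.
    task::deschedule();
}
~~~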

View File

@@ -762,7 +762,7 @@ mod tests {
             do 10.times {
                 let tmp = *num;
                 *num = -1;
-                task::yield();
+                task::deschedule();
                 *num = tmp + 1;
             }
             c.send(());
@@ -913,7 +913,7 @@ mod tests {
             do read_mode.read |state| {
                 // if writer mistakenly got in, make sure it mutates state
                 // before we assert on it
-                do 5.times { task::yield(); }
+                do 5.times { task::deschedule(); }
                 // make sure writer didn't get in.
                 assert!(*state);
             }
@@ -921,9 +921,9 @@ mod tests {
     }
     #[test]
     fn test_rw_write_cond_downgrade_read_race() {
-        // Ideally the above test case would have yield statements in it that
+        // Ideally the above test case would have deschedule statements in it that
         // helped to expose the race nearly 100% of the time... but adding
-        // yields in the intuitively-right locations made it even less likely,
+        // deschedules in the intuitively-right locations made it even less likely,
         // and I wasn't sure why :( . This is a mediocre "next best" option.
         do 8.times { test_rw_write_cond_downgrade_read_race_helper() }
     }

View File

@@ -112,7 +112,7 @@ impl<Q:Send> Sem<Q> {
             }
         }
         // Uncomment if you wish to test for sem races. Not valgrind-friendly.
-        /* do 1000.times { task::yield(); } */
+        /* do 1000.times { task::deschedule(); } */
         // Need to wait outside the exclusive.
         if waiter_nobe.is_some() {
             let _ = waiter_nobe.unwrap().recv();
@@ -225,7 +225,7 @@ impl<'self> Condvar<'self> {
             }
         }
-        // If yield checks start getting inserted anywhere, we can be
+        // If deschedule checks start getting inserted anywhere, we can be
         // killed before or after enqueueing. Deciding whether to
         // unkillably reacquire the lock needs to happen atomically
         // wrt enqueuing.
@@ -731,11 +731,11 @@ mod tests {
         let s2 = ~s.clone();
         do task::spawn || {
             do s2.access {
-                do 5.times { task::yield(); }
+                do 5.times { task::deschedule(); }
             }
         }
         do s.access {
-            do 5.times { task::yield(); }
+            do 5.times { task::deschedule(); }
         }
     }
     #[test]
@@ -748,7 +748,7 @@ mod tests {
             s2.acquire();
             c.send(());
         }
-        do 5.times { task::yield(); }
+        do 5.times { task::deschedule(); }
         s.release();
         let _ = p.recv();
@@ -757,7 +757,7 @@ mod tests {
         let s = ~Semaphore::new(0);
         let s2 = ~s.clone();
         do task::spawn || {
-            do 5.times { task::yield(); }
+            do 5.times { task::deschedule(); }
             s2.release();
             let _ = p.recv();
         }
@@ -800,7 +800,7 @@ mod tests {
             c.send(());
         }
         let _ = p.recv(); // wait for child to come alive
-        do 5.times { task::yield(); } // let the child contend
+        do 5.times { task::deschedule(); } // let the child contend
     }
     let _ = p.recv(); // wait for child to be done
 }
@@ -837,7 +837,7 @@ mod tests {
         do n.times {
             do m.lock {
                 let oldval = *sharedstate;
-                task::yield();
+                task::deschedule();
                 *sharedstate = oldval + 1;
             }
         }
@@ -948,7 +948,7 @@ mod tests {
         let (p,c) = comm::stream();
         do task::spawn || { // linked
             let _ = p.recv(); // wait for sibling to get in the mutex
-            task::yield();
+            task::deschedule();
             fail!();
         }
         do m2.lock_cond |cond| {
@@ -1114,7 +1114,7 @@ mod tests {
         do n.times {
             do lock_rwlock_in_mode(x, mode) {
                 let oldval = *sharedstate;
-                task::yield();
+                task::deschedule();
                 *sharedstate = oldval + 1;
             }
         }

View File

@@ -141,7 +141,7 @@ pub struct Death {
     on_exit: Option<~fn(bool)>,
     // nesting level counter for task::unkillable calls (0 == killable).
     unkillable: int,
-    // nesting level counter for unstable::atomically calls (0 == can yield).
+    // nesting level counter for unstable::atomically calls (0 == can deschedule).
     wont_sleep: int,
     // A "spare" handle to the kill flag inside the kill handle. Used during
     // blocking/waking as an optimization to avoid two xadds on the refcount.
@@ -572,16 +572,16 @@ impl Death {
     }
     /// Enter a possibly-nested "atomic" section of code. Just for assertions.
-    /// All calls must be paired with a subsequent call to allow_yield.
+    /// All calls must be paired with a subsequent call to allow_deschedule.
     #[inline]
-    pub fn inhibit_yield(&mut self) {
+    pub fn inhibit_deschedule(&mut self) {
         self.wont_sleep += 1;
     }
     /// Exit a possibly-nested "atomic" section of code. Just for assertions.
-    /// All calls must be paired with a preceding call to inhibit_yield.
+    /// All calls must be paired with a preceding call to inhibit_deschedule.
     #[inline]
-    pub fn allow_yield(&mut self) {
+    pub fn allow_deschedule(&mut self) {
         rtassert!(self.wont_sleep != 0);
         self.wont_sleep -= 1;
     }
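Note that `wont_sleep` is a nesting counter, not a flag: every `inhibit_deschedule()` must be matched by exactly one `allow_deschedule()`, and sections may nest. A standalone model of that discipline; the `SleepGuard` type here is illustrative, not the runtime's actual `Death`:

~~~
// Illustrative model of the wont_sleep pairing discipline.
struct SleepGuard { wont_sleep: int }

impl SleepGuard {
    fn inhibit_deschedule(&mut self) { self.wont_sleep += 1; }
    fn allow_deschedule(&mut self) {
        assert!(self.wont_sleep != 0); // an unpaired exit is a bug
        self.wont_sleep -= 1;
    }
    fn can_deschedule(&self) -> bool { self.wont_sleep == 0 }
}

fn main() {
    let mut g = SleepGuard { wont_sleep: 0 };
    g.inhibit_deschedule();
    g.inhibit_deschedule(); // nesting just bumps the counter
    g.allow_deschedule();
    g.allow_deschedule();
    assert!(g.can_deschedule()); // back at level 0: sleeping is legal again
}
~~~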

View File

@@ -250,7 +250,7 @@ mod test {
                 let (c2, p3, c4) = x.take();
                 p3.recv();   // handshake parent
                 c4.send(()); // normal receive
-                task::yield();
+                task::deschedule();
                 c2.send(()); // select receive
             }
@@ -294,7 +294,7 @@ mod test {
             if send_on_chans.contains(&i) {
                 let c = Cell::new(c);
                 do spawntask_random {
-                    task::yield();
+                    task::deschedule();
                     c.take().send(());
                 }
             }

View File

@@ -537,7 +537,7 @@ pub fn with_task_name<U>(blk: &fn(Option<&str>) -> U) -> U {
     }
 }
-pub fn yield() {
+pub fn deschedule() {
     //! Yield control to the task scheduler
     use rt::local::Local;
@@ -568,10 +568,10 @@ pub fn failing() -> bool {
 *
 * ~~~
 * do task::unkillable {
-*     // detach / yield / destroy must all be called together
+*     // detach / deschedule / destroy must all be called together
 *     rustrt::rust_port_detach(po);
 *     // This must not result in the current task being killed
-*     task::yield();
+*     task::deschedule();
 *     rustrt::rust_port_destroy(po);
 * }
 * ~~~
@@ -689,7 +689,7 @@ fn test_spawn_unlinked_unsup_no_fail_down() { // grandchild sends on a port
         let ch = ch.clone();
         do spawn_unlinked {
             // Give middle task a chance to fail-but-not-kill-us.
-            do 16.times { task::yield(); }
+            do 16.times { task::deschedule(); }
             ch.send(()); // If killed first, grandparent hangs.
         }
         fail!(); // Shouldn't kill either (grand)parent or (grand)child.
@@ -712,7 +712,7 @@ fn test_spawn_unlinked_sup_no_fail_up() { // child unlinked fails
     do run_in_newsched_task {
         do spawn_supervised { fail!(); }
         // Give child a chance to fail-but-not-kill-us.
-        do 16.times { task::yield(); }
+        do 16.times { task::deschedule(); }
     }
 }
 #[ignore(reason = "linked failure")]
@@ -821,7 +821,7 @@ fn test_spawn_failure_propagate_grandchild() {
         do spawn_supervised {
             do spawn_supervised { block_forever(); }
         }
-        do 16.times { task::yield(); }
+        do 16.times { task::deschedule(); }
         fail!();
     };
     assert!(result.is_err());
@@ -838,7 +838,7 @@ fn test_spawn_failure_propagate_secondborn() {
         do spawn_supervised {
             do spawn { block_forever(); } // linked
         }
-        do 16.times { task::yield(); }
+        do 16.times { task::deschedule(); }
         fail!();
     };
     assert!(result.is_err());
@@ -855,7 +855,7 @@ fn test_spawn_failure_propagate_nephew_or_niece() {
         do spawn { // linked
             do spawn_supervised { block_forever(); }
         }
-        do 16.times { task::yield(); }
+        do 16.times { task::deschedule(); }
         fail!();
     };
     assert!(result.is_err());
@@ -872,7 +872,7 @@ fn test_spawn_linked_sup_propagate_sibling() {
         do spawn { // linked
             do spawn { block_forever(); } // linked
         }
-        do 16.times { task::yield(); }
+        do 16.times { task::deschedule(); }
         fail!();
     };
     assert!(result.is_err());
@@ -1169,12 +1169,12 @@ fn test_unkillable() {
         // We want to do this after failing
         do spawn_unlinked {
-            do 10.times { yield() }
+            do 10.times { deschedule() }
             ch.send(());
         }
         do spawn {
-            yield();
+            deschedule();
             // We want to fail after the unkillable task
             // blocks on recv
             fail!();
@@ -1205,12 +1205,12 @@ fn test_unkillable_nested() {
         // We want to do this after failing
         do spawn_unlinked || {
-            do 10.times { yield() }
+            do 10.times { deschedule() }
             ch.send(());
         }
         do spawn {
-            yield();
+            deschedule();
            // We want to fail after the unkillable task
            // blocks on recv
            fail!();
@@ -1277,7 +1277,7 @@ fn test_spawn_watched() {
         t.unlinked();
         t.watched();
         do t.spawn {
-            task::yield();
+            task::deschedule();
             fail!();
         }
     }
@@ -1313,7 +1313,7 @@ fn test_indestructible() {
             t.unwatched();
             do t.spawn {
                 p3.recv();
-                task::yield();
+                task::deschedule();
                 fail!();
             }
             c3.send(());

View File

@@ -272,9 +272,9 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
 /**
  * Enables a runtime assertion that no operation in the argument closure shall
- * use scheduler operations (yield, recv, spawn, etc). This is for use with
+ * use scheduler operations (deschedule, recv, spawn, etc). This is for use with
  * pthread mutexes, which may block the entire scheduler thread, rather than
- * just one task, and is hence prone to deadlocks if mixed with yielding.
+ * just one task, and is hence prone to deadlocks if mixed with descheduling.
 *
 * NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
 * synchronization whatsoever. It only makes sense to use for CPU-local issues.
@@ -288,10 +288,10 @@ pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
     if in_green_task_context() {
         let t = Local::unsafe_borrow::<Task>();
         do (|| {
-            (*t).death.inhibit_yield();
+            (*t).death.inhibit_deschedule();
             f()
         }).finally {
-            (*t).death.allow_yield();
+            (*t).death.allow_deschedule();
         }
     } else {
         f()
@@ -349,7 +349,7 @@ struct ExData<T> {
 * This uses a pthread mutex, not one that's aware of the userspace scheduler.
 * The user of an Exclusive must be careful not to invoke any functions that may
 * reschedule the task while holding the lock, or deadlock may result. If you
- * need to block or yield while accessing shared state, use extra::sync::RWArc.
+ * need to block or deschedule while accessing shared state, use extra::sync::RWArc.
 */
 pub struct Exclusive<T> {
     x: UnsafeAtomicRcBox<ExData<T>>
@@ -377,7 +377,7 @@ impl<T:Send> Exclusive<T> {
     // Exactly like std::arc::MutexArc::access(), but with the LittleLock
     // instead of a proper mutex. Same reason for being unsafe.
     //
-    // Currently, scheduling operations (i.e., yielding, receiving on a pipe,
+    // Currently, scheduling operations (i.e., descheduling, receiving on a pipe,
     // accessing the provided condition variable) are prohibited while inside
     // the Exclusive. Supporting that is a work in progress.
     #[inline]
@@ -431,7 +431,7 @@ mod tests {
     fn test_atomically() {
         // NB. The whole runtime will abort on an 'atomic-sleep' violation,
         // so we can't really test for the converse behaviour.
-        unsafe { do atomically { } } task::yield(); // oughtn't fail
+        unsafe { do atomically { } } task::deschedule(); // oughtn't fail
     }
     #[test]
@@ -545,7 +545,7 @@ mod tests {
             c.send(());
         }
         p.recv();
-        task::yield(); // Try to make the unwrapper get blocked first.
+        task::deschedule(); // Try to make the unwrapper get blocked first.
         let left_x = x.try_unwrap();
         assert!(left_x.is_left());
         util::ignore(left_x);
@@ -566,7 +566,7 @@ mod tests {
         do task::spawn {
             let x2 = x2.take();
             unsafe { do x2.with |_hello| { } }
-            task::yield();
+            task::deschedule();
         }
         assert!(x.unwrap() == ~~"hello");
@@ -612,7 +612,7 @@ mod tests {
         let x = Exclusive::new(~~"hello");
         let x2 = x.clone();
         do task::spawn {
-            do 10.times { task::yield(); } // try to let the unwrapper go
+            do 10.times { task::deschedule(); } // try to let the unwrapper go
             fail!(); // punt it awake from its deadlock
         }
         let _z = x.unwrap();
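Taken together, `atomically` brackets its closure with the `inhibit_deschedule`/`allow_deschedule` pair from kill.rs, so any scheduler operation inside it aborts the runtime. A hedged usage sketch, mirroring `test_atomically` above; the import path is an assumption based on this file's contents, and only CPU-local work belongs in the closure:

~~~
use std::unstable::sync::atomically; // path assumed from this file
use std::task;

fn main() {
    unsafe {
        do atomically {
            // CPU-local work only: calling deschedule, recv, or spawn
            // here would trip the wont_sleep assertion and abort.
            let _n = 2 + 2;
        }
    }
    task::deschedule(); // legal again outside the atomic section
}
~~~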

View File

@@ -477,6 +477,7 @@ fn mk_fresh_ident_interner() -> @ident_interner {
         "be",                 // 64
         "pure",               // 65
+        "yield",              // 66
     ];
     @ident_interner {
@@ -585,7 +586,6 @@ pub mod keywords {
     Once,
     Priv,
     Pub,
-    Pure,
     Ref,
     Return,
     Static,
@@ -601,6 +601,8 @@ pub mod keywords {
     // Reserved keywords
     Be,
+    Pure,
+    Yield,
 }
 impl Keyword {
@@ -628,7 +630,6 @@ pub mod keywords {
             Once => ident { name: 50, ctxt: 0 },
             Priv => ident { name: 51, ctxt: 0 },
             Pub => ident { name: 52, ctxt: 0 },
-            Pure => ident { name: 65, ctxt: 0 },
             Ref => ident { name: 53, ctxt: 0 },
             Return => ident { name: 54, ctxt: 0 },
             Static => ident { name: 27, ctxt: 0 },
@@ -642,6 +643,8 @@ pub mod keywords {
             Use => ident { name: 61, ctxt: 0 },
             While => ident { name: 62, ctxt: 0 },
             Be => ident { name: 64, ctxt: 0 },
+            Pure => ident { name: 65, ctxt: 0 },
+            Yield => ident { name: 66, ctxt: 0 },
         }
     }
 }
@@ -657,7 +660,7 @@ pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool {
 pub fn is_any_keyword(tok: &Token) -> bool {
     match *tok {
         token::IDENT(sid, false) => match sid.name {
-            8 | 27 | 32 .. 65 => true,
+            8 | 27 | 32 .. 66 => true,
             _ => false,
         },
         _ => false
@@ -677,7 +680,7 @@ pub fn is_strict_keyword(tok: &Token) -> bool {
 pub fn is_reserved_keyword(tok: &Token) -> bool {
     match *tok {
         token::IDENT(sid, false) => match sid.name {
-            64 .. 65 => true,
+            64 .. 66 => true,
             _ => false,
         },
         _ => false,
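The effect of widening these ranges to `.. 66` is that interner name 66 (`yield`) is now classified as a reserved keyword, so it is rejected as an identifier even though no syntax uses it yet; `pure` (65) also moves from the strict set into the reserved set. A small mirror of the new classification; `is_reserved_name` is a hypothetical helper for illustration, not part of libsyntax:

~~~
// Hypothetical mirror of the interner-name classification above.
fn is_reserved_name(name: uint) -> bool {
    match name {
        64 .. 66 => true, // be, pure, yield
        _ => false,
    }
}

fn main() {
    assert!(is_reserved_name(66));  // "yield" is now reserved
    assert!(!is_reserved_name(53)); // "ref" remains a strict keyword
}
~~~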

View File

@@ -31,7 +31,7 @@ fn child_generation(gens_left: uint, c: comm::Chan<()>) {
     do task::spawn_supervised {
         let c = c.take();
         if gens_left & 1 == 1 {
-            task::yield(); // shake things up a bit
+            task::deschedule(); // shake things up a bit
         }
         if gens_left > 0 {
             child_generation(gens_left - 1, c); // recurse

View File

@@ -35,7 +35,7 @@ extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
 fn count(n: uint) -> uint {
     unsafe {
-        task::yield();
+        task::deschedule();
         rustrt::rust_dbg_call(cb, n)
     }
 }

View File

@@ -21,5 +21,5 @@ fn child() { fail!(); }
 fn main() {
     let (p, _c) = comm::stream::<()>();
     task::spawn(|| child() );
-    task::yield();
+    task::deschedule();
 }

View File

@@ -15,7 +15,7 @@ use std::comm;
 use std::task;
 fn goodfail() {
-    task::yield();
+    task::deschedule();
     fail!("goodfail");
 }

View File

@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
-// This creates a bunch of yielding tasks that run concurrently
+// This creates a bunch of descheduling tasks that run concurrently
 // while holding onto C stacks
 use std::libc;
@@ -27,7 +27,7 @@ extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
     if data == 1u {
         data
     } else {
-        task::yield();
+        task::deschedule();
         count(data - 1u) + count(data - 1u)
     }
 }

View File

@@ -30,7 +30,7 @@ extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
 fn count(n: uint) -> uint {
     unsafe {
-        task::yield();
+        task::deschedule();
         rustrt::rust_dbg_call(cb, n)
     }
 }

View File

@@ -110,7 +110,7 @@ pub mod pipes {
                 let old_state = swap_state_acq(&mut (*p).state,
                                                blocked);
                 match old_state {
-                    empty | blocked => { task::yield(); }
+                    empty | blocked => { task::deschedule(); }
                     full => {
                         let payload = util::replace(&mut p.payload, None);
                         return Some(payload.unwrap())

View File

@@ -23,13 +23,13 @@ fn iloop() {
     task::spawn(|| die() );
     let (p, c) = comm::stream::<()>();
     loop {
-        // Sending and receiving here because these actions yield,
+        // Sending and receiving here because these actions deschedule,
         // at which point our child can kill us.
         c.send(());
         p.recv();
         // The above comment no longer makes sense but I'm
         // reluctant to remove a linked failure test case.
-        task::yield();
+        task::deschedule();
     }
 }

View File

@@ -21,5 +21,5 @@ pub fn main() {
     task::spawn(|| x(~"hello from second spawned fn", 66) );
     task::spawn(|| x(~"hello from third spawned fn", 67) );
     let mut i: int = 30;
-    while i > 0 { i = i - 1; info!("parent sleeping"); task::yield(); }
+    while i > 0 { i = i - 1; info!("parent sleeping"); task::deschedule(); }
 }

View File

@@ -36,5 +36,5 @@ pub fn main() {
     let c = p.recv();
     c.send(~"A");
     c.send(~"B");
-    task::yield();
+    task::deschedule();
 }

View File

@@ -28,7 +28,7 @@ fn test00() {
     // Sleep long enough for the task to finish.
     let mut i = 0;
     while i < 10000 {
-        task::yield();
+        task::deschedule();
         i += 1;
     }

View File

@@ -69,11 +69,11 @@ fn join(port: Port<bool>) -> bool {
 }
 fn supervised() {
-    // Yield to make sure the supervisor joins before we
+    // Deschedule to make sure the supervisor joins before we
     // fail. This is currently not needed because the supervisor
     // runs first, but I can imagine that changing.
     error!("supervised task=%?", 0);
-    task::yield();
+    task::deschedule();
     fail!();
 }

View File

@@ -19,10 +19,10 @@
 use std::task;
 fn supervised() {
-    // Yield to make sure the supervisor joins before we fail. This is
+    // Deschedule to make sure the supervisor joins before we fail. This is
     // currently not needed because the supervisor runs first, but I can
     // imagine that changing.
-    task::yield();
+    task::deschedule();
     fail!();
 }

View File

@@ -17,13 +17,13 @@ pub fn main() {
     builder.future_result(|r| { result = Some(r); });
     builder.spawn(child);
     error!("1");
-    task::yield();
+    task::deschedule();
     error!("2");
-    task::yield();
+    task::deschedule();
     error!("3");
     result.unwrap().recv();
 }
 fn child() {
-    error!("4"); task::yield(); error!("5"); task::yield(); error!("6");
+    error!("4"); task::deschedule(); error!("5"); task::deschedule(); error!("6");
 }

View File

@@ -17,7 +17,7 @@ pub fn main() {
     builder.future_result(|r| { result = Some(r); });
     builder.spawn(child);
     error!("1");
-    task::yield();
+    task::deschedule();
     result.unwrap().recv();
 }

View File

@@ -13,5 +13,5 @@ use std::task;
 pub fn main() {
     let mut i: int = 0;
-    while i < 100 { i = i + 1; error!(i); task::yield(); }
+    while i < 100 { i = i + 1; error!(i); task::deschedule(); }
 }