auto merge of #8581 : FlaPer87/rust/issue/8232, r=bblum

As of now, rekillable is an unsafe function; instead, it should behave
just like unkillable by encapsulating the unsafe code within an unsafe
block of its own.

This patch does that, and removes the unsafe blocks that previously
wrapped rekillable calls throughout Rust's libraries.

Fixes #8232
This commit is contained in:
bors 2013-08-27 11:35:51 -07:00
commit c822d1070a
3 changed files with 87 additions and 48 deletions

View File

@ -135,9 +135,7 @@ impl<Q:Send> Sem<Q> {
do task::unkillable {
do (|| {
self.acquire();
unsafe {
do task::rekillable { blk() }
}
do task::rekillable { blk() }
}).finally {
self.release();
}
@ -234,10 +232,8 @@ impl<'self> Condvar<'self> {
// signaller already sent -- I mean 'unconditionally' in contrast
// with acquire().)
do (|| {
unsafe {
do task::rekillable {
let _ = WaitEnd.take_unwrap().recv();
}
do task::rekillable {
let _ = WaitEnd.take_unwrap().recv();
}
}).finally {
// Reacquire the condvar. Note this is back in the unkillable
@ -516,14 +512,12 @@ impl RWLock {
* 'write' from other tasks will run concurrently with this one.
*/
pub fn write<U>(&self, blk: &fn() -> U) -> U {
unsafe {
do task::unkillable {
(&self.order_lock).acquire();
do (&self.access_lock).access {
(&self.order_lock).release();
do task::rekillable {
blk()
}
do task::unkillable {
(&self.order_lock).acquire();
do (&self.access_lock).access {
(&self.order_lock).release();
do task::rekillable {
blk()
}
}
}
@ -562,16 +556,14 @@ impl RWLock {
// which can't happen until T2 finishes the downgrade-read entirely.
// The astute reader will also note that making waking writers use the
// order_lock is better for not starving readers.
unsafe {
do task::unkillable {
(&self.order_lock).acquire();
do (&self.access_lock).access_cond |cond| {
(&self.order_lock).release();
do task::rekillable {
let opt_lock = Just(&self.order_lock);
blk(&Condvar { sem: cond.sem, order: opt_lock,
token: NonCopyable::new() })
}
do task::unkillable {
(&self.order_lock).acquire();
do (&self.access_lock).access_cond |cond| {
(&self.order_lock).release();
do task::rekillable {
let opt_lock = Just(&self.order_lock);
blk(&Condvar { sem: cond.sem, order: opt_lock,
token: NonCopyable::new() })
}
}
}
@ -606,10 +598,8 @@ impl RWLock {
(&self.access_lock).acquire();
(&self.order_lock).release();
do (|| {
unsafe {
do task::rekillable {
blk(RWLockWriteMode { lock: self, token: NonCopyable::new() })
}
do task::rekillable {
blk(RWLockWriteMode { lock: self, token: NonCopyable::new() })
}
}).finally {
let writer_or_last_reader;

View File

@ -647,7 +647,11 @@ impl Death {
/// All calls must be paired with a preceding call to inhibit_kill.
#[inline]
pub fn allow_kill(&mut self, already_failing: bool) {
rtassert!(self.unkillable != 0);
if self.unkillable == 0 {
// we need to decrement the counter before failing.
self.unkillable -= 1;
fail!("Cannot enter a rekillable() block without a surrounding unkillable()");
}
self.unkillable -= 1;
if self.unkillable == 0 {
rtassert!(self.kill_handle.is_some());

View File

@ -597,21 +597,36 @@ pub fn unkillable<U>(f: &fn() -> U) -> U {
}
}
/// The inverse of unkillable. Only ever to be used nested in unkillable().
pub unsafe fn rekillable<U>(f: &fn() -> U) -> U {
/**
* Re-enables killing for a task currently marked as unkillable. This
* is meant to be used only when nested inside an unkillable block.
*
* # Example
*
* ~~~
* do task::unkillable {
* do task::rekillable {
* // Task is killable
* }
* // Task is unkillable again
* }
* ~~~
*/
pub fn rekillable<U>(f: &fn() -> U) -> U {
use rt::task::Task;
if in_green_task_context() {
let t = Local::unsafe_borrow::<Task>();
do (|| {
(*t).death.allow_kill((*t).unwinder.unwinding);
unsafe {
if in_green_task_context() {
let t = Local::unsafe_borrow::<Task>();
do (|| {
(*t).death.allow_kill((*t).unwinder.unwinding);
f()
}).finally {
(*t).death.inhibit_kill((*t).unwinder.unwinding);
}
} else {
// FIXME(#3095): As in unkillable().
f()
}).finally {
(*t).death.inhibit_kill((*t).unwinder.unwinding);
}
} else {
// FIXME(#3095): As in unkillable().
f()
}
}
@ -636,8 +651,8 @@ fn test_kill_unkillable_task() {
}
}
#[ignore(reason = "linked failure")]
#[test]
#[ignore(cfg(windows))]
fn test_kill_rekillable_task() {
use rt::test::*;
@ -646,11 +661,9 @@ fn test_kill_rekillable_task() {
do run_in_newsched_task {
do task::try {
do task::unkillable {
unsafe {
do task::rekillable {
do task::spawn {
fail!();
}
do task::rekillable {
do task::spawn {
fail!();
}
}
}
@ -658,7 +671,39 @@ fn test_kill_rekillable_task() {
}
}
#[test] #[should_fail]
#[test]
#[should_fail]
#[ignore(cfg(windows))]
// Calling rekillable outside of an unkillable block is a usage error:
// allow_kill sees an unkillable count of zero and fails the task.
fn test_rekillable_not_nested() {
do rekillable {
// This should fail before the deschedule() below ever runs,
// since this rekillable block is not nested inside an
// unkillable block as required.
deschedule();
}
}
#[test]
#[ignore(cfg(windows))]
// A task blocked inside rekillable { } (nested in unkillable { }) must be
// killable again: the child's fail!() should kill the parent, and
// task::try should report that failure as an Err.
fn test_rekillable_nested_failure() {
let result = do task::try {
do unkillable {
do rekillable {
let (port,chan) = comm::stream();
// Child signals once, then fails, killing this (rekillable) task.
do task::spawn { chan.send(()); fail!(); }
port.recv(); // wait for child to come into existence
port.recv(); // block forever, expect to get killed.
}
}
};
assert!(result.is_err());
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn test_cant_dup_task_builder() {
let mut builder = task();
builder.unlinked();