From ad022b1a1bcdb8d2e6d1f200f5824f16de2c2193 Mon Sep 17 00:00:00 2001
From: Aaron Turon
Date: Fri, 14 Nov 2014 13:40:34 -0800
Subject: [PATCH 01/10] Remove Runtime trait

This commit removes most of the remaining runtime infrastructure related
to the green/native split. In particular, it removes the `Runtime` trait
and instead inlines the native implementation.

Closes #17325

[breaking-change]
---
 src/libnative/task.rs | 246 ------------------------------
 src/librustrt/lib.rs  |  40 -----
 src/librustrt/rtio.rs |  45 ------
 src/librustrt/task.rs | 342 +++++++++++++++++++++---------------------
 src/libstd/rt/mod.rs  |   2 +
 5 files changed, 172 insertions(+), 503 deletions(-)
 delete mode 100644 src/librustrt/rtio.rs

diff --git a/src/libnative/task.rs b/src/libnative/task.rs
index 6d640b61b18..02fb5b31c0d 100644
--- a/src/libnative/task.rs
+++ b/src/libnative/task.rs
@@ -24,252 +24,6 @@ use std::rt::task::{Task, BlockedTask, TaskOpts};
 use std::rt::thread::Thread;
 use std::rt;
 
-use std::task::{TaskBuilder, Spawner};
-
-/// Creates a new Task which is ready to execute as a 1:1 task.
-pub fn new(stack_bounds: (uint, uint), stack_guard: uint) -> Box<Task> {
-    let mut task = box Task::new();
-    let mut ops = ops();
-    ops.stack_bounds = stack_bounds;
-    ops.stack_guard = stack_guard;
-    task.put_runtime(ops);
-    return task;
-}
-
-fn ops() -> Box<Ops> {
-    box Ops {
-        lock: unsafe { NativeMutex::new() },
-        awoken: false,
-        // these *should* get overwritten
-        stack_bounds: (0, 0),
-        stack_guard: 0
-    }
-}
-
-/// A spawner for native tasks
-pub struct NativeSpawner;
-
-impl Spawner for NativeSpawner {
-    fn spawn(self, opts: TaskOpts, f: proc():Send) {
-        let TaskOpts { name, stack_size, on_exit } = opts;
-
-        let mut task = box Task::new();
-        task.name = name;
-        task.death.on_exit = on_exit;
-
-        let stack = stack_size.unwrap_or(rt::min_stack());
-        let task = task;
-        let ops = ops();
-
-        // Note that this increment must happen *before* the spawn in order to
-        // guarantee that if this task exits it will always end up waiting for
-        // the spawned task to exit.
-        let token = bookkeeping::increment();
-
-        // Spawning a new OS thread guarantees that __morestack will never get
-        // triggered, but we must manually set up the actual stack bounds once
-        // this function starts executing. This raises the lower limit by a bit
-        // because by the time that this function is executing we've already
-        // consumed at least a little bit of stack (we don't know the exact byte
-        // address at which our stack started).
-        Thread::spawn_stack(stack, proc() {
-            let something_around_the_top_of_the_stack = 1;
-            let addr = &something_around_the_top_of_the_stack as *const int;
-            let my_stack = addr as uint;
-            unsafe {
-                stack::record_os_managed_stack_bounds(my_stack - stack + 1024,
-                                                      my_stack);
-            }
-            let mut ops = ops;
-            ops.stack_guard = rt::thread::current_guard_page();
-            ops.stack_bounds = (my_stack - stack + 1024, my_stack);
-
-            let mut f = Some(f);
-            let mut task = task;
-            task.put_runtime(ops);
-            drop(task.run(|| { f.take().unwrap()() }).destroy());
-            drop(token);
-        })
-    }
-}
-
-/// An extension trait adding a `native` configuration method to `TaskBuilder`.
-pub trait NativeTaskBuilder {
-    fn native(self) -> TaskBuilder<NativeSpawner>;
-}
-
-impl<S: Spawner> NativeTaskBuilder for TaskBuilder<S> {
-    fn native(self) -> TaskBuilder<NativeSpawner> {
-        self.spawner(NativeSpawner)
-    }
-}
-
-// This structure is the glue between channels and the 1:1 scheduling mode. This
-// structure is allocated once per task.
-struct Ops {
-    lock: NativeMutex,       // native synchronization
-    awoken: bool,            // used to prevent spurious wakeups
-
-    // This field holds the known bounds of the stack in (lo, hi) form. Not all
-    // native tasks necessarily know their precise bounds, hence this is
-    // optional.
-    stack_bounds: (uint, uint),
-
-    stack_guard: uint
-}
-
-impl rt::Runtime for Ops {
-    fn yield_now(self: Box<Ops>, mut cur_task: Box<Task>) {
-        // put the task back in TLS and then invoke the OS thread yield
-        cur_task.put_runtime(self);
-        Local::put(cur_task);
-        Thread::yield_now();
-    }
-
-    fn maybe_yield(self: Box<Ops>, mut cur_task: Box<Task>) {
-        // just put the task back in TLS, on OS threads we never need to
-        // opportunistically yield b/c the OS will do that for us (preemption)
-        cur_task.put_runtime(self);
-        Local::put(cur_task);
-    }
-
-    fn wrap(self: Box<Ops>) -> Box<Any + Send> {
-        self as Box<Any + Send>
-    }
-
-    fn stack_bounds(&self) -> (uint, uint) { self.stack_bounds }
-
-    fn stack_guard(&self) -> Option<uint> {
-        if self.stack_guard != 0 {
-            Some(self.stack_guard)
-        } else {
-            None
-        }
-    }
-
-    fn can_block(&self) -> bool { true }
-
-    // This function gets a little interesting. There are a few safety and
-    // ownership violations going on here, but this is all done in the name of
-    // shared state. Additionally, all of the violations are protected with a
-    // mutex, so in theory there are no races.
-    //
-    // The first thing we need to do is to get a pointer to the task's internal
-    // mutex. This address will not be changing (because the task is allocated
-    // on the heap). We must have this handle separately because the task will
-    // have its ownership transferred to the given closure. We're guaranteed,
-    // however, that this memory will remain valid because *this* is the current
-    // task's execution thread.
-    //
-    // The next weird part is where ownership of the task actually goes. We
-    // relinquish it to the `f` blocking function, but upon returning this
-    // function needs to replace the task back in TLS. There is no communication
-    // from the wakeup thread back to this thread about the task pointer, and
-    // there's really no need to. In order to get around this, we cast the task
-    // to a `uint` which is then used at the end of this function to cast back
-    // to a `Box<Task>` object. Naturally, this looks like it violates
-    // ownership semantics in that there may be two `Box<Task>` objects.
-    //
-    // The fun part is that the wakeup half of this implementation knows to
-    // "forget" the task on the other end. This means that the awakening half of
-    // things silently relinquishes ownership back to this thread, but not in a
-    // way that the compiler can understand. The task's memory is always valid
-    // for both tasks because these operations are all done inside of a mutex.
-    //
-    // You'll also find that if blocking fails (the `f` function hands the
-    // BlockedTask back to us), we will `mem::forget` the handles. The
-    // reasoning for this is the same logic as above in that the task silently
-    // transfers ownership via the `uint`, not through normal compiler
-    // semantics.
-    //
-    // On a mildly unrelated note, it should also be pointed out that OS
-    // condition variables are susceptible to spurious wakeups, which we need to
-    // be ready for. In order to accommodate for this fact, we have an extra
-    // `awoken` field which indicates whether we were actually woken up via some
-    // invocation of `reawaken`. This flag is only ever accessed inside the
-    // lock, so there's no need to make it atomic.
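The wakeup protocol described in this comment reappears nearly verbatim in `librustrt/task.rs` in the second half of this patch. As a minimal sketch of the same idea written against today's `std::sync` API rather than the `NativeMutex` used here (the `Parked` type and its method names are illustrative, not part of the patch):

use std::sync::{Condvar, Mutex};

// `awoken` plays the role of `Ops::awoken` above: it is only ever read or
// written while holding the lock, and the wait loop re-checks it so that a
// spurious condvar wakeup is never mistaken for a real `reawaken`.
struct Parked {
    awoken: Mutex<bool>,
    cvar: Condvar,
}

impl Parked {
    fn block(&self) {
        let mut awoken = self.awoken.lock().unwrap();
        *awoken = false;
        while !*awoken {
            // Condition variables may wake spuriously; loop until the flag
            // has actually been set by `reawaken`.
            awoken = self.cvar.wait(awoken).unwrap();
        }
    }

    fn reawaken(&self) {
        *self.awoken.lock().unwrap() = true; // set the flag inside the lock
        self.cvar.notify_one();              // then signal the blocked thread
    }
}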
-    fn deschedule(mut self: Box<Ops>,
-                  times: uint,
-                  mut cur_task: Box<Task>,
-                  f: |BlockedTask| -> Result<(), BlockedTask>) {
-        let me = &mut *self as *mut Ops;
-        cur_task.put_runtime(self);
-
-        unsafe {
-            let cur_task_dupe = &mut *cur_task as *mut Task;
-            let task = BlockedTask::block(cur_task);
-
-            if times == 1 {
-                let guard = (*me).lock.lock();
-                (*me).awoken = false;
-                match f(task) {
-                    Ok(()) => {
-                        while !(*me).awoken {
-                            guard.wait();
-                        }
-                    }
-                    Err(task) => { mem::forget(task.wake()); }
-                }
-            } else {
-                let iter = task.make_selectable(times);
-                let guard = (*me).lock.lock();
-                (*me).awoken = false;
-
-                // Apply the given closure to all of the "selectable tasks",
-                // bailing on the first one that produces an error. Note that
-                // care must be taken such that when an error occurs, we
-                // may not own the task, so we may still have to wait for the
-                // task to become available. In other words, if task.wake()
-                // returns `None`, then someone else has ownership and we must
-                // wait for their signal.
-                match iter.map(f).filter_map(|a| a.err()).next() {
-                    None => {}
-                    Some(task) => {
-                        match task.wake() {
-                            Some(task) => {
-                                mem::forget(task);
-                                (*me).awoken = true;
-                            }
-                            None => {}
-                        }
-                    }
-                }
-                while !(*me).awoken {
-                    guard.wait();
-                }
-            }
-            // re-acquire ownership of the task
-            cur_task = mem::transmute(cur_task_dupe);
-        }
-
-        // put the task back in TLS, and everything is as it once was.
-        Local::put(cur_task);
-    }
-
-    // See the comments on `deschedule` for why the task is forgotten here, and
-    // why it's valid to do so.
-    fn reawaken(mut self: Box<Ops>, mut to_wake: Box<Task>) {
-        unsafe {
-            let me = &mut *self as *mut Ops;
-            to_wake.put_runtime(self);
-            mem::forget(to_wake);
-            let guard = (*me).lock.lock();
-            (*me).awoken = true;
-            guard.signal();
-        }
-    }
-
-    fn spawn_sibling(self: Box<Ops>,
-                     mut cur_task: Box<Task>,
-                     opts: TaskOpts,
-                     f: proc():Send) {
-        cur_task.put_runtime(self);
-        Local::put(cur_task);
-
-        NativeSpawner.spawn(opts, f);
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use std::rt::local::Local;
diff --git a/src/librustrt/lib.rs b/src/librustrt/lib.rs
index fee748e29d9..387b430b8f8 100644
--- a/src/librustrt/lib.rs
+++ b/src/librustrt/lib.rs
@@ -39,11 +39,6 @@ pub use self::unwind::{begin_unwind, begin_unwind_fmt};
 
 use core::prelude::*;
 
-use alloc::boxed::Box;
-use core::any::Any;
-
-use task::{Task, BlockedTask, TaskOpts};
-
 mod macros;
 
 mod at_exit_imp;
@@ -60,46 +55,11 @@ pub mod exclusive;
 pub mod local;
 pub mod local_data;
 pub mod mutex;
-pub mod rtio;
 pub mod stack;
 pub mod task;
 pub mod thread;
 pub mod unwind;
 
-/// The interface to the current runtime.
-///
-/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
-/// two independent crates, libnative and libgreen, both have objects which
-/// implement this trait. The goal of this trait is to encompass all the
-/// fundamental differences in functionality between the 1:1 and M:N runtime
-/// modes.
-pub trait Runtime {
-    // Necessary scheduling functions, used for channels and blocking I/O
-    // (sometimes).
-    fn yield_now(self: Box<Self>, cur_task: Box<Task>);
-    fn maybe_yield(self: Box<Self>, cur_task: Box<Task>);
-    fn deschedule(self: Box<Self>,
-                  times: uint,
-                  cur_task: Box<Task>,
-                  f: |BlockedTask| -> Result<(), BlockedTask>);
-    fn reawaken(self: Box<Self>, to_wake: Box<Task>);
-
-    // Miscellaneous calls which are very different depending on what context
-    // you're in.
-    fn spawn_sibling(self: Box<Self>,
-                     cur_task: Box<Task>,
-                     opts: TaskOpts,
-                     f: proc():Send);
-    /// The (low, high) edges of the current stack.
-    fn stack_bounds(&self) -> (uint, uint); // (lo, hi)
-    /// The last writable byte of the stack next to the guard page
-    fn stack_guard(&self) -> Option<uint>;
-    fn can_block(&self) -> bool;
-
-    // FIXME: This is a serious code smell and this should not exist at all.
-    fn wrap(self: Box<Self>) -> Box<Any + Send>;
-}
-
 /// The default error code of the rust runtime if the main task panics instead
 /// of exiting cleanly.
 pub const DEFAULT_ERROR_CODE: int = 101;
diff --git a/src/librustrt/rtio.rs b/src/librustrt/rtio.rs
deleted file mode 100644
index 86de8168189..00000000000
--- a/src/librustrt/rtio.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! The EventLoop and internal synchronous I/O interface.
-
-use core::prelude::*;
-use alloc::boxed::Box;
-
-pub trait EventLoop {
-    fn run(&mut self);
-    fn callback(&mut self, arg: proc(): Send);
-    fn pausable_idle_callback(&mut self, Box<Callback + Send>)
-                              -> Box<PausableIdleCallback + Send>;
-    fn remote_callback(&mut self, Box<Callback + Send>)
-                       -> Box<RemoteCallback + Send>;
-
-    // last vestige of IoFactory
-    fn has_active_io(&self) -> bool;
-}
-
-pub trait Callback {
-    fn call(&mut self);
-}
-
-pub trait RemoteCallback {
-    /// Trigger the remote callback. Note that the number of times the
-    /// callback is run is not guaranteed. All that is guaranteed is
-    /// that, after calling 'fire', the callback will be called at
-    /// least once, but multiple callbacks may be coalesced and
-    /// callbacks may be called more often than requested. Destruction also
-    /// triggers the callback.
-    fn fire(&mut self);
-}
-
-pub trait PausableIdleCallback {
-    fn pause(&mut self);
-    fn resume(&mut self);
-}
diff --git a/src/librustrt/task.rs b/src/librustrt/task.rs
index 2c8fca2d5e6..34c913c5bcb 100644
--- a/src/librustrt/task.rs
+++ b/src/librustrt/task.rs
@@ -16,7 +16,7 @@ pub use self::BlockedTask::*;
 use self::TaskState::*;
 
 use alloc::arc::Arc;
-use alloc::boxed::{BoxAny, Box};
+use alloc::boxed::Box;
 use core::any::Any;
 use core::atomic::{AtomicUint, SeqCst};
 use core::iter::Take;
@@ -24,76 +24,21 @@ use core::kinds::marker;
 use core::mem;
 use core::prelude::{Clone, Drop, Err, Iterator, None, Ok, Option, Send, Some};
 use core::prelude::{drop};
-use core::raw;
 
+use bookkeeping;
+use mutex::NativeMutex;
 use local_data;
-use Runtime;
 use local::Local;
+use thread::{mod, Thread};
+use stack;
 use unwind;
 use unwind::Unwinder;
 use collections::str::SendStr;
 
 /// State associated with Rust tasks.
 ///
-/// Rust tasks are primarily built with two separate components. One is this
-/// structure which handles standard services such as TLD, unwinding support,
-/// naming of a task, etc. The second component is the runtime of this task, a
-/// `Runtime` trait object.
-///
-/// The `Runtime` object instructs this task how it can perform critical
-/// operations such as blocking, rescheduling, I/O constructors, etc. The two
-/// halves are separately owned, but one is often found contained in the other.
-/// A task's runtime can be reflected upon with the `maybe_take_runtime` method,
-/// and otherwise its ownership is managed with `take_runtime` and
-/// `put_runtime`.
-///
-/// In general, this structure should not be used. This is meant to be an
-/// unstable internal detail of the runtime itself. From time-to-time, however,
-/// it is useful to manage tasks directly. An example of this would be
-/// interoperating with the Rust runtime from FFI callbacks or such. For this
-/// reason, there are two methods of note with the `Task` structure.
-///
-/// * `run` - This function will execute a closure inside the context of a task.
-///           Failure is caught and handled via the task's on_exit callback. If
-///           this panics, the task is still returned, but it can no longer be
-///           used, it is poisoned.
-///
-/// * `destroy` - This is a required function to call to destroy a task. If a
-///               task falls out of scope without calling `destroy`, its
-///               destructor bomb will go off, aborting the process.
-///
-/// With these two methods, tasks can be re-used to execute code inside of its
-/// context while having a point in the future where destruction is allowed.
-/// More information can be found on these specific methods.
-///
-/// # Example
-///
-/// ```no_run
-/// extern crate native;
-/// use std::uint;
-/// # fn main() {
-///
-/// // Create a task using a native runtime
-/// let task = native::task::new((0, uint::MAX), 0);
-///
-/// // Run some code, catching any possible panic
-/// let task = task.run(|| {
-///     // Run some code inside this task
-///     println!("Hello with a native runtime!");
-/// });
-///
-/// // Run some code again, catching the panic
-/// let task = task.run(|| {
-///     panic!("oh no, what to do!");
-/// });
-///
-/// // Now that the task has panicked, it can never be used again
-/// assert!(task.is_destroyed());
-///
-/// // Deallocate the resources associated with this task
-/// task.destroy();
-/// # }
-/// ```
+/// This structure is currently undergoing major changes, and is
+/// likely to be moved into/merged with a `Thread` structure.
 pub struct Task {
     pub storage: LocalStorage,
     pub unwinder: Unwinder,
@@ -101,7 +46,15 @@ pub struct Task {
     pub name: Option<SendStr>,
     state: TaskState,
 
-    imp: Option<Box<Runtime + Send>>,
+    lock: NativeMutex,       // native synchronization
+    awoken: bool,            // used to prevent spurious wakeups
+
+    // This field holds the known bounds of the stack in (lo, hi) form. Not all
+    // native tasks necessarily know their precise bounds, hence this is
+    // optional.
+    stack_bounds: (uint, uint),
+
+    stack_guard: uint
 }
 
 // Once a task has entered the `Armed` state it must be destroyed via `drop`,
@@ -152,23 +105,60 @@ pub struct BlockedTasks {
 
 impl Task {
     /// Creates a new uninitialized task.
-    ///
-    /// This method cannot be used to immediately invoke `run` because the task
-    /// itself will likely require a runtime to be inserted via `put_runtime`.
-    ///
-    /// Note that you likely don't want to call this function, but rather the
-    /// task creation functions through libnative or libgreen.
-    pub fn new() -> Task {
+    pub fn new(stack_bounds: Option<(uint, uint)>, stack_guard: Option<uint>) -> Task {
         Task {
             storage: LocalStorage(None),
             unwinder: Unwinder::new(),
             death: Death::new(),
             state: New,
             name: None,
-            imp: None,
+            lock: unsafe { NativeMutex::new() },
+            awoken: false,
+            // these *should* get overwritten
+            stack_bounds: stack_bounds.unwrap_or((0, 0)),
+            stack_guard: stack_guard.unwrap_or(0)
         }
     }
 
+    pub fn spawn(opts: TaskOpts, f: proc():Send) {
+        let TaskOpts { name, stack_size, on_exit } = opts;
+
+        let mut task = box Task::new(None, None);
+        task.name = name;
+        task.death.on_exit = on_exit;
+
+        // FIXME: change this back after moving rustrt into std
+        // let stack = stack_size.unwrap_or(rt::min_stack());
+        let stack = stack_size.unwrap_or(2 * 1024 * 1024);
+
+        // Note that this increment must happen *before* the spawn in order to
+        // guarantee that if this task exits it will always end up waiting for
+        // the spawned task to exit.
+        let token = bookkeeping::increment();
+
+        // Spawning a new OS thread guarantees that __morestack will never get
+        // triggered, but we must manually set up the actual stack bounds once
+        // this function starts executing. This raises the lower limit by a bit
+        // because by the time that this function is executing we've already
+        // consumed at least a little bit of stack (we don't know the exact byte
+        // address at which our stack started).
+        Thread::spawn_stack(stack, proc() {
+            let something_around_the_top_of_the_stack = 1;
+            let addr = &something_around_the_top_of_the_stack as *const int;
+            let my_stack = addr as uint;
+            unsafe {
+                stack::record_os_managed_stack_bounds(my_stack - stack + 1024,
+                                                      my_stack);
+            }
+            task.stack_guard = thread::current_guard_page();
+            task.stack_bounds = (my_stack - stack + 1024, my_stack);
+
+            let mut f = Some(f);
+            drop(task.run(|| { f.take().unwrap()() }).destroy());
+            drop(token);
+        })
+    }
+
     /// Consumes ownership of a task, runs some code, and returns the task back.
     ///
     /// This function can be used as an emulated "try/catch" to interoperate
@@ -190,23 +180,6 @@ impl Task {
     ///
     /// It is invalid to call this function with a task that has been previously
     /// destroyed via a failed call to `run`.
-    ///
-    /// # Example
-    ///
-    /// ```no_run
-    /// extern crate native;
-    /// use std::uint;
-    /// # fn main() {
-    ///
-    /// // Create a new native task
-    /// let task = native::task::new((0, uint::MAX), 0);
-    ///
-    /// // Run some code once and then destroy this task
-    /// task.run(|| {
-    ///     println!("Hello with a native runtime!");
-    /// }).destroy();
-    /// # }
-    /// ```
     pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
         assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
 
@@ -329,111 +302,136 @@ impl Task {
     /// Queries whether this can be destroyed or not.
     pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
 
-    /// Inserts a runtime object into this task, transferring ownership to the
-    /// task. It is illegal to replace a previous runtime object in this task
-    /// with this argument.
-    pub fn put_runtime(&mut self, ops: Box<Runtime + Send>) {
-        assert!(self.imp.is_none());
-        self.imp = Some(ops);
-    }
-
-    /// Removes the runtime from this task, transferring ownership to the
-    /// caller.
-    pub fn take_runtime(&mut self) -> Box<Runtime + Send> {
-        assert!(self.imp.is_some());
-        self.imp.take().unwrap()
-    }
-
-    /// Attempts to extract the runtime as a specific type. If the runtime does
-    /// not have the provided type, then the runtime is not removed. If the
-    /// runtime does have the specified type, then it is removed and returned
-    /// (transfer of ownership).
-    ///
-    /// It is recommended to only use this method when *absolutely necessary*.
-    /// This function may not be available in the future.
-    pub fn maybe_take_runtime<T: 'static>(&mut self) -> Option<Box<T>> {
-        // This is a terrible, terrible function. The general idea here is to
-        // take the runtime, cast it to Box<Any>, check if it has the right
-        // type, and then re-cast it back if necessary. The method of doing
-        // this is pretty sketchy and involves shuffling vtables of trait
-        // objects around, but it gets the job done.
-        //
-        // FIXME: This function is a serious code smell and should be avoided at
-        //        all costs. I have yet to think of a method to avoid this
-        //        function, and I would be saddened if more usage of the function
-        //        crops up.
-        unsafe {
-            let imp = self.imp.take().unwrap();
-            let vtable = mem::transmute::<_, &raw::TraitObject>(&imp).vtable;
-            match imp.wrap().downcast::<T>() {
-                Ok(t) => Some(t),
-                Err(t) => {
-                    let data = mem::transmute::<_, raw::TraitObject>(t).data;
-                    let obj: Box<Runtime + Send> =
-                        mem::transmute(raw::TraitObject {
-                            vtable: vtable,
-                            data: data,
-                        });
-                    self.put_runtime(obj);
-                    None
-                }
-            }
-        }
-    }
-
-    /// Spawns a sibling to this task. The newly spawned task is configured with
-    /// the `opts` structure and will run `f` as the body of its code.
-    pub fn spawn_sibling(mut self: Box<Task>,
-                         opts: TaskOpts,
-                         f: proc(): Send) {
-        let ops = self.imp.take().unwrap();
-        ops.spawn_sibling(self, opts, f)
-    }
-
     /// Deschedules the current task, invoking `f` `amt` times. It is not
     /// recommended to use this function directly, but rather communication
     /// primitives in `std::comm` should be used.
+    //
+    // This function gets a little interesting. There are a few safety and
+    // ownership violations going on here, but this is all done in the name of
+    // shared state. Additionally, all of the violations are protected with a
+    // mutex, so in theory there are no races.
+    //
+    // The first thing we need to do is to get a pointer to the task's internal
+    // mutex. This address will not be changing (because the task is allocated
+    // on the heap). We must have this handle separately because the task will
+    // have its ownership transferred to the given closure. We're guaranteed,
+    // however, that this memory will remain valid because *this* is the current
+    // task's execution thread.
+    //
+    // The next weird part is where ownership of the task actually goes. We
+    // relinquish it to the `f` blocking function, but upon returning this
+    // function needs to replace the task back in TLS. There is no communication
+    // from the wakeup thread back to this thread about the task pointer, and
+    // there's really no need to. In order to get around this, we cast the task
+    // to a `uint` which is then used at the end of this function to cast back
+    // to a `Box<Task>` object. Naturally, this looks like it violates
+    // ownership semantics in that there may be two `Box<Task>` objects.
+    //
+    // The fun part is that the wakeup half of this implementation knows to
+    // "forget" the task on the other end. This means that the awakening half of
+    // things silently relinquishes ownership back to this thread, but not in a
+    // way that the compiler can understand. The task's memory is always valid
+    // for both tasks because these operations are all done inside of a mutex.
+    //
+    // You'll also find that if blocking fails (the `f` function hands the
+    // BlockedTask back to us), we will `mem::forget` the handles. The
+    // reasoning for this is the same logic as above in that the task silently
+    // transfers ownership via the `uint`, not through normal compiler
+    // semantics.
+    //
+    // On a mildly unrelated note, it should also be pointed out that OS
+    // condition variables are susceptible to spurious wakeups, which we need to
+    // be ready for. In order to accommodate for this fact, we have an extra
+    // `awoken` field which indicates whether we were actually woken up via some
+    // invocation of `reawaken`. This flag is only ever accessed inside the
+    // lock, so there's no need to make it atomic.
     pub fn deschedule(mut self: Box<Task>,
-                      amt: uint,
+                      times: uint,
                       f: |BlockedTask| -> ::core::result::Result<(), BlockedTask>) {
-        let ops = self.imp.take().unwrap();
-        ops.deschedule(amt, self, f)
+        unsafe {
+            let me = &mut *self as *mut Task;
+            let task = BlockedTask::block(self);
+
+            if times == 1 {
+                let guard = (*me).lock.lock();
+                (*me).awoken = false;
+                match f(task) {
+                    Ok(()) => {
+                        while !(*me).awoken {
+                            guard.wait();
+                        }
+                    }
+                    Err(task) => { mem::forget(task.wake()); }
+                }
+            } else {
+                let iter = task.make_selectable(times);
+                let guard = (*me).lock.lock();
+                (*me).awoken = false;
+
+                // Apply the given closure to all of the "selectable tasks",
+                // bailing on the first one that produces an error. Note that
+                // care must be taken such that when an error occurs, we
+                // may not own the task, so we may still have to wait for the
+                // task to become available. In other words, if task.wake()
+                // returns `None`, then someone else has ownership and we must
+                // wait for their signal.
+                match iter.map(f).filter_map(|a| a.err()).next() {
+                    None => {}
+                    Some(task) => {
+                        match task.wake() {
+                            Some(task) => {
+                                mem::forget(task);
+                                (*me).awoken = true;
+                            }
+                            None => {}
+                        }
+                    }
+                }
+                while !(*me).awoken {
+                    guard.wait();
+                }
+            }
+            // put the task back in TLS, and everything is as it once was.
+            Local::put(mem::transmute(me));
+        }
     }
 
-    /// Wakes up a previously blocked task, optionally specifying whether the
-    /// current task can accept a change in scheduling. This function can only
-    /// be called on tasks that were previously blocked in `deschedule`.
+    /// Wakes up a previously blocked task. This function can only be
+    /// called on tasks that were previously blocked in `deschedule`.
+    //
+    // See the comments on `deschedule` for why the task is forgotten here, and
+    // why it's valid to do so.
     pub fn reawaken(mut self: Box<Task>) {
-        let ops = self.imp.take().unwrap();
-        ops.reawaken(self);
+        unsafe {
+            let me = &mut *self as *mut Task;
+            mem::forget(self);
+            let guard = (*me).lock.lock();
+            (*me).awoken = true;
+            guard.signal();
+        }
     }
 
     /// Yields control of this task to another task. This function will
     /// eventually return, but possibly not immediately. This is used as an
     /// opportunity to allow other tasks a chance to run.
-    pub fn yield_now(mut self: Box<Task>) {
-        let ops = self.imp.take().unwrap();
-        ops.yield_now(self);
-    }
-
-    /// Similar to `yield_now`, except that this function may immediately return
-    /// without yielding (depending on what the runtime decides to do).
-    pub fn maybe_yield(mut self: Box<Task>) {
-        let ops = self.imp.take().unwrap();
-        ops.maybe_yield(self);
+    pub fn yield_now() {
+        Thread::yield_now();
     }
 
     /// Returns the stack bounds for this task in (lo, hi) format. The stack
     /// bounds may not be known for all tasks, so the return value may be
     /// `None`.
     pub fn stack_bounds(&self) -> (uint, uint) {
-        self.imp.as_ref().unwrap().stack_bounds()
+        self.stack_bounds
     }
 
-    /// Returns whether it is legal for this task to block the OS thread that it
-    /// is running on.
-    pub fn can_block(&self) -> bool {
-        self.imp.as_ref().unwrap().can_block()
+    /// Returns the stack guard for this task, if known.
+    pub fn stack_guard(&self) -> Option<uint> {
+        if self.stack_guard != 0 {
+            Some(self.stack_guard)
+        } else {
+            None
+        }
     }
 
     /// Consume this task, flagging it as a candidate for destruction.
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index b97e80d0dc1..8701fadf65c 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -54,6 +54,8 @@ Several modules in `core` are clients of `rt`:
 // FIXME: this should not be here.
 #![allow(missing_docs)]
 
+#![allow(dead_code)]
+
 use failure;
 use rustrt;
 

From 3ee916e50bd86768cb2a9141f9b2c52d2601b412 Mon Sep 17 00:00:00 2001
From: Aaron Turon
Date: Fri, 14 Nov 2014 13:55:57 -0800
Subject: [PATCH 02/10] Remove libnative

With runtime removal complete, there's nothing left of libnative. This
commit removes it.

Fixes #18687

[breaking-change]
---
 mk/crates.mk                        |    9 +-
 src/driver/driver.rs                |    2 +
 src/libnative/io/addrinfo.rs        |  116 ---
 src/libnative/io/process.rs         | 1240 ---------------------------
 src/libnative/lib.rs                |  155 ----
 src/libnative/task.rs               |  130 ---
 src/librustc_trans/driver/driver.rs |    7 +-
 src/libstd/lib.rs                   |    4 +-
 src/libstd/rt/mod.rs                |   76 ++
 src/libsyntax/std_inject.rs         |   31 +-
 10 files changed, 87 insertions(+), 1683 deletions(-)
 delete mode 100644 src/libnative/io/addrinfo.rs
 delete mode 100644 src/libnative/io/process.rs
 delete mode 100644 src/libnative/lib.rs
 delete mode 100644 src/libnative/task.rs

diff --git a/mk/crates.mk b/mk/crates.mk
index 1a9d1e82467..2523575b078 100644
--- a/mk/crates.mk
+++ b/mk/crates.mk
@@ -49,7 +49,7 @@
 # automatically generated for all stage/host/target combinations.
 ################################################################################
 
-TARGET_CRATES := libc std green native flate arena term \
+TARGET_CRATES := libc std green flate arena term \
                  serialize sync getopts collections test time rand \
                  log regex graphviz core rbml alloc rustrt \
                  unicode
@@ -67,7 +67,6 @@ DEPS_std := core libc rand alloc collections rustrt sync unicode \
             native:rust_builtin native:backtrace
 DEPS_graphviz := std
 DEPS_green := std native:context_switch
-DEPS_native := std
 DEPS_syntax := std term serialize log fmt_macros arena libc
 DEPS_rustc_trans := rustc rustc_back rustc_llvm libc
 DEPS_rustc := syntax flate arena serialize getopts rbml \
@@ -95,9 +94,9 @@ DEPS_regex := std
 DEPS_regex_macros = rustc syntax std regex
 DEPS_fmt_macros = std
 
-TOOL_DEPS_compiletest := test getopts native
-TOOL_DEPS_rustdoc := rustdoc native
-TOOL_DEPS_rustc := rustc_trans native
+TOOL_DEPS_compiletest := test getopts
+TOOL_DEPS_rustdoc := rustdoc
+TOOL_DEPS_rustc := rustc_trans
 TOOL_SOURCE_compiletest := $(S)src/compiletest/compiletest.rs
 TOOL_SOURCE_rustdoc := $(S)src/driver/driver.rs
 TOOL_SOURCE_rustc := $(S)src/driver/driver.rs
diff --git a/src/driver/driver.rs b/src/driver/driver.rs
index 632d21d7b9c..224b4f1b5c5 100644
--- a/src/driver/driver.rs
+++ b/src/driver/driver.rs
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+#![no_start]
+
 #[cfg(rustdoc)]
 extern crate "rustdoc" as this;
 
diff --git a/src/libnative/io/addrinfo.rs b/src/libnative/io/addrinfo.rs
deleted file mode 100644
index d40438e4272..00000000000
--- a/src/libnative/io/addrinfo.rs
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use libc::{c_char, c_int};
-use libc;
-use std::mem;
-use std::ptr::{null, null_mut};
-use std::rt::rtio;
-use std::rt::rtio::IoError;
-
-use super::net;
-
-pub struct GetAddrInfoRequest;
-
-impl GetAddrInfoRequest {
-    pub fn run(host: Option<&str>, servname: Option<&str>,
-               hint: Option<rtio::AddrinfoHint>)
-        -> Result<Vec<rtio::AddrinfoInfo>, IoError>
-    {
-        assert!(host.is_some() || servname.is_some());
-
-        let c_host = host.map(|x| x.to_c_str());
-        let c_host = c_host.as_ref().map(|x| x.as_ptr()).unwrap_or(null());
-        let c_serv = servname.map(|x| x.to_c_str());
-        let c_serv = c_serv.as_ref().map(|x| x.as_ptr()).unwrap_or(null());
-
-        let hint = hint.map(|hint| {
-            libc::addrinfo {
-                ai_flags: hint.flags as c_int,
-                ai_family: hint.family as c_int,
-                ai_socktype: 0,
-                ai_protocol: 0,
-                ai_addrlen: 0,
-                ai_canonname: null_mut(),
-                ai_addr: null_mut(),
-                ai_next: null_mut()
-            }
-        });
-
-        let hint_ptr = hint.as_ref().map_or(null(), |x| {
-            x as *const libc::addrinfo
-        });
-        let mut res = null_mut();
-
-        // Make the call
-        let s = unsafe {
-            getaddrinfo(c_host, c_serv, hint_ptr, &mut res)
-        };
-
-        // Error?
-        if s != 0 {
-            return Err(get_error(s));
-        }
-
-        // Collect all the results we found
-        let mut addrs = Vec::new();
-        let mut rp = res;
-        while rp.is_not_null() {
-            unsafe {
-                let addr = match net::sockaddr_to_addr(mem::transmute((*rp).ai_addr),
-                                                       (*rp).ai_addrlen as uint) {
-                    Ok(a) => a,
-                    Err(e) => return Err(e)
-                };
-                addrs.push(rtio::AddrinfoInfo {
-                    address: addr,
-                    family: (*rp).ai_family as uint,
-                    socktype: 0,
-                    protocol: 0,
-                    flags: (*rp).ai_flags as uint
-                });
-
-                rp = (*rp).ai_next as *mut libc::addrinfo;
-            }
-        }
-
-        unsafe { freeaddrinfo(res); }
-
-        Ok(addrs)
-    }
-}
-
-extern "system" {
-    fn getaddrinfo(node: *const c_char, service: *const c_char,
-                   hints: *const libc::addrinfo,
-                   res: *mut *mut libc::addrinfo) -> c_int;
-    fn freeaddrinfo(res: *mut libc::addrinfo);
-    #[cfg(not(windows))]
-    fn gai_strerror(errcode: c_int) -> *const c_char;
-}
-
-#[cfg(windows)]
-fn get_error(_: c_int) -> IoError {
-    net::last_error()
-}
-
-#[cfg(not(windows))]
-fn get_error(s: c_int) -> IoError {
-    use std::c_str::CString;
-
-    let err_str = unsafe {
-        CString::new(gai_strerror(s), false).as_str().unwrap().to_string()
-    };
-    IoError {
-        code: s as uint,
-        extra: 0,
-        detail: Some(err_str),
-    }
-}
diff --git a/src/libnative/io/process.rs b/src/libnative/io/process.rs
deleted file mode 100644
index 30c916f3303..00000000000
--- a/src/libnative/io/process.rs
+++ /dev/null
@@ -1,1240 +0,0 @@
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use libc::{pid_t, c_void, c_int};
-use libc;
-use std::c_str::CString;
-use std::io;
-use std::mem;
-use std::os;
-use std::ptr;
-use std::rt::rtio::{ProcessConfig, IoResult, IoError};
-use std::rt::rtio;
-
-use super::file;
-use super::util;
-
-#[cfg(windows)] use std::io::fs::PathExtensions;
-#[cfg(windows)] use std::string::String;
-#[cfg(unix)] use super::c;
-#[cfg(unix)] use super::retry;
-#[cfg(unix)] use io::helper_thread::Helper;
-
-#[cfg(unix)]
-helper_init!(static HELPER: Helper<Req>)
-
-/**
- * A value representing a child process.
- *
- * The lifetime of this value is linked to the lifetime of the actual
- * process - the Process destructor calls self.finish() which waits
- * for the process to terminate.
- */
-pub struct Process {
-    /// The unique id of the process (this should never be negative).
-    pid: pid_t,
-
-    /// A handle to the process - on unix this will always be NULL, but on
-    /// windows it will be a HANDLE to the process, which will prevent the
-    /// pid being re-used until the handle is closed.
-    handle: *mut (),
-
-    /// None until finish() is called.
-    exit_code: Option<rtio::ProcessExit>,
-
-    /// Manually delivered signal
-    exit_signal: Option<int>,
-
-    /// Deadline after which wait() will return
-    deadline: u64,
-}
-
-#[cfg(unix)]
-enum Req {
-    NewChild(libc::pid_t, Sender<rtio::ProcessExit>, u64),
-}
-
-impl Process {
-    /// Creates a new process using native process-spawning abilities provided
-    /// by the OS. Operations on this process will be blocking instead of using
-    /// the runtime for sleeping just this current task.
-    pub fn spawn(cfg: ProcessConfig)
-        -> IoResult<(Process, Vec<Option<file::FileDesc>>)>
-    {
-        // right now we only handle stdin/stdout/stderr.
-        if cfg.extra_io.len() > 0 {
-            return Err(super::unimpl());
-        }
-
-        fn get_io(io: rtio::StdioContainer,
-                  ret: &mut Vec<Option<file::FileDesc>>)
-            -> IoResult<Option<file::FileDesc>>
-        {
-            match io {
-                rtio::Ignored => { ret.push(None); Ok(None) }
-                rtio::InheritFd(fd) => {
-                    ret.push(None);
-                    Ok(Some(file::FileDesc::new(fd, false)))
-                }
-                rtio::CreatePipe(readable, _writable) => {
-                    let (reader, writer) = try!(pipe());
-                    let (theirs, ours) = if readable {
-                        (reader, writer)
-                    } else {
-                        (writer, reader)
-                    };
-                    ret.push(Some(ours));
-                    Ok(Some(theirs))
-                }
-            }
-        }
-
-        let mut ret_io = Vec::new();
-        let res = spawn_process_os(cfg,
-                                   try!(get_io(cfg.stdin, &mut ret_io)),
-                                   try!(get_io(cfg.stdout, &mut ret_io)),
-                                   try!(get_io(cfg.stderr, &mut ret_io)));
-
-        match res {
-            Ok(res) => {
-                let p = Process {
-                    pid: res.pid,
-                    handle: res.handle,
-                    exit_code: None,
-                    exit_signal: None,
-                    deadline: 0,
-                };
-                Ok((p, ret_io))
-            }
-            Err(e) => Err(e)
-        }
-    }
-
-    pub fn kill(pid: libc::pid_t, signum: int) -> IoResult<()> {
-        unsafe { killpid(pid, signum) }
-    }
-}
-
-impl rtio::RtioProcess for Process {
-    fn id(&self) -> pid_t { self.pid }
-
-    fn set_timeout(&mut self, timeout: Option<u64>) {
-        self.deadline = timeout.map(|i| i + ::io::timer::now()).unwrap_or(0);
-    }
-
-    fn wait(&mut self) -> IoResult<rtio::ProcessExit> {
-        match self.exit_code {
-            Some(code) => Ok(code),
-            None => {
-                let code = try!(waitpid(self.pid, self.deadline));
-                // On windows, waitpid will never return a signal. If a signal
-                // was successfully delivered to the process, however, we can
-                // consider it as having died via a signal.
-                let code = match self.exit_signal {
-                    None => code,
-                    Some(signal) if cfg!(windows) => rtio::ExitSignal(signal),
-                    Some(..) => code,
-                };
-                self.exit_code = Some(code);
-                Ok(code)
-            }
-        }
-    }
-
-    fn kill(&mut self, signum: int) -> IoResult<()> {
-        #[cfg(unix)] use libc::EINVAL as ERROR;
-        #[cfg(windows)] use libc::ERROR_NOTHING_TO_TERMINATE as ERROR;
-
-        // On Linux (and possibly other unices), a process that has exited will
-        // continue to accept signals because it is "defunct". The delivery of
-        // signals will only fail once the child has been reaped. For this
-        // reason, if the process hasn't exited yet, then we attempt to collect
-        // their status with WNOHANG.
-        if self.exit_code.is_none() {
-            match waitpid_nowait(self.pid) {
-                Some(code) => { self.exit_code = Some(code); }
-                None => {}
-            }
-        }
-
-        // if the process has finished, and therefore had waitpid called,
-        // and we kill it, then on unix we might end up killing a
-        // newer process that happens to have the same (re-used) id
-        match self.exit_code {
-            Some(..) => return Err(IoError {
-                code: ERROR as uint,
-                extra: 0,
-                detail: Some("can't kill an exited process".to_string()),
-            }),
-            None => {}
-        }
-
-        // A successfully delivered signal that isn't 0 (just a poll for being
-        // alive) is recorded for windows (see wait())
-        match unsafe { killpid(self.pid, signum) } {
-            Ok(()) if signum == 0 => Ok(()),
-            Ok(()) => { self.exit_signal = Some(signum); Ok(()) }
-            Err(e) => Err(e),
-        }
-    }
-}
-
-impl Drop for Process {
-    fn drop(&mut self) {
-        free_handle(self.handle);
-    }
-}
-
-pub fn pipe() -> IoResult<(file::FileDesc, file::FileDesc)> {
-    #[cfg(unix)] use libc::EMFILE as ERROR;
-    #[cfg(windows)] use libc::WSAEMFILE as ERROR;
-    struct Closer { fd: libc::c_int }
-
-    let os::Pipe { reader, writer } = match unsafe { os::pipe() } {
-        Ok(p) => p,
-        Err(io::IoError { detail, .. }) => return Err(IoError {
-            code: ERROR as uint,
-            extra: 0,
-            detail: detail,
-        })
-    };
-    let mut reader = Closer { fd: reader };
-    let mut writer = Closer { fd: writer };
-
-    let native_reader = file::FileDesc::new(reader.fd, true);
-    reader.fd = -1;
-    let native_writer = file::FileDesc::new(writer.fd, true);
-    writer.fd = -1;
-    return Ok((native_reader, native_writer));
-
-    impl Drop for Closer {
-        fn drop(&mut self) {
-            if self.fd != -1 {
-                let _ = unsafe { libc::close(self.fd) };
-            }
-        }
-    }
-}
-
-#[cfg(windows)]
-unsafe fn killpid(pid: pid_t, signal: int) -> IoResult<()> {
-    let handle = libc::OpenProcess(libc::PROCESS_TERMINATE |
-                                   libc::PROCESS_QUERY_INFORMATION,
-                                   libc::FALSE, pid as libc::DWORD);
-    if handle.is_null() {
-        return Err(super::last_error())
-    }
-    let ret = match signal {
-        // test for existence on signal 0
-        0 => {
-            let mut status = 0;
-            let ret = libc::GetExitCodeProcess(handle, &mut status);
-            if ret == 0 {
-                Err(super::last_error())
-            } else if status != libc::STILL_ACTIVE {
-                Err(IoError {
-                    code: libc::ERROR_NOTHING_TO_TERMINATE as uint,
-                    extra: 0,
-                    detail: None,
-                })
-            } else {
-                Ok(())
-            }
-        }
-        15 | 9 => { // sigterm or sigkill
-            let ret = libc::TerminateProcess(handle, 1);
-            super::mkerr_winbool(ret)
-        }
-        _ => Err(IoError {
-            code: libc::ERROR_CALL_NOT_IMPLEMENTED as uint,
-            extra: 0,
-            detail: Some("unsupported signal on windows".to_string()),
-        })
-    };
-    let _ = libc::CloseHandle(handle);
-    return ret;
-}
-
-#[cfg(not(windows))]
-unsafe fn killpid(pid: pid_t, signal: int) -> IoResult<()> {
-    let r = libc::funcs::posix88::signal::kill(pid, signal as c_int);
-    super::mkerr_libc(r)
-}
-
-struct SpawnProcessResult {
-    pid: pid_t,
-    handle: *mut (),
-}
-
-#[cfg(windows)]
-fn spawn_process_os(cfg: ProcessConfig,
-                    in_fd: Option<file::FileDesc>,
-                    out_fd: Option<file::FileDesc>,
-                    err_fd: Option<file::FileDesc>)
-                 -> IoResult<SpawnProcessResult> {
-    use libc::types::os::arch::extra::{DWORD, HANDLE, STARTUPINFO};
-    use libc::consts::os::extra::{
-        TRUE, FALSE,
-        STARTF_USESTDHANDLES,
-        INVALID_HANDLE_VALUE,
-        DUPLICATE_SAME_ACCESS
-    };
-    use libc::funcs::extra::kernel32::{
-        GetCurrentProcess,
-        DuplicateHandle,
-        CloseHandle,
-        CreateProcessW
-    };
-    use libc::funcs::extra::msvcrt::get_osfhandle;
-
-    use std::mem;
-    use std::iter::Iterator;
-    use std::str::StrPrelude;
-
-    if cfg.gid.is_some() || cfg.uid.is_some() {
-        return Err(IoError {
-            code: libc::ERROR_CALL_NOT_IMPLEMENTED as uint,
-            extra: 0,
-            detail: Some("unsupported gid/uid requested on windows".to_string()),
-        })
-    }
-
-    // To have the spawning semantics of unix/windows stay the same, we need to
-    // read the *child's* PATH if one is provided. See #15149 for more details.
-    let program = cfg.env.and_then(|env| {
-        for &(ref key, ref v) in env.iter() {
-            if b"PATH" != key.as_bytes_no_nul() { continue }
-
-            // Split the value and test each path to see if the program exists.
-            for path in os::split_paths(v.as_bytes_no_nul()).into_iter() {
-                let path = path.join(cfg.program.as_bytes_no_nul())
-                               .with_extension(os::consts::EXE_EXTENSION);
-                if path.exists() {
-                    return Some(path.to_c_str())
-                }
-            }
-            break
-        }
-        None
-    });
-
-    unsafe {
-        let mut si = zeroed_startupinfo();
-        si.cb = mem::size_of::<STARTUPINFO>() as DWORD;
-        si.dwFlags = STARTF_USESTDHANDLES;
-
-        let cur_proc = GetCurrentProcess();
-
-        // Similarly to unix, we don't actually leave holes for the stdio file
-        // descriptors, but rather open up /dev/null equivalents. These
-        // equivalents are drawn from libuv's windows process spawning.
-        let set_fd = |fd: &Option<file::FileDesc>, slot: &mut HANDLE,
-                      is_stdin: bool| {
-            match *fd {
-                None => {
-                    let access = if is_stdin {
-                        libc::FILE_GENERIC_READ
-                    } else {
-                        libc::FILE_GENERIC_WRITE | libc::FILE_READ_ATTRIBUTES
-                    };
-                    let size = mem::size_of::<libc::SECURITY_ATTRIBUTES>();
-                    let mut sa = libc::SECURITY_ATTRIBUTES {
-                        nLength: size as libc::DWORD,
-                        lpSecurityDescriptor: ptr::null_mut(),
-                        bInheritHandle: 1,
-                    };
-                    let mut filename: Vec<u16> = "NUL".utf16_units().collect();
-                    filename.push(0);
-                    *slot = libc::CreateFileW(filename.as_ptr(),
-                                              access,
-                                              libc::FILE_SHARE_READ |
-                                                  libc::FILE_SHARE_WRITE,
-                                              &mut sa,
-                                              libc::OPEN_EXISTING,
-                                              0,
-                                              ptr::null_mut());
-                    if *slot == INVALID_HANDLE_VALUE {
-                        return Err(super::last_error())
-                    }
-                }
-                Some(ref fd) => {
-                    let orig = get_osfhandle(fd.fd()) as HANDLE;
-                    if orig == INVALID_HANDLE_VALUE {
-                        return Err(super::last_error())
-                    }
-                    if DuplicateHandle(cur_proc, orig, cur_proc, slot,
-                                       0, TRUE, DUPLICATE_SAME_ACCESS) == FALSE {
-                        return Err(super::last_error())
-                    }
-                }
-            }
-            Ok(())
-        };
-
-        try!(set_fd(&in_fd, &mut si.hStdInput, true));
-        try!(set_fd(&out_fd, &mut si.hStdOutput, false));
-        try!(set_fd(&err_fd, &mut si.hStdError, false));
-
-        let cmd_str = make_command_line(program.as_ref().unwrap_or(cfg.program),
-                                        cfg.args);
-        let mut pi = zeroed_process_information();
-        let mut create_err = None;
-
-        // stolen from the libuv code.
-        let mut flags = libc::CREATE_UNICODE_ENVIRONMENT;
-        if cfg.detach {
-            flags |= libc::DETACHED_PROCESS | libc::CREATE_NEW_PROCESS_GROUP;
-        }
-
-        with_envp(cfg.env, |envp| {
-            with_dirp(cfg.cwd, |dirp| {
-                let mut cmd_str: Vec<u16> = cmd_str.as_slice().utf16_units().collect();
-                cmd_str.push(0);
-                let created = CreateProcessW(ptr::null(),
-                                             cmd_str.as_mut_ptr(),
-                                             ptr::null_mut(),
-                                             ptr::null_mut(),
-                                             TRUE,
-                                             flags, envp, dirp,
-                                             &mut si, &mut pi);
-                if created == FALSE {
-                    create_err = Some(super::last_error());
-                }
-            })
-        });
-
-        assert!(CloseHandle(si.hStdInput) != 0);
-        assert!(CloseHandle(si.hStdOutput) != 0);
-        assert!(CloseHandle(si.hStdError) != 0);
-
-        match create_err {
-            Some(err) => return Err(err),
-            None => {}
-        }
-
-        // We close the thread handle because we don't care about keeping the
-        // thread id valid, and we aren't keeping the thread handle around to be
-        // able to close it later. We don't close the process handle however
-        // because we want the process id to stay valid at least until the
-        // calling code closes the process handle.
-        assert!(CloseHandle(pi.hThread) != 0);
-
-        Ok(SpawnProcessResult {
-            pid: pi.dwProcessId as pid_t,
-            handle: pi.hProcess as *mut ()
-        })
-    }
-}
-
-#[cfg(windows)]
-fn zeroed_startupinfo() -> libc::types::os::arch::extra::STARTUPINFO {
-    libc::types::os::arch::extra::STARTUPINFO {
-        cb: 0,
-        lpReserved: ptr::null_mut(),
-        lpDesktop: ptr::null_mut(),
-        lpTitle: ptr::null_mut(),
-        dwX: 0,
-        dwY: 0,
-        dwXSize: 0,
-        dwYSize: 0,
-        dwXCountChars: 0,
-        dwYCountCharts: 0,
-        dwFillAttribute: 0,
-        dwFlags: 0,
-        wShowWindow: 0,
-        cbReserved2: 0,
-        lpReserved2: ptr::null_mut(),
-        hStdInput: libc::INVALID_HANDLE_VALUE,
-        hStdOutput: libc::INVALID_HANDLE_VALUE,
-        hStdError: libc::INVALID_HANDLE_VALUE,
-    }
-}
-
-#[cfg(windows)]
-fn zeroed_process_information() -> libc::types::os::arch::extra::PROCESS_INFORMATION {
-    libc::types::os::arch::extra::PROCESS_INFORMATION {
-        hProcess: ptr::null_mut(),
-        hThread: ptr::null_mut(),
-        dwProcessId: 0,
-        dwThreadId: 0
-    }
-}
-
-#[cfg(windows)]
-fn make_command_line(prog: &CString, args: &[CString]) -> String {
-    let mut cmd = String::new();
-    append_arg(&mut cmd, prog.as_str()
-                             .expect("expected program name to be utf-8 encoded"));
-    for arg in args.iter() {
-        cmd.push(' ');
-        append_arg(&mut cmd, arg.as_str()
-                                .expect("expected argument to be utf-8 encoded"));
-    }
-    return cmd;
-
-    fn append_arg(cmd: &mut String, arg: &str) {
-        // If an argument has 0 characters then we need to quote it to ensure
-        // that it actually gets passed through on the command line or otherwise
-        // it will be dropped entirely when parsed on the other end.
-        let quote = arg.chars().any(|c| c == ' ' || c == '\t') || arg.len() == 0;
-        if quote {
-            cmd.push('"');
-        }
-        let argvec: Vec<char> = arg.chars().collect();
-        for i in range(0u, argvec.len()) {
-            append_char_at(cmd, argvec.as_slice(), i);
-        }
-        if quote {
-            cmd.push('"');
-        }
-    }
-
-    fn append_char_at(cmd: &mut String, arg: &[char], i: uint) {
-        match arg[i] {
-            '"' => {
-                // Escape quotes.
-                cmd.push_str("\\\"");
-            }
-            '\\' => {
-                if backslash_run_ends_in_quote(arg, i) {
-                    // Double all backslashes that are in runs before quotes.
-                    cmd.push_str("\\\\");
-                } else {
-                    // Pass other backslashes through unescaped.
-                    cmd.push('\\');
-                }
-            }
-            c => {
-                cmd.push(c);
-            }
-        }
-    }
-
-    fn backslash_run_ends_in_quote(s: &[char], mut i: uint) -> bool {
-        while i < s.len() && s[i] == '\\' {
-            i += 1;
-        }
-        return i < s.len() && s[i] == '"';
-    }
-}
-
-#[cfg(unix)]
-fn spawn_process_os(cfg: ProcessConfig,
-                    in_fd: Option<file::FileDesc>,
-                    out_fd: Option<file::FileDesc>,
-                    err_fd: Option<file::FileDesc>)
-                 -> IoResult<SpawnProcessResult>
-{
-    use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
-    use libc::funcs::bsd44::getdtablesize;
-    use io::c;
-
-    mod rustrt {
-        extern {
-            pub fn rust_unset_sigprocmask();
-        }
-    }
-
-    #[cfg(target_os = "macos")]
-    unsafe fn set_environ(envp: *const c_void) {
-        extern { fn _NSGetEnviron() -> *mut *const c_void; }
-
-        *_NSGetEnviron() = envp;
-    }
-    #[cfg(not(target_os = "macos"))]
-    unsafe fn set_environ(envp: *const c_void) {
-        extern { static mut environ: *const c_void; }
-        environ = envp;
-    }
-
-    unsafe fn set_cloexec(fd: c_int) {
-        let ret = c::ioctl(fd, c::FIOCLEX);
-        assert_eq!(ret, 0);
-    }
-
-    let dirp = cfg.cwd.map(|c| c.as_ptr()).unwrap_or(ptr::null());
-
-    let cfg = unsafe {
-        mem::transmute::<ProcessConfig, ProcessConfig<'static>>(cfg)
-    };
-
-    with_envp(cfg.env, proc(envp) {
-        with_argv(cfg.program, cfg.args, proc(argv) unsafe {
-            let (mut input, mut output) = try!(pipe());
-
-            // We may use this in the child, so perform allocations before the
-            // fork
-            let devnull = "/dev/null".to_c_str();
-
-            set_cloexec(output.fd());
-
-            let pid = fork();
-            if pid < 0 {
-                return Err(super::last_error())
-            } else if pid > 0 {
-                drop(output);
-                let mut bytes = [0, ..4];
-                return match input.inner_read(bytes) {
-                    Ok(4) => {
-                        let errno = (bytes[0] as i32 << 24) |
-                                    (bytes[1] as i32 << 16) |
-                                    (bytes[2] as i32 <<  8) |
-                                    (bytes[3] as i32 <<  0);
-
-                        Err(IoError {
-                            code: errno as uint,
-                            detail: None,
-                            extra: 0,
-                        })
-                    }
-                    Err(..) => {
-                        Ok(SpawnProcessResult {
-                            pid: pid,
-                            handle: ptr::null_mut()
-                        })
-                    }
-                    Ok(..) => panic!("short read on the cloexec pipe"),
-                };
-            }
-            // And at this point we've reached a special time in the life of the
-            // child. The child must now be considered hamstrung and unable to
-            // do anything other than syscalls really. Consider the following
-            // scenario:
-            //
-            //      1. Thread A of process 1 grabs the malloc() mutex
-            //      2. Thread B of process 1 forks(), creating thread C
-            //      3. Thread C of process 2 then attempts to malloc()
-            //      4. The memory of process 2 is the same as the memory of
-            //         process 1, so the mutex is locked.
-            //
-            // This situation looks a lot like deadlock, right? It turns out
-            // that this is what pthread_atfork() takes care of, which is
-            // presumably implemented across platforms. The first thing that
-            // threads do *before* forking is to do things like grab the malloc
-            // mutex, and then after the fork they unlock it.
-            //
-            // Despite this information, libnative's spawn has been witnessed to
-            // deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
-            // all collected backtraces point at malloc/free traffic in the
-            // child spawned process.
-            //
-            // For this reason, the block of code below should contain 0
-            // invocations of either malloc or free (or their related friends).
-            //
-            // As an example of not having malloc/free traffic, we don't close
-            // this file descriptor by dropping the FileDesc (which contains an
-            // allocation). Instead we just close it manually. This will never
-            // have the drop glue anyway because this code never returns (the
-            // child will either exec() or invoke libc::exit)
-            let _ = libc::close(input.fd());
-
-            fn fail(output: &mut file::FileDesc) -> ! {
-                let errno = os::errno();
-                let bytes = [
-                    (errno >> 24) as u8,
-                    (errno >> 16) as u8,
-                    (errno >>  8) as u8,
-                    (errno >>  0) as u8,
-                ];
-                assert!(output.inner_write(bytes).is_ok());
-                unsafe { libc::_exit(1) }
-            }
-
-            rustrt::rust_unset_sigprocmask();
-
-            // If a stdio file descriptor is set to be ignored (via a -1 file
-            // descriptor), then we don't actually close it, but rather open
-            // up /dev/null into that file descriptor. Otherwise, the first file
-            // descriptor opened up in the child would be numbered as one of the
-            // stdio file descriptors, which is likely to wreak havoc.
-            let setup = |src: Option<file::FileDesc>, dst: c_int| {
-                let src = match src {
-                    None => {
-                        let flags = if dst == libc::STDIN_FILENO {
-                            libc::O_RDONLY
-                        } else {
-                            libc::O_RDWR
-                        };
-                        libc::open(devnull.as_ptr(), flags, 0)
-                    }
-                    Some(obj) => {
-                        let fd = obj.fd();
-                        // Leak the memory and the file descriptor. We're in the
-                        // child now and all our resources are going to be
-                        // cleaned up very soon
-                        mem::forget(obj);
-                        fd
-                    }
-                };
-                src != -1 && retry(|| dup2(src, dst)) != -1
-            };
-
-            if !setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
-            if !setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
-            if !setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
-
-            // close all other fds
-            for fd in range(3, getdtablesize()).rev() {
-                if fd != output.fd() {
-                    let _ = close(fd as c_int);
-                }
-            }
-
-            match cfg.gid {
-                Some(u) => {
-                    if libc::setgid(u as libc::gid_t) != 0 {
-                        fail(&mut output);
-                    }
-                }
-                None => {}
-            }
-            match cfg.uid {
-                Some(u) => {
-                    // When dropping privileges from root, the `setgroups` call
-                    // will remove any extraneous groups. If we don't call this,
-                    // then even though our uid has dropped, we may still have
-                    // groups that enable us to do super-user things. This will
-                    // fail if we aren't root, so don't bother checking the
-                    // return value, this is just done as an optimistic
-                    // privilege dropping function.
-                    extern {
-                        fn setgroups(ngroups: libc::c_int,
-                                     ptr: *const libc::c_void) -> libc::c_int;
-                    }
-                    let _ = setgroups(0, 0 as *const libc::c_void);
-
-                    if libc::setuid(u as libc::uid_t) != 0 {
-                        fail(&mut output);
-                    }
-                }
-                None => {}
-            }
-            if cfg.detach {
-                // Don't check the error of setsid because it fails if we're the
-                // process leader already. We just forked so it shouldn't return
-                // error, but ignore it anyway.
-                let _ = libc::setsid();
-            }
-            if !dirp.is_null() && chdir(dirp) == -1 {
-                fail(&mut output);
-            }
-            if !envp.is_null() {
-                set_environ(envp);
-            }
-            let _ = execvp(*argv, argv as *mut _);
-            fail(&mut output);
-        })
-    })
-}
-
-#[cfg(unix)]
-fn with_argv<T>(prog: &CString, args: &[CString],
-                cb: proc(*const *const libc::c_char) -> T) -> T {
-    let mut ptrs: Vec<*const libc::c_char> = Vec::with_capacity(args.len()+1);
-
-    // Convert the CStrings into an array of pointers. Note: the
-    // lifetime of the various CStrings involved is guaranteed to be
-    // larger than the lifetime of our invocation of cb, but this is
-    // technically unsafe as the callback could leak these pointers
-    // out of our scope.
-    ptrs.push(prog.as_ptr());
-    ptrs.extend(args.iter().map(|tmp| tmp.as_ptr()));
-
-    // Add a terminating null pointer (required by libc).
-    ptrs.push(ptr::null());
-
-    cb(ptrs.as_ptr())
-}
-
-#[cfg(unix)]
-fn with_envp<T>(env: Option<&[(&CString, &CString)]>,
-                cb: proc(*const c_void) -> T) -> T {
-    // On posixy systems we can pass a char** for envp, which is a
-    // null-terminated array of "k=v\0" strings. Since we must create
-    // these strings locally, yet expose a raw pointer to them, we
-    // create a temporary vector to own the CStrings that outlives the
-    // call to cb.
-    match env {
-        Some(env) => {
-            let mut tmps = Vec::with_capacity(env.len());
-
-            for pair in env.iter() {
-                let mut kv = Vec::new();
-                kv.push_all(pair.ref0().as_bytes_no_nul());
-                kv.push('=' as u8);
-                kv.push_all(pair.ref1().as_bytes()); // includes terminal \0
-                tmps.push(kv);
-            }
-
-            // As with `with_argv`, this is unsafe, since cb could leak the pointers.
-            let mut ptrs: Vec<*const libc::c_char> =
-                tmps.iter()
-                    .map(|tmp| tmp.as_ptr() as *const libc::c_char)
-                    .collect();
-            ptrs.push(ptr::null());
-
-            cb(ptrs.as_ptr() as *const c_void)
-        }
-        _ => cb(ptr::null())
-    }
-}
-
-#[cfg(windows)]
-fn with_envp<T>(env: Option<&[(&CString, &CString)]>, cb: |*mut c_void| -> T) -> T {
-    // On Windows we pass an "environment block" which is not a char**, but
-    // rather a concatenation of null-terminated k=v\0 sequences, with a final
-    // \0 to terminate.
-    match env {
-        Some(env) => {
-            let mut blk = Vec::new();
-
-            for pair in env.iter() {
-                let kv = format!("{}={}",
-                                 pair.ref0().as_str().unwrap(),
-                                 pair.ref1().as_str().unwrap());
-                blk.extend(kv.as_slice().utf16_units());
-                blk.push(0);
-            }
-
-            blk.push(0);
-
-            cb(blk.as_mut_ptr() as *mut c_void)
-        }
-        _ => cb(ptr::null_mut())
-    }
-}
-
-#[cfg(windows)]
-fn with_dirp<T>(d: Option<&CString>, cb: |*const u16| -> T) -> T {
-    match d {
-        Some(dir) => {
-            let dir_str = dir.as_str()
-                             .expect("expected working directory to be utf-8 encoded");
-            let mut dir_str: Vec<u16> = dir_str.utf16_units().collect();
-            dir_str.push(0);
-            cb(dir_str.as_ptr())
-        },
-        None => cb(ptr::null())
-    }
-}
-
-#[cfg(windows)]
-fn free_handle(handle: *mut ()) {
-    assert!(unsafe {
-        libc::CloseHandle(mem::transmute(handle)) != 0
-    })
-}
-
-#[cfg(unix)]
-fn free_handle(_handle: *mut ()) {
-    // unix has no process handle object, just a pid
-}
-
-#[cfg(unix)]
-fn translate_status(status: c_int) -> rtio::ProcessExit {
-    #![allow(non_snake_case)]
-    #[cfg(any(target_os = "linux", target_os = "android"))]
-    mod imp {
-        pub fn WIFEXITED(status: i32) -> bool { (status & 0xff) == 0 }
-        pub fn WEXITSTATUS(status: i32) -> i32 { (status >> 8) & 0xff }
-        pub fn WTERMSIG(status: i32) -> i32 { status & 0x7f }
-    }
-
-    #[cfg(any(target_os = "macos",
-              target_os = "ios",
-              target_os = "freebsd",
-              target_os = "dragonfly"))]
-    mod imp {
-        pub fn WIFEXITED(status: i32) -> bool { (status & 0x7f) == 0 }
-        pub fn WEXITSTATUS(status: i32) -> i32 { status >> 8 }
-        pub fn WTERMSIG(status: i32) -> i32 { status & 0o177 }
-    }
-
-    if imp::WIFEXITED(status) {
-        rtio::ExitStatus(imp::WEXITSTATUS(status) as int)
-    } else {
-        rtio::ExitSignal(imp::WTERMSIG(status) as int)
-    }
-}
-
-/**
- * Waits for a process to exit and returns the exit code, failing
- * if there is no process with the specified id.
- *
- * Note that this is private to avoid race conditions on unix where if
- * a user calls waitpid(some_process.get_id()) then some_process.finish()
- * and some_process.destroy() and some_process.finalize() will then either
- * operate on a non-existent process or, even worse, on a newer process
- * with the same id.
- */ -#[cfg(windows)] -fn waitpid(pid: pid_t, deadline: u64) -> IoResult { - use libc::types::os::arch::extra::DWORD; - use libc::consts::os::extra::{ - SYNCHRONIZE, - PROCESS_QUERY_INFORMATION, - FALSE, - STILL_ACTIVE, - INFINITE, - WAIT_TIMEOUT, - WAIT_OBJECT_0, - }; - use libc::funcs::extra::kernel32::{ - OpenProcess, - GetExitCodeProcess, - CloseHandle, - WaitForSingleObject, - }; - - unsafe { - let process = OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION, - FALSE, - pid as DWORD); - if process.is_null() { - return Err(super::last_error()) - } - - loop { - let mut status = 0; - if GetExitCodeProcess(process, &mut status) == FALSE { - let err = Err(super::last_error()); - assert!(CloseHandle(process) != 0); - return err; - } - if status != STILL_ACTIVE { - assert!(CloseHandle(process) != 0); - return Ok(rtio::ExitStatus(status as int)); - } - let interval = if deadline == 0 { - INFINITE - } else { - let now = ::io::timer::now(); - if deadline < now {0} else {(deadline - now) as u32} - }; - match WaitForSingleObject(process, interval) { - WAIT_OBJECT_0 => {} - WAIT_TIMEOUT => { - assert!(CloseHandle(process) != 0); - return Err(util::timeout("process wait timed out")) - } - _ => { - let err = Err(super::last_error()); - assert!(CloseHandle(process) != 0); - return err - } - } - } - } -} - -#[cfg(unix)] -fn waitpid(pid: pid_t, deadline: u64) -> IoResult { - use std::cmp; - use std::comm; - - static mut WRITE_FD: libc::c_int = 0; - - let mut status = 0 as c_int; - if deadline == 0 { - return match retry(|| unsafe { c::waitpid(pid, &mut status, 0) }) { - -1 => panic!("unknown waitpid error: {}", super::last_error().code), - _ => Ok(translate_status(status)), - } - } - - // On unix, wait() and its friends have no timeout parameters, so there is - // no way to time out a thread in wait(). From some googling and some - // thinking, it appears that there are a few ways to handle timeouts in - // wait(), but the only real reasonable one for a multi-threaded program is - // to listen for SIGCHLD. - // - // With this in mind, the waiting mechanism with a timeout barely uses - // waitpid() at all. There are a few times that waitpid() is invoked with - // WNOHANG, but otherwise all the necessary blocking is done by waiting for - // a SIGCHLD to arrive (and that blocking has a timeout). Note, however, - // that waitpid() is still used to actually reap the child. - // - // Signal handling is super tricky in general, and this is no exception. Due - // to the async nature of SIGCHLD, we use the self-pipe trick to transmit - // data out of the signal handler to the rest of the application. The first - // idea would be to have each thread waiting with a timeout to read this - // output file descriptor, but a write() is akin to a signal(), not a - // broadcast(), so it would only wake up one thread, and possibly the wrong - // thread. Hence a helper thread is used. - // - // The helper thread here is responsible for farming requests for a - // waitpid() with a timeout, and then processing all of the wait requests. - // By guaranteeing that only this helper thread is reading half of the - // self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread - // is also responsible for select() to wait for incoming messages or - // incoming SIGCHLD messages, along with passing an appropriate timeout to - // select() to wake things up as necessary. - // - // The ordering of the following statements is also very purposeful. 
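The self-pipe trick in miniature, as a conceptual sketch separate from the real implementation that follows (unix, `libc` crate assumed; names are illustrative). The handler does the only async-signal-safe thing it can, writing one byte, and all real work happens on the thread reading the other end:

```rust
use std::sync::atomic::{AtomicI32, Ordering};

// Write end of the self-pipe, published before the handler is installed.
static WRITE_FD: AtomicI32 = AtomicI32::new(-1);

extern "C" fn on_sigchld(_signum: libc::c_int) {
    let byte = 1u8;
    let fd = WRITE_FD.load(Ordering::Relaxed);
    unsafe {
        // A short or EWOULDBLOCK write is fine: data already sitting in
        // the pipe is guaranteed to wake the reader anyway.
        let _ = libc::write(fd, &byte as *const u8 as *const libc::c_void, 1);
    }
}
```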
First, - // we must be guaranteed that the helper thread is booted and available to - // receive SIGCHLD signals, and then we must also ensure that we do a - // nonblocking waitpid() at least once before we go ask the sigchld helper. - // This prevents the race where the child exits, we boot the helper, and - // then we ask for the child's exit status (never seeing a sigchld). - // - // The actual communication between the helper thread and this thread is - // quite simple, just a channel moving data around. - - HELPER.boot(register_sigchld, waitpid_helper); - - match waitpid_nowait(pid) { - Some(ret) => return Ok(ret), - None => {} - } - - let (tx, rx) = channel(); - HELPER.send(NewChild(pid, tx, deadline)); - return match rx.recv_opt() { - Ok(e) => Ok(e), - Err(()) => Err(util::timeout("wait timed out")), - }; - - // Register a new SIGCHLD handler, returning the reading half of the - // self-pipe plus the old handler registered (return value of sigaction). - // - // Be sure to set up the self-pipe first because as soon as we register a - // handler we're going to start receiving signals. - fn register_sigchld() -> (libc::c_int, c::sigaction) { - unsafe { - let mut pipes = [0, ..2]; - assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0); - util::set_nonblocking(pipes[0], true).ok().unwrap(); - util::set_nonblocking(pipes[1], true).ok().unwrap(); - WRITE_FD = pipes[1]; - - let mut old: c::sigaction = mem::zeroed(); - let mut new: c::sigaction = mem::zeroed(); - new.sa_handler = sigchld_handler; - new.sa_flags = c::SA_NOCLDSTOP; - assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0); - (pipes[0], old) - } - } - - // Helper thread for processing SIGCHLD messages - fn waitpid_helper(input: libc::c_int, - messages: Receiver, - (read_fd, old): (libc::c_int, c::sigaction)) { - util::set_nonblocking(input, true).ok().unwrap(); - let mut set: c::fd_set = unsafe { mem::zeroed() }; - let mut tv: libc::timeval; - let mut active = Vec::<(libc::pid_t, Sender, u64)>::new(); - let max = cmp::max(input, read_fd) + 1; - - 'outer: loop { - // Figure out the timeout of our syscall-to-happen. If we're waiting - // for some processes, then they'll have a timeout, otherwise we - // wait indefinitely for a message to arrive. - // - // FIXME: sure would be nice to not have to scan the entire array - let min = active.iter().map(|a| *a.ref2()).enumerate().min_by(|p| { - p.val1() - }); - let (p, idx) = match min { - Some((idx, deadline)) => { - let now = ::io::timer::now(); - let ms = if now < deadline {deadline - now} else {0}; - tv = util::ms_to_timeval(ms); - (&mut tv as *mut _, idx) - } - None => (ptr::null_mut(), -1), - }; - - // Wait for something to happen - c::fd_set(&mut set, input); - c::fd_set(&mut set, read_fd); - match unsafe { c::select(max, &mut set, ptr::null_mut(), - ptr::null_mut(), p) } { - // interrupted, retry - -1 if os::errno() == libc::EINTR as int => continue, - - // We read something, break out and process - 1 | 2 => {} - - // Timeout, the pending request is removed - 0 => { - drop(active.remove(idx)); - continue - } - - n => panic!("error in select {} ({})", os::errno(), n), - } - - // Process any pending messages - if drain(input) { - loop { - match messages.try_recv() { - Ok(NewChild(pid, tx, deadline)) => { - active.push((pid, tx, deadline)); - } - Err(comm::Disconnected) => { - assert!(active.len() == 0); - break 'outer; - } - Err(comm::Empty) => break, - } - } - } - - // If a child exited (somehow received SIGCHLD), then poll all - // children to see if any of them exited. 
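`util::ms_to_timeval`, used to build the `select()` deadline above, is not shown in this patch; a plausible shape for it, splitting a millisecond timeout into the seconds/microseconds pair `select()` expects:

```rust
// Plausible implementation of the `util::ms_to_timeval` helper referenced
// above; the real one is not part of this patch.
fn ms_to_timeval(ms: u64) -> libc::timeval {
    libc::timeval {
        tv_sec: (ms / 1000) as libc::time_t,
        tv_usec: ((ms % 1000) * 1000) as libc::suseconds_t,
    }
}
```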
- // - // We also attempt to be responsible netizens when dealing with - // SIGCHLD by invoking any previous SIGCHLD handler instead of just - // ignoring any previous SIGCHLD handler. Note that we don't provide - // a 1:1 mapping of our handler invocations to the previous handler - // invocations because we drain the `read_fd` entirely. This is - // probably OK because the kernel is already allowed to coalesce - // simultaneous signals, we're just doing some extra coalescing. - // - // Another point of note is that this likely runs the signal handler - // on a different thread than the one that received the signal. I - // *think* this is ok at this time. - // - // The main reason for doing this is to allow stdtest to run native - // tests as well. Both libgreen and libnative are running around - // with process timeouts, but libgreen should get there first - // (currently libuv doesn't handle old signal handlers). - if drain(read_fd) { - let i: uint = unsafe { mem::transmute(old.sa_handler) }; - if i != 0 { - assert!(old.sa_flags & c::SA_SIGINFO == 0); - (old.sa_handler)(c::SIGCHLD); - } - - // FIXME: sure would be nice to not have to scan the entire - // array... - active.retain(|&(pid, ref tx, _)| { - match waitpid_nowait(pid) { - Some(msg) => { tx.send(msg); false } - None => true, - } - }); - } - } - - // Once this helper thread is done, we re-register the old sigchld - // handler and close our intermediate file descriptors. - unsafe { - assert_eq!(c::sigaction(c::SIGCHLD, &old, ptr::null_mut()), 0); - let _ = libc::close(read_fd); - let _ = libc::close(WRITE_FD); - WRITE_FD = -1; - } - } - - // Drain all pending data from the file descriptor, returning if any data - // could be drained. This requires that the file descriptor is in - // nonblocking mode. - fn drain(fd: libc::c_int) -> bool { - let mut ret = false; - loop { - let mut buf = [0u8, ..1]; - match unsafe { - libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void, - buf.len() as libc::size_t) - } { - n if n > 0 => { ret = true; } - 0 => return true, - -1 if util::wouldblock() => return ret, - n => panic!("bad read {} ({})", os::last_os_error(), n), - } - } - } - - // Signal handler for SIGCHLD signals, must be async-signal-safe! - // - // This function will write to the writing half of the "self pipe" to wake - // up the helper thread if it's waiting. Note that this write must be - // nonblocking because if it blocks and the reader is the thread we - // interrupted, then we'll deadlock. - // - // When writing, if the write returns EWOULDBLOCK then we choose to ignore - // it. At that point we're guaranteed that there's something in the pipe - // which will wake up the other end at some point, so we just allow this - // signal to be coalesced with the pending signals on the pipe. 
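`util::set_nonblocking`, relied on above so that the handler's `write()` can never block, is likewise not shown in this patch; a plausible fcntl-based sketch (unix, `libc` crate assumed):

```rust
use std::io;

/// Plausible shape of the `set_nonblocking` helper used above: add
/// O_NONBLOCK to the descriptor's flags so reads and writes never block.
unsafe fn set_nonblocking(fd: libc::c_int) -> io::Result<()> {
    let flags = libc::fcntl(fd, libc::F_GETFL);
    if flags < 0 {
        return Err(io::Error::last_os_error());
    }
    if libc::fcntl(fd, libc::F_SETFL, flags | libc::O_NONBLOCK) < 0 {
        return Err(io::Error::last_os_error());
    }
    Ok(())
}
```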
- extern fn sigchld_handler(_signum: libc::c_int) { - let msg = 1i; - match unsafe { - libc::write(WRITE_FD, &msg as *const _ as *const libc::c_void, 1) - } { - 1 => {} - -1 if util::wouldblock() => {} // see above comments - n => panic!("bad error on write fd: {} {}", n, os::errno()), - } - } -} - -fn waitpid_nowait(pid: pid_t) -> Option { - return waitpid_os(pid); - - // This code path isn't necessary on windows - #[cfg(windows)] - fn waitpid_os(_pid: pid_t) -> Option { None } - - #[cfg(unix)] - fn waitpid_os(pid: pid_t) -> Option { - let mut status = 0 as c_int; - match retry(|| unsafe { - c::waitpid(pid, &mut status, c::WNOHANG) - }) { - n if n == pid => Some(translate_status(status)), - 0 => None, - n => panic!("unknown waitpid error `{}`: {}", n, - super::last_error().code), - } - } -} - -#[cfg(test)] -mod tests { - - #[test] #[cfg(windows)] - fn test_make_command_line() { - use std::str; - use std::c_str::CString; - use super::make_command_line; - - fn test_wrapper(prog: &str, args: &[&str]) -> String { - make_command_line(&prog.to_c_str(), - args.iter() - .map(|a| a.to_c_str()) - .collect::>() - .as_slice()) - } - - assert_eq!( - test_wrapper("prog", ["aaa", "bbb", "ccc"]), - "prog aaa bbb ccc".to_string() - ); - - assert_eq!( - test_wrapper("C:\\Program Files\\blah\\blah.exe", ["aaa"]), - "\"C:\\Program Files\\blah\\blah.exe\" aaa".to_string() - ); - assert_eq!( - test_wrapper("C:\\Program Files\\test", ["aa\"bb"]), - "\"C:\\Program Files\\test\" aa\\\"bb".to_string() - ); - assert_eq!( - test_wrapper("echo", ["a b c"]), - "echo \"a b c\"".to_string() - ); - assert_eq!( - test_wrapper("\u03c0\u042f\u97f3\u00e6\u221e", []), - "\u03c0\u042f\u97f3\u00e6\u221e".to_string() - ); - } -} diff --git a/src/libnative/lib.rs b/src/libnative/lib.rs deleted file mode 100644 index ea1136dfe3c..00000000000 --- a/src/libnative/lib.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The native I/O and threading crate -//! -//! This crate contains an implementation of 1:1 scheduling for a "native" -//! runtime. In addition, all I/O provided by this crate is the thread blocking -//! version of I/O. -//! -//! # Starting with libnative -//! -//! ```rust -//! extern crate native; -//! -//! #[start] -//! fn start(argc: int, argv: *const *const u8) -> int { -//! native::start(argc, argv, main) -//! } -//! -//! fn main() { -//! // this code is running on the main OS thread -//! } -//! ``` -//! -//! # Force spawning a native task -//! -//! ```rust -//! extern crate native; -//! -//! use std::task::TaskBuilder; -//! use native::NativeTaskBuilder; -//! -//! fn main() { -//! // We're not sure whether this main function is run in 1:1 or M:N mode. -//! -//! TaskBuilder::new().native().spawn(proc() { -//! // this code is guaranteed to be run on a native thread -//! }); -//! } -//! 
``` - -#![crate_name = "native"] -#![experimental] -#![license = "MIT/ASL2"] -#![crate_type = "rlib"] -#![crate_type = "dylib"] -#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "http://www.rust-lang.org/favicon.ico", - html_root_url = "http://doc.rust-lang.org/nightly/")] - -#![deny(unused_results, unused_must_use)] -#![allow(non_camel_case_types)] -#![allow(unknown_features)] -#![feature(default_type_params, lang_items, slicing_syntax, globs)] - -// NB this crate explicitly does *not* allow glob imports, please seriously -// consider whether they're needed before adding that feature here (the -// answer is that you don't need them) -#![feature(macro_rules, unsafe_destructor, default_type_params)] - -extern crate alloc; -extern crate libc; - -use std::os; -use std::rt; -use std::str; - -pub use task::NativeTaskBuilder; - -pub mod task; - -#[cfg(any(windows, android))] -static OS_DEFAULT_STACK_ESTIMATE: uint = 1 << 20; -#[cfg(all(unix, not(android)))] -static OS_DEFAULT_STACK_ESTIMATE: uint = 2 * (1 << 20); - -#[lang = "start"] -#[cfg(not(test))] -pub fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int { - use std::mem; - start(argc, argv, proc() { - let main: extern "Rust" fn() = unsafe { mem::transmute(main) }; - main(); - }) -} - -/// Executes the given procedure after initializing the runtime with the given -/// argc/argv. -/// -/// This procedure is guaranteed to run on the thread calling this function, but -/// the stack bounds for this rust task will *not* be set. Care must be taken -/// for this function to not overflow its stack. -/// -/// This function will only return once *all* native threads in the system have -/// exited. -pub fn start(argc: int, argv: *const *const u8, main: proc()) -> int { - let something_around_the_top_of_the_stack = 1; - let addr = &something_around_the_top_of_the_stack as *const int; - let my_stack_top = addr as uint; - - // FIXME #11359 we just assume that this thread has a stack of a - // certain size, and estimate that there's at most 20KB of stack - // frames above our current position. - let my_stack_bottom = my_stack_top + 20000 - OS_DEFAULT_STACK_ESTIMATE; - - // When using libgreen, one of the first things that we do is to turn off - // the SIGPIPE signal (set it to ignore). By default, some platforms will - // send a *signal* when a EPIPE error would otherwise be delivered. This - // runtime doesn't install a SIGPIPE handler, causing it to kill the - // program, which isn't exactly what we want! - // - // Hence, we set SIGPIPE to ignore when the program starts up in order to - // prevent this problem. - #[cfg(windows)] fn ignore_sigpipe() {} - #[cfg(unix)] fn ignore_sigpipe() { - use libc; - use libc::funcs::posix01::signal::signal; - unsafe { - assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != -1); - } - } - ignore_sigpipe(); - - rt::init(argc, argv); - let mut exit_code = None; - let mut main = Some(main); - let mut task = task::new((my_stack_bottom, my_stack_top), - rt::thread::main_guard_page()); - task.name = Some(str::Slice("
")); - drop(task.run(|| { - unsafe { - rt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top); - } - exit_code = Some(run(main.take().unwrap())); - }).destroy()); - unsafe { rt::cleanup(); } - // If the exit code wasn't set, then the task block must have panicked. - return exit_code.unwrap_or(rt::DEFAULT_ERROR_CODE); -} - -/// Executes a procedure on the current thread in a Rust task context. -/// -/// This function has all of the same details as `start` except for a different -/// number of arguments. -pub fn run(main: proc()) -> int { - main(); - os::get_exit_status() -} diff --git a/src/libnative/task.rs b/src/libnative/task.rs deleted file mode 100644 index 02fb5b31c0d..00000000000 --- a/src/libnative/task.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Tasks implemented on top of OS threads -//! -//! This module contains the implementation of the 1:1 threading module required -//! by rust tasks. This implements the necessary API traits laid out by std::rt -//! in order to spawn new tasks and deschedule the current task. - -use std::any::Any; -use std::mem; -use std::rt::bookkeeping; -use std::rt::local::Local; -use std::rt::mutex::NativeMutex; -use std::rt::stack; -use std::rt::task::{Task, BlockedTask, TaskOpts}; -use std::rt::thread::Thread; -use std::rt; - -#[cfg(test)] -mod tests { - use std::rt::local::Local; - use std::rt::task::{Task, TaskOpts}; - use std::task; - use std::task::{TaskBuilder, Spawner}; - - use super::{Ops, NativeTaskBuilder, NativeSpawner}; - - #[test] - fn smoke() { - let (tx, rx) = channel(); - spawn(proc() { - tx.send(()); - }); - rx.recv(); - } - - #[test] - fn smoke_panic() { - let (tx, rx) = channel::<()>(); - spawn(proc() { - let _tx = tx; - panic!() - }); - assert_eq!(rx.recv_opt(), Err(())); - } - - #[test] - fn smoke_opts() { - let mut opts = TaskOpts::new(); - opts.name = Some("test".into_maybe_owned()); - opts.stack_size = Some(20 * 4096); - let (tx, rx) = channel(); - opts.on_exit = Some(proc(r) tx.send(r)); - NativeSpawner.spawn(opts, proc() {}); - assert!(rx.recv().is_ok()); - } - - #[test] - fn smoke_opts_panic() { - let mut opts = TaskOpts::new(); - let (tx, rx) = channel(); - opts.on_exit = Some(proc(r) tx.send(r)); - NativeSpawner.spawn(opts, proc() { panic!() }); - assert!(rx.recv().is_err()); - } - - #[test] - fn yield_test() { - let (tx, rx) = channel(); - spawn(proc() { - for _ in range(0u, 10) { task::deschedule(); } - tx.send(()); - }); - rx.recv(); - } - - #[test] - fn spawn_children() { - let (tx1, rx) = channel(); - spawn(proc() { - let (tx2, rx) = channel(); - spawn(proc() { - let (tx3, rx) = channel(); - spawn(proc() { - tx3.send(()); - }); - rx.recv(); - tx2.send(()); - }); - rx.recv(); - tx1.send(()); - }); - rx.recv(); - } - - #[test] - fn spawn_inherits() { - let (tx, rx) = channel(); - TaskBuilder::new().spawner(NativeSpawner).spawn(proc() { - spawn(proc() { - let mut task: Box = Local::take(); - match task.maybe_take_runtime::() { - Some(ops) => { - task.put_runtime(ops); - } - None => panic!(), - } - Local::put(task); - tx.send(()); - }); - }); - rx.recv(); - } - - #[test] - fn test_native_builder() { - let res = TaskBuilder::new().native().try(proc() { - 
"Success!".to_string() - }); - assert_eq!(res.ok().unwrap(), "Success!".to_string()); - } -} diff --git a/src/librustc_trans/driver/driver.rs b/src/librustc_trans/driver/driver.rs index 98cf779fcd2..b3b68d0c22b 100644 --- a/src/librustc_trans/driver/driver.rs +++ b/src/librustc_trans/driver/driver.rs @@ -198,10 +198,6 @@ pub fn phase_2_configure_and_expand(sess: &Session, *sess.features.borrow_mut() = features; }); - let any_exe = sess.crate_types.borrow().iter().any(|ty| { - *ty == config::CrateTypeExecutable - }); - // strip before expansion to allow macros to depend on // configuration variables e.g/ in // @@ -215,8 +211,7 @@ pub fn phase_2_configure_and_expand(sess: &Session, krate = time(time_passes, "crate injection", krate, |krate| syntax::std_inject::maybe_inject_crates_ref(krate, - sess.opts.alt_std_name.clone(), - any_exe)); + sess.opts.alt_std_name.clone())); let mut addl_plugins = Some(addl_plugins); let Plugins { macros, registrars } diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index 7f2a4c7e365..70b30997e18 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -248,9 +248,7 @@ pub mod fmt; #[path = "sys/common/mod.rs"] mod sys_common; -// FIXME #7809: This shouldn't be pub, and it should be reexported under 'unstable' -// but name resolution doesn't work without it being pub. -pub mod rt; +mod rt; mod failure; // A curious inner-module that's not exported that contains the binding diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 8701fadf65c..322df17f4f1 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -58,6 +58,7 @@ Several modules in `core` are clients of `rt`: use failure; use rustrt; +use startup; // Reexport some of our utilities which are expected by other crates. pub use self::util::{default_sched_threads, min_stack, running_on_valgrind}; @@ -86,6 +87,81 @@ pub fn init(argc: int, argv: *const *const u8) { unsafe { unwind::register(failure::on_fail); } } +#[cfg(any(windows, android))] +static OS_DEFAULT_STACK_ESTIMATE: uint = 1 << 20; +#[cfg(all(unix, not(android)))] +static OS_DEFAULT_STACK_ESTIMATE: uint = 2 * (1 << 20); + +#[cfg(not(test))] +#[lang = "start"] +fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int { + use std::mem; + start(argc, argv, proc() { + let main: extern "Rust" fn() = unsafe { mem::transmute(main) }; + main(); + }) +} + +/// Executes the given procedure after initializing the runtime with the given +/// argc/argv. +/// +/// This procedure is guaranteed to run on the thread calling this function, but +/// the stack bounds for this rust task will *not* be set. Care must be taken +/// for this function to not overflow its stack. +/// +/// This function will only return once *all* native threads in the system have +/// exited. +pub fn start(argc: int, argv: *const *const u8, main: proc()) -> int { + use prelude::*; + use rt; + use rustrt::task::Task; + use str; + + let something_around_the_top_of_the_stack = 1; + let addr = &something_around_the_top_of_the_stack as *const int; + let my_stack_top = addr as uint; + + // FIXME #11359 we just assume that this thread has a stack of a + // certain size, and estimate that there's at most 20KB of stack + // frames above our current position. + let my_stack_bottom = my_stack_top + 20000 - OS_DEFAULT_STACK_ESTIMATE; + + // When using libgreen, one of the first things that we do is to turn off + // the SIGPIPE signal (set it to ignore). By default, some platforms will + // send a *signal* when a EPIPE error would otherwise be delivered. 
This + // runtime doesn't install a SIGPIPE handler, causing it to kill the + // program, which isn't exactly what we want! + // + // Hence, we set SIGPIPE to ignore when the program starts up in order to + // prevent this problem. + #[cfg(windows)] fn ignore_sigpipe() {} + #[cfg(unix)] fn ignore_sigpipe() { + use libc; + use libc::funcs::posix01::signal::signal; + unsafe { + assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != -1); + } + } + ignore_sigpipe(); + + init(argc, argv); + let mut exit_code = None; + let mut main = Some(main); + let mut task = task::new((my_stack_bottom, my_stack_top), + rt::thread::main_guard_page()); + task.name = Some(str::Slice("
")); + drop(task.run(|| { + unsafe { + rt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top); + } + (main.take().unwrap())(); + exit_code = Some(os::get_exit_status()); + }).destroy()); + unsafe { rt::cleanup(); } + // If the exit code wasn't set, then the task block must have panicked. + return exit_code.unwrap_or(rt::DEFAULT_ERROR_CODE); +} + /// One-time runtime cleanup. /// /// This function is unsafe because it performs no checks to ensure that the diff --git a/src/libsyntax/std_inject.rs b/src/libsyntax/std_inject.rs index 6a4ab365a50..e98be046586 100644 --- a/src/libsyntax/std_inject.rs +++ b/src/libsyntax/std_inject.rs @@ -22,10 +22,10 @@ use util::small_vector::SmallVector; use std::mem; -pub fn maybe_inject_crates_ref(krate: ast::Crate, alt_std_name: Option, any_exe: bool) +pub fn maybe_inject_crates_ref(krate: ast::Crate, alt_std_name: Option) -> ast::Crate { if use_std(&krate) { - inject_crates_ref(krate, alt_std_name, any_exe) + inject_crates_ref(krate, alt_std_name) } else { krate } @@ -43,17 +43,12 @@ fn use_std(krate: &ast::Crate) -> bool { !attr::contains_name(krate.attrs.as_slice(), "no_std") } -fn use_start(krate: &ast::Crate) -> bool { - !attr::contains_name(krate.attrs.as_slice(), "no_start") -} - fn no_prelude(attrs: &[ast::Attribute]) -> bool { attr::contains_name(attrs, "no_implicit_prelude") } struct StandardLibraryInjector<'a> { alt_std_name: Option, - any_exe: bool, } impl<'a> fold::Folder for StandardLibraryInjector<'a> { @@ -80,23 +75,6 @@ impl<'a> fold::Folder for StandardLibraryInjector<'a> { span: DUMMY_SP }); - if use_start(&krate) && self.any_exe { - let visible_rt_name = "rt"; - let actual_rt_name = "native"; - // Gensym the ident so it can't be named - let visible_rt_name = token::gensym_ident(visible_rt_name); - let actual_rt_name = token::intern_and_get_ident(actual_rt_name); - - vis.push(ast::ViewItem { - node: ast::ViewItemExternCrate(visible_rt_name, - Some((actual_rt_name, ast::CookedStr)), - ast::DUMMY_NODE_ID), - attrs: Vec::new(), - vis: ast::Inherited, - span: DUMMY_SP - }); - } - // `extern crate` must be precede `use` items mem::swap(&mut vis, &mut krate.module.view_items); krate.module.view_items.extend(vis.into_iter()); @@ -118,12 +96,9 @@ impl<'a> fold::Folder for StandardLibraryInjector<'a> { } } -fn inject_crates_ref(krate: ast::Crate, - alt_std_name: Option, - any_exe: bool) -> ast::Crate { +fn inject_crates_ref(krate: ast::Crate, alt_std_name: Option) -> ast::Crate { let mut fold = StandardLibraryInjector { alt_std_name: alt_std_name, - any_exe: any_exe, }; fold.fold_crate(krate) } From 91a2c0d51241677d71b8c0abc80535e580fe3939 Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Fri, 14 Nov 2014 13:56:15 -0800 Subject: [PATCH 03/10] Remove libgreen With runtime removal complete, there is no longer any reason to provide libgreen. 
[breaking-change] --- src/libgreen/basic.rs | 259 ------ src/libgreen/context.rs | 325 ------- src/libgreen/coroutine.rs | 44 - src/libgreen/lib.rs | 567 ------------ src/libgreen/macros.rs | 118 --- src/libgreen/message_queue.rs | 67 -- src/libgreen/sched.rs | 1523 --------------------------------- src/libgreen/simple.rs | 96 --- src/libgreen/sleeper_list.rs | 46 - src/libgreen/stack.rs | 215 ----- src/libgreen/task.rs | 602 ------------- 11 files changed, 3862 deletions(-) delete mode 100644 src/libgreen/basic.rs delete mode 100644 src/libgreen/context.rs delete mode 100644 src/libgreen/coroutine.rs delete mode 100644 src/libgreen/lib.rs delete mode 100644 src/libgreen/macros.rs delete mode 100644 src/libgreen/message_queue.rs delete mode 100644 src/libgreen/sched.rs delete mode 100644 src/libgreen/simple.rs delete mode 100644 src/libgreen/sleeper_list.rs delete mode 100644 src/libgreen/stack.rs delete mode 100644 src/libgreen/task.rs diff --git a/src/libgreen/basic.rs b/src/libgreen/basic.rs deleted file mode 100644 index aa933f182e5..00000000000 --- a/src/libgreen/basic.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This is a basic event loop implementation not meant for any "real purposes" -//! other than testing the scheduler and proving that it's possible to have a -//! pluggable event loop. -//! -//! This implementation is also used as the fallback implementation of an event -//! loop if no other one is provided (and M:N scheduling is desired). -use self::Message::*; - -use alloc::arc::Arc; -use std::sync::atomic; -use std::mem; -use std::rt::rtio::{EventLoop, RemoteCallback}; -use std::rt::rtio::{PausableIdleCallback, Callback}; -use std::rt::exclusive::Exclusive; - -/// This is the only exported function from this module. 
-pub fn event_loop() -> Box { - box BasicLoop::new() as Box -} - -struct BasicLoop { - work: Vec, // pending work - remotes: Vec<(uint, Box)>, - next_remote: uint, - messages: Arc>>, - idle: Option>, - idle_active: Option>, -} - -enum Message { RunRemote(uint), RemoveRemote(uint) } - -impl BasicLoop { - fn new() -> BasicLoop { - BasicLoop { - work: vec![], - idle: None, - idle_active: None, - next_remote: 0, - remotes: vec![], - messages: Arc::new(Exclusive::new(Vec::new())), - } - } - - /// Process everything in the work queue (continually) - fn work(&mut self) { - while self.work.len() > 0 { - for work in mem::replace(&mut self.work, vec![]).into_iter() { - work(); - } - } - } - - fn remote_work(&mut self) { - let messages = unsafe { - mem::replace(&mut *self.messages.lock(), Vec::new()) - }; - for message in messages.into_iter() { - self.message(message); - } - } - - fn message(&mut self, message: Message) { - match message { - RunRemote(i) => { - match self.remotes.iter_mut().find(|& &(id, _)| id == i) { - Some(&(_, ref mut f)) => f.call(), - None => panic!("bad remote: {}", i), - } - } - RemoveRemote(i) => { - match self.remotes.iter().position(|&(id, _)| id == i) { - Some(i) => { self.remotes.remove(i).unwrap(); } - None => panic!("bad remote: {}", i), - } - } - } - } - - /// Run the idle callback if one is registered - fn idle(&mut self) { - match self.idle { - Some(ref mut idle) => { - if self.idle_active.as_ref().unwrap().load(atomic::SeqCst) { - idle.call(); - } - } - None => {} - } - } - - fn has_idle(&self) -> bool { - self.idle.is_some() && self.idle_active.as_ref().unwrap().load(atomic::SeqCst) - } -} - -impl EventLoop for BasicLoop { - fn run(&mut self) { - // Not exactly efficient, but it gets the job done. - while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() { - - self.work(); - self.remote_work(); - - if self.has_idle() { - self.idle(); - continue - } - - unsafe { - let messages = self.messages.lock(); - // We block here if we have no messages to process and we may - // receive a message at a later date - if self.remotes.len() > 0 && messages.len() == 0 && - self.work.len() == 0 { - messages.wait() - } - } - } - } - - fn callback(&mut self, f: proc():Send) { - self.work.push(f); - } - - // FIXME: Seems like a really weird requirement to have an event loop provide. 
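The `Exclusive<Vec<Message>>` plus `wait()`/`signal()` used in `run` above is a lock paired with a condition variable; in today's std the same shape could be sketched like this (illustrative only, not the API removed here):

```rust
use std::mem;
use std::sync::{Condvar, Mutex};

struct MessageQueue<T> {
    queue: Mutex<Vec<T>>,
    cond: Condvar,
}

impl<T> MessageQueue<T> {
    fn push(&self, msg: T) {
        self.queue.lock().unwrap().push(msg);
        self.cond.notify_one(); // the `signal()` in the code above
    }

    fn wait_nonempty(&self) -> Vec<T> {
        let mut q = self.queue.lock().unwrap();
        // Loop to tolerate spurious wakeups, as with any condvar.
        while q.is_empty() {
            q = self.cond.wait(q).unwrap(); // the `wait()` in the code above
        }
        mem::take(&mut *q)
    }
}
```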
- fn pausable_idle_callback(&mut self, cb: Box) - -> Box { - rtassert!(self.idle.is_none()); - self.idle = Some(cb); - let a = Arc::new(atomic::AtomicBool::new(true)); - self.idle_active = Some(a.clone()); - box BasicPausable { active: a } as Box - } - - fn remote_callback(&mut self, f: Box) - -> Box { - let id = self.next_remote; - self.next_remote += 1; - self.remotes.push((id, f)); - box BasicRemote::new(self.messages.clone(), id) as - Box - } - - fn has_active_io(&self) -> bool { false } -} - -struct BasicRemote { - queue: Arc>>, - id: uint, -} - -impl BasicRemote { - fn new(queue: Arc>>, id: uint) -> BasicRemote { - BasicRemote { queue: queue, id: id } - } -} - -impl RemoteCallback for BasicRemote { - fn fire(&mut self) { - let mut queue = unsafe { self.queue.lock() }; - queue.push(RunRemote(self.id)); - queue.signal(); - } -} - -impl Drop for BasicRemote { - fn drop(&mut self) { - let mut queue = unsafe { self.queue.lock() }; - queue.push(RemoveRemote(self.id)); - queue.signal(); - } -} - -struct BasicPausable { - active: Arc, -} - -impl PausableIdleCallback for BasicPausable { - fn pause(&mut self) { - self.active.store(false, atomic::SeqCst); - } - fn resume(&mut self) { - self.active.store(true, atomic::SeqCst); - } -} - -impl Drop for BasicPausable { - fn drop(&mut self) { - self.active.store(false, atomic::SeqCst); - } -} - -#[cfg(test)] -mod test { - use std::rt::task::TaskOpts; - - use basic; - use PoolConfig; - use SchedPool; - - fn pool() -> SchedPool { - SchedPool::new(PoolConfig { - threads: 1, - event_loop_factory: basic::event_loop, - }) - } - - fn run(f: proc():Send) { - let mut pool = pool(); - pool.spawn(TaskOpts::new(), f); - pool.shutdown(); - } - - #[test] - fn smoke() { - run(proc() {}); - } - - #[test] - fn some_channels() { - run(proc() { - let (tx, rx) = channel(); - spawn(proc() { - tx.send(()); - }); - rx.recv(); - }); - } - - #[test] - fn multi_thread() { - let mut pool = SchedPool::new(PoolConfig { - threads: 2, - event_loop_factory: basic::event_loop, - }); - - for _ in range(0u, 20) { - pool.spawn(TaskOpts::new(), proc() { - let (tx, rx) = channel(); - spawn(proc() { - tx.send(()); - }); - rx.recv(); - }); - } - - pool.shutdown(); - } -} diff --git a/src/libgreen/context.rs b/src/libgreen/context.rs deleted file mode 100644 index 2d3e85cc833..00000000000 --- a/src/libgreen/context.rs +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use stack::Stack; -use std::uint; -use std::mem::transmute; -use std::rt::stack; -use std::raw; -#[cfg(target_arch = "x86_64")] -use std::simd; -use libc; - -// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing -// SSE regs. It would be marginally better not to do this. In C++ we -// use an attribute on a struct. -// FIXME #7761: It would be nice to define regs as `Box>` -// since the registers are sometimes empty, but the discriminant would -// then misalign the regs again. 
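For reference, modern Rust can express the 16-byte alignment directly with `#[repr(align(16))]`, which would make the boxing workaround described in that FIXME unnecessary; a sketch, with the field layout illustrative rather than the real register set:

```rust
// The struct itself is 16-byte aligned, so SSE registers stored inside it
// are correctly aligned wherever the struct lives, boxed or not.
#[repr(C, align(16))]
struct AlignedRegisters {
    gpr: [usize; 10],
    xmm: [[u32; 4]; 6],
}
```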
-pub struct Context { - /// Hold the registers while the task or scheduler is suspended - regs: Box, - /// Lower bound and upper bound for the stack - stack_bounds: Option<(uint, uint)>, -} - -pub type InitFn = extern "C" fn(uint, *mut (), *mut ()) -> !; - -impl Context { - pub fn empty() -> Context { - Context { - regs: new_regs(), - stack_bounds: None, - } - } - - /// Create a new context that will resume execution by running proc() - /// - /// The `init` function will be run with `arg` and the `start` procedure - /// split up into code and env pointers. It is required that the `init` - /// function never return. - /// - /// FIXME: this is basically an awful the interface. The main reason for - /// this is to reduce the number of allocations made when a green - /// task is spawned as much as possible - pub fn new(init: InitFn, arg: uint, start: proc():Send, - stack: &mut Stack) -> Context { - - let sp: *const uint = stack.end(); - let sp: *mut uint = sp as *mut uint; - // Save and then immediately load the current context, - // which we will then modify to call the given function when restored - let mut regs = new_regs(); - - initialize_call_frame(&mut *regs, - init, - arg, - unsafe { transmute(start) }, - sp); - - // Scheduler tasks don't have a stack in the "we allocated it" sense, - // but rather they run on pthreads stacks. We have complete control over - // them in terms of the code running on them (and hopefully they don't - // overflow). Additionally, their coroutine stacks are listed as being - // zero-length, so that's how we detect what's what here. - let stack_base: *const uint = stack.start(); - let bounds = if sp as libc::uintptr_t == stack_base as libc::uintptr_t { - None - } else { - Some((stack_base as uint, sp as uint)) - }; - return Context { - regs: regs, - stack_bounds: bounds, - } - } - - /* Switch contexts - - Suspend the current execution context and resume another by - saving the registers values of the executing thread to a Context - then loading the registers from a previously saved Context. - */ - pub fn swap(out_context: &mut Context, in_context: &Context) { - rtdebug!("swapping contexts"); - let out_regs: &mut Registers = match out_context { - &Context { regs: box ref mut r, .. } => r - }; - let in_regs: &Registers = match in_context { - &Context { regs: box ref r, .. } => r - }; - - rtdebug!("noting the stack limit and doing raw swap"); - - unsafe { - // Right before we switch to the new context, set the new context's - // stack limit in the OS-specified TLS slot. This also means that - // we cannot call any more rust functions after record_stack_bounds - // returns because they would all likely panic due to the limit being - // invalid for the current task. Lucky for us `rust_swap_registers` - // is a C function so we don't have to worry about that! - match in_context.stack_bounds { - Some((lo, hi)) => stack::record_rust_managed_stack_bounds(lo, hi), - // If we're going back to one of the original contexts or - // something that's possibly not a "normal task", then reset - // the stack limit to 0 to make morestack never panic - None => stack::record_rust_managed_stack_bounds(0, uint::MAX), - } - rust_swap_registers(out_regs, in_regs); - } - } -} - -#[link(name = "context_switch", kind = "static")] -extern { - fn rust_swap_registers(out_regs: *mut Registers, in_regs: *const Registers); -} - -// Register contexts used in various architectures -// -// These structures all represent a context of one task throughout its -// execution. 
Each struct is a representation of the architecture's register -// set. When swapping between tasks, these register sets are used to save off -// the current registers into one struct, and load them all from another. -// -// Note that this is only used for context switching, which means that some of -// the registers may go unused. For example, for architectures with -// callee/caller saved registers, the context will only reflect the callee-saved -// registers. This is because the caller saved registers are already stored -// elsewhere on the stack (if it was necessary anyway). -// -// Additionally, there may be fields on various architectures which are unused -// entirely because they only reflect what is theoretically possible for a -// "complete register set" to show, but user-space cannot alter these registers. -// An example of this would be the segment selectors for x86. -// -// These structures/functions are roughly in-sync with the source files inside -// of src/rt/arch/$arch. The only currently used function from those folders is -// the `rust_swap_registers` function, but that's only because for now segmented -// stacks are disabled. - -#[cfg(target_arch = "x86")] -#[repr(C)] -struct Registers { - eax: u32, ebx: u32, ecx: u32, edx: u32, - ebp: u32, esi: u32, edi: u32, esp: u32, - cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16, - eflags: u32, eip: u32 -} - -#[cfg(target_arch = "x86")] -fn new_regs() -> Box { - box Registers { - eax: 0, ebx: 0, ecx: 0, edx: 0, - ebp: 0, esi: 0, edi: 0, esp: 0, - cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0, - eflags: 0, eip: 0 - } -} - -#[cfg(target_arch = "x86")] -fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint, - procedure: raw::Procedure, sp: *mut uint) { - let sp = sp as *mut uint; - // x86 has interesting stack alignment requirements, so do some alignment - // plus some offsetting to figure out what the actual stack should be. - let sp = align_down(sp); - let sp = mut_offset(sp, -4); - - unsafe { *mut_offset(sp, 2) = procedure.env as uint }; - unsafe { *mut_offset(sp, 1) = procedure.code as uint }; - unsafe { *mut_offset(sp, 0) = arg as uint }; - let sp = mut_offset(sp, -1); - unsafe { *sp = 0 }; // The final return address - - regs.esp = sp as u32; - regs.eip = fptr as u32; - - // Last base pointer on the stack is 0 - regs.ebp = 0; -} - -// windows requires saving more registers (both general and XMM), so the windows -// register context must be larger. 
-#[cfg(all(windows, target_arch = "x86_64"))] -#[repr(C)] -struct Registers { - gpr:[libc::uintptr_t, ..14], - _xmm:[simd::u32x4, ..10] -} -#[cfg(all(not(windows), target_arch = "x86_64"))] -#[repr(C)] -struct Registers { - gpr:[libc::uintptr_t, ..10], - _xmm:[simd::u32x4, ..6] -} - -#[cfg(all(windows, target_arch = "x86_64"))] -fn new_regs() -> Box { - box() Registers { - gpr:[0,..14], - _xmm:[simd::u32x4(0,0,0,0),..10] - } -} -#[cfg(all(not(windows), target_arch = "x86_64"))] -fn new_regs() -> Box { - box() Registers { - gpr:[0,..10], - _xmm:[simd::u32x4(0,0,0,0),..6] - } -} - -#[cfg(target_arch = "x86_64")] -fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint, - procedure: raw::Procedure, sp: *mut uint) { - extern { fn rust_bootstrap_green_task(); } - - // Redefinitions from rt/arch/x86_64/regs.h - static RUSTRT_RSP: uint = 1; - static RUSTRT_IP: uint = 8; - static RUSTRT_RBP: uint = 2; - static RUSTRT_R12: uint = 4; - static RUSTRT_R13: uint = 5; - static RUSTRT_R14: uint = 6; - static RUSTRT_R15: uint = 7; - - let sp = align_down(sp); - let sp = mut_offset(sp, -1); - - // The final return address. 0 indicates the bottom of the stack - unsafe { *sp = 0; } - - rtdebug!("creating call frame"); - rtdebug!("fptr {:#x}", fptr as libc::uintptr_t); - rtdebug!("arg {:#x}", arg); - rtdebug!("sp {}", sp); - - // These registers are frobbed by rust_bootstrap_green_task into the right - // location so we can invoke the "real init function", `fptr`. - regs.gpr[RUSTRT_R12] = arg as libc::uintptr_t; - regs.gpr[RUSTRT_R13] = procedure.code as libc::uintptr_t; - regs.gpr[RUSTRT_R14] = procedure.env as libc::uintptr_t; - regs.gpr[RUSTRT_R15] = fptr as libc::uintptr_t; - - // These registers are picked up by the regular context switch paths. These - // will put us in "mostly the right context" except for frobbing all the - // arguments to the right place. We have the small trampoline code inside of - // rust_bootstrap_green_task to do that. - regs.gpr[RUSTRT_RSP] = sp as libc::uintptr_t; - regs.gpr[RUSTRT_IP] = rust_bootstrap_green_task as libc::uintptr_t; - - // Last base pointer on the stack should be 0 - regs.gpr[RUSTRT_RBP] = 0; -} - -#[cfg(target_arch = "arm")] -type Registers = [libc::uintptr_t, ..32]; - -#[cfg(target_arch = "arm")] -fn new_regs() -> Box { box {[0, .. 32]} } - -#[cfg(target_arch = "arm")] -fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint, - procedure: raw::Procedure, sp: *mut uint) { - extern { fn rust_bootstrap_green_task(); } - - let sp = align_down(sp); - // sp of arm eabi is 8-byte aligned - let sp = mut_offset(sp, -2); - - // The final return address. 0 indicates the bottom of the stack - unsafe { *sp = 0; } - - // ARM uses the same technique as x86_64 to have a landing pad for the start - // of all new green tasks. Neither r1/r2 are saved on a context switch, so - // the shim will copy r3/r4 into r1/r2 and then execute the function in r5 - regs[0] = arg as libc::uintptr_t; // r0 - regs[3] = procedure.code as libc::uintptr_t; // r3 - regs[4] = procedure.env as libc::uintptr_t; // r4 - regs[5] = fptr as libc::uintptr_t; // r5 - regs[13] = sp as libc::uintptr_t; // #52 sp, r13 - regs[14] = rust_bootstrap_green_task as libc::uintptr_t; // #56 pc, r14 --> lr -} - -#[cfg(any(target_arch = "mips", target_arch = "mipsel"))] -type Registers = [libc::uintptr_t, ..32]; - -#[cfg(any(target_arch = "mips", target_arch = "mipsel"))] -fn new_regs() -> Box { box {[0, .. 
32]} } - -#[cfg(any(target_arch = "mips", target_arch = "mipsel"))] -fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint, - procedure: raw::Procedure, sp: *mut uint) { - let sp = align_down(sp); - // sp of mips o32 is 8-byte aligned - let sp = mut_offset(sp, -2); - - // The final return address. 0 indicates the bottom of the stack - unsafe { *sp = 0; } - - regs[4] = arg as libc::uintptr_t; - regs[5] = procedure.code as libc::uintptr_t; - regs[6] = procedure.env as libc::uintptr_t; - regs[29] = sp as libc::uintptr_t; - regs[25] = fptr as libc::uintptr_t; - regs[31] = fptr as libc::uintptr_t; -} - -fn align_down(sp: *mut uint) -> *mut uint { - let sp = (sp as uint) & !(16 - 1); - sp as *mut uint -} - -// ptr::mut_offset is positive ints only -#[inline] -pub fn mut_offset(ptr: *mut T, count: int) -> *mut T { - use std::mem::size_of; - (ptr as int + count * (size_of::() as int)) as *mut T -} diff --git a/src/libgreen/coroutine.rs b/src/libgreen/coroutine.rs deleted file mode 100644 index f2e64dc25a9..00000000000 --- a/src/libgreen/coroutine.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Coroutines represent nothing more than a context and a stack -// segment. - -use context::Context; -use stack::{StackPool, Stack}; - -/// A coroutine is nothing more than a (register context, stack) pair. -pub struct Coroutine { - /// The segment of stack on which the task is currently running or - /// if the task is blocked, on which the task will resume - /// execution. - /// - /// Servo needs this to be public in order to tell SpiderMonkey - /// about the stack bounds. - pub current_stack_segment: Stack, - - /// Always valid if the task is alive and not running. - pub saved_context: Context -} - -impl Coroutine { - pub fn empty() -> Coroutine { - Coroutine { - current_stack_segment: unsafe { Stack::dummy_stack() }, - saved_context: Context::empty() - } - } - - /// Destroy coroutine and try to reuse std::stack segment. - pub fn recycle(self, stack_pool: &mut StackPool) { - let Coroutine { current_stack_segment, .. } = self; - stack_pool.give_stack(current_stack_segment); - } -} diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs deleted file mode 100644 index 4e2908dd2b0..00000000000 --- a/src/libgreen/lib.rs +++ /dev/null @@ -1,567 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The "green scheduling" library -//! -//! This library provides M:N threading for rust programs. Internally this has -//! the implementation of a green scheduler along with context switching and a -//! stack-allocation strategy. This can be optionally linked in to rust -//! programs in order to provide M:N functionality inside of 1:1 programs. -//! -//! # Architecture -//! -//! An M:N scheduling library implies that there are N OS thread upon which M -//! "green threads" are multiplexed. In other words, a set of green threads are -//! 
all run inside a pool of OS threads. -//! -//! With this design, you can achieve _concurrency_ by spawning many green -//! threads, and you can achieve _parallelism_ by running the green threads -//! simultaneously on multiple OS threads. Each OS thread is a candidate for -//! being scheduled on a different core (the source of parallelism), and then -//! all of the green threads cooperatively schedule amongst one another (the -//! source of concurrency). -//! -//! ## Schedulers -//! -//! In order to coordinate among green threads, each OS thread is primarily -//! running something which we call a Scheduler. Whenever a reference to a -//! Scheduler is made, it is synonymous to referencing one OS thread. Each -//! scheduler is bound to one and exactly one OS thread, and the thread that it -//! is bound to never changes. -//! -//! Each scheduler is connected to a pool of other schedulers (a `SchedPool`) -//! which is the thread pool term from above. A pool of schedulers all share the -//! work that they create. Furthermore, whenever a green thread is created (also -//! synonymously referred to as a green task), it is associated with a -//! `SchedPool` forevermore. A green thread cannot leave its scheduler pool. -//! -//! Schedulers can have at most one green thread running on them at a time. When -//! a scheduler is asleep on its event loop, there are no green tasks running on -//! the OS thread or the scheduler. The term "context switch" is used for when -//! the running green thread is swapped out, but this simply changes the one -//! green thread which is running on the scheduler. -//! -//! ## Green Threads -//! -//! A green thread can largely be summarized by a stack and a register context. -//! Whenever a green thread is spawned, it allocates a stack, and then prepares -//! a register context for execution. The green task may be executed across -//! multiple OS threads, but it will always use the same stack and it will carry -//! its register context across OS threads. -//! -//! Each green thread is cooperatively scheduled with other green threads. -//! Primarily, this means that there is no pre-emption of a green thread. The -//! major consequence of this design is that a green thread stuck in an infinite -//! loop will prevent all other green threads from running on that particular -//! scheduler. -//! -//! Scheduling events for green threads occur on communication and I/O -//! boundaries. For example, if a green task blocks waiting for a message on a -//! channel some other green thread can now run on the scheduler. This also has -//! the consequence that until a green thread performs any form of scheduling -//! event, it will be running on the same OS thread (unconditionally). -//! -//! ## Work Stealing -//! -//! With a pool of schedulers, a new green task has a number of options when -//! deciding where to run initially. The current implementation uses a concept -//! called work stealing in order to spread out work among schedulers. -//! -//! In a work-stealing model, each scheduler maintains a local queue of tasks to -//! run, and this queue is stolen from by other schedulers. Implementation-wise, -//! work stealing has some hairy parts, but from a user-perspective, work -//! stealing simply implies what with M green threads and N schedulers where -//! M > N it is very likely that all schedulers will be busy executing work. -//! -//! # Considerations when using libgreen -//! -//! An M:N runtime has both pros and cons, and there is no one answer as to -//! 
whether M:N or 1:1 is appropriate to use. As always, there are many -//! advantages and disadvantages between the two. Regardless of the workload, -//! however, there are some aspects of using green thread which you should be -//! aware of: -//! -//! * The largest concern when using libgreen is interoperating with native -//! code. Care should be taken when calling native code that will block the OS -//! thread as it will prevent further green tasks from being scheduled on the -//! OS thread. -//! -//! * Native code using thread-local-storage should be approached -//! with care. Green threads may migrate among OS threads at any time, so -//! native libraries using thread-local state may not always work. -//! -//! * Native synchronization primitives (e.g. pthread mutexes) will also not -//! work for green threads. The reason for this is because native primitives -//! often operate on a _os thread_ granularity whereas green threads are -//! operating on a more granular unit of work. -//! -//! * A green threading runtime is not fork-safe. If the process forks(), it -//! cannot expect to make reasonable progress by continuing to use green -//! threads. -//! -//! Note that these concerns do not mean that operating with native code is a -//! lost cause. These are simply just concerns which should be considered when -//! invoking native code. -//! -//! # Starting with libgreen -//! -//! ```rust -//! extern crate green; -//! -//! #[start] -//! fn start(argc: int, argv: *const *const u8) -> int { -//! green::start(argc, argv, green::basic::event_loop, main) -//! } -//! -//! fn main() { -//! // this code is running in a pool of schedulers -//! } -//! ``` -//! -//! > **Note**: This `main` function in this example does *not* have I/O -//! > support. The basic event loop does not provide any support -//! -//! # Using a scheduler pool -//! -//! This library adds a `GreenTaskBuilder` trait that extends the methods -//! available on `std::task::TaskBuilder` to allow spawning a green task, -//! possibly pinned to a particular scheduler thread: -//! -//! ```rust -//! extern crate green; -//! -//! # fn main() { -//! use std::task::TaskBuilder; -//! use green::{SchedPool, PoolConfig, GreenTaskBuilder}; -//! -//! let mut config = PoolConfig::new(); -//! -//! let mut pool = SchedPool::new(config); -//! -//! // Spawn tasks into the pool of schedulers -//! TaskBuilder::new().green(&mut pool).spawn(proc() { -//! // this code is running inside the pool of schedulers -//! -//! spawn(proc() { -//! // this code is also running inside the same scheduler pool -//! }); -//! }); -//! -//! // Dynamically add a new scheduler to the scheduler pool. This adds another -//! // OS thread that green threads can be multiplexed on to. -//! let mut handle = pool.spawn_sched(); -//! -//! // Pin a task to the spawned scheduler -//! TaskBuilder::new().green_pinned(&mut pool, &mut handle).spawn(proc() { -//! /* ... */ -//! }); -//! -//! // Handles keep schedulers alive, so be sure to drop all handles before -//! // destroying the sched pool -//! drop(handle); -//! -//! // Required to shut down this scheduler pool. -//! // The task will panic if `shutdown` is not called. -//! pool.shutdown(); -//! # } -//! 
``` - -#![crate_name = "green"] -#![experimental] -#![license = "MIT/ASL2"] -#![crate_type = "rlib"] -#![crate_type = "dylib"] -#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "http://www.rust-lang.org/favicon.ico", - html_root_url = "http://doc.rust-lang.org/nightly/", - html_playground_url = "http://play.rust-lang.org/")] - -#![feature(macro_rules, phase, default_type_params, globs)] -#![allow(deprecated)] - -#[cfg(test)] #[phase(plugin, link)] extern crate log; -extern crate libc; -extern crate alloc; - -use alloc::arc::Arc; -use std::mem::replace; -use std::os; -use std::rt::rtio; -use std::rt::thread::Thread; -use std::rt::task::TaskOpts; -use std::rt; -use std::sync::atomic::{SeqCst, AtomicUint, INIT_ATOMIC_UINT}; -use std::sync::deque; -use std::task::{TaskBuilder, Spawner}; - -use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, PinnedTask, NewNeighbor}; -use sleeper_list::SleeperList; -use stack::StackPool; -use task::GreenTask; - -mod macros; -mod simple; -mod message_queue; - -pub mod basic; -pub mod context; -pub mod coroutine; -pub mod sched; -pub mod sleeper_list; -pub mod stack; -pub mod task; - -/// Set up a default runtime configuration, given compiler-supplied arguments. -/// -/// This function will block until the entire pool of M:N schedulers have -/// exited. This function also requires a local task to be available. -/// -/// # Arguments -/// -/// * `argc` & `argv` - The argument vector. On Unix this information is used -/// by os::args. -/// * `main` - The initial procedure to run inside of the M:N scheduling pool. -/// Once this procedure exits, the scheduling pool will begin to shut -/// down. The entire pool (and this function) will only return once -/// all child tasks have finished executing. -/// -/// # Return value -/// -/// The return value is used as the process return code. 0 on success, 101 on -/// error. -pub fn start(argc: int, argv: *const *const u8, - event_loop_factory: fn() -> Box, - main: proc():Send) -> int { - rt::init(argc, argv); - let mut main = Some(main); - let mut ret = None; - simple::task().run(|| { - ret = Some(run(event_loop_factory, main.take().unwrap())); - }).destroy(); - // unsafe is ok b/c we're sure that the runtime is gone - unsafe { rt::cleanup() } - ret.unwrap() -} - -/// Execute the main function in a pool of M:N schedulers. -/// -/// Configures the runtime according to the environment, by default using a task -/// scheduler with the same number of threads as cores. Returns a process exit -/// code. -/// -/// This function will not return until all schedulers in the associated pool -/// have returned. -pub fn run(event_loop_factory: fn() -> Box, - main: proc():Send) -> int { - // Create a scheduler pool and spawn the main task into this pool. We will - // get notified over a channel when the main task exits. - let mut cfg = PoolConfig::new(); - cfg.event_loop_factory = event_loop_factory; - let mut pool = SchedPool::new(cfg); - let (tx, rx) = channel(); - let mut opts = TaskOpts::new(); - opts.on_exit = Some(proc(r) tx.send(r)); - opts.name = Some("
".into_maybe_owned()); - pool.spawn(opts, main); - - // Wait for the main task to return, and set the process error code - // appropriately. - if rx.recv().is_err() { - os::set_exit_status(rt::DEFAULT_ERROR_CODE); - } - - // Now that we're sure all tasks are dead, shut down the pool of schedulers, - // waiting for them all to return. - pool.shutdown(); - os::get_exit_status() -} - -/// Configuration of how an M:N pool of schedulers is spawned. -pub struct PoolConfig { - /// The number of schedulers (OS threads) to spawn into this M:N pool. - pub threads: uint, - /// A factory function used to create new event loops. If this is not - /// specified then the default event loop factory is used. - pub event_loop_factory: fn() -> Box, -} - -impl PoolConfig { - /// Returns the default configuration, as determined the environment - /// variables of this process. - pub fn new() -> PoolConfig { - PoolConfig { - threads: rt::default_sched_threads(), - event_loop_factory: basic::event_loop, - } - } -} - -/// A structure representing a handle to a pool of schedulers. This handle is -/// used to keep the pool alive and also reap the status from the pool. -pub struct SchedPool { - id: uint, - threads: Vec>, - handles: Vec, - stealers: Vec>>, - next_friend: uint, - stack_pool: StackPool, - deque_pool: deque::BufferPool>, - sleepers: SleeperList, - factory: fn() -> Box, - task_state: TaskState, - tasks_done: Receiver<()>, -} - -/// This is an internal state shared among a pool of schedulers. This is used to -/// keep track of how many tasks are currently running in the pool and then -/// sending on a channel once the entire pool has been drained of all tasks. -#[deriving(Clone)] -pub struct TaskState { - cnt: Arc, - done: Sender<()>, -} - -impl SchedPool { - /// Execute the main function in a pool of M:N schedulers. - /// - /// This will configure the pool according to the `config` parameter, and - /// initially run `main` inside the pool of schedulers. - pub fn new(config: PoolConfig) -> SchedPool { - static POOL_ID: AtomicUint = INIT_ATOMIC_UINT; - - let PoolConfig { - threads: nscheds, - event_loop_factory: factory - } = config; - assert!(nscheds > 0); - - // The pool of schedulers that will be returned from this function - let (p, state) = TaskState::new(); - let mut pool = SchedPool { - threads: vec![], - handles: vec![], - stealers: vec![], - id: POOL_ID.fetch_add(1, SeqCst), - sleepers: SleeperList::new(), - stack_pool: StackPool::new(), - deque_pool: deque::BufferPool::new(), - next_friend: 0, - factory: factory, - task_state: state, - tasks_done: p, - }; - - // Create a work queue for each scheduler, ntimes. Create an extra - // for the main thread if that flag is set. We won't steal from it. - let mut workers = Vec::with_capacity(nscheds); - let mut stealers = Vec::with_capacity(nscheds); - - for _ in range(0, nscheds) { - let (w, s) = pool.deque_pool.deque(); - workers.push(w); - stealers.push(s); - } - pool.stealers = stealers; - - // Now that we've got all our work queues, create one scheduler per - // queue, spawn the scheduler into a thread, and be sure to keep a - // handle to the scheduler and the thread to keep them alive. 
- for worker in workers.into_iter() { - rtdebug!("inserting a regular scheduler"); - - let mut sched = box Scheduler::new(pool.id, - (pool.factory)(), - worker, - pool.stealers.clone(), - pool.sleepers.clone(), - pool.task_state.clone()); - pool.handles.push(sched.make_handle()); - pool.threads.push(Thread::start(proc() { sched.bootstrap(); })); - } - - return pool; - } - - /// Creates a new task configured to run inside of this pool of schedulers. - /// This is useful to create a task which can then be sent to a specific - /// scheduler created by `spawn_sched` (and possibly pin it to that - /// scheduler). - #[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"] - pub fn task(&mut self, opts: TaskOpts, f: proc():Send) -> Box { - GreenTask::configure(&mut self.stack_pool, opts, f) - } - - /// Spawns a new task into this pool of schedulers, using the specified - /// options to configure the new task which is spawned. - /// - /// New tasks are spawned in a round-robin fashion to the schedulers in this - /// pool, but tasks can certainly migrate among schedulers once they're in - /// the pool. - #[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"] - pub fn spawn(&mut self, opts: TaskOpts, f: proc():Send) { - let task = self.task(opts, f); - - // Figure out someone to send this task to - let idx = self.next_friend; - self.next_friend += 1; - if self.next_friend >= self.handles.len() { - self.next_friend = 0; - } - - // Jettison the task away! - self.handles[idx].send(TaskFromFriend(task)); - } - - /// Spawns a new scheduler into this M:N pool. A handle is returned to the - /// scheduler for use. The scheduler will not exit as long as this handle is - /// active. - /// - /// The scheduler spawned will participate in work stealing with all of the - /// other schedulers currently in the scheduler pool. - pub fn spawn_sched(&mut self) -> SchedHandle { - let (worker, stealer) = self.deque_pool.deque(); - self.stealers.push(stealer.clone()); - - // Tell all existing schedulers about this new scheduler so they can all - // steal work from it - for handle in self.handles.iter_mut() { - handle.send(NewNeighbor(stealer.clone())); - } - - // Create the new scheduler, using the same sleeper list as all the - // other schedulers as well as having a stealer handle to all other - // schedulers. - let mut sched = box Scheduler::new(self.id, - (self.factory)(), - worker, - self.stealers.clone(), - self.sleepers.clone(), - self.task_state.clone()); - let ret = sched.make_handle(); - self.handles.push(sched.make_handle()); - self.threads.push(Thread::start(proc() { sched.bootstrap() })); - - return ret; - } - - /// Consumes the pool of schedulers, waiting for all tasks to exit and all - /// schedulers to shut down. - /// - /// This function is required to be called in order to drop a pool of - /// schedulers, it is considered an error to drop a pool without calling - /// this method. - /// - /// This only waits for all tasks in *this pool* of schedulers to exit, any - /// native tasks or extern pools will not be waited on - pub fn shutdown(mut self) { - self.stealers = vec![]; - - // Wait for everyone to exit. We may have reached a 0-task count - // multiple times in the past, meaning there could be several buffered - // messages on the `tasks_done` port. We're guaranteed that after *some* - // message the current task count will be 0, so we just receive in a - // loop until everything is totally dead. 
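Stepping back from the internals for a moment: given the contract documented above (a pool must be explicitly shut down, and its `Drop` impl panics otherwise), a caller's lifecycle looks like the following minimal sketch. It mirrors this file's own tests and is written in the same pre-1.0 dialect as the surrounding code:

```rust
extern crate green;

use green::{SchedPool, PoolConfig};
use std::rt::task::TaskOpts;

fn main() {
    let mut pool = SchedPool::new(PoolConfig::new());

    // Spawn work into the pool; tasks are handed out round-robin.
    pool.spawn(TaskOpts::new(), proc() {
        println!("hello from a green task");
    });

    // Required: dropping a live pool panics (see the Drop impl below).
    pool.shutdown();
}
```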
- while self.task_state.active() { - self.tasks_done.recv(); - } - - // Now that everyone's gone, tell everything to shut down. - for mut handle in replace(&mut self.handles, vec![]).into_iter() { - handle.send(Shutdown); - } - for thread in replace(&mut self.threads, vec![]).into_iter() { - thread.join(); - } - } -} - -impl TaskState { - fn new() -> (Receiver<()>, TaskState) { - let (tx, rx) = channel(); - (rx, TaskState { - cnt: Arc::new(AtomicUint::new(0)), - done: tx, - }) - } - - fn increment(&mut self) { - self.cnt.fetch_add(1, SeqCst); - } - - fn active(&self) -> bool { - self.cnt.load(SeqCst) != 0 - } - - fn decrement(&mut self) { - let prev = self.cnt.fetch_sub(1, SeqCst); - if prev == 1 { - self.done.send(()); - } - } -} - -impl Drop for SchedPool { - fn drop(&mut self) { - if self.threads.len() > 0 { - panic!("dropping a M:N scheduler pool that wasn't shut down"); - } - } -} - -/// A spawner for green tasks -pub struct GreenSpawner<'a>{ - pool: &'a mut SchedPool, - handle: Option<&'a mut SchedHandle> -} - -impl<'a> Spawner for GreenSpawner<'a> { - #[inline] - fn spawn(self, opts: TaskOpts, f: proc():Send) { - let GreenSpawner { pool, handle } = self; - match handle { - None => pool.spawn(opts, f), - Some(h) => h.send(PinnedTask(pool.task(opts, f))) - } - } -} - -/// An extension trait adding `green` configuration methods to `TaskBuilder`. -pub trait GreenTaskBuilder { - fn green<'a>(self, &'a mut SchedPool) -> TaskBuilder>; - fn green_pinned<'a>(self, &'a mut SchedPool, &'a mut SchedHandle) - -> TaskBuilder>; -} - -impl GreenTaskBuilder for TaskBuilder { - fn green<'a>(self, pool: &'a mut SchedPool) -> TaskBuilder> { - self.spawner(GreenSpawner {pool: pool, handle: None}) - } - - fn green_pinned<'a>(self, pool: &'a mut SchedPool, handle: &'a mut SchedHandle) - -> TaskBuilder> { - self.spawner(GreenSpawner {pool: pool, handle: Some(handle)}) - } -} - -#[cfg(test)] -mod test { - use std::task::TaskBuilder; - use super::{SchedPool, PoolConfig, GreenTaskBuilder}; - - #[test] - fn test_green_builder() { - let mut pool = SchedPool::new(PoolConfig::new()); - let res = TaskBuilder::new().green(&mut pool).try(proc() { - "Success!".to_string() - }); - assert_eq!(res.ok().unwrap(), "Success!".to_string()); - pool.shutdown(); - } -} diff --git a/src/libgreen/macros.rs b/src/libgreen/macros.rs deleted file mode 100644 index 4cce430d88a..00000000000 --- a/src/libgreen/macros.rs +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// FIXME: this file probably shouldn't exist -// ignore-lexer-test FIXME #15677 - -#![macro_escape] - -use std::fmt; - -// Indicates whether we should perform expensive sanity checks, including rtassert! -// FIXME: Once the runtime matures remove the `true` below to turn off rtassert, etc. -pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert); - -macro_rules! rterrln ( - ($($arg:tt)*) => ( { - format_args!(::macros::dumb_println, $($arg)*) - } ) -) - -// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build. -macro_rules! rtdebug ( - ($($arg:tt)*) => ( { - if cfg!(rtdebug) { - rterrln!($($arg)*) - } - }) -) - -macro_rules! 
rtassert ( - ( $arg:expr ) => ( { - if ::macros::ENFORCE_SANITY { - if !$arg { - rtabort!(" assertion failed: {}", stringify!($arg)); - } - } - } ) -) - - -macro_rules! rtabort ( - ($($arg:tt)*) => ( { - ::macros::abort(format!($($arg)*).as_slice()); - } ) -) - -pub fn dumb_println(args: &fmt::Arguments) { - use std::rt; - let mut w = rt::Stderr; - let _ = writeln!(&mut w, "{}", args); -} - -pub fn abort(msg: &str) -> ! { - let msg = if !msg.is_empty() { msg } else { "aborted" }; - let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) ); - let quote = match hash % 10 { - 0 => " -It was from the artists and poets that the pertinent answers came, and I -know that panic would have broken loose had they been able to compare notes. -As it was, lacking their original letters, I half suspected the compiler of -having asked leading questions, or of having edited the correspondence in -corroboration of what he had latently resolved to see.", - 1 => " -There are not many persons who know what wonders are opened to them in the -stories and visions of their youth; for when as children we listen and dream, -we think but half-formed thoughts, and when as men we try to remember, we are -dulled and prosaic with the poison of life. But some of us awake in the night -with strange phantasms of enchanted hills and gardens, of fountains that sing -in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch -down to sleeping cities of bronze and stone, and of shadowy companies of heroes -that ride caparisoned white horses along the edges of thick forests; and then -we know that we have looked back through the ivory gates into that world of -wonder which was ours before we were wise and unhappy.", - 2 => " -Instead of the poems I had hoped for, there came only a shuddering blackness -and ineffable loneliness; and I saw at last a fearful truth which no one had -ever dared to breathe before — the unwhisperable secret of secrets — The fact -that this city of stone and stridor is not a sentient perpetuation of Old New -York as London is of Old London and Paris of Old Paris, but that it is in fact -quite dead, its sprawling body imperfectly embalmed and infested with queer -animate things which have nothing to do with it as it was in life.", - 3 => " -The ocean ate the last of the land and poured into the smoking gulf, thereby -giving up all it had ever conquered. From the new-flooded lands it flowed -again, uncovering death and decay; and from its ancient and immemorial bed it -trickled loathsomely, uncovering nighted secrets of the years when Time was -young and the gods unborn. Above the waves rose weedy remembered spires. The -moon laid pale lilies of light on dead London, and Paris stood up from its damp -grave to be sanctified with star-dust. Then rose spires and monoliths that were -weedy but not remembered; terrible spires and monoliths of lands that men never -knew were lands...", - 4 => " -There was a night when winds from unknown spaces whirled us irresistibly into -limitless vacuum beyond all thought and entity. Perceptions of the most -maddeningly untransmissible sort thronged upon us; perceptions of infinity -which at the time convulsed us with joy, yet which are now partly lost to my -memory and partly incapable of presentation to others.", - _ => "You've met with a terrible fate, haven't you?" - }; - rterrln!("{}", ""); - rterrln!("{}", quote); - rterrln!("{}", ""); - rterrln!("fatal runtime error: {}", msg); - - abort(); - - fn abort() -> ! 
{ - use std::intrinsics; - unsafe { intrinsics::abort() } - } -} diff --git a/src/libgreen/message_queue.rs b/src/libgreen/message_queue.rs deleted file mode 100644 index c589a9fb592..00000000000 --- a/src/libgreen/message_queue.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub use self::PopResult::*; - -use alloc::arc::Arc; -use std::sync::mpsc_queue as mpsc; -use std::kinds::marker; - -pub enum PopResult { - Inconsistent, - Empty, - Data(T), -} - -pub fn queue() -> (Consumer, Producer) { - let a = Arc::new(mpsc::Queue::new()); - (Consumer { inner: a.clone(), noshare: marker::NoSync }, - Producer { inner: a, noshare: marker::NoSync }) -} - -pub struct Producer { - inner: Arc>, - noshare: marker::NoSync, -} - -pub struct Consumer { - inner: Arc>, - noshare: marker::NoSync, -} - -impl Consumer { - pub fn pop(&self) -> PopResult { - match self.inner.pop() { - mpsc::Inconsistent => Inconsistent, - mpsc::Empty => Empty, - mpsc::Data(t) => Data(t), - } - } - - pub fn casual_pop(&self) -> Option { - match self.inner.pop() { - mpsc::Inconsistent => None, - mpsc::Empty => None, - mpsc::Data(t) => Some(t), - } - } -} - -impl Producer { - pub fn push(&self, t: T) { - self.inner.push(t); - } -} - -impl Clone for Producer { - fn clone(&self) -> Producer { - Producer { inner: self.inner.clone(), noshare: marker::NoSync } - } -} diff --git a/src/libgreen/sched.rs b/src/libgreen/sched.rs deleted file mode 100644 index e8cb65d35df..00000000000 --- a/src/libgreen/sched.rs +++ /dev/null @@ -1,1523 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub use self::SchedMessage::*; -use self::EffortLevel::*; - -use std::mem; -use std::rt::local::Local; -use std::rt::mutex::NativeMutex; -use std::rt::rtio::{RemoteCallback, PausableIdleCallback, Callback, EventLoop}; -use std::rt::task::BlockedTask; -use std::rt::task::Task; -use std::sync::deque; -use std::raw; - -use std::rand::{XorShiftRng, Rng, Rand}; - -use TaskState; -use context::Context; -use coroutine::Coroutine; -use sleeper_list::SleeperList; -use stack::StackPool; -use task::{TypeSched, GreenTask, HomeSched, AnySched}; -use message_queue as msgq; - -/// A scheduler is responsible for coordinating the execution of Tasks -/// on a single thread. The scheduler runs inside a slightly modified -/// Rust Task. When not running this task is stored in the scheduler -/// struct. The scheduler struct acts like a baton, all scheduling -/// actions are transfers of the baton. -/// -/// FIXME: This creates too many callbacks to run_sched_once, resulting -/// in too much allocation and too many events. -pub struct Scheduler { - /// ID number of the pool that this scheduler is a member of. When - /// reawakening green tasks, this is used to ensure that tasks aren't - /// reawoken on the wrong pool of schedulers. 
- pub pool_id: uint, - /// The pool of stacks that this scheduler has cached - pub stack_pool: StackPool, - /// Bookkeeping for the number of tasks which are currently running around - /// inside this pool of schedulers - pub task_state: TaskState, - /// There are N work queues, one per scheduler. - work_queue: deque::Worker>, - /// Work queues for the other schedulers. These are created by - /// cloning the core work queues. - work_queues: Vec>>, - /// The queue of incoming messages from other schedulers. - /// These are enqueued by SchedHandles after which a remote callback - /// is triggered to handle the message. - message_queue: msgq::Consumer, - /// Producer used to clone sched handles from - message_producer: msgq::Producer, - /// A shared list of sleeping schedulers. We'll use this to wake - /// up schedulers when pushing work onto the work queue. - sleeper_list: SleeperList, - /// Indicates that we have previously pushed a handle onto the - /// SleeperList but have not yet received the Wake message. - /// Being `true` does not necessarily mean that the scheduler is - /// not active since there are multiple event sources that may - /// wake the scheduler. It just prevents the scheduler from pushing - /// multiple handles onto the sleeper list. - sleepy: bool, - /// A flag to indicate we've received the shutdown message and should - /// no longer try to go to sleep, but exit instead. - no_sleep: bool, - /// The scheduler runs on a special task. When it is not running - /// it is stored here instead of the work queue. - sched_task: Option>, - /// An action performed after a context switch on behalf of the - /// code running before the context switch - cleanup_job: Option, - /// If the scheduler shouldn't run some tasks, a friend to send - /// them to. - friend_handle: Option, - /// Should this scheduler run any task, or only pinned tasks? - run_anything: bool, - /// A fast XorShift rng for scheduler use - rng: XorShiftRng, - /// A toggleable idle callback - idle_callback: Option>, - /// A countdown that starts at a random value and is decremented - /// every time a yield check is performed. When it hits 0 a task - /// will yield. - yield_check_count: uint, - /// A flag to tell the scheduler loop it needs to do some stealing - /// in order to introduce randomness as part of a yield - steal_for_yield: bool, - - // n.b. currently destructors of an object are run in top-to-bottom in order - // of field declaration. Due to its nature, the pausable idle callback - // must have some sort of handle to the event loop, so it needs to get - // destroyed before the event loop itself. For this reason, we destroy - // the event loop last to ensure that any unsafe references to it are - // destroyed before it's actually destroyed. 
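The field-ordering caveat above relies on a general language rule: struct fields are dropped top to bottom, in declaration order. A tiny self-contained illustration with hypothetical types:

```rust
struct Loud { name: &'static str }

impl Drop for Loud {
    fn drop(&mut self) { println!("dropping {}", self.name); }
}

struct Ordered {
    first: Loud,   // dropped first
    second: Loud,  // dropped last, so anything others point into goes here
}

fn main() {
    let _x = Ordered {
        first: Loud { name: "idle_callback" },
        second: Loud { name: "event_loop" },
    };
    // prints "dropping idle_callback", then "dropping event_loop"
}
```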
- - /// The event loop used to drive the scheduler and perform I/O - pub event_loop: Box, -} - -/// An indication of how hard to work on a given operation, the difference -/// mainly being whether memory is synchronized or not -#[deriving(PartialEq)] -enum EffortLevel { - DontTryTooHard, - GiveItYourBest -} - -static MAX_YIELD_CHECKS: uint = 20000; - -fn reset_yield_check(rng: &mut XorShiftRng) -> uint { - let r: uint = Rand::rand(rng); - r % MAX_YIELD_CHECKS + 1 -} - -impl Scheduler { - - // * Initialization Functions - - pub fn new(pool_id: uint, - event_loop: Box, - work_queue: deque::Worker>, - work_queues: Vec>>, - sleeper_list: SleeperList, - state: TaskState) - -> Scheduler { - - Scheduler::new_special(pool_id, event_loop, work_queue, work_queues, - sleeper_list, true, None, state) - - } - - pub fn new_special(pool_id: uint, - event_loop: Box, - work_queue: deque::Worker>, - work_queues: Vec>>, - sleeper_list: SleeperList, - run_anything: bool, - friend: Option, - state: TaskState) - -> Scheduler { - - let (consumer, producer) = msgq::queue(); - let mut sched = Scheduler { - pool_id: pool_id, - sleeper_list: sleeper_list, - message_queue: consumer, - message_producer: producer, - sleepy: false, - no_sleep: false, - event_loop: event_loop, - work_queue: work_queue, - work_queues: work_queues, - stack_pool: StackPool::new(), - sched_task: None, - cleanup_job: None, - run_anything: run_anything, - friend_handle: friend, - rng: new_sched_rng(), - idle_callback: None, - yield_check_count: 0, - steal_for_yield: false, - task_state: state, - }; - - sched.yield_check_count = reset_yield_check(&mut sched.rng); - - return sched; - } - - // FIXME: This may eventually need to be refactored so that - // the scheduler itself doesn't have to call event_loop.run. - // That will be important for embedding the runtime into external - // event loops. - - // Take a main task to run, and a scheduler to run it in. Create a - // scheduler task and bootstrap into it. - pub fn bootstrap(mut self: Box) { - - // Build an Idle callback. - let cb = box SchedRunner as Box; - self.idle_callback = Some(self.event_loop.pausable_idle_callback(cb)); - - // Create a task for the scheduler with an empty context. - let sched_task = GreenTask::new_typed(Some(Coroutine::empty()), - TypeSched); - - // Before starting our first task, make sure the idle callback - // is active. As we do not start in the sleep state this is - // important. - self.idle_callback.as_mut().unwrap().resume(); - - // Now, as far as all the scheduler state is concerned, we are inside - // the "scheduler" context. The scheduler immediately hands over control - // to the event loop, and this will only exit once the event loop no - // longer has any references (handles or I/O objects). - rtdebug!("starting scheduler {}", self.sched_id()); - let mut sched_task = self.run(sched_task); - - // Close the idle callback. - let mut sched = sched_task.sched.take().unwrap(); - sched.idle_callback.take(); - // Make one go through the loop to run the close callback. - let mut stask = sched.run(sched_task); - - // Now that we are done with the scheduler, clean up the - // scheduler task. Do so by removing it from TLS and manually - // cleaning up the memory it uses. As we didn't actually call - // task.run() on the scheduler task we never get through all - // the cleanup code it runs. 
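In other words, `bootstrap` runs the event loop twice: once for normal servicing, and once more to drain the close callback scheduled by dropping the idle callback. Distilled from the body above (not standalone code):

```rust
let mut sched_task = self.run(sched_task);     // 1st run: normal servicing
let mut sched = sched_task.sched.take().unwrap();
sched.idle_callback.take();                    // schedules a close callback
let mut stask = sched.run(sched_task);         // 2nd run: drains that callback
// ...then tear the scheduler task down by hand, as below.
```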
- rtdebug!("stopping scheduler {}", stask.sched.as_ref().unwrap().sched_id()); - - // Should not have any messages - let message = stask.sched.as_mut().unwrap().message_queue.pop(); - rtassert!(match message { msgq::Empty => true, _ => false }); - - stask.task.take().unwrap().drop(); - } - - // This does not return a scheduler, as the scheduler is placed - // inside the task. - pub fn run(mut self: Box, stask: Box) - -> Box { - - // This is unsafe because we need to place the scheduler, with - // the event_loop inside, inside our task. But we still need a - // mutable reference to the event_loop to give it the "run" - // command. - unsafe { - let event_loop: *mut Box = &mut self.event_loop; - // Our scheduler must be in the task before the event loop - // is started. - stask.put_with_sched(self); - (*event_loop).run(); - } - - // This is a serious code smell, but this function could be done away - // with if necessary. The ownership of `stask` was transferred into - // local storage just before the event loop ran, so it is possible to - // transmute `stask` as a uint across the running of the event loop to - // re-acquire ownership here. - // - // This would involve removing the Task from TLS, removing the runtime, - // forgetting the runtime, and then putting the task into `stask`. For - // now, because we have `GreenTask::convert`, I chose to take this - // method for cleanliness. This function is *not* a fundamental reason - // why this function should exist. - GreenTask::convert(Local::take()) - } - - // * Execution Functions - Core Loop Logic - - // This function is run from the idle callback on the uv loop, indicating - // that there are no I/O events pending. When this function returns, we will - // fall back to epoll() in the uv event loop, waiting for more things to - // happen. We may come right back off epoll() if the idle callback is still - // active, in which case we're truly just polling to see if I/O events are - // complete. - // - // The model for this function is to execute as much work as possible while - // still fairly considering I/O tasks. Falling back to epoll() frequently is - // often quite expensive, so we attempt to avoid it as much as possible. If - // we have any active I/O on the event loop, then we're forced to fall back - // to epoll() in order to provide fairness, but as long as we're doing work - // and there's no active I/O, we can continue to do work. - // - // If we try really hard to do some work, but no work is available to be - // done, then we fall back to epoll() to block this thread waiting for more - // work (instead of busy waiting). - fn run_sched_once(mut self: Box, stask: Box) { - // Make sure that we're not lying in that the `stask` argument is indeed - // the scheduler task for this scheduler. - assert!(self.sched_task.is_none()); - - // Assume that we need to continue idling unless we reach the - // end of this function without performing an action. - self.idle_callback.as_mut().unwrap().resume(); - - // First we check for scheduler messages, these are higher - // priority than regular tasks. - let (mut sched, mut stask, mut did_work) = - self.interpret_message_queue(stask, DontTryTooHard); - - // After processing a message, we consider doing some more work on the - // event loop. The "keep going" condition changes after the first - // iteration because we don't want to spin here infinitely. - // - // Once we start doing work we can keep doing work so long as the - // iteration does something. 
Note that we don't want to starve the - // message queue here, so each iteration when we're done working we - // check the message queue regardless of whether we did work or not. - let mut keep_going = !did_work || !sched.event_loop.has_active_io(); - while keep_going { - let (a, b, c) = match sched.do_work(stask) { - (sched, task, false) => { - sched.interpret_message_queue(task, GiveItYourBest) - } - (sched, task, true) => { - let (sched, task, _) = - sched.interpret_message_queue(task, GiveItYourBest); - (sched, task, true) - } - }; - sched = a; - stask = b; - did_work = c; - - // We only keep going if we managed to do something productive and - // also don't have any active I/O. If we didn't do anything, we - // should consider going to sleep, and if we have active I/O we need - // to poll for completion. - keep_going = did_work && !sched.event_loop.has_active_io(); - } - - // If we ever did some work, then we shouldn't put our scheduler - // entirely to sleep just yet. Leave the idle callback active and fall - // back to epoll() to see what's going on. - if did_work { - return stask.put_with_sched(sched); - } - - // If we got here then there was no work to do. - // Generate a SchedHandle and push it to the sleeper list so - // somebody can wake us up later. - if !sched.sleepy && !sched.no_sleep { - rtdebug!("scheduler has no work to do, going to sleep"); - sched.sleepy = true; - let handle = sched.make_handle(); - sched.sleeper_list.push(handle); - // Since we are sleeping, deactivate the idle callback. - sched.idle_callback.as_mut().unwrap().pause(); - } else { - rtdebug!("not sleeping, already doing so or no_sleep set"); - // We may not be sleeping, but we still need to deactivate - // the idle callback. - sched.idle_callback.as_mut().unwrap().pause(); - } - - // Finished a cycle without using the Scheduler. Place it back - // in TLS. - stask.put_with_sched(sched); - } - - // This function returns None if the scheduler is "used", or it - // returns the still-available scheduler. At this point all - // message-handling will count as a turn of work, and as a result - // return None. - fn interpret_message_queue(mut self: Box, - stask: Box, - effort: EffortLevel) - -> (Box, Box, bool) { - let msg = if effort == DontTryTooHard { - self.message_queue.casual_pop() - } else { - // When popping our message queue, we could see an "inconsistent" - // state which means that we *should* be able to pop data, but we - // are unable to at this time. Our options are: - // - // 1. Spin waiting for data - // 2. Ignore this and pretend we didn't find a message - // - // If we choose route 1, then if the pusher in question is currently - // pre-empted, we're going to take up our entire time slice just - // spinning on this queue. If we choose route 2, then the pusher in - // question is still guaranteed to make a send() on its async - // handle, so we will guaranteed wake up and see its message at some - // point. - // - // I have chosen to take route #2. - match self.message_queue.pop() { - msgq::Data(t) => Some(t), - msgq::Empty | msgq::Inconsistent => None - } - }; - - match msg { - Some(PinnedTask(task)) => { - let mut task = task; - task.give_home(HomeSched(self.make_handle())); - let (sched, task) = self.resume_task_immediately(stask, task); - (sched, task, true) - } - Some(TaskFromFriend(task)) => { - rtdebug!("got a task from a friend. 
lovely!"); - let (sched, task) = - self.process_task(stask, task, - Scheduler::resume_task_immediately_cl); - (sched, task, true) - } - Some(RunOnce(task)) => { - // bypass the process_task logic to force running this task once - // on this home scheduler. This is often used for I/O (homing). - let (sched, task) = self.resume_task_immediately(stask, task); - (sched, task, true) - } - Some(Wake) => { - self.sleepy = false; - (self, stask, true) - } - Some(Shutdown) => { - rtdebug!("shutting down"); - if self.sleepy { - // There may be an outstanding handle on the - // sleeper list. Pop them all to make sure that's - // not the case. - loop { - match self.sleeper_list.pop() { - Some(handle) => { - let mut handle = handle; - handle.send(Wake); - } - None => break - } - } - } - // No more sleeping. After there are no outstanding - // event loop references we will shut down. - self.no_sleep = true; - self.sleepy = false; - (self, stask, true) - } - Some(NewNeighbor(neighbor)) => { - self.work_queues.push(neighbor); - (self, stask, false) - } - None => (self, stask, false) - } - } - - fn do_work(mut self: Box, stask: Box) - -> (Box, Box, bool) { - rtdebug!("scheduler calling do work"); - match self.find_work() { - Some(task) => { - rtdebug!("found some work! running the task"); - let (sched, task) = - self.process_task(stask, task, - Scheduler::resume_task_immediately_cl); - (sched, task, true) - } - None => { - rtdebug!("no work was found, returning the scheduler struct"); - (self, stask, false) - } - } - } - - // Workstealing: In this iteration of the runtime each scheduler - // thread has a distinct work queue. When no work is available - // locally, make a few attempts to steal work from the queues of - // other scheduler threads. If a few steals fail we end up in the - // old "no work" path which is fine. - - // First step in the process is to find a task. This function does - // that by first checking the local queue, and if there is no work - // there, trying to steal from the remote work queues. - fn find_work(&mut self) -> Option> { - rtdebug!("scheduler looking for work"); - if !self.steal_for_yield { - match self.work_queue.pop() { - Some(task) => { - rtdebug!("found a task locally"); - return Some(task) - } - None => { - rtdebug!("scheduler trying to steal"); - return self.try_steals(); - } - } - } else { - // During execution of the last task, it performed a 'yield', - // so we're doing some work stealing in order to introduce some - // scheduling randomness. Otherwise we would just end up popping - // that same task again. This is pretty lame and is to work around - // the problem that work stealing is not designed for 'non-strict' - // (non-fork-join) task parallelism. - self.steal_for_yield = false; - match self.try_steals() { - Some(task) => { - rtdebug!("stole a task after yielding"); - return Some(task); - } - None => { - rtdebug!("did not steal a task after yielding"); - // Back to business - return self.find_work(); - } - } - } - } - - // Try stealing from all queues the scheduler knows about. This - // naive implementation can steal from our own queue or from other - // special schedulers. 
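A miniature of the machinery `try_steals` (below) walks over, using the same `std::sync::deque` API that `SchedPool::new` sets up: each scheduler owns one `Worker`, everyone holds `Stealer` clones, and a steal attempt probes every queue once starting from a random index (fixed here for determinism). A hypothetical standalone sketch in the surrounding pre-1.0 dialect:

```rust
use std::sync::deque;

fn main() {
    let pool: deque::BufferPool<uint> = deque::BufferPool::new();
    let (worker_a, stealer_a) = pool.deque();
    let (_worker_b, stealer_b) = pool.deque();
    let stealers = vec![stealer_a, stealer_b];

    // Only the owning scheduler pushes to (and pops from) its worker end.
    worker_a.push(42u);

    // try_steals in miniature: probe each queue once, wrapping around.
    let (len, start) = (stealers.len(), 1u);
    for index in range(0, len).map(|i| (i + start) % len) {
        match stealers[index].steal() {
            deque::Data(n) => {
                println!("stole {} from queue {}", n, index);
                return;
            }
            _ => {} // empty or contended; try the next queue
        }
    }
    println!("giving up on stealing");
}
```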
- fn try_steals(&mut self) -> Option> { - let work_queues = &mut self.work_queues; - let len = work_queues.len(); - let start_index = self.rng.gen_range(0, len); - for index in range(0, len).map(|i| (i + start_index) % len) { - match work_queues[index].steal() { - deque::Data(task) => { - rtdebug!("found task by stealing"); - return Some(task) - } - _ => () - } - }; - rtdebug!("giving up on stealing"); - return None; - } - - // * Task Routing Functions - Make sure tasks send up in the right - // place. - - fn process_task(mut self: Box, - cur: Box, - mut next: Box, - schedule_fn: SchedulingFn) - -> (Box, Box) { - rtdebug!("processing a task"); - - match next.take_unwrap_home() { - HomeSched(home_handle) => { - if home_handle.sched_id != self.sched_id() { - rtdebug!("sending task home"); - next.give_home(HomeSched(home_handle)); - Scheduler::send_task_home(next); - (self, cur) - } else { - rtdebug!("running task here"); - next.give_home(HomeSched(home_handle)); - schedule_fn(self, cur, next) - } - } - AnySched if self.run_anything => { - rtdebug!("running anysched task here"); - next.give_home(AnySched); - schedule_fn(self, cur, next) - } - AnySched => { - rtdebug!("sending task to friend"); - next.give_home(AnySched); - self.send_to_friend(next); - (self, cur) - } - } - } - - fn send_task_home(task: Box) { - let mut task = task; - match task.take_unwrap_home() { - HomeSched(mut home_handle) => home_handle.send(PinnedTask(task)), - AnySched => rtabort!("error: cannot send anysched task home"), - } - } - - /// Take a non-homed task we aren't allowed to run here and send - /// it to the designated friend scheduler to execute. - fn send_to_friend(&mut self, task: Box) { - rtdebug!("sending a task to friend"); - match self.friend_handle { - Some(ref mut handle) => { - handle.send(TaskFromFriend(task)); - } - None => { - rtabort!("tried to send task to a friend but scheduler has no friends"); - } - } - } - - /// Schedule a task to be executed later. - /// - /// Pushes the task onto the work stealing queue and tells the - /// event loop to run it later. Always use this instead of pushing - /// to the work queue directly. - pub fn enqueue_task(&mut self, task: Box) { - - // We push the task onto our local queue clone. - assert!(!task.is_sched()); - self.work_queue.push(task); - match self.idle_callback { - Some(ref mut idle) => idle.resume(), - None => {} // allow enqueuing before the scheduler starts - } - - // We've made work available. Notify a - // sleeping scheduler. - - match self.sleeper_list.casual_pop() { - Some(handle) => { - let mut handle = handle; - handle.send(Wake) - } - None => { (/* pass */) } - }; - } - - // * Core Context Switching Functions - - // The primary function for changing contexts. In the current - // design the scheduler is just a slightly modified GreenTask, so - // all context swaps are from GreenTask to GreenTask. The only difference - // between the various cases is where the inputs come from, and - // what is done with the resulting task. That is specified by the - // cleanup function f, which takes the scheduler and the - // old task as inputs. - - pub fn change_task_context(mut self: Box, - mut current_task: Box, - mut next_task: Box, - f: |&mut Scheduler, Box|) - -> Box { - let f_opaque = ClosureConverter::from_fn(f); - - let current_task_dupe = &mut *current_task as *mut GreenTask; - - // The current task is placed inside an enum with the cleanup - // function. This enum is then placed inside the scheduler. 
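The "enum with the cleanup function" mentioned above is the `CleanupJob` type defined near the bottom of this file. The pattern in miniature, with hypothetical types: park an action before the stack swap, run it immediately after:

```rust
struct CleanupJob { msg: &'static str }

impl CleanupJob {
    fn run(self) { println!("cleanup: {}", self.msg); }
}

fn main() {
    // Before the context switch: park the job (cf. `self.cleanup_job` below).
    let mut pending = Some(CleanupJob { msg: "requeue the old task" });
    // ... the stack swap would happen here ...
    // First thing on resumption (cf. `run_cleanup_job`): run the parked job.
    pending.take().unwrap().run();
}
```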
- self.cleanup_job = Some(CleanupJob::new(current_task, f_opaque)); - - // The scheduler is then placed inside the next task. - next_task.sched = Some(self); - - // However we still need an internal mutable pointer to the - // original task. The strategy here was "arrange memory, then - // get pointers", so we crawl back up the chain using - // transmute to eliminate borrowck errors. - unsafe { - - let sched: &mut Scheduler = - mem::transmute(&**next_task.sched.as_mut().unwrap()); - - let current_task: &mut GreenTask = match sched.cleanup_job { - Some(CleanupJob { ref mut task, .. }) => &mut **task, - None => rtabort!("no cleanup job") - }; - - let (current_task_context, next_task_context) = - Scheduler::get_contexts(current_task, &mut *next_task); - - // Done with everything - put the next task in TLS. This - // works because due to transmute the borrow checker - // believes that we have no internal pointers to - // next_task. - mem::forget(next_task); - - // The raw context swap operation. The next action taken - // will be running the cleanup job from the context of the - // next task. - Context::swap(current_task_context, next_task_context); - } - - // When the context swaps back to this task we immediately - // run the cleanup job, as expected by the previously called - // swap_contexts function. - let mut current_task: Box = unsafe { - mem::transmute(current_task_dupe) - }; - current_task.sched.as_mut().unwrap().run_cleanup_job(); - - // See the comments in switch_running_tasks_and_then for why a lock - // is acquired here. This is the resumption points and the "bounce" - // that it is referring to. - unsafe { - let _guard = current_task.nasty_deschedule_lock.lock(); - } - return current_task; - } - - // Returns a mutable reference to both contexts involved in this - // swap. This is unsafe - we are getting mutable internal - // references to keep even when we don't own the tasks. It looks - // kinda safe because we are doing transmutes before passing in - // the arguments. - pub fn get_contexts<'a>(current_task: &mut GreenTask, - next_task: &mut GreenTask) - -> (&'a mut Context, &'a mut Context) - { - let current_task_context = - &mut current_task.coroutine.as_mut().unwrap().saved_context; - let next_task_context = - &mut next_task.coroutine.as_mut().unwrap().saved_context; - unsafe { - (mem::transmute(current_task_context), - mem::transmute(next_task_context)) - } - } - - // * Context Swapping Helpers - Here be ugliness! - - pub fn resume_task_immediately(self: Box, - cur: Box, - next: Box) - -> (Box, Box) { - assert!(cur.is_sched()); - let mut cur = self.change_task_context(cur, next, |sched, stask| { - assert!(sched.sched_task.is_none()); - sched.sched_task = Some(stask); - }); - (cur.sched.take().unwrap(), cur) - } - - fn resume_task_immediately_cl(sched: Box, - cur: Box, - next: Box) - -> (Box, Box) { - sched.resume_task_immediately(cur, next) - } - - /// Block a running task, context switch to the scheduler, then pass the - /// blocked task to a closure. - /// - /// # Safety note - /// - /// The closure here is a *stack* closure that lives in the - /// running task. It gets transmuted to the scheduler's lifetime - /// and called while the task is blocked. - /// - /// This passes a Scheduler pointer to the fn after the context switch - /// in order to prevent that fn from performing further scheduling operations. - /// Doing further scheduling could easily result in infinite recursion. 
- /// - /// Note that if the closure provided relinquishes ownership of the - /// BlockedTask, then it is possible for the task to resume execution before - /// the closure has finished executing. This would naturally introduce a - /// race if the closure and task shared portions of the environment. - /// - /// This situation is currently prevented, or in other words it is - /// guaranteed that this function will not return before the given closure - /// has returned. - pub fn deschedule_running_task_and_then(mut self: Box, - cur: Box, - f: |&mut Scheduler, BlockedTask|) { - // Trickier - we need to get the scheduler task out of self - // and use it as the destination. - let stask = self.sched_task.take().unwrap(); - // Otherwise this is the same as below. - self.switch_running_tasks_and_then(cur, stask, f) - } - - pub fn switch_running_tasks_and_then(self: Box, - cur: Box, - next: Box, - f: |&mut Scheduler, BlockedTask|) { - // And here comes one of the sad moments in which a lock is used in a - // core portion of the rust runtime. As always, this is highly - // undesirable, so there's a good reason behind it. - // - // There is an excellent outline of the problem in issue #8132, and it's - // summarized in that `f` is executed on a sched task, but its - // environment is on the previous task. If `f` relinquishes ownership of - // the BlockedTask, then it may introduce a race where `f` is using the - // environment as well as the code after the 'deschedule' block. - // - // The solution we have chosen to adopt for now is to acquire a - // task-local lock around this block. The resumption of the task in - // context switching will bounce on the lock, thereby waiting for this - // block to finish, eliminating the race mentioned above. - // panic!("should never return!"); - // - // To actually maintain a handle to the lock, we use an unsafe pointer - // to it, but we're guaranteed that the task won't exit until we've - // unlocked the lock so there's no worry of this memory going away. - let cur = self.change_task_context(cur, next, |sched, mut task| { - let lock: *mut NativeMutex = &mut task.nasty_deschedule_lock; - unsafe { - let _guard = (*lock).lock(); - f(sched, BlockedTask::block(task.swap())); - } - }); - cur.put(); - } - - fn switch_task(sched: Box, - cur: Box, - next: Box) - -> (Box, Box) { - let mut cur = sched.change_task_context(cur, next, |sched, last_task| { - if last_task.is_sched() { - assert!(sched.sched_task.is_none()); - sched.sched_task = Some(last_task); - } else { - sched.enqueue_task(last_task); - } - }); - (cur.sched.take().unwrap(), cur) - } - - // * Task Context Helpers - - /// Called by a running task to end execution, after which it will - /// be recycled by the scheduler for reuse in a new task. - pub fn terminate_current_task(mut self: Box, - cur: Box) - -> ! { - // Similar to deschedule running task and then, but cannot go through - // the task-blocking path. The task is already dying. 
- let stask = self.sched_task.take().unwrap(); - let _cur = self.change_task_context(cur, stask, |sched, mut dead_task| { - let coroutine = dead_task.coroutine.take().unwrap(); - coroutine.recycle(&mut sched.stack_pool); - sched.task_state.decrement(); - }); - panic!("should never return!"); - } - - pub fn run_task(self: Box, - cur: Box, - next: Box) { - let (sched, task) = - self.process_task(cur, next, Scheduler::switch_task); - task.put_with_sched(sched); - } - - pub fn run_task_later(mut cur: Box, next: Box) { - let mut sched = cur.sched.take().unwrap(); - sched.enqueue_task(next); - cur.put_with_sched(sched); - } - - /// Yield control to the scheduler, executing another task. This is guaranteed - /// to introduce some amount of randomness to the scheduler. Currently the - /// randomness is a result of performing a round of work stealing (which - /// may end up stealing from the current scheduler). - pub fn yield_now(mut self: Box, cur: Box) { - // Async handles trigger the scheduler by calling yield_now on the local - // task, which eventually gets us to here. See comments in SchedRunner - // for more info on this. - if cur.is_sched() { - assert!(self.sched_task.is_none()); - self.run_sched_once(cur); - } else { - self.yield_check_count = reset_yield_check(&mut self.rng); - // Tell the scheduler to start stealing on the next iteration - self.steal_for_yield = true; - let stask = self.sched_task.take().unwrap(); - let cur = self.change_task_context(cur, stask, |sched, task| { - sched.enqueue_task(task); - }); - cur.put() - } - } - - pub fn maybe_yield(mut self: Box, cur: Box) { - // It's possible for sched tasks to possibly call this function, and it - // just means that they're likely sending on channels (which - // occasionally call this function). Sched tasks follow different paths - // when executing yield_now(), which may possibly trip the assertion - // below. For this reason, we just have sched tasks bail out soon. - // - // Sched tasks have no need to yield anyway because as soon as they - // return they'll yield to other threads by falling back to the event - // loop. Additionally, we completely control sched tasks, so we can make - // sure that they never execute more than enough code. - if cur.is_sched() { - return cur.put_with_sched(self) - } - - // The number of times to do the yield check before yielding, chosen - // arbitrarily. 
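`reset_yield_check` (defined near the top of this file) seeds this counter with a value in `[1, MAX_YIELD_CHECKS]`, so the assertion below holds and the decrement eventually reaches zero. The arithmetic, spelled out with a hypothetical deterministic helper:

```rust
static MAX_YIELD_CHECKS: uint = 20000;

// Same expression as reset_yield_check, with the RNG factored out.
fn reset_from(r: uint) -> uint { r % MAX_YIELD_CHECKS + 1 }

fn main() {
    assert_eq!(reset_from(0), 1);         // never zero
    assert_eq!(reset_from(19999), 20000); // at most MAX_YIELD_CHECKS
    assert_eq!(reset_from(20000), 1);     // wraps around
}
```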
- rtassert!(self.yield_check_count > 0); - self.yield_check_count -= 1; - if self.yield_check_count == 0 { - self.yield_now(cur); - } else { - cur.put_with_sched(self); - } - } - - - // * Utility Functions - - pub fn sched_id(&self) -> uint { self as *const Scheduler as uint } - - pub fn run_cleanup_job(&mut self) { - let cleanup_job = self.cleanup_job.take().unwrap(); - cleanup_job.run(self) - } - - pub fn make_handle(&mut self) -> SchedHandle { - let remote = self.event_loop.remote_callback(box SchedRunner); - - return SchedHandle { - remote: remote, - queue: self.message_producer.clone(), - sched_id: self.sched_id() - } - } -} - -// Supporting types - -type SchedulingFn = fn(Box, Box, Box) - -> (Box, Box); - -pub enum SchedMessage { - Wake, - Shutdown, - NewNeighbor(deque::Stealer>), - PinnedTask(Box), - TaskFromFriend(Box), - RunOnce(Box), -} - -pub struct SchedHandle { - remote: Box, - queue: msgq::Producer, - pub sched_id: uint -} - -impl SchedHandle { - pub fn send(&mut self, msg: SchedMessage) { - self.queue.push(msg); - self.remote.fire(); - } -} - -struct SchedRunner; - -impl Callback for SchedRunner { - fn call(&mut self) { - // In theory, this function needs to invoke the `run_sched_once` - // function on the scheduler. Sadly, we have no context here, except for - // knowledge of the local `Task`. In order to avoid a call to - // `GreenTask::convert`, we just call `yield_now` and the scheduler will - // detect when a sched task performs a yield vs a green task performing - // a yield (and act accordingly). - // - // This function could be converted to `GreenTask::convert` if - // absolutely necessary, but for cleanliness it is much better to not - // use the conversion function. - let task: Box = Local::take(); - task.yield_now(); - } -} - -struct CleanupJob { - task: Box, - f: UnsafeTaskReceiver -} - -impl CleanupJob { - pub fn new(task: Box, f: UnsafeTaskReceiver) -> CleanupJob { - CleanupJob { - task: task, - f: f - } - } - - pub fn run(self, sched: &mut Scheduler) { - let CleanupJob { task, f } = self; - f.to_fn()(sched, task) - } -} - -// FIXME: Some hacks to put a || closure in Scheduler without borrowck -// complaining -type UnsafeTaskReceiver = raw::Closure; -trait ClosureConverter { - fn from_fn(|&mut Scheduler, Box|) -> Self; - fn to_fn(self) -> |&mut Scheduler, Box|:'static ; -} -impl ClosureConverter for UnsafeTaskReceiver { - fn from_fn(f: |&mut Scheduler, Box|) -> UnsafeTaskReceiver { - unsafe { mem::transmute(f) } - } - fn to_fn(self) -> |&mut Scheduler, Box|:'static { - unsafe { mem::transmute(self) } - } -} - -// On unix, we read randomness straight from /dev/urandom, but the -// default constructor of an XorShiftRng does this via io::fs, which -// relies on the scheduler existing, so we have to manually load -// randomness. Windows has its own C API for this, so we don't need to -// worry there. -#[cfg(windows)] -fn new_sched_rng() -> XorShiftRng { - use std::rand::OsRng; - match OsRng::new() { - Ok(mut r) => r.gen(), - Err(e) => { - rtabort!("sched: failed to create seeded RNG: {}", e) - } - } -} -#[cfg(unix)] -fn new_sched_rng() -> XorShiftRng { - use libc; - use std::mem; - use std::rand::SeedableRng; - - let fd = "/dev/urandom".with_c_str(|name| { - unsafe { libc::open(name, libc::O_RDONLY, 0) } - }); - if fd == -1 { - rtabort!("could not open /dev/urandom for reading.") - } - - let mut seeds = [0u32, .. 
4]; - let size = mem::size_of_val(&seeds); - loop { - let nbytes = unsafe { - libc::read(fd, - seeds.as_mut_ptr() as *mut libc::c_void, - size as libc::size_t) - }; - rtassert!(nbytes as uint == size); - - if !seeds.iter().all(|x| *x == 0) { - break; - } - } - - unsafe {libc::close(fd);} - - SeedableRng::from_seed(seeds) -} - -#[cfg(test)] -mod test { - use std::rt::task::TaskOpts; - use std::rt::task::Task; - use std::rt::local::Local; - - use {TaskState, PoolConfig, SchedPool}; - use basic; - use sched::{TaskFromFriend, PinnedTask}; - use task::{GreenTask, HomeSched, AnySched}; - - fn pool() -> SchedPool { - SchedPool::new(PoolConfig { - threads: 1, - event_loop_factory: basic::event_loop, - }) - } - - fn run(f: proc():Send) { - let mut pool = pool(); - pool.spawn(TaskOpts::new(), f); - pool.shutdown(); - } - - fn sched_id() -> uint { - let mut task = Local::borrow(None::); - match task.maybe_take_runtime::() { - Some(green) => { - let ret = green.sched.as_ref().unwrap().sched_id(); - task.put_runtime(green); - return ret; - } - None => panic!() - } - } - - #[test] - fn trivial_run_in_newsched_task_test() { - let mut task_ran = false; - let task_ran_ptr: *mut bool = &mut task_ran; - run(proc() { - unsafe { *task_ran_ptr = true }; - rtdebug!("executed from the new scheduler") - }); - assert!(task_ran); - } - - #[test] - fn multiple_task_test() { - let total = 10; - let mut task_run_count = 0; - let task_run_count_ptr: *mut uint = &mut task_run_count; - // with only one thread this is safe to run in without worries of - // contention. - run(proc() { - for _ in range(0u, total) { - spawn(proc() { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1}; - }); - } - }); - assert!(task_run_count == total); - } - - #[test] - fn multiple_task_nested_test() { - let mut task_run_count = 0; - let task_run_count_ptr: *mut uint = &mut task_run_count; - run(proc() { - spawn(proc() { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - spawn(proc() { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - spawn(proc() { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - }) - }) - }) - }); - assert!(task_run_count == 3); - } - - // A very simple test that confirms that a task executing on the - // home scheduler notices that it is home. - #[test] - fn test_home_sched() { - let mut pool = pool(); - - let (dtx, drx) = channel(); - { - let (tx, rx) = channel(); - let mut handle1 = pool.spawn_sched(); - let mut handle2 = pool.spawn_sched(); - - handle1.send(TaskFromFriend(pool.task(TaskOpts::new(), proc() { - tx.send(sched_id()); - }))); - let sched1_id = rx.recv(); - - let mut task = pool.task(TaskOpts::new(), proc() { - assert_eq!(sched_id(), sched1_id); - dtx.send(()); - }); - task.give_home(HomeSched(handle1)); - handle2.send(TaskFromFriend(task)); - } - drx.recv(); - - pool.shutdown(); - } - - // An advanced test that checks all four possible states that a - // (task,sched) can be in regarding homes. 
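The four states exercised here follow directly from `process_task` above. A hypothetical distillation of that routing rule as a standalone truth table, with `Option<uint>` standing in for the real `AnySched`/`HomeSched(handle)` homes:

```rust
// home == None models AnySched; Some(id) models HomeSched(id).
fn route(home: Option<uint>, sched_id: uint, run_anything: bool) -> &'static str {
    match home {
        Some(id) if id == sched_id => "run here",
        Some(_)                    => "send task home",
        None if run_anything       => "run here",
        None                       => "send to friend",
    }
}

fn main() {
    assert_eq!(route(Some(1), 1, false), "run here");       // homed, at home
    assert_eq!(route(Some(2), 1, true),  "send task home"); // homed elsewhere
    assert_eq!(route(None, 1, true),     "run here");       // unhomed, normal sched
    assert_eq!(route(None, 1, false),    "send to friend"); // unhomed, special sched
}
```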
- - #[test] - fn test_schedule_home_states() { - use sleeper_list::SleeperList; - use super::{Shutdown, Scheduler, SchedHandle}; - use std::rt::thread::Thread; - use std::sync::deque::BufferPool; - - Thread::start(proc() { - let sleepers = SleeperList::new(); - let pool = BufferPool::new(); - let (normal_worker, normal_stealer) = pool.deque(); - let (special_worker, special_stealer) = pool.deque(); - let queues = vec![normal_stealer, special_stealer]; - let (_p, state) = TaskState::new(); - - // Our normal scheduler - let mut normal_sched = box Scheduler::new( - 1, - basic::event_loop(), - normal_worker, - queues.clone(), - sleepers.clone(), - state.clone()); - - let normal_handle = normal_sched.make_handle(); - let friend_handle = normal_sched.make_handle(); - - // Our special scheduler - let mut special_sched = box Scheduler::new_special( - 1, - basic::event_loop(), - special_worker, - queues.clone(), - sleepers.clone(), - false, - Some(friend_handle), - state); - - let special_handle = special_sched.make_handle(); - - let t1_handle = special_sched.make_handle(); - let t4_handle = special_sched.make_handle(); - - // Four test tasks: - // 1) task is home on special - // 2) task not homed, sched doesn't care - // 3) task not homed, sched requeues - // 4) task not home, send home - - // Grab both the scheduler and the task from TLS and check if the - // task is executing on an appropriate scheduler. - fn on_appropriate_sched() -> bool { - use task::{TypeGreen, TypeSched, HomeSched}; - let task = GreenTask::convert(Local::take()); - let sched_id = task.sched.as_ref().unwrap().sched_id(); - let run_any = task.sched.as_ref().unwrap().run_anything; - let ret = match task.task_type { - TypeGreen(Some(AnySched)) => { - run_any - } - TypeGreen(Some(HomeSched(SchedHandle { - sched_id: ref id, - .. - }))) => { - *id == sched_id - } - TypeGreen(None) => { panic!("task without home"); } - TypeSched => { panic!("expected green task"); } - }; - task.put(); - ret - } - - let task1 = GreenTask::new_homed(&mut special_sched.stack_pool, - None, HomeSched(t1_handle), proc() { - rtassert!(on_appropriate_sched()); - }); - - let task2 = GreenTask::new(&mut normal_sched.stack_pool, None, proc() { - rtassert!(on_appropriate_sched()); - }); - - let task3 = GreenTask::new(&mut normal_sched.stack_pool, None, proc() { - rtassert!(on_appropriate_sched()); - }); - - let task4 = GreenTask::new_homed(&mut special_sched.stack_pool, - None, HomeSched(t4_handle), proc() { - rtassert!(on_appropriate_sched()); - }); - - // Signal from the special task that we are done. 
- let (tx, rx) = channel::<()>(); - - fn run(next: Box) { - let mut task = GreenTask::convert(Local::take()); - let sched = task.sched.take().unwrap(); - sched.run_task(task, next) - } - - let normal_task = GreenTask::new(&mut normal_sched.stack_pool, None, proc() { - run(task2); - run(task4); - rx.recv(); - let mut nh = normal_handle; - nh.send(Shutdown); - let mut sh = special_handle; - sh.send(Shutdown); - }); - normal_sched.enqueue_task(normal_task); - - let special_task = GreenTask::new(&mut special_sched.stack_pool, None, proc() { - run(task1); - run(task3); - tx.send(()); - }); - special_sched.enqueue_task(special_task); - - let normal_sched = normal_sched; - let normal_thread = Thread::start(proc() { normal_sched.bootstrap() }); - - let special_sched = special_sched; - let special_thread = Thread::start(proc() { special_sched.bootstrap() }); - - normal_thread.join(); - special_thread.join(); - }).join(); - } - - //#[test] - //fn test_stress_schedule_task_states() { - // if util::limit_thread_creation_due_to_osx_and_valgrind() { return; } - // let n = stress_factor() * 120; - // for _ in range(0, n as int) { - // test_schedule_home_states(); - // } - //} - - #[test] - fn wakeup_across_scheds() { - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - - let mut pool1 = pool(); - let mut pool2 = pool(); - - pool1.spawn(TaskOpts::new(), proc() { - let id = sched_id(); - tx1.send(()); - rx2.recv(); - assert_eq!(id, sched_id()); - }); - - pool2.spawn(TaskOpts::new(), proc() { - let id = sched_id(); - rx1.recv(); - assert_eq!(id, sched_id()); - tx2.send(()); - }); - - pool1.shutdown(); - pool2.shutdown(); - } - - // A regression test that the final message is always handled. - // Used to deadlock because Shutdown was never recvd. - #[test] - fn no_missed_messages() { - let mut pool = pool(); - - let task = pool.task(TaskOpts::new(), proc()()); - pool.spawn_sched().send(TaskFromFriend(task)); - - pool.shutdown(); - } - - #[test] - fn multithreading() { - run(proc() { - let mut rxs = vec![]; - for _ in range(0u, 10) { - let (tx, rx) = channel(); - spawn(proc() { - tx.send(()); - }); - rxs.push(rx); - } - - loop { - match rxs.pop() { - Some(rx) => rx.recv(), - None => break, - } - } - }); - } - - #[test] - fn thread_ring() { - run(proc() { - let (end_tx, end_rx) = channel(); - - let n_tasks = 10; - let token = 2000; - - let (tx1, mut rx) = channel(); - tx1.send((token, end_tx)); - let mut i = 2; - while i <= n_tasks { - let (tx, next_rx) = channel(); - let imm_i = i; - let imm_rx = rx; - spawn(proc() { - roundtrip(imm_i, n_tasks, &imm_rx, &tx); - }); - rx = next_rx; - i += 1; - } - let rx = rx; - spawn(proc() { - roundtrip(1, n_tasks, &rx, &tx1); - }); - - end_rx.recv(); - }); - - fn roundtrip(id: int, n_tasks: int, - rx: &Receiver<(int, Sender<()>)>, - tx: &Sender<(int, Sender<()>)>) { - loop { - match rx.recv() { - (1, end_tx) => { - debug!("{}\n", id); - end_tx.send(()); - return; - } - (token, end_tx) => { - debug!("thread: {} got token: {}", id, token); - tx.send((token - 1, end_tx)); - if token <= n_tasks { - return; - } - } - } - } - } - } - - #[test] - fn start_closure_dtor() { - // Regression test that the `start` task entrypoint can - // contain dtors that use task resources - run(proc() { - #[allow(dead_code)] - struct S { field: () } - - impl Drop for S { - fn drop(&mut self) { - let _foo = box 0i; - } - } - - let s = S { field: () }; - - spawn(proc() { - let _ss = &s; - }); - }); - } - - #[test] - fn dont_starve_1() { - let mut pool = SchedPool::new(PoolConfig { - threads: 
2, // this must be > 1 - event_loop_factory: basic::event_loop, - }); - pool.spawn(TaskOpts::new(), proc() { - let (tx, rx) = channel(); - - // This task should not be able to starve the sender; - // The sender should get stolen to another thread. - spawn(proc() { - while rx.try_recv().is_err() { } - }); - - tx.send(()); - }); - pool.shutdown(); - } - - #[test] - fn dont_starve_2() { - run(proc() { - let (tx1, rx1) = channel(); - let (tx2, _rx2) = channel(); - - // This task should not be able to starve the other task. - // The sends should eventually yield. - spawn(proc() { - while rx1.try_recv().is_err() { - tx2.send(()); - } - }); - - tx1.send(()); - }); - } - - // Regression test for a logic bug that would cause single-threaded - // schedulers to sleep forever after yielding and stealing another task. - #[test] - fn single_threaded_yield() { - use std::task::deschedule; - run(proc() { - for _ in range(0u, 5) { deschedule(); } - }); - } - - #[test] - fn test_spawn_sched_blocking() { - use std::rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; - static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; - - // Testing that a task in one scheduler can block in foreign code - // without affecting other schedulers - for _ in range(0u, 20) { - let mut pool = pool(); - let (start_tx, start_rx) = channel(); - let (fin_tx, fin_rx) = channel(); - - let mut handle = pool.spawn_sched(); - handle.send(PinnedTask(pool.task(TaskOpts::new(), proc() { - unsafe { - let guard = LOCK.lock(); - - start_tx.send(()); - guard.wait(); // block the scheduler thread - guard.signal(); // let them know we have the lock - } - - fin_tx.send(()); - }))); - drop(handle); - - let mut handle = pool.spawn_sched(); - handle.send(PinnedTask(pool.task(TaskOpts::new(), proc() { - // Wait until the other task has its lock - start_rx.recv(); - - fn pingpong(po: &Receiver, ch: &Sender) { - let mut val = 20; - while val > 0 { - val = po.recv(); - let _ = ch.send_opt(val - 1); - } - } - - let (setup_tx, setup_rx) = channel(); - let (parent_tx, parent_rx) = channel(); - spawn(proc() { - let (child_tx, child_rx) = channel(); - setup_tx.send(child_tx); - pingpong(&child_rx, &parent_tx); - }); - - let child_tx = setup_rx.recv(); - child_tx.send(20); - pingpong(&parent_rx, &child_tx); - unsafe { - let guard = LOCK.lock(); - guard.signal(); // wakeup waiting scheduler - guard.wait(); // wait for them to grab the lock - } - }))); - drop(handle); - - fin_rx.recv(); - pool.shutdown(); - } - unsafe { LOCK.destroy(); } - } -} diff --git a/src/libgreen/simple.rs b/src/libgreen/simple.rs deleted file mode 100644 index e26a099c028..00000000000 --- a/src/libgreen/simple.rs +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A small module implementing a simple "runtime" used for bootstrapping a rust -//! scheduler pool and then interacting with it. 
- -use std::any::Any; -use std::mem; -use std::rt::Runtime; -use std::rt::local::Local; -use std::rt::mutex::NativeMutex; -use std::rt::task::{Task, BlockedTask, TaskOpts}; - -struct SimpleTask { - lock: NativeMutex, - awoken: bool, -} - -impl Runtime for SimpleTask { - // Implement the simple tasks of descheduling and rescheduling, but only in - // a simple number of cases. - fn deschedule(mut self: Box, - times: uint, - mut cur_task: Box, - f: |BlockedTask| -> Result<(), BlockedTask>) { - assert!(times == 1); - - let me = &mut *self as *mut SimpleTask; - let cur_dupe = &mut *cur_task as *mut Task; - cur_task.put_runtime(self); - let task = BlockedTask::block(cur_task); - - // See libnative/task.rs for what's going on here with the `awoken` - // field and the while loop around wait() - unsafe { - let guard = (*me).lock.lock(); - (*me).awoken = false; - match f(task) { - Ok(()) => { - while !(*me).awoken { - guard.wait(); - } - } - Err(task) => { mem::forget(task.wake()); } - } - drop(guard); - cur_task = mem::transmute(cur_dupe); - } - Local::put(cur_task); - } - fn reawaken(mut self: Box, mut to_wake: Box) { - let me = &mut *self as *mut SimpleTask; - to_wake.put_runtime(self); - unsafe { - mem::forget(to_wake); - let guard = (*me).lock.lock(); - (*me).awoken = true; - guard.signal(); - } - } - - // These functions are all unimplemented and panic as a result. This is on - // purpose. A "simple task" is just that, a very simple task that can't - // really do a whole lot. The only purpose of the task is to get us off our - // feet and running. - fn yield_now(self: Box, _cur_task: Box) { panic!() } - fn maybe_yield(self: Box, _cur_task: Box) { panic!() } - fn spawn_sibling(self: Box, - _cur_task: Box, - _opts: TaskOpts, - _f: proc():Send) { - panic!() - } - - fn stack_bounds(&self) -> (uint, uint) { panic!() } - fn stack_guard(&self) -> Option { panic!() } - - fn can_block(&self) -> bool { true } - fn wrap(self: Box) -> Box { panic!() } -} - -pub fn task() -> Box { - let mut task = box Task::new(); - task.put_runtime(box SimpleTask { - lock: unsafe {NativeMutex::new()}, - awoken: false, - }); - return task; -} diff --git a/src/libgreen/sleeper_list.rs b/src/libgreen/sleeper_list.rs deleted file mode 100644 index 5df866955e6..00000000000 --- a/src/libgreen/sleeper_list.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Maintains a shared list of sleeping schedulers. Schedulers -//! use this to wake each other up. 
- -use std::sync::mpmc_bounded_queue::Queue; - -use sched::SchedHandle; - -pub struct SleeperList { - q: Queue, -} - -impl SleeperList { - pub fn new() -> SleeperList { - SleeperList{q: Queue::with_capacity(8*1024)} - } - - pub fn push(&mut self, value: SchedHandle) { - assert!(self.q.push(value)) - } - - pub fn pop(&mut self) -> Option { - self.q.pop() - } - - pub fn casual_pop(&mut self) -> Option { - self.q.pop() - } -} - -impl Clone for SleeperList { - fn clone(&self) -> SleeperList { - SleeperList { - q: self.q.clone() - } - } -} diff --git a/src/libgreen/stack.rs b/src/libgreen/stack.rs deleted file mode 100644 index 81e6152b3d7..00000000000 --- a/src/libgreen/stack.rs +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::ptr; -use std::sync::atomic; -use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable, - MapNonStandardFlags, getenv}; -use libc; - -/// A task's stack. The name "Stack" is a vestige of segmented stacks. -pub struct Stack { - buf: Option, - min_size: uint, - valgrind_id: libc::c_uint, -} - -// Try to use MAP_STACK on platforms that support it (it's what we're doing -// anyway), but some platforms don't support it at all. For example, it appears -// that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always -// panics): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html -// -// DragonFly BSD also seems to suffer from the same problem. When MAP_STACK is -// used, it returns the same `ptr` multiple times. -#[cfg(not(any(windows, target_os = "freebsd", target_os = "dragonfly")))] -static STACK_FLAGS: libc::c_int = libc::MAP_STACK | libc::MAP_PRIVATE | - libc::MAP_ANON; -#[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] -static STACK_FLAGS: libc::c_int = libc::MAP_PRIVATE | libc::MAP_ANON; -#[cfg(windows)] -static STACK_FLAGS: libc::c_int = 0; - -impl Stack { - /// Allocate a new stack of `size`. If size = 0, this will panic. Use - /// `dummy_stack` if you want a zero-sized stack. - pub fn new(size: uint) -> Stack { - // Map in a stack. Eventually we might be able to handle stack - // allocation failure, which would fail to spawn the task. But there's - // not many sensible things to do on OOM. Panic seems fine (and is - // what the old stack allocation did). - let stack = match MemoryMap::new(size, &[MapReadable, MapWritable, - MapNonStandardFlags(STACK_FLAGS)]) { - Ok(map) => map, - Err(e) => panic!("mmap for stack of size {} failed: {}", size, e) - }; - - // Change the last page to be inaccessible. This is to provide safety; - // when an FFI function overflows it will (hopefully) hit this guard - // page. It isn't guaranteed, but that's why FFI is unsafe. buf.data is - // guaranteed to be aligned properly. - if !protect_last_page(&stack) { - panic!("Could not memory-protect guard page. stack={}, errno={}", - stack.data(), errno()); - } - - let mut stk = Stack { - buf: Some(stack), - min_size: size, - valgrind_id: 0 - }; - - // FIXME: Using the FFI to call a C macro. 
Slow - stk.valgrind_id = unsafe { - rust_valgrind_stack_register(stk.start() as *const libc::uintptr_t, - stk.end() as *const libc::uintptr_t) - }; - return stk; - } - - /// Create a 0-length stack which starts (and ends) at 0. - pub unsafe fn dummy_stack() -> Stack { - Stack { - buf: None, - min_size: 0, - valgrind_id: 0 - } - } - - /// Point to the last writable byte of the stack - pub fn guard(&self) -> *const uint { - (self.start() as uint + page_size()) as *const uint - } - - /// Point to the low end of the allocated stack - pub fn start(&self) -> *const uint { - self.buf.as_ref().map(|m| m.data() as *const uint) - .unwrap_or(ptr::null()) - } - - /// Point one uint beyond the high end of the allocated stack - pub fn end(&self) -> *const uint { - self.buf.as_ref().map(|buf| unsafe { - buf.data().offset(buf.len() as int) as *const uint - }).unwrap_or(ptr::null()) - } -} - -#[cfg(unix)] -fn protect_last_page(stack: &MemoryMap) -> bool { - unsafe { - // This may seem backwards: the start of the segment is the last page? - // Yes! The stack grows from higher addresses (the end of the allocated - // block) to lower addresses (the start of the allocated block). - let last_page = stack.data() as *mut libc::c_void; - libc::mprotect(last_page, page_size() as libc::size_t, - libc::PROT_NONE) != -1 - } -} - -#[cfg(windows)] -fn protect_last_page(stack: &MemoryMap) -> bool { - unsafe { - // see above - let last_page = stack.data() as *mut libc::c_void; - let mut old_prot: libc::DWORD = 0; - libc::VirtualProtect(last_page, page_size() as libc::SIZE_T, - libc::PAGE_NOACCESS, - &mut old_prot as libc::LPDWORD) != 0 - } -} - -impl Drop for Stack { - fn drop(&mut self) { - unsafe { - // FIXME: Using the FFI to call a C macro. Slow - rust_valgrind_stack_deregister(self.valgrind_id); - } - } -} - -pub struct StackPool { - // Ideally this would be some data structure that preserved ordering on - // Stack.min_size. - stacks: Vec, -} - -impl StackPool { - pub fn new() -> StackPool { - StackPool { - stacks: vec![], - } - } - - pub fn take_stack(&mut self, min_size: uint) -> Stack { - // Ideally this would be a binary search - match self.stacks.iter().position(|s| min_size <= s.min_size) { - Some(idx) => self.stacks.swap_remove(idx).unwrap(), - None => Stack::new(min_size) - } - } - - pub fn give_stack(&mut self, stack: Stack) { - if self.stacks.len() <= max_cached_stacks() { - self.stacks.push(stack) - } - } -} - -fn max_cached_stacks() -> uint { - static AMT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT; - match AMT.load(atomic::SeqCst) { - 0 => {} - n => return n - 1, - } - let amt = getenv("RUST_MAX_CACHED_STACKS").and_then(|s| from_str(s.as_slice())); - // This default corresponds to 20M of cache per scheduler (at the - // default size). 
- let amt = amt.unwrap_or(10); - // 0 is our sentinel value, so ensure that we'll never see 0 after - // initialization has run - AMT.store(amt + 1, atomic::SeqCst); - return amt; -} - -extern { - fn rust_valgrind_stack_register(start: *const libc::uintptr_t, - end: *const libc::uintptr_t) -> libc::c_uint; - fn rust_valgrind_stack_deregister(id: libc::c_uint); -} - -#[cfg(test)] -mod tests { - use super::StackPool; - - #[test] - fn stack_pool_caches() { - let mut p = StackPool::new(); - let s = p.take_stack(10); - p.give_stack(s); - let s = p.take_stack(4); - assert_eq!(s.min_size, 10); - p.give_stack(s); - let s = p.take_stack(14); - assert_eq!(s.min_size, 14); - p.give_stack(s); - } - - #[test] - fn stack_pool_caches_exact() { - let mut p = StackPool::new(); - let mut s = p.take_stack(10); - s.valgrind_id = 100; - p.give_stack(s); - - let s = p.take_stack(10); - assert_eq!(s.min_size, 10); - assert_eq!(s.valgrind_id, 100); - } -} diff --git a/src/libgreen/task.rs b/src/libgreen/task.rs deleted file mode 100644 index e159c153bc3..00000000000 --- a/src/libgreen/task.rs +++ /dev/null @@ -1,602 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The Green Task implementation -//! -//! This module contains the glue to the libstd runtime necessary to integrate -//! M:N scheduling. This GreenTask structure is hidden as a trait object in all -//! rust tasks and virtual calls are made in order to interface with it. -//! -//! Each green task contains a scheduler if it is currently running, and it also -//! contains the rust task itself in order to juggle around ownership of the -//! values. - -pub use self::TaskType::*; -pub use self::Home::*; - -use std::any::Any; -use std::mem; -use std::raw; -use std::rt::Runtime; -use std::rt::local::Local; -use std::rt::mutex::NativeMutex; -use std::rt::stack; -use std::rt::task::{Task, BlockedTask, TaskOpts}; -use std::rt; - -use context::Context; -use coroutine::Coroutine; -use sched::{Scheduler, SchedHandle, RunOnce}; -use stack::StackPool; - -/// The necessary fields needed to keep track of a green task (as opposed to a -/// 1:1 task). -pub struct GreenTask { - /// Coroutine that this task is running on, otherwise known as the register - /// context and the stack that this task owns. This field is optional to - /// relinquish ownership back to a scheduler to recycle stacks at a later - /// date. - pub coroutine: Option, - - /// Optional handle back into the home sched pool of this task. This field - /// is lazily initialized. - pub handle: Option, - - /// Slot for maintaining ownership of a scheduler. If a task is running, - /// this value will be Some(sched) where the task is running on "sched". - pub sched: Option>, - - /// Temporary ownership slot of a std::rt::task::Task object. This is used - /// to squirrel that libstd task away while we're performing green task - /// operations. - pub task: Option>, - - /// Dictates whether this is a sched task or a normal green task - pub task_type: TaskType, - - /// Home pool that this task was spawned into. This field is lazily - /// initialized until when the task is initially scheduled, and is used to - /// make sure that tasks are always woken up in the correct pool of - /// schedulers. 
- pub pool_id: uint, - - // See the comments in the scheduler about why this is necessary - pub nasty_deschedule_lock: NativeMutex, -} - -pub enum TaskType { - TypeGreen(Option), - TypeSched, -} - -pub enum Home { - AnySched, - HomeSched(SchedHandle), -} - -/// Trampoline code for all new green tasks which are running around. This -/// function is passed through to Context::new as the initial rust landing pad -/// for all green tasks. This code is actually called after the initial context -/// switch onto a green thread. -/// -/// The first argument to this function is the `Box` pointer, and -/// the next two arguments are the user-provided procedure for running code. -/// -/// The goal for having this weird-looking function is to reduce the number of -/// allocations done on a green-task startup as much as possible. -extern fn bootstrap_green_task(task: uint, code: *mut (), env: *mut ()) -> ! { - // Acquire ownership of the `proc()` - let start: proc() = unsafe { - mem::transmute(raw::Procedure { code: code, env: env }) - }; - - // Acquire ownership of the `Box` - let mut task: Box = unsafe { mem::transmute(task) }; - - // First code after swap to this new context. Run our cleanup job - task.pool_id = { - let sched = task.sched.as_mut().unwrap(); - sched.run_cleanup_job(); - sched.task_state.increment(); - sched.pool_id - }; - - // Convert our green task to a libstd task and then execute the code - // requested. This is the "try/catch" block for this green task and - // is the wrapper for *all* code run in the task. - let mut start = Some(start); - let task = task.swap().run(|| start.take().unwrap()()).destroy(); - - // Once the function has exited, it's time to run the termination - // routine. This means we need to context switch one more time but - // clean ourselves up on the other end. Since we have no way of - // preserving a handle to the GreenTask down to this point, this - // unfortunately must call `GreenTask::convert`. In order to avoid - // this we could add a `terminate` function to the `Runtime` trait - // in libstd, but that seems less appropriate since the conversion - // method exists. - GreenTask::convert(task).terminate(); -} - -impl GreenTask { - /// Creates a new green task which is not homed to any particular scheduler - /// and will not have any contained Task structure. - pub fn new(stack_pool: &mut StackPool, - stack_size: Option, - start: proc():Send) -> Box { - GreenTask::new_homed(stack_pool, stack_size, AnySched, start) - } - - /// Creates a new task (like `new`), but specifies the home for new task. - pub fn new_homed(stack_pool: &mut StackPool, - stack_size: Option, - home: Home, - start: proc():Send) -> Box { - // Allocate ourselves a GreenTask structure - let mut ops = GreenTask::new_typed(None, TypeGreen(Some(home))); - - // Allocate a stack for us to run on - let stack_size = stack_size.unwrap_or_else(|| rt::min_stack()); - let mut stack = stack_pool.take_stack(stack_size); - let context = Context::new(bootstrap_green_task, ops.as_uint(), start, - &mut stack); - - // Package everything up in a coroutine and return - ops.coroutine = Some(Coroutine { - current_stack_segment: stack, - saved_context: context, - }); - return ops; - } - - /// Creates a new green task with the specified coroutine and type, this is - /// useful when creating scheduler tasks. 
- pub fn new_typed(coroutine: Option, - task_type: TaskType) -> Box { - box GreenTask { - pool_id: 0, - coroutine: coroutine, - task_type: task_type, - sched: None, - handle: None, - nasty_deschedule_lock: unsafe { NativeMutex::new() }, - task: Some(box Task::new()), - } - } - - /// Creates a new green task with the given configuration options for the - /// contained Task object. The given stack pool is also used to allocate a - /// new stack for this task. - pub fn configure(pool: &mut StackPool, - opts: TaskOpts, - f: proc():Send) -> Box { - let TaskOpts { name, stack_size, on_exit } = opts; - - let mut green = GreenTask::new(pool, stack_size, f); - { - let task = green.task.as_mut().unwrap(); - task.name = name; - task.death.on_exit = on_exit; - } - return green; - } - - /// Just like the `maybe_take_runtime` function, this function should *not* - /// exist. Usage of this function is _strongly_ discouraged. This is an - /// absolute last resort necessary for converting a libstd task to a green - /// task. - /// - /// This function will assert that the task is indeed a green task before - /// returning (and will kill the entire process if this is wrong). - pub fn convert(mut task: Box) -> Box { - match task.maybe_take_runtime::() { - Some(mut green) => { - green.put_task(task); - green - } - None => rtabort!("not a green task any more?"), - } - } - - pub fn give_home(&mut self, new_home: Home) { - match self.task_type { - TypeGreen(ref mut home) => { *home = Some(new_home); } - TypeSched => rtabort!("type error: used SchedTask as GreenTask"), - } - } - - pub fn take_unwrap_home(&mut self) -> Home { - match self.task_type { - TypeGreen(ref mut home) => home.take().unwrap(), - TypeSched => rtabort!("type error: used SchedTask as GreenTask"), - } - } - - // New utility functions for homes. - - pub fn is_home_no_tls(&self, sched: &Scheduler) -> bool { - match self.task_type { - TypeGreen(Some(AnySched)) => { false } - TypeGreen(Some(HomeSched(SchedHandle { sched_id: ref id, .. }))) => { - *id == sched.sched_id() - } - TypeGreen(None) => { rtabort!("task without home"); } - TypeSched => { - // Awe yea - rtabort!("type error: expected: TypeGreen, found: TaskSched"); - } - } - } - - pub fn homed(&self) -> bool { - match self.task_type { - TypeGreen(Some(AnySched)) => { false } - TypeGreen(Some(HomeSched(SchedHandle { .. }))) => { true } - TypeGreen(None) => { - rtabort!("task without home"); - } - TypeSched => { - rtabort!("type error: expected: TypeGreen, found: TaskSched"); - } - } - } - - pub fn is_sched(&self) -> bool { - match self.task_type { - TypeGreen(..) => false, TypeSched => true, - } - } - - // Unsafe functions for transferring ownership of this GreenTask across - // context switches - - pub fn as_uint(&self) -> uint { - self as *const GreenTask as uint - } - - pub unsafe fn from_uint(val: uint) -> Box { - mem::transmute(val) - } - - // Runtime glue functions and helpers - - pub fn put_with_sched(mut self: Box, sched: Box) { - assert!(self.sched.is_none()); - self.sched = Some(sched); - self.put(); - } - - pub fn put_task(&mut self, task: Box) { - assert!(self.task.is_none()); - self.task = Some(task); - } - - pub fn swap(mut self: Box) -> Box { - let mut task = self.task.take().unwrap(); - task.put_runtime(self); - return task; - } - - pub fn put(self: Box) { - assert!(self.sched.is_some()); - Local::put(self.swap()); - } - - fn terminate(mut self: Box) -> ! 
{ - let sched = self.sched.take().unwrap(); - sched.terminate_current_task(self) - } - - // This function is used to remotely wakeup this green task back on to its - // original pool of schedulers. In order to do so, each tasks arranges a - // SchedHandle upon descheduling to be available for sending itself back to - // the original pool. - // - // Note that there is an interesting transfer of ownership going on here. We - // must relinquish ownership of the green task, but then also send the task - // over the handle back to the original scheduler. In order to safely do - // this, we leverage the already-present "nasty descheduling lock". The - // reason for doing this is that each task will bounce on this lock after - // resuming after a context switch. By holding the lock over the enqueueing - // of the task, we're guaranteed that the SchedHandle's memory will be valid - // for this entire function. - // - // An alternative would include having incredibly cheaply cloneable handles, - // but right now a SchedHandle is something like 6 allocations, so it is - // *not* a cheap operation to clone a handle. Until the day comes that we - // need to optimize this, a lock should do just fine (it's completely - // uncontended except for when the task is rescheduled). - fn reawaken_remotely(mut self: Box) { - unsafe { - let mtx = &mut self.nasty_deschedule_lock as *mut NativeMutex; - let handle = self.handle.as_mut().unwrap() as *mut SchedHandle; - let _guard = (*mtx).lock(); - (*handle).send(RunOnce(self)); - } - } -} - -impl Runtime for GreenTask { - fn yield_now(mut self: Box, cur_task: Box) { - self.put_task(cur_task); - let sched = self.sched.take().unwrap(); - sched.yield_now(self); - } - - fn maybe_yield(mut self: Box, cur_task: Box) { - self.put_task(cur_task); - let sched = self.sched.take().unwrap(); - sched.maybe_yield(self); - } - - fn deschedule(mut self: Box, - times: uint, - cur_task: Box, - f: |BlockedTask| -> Result<(), BlockedTask>) { - self.put_task(cur_task); - let mut sched = self.sched.take().unwrap(); - - // In order for this task to be reawoken in all possible contexts, we - // may need a handle back in to the current scheduler. When we're woken - // up in anything other than the local scheduler pool, this handle is - // used to send this task back into the scheduler pool. - if self.handle.is_none() { - self.handle = Some(sched.make_handle()); - self.pool_id = sched.pool_id; - } - - // This code is pretty standard, except for the usage of - // `GreenTask::convert`. Right now if we use `reawaken` directly it will - // expect for there to be a task in local TLS, but that is not true for - // this deschedule block (because the scheduler must retain ownership of - // the task while the cleanup job is running). In order to get around - // this for now, we invoke the scheduler directly with the converted - // Task => GreenTask structure. 
- if times == 1 { - sched.deschedule_running_task_and_then(self, |sched, task| { - match f(task) { - Ok(()) => {} - Err(t) => { - t.wake().map(|t| { - sched.enqueue_task(GreenTask::convert(t)) - }); - } - } - }); - } else { - sched.deschedule_running_task_and_then(self, |sched, task| { - for task in task.make_selectable(times) { - match f(task) { - Ok(()) => {}, - Err(task) => { - task.wake().map(|t| { - sched.enqueue_task(GreenTask::convert(t)) - }); - break - } - } - } - }); - } - } - - fn reawaken(mut self: Box, to_wake: Box) { - self.put_task(to_wake); - assert!(self.sched.is_none()); - - // Optimistically look for a local task, but if one's not available to - // inspect (in order to see if it's in the same sched pool as we are), - // then just use our remote wakeup routine and carry on! - let mut running_task: Box = match Local::try_take() { - Some(task) => task, - None => return self.reawaken_remotely() - }; - - // Waking up a green thread is a bit of a tricky situation. We have no - // guarantee about where the current task is running. The options we - // have for where this current task is running are: - // - // 1. Our original scheduler pool - // 2. Some other scheduler pool - // 3. Something that isn't a scheduler pool - // - // In order to figure out what case we're in, this is the reason that - // the `maybe_take_runtime` function exists. Using this function we can - // dynamically check to see which of these cases is the current - // situation and then dispatch accordingly. - // - // In case 1, we just use the local scheduler to resume ourselves - // immediately (if a rescheduling is possible). - // - // In case 2 and 3, we need to remotely reawaken ourself in order to be - // transplanted back to the correct scheduler pool. - match running_task.maybe_take_runtime::() { - Some(mut running_green_task) => { - running_green_task.put_task(running_task); - let sched = running_green_task.sched.take().unwrap(); - - if sched.pool_id == self.pool_id { - sched.run_task(running_green_task, self); - } else { - self.reawaken_remotely(); - - // put that thing back where it came from! - running_green_task.put_with_sched(sched); - } - } - None => { - self.reawaken_remotely(); - Local::put(running_task); - } - } - } - - fn spawn_sibling(mut self: Box, - cur_task: Box, - opts: TaskOpts, - f: proc():Send) { - self.put_task(cur_task); - - // First, set up a bomb which when it goes off will restore the local - // task unless its disarmed. This will allow us to gracefully panic from - // inside of `configure` which allocates a new task. - struct Bomb { inner: Option> } - impl Drop for Bomb { - fn drop(&mut self) { - let _ = self.inner.take().map(|task| task.put()); - } - } - let mut bomb = Bomb { inner: Some(self) }; - - // Spawns a task into the current scheduler. We allocate the new task's - // stack from the scheduler's stack pool, and then configure it - // accordingly to `opts`. Afterwards we bootstrap it immediately by - // switching to it. - // - // Upon returning, our task is back in TLS and we're good to return. 
- let sibling = { - let sched = bomb.inner.as_mut().unwrap().sched.as_mut().unwrap(); - GreenTask::configure(&mut sched.stack_pool, opts, f) - }; - let mut me = bomb.inner.take().unwrap(); - let sched = me.sched.take().unwrap(); - sched.run_task(me, sibling) - } - - fn stack_bounds(&self) -> (uint, uint) { - let c = self.coroutine.as_ref() - .expect("GreenTask.stack_bounds called without a coroutine"); - - // Don't return the red zone as part of the usable stack of this task, - // it's essentially an implementation detail. - (c.current_stack_segment.start() as uint + stack::RED_ZONE, - c.current_stack_segment.end() as uint) - } - - fn stack_guard(&self) -> Option { - let c = self.coroutine.as_ref() - .expect("GreenTask.stack_guard called without a coroutine"); - - Some(c.current_stack_segment.guard() as uint) - } - - fn can_block(&self) -> bool { false } - - fn wrap(self: Box) -> Box { - self as Box - } -} - -#[cfg(test)] -mod tests { - use std::rt::local::Local; - use std::rt::task::Task; - use std::task; - use std::rt::task::TaskOpts; - - use super::super::{PoolConfig, SchedPool}; - use super::GreenTask; - - fn spawn_opts(opts: TaskOpts, f: proc():Send) { - let mut pool = SchedPool::new(PoolConfig { - threads: 1, - event_loop_factory: super::super::basic::event_loop, - }); - pool.spawn(opts, f); - pool.shutdown(); - } - - #[test] - fn smoke() { - let (tx, rx) = channel(); - spawn_opts(TaskOpts::new(), proc() { - tx.send(()); - }); - rx.recv(); - } - - #[test] - fn smoke_panic() { - let (tx, rx) = channel::(); - spawn_opts(TaskOpts::new(), proc() { - let _tx = tx; - panic!() - }); - assert_eq!(rx.recv_opt(), Err(())); - } - - #[test] - fn smoke_opts() { - let mut opts = TaskOpts::new(); - opts.name = Some("test".into_maybe_owned()); - opts.stack_size = Some(20 * 4096); - let (tx, rx) = channel(); - opts.on_exit = Some(proc(r) tx.send(r)); - spawn_opts(opts, proc() {}); - assert!(rx.recv().is_ok()); - } - - #[test] - fn smoke_opts_panic() { - let mut opts = TaskOpts::new(); - let (tx, rx) = channel(); - opts.on_exit = Some(proc(r) tx.send(r)); - spawn_opts(opts, proc() { panic!() }); - assert!(rx.recv().is_err()); - } - - #[test] - fn yield_test() { - let (tx, rx) = channel(); - spawn_opts(TaskOpts::new(), proc() { - for _ in range(0u, 10) { task::deschedule(); } - tx.send(()); - }); - rx.recv(); - } - - #[test] - fn spawn_children() { - let (tx1, rx) = channel(); - spawn_opts(TaskOpts::new(), proc() { - let (tx2, rx) = channel(); - spawn(proc() { - let (tx3, rx) = channel(); - spawn(proc() { - tx3.send(()); - }); - rx.recv(); - tx2.send(()); - }); - rx.recv(); - tx1.send(()); - }); - rx.recv(); - } - - #[test] - fn spawn_inherits() { - let (tx, rx) = channel(); - spawn_opts(TaskOpts::new(), proc() { - spawn(proc() { - let mut task: Box = Local::take(); - match task.maybe_take_runtime::() { - Some(ops) => { - task.put_runtime(ops); - } - None => panic!(), - } - Local::put(task); - tx.send(()); - }); - }); - rx.recv(); - } -} From a68ec98166bf638c6cbf4036f51036012695718d Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Fri, 14 Nov 2014 14:33:51 -0800 Subject: [PATCH 04/10] Rewrite sync::mutex as thin layer over native mutexes Previously, sync::mutex had to split between green and native runtime systems and thus could not simply use the native mutex facility. This commit rewrites sync::mutex to link directly to native mutexes; in the future, the two will probably be coalesced into a single module (once librustrt is pulled into libstd wholesale). 
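The locking interface keeps its shape; only the machinery behind it
shrinks. As a rough sketch (illustrative only, and assuming the
`sync::mutex` path this module is exported under), a static mutex is
still used as before, but `lock` now forwards straight to
`rustrt::mutex::StaticNativeMutex`, and the returned `Guard` wraps the
native `LockGuard`, unlocking when it is dropped:

    use sync::mutex::{StaticMutex, MUTEX_INIT}; // export path assumed for illustration

    static LOCK: StaticMutex = MUTEX_INIT;

    fn critical_section() {
        let _guard = LOCK.lock(); // blocks on the underlying OS mutex
        // shared state is safely accessed here
    } // _guard is dropped here: the native mutex is unlocked

With the green/native split gone there is only one blocking strategy
left, so the atomic state word, the flavor tracking, and the intrusive
MPSC queue deleted below are no longer needed.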
--- src/libsync/lib.rs | 1 - src/libsync/mpsc_intrusive.rs | 144 ------------- src/libsync/mutex.rs | 390 ++-------------------------------- 3 files changed, 12 insertions(+), 523 deletions(-) delete mode 100644 src/libsync/mpsc_intrusive.rs diff --git a/src/libsync/lib.rs b/src/libsync/lib.rs index ffff32f04c4..ec5b08fa754 100644 --- a/src/libsync/lib.rs +++ b/src/libsync/lib.rs @@ -54,7 +54,6 @@ pub mod atomic; // Concurrent data structures -mod mpsc_intrusive; pub mod spsc_queue; pub mod mpsc_queue; pub mod mpmc_bounded_queue; diff --git a/src/libsync/mpsc_intrusive.rs b/src/libsync/mpsc_intrusive.rs deleted file mode 100644 index 1f7841de7c1..00000000000 --- a/src/libsync/mpsc_intrusive.rs +++ /dev/null @@ -1,144 +0,0 @@ -/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO - * EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, - * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of Dmitry Vyukov. - */ - -//! A mostly lock-free multi-producer, single consumer queue. -//! -//! This module implements an intrusive MPSC queue. This queue is incredibly -//! unsafe (due to use of unsafe pointers for nodes), and hence is not public. - -#![experimental] - -// http://www.1024cores.net/home/lock-free-algorithms -// /queues/intrusive-mpsc-node-based-queue - -use core::prelude::*; - -use core::atomic; -use core::mem; -use core::cell::UnsafeCell; - -// NB: all links are done as AtomicUint instead of AtomicPtr to allow for static -// initialization. 
- -pub struct Node { - pub next: atomic::AtomicUint, - pub data: T, -} - -pub struct DummyNode { - pub next: atomic::AtomicUint, -} - -pub struct Queue { - pub head: atomic::AtomicUint, - pub tail: UnsafeCell<*mut Node>, - pub stub: DummyNode, -} - -impl Queue { - pub fn new() -> Queue { - Queue { - head: atomic::AtomicUint::new(0), - tail: UnsafeCell::new(0 as *mut Node), - stub: DummyNode { - next: atomic::AtomicUint::new(0), - }, - } - } - - pub unsafe fn push(&self, node: *mut Node) { - (*node).next.store(0, atomic::Release); - let prev = self.head.swap(node as uint, atomic::AcqRel); - - // Note that this code is slightly modified to allow static - // initialization of these queues with rust's flavor of static - // initialization. - if prev == 0 { - self.stub.next.store(node as uint, atomic::Release); - } else { - let prev = prev as *mut Node; - (*prev).next.store(node as uint, atomic::Release); - } - } - - /// You'll note that the other MPSC queue in std::sync is non-intrusive and - /// returns a `PopResult` here to indicate when the queue is inconsistent. - /// An "inconsistent state" in the other queue means that a pusher has - /// pushed, but it hasn't finished linking the rest of the chain. - /// - /// This queue also suffers from this problem, but I currently haven't been - /// able to detangle when this actually happens. This code is translated - /// verbatim from the website above, and is more complicated than the - /// non-intrusive version. - /// - /// Right now consumers of this queue must be ready for this fact. Just - /// because `pop` returns `None` does not mean that there is not data - /// on the queue. - pub unsafe fn pop(&self) -> Option<*mut Node> { - let tail = *self.tail.get(); - let mut tail = if !tail.is_null() {tail} else { - mem::transmute(&self.stub) - }; - let mut next = (*tail).next(atomic::Relaxed); - if tail as uint == &self.stub as *const DummyNode as uint { - if next.is_null() { - return None; - } - *self.tail.get() = next; - tail = next; - next = (*next).next(atomic::Relaxed); - } - if !next.is_null() { - *self.tail.get() = next; - return Some(tail); - } - let head = self.head.load(atomic::Acquire) as *mut Node; - if tail != head { - return None; - } - let stub = mem::transmute(&self.stub); - self.push(stub); - next = (*tail).next(atomic::Relaxed); - if !next.is_null() { - *self.tail.get() = next; - return Some(tail); - } - return None - } -} - -impl Node { - pub fn new(t: T) -> Node { - Node { - data: t, - next: atomic::AtomicUint::new(0), - } - } - pub unsafe fn next(&self, ord: atomic::Ordering) -> *mut Node { - mem::transmute::>(self.next.load(ord)) - } -} diff --git a/src/libsync/mutex.rs b/src/libsync/mutex.rs index e05f3e1910b..6672126f55c 100644 --- a/src/libsync/mutex.rs +++ b/src/libsync/mutex.rs @@ -8,80 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! A proper mutex implementation regardless of the "flavor of task" which is -//! acquiring the lock. +//! A simple native mutex implementation. Warning: this API is likely +//! to change soon. -// # Implementation of Rust mutexes -// -// Most answers to the question of "how do I use a mutex" are "use pthreads", -// but for Rust this isn't quite sufficient. Green threads cannot acquire an OS -// mutex because they can context switch among many OS threads, leading to -// deadlocks with other green threads. 
-// -// Another problem for green threads grabbing an OS mutex is that POSIX dictates -// that unlocking a mutex on a different thread from where it was locked is -// undefined behavior. Remember that green threads can migrate among OS threads, -// so this would mean that we would have to pin green threads to OS threads, -// which is less than ideal. -// -// ## Using deschedule/reawaken -// -// We already have primitives for descheduling/reawakening tasks, so they're the -// first obvious choice when implementing a mutex. The idea would be to have a -// concurrent queue that everyone is pushed on to, and then the owner of the -// mutex is the one popping from the queue. -// -// Unfortunately, this is not very performant for native tasks. The suspected -// reason for this is that each native thread is suspended on its own condition -// variable, unique from all the other threads. In this situation, the kernel -// has no idea what the scheduling semantics are of the user program, so all of -// the threads are distributed among all cores on the system. This ends up -// having very expensive wakeups of remote cores high up in the profile when -// handing off the mutex among native tasks. On the other hand, when using an OS -// mutex, the kernel knows that all native threads are contended on the same -// mutex, so they're in theory all migrated to a single core (fast context -// switching). -// -// ## Mixing implementations -// -// From that above information, we have two constraints. The first is that -// green threads can't touch os mutexes, and the second is that native tasks -// pretty much *must* touch an os mutex. -// -// As a compromise, the queueing implementation is used for green threads and -// the os mutex is used for native threads (why not have both?). This ends up -// leading to fairly decent performance for both native threads and green -// threads on various workloads (uncontended and contended). -// -// The crux of this implementation is an atomic work which is CAS'd on many -// times in order to manage a few flags about who's blocking where and whether -// it's locked or not. +#![allow(dead_code)] use core::prelude::*; use self::Flavor::*; use alloc::boxed::Box; -use core::atomic; -use core::mem; -use core::cell::UnsafeCell; -use rustrt::local::Local; use rustrt::mutex; -use rustrt::task::{BlockedTask, Task}; -use rustrt::thread::Thread; - -use mpsc_intrusive as q; pub const LOCKED: uint = 1 << 0; -pub const GREEN_BLOCKED: uint = 1 << 1; -pub const NATIVE_BLOCKED: uint = 1 << 2; +pub const BLOCKED: uint = 1 << 1; /// A mutual exclusion primitive useful for protecting shared data /// -/// This mutex is an implementation of a lock for all flavors of tasks which may -/// be grabbing. A common problem with green threads is that they cannot grab -/// locks (if they reschedule during the lock a contender could deadlock the -/// system), but this mutex does *not* suffer this problem. -/// /// This mutex will properly block tasks waiting for the lock to become /// available. The mutex can also be statically initialized or created via a /// `new` constructor. @@ -107,14 +49,6 @@ pub struct Mutex { lock: Box, } -#[deriving(PartialEq, Show)] -enum Flavor { - Unlocked, - TryLockAcquisition, - GreenAcquisition, - NativeAcquisition, -} - /// The static mutex type is provided to allow for static allocation of mutexes. /// /// Note that this is a separate type because using a Mutex correctly means that @@ -137,310 +71,35 @@ enum Flavor { /// // lock is unlocked here. 
/// ``` pub struct StaticMutex { - /// Current set of flags on this mutex - state: atomic::AtomicUint, - /// an OS mutex used by native threads lock: mutex::StaticNativeMutex, - - /// Type of locking operation currently on this mutex - flavor: UnsafeCell, - /// uint-cast of the green thread waiting for this mutex - green_blocker: UnsafeCell, - /// uint-cast of the native thread waiting for this mutex - native_blocker: UnsafeCell, - - /// A concurrent mpsc queue used by green threads, along with a count used - /// to figure out when to dequeue and enqueue. - q: q::Queue, - green_cnt: atomic::AtomicUint, } /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. #[must_use] pub struct Guard<'a> { - lock: &'a StaticMutex, + guard: mutex::LockGuard<'a>, +} + +fn lift_guard(guard: mutex::LockGuard) -> Guard { + Guard { guard: guard } } /// Static initialization of a mutex. This constant can be used to initialize /// other mutex constants. pub const MUTEX_INIT: StaticMutex = StaticMutex { - lock: mutex::NATIVE_MUTEX_INIT, - state: atomic::INIT_ATOMIC_UINT, - flavor: UnsafeCell { value: Unlocked }, - green_blocker: UnsafeCell { value: 0 }, - native_blocker: UnsafeCell { value: 0 }, - green_cnt: atomic::INIT_ATOMIC_UINT, - q: q::Queue { - head: atomic::INIT_ATOMIC_UINT, - tail: UnsafeCell { value: 0 as *mut q::Node }, - stub: q::DummyNode { - next: atomic::INIT_ATOMIC_UINT, - } - } + lock: mutex::NATIVE_MUTEX_INIT }; impl StaticMutex { /// Attempts to grab this lock, see `Mutex::try_lock` pub fn try_lock<'a>(&'a self) -> Option> { - // Attempt to steal the mutex from an unlocked state. - // - // FIXME: this can mess up the fairness of the mutex, seems bad - match self.state.compare_and_swap(0, LOCKED, atomic::SeqCst) { - 0 => { - // After acquiring the mutex, we can safely access the inner - // fields. - let prev = unsafe { - mem::replace(&mut *self.flavor.get(), TryLockAcquisition) - }; - assert_eq!(prev, Unlocked); - Some(Guard::new(self)) - } - _ => None - } + unsafe { self.lock.trylock().map(lift_guard) } } /// Acquires this lock, see `Mutex::lock` pub fn lock<'a>(&'a self) -> Guard<'a> { - // First, attempt to steal the mutex from an unlocked state. The "fast - // path" needs to have as few atomic instructions as possible, and this - // one cmpxchg is already pretty expensive. - // - // FIXME: this can mess up the fairness of the mutex, seems bad - match self.try_lock() { - Some(guard) => return guard, - None => {} - } - - // After we've failed the fast path, then we delegate to the different - // locking protocols for green/native tasks. This will select two tasks - // to continue further (one native, one green). - let t: Box = Local::take(); - let can_block = t.can_block(); - let native_bit; - if can_block { - self.native_lock(t); - native_bit = NATIVE_BLOCKED; - } else { - self.green_lock(t); - native_bit = GREEN_BLOCKED; - } - - // After we've arbitrated among task types, attempt to re-acquire the - // lock (avoids a deschedule). This is very important to do in order to - // allow threads coming out of the native_lock function to try their - // best to not hit a cvar in deschedule. - let mut old = match self.state.compare_and_swap(0, LOCKED, - atomic::SeqCst) { - 0 => { - let flavor = if can_block { - NativeAcquisition - } else { - GreenAcquisition - }; - // We've acquired the lock, so this unsafe access to flavor is - // allowed. 
- unsafe { *self.flavor.get() = flavor; } - return Guard::new(self) - } - old => old, - }; - - // Alright, everything else failed. We need to deschedule ourselves and - // flag ourselves as waiting. Note that this case should only happen - // regularly in native/green contention. Due to try_lock and the header - // of lock stealing the lock, it's also possible for native/native - // contention to hit this location, but as less common. - let t: Box = Local::take(); - t.deschedule(1, |task| { - let task = unsafe { task.cast_to_uint() }; - - // These accesses are protected by the respective native/green - // mutexes which were acquired above. - let prev = if can_block { - unsafe { mem::replace(&mut *self.native_blocker.get(), task) } - } else { - unsafe { mem::replace(&mut *self.green_blocker.get(), task) } - }; - assert_eq!(prev, 0); - - loop { - assert_eq!(old & native_bit, 0); - // If the old state was locked, then we need to flag ourselves - // as blocking in the state. If the old state was unlocked, then - // we attempt to acquire the mutex. Everything here is a CAS - // loop that'll eventually make progress. - if old & LOCKED != 0 { - old = match self.state.compare_and_swap(old, - old | native_bit, - atomic::SeqCst) { - n if n == old => return Ok(()), - n => n - }; - } else { - assert_eq!(old, 0); - old = match self.state.compare_and_swap(old, - old | LOCKED, - atomic::SeqCst) { - n if n == old => { - // After acquiring the lock, we have access to the - // flavor field, and we've regained access to our - // respective native/green blocker field. - let prev = if can_block { - unsafe { - *self.native_blocker.get() = 0; - mem::replace(&mut *self.flavor.get(), - NativeAcquisition) - } - } else { - unsafe { - *self.green_blocker.get() = 0; - mem::replace(&mut *self.flavor.get(), - GreenAcquisition) - } - }; - assert_eq!(prev, Unlocked); - return Err(unsafe { - BlockedTask::cast_from_uint(task) - }) - } - n => n, - }; - } - } - }); - - Guard::new(self) - } - - // Tasks which can block are super easy. These tasks just call the blocking - // `lock()` function on an OS mutex - fn native_lock(&self, t: Box) { - Local::put(t); - unsafe { self.lock.lock_noguard(); } - } - - fn native_unlock(&self) { - unsafe { self.lock.unlock_noguard(); } - } - - fn green_lock(&self, t: Box) { - // Green threads flag their presence with an atomic counter, and if they - // fail to be the first to the mutex, they enqueue themselves on a - // concurrent internal queue with a stack-allocated node. - // - // FIXME: There isn't a cancellation currently of an enqueue, forcing - // the unlocker to spin for a bit. - if self.green_cnt.fetch_add(1, atomic::SeqCst) == 0 { - Local::put(t); - return - } - - let mut node = q::Node::new(0); - t.deschedule(1, |task| { - unsafe { - node.data = task.cast_to_uint(); - self.q.push(&mut node); - } - Ok(()) - }); - } - - fn green_unlock(&self) { - // If we're the only green thread, then no need to check the queue, - // otherwise the fixme above forces us to spin for a bit. - if self.green_cnt.fetch_sub(1, atomic::SeqCst) == 1 { return } - let node; - loop { - match unsafe { self.q.pop() } { - Some(t) => { node = t; break; } - None => Thread::yield_now(), - } - } - let task = unsafe { BlockedTask::cast_from_uint((*node).data) }; - task.wake().map(|t| t.reawaken()); - } - - fn unlock(&self) { - // Unlocking this mutex is a little tricky. 
We favor any task that is - // manually blocked (not in each of the separate locks) in order to help - // provide a little fairness (green threads will wake up the pending - // native thread and native threads will wake up the pending green - // thread). - // - // There's also the question of when we unlock the actual green/native - // locking halves as well. If we're waking up someone, then we can wait - // to unlock until we've acquired the task to wake up (we're guaranteed - // the mutex memory is still valid when there's contenders), but as soon - // as we don't find any contenders we must unlock the mutex, and *then* - // flag the mutex as unlocked. - // - // This flagging can fail, leading to another round of figuring out if a - // task needs to be woken, and in this case it's ok that the "mutex - // halves" are unlocked, we're just mainly dealing with the atomic state - // of the outer mutex. - let flavor = unsafe { mem::replace(&mut *self.flavor.get(), Unlocked) }; - - let mut state = self.state.load(atomic::SeqCst); - let mut unlocked = false; - let task; - loop { - assert!(state & LOCKED != 0); - if state & GREEN_BLOCKED != 0 { - self.unset(state, GREEN_BLOCKED); - task = unsafe { - *self.flavor.get() = GreenAcquisition; - let task = mem::replace(&mut *self.green_blocker.get(), 0); - BlockedTask::cast_from_uint(task) - }; - break; - } else if state & NATIVE_BLOCKED != 0 { - self.unset(state, NATIVE_BLOCKED); - task = unsafe { - *self.flavor.get() = NativeAcquisition; - let task = mem::replace(&mut *self.native_blocker.get(), 0); - BlockedTask::cast_from_uint(task) - }; - break; - } else { - assert_eq!(state, LOCKED); - if !unlocked { - match flavor { - GreenAcquisition => { self.green_unlock(); } - NativeAcquisition => { self.native_unlock(); } - TryLockAcquisition => {} - Unlocked => unreachable!(), - } - unlocked = true; - } - match self.state.compare_and_swap(LOCKED, 0, atomic::SeqCst) { - LOCKED => return, - n => { state = n; } - } - } - } - if !unlocked { - match flavor { - GreenAcquisition => { self.green_unlock(); } - NativeAcquisition => { self.native_unlock(); } - TryLockAcquisition => {} - Unlocked => unreachable!(), - } - } - - task.wake().map(|t| t.reawaken()); - } - - /// Loops around a CAS to unset the `bit` in `state` - fn unset(&self, mut state: uint, bit: uint) { - loop { - assert!(state & bit != 0); - let new = state ^ bit; - match self.state.compare_and_swap(state, new, atomic::SeqCst) { - n if n == state => break, - n => { state = n; } - } - } + lift_guard(unsafe { self.lock.lock() }) } /// Deallocates resources associated with this static mutex. 
@@ -463,12 +122,6 @@ impl Mutex { pub fn new() -> Mutex { Mutex { lock: box StaticMutex { - state: atomic::AtomicUint::new(0), - flavor: UnsafeCell::new(Unlocked), - green_blocker: UnsafeCell::new(0), - native_blocker: UnsafeCell::new(0), - green_cnt: atomic::AtomicUint::new(0), - q: q::Queue::new(), lock: unsafe { mutex::StaticNativeMutex::new() }, } } @@ -494,25 +147,6 @@ impl Mutex { pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() } } -impl<'a> Guard<'a> { - fn new<'b>(lock: &'b StaticMutex) -> Guard<'b> { - if cfg!(debug) { - // once we've acquired a lock, it's ok to access the flavor - assert!(unsafe { *lock.flavor.get() != Unlocked }); - assert!(lock.state.load(atomic::SeqCst) & LOCKED != 0); - } - Guard { lock: lock } - } -} - -#[unsafe_destructor] -impl<'a> Drop for Guard<'a> { - #[inline] - fn drop(&mut self) { - self.lock.unlock(); - } -} - impl Drop for Mutex { fn drop(&mut self) { // This is actually safe b/c we know that there is no further usage of From 40c78ab037c70d61eb4f8c95c7a4fec8f098644b Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Fri, 14 Nov 2014 14:38:41 -0800 Subject: [PATCH 05/10] Fallout from libgreen and libnative removal --- mk/crates.mk | 5 +- src/README.md | 2 - src/doc/reference.md | 8 +- src/liballoc/lib.rs | 1 - src/libcollections/lib.rs | 1 - src/liblibc/lib.rs | 1 - src/librand/lib.rs | 1 - src/librustrt/lib.rs | 1 - src/librustrt/stack_overflow.rs | 9 +- src/libstd/io/process.rs | 2 - src/libstd/lib.rs | 1 - src/libstd/rt/mod.rs | 12 +- src/libstd/sys/common/helper_thread.rs | 2 +- src/libstd/task.rs | 126 ++---------------- src/libsync/atomic.rs | 9 +- src/libsync/comm/mod.rs | 55 +------- src/libsync/comm/shared.rs | 11 -- src/libsync/lib.rs | 1 - src/test/bench/rt-spawn-rate.rs | 41 ------ src/test/bench/silly-test-spawn.rs | 25 ---- src/test/pretty/issue-4264.pp | 1 - src/test/run-fail/native-panic.rs | 21 --- .../bootstrap-from-c-with-native/lib.rs | 4 +- src/test/run-pass/backtrace.rs | 7 - src/test/run-pass/capturing-logging.rs | 8 -- src/test/run-pass/issue-12699.rs | 8 -- src/test/run-pass/issue-8860.rs | 25 ++-- src/test/run-pass/running-with-no-runtime.rs | 8 +- 28 files changed, 47 insertions(+), 349 deletions(-) delete mode 100644 src/test/bench/rt-spawn-rate.rs delete mode 100644 src/test/bench/silly-test-spawn.rs delete mode 100644 src/test/run-fail/native-panic.rs diff --git a/mk/crates.mk b/mk/crates.mk index 2523575b078..012b43a2b00 100644 --- a/mk/crates.mk +++ b/mk/crates.mk @@ -37,7 +37,7 @@ # # DEPS_ # These lists are the dependencies of the that is to be built. -# Rust dependencies are listed bare (i.e. std, green) and native +# Rust dependencies are listed bare (i.e. std) and native # dependencies have a "native:" prefix (i.e. native:hoedown). All deps # will be built before the crate itself is built. # @@ -49,7 +49,7 @@ # automatically generated for all stage/host/target combinations. 
################################################################################ -TARGET_CRATES := libc std green flate arena term \ +TARGET_CRATES := libc std flate arena term \ serialize sync getopts collections test time rand \ log regex graphviz core rbml alloc rustrt \ unicode @@ -66,7 +66,6 @@ DEPS_rustrt := alloc core libc collections native:rustrt_native DEPS_std := core libc rand alloc collections rustrt sync unicode \ native:rust_builtin native:backtrace DEPS_graphviz := std -DEPS_green := std native:context_switch DEPS_syntax := std term serialize log fmt_macros arena libc DEPS_rustc_trans := rustc rustc_back rustc_llvm libc DEPS_rustc := syntax flate arena serialize getopts rbml \ diff --git a/src/README.md b/src/README.md index 1bfa2641b4a..c72fd14ec5b 100644 --- a/src/README.md +++ b/src/README.md @@ -9,8 +9,6 @@ Source layout: | `libcore/` | The Rust core library | | `libdebug/` | Debugging utilities | | `libstd/` | The standard library (imported and linked by default) | -| `libgreen/` | The M:N runtime library | -| `libnative/` | The 1:1 runtime library | | `libsyntax/` | The Rust parser and pretty-printer | | `libtest/` | Rust's test-runner code | | ------------------- | --------------------------------------------------------- | diff --git a/src/doc/reference.md b/src/doc/reference.md index 4f0c9a50422..f732e06062e 100644 --- a/src/doc/reference.md +++ b/src/doc/reference.md @@ -999,14 +999,14 @@ An example of what will and will not work for `use` items: ``` # #![allow(unused_imports)] -use foo::native::start; // good: foo is at the root of the crate +use foo::core::iter; // good: foo is at the root of the crate use foo::baz::foobaz; // good: foo is at the root of the crate mod foo { - extern crate native; + extern crate core; - use foo::native::start; // good: foo is at crate root -// use native::start; // bad: native is not at the crate root + use foo::core::iter; // good: foo is at crate root +// use core::iter; // bad: native is not at the crate root use self::baz::foobaz; // good: self refers to module 'foo' use foo::bar::foobar; // good: foo is at crate root diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 2ff151c1363..173ca008d03 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -73,7 +73,6 @@ extern crate libc; // Allow testing this library -#[cfg(test)] extern crate native; #[cfg(test)] #[phase(plugin, link)] extern crate std; #[cfg(test)] #[phase(plugin, link)] extern crate log; diff --git a/src/libcollections/lib.rs b/src/libcollections/lib.rs index 5ec8a85fb0f..7965ac26a62 100644 --- a/src/libcollections/lib.rs +++ b/src/libcollections/lib.rs @@ -31,7 +31,6 @@ extern crate unicode; extern crate alloc; -#[cfg(test)] extern crate native; #[cfg(test)] extern crate test; #[cfg(test)] #[phase(plugin, link)] extern crate std; diff --git a/src/liblibc/lib.rs b/src/liblibc/lib.rs index 1a86ef2c6e0..10610b70584 100644 --- a/src/liblibc/lib.rs +++ b/src/liblibc/lib.rs @@ -83,7 +83,6 @@ extern crate core; #[cfg(test)] extern crate std; #[cfg(test)] extern crate test; -#[cfg(test)] extern crate native; pub use self::Nullable::*; diff --git a/src/librand/lib.rs b/src/librand/lib.rs index b7b5b09cfe4..1ff66d0653f 100644 --- a/src/librand/lib.rs +++ b/src/librand/lib.rs @@ -33,7 +33,6 @@ extern crate core; #[cfg(test)] #[phase(plugin, link)] extern crate std; #[cfg(test)] #[phase(plugin, link)] extern crate log; -#[cfg(test)] extern crate native; use core::prelude::*; diff --git a/src/librustrt/lib.rs b/src/librustrt/lib.rs index 
387b430b8f8..65e6bdb70f8 100644 --- a/src/librustrt/lib.rs +++ b/src/librustrt/lib.rs @@ -30,7 +30,6 @@ extern crate collections; #[cfg(test)] extern crate "rustrt" as realrustrt; #[cfg(test)] extern crate test; -#[cfg(test)] extern crate native; #[cfg(test)] #[phase(plugin, link)] extern crate std; diff --git a/src/librustrt/stack_overflow.rs b/src/librustrt/stack_overflow.rs index 10a3efbca10..19348449680 100644 --- a/src/librustrt/stack_overflow.rs +++ b/src/librustrt/stack_overflow.rs @@ -65,14 +65,7 @@ pub unsafe fn report() { #[cfg(any(windows, target_os = "linux", target_os = "macos"))] unsafe fn get_task_guard_page() -> Option<uint> { let task: Option<*mut Task> = Local::try_unsafe_borrow(); - - task.map(|task| { - let runtime = (*task).take_runtime(); - let guard = runtime.stack_guard(); - (*task).put_runtime(runtime); - - guard.unwrap_or(0) - }) + task.map(|task| (&*task).stack_guard().unwrap_or(0)) } #[cfg(windows)] diff --git a/src/libstd/io/process.rs b/src/libstd/io/process.rs index 592ec0681a9..d4d24c1e12f 100644 --- a/src/libstd/io/process.rs +++ b/src/libstd/io/process.rs @@ -740,8 +740,6 @@ impl Drop for Process { mod tests { #![allow(unused_imports)] - extern crate native; - use super::*; use prelude::*; use io::timer::*; diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index 70b30997e18..c27faea74bb 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -117,7 +117,6 @@ #![reexport_test_harness_main = "test_main"] -#[cfg(test)] extern crate green; #[cfg(test)] #[phase(plugin, link)] extern crate log; extern crate alloc; diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 322df17f4f1..b6e57186afe 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -58,7 +58,7 @@ Several modules in `core` are clients of `rt`: use failure; use rustrt; -use startup; +use os; // Reexport some of our utilities which are expected by other crates. pub use self::util::{default_sched_threads, min_stack, running_on_valgrind}; @@ -66,9 +66,9 @@ pub use self::util::{default_sched_threads, min_stack, running_on_valgrind}; // Reexport functionality from librustrt and other crates underneath the // standard library which work together to create the entire runtime. pub use alloc::heap; -pub use rustrt::{task, local, mutex, exclusive, stack, args, rtio, thread}; +pub use rustrt::{task, local, mutex, exclusive, stack, args, thread}; pub use rustrt::{Stdio, Stdout, Stderr, begin_unwind, begin_unwind_fmt}; -pub use rustrt::{bookkeeping, at_exit, unwind, DEFAULT_ERROR_CODE, Runtime}; +pub use rustrt::{at_exit, unwind, DEFAULT_ERROR_CODE}; // Simple backtrace functionality (to print on panic) pub mod backtrace; @@ -95,7 +95,7 @@ static OS_DEFAULT_STACK_ESTIMATE: uint = 2 * (1 << 20); #[cfg(not(test))] #[lang = "start"] fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int { - use std::mem; + use mem; start(argc, argv, proc() { let main: extern "Rust" fn() = unsafe { mem::transmute(main) }; main(); @@ -147,8 +147,8 @@ pub fn start(argc: int, argv: *const *const u8, main: proc()) -> int { init(argc, argv); let mut exit_code = None; let mut main = Some(main); - let mut task = task::new((my_stack_bottom, my_stack_top), - rt::thread::main_guard_page()); + let mut task = Task::new(Some((my_stack_bottom, my_stack_top)), + Some(rt::thread::main_guard_page())); task.name = Some(str::Slice("<main>
")); drop(task.run(|| { unsafe { diff --git a/src/libstd/sys/common/helper_thread.rs b/src/libstd/sys/common/helper_thread.rs index 87907fde277..d7c286bf0b9 100644 --- a/src/libstd/sys/common/helper_thread.rs +++ b/src/libstd/sys/common/helper_thread.rs @@ -21,7 +21,7 @@ //! time. use mem; -use rt::bookkeeping; +use rustrt::bookkeeping; use rt::mutex::StaticNativeMutex; use rt; use cell::UnsafeCell; diff --git a/src/libstd/task.rs b/src/libstd/task.rs index c7e31dae3d4..8da32ba4b89 100644 --- a/src/libstd/task.rs +++ b/src/libstd/task.rs @@ -11,11 +11,7 @@ //! Task creation //! //! An executing Rust program consists of a collection of tasks, each -//! with their own stack and local state. A Rust task is typically -//! backed by an operating system thread, making tasks 'just threads', -//! but may also be implemented via other strategies as well -//! (e.g. Rust comes with the [`green`](../../green/index.html) -//! scheduling crate for creating tasks backed by green threads). +//! with their own stack and local state. //! //! Tasks generally have their memory *isolated* from each other by //! virtue of Rust's owned types (which of course may only be owned by @@ -36,13 +32,6 @@ //! the main task panics the application will exit with a non-zero //! exit code. //! -//! # Basic task scheduling -//! -//! By default, every task is created with the same "flavor" as the calling task. -//! This flavor refers to the scheduling mode, with two possibilities currently -//! being 1:1 and M:N modes. Green (M:N) tasks are cooperatively scheduled and -//! native (1:1) tasks are scheduled by the OS kernel. -//! //! ## Example //! //! ```rust @@ -50,46 +39,6 @@ //! println!("Hello, World!"); //! }) //! ``` -//! -//! # Advanced task scheduling -//! -//! Task spawning can also be configured to use a particular scheduler, to -//! redirect the new task's output, or to yield a `future` representing the -//! task's final result. The configuration is established using the -//! `TaskBuilder` API: -//! -//! ## Example -//! -//! ```rust -//! extern crate green; -//! extern crate native; -//! -//! use std::task::TaskBuilder; -//! use green::{SchedPool, PoolConfig, GreenTaskBuilder}; -//! use native::NativeTaskBuilder; -//! -//! # fn main() { -//! // Create a green scheduler pool with the default configuration -//! let mut pool = SchedPool::new(PoolConfig::new()); -//! -//! // Spawn a task in the green pool -//! let mut fut_green = TaskBuilder::new().green(&mut pool).try_future(proc() { -//! /* ... */ -//! }); -//! -//! // Spawn a native task -//! let mut fut_native = TaskBuilder::new().native().try_future(proc() { -//! /* ... */ -//! }); -//! -//! // Wait for both tasks to finish, recording their outcome -//! let res_green = fut_green.unwrap(); -//! let res_native = fut_native.unwrap(); -//! -//! // Shut down the green scheduler pool -//! pool.shutdown(); -//! # } -//! ``` #![unstable = "The task spawning model will be changed as part of runtime reform, and the module \ will likely be renamed from `task` to `thread`."] @@ -108,26 +57,6 @@ use str::{Str, SendStr, IntoMaybeOwned}; use string::{String, ToString}; use sync::Future; -/// A means of spawning a task -pub trait Spawner { - /// Spawn a task, given low-level task options. - fn spawn(self, opts: task::TaskOpts, f: proc():Send); -} - -/// The default task spawner, which spawns siblings to the current task. 
-pub struct SiblingSpawner; - -impl Spawner for SiblingSpawner { - fn spawn(self, opts: task::TaskOpts, f: proc():Send) { - // bind tb to provide type annotation - let tb: Option> = Local::try_take(); - match tb { - Some(t) => t.spawn_sibling(opts, f), - None => panic!("need a local task to spawn a sibling task"), - }; - } -} - /// The task builder type. /// /// Provides detailed control over the properties and behavior of new tasks. @@ -139,7 +68,7 @@ impl Spawner for SiblingSpawner { // when you try to reuse the builder to spawn a new task. We'll just // sidestep that whole issue by making builders uncopyable and making // the run function move them in. -pub struct TaskBuilder { +pub struct TaskBuilder { // A name for the task-to-be, for identification in panic messages name: Option, // The size of the stack for the spawned task @@ -148,88 +77,60 @@ pub struct TaskBuilder { stdout: Option>, // Task-local stderr stderr: Option>, - // The mechanics of actually spawning the task (i.e.: green or native) - spawner: S, // Optionally wrap the eventual task body gen_body: Option proc():Send>, nocopy: marker::NoCopy, } -impl TaskBuilder { +impl TaskBuilder { /// Generate the base configuration for spawning a task, off of which more /// configuration methods can be chained. - pub fn new() -> TaskBuilder { + pub fn new() -> TaskBuilder { TaskBuilder { name: None, stack_size: None, stdout: None, stderr: None, - spawner: SiblingSpawner, gen_body: None, nocopy: marker::NoCopy, } } } -impl TaskBuilder { +impl TaskBuilder { /// Name the task-to-be. Currently the name is used for identification /// only in panic messages. #[unstable = "IntoMaybeOwned will probably change."] - pub fn named>(mut self, name: T) -> TaskBuilder { + pub fn named>(mut self, name: T) -> TaskBuilder { self.name = Some(name.into_maybe_owned()); self } /// Set the size of the stack for the new task. - pub fn stack_size(mut self, size: uint) -> TaskBuilder { + pub fn stack_size(mut self, size: uint) -> TaskBuilder { self.stack_size = Some(size); self } /// Redirect task-local stdout. #[experimental = "May not want to make stdio overridable here."] - pub fn stdout(mut self, stdout: Box) -> TaskBuilder { + pub fn stdout(mut self, stdout: Box) -> TaskBuilder { self.stdout = Some(stdout); self } /// Redirect task-local stderr. #[experimental = "May not want to make stdio overridable here."] - pub fn stderr(mut self, stderr: Box) -> TaskBuilder { + pub fn stderr(mut self, stderr: Box) -> TaskBuilder { self.stderr = Some(stderr); self } - /// Set the spawning mechanism for the task. - /// - /// The `TaskBuilder` API configures a task to be spawned, but defers to the - /// "spawner" to actually create and spawn the task. The `spawner` method - /// should not be called directly by `TaskBuiler` clients. It is intended - /// for use by downstream crates (like `native` and `green`) that implement - /// tasks. These downstream crates then add extension methods to the - /// builder, like `.native()` and `.green(pool)`, that actually set the - /// spawner. - pub fn spawner(self, spawner: T) -> TaskBuilder { - // repackage the entire TaskBuilder since its type is changing. 
- let TaskBuilder { - name, stack_size, stdout, stderr, spawner: _, gen_body, nocopy - } = self; - TaskBuilder { - name: name, - stack_size: stack_size, - stdout: stdout, - stderr: stderr, - spawner: spawner, - gen_body: gen_body, - nocopy: nocopy, - } - } - // Where spawning actually happens (whether yielding a future or not) fn spawn_internal(self, f: proc():Send, on_exit: Option>):Send>) { let TaskBuilder { - name, stack_size, stdout, stderr, spawner, mut gen_body, nocopy: _ + name, stack_size, stdout, stderr, mut gen_body, nocopy: _ } = self; let f = match gen_body.take() { Some(gen) => gen(f), @@ -348,11 +249,8 @@ pub fn name() -> Option { /// Yield control to the task scheduler. #[unstable = "Name will change."] pub fn deschedule() { - use rt::local::Local; - - // FIXME(#7544): Optimize this, since we know we won't block. - let task: Box = Local::take(); - task.yield_now(); + use rt::task::Task; + Task::yield_now(); } /// True if the running task is currently panicking (e.g. will return `true` inside a diff --git a/src/libsync/atomic.rs b/src/libsync/atomic.rs index e853e44d6f9..b4b2ef5218c 100644 --- a/src/libsync/atomic.rs +++ b/src/libsync/atomic.rs @@ -42,7 +42,6 @@ //! ``` //! use std::sync::Arc; //! use std::sync::atomic::{AtomicUint, SeqCst}; -//! use std::task::deschedule; //! //! fn main() { //! let spinlock = Arc::new(AtomicUint::new(1)); @@ -53,13 +52,7 @@ //! }); //! //! // Wait for the other task to release the lock -//! while spinlock.load(SeqCst) != 0 { -//! // Since tasks may not be preemptive (if they are green threads) -//! // yield to the scheduler to let the other task run. Low level -//! // concurrent code needs to take into account Rust's two threading -//! // models. -//! deschedule(); -//! } +//! while spinlock.load(SeqCst) != 0 {} //! } //! ``` //! diff --git a/src/libsync/comm/mod.rs b/src/libsync/comm/mod.rs index 2a9a19a7fa6..02fdc69448e 100644 --- a/src/libsync/comm/mod.rs +++ b/src/libsync/comm/mod.rs @@ -65,10 +65,6 @@ //! the `try_send` method on a `SyncSender`, but no other operations are //! guaranteed to be safe. //! -//! Additionally, channels can interoperate between runtimes. If one task in a -//! program is running on libnative and another is running on libgreen, they can -//! still communicate with one another using channels. -//! //! # Example //! //! Simple usage: @@ -328,13 +324,10 @@ pub use self::TrySendError::*; use self::Flavor::*; use alloc::arc::Arc; -use alloc::boxed::Box; -use core::cell::Cell; use core::kinds::marker; use core::mem; use core::cell::UnsafeCell; -use rustrt::local::Local; -use rustrt::task::{Task, BlockedTask}; +use rustrt::task::BlockedTask; pub use comm::select::{Select, Handle}; @@ -345,21 +338,12 @@ macro_rules! test ( use std::prelude::*; - use native; use comm::*; use super::*; use super::super::*; use std::task; - fn f() $b - - $(#[$a])* #[test] fn uv() { f() } - $(#[$a])* #[test] fn native() { - use native; - let (tx, rx) = channel(); - spawn(proc() { tx.send(f()) }); - rx.recv(); - } + $(#[$a])* #[test] fn f() { $b } } ) ) @@ -370,16 +354,11 @@ mod shared; mod stream; mod sync; -// Use a power of 2 to allow LLVM to optimize to something that's not a -// division, this is hit pretty regularly. -static RESCHED_FREQ: int = 256; - /// The receiving-half of Rust's channel type. 
This half can only be owned by /// one task #[unstable] pub struct Receiver { inner: UnsafeCell>, - receives: Cell, // can't share in an arc _marker: marker::NoSync, } @@ -397,7 +376,6 @@ pub struct Messages<'a, T:'a> { #[unstable] pub struct Sender { inner: UnsafeCell>, - sends: Cell, // can't share in an arc _marker: marker::NoSync, } @@ -544,7 +522,6 @@ impl Sender { fn new(inner: Flavor) -> Sender { Sender { inner: UnsafeCell::new(inner), - sends: Cell::new(0), _marker: marker::NoSync, } } @@ -608,21 +585,6 @@ impl Sender { /// ``` #[unstable = "this function may be renamed to send() in the future"] pub fn send_opt(&self, t: T) -> Result<(), T> { - // In order to prevent starvation of other tasks in situations where - // a task sends repeatedly without ever receiving, we occasionally - // yield instead of doing a send immediately. - // - // Don't unconditionally attempt to yield because the TLS overhead can - // be a bit much, and also use `try_take` instead of `take` because - // there's no reason that this send shouldn't be usable off the - // runtime. - let cnt = self.sends.get() + 1; - self.sends.set(cnt); - if cnt % (RESCHED_FREQ as uint) == 0 { - let task: Option> = Local::try_take(); - task.map(|t| t.maybe_yield()); - } - let (new_inner, ret) = match *unsafe { self.inner() } { Oneshot(ref p) => { unsafe { @@ -809,7 +771,7 @@ impl Drop for SyncSender { impl Receiver { fn new(inner: Flavor) -> Receiver { - Receiver { inner: UnsafeCell::new(inner), receives: Cell::new(0), _marker: marker::NoSync } + Receiver { inner: UnsafeCell::new(inner), _marker: marker::NoSync } } /// Blocks waiting for a value on this receiver @@ -854,17 +816,6 @@ impl Receiver { /// This function cannot panic. #[unstable = "the return type of this function may be altered"] pub fn try_recv(&self) -> Result { - // If a thread is spinning in try_recv, we should take the opportunity - // to reschedule things occasionally. See notes above in scheduling on - // sends for why this doesn't always hit TLS, and also for why this uses - // `try_take` instead of `take`. - let cnt = self.receives.get() + 1; - self.receives.set(cnt); - if cnt % (RESCHED_FREQ as uint) == 0 { - let task: Option> = Local::try_take(); - task.map(|t| t.maybe_yield()); - } - loop { let new_port = match *unsafe { self.inner() } { Oneshot(ref p) => { diff --git a/src/libsync/comm/shared.rs b/src/libsync/comm/shared.rs index 5ca89ea3666..96c0acacd80 100644 --- a/src/libsync/comm/shared.rs +++ b/src/libsync/comm/shared.rs @@ -279,17 +279,6 @@ impl Packet { // because the remote sender should finish their enqueue // operation "very quickly". // - // Note that this yield loop does *not* attempt to do a green - // yield (regardless of the context), but *always* performs an - // OS-thread yield. The reasoning for this is that the pusher in - // question which is causing the inconsistent state is - // guaranteed to *not* be a blocked task (green tasks can't get - // pre-empted), so it must be on a different OS thread. Also, - // `try_recv` is normally a "guaranteed no rescheduling" context - // in a green-thread situation. By yielding control of the - // thread, we will hopefully allow time for the remote task on - // the other OS thread to make progress. - // // Avoiding this yield loop would require a different queue // abstraction which provides the guarantee that after M // pushes have succeeded, at least M pops will succeed. 
The diff --git a/src/libsync/lib.rs b/src/libsync/lib.rs index ec5b08fa754..9d6f6513a65 100644 --- a/src/libsync/lib.rs +++ b/src/libsync/lib.rs @@ -38,7 +38,6 @@ extern crate collections; extern crate rustrt; #[cfg(test)] extern crate test; -#[cfg(test)] extern crate native; #[cfg(test)] #[phase(plugin, link)] extern crate std; pub use alloc::arc::{Arc, Weak}; diff --git a/src/test/bench/rt-spawn-rate.rs b/src/test/bench/rt-spawn-rate.rs deleted file mode 100644 index 6f02bff9f31..00000000000 --- a/src/test/bench/rt-spawn-rate.rs +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![no_start] - -extern crate green; - -use std::task::spawn; -use std::os; -use std::uint; - -// Very simple spawn rate test. Spawn N tasks that do nothing and -// return. - -#[start] -fn start(argc: int, argv: *const *const u8) -> int { - green::start(argc, argv, green::basic::event_loop, main) -} - -fn main() { - - let args = os::args(); - let args = args.as_slice(); - let n = if args.len() == 2 { - from_str::(args[1].as_slice()).unwrap() - } else { - 100000 - }; - - for _ in range(0, n) { - spawn(proc() {}); - } - -} diff --git a/src/test/bench/silly-test-spawn.rs b/src/test/bench/silly-test-spawn.rs deleted file mode 100644 index bc2723f6d74..00000000000 --- a/src/test/bench/silly-test-spawn.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// This is (hopefully) a quick test to get a good idea about spawning -// performance in libgreen. - -extern crate green; - -#[start] -fn start(argc: int, argv: *const *const u8) -> int { - green::start(argc, argv, green::basic::event_loop, main) -} - -fn main() { - for _ in range(1u32, 100_000) { - spawn(proc() {}) - } -} diff --git a/src/test/pretty/issue-4264.pp b/src/test/pretty/issue-4264.pp index f3c749da95f..94a168a74eb 100644 --- a/src/test/pretty/issue-4264.pp +++ b/src/test/pretty/issue-4264.pp @@ -3,7 +3,6 @@ #![feature(globs)] #[phase(plugin, link)] extern crate "std" as std; -extern crate "native" as rt; #[prelude_import] use std::prelude::*; // Copyright 2014 The Rust Project Developers. See the COPYRIGHT diff --git a/src/test/run-fail/native-panic.rs b/src/test/run-fail/native-panic.rs deleted file mode 100644 index 0b261676cb2..00000000000 --- a/src/test/run-fail/native-panic.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
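The two deleted spawn benchmarks above bootstrapped through `green::start`, and the run-fail test below through `native::start`; with both crates gone, a custom `#[start]` harness goes through `std::rt::start` instead, as the bootstrap-from-c test further down is updated to do. A minimal sketch of the replacement pattern, mirroring that test:

    use std::rt;

    #[start]
    fn start(argc: int, argv: *const *const u8) -> int {
        // rt::start initializes the runtime, runs the proc, and
        // returns the process exit status.
        rt::start(argc, argv, proc() {
            println!("hello");
        })
    }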
- -// ignore-android (FIXME #11419) -// error-pattern:explicit panic - -extern crate native; - -#[start] -fn start(argc: int, argv: *const *const u8) -> int { - native::start(argc, argv, proc() { - panic!(); - }) -} diff --git a/src/test/run-make/bootstrap-from-c-with-native/lib.rs b/src/test/run-make/bootstrap-from-c-with-native/lib.rs index 99dd473344a..34d9cc48ffe 100644 --- a/src/test/run-make/bootstrap-from-c-with-native/lib.rs +++ b/src/test/run-make/bootstrap-from-c-with-native/lib.rs @@ -11,11 +11,11 @@ #![crate_name="boot"] #![crate_type="dylib"] -extern crate native; +use std::rt; #[no_mangle] // this needs to get called from C pub extern "C" fn foo(argc: int, argv: *const *const u8) -> int { - native::start(argc, argv, proc() { + rt::start(argc, argv, proc() { spawn(proc() { println!("hello"); }); diff --git a/src/test/run-pass/backtrace.rs b/src/test/run-pass/backtrace.rs index 7e7399c403a..a5e65e49d38 100644 --- a/src/test/run-pass/backtrace.rs +++ b/src/test/run-pass/backtrace.rs @@ -10,18 +10,11 @@ // no-pretty-expanded FIXME #15189 // ignore-windows FIXME #13259 -extern crate native; - use std::os; use std::io::process::Command; use std::finally::Finally; use std::str; -#[start] -fn start(argc: int, argv: *const *const u8) -> int { - native::start(argc, argv, main) -} - #[inline(never)] fn foo() { let _v = vec![1i, 2, 3]; diff --git a/src/test/run-pass/capturing-logging.rs b/src/test/run-pass/capturing-logging.rs index 33ee2ffd359..a6744585e47 100644 --- a/src/test/run-pass/capturing-logging.rs +++ b/src/test/run-pass/capturing-logging.rs @@ -15,7 +15,6 @@ #[phase(plugin, link)] extern crate log; -extern crate native; use log::{set_logger, Logger, LogRecord}; use std::fmt; @@ -30,13 +29,6 @@ impl Logger for MyWriter { } } -#[start] -fn start(argc: int, argv: *const *const u8) -> int { - native::start(argc, argv, proc() { - main(); - }) -} - fn main() { let (tx, rx) = channel(); let (mut r, w) = (ChanReader::new(rx), ChanWriter::new(tx)); diff --git a/src/test/run-pass/issue-12699.rs b/src/test/run-pass/issue-12699.rs index 6b6e770bc99..2dc25181606 100644 --- a/src/test/run-pass/issue-12699.rs +++ b/src/test/run-pass/issue-12699.rs @@ -8,17 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - -extern crate native; - use std::io::timer; use std::time::Duration; -#[start] -fn start(argc: int, argv: *const *const u8) -> int { - native::start(argc, argv, main) -} - fn main() { timer::sleep(Duration::milliseconds(250)); } diff --git a/src/test/run-pass/issue-8860.rs b/src/test/run-pass/issue-8860.rs index d775f23bab4..35f713c4c2c 100644 --- a/src/test/run-pass/issue-8860.rs +++ b/src/test/run-pass/issue-8860.rs @@ -8,24 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
- -extern crate green; - static mut DROP: int = 0i; static mut DROP_S: int = 0i; static mut DROP_T: int = 0i; -#[start] -fn start(argc: int, argv: *const *const u8) -> int { - let ret = green::start(argc, argv, green::basic::event_loop, main); - unsafe { - assert_eq!(2, DROP); - assert_eq!(1, DROP_S); - assert_eq!(1, DROP_T); - } - ret -} - struct S; impl Drop for S { fn drop(&mut self) { @@ -48,7 +34,7 @@ impl Drop for T { } fn g(ref _t: T) {} -fn main() { +fn do_test() { let s = S; f(s); unsafe { @@ -59,3 +45,12 @@ fn main() { g(t); unsafe { assert_eq!(1, DROP_T); } } + +fn main() { + do_test(); + unsafe { + assert_eq!(2, DROP); + assert_eq!(1, DROP_S); + assert_eq!(1, DROP_T); + } +} diff --git a/src/test/run-pass/running-with-no-runtime.rs b/src/test/run-pass/running-with-no-runtime.rs index 942542a6bcd..ed4c20c8094 100644 --- a/src/test/run-pass/running-with-no-runtime.rs +++ b/src/test/run-pass/running-with-no-runtime.rs @@ -8,12 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -extern crate native; +extern crate rustrt; use std::io::process::{Command, ProcessOutput}; use std::os; use std::str; -use std::rt::unwind::try; +use std::rt; + +use rustrt::unwind::try; local_data_key!(foo: int) @@ -36,7 +38,7 @@ fn start(argc: int, argv: *const *const u8) -> int { return 0 } - native::start(argc, argv, main) + rt::start(argc, argv, main) } fn main() { From 6987ad22e46f55b12d8749be7522f4578d227c62 Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Fri, 14 Nov 2014 16:30:16 -0800 Subject: [PATCH 06/10] Make most of std::rt private Previously, the entire runtime API surface was publicly exposed, but that is neither necessary nor desirable. This commit hides most of the module, using librustrt directly as needed. The arrangement will need to be revisited when rustrt is pulled into std. [breaking-change] --- src/libcollections/slice.rs | 8 ++++-- src/librustrt/local.rs | 4 ++- src/librustrt/mutex.rs | 10 ++++--- src/librustrt/task.rs | 4 ++- src/libstd/dynamic_lib.rs | 2 +- src/libstd/failure.rs | 2 +- src/libstd/io/stdio.rs | 8 +++--- src/libstd/lib.rs | 3 +- src/libstd/os.rs | 6 ++-- src/libstd/rt/backtrace.rs | 4 +-- src/libstd/rt/mod.rs | 14 ++++------ src/libstd/sys/common/helper_thread.rs | 6 ++-- src/libstd/sys/common/net.rs | 2 +- src/libstd/sys/unix/mod.rs | 2 +- src/libstd/sys/unix/pipe.rs | 2 +- src/libstd/sys/windows/mod.rs | 2 +- src/libstd/sys/windows/pipe.rs | 2 +- src/libstd/task.rs | 16 +++++------ src/libsync/comm/mod.rs | 8 ++++-- src/libsync/deque.rs | 2 +- src/test/run-pass/foreign-call-no-runtime.rs | 3 +- .../match-ref-binding-in-guard-3256.rs | 4 ++- src/test/run-pass/native-always-waits.rs | 28 ------------------- src/test/run-pass/writealias.rs | 3 +- 24 files changed, 64 insertions(+), 81 deletions(-) delete mode 100644 src/test/run-pass/native-always-waits.rs diff --git a/src/libcollections/slice.rs b/src/libcollections/slice.rs index 4a54b361001..c3a248ce318 100644 --- a/src/libcollections/slice.rs +++ b/src/libcollections/slice.rs @@ -666,6 +666,8 @@ pub mod raw { #[cfg(test)] mod tests { + extern crate rustrt; + use std::cell::Cell; use std::default::Default; use std::mem; @@ -949,9 +951,9 @@ mod tests { #[test] fn test_swap_remove_noncopyable() { // Tests that we don't accidentally run destructors twice. 
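The slice test below keeps its shape but now names `Exclusive` through `rustrt` directly, since `std::rt` no longer re-exports it. For context, a sketch of `Exclusive`'s contract as these tests use it (assuming this era's API, where `lock` is an `unsafe` method returning a scoped guard):

    extern crate rustrt;

    fn main() {
        let x = rustrt::exclusive::Exclusive::new(1u);
        unsafe {
            // The guard holds the native mutex and releases it on drop.
            let mut val = x.lock();
            *val += 1;
            assert_eq!(*val, 2);
        }
    }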
- let mut v = vec![rt::exclusive::Exclusive::new(()), - rt::exclusive::Exclusive::new(()), - rt::exclusive::Exclusive::new(())]; + let mut v = vec![rustrt::exclusive::Exclusive::new(()), + rustrt::exclusive::Exclusive::new(()), + rustrt::exclusive::Exclusive::new(())]; let mut _e = v.swap_remove(0); assert_eq!(v.len(), 2); _e = v.swap_remove(1); diff --git a/src/librustrt/local.rs b/src/librustrt/local.rs index 8531f569a6b..93c5508e042 100644 --- a/src/librustrt/local.rs +++ b/src/librustrt/local.rs @@ -52,8 +52,10 @@ impl Local> for Task { #[cfg(test)] mod test { + extern crate rustrt; + use std::prelude::*; - use std::rt::thread::Thread; + use rustrt::thread::Thread; use super::*; use task::Task; diff --git a/src/librustrt/mutex.rs b/src/librustrt/mutex.rs index 1c448736d3e..11f60159363 100644 --- a/src/librustrt/mutex.rs +++ b/src/librustrt/mutex.rs @@ -33,7 +33,7 @@ //! # Example //! //! ```rust -//! use std::rt::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT}; +//! use rustrt::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT}; //! //! // Use a statically initialized mutex //! static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; @@ -108,7 +108,7 @@ impl StaticNativeMutex { /// # Example /// /// ```rust - /// use std::rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; + /// use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; /// static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; /// unsafe { /// let _guard = LOCK.lock(); @@ -225,7 +225,7 @@ impl NativeMutex { /// # Example /// /// ```rust - /// use std::rt::mutex::NativeMutex; + /// use rustrt::mutex::NativeMutex; /// unsafe { /// let mut lock = NativeMutex::new(); /// @@ -649,11 +649,13 @@ mod imp { #[cfg(test)] mod test { + extern crate rustrt; + use std::prelude::*; use std::mem::drop; use super::{StaticNativeMutex, NATIVE_MUTEX_INIT}; - use std::rt::thread::Thread; + use rustrt::thread::Thread; #[test] fn smoke_lock() { diff --git a/src/librustrt/task.rs b/src/librustrt/task.rs index 34c913c5bcb..cec28a464f8 100644 --- a/src/librustrt/task.rs +++ b/src/librustrt/task.rs @@ -544,6 +544,8 @@ impl Death { #[cfg(test)] mod test { + extern crate rustrt; + use super::*; use std::prelude::*; use std::task; @@ -592,7 +594,7 @@ mod test { #[test] #[should_fail] fn test_begin_unwind() { - use std::rt::unwind::begin_unwind; + use rustrt::unwind::begin_unwind; begin_unwind("cause", &(file!(), line!())) } diff --git a/src/libstd/dynamic_lib.rs b/src/libstd/dynamic_lib.rs index 8bb82d5bc1e..0f119d44485 100644 --- a/src/libstd/dynamic_lib.rs +++ b/src/libstd/dynamic_lib.rs @@ -229,7 +229,7 @@ pub mod dl { } pub fn check_for_errors_in(f: || -> T) -> Result { - use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; + use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; unsafe { // dlerror isn't thread safe, so we need to lock around this entire diff --git a/src/libstd/failure.rs b/src/libstd/failure.rs index 07759974356..c23e043c174 100644 --- a/src/libstd/failure.rs +++ b/src/libstd/failure.rs @@ -18,7 +18,7 @@ use kinds::Send; use option::{Some, None}; use result::Ok; use rt::backtrace; -use rt::{Stderr, Stdio}; +use rustrt::{Stderr, Stdio}; use rustrt::local::Local; use rustrt::task::Task; use str::Str; diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index 362e80f9f12..7374668a69d 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -40,9 +40,9 @@ use option::{Option, Some, None}; use boxed::Box; use sys::{fs, tty}; use result::{Ok, 
Err}; -use rt; -use rt::local::Local; -use rt::task::Task; +use rustrt; +use rustrt::local::Local; +use rustrt::task::Task; use slice::SlicePrelude; use str::StrPrelude; use uint; @@ -207,7 +207,7 @@ fn with_task_stdout(f: |&mut Writer| -> IoResult<()>) { local_stdout.replace(Some(my_stdout)); result } else { - let mut io = rt::Stdout; + let mut io = rustrt::Stdout; f(&mut io as &mut Writer) }; match result { diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index c27faea74bb..b35c49efdd8 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -162,7 +162,6 @@ pub use core::result; pub use core::option; pub use alloc::boxed; - pub use alloc::rc; pub use core_collections::slice; @@ -247,7 +246,7 @@ pub mod fmt; #[path = "sys/common/mod.rs"] mod sys_common; -mod rt; +pub mod rt; mod failure; // A curious inner-module that's not exported that contains the binding diff --git a/src/libstd/os.rs b/src/libstd/os.rs index 68ddabfd48f..d7ba4877086 100644 --- a/src/libstd/os.rs +++ b/src/libstd/os.rs @@ -208,7 +208,7 @@ Accessing environment variables is not generally threadsafe. Serialize access through a global lock. */ fn with_env_lock(f: || -> T) -> T { - use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; + use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; static LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT; @@ -1039,9 +1039,9 @@ fn real_args_as_bytes() -> Vec> { target_os = "freebsd", target_os = "dragonfly"))] fn real_args_as_bytes() -> Vec> { - use rt; + use rustrt; - match rt::args::clone() { + match rustrt::args::clone() { Some(args) => args, None => panic!("process arguments not initialized") } diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs index 11257d506b0..107518ef27c 100644 --- a/src/libstd/rt/backtrace.rs +++ b/src/libstd/rt/backtrace.rs @@ -238,7 +238,7 @@ mod imp { use mem; use option::{Some, None, Option}; use result::{Ok, Err}; - use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; + use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; /// As always - iOS on arm uses SjLj exceptions and /// _Unwind_Backtrace is even not available there. Still, @@ -667,7 +667,7 @@ mod imp { use option::{Some, None}; use path::Path; use result::{Ok, Err}; - use rt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; + use rustrt::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT}; use slice::SlicePrelude; use str::StrPrelude; use dynamic_lib::DynamicLibrary; diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index b6e57186afe..21b4edb6375 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -66,9 +66,7 @@ pub use self::util::{default_sched_threads, min_stack, running_on_valgrind}; // Reexport functionality from librustrt and other crates underneath the // standard library which work together to create the entire runtime. 
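The hunk below trims that re-exported surface down to the unwinding entry points plus `at_exit`; everything else must now be named through `rustrt` itself. Of the survivors, `at_exit` is the one ordinary code is most likely to touch; a sketch against the pre-1.0 signature, which takes a `proc`:

    use std::rt;

    fn main() {
        // Handlers registered here run during one-time runtime
        // cleanup, after main returns.
        rt::at_exit(proc() {
            println!("runtime is shutting down");
        });
    }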
pub use alloc::heap; -pub use rustrt::{task, local, mutex, exclusive, stack, args, thread}; -pub use rustrt::{Stdio, Stdout, Stderr, begin_unwind, begin_unwind_fmt}; -pub use rustrt::{at_exit, unwind, DEFAULT_ERROR_CODE}; +pub use rustrt::{begin_unwind, begin_unwind_fmt, at_exit}; // Simple backtrace functionality (to print on panic) pub mod backtrace; @@ -84,7 +82,7 @@ mod util; #[allow(experimental)] pub fn init(argc: int, argv: *const *const u8) { rustrt::init(argc, argv); - unsafe { unwind::register(failure::on_fail); } + unsafe { rustrt::unwind::register(failure::on_fail); } } #[cfg(any(windows, android))] @@ -147,19 +145,19 @@ pub fn start(argc: int, argv: *const *const u8, main: proc()) -> int { init(argc, argv); let mut exit_code = None; let mut main = Some(main); - let mut task = Task::new(Some((my_stack_bottom, my_stack_top)), - Some(rt::thread::main_guard_page())); + let mut task = box Task::new(Some((my_stack_bottom, my_stack_top)), + Some(rustrt::thread::main_guard_page())); task.name = Some(str::Slice("<main>
")); drop(task.run(|| { unsafe { - rt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top); + rustrt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top); } (main.take().unwrap())(); exit_code = Some(os::get_exit_status()); }).destroy()); unsafe { rt::cleanup(); } // If the exit code wasn't set, then the task block must have panicked. - return exit_code.unwrap_or(rt::DEFAULT_ERROR_CODE); + return exit_code.unwrap_or(rustrt::DEFAULT_ERROR_CODE); } /// One-time runtime cleanup. diff --git a/src/libstd/sys/common/helper_thread.rs b/src/libstd/sys/common/helper_thread.rs index d7c286bf0b9..9508d8d9232 100644 --- a/src/libstd/sys/common/helper_thread.rs +++ b/src/libstd/sys/common/helper_thread.rs @@ -22,8 +22,8 @@ use mem; use rustrt::bookkeeping; -use rt::mutex::StaticNativeMutex; -use rt; +use rustrt::mutex::StaticNativeMutex; +use rustrt; use cell::UnsafeCell; use sys::helper_signal; use prelude::*; @@ -83,7 +83,7 @@ impl Helper { self.lock.lock().signal() }); - rt::at_exit(proc() { self.shutdown() }); + rustrt::at_exit(proc() { self.shutdown() }); *self.initialized.get() = true; } } diff --git a/src/libstd/sys/common/net.rs b/src/libstd/sys/common/net.rs index 9b2b594a9c7..029fc852742 100644 --- a/src/libstd/sys/common/net.rs +++ b/src/libstd/sys/common/net.rs @@ -16,7 +16,7 @@ use libc::{mod, c_char, c_int}; use mem; use num::Int; use ptr::{mod, null, null_mut}; -use rt::mutex; +use rustrt::mutex; use io::net::ip::{SocketAddr, IpAddr, Ipv4Addr, Ipv6Addr}; use io::net::addrinfo; use io::{IoResult, IoError}; diff --git a/src/libstd/sys/unix/mod.rs b/src/libstd/sys/unix/mod.rs index 4db9e8a9df8..664a6a1e70c 100644 --- a/src/libstd/sys/unix/mod.rs +++ b/src/libstd/sys/unix/mod.rs @@ -25,7 +25,7 @@ use sys_common::mkerr_libc; macro_rules! helper_init( (static $name:ident: Helper<$m:ty>) => ( static $name: Helper<$m> = Helper { - lock: ::rt::mutex::NATIVE_MUTEX_INIT, + lock: ::rustrt::mutex::NATIVE_MUTEX_INIT, chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> }, signal: ::cell::UnsafeCell { value: 0 }, initialized: ::cell::UnsafeCell { value: false }, diff --git a/src/libstd/sys/unix/pipe.rs b/src/libstd/sys/unix/pipe.rs index 3fba06e0c7f..4d3469a9c24 100644 --- a/src/libstd/sys/unix/pipe.rs +++ b/src/libstd/sys/unix/pipe.rs @@ -12,7 +12,7 @@ use alloc::arc::Arc; use libc; use c_str::CString; use mem; -use rt::mutex; +use rustrt::mutex; use sync::atomic; use io::{mod, IoResult, IoError}; use prelude::*; diff --git a/src/libstd/sys/windows/mod.rs b/src/libstd/sys/windows/mod.rs index f316b2d8493..815ace21f87 100644 --- a/src/libstd/sys/windows/mod.rs +++ b/src/libstd/sys/windows/mod.rs @@ -26,7 +26,7 @@ use sync::{Once, ONCE_INIT}; macro_rules! 
helper_init( (static $name:ident: Helper<$m:ty>) => ( static $name: Helper<$m> = Helper { - lock: ::rt::mutex::NATIVE_MUTEX_INIT, + lock: ::rustrt::mutex::NATIVE_MUTEX_INIT, chan: ::cell::UnsafeCell { value: 0 as *mut Sender<$m> }, signal: ::cell::UnsafeCell { value: 0 }, initialized: ::cell::UnsafeCell { value: false }, diff --git a/src/libstd/sys/windows/pipe.rs b/src/libstd/sys/windows/pipe.rs index e38202302fb..a623c2cd8e2 100644 --- a/src/libstd/sys/windows/pipe.rs +++ b/src/libstd/sys/windows/pipe.rs @@ -90,7 +90,7 @@ use c_str::CString; use mem; use ptr; use sync::atomic; -use rt::mutex; +use rustrt::mutex; use io::{mod, IoError, IoResult}; use prelude::*; diff --git a/src/libstd/task.rs b/src/libstd/task.rs index 8da32ba4b89..4f5f47e980c 100644 --- a/src/libstd/task.rs +++ b/src/libstd/task.rs @@ -50,9 +50,9 @@ use kinds::{Send, marker}; use option::{None, Some, Option}; use boxed::Box; use result::Result; -use rt::local::Local; -use rt::task; -use rt::task::Task; +use rustrt::local::Local; +use rustrt::task; +use rustrt::task::Task; use str::{Str, SendStr, IntoMaybeOwned}; use string::{String, ToString}; use sync::Future; @@ -142,13 +142,13 @@ impl TaskBuilder { stack_size: stack_size, }; if stdout.is_some() || stderr.is_some() { - spawner.spawn(opts, proc() { + Task::spawn(opts, proc() { let _ = stdout.map(stdio::set_stdout); let _ = stderr.map(stdio::set_stderr); f(); }) } else { - spawner.spawn(opts, f) + Task::spawn(opts, f) } } @@ -237,7 +237,7 @@ pub fn try_future(f: proc():Send -> T) -> Future Option { - use rt::task::Task; + use rustrt::task::Task; let task = Local::borrow(None::); match task.name { @@ -249,7 +249,7 @@ pub fn name() -> Option { /// Yield control to the task scheduler. #[unstable = "Name will change."] pub fn deschedule() { - use rt::task::Task; + use rustrt::task::Task; Task::yield_now(); } @@ -257,7 +257,7 @@ pub fn deschedule() { /// destructor that is run while unwinding the stack after a call to `panic!()`). #[unstable = "May move to a different module."] pub fn failing() -> bool { - use rt::task::Task; + use rustrt::task::Task; Local::borrow(None::).unwinder.unwinding() } diff --git a/src/libsync/comm/mod.rs b/src/libsync/comm/mod.rs index 02fdc69448e..3c7e46036d6 100644 --- a/src/libsync/comm/mod.rs +++ b/src/libsync/comm/mod.rs @@ -336,6 +336,8 @@ macro_rules! 
test ( mod $name { #![allow(unused_imports)] + extern crate rustrt; + use std::prelude::*; use comm::*; @@ -1512,7 +1514,7 @@ mod test { }) test!(fn sends_off_the_runtime() { - use std::rt::thread::Thread; + use rustrt::thread::Thread; let (tx, rx) = channel(); let t = Thread::start(proc() { @@ -1527,7 +1529,7 @@ mod test { }) test!(fn try_recvs_off_the_runtime() { - use std::rt::thread::Thread; + use rustrt::thread::Thread; let (tx, rx) = channel(); let (cdone, pdone) = channel(); @@ -1977,7 +1979,7 @@ mod sync_tests { }) test!(fn try_recvs_off_the_runtime() { - use std::rt::thread::Thread; + use rustrt::thread::Thread; let (tx, rx) = sync_channel::<()>(0); let (cdone, pdone) = channel(); diff --git a/src/libsync/deque.rs b/src/libsync/deque.rs index 2f5c455556c..1fece03b273 100644 --- a/src/libsync/deque.rs +++ b/src/libsync/deque.rs @@ -414,7 +414,7 @@ mod tests { use super::{Data, BufferPool, Abort, Empty, Worker, Stealer}; use std::mem; - use std::rt::thread::Thread; + use rustrt::thread::Thread; use std::rand; use std::rand::Rng; use atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst, diff --git a/src/test/run-pass/foreign-call-no-runtime.rs b/src/test/run-pass/foreign-call-no-runtime.rs index 9dd52dfb6da..af36387f06c 100644 --- a/src/test/run-pass/foreign-call-no-runtime.rs +++ b/src/test/run-pass/foreign-call-no-runtime.rs @@ -9,9 +9,10 @@ // except according to those terms. extern crate libc; +extern crate rustrt; use std::mem; -use std::rt::thread::Thread; +use rustrt::thread::Thread; #[link(name = "rust_test_helpers")] extern { diff --git a/src/test/run-pass/match-ref-binding-in-guard-3256.rs b/src/test/run-pass/match-ref-binding-in-guard-3256.rs index 243c87c0eeb..ac783961b50 100644 --- a/src/test/run-pass/match-ref-binding-in-guard-3256.rs +++ b/src/test/run-pass/match-ref-binding-in-guard-3256.rs @@ -8,9 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +extern crate rustrt; + pub fn main() { unsafe { - let x = Some(::std::rt::exclusive::Exclusive::new(true)); + let x = Some(::rustrt::exclusive::Exclusive::new(true)); match x { Some(ref z) if *z.lock() => { assert!(*z.lock()); diff --git a/src/test/run-pass/native-always-waits.rs b/src/test/run-pass/native-always-waits.rs deleted file mode 100644 index ea3eb299648..00000000000 --- a/src/test/run-pass/native-always-waits.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// ignore-android (FIXME #11419) - -extern crate native; - -static mut set: bool = false; - -#[start] -fn start(argc: int, argv: *const *const u8) -> int { - // make sure that native::start always waits for all children to finish - native::start(argc, argv, proc() { - spawn(proc() { - unsafe { set = true; } - }); - }); - - // if we didn't set the global, then return a nonzero code - if unsafe {set} {0} else {1} -} diff --git a/src/test/run-pass/writealias.rs b/src/test/run-pass/writealias.rs index ae49c07093b..c8d281a791c 100644 --- a/src/test/run-pass/writealias.rs +++ b/src/test/run-pass/writealias.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
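writealias.rs below gets the same treatment as the comm and deque tests above: import `rustrt` explicitly instead of reaching it through `std::rt`. The raw thread API those tests now lean on is small; a sketch, assuming this era's `rustrt::thread::Thread`:

    extern crate rustrt;

    use rustrt::thread::Thread;

    fn main() {
        // start() spawns a native thread and returns a handle;
        // join() blocks and yields the proc's result.
        let t = Thread::start(proc() { 40u + 2 });
        assert_eq!(t.join(), 42);
    }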
+extern crate rustrt; struct Point {x: int, y: int, z: int} @@ -15,7 +16,7 @@ fn f(p: &mut Point) { p.z = 13; } pub fn main() { unsafe { - let x = Some(::std::rt::exclusive::Exclusive::new(true)); + let x = Some(::rustrt::exclusive::Exclusive::new(true)); match x { Some(ref z) if *z.lock() => { assert!(*z.lock()); From b3d43790421dab80a8ad57f1c3ad3c98dbef339d Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Fri, 14 Nov 2014 23:05:37 -0800 Subject: [PATCH 07/10] Fallout from new termination semantics --- src/librustrt/local.rs | 16 +++++++--------- src/librustrt/mutex.rs | 4 +--- src/librustrt/task.rs | 9 ++++----- .../run-pass/out-of-stack-new-thread-no-split.rs | 3 +++ .../process-spawn-with-unicode-params.rs | 2 -- 5 files changed, 15 insertions(+), 19 deletions(-) diff --git a/src/librustrt/local.rs b/src/librustrt/local.rs index 93c5508e042..b1d387a9cc3 100644 --- a/src/librustrt/local.rs +++ b/src/librustrt/local.rs @@ -52,17 +52,15 @@ impl Local> for Task { #[cfg(test)] mod test { - extern crate rustrt; - use std::prelude::*; - use rustrt::thread::Thread; + use thread::Thread; use super::*; use task::Task; #[test] fn thread_local_task_smoke_test() { Thread::start(proc() { - let task = box Task::new(); + let task = box Task::new(None, None); Local::put(task); let task: Box = Local::take(); cleanup_task(task); @@ -72,11 +70,11 @@ mod test { #[test] fn thread_local_task_two_instances() { Thread::start(proc() { - let task = box Task::new(); + let task = box Task::new(None, None); Local::put(task); let task: Box = Local::take(); cleanup_task(task); - let task = box Task::new(); + let task = box Task::new(None, None); Local::put(task); let task: Box = Local::take(); cleanup_task(task); @@ -86,7 +84,7 @@ mod test { #[test] fn borrow_smoke_test() { Thread::start(proc() { - let task = box Task::new(); + let task = box Task::new(None, None); Local::put(task); unsafe { @@ -100,7 +98,7 @@ mod test { #[test] fn borrow_with_return() { Thread::start(proc() { - let task = box Task::new(); + let task = box Task::new(None, None); Local::put(task); { @@ -115,7 +113,7 @@ mod test { #[test] fn try_take() { Thread::start(proc() { - let task = box Task::new(); + let task = box Task::new(None, None); Local::put(task); let t: Box = Local::try_take().unwrap(); diff --git a/src/librustrt/mutex.rs b/src/librustrt/mutex.rs index 11f60159363..2f0daf8f6e2 100644 --- a/src/librustrt/mutex.rs +++ b/src/librustrt/mutex.rs @@ -649,13 +649,11 @@ mod imp { #[cfg(test)] mod test { - extern crate rustrt; - use std::prelude::*; use std::mem::drop; use super::{StaticNativeMutex, NATIVE_MUTEX_INIT}; - use rustrt::thread::Thread; + use thread::Thread; #[test] fn smoke_lock() { diff --git a/src/librustrt/task.rs b/src/librustrt/task.rs index cec28a464f8..64c402bfbbc 100644 --- a/src/librustrt/task.rs +++ b/src/librustrt/task.rs @@ -544,11 +544,10 @@ impl Death { #[cfg(test)] mod test { - extern crate rustrt; - use super::*; use std::prelude::*; use std::task; + use unwind; #[test] fn tls() { @@ -594,20 +593,20 @@ mod test { #[test] #[should_fail] fn test_begin_unwind() { - use rustrt::unwind::begin_unwind; + use unwind::begin_unwind; begin_unwind("cause", &(file!(), line!())) } #[test] fn drop_new_task_ok() { - drop(Task::new()); + drop(Task::new(None, None)); } // Task blocking tests #[test] fn block_and_wake() { - let task = box Task::new(); + let task = box Task::new(None, None); let task = BlockedTask::block(task).wake().unwrap(); task.drop(); } diff --git a/src/test/run-pass/out-of-stack-new-thread-no-split.rs 
b/src/test/run-pass/out-of-stack-new-thread-no-split.rs index e4a42161322..21847a486d9 100644 --- a/src/test/run-pass/out-of-stack-new-thread-no-split.rs +++ b/src/test/run-pass/out-of-stack-new-thread-no-split.rs @@ -36,9 +36,12 @@ fn main() { let args = os::args(); let args = args.as_slice(); if args.len() > 1 && args[1].as_slice() == "recurse" { + let (tx, rx) = channel(); spawn(proc() { recurse(); + tx.send(()); }); + rx.recv(); } else { let recurse = Command::new(args[0].as_slice()).arg("recurse").output().unwrap(); assert!(!recurse.status.success()); diff --git a/src/test/run-pass/process-spawn-with-unicode-params.rs b/src/test/run-pass/process-spawn-with-unicode-params.rs index 1c24210d6cd..cceb0bf4d96 100644 --- a/src/test/run-pass/process-spawn-with-unicode-params.rs +++ b/src/test/run-pass/process-spawn-with-unicode-params.rs @@ -16,8 +16,6 @@ // non-ASCII characters. The child process ensures all the strings are // intact. -extern crate native; - use std::io; use std::io::fs; use std::io::Command; From 243bfc277e0f222f81d4afe1f6f28b9fd1ffe6b4 Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Mon, 17 Nov 2014 15:27:58 -0800 Subject: [PATCH 08/10] Fallout from namespaced enums --- src/libsync/mutex.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/libsync/mutex.rs b/src/libsync/mutex.rs index 6672126f55c..365609695ff 100644 --- a/src/libsync/mutex.rs +++ b/src/libsync/mutex.rs @@ -14,8 +14,6 @@ #![allow(dead_code)] use core::prelude::*; -use self::Flavor::*; - use alloc::boxed::Box; use rustrt::mutex; From 86992b6437e36faa6a7bc1de434576ccedb7905d Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Tue, 18 Nov 2014 21:15:13 -0800 Subject: [PATCH 09/10] Loosen possibly bogus constraints in backtrace test --- src/test/run-pass/backtrace.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/test/run-pass/backtrace.rs b/src/test/run-pass/backtrace.rs index a5e65e49d38..a267b8dcc86 100644 --- a/src/test/run-pass/backtrace.rs +++ b/src/test/run-pass/backtrace.rs @@ -57,7 +57,9 @@ fn runtest(me: &str) { let out = p.wait_with_output().unwrap(); assert!(!out.status.success()); let s = str::from_utf8(out.error.as_slice()).unwrap(); - assert!(s.contains("stack backtrace") && s.contains("double::h"), + // loosened the following from double::h to double:: due to + // spurious failures on mac, 32bit, optimized + assert!(s.contains("stack backtrace") && s.contains("double::"), "bad output3: {}", s); // Make sure a stack trace isn't printed too many times From 32c3d027801b8f30f741b1b5340682e7009d02ac Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Wed, 19 Nov 2014 22:57:28 -0800 Subject: [PATCH 10/10] Disable dubious pipe test --- src/libstd/io/pipe.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/io/pipe.rs b/src/libstd/io/pipe.rs index 5137db305f0..8c20ea08863 100644 --- a/src/libstd/io/pipe.rs +++ b/src/libstd/io/pipe.rs @@ -45,7 +45,7 @@ impl PipeStream { /// /// # Example /// - /// ```rust + /// ```{rust,no_run} /// # #![allow(unused_must_use)] /// extern crate libc; ///