833: automatically wait for worker threads r=matklad a=matklad

closes #817 



Co-authored-by: Aleksey Kladov <aleksey.kladov@gmail.com>
bors[bot] 2019-02-14 18:22:19 +00:00
commit c530f04df5
10 changed files with 121 additions and 155 deletions

Cargo.lock (generated)

@@ -1651,7 +1651,6 @@ name = "thread_worker"
version = "0.1.0"
dependencies = [
"crossbeam-channel 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"drop_bomb 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
]


@@ -121,7 +121,6 @@ impl BatchDatabase {
.collect();
let db = BatchDatabase::load(crate_graph, &mut vfs);
let _ = vfs.shutdown();
Ok((db, local_roots))
}
}


@@ -54,19 +54,20 @@ pub fn main_loop(
) -> Result<()> {
let pool = ThreadPool::new(THREADPOOL_SIZE);
let (task_sender, task_receiver) = unbounded::<Task>();
let (ws_worker, ws_watcher) = workspace_loader();
ws_worker.send(ws_root.clone()).unwrap();
// FIXME: support dynamic workspace loading.
let workspaces = match ws_worker.recv().unwrap() {
Ok(ws) => vec![ws],
Err(e) => {
log::error!("loading workspace failed: {}", e);
Vec::new()
let workspaces = {
let ws_worker = workspace_loader();
ws_worker.sender().send(ws_root.clone()).unwrap();
match ws_worker.receiver().recv().unwrap() {
Ok(ws) => vec![ws],
Err(e) => {
log::error!("loading workspace failed: {}", e);
Vec::new()
}
}
};
ws_worker.shutdown();
ws_watcher.shutdown().map_err(|_| format_err!("ws watcher died"))?;
let mut state = ServerWorldState::new(ws_root.clone(), workspaces);
log::info!("server initialized, serving requests");
@@ -94,12 +95,9 @@ pub fn main_loop(
log::info!("...threadpool has finished");
let vfs = Arc::try_unwrap(state.vfs).expect("all snapshots should be dead");
let vfs_res = vfs.into_inner().shutdown();
drop(vfs);
main_res?;
vfs_res.map_err(|_| format_err!("fs watcher died"))?;
Ok(())
main_res
}
enum Event {
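
The block scoping above is the heart of this change: instead of pairing every worker with a `WorkerHandle` that must be shut down by hand, the `Worker` is simply dropped at the end of the block, which closes its input channel and joins its thread. Below is a minimal sketch of the pattern, assuming the `thread_worker` crate as refactored in this PR; the "doubler" worker is a made-up example, not part of the commit.

```rust
fn main() {
    let answer = {
        // The worker lives only for this block.
        let worker = thread_worker::Worker::<i32, i32>::spawn(
            "doubler",
            1, // input channel capacity
            |inp, out| {
                // Runs on the worker thread until the input side closes.
                for n in inp {
                    out.send(n * 2).unwrap();
                }
            },
        );
        worker.sender().send(21).unwrap();
        worker.receiver().recv().unwrap()
        // Leaving the block drops `worker`: its sender closes first,
        // the `for` loop above terminates, and the thread is joined.
    };
    assert_eq!(answer, 42);
}
```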


@@ -1,6 +1,6 @@
use std::path::PathBuf;
use thread_worker::{WorkerHandle, Worker};
use thread_worker::Worker;
use crate::Result;
@@ -8,8 +8,8 @@ pub use ra_project_model::{
ProjectWorkspace, CargoWorkspace, Package, Target, TargetKind, Sysroot,
};
pub fn workspace_loader() -> (Worker<PathBuf, Result<ProjectWorkspace>>, WorkerHandle) {
thread_worker::spawn::<PathBuf, Result<ProjectWorkspace>, _>(
pub fn workspace_loader() -> Worker<PathBuf, Result<ProjectWorkspace>> {
Worker::<PathBuf, Result<ProjectWorkspace>>::spawn(
"workspace loader",
1,
|input_receiver, output_sender| {


@@ -17,7 +17,7 @@ use lsp_types::{
use serde::Serialize;
use serde_json::{to_string_pretty, Value};
use tempfile::TempDir;
use thread_worker::{WorkerHandle, Worker};
use thread_worker::Worker;
use test_utils::{parse_fixture, find_mismatch};
use ra_lsp_server::{
@@ -45,13 +45,12 @@ pub struct Server {
messages: RefCell<Vec<RawMessage>>,
dir: TempDir,
worker: Option<Worker<RawMessage, RawMessage>>,
watcher: Option<WorkerHandle>,
}
impl Server {
fn new(dir: TempDir, files: Vec<(PathBuf, String)>) -> Server {
let path = dir.path().to_path_buf();
let (worker, watcher) = thread_worker::spawn::<RawMessage, RawMessage, _>(
let worker = Worker::<RawMessage, RawMessage>::spawn(
"test server",
128,
move |mut msg_receiver, mut msg_sender| {
@@ -63,7 +62,6 @@ impl Server {
dir,
messages: Default::default(),
worker: Some(worker),
watcher: Some(watcher),
};
for (path, text) in files {
@@ -117,7 +115,7 @@ impl Server {
}
fn send_request_(&self, r: RawRequest) -> Value {
let id = r.id;
self.worker.as_ref().unwrap().send(RawMessage::Request(r)).unwrap();
self.worker.as_ref().unwrap().sender().send(RawMessage::Request(r)).unwrap();
while let Some(msg) = self.recv() {
match msg {
RawMessage::Request(req) => panic!("unexpected request: {:?}", req),
@@ -157,24 +155,19 @@ impl Server {
}
}
fn recv(&self) -> Option<RawMessage> {
recv_timeout(&self.worker.as_ref().unwrap().out).map(|msg| {
recv_timeout(&self.worker.as_ref().unwrap().receiver()).map(|msg| {
self.messages.borrow_mut().push(msg.clone());
msg
})
}
fn send_notification(&self, not: RawNotification) {
self.worker.as_ref().unwrap().send(RawMessage::Notification(not)).unwrap();
self.worker.as_ref().unwrap().sender().send(RawMessage::Notification(not)).unwrap();
}
}
impl Drop for Server {
fn drop(&mut self) {
self.send_request::<Shutdown>(());
let receiver = self.worker.take().unwrap().shutdown();
while let Some(msg) = recv_timeout(&receiver) {
drop(msg);
}
self.watcher.take().unwrap().shutdown().unwrap();
}
}


@@ -1,13 +1,11 @@
use std::{
fs,
thread,
path::{Path, PathBuf},
sync::{mpsc, Arc},
time::Duration,
};
use crossbeam_channel::{Receiver, Sender, unbounded, RecvError, select};
use crossbeam_channel::{Sender, unbounded, RecvError, select};
use relative_path::RelativePathBuf;
use thread_worker::WorkerHandle;
use walkdir::WalkDir;
use notify::{DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher as _Watcher};
@@ -48,37 +46,42 @@ enum ChangeKind {
const WATCHER_DELAY: Duration = Duration::from_millis(250);
pub(crate) struct Worker {
worker: thread_worker::Worker<Task, TaskResult>,
worker_handle: WorkerHandle,
}
pub(crate) type Worker = thread_worker::Worker<Task, TaskResult>;
pub(crate) fn start(roots: Arc<Roots>) -> Worker {
// This is a pretty elaborate setup of threads & channels! It is
// explained by the following concerns:
// * we need to burn a thread translating from notify's mpsc to
// crossbeam_channel.
// * we want to read all files from a single thread, to guarantee that
// we always get fresher versions and never go back in time.
// * we want to tear down everything neatly during shutdown.
Worker::spawn(
"vfs",
128,
// These are the channels we use to communicate with the outside world.
// If `input_receiver` is closed we need to tear ourselves down.
// `output_sender` should not be closed unless the parent died.
move |input_receiver, output_sender| {
// Make sure that the destruction order is
//
// * notify_sender
// * _thread
// * watcher_sender
//
// this is required to avoid deadlocks.
impl Worker {
pub(crate) fn start(roots: Arc<Roots>) -> Worker {
// This is a pretty elaborate setup of threads & channels! It is
// explained by the following concerns:
// * we need to burn a thread translating from notify's mpsc to
// crossbeam_channel.
// * we want to read all files from a single thread, to guarantee that
// we always get fresher versions and never go back in time.
// * we want to tear down everything neatly during shutdown.
let (worker, worker_handle) = thread_worker::spawn(
"vfs",
128,
// These are the channels we use to communicate with the outside world.
// If `input_receiver` is closed we need to tear ourselves down.
// `output_sender` should not be closed unless the parent died.
move |input_receiver, output_sender| {
// These are the corresponding crossbeam channels
let (watcher_sender, watcher_receiver) = unbounded();
let _thread;
{
// These are `std` channels notify will send events to
let (notify_sender, notify_receiver) = mpsc::channel();
// These are the corresponding crossbeam channels
let (watcher_sender, watcher_receiver) = unbounded();
let mut watcher = notify::watcher(notify_sender, WATCHER_DELAY)
.map_err(|e| log::error!("failed to spawn notify {}", e))
.ok();
// Start a silly thread to transform between two channels
let thread = thread::spawn(move || {
_thread = thread_worker::ScopedThread::spawn("notify-convertor", move || {
notify_receiver
.into_iter()
.for_each(|event| convert_notify_event(event, &watcher_sender))
@@ -110,35 +113,11 @@ impl Worker {
},
}
}
// Stop the watcher
drop(watcher.take());
// Drain pending events: we are not interested in them anyways!
watcher_receiver.into_iter().for_each(|_| ());
let res = thread.join();
match &res {
Ok(()) => log::info!("... Watcher terminated with ok"),
Err(_) => log::error!("... Watcher terminated with err"),
}
res.unwrap();
},
);
Worker { worker, worker_handle }
}
pub(crate) fn sender(&self) -> &Sender<Task> {
&self.worker.inp
}
pub(crate) fn receiver(&self) -> &Receiver<TaskResult> {
&self.worker.out
}
pub(crate) fn shutdown(self) -> thread::Result<()> {
let _ = self.worker.shutdown();
self.worker_handle.shutdown()
}
}
// Drain pending events: we are not interested in them anyways!
watcher_receiver.into_iter().for_each(|_| ());
},
)
}
fn watch_root(
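
The comments in this file carry the two tricky ideas: a thread is burned translating from notify's `std::sync::mpsc` channel into a crossbeam channel, and the pieces must be destroyed in a fixed order (notify sender, bridge thread, watcher sender) to avoid deadlock. Here is a hedged sketch of the bridge pattern, using a plain `std::thread` in place of `ScopedThread`; all names are illustrative, and it assumes the crossbeam-channel crate this code already depends on.

```rust
use std::{sync::mpsc, thread};

use crossbeam_channel::unbounded;

fn main() {
    // The consumer side lives on crossbeam; notify can only write to mpsc.
    let (crossbeam_tx, crossbeam_rx) = unbounded::<String>();
    let bridge;
    {
        let (mpsc_tx, mpsc_rx) = mpsc::channel::<String>();
        // The "silly" bridge thread: forward every mpsc event to crossbeam.
        bridge = thread::spawn(move || {
            for event in mpsc_rx {
                crossbeam_tx.send(event).unwrap();
            }
        });
        mpsc_tx.send("file changed".to_string()).unwrap();
        // `mpsc_tx` drops here, which ends the bridge's loop; dropping the
        // sender before joining is exactly the ordering the
        // destruction-order comment above insists on.
    }
    // The bridge thread has dropped `crossbeam_tx`, so this loop ends too.
    for event in crossbeam_rx {
        println!("got: {}", event);
    }
    bridge.join().unwrap();
}
```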


@@ -22,7 +22,6 @@ use std::{
fmt, fs, mem,
path::{Path, PathBuf},
sync::Arc,
thread,
};
use crossbeam_channel::Receiver;
@@ -160,7 +159,7 @@ impl fmt::Debug for Vfs {
impl Vfs {
pub fn new(roots: Vec<PathBuf>) -> (Vfs, Vec<VfsRoot>) {
let roots = Arc::new(Roots::new(roots));
let worker = io::Worker::start(Arc::clone(&roots));
let worker = io::start(Arc::clone(&roots));
let mut root2files = ArenaMap::default();
for (root, config) in roots.iter() {
@@ -337,11 +336,6 @@ impl Vfs {
mem::replace(&mut self.pending_changes, Vec::new())
}
/// Shutdown the VFS and terminate the background watching thread.
pub fn shutdown(self) -> thread::Result<()> {
self.worker.shutdown()
}
fn add_file(
&mut self,
root: VfsRoot,


@@ -158,6 +158,5 @@ fn test_vfs_works() -> std::io::Result<()> {
Err(RecvTimeoutError::Timeout)
);
vfs.shutdown().unwrap();
Ok(())
}


@@ -5,7 +5,6 @@ version = "0.1.0"
authors = ["rust-analyzer developers"]
[dependencies]
drop_bomb = "0.1.0"
crossbeam-channel = "0.3.5"
log = "0.4.3"


@@ -2,74 +2,80 @@
use std::thread;
use crossbeam_channel::{bounded, unbounded, Receiver, Sender, RecvError, SendError};
use drop_bomb::DropBomb;
use crossbeam_channel::{bounded, unbounded, Receiver, Sender};
/// Like `std::thread::JoinHandle<()>`, but joins the thread on drop automatically.
pub struct ScopedThread {
// Option for drop
inner: Option<thread::JoinHandle<()>>,
}
impl Drop for ScopedThread {
fn drop(&mut self) {
let inner = self.inner.take().unwrap();
let name = inner.thread().name().unwrap().to_string();
log::info!("waiting for {} to finish...", name);
let res = inner.join();
log::info!(".. {} terminated with {}", name, if res.is_ok() { "ok" } else { "err" });
// escalate panic, but avoid aborting the process
match res {
Err(e) => {
if !thread::panicking() {
panic!(e)
}
}
_ => (),
}
}
}
impl ScopedThread {
pub fn spawn(name: &'static str, f: impl FnOnce() + Send + 'static) -> ScopedThread {
let inner = thread::Builder::new().name(name.into()).spawn(f).unwrap();
ScopedThread { inner: Some(inner) }
}
}
/// A wrapper around event-processing thread with automatic shutdown semantics.
pub struct Worker<I, O> {
pub inp: Sender<I>,
pub out: Receiver<O>,
}
pub struct WorkerHandle {
name: &'static str,
thread: thread::JoinHandle<()>,
bomb: DropBomb,
}
pub fn spawn<I, O, F>(name: &'static str, buf: usize, f: F) -> (Worker<I, O>, WorkerHandle)
where
F: FnOnce(Receiver<I>, Sender<O>) + Send + 'static,
I: Send + 'static,
O: Send + 'static,
{
let (worker, inp_r, out_s) = worker_chan(buf);
let watcher = WorkerHandle::spawn(name, move || f(inp_r, out_s));
(worker, watcher)
// XXX: field order is significant here.
//
// In Rust, fields are dropped in the declaration order, and we rely on this
// here. We must close the input first, so that the `thread` (which holds the
// opposite side of the channel) notices shutdown. Then we must join the
// thread, but we must keep the receiver alive so that the thread does not panic.
//
// Note that a potential problem here is that we might drop some messages
// from the receiver on the floor. This is ok for rust-analyzer: we have only a
// single client, so, if we are shutting down, nobody is interested in the
// unfinished work anyway!
sender: Sender<I>,
_thread: ScopedThread,
receiver: Receiver<O>,
}
impl<I, O> Worker<I, O> {
/// Stops the worker. Returns the message receiver to fetch results which
/// have become ready before the worker is stopped.
pub fn shutdown(self) -> Receiver<O> {
self.out
}
pub fn send(&self, item: I) -> Result<(), SendError<I>> {
self.inp.send(item)
}
pub fn recv(&self) -> Result<O, RecvError> {
self.out.recv()
pub fn spawn<F>(name: &'static str, buf: usize, f: F) -> Worker<I, O>
where
F: FnOnce(Receiver<I>, Sender<O>) + Send + 'static,
I: Send + 'static,
O: Send + 'static,
{
// Set up worker channels in a deadlock-avoiding way. If one sets both input
// and output buffers to a fixed size, a worker might get stuck.
let (sender, input_receiver) = bounded::<I>(buf);
let (output_sender, receiver) = unbounded::<O>();
let _thread = ScopedThread::spawn(name, move || f(input_receiver, output_sender));
Worker { sender, _thread, receiver }
}
}
impl WorkerHandle {
fn spawn(name: &'static str, f: impl FnOnce() + Send + 'static) -> WorkerHandle {
let thread = thread::spawn(f);
WorkerHandle {
name,
thread,
bomb: DropBomb::new(format!("WorkerHandle {} was not shutdown", name)),
}
impl<I, O> Worker<I, O> {
pub fn sender(&self) -> &Sender<I> {
&self.sender
}
pub fn shutdown(mut self) -> thread::Result<()> {
log::info!("waiting for {} to finish ...", self.name);
let name = self.name;
self.bomb.defuse();
let res = self.thread.join();
match &res {
Ok(()) => log::info!("... {} terminated with ok", name),
Err(_) => log::error!("... {} terminated with err", name),
}
res
pub fn receiver(&self) -> &Receiver<O> {
&self.receiver
}
}
/// Sets up worker channels in a deadlock-avoiding way.
/// If one sets both input and output buffers to a fixed size,
/// a worker might get stuck.
fn worker_chan<I, O>(buf: usize) -> (Worker<I, O>, Receiver<I>, Sender<O>) {
let (input_sender, input_receiver) = bounded::<I>(buf);
let (output_sender, output_receiver) = unbounded::<O>();
(Worker { inp: input_sender, out: output_receiver }, input_receiver, output_sender)
}
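
Taken together, the new `thread_worker` rests on two small guarantees: `ScopedThread` joins its thread on drop (re-raising any panic rather than losing it), and `Worker`'s field order closes the input sender before the join while keeping the receiver alive. A short sketch of what the drop-join buys, assuming the definitions above; the "greeter" thread is illustrative:

```rust
fn main() {
    {
        let _t = thread_worker::ScopedThread::spawn("greeter", || {
            println!("hello from the worker thread");
        });
        // `_t` drops at the end of this scope: Drop joins the thread, so
        // the greeting is guaranteed to have been printed by now. Had the
        // closure panicked, Drop would re-raise that panic here.
    }
    println!("the worker has definitely finished");
}
```

The bounded-input, unbounded-output split in `spawn` completes the design: a caller may block on a full input queue, but the worker can always publish results without blocking, so the two sides cannot deadlock each other.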