From 5c0feb86b9d51e1ac80321b10dde43d1dbc3c042 Mon Sep 17 00:00:00 2001
From: Ariel Ben-Yehuda
Date: Wed, 13 Sep 2017 01:09:56 +0300
Subject: [PATCH] add proofs and fix postorder traversal

I don't think the "quasi-postorder" traversal could cause any issues,
but there's no reason for it to stay broken.
---
 src/librustc/hir/intravisit.rs | 18 +++++++---
 src/librustc/middle/region.rs | 36 ++++++++++++++++---
 .../check/generator_interior.rs | 12 ++++---
 src/test/ui/generator/not-send-sync.stderr | 8 ++---
 4 files changed, 57 insertions(+), 17 deletions(-)

diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs
index 216a6d025e3..29147a2a50a 100644
--- a/src/librustc/hir/intravisit.rs
+++ b/src/librustc/hir/intravisit.rs
@@ -27,11 +27,19 @@
 //! for more details.
 //!
 //! Note: it is an important invariant that the default visitor walks
-//! the body of a function in "execution order" (more concretely,
-//! reverse post-order with respect to the CFG implied by the AST),
-//! meaning that if AST node A may execute before AST node B, then A
-//! is visited first. The borrow checker in particular relies on this
-//! property.
+//! the body of a function in "execution order" - more concretely, if
+//! we consider the reverse post-order (RPO) of the CFG implied by the HIR,
+//! then a pre-order traversal of the HIR is consistent with the CFG RPO
+//! on the *initial CFG point* of each HIR node, while a post-order traversal
+//! of the HIR is consistent with the CFG RPO on the *final CFG point* of
+//! each HIR node.
+//!
+//! One thing that follows is that if HIR node A always starts/ends executing
+//! before HIR node B, then A appears in traversal pre/postorder before B,
+//! respectively. (This follows from RPO respecting CFG domination).
+//!
+//! This order consistency is required in a few places in rustc, for
+//! example generator inference, and possibly also HIR borrowck.
 
 use syntax::abi::Abi;
 use syntax::ast::{NodeId, CRATE_NODE_ID, Name, Attribute};
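The traversal/CFG correspondence documented above can be seen on a toy tree. Below is a
minimal, self-contained sketch (plain Rust with made-up names - `Expr` and `number` are
not rustc types) that gives every node one index when it is entered (pre-order, its
"initial CFG point") and one when it is left (post-order, its "final CFG point"). Because
operands are evaluated before the operation that contains them, a node that finishes
executing earlier always receives a smaller post-order index, which is the property the
generator changes below rely on.

// Toy expression tree standing in for HIR: operands are evaluated
// before their parent operation, mirroring how HIR expressions execute.
enum Expr {
    Lit(i32),
    Add(Box<Expr>, Box<Expr>),
}

fn number(expr: &Expr, pre: &mut u32, post: &mut u32, out: &mut Vec<(String, u32, u32)>) {
    let label = match expr {
        Expr::Lit(n) => format!("Lit({})", n),
        Expr::Add(..) => "Add".to_string(),
    };
    // Pre-order index: assigned when we *enter* the node
    // (its "initial CFG point").
    let pre_idx = *pre;
    *pre += 1;
    // Children are walked (and, in the toy model, "execute") before the
    // parent finishes.
    if let Expr::Add(lhs, rhs) = expr {
        number(lhs, pre, post, out);
        number(rhs, pre, post, out);
    }
    // Post-order index: assigned when we *leave* the node
    // (its "final CFG point").
    let post_idx = *post;
    *post += 1;
    out.push((label, pre_idx, post_idx));
}

fn main() {
    // (1 + 2) + 3: the inner `Add` finishes executing before the outer
    // one, and accordingly gets the smaller post-order index.
    let e = Expr::Add(
        Box::new(Expr::Add(Box::new(Expr::Lit(1)), Box::new(Expr::Lit(2)))),
        Box::new(Expr::Lit(3)),
    );
    let (mut pre, mut post) = (0, 0);
    let mut out = Vec::new();
    number(&e, &mut pre, &mut post, &mut out);
    // Printed in post-order: a node is listed only after everything that
    // finishes before it.
    for (label, pre_idx, post_idx) in out {
        println!("{:6} pre = {}, post = {}", label, pre_idx, post_idx);
    }
}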
diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs
index fef8431debb..d5b6e388bde 100644
--- a/src/librustc/middle/region.rs
+++ b/src/librustc/middle/region.rs
@@ -249,8 +249,36 @@ pub struct ScopeTree {
     closure_tree: FxHashMap,
 
     /// If there are any `yield` nested within a scope, this map
-    /// stores the `Span` of the last one and the number of expressions
-    /// which came before it in a generator body.
+    /// stores the `Span` of the last one and its index in the
+    /// postorder of the Visitor traversal on the HIR.
+    ///
+    /// HIR Visitor postorder indexes might seem like a peculiar
+    /// thing to care about, but it turns out that HIR bindings
+    /// and the temporary results of HIR expressions are never
+    /// storage-live at the end of HIR nodes with postorder indexes
+    /// lower than theirs, and therefore don't need to be suspended
+    /// at yield-points at these indexes.
+    ///
+    /// Let's show that: let `D` be our binding/temporary and `U` be our
+    /// other HIR node, with `HIR-postorder(U) < HIR-postorder(D)`.
+    ///
+    /// Then:
+    /// 1. From the ordering guarantee of HIR visitors (see
+    ///    `rustc::hir::intravisit`), `D` does not dominate `U`.
+    /// 2. Therefore, `D` is *potentially* storage-dead at `U` (because
+    ///    we might reach `U` without ever having executed `D`).
+    /// 3. However, we guarantee that at each HIR point, each
+    ///    binding/temporary is either always storage-live
+    ///    or always storage-dead. This is what is guaranteed
+    ///    by `terminating_scopes` including all blocks where the
+    ///    number of executions is not guaranteed.
+    /// 4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`,
+    ///    QED.
+    ///
+    /// I don't think this property relies on `3.` in an essential way - it
+    /// is probably still correct even if we have "unrestricted" terminating
+    /// scopes. However, why use the complicated proof when a simple one
+    /// works?
     yield_in_scope: FxHashMap,
 
     /// The number of visit_expr calls done in the body.
@@ -754,8 +782,6 @@ fn resolve_stmt<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, stmt:
 fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: &'tcx hir::Expr) {
     debug!("resolve_expr(expr.id={:?})", expr.id);
 
-    visitor.expr_count += 1;
-
     let prev_cx = visitor.cx;
     visitor.enter_node_scope_with_dtor(expr.hir_id.local_id);
 
@@ -837,6 +863,8 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr:
         _ => intravisit::walk_expr(visitor, expr)
     }
 
+    visitor.expr_count += 1;
+
     if let hir::ExprYield(..) = expr.node {
         // Mark this expr's scope and all parent scopes as containing `yield`.
         let mut scope = Scope::Node(expr.hir_id.local_id);
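To make the counters above concrete, here is a toy sketch (plain Rust; `Expr`, `index`,
and the variant names are invented for illustration, not rustc APIs) of the scheme the
two `resolve_expr` hunks set up: the counter is bumped only after the children have been
walked, so it is a genuine post-order index; the last `yield`'s index is remembered; and
a temporary can only need to be kept alive across a suspension if that remembered index
is at least as large as the temporary's own index - the comparison that
`generator_interior.rs` performs below.

// Toy model of the postorder numbering: bump the counter *after* walking
// the children, record the index of the last "yield", and compare it
// against each temporary's index.
enum Expr {
    Temp(&'static str),   // an expression producing a temporary
    Yield(Box<Expr>),     // a yield with an operand
    Seq(Vec<Expr>),       // evaluate sub-expressions in order
}

fn index(
    expr: &Expr,
    counter: &mut usize,
    last_yield: &mut Option<usize>,
    temps: &mut Vec<(&'static str, usize)>,
) {
    // Walk children first ...
    match expr {
        Expr::Temp(_) => {}
        Expr::Yield(operand) => index(operand, counter, last_yield, temps),
        Expr::Seq(items) => {
            for item in items {
                index(item, counter, last_yield, temps);
            }
        }
    }
    // ... then claim the next index: this makes it a post-order index.
    let idx = *counter;
    *counter += 1;
    match expr {
        Expr::Temp(name) => temps.push((*name, idx)),
        Expr::Yield(_) => *last_yield = Some(idx),
        Expr::Seq(_) => {}
    }
}

fn main() {
    // Roughly: { temp_a; yield temp_b; temp_c }
    let body = Expr::Seq(vec![
        Expr::Temp("temp_a"),
        Expr::Yield(Box::new(Expr::Temp("temp_b"))),
        Expr::Temp("temp_c"),
    ]);
    let mut counter = 0;
    let mut last_yield = None;
    let mut temps = Vec::new();
    index(&body, &mut counter, &mut last_yield, &mut temps);

    let last_yield = last_yield.expect("the body contains a yield");
    for (name, idx) in temps {
        // Only a temporary whose post-order index is <= the last yield's
        // index can still be around when some yield suspends execution;
        // anything numbered after the last yield can be ignored.
        if last_yield >= idx {
            println!("{} (postorder {}) may need to live across a yield", name, idx);
        } else {
            println!("{} (postorder {}) can never be live across a yield", name, idx);
        }
    }
}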
diff --git a/src/librustc_typeck/check/generator_interior.rs b/src/librustc_typeck/check/generator_interior.rs
index 8f4ee6290c1..168e0251caf 100644
--- a/src/librustc_typeck/check/generator_interior.rs
+++ b/src/librustc_typeck/check/generator_interior.rs
@@ -35,7 +35,12 @@ impl<'a, 'gcx, 'tcx> InteriorVisitor<'a, 'gcx, 'tcx> {
 
         let live_across_yield = scope.map_or(Some(DUMMY_SP), |s| {
             self.region_scope_tree.yield_in_scope(s).and_then(|(span, expr_count)| {
-                // Check if the span in the region comes after the expression
+                // If we are recording an expression that is the last yield
+                // in the scope, or that has a postorder CFG index larger
+                // than that of all of the yields, then its value can't
+                // be storage-live (and therefore live) at any of the yields.
+                //
+                // See the mega-comment at `yield_in_scope` for a proof.
                 if expr.is_none() || expr_count >= self.expr_count {
                     Some(span)
                 } else {
@@ -114,14 +119,13 @@ impl<'a, 'gcx, 'tcx> Visitor<'tcx> for InteriorVisitor<'a, 'gcx, 'tcx> {
     }
 
     fn visit_expr(&mut self, expr: &'tcx Expr) {
+        intravisit::walk_expr(self, expr);
+
         self.expr_count += 1;
 
         let scope = self.region_scope_tree.temporary_scope(expr.hir_id.local_id);
-
         let ty = self.fcx.tables.borrow().expr_ty_adjusted(expr);
         self.record(ty, scope, Some(expr));
-
-        intravisit::walk_expr(self, expr);
     }
 }
diff --git a/src/test/ui/generator/not-send-sync.stderr b/src/test/ui/generator/not-send-sync.stderr
index e0c32a95e0d..a1f110accc1 100644
--- a/src/test/ui/generator/not-send-sync.stderr
+++ b/src/test/ui/generator/not-send-sync.stderr
@@ -9,15 +9,15 @@ error[E0277]: the trait bound `std::cell::Cell: std::marker::Sync` is not s
    = note: required because it appears within the type `[generator@$DIR/not-send-sync.rs:26:17: 30:6 a:&std::cell::Cell _]`
    = note: required by `main::assert_send`
 
-error[E0277]: the trait bound `std::cell::Cell: std::marker::Sync` is not satisfied in `[generator@$DIR/not-send-sync.rs:19:17: 23:6 ((), std::cell::Cell)]`
+error[E0277]: the trait bound `std::cell::Cell: std::marker::Sync` is not satisfied in `[generator@$DIR/not-send-sync.rs:19:17: 23:6 (std::cell::Cell, ())]`
   --> $DIR/not-send-sync.rs:19:5
    |
 19 |     assert_sync(|| {
    |     ^^^^^^^^^^^ `std::cell::Cell` cannot be shared between threads safely
    |
-   = help: within `[generator@$DIR/not-send-sync.rs:19:17: 23:6 ((), std::cell::Cell)]`, the trait `std::marker::Sync` is not implemented for `std::cell::Cell`
-   = note: required because it appears within the type `((), std::cell::Cell)`
-   = note: required because it appears within the type `[generator@$DIR/not-send-sync.rs:19:17: 23:6 ((), std::cell::Cell)]`
+   = help: within `[generator@$DIR/not-send-sync.rs:19:17: 23:6 (std::cell::Cell, ())]`, the trait `std::marker::Sync` is not implemented for `std::cell::Cell`
+   = note: required because it appears within the type `(std::cell::Cell, ())`
+   = note: required because it appears within the type `[generator@$DIR/not-send-sync.rs:19:17: 23:6 (std::cell::Cell, ())]`
    = note: required by `main::assert_sync`
 
 error: aborting due to 2 previous errors
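For context on the expected-output change above: the test source itself is not part of
this patch, but a program of the same shape looks roughly like the sketch below (an
assumption - the exact test file, its spans, and the `Cell`'s payload type are not shown
here, and the nightly `generators` feature of that era is used). It fails with the same
E0277, because the generator keeps a `Cell` live across a `yield`, the `Cell` therefore
ends up in the generator's interior type, and that interior is what the `Sync` bound is
checked against.

#![feature(generators)]

use std::cell::Cell;

fn main() {
    fn assert_sync<T: Sync>(_: T) {}

    // The closure becomes a generator because its body contains `yield`.
    // `a` is created before the `yield` and used after it, so it must be
    // stored in the generator across the suspension; the generator's
    // interior therefore contains the `Cell`, and `Cell` is not `Sync`,
    // which is what the E0277 in the .stderr above reports.
    assert_sync(|| {
        let a = Cell::new(2);
        yield;
        a.set(3);
    });
}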