diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index f207216d6f4..03b1d426df4 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -382,7 +382,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
         place: PlaceIndex,
         mut operand: OpTy<'tcx>,
         projection: &[PlaceElem<'tcx>],
-    ) -> Option<!> {
+    ) {
         for &(mut proj_elem) in projection {
             if let PlaceElem::Index(index) = proj_elem {
                 if let FlatSet::Elem(index) = state.get(index.into(), &self.map)
@@ -391,10 +391,14 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
                 {
                     proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false };
                 } else {
-                    return None;
+                    return;
                 }
             }
-            operand = self.ecx.project(&operand, proj_elem).ok()?;
+            operand = if let Ok(operand) = self.ecx.project(&operand, proj_elem) {
+                operand
+            } else {
+                return;
+            }
         }

         self.map.for_each_projection_value(
@@ -426,8 +430,6 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
                 }
             },
         );
-
-        None
     }

     fn binary_op(
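Every function touched by this patch used to return `Option<!>`: since the never type makes `Some` impossible, `?` doubled as an early-exit mechanism and each function ended in a dead `None`. The patch switches them to plain `()` returns with explicit `return` and `let .. else`. A standalone toy sketch of the two styles (hypothetical `lookup`/`process` names, not rustc code; the before-style needs the unstable never type, so it is shown in a comment):

// Toy sketch of the refactoring pattern; `lookup` and `process` are made up.
fn lookup(key: u32) -> Option<u32> {
    (key < 10).then(|| key * 2)
}

// Before-style, as in the removed lines (requires the unstable `!` type):
//
//     fn process(key: u32) -> Option<!> {
//         let v = lookup(key)?; // `?` is the early exit
//         println!("{v}");
//         None                  // dead value at the end of every such fn
//     }

// After-style, as in the added lines: `()` return, explicit early exit.
fn process(key: u32) {
    let Some(v) = lookup(key) else { return };
    println!("{v}");
}

fn main() {
    process(3);  // prints "6"
    process(42); // exits early, prints nothing
}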
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 96c52845a4a..8997b1d57cd 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -191,26 +191,26 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {

     /// Recursion entry point to find threading opportunities.
     #[instrument(level = "trace", skip(self))]
-    fn start_from_switch(&mut self, bb: BasicBlock) -> Option<!> {
+    fn start_from_switch(&mut self, bb: BasicBlock) {
         let bbdata = &self.body[bb];
         if bbdata.is_cleanup || self.loop_headers.contains(bb) {
-            return None;
+            return;
         }
-        let (discr, targets) = bbdata.terminator().kind.as_switch()?;
-        let discr = discr.place()?;
+        let Some((discr, targets)) = bbdata.terminator().kind.as_switch() else { return };
+        let Some(discr) = discr.place() else { return };
         debug!(?discr, ?bb);

         let discr_ty = discr.ty(self.body, self.tcx).ty;
-        let discr_layout = self.ecx.layout_of(discr_ty).ok()?;
+        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };

-        let discr = self.map.find(discr.as_ref())?;
+        let Some(discr) = self.map.find(discr.as_ref()) else { return };
         debug!(?discr);

         let cost = CostChecker::new(self.tcx, self.param_env, None, self.body);
         let mut state = State::new_reachable();

         let conds = if let Some((value, then, else_)) = targets.as_static_if() {
-            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return };
             self.arena.alloc_from_iter([
                 Condition { value, polarity: Polarity::Eq, target: then },
                 Condition { value, polarity: Polarity::Ne, target: else_ },
@@ -225,7 +225,6 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         state.insert_value_idx(discr, conds, self.map);

         self.find_opportunity(bb, state, cost, 0);
-        None
     }

     /// Recursively walk statements backwards from this bb's terminator to find threading
@@ -364,18 +363,17 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         lhs: PlaceIndex,
         rhs: ImmTy<'tcx>,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
+    ) {
         let register_opportunity = |c: Condition| {
             debug!(?bb, ?c.target, "register");
             self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
         };

-        let conditions = state.try_get_idx(lhs, self.map)?;
-        if let Immediate::Scalar(Scalar::Int(int)) = *rhs {
+        if let Some(conditions) = state.try_get_idx(lhs, self.map)
+            && let Immediate::Scalar(Scalar::Int(int)) = *rhs
+        {
             conditions.iter_matches(int).for_each(register_opportunity);
         }
-
-        None
     }

     /// If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
@@ -428,22 +426,23 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         lhs: PlaceIndex,
         rhs: &Operand<'tcx>,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
+    ) {
         match rhs {
             // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
             Operand::Constant(constant) => {
-                let constant =
-                    self.ecx.eval_mir_constant(&constant.const_, constant.span, None).ok()?;
+                let Ok(constant) =
+                    self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
+                else {
+                    return;
+                };
                 self.process_constant(bb, lhs, constant, state);
             }
             // Transfer the conditions on the copied rhs.
             Operand::Move(rhs) | Operand::Copy(rhs) => {
-                let rhs = self.map.find(rhs.as_ref())?;
+                let Some(rhs) = self.map.find(rhs.as_ref()) else { return };
                 state.insert_place_idx(rhs, lhs, self.map);
             }
         }
-
-        None
     }

     #[instrument(level = "trace", skip(self))]
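The same rewrite applies to `Result`-producing calls: `expr.ok()?` becomes `let Ok(x) = expr else { return };`, which keeps the happy path flat and drops the detour through `Option`. A runnable toy version (hypothetical names, not rustc code):

// Toy sketch: converting `.ok()?` chains to `let .. else { return }`.
use std::num::ParseIntError;

fn parse(s: &str) -> Result<u64, ParseIntError> {
    s.parse()
}

fn report(s: &str) {
    // Was, in the Option<!> style: let n = parse(s).ok()?;
    let Ok(n) = parse(s) else { return };
    println!("parsed {n}");
}

fn main() {
    report("42");           // prints "parsed 42"
    report("not a number"); // early return, prints nothing
}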
@@ -453,16 +452,14 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         lhs_place: &Place<'tcx>,
         rhs: &Rvalue<'tcx>,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
-        let lhs = self.map.find(lhs_place.as_ref())?;
+    ) {
+        let Some(lhs) = self.map.find(lhs_place.as_ref()) else { return };
         match rhs {
-            Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state)?,
+            Rvalue::Use(operand) => self.process_operand(bb, lhs, operand, state),
             // Transfer the conditions on the copy rhs.
-            Rvalue::CopyForDeref(rhs) => {
-                self.process_operand(bb, lhs, &Operand::Copy(*rhs), state)?
-            }
+            Rvalue::CopyForDeref(rhs) => self.process_operand(bb, lhs, &Operand::Copy(*rhs), state),
             Rvalue::Discriminant(rhs) => {
-                let rhs = self.map.find_discr(rhs.as_ref())?;
+                let Some(rhs) = self.map.find_discr(rhs.as_ref()) else { return };
                 state.insert_place_idx(rhs, lhs, self.map);
             }
             // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
@@ -470,7 +467,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
                 let agg_ty = lhs_place.ty(self.body, self.tcx).ty;
                 let lhs = match kind {
                     // Do not support unions.
-                    AggregateKind::Adt(.., Some(_)) => return None,
+                    AggregateKind::Adt(.., Some(_)) => return,
                     AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
                         if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
                             && let Ok(discr_value) =
@@ -478,7 +475,11 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
                         {
                             self.process_immediate(bb, discr_target, discr_value, state);
                         }
-                        self.map.apply(lhs, TrackElem::Variant(*variant_index))?
+                        if let Some(idx) = self.map.apply(lhs, TrackElem::Variant(*variant_index)) {
+                            idx
+                        } else {
+                            return;
+                        }
                     }
                     _ => lhs,
                 };
@@ -490,8 +491,8 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             }
             // Transfer the conditions on the copy rhs, after inversing polarity.
             Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => {
-                let conditions = state.try_get_idx(lhs, self.map)?;
-                let place = self.map.find(place.as_ref())?;
+                let Some(conditions) = state.try_get_idx(lhs, self.map) else { return };
+                let Some(place) = self.map.find(place.as_ref()) else { return };
                 let conds = conditions.map(self.arena, Condition::inv);
                 state.insert_value_idx(place, conds, self.map);
             }
@@ -502,21 +503,25 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
                 box (Operand::Move(place) | Operand::Copy(place), Operand::Constant(value))
                 | box (Operand::Constant(value), Operand::Move(place) | Operand::Copy(place)),
             ) => {
-                let conditions = state.try_get_idx(lhs, self.map)?;
-                let place = self.map.find(place.as_ref())?;
+                let Some(conditions) = state.try_get_idx(lhs, self.map) else { return };
+                let Some(place) = self.map.find(place.as_ref()) else { return };
                 let equals = match op {
                     BinOp::Eq => ScalarInt::TRUE,
                     BinOp::Ne => ScalarInt::FALSE,
-                    _ => return None,
+                    _ => return,
                 };
                 if value.const_.ty().is_floating_point() {
                     // Floating point equality does not follow bit-patterns.
                     // -0.0 and NaN both have special rules for equality,
                     // and therefore we cannot use integer comparisons for them.
                     // Avoid handling them, though this could be extended in the future.
-                    return None;
+                    return;
                 }
-                let value = value.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()?;
+                let Some(value) =
+                    value.const_.normalize(self.tcx, self.param_env).try_to_scalar_int()
+                else {
+                    return;
+                };
                 let conds = conditions.map(self.arena, |c| Condition {
                     value,
                     polarity: if c.matches(equals) { Polarity::Eq } else { Polarity::Ne },
@@ -527,8 +532,6 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {

             _ => {}
         }
-
-        None
     }

     #[instrument(level = "trace", skip(self))]
@@ -537,7 +540,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         bb: BasicBlock,
         stmt: &Statement<'tcx>,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
+    ) {
         let register_opportunity = |c: Condition| {
             debug!(?bb, ?c.target, "register");
             self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target })
@@ -550,12 +553,12 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             // If we expect `discriminant(place) ?= A`,
             // we have an opportunity if `variant_index ?= A`.
             StatementKind::SetDiscriminant { box place, variant_index } => {
-                let discr_target = self.map.find_discr(place.as_ref())?;
+                let Some(discr_target) = self.map.find_discr(place.as_ref()) else { return };
                 let enum_ty = place.ty(self.body, self.tcx).ty;
                 // `SetDiscriminant` may be a no-op if the assigned variant is the untagged variant
                 // of a niche encoding. If we cannot ensure that we write to the discriminant, do
                 // nothing.
-                let enum_layout = self.ecx.layout_of(enum_ty).ok()?;
+                let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { return };
                 let writes_discriminant = match enum_layout.variants {
                     Variants::Single { index } => {
                         assert_eq!(index, *variant_index);
@@ -568,24 +571,25 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
                     } => *variant_index != untagged_variant,
                 };
                 if writes_discriminant {
-                    let discr = self.ecx.discriminant_for_variant(enum_ty, *variant_index).ok()?;
-                    self.process_immediate(bb, discr_target, discr, state)?;
+                    let Ok(discr) = self.ecx.discriminant_for_variant(enum_ty, *variant_index)
+                    else {
+                        return;
+                    };
+                    self.process_immediate(bb, discr_target, discr, state);
                 }
             }
             // If we expect `lhs ?= true`, we have an opportunity if we assume `lhs == true`.
             StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(
                 Operand::Copy(place) | Operand::Move(place),
             )) => {
-                let conditions = state.try_get(place.as_ref(), self.map)?;
+                let Some(conditions) = state.try_get(place.as_ref(), self.map) else { return };
                 conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity);
             }
             StatementKind::Assign(box (lhs_place, rhs)) => {
-                self.process_assign(bb, lhs_place, rhs, state)?;
+                self.process_assign(bb, lhs_place, rhs, state);
             }
             _ => {}
         }
-
-        None
     }

     #[instrument(level = "trace", skip(self, state, cost))]
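For `lhs = !rhs`, the hunk above transfers every condition expected of `lhs` onto `rhs` with inverted polarity (`Condition::inv`). A toy model of that bookkeeping (simplified types, not rustc's `Condition`):

// Toy model of polarity inversion; rustc's `Condition` stores a ScalarInt
// and a jump target, here reduced to a bool.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Polarity {
    Eq,
    Ne,
}

#[derive(Clone, Copy, Debug)]
struct Condition {
    value: bool,
    polarity: Polarity,
}

impl Condition {
    // Flip Eq <-> Ne, keeping the expected value.
    fn inv(self) -> Self {
        let polarity = match self.polarity {
            Polarity::Eq => Polarity::Ne,
            Polarity::Ne => Polarity::Eq,
        };
        Condition { polarity, ..self }
    }
}

fn main() {
    let on_lhs = Condition { value: true, polarity: Polarity::Eq };
    // `lhs = !rhs`, so expecting `lhs == true` means expecting `rhs != true`.
    let on_rhs = on_lhs.inv();
    println!("{on_rhs:?}");
    assert_eq!(on_rhs.polarity, Polarity::Ne);
}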
@@ -638,17 +642,17 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         targets: &SwitchTargets,
         target_bb: BasicBlock,
         state: &mut State<ConditionSet<'a>>,
-    ) -> Option<!> {
+    ) {
         debug_assert_ne!(target_bb, START_BLOCK);
         debug_assert_eq!(self.body.basic_blocks.predecessors()[target_bb].len(), 1);

-        let discr = discr.place()?;
+        let Some(discr) = discr.place() else { return };
         let discr_ty = discr.ty(self.body, self.tcx).ty;
-        let discr_layout = self.ecx.layout_of(discr_ty).ok()?;
-        let conditions = state.try_get(discr.as_ref(), self.map)?;
+        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
+        let Some(conditions) = state.try_get(discr.as_ref(), self.map) else { return };

         if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
-            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return };

             debug_assert_eq!(targets.iter().filter(|&(_, target)| target == target_bb).count(), 1);
             // We are inside `target_bb`. Since we have a single predecessor, we know we passed
@@ -662,7 +666,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
         } else if let Some((value, _, else_bb)) = targets.as_static_if()
             && target_bb == else_bb
         {
-            let value = ScalarInt::try_from_uint(value, discr_layout.size)?;
+            let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return };

             // We only know that `discr != value`. That's much weaker information than
             // the equality we had in the previous arm. All we can conclude is that
@@ -675,8 +679,6 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
             }
         }
-
-        None
     }
 }
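The `process_switch` hunks encode an asymmetry worth spelling out: landing in a block reached from a specific switch arm proves `discr == value`, while landing in the `else` edge of a two-way switch only proves `discr != value`. A toy model of that inference (made-up types, not rustc's `SwitchTargets`):

// Toy model: what a single-predecessor target block learns about `discr`.
#[derive(Debug)]
enum Fact {
    Eq(u128), // we came through the arm guarded by this value
    Ne(u128), // we came through the `otherwise` edge of a static if
    Unknown,
}

fn fact_for_target(
    arms: &[(u128, &'static str)],
    otherwise: &'static str,
    target: &'static str,
) -> Fact {
    if let Some(&(value, _)) = arms.iter().find(|&&(_, t)| t == target) {
        // Reaching `target` proves the discriminant equalled `value`.
        Fact::Eq(value)
    } else if arms.len() == 1 && target == otherwise {
        // Two-way switch: the else edge only rules `value` out.
        Fact::Ne(arms[0].0)
    } else {
        Fact::Unknown
    }
}

fn main() {
    let arms = [(0u128, "bb1")];
    println!("{:?}", fact_for_target(&arms, "bb2", "bb1")); // Eq(0)
    println!("{:?}", fact_for_target(&arms, "bb2", "bb2")); // Ne(0)
}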
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 7eed47cf239..3427b93c1f6 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -469,12 +469,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         msg: &AssertKind<Operand<'tcx>>,
         cond: &Operand<'tcx>,
         location: Location,
-    ) -> Option<!> {
-        let value = &self.eval_operand(cond)?;
+    ) {
+        let Some(value) = &self.eval_operand(cond) else { return };
         trace!("assertion on {:?} should be {:?}", value, expected);

         let expected = Scalar::from_bool(expected);
-        let value_const = self.use_ecx(|this| this.ecx.read_scalar(value))?;
+        let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value)) else { return };

         if expected != value_const {
             // Poison all places this operand references so that further code
@@ -516,14 +516,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     AssertKind::BoundsCheck { len, index }
                 }
                 // Remaining overflow errors are already covered by checks on the binary operators.
-                AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => return None,
+                AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => return,
                 // Need proper const propagator for these.
-                _ => return None,
+                _ => return,
             };
             self.report_assert_as_lint(location, AssertLintKind::UnconditionalPanic, msg);
         }
-
-        None
     }

     fn ensure_not_propagated(&self, local: Local) {
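After the change, `check_assertion` reads as a straight-line function: each fallible lookup exits early, and the lint fires only when the constant-folded condition contradicts the expected value. A minimal toy sketch of that shape (hypothetical `eval_operand` environment, not rustc's interpreter):

// Toy sketch of the post-patch `check_assertion` control flow.
fn eval_operand(env: &[(&'static str, bool)], name: &str) -> Option<bool> {
    env.iter().find(|&&(n, _)| n == name).map(|&(_, v)| v)
}

fn check_assertion(env: &[(&'static str, bool)], cond: &str, expected: bool) {
    // Unknown condition: nothing to report, exit early.
    let Some(value) = eval_operand(env, cond) else { return };
    if value != expected {
        // Stand-in for `report_assert_as_lint`.
        println!("warning: this assertion on `{cond}` always panics");
    }
}

fn main() {
    let env = [("always_false", false)];
    check_assertion(&env, "always_false", true); // warns
    check_assertion(&env, "unknown", true);      // no info, no warning
}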