Rollup merge of #78668 - tmiasko:inline, r=oli-obk

inliner: Remove redundant loop

No functional changes intended.
Yuki Okushi 2020-11-03 15:27:19 +09:00 committed by GitHub
commit cf062179a8
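
Why the outer loop is redundant: the inner `while let Some(callsite) = callsites.pop_front()` loop is already a worklist. Call sites discovered in the freshly inlined blocks are pushed back onto the same `callsites` queue (see `callsites.push_back(new_callsite)` in the diff below), so by the time the queue is empty there is nothing left for another pass of the outer `loop` to find, and the `local_change` bookkeeping can never trigger additional inlining. The sketch below is a minimal standalone illustration of that worklist shape, not the compiler's code; `drain_worklist` and `process` are hypothetical names standing in for the pass itself and for the combination of `inline_call` and `get_valid_function_call`.

use std::collections::VecDeque;

// Minimal standalone sketch of the worklist shape discussed above; the names
// `drain_worklist` and `process` are illustrative only. `process` stands in
// for inlining one call site and returning the call sites found in the
// newly inlined blocks.
fn drain_worklist(seed: Vec<u32>, process: impl Fn(u32) -> Vec<u32>) -> bool {
    let mut worklist: VecDeque<u32> = seed.into_iter().collect();
    let mut changed = false;
    // One pass over the queue is enough: everything discovered while
    // processing an item is pushed onto the same queue and will be popped
    // before this loop can terminate, so wrapping it in an outer
    // "repeat until no local change" loop could never find additional work.
    while let Some(item) = worklist.pop_front() {
        for new_item in process(item) {
            worklist.push_back(new_item);
        }
        changed = true;
    }
    changed
}

fn main() {
    // Each item "discovers" one smaller item until reaching zero, mimicking
    // call sites exposed by inlining a callee body.
    assert!(drain_worklist(vec![3], |n| if n > 0 { vec![n - 1] } else { vec![] }));
}

In the actual pass, `changed` plays the same role as in this sketch: it only records whether anything was inlined at all, so that the caller body is simplified afterwards ("Simplify if we inlined anything").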

@@ -93,96 +93,79 @@ impl Inliner<'tcx> {
             return;
         }
 
-        let mut local_change;
         let mut changed = false;
-
-        loop {
-            local_change = false;
-            while let Some(callsite) = callsites.pop_front() {
-                debug!("checking whether to inline callsite {:?}", callsite);
+        while let Some(callsite) = callsites.pop_front() {
+            debug!("checking whether to inline callsite {:?}", callsite);
 
-                if let InstanceDef::Item(_) = callsite.callee.def {
-                    if !self.tcx.is_mir_available(callsite.callee.def_id()) {
-                        debug!(
-                            "checking whether to inline callsite {:?} - MIR unavailable",
-                            callsite,
-                        );
-                        continue;
-                    }
-                }
+            if let InstanceDef::Item(_) = callsite.callee.def {
+                if !self.tcx.is_mir_available(callsite.callee.def_id()) {
+                    debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite,);
+                    continue;
+                }
+            }
 
-                let callee_body = if let Some(callee_def_id) = callsite.callee.def_id().as_local() {
-                    let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
-                    // Avoid a cycle here by only using `instance_mir` only if we have
-                    // a lower `HirId` than the callee. This ensures that the callee will
-                    // not inline us. This trick only works without incremental compilation.
-                    // So don't do it if that is enabled. Also avoid inlining into generators,
-                    // since their `optimized_mir` is used for layout computation, which can
-                    // create a cycle, even when no attempt is made to inline the function
-                    // in the other direction.
-                    if !self.tcx.dep_graph.is_fully_enabled()
-                        && self_hir_id < callee_hir_id
-                        && caller_body.generator_kind.is_none()
-                    {
-                        self.tcx.instance_mir(callsite.callee.def)
-                    } else {
-                        continue;
-                    }
-                } else {
-                    // This cannot result in a cycle since the callee MIR is from another crate
-                    // and is already optimized.
-                    self.tcx.instance_mir(callsite.callee.def)
-                };
+            let callee_body = if let Some(callee_def_id) = callsite.callee.def_id().as_local() {
+                let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
+                // Avoid a cycle here by only using `instance_mir` only if we have
+                // a lower `HirId` than the callee. This ensures that the callee will
+                // not inline us. This trick only works without incremental compilation.
+                // So don't do it if that is enabled. Also avoid inlining into generators,
+                // since their `optimized_mir` is used for layout computation, which can
+                // create a cycle, even when no attempt is made to inline the function
+                // in the other direction.
+                if !self.tcx.dep_graph.is_fully_enabled()
+                    && self_hir_id < callee_hir_id
+                    && caller_body.generator_kind.is_none()
+                {
+                    self.tcx.instance_mir(callsite.callee.def)
+                } else {
+                    continue;
+                }
+            } else {
+                // This cannot result in a cycle since the callee MIR is from another crate
+                // and is already optimized.
+                self.tcx.instance_mir(callsite.callee.def)
+            };
 
-                let callee_body: &Body<'tcx> = &*callee_body;
+            let callee_body: &Body<'tcx> = &*callee_body;
 
-                let callee_body = if self.consider_optimizing(callsite, callee_body) {
-                    self.tcx.subst_and_normalize_erasing_regions(
-                        &callsite.callee.substs,
-                        self.param_env,
-                        callee_body,
-                    )
-                } else {
-                    continue;
-                };
+            let callee_body = if self.consider_optimizing(callsite, callee_body) {
+                self.tcx.subst_and_normalize_erasing_regions(
+                    &callsite.callee.substs,
+                    self.param_env,
+                    callee_body,
+                )
+            } else {
+                continue;
+            };
 
-                // Copy only unevaluated constants from the callee_body into the caller_body.
-                // Although we are only pushing `ConstKind::Unevaluated` consts to
-                // `required_consts`, here we may not only have `ConstKind::Unevaluated`
-                // because we are calling `subst_and_normalize_erasing_regions`.
-                caller_body.required_consts.extend(
-                    callee_body.required_consts.iter().copied().filter(|&constant| {
-                        matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _))
-                    }),
-                );
+            // Copy only unevaluated constants from the callee_body into the caller_body.
+            // Although we are only pushing `ConstKind::Unevaluated` consts to
+            // `required_consts`, here we may not only have `ConstKind::Unevaluated`
+            // because we are calling `subst_and_normalize_erasing_regions`.
+            caller_body.required_consts.extend(callee_body.required_consts.iter().copied().filter(
+                |&constant| matches!(constant.literal.val, ConstKind::Unevaluated(_, _, _)),
+            ));
 
-                let start = caller_body.basic_blocks().len();
-                debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
-                if !self.inline_call(callsite, caller_body, callee_body) {
-                    debug!("attempting to inline callsite {:?} - failure", callsite);
-                    continue;
-                }
-                debug!("attempting to inline callsite {:?} - success", callsite);
+            let start = caller_body.basic_blocks().len();
+            debug!("attempting to inline callsite {:?} - body={:?}", callsite, callee_body);
+            if !self.inline_call(callsite, caller_body, callee_body) {
+                debug!("attempting to inline callsite {:?} - failure", callsite);
+                continue;
+            }
+            debug!("attempting to inline callsite {:?} - success", callsite);
 
-                // Add callsites from inlined function
-                for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
-                    if let Some(new_callsite) =
-                        self.get_valid_function_call(bb, bb_data, caller_body)
-                    {
-                        // Don't inline the same function multiple times.
-                        if callsite.callee != new_callsite.callee {
-                            callsites.push_back(new_callsite);
-                        }
-                    }
-                }
+            // Add callsites from inlined function
+            for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
+                if let Some(new_callsite) = self.get_valid_function_call(bb, bb_data, caller_body) {
+                    // Don't inline the same function multiple times.
+                    if callsite.callee != new_callsite.callee {
+                        callsites.push_back(new_callsite);
+                    }
+                }
+            }
 
-                local_change = true;
-                changed = true;
-            }
-
-            if !local_change {
-                break;
-            }
-        }
+            changed = true;
+        }
 
         // Simplify if we inlined anything.