Auto merge of #130456 - matthiaskrgr:rollup-h2qvk1f, r=matthiaskrgr

Rollup of 4 pull requests

Successful merges:

 - #130380 (coverage: Clarify some parts of coverage counter creation)
 - #130427 (run_make_support: rectify symlink handling)
 - #130447 (rustc_llvm: update for llvm/llvm-project@2ae968a0d9fb61606b020e898d88…)
 - #130448 (fix: Remove duplicate `LazyLock` example.)

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2024-09-17 03:40:29 +00:00
commit c8dff289a0
10 changed files with 334 additions and 235 deletions


@@ -25,7 +25,6 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar.h"
#define LLVM_VERSION_GE(major, minor) \
@@ -34,6 +33,12 @@
#define LLVM_VERSION_LT(major, minor) (!LLVM_VERSION_GE((major), (minor)))
#if LLVM_VERSION_GE(20, 0)
#include "llvm/Transforms/Utils/Instrumentation.h"
#else
#include "llvm/Transforms/Instrumentation.h"
#endif
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Bitcode/BitcodeReader.h"


@@ -39,7 +39,6 @@
#include "llvm/TargetParser/Host.h"
#endif
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
#if LLVM_VERSION_GE(19, 0)


@@ -95,11 +95,33 @@ impl CoverageCounters {
this
}
fn make_counter(&mut self, site: CounterIncrementSite) -> BcbCounter {
/// Shared helper used by [`Self::make_phys_node_counter`] and
/// [`Self::make_phys_edge_counter`]. Don't call this directly.
fn make_counter_inner(&mut self, site: CounterIncrementSite) -> BcbCounter {
let id = self.counter_increment_sites.push(site);
BcbCounter::Counter { id }
}
/// Creates a new physical counter attached to a BCB node.
/// The node must not already have a counter.
fn make_phys_node_counter(&mut self, bcb: BasicCoverageBlock) -> BcbCounter {
let counter = self.make_counter_inner(CounterIncrementSite::Node { bcb });
debug!(?bcb, ?counter, "node gets a physical counter");
self.set_bcb_counter(bcb, counter)
}
/// Creates a new physical counter attached to a BCB edge.
/// The edge must not already have a counter.
fn make_phys_edge_counter(
&mut self,
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
) -> BcbCounter {
let counter = self.make_counter_inner(CounterIncrementSite::Edge { from_bcb, to_bcb });
debug!(?from_bcb, ?to_bcb, ?counter, "edge gets a physical counter");
self.set_bcb_edge_counter(from_bcb, to_bcb, counter)
}
fn make_expression(&mut self, lhs: BcbCounter, op: Op, rhs: BcbCounter) -> BcbCounter {
let new_expr = BcbExpression { lhs, op, rhs };
*self
@@ -294,25 +316,27 @@ impl<'a> MakeBcbCounters<'a> {
let successors = self.basic_coverage_blocks.successors[from_bcb].as_slice();
// If this node doesn't have multiple out-edges, or all of its out-edges
// already have counters, then we don't need to create edge counters.
let needs_out_edge_counters = successors.len() > 1
&& successors.iter().any(|&to_bcb| self.edge_has_no_counter(from_bcb, to_bcb));
if !needs_out_edge_counters {
// If this node's out-edges won't sum to the node's counter,
// then there's no reason to create edge counters here.
if !self.basic_coverage_blocks[from_bcb].is_out_summable {
return;
}
if tracing::enabled!(tracing::Level::DEBUG) {
let _span =
debug_span!("node has some out-edges without counters", ?from_bcb).entered();
for &to_bcb in successors {
debug!(?to_bcb, counter=?self.edge_counter(from_bcb, to_bcb));
}
}
// Determine the set of out-edges that don't yet have edge counters.
let candidate_successors = self.basic_coverage_blocks.successors[from_bcb]
.iter()
.copied()
.filter(|&to_bcb| self.edge_has_no_counter(from_bcb, to_bcb))
.collect::<Vec<_>>();
debug!(?candidate_successors);
// Of the out-edges that don't have counters yet, one can be given an expression
// (computed from the other out-edges) instead of a dedicated counter.
let expression_to_bcb = self.choose_out_edge_for_expression(traversal, from_bcb);
// If there are out-edges without counters, choose one to be given an expression
// (computed from this node and the other out-edges) instead of a physical counter.
let Some(expression_to_bcb) =
self.choose_out_edge_for_expression(traversal, &candidate_successors)
else {
return;
};
// For each out-edge other than the one that was chosen to get an expression,
// ensure that it has a counter (existing counter/expression or a new counter),
@@ -324,10 +348,11 @@ impl<'a> MakeBcbCounters<'a> {
.filter(|&to_bcb| to_bcb != expression_to_bcb)
.map(|to_bcb| self.get_or_make_edge_counter(from_bcb, to_bcb))
.collect::<Vec<_>>();
let sum_of_all_other_out_edges: BcbCounter = self
.coverage_counters
.make_sum(&other_out_edge_counters)
.expect("there must be at least one other out-edge");
let Some(sum_of_all_other_out_edges) =
self.coverage_counters.make_sum(&other_out_edge_counters)
else {
return;
};
// Now create an expression for the chosen edge, by taking the counter
// for its source node and subtracting the sum of its sibling out-edges.
@@ -338,10 +363,13 @@ impl<'a> MakeBcbCounters<'a> {
);
debug!("{expression_to_bcb:?} gets an expression: {expression:?}");
if self.basic_coverage_blocks.bcb_has_multiple_in_edges(expression_to_bcb) {
self.coverage_counters.set_bcb_edge_counter(from_bcb, expression_to_bcb, expression);
} else {
if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(expression_to_bcb) {
// This edge normally wouldn't get its own counter, so attach the expression
// to its target node instead, so that `edge_has_no_counter` can see it.
assert_eq!(sole_pred, from_bcb);
self.coverage_counters.set_bcb_counter(expression_to_bcb, expression);
} else {
self.coverage_counters.set_bcb_edge_counter(from_bcb, expression_to_bcb, expression);
}
}
@@ -353,28 +381,21 @@ impl<'a> MakeBcbCounters<'a> {
return counter_kind;
}
// A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`).
// Also, a BCB that loops back to itself gets a simple `Counter`. This may indicate the
// program results in a tight infinite loop, but it should still compile.
let one_path_to_target = !self.basic_coverage_blocks.bcb_has_multiple_in_edges(bcb);
if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
let counter_kind =
self.coverage_counters.make_counter(CounterIncrementSite::Node { bcb });
if one_path_to_target {
debug!("{bcb:?} gets a new counter: {counter_kind:?}");
} else {
debug!(
"{bcb:?} has itself as its own predecessor. It can't be part of its own \
Expression sum, so it will get its own new counter: {counter_kind:?}. \
(Note, the compiled code will generate an infinite loop.)",
);
}
return self.coverage_counters.set_bcb_counter(bcb, counter_kind);
let predecessors = self.basic_coverage_blocks.predecessors[bcb].as_slice();
// Handle cases where we can't compute a node's count from its in-edges:
// - START_BCB has no in-edges, so taking the sum would panic (or be wrong).
// - For nodes with one in-edge, or that directly loop to themselves,
// trying to get the in-edge counts would require this node's counter,
// leading to infinite recursion.
if predecessors.len() <= 1 || predecessors.contains(&bcb) {
debug!(?bcb, ?predecessors, "node has <=1 predecessors or is its own predecessor");
return self.coverage_counters.make_phys_node_counter(bcb);
}
// A BCB with multiple incoming edges can compute its count by ensuring that counters
// exist for each of those edges, and then adding them up to get a total count.
let in_edge_counters = self.basic_coverage_blocks.predecessors[bcb]
let in_edge_counters = predecessors
.iter()
.copied()
.map(|from_bcb| self.get_or_make_edge_counter(from_bcb, bcb))
@@ -394,16 +415,19 @@ impl<'a> MakeBcbCounters<'a> {
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
) -> BcbCounter {
// If the target BCB has only one in-edge (i.e. this one), then create
// a node counter instead, since it will have the same value.
if !self.basic_coverage_blocks.bcb_has_multiple_in_edges(to_bcb) {
assert_eq!([from_bcb].as_slice(), self.basic_coverage_blocks.predecessors[to_bcb]);
// If the target node has exactly one in-edge (i.e. this one), then just
// use the node's counter, since it will have the same value.
if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(to_bcb) {
assert_eq!(sole_pred, from_bcb);
// This call must take care not to invoke `get_or_make_edge` for
// this edge, since that would result in infinite recursion!
return self.get_or_make_node_counter(to_bcb);
}
// If the source BCB has only one successor (assumed to be the given target), an edge
// counter is unnecessary. Just get or make a counter for the source BCB.
if self.bcb_successors(from_bcb).len() == 1 {
// If the source node has exactly one out-edge (i.e. this one) and would have
// the same execution count as that edge, then just use the node's counter.
if let Some(simple_succ) = self.basic_coverage_blocks.simple_successor(from_bcb) {
assert_eq!(simple_succ, to_bcb);
return self.get_or_make_node_counter(from_bcb);
}
@@ -416,118 +440,81 @@ impl<'a> MakeBcbCounters<'a> {
}
// Make a new counter to count this edge.
let counter_kind =
self.coverage_counters.make_counter(CounterIncrementSite::Edge { from_bcb, to_bcb });
debug!("Edge {from_bcb:?}->{to_bcb:?} gets a new counter: {counter_kind:?}");
self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind)
self.coverage_counters.make_phys_edge_counter(from_bcb, to_bcb)
}
/// Choose one of the out-edges of `from_bcb` to receive an expression
/// instead of a physical counter, and returns that edge's target node.
///
/// - Precondition: The node must have at least one out-edge without a counter.
/// - Postcondition: The selected edge does not have an edge counter.
/// Given a set of candidate out-edges (represented by their successor node),
/// choose one to be given a counter expression instead of a physical counter.
fn choose_out_edge_for_expression(
&self,
traversal: &TraverseCoverageGraphWithLoops<'_>,
from_bcb: BasicCoverageBlock,
) -> BasicCoverageBlock {
if let Some(reloop_target) = self.find_good_reloop_edge(traversal, from_bcb) {
assert!(self.edge_has_no_counter(from_bcb, reloop_target));
candidate_successors: &[BasicCoverageBlock],
) -> Option<BasicCoverageBlock> {
// Try to find a candidate that leads back to the top of a loop,
// because reloop edges tend to be executed more times than loop-exit edges.
if let Some(reloop_target) = self.find_good_reloop_edge(traversal, &candidate_successors) {
debug!("Selecting reloop target {reloop_target:?} to get an expression");
return reloop_target;
return Some(reloop_target);
}
// We couldn't identify a "good" edge, so just choose any edge that
// doesn't already have a counter.
let arbitrary_target = self
.bcb_successors(from_bcb)
.iter()
.copied()
.find(|&to_bcb| self.edge_has_no_counter(from_bcb, to_bcb))
.expect("precondition: at least one out-edge without a counter");
// We couldn't identify a "good" edge, so just choose an arbitrary one.
let arbitrary_target = candidate_successors.first().copied()?;
debug!(?arbitrary_target, "selecting arbitrary out-edge to get an expression");
arbitrary_target
Some(arbitrary_target)
}
/// Tries to find an edge that leads back to the top of a loop, and that
/// doesn't already have a counter. Such edges are good candidates to
/// be given an expression (instead of a physical counter), because they
/// will tend to be executed more times than a loop-exit edge.
/// Given a set of candidate out-edges (represented by their successor node),
/// tries to find one that leads back to the top of a loop.
///
/// Reloop edges are good candidates for counter expressions, because they
/// will tend to be executed more times than a loop-exit edge, so it's nice
/// for them to be able to avoid a physical counter increment.
fn find_good_reloop_edge(
&self,
traversal: &TraverseCoverageGraphWithLoops<'_>,
from_bcb: BasicCoverageBlock,
candidate_successors: &[BasicCoverageBlock],
) -> Option<BasicCoverageBlock> {
let successors = self.bcb_successors(from_bcb);
// If there are no candidates, avoid iterating over the loop stack.
if candidate_successors.is_empty() {
return None;
}
// Consider each loop on the current traversal context stack, top-down.
for reloop_bcbs in traversal.reloop_bcbs_per_loop() {
let mut all_edges_exit_this_loop = true;
// Try to find an out-edge that doesn't exit this loop and doesn't
// already have a counter.
for &target_bcb in successors {
// Try to find a candidate edge that doesn't exit this loop.
for &target_bcb in candidate_successors {
// An edge is a reloop edge if its target dominates any BCB that has
// an edge back to the loop header. (Otherwise it's an exit edge.)
let is_reloop_edge = reloop_bcbs.iter().any(|&reloop_bcb| {
self.basic_coverage_blocks.dominates(target_bcb, reloop_bcb)
});
if is_reloop_edge {
all_edges_exit_this_loop = false;
if self.edge_has_no_counter(from_bcb, target_bcb) {
// We found a good out-edge to be given an expression.
return Some(target_bcb);
}
// Keep looking for another reloop edge without a counter.
} else {
// This edge exits the loop.
// We found a good out-edge to be given an expression.
return Some(target_bcb);
}
}
if !all_edges_exit_this_loop {
// We found one or more reloop edges, but all of them already
// have counters. Let the caller choose one of the other edges.
debug!("All reloop edges had counters; skipping the other loops");
return None;
}
// All of the out-edges exit this loop, so keep looking for a good
// reloop edge for one of the outer loops.
// All of the candidate edges exit this loop, so keep looking
// for a good reloop edge for one of the outer loops.
}
None
}
#[inline]
fn bcb_predecessors(&self, bcb: BasicCoverageBlock) -> &[BasicCoverageBlock] {
&self.basic_coverage_blocks.predecessors[bcb]
}
#[inline]
fn bcb_successors(&self, bcb: BasicCoverageBlock) -> &[BasicCoverageBlock] {
&self.basic_coverage_blocks.successors[bcb]
}
#[inline]
fn edge_has_no_counter(
&self,
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
) -> bool {
self.edge_counter(from_bcb, to_bcb).is_none()
}
let edge_counter =
if let Some(sole_pred) = self.basic_coverage_blocks.sole_predecessor(to_bcb) {
assert_eq!(sole_pred, from_bcb);
self.coverage_counters.bcb_counters[to_bcb]
} else {
self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb)).copied()
};
fn edge_counter(
&self,
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
) -> Option<&BcbCounter> {
if self.basic_coverage_blocks.bcb_has_multiple_in_edges(to_bcb) {
self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
} else {
self.coverage_counters.bcb_counters[to_bcb].as_ref()
}
edge_counter.is_none()
}
}
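To make the counter arithmetic in the comments above concrete, here is a minimal standalone sketch. The types are made up (not the compiler's `BcbCounter` or `CoverageCounters`), and it evaluates concrete counts instead of recording symbolic expressions; it only illustrates the two steps described here: summing the sibling out-edge counters (like `make_sum`, which returns `None` for an empty operand list) and giving the one chosen edge the expression `node - sum(siblings)`.

```rust
// Illustrative sketch only; the real code records symbolic (lhs, op, rhs)
// expressions rather than evaluating concrete execution counts.
#[derive(Clone, Copy, Debug)]
enum Count {
    Counter(u64),    // value observed by a physical counter increment
    Expression(u64), // value derived from other counts
}

impl Count {
    fn value(self) -> u64 {
        match self {
            Count::Counter(n) | Count::Expression(n) => n,
        }
    }
}

/// Mirrors the shape of `make_sum`: `None` when there are no operands.
fn make_sum(operands: &[Count]) -> Option<Count> {
    let (&first, rest) = operands.split_first()?;
    Some(Count::Expression(rest.iter().fold(first.value(), |acc, c| acc + c.value())))
}

fn main() {
    // A node executed 10 times, with three out-edges; two already have counters.
    let node = Count::Counter(10);
    let other_out_edges = [Count::Counter(3), Count::Counter(4)];

    let sum = make_sum(&other_out_edges).expect("at least one other out-edge");
    // The chosen edge is given `node - sum(siblings)` instead of a physical counter.
    let chosen_edge = Count::Expression(node.value() - sum.value());
    assert_eq!(chosen_edge.value(), 3);
}
```

In the real pass, only the chosen edge avoids a physical counter increment; its count is reconstructed from the recorded expression when coverage is reported.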


@@ -87,7 +87,11 @@ impl CoverageGraph {
for &bb in basic_blocks.iter() {
bb_to_bcb[bb] = Some(bcb);
}
let bcb_data = BasicCoverageBlockData::from(basic_blocks);
let is_out_summable = basic_blocks.last().map_or(false, |&bb| {
bcb_filtered_successors(mir_body[bb].terminator()).is_out_summable()
});
let bcb_data = BasicCoverageBlockData { basic_blocks, is_out_summable };
debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
bcbs.push(bcb_data);
};
@@ -161,23 +165,33 @@ impl CoverageGraph {
self.dominators.as_ref().unwrap().cmp_in_dominator_order(a, b)
}
/// Returns true if the given node has 2 or more in-edges, i.e. 2 or more
/// predecessors.
///
/// This property is interesting to code that assigns counters to nodes and
/// edges, because if a node _doesn't_ have multiple in-edges, then there's
/// no benefit in having a separate counter for its in-edge, because it
/// would have the same value as the node's own counter.
///
/// FIXME: That assumption might not be true for [`TerminatorKind::Yield`]?
#[inline(always)]
pub(crate) fn bcb_has_multiple_in_edges(&self, bcb: BasicCoverageBlock) -> bool {
// Even though bcb0 conceptually has an extra virtual in-edge due to
// being the entry point, we've already asserted that it has no _other_
// in-edges, so there's no possibility of it having _multiple_ in-edges.
// (And since its virtual in-edge doesn't exist in the graph, that edge
// can't have a separate counter anyway.)
self.predecessors[bcb].len() > 1
/// Returns the source of this node's sole in-edge, if it has exactly one.
/// That edge can be assumed to have the same execution count as the node
/// itself (in the absence of panics).
pub(crate) fn sole_predecessor(
&self,
to_bcb: BasicCoverageBlock,
) -> Option<BasicCoverageBlock> {
// Unlike `simple_successor`, there is no need for extra checks here.
if let &[from_bcb] = self.predecessors[to_bcb].as_slice() { Some(from_bcb) } else { None }
}
/// Returns the target of this node's sole out-edge, if it has exactly
/// one, but only if that edge can be assumed to have the same execution
/// count as the node itself (in the absence of panics).
pub(crate) fn simple_successor(
&self,
from_bcb: BasicCoverageBlock,
) -> Option<BasicCoverageBlock> {
// If a node's count is the sum of its out-edges, and it has exactly
// one out-edge, then that edge has the same count as the node.
if self.bcbs[from_bcb].is_out_summable
&& let &[to_bcb] = self.successors[from_bcb].as_slice()
{
Some(to_bcb)
} else {
None
}
}
}
@@ -266,14 +280,16 @@ rustc_index::newtype_index! {
#[derive(Debug, Clone)]
pub(crate) struct BasicCoverageBlockData {
pub(crate) basic_blocks: Vec<BasicBlock>,
/// If true, this node's execution count can be assumed to be the sum of the
/// execution counts of all of its **out-edges** (assuming no panics).
///
/// Notably, this is false for a node ending with [`TerminatorKind::Yield`],
/// because the yielding coroutine might not be resumed.
pub(crate) is_out_summable: bool,
}
impl BasicCoverageBlockData {
fn from(basic_blocks: Vec<BasicBlock>) -> Self {
assert!(basic_blocks.len() > 0);
Self { basic_blocks }
}
#[inline(always)]
pub(crate) fn leader_bb(&self) -> BasicBlock {
self.basic_blocks[0]
@@ -295,6 +311,9 @@ enum CoverageSuccessors<'a> {
Chainable(BasicBlock),
/// The block cannot be combined into the same BCB as its successor(s).
NotChainable(&'a [BasicBlock]),
/// Yield terminators are not chainable, and their execution count can also
/// differ from the execution count of their out-edge.
Yield(BasicBlock),
}
impl CoverageSuccessors<'_> {
@@ -302,6 +321,17 @@ impl CoverageSuccessors<'_> {
match self {
Self::Chainable(_) => true,
Self::NotChainable(_) => false,
Self::Yield(_) => false,
}
}
/// Returns true if the terminator itself is assumed to have the same
/// execution count as the sum of its out-edges (assuming no panics).
fn is_out_summable(&self) -> bool {
match self {
Self::Chainable(_) => true,
Self::NotChainable(_) => true,
Self::Yield(_) => false,
}
}
}
@@ -312,7 +342,9 @@ impl IntoIterator for CoverageSuccessors<'_> {
fn into_iter(self) -> Self::IntoIter {
match self {
Self::Chainable(bb) => Some(bb).into_iter().chain((&[]).iter().copied()),
Self::Chainable(bb) | Self::Yield(bb) => {
Some(bb).into_iter().chain((&[]).iter().copied())
}
Self::NotChainable(bbs) => None.into_iter().chain(bbs.iter().copied()),
}
}
@@ -331,7 +363,7 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
// A yield terminator has exactly 1 successor, but should not be chained,
// because its resume edge has a different execution count.
Yield { ref resume, .. } => CoverageSuccessors::NotChainable(std::slice::from_ref(resume)),
Yield { resume, .. } => CoverageSuccessors::Yield(resume),
// These terminators have exactly one coverage-relevant successor,
// and can be chained into it.
@@ -341,15 +373,15 @@ fn bcb_filtered_successors<'a, 'tcx>(terminator: &'a Terminator<'tcx>) -> Covera
| FalseUnwind { real_target: target, .. }
| Goto { target } => CoverageSuccessors::Chainable(target),
// A call terminator can normally be chained, except when they have no
// successor because they are known to diverge.
// A call terminator can normally be chained, except when it has no
// successor because it is known to diverge.
Call { target: maybe_target, .. } => match maybe_target {
Some(target) => CoverageSuccessors::Chainable(target),
None => CoverageSuccessors::NotChainable(&[]),
},
// An inline asm terminator can normally be chained, except when it diverges or uses asm
// goto.
// An inline asm terminator can normally be chained, except when it
// diverges or uses asm goto.
InlineAsm { ref targets, .. } => {
if let [target] = targets[..] {
CoverageSuccessors::Chainable(target)

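As a rough illustration of the two new graph queries above (`sole_predecessor` and `simple_successor`), here is a toy adjacency-list graph; the indices stand in for `BasicCoverageBlock`, and this is not the real `CoverageGraph`. The only extra rule is that `simple_successor` also requires the node to be out-summable.

```rust
struct ToyGraph {
    predecessors: Vec<Vec<usize>>,
    successors: Vec<Vec<usize>>,
    is_out_summable: Vec<bool>, // false for e.g. a block ending in a yield
}

impl ToyGraph {
    /// Source of the sole in-edge, if there is exactly one in-edge.
    fn sole_predecessor(&self, to: usize) -> Option<usize> {
        if let &[from] = self.predecessors[to].as_slice() { Some(from) } else { None }
    }

    /// Target of the sole out-edge, but only if that edge can be assumed to
    /// have the same execution count as the node itself.
    fn simple_successor(&self, from: usize) -> Option<usize> {
        if self.is_out_summable[from] {
            if let &[to] = self.successors[from].as_slice() {
                return Some(to);
            }
        }
        None
    }
}

fn main() {
    // 0 -> 1 -> 2, where node 1 ends in a yield-like terminator.
    let g = ToyGraph {
        predecessors: vec![vec![], vec![0], vec![1]],
        successors: vec![vec![1], vec![2], vec![]],
        is_out_summable: vec![true, false, true],
    };
    assert_eq!(g.sole_predecessor(1), Some(0));
    assert_eq!(g.simple_successor(0), Some(1));
    // Node 1 has exactly one out-edge, but its count can differ from that
    // edge's count (the coroutine might never be resumed), so no simple successor.
    assert_eq!(g.simple_successor(1), None);
}
```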

@@ -44,8 +44,6 @@ union Data<T, F> {
///
/// // The `String` is built, stored in the `LazyLock`, and returned as `&String`.
/// let _ = &*DEEP_THOUGHT;
/// // The `String` is retrieved from the `LazyLock` and returned as `&String`.
/// let _ = &*DEEP_THOUGHT;
/// ```
///
/// Initialize fields with `LazyLock`.
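For context, here is a small self-contained `LazyLock` usage example (illustrative only; the initializer body is made up rather than copied from the standard library's doc example): the initializer runs once, on first dereference, and later dereferences reuse the stored value.

```rust
use std::sync::LazyLock;

// The closure runs at most once, on first access to the static.
static DEEP_THOUGHT: LazyLock<String> = LazyLock::new(|| {
    println!("computing the answer...");
    format!("the answer is {}", 6 * 7)
});

fn main() {
    // First access builds the `String`, stores it in the `LazyLock`,
    // and returns it as `&String`.
    let first: &String = &*DEEP_THOUGHT;
    // A later access retrieves the already-stored value; nothing is rebuilt.
    let second: &String = &*DEEP_THOUGHT;
    assert_eq!(first, second);
}
```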


@@ -1,42 +1,6 @@
use std::io;
use std::path::{Path, PathBuf};
// FIXME(jieyouxu): modify create_symlink to panic on windows.
/// Creates a new symlink to a path on the filesystem, adjusting for Windows or Unix.
#[cfg(target_family = "windows")]
pub fn create_symlink<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) {
if link.as_ref().exists() {
std::fs::remove_dir(link.as_ref()).unwrap();
}
if original.as_ref().is_file() {
std::os::windows::fs::symlink_file(original.as_ref(), link.as_ref()).expect(&format!(
"failed to create symlink {:?} for {:?}",
link.as_ref().display(),
original.as_ref().display(),
));
} else {
std::os::windows::fs::symlink_dir(original.as_ref(), link.as_ref()).expect(&format!(
"failed to create symlink {:?} for {:?}",
link.as_ref().display(),
original.as_ref().display(),
));
}
}
/// Creates a new symlink to a path on the filesystem, adjusting for Windows or Unix.
#[cfg(target_family = "unix")]
pub fn create_symlink<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) {
if link.as_ref().exists() {
std::fs::remove_dir(link.as_ref()).unwrap();
}
std::os::unix::fs::symlink(original.as_ref(), link.as_ref()).expect(&format!(
"failed to create symlink {:?} for {:?}",
link.as_ref().display(),
original.as_ref().display(),
));
}
/// Copy a directory into another.
pub fn copy_dir_all(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
fn copy_dir_all_inner(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> io::Result<()> {
@@ -50,7 +14,31 @@ pub fn copy_dir_all(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
if ty.is_dir() {
copy_dir_all_inner(entry.path(), dst.join(entry.file_name()))?;
} else if ty.is_symlink() {
copy_symlink(entry.path(), dst.join(entry.file_name()))?;
// Traverse symlink once to find path of target entity.
let target_path = std::fs::read_link(entry.path())?;
let new_symlink_path = dst.join(entry.file_name());
#[cfg(windows)]
{
use std::os::windows::fs::FileTypeExt;
if ty.is_symlink_dir() {
std::os::windows::fs::symlink_dir(&target_path, new_symlink_path)?;
} else {
// Target may be a file or another symlink; in either case we can use
// `symlink_file` here.
std::os::windows::fs::symlink_file(&target_path, new_symlink_path)?;
}
}
#[cfg(unix)]
{
std::os::unix::fs::symlink(target_path, new_symlink_path)?;
}
#[cfg(not(any(windows, unix)))]
{
// Technically there's also wasi, but I have no clue about wasi symlink
// semantics and which wasi targets / environment support symlinks.
unimplemented!("unsupported target");
}
} else {
std::fs::copy(entry.path(), dst.join(entry.file_name()))?;
}
@@ -69,12 +57,6 @@ pub fn copy_dir_all(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
}
}
fn copy_symlink<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
let target_path = std::fs::read_link(from).unwrap();
create_symlink(target_path, to);
Ok(())
}
/// Helper for reading entries in a given directory.
pub fn read_dir_entries<P: AsRef<Path>, F: FnMut(&Path)>(dir: P, mut callback: F) {
for entry in read_dir(dir) {
@@ -85,8 +67,17 @@ pub fn read_dir_entries<P: AsRef<Path>, F: FnMut(&Path)>(dir: P, mut callback: F
/// A wrapper around [`std::fs::remove_file`] which includes the file path in the panic message.
#[track_caller]
pub fn remove_file<P: AsRef<Path>>(path: P) {
std::fs::remove_file(path.as_ref())
.expect(&format!("the file in path \"{}\" could not be removed", path.as_ref().display()));
if let Err(e) = std::fs::remove_file(path.as_ref()) {
panic!("failed to remove file at `{}`: {e}", path.as_ref().display());
}
}
/// A wrapper around [`std::fs::remove_dir`] which includes the directory path in the panic message.
#[track_caller]
pub fn remove_dir<P: AsRef<Path>>(path: P) {
if let Err(e) = std::fs::remove_dir(path.as_ref()) {
panic!("failed to remove directory at `{}`: {e}", path.as_ref().display());
}
}
/// A wrapper around [`std::fs::copy`] which includes the file path in the panic message.
@@ -165,13 +156,32 @@ pub fn create_dir_all<P: AsRef<Path>>(path: P) {
));
}
/// A wrapper around [`std::fs::metadata`] which includes the file path in the panic message.
/// A wrapper around [`std::fs::metadata`] which includes the file path in the panic message. Note
/// that this will traverse symlinks and will return metadata about the target file. Use
/// [`symlink_metadata`] if you don't want to traverse symlinks.
///
/// See [`std::fs::metadata`] docs for more details.
#[track_caller]
pub fn metadata<P: AsRef<Path>>(path: P) -> std::fs::Metadata {
std::fs::metadata(path.as_ref()).expect(&format!(
"the file's metadata in path \"{}\" could not be read",
path.as_ref().display()
))
match std::fs::metadata(path.as_ref()) {
Ok(m) => m,
Err(e) => panic!("failed to read file metadata at `{}`: {e}", path.as_ref().display()),
}
}
/// A wrapper around [`std::fs::symlink_metadata`] which includes the file path in the panic
/// message. Note that this will not traverse symlinks and will return metadata about the filesystem
/// entity itself. Use [`metadata`] if you want to traverse symlinks.
///
/// See [`std::fs::symlink_metadata`] docs for more details.
#[track_caller]
pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> std::fs::Metadata {
match std::fs::symlink_metadata(path.as_ref()) {
Ok(m) => m,
Err(e) => {
panic!("failed to read file metadata (shallow) at `{}`: {e}", path.as_ref().display())
}
}
}
/// A wrapper around [`std::fs::rename`] which includes the file path in the panic message.
@@ -205,3 +215,73 @@ pub fn shallow_find_dir_entries<P: AsRef<Path>>(dir: P) -> Vec<PathBuf> {
}
output
}
/// Create a new symbolic link to a directory.
///
/// # Removing the symlink
///
/// - On Windows, a symlink-to-directory needs to be removed with a corresponding [`fs::remove_dir`]
/// and not [`fs::remove_file`].
/// - On Unix, remove the symlink with [`fs::remove_file`].
///
/// [`fs::remove_dir`]: crate::fs::remove_dir
/// [`fs::remove_file`]: crate::fs::remove_file
pub fn symlink_dir<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) {
#[cfg(unix)]
{
if let Err(e) = std::os::unix::fs::symlink(original.as_ref(), link.as_ref()) {
panic!(
"failed to create symlink: original=`{}`, link=`{}`: {e}",
original.as_ref().display(),
link.as_ref().display()
);
}
}
#[cfg(windows)]
{
if let Err(e) = std::os::windows::fs::symlink_dir(original.as_ref(), link.as_ref()) {
panic!(
"failed to create symlink-to-directory: original=`{}`, link=`{}`: {e}",
original.as_ref().display(),
link.as_ref().display()
);
}
}
#[cfg(not(any(windows, unix)))]
{
unimplemented!("target family not currently supported")
}
}
/// Create a new symbolic link to a file.
///
/// # Removing the symlink
///
/// On both Windows and Unix, a symlink-to-file needs to be removed with a corresponding
/// [`fs::remove_file`](crate::fs::remove_file) and not [`fs::remove_dir`](crate::fs::remove_dir).
pub fn symlink_file<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) {
#[cfg(unix)]
{
if let Err(e) = std::os::unix::fs::symlink(original.as_ref(), link.as_ref()) {
panic!(
"failed to create symlink: original=`{}`, link=`{}`: {e}",
original.as_ref().display(),
link.as_ref().display()
);
}
}
#[cfg(windows)]
{
if let Err(e) = std::os::windows::fs::symlink_file(original.as_ref(), link.as_ref()) {
panic!(
"failed to create symlink-to-file: original=`{}`, link=`{}`: {e}",
original.as_ref().display(),
link.as_ref().display()
);
}
}
#[cfg(not(any(windows, unix)))]
{
unimplemented!("target family not currently supported")
}
}
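As a usage sketch, here is a hypothetical run-make test body that exercises these helpers, assuming they are re-exported as `run_make_support::rfs` (the tests below in this rollup use them that way); the paths and file names are made up.

```rust
use run_make_support::{path, rfs};

fn main() {
    rfs::create_dir_all("out/dir");
    // Symlink to a directory: per the docs above, remove it with `remove_dir`
    // on Windows but with `remove_file` on Unix.
    rfs::symlink_dir(path("out/dir"), path("out/dir_link"));
    // Symlink to a file (the target may even be dangling); removed with
    // `remove_file` on both families.
    rfs::symlink_file(path("out/dir/libfoo.rlib"), path("out/libfoo_link.rlib"));
    // `symlink_metadata` inspects the link itself instead of following it.
    assert!(rfs::symlink_metadata("out/libfoo_link.rlib").is_symlink());
    rfs::remove_file("out/libfoo_link.rlib");
}
```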


@@ -1,13 +1,14 @@
// In this test, the symlink created is invalid (valid relative to the root, but not
// relative to where it is located), and it used to cause an internal
// compiler error (ICE) when passed as a library search path. This was fixed in #26044,
// and this test checks that the invalid symlink is instead simply ignored.
// In this test, the symlink created is invalid (valid relative to the root, but not relative to
// where it is located), and it used to cause an internal compiler error (ICE) when passed as a
// library search path. This was fixed in #26044, and this test checks that the invalid symlink is
// instead simply ignored.
//
// See https://github.com/rust-lang/rust/issues/26006
//@ needs-symlink
// Reason: creating symlinks requires elevated permissions on Windows.
use run_make_support::{rfs, rustc};
use run_make_support::{path, rfs, rustc};
fn main() {
// We create two libs: `bar` which depends on `foo`. We need to compile `foo` first.
@@ -20,9 +21,9 @@ fn main() {
.metadata("foo")
.output("out/foo/libfoo.rlib")
.run();
rfs::create_dir("out/bar");
rfs::create_dir("out/bar/deps");
rfs::create_symlink("out/foo/libfoo.rlib", "out/bar/deps/libfoo.rlib");
rfs::create_dir_all("out/bar/deps");
rfs::symlink_file(path("out/foo/libfoo.rlib"), path("out/bar/deps/libfoo.rlib"));
// Check that the invalid symlink does not cause an ICE
rustc()
.input("in/bar/lib.rs")


@@ -1,22 +1,22 @@
// Crates that are resolved normally have their path canonicalized and all
// symlinks resolved. This did not happen for paths specified
// using the --extern option to rustc, which could lead to rustc thinking
// that it encountered two different versions of a crate, when it's
// actually the same version found through different paths.
// See https://github.com/rust-lang/rust/pull/16505
// This test checks that --extern and symlinks together
// can result in successful compilation.
// Crates that are resolved normally have their path canonicalized and all symlinks resolved. This
// did not happen for paths specified using the `--extern` option to rustc, which could lead to
// rustc thinking that it encountered two different versions of a crate, when it's actually the same
// version found through different paths.
//
// This test checks that `--extern` and symlinks together can result in successful compilation.
//
// See <https://github.com/rust-lang/rust/pull/16505>.
//@ ignore-cross-compile
//@ needs-symlink
use run_make_support::{cwd, rfs, rustc};
use run_make_support::{cwd, path, rfs, rustc};
fn main() {
rustc().input("foo.rs").run();
rfs::create_dir_all("other");
rfs::create_symlink("libfoo.rlib", "other");
rfs::symlink_file(path("libfoo.rlib"), path("other").join("libfoo.rlib"));
rustc().input("bar.rs").library_search_path(cwd()).run();
rustc().input("baz.rs").extern_("foo", "other").library_search_path(cwd()).run();
rustc().input("baz.rs").extern_("foo", "other/libfoo.rlib").library_search_path(cwd()).run();
}


@@ -1,18 +1,15 @@
// When a directory and a symlink simultaneously exist with the same name,
// setting that name as the library search path should not cause rustc
// to avoid looking in the symlink and cause an error. This test creates
// a directory and a symlink named "other", and places the library in the symlink.
// If it succeeds, the library was successfully found.
// See https://github.com/rust-lang/rust/issues/12459
// Avoid erroring on symlinks in the library search path that point to the same file.
//
// See <https://github.com/rust-lang/rust/issues/12459>.
//@ ignore-cross-compile
//@ needs-symlink
use run_make_support::{dynamic_lib_name, rfs, rustc};
use run_make_support::{cwd, dynamic_lib_name, path, rfs, rustc};
fn main() {
rustc().input("foo.rs").arg("-Cprefer-dynamic").run();
rfs::create_dir_all("other");
rfs::create_symlink(dynamic_lib_name("foo"), "other");
rustc().input("bar.rs").library_search_path("other").run();
rfs::symlink_file(dynamic_lib_name("foo"), path("other").join(dynamic_lib_name("foo")));
rustc().input("bar.rs").library_search_path(cwd()).library_search_path("other").run();
}


@@ -12,6 +12,6 @@ use run_make_support::{cwd, rfs, rustc};
fn main() {
rustc().input("foo.rs").crate_type("rlib").output("foo.xxx").run();
rfs::create_symlink("foo.xxx", "libfoo.rlib");
rfs::symlink_file("foo.xxx", "libfoo.rlib");
rustc().input("bar.rs").library_search_path(cwd()).run();
}