2021-12-04 18:05:30 +00:00
|
|
|
use super::{AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, PathStyle};
|
2020-04-27 17:56:11 +00:00
|
|
|
use rustc_ast as ast;
|
2020-02-29 17:37:32 +00:00
|
|
|
use rustc_ast::attr;
|
|
|
|
use rustc_ast::token::{self, Nonterminal};
|
2020-01-11 16:02:46 +00:00
|
|
|
use rustc_ast_pretty::pprust;
|
2022-01-23 20:41:46 +00:00
|
|
|
use rustc_errors::{error_code, Diagnostic, PResult};
|
2021-08-23 10:49:31 +00:00
|
|
|
use rustc_span::{sym, BytePos, Span};
|
2020-11-28 23:33:17 +00:00
|
|
|
use std::convert::TryInto;
|
2019-10-11 11:06:36 +00:00
|
|
|
|
2020-08-14 06:05:01 +00:00
|
|
|
use tracing::debug;
|
2014-05-16 07:16:13 +00:00
|
|
|
|
2020-11-26 01:54:08 +00:00
|
|
|
// Public for rustfmt usage
#[derive(Debug)]
pub enum InnerAttrPolicy<'a> {
    /// Inner attributes (`#![...]`) are allowed at the current parse position.
    Permitted,
    /// Inner attributes are forbidden here. `reason` is the error message to
    /// report; `saw_doc_comment` selects the wording of the "previous
    /// attribute" note; `prev_attr_sp` is the span of the preceding outer
    /// attribute/doc comment, if any, used as a secondary label.
    Forbidden { reason: &'a str, saw_doc_comment: bool, prev_attr_sp: Option<Span> },
}
|
|
|
|
|
2018-12-04 11:46:10 +00:00
|
|
|
const DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG: &str = "an inner attribute is not \
|
|
|
|
permitted in this context";
|
2016-07-06 04:35:12 +00:00
|
|
|
|
2020-03-05 10:42:56 +00:00
|
|
|
/// Default "inner attributes forbidden" policy: generic message, no previous
/// attribute context attached.
pub(super) const DEFAULT_INNER_ATTR_FORBIDDEN: InnerAttrPolicy<'_> = InnerAttrPolicy::Forbidden {
    reason: DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG,
    saw_doc_comment: false,
    prev_attr_sp: None,
};
|
|
|
|
|
2021-08-23 10:49:31 +00:00
|
|
|
/// The syntactic flavor of the attribute being diagnosed; used by
/// `annotate_following_item_if_applicable` to compute the replacement span
/// and choose suggestion wording.
enum OuterAttributeType {
    /// A line doc comment (`token::CommentKind::Line`).
    DocComment,
    /// A block doc comment (`token::CommentKind::Block`).
    DocBlockComment,
    /// A `#[...]`-style attribute.
    Attribute,
}
|
|
|
|
|
2015-10-24 01:37:21 +00:00
|
|
|
impl<'a> Parser<'a> {
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Parses attributes that appear before an item.
    ///
    /// Loops collecting `#[...]` attributes and outer doc comments until a
    /// token that starts neither is found. Misplaced inner attributes and
    /// inner doc comments are diagnosed but recovered from (doc comments are
    /// re-made as outer so parsing can continue). Returns the attributes
    /// wrapped with the token-cursor position at which collection started,
    /// for later token capture.
    pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, AttrWrapper> {
        let mut attrs: Vec<ast::Attribute> = Vec::new();
        // Tracks whether the immediately preceding attribute was a doc
        // comment, to tailor the "inner attribute not permitted" message.
        let mut just_parsed_doc_comment = false;
        // Cursor position before any attribute tokens were consumed.
        let start_pos = self.token_cursor.num_next_calls;
        loop {
            let attr = if self.check(&token::Pound) {
                // Choose the most specific reason in case this turns out to
                // be a forbidden inner attribute (`#![...]`).
                let inner_error_reason = if just_parsed_doc_comment {
                    "an inner attribute is not permitted following an outer doc comment"
                } else if !attrs.is_empty() {
                    "an inner attribute is not permitted following an outer attribute"
                } else {
                    DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG
                };
                let inner_parse_policy = InnerAttrPolicy::Forbidden {
                    reason: inner_error_reason,
                    saw_doc_comment: just_parsed_doc_comment,
                    prev_attr_sp: attrs.last().map(|a| a.span),
                };
                just_parsed_doc_comment = false;
                Some(self.parse_attribute(inner_parse_policy)?)
            } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
                if attr_style != ast::AttrStyle::Outer {
                    // Inner doc comment (`//!` / `/*!`) in outer position:
                    // emit E0753, but still recover below.
                    let span = self.token.span;
                    let mut err = self.sess.span_diagnostic.struct_span_err_with_code(
                        span,
                        "expected outer doc comment",
                        error_code!(E0753),
                    );
                    if let Some(replacement_span) = self.annotate_following_item_if_applicable(
                        &mut err,
                        span,
                        match comment_kind {
                            token::CommentKind::Line => OuterAttributeType::DocComment,
                            token::CommentKind::Block => OuterAttributeType::DocBlockComment,
                        },
                    ) {
                        err.note(
                            "inner doc comments like this (starting with `//!` or `/*!`) can \
                            only appear before items",
                        );
                        err.span_suggestion_verbose(
                            replacement_span,
                            "you might have meant to write a regular comment",
                            String::new(),
                            rustc_errors::Applicability::MachineApplicable,
                        );
                    }
                    err.emit();
                }
                self.bump();
                just_parsed_doc_comment = true;
                // Always make an outer attribute - this allows us to recover from a misplaced
                // inner attribute.
                Some(attr::mk_doc_comment(
                    comment_kind,
                    ast::AttrStyle::Outer,
                    data,
                    self.prev_token.span,
                ))
            } else {
                None
            };

            if let Some(attr) = attr {
                attrs.push(attr);
            } else {
                break;
            }
        }
        Ok(AttrWrapper::new(attrs.into(), start_pos))
    }
|
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Matches `attribute = # ! [ meta_item ]`.
    /// `inner_parse_policy` prescribes how to handle inner attributes.
    // Public for rustfmt usage.
    pub fn parse_attribute(
        &mut self,
        inner_parse_policy: InnerAttrPolicy<'_>,
    ) -> PResult<'a, ast::Attribute> {
        debug!(
            "parse_attribute: inner_parse_policy={:?} self.token={:?}",
            inner_parse_policy, self.token
        );
        let lo = self.token.span;
        // Attributes can't have attributes of their own [Editor's note: not with that attitude]
        self.collect_tokens_no_attrs(|this| {
            if this.eat(&token::Pound) {
                // A `!` after `#` marks an inner attribute.
                let style = if this.eat(&token::Not) {
                    ast::AttrStyle::Inner
                } else {
                    ast::AttrStyle::Outer
                };

                this.expect(&token::OpenDelim(token::Bracket))?;
                let item = this.parse_attr_item(false)?;
                this.expect(&token::CloseDelim(token::Bracket))?;
                let attr_sp = lo.to(this.prev_token.span);

                // Emit error if inner attribute is encountered and forbidden.
                // Note: the attribute is still constructed and returned, so
                // parsing can recover.
                if style == ast::AttrStyle::Inner {
                    this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
                }

                Ok(attr::mk_attr_from_item(item, None, style, attr_sp))
            } else {
                let token_str = pprust::token_to_string(&this.token);
                let msg = &format!("expected `#`, found `{token_str}`");
                Err(this.struct_span_err(this.token.span, msg))
            }
        })
    }
|
2014-02-25 02:42:40 +00:00
|
|
|
|
2021-08-23 10:49:31 +00:00
|
|
|
    /// If the misplaced inner attribute/doc comment at `span` is followed by
    /// an item, adds labels and a machine-applicable "change inner to outer
    /// style" suggestion to `err`.
    ///
    /// Uses a parser snapshot so no tokens are actually consumed. Returns
    /// `Some(replacement_span)` (the span of the `!` sigil to remove) when the
    /// caller should add its own generic suggestion instead, or `None` when a
    /// full suggestion was already attached here.
    fn annotate_following_item_if_applicable(
        &self,
        err: &mut Diagnostic,
        span: Span,
        attr_type: OuterAttributeType,
    ) -> Option<Span> {
        let mut snapshot = self.create_snapshot_for_diagnostic();
        // Offset past the leading sigil: 1 byte for `#` in `#![`, 2 bytes for
        // `//` or `/*` in doc comments; the next byte is the `!` to remove.
        let lo = span.lo()
            + BytePos(match attr_type {
                OuterAttributeType::Attribute => 1,
                _ => 2,
            });
        let hi = lo + BytePos(1);
        let replacement_span = span.with_lo(lo).with_hi(hi);
        // The doc-comment token itself hasn't been consumed yet; skip it in
        // the snapshot so we can look at what follows.
        if let OuterAttributeType::DocBlockComment | OuterAttributeType::DocComment = attr_type {
            snapshot.bump();
        }
        loop {
            // skip any other attributes, we want the item
            if snapshot.token.kind == token::Pound {
                if let Err(err) = snapshot.parse_attribute(InnerAttrPolicy::Permitted) {
                    err.cancel();
                    return Some(replacement_span);
                }
            } else {
                break;
            }
        }
        match snapshot.parse_item_common(
            AttrWrapper::empty(),
            true,
            false,
            FnParseMode { req_name: |_| true, req_body: true },
            ForceCollect::No,
        ) {
            Ok(Some(item)) => {
                let attr_name = match attr_type {
                    OuterAttributeType::Attribute => "attribute",
                    _ => "doc comment",
                };
                err.span_label(
                    item.span,
                    &format!("the inner {} doesn't annotate this {}", attr_name, item.kind.descr()),
                );
                err.span_suggestion_verbose(
                    replacement_span,
                    &format!(
                        "to annotate the {}, change the {} from inner to outer style",
                        item.kind.descr(),
                        attr_name
                    ),
                    // Replacement text that turns the inner form into the
                    // outer form: drop `!`, or turn `/*!` into `/**` and
                    // `//!` into `///`.
                    (match attr_type {
                        OuterAttributeType::Attribute => "",
                        OuterAttributeType::DocBlockComment => "*",
                        OuterAttributeType::DocComment => "/",
                    })
                    .to_string(),
                    rustc_errors::Applicability::MachineApplicable,
                );
                return None;
            }
            Err(item_err) => {
                // Speculative parse failed; drop the error silently.
                item_err.cancel();
            }
            Ok(None) => {}
        }
        Some(replacement_span)
    }
|
|
|
|
|
2020-03-05 10:42:56 +00:00
|
|
|
    /// Emits the diagnostic for an inner attribute found at `attr_sp` when
    /// `policy` forbids one; does nothing if the policy is `Permitted`.
    pub(super) fn error_on_forbidden_inner_attr(&self, attr_sp: Span, policy: InnerAttrPolicy<'_>) {
        if let InnerAttrPolicy::Forbidden { reason, saw_doc_comment, prev_attr_sp } = policy {
            let prev_attr_note =
                if saw_doc_comment { "previous doc comment" } else { "previous outer attribute" };

            let mut diag = self.struct_span_err(attr_sp, reason);

            // Point at the outer attribute/doc comment that makes this inner
            // attribute illegal, when we know where it is.
            if let Some(prev_attr_sp) = prev_attr_sp {
                diag.span_label(attr_sp, "not permitted following an outer attribute")
                    .span_label(prev_attr_sp, prev_attr_note);
            }

            diag.note(
                "inner attributes, like `#![no_std]`, annotate the item enclosing them, and \
                are usually found at the beginning of source files",
            );
            // Only add the generic "outer attributes annotate the item
            // following them" note when a more specific suggestion wasn't
            // already attached by the helper (which returns None in that case).
            if self
                .annotate_following_item_if_applicable(
                    &mut diag,
                    attr_sp,
                    OuterAttributeType::Attribute,
                )
                .is_some()
            {
                diag.note("outer attributes, like `#[test]`, annotate the item following them");
            };
            diag.emit();
        }
    }
|
2012-04-20 03:51:31 +00:00
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Parses an inner part of an attribute (the path and following tokens).
    /// The tokens must be either a delimited token stream, or empty token stream,
    /// or the "legacy" key-value form.
    ///     PATH `(` TOKEN_STREAM `)`
    ///     PATH `[` TOKEN_STREAM `]`
    ///     PATH `{` TOKEN_STREAM `}`
    ///     PATH
    ///     PATH `=` UNSUFFIXED_LIT
    /// The delimiters or `=` are still put into the resulting token stream.
    pub fn parse_attr_item(&mut self, capture_tokens: bool) -> PResult<'a, ast::AttrItem> {
        // Fast path: the attr item may already be parsed, arriving as an
        // interpolated `NtMeta` nonterminal from macro expansion.
        let item = match self.token.kind {
            token::Interpolated(ref nt) => match **nt {
                Nonterminal::NtMeta(ref item) => Some(item.clone().into_inner()),
                _ => None,
            },
            _ => None,
        };
        Ok(if let Some(item) = item {
            self.bump();
            item
        } else {
            let do_parse = |this: &mut Self| {
                let path = this.parse_path(PathStyle::Mod)?;
                let args = this.parse_attr_args()?;
                Ok(ast::AttrItem { path, args, tokens: None })
            };
            // Attr items don't have attributes
            if capture_tokens { self.collect_tokens_no_attrs(do_parse) } else { do_parse(self) }?
        })
    }
|
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Parses attributes that appear after the opening of an item. These should
    /// be preceded by an exclamation mark, but we accept and warn about one
    /// terminated by a semicolon.
    ///
    /// Matches `inner_attrs*`.
    crate fn parse_inner_attributes(&mut self) -> PResult<'a, Vec<ast::Attribute>> {
        let mut attrs: Vec<ast::Attribute> = vec![];
        loop {
            // Cursor position before this attribute's tokens; paired with
            // `end_pos` below to delimit the attribute in the token stream.
            let start_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
            // Only try to parse if it is an inner attribute (has `!`).
            let attr = if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) {
                Some(self.parse_attribute(InnerAttrPolicy::Permitted)?)
            } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
                if attr_style == ast::AttrStyle::Inner {
                    self.bump();
                    Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span))
                } else {
                    // An outer doc comment ends the run of inner attributes.
                    None
                }
            } else {
                None
            };
            if let Some(attr) = attr {
                let end_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
                // If we are currently capturing tokens, mark the location of this inner attribute.
                // If capturing ends up creating a `LazyTokenStream`, we will include
                // this replace range with it, removing the inner attribute from the final
                // `AttrAnnotatedTokenStream`. Inner attributes are stored in the parsed AST note.
                // During macro expansion, they are selectively inserted back into the
                // token stream (the first inner attribute is removed each time we invoke the
                // corresponding macro).
                let range = start_pos..end_pos;
                if let Capturing::Yes = self.capture_state.capturing {
                    self.capture_state.inner_attr_ranges.insert(attr.id, (range, vec![]));
                }
                attrs.push(attr);
            } else {
                break;
            }
        }
        Ok(attrs)
    }
|
|
|
|
|
2019-12-05 13:19:00 +00:00
|
|
|
crate fn parse_unsuffixed_lit(&mut self) -> PResult<'a, ast::Lit> {
|
2016-08-20 01:58:14 +00:00
|
|
|
let lit = self.parse_lit()?;
|
2019-07-15 21:23:39 +00:00
|
|
|
debug!("checking if {:?} is unusuffixed", lit);
|
2016-08-20 01:58:14 +00:00
|
|
|
|
2019-09-26 15:56:53 +00:00
|
|
|
if !lit.kind.is_unsuffixed() {
|
2020-03-05 10:42:56 +00:00
|
|
|
self.struct_span_err(lit.span, "suffixed literals are not allowed in attributes")
|
2019-12-22 22:42:04 +00:00
|
|
|
.help(
|
2020-03-07 08:31:00 +00:00
|
|
|
"instead of using a suffixed literal (`1u8`, `1.0f32`, etc.), \
|
|
|
|
use an unsuffixed version (`1`, `1.0`, etc.)",
|
2019-12-22 22:42:04 +00:00
|
|
|
)
|
2020-01-31 12:24:57 +00:00
|
|
|
.emit();
|
2016-08-20 01:58:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(lit)
|
|
|
|
}
|
|
|
|
|
2019-10-08 07:14:07 +00:00
|
|
|
    /// Parses `cfg_attr(pred, attr_item_list)` where `attr_item_list` is comma-delimited.
    ///
    /// Returns the predicate meta item together with each expanded attr item
    /// and its span (spans are needed to re-emit the attributes later).
    pub fn parse_cfg_attr(&mut self) -> PResult<'a, (ast::MetaItem, Vec<(ast::AttrItem, Span)>)> {
        let cfg_predicate = self.parse_meta_item()?;
        self.expect(&token::Comma)?;

        // Presumably, the majority of the time there will only be one attr.
        let mut expanded_attrs = Vec::with_capacity(1);
        while self.token.kind != token::Eof {
            let lo = self.token.span;
            // `true`: capture tokens, since the items will be re-expanded.
            let item = self.parse_attr_item(true)?;
            expanded_attrs.push((item, lo.to(self.prev_token.span)));
            if !self.eat(&token::Comma) {
                break;
            }
        }

        Ok((cfg_predicate, expanded_attrs))
    }
|
|
|
|
|
2019-12-05 13:19:00 +00:00
|
|
|
/// Matches `COMMASEP(meta_item_inner)`.
|
|
|
|
crate fn parse_meta_seq_top(&mut self) -> PResult<'a, Vec<ast::NestedMetaItem>> {
|
|
|
|
// Presumably, the majority of the time there will only be one attr.
|
|
|
|
let mut nmis = Vec::with_capacity(1);
|
|
|
|
while self.token.kind != token::Eof {
|
|
|
|
nmis.push(self.parse_meta_item_inner()?);
|
|
|
|
if !self.eat(&token::Comma) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(nmis)
|
|
|
|
}
|
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Matches the following grammar (per RFC 1559).
    ///
    /// meta_item : PATH ( '=' UNSUFFIXED_LIT | '(' meta_item_inner? ')' )? ;
    /// meta_item_inner : (meta_item | UNSUFFIXED_LIT) (',' meta_item_inner)? ;
    pub fn parse_meta_item(&mut self) -> PResult<'a, ast::MetaItem> {
        // Fast path: the meta item may arrive pre-parsed as an interpolated
        // `NtMeta` nonterminal from macro expansion.
        let nt_meta = match self.token.kind {
            token::Interpolated(ref nt) => match **nt {
                token::NtMeta(ref e) => Some(e.clone()),
                _ => None,
            },
            _ => None,
        };

        if let Some(item) = nt_meta {
            return match item.meta(item.path.span) {
                Some(meta) => {
                    self.bump();
                    Ok(meta)
                }
                // The interpolated attr item doesn't form a valid meta item.
                None => self.unexpected(),
            };
        }

        let lo = self.token.span;
        let path = self.parse_path(PathStyle::Mod)?;
        let kind = self.parse_meta_item_kind()?;
        let span = lo.to(self.prev_token.span);
        Ok(ast::MetaItem { path, kind, span })
    }
|
|
|
|
|
2018-05-31 22:53:30 +00:00
|
|
|
crate fn parse_meta_item_kind(&mut self) -> PResult<'a, ast::MetaItemKind> {
|
2017-03-03 09:23:59 +00:00
|
|
|
Ok(if self.eat(&token::Eq) {
|
2016-11-15 07:37:10 +00:00
|
|
|
ast::MetaItemKind::NameValue(self.parse_unsuffixed_lit()?)
|
2019-12-04 09:13:29 +00:00
|
|
|
} else if self.check(&token::OpenDelim(token::Paren)) {
|
|
|
|
// Matches `meta_seq = ( COMMASEP(meta_item_inner) )`.
|
|
|
|
let (list, _) = self.parse_paren_comma_seq(|p| p.parse_meta_item_inner())?;
|
|
|
|
ast::MetaItemKind::List(list)
|
2016-11-15 07:37:10 +00:00
|
|
|
} else {
|
|
|
|
ast::MetaItemKind::Word
|
2017-03-03 09:23:59 +00:00
|
|
|
})
|
2012-04-20 03:51:31 +00:00
|
|
|
}
|
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Matches `meta_item_inner : (meta_item | UNSUFFIXED_LIT) ;`.
    fn parse_meta_item_inner(&mut self) -> PResult<'a, ast::NestedMetaItem> {
        // Try a literal first; on failure, cancel the error and fall through.
        match self.parse_unsuffixed_lit() {
            Ok(lit) => return Ok(ast::NestedMetaItem::Literal(lit)),
            Err(err) => err.cancel(),
        }

        // Then try a full meta item.
        match self.parse_meta_item() {
            Ok(mi) => return Ok(ast::NestedMetaItem::MetaItem(mi)),
            Err(err) => err.cancel(),
        }

        // Neither parse succeeded: report against the current token.
        let found = pprust::token_to_string(&self.token);
        let msg = format!("expected unsuffixed literal or identifier, found `{found}`");
        Err(self.struct_span_err(self.token.span, &msg))
    }
|
2012-04-20 03:51:31 +00:00
|
|
|
}
|
Rewrite `collect_tokens` implementations to use a flattened buffer
Instead of trying to collect tokens at each depth, we 'flatten' the
stream as we go allong, pushing open/close delimiters to our buffer
just like regular tokens. One capturing is complete, we reconstruct a
nested `TokenTree::Delimited` structure, producing a normal
`TokenStream`.
The reconstructed `TokenStream` is not created immediately - instead, it is
produced on-demand by a closure (wrapped in a new `LazyTokenStream` type). This
closure stores a clone of the original `TokenCursor`, plus a record of the
number of calls to `next()/next_desugared()`. This is sufficient to reconstruct
the tokenstream seen by the callback without storing any additional state. If
the tokenstream is never used (e.g. when a captured `macro_rules!` argument is
never passed to a proc macro), we never actually create a `TokenStream`.
This implementation has a number of advantages over the previous one:
* It is significantly simpler, with no edge cases around capturing the
start/end of a delimited group.
* It can be easily extended to allow replacing tokens an an arbitrary
'depth' by just using `Vec::splice` at the proper position. This is
important for PR #76130, which requires us to track information about
attributes along with tokens.
* The lazy approach to `TokenStream` construction allows us to easily
parse an AST struct, and then decide after the fact whether we need a
`TokenStream`. This will be useful when we start collecting tokens for
`Attribute` - we can discard the `LazyTokenStream` if the parsed
attribute doesn't need tokens (e.g. is a builtin attribute).
The performance impact seems to be neglibile (see
https://github.com/rust-lang/rust/pull/77250#issuecomment-703960604). There is a
small slowdown on a few benchmarks, but it only rises above 1% for incremental
builds, where it represents a larger fraction of the much smaller instruction
count. There a ~1% speedup on a few other incremental benchmarks - my guess is
that the speedups and slowdowns will usually cancel out in practice.
2020-09-27 01:56:29 +00:00
|
|
|
|
|
|
|
pub fn maybe_needs_tokens(attrs: &[ast::Attribute]) -> bool {
|
2020-11-14 11:47:14 +00:00
|
|
|
// One of the attributes may either itself be a macro,
|
2020-11-18 22:50:16 +00:00
|
|
|
// or expand to macro attributes (`cfg_attr`).
|
Rewrite `collect_tokens` implementations to use a flattened buffer
Instead of trying to collect tokens at each depth, we 'flatten' the
stream as we go allong, pushing open/close delimiters to our buffer
just like regular tokens. One capturing is complete, we reconstruct a
nested `TokenTree::Delimited` structure, producing a normal
`TokenStream`.
The reconstructed `TokenStream` is not created immediately - instead, it is
produced on-demand by a closure (wrapped in a new `LazyTokenStream` type). This
closure stores a clone of the original `TokenCursor`, plus a record of the
number of calls to `next()/next_desugared()`. This is sufficient to reconstruct
the tokenstream seen by the callback without storing any additional state. If
the tokenstream is never used (e.g. when a captured `macro_rules!` argument is
never passed to a proc macro), we never actually create a `TokenStream`.
This implementation has a number of advantages over the previous one:
* It is significantly simpler, with no edge cases around capturing the
start/end of a delimited group.
* It can be easily extended to allow replacing tokens an an arbitrary
'depth' by just using `Vec::splice` at the proper position. This is
important for PR #76130, which requires us to track information about
attributes along with tokens.
* The lazy approach to `TokenStream` construction allows us to easily
parse an AST struct, and then decide after the fact whether we need a
`TokenStream`. This will be useful when we start collecting tokens for
`Attribute` - we can discard the `LazyTokenStream` if the parsed
attribute doesn't need tokens (e.g. is a builtin attribute).
The performance impact seems to be neglibile (see
https://github.com/rust-lang/rust/pull/77250#issuecomment-703960604). There is a
small slowdown on a few benchmarks, but it only rises above 1% for incremental
builds, where it represents a larger fraction of the much smaller instruction
count. There a ~1% speedup on a few other incremental benchmarks - my guess is
that the speedups and slowdowns will usually cancel out in practice.
2020-09-27 01:56:29 +00:00
|
|
|
attrs.iter().any(|attr| {
|
2020-11-28 23:33:17 +00:00
|
|
|
if attr.is_doc_comment() {
|
|
|
|
return false;
|
|
|
|
}
|
2020-11-18 22:50:16 +00:00
|
|
|
attr.ident().map_or(true, |ident| {
|
2020-11-14 11:47:14 +00:00
|
|
|
ident.name == sym::cfg_attr || !rustc_feature::is_builtin_attr_name(ident.name)
|
2020-11-18 22:50:16 +00:00
|
|
|
})
|
Rewrite `collect_tokens` implementations to use a flattened buffer
Instead of trying to collect tokens at each depth, we 'flatten' the
stream as we go allong, pushing open/close delimiters to our buffer
just like regular tokens. One capturing is complete, we reconstruct a
nested `TokenTree::Delimited` structure, producing a normal
`TokenStream`.
The reconstructed `TokenStream` is not created immediately - instead, it is
produced on-demand by a closure (wrapped in a new `LazyTokenStream` type). This
closure stores a clone of the original `TokenCursor`, plus a record of the
number of calls to `next()/next_desugared()`. This is sufficient to reconstruct
the tokenstream seen by the callback without storing any additional state. If
the tokenstream is never used (e.g. when a captured `macro_rules!` argument is
never passed to a proc macro), we never actually create a `TokenStream`.
This implementation has a number of advantages over the previous one:
* It is significantly simpler, with no edge cases around capturing the
start/end of a delimited group.
* It can be easily extended to allow replacing tokens an an arbitrary
'depth' by just using `Vec::splice` at the proper position. This is
important for PR #76130, which requires us to track information about
attributes along with tokens.
* The lazy approach to `TokenStream` construction allows us to easily
parse an AST struct, and then decide after the fact whether we need a
`TokenStream`. This will be useful when we start collecting tokens for
`Attribute` - we can discard the `LazyTokenStream` if the parsed
attribute doesn't need tokens (e.g. is a builtin attribute).
The performance impact seems to be neglibile (see
https://github.com/rust-lang/rust/pull/77250#issuecomment-703960604). There is a
small slowdown on a few benchmarks, but it only rises above 1% for incremental
builds, where it represents a larger fraction of the much smaller instruction
count. There a ~1% speedup on a few other incremental benchmarks - my guess is
that the speedups and slowdowns will usually cancel out in practice.
2020-09-27 01:56:29 +00:00
|
|
|
})
|
|
|
|
}
|