use super::{Parser, PathStyle};
use rustc_ast as ast;
use rustc_ast::attr;
use rustc_ast::token::{self, Nonterminal};
use rustc_ast_pretty::pprust;
use rustc_errors::{error_code, PResult};
use rustc_span::{sym, Span};
use tracing::debug;

#[derive(Debug)]
pub(super) enum InnerAttrPolicy<'a> {
    Permitted,
    Forbidden { reason: &'a str, saw_doc_comment: bool, prev_attr_sp: Option<Span> },
}

const DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG: &str = "an inner attribute is not \
                                                     permitted in this context";

pub(super) const DEFAULT_INNER_ATTR_FORBIDDEN: InnerAttrPolicy<'_> = InnerAttrPolicy::Forbidden {
    reason: DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG,
    saw_doc_comment: false,
    prev_attr_sp: None,
};

impl<'a> Parser<'a> {
    /// Parses attributes that appear before an item.
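    ///
    /// For example (illustrative), given `#[inline] #[must_use] fn f() {}`, this
    /// consumes `#[inline]` and `#[must_use]` and stops at `fn`; outer doc
    /// comments (`/// ...`) are collected here as attributes too.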
    pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, Vec<ast::Attribute>> {
        let mut attrs: Vec<ast::Attribute> = Vec::new();
        let mut just_parsed_doc_comment = false;
        loop {
            debug!("parse_outer_attributes: self.token={:?}", self.token);
            let attr = if self.check(&token::Pound) {
                let inner_error_reason = if just_parsed_doc_comment {
                    "an inner attribute is not permitted following an outer doc comment"
                } else if !attrs.is_empty() {
                    "an inner attribute is not permitted following an outer attribute"
                } else {
                    DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG
                };
                let inner_parse_policy = InnerAttrPolicy::Forbidden {
                    reason: inner_error_reason,
                    saw_doc_comment: just_parsed_doc_comment,
                    prev_attr_sp: attrs.last().map(|a| a.span),
                };
                just_parsed_doc_comment = false;
                Some(self.parse_attribute(inner_parse_policy)?)
            } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
                if attr_style != ast::AttrStyle::Outer {
                    self.sess
                        .span_diagnostic
                        .struct_span_err_with_code(
                            self.token.span,
                            "expected outer doc comment",
                            error_code!(E0753),
                        )
                        .note(
                            "inner doc comments like this (starting with \
                             `//!` or `/*!`) can only appear before items",
                        )
                        .emit();
                }
                self.bump();
                just_parsed_doc_comment = true;
                Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span))
            } else {
                None
            };

            if let Some(attr) = attr {
                attrs.push(attr);
            } else {
                break;
            }
        }
        Ok(attrs)
    }

    /// Matches `attribute = # "!"? [ meta_item ]`, where the optional `!` marks
    /// an inner attribute.
    /// `inner_parse_policy` prescribes how to handle inner attributes.
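    ///
    /// For example (illustrative), `#[derive(Debug)]` parses as an outer
    /// attribute, while `#![allow(unused)]` parses as an inner one; eating the
    /// leading `!` is what selects `ast::AttrStyle::Inner` below.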
    fn parse_attribute(
        &mut self,
        inner_parse_policy: InnerAttrPolicy<'_>,
    ) -> PResult<'a, ast::Attribute> {
        debug!(
            "parse_attribute: inner_parse_policy={:?} self.token={:?}",
            inner_parse_policy, self.token
        );
        let lo = self.token.span;
        let ((item, style, span), tokens) = self.collect_tokens(|this| {
            if this.eat(&token::Pound) {
                let style = if this.eat(&token::Not) {
                    ast::AttrStyle::Inner
                } else {
                    ast::AttrStyle::Outer
                };

                this.expect(&token::OpenDelim(token::Bracket))?;
                let item = this.parse_attr_item(false)?;
                this.expect(&token::CloseDelim(token::Bracket))?;
                let attr_sp = lo.to(this.prev_token.span);

                // Emit an error if an inner attribute is encountered where it is forbidden.
                if style == ast::AttrStyle::Inner {
                    this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
                }

                Ok((item, style, attr_sp))
            } else {
                let token_str = pprust::token_to_string(&this.token);
                let msg = &format!("expected `#`, found `{}`", token_str);
                Err(this.struct_span_err(this.token.span, msg))
            }
        })?;

        Ok(attr::mk_attr_from_item(item, tokens, style, span))
    }

    pub(super) fn error_on_forbidden_inner_attr(&self, attr_sp: Span, policy: InnerAttrPolicy<'_>) {
        if let InnerAttrPolicy::Forbidden { reason, saw_doc_comment, prev_attr_sp } = policy {
            let prev_attr_note =
                if saw_doc_comment { "previous doc comment" } else { "previous outer attribute" };

            let mut diag = self.struct_span_err(attr_sp, reason);

            if let Some(prev_attr_sp) = prev_attr_sp {
                diag.span_label(attr_sp, "not permitted following an outer attribute")
                    .span_label(prev_attr_sp, prev_attr_note);
            }

            diag.note(
                "inner attributes, like `#![no_std]`, annotate the item enclosing them, \
                 and are usually found at the beginning of source files. \
                 Outer attributes, like `#[test]`, annotate the item following them.",
            )
            .emit();
        }
    }

    /// Parses an inner part of an attribute (the path and following tokens).
    /// The tokens must be either a delimited token stream, an empty token stream,
    /// or the "legacy" key-value form:
    ///     PATH `(` TOKEN_STREAM `)`
    ///     PATH `[` TOKEN_STREAM `]`
    ///     PATH `{` TOKEN_STREAM `}`
    ///     PATH
    ///     PATH `=` UNSUFFIXED_LIT
    /// The delimiters or `=` are still put into the resulting token stream.
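    ///
    /// Illustrative examples of the forms above:
    ///     `derive(Debug)`          PATH `(` TOKEN_STREAM `)`
    ///     `test`                   PATH
    ///     `path = "sys/unix.rs"`   PATH `=` UNSUFFIXED_LIT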
    pub fn parse_attr_item(&mut self, capture_tokens: bool) -> PResult<'a, ast::AttrItem> {
        let item = match self.token.kind {
            token::Interpolated(ref nt) => match **nt {
                Nonterminal::NtMeta(ref item) => Some(item.clone().into_inner()),
                _ => None,
            },
            _ => None,
        };
        Ok(if let Some(item) = item {
            self.bump();
            item
        } else {
            let do_parse = |this: &mut Self| {
                let path = this.parse_path(PathStyle::Mod)?;
                let args = this.parse_attr_args()?;
                Ok(ast::AttrItem { path, args, tokens: None })
            };
            if capture_tokens {
                let (mut item, tokens) = self.collect_tokens(do_parse)?;
                item.tokens = tokens;
                item
            } else {
                do_parse(self)?
            }
        })
    }

    /// Parses attributes that appear after the opening of an item. These should
    /// be preceded by an exclamation mark, but we accept and warn about one
    /// terminated by a semicolon.
    ///
    /// Matches `inner_attrs*`.
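    ///
    /// For example (illustrative), at the top of a module body this collects
    /// `#![allow(dead_code)]` and inner doc comments such as `//! Module docs.`.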
    crate fn parse_inner_attributes(&mut self) -> PResult<'a, Vec<ast::Attribute>> {
        let mut attrs: Vec<ast::Attribute> = vec![];
        loop {
            // Only try to parse if it is an inner attribute (has `!`).
            let attr = if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) {
                Some(self.parse_attribute(InnerAttrPolicy::Permitted)?)
            } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
                if attr_style == ast::AttrStyle::Inner {
                    self.bump();
                    Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span))
                } else {
                    None
                }
            } else {
                None
            };
            if let Some(attr) = attr {
                attrs.push(attr);
            } else {
                break;
            }
        }
        Ok(attrs)
    }

    crate fn parse_unsuffixed_lit(&mut self) -> PResult<'a, ast::Lit> {
        let lit = self.parse_lit()?;
        debug!("checking if {:?} is unsuffixed", lit);

        if !lit.kind.is_unsuffixed() {
            self.struct_span_err(lit.span, "suffixed literals are not allowed in attributes")
                .help(
                    "instead of using a suffixed literal (`1u8`, `1.0f32`, etc.), \
                     use an unsuffixed version (`1`, `1.0`, etc.)",
                )
                .emit();
        }

        Ok(lit)
    }

    /// Parses `cfg_attr(pred, attr_item_list)` where `attr_item_list` is comma-delimited.
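    ///
    /// For example (illustrative), given `#[cfg_attr(unix, inline, must_use)]`,
    /// this is invoked on `unix, inline, must_use` and returns the `unix`
    /// predicate together with the `inline` and `must_use` attr items and spans.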
    pub fn parse_cfg_attr(&mut self) -> PResult<'a, (ast::MetaItem, Vec<(ast::AttrItem, Span)>)> {
        let cfg_predicate = self.parse_meta_item()?;
        self.expect(&token::Comma)?;

        // Presumably, the majority of the time there will only be one attr.
        let mut expanded_attrs = Vec::with_capacity(1);
        while self.token.kind != token::Eof {
            let lo = self.token.span;
            let item = self.parse_attr_item(true)?;
            expanded_attrs.push((item, lo.to(self.prev_token.span)));
            if !self.eat(&token::Comma) {
                break;
            }
        }

        Ok((cfg_predicate, expanded_attrs))
    }

    /// Matches `COMMASEP(meta_item_inner)`.
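    ///
    /// For example (illustrative), parsing `Clone, Copy` (as in the contents of
    /// `#[derive(Clone, Copy)]`) yields two nested meta items.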
    crate fn parse_meta_seq_top(&mut self) -> PResult<'a, Vec<ast::NestedMetaItem>> {
        // Presumably, the majority of the time there will only be one attr.
        let mut nmis = Vec::with_capacity(1);
        while self.token.kind != token::Eof {
            nmis.push(self.parse_meta_item_inner()?);
            if !self.eat(&token::Comma) {
                break;
            }
        }
        Ok(nmis)
    }

    /// Matches the following grammar (per RFC 1559):
    ///
    ///     meta_item : PATH ( '=' UNSUFFIXED_LIT | '(' meta_item_inner? ')' )? ;
    ///     meta_item_inner : (meta_item | UNSUFFIXED_LIT) (',' meta_item_inner)? ;
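    ///
    /// Illustrative matches: `unix` (a bare PATH), `feature = "serde"`
    /// (PATH `=` UNSUFFIXED_LIT), and `all(unix, target_pointer_width = "64")`
    /// (PATH followed by a parenthesized `meta_item_inner` list).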
    pub fn parse_meta_item(&mut self) -> PResult<'a, ast::MetaItem> {
        let nt_meta = match self.token.kind {
            token::Interpolated(ref nt) => match **nt {
                token::NtMeta(ref e) => Some(e.clone()),
                _ => None,
            },
            _ => None,
        };

        if let Some(item) = nt_meta {
            return match item.meta(item.path.span) {
                Some(meta) => {
                    self.bump();
                    Ok(meta)
                }
                None => self.unexpected(),
            };
        }

        let lo = self.token.span;
        let path = self.parse_path(PathStyle::Mod)?;
        let kind = self.parse_meta_item_kind()?;
        let span = lo.to(self.prev_token.span);
        Ok(ast::MetaItem { path, kind, span })
    }

    crate fn parse_meta_item_kind(&mut self) -> PResult<'a, ast::MetaItemKind> {
        Ok(if self.eat(&token::Eq) {
            ast::MetaItemKind::NameValue(self.parse_unsuffixed_lit()?)
        } else if self.check(&token::OpenDelim(token::Paren)) {
            // Matches `meta_seq = ( COMMASEP(meta_item_inner) )`.
            let (list, _) = self.parse_paren_comma_seq(|p| p.parse_meta_item_inner())?;
            ast::MetaItemKind::List(list)
        } else {
            ast::MetaItemKind::Word
        })
    }

    /// Matches `meta_item_inner : (meta_item | UNSUFFIXED_LIT) ;`.
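    ///
    /// For example (illustrative), `hidden` in `#[doc(hidden)]` is parsed as a
    /// nested meta item, while `"lit"` in `#[attr("lit")]` is parsed as an
    /// unsuffixed literal.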
    fn parse_meta_item_inner(&mut self) -> PResult<'a, ast::NestedMetaItem> {
        match self.parse_unsuffixed_lit() {
            Ok(lit) => return Ok(ast::NestedMetaItem::Literal(lit)),
            Err(ref mut err) => err.cancel(),
        }

        match self.parse_meta_item() {
            Ok(mi) => return Ok(ast::NestedMetaItem::MetaItem(mi)),
            Err(ref mut err) => err.cancel(),
        }

        let found = pprust::token_to_string(&self.token);
        let msg = format!("expected unsuffixed literal or identifier, found `{}`", found);
        Err(self.struct_span_err(self.token.span, &msg))
    }
}

pub fn maybe_needs_tokens(attrs: &[ast::Attribute]) -> bool {
    // One of the attributes may either itself be a macro, apply derive macros
    // (`derive`), or expand to macro attributes (`cfg_attr`).
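    // For example (illustrative): `#[derive(Clone)]`, `#[cfg_attr(unix, inline)]`,
    // and a non-builtin `#[my_proc_macro_attr]` all make this return `true`,
    // while a builtin attribute like `#[inline]` alone does not.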
    attrs.iter().any(|attr| {
        attr.ident().map_or(true, |ident| {
            ident.name == sym::derive
                || ident.name == sym::cfg_attr
                || !rustc_feature::is_builtin_attr_name(ident.name)
        })
    })
}