2022-09-01 17:29:23 +00:00
|
|
|
use crate::errors::{InvalidMetaItem, SuffixedLiteralInAttribute};
|
|
|
|
|
2021-12-04 18:05:30 +00:00
|
|
|
use super::{AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, PathStyle};
|
2020-04-27 17:56:11 +00:00
|
|
|
use rustc_ast as ast;
|
2020-02-29 17:37:32 +00:00
|
|
|
use rustc_ast::attr;
|
2022-04-26 12:40:14 +00:00
|
|
|
use rustc_ast::token::{self, Delimiter, Nonterminal};
|
2022-08-31 11:20:59 +00:00
|
|
|
use rustc_errors::{error_code, fluent, Diagnostic, IntoDiagnostic, PResult};
|
2021-08-23 10:49:31 +00:00
|
|
|
use rustc_span::{sym, BytePos, Span};
|
2020-11-28 23:33:17 +00:00
|
|
|
use std::convert::TryInto;
|
2019-10-11 11:06:36 +00:00
|
|
|
|
2020-11-26 01:54:08 +00:00
|
|
|
// Public for rustfmt usage
#[derive(Debug)]
pub enum InnerAttrPolicy {
    /// Inner attributes (`#![...]`) are allowed at the current parse position.
    Permitted,
    /// Inner attributes are rejected at the current parse position. The
    /// optional reason refines the diagnostic produced by
    /// `Parser::error_on_forbidden_inner_attr`.
    Forbidden(Option<InnerAttrForbiddenReason>),
}
|
|
|
|
|
2022-08-31 11:20:59 +00:00
|
|
|
/// Why an inner attribute is forbidden at the current position. The recorded
/// spans let the diagnostic point at the construct that made it invalid.
#[derive(Clone, Copy, Debug)]
pub enum InnerAttrForbiddenReason {
    /// The attribute appeared inside a code block.
    InCodeBlock,
    /// An outer doc comment has already been parsed; inner attributes may not
    /// follow it. `prev_doc_comment_span` is that doc comment's span.
    AfterOuterDocComment { prev_doc_comment_span: Span },
    /// An outer attribute has already been parsed; inner attributes may not
    /// follow it. `prev_outer_attr_sp` is that attribute's span.
    AfterOuterAttribute { prev_outer_attr_sp: Span },
}
|
2020-03-05 10:42:56 +00:00
|
|
|
|
2021-08-23 10:49:31 +00:00
|
|
|
/// The surface syntax of a misplaced attribute, used by
/// `annotate_following_item_if_applicable` to compute the byte offset of the
/// character that must change in the inner -> outer rewrite suggestion.
enum OuterAttributeType {
    /// A line doc comment, e.g. `//! ...`.
    DocComment,
    /// A block doc comment, e.g. `/*! ... */`.
    DocBlockComment,
    /// A regular attribute, e.g. `#![...]`.
    Attribute,
}
|
|
|
|
|
2015-10-24 01:37:21 +00:00
|
|
|
impl<'a> Parser<'a> {
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Parses attributes that appear before an item.
    ///
    /// Loops over `#`-attributes and doc comments, collecting the outer ones
    /// into an `AttrWrapper`. Misplaced inner attributes/doc comments are
    /// diagnosed here but parsing recovers and continues.
    pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, AttrWrapper> {
        let mut outer_attrs = ast::AttrVec::new();
        let mut just_parsed_doc_comment = false;
        // Token-cursor position at the start, recorded in the returned
        // `AttrWrapper` for later token capturing.
        let start_pos = self.token_cursor.num_next_calls;
        loop {
            let attr = if self.check(&token::Pound) {
                let prev_outer_attr_sp = outer_attrs.last().map(|attr| attr.span);

                // Decide up front why an inner attribute would be forbidden
                // here, so `parse_attribute` can emit a targeted diagnostic.
                let inner_error_reason = if just_parsed_doc_comment {
                    Some(InnerAttrForbiddenReason::AfterOuterDocComment {
                        // `just_parsed_doc_comment` implies the previous
                        // iteration pushed an outer doc-comment attribute, so
                        // `last()` above was `Some` and this `unwrap` is safe.
                        prev_doc_comment_span: prev_outer_attr_sp.unwrap(),
                    })
                } else if let Some(prev_outer_attr_sp) = prev_outer_attr_sp {
                    Some(InnerAttrForbiddenReason::AfterOuterAttribute { prev_outer_attr_sp })
                } else {
                    None
                };
                let inner_parse_policy = InnerAttrPolicy::Forbidden(inner_error_reason);
                just_parsed_doc_comment = false;
                Some(self.parse_attribute(inner_parse_policy)?)
            } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
                if attr_style != ast::AttrStyle::Outer {
                    // An inner doc comment (`//!` / `/*!`) in outer position:
                    // emit E0753, with a machine-applicable fix-it when we can
                    // tell which item the comment was presumably meant for.
                    let span = self.token.span;
                    let mut err = self.sess.span_diagnostic.struct_span_err_with_code(
                        span,
                        fluent::parser_inner_doc_comment_not_permitted,
                        error_code!(E0753),
                    );
                    if let Some(replacement_span) = self.annotate_following_item_if_applicable(
                        &mut err,
                        span,
                        match comment_kind {
                            token::CommentKind::Line => OuterAttributeType::DocComment,
                            token::CommentKind::Block => OuterAttributeType::DocBlockComment,
                        },
                    ) {
                        err.note(fluent::note);
                        err.span_suggestion_verbose(
                            replacement_span,
                            fluent::suggestion,
                            "",
                            rustc_errors::Applicability::MachineApplicable,
                        );
                    }
                    err.emit();
                }
                self.bump();
                just_parsed_doc_comment = true;
                // Always make an outer attribute - this allows us to recover from a misplaced
                // inner attribute.
                Some(attr::mk_doc_comment(
                    &self.sess.attr_id_generator,
                    comment_kind,
                    ast::AttrStyle::Outer,
                    data,
                    self.prev_token.span,
                ))
            } else {
                None
            };

            if let Some(attr) = attr {
                // Only outer attributes are collected; `parse_attribute` may
                // return an (already diagnosed) inner attribute, which is dropped.
                if attr.style == ast::AttrStyle::Outer {
                    outer_attrs.push(attr);
                }
            } else {
                break;
            }
        }
        Ok(AttrWrapper::new(outer_attrs, start_pos))
    }
|
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
/// Matches `attribute = # ! [ meta_item ]`.
|
2020-11-05 17:27:48 +00:00
|
|
|
/// `inner_parse_policy` prescribes how to handle inner attributes.
|
2020-11-26 01:54:08 +00:00
|
|
|
// Public for rustfmt usage.
|
|
|
|
pub fn parse_attribute(
|
2019-10-08 07:35:34 +00:00
|
|
|
&mut self,
|
2022-08-31 11:20:59 +00:00
|
|
|
inner_parse_policy: InnerAttrPolicy,
|
2019-10-08 07:35:34 +00:00
|
|
|
) -> PResult<'a, ast::Attribute> {
|
2016-07-06 04:35:12 +00:00
|
|
|
debug!(
|
2020-11-05 17:27:48 +00:00
|
|
|
"parse_attribute: inner_parse_policy={:?} self.token={:?}",
|
2016-07-06 04:35:12 +00:00
|
|
|
inner_parse_policy, self.token
|
|
|
|
);
|
2020-03-05 10:42:56 +00:00
|
|
|
let lo = self.token.span;
|
2021-08-23 10:49:31 +00:00
|
|
|
// Attributes can't have attributes of their own [Editor's note: not with that attitude]
|
2021-01-22 18:28:08 +00:00
|
|
|
self.collect_tokens_no_attrs(|this| {
|
2022-09-01 17:29:59 +00:00
|
|
|
assert!(this.eat(&token::Pound), "parse_attribute called in non-attribute position");
|
2014-02-25 02:42:40 +00:00
|
|
|
|
2022-09-01 17:29:59 +00:00
|
|
|
let style =
|
|
|
|
if this.eat(&token::Not) { ast::AttrStyle::Inner } else { ast::AttrStyle::Outer };
|
2020-11-05 17:27:48 +00:00
|
|
|
|
2022-09-01 17:29:59 +00:00
|
|
|
this.expect(&token::OpenDelim(Delimiter::Bracket))?;
|
|
|
|
let item = this.parse_attr_item(false)?;
|
|
|
|
this.expect(&token::CloseDelim(Delimiter::Bracket))?;
|
|
|
|
let attr_sp = lo.to(this.prev_token.span);
|
2020-11-05 17:27:48 +00:00
|
|
|
|
2022-09-01 17:29:59 +00:00
|
|
|
// Emit error if inner attribute is encountered and forbidden.
|
|
|
|
if style == ast::AttrStyle::Inner {
|
|
|
|
this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
|
2020-11-05 17:27:48 +00:00
|
|
|
}
|
2022-09-01 17:29:59 +00:00
|
|
|
|
|
|
|
Ok(attr::mk_attr_from_item(&self.sess.attr_id_generator, item, None, style, attr_sp))
|
2021-01-13 21:28:57 +00:00
|
|
|
})
|
2020-03-05 10:42:56 +00:00
|
|
|
}
|
2014-02-25 02:42:40 +00:00
|
|
|
|
2021-08-23 10:49:31 +00:00
|
|
|
    /// After a misplaced inner attribute/doc comment at `span` has been
    /// diagnosed, speculatively parses the item that follows it (on a parser
    /// snapshot, so the real parser state is untouched) and, on success,
    /// attaches an item-specific label plus an inner -> outer fix-it to `err`.
    ///
    /// Returns `Some(replacement_span)` — the span of the single character the
    /// caller should rewrite in its own generic suggestion — when no
    /// item-specific suggestion could be attached here, and `None` when one was.
    fn annotate_following_item_if_applicable(
        &self,
        err: &mut Diagnostic,
        span: Span,
        attr_type: OuterAttributeType,
    ) -> Option<Span> {
        let mut snapshot = self.create_snapshot_for_diagnostic();
        // Offset of the `!` that makes the attribute "inner": index 1 in
        // `#![...]`, index 2 in `//!` and `/*!`.
        let lo = span.lo()
            + BytePos(match attr_type {
                OuterAttributeType::Attribute => 1,
                _ => 2,
            });
        let hi = lo + BytePos(1);
        let replacement_span = span.with_lo(lo).with_hi(hi);
        // For doc comments the offending token has not been consumed yet;
        // step past it before probing for the item.
        if let OuterAttributeType::DocBlockComment | OuterAttributeType::DocComment = attr_type {
            snapshot.bump();
        }
        loop {
            // skip any other attributes, we want the item
            if snapshot.token.kind == token::Pound {
                if let Err(err) = snapshot.parse_attribute(InnerAttrPolicy::Permitted) {
                    // Diagnostics from speculative parsing must be cancelled,
                    // not emitted.
                    err.cancel();
                    return Some(replacement_span);
                }
            } else {
                break;
            }
        }
        match snapshot.parse_item_common(
            AttrWrapper::empty(),
            true,
            false,
            FnParseMode { req_name: |_| true, req_body: true },
            ForceCollect::No,
        ) {
            Ok(Some(item)) => {
                // FIXME(#100717)
                err.set_arg("item", item.kind.descr());
                err.span_label(item.span, fluent::label_does_not_annotate_this);
                err.span_suggestion_verbose(
                    replacement_span,
                    fluent::sugg_change_inner_to_outer,
                    // Replacement for the `!`: deleted for `#!`, `*` for `/*!`,
                    // `/` for `//!`.
                    match attr_type {
                        OuterAttributeType::Attribute => "",
                        OuterAttributeType::DocBlockComment => "*",
                        OuterAttributeType::DocComment => "/",
                    },
                    rustc_errors::Applicability::MachineApplicable,
                );
                return None;
            }
            Err(item_err) => {
                item_err.cancel();
            }
            Ok(None) => {}
        }
        Some(replacement_span)
    }
|
|
|
|
|
2022-08-31 11:20:59 +00:00
|
|
|
    /// Emits a diagnostic (without returning an error) when `policy` forbids an
    /// inner attribute at `attr_sp`. Does nothing under
    /// `InnerAttrPolicy::Permitted`.
    pub(super) fn error_on_forbidden_inner_attr(&self, attr_sp: Span, policy: InnerAttrPolicy) {
        if let InnerAttrPolicy::Forbidden(reason) = policy {
            // Pick the primary message (and extra labels) based on why the
            // attribute is forbidden here.
            let mut diag = match reason.as_ref().copied() {
                Some(InnerAttrForbiddenReason::AfterOuterDocComment { prev_doc_comment_span }) => {
                    let mut diag = self.struct_span_err(
                        attr_sp,
                        fluent::parser_inner_attr_not_permitted_after_outer_doc_comment,
                    );
                    diag.span_label(attr_sp, fluent::label_attr)
                        .span_label(prev_doc_comment_span, fluent::label_prev_doc_comment);
                    diag
                }
                Some(InnerAttrForbiddenReason::AfterOuterAttribute { prev_outer_attr_sp }) => {
                    let mut diag = self.struct_span_err(
                        attr_sp,
                        fluent::parser_inner_attr_not_permitted_after_outer_attr,
                    );
                    diag.span_label(attr_sp, fluent::label_attr)
                        .span_label(prev_outer_attr_sp, fluent::label_prev_attr);
                    diag
                }
                Some(InnerAttrForbiddenReason::InCodeBlock) | None => {
                    self.struct_span_err(attr_sp, fluent::parser_inner_attr_not_permitted)
                }
            };

            diag.note(fluent::parser_inner_attr_explanation);
            // Try to point at the following item; if that helper could not
            // attach an item-specific suggestion (it returned `Some`), add the
            // generic explanation of outer attributes instead.
            if self
                .annotate_following_item_if_applicable(
                    &mut diag,
                    attr_sp,
                    OuterAttributeType::Attribute,
                )
                .is_some()
            {
                diag.note(fluent::parser_outer_attr_explanation);
            };
            diag.emit();
        }
    }
|
2012-04-20 03:51:31 +00:00
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Parses an inner part of an attribute (the path and following tokens).
    /// The tokens must be either a delimited token stream, or empty token stream,
    /// or the "legacy" key-value form.
    ///     PATH `(` TOKEN_STREAM `)`
    ///     PATH `[` TOKEN_STREAM `]`
    ///     PATH `{` TOKEN_STREAM `}`
    ///     PATH
    ///     PATH `=` UNSUFFIXED_LIT
    /// The delimiters or `=` are still put into the resulting token stream.
    ///
    /// If `capture_tokens` is set, the parsed item's `tokens` are collected via
    /// `collect_tokens_no_attrs`.
    pub fn parse_attr_item(&mut self, capture_tokens: bool) -> PResult<'a, ast::AttrItem> {
        // An interpolated `NtMeta` token (a `meta` macro fragment) already
        // holds a parsed attr item; reuse it instead of re-parsing.
        let item = match self.token.kind {
            token::Interpolated(ref nt) => match **nt {
                Nonterminal::NtMeta(ref item) => Some(item.clone().into_inner()),
                _ => None,
            },
            _ => None,
        };
        Ok(if let Some(item) = item {
            self.bump();
            item
        } else {
            let do_parse = |this: &mut Self| {
                let path = this.parse_path(PathStyle::Mod)?;
                let args = this.parse_attr_args()?;
                Ok(ast::AttrItem { path, args, tokens: None })
            };
            // Attr items don't have attributes
            if capture_tokens { self.collect_tokens_no_attrs(do_parse) } else { do_parse(self) }?
        })
    }
|
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Parses attributes that appear after the opening of an item. These should
    /// be preceded by an exclamation mark, but we accept and warn about one
    /// terminated by a semicolon.
    ///
    /// Matches `inner_attrs*`.
    pub(crate) fn parse_inner_attributes(&mut self) -> PResult<'a, ast::AttrVec> {
        let mut attrs = ast::AttrVec::new();
        loop {
            // Token-cursor position before this attribute; together with
            // `end_pos` below it delimits the token range the attribute spans.
            let start_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
            // Only try to parse if it is an inner attribute (has `!`).
            let attr = if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) {
                Some(self.parse_attribute(InnerAttrPolicy::Permitted)?)
            } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
                if attr_style == ast::AttrStyle::Inner {
                    self.bump();
                    Some(attr::mk_doc_comment(
                        &self.sess.attr_id_generator,
                        comment_kind,
                        attr_style,
                        data,
                        self.prev_token.span,
                    ))
                } else {
                    None
                }
            } else {
                None
            };
            if let Some(attr) = attr {
                let end_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
                // If we are currently capturing tokens, mark the location of this inner attribute.
                // If capturing ends up creating a `LazyAttrTokenStream`, we will include
                // this replace range with it, removing the inner attribute from the final
                // `AttrTokenStream`. Inner attributes are stored in the parsed AST note.
                // During macro expansion, they are selectively inserted back into the
                // token stream (the first inner attribute is removed each time we invoke the
                // corresponding macro).
                let range = start_pos..end_pos;
                if let Capturing::Yes = self.capture_state.capturing {
                    self.capture_state.inner_attr_ranges.insert(attr.id, (range, vec![]));
                }
                attrs.push(attr);
            } else {
                break;
            }
        }
        Ok(attrs)
    }
|
|
|
|
|
2022-05-20 23:51:09 +00:00
|
|
|
pub(crate) fn parse_unsuffixed_lit(&mut self) -> PResult<'a, ast::Lit> {
|
2022-10-10 02:40:56 +00:00
|
|
|
let lit = self.parse_ast_lit()?;
|
|
|
|
debug!("checking if {:?} is unsuffixed", lit);
|
2016-08-20 01:58:14 +00:00
|
|
|
|
2019-09-26 15:56:53 +00:00
|
|
|
if !lit.kind.is_unsuffixed() {
|
2022-09-01 17:29:23 +00:00
|
|
|
self.sess.emit_err(SuffixedLiteralInAttribute { span: lit.span });
|
2016-08-20 01:58:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(lit)
|
|
|
|
}
|
|
|
|
|
2019-10-08 07:14:07 +00:00
|
|
|
/// Parses `cfg_attr(pred, attr_item_list)` where `attr_item_list` is comma-delimited.
|
2019-10-10 08:26:10 +00:00
|
|
|
pub fn parse_cfg_attr(&mut self) -> PResult<'a, (ast::MetaItem, Vec<(ast::AttrItem, Span)>)> {
|
2019-10-08 07:14:07 +00:00
|
|
|
let cfg_predicate = self.parse_meta_item()?;
|
|
|
|
self.expect(&token::Comma)?;
|
|
|
|
|
|
|
|
// Presumably, the majority of the time there will only be one attr.
|
|
|
|
let mut expanded_attrs = Vec::with_capacity(1);
|
2019-12-05 05:45:50 +00:00
|
|
|
while self.token.kind != token::Eof {
|
|
|
|
let lo = self.token.span;
|
2020-09-26 23:33:42 +00:00
|
|
|
let item = self.parse_attr_item(true)?;
|
2020-02-29 11:56:15 +00:00
|
|
|
expanded_attrs.push((item, lo.to(self.prev_token.span)));
|
2019-12-05 13:19:00 +00:00
|
|
|
if !self.eat(&token::Comma) {
|
|
|
|
break;
|
|
|
|
}
|
2019-10-08 07:14:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok((cfg_predicate, expanded_attrs))
|
|
|
|
}
|
|
|
|
|
2019-12-05 13:19:00 +00:00
|
|
|
/// Matches `COMMASEP(meta_item_inner)`.
|
2022-05-20 23:51:09 +00:00
|
|
|
pub(crate) fn parse_meta_seq_top(&mut self) -> PResult<'a, Vec<ast::NestedMetaItem>> {
|
2019-12-05 13:19:00 +00:00
|
|
|
// Presumably, the majority of the time there will only be one attr.
|
|
|
|
let mut nmis = Vec::with_capacity(1);
|
|
|
|
while self.token.kind != token::Eof {
|
|
|
|
nmis.push(self.parse_meta_item_inner()?);
|
|
|
|
if !self.eat(&token::Comma) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(nmis)
|
|
|
|
}
|
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Matches the following grammar (per RFC 1559).
    /// ```ebnf
    /// meta_item : PATH ( '=' UNSUFFIXED_LIT | '(' meta_item_inner? ')' )? ;
    /// meta_item_inner : (meta_item | UNSUFFIXED_LIT) (',' meta_item_inner)? ;
    /// ```
    pub fn parse_meta_item(&mut self) -> PResult<'a, ast::MetaItem> {
        // An interpolated `NtMeta` token (a `meta` macro fragment) already
        // holds a parsed attr item; try to reuse it instead of re-parsing.
        let nt_meta = match self.token.kind {
            token::Interpolated(ref nt) => match **nt {
                token::NtMeta(ref e) => Some(e.clone()),
                _ => None,
            },
            _ => None,
        };

        if let Some(item) = nt_meta {
            return match item.meta(item.path.span) {
                Some(meta) => {
                    self.bump();
                    Ok(meta)
                }
                // The interpolated attr item could not be viewed as a
                // `MetaItem`; report the token as unexpected.
                None => self.unexpected(),
            };
        }

        let lo = self.token.span;
        let path = self.parse_path(PathStyle::Mod)?;
        let kind = self.parse_meta_item_kind()?;
        let span = lo.to(self.prev_token.span);
        Ok(ast::MetaItem { path, kind, span })
    }
|
|
|
|
|
2022-05-20 23:51:09 +00:00
|
|
|
pub(crate) fn parse_meta_item_kind(&mut self) -> PResult<'a, ast::MetaItemKind> {
|
2017-03-03 09:23:59 +00:00
|
|
|
Ok(if self.eat(&token::Eq) {
|
2016-11-15 07:37:10 +00:00
|
|
|
ast::MetaItemKind::NameValue(self.parse_unsuffixed_lit()?)
|
2022-04-26 12:40:14 +00:00
|
|
|
} else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
|
2019-12-04 09:13:29 +00:00
|
|
|
// Matches `meta_seq = ( COMMASEP(meta_item_inner) )`.
|
|
|
|
let (list, _) = self.parse_paren_comma_seq(|p| p.parse_meta_item_inner())?;
|
|
|
|
ast::MetaItemKind::List(list)
|
2016-11-15 07:37:10 +00:00
|
|
|
} else {
|
|
|
|
ast::MetaItemKind::Word
|
2017-03-03 09:23:59 +00:00
|
|
|
})
|
2012-04-20 03:51:31 +00:00
|
|
|
}
|
|
|
|
|
2019-09-06 02:56:45 +00:00
|
|
|
    /// Matches `meta_item_inner : (meta_item | UNSUFFIXED_LIT) ;`.
    fn parse_meta_item_inner(&mut self) -> PResult<'a, ast::NestedMetaItem> {
        // First alternative: an unsuffixed literal. On failure the diagnostic
        // is cancelled (not emitted) so we can try the next alternative.
        match self.parse_unsuffixed_lit() {
            Ok(lit) => return Ok(ast::NestedMetaItem::Literal(lit)),
            Err(err) => err.cancel(),
        }

        // Second alternative: a full meta item.
        match self.parse_meta_item() {
            Ok(mi) => return Ok(ast::NestedMetaItem::MetaItem(mi)),
            Err(err) => err.cancel(),
        }

        // Neither alternative matched: report the offending token.
        Err(InvalidMetaItem { span: self.token.span, token: self.token.clone() }
            .into_diagnostic(&self.sess.span_diagnostic))
    }
|
2012-04-20 03:51:31 +00:00
|
|
|
}
|
Rewrite `collect_tokens` implementations to use a flattened buffer
Instead of trying to collect tokens at each depth, we 'flatten' the
stream as we go allong, pushing open/close delimiters to our buffer
just like regular tokens. One capturing is complete, we reconstruct a
nested `TokenTree::Delimited` structure, producing a normal
`TokenStream`.
The reconstructed `TokenStream` is not created immediately - instead, it is
produced on-demand by a closure (wrapped in a new `LazyTokenStream` type). This
closure stores a clone of the original `TokenCursor`, plus a record of the
number of calls to `next()/next_desugared()`. This is sufficient to reconstruct
the tokenstream seen by the callback without storing any additional state. If
the tokenstream is never used (e.g. when a captured `macro_rules!` argument is
never passed to a proc macro), we never actually create a `TokenStream`.
This implementation has a number of advantages over the previous one:
* It is significantly simpler, with no edge cases around capturing the
start/end of a delimited group.
* It can be easily extended to allow replacing tokens an an arbitrary
'depth' by just using `Vec::splice` at the proper position. This is
important for PR #76130, which requires us to track information about
attributes along with tokens.
* The lazy approach to `TokenStream` construction allows us to easily
parse an AST struct, and then decide after the fact whether we need a
`TokenStream`. This will be useful when we start collecting tokens for
`Attribute` - we can discard the `LazyTokenStream` if the parsed
attribute doesn't need tokens (e.g. is a builtin attribute).
The performance impact seems to be neglibile (see
https://github.com/rust-lang/rust/pull/77250#issuecomment-703960604). There is a
small slowdown on a few benchmarks, but it only rises above 1% for incremental
builds, where it represents a larger fraction of the much smaller instruction
count. There a ~1% speedup on a few other incremental benchmarks - my guess is
that the speedups and slowdowns will usually cancel out in practice.
2020-09-27 01:56:29 +00:00
|
|
|
|
|
|
|
pub fn maybe_needs_tokens(attrs: &[ast::Attribute]) -> bool {
|
2020-11-14 11:47:14 +00:00
|
|
|
// One of the attributes may either itself be a macro,
|
2020-11-18 22:50:16 +00:00
|
|
|
// or expand to macro attributes (`cfg_attr`).
|
Rewrite `collect_tokens` implementations to use a flattened buffer
Instead of trying to collect tokens at each depth, we 'flatten' the
stream as we go allong, pushing open/close delimiters to our buffer
just like regular tokens. One capturing is complete, we reconstruct a
nested `TokenTree::Delimited` structure, producing a normal
`TokenStream`.
The reconstructed `TokenStream` is not created immediately - instead, it is
produced on-demand by a closure (wrapped in a new `LazyTokenStream` type). This
closure stores a clone of the original `TokenCursor`, plus a record of the
number of calls to `next()/next_desugared()`. This is sufficient to reconstruct
the tokenstream seen by the callback without storing any additional state. If
the tokenstream is never used (e.g. when a captured `macro_rules!` argument is
never passed to a proc macro), we never actually create a `TokenStream`.
This implementation has a number of advantages over the previous one:
* It is significantly simpler, with no edge cases around capturing the
start/end of a delimited group.
* It can be easily extended to allow replacing tokens an an arbitrary
'depth' by just using `Vec::splice` at the proper position. This is
important for PR #76130, which requires us to track information about
attributes along with tokens.
* The lazy approach to `TokenStream` construction allows us to easily
parse an AST struct, and then decide after the fact whether we need a
`TokenStream`. This will be useful when we start collecting tokens for
`Attribute` - we can discard the `LazyTokenStream` if the parsed
attribute doesn't need tokens (e.g. is a builtin attribute).
The performance impact seems to be neglibile (see
https://github.com/rust-lang/rust/pull/77250#issuecomment-703960604). There is a
small slowdown on a few benchmarks, but it only rises above 1% for incremental
builds, where it represents a larger fraction of the much smaller instruction
count. There a ~1% speedup on a few other incremental benchmarks - my guess is
that the speedups and slowdowns will usually cancel out in practice.
2020-09-27 01:56:29 +00:00
|
|
|
attrs.iter().any(|attr| {
|
2020-11-28 23:33:17 +00:00
|
|
|
if attr.is_doc_comment() {
|
|
|
|
return false;
|
|
|
|
}
|
2020-11-18 22:50:16 +00:00
|
|
|
attr.ident().map_or(true, |ident| {
|
2020-11-14 11:47:14 +00:00
|
|
|
ident.name == sym::cfg_attr || !rustc_feature::is_builtin_attr_name(ident.name)
|
2020-11-18 22:50:16 +00:00
|
|
|
})
|
Rewrite `collect_tokens` implementations to use a flattened buffer
Instead of trying to collect tokens at each depth, we 'flatten' the
stream as we go allong, pushing open/close delimiters to our buffer
just like regular tokens. One capturing is complete, we reconstruct a
nested `TokenTree::Delimited` structure, producing a normal
`TokenStream`.
The reconstructed `TokenStream` is not created immediately - instead, it is
produced on-demand by a closure (wrapped in a new `LazyTokenStream` type). This
closure stores a clone of the original `TokenCursor`, plus a record of the
number of calls to `next()/next_desugared()`. This is sufficient to reconstruct
the tokenstream seen by the callback without storing any additional state. If
the tokenstream is never used (e.g. when a captured `macro_rules!` argument is
never passed to a proc macro), we never actually create a `TokenStream`.
This implementation has a number of advantages over the previous one:
* It is significantly simpler, with no edge cases around capturing the
start/end of a delimited group.
* It can be easily extended to allow replacing tokens an an arbitrary
'depth' by just using `Vec::splice` at the proper position. This is
important for PR #76130, which requires us to track information about
attributes along with tokens.
* The lazy approach to `TokenStream` construction allows us to easily
parse an AST struct, and then decide after the fact whether we need a
`TokenStream`. This will be useful when we start collecting tokens for
`Attribute` - we can discard the `LazyTokenStream` if the parsed
attribute doesn't need tokens (e.g. is a builtin attribute).
The performance impact seems to be neglibile (see
https://github.com/rust-lang/rust/pull/77250#issuecomment-703960604). There is a
small slowdown on a few benchmarks, but it only rises above 1% for incremental
builds, where it represents a larger fraction of the much smaller instruction
count. There a ~1% speedup on a few other incremental benchmarks - my guess is
that the speedups and slowdowns will usually cancel out in practice.
2020-09-27 01:56:29 +00:00
|
|
|
})
|
|
|
|
}
|