Move TokenCursor::num_next_calls into Parser and rename it.

It's more of a `Parser`-level concern than a `TokenCursor`-level
concern. Also, `num_bump_calls` is a more accurate name, because it's
incremented in `Parser::bump`.
Nicholas Nethercote 2023-07-31 16:15:54 +10:00
parent 203cba76e0
commit 54eb6bc34c
3 changed files with 14 additions and 18 deletions
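
As a rough illustration of the shape of this change, here is a minimal, self-contained sketch using simplified stand-in types rather than the real rustc `Parser`/`TokenCursor`: the counter lives on the parser and is only ever incremented in `bump`, so it directly tracks the parser's position in the token stream.

struct TokenCursor {
    tokens: Vec<char>,
    index: usize,
    // Before this commit, the counter (`num_next_calls`) lived here and
    // counted calls to `next`.
}

impl TokenCursor {
    fn next(&mut self) -> Option<char> {
        let tok = self.tokens.get(self.index).copied();
        self.index += 1;
        tok
    }
}

struct Parser {
    token_cursor: TokenCursor,
    token: Option<char>,
    // The number of calls to `bump`, i.e. the position in the token stream.
    num_bump_calls: usize,
}

impl Parser {
    fn bump(&mut self) {
        // The counter is advanced only here, so its name reflects what it
        // actually measures: parser progress, not raw cursor activity.
        self.num_bump_calls += 1;
        self.token = self.token_cursor.next();
    }

    fn approx_token_stream_pos(&self) -> usize {
        self.num_bump_calls
    }
}

fn main() {
    let mut p = Parser {
        token_cursor: TokenCursor { tokens: vec!['a', 'b', 'c'], index: 0 },
        token: None,
        num_bump_calls: 0,
    };
    p.bump();
    p.bump();
    assert_eq!(p.token, Some('b'));
    assert_eq!(p.approx_token_stream_pos(), 2);
}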


@@ -36,7 +36,7 @@ impl<'a> Parser<'a> {
     pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, AttrWrapper> {
         let mut outer_attrs = ast::AttrVec::new();
         let mut just_parsed_doc_comment = false;
-        let start_pos = self.token_cursor.num_next_calls;
+        let start_pos = self.num_bump_calls;
         loop {
             let attr = if self.check(&token::Pound) {
                 let prev_outer_attr_sp = outer_attrs.last().map(|attr| attr.span);
@@ -277,7 +277,7 @@ impl<'a> Parser<'a> {
     pub(crate) fn parse_inner_attributes(&mut self) -> PResult<'a, ast::AttrVec> {
         let mut attrs = ast::AttrVec::new();
         loop {
-            let start_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
+            let start_pos: u32 = self.num_bump_calls.try_into().unwrap();
             // Only try to parse if it is an inner attribute (has `!`).
             let attr = if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) {
                 Some(self.parse_attribute(InnerAttrPolicy::Permitted)?)
@@ -298,7 +298,7 @@ impl<'a> Parser<'a> {
                 None
             };
             if let Some(attr) = attr {
-                let end_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
+                let end_pos: u32 = self.num_bump_calls.try_into().unwrap();
                 // If we are currently capturing tokens, mark the location of this inner attribute.
                 // If capturing ends up creating a `LazyAttrTokenStream`, we will include
                 // this replace range with it, removing the inner attribute from the final


@@ -213,6 +213,7 @@ impl<'a> Parser<'a> {
         let start_token = (self.token.clone(), self.token_spacing);
         let cursor_snapshot = self.token_cursor.clone();
+        let start_pos = self.num_bump_calls;
         let has_outer_attrs = !attrs.attrs.is_empty();
         let prev_capturing = std::mem::replace(&mut self.capture_state.capturing, Capturing::Yes);
@@ -273,8 +274,7 @@ impl<'a> Parser<'a> {
         let replace_ranges_end = self.capture_state.replace_ranges.len();
-        let cursor_snapshot_next_calls = cursor_snapshot.num_next_calls;
-        let mut end_pos = self.token_cursor.num_next_calls;
+        let mut end_pos = self.num_bump_calls;
         let mut captured_trailing = false;
@@ -306,7 +306,7 @@ impl<'a> Parser<'a> {
             end_pos += 1;
         }
-        let num_calls = end_pos - cursor_snapshot_next_calls;
+        let num_calls = end_pos - start_pos;
         // If we have no attributes, then we will never need to
         // use any replace ranges.
@@ -316,7 +316,7 @@ impl<'a> Parser<'a> {
             // Grab any replace ranges that occur *inside* the current AST node.
             // We will perform the actual replacement when we convert the `LazyAttrTokenStream`
             // to an `AttrTokenStream`.
-            let start_calls: u32 = cursor_snapshot_next_calls.try_into().unwrap();
+            let start_calls: u32 = start_pos.try_into().unwrap();
             self.capture_state.replace_ranges[replace_ranges_start..replace_ranges_end]
                 .iter()
                 .cloned()
@@ -359,8 +359,7 @@ impl<'a> Parser<'a> {
             // with a `FlatToken::AttrTarget`. If this AST node is inside an item
             // that has `#[derive]`, then this will allow us to cfg-expand this
             // AST node.
-            let start_pos =
-                if has_outer_attrs { attrs.start_pos } else { cursor_snapshot_next_calls };
+            let start_pos = if has_outer_attrs { attrs.start_pos } else { start_pos };
             let new_tokens = vec![(FlatToken::AttrTarget(attr_data), Spacing::Alone)];
             assert!(
@@ -464,6 +463,6 @@ mod size_asserts {
     use rustc_data_structures::static_assert_size;
     // tidy-alphabetical-start
     static_assert_size!(AttrWrapper, 16);
-    static_assert_size!(LazyAttrTokenStreamImpl, 120);
+    static_assert_size!(LazyAttrTokenStreamImpl, 112);
     // tidy-alphabetical-end
 }


@@ -135,9 +135,9 @@ pub struct Parser<'a> {
     pub capture_cfg: bool,
     restrictions: Restrictions,
     expected_tokens: Vec<TokenType>,
-    // Important: This must only be advanced from `bump` to ensure that
-    // `token_cursor.num_next_calls` is updated properly.
     token_cursor: TokenCursor,
+    // The number of calls to `bump`, i.e. the position in the token stream.
+    num_bump_calls: usize,
     /// This field is used to keep track of how many left angle brackets we have seen. This is
     /// required in order to detect extra leading left angle brackets (`<` characters) and error
     /// appropriately.
@@ -224,9 +224,6 @@ struct TokenCursor {
     // because it's the outermost token stream which never has delimiters.
     stack: Vec<(TokenTreeCursor, Delimiter, DelimSpan)>,
-    // Counts the number of calls to `{,inlined_}next`.
-    num_next_calls: usize,
     // During parsing, we may sometimes need to 'unglue' a
     // glued token into two component tokens
     // (e.g. '>>' into '>' and '>), so that the parser
@@ -402,9 +399,9 @@ impl<'a> Parser<'a> {
             token_cursor: TokenCursor {
                 tree_cursor: stream.into_trees(),
                 stack: Vec::new(),
-                num_next_calls: 0,
                 break_last_token: false,
             },
+            num_bump_calls: 0,
             unmatched_angle_bracket_count: 0,
             max_angle_bracket_count: 0,
             last_unexpected_token_span: None,
@@ -1049,7 +1046,7 @@ impl<'a> Parser<'a> {
         // Note: destructuring here would give nicer code, but it was found in #96210 to be slower
        // than `.0`/`.1` access.
         let mut next = self.token_cursor.inlined_next();
-        self.token_cursor.num_next_calls += 1;
+        self.num_bump_calls += 1;
         // We've retrieved an token from the underlying
         // cursor, so we no longer need to worry about
         // an unglued token. See `break_and_eat` for more details
@@ -1446,7 +1443,7 @@ impl<'a> Parser<'a> {
     }
     pub fn approx_token_stream_pos(&self) -> usize {
-        self.token_cursor.num_next_calls
+        self.num_bump_calls
     }
 }