syntax: doc comments all the things

Corey Richardson 2014-06-09 13:12:30 -07:00
parent 5716abe3f0
commit 4989a56448
34 changed files with 1135 additions and 1136 deletions

View File

@@ -60,9 +60,12 @@ pub struct AbiData {
 }

 pub enum AbiArchitecture {
-    RustArch,   // Not a real ABI (e.g., intrinsic)
-    AllArch,    // An ABI that specifies cross-platform defaults (e.g., "C")
-    Archs(u32)  // Multiple architectures (bitset)
+    /// Not a real ABI (e.g., intrinsic)
+    RustArch,
+    /// An ABI that specifies cross-platform defaults (e.g., "C")
+    AllArch,
+    /// Multiple architectures (bitset)
+    Archs(u32)
 }

 static AbiDatas: &'static [AbiData] = &[
@@ -84,21 +87,13 @@ static AbiDatas: &'static [AbiData] = &[
     AbiData {abi: RustIntrinsic, name: "rust-intrinsic", abi_arch: RustArch},
 ];

+/// Iterates through each of the defined ABIs.
 fn each_abi(op: |abi: Abi| -> bool) -> bool {
-    /*!
-     *
-     * Iterates through each of the defined ABIs.
-     */
     AbiDatas.iter().advance(|abi_data| op(abi_data.abi))
 }

+/// Returns the ABI with the given name (if any).
 pub fn lookup(name: &str) -> Option<Abi> {
-    /*!
-     *
-     * Returns the ABI with the given name (if any).
-     */
     let mut res = None;

     each_abi(|abi| {
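
The conversion pattern this commit applies everywhere: `/*! ... */` blocks inside an item become `///` line docs on the item, and module-level block docs become `//!`. For reference, the three forms in a minimal sketch (modern syntax, not code from this commit):

```rust
//! Inner line doc: documents the enclosing module or crate
//! (the replacement for a `/*! ... */` block).

/// Outer line doc: documents the item that follows
/// (the replacement for a documenting `// ...` run).
pub enum AbiArchitecture {
    /// Doc comments attach to variants too; rustdoc renders these,
    /// while plain `//` comments are invisible to it.
    RustArch,
}
```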

View File

@@ -24,7 +24,8 @@ use std::rc::Rc;
 use std::gc::{Gc, GC};

 use serialize::{Encodable, Decodable, Encoder, Decoder};

-/// A pointer abstraction. FIXME(eddyb) #10676 use Rc<T> in the future.
+/// A pointer abstraction.
+// FIXME(eddyb) #10676 use Rc<T> in the future.
 pub type P<T> = Gc<T>;

 #[allow(non_snake_case_functions)]
@@ -36,10 +37,10 @@ pub fn P<T: 'static>(value: T) -> P<T> {

 // FIXME #6993: in librustc, uses of "ident" should be replaced
 // by just "Name".

-// an identifier contains a Name (index into the interner
-// table) and a SyntaxContext to track renaming and
-// macro expansion per Flatt et al., "Macros
-// That Work Together"
+/// An identifier contains a Name (index into the interner
+/// table) and a SyntaxContext to track renaming and
+/// macro expansion per Flatt et al., "Macros
+/// That Work Together"
 #[deriving(Clone, Hash, PartialOrd, Eq, Ord, Show)]
 pub struct Ident {
     pub name: Name,
@@ -122,10 +123,9 @@ pub struct Lifetime {
     pub name: Name
 }

-// a "Path" is essentially Rust's notion of a name;
-// for instance: std::cmp::PartialEq . It's represented
-// as a sequence of identifiers, along with a bunch
-// of supporting information.
+/// A "Path" is essentially Rust's notion of a name; for instance:
+/// std::cmp::PartialEq . It's represented as a sequence of identifiers,
+/// along with a bunch of supporting information.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct Path {
     pub span: Span,
@@ -163,15 +163,15 @@ pub struct DefId {
 pub static LOCAL_CRATE: CrateNum = 0;
 pub static CRATE_NODE_ID: NodeId = 0;

-// When parsing and doing expansions, we initially give all AST nodes this AST
-// node value. Then later, in the renumber pass, we renumber them to have
-// small, positive ids.
+/// When parsing and doing expansions, we initially give all AST nodes this AST
+/// node value. Then later, in the renumber pass, we renumber them to have
+/// small, positive ids.
 pub static DUMMY_NODE_ID: NodeId = -1;

-// The AST represents all type param bounds as types.
-// typeck::collect::compute_bounds matches these against
-// the "special" built-in traits (see middle::lang_items) and
-// detects Copy, Send and Share.
+/// The AST represents all type param bounds as types.
+/// typeck::collect::compute_bounds matches these against
+/// the "special" built-in traits (see middle::lang_items) and
+/// detects Copy, Send and Share.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum TyParamBound {
     TraitTyParamBound(TraitRef),
@@ -210,9 +210,9 @@ impl Generics {
     }
 }

-// The set of MetaItems that define the compilation environment of the crate,
-// used to drive conditional compilation
+/// The set of MetaItems that define the compilation environment of the crate,
+/// used to drive conditional compilation
 pub type CrateConfig = Vec<Gc<MetaItem>>;

 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct Crate {
@@ -289,13 +289,13 @@ pub enum BindingMode {
 pub enum Pat_ {
     PatWild,
     PatWildMulti,
-    // A PatIdent may either be a new bound variable,
-    // or a nullary enum (in which case the third field
-    // is None).
-    // In the nullary enum case, the parser can't determine
-    // which it is. The resolver determines this, and
-    // records this pattern's NodeId in an auxiliary
-    // set (of "PatIdents that refer to nullary enums")
+    /// A PatIdent may either be a new bound variable,
+    /// or a nullary enum (in which case the third field
+    /// is None).
+    /// In the nullary enum case, the parser can't determine
+    /// which it is. The resolver determines this, and
+    /// records this pattern's NodeId in an auxiliary
+    /// set (of "PatIdents that refer to nullary enums")
     PatIdent(BindingMode, SpannedIdent, Option<Gc<Pat>>),
     PatEnum(Path, Option<Vec<Gc<Pat>>>), /* "none" means a * pattern where
                                             * we don't bind the fields to names */
@@ -305,8 +305,8 @@ pub enum Pat_ {
     PatRegion(Gc<Pat>), // reference pattern
     PatLit(Gc<Expr>),
     PatRange(Gc<Expr>, Gc<Expr>),
-    // [a, b, ..i, y, z] is represented as
-    // PatVec(~[a, b], Some(i), ~[y, z])
+    /// [a, b, ..i, y, z] is represented as:
+    /// PatVec(~[a, b], Some(i), ~[y, z])
     PatVec(Vec<Gc<Pat>>, Option<Gc<Pat>>, Vec<Gc<Pat>>),
     PatMac(Mac),
 }
@@ -319,9 +319,12 @@ pub enum Mutability {
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum ExprVstore {
-    ExprVstoreUniq,     // ~[1,2,3,4]
-    ExprVstoreSlice,    // &[1,2,3,4]
-    ExprVstoreMutSlice, // &mut [1,2,3,4]
+    /// ~[1, 2, 3, 4]
+    ExprVstoreUniq,
+    /// &[1, 2, 3, 4]
+    ExprVstoreSlice,
+    /// &mut [1, 2, 3, 4]
+    ExprVstoreMutSlice,
 }

 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
@@ -359,16 +362,16 @@ pub type Stmt = Spanned<Stmt_>;
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Stmt_ {
-    // could be an item or a local (let) binding:
+    /// Could be an item or a local (let) binding:
     StmtDecl(Gc<Decl>, NodeId),

-    // expr without trailing semi-colon (must have unit type):
+    /// Expr without trailing semi-colon (must have unit type):
     StmtExpr(Gc<Expr>, NodeId),

-    // expr with trailing semi-colon (may have any type):
+    /// Expr with trailing semi-colon (may have any type):
     StmtSemi(Gc<Expr>, NodeId),

-    // bool: is there a trailing sem-colon?
+    /// bool: is there a trailing semi-colon?
     StmtMac(Mac, bool),
 }
@@ -397,9 +400,9 @@ pub type Decl = Spanned<Decl_>;
 #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Decl_ {
-    // a local (let) binding:
+    /// A local (let) binding:
     DeclLocal(Gc<Local>),
-    // an item binding:
+    /// An item binding:
     DeclItem(Gc<Item>),
 }
@@ -443,7 +446,7 @@ pub struct Expr {
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Expr_ {
     ExprVstore(Gc<Expr>, ExprVstore),
-    // First expr is the place; second expr is the value.
+    /// First expr is the place; second expr is the value.
     ExprBox(Gc<Expr>, Gc<Expr>),
     ExprVec(Vec<Gc<Expr>>),
     ExprCall(Gc<Expr>, Vec<Gc<Expr>>),
@@ -483,124 +486,121 @@ pub enum Expr_ {
     ExprMac(Mac),

-    // A struct literal expression.
+    /// A struct literal expression.
     ExprStruct(Path, Vec<Field> , Option<Gc<Expr>> /* base */),

-    // A vector literal constructed from one repeated element.
+    /// A vector literal constructed from one repeated element.
     ExprRepeat(Gc<Expr> /* element */, Gc<Expr> /* count */),

-    // No-op: used solely so we can pretty-print faithfully
+    /// No-op: used solely so we can pretty-print faithfully
     ExprParen(Gc<Expr>)
 }

-// When the main rust parser encounters a syntax-extension invocation, it
-// parses the arguments to the invocation as a token-tree. This is a very
-// loose structure, such that all sorts of different AST-fragments can
-// be passed to syntax extensions using a uniform type.
-//
-// If the syntax extension is an MBE macro, it will attempt to match its
-// LHS "matchers" against the provided token tree, and if it finds a
-// match, will transcribe the RHS token tree, splicing in any captured
-// macro_parser::matched_nonterminals into the TTNonterminals it finds.
-//
-// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq
-// makes any real sense. You could write them elsewhere but nothing
-// else knows what to do with them, so you'll probably get a syntax
-// error.
-//
+/// When the main rust parser encounters a syntax-extension invocation, it
+/// parses the arguments to the invocation as a token-tree. This is a very
+/// loose structure, such that all sorts of different AST-fragments can
+/// be passed to syntax extensions using a uniform type.
+///
+/// If the syntax extension is an MBE macro, it will attempt to match its
+/// LHS "matchers" against the provided token tree, and if it finds a
+/// match, will transcribe the RHS token tree, splicing in any captured
+/// macro_parser::matched_nonterminals into the TTNonterminals it finds.
+///
+/// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq
+/// makes any real sense. You could write them elsewhere but nothing
+/// else knows what to do with them, so you'll probably get a syntax
+/// error.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 #[doc="For macro invocations; parsing is delegated to the macro"]
 pub enum TokenTree {
-    // a single token
+    /// A single token
     TTTok(Span, ::parse::token::Token),
-    // a delimited sequence (the delimiters appear as the first
-    // and last elements of the vector)
+    /// A delimited sequence (the delimiters appear as the first
+    /// and last elements of the vector)
     // FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
     TTDelim(Rc<Vec<TokenTree>>),

     // These only make sense for right-hand-sides of MBE macros:

-    // a kleene-style repetition sequence with a span, a TTForest,
-    // an optional separator, and a boolean where true indicates
-    // zero or more (..), and false indicates one or more (+).
+    /// A kleene-style repetition sequence with a span, a TTForest,
+    /// an optional separator, and a boolean where true indicates
+    /// zero or more (..), and false indicates one or more (+).
     // FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
     TTSeq(Span, Rc<Vec<TokenTree>>, Option<::parse::token::Token>, bool),

-    // a syntactic variable that will be filled in by macro expansion.
+    /// A syntactic variable that will be filled in by macro expansion.
     TTNonterminal(Span, Ident)
 }

-//
-// Matchers are nodes defined-by and recognized-by the main rust parser and
-// language, but they're only ever found inside syntax-extension invocations;
-// indeed, the only thing that ever _activates_ the rules in the rust parser
-// for parsing a matcher is a matcher looking for the 'matchers' nonterminal
-// itself. Matchers represent a small sub-language for pattern-matching
-// token-trees, and are thus primarily used by the macro-defining extension
-// itself.
-//
-// MatchTok
-// --------
-//
-// A matcher that matches a single token, denoted by the token itself. So
-// long as there's no $ involved.
-//
-//
-// MatchSeq
-// --------
-//
-// A matcher that matches a sequence of sub-matchers, denoted various
-// possible ways:
-//
-// $(M)*       zero or more Ms
-// $(M)+       one or more Ms
-// $(M),+      one or more comma-separated Ms
-// $(A B C);*  zero or more semi-separated 'A B C' seqs
-//
-//
-// MatchNonterminal
-// -----------------
-//
-// A matcher that matches one of a few interesting named rust
-// nonterminals, such as types, expressions, items, or raw token-trees. A
-// black-box matcher on expr, for example, binds an expr to a given ident,
-// and that ident can re-occur as an interpolation in the RHS of a
-// macro-by-example rule. For example:
-//
-// $foo:expr   =>   1 + $foo    // interpolate an expr
-// $foo:tt     =>   $foo        // interpolate a token-tree
-// $foo:tt     =>   bar! $foo   // only other valid interpolation
-//                              // is in arg position for another
-//                              // macro
-//
-// As a final, horrifying aside, note that macro-by-example's input is
-// also matched by one of these matchers. Holy self-referential! It is matched
-// by a MatchSeq, specifically this one:
-//
-// $( $lhs:matchers => $rhs:tt );+
-//
-// If you understand that, you have closed to loop and understand the whole
-// macro system. Congratulations.
-//
+/// Matchers are nodes defined-by and recognized-by the main rust parser and
+/// language, but they're only ever found inside syntax-extension invocations;
+/// indeed, the only thing that ever _activates_ the rules in the rust parser
+/// for parsing a matcher is a matcher looking for the 'matchers' nonterminal
+/// itself. Matchers represent a small sub-language for pattern-matching
+/// token-trees, and are thus primarily used by the macro-defining extension
+/// itself.
+///
+/// MatchTok
+/// --------
+///
+/// A matcher that matches a single token, denoted by the token itself. So
+/// long as there's no $ involved.
+///
+///
+/// MatchSeq
+/// --------
+///
+/// A matcher that matches a sequence of sub-matchers, denoted various
+/// possible ways:
+///
+/// $(M)*       zero or more Ms
+/// $(M)+       one or more Ms
+/// $(M),+      one or more comma-separated Ms
+/// $(A B C);*  zero or more semi-separated 'A B C' seqs
+///
+///
+/// MatchNonterminal
+/// -----------------
+///
+/// A matcher that matches one of a few interesting named rust
+/// nonterminals, such as types, expressions, items, or raw token-trees. A
+/// black-box matcher on expr, for example, binds an expr to a given ident,
+/// and that ident can re-occur as an interpolation in the RHS of a
+/// macro-by-example rule. For example:
+///
+/// $foo:expr   =>   1 + $foo    // interpolate an expr
+/// $foo:tt     =>   $foo        // interpolate a token-tree
+/// $foo:tt     =>   bar! $foo   // only other valid interpolation
+///                              // is in arg position for another
+///                              // macro
+///
+/// As a final, horrifying aside, note that macro-by-example's input is
+/// also matched by one of these matchers. Holy self-referential! It is matched
+/// by a MatchSeq, specifically this one:
+///
+/// $( $lhs:matchers => $rhs:tt );+
+///
+/// If you understand that, you have closed the loop and understand the whole
+/// macro system. Congratulations.
 pub type Matcher = Spanned<Matcher_>;

 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Matcher_ {
-    // match one token
+    /// Match one token
     MatchTok(::parse::token::Token),
-    // match repetitions of a sequence: body, separator, zero ok?,
-    // lo, hi position-in-match-array used:
+    /// Match repetitions of a sequence: body, separator, zero ok?,
+    /// lo, hi position-in-match-array used:
     MatchSeq(Vec<Matcher> , Option<::parse::token::Token>, bool, uint, uint),
-    // parse a Rust NT: name to bind, name of NT, position in match array:
+    /// Parse a Rust NT: name to bind, name of NT, position in match array:
     MatchNonterminal(Ident, Ident, uint)
 }

 pub type Mac = Spanned<Mac_>;

-// represents a macro invocation. The Path indicates which macro
-// is being invoked, and the vector of token-trees contains the source
-// of the macro invocation.
-// There's only one flavor, now, so this could presumably be simplified.
+/// Represents a macro invocation. The Path indicates which macro
+/// is being invoked, and the vector of token-trees contains the source
+/// of the macro invocation.
+/// There's only one flavor, now, so this could presumably be simplified.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Mac_ {
     MacInvocTT(Path, Vec<TokenTree> , SyntaxContext),   // new macro-invocation
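
Both enums are easiest to see through a macro definition. In this hedged modern sketch (macro_rules! is the descendant of the MBE machinery documented above), the left-hand side is a matcher of the `$(M),+` shape containing an expr nonterminal, the invocation's arguments arrive as a token tree, and `$e` on the right-hand side is the TTNonterminal-style interpolation:

```rust
macro_rules! sum {
    // LHS: a MatchSeq of one-or-more comma-separated MatchNonterminals (expr)
    ( $( $e:expr ),+ ) => {
        // RHS: a token tree; captured nonterminals are spliced in during transcription
        0 $( + $e )+
    };
}

fn main() {
    assert_eq!(sum!(1, 2, 3), 6); // `1, 2, 3` is parsed only as a token tree, then matched
}
```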
@@ -659,11 +659,10 @@ pub struct TypeMethod {
     pub vis: Visibility,
 }

-/// Represents a method declaration in a trait declaration, possibly
-/// including a default implementation
-// A trait method is either required (meaning it doesn't have an
-// implementation, just a signature) or provided (meaning it has a default
-// implementation).
+/// Represents a method declaration in a trait declaration, possibly including
+/// a default implementation. A trait method is either required (meaning it
+/// doesn't have an implementation, just a signature) or provided (meaning it
+/// has a default implementation).
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum TraitMethod {
     Required(TypeMethod),
@@ -720,7 +719,7 @@ pub struct Ty {
     pub span: Span,
 }

-// Not represented directly in the AST, referred to by name through a ty_path.
+/// Not represented directly in the AST, referred to by name through a ty_path.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum PrimTy {
     TyInt(IntTy),
@@ -753,10 +752,10 @@ pub struct ClosureTy {
     pub fn_style: FnStyle,
     pub onceness: Onceness,
     pub decl: P<FnDecl>,
-    // Optional optvec distinguishes between "fn()" and "fn:()" so we can
-    // implement issue #7264. None means "fn()", which means infer a default
-    // bound based on pointer sigil during typeck. Some(Empty) means "fn:()",
-    // which means use no bounds (e.g., not even Owned on a ~fn()).
+    /// Optional optvec distinguishes between "fn()" and "fn:()" so we can
+    /// implement issue #7264. None means "fn()", which means infer a default
+    /// bound based on pointer sigil during typeck. Some(Empty) means "fn:()",
+    /// which means use no bounds (e.g., not even Owned on a ~fn()).
     pub bounds: Option<OwnedSlice<TyParamBound>>,
 }
@@ -789,11 +788,11 @@ pub enum Ty_ {
     TyUnboxedFn(Gc<UnboxedFnTy>),
     TyTup(Vec<P<Ty>> ),
     TyPath(Path, Option<OwnedSlice<TyParamBound>>, NodeId), // for #7264; see above
-    // No-op; kept solely so that we can pretty-print faithfully
+    /// No-op; kept solely so that we can pretty-print faithfully
     TyParen(P<Ty>),
     TyTypeof(Gc<Expr>),
-    // TyInfer means the type should be inferred instead of it having been
-    // specified. This can appear anywhere in a type.
+    /// TyInfer means the type should be inferred instead of it having been
+    /// specified. This can appear anywhere in a type.
     TyInfer,
 }
@@ -854,8 +853,10 @@ pub struct FnDecl {
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum FnStyle {
-    UnsafeFn, // declared with "unsafe fn"
-    NormalFn, // declared with "fn"
+    /// Declared with "unsafe fn"
+    UnsafeFn,
+    /// Declared with "fn"
+    NormalFn,
 }

 impl fmt::Show for FnStyle {
@@ -869,18 +870,24 @@ impl fmt::Show for FnStyle {
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum RetStyle {
-    NoReturn, // functions with return type _|_ that always
-              // raise an error or exit (i.e. never return to the caller)
-    Return,   // everything else
+    /// Functions with return type ! that always
+    /// raise an error or exit (i.e. never return to the caller)
+    NoReturn,
+    /// Everything else
+    Return,
 }

 /// Represents the kind of 'self' associated with a method
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum ExplicitSelf_ {
-    SelfStatic,                                       // no self
-    SelfValue(Ident),                                 // `self`
-    SelfRegion(Option<Lifetime>, Mutability, Ident),  // `&'lt self`, `&'lt mut self`
-    SelfUniq(Ident),                                  // `~self`
+    /// No self
+    SelfStatic,
+    /// `self`
+    SelfValue(Ident),
+    /// `&'lt self`, `&'lt mut self`
+    SelfRegion(Option<Lifetime>, Mutability, Ident),
+    /// `~self`
+    SelfUniq(Ident)
 }

 pub type ExplicitSelf = Spanned<ExplicitSelf_>;
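
A compact map from these variants to source forms, in today's syntax (a hedged sketch; `~self` and the `_|_` notation are gone from the modern language):

```rust
struct Counter { n: u32 }

impl Counter {
    fn peek(&self) -> u32 { self.n }      // SelfRegion: `&self`
    fn bump(&mut self) { self.n += 1 }    // SelfRegion with mutability: `&mut self`
    fn into_inner(self) -> u32 { self.n } // SelfValue: by-value `self`
}

fn give_up() -> ! { // RetStyle::NoReturn: diverges rather than returning to the caller
    panic!("unrecoverable")
}

fn main() {
    let mut c = Counter { n: 0 };
    c.bump();
    assert_eq!(c.peek(), 1);
    assert_eq!(c.into_inner(), 1);
}
```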
@@ -959,17 +966,17 @@ pub type ViewPath = Spanned<ViewPath_>;
 #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum ViewPath_ {

-    // quux = foo::bar::baz
-    //
-    // or just
-    //
-    // foo::bar::baz (with 'baz =' implicitly on the left)
+    /// `quux = foo::bar::baz`
+    ///
+    /// or just
+    ///
+    /// `foo::bar::baz` (with 'baz =' implicitly on the left)
     ViewPathSimple(Ident, Path, NodeId),

-    // foo::bar::*
+    /// `foo::bar::*`
     ViewPathGlob(Path, NodeId),

-    // foo::bar::{a,b,c}
+    /// `foo::bar::{a,b,c}`
     ViewPathList(Path, Vec<PathListIdent> , NodeId)
 }
@@ -983,20 +990,20 @@ pub struct ViewItem {
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum ViewItem_ {
-    // ident: name used to refer to this crate in the code
-    // optional (InternedString,StrStyle): if present, this is a location
-    // (containing arbitrary characters) from which to fetch the crate sources
-    // For example, extern crate whatever = "github.com/rust-lang/rust"
+    /// Ident: name used to refer to this crate in the code
+    /// optional (InternedString,StrStyle): if present, this is a location
+    /// (containing arbitrary characters) from which to fetch the crate sources
+    /// For example, extern crate whatever = "github.com/rust-lang/rust"
     ViewItemExternCrate(Ident, Option<(InternedString,StrStyle)>, NodeId),
     ViewItemUse(Gc<ViewPath>),
 }

-// Meta-data associated with an item
+/// Meta-data associated with an item
 pub type Attribute = Spanned<Attribute_>;

-// Distinguishes between Attributes that decorate items and Attributes that
-// are contained as statements within items. These two cases need to be
-// distinguished for pretty-printing.
+/// Distinguishes between Attributes that decorate items and Attributes that
+/// are contained as statements within items. These two cases need to be
+/// distinguished for pretty-printing.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum AttrStyle {
     AttrOuter,
@@ -1006,7 +1013,7 @@ pub enum AttrStyle {
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct AttrId(pub uint);

-// doc-comments are promoted to attributes that have is_sugared_doc = true
+/// Doc-comments are promoted to attributes that have is_sugared_doc = true
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct Attribute_ {
     pub id: AttrId,
@@ -1015,13 +1022,12 @@ pub struct Attribute_ {
     pub is_sugared_doc: bool,
 }

-/*
-  TraitRef's appear in impls.
-  resolve maps each TraitRef's ref_id to its defining trait; that's all
-  that the ref_id is for. The impl_id maps to the "self type" of this impl.
-  If this impl is an ItemImpl, the impl_id is redundant (it could be the
-  same as the impl's node id).
-*/
+/// TraitRef's appear in impls.
+/// resolve maps each TraitRef's ref_id to its defining trait; that's all
+/// that the ref_id is for. The impl_id maps to the "self type" of this impl.
+/// If this impl is an ItemImpl, the impl_id is redundant (it could be the
+/// same as the impl's node id).
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct TraitRef {
     pub path: Path,
@@ -1065,7 +1071,8 @@ pub type StructField = Spanned<StructField_>;
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum StructFieldKind {
     NamedField(Ident, Visibility),
-    UnnamedField(Visibility), // element of a tuple-like struct
+    /// Element of a tuple-like struct
+    UnnamedField(Visibility),
 }

 impl StructFieldKind {
@@ -1079,12 +1086,15 @@ impl StructFieldKind {
 #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct StructDef {
-    pub fields: Vec<StructField>, /* fields, not including ctor */
-    /* ID of the constructor. This is only used for tuple- or enum-like
-     * structs. */
+    /// Fields, not including ctor
+    pub fields: Vec<StructField>,
+    /// ID of the constructor. This is only used for tuple- or enum-like
+    /// structs.
     pub ctor_id: Option<NodeId>,
-    pub super_struct: Option<P<Ty>>, // Super struct, if specified.
-    pub is_virtual: bool,            // True iff the struct may be inherited from.
+    /// Super struct, if specified.
+    pub super_struct: Option<P<Ty>>,
+    /// True iff the struct may be inherited from.
+    pub is_virtual: bool,
 }

 /*
@@ -1120,7 +1130,7 @@ pub enum Item_ {
             Option<TraitRef>, // (optional) trait this impl implements
             P<Ty>, // self
             Vec<Gc<Method>>),
-    // a macro invocation (which includes macro definition)
+    /// A macro invocation (which includes macro definition)
     ItemMac(Mac),
 }
@@ -1140,9 +1150,9 @@ pub enum ForeignItem_ {
     ForeignItemStatic(P<Ty>, /* is_mutbl */ bool),
 }

-// The data we save and restore about an inlined item or method. This is not
-// part of the AST that we parse from a file, but it becomes part of the tree
-// that we trans.
+/// The data we save and restore about an inlined item or method. This is not
+/// part of the AST that we parse from a file, but it becomes part of the tree
+/// that we trans.
 #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum InlinedItem {
     IIItem(Gc<Item>),

View File

@@ -112,13 +112,13 @@ pub enum Node {
     NodeLifetime(Gc<Lifetime>),
 }

-// The odd layout is to bring down the total size.
+/// The odd layout is to bring down the total size.
 #[deriving(Clone)]
 enum MapEntry {
-    // Placeholder for holes in the map.
+    /// Placeholder for holes in the map.
     NotPresent,

-    // All the node types, with a parent ID.
+    /// All the node types, with a parent ID.
     EntryItem(NodeId, Gc<Item>),
     EntryForeignItem(NodeId, Gc<ForeignItem>),
     EntryTraitMethod(NodeId, Gc<TraitMethod>),
@@ -133,14 +133,14 @@ enum MapEntry {
     EntryStructCtor(NodeId, Gc<StructDef>),
     EntryLifetime(NodeId, Gc<Lifetime>),

-    // Roots for node trees.
+    /// Roots for node trees.
     RootCrate,
     RootInlinedParent(P<InlinedParent>)
 }

 struct InlinedParent {
     path: Vec<PathElem> ,
-    // Required by NodeTraitMethod and NodeMethod.
+    /// Required by NodeTraitMethod and NodeMethod.
     def_id: DefId
 }
@@ -243,7 +243,7 @@ impl Map {
                 ItemForeignMod(ref nm) => Some(nm.abi),
                 _ => None
             },
-            // Wrong but OK, because the only inlined foreign items are intrinsics.
+            /// Wrong but OK, because the only inlined foreign items are intrinsics.
             Some(RootInlinedParent(_)) => Some(abi::RustIntrinsic),
             _ => None
         };
@@ -432,8 +432,8 @@ pub trait FoldOps {
 pub struct Ctx<'a, F> {
     map: &'a Map,
-    // The node in which we are currently mapping (an item or a method).
-    // When equal to DUMMY_NODE_ID, the next mapped node becomes the parent.
+    /// The node in which we are currently mapping (an item or a method).
+    /// When equal to DUMMY_NODE_ID, the next mapped node becomes the parent.
     parent: NodeId,
     fold_ops: F
 }
@@ -618,9 +618,9 @@ pub fn map_crate<F: FoldOps>(krate: Crate, fold_ops: F) -> (Crate, Map) {
     (krate, map)
 }

-// Used for items loaded from external crate that are being inlined into this
-// crate. The `path` should be the path to the item but should not include
-// the item itself.
+/// Used for items loaded from external crate that are being inlined into this
+/// crate. The `path` should be the path to the item but should not include
+/// the item itself.
 pub fn map_decoded_item<F: FoldOps>(map: &Map,
                                     path: Vec<PathElem> ,
                                     fold_ops: F,

View File

@@ -101,8 +101,8 @@ pub fn is_path(e: Gc<Expr>) -> bool {
     return match e.node { ExprPath(_) => true, _ => false };
 }

-// Get a string representation of a signed int type, with its value.
-// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
+/// Get a string representation of a signed int type, with its value.
+/// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
 pub fn int_ty_to_string(t: IntTy, val: Option<i64>) -> String {
     let s = match t {
         TyI if val.is_some() => "i",
@@ -131,8 +131,8 @@ pub fn int_ty_max(t: IntTy) -> u64 {
     }
 }

-// Get a string representation of an unsigned int type, with its value.
-// We want to avoid "42uint" in favor of "42u"
+/// Get a string representation of an unsigned int type, with its value.
+/// We want to avoid "42uint" in favor of "42u"
 pub fn uint_ty_to_string(t: UintTy, val: Option<u64>) -> String {
     let s = match t {
         TyU if val.is_some() => "u",
@@ -249,8 +249,8 @@ pub fn public_methods(ms: Vec<Gc<Method>> ) -> Vec<Gc<Method>> {
     }).collect()
 }

-// extract a TypeMethod from a TraitMethod. if the TraitMethod is
-// a default, pull out the useful fields to make a TypeMethod
+/// extract a TypeMethod from a TraitMethod. if the TraitMethod is
+/// a default, pull out the useful fields to make a TypeMethod
 pub fn trait_method_to_ty_method(method: &TraitMethod) -> TypeMethod {
     match *method {
         Required(ref m) => (*m).clone(),
@@ -705,7 +705,7 @@ pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> bool {
     }
 }

-// Returns true if this literal is a string and false otherwise.
+/// Returns true if this literal is a string and false otherwise.
 pub fn lit_is_str(lit: Gc<Lit>) -> bool {
     match lit.node {
         LitStr(..) => true,
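
The contract described above for int_ty_to_string/uint_ty_to_string boils down to: print the value bare when one is known, and fall back to the type name otherwise. A standalone sketch of that rule (hypothetical helper, not the libsyntax API):

```rust
fn int_repr(ty_name: &str, val: Option<i64>) -> String {
    match val {
        Some(v) => v.to_string(),    // "45", "-3" -- never "45int"
        None => ty_name.to_string(), // no value known: name the type instead
    }
}

fn main() {
    assert_eq!(int_repr("int", Some(45)), "45");
    assert_eq!(int_repr("int", None), "int");
}
```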

View File

@@ -46,10 +46,8 @@ pub trait AttrMetaMethods {
     /// #[foo="bar"] and #[foo(bar)]
     fn name(&self) -> InternedString;

-    /**
-     * Gets the string value if self is a MetaNameValue variant
-     * containing a string, otherwise None.
-     */
+    /// Gets the string value if self is a MetaNameValue variant
+    /// containing a string, otherwise None.
     fn value_str(&self) -> Option<InternedString>;
     /// Gets a list of inner meta items from a list MetaItem type.
     fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<MetaItem>]>;
@@ -420,18 +418,16 @@ pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[Gc<MetaItem>]) {
 }

-/**
- * Fold this over attributes to parse #[repr(...)] forms.
- *
- * Valid repr contents: any of the primitive integral type names (see
- * `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
- * the same discriminant size that the corresponding C enum would. These are
- * not allowed on univariant or zero-variant enums, which have no discriminant.
- *
- * If a discriminant type is so specified, then the discriminant will be
- * present (before fields, if any) with that type; reprensentation
- * optimizations which would remove it will not be done.
- */
+/// Fold this over attributes to parse #[repr(...)] forms.
+///
+/// Valid repr contents: any of the primitive integral type names (see
+/// `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
+/// the same discriminant size that the corresponding C enum would. These are
+/// not allowed on univariant or zero-variant enums, which have no discriminant.
+///
+/// If a discriminant type is so specified, then the discriminant will be
+/// present (before fields, if any) with that type; representation
+/// optimizations which would remove it will not be done.
 pub fn find_repr_attr(diagnostic: &SpanHandler, attr: &Attribute, acc: ReprAttr)
                       -> ReprAttr {
     let mut acc = acc;
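
What find_repr_attr parses, shown in today's syntax (hedged; only the attribute grammar has shifted since 2014): an integral type name fixes the discriminant's representation, and C requests the size a C compiler would choose.

```rust
#[repr(u8)] // discriminant is stored as a u8 (before any fields, per the doc above)
enum Small { A = 1, B = 2 }

#[repr(C)]  // use the same discriminant size the corresponding C enum would get
enum CLike { X, Y, Z }

fn main() {
    assert_eq!(std::mem::size_of::<Small>(), 1);
    let _ = (Small::A, Small::B, CLike::X, CLike::Y, CLike::Z);
}
```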

View File

@@ -252,15 +252,15 @@ pub struct FileMap {
 }

 impl FileMap {
-    // EFFECT: register a start-of-line offset in the
-    // table of line-beginnings.
-    // UNCHECKED INVARIANT: these offsets must be added in the right
-    // order and must be in the right places; there is shared knowledge
-    // about what ends a line between this file and parse.rs
-    // WARNING: pos param here is the offset relative to start of CodeMap,
-    // and CodeMap will append a newline when adding a filemap without a newline at the end,
-    // so the safe way to call this is with value calculated as
-    // filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
+    /// EFFECT: register a start-of-line offset in the
+    /// table of line-beginnings.
+    /// UNCHECKED INVARIANT: these offsets must be added in the right
+    /// order and must be in the right places; there is shared knowledge
+    /// about what ends a line between this file and parse.rs
+    /// WARNING: pos param here is the offset relative to start of CodeMap,
+    /// and CodeMap will append a newline when adding a filemap without a newline at the end,
+    /// so the safe way to call this is with value calculated as
+    /// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
     pub fn next_line(&self, pos: BytePos) {
         // the new charpos must be > the last one (or it's the first one).
         let mut lines = self.lines.borrow_mut();;
@@ -269,7 +269,7 @@ impl FileMap {
         lines.push(pos);
     }

-    // get a line from the list of pre-computed line-beginnings
+    /// get a line from the list of pre-computed line-beginnings
     pub fn get_line(&self, line: int) -> String {
         let mut lines = self.lines.borrow_mut();
         let begin: BytePos = *lines.get(line as uint) - self.start_pos;
@@ -428,7 +428,7 @@ impl CodeMap {
         FileMapAndBytePos {fm: fm, pos: offset}
     }

-    // Converts an absolute BytePos to a CharPos relative to the filemap and above.
+    /// Converts an absolute BytePos to a CharPos relative to the filemap and above.
     pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
         debug!("codemap: converting {:?} to char pos", bpos);
         let idx = self.lookup_filemap_idx(bpos);

View File

@@ -21,7 +21,7 @@ use std::string::String;
 use term::WriterWrapper;
 use term;

-// maximum number of lines we will print for each error; arbitrary.
+/// maximum number of lines we will print for each error; arbitrary.
 static MAX_LINES: uint = 6u;

 #[deriving(Clone)]
@@ -73,9 +73,9 @@ pub struct FatalError;
 /// or `.span_bug` rather than a failed assertion, etc.
 pub struct ExplicitBug;

-// a span-handler is like a handler but also
-// accepts span information for source-location
-// reporting.
+/// A span-handler is like a handler but also
+/// accepts span information for source-location
+/// reporting.
 pub struct SpanHandler {
     pub handler: Handler,
     pub cm: codemap::CodeMap,
@@ -114,9 +114,9 @@ impl SpanHandler {
     }
 }

-// a handler deals with errors; certain errors
-// (fatal, bug, unimpl) may cause immediate exit,
-// others log errors for later reporting.
+/// A handler deals with errors; certain errors
+/// (fatal, bug, unimpl) may cause immediate exit,
+/// others log errors for later reporting.
 pub struct Handler {
     err_count: Cell<uint>,
     emit: RefCell<Box<Emitter + Send>>,
@@ -442,12 +442,12 @@ fn highlight_lines(err: &mut EmitterWriter,
     Ok(())
 }

-// Here are the differences between this and the normal `highlight_lines`:
-// `custom_highlight_lines` will always put arrow on the last byte of the
-// span (instead of the first byte). Also, when the span is too long (more
-// than 6 lines), `custom_highlight_lines` will print the first line, then
-// dot dot dot, then last line, whereas `highlight_lines` prints the first
-// six lines.
+/// Here are the differences between this and the normal `highlight_lines`:
+/// `custom_highlight_lines` will always put arrow on the last byte of the
+/// span (instead of the first byte). Also, when the span is too long (more
+/// than 6 lines), `custom_highlight_lines` will print the first line, then
+/// dot dot dot, then last line, whereas `highlight_lines` prints the first
+/// six lines.
 fn custom_highlight_lines(w: &mut EmitterWriter,
                           cm: &codemap::CodeMap,
                           sp: Span,

View File

@@ -278,9 +278,9 @@ pub enum SyntaxExtension {
 pub type NamedSyntaxExtension = (Name, SyntaxExtension);

 pub struct BlockInfo {
-    // should macros escape from this scope?
+    /// Should macros escape from this scope?
     pub macros_escape: bool,
-    // what are the pending renames?
+    /// What are the pending renames?
    pub pending_renames: mtwt::RenameList,
 }
@@ -293,8 +293,8 @@ impl BlockInfo {
     }
 }

-// The base map of methods for expanding syntax extension
-// AST nodes into full ASTs
+/// The base map of methods for expanding syntax extension
+/// AST nodes into full ASTs
 pub fn syntax_expander_table() -> SyntaxEnv {
     // utility function to simplify creating NormalTT syntax extensions
     fn builtin_normal_expander(f: MacroExpanderFn) -> SyntaxExtension {
@@ -398,9 +398,9 @@ pub fn syntax_expander_table() -> SyntaxEnv {
     syntax_expanders
 }

-// One of these is made during expansion and incrementally updated as we go;
-// when a macro expansion occurs, the resulting nodes have the backtrace()
-// -> expn_info of their expansion context stored into their span.
+/// One of these is made during expansion and incrementally updated as we go;
+/// when a macro expansion occurs, the resulting nodes have the backtrace()
+/// -> expn_info of their expansion context stored into their span.
 pub struct ExtCtxt<'a> {
     pub parse_sess: &'a parse::ParseSess,
     pub cfg: ast::CrateConfig,
@@ -612,11 +612,11 @@ pub fn get_exprs_from_tts(cx: &mut ExtCtxt,
     Some(es)
 }

-// in order to have some notion of scoping for macros,
-// we want to implement the notion of a transformation
-// environment.
-// This environment maps Names to SyntaxExtensions.
+/// In order to have some notion of scoping for macros,
+/// we want to implement the notion of a transformation
+/// environment.
+/// This environment maps Names to SyntaxExtensions.

 //impl question: how to implement it? Initially, the
 // env will contain only macros, so it might be painful
@@ -633,7 +633,6 @@ struct MapChainFrame {
     map: HashMap<Name, SyntaxExtension>,
 }

-// Only generic to make it easy to test
 pub struct SyntaxEnv {
     chain: Vec<MapChainFrame> ,
 }
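
A minimal sketch of that "transformation environment": a stack of scope frames where lookup walks outward from the innermost frame (hypothetical types, not the MapChainFrame/SyntaxEnv implementation):

```rust
use std::collections::HashMap;

struct ScopedEnv<V> {
    chain: Vec<HashMap<String, V>>, // innermost frame last
}

impl<V> ScopedEnv<V> {
    fn new() -> Self { ScopedEnv { chain: vec![HashMap::new()] } }
    fn push_frame(&mut self) { self.chain.push(HashMap::new()); }
    fn pop_frame(&mut self) { self.chain.pop(); }
    fn insert(&mut self, name: &str, v: V) {
        self.chain.last_mut().unwrap().insert(name.to_string(), v);
    }
    fn find(&self, name: &str) -> Option<&V> {
        // innermost definition wins; outer frames are consulted as a fallback
        self.chain.iter().rev().find_map(|frame| frame.get(name))
    }
}

fn main() {
    let mut env: ScopedEnv<u32> = ScopedEnv::new();
    env.insert("outer", 1);
    env.push_frame();
    env.insert("inner", 2);
    assert_eq!(env.find("outer"), Some(&1)); // falls through to the outer frame
    env.pop_frame();
    assert_eq!(env.find("inner"), None); // inner definitions vanish with their scope
}
```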

View File

@@ -8,79 +8,76 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-/*!
-
-The compiler code necessary to implement the `#[deriving(Encodable)]`
-(and `Decodable`, in decodable.rs) extension. The idea here is that
-type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`.
-
-For example, a type like:
-
-```ignore
-#[deriving(Encodable, Decodable)]
-struct Node { id: uint }
-```
-
-would generate two implementations like:
-
-```ignore
-impl<S:serialize::Encoder> Encodable<S> for Node {
-    fn encode(&self, s: &S) {
-        s.emit_struct("Node", 1, || {
-            s.emit_field("id", 0, || s.emit_uint(self.id))
-        })
-    }
-}
-
-impl<D:Decoder> Decodable for node_id {
-    fn decode(d: &D) -> Node {
-        d.read_struct("Node", 1, || {
-            Node {
-                id: d.read_field("x".to_string(), 0, || decode(d))
-            }
-        })
-    }
-}
-```
-
-Other interesting scenarios are whe the item has type parameters or
-references other non-built-in types. A type definition like:
-
-```ignore
-#[deriving(Encodable, Decodable)]
-struct spanned<T> { node: T, span: Span }
-```
-
-would yield functions like:
-
-```ignore
-impl<
-    S: Encoder,
-    T: Encodable<S>
-> spanned<T>: Encodable<S> {
-    fn encode<S:Encoder>(s: &S) {
-        s.emit_rec(|| {
-            s.emit_field("node", 0, || self.node.encode(s));
-            s.emit_field("span", 1, || self.span.encode(s));
-        })
-    }
-}
-
-impl<
-    D: Decoder,
-    T: Decodable<D>
-> spanned<T>: Decodable<D> {
-    fn decode(d: &D) -> spanned<T> {
-        d.read_rec(|| {
-            {
-                node: d.read_field("node".to_string(), 0, || decode(d)),
-                span: d.read_field("span".to_string(), 1, || decode(d)),
-            }
-        })
-    }
-}
-```
-*/
+//! The compiler code necessary to implement the `#[deriving(Encodable)]`
+//! (and `Decodable`, in decodable.rs) extension. The idea here is that
+//! type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`.
+//!
+//! For example, a type like:
+//!
+//! ```ignore
+//! #[deriving(Encodable, Decodable)]
+//! struct Node { id: uint }
+//! ```
+//!
+//! would generate two implementations like:
+//!
+//! ```ignore
+//! impl<S:serialize::Encoder> Encodable<S> for Node {
+//!     fn encode(&self, s: &S) {
+//!         s.emit_struct("Node", 1, || {
+//!             s.emit_field("id", 0, || s.emit_uint(self.id))
+//!         })
+//!     }
+//! }
+//!
+//! impl<D:Decoder> Decodable for node_id {
+//!     fn decode(d: &D) -> Node {
+//!         d.read_struct("Node", 1, || {
+//!             Node {
+//!                 id: d.read_field("x".to_string(), 0, || decode(d))
+//!             }
+//!         })
+//!     }
+//! }
+//! ```
+//!
+//! Other interesting scenarios are when the item has type parameters or
+//! references other non-built-in types. A type definition like:
+//!
+//! ```ignore
+//! #[deriving(Encodable, Decodable)]
+//! struct spanned<T> { node: T, span: Span }
+//! ```
+//!
+//! would yield functions like:
+//!
+//! ```ignore
+//! impl<
+//!     S: Encoder,
+//!     T: Encodable<S>
+//! > spanned<T>: Encodable<S> {
+//!     fn encode<S:Encoder>(s: &S) {
+//!         s.emit_rec(|| {
+//!             s.emit_field("node", 0, || self.node.encode(s));
+//!             s.emit_field("span", 1, || self.span.encode(s));
+//!         })
+//!     }
+//! }
+//!
+//! impl<
+//!     D: Decoder,
+//!     T: Decodable<D>
+//! > spanned<T>: Decodable<D> {
+//!     fn decode(d: &D) -> spanned<T> {
+//!         d.read_rec(|| {
+//!             {
+//!                 node: d.read_field("node".to_string(), 0, || decode(d)),
+//!                 span: d.read_field("span".to_string(), 1, || decode(d)),
+//!             }
+//!         })
+//!     }
+//! }
+//! ```

 use ast::{MetaItem, Item, Expr, ExprRet, MutMutable, LitNil};
 use codemap::Span;

View File

@ -8,174 +8,170 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
/*! //! Some code that abstracts away much of the boilerplate of writing
//! `deriving` instances for traits. Among other things it manages getting
Some code that abstracts away much of the boilerplate of writing //! access to the fields of the 4 different sorts of structs and enum
`deriving` instances for traits. Among other things it manages getting //! variants, as well as creating the method and impl ast instances.
access to the fields of the 4 different sorts of structs and enum //!
variants, as well as creating the method and impl ast instances. //! Supported features (fairly exhaustive):
//!
Supported features (fairly exhaustive): //! - Methods taking any number of parameters of any type, and returning
//! any type, other than vectors, bottom and closures.
- Methods taking any number of parameters of any type, and returning //! - Generating `impl`s for types with type parameters and lifetimes
any type, other than vectors, bottom and closures. //! (e.g. `Option<T>`), the parameters are automatically given the
- Generating `impl`s for types with type parameters and lifetimes //! current trait as a bound. (This includes separate type parameters
(e.g. `Option<T>`), the parameters are automatically given the //! and lifetimes for methods.)
current trait as a bound. (This includes separate type parameters //! - Additional bounds on the type parameters, e.g. the `Ord` instance
and lifetimes for methods.) //! requires an explicit `PartialEq` bound at the
- Additional bounds on the type parameters, e.g. the `Ord` instance //! moment. (`TraitDef.additional_bounds`)
requires an explicit `PartialEq` bound at the //!
moment. (`TraitDef.additional_bounds`) //! Unsupported: FIXME #6257: calling methods on reference fields,
//! e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`,
Unsupported: FIXME #6257: calling methods on reference fields, //! because of how the auto-dereferencing happens.
e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`, //!
because of how the auto-dereferencing happens. //! The most important thing for implementers is the `Substructure` and
//! `SubstructureFields` objects. The latter groups 5 possibilities of the
The most important thing for implementers is the `Substructure` and //! arguments:
`SubstructureFields` objects. The latter groups 5 possibilities of the //!
arguments: //! - `Struct`, when `Self` is a struct (including tuple structs, e.g
//! `struct T(int, char)`).
- `Struct`, when `Self` is a struct (including tuple structs, e.g //! - `EnumMatching`, when `Self` is an enum and all the arguments are the
`struct T(int, char)`). //! same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`)
- `EnumMatching`, when `Self` is an enum and all the arguments are the //! - `EnumNonMatching` when `Self` is an enum and the arguments are not
same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`) //! the same variant (e.g. `None`, `Some(1)` and `None`). If
- `EnumNonMatching` when `Self` is an enum and the arguments are not //! `const_nonmatching` is true, this will contain an empty list.
the same variant (e.g. `None`, `Some(1)` and `None`). If //! - `StaticEnum` and `StaticStruct` for static methods, where the type
`const_nonmatching` is true, this will contain an empty list. //! being derived upon is either an enum or struct respectively. (Any
- `StaticEnum` and `StaticStruct` for static methods, where the type //! argument with type Self is just grouped among the non-self
being derived upon is either an enum or struct respectively. (Any //! arguments.)
argument with type Self is just grouped among the non-self //!
arguments.) //! In the first two cases, the values from the corresponding fields in
//! all the arguments are grouped together. In the `EnumNonMatching` case
In the first two cases, the values from the corresponding fields in //! this isn't possible (different variants have different fields), so the
all the arguments are grouped together. In the `EnumNonMatching` case //! fields are grouped by which argument they come from. There are no
this isn't possible (different variants have different fields), so the //! fields with values in the static cases, so these are treated entirely
fields are grouped by which argument they come from. There are no //! differently.
fields with values in the static cases, so these are treated entirely //!
differently. //! The non-static cases have `Option<ident>` in several places associated
//! with field `expr`s. This represents the name of the field it is
The non-static cases have `Option<ident>` in several places associated //! associated with. It is only not `None` when the associated field has
with field `expr`s. This represents the name of the field it is //! an identifier in the source code. For example, the `x`s in the
associated with. It is only not `None` when the associated field has //! following snippet
an identifier in the source code. For example, the `x`s in the //!
following snippet //! ```rust
//! struct A { x : int }
```rust //!
struct A { x : int } //! struct B(int);
//!
struct B(int); //! enum C {
//! C0(int),
enum C { //! C1 { x: int }
C0(int), //! }
C1 { x: int } //! ```
} //!
``` //! The `int`s in `B` and `C0` don't have an identifier, so the
//! `Option<ident>`s would be `None` for them.
The `int`s in `B` and `C0` don't have an identifier, so the //!
`Option<ident>`s would be `None` for them. //! In the static cases, the structure is summarised, either into the just
//! spans of the fields or a list of spans and the field idents (for tuple
In the static cases, the structure is summarised, either into the just //! structs and record structs, respectively), or a list of these, for
spans of the fields or a list of spans and the field idents (for tuple //! enums (one for each variant). For empty struct and empty enum
structs and record structs, respectively), or a list of these, for //! variants, it is represented as a count of 0.
enums (one for each variant). For empty struct and empty enum //!
variants, it is represented as a count of 0. //! # Examples
//!
# Examples //! The following simplified `PartialEq` is used for in-code examples:
//!
The following simplified `PartialEq` is used for in-code examples: //! ```rust
//! trait PartialEq {
```rust //! fn eq(&self, other: &Self);
trait PartialEq { //! }
fn eq(&self, other: &Self); //! impl PartialEq for int {
} //! fn eq(&self, other: &int) -> bool {
impl PartialEq for int { //! *self == *other
fn eq(&self, other: &int) -> bool { //! }
*self == *other //! }
} //! ```
} //!
``` //! Some examples of the values of `SubstructureFields` follow, using the
//! above `PartialEq`, `A`, `B` and `C`.
Some examples of the values of `SubstructureFields` follow, using the //!
above `PartialEq`, `A`, `B` and `C`. //! ## Structs
//!
## Structs //! When generating the `expr` for the `A` impl, the `SubstructureFields` is
//!
When generating the `expr` for the `A` impl, the `SubstructureFields` is //! ~~~text
//! Struct(~[FieldInfo {
~~~text //! span: <span of x>
Struct(~[FieldInfo { //! name: Some(<ident of x>),
span: <span of x> //! self_: <expr for &self.x>,
name: Some(<ident of x>), //! other: ~[<expr for &other.x]
self_: <expr for &self.x>, //! }])
other: ~[<expr for &other.x] //! ~~~
}]) //!
~~~ //! For the `B` impl, called with `B(a)` and `B(b)`,
//!
For the `B` impl, called with `B(a)` and `B(b)`, //! ~~~text
//! Struct(~[FieldInfo {
~~~text //! span: <span of `int`>,
Struct(~[FieldInfo { //! name: None,
span: <span of `int`>, //! <expr for &a>
name: None, //! ~[<expr for &b>]
<expr for &a> //! }])
~[<expr for &b>] //! ~~~
}]) //!
~~~ //! ## Enums
//!
## Enums //! When generating the `expr` for a call with `self == C0(a)` and `other
//! == C0(b)`, the SubstructureFields is
When generating the `expr` for a call with `self == C0(a)` and `other //!
== C0(b)`, the SubstructureFields is //! ~~~text
//! EnumMatching(0, <ast::Variant for C0>,
~~~text //! ~[FieldInfo {
EnumMatching(0, <ast::Variant for C0>, //! span: <span of int>
~[FieldInfo { //! name: None,
span: <span of int> //! self_: <expr for &a>,
name: None, //! other: ~[<expr for &b>]
self_: <expr for &a>, //! }])
other: ~[<expr for &b>] //! ~~~
}]) //!
~~~ //! For `C1 {x}` and `C1 {x}`,
//!
For `C1 {x}` and `C1 {x}`, //! ~~~text
//! EnumMatching(1, <ast::Variant for C1>,
~~~text //! ~[FieldInfo {
EnumMatching(1, <ast::Variant for C1>, //! span: <span of x>
~[FieldInfo { //! name: Some(<ident of x>),
span: <span of x> //! self_: <expr for &self.x>,
name: Some(<ident of x>), //! other: ~[<expr for &other.x>]
self_: <expr for &self.x>, //! }])
other: ~[<expr for &other.x>] //! ~~~
}]) //!
~~~ //! For `C0(a)` and `C1 {x}` ,
//!
For `C0(a)` and `C1 {x}` , //! ~~~text
//! EnumNonMatching(~[(0, <ast::Variant for B0>,
~~~text //! ~[(<span of int>, None, <expr for &a>)]),
EnumNonMatching(~[(0, <ast::Variant for B0>, //! (1, <ast::Variant for B1>,
~[(<span of int>, None, <expr for &a>)]), //! ~[(<span of x>, Some(<ident of x>),
(1, <ast::Variant for B1>, //! <expr for &other.x>)])])
~[(<span of x>, Some(<ident of x>), //! ~~~
<expr for &other.x>)])]) //!
~~~ //! (and vice versa, but with the order of the outermost list flipped.)
//!
(and vice versa, but with the order of the outermost list flipped.) //! ## Static
//!
## Static //! A static method on the above would result in,
//!
A static method on the above would result in, //! ~~~text
//! StaticStruct(<ast::StructDef of A>, Named(~[(<ident of x>, <span of x>)]))
~~~text //!
StaticStruct(<ast::StructDef of A>, Named(~[(<ident of x>, <span of x>)])) //! StaticStruct(<ast::StructDef of B>, Unnamed(~[<span of x>]))
//!
StaticStruct(<ast::StructDef of B>, Unnamed(~[<span of x>])) //! StaticEnum(<ast::EnumDef of C>, ~[(<ident of C0>, <span of C0>, Unnamed(~[<span of int>])),
//! (<ident of C1>, <span of C1>,
StaticEnum(<ast::EnumDef of C>, ~[(<ident of C0>, <span of C0>, Unnamed(~[<span of int>])), //! Named(~[(<ident of x>, <span of x>)]))])
(<ident of C1>, <span of C1>, //! ~~~
Named(~[(<ident of x>, <span of x>)]))])
~~~
*/
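To make the shapes above concrete, here is a rough, modernized sketch of the structures involved. The names and string stand-ins are hypothetical simplifications: the real `FieldInfo` and `SubstructureFields` live in this module and use the 2014-era `Gc`, `~[]`, and `Span` types.

```rust
// Hypothetical, simplified stand-ins for the real types in this module;
// strings stand in for spans, idents, and expressions.
#[derive(Debug)]
struct FieldInfo {
    span: &'static str,
    name: Option<&'static str>, // `Some(ident)` for named fields, `None` for tuple fields
    self_: &'static str,
    other: Vec<&'static str>,
}

#[derive(Debug)]
enum SubstructureFields {
    Struct(Vec<FieldInfo>),
    EnumMatching(usize, Vec<FieldInfo>),
}

fn main() {
    // What the `A` impl above sees: one named field `x`.
    let a = SubstructureFields::Struct(vec![FieldInfo {
        span: "<span of x>",
        name: Some("x"),
        self_: "&self.x",
        other: vec!["&other.x"],
    }]);
    // The `self == C0(a)`, `other == C0(b)` case from the enum section.
    let c0 = SubstructureFields::EnumMatching(0, vec![FieldInfo {
        span: "<span of int>",
        name: None,
        self_: "&a",
        other: vec!["&b"],
    }]);
    println!("{:?}\n{:?}", a, c0);
}
```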
use std::cell::RefCell; use std::cell::RefCell;
use std::gc::{Gc, GC}; use std::gc::{Gc, GC};


@ -25,8 +25,10 @@ use std::gc::Gc;
/// The types of pointers /// The types of pointers
pub enum PtrTy<'a> { pub enum PtrTy<'a> {
Send, // ~ /// ~
Borrowed(Option<&'a str>, ast::Mutability), // &['lifetime] [mut] Send,
/// &'lifetime mut
Borrowed(Option<&'a str>, ast::Mutability),
} }
/// A path, e.g. `::std::option::Option::<int>` (global). Has support /// A path, e.g. `::std::option::Option::<int>` (global). Has support
@ -83,12 +85,12 @@ impl<'a> Path<'a> {
/// A type. Supports pointers (except for *), Self, and literals /// A type. Supports pointers (except for *), Self, and literals
pub enum Ty<'a> { pub enum Ty<'a> {
Self, Self,
// &/Box/ Ty /// &/Box/ Ty
Ptr(Box<Ty<'a>>, PtrTy<'a>), Ptr(Box<Ty<'a>>, PtrTy<'a>),
// mod::mod::Type<[lifetime], [Params...]>, including a plain type /// mod::mod::Type<[lifetime], [Params...]>, including a plain type
// parameter, and things like `int` /// parameter, and things like `int`
Literal(Path<'a>), Literal(Path<'a>),
// includes nil /// includes unit
Tuple(Vec<Ty<'a>> ) Tuple(Vec<Ty<'a>> )
} }
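As a hedged illustration of how these constructors compose, a modernized stand-in (a `bool` replaces the real `ast::Mutability`, and plain strings replace real paths):

```rust
// Simplified stand-ins for the `PtrTy`/`Ty` constructors above.
#[allow(dead_code)]
enum PtrTy {
    Send,                                 // `~` (owned box) in 2014-era syntax
    Borrowed(Option<&'static str>, bool), // optional lifetime name, mutability
}

#[allow(dead_code)]
enum Ty {
    SelfTy,
    Ptr(Box<Ty>, PtrTy),
    Literal(&'static str), // a path such as "int" or "std::vec::Vec"
    Tuple(Vec<Ty>),
}

fn main() {
    // `&'a mut int` would be described roughly as:
    let _ref_int = Ty::Ptr(
        Box::new(Ty::Literal("int")),
        PtrTy::Borrowed(Some("'a"), true),
    );
    // and unit as the empty tuple:
    let _unit = Ty::Tuple(vec![]);
}
```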


@ -55,8 +55,8 @@ pub fn expand_deriving_show(cx: &mut ExtCtxt,
trait_def.expand(cx, mitem, item, push) trait_def.expand(cx, mitem, item, push)
} }
// we construct a format string and then defer to std::fmt, since that /// We construct a format string and then defer to std::fmt, since that
// knows what's up with formatting at so on. /// knows what's up with formatting and so on.
fn show_substructure(cx: &mut ExtCtxt, span: Span, fn show_substructure(cx: &mut ExtCtxt, span: Span,
substr: &Substructure) -> Gc<Expr> { substr: &Substructure) -> Gc<Expr> {
// build `<name>`, `<name>({}, {}, ...)` or `<name> { <field>: {}, // build `<name>`, `<name>({}, {}, ...)` or `<name> { <field>: {},


@ -246,11 +246,11 @@ pub fn expand_expr(e: Gc<ast::Expr>, fld: &mut MacroExpander) -> Gc<ast::Expr> {
} }
} }
// Rename loop label and expand its loop body /// Rename loop label and expand its loop body
// ///
// The renaming procedure for loop is different in the sense that the loop /// The renaming procedure for loop is different in the sense that the loop
// body is in a block enclosed by loop head so the renaming of loop label /// body is in a block enclosed by the loop head, so the renaming of the loop label
// must be propagated to the enclosed context. /// must be propagated to the enclosed context.
fn expand_loop_block(loop_block: P<Block>, fn expand_loop_block(loop_block: P<Block>,
opt_ident: Option<Ident>, opt_ident: Option<Ident>,
fld: &mut MacroExpander) -> (P<Block>, Option<Ident>) { fld: &mut MacroExpander) -> (P<Block>, Option<Ident>) {


@ -37,24 +37,24 @@ struct Context<'a, 'b> {
ecx: &'a mut ExtCtxt<'b>, ecx: &'a mut ExtCtxt<'b>,
fmtsp: Span, fmtsp: Span,
// Parsed argument expressions and the types that we've found so far for /// Parsed argument expressions and the types that we've found so far for
// them. /// them.
args: Vec<Gc<ast::Expr>>, args: Vec<Gc<ast::Expr>>,
arg_types: Vec<Option<ArgumentType>>, arg_types: Vec<Option<ArgumentType>>,
// Parsed named expressions and the types that we've found for them so far. /// Parsed named expressions and the types that we've found for them so far.
// Note that we keep a side-array of the ordering of the named arguments /// Note that we keep a side-array of the ordering of the named arguments
// found to be sure that we can translate them in the same order that they /// found to be sure that we can translate them in the same order that they
// were declared in. /// were declared in.
names: HashMap<String, Gc<ast::Expr>>, names: HashMap<String, Gc<ast::Expr>>,
name_types: HashMap<String, ArgumentType>, name_types: HashMap<String, ArgumentType>,
name_ordering: Vec<String>, name_ordering: Vec<String>,
// Collection of the compiled `rt::Piece` structures /// Collection of the compiled `rt::Piece` structures
pieces: Vec<Gc<ast::Expr>>, pieces: Vec<Gc<ast::Expr>>,
name_positions: HashMap<String, uint>, name_positions: HashMap<String, uint>,
method_statics: Vec<Gc<ast::Item>>, method_statics: Vec<Gc<ast::Item>>,
// Updated as arguments are consumed or methods are entered /// Updated as arguments are consumed or methods are entered
nest_level: uint, nest_level: uint,
next_arg: uint, next_arg: uint,
} }


@ -21,16 +21,16 @@ use std::cell::RefCell;
use std::rc::Rc; use std::rc::Rc;
use std::collections::HashMap; use std::collections::HashMap;
// the SCTable contains a table of SyntaxContext_'s. It /// The SCTable contains a table of SyntaxContext_'s. It
// represents a flattened tree structure, to avoid having /// represents a flattened tree structure, to avoid having
// managed pointers everywhere (that caused an ICE). /// managed pointers everywhere (that caused an ICE).
// the mark_memo and rename_memo fields are side-tables /// the mark_memo and rename_memo fields are side-tables
// that ensure that adding the same mark to the same context /// that ensure that adding the same mark to the same context
// gives you back the same context as before. This shouldn't /// gives you back the same context as before. This shouldn't
// change the semantics--everything here is immutable--but /// change the semantics--everything here is immutable--but
// it should cut down on memory use *a lot*; applying a mark /// it should cut down on memory use *a lot*; applying a mark
// to a tree containing 50 identifiers would otherwise generate /// to a tree containing 50 identifiers would otherwise generate
// 50 new contexts /// 50 new contexts
pub struct SCTable { pub struct SCTable {
table: RefCell<Vec<SyntaxContext_>>, table: RefCell<Vec<SyntaxContext_>>,
mark_memo: RefCell<HashMap<(SyntaxContext,Mrk),SyntaxContext>>, mark_memo: RefCell<HashMap<(SyntaxContext,Mrk),SyntaxContext>>,
@ -41,16 +41,16 @@ pub struct SCTable {
pub enum SyntaxContext_ { pub enum SyntaxContext_ {
EmptyCtxt, EmptyCtxt,
Mark (Mrk,SyntaxContext), Mark (Mrk,SyntaxContext),
// flattening the name and syntaxcontext into the rename... /// flattening the name and syntaxcontext into the rename...
// HIDDEN INVARIANTS: /// HIDDEN INVARIANTS:
// 1) the first name in a Rename node /// 1) the first name in a Rename node
// can only be a programmer-supplied name. /// can only be a programmer-supplied name.
// 2) Every Rename node with a given Name in the /// 2) Every Rename node with a given Name in the
// "to" slot must have the same name and context /// "to" slot must have the same name and context
// in the "from" slot. In essence, they're all /// in the "from" slot. In essence, they're all
// pointers to a single "rename" event node. /// pointers to a single "rename" event node.
Rename (Ident,Name,SyntaxContext), Rename (Ident,Name,SyntaxContext),
// actually, IllegalCtxt may not be necessary. /// actually, IllegalCtxt may not be necessary.
IllegalCtxt IllegalCtxt
} }
@ -62,7 +62,7 @@ pub fn apply_mark(m: Mrk, ctxt: SyntaxContext) -> SyntaxContext {
with_sctable(|table| apply_mark_internal(m, ctxt, table)) with_sctable(|table| apply_mark_internal(m, ctxt, table))
} }
// Extend a syntax context with a given mark and sctable (explicit memoization) /// Extend a syntax context with a given mark and sctable (explicit memoization)
fn apply_mark_internal(m: Mrk, ctxt: SyntaxContext, table: &SCTable) -> SyntaxContext { fn apply_mark_internal(m: Mrk, ctxt: SyntaxContext, table: &SCTable) -> SyntaxContext {
let key = (ctxt, m); let key = (ctxt, m);
let new_ctxt = |_: &(SyntaxContext, Mrk)| let new_ctxt = |_: &(SyntaxContext, Mrk)|
@ -77,7 +77,7 @@ pub fn apply_rename(id: Ident, to:Name,
with_sctable(|table| apply_rename_internal(id, to, ctxt, table)) with_sctable(|table| apply_rename_internal(id, to, ctxt, table))
} }
// Extend a syntax context with a given rename and sctable (explicit memoization) /// Extend a syntax context with a given rename and sctable (explicit memoization)
fn apply_rename_internal(id: Ident, fn apply_rename_internal(id: Ident,
to: Name, to: Name,
ctxt: SyntaxContext, ctxt: SyntaxContext,
@ -141,7 +141,7 @@ pub fn clear_tables() {
with_resolve_table_mut(|table| *table = HashMap::new()); with_resolve_table_mut(|table| *table = HashMap::new());
} }
// Add a value to the end of a vec, return its index /// Add a value to the end of a vec, return its index
fn idx_push<T>(vec: &mut Vec<T> , val: T) -> u32 { fn idx_push<T>(vec: &mut Vec<T> , val: T) -> u32 {
vec.push(val); vec.push(val);
(vec.len() - 1) as u32 (vec.len() - 1) as u32
@ -173,8 +173,8 @@ fn with_resolve_table_mut<T>(op: |&mut ResolveTable| -> T) -> T {
} }
} }
// Resolve a syntax object to a name, per MTWT. /// Resolve a syntax object to a name, per MTWT.
// adding memoization to resolve 500+ seconds in resolve for librustc (!) /// adding memoization to resolve 500+ seconds in resolve for librustc (!)
fn resolve_internal(id: Ident, fn resolve_internal(id: Ident,
table: &SCTable, table: &SCTable,
resolve_table: &mut ResolveTable) -> Name { resolve_table: &mut ResolveTable) -> Name {
@ -264,8 +264,8 @@ pub fn outer_mark(ctxt: SyntaxContext) -> Mrk {
}) })
} }
// Push a name... unless it matches the one on top, in which /// Push a name... unless it matches the one on top, in which
// case pop and discard (so two of the same marks cancel) /// case pop and discard (so two of the same marks cancel)
fn xor_push(marks: &mut Vec<Mrk>, mark: Mrk) { fn xor_push(marks: &mut Vec<Mrk>, mark: Mrk) {
if (marks.len() > 0) && (*marks.last().unwrap() == mark) { if (marks.len() > 0) && (*marks.last().unwrap() == mark) {
marks.pop().unwrap(); marks.pop().unwrap();
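The diff cuts the body off here; as a modernized sketch of the cancellation behaviour, with a small usage example (the real function operates on `Mrk` marks, plain `u32`s stand in):

```rust
// A sketch of `xor_push`: two identical adjacent marks cancel.
fn xor_push(marks: &mut Vec<u32>, mark: u32) {
    if marks.last() == Some(&mark) {
        marks.pop(); // same mark twice in a row: both disappear
    } else {
        marks.push(mark);
    }
}

fn main() {
    let mut marks = Vec::new();
    xor_push(&mut marks, 3);
    xor_push(&mut marks, 3); // cancels the previous 3
    xor_push(&mut marks, 5);
    assert_eq!(marks, vec![5]);
}
```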


@ -28,7 +28,7 @@ use std::str;
// the column/row/filename of the expression, or they include // the column/row/filename of the expression, or they include
// a given file into the current one. // a given file into the current one.
/* line!(): expands to the current line number */ /// line!(): expands to the current line number
pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> { -> Box<base::MacResult> {
base::check_zero_tts(cx, sp, tts, "line!"); base::check_zero_tts(cx, sp, tts, "line!");
@ -49,9 +49,9 @@ pub fn expand_col(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.col.to_uint())) base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.col.to_uint()))
} }
/* file!(): expands to the current filename */ /// file!(): expands to the current filename
/* The filemap (`loc.file`) contains a bunch more information we could spit /// The filemap (`loc.file`) contains a bunch more information we could spit
* out if we wanted. */ /// out if we wanted.
pub fn expand_file(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) pub fn expand_file(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> { -> Box<base::MacResult> {
base::check_zero_tts(cx, sp, tts, "file!"); base::check_zero_tts(cx, sp, tts, "file!");
@ -82,9 +82,9 @@ pub fn expand_mod(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
token::intern_and_get_ident(string.as_slice()))) token::intern_and_get_ident(string.as_slice())))
} }
// include! : parse the given file as an expr /// include!: parse the given file as an expr
// This is generally a bad idea because it's going to behave /// This is generally a bad idea because it's going to behave
// unhygienically. /// unhygienically.
pub fn expand_include(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) pub fn expand_include(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> { -> Box<base::MacResult> {
let file = match get_single_str_from_tts(cx, sp, tts, "include!") { let file = match get_single_str_from_tts(cx, sp, tts, "include!") {


@ -8,7 +8,72 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
// Earley-like parser for macros. //! This is an Earley-like parser without support for in-grammar nonterminals;
//! it handles named nonterminals only by calling out to the main Rust parser (which it
//! commits to fully when it hits one in a grammar). This means that there are no
//! completer or predictor rules, and therefore no need to store one column per
//! token: instead, there's a set of current Earley items and a set of next
//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional Earley parsing, but it's an
//! easier fit for Macro-by-Example-style rules, and I think the overhead is
//! lower. (In order to prevent the pathological case, we'd need to lazily
//! construct the resulting `NamedMatch`es at the very end. It'd be a pain,
//! and require more memory to keep around old items, but it would also save
//! overhead)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a spot in the middle of a matcher, usually represented as a
//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a character at a time, maintaining a list
//! of items consistent with the current position in the input string: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be valid if
//! the macro invocation were now over, `bb_eis` with items that are waiting on
//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
//! on a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. It only advances or calls out to the
//! real Rust parser when no `cur_eis` items remain.
//!
//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
//!
//! Remaining input: `a a a a b`
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a a b`
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
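The worked trace above can be simulated with a toy item-set matcher. In this hypothetical sketch, the seven possible dot positions in `a $( a )* a b` are numbered 0 through 6, the Descend/Skip and Finish/Repeat moves become an epsilon-closure, and `advance` plays the role of stepping `cur_eis` to `next_eis`; it is an illustration of the idea, not the real `MatcherPos` machinery.

```rust
use std::collections::BTreeSet;

// Dot positions in `a $( a )* a b`, numbered left to right:
// 0: `· a $( a )* a b`   3: `a $( a · )* a b`   5: `a $( a )* a · b`
// 1: `a · $( a )* a b`   4: `a $( a )* · a b`   6: `a $( a )* a b ·`
// 2: `a $( · a )* a b`
fn closure(items: BTreeSet<u8>) -> BTreeSet<u8> {
    let mut out = BTreeSet::new();
    for p in items {
        match p {
            // Descend/Skip after the first `a`, Finish/Repeat at the end of
            // the repetition body: both yield positions 2 and 4.
            1 | 3 => { out.insert(2); out.insert(4); }
            other => { out.insert(other); }
        }
    }
    out
}

fn advance(cur: &BTreeSet<u8>, tok: char) -> BTreeSet<u8> {
    let mut next = BTreeSet::new();
    for &p in cur {
        match (p, tok) {
            (0, 'a') => { next.insert(1); }
            (2, 'a') => { next.insert(3); }
            (4, 'a') => { next.insert(5); }
            (5, 'b') => { next.insert(6); }
            _ => {} // this item dies
        }
    }
    closure(next)
}

fn main() {
    let mut cur = closure(BTreeSet::from([0]));
    for tok in "aaaab".chars() {
        cur = advance(&cur, tok);
        println!("after `{}`: items at {:?}", tok, cur);
    }
    assert!(cur.contains(&6)); // the eof item: `a a a a b` matched
}
```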
use ast; use ast;
use ast::{Matcher, MatchTok, MatchSeq, MatchNonterminal, Ident}; use ast::{Matcher, MatchTok, MatchSeq, MatchNonterminal, Ident};
@ -25,75 +90,6 @@ use std::rc::Rc;
use std::gc::GC; use std::gc::GC;
use std::collections::HashMap; use std::collections::HashMap;
/* This is an Earley-like parser, without support for in-grammar nonterminals,
only by calling out to the main rust parser for named nonterminals (which it
commits to fully when it hits one in a grammar). This means that there are no
completer or predictor rules, and therefore no need to store one column per
token: instead, there's a set of current Earley items and a set of next
ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
pathological cases, is worse than traditional Earley parsing, but it's an
easier fit for Macro-by-Example-style rules, and I think the overhead is
lower. (In order to prevent the pathological case, we'd need to lazily
construct the resulting `NamedMatch`es at the very end. It'd be a pain,
and require more memory to keep around old items, but it would also save
overhead)*/
/* Quick intro to how the parser works:
A 'position' is a dot in the middle of a matcher, usually represented as a
dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
The parser walks through the input a character at a time, maintaining a list
of items consistent with the current position in the input string: `cur_eis`.
As it processes them, it fills up `eof_eis` with items that would be valid if
the macro invocation is now over, `bb_eis` with items that are waiting on
a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
on the a particular token. Most of the logic concerns moving the · through the
repetitions indicated by Kleene stars. It only advances or calls out to the
real Rust parser when no `cur_eis` items remain
Example: Start parsing `a a a a b` against [· a $( a )* a b].
Remaining input: `a a a a b`
next_eis: [· a $( a )* a b]
- - - Advance over an `a`. - - -
Remaining input: `a a a b`
cur: [a · $( a )* a b]
Descend/Skip (first item).
next: [a $( · a )* a b] [a $( a )* · a b].
- - - Advance over an `a`. - - -
Remaining input: `a a b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
- - - Advance over an `a`. - - - (this looks exactly like the last step)
Remaining input: `a b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
- - - Advance over an `a`. - - - (this looks exactly like the last step)
Remaining input: `b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b]
- - - Advance over a `b`. - - -
Remaining input: ``
eof: [a $( a )* a b ·]
*/
/* to avoid costly uniqueness checks, we require that `MatchSeq` always has a /* to avoid costly uniqueness checks, we require that `MatchSeq` always has a
nonempty body. */ nonempty body. */
@ -147,24 +143,24 @@ pub fn initial_matcher_pos(ms: Vec<Matcher> , sep: Option<Token>, lo: BytePos)
} }
} }
// NamedMatch is a pattern-match result for a single ast::MatchNonterminal: /// NamedMatch is a pattern-match result for a single ast::MatchNonterminal:
// so it is associated with a single ident in a parse, and all /// so it is associated with a single ident in a parse, and all
// MatchedNonterminal's in the NamedMatch have the same nonterminal type /// MatchedNonterminal's in the NamedMatch have the same nonterminal type
// (expr, item, etc). All the leaves in a single NamedMatch correspond to a /// (expr, item, etc). All the leaves in a single NamedMatch correspond to a
// single matcher_nonterminal in the ast::Matcher that produced it. /// single matcher_nonterminal in the ast::Matcher that produced it.
// ///
// It should probably be renamed, it has more or less exact correspondence to /// It should probably be renamed; it has a more or less exact correspondence to
// ast::match nodes, and the in-memory structure of a particular NamedMatch /// ast::match nodes, and the in-memory structure of a particular NamedMatch
// represents the match that occurred when a particular subset of an /// represents the match that occurred when a particular subset of an
// ast::match -- those ast::Matcher nodes leading to a single /// ast::match -- those ast::Matcher nodes leading to a single
// MatchNonterminal -- was applied to a particular token tree. /// MatchNonterminal -- was applied to a particular token tree.
// ///
// The width of each MatchedSeq in the NamedMatch, and the identity of the /// The width of each MatchedSeq in the NamedMatch, and the identity of the
// MatchedNonterminal's, will depend on the token tree it was applied to: each /// MatchedNonterminal's, will depend on the token tree it was applied to: each
// MatchedSeq corresponds to a single MatchSeq in the originating /// MatchedSeq corresponds to a single MatchSeq in the originating
// ast::Matcher. The depth of the NamedMatch structure will therefore depend /// ast::Matcher. The depth of the NamedMatch structure will therefore depend
// only on the nesting depth of ast::MatchSeq's in the originating /// only on the nesting depth of ast::MatchSeq's in the originating
// ast::Matcher it was derived from. /// ast::Matcher it was derived from.
pub enum NamedMatch { pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span), MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
@ -224,7 +220,8 @@ pub fn parse_or_else(sess: &ParseSess,
} }
} }
// perform a token equality check, ignoring syntax context (that is, an unhygienic comparison) /// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool { pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) { match (t1,t2) {
(&token::IDENT(id1,_),&token::IDENT(id2,_)) (&token::IDENT(id1,_),&token::IDENT(id2,_))


@ -119,7 +119,7 @@ impl MacResult for MacroRulesDefiner {
} }
} }
// Given `lhses` and `rhses`, this is the new macro we create /// Given `lhses` and `rhses`, this is the new macro we create
fn generic_extension(cx: &ExtCtxt, fn generic_extension(cx: &ExtCtxt,
sp: Span, sp: Span,
name: Ident, name: Ident,
@ -193,9 +193,9 @@ fn generic_extension(cx: &ExtCtxt,
cx.span_fatal(best_fail_spot, best_fail_msg.as_slice()); cx.span_fatal(best_fail_spot, best_fail_msg.as_slice());
} }
// this procedure performs the expansion of the /// This procedure performs the expansion of the
// macro_rules! macro. It parses the RHS and adds /// macro_rules! macro. It parses the RHS and adds
// an extension to the current context. /// an extension to the current context.
pub fn add_new_extension(cx: &mut ExtCtxt, pub fn add_new_extension(cx: &mut ExtCtxt,
sp: Span, sp: Span,
name: Ident, name: Ident,


@ -32,7 +32,7 @@ struct TtFrame {
#[deriving(Clone)] #[deriving(Clone)]
pub struct TtReader<'a> { pub struct TtReader<'a> {
pub sp_diag: &'a SpanHandler, pub sp_diag: &'a SpanHandler,
// the unzipped tree: /// the unzipped tree:
stack: Vec<TtFrame>, stack: Vec<TtFrame>,
/* for MBE-style macro transcription */ /* for MBE-style macro transcription */
interpolations: HashMap<Ident, Rc<NamedMatch>>, interpolations: HashMap<Ident, Rc<NamedMatch>>,
@ -43,9 +43,9 @@ pub struct TtReader<'a> {
pub cur_span: Span, pub cur_span: Span,
} }
/** This can do Macro-By-Example transcription. On the other hand, if /// This can do Macro-By-Example transcription. On the other hand, if
* `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and /// `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and
* should) be none. */ /// should) be none.
pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler, pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler,
interp: Option<HashMap<Ident, Rc<NamedMatch>>>, interp: Option<HashMap<Ident, Rc<NamedMatch>>>,
src: Vec<ast::TokenTree> ) src: Vec<ast::TokenTree> )
@ -138,8 +138,8 @@ fn lockstep_iter_size(t: &TokenTree, r: &TtReader) -> LockstepIterSize {
} }
} }
// return the next token from the TtReader. /// Return the next token from the TtReader.
// EFFECT: advances the reader's token field /// EFFECT: advances the reader's token field
pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
// FIXME(pcwalton): Bad copy? // FIXME(pcwalton): Bad copy?
let ret_val = TokenAndSpan { let ret_val = TokenAndSpan {


@ -8,15 +8,11 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
/*! //! The Rust parser and macro expander.
//!
The Rust parser and macro expander. //! # Note
//!
# Note //! This API is completely unstable and subject to change.
This API is completely unstable and subject to change.
*/
#![crate_id = "syntax#0.11.0"] // NOTE: remove after stage0 #![crate_id = "syntax#0.11.0"] // NOTE: remove after stage0
#![crate_name = "syntax"] #![crate_name = "syntax"]


@ -18,7 +18,7 @@ use parse::token::INTERPOLATED;
use std::gc::{Gc, GC}; use std::gc::{Gc, GC};
// a parser that can parse attributes. /// A parser that can parse attributes.
pub trait ParserAttr { pub trait ParserAttr {
fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>; fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>;
fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute; fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute;
@ -30,7 +30,7 @@ pub trait ParserAttr {
} }
impl<'a> ParserAttr for Parser<'a> { impl<'a> ParserAttr for Parser<'a> {
// Parse attributes that appear before an item /// Parse attributes that appear before an item
fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> { fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> {
let mut attrs: Vec<ast::Attribute> = Vec::new(); let mut attrs: Vec<ast::Attribute> = Vec::new();
loop { loop {
@ -59,10 +59,10 @@ impl<'a> ParserAttr for Parser<'a> {
return attrs; return attrs;
} }
// matches attribute = # ! [ meta_item ] /// Matches `attribute = # ! [ meta_item ]`
// ///
// if permit_inner is true, then a leading `!` indicates an inner /// If permit_inner is true, then a leading `!` indicates an inner
// attribute /// attribute
fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute { fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute {
debug!("parse_attributes: permit_inner={:?} self.token={:?}", debug!("parse_attributes: permit_inner={:?} self.token={:?}",
permit_inner, self.token); permit_inner, self.token);
@ -114,17 +114,17 @@ impl<'a> ParserAttr for Parser<'a> {
}; };
} }
// Parse attributes that appear after the opening of an item. These should /// Parse attributes that appear after the opening of an item. These should
// be preceded by an exclamation mark, but we accept and warn about one /// be preceded by an exclamation mark, but we accept and warn about one
// terminated by a semicolon. In addition to a vector of inner attributes, /// terminated by a semicolon. In addition to a vector of inner attributes,
// this function also returns a vector that may contain the first outer /// this function also returns a vector that may contain the first outer
// attribute of the next item (since we can't know whether the attribute /// attribute of the next item (since we can't know whether the attribute
// is an inner attribute of the containing item or an outer attribute of /// is an inner attribute of the containing item or an outer attribute of
// the first contained item until we see the semi). /// the first contained item until we see the semi).
// matches inner_attrs* outer_attr? /// matches inner_attrs* outer_attr?
// you can make the 'next' field an Option, but the result is going to be /// you can make the 'next' field an Option, but the result is going to be
// more useful as a vector. /// more useful as a vector.
fn parse_inner_attrs_and_next(&mut self) fn parse_inner_attrs_and_next(&mut self)
-> (Vec<ast::Attribute> , Vec<ast::Attribute> ) { -> (Vec<ast::Attribute> , Vec<ast::Attribute> ) {
let mut inner_attrs: Vec<ast::Attribute> = Vec::new(); let mut inner_attrs: Vec<ast::Attribute> = Vec::new();
@ -157,9 +157,9 @@ impl<'a> ParserAttr for Parser<'a> {
(inner_attrs, next_outer_attrs) (inner_attrs, next_outer_attrs)
} }
// matches meta_item = IDENT /// matches meta_item = IDENT
// | IDENT = lit /// | IDENT = lit
// | IDENT meta_seq /// | IDENT meta_seq
fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> { fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> {
match self.token { match self.token {
token::INTERPOLATED(token::NtMeta(e)) => { token::INTERPOLATED(token::NtMeta(e)) => {
@ -201,7 +201,7 @@ impl<'a> ParserAttr for Parser<'a> {
} }
} }
// matches meta_seq = ( COMMASEP(meta_item) ) /// matches meta_seq = ( COMMASEP(meta_item) )
fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>> { fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>> {
self.parse_seq(&token::LPAREN, self.parse_seq(&token::LPAREN,
&token::RPAREN, &token::RPAREN,


@ -15,13 +15,13 @@
use ast; use ast;
use std::gc::Gc; use std::gc::Gc;
// does this expression require a semicolon to be treated /// Does this expression require a semicolon to be treated
// as a statement? The negation of this: 'can this expression /// as a statement? The negation of this: 'can this expression
// be used as a statement without a semicolon' -- is used /// be used as a statement without a semicolon' -- is used
// as an early-bail-out in the parser so that, for instance, /// as an early-bail-out in the parser so that, for instance,
// 'if true {...} else {...} /// if true {...} else {...}
// |x| 5 ' /// |x| 5
// isn't parsed as (if true {...} else {...} | x) | 5 /// isn't parsed as (if true {...} else {...} | x) | 5
pub fn expr_requires_semi_to_be_stmt(e: Gc<ast::Expr>) -> bool { pub fn expr_requires_semi_to_be_stmt(e: Gc<ast::Expr>) -> bool {
match e.node { match e.node {
ast::ExprIf(..) ast::ExprIf(..)
@ -41,9 +41,9 @@ pub fn expr_is_simple_block(e: Gc<ast::Expr>) -> bool {
} }
} }
// this statement requires a semicolon after it. /// this statement requires a semicolon after it.
// note that in one case (stmt_semi), we've already /// note that in one case (stmt_semi), we've already
// seen the semicolon, and thus don't need another. /// seen the semicolon, and thus don't need another.
pub fn stmt_ends_with_semi(stmt: &ast::Stmt) -> bool { pub fn stmt_ends_with_semi(stmt: &ast::Stmt) -> bool {
return match stmt.node { return match stmt.node {
ast::StmtDecl(d, _) => { ast::StmtDecl(d, _) => {


@ -12,8 +12,8 @@
use parse::token; use parse::token;
// SeqSep : a sequence separator (token) /// SeqSep: a sequence separator (token)
// and whether a trailing separator is allowed. /// and whether a trailing separator is allowed.
pub struct SeqSep { pub struct SeqSep {
pub sep: Option<token::Token>, pub sep: Option<token::Token>,
pub trailing_sep_allowed: bool pub trailing_sep_allowed: bool


@ -24,10 +24,14 @@ use std::uint;
#[deriving(Clone, PartialEq)] #[deriving(Clone, PartialEq)]
pub enum CommentStyle { pub enum CommentStyle {
Isolated, // No code on either side of each line of the comment /// No code on either side of each line of the comment
Trailing, // Code exists to the left of the comment Isolated,
Mixed, // Code before /* foo */ and after the comment /// Code exists to the left of the comment
BlankLine, // Just a manual blank line "\n\n", for layout Trailing,
/// Code before /* foo */ and after the comment
Mixed,
/// Just a manual blank line "\n\n", for layout
BlankLine,
} }
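For illustration, the four styles in source form (a hedged example; the variants only classify comments, they carry no text themselves):

```rust
fn foo(n: i32) -> i32 { n }

fn main() {
    // Isolated: a comment with no code on either side of it.

    let x = 1; // Trailing: code exists to the left of the comment.

    let y = foo(/* Mixed: code before and after the comment */ 2);

    // The blank line after the `y` binding would be recorded as `BlankLine`,
    // purely so the pretty-printer can reproduce the layout.
    let z = 3;
    println!("{} {} {}", x, y, z);
}
```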
#[deriving(Clone)] #[deriving(Clone)]
@ -198,9 +202,9 @@ fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
} }
} }
// Returns None if the first col chars of s contain a non-whitespace char. /// Returns `None` if the first `col` chars of `s` contain a non-whitespace char.
// Otherwise returns Some(k) where k is first char offset after that leading /// Otherwise returns `Some(k)` where `k` is the first char offset after that leading
// whitespace. Note k may be outside bounds of s. /// whitespace. Note `k` may be outside the bounds of `s`.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> { fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len(); let len = s.len();
let mut col = col.to_uint(); let mut col = col.to_uint();
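The diff cuts the body off here; a modernized sketch of the described behaviour, returning byte offsets as the original does:

```rust
// `None` if any of the first `col` chars of `s` is non-whitespace;
// otherwise `Some` of the byte offset just past them.
fn all_whitespace(s: &str, col: usize) -> Option<usize> {
    let mut cursor = 0;
    for (i, ch) in s.char_indices().take(col) {
        if !ch.is_whitespace() {
            return None;
        }
        cursor = i + ch.len_utf8();
    }
    Some(cursor)
}

fn main() {
    assert_eq!(all_whitespace("    code", 4), Some(4));
    assert_eq!(all_whitespace("  x code", 4), None);
    assert_eq!(all_whitespace(" ", 4), Some(1)); // shorter than `col` is fine
}
```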


@ -44,13 +44,13 @@ pub struct TokenAndSpan {
pub struct StringReader<'a> { pub struct StringReader<'a> {
pub span_diagnostic: &'a SpanHandler, pub span_diagnostic: &'a SpanHandler,
// The absolute offset within the codemap of the next character to read /// The absolute offset within the codemap of the next character to read
pub pos: BytePos, pub pos: BytePos,
// The absolute offset within the codemap of the last character read(curr) /// The absolute offset within the codemap of the last character read (`curr`)
pub last_pos: BytePos, pub last_pos: BytePos,
// The column of the next character to read /// The column of the next character to read
pub col: CharPos, pub col: CharPos,
// The last character to be read /// The last character to be read
pub curr: Option<char>, pub curr: Option<char>,
pub filemap: Rc<codemap::FileMap>, pub filemap: Rc<codemap::FileMap>,
/* cached: */ /* cached: */
@ -60,7 +60,7 @@ pub struct StringReader<'a> {
impl<'a> Reader for StringReader<'a> { impl<'a> Reader for StringReader<'a> {
fn is_eof(&self) -> bool { self.curr.is_none() } fn is_eof(&self) -> bool { self.curr.is_none() }
// return the next token. EFFECT: advances the string_reader. /// Return the next token. EFFECT: advances the string_reader.
fn next_token(&mut self) -> TokenAndSpan { fn next_token(&mut self) -> TokenAndSpan {
let ret_val = TokenAndSpan { let ret_val = TokenAndSpan {
tok: replace(&mut self.peek_tok, token::UNDERSCORE), tok: replace(&mut self.peek_tok, token::UNDERSCORE),
@ -417,7 +417,7 @@ impl<'a> StringReader<'a> {
return self.consume_any_line_comment(); return self.consume_any_line_comment();
} }
// might return a sugared-doc-attr /// Might return a sugared-doc-attr
fn consume_block_comment(&mut self) -> Option<TokenAndSpan> { fn consume_block_comment(&mut self) -> Option<TokenAndSpan> {
// block comments starting with "/**" or "/*!" are doc-comments // block comments starting with "/**" or "/*!" are doc-comments
let is_doc_comment = self.curr_is('*') || self.curr_is('!'); let is_doc_comment = self.curr_is('*') || self.curr_is('!');


@ -10,7 +10,6 @@
//! The main parser interface //! The main parser interface
use ast; use ast;
use codemap::{Span, CodeMap, FileMap}; use codemap::{Span, CodeMap, FileMap};
use diagnostic::{SpanHandler, mk_span_handler, default_handler, Auto}; use diagnostic::{SpanHandler, mk_span_handler, default_handler, Auto};
@ -32,7 +31,7 @@ pub mod common;
pub mod classify; pub mod classify;
pub mod obsolete; pub mod obsolete;
// info about a parsing session. /// Info about a parsing session.
pub struct ParseSess { pub struct ParseSess {
pub span_diagnostic: SpanHandler, // better be the same as the one in the reader! pub span_diagnostic: SpanHandler, // better be the same as the one in the reader!
/// Used to determine and report recursive mod inclusions /// Used to determine and report recursive mod inclusions
@ -241,14 +240,14 @@ pub fn file_to_filemap(sess: &ParseSess, path: &Path, spanopt: Option<Span>)
unreachable!() unreachable!()
} }
// given a session and a string, add the string to /// Given a session and a string, add the string to
// the session's codemap and return the new filemap /// the session's codemap and return the new filemap
pub fn string_to_filemap(sess: &ParseSess, source: String, path: String) pub fn string_to_filemap(sess: &ParseSess, source: String, path: String)
-> Rc<FileMap> { -> Rc<FileMap> {
sess.span_diagnostic.cm.new_filemap(path, source) sess.span_diagnostic.cm.new_filemap(path, source)
} }
// given a filemap, produce a sequence of token-trees /// Given a filemap, produce a sequence of token-trees
pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc<FileMap>) pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc<FileMap>)
-> Vec<ast::TokenTree> { -> Vec<ast::TokenTree> {
// it appears to me that the cfg doesn't matter here... indeed, // it appears to me that the cfg doesn't matter here... indeed,
@ -259,7 +258,7 @@ pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc<FileMap>)
p1.parse_all_token_trees() p1.parse_all_token_trees()
} }
// given tts and cfg, produce a parser /// Given tts and cfg, produce a parser
pub fn tts_to_parser<'a>(sess: &'a ParseSess, pub fn tts_to_parser<'a>(sess: &'a ParseSess,
tts: Vec<ast::TokenTree>, tts: Vec<ast::TokenTree>,
cfg: ast::CrateConfig) -> Parser<'a> { cfg: ast::CrateConfig) -> Parser<'a> {
@ -267,7 +266,7 @@ pub fn tts_to_parser<'a>(sess: &'a ParseSess,
Parser::new(sess, cfg, box trdr) Parser::new(sess, cfg, box trdr)
} }
// abort if necessary /// Abort if necessary
pub fn maybe_aborted<T>(result: T, mut p: Parser) -> T { pub fn maybe_aborted<T>(result: T, mut p: Parser) -> T {
p.abort_if_errors(); p.abort_if_errors();
result result


@ -38,8 +38,8 @@ pub enum ObsoleteSyntax {
pub trait ParserObsoleteMethods { pub trait ParserObsoleteMethods {
/// Reports an obsolete syntax non-fatal error. /// Reports an obsolete syntax non-fatal error.
fn obsolete(&mut self, sp: Span, kind: ObsoleteSyntax); fn obsolete(&mut self, sp: Span, kind: ObsoleteSyntax);
// Reports an obsolete syntax non-fatal error, and returns /// Reports an obsolete syntax non-fatal error, and returns
// a placeholder expression /// a placeholder expression
fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr>; fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr>;
fn report(&mut self, fn report(&mut self,
sp: Span, sp: Span,
@ -83,8 +83,8 @@ impl<'a> ParserObsoleteMethods for parser::Parser<'a> {
self.report(sp, kind, kind_str, desc); self.report(sp, kind, kind_str, desc);
} }
// Reports an obsolete syntax non-fatal error, and returns /// Reports an obsolete syntax non-fatal error, and returns
// a placeholder expression /// a placeholder expression
fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr> { fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr> {
self.obsolete(sp, kind); self.obsolete(sp, kind);
self.mk_expr(sp.lo, sp.hi, ExprLit(box(GC) respan(sp, LitNil))) self.mk_expr(sp.lo, sp.hi, ExprLit(box(GC) respan(sp, LitNil)))

File diff suppressed because it is too large


@ -92,9 +92,9 @@ pub enum Token {
LIT_BINARY_RAW(Rc<Vec<u8>>, uint), /* raw binary str delimited by n hash symbols */ LIT_BINARY_RAW(Rc<Vec<u8>>, uint), /* raw binary str delimited by n hash symbols */
/* Name components */ /* Name components */
// an identifier contains an "is_mod_name" boolean, /// An identifier contains an "is_mod_name" boolean,
// indicating whether :: follows this token with no /// indicating whether :: follows this token with no
// whitespace in between. /// whitespace in between.
IDENT(ast::Ident, bool), IDENT(ast::Ident, bool),
UNDERSCORE, UNDERSCORE,
LIFETIME(ast::Ident), LIFETIME(ast::Ident),


@ -8,58 +8,56 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
/* //! This pretty-printer is a direct reimplementation of Philip Karlton's
* This pretty-printer is a direct reimplementation of Philip Karlton's //! Mesa pretty-printer, as described in appendix A of
* Mesa pretty-printer, as described in appendix A of //!
* //! STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen.
* STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen. //! Stanford Department of Computer Science, 1979.
* Stanford Department of Computer Science, 1979. //!
* //! The algorithm's aim is to break a stream into as few lines as possible
* The algorithm's aim is to break a stream into as few lines as possible //! while respecting the indentation-consistency requirements of the enclosing
* while respecting the indentation-consistency requirements of the enclosing //! block, and avoiding breaking at silly places on block boundaries, for
* block, and avoiding breaking at silly places on block boundaries, for //! example, between "x" and ")" in "x)".
* example, between "x" and ")" in "x)". //!
* //! I am implementing this algorithm because it comes with 20 pages of
* I am implementing this algorithm because it comes with 20 pages of //! documentation explaining its theory, and because it addresses the set of
* documentation explaining its theory, and because it addresses the set of //! concerns I've seen other pretty-printers fall down on. Weirdly. Even though
* concerns I've seen other pretty-printers fall down on. Weirdly. Even though //! it's 32 years old. What can I say?
* it's 32 years old. What can I say? //!
* //! Despite some redundancies and quirks in the way it's implemented in that
* Despite some redundancies and quirks in the way it's implemented in that //! paper, I've opted to keep the implementation here as similar as I can,
* paper, I've opted to keep the implementation here as similar as I can, //! changing only what was blatantly wrong, a typo, or sufficiently
* changing only what was blatantly wrong, a typo, or sufficiently //! non-idiomatic rust that it really stuck out.
* non-idiomatic rust that it really stuck out. //!
* //! In particular you'll see a certain amount of churn related to INTEGER vs.
* In particular you'll see a certain amount of churn related to INTEGER vs. //! CARDINAL in the Mesa implementation. Mesa apparently interconverts the two
* CARDINAL in the Mesa implementation. Mesa apparently interconverts the two //! somewhat readily? In any case, I've used uint for indices-in-buffers and
* somewhat readily? In any case, I've used uint for indices-in-buffers and //! ints for character-sizes-and-indentation-offsets. This respects the need
* ints for character-sizes-and-indentation-offsets. This respects the need //! for ints to "go negative" while carrying a pending-calculation balance, and
* for ints to "go negative" while carrying a pending-calculation balance, and //! helps differentiate all the numbers flying around internally (slightly).
* helps differentiate all the numbers flying around internally (slightly). //!
* //! I also inverted the indentation arithmetic used in the print stack, since
* I also inverted the indentation arithmetic used in the print stack, since //! the Mesa implementation (somewhat randomly) stores the offset on the print
* the Mesa implementation (somewhat randomly) stores the offset on the print //! stack in terms of margin-col rather than col itself. I store col.
* stack in terms of margin-col rather than col itself. I store col. //!
* //! I also implemented a small change in the String token, in that I store an
* I also implemented a small change in the String token, in that I store an //! explicit length for the string. For most tokens this is just the length of
* explicit length for the string. For most tokens this is just the length of //! the accompanying string. But it's necessary to permit it to differ, for
* the accompanying string. But it's necessary to permit it to differ, for //! encoding things that are supposed to "go on their own line" -- certain
* encoding things that are supposed to "go on their own line" -- certain //! classes of comment and blank-line -- where relying on adjacent
* classes of comment and blank-line -- where relying on adjacent //! hardbreak-like Break tokens with long blankness indication doesn't actually
* hardbreak-like Break tokens with long blankness indication doesn't actually //! work. To see why, consider when there is a "thing that should be on its own
* work. To see why, consider when there is a "thing that should be on its own //! line" between two long blocks, say functions. If you put a hardbreak after
* line" between two long blocks, say functions. If you put a hardbreak after //! each function (or before each) and the breaking algorithm decides to break
* each function (or before each) and the breaking algorithm decides to break //! there anyways (because the functions themselves are long) you wind up with
* there anyways (because the functions themselves are long) you wind up with //! extra blank lines. If you don't put hardbreaks you can wind up with the
* extra blank lines. If you don't put hardbreaks you can wind up with the //! "thing which should be on its own line" not getting its own line in the
* "thing which should be on its own line" not getting its own line in the //! rare case of "really small functions" or such. This re-occurs with comments
* rare case of "really small functions" or such. This re-occurs with comments //! and explicit blank lines. So in those cases we use a string with a payload
* and explicit blank lines. So in those cases we use a string with a payload //! we want isolated to a line and an explicit length that's huge, surrounded
* we want isolated to a line and an explicit length that's huge, surrounded //! by two zero-length breaks. The algorithm will try its best to fit it on a
* by two zero-length breaks. The algorithm will try its best to fit it on a //! line (which it can't) and so naturally place the content on its own line to
* line (which it can't) and so naturally place the content on its own line to //! avoid combining it with other lines and making matters even worse.
* avoid combining it with other lines and making matters even worse.
*/
use std::io; use std::io;
use std::string::String; use std::string::String;
@ -186,107 +184,116 @@ pub fn mk_printer(out: Box<io::Writer>, linewidth: uint) -> Printer {
} }
/* /// In case you do not have the paper, here is an explanation of what's going
* In case you do not have the paper, here is an explanation of what's going /// on.
* on. ///
* /// There is a stream of input tokens flowing through this printer.
* There is a stream of input tokens flowing through this printer. ///
* /// The printer buffers up to 3N tokens inside itself, where N is linewidth.
* The printer buffers up to 3N tokens inside itself, where N is linewidth. /// Yes, linewidth is chars and tokens are multi-char, but in the worst
* Yes, linewidth is chars and tokens are multi-char, but in the worst /// case every token worth buffering is 1 char long, so it's ok.
* case every token worth buffering is 1 char long, so it's ok. ///
* /// Tokens are String, Break, and Begin/End to delimit blocks.
* Tokens are String, Break, and Begin/End to delimit blocks. ///
* /// Begin tokens can carry an offset, saying "how far to indent when you break
* Begin tokens can carry an offset, saying "how far to indent when you break /// inside here", as well as a flag indicating "consistent" or "inconsistent"
* inside here", as well as a flag indicating "consistent" or "inconsistent" /// breaking. Consistent breaking means that after the first break, no attempt
* breaking. Consistent breaking means that after the first break, no attempt /// will be made to flow subsequent breaks together onto lines. Inconsistent
* will be made to flow subsequent breaks together onto lines. Inconsistent /// is the opposite. Inconsistent breaking example would be, say:
* is the opposite. Inconsistent breaking example would be, say: ///
* /// foo(hello, there, good, friends)
* foo(hello, there, good, friends) ///
* /// breaking inconsistently to become
* breaking inconsistently to become ///
* /// foo(hello, there
* foo(hello, there /// good, friends);
* good, friends); ///
* /// whereas a consistent breaking would yield:
* whereas a consistent breaking would yield: ///
* /// foo(hello,
* foo(hello, /// there
* there /// good,
* good, /// friends);
* friends); ///
* /// That is, in the consistent-break blocks we value vertical alignment
* That is, in the consistent-break blocks we value vertical alignment /// more than the ability to cram stuff onto a line. But in all cases if it
* more than the ability to cram stuff onto a line. But in all cases if it /// can make a block a one-liner, it'll do so.
* can make a block a one-liner, it'll do so. ///
* /// Carrying on with high-level logic:
* Carrying on with high-level logic: ///
* /// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
* The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and /// 'right' indices denote the active portion of the ring buffer as well as
* 'right' indices denote the active portion of the ring buffer as well as /// describing hypothetical points-in-the-infinite-stream at most 3N tokens
* describing hypothetical points-in-the-infinite-stream at most 3N tokens /// apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
* apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch /// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer
* between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer /// and point-in-infinite-stream senses freely.
* and point-in-infinite-stream senses freely. ///
* /// There is a parallel ring buffer, 'size', that holds the calculated size of
* There is a parallel ring buffer, 'size', that holds the calculated size of /// each token. Why calculated? Because for Begin/End pairs, the "size"
* each token. Why calculated? Because for Begin/End pairs, the "size" /// includes everything between the pair. That is, the "size" of Begin is
* includes everything between the pair. That is, the "size" of Begin is /// actually the sum of the sizes of everything between Begin and the paired
* actually the sum of the sizes of everything between Begin and the paired /// End that follows. Since that is arbitrarily far in the future, 'size' is
* End that follows. Since that is arbitrarily far in the future, 'size' is /// being rewritten regularly while the printer runs; in fact most of the
* being rewritten regularly while the printer runs; in fact most of the /// machinery is here to work out 'size' entries on the fly (and give up when
* machinery is here to work out 'size' entries on the fly (and give up when /// they're so obviously over-long that "infinity" is a good enough
* they're so obviously over-long that "infinity" is a good enough /// approximation for purposes of line breaking).
* approximation for purposes of line breaking). ///
* /// The "input side" of the printer is managed as an abstract process called
* The "input side" of the printer is managed as an abstract process called /// SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to
* SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to /// manage calculating 'size'. SCAN is, in other words, the process of
* manage calculating 'size'. SCAN is, in other words, the process of /// calculating 'size' entries.
* calculating 'size' entries. ///
* /// The "output side" of the printer is managed by an abstract process called
* The "output side" of the printer is managed by an abstract process called /// PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to
* PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to /// do with each token/size pair it consumes as it goes. It's trying to consume
* do with each token/size pair it consumes as it goes. It's trying to consume /// the entire buffered window, but can't output anything until the size is >=
* the entire buffered window, but can't output anything until the size is >= /// 0 (sizes are set to negative while they're pending calculation).
* 0 (sizes are set to negative while they're pending calculation). ///
* /// So SCAN takes input and buffers tokens and pending calculations, while
* So SCAN takes input and buffers tokens and pending calculations, while /// PRINT gobbles up completed calculations and tokens from the buffer. The
* PRINT gobbles up completed calculations and tokens from the buffer. The /// theory is that the two can never get more than 3N tokens apart, because
* theory is that the two can never get more than 3N tokens apart, because /// once there's "obviously" too much data to fit on a line, in a size
* once there's "obviously" too much data to fit on a line, in a size /// calculation, SCAN will write "infinity" to the size and let PRINT consume
* calculation, SCAN will write "infinity" to the size and let PRINT consume /// it.
* it. ///
* /// In this implementation (following the paper, again) the SCAN process is
* In this implementation (following the paper, again) the SCAN process is /// the method called 'pretty_print', and the 'PRINT' process is the method
* the method called 'pretty_print', and the 'PRINT' process is the method /// called 'print'.
* called 'print'.
*/
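As a rough sketch of the token vocabulary this explanation assumes (the names mirror the ones used above, but the types are simplified stand-ins, not the real `Printer` machinery), the `foo(hello, there, good, friends)` example becomes a stream like this:

```rust
#[allow(dead_code)]
enum Breaks { Consistent, Inconsistent }

#[allow(dead_code)]
enum Token {
    String(&'static str, isize), // text plus an explicit length
    Break { blank_space: isize, offset: isize },
    Begin { offset: isize, breaks: Breaks },
    End,
}

// One block around the argument list, with a Break after each comma.
// Whether those Breaks flow (Inconsistent) or force one argument per
// line (Consistent) is decided by the Begin token.
fn example_stream(breaks: Breaks) -> Vec<Token> {
    let mut toks = vec![Token::String("foo(", 4), Token::Begin { offset: 4, breaks }];
    for (i, arg) in ["hello,", "there,", "good,", "friends"].into_iter().enumerate() {
        if i > 0 {
            toks.push(Token::Break { blank_space: 1, offset: 0 });
        }
        toks.push(Token::String(arg, arg.len() as isize));
    }
    toks.push(Token::End);
    toks.push(Token::String(")", 1));
    toks
}

fn main() {
    let toks = example_stream(Breaks::Inconsistent);
    println!("{} tokens for SCAN to buffer", toks.len());
}
```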
pub struct Printer { pub struct Printer {
pub out: Box<io::Writer>, pub out: Box<io::Writer>,
buf_len: uint, buf_len: uint,
margin: int, // width of lines we're constrained to /// Width of lines we're constrained to
space: int, // number of spaces left on line margin: int,
left: uint, // index of left side of input stream /// Number of spaces left on line
right: uint, // index of right side of input stream space: int,
token: Vec<Token> , // ring-buffer the stream goes through /// Index of left side of input stream
size: Vec<int> , // ring-buffer of calculated sizes left: uint,
left_total: int, // running size of stream "...left" /// Index of right side of input stream
right_total: int, // running size of stream "...right" right: uint,
// pseudo-stack, really a ring too. Holds the /// Ring-buffer the stream goes through
// primary-ring-buffers index of the Begin that started the token: Vec<Token> ,
// current block, possibly with the most recent Break after that /// Ring-buffer of calculated sizes
// Begin (if there is any) on top of it. Stuff is flushed off the size: Vec<int> ,
// bottom as it becomes irrelevant due to the primary ring-buffer /// Running size of stream "...left"
// advancing. left_total: int,
/// Running size of stream "...right"
right_total: int,
/// Pseudo-stack, really a ring too. Holds the
/// primary-ring-buffers index of the Begin that started the
/// current block, possibly with the most recent Break after that
/// Begin (if there is any) on top of it. Stuff is flushed off the
/// bottom as it becomes irrelevant due to the primary ring-buffer
/// advancing.
scan_stack: Vec<uint> , scan_stack: Vec<uint> ,
scan_stack_empty: bool, // top==bottom disambiguator /// Top==bottom disambiguator
top: uint, // index of top of scan_stack scan_stack_empty: bool,
bottom: uint, // index of bottom of scan_stack /// Index of top of scan_stack
// stack of blocks-in-progress being flushed by print top: uint,
/// Index of bottom of scan_stack
bottom: uint,
/// Stack of blocks-in-progress being flushed by print
print_stack: Vec<PrintStackElem> , print_stack: Vec<PrintStackElem> ,
// buffered indentation to avoid writing trailing whitespace /// Buffered indentation to avoid writing trailing whitespace
pending_indentation: int, pending_indentation: int,
} }


@ -88,9 +88,9 @@ pub static indent_unit: uint = 4u;
pub static default_columns: uint = 78u; pub static default_columns: uint = 78u;
// Requires you to pass an input filename and reader so that /// Requires you to pass an input filename and reader so that
// it can scan the input text for comments and literals to /// it can scan the input text for comments and literals to
// copy forward. /// copy forward.
pub fn print_crate<'a>(cm: &'a CodeMap, pub fn print_crate<'a>(cm: &'a CodeMap,
span_diagnostic: &diagnostic::SpanHandler, span_diagnostic: &diagnostic::SpanHandler,
krate: &ast::Crate, krate: &ast::Crate,


@ -8,9 +8,9 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
// An "interner" is a data structure that associates values with uint tags and //! An "interner" is a data structure that associates values with uint tags and
// allows bidirectional lookup; i.e. given a value, one can easily find the //! allows bidirectional lookup; i.e. given a value, one can easily find the
// type, and vice versa. //! type, and vice versa.
use ast::Name; use ast::Name;
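A minimal modern-Rust sketch of such an interner: interning is idempotent, and the tag maps back to the value. (The real one is generic and uses `RefCell`s; this stand-in works on `String`s.)

```rust
use std::collections::HashMap;

// Values get dense usize tags; lookup works in both directions.
#[derive(Default)]
struct Interner {
    map: HashMap<String, usize>,
    vect: Vec<String>,
}

impl Interner {
    fn intern(&mut self, val: &str) -> usize {
        if let Some(&idx) = self.map.get(val) {
            return idx;
        }
        let idx = self.vect.len();
        self.map.insert(val.to_string(), idx);
        self.vect.push(val.to_string());
        idx
    }

    fn get(&self, idx: usize) -> &str {
        &self.vect[idx]
    }
}

fn main() {
    let mut i = Interner::default();
    let dog = i.intern("dog");
    assert_eq!(i.intern("dog"), dog); // same value, same tag
    assert_eq!(i.get(dog), "dog");    // and back again
}
```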


@ -17,14 +17,14 @@ use parse::token;
use std::gc::Gc; use std::gc::Gc;
// map a string to tts, using a made-up filename: /// Map a string to tts, using a made-up filename:
pub fn string_to_tts(source_str: String) -> Vec<ast::TokenTree> { pub fn string_to_tts(source_str: String) -> Vec<ast::TokenTree> {
let ps = new_parse_sess(); let ps = new_parse_sess();
filemap_to_tts(&ps, filemap_to_tts(&ps,
string_to_filemap(&ps, source_str, "bogofile".to_string())) string_to_filemap(&ps, source_str, "bogofile".to_string()))
} }
// map string to parser (via tts) /// Map a string to a parser (via tts)
pub fn string_to_parser<'a>(ps: &'a ParseSess, source_str: String) -> Parser<'a> { pub fn string_to_parser<'a>(ps: &'a ParseSess, source_str: String) -> Parser<'a> {
new_parser_from_source_str(ps, new_parser_from_source_str(ps,
Vec::new(), Vec::new(),
@ -40,51 +40,51 @@ fn with_error_checking_parse<T>(s: String, f: |&mut Parser| -> T) -> T {
x x
} }
// parse a string, return a crate. /// Parse a string, return a crate.
pub fn string_to_crate (source_str : String) -> ast::Crate { pub fn string_to_crate (source_str : String) -> ast::Crate {
with_error_checking_parse(source_str, |p| { with_error_checking_parse(source_str, |p| {
p.parse_crate_mod() p.parse_crate_mod()
}) })
} }
// parse a string, return an expr /// Parse a string, return an expr
pub fn string_to_expr (source_str : String) -> Gc<ast::Expr> { pub fn string_to_expr (source_str : String) -> Gc<ast::Expr> {
with_error_checking_parse(source_str, |p| { with_error_checking_parse(source_str, |p| {
p.parse_expr() p.parse_expr()
}) })
} }
// parse a string, return an item /// Parse a string, return an item
pub fn string_to_item (source_str : String) -> Option<Gc<ast::Item>> { pub fn string_to_item (source_str : String) -> Option<Gc<ast::Item>> {
with_error_checking_parse(source_str, |p| { with_error_checking_parse(source_str, |p| {
p.parse_item(Vec::new()) p.parse_item(Vec::new())
}) })
} }
// parse a string, return a stmt /// Parse a string, return a stmt
pub fn string_to_stmt(source_str : String) -> Gc<ast::Stmt> { pub fn string_to_stmt(source_str : String) -> Gc<ast::Stmt> {
with_error_checking_parse(source_str, |p| { with_error_checking_parse(source_str, |p| {
p.parse_stmt(Vec::new()) p.parse_stmt(Vec::new())
}) })
} }
// parse a string, return a pat. Uses "irrefutable"... which doesn't /// Parse a string, return a pat. Uses "irrefutable"... which doesn't
// (currently) affect parsing. /// (currently) affect parsing.
pub fn string_to_pat(source_str: String) -> Gc<ast::Pat> { pub fn string_to_pat(source_str: String) -> Gc<ast::Pat> {
string_to_parser(&new_parse_sess(), source_str).parse_pat() string_to_parser(&new_parse_sess(), source_str).parse_pat()
} }
// convert a vector of strings to a vector of ast::Ident's /// Convert a vector of strings to a vector of ast::Ident values
pub fn strs_to_idents(ids: Vec<&str> ) -> Vec<ast::Ident> { pub fn strs_to_idents(ids: Vec<&str> ) -> Vec<ast::Ident> {
ids.iter().map(|u| token::str_to_ident(*u)).collect() ids.iter().map(|u| token::str_to_ident(*u)).collect()
} }
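
A hypothetical unit test using the helpers above (the test name and inputs are invented for illustration; only the signatures visible in this diff are relied on):

#[test]
fn parses_simple_source_strings() {
    // "1 + 2" is a valid expression, "fn f() {}" a valid item.
    let _expr = string_to_expr("1 + 2".to_string());
    assert!(string_to_item("fn f() {}".to_string()).is_some());
}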
// does the given string match the pattern? whitespace in the first string /// Does the given string match the pattern? Whitespace in the first string
// may be deleted or replaced with other whitespace to match the pattern. /// may be deleted or replaced with other whitespace to match the pattern.
// this function is unicode-ignorant; fortunately, the careful design of /// This function is unicode-ignorant; fortunately, the careful design of
// UTF-8 mitigates this ignorance. In particular, this function only collapses /// UTF-8 mitigates this ignorance. In particular, this function only collapses
// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode /// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode
// chars. Unsurprisingly, it doesn't do NKF-normalization(?). /// chars. Unsurprisingly, it doesn't do NFKC normalization(?).
pub fn matches_codepattern(a : &str, b : &str) -> bool { pub fn matches_codepattern(a : &str, b : &str) -> bool {
let mut idx_a = 0; let mut idx_a = 0;
let mut idx_b = 0; let mut idx_b = 0;
@ -122,9 +122,9 @@ pub fn matches_codepattern(a : &str, b : &str) -> bool {
} }
} }
// given a string and an index, return the first uint >= idx /// Given a string and an index, return the first uint >= idx
// that is a non-ws-char or is outside of the legal range of /// that is a non-ws-char or is outside of the legal range of
// the string. /// the string.
fn scan_for_non_ws_or_end(a : &str, idx: uint) -> uint { fn scan_for_non_ws_or_end(a : &str, idx: uint) -> uint {
let mut i = idx; let mut i = idx;
let len = a.len(); let len = a.len();
@ -134,7 +134,7 @@ fn scan_for_non_ws_or_end(a : &str, idx: uint) -> uint {
i i
} }
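
The hunk above cuts off the body of scan_for_non_ws_or_end; a complete stand-alone version of the same idea might look like this in current Rust (usize instead of uint, and it assumes idx falls on a char boundary):

fn scan_for_non_ws_or_end(a: &str, idx: usize) -> usize {
    // "Whitespace" here means the same four characters is_whitespace()
    // below accepts.
    let is_ws = |c: char| c == ' ' || c == '\t' || c == '\r' || c == '\n';
    a[idx..]
        .char_indices()
        .find(|&(_, c)| !is_ws(c))
        .map(|(off, _)| idx + off)
        .unwrap_or(a.len())
}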
// copied from lexer. /// Copied from lexer.
pub fn is_whitespace(c: char) -> bool { pub fn is_whitespace(c: char) -> bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'; return c == ' ' || c == '\t' || c == '\r' || c == '\n';
} }

View File

@ -8,6 +8,18 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
//! Context-passing AST walker. Each overridden visit method has full control
//! over what happens with its node; it can do its own traversal of the node's
//! children (potentially passing in different contexts to each), call
//! `visit::visit_*` to apply the default traversal algorithm (again, it can
//! override the context), or prevent deeper traversal by doing nothing.
//!
//! Note: it is an important invariant that the default visitor walks the body
//! of a function in "execution order" (more concretely, reverse post-order
//! with respect to the CFG implied by the AST), meaning that if AST node A may
//! execute before AST node B, then A is visited first. The borrow checker in
//! particular relies on this property.
//!
use abi::Abi; use abi::Abi;
use ast::*; use ast::*;
use ast; use ast;
@ -17,27 +29,15 @@ use owned_slice::OwnedSlice;
use std::gc::Gc; use std::gc::Gc;
// Context-passing AST walker. Each overridden visit method has full control
// over what happens with its node, it can do its own traversal of the node's
// children (potentially passing in different contexts to each), call
// visit::visit_* to apply the default traversal algorithm (again, it can
// override the context), or prevent deeper traversal by doing nothing.
//
// Note: it is an important invariant that the default visitor walks the body
// of a function in "execution order" (more concretely, reverse post-order
// with respect to the CFG implied by the AST), meaning that if AST node A may
// execute before AST node B, then A is visited first. The borrow checker in
// particular relies on this property.
pub enum FnKind<'a> { pub enum FnKind<'a> {
// fn foo() or extern "Abi" fn foo() /// fn foo() or extern "Abi" fn foo()
FkItemFn(Ident, &'a Generics, FnStyle, Abi), FkItemFn(Ident, &'a Generics, FnStyle, Abi),
// fn foo(&self) /// fn foo(&self)
FkMethod(Ident, &'a Generics, &'a Method), FkMethod(Ident, &'a Generics, &'a Method),
// |x, y| ... /// |x, y| ...
// proc(x, y) ... /// proc(x, y) ...
FkFnBlock, FkFnBlock,
} }
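
To make the walker contract from the module doc above concrete, here is a minimal sketch in current Rust with a toy two-variant AST (not libsyntax's types; the context parameter and FnKind are omitted): the walk_* free function performs the default child traversal, and an implementor can call it, replace it with its own traversal, or skip it to prune the walk.

enum Expr {
    Lit(i64),
    Add(Box<Expr>, Box<Expr>),
}

trait Visitor {
    fn visit_expr(&mut self, e: &Expr) {
        // Default behaviour: fall through to the shared traversal.
        walk_expr(self, e);
    }
}

// The default traversal visits children in source ("execution") order.
fn walk_expr<V: Visitor + ?Sized>(v: &mut V, e: &Expr) {
    if let Expr::Add(lhs, rhs) = e {
        v.visit_expr(lhs);
        v.visit_expr(rhs);
    }
}

// Example visitor: counts literals while keeping the default traversal,
// so nested expressions are still walked.
struct CountLits(usize);

impl Visitor for CountLits {
    fn visit_expr(&mut self, e: &Expr) {
        if let Expr::Lit(_) = e {
            self.0 += 1;
        }
        walk_expr(self, e);
    }
}

fn main() {
    // (1 + 2) + 3 contains three literals.
    let tree = Expr::Add(
        Box::new(Expr::Add(Box::new(Expr::Lit(1)), Box::new(Expr::Lit(2)))),
        Box::new(Expr::Lit(3)),
    );
    let mut counter = CountLits(0);
    counter.visit_expr(&tree);
    assert_eq!(counter.0, 3);
}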