diff --git a/Makefile.in b/Makefile.in
index 2612761cef9..5683eb7ba06 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -216,6 +216,7 @@ ifneq ($(strip $(findstring check,$(MAKECMDGOALS)) \
                $(findstring tidy,$(MAKECMDGOALS))),)
   CFG_INFO := $(info cfg: including test rules)
   include $(CFG_SRC_DIR)mk/tests.mk
+  include $(CFG_SRC_DIR)mk/grammar.mk
 endif
 
 # Performance and benchmarking
@@ -252,19 +253,6 @@ ifneq ($(findstring clean,$(MAKECMDGOALS)),)
   include $(CFG_SRC_DIR)mk/clean.mk
 endif
 
-# Grammar tests
-
-ifneq ($(findstring lexer,$(MAKECMDGOALS)),)
-  ifdef CFG_JAVAC
-    ifdef CFG_ANTLR4
-      ifdef CFG_GRUN
-        CFG_INFO := $(info cfg: including grammar tests)
-        include $(CFG_SRC_DIR)mk/grammar.mk
-      endif
-    endif
-  endif
-endif
-
 # CTAGS building
 ifneq ($(strip $(findstring TAGS.emacs,$(MAKECMDGOALS)) \
                $(findstring TAGS.vi,$(MAKECMDGOALS))),)
diff --git a/mk/grammar.mk b/mk/grammar.mk
index 03e253c7278..c0afa3eb769 100644
--- a/mk/grammar.mk
+++ b/mk/grammar.mk
@@ -38,6 +38,18 @@ $(BG)verify: $(SG)verify.rs rustc-stage2-H-$(CFG_BUILD) $(LD)stamp.regex_macros
 	$(Q)$(RUSTC) -O --out-dir $(BG) -L $(L) $(SG)verify.rs
 
 check-lexer: $(BG) $(BG)RustLexer.class $(BG)verify
+ifdef CFG_JAVAC
+ifdef CFG_ANTLR4
+ifdef CFG_GRUN
 	$(info Verifying libsyntax against the reference lexer ...)
-	$(Q)find $(S) -iname '*.rs' -exec "$(SG)check.sh" {} "$(BG)" \
-		"$(CFG_GRUN)" "$(BG)verify" "$(BG)RustLexer.tokens" "$(VERBOSE)" \;
+	$(Q)$(SG)check.sh $(S) "$(BG)" \
+		"$(CFG_GRUN)" "$(BG)verify" "$(BG)RustLexer.tokens"
+else
+$(info grun not available, skipping lexer test...)
+endif
+else
+$(info antlr4 not available, skipping lexer test...)
+endif
+else
+$(info javac not available, skipping lexer test...)
+endif
diff --git a/mk/tests.mk b/mk/tests.mk
index d2e4388521e..6068af8f7f4 100644
--- a/mk/tests.mk
+++ b/mk/tests.mk
@@ -171,7 +171,7 @@ endif
 # Main test targets
 ######################################################################
 
-check: cleantmptestlogs cleantestlibs check-notidy tidy
+check: cleantmptestlogs cleantestlibs check-notidy tidy check-syntax
 
 check-notidy: cleantmptestlogs cleantestlibs all check-stage2
 	$(Q)$(CFG_PYTHON) $(S)src/etc/check-summary.py tmp/*.log
@@ -192,6 +192,8 @@ check-docs: cleantestlibs cleantmptestlogs check-stage2-docs
 # NOTE: Remove after reprogramming windows bots
 check-fast: check-lite
 
+check-syntax: check-lexer
+
 .PHONY: cleantmptestlogs cleantestlibs
 
 cleantmptestlogs:
diff --git a/src/grammar/check.sh b/src/grammar/check.sh
index 3ddbb8a34c8..69ec490a08a 100755
--- a/src/grammar/check.sh
+++ b/src/grammar/check.sh
@@ -2,20 +2,33 @@
 
 # Run the reference lexer against libsyntax and compare the tokens and spans.
 # If "// ignore-lexer-test" is present in the file, it will be ignored.
-#
+
+
 # Argument $1 is the file to check, $2 is the classpath to use, $3 is the path
 # to the grun binary, $4 is the path to the verify binary, $5 is the path to
 # RustLexer.tokens
-
 if [ "${VERBOSE}" == "1" ]; then
     set -x
 fi
 
-grep -q "// ignore lexer-test" $1;
+check() {
+    grep --silent "// ignore-lexer-test" $1;
 
-if [ $? -eq 1 ]; then
-    cd $2 # This `cd` is so java will pick up RustLexer.class. I couldn't
-    # figure out how to wrangle the CLASSPATH, just adding build/grammr didn't
-    # seem to have anny effect.
-    $3 RustLexer tokens -tokens < $1 | $4 $1 $5
-fi
+    # if it's *not* found...
+    if [ $? -eq 1 ]; then
+        cd $2 # This `cd` is so java will pick up RustLexer.class. I couldn't
+        # figure out how to wrangle the CLASSPATH, just adding build/grammar didn't
+        # seem to have any effect.
+        if $3 RustLexer tokens -tokens < $1 | $4 $1 $5; then
+            echo "pass: $1"
+        else
+            echo "fail: $1"
+        fi
+    else
+        echo "skip: $1"
+    fi
+}
+
+for file in $(find $1 -iname '*.rs' ! -path '*/test/compile-fail/*' ); do
+    check $file $2 $3 $4 $5
+done
diff --git a/src/grammar/verify.rs b/src/grammar/verify.rs
index a6a1a75854d..f2ae5a1ea4e 100644
--- a/src/grammar/verify.rs
+++ b/src/grammar/verify.rs
@@ -1,3 +1,13 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
 #![feature(globs, phase, macro_rules)]
 
 extern crate syntax;
@@ -158,7 +168,9 @@ fn count(lit: &str) -> uint {
 }
 
 fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
-    let re = regex!(r"\[@(?P<seq>\d+),(?P<start>\d+):(?P<end>\d+)='(?P<content>.+?)',<(?P<toknum>-?\d+)>,\d+:\d+]");
+    let re = regex!(
+        r"\[@(?P<seq>\d+),(?P<start>\d+):(?P<end>\d+)='(?P<content>.+?)',<(?P<toknum>-?\d+)>,\d+:\d+]"
+    );
 
     let m = re.captures(s).expect(format!("The regex didn't match {}", s).as_slice());
     let start = m.name("start");
@@ -166,7 +178,8 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
     let toknum = m.name("toknum");
     let content = m.name("content");
 
-    let proto_tok = tokens.find_equiv(&toknum).expect(format!("didn't find token {} in the map", toknum).as_slice());
+    let proto_tok = tokens.find_equiv(&toknum).expect(format!("didn't find token {} in the map",
+                                                              toknum).as_slice());
 
     let nm = parse::token::intern(content);
 
@@ -229,7 +242,8 @@ fn main() {
     let token_map = parse_token_list(token_file.read_to_string().unwrap().as_slice());
 
     let mut stdin = std::io::stdin();
-    let mut antlr_tokens = stdin.lines().map(|l| parse_antlr_token(l.unwrap().as_slice().trim(), &token_map));
+    let mut antlr_tokens = stdin.lines().map(|l| parse_antlr_token(l.unwrap().as_slice().trim(),
+                                                                   &token_map));
 
     let code = File::open(&Path::new(args.get(1).as_slice())).unwrap().read_to_string().unwrap();
     let options = config::basic_options();
@@ -246,7 +260,8 @@ fn main() {
             continue
         }
 
-        assert!(rustc_tok.sp == antlr_tok.sp, "{} and {} have different spans", rustc_tok, antlr_tok);
+        assert!(rustc_tok.sp == antlr_tok.sp, "{} and {} have different spans", rustc_tok,
+                antlr_tok);
 
         macro_rules! matches (
             ( $($x:pat),+ ) => (