auto merge of #13948 : huonw/rust/test-regex-filter, r=alexcrichton

This allows writing a regex to filter tests more precisely, rather than having to list long paths e.g.

```
$ ./stdtest-x86_64-unknown-linux-gnu 'vec.*clone'

running 2 tests
test vec::tests::test_clone ... ok
test vec::tests::test_clone_from ... ok

test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured
```

The regex change is fully backwards compatible, since test names consist of Rust
identifiers and `::` separators, and hence contain no special regex characters.
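
As a rough illustration of the backwards-compatibility argument, here is a minimal sketch using the `regex` crate's public API (`Regex::new` / `is_match`); the filter strings are illustrative, not taken from the patch:

```rust
extern crate regex;
use regex::Regex;

fn main() {
    // A plain string such as "test_clone" contains no regex metacharacters,
    // so compiling it as a regex and doing an unanchored `is_match` behaves
    // just like the old substring filter.
    let old_style = Regex::new("test_clone").unwrap();
    assert!(old_style.is_match("vec::tests::test_clone"));
    assert!(old_style.is_match("vec::tests::test_clone_from"));

    // The new behaviour: real regex syntax narrows the selection further.
    let new_style = Regex::new("vec.*clone").unwrap();
    assert!(new_style.is_match("vec::tests::test_clone"));
    assert!(!new_style.is_match("str::tests::test_clone"));
}
```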

(See commits for details.)
Merged by bors on 2014-05-15 11:22:02 -07:00 as commit ba5f53009a.
6 changed files with 139 additions and 87 deletions.


```diff
@@ -80,7 +80,7 @@ DEPS_collections := std rand
 DEPS_fourcc := syntax std
 DEPS_hexfloat := syntax std
 DEPS_num := std rand
-DEPS_test := std collections getopts serialize term time
+DEPS_test := std collections getopts serialize term time regex
 DEPS_time := std serialize
 DEPS_rand := std
 DEPS_url := std collections
```


```diff
@@ -10,6 +10,7 @@
 use std::from_str::FromStr;
 use std::fmt;
+use regex::Regex;
 
 #[deriving(Clone, Eq)]
 pub enum Mode {
@@ -88,7 +89,7 @@ pub struct Config {
     pub run_ignored: bool,
 
     // Only run tests that match this filter
-    pub filter: Option<~str>,
+    pub filter: Option<Regex>,
 
     // Write out a parseable log of tests that were run
     pub logfile: Option<Path>,
```


```diff
@@ -23,6 +23,8 @@ extern crate log;
 extern crate green;
 extern crate rustuv;
+extern crate regex;
 
 use std::os;
 use std::io;
 use std::io::fs;
@@ -113,6 +115,19 @@ pub fn parse_config(args: Vec<~str> ) -> Config {
         Path::new(m.opt_str(nm).unwrap())
     }
 
+    let filter = if !matches.free.is_empty() {
+        let s = matches.free.get(0).as_slice();
+        match regex::Regex::new(s) {
+            Ok(re) => Some(re),
+            Err(e) => {
+                println!("failed to parse filter /{}/: {}", s, e);
+                fail!()
+            }
+        }
+    } else {
+        None
+    };
+
     Config {
         compile_lib_path: matches.opt_str("compile-lib-path").unwrap(),
         run_lib_path: matches.opt_str("run-lib-path").unwrap(),
@@ -125,12 +140,7 @@ pub fn parse_config(args: Vec<~str> ) -> Config {
         stage_id: matches.opt_str("stage-id").unwrap(),
         mode: FromStr::from_str(matches.opt_str("mode").unwrap()).expect("invalid mode"),
         run_ignored: matches.opt_present("ignored"),
-        filter:
-            if !matches.free.is_empty() {
-                Some((*matches.free.get(0)).clone())
-            } else {
-                None
-            },
+        filter: filter,
         logfile: matches.opt_str("logfile").map(|s| Path::new(s)),
         save_metrics: matches.opt_str("save-metrics").map(|s| Path::new(s)),
         ratchet_metrics:
@@ -169,7 +179,7 @@ pub fn log_config(config: &Config) {
     logv(c, format!("stage_id: {}", config.stage_id));
     logv(c, format!("mode: {}", config.mode));
     logv(c, format!("run_ignored: {}", config.run_ignored));
-    logv(c, format!("filter: {}", opt_str(&config.filter)));
+    logv(c, format!("filter: {}", opt_str(&config.filter.as_ref().map(|re| re.to_str()))));
     logv(c, format!("runtool: {}", opt_str(&config.runtool)));
     logv(c, format!("host-rustcflags: {}", opt_str(&config.host_rustcflags)));
     logv(c, format!("target-rustcflags: {}", opt_str(&config.target_rustcflags)));
@@ -238,7 +248,7 @@ pub fn test_opts(config: &Config) -> test::TestOpts {
     test::TestOpts {
         filter: match config.filter {
            None => None,
-           Some(ref filter) => Some(filter.to_strbuf()),
+           Some(ref filter) => Some(filter.clone()),
        },
        run_ignored: config.run_ignored,
        logfile: config.logfile.clone(),
```


```diff
@@ -90,10 +90,15 @@ fn test_out_of_bounds_failure() {
 ~~~
 
 A test runner built with the `--test` flag supports a limited set of
-arguments to control which tests are run: the first free argument
-passed to a test runner specifies a filter used to narrow down the set
-of tests being run; the `--ignored` flag tells the test runner to run
-only tests with the `ignore` attribute.
+arguments to control which tests are run:
+
+- the first free argument passed to a test runner is interpreted as a
+  regular expression
+  ([syntax reference](regex/index.html#syntax))
+  and is used to narrow down the set of tests being run. Note: a plain
+  string is a valid regular expression that matches itself.
+- the `--ignored` flag tells the test runner to run only tests with the
+  `ignore` attribute.
 
 ## Parallelism
@@ -146,16 +151,31 @@ result: FAILED. 1 passed; 1 failed; 0 ignored
 ### Running a subset of tests
 
-~~~ {.notrust}
-$ mytests mytest1
-
-running 11 tests
+Using a plain string:
+
+~~~ {.notrust}
+$ mytests mytest23
+
+running 1 tests
+running driver::tests::mytest23 ... ok
+
+result: ok. 1 passed; 0 failed; 0 ignored
+~~~
+
+Using some regular expression features:
+
+~~~ {.notrust}
+$ mytests 'mytest[145]'
+
+running 13 tests
 running driver::tests::mytest1 ... ok
+running driver::tests::mytest4 ... ok
+running driver::tests::mytest5 ... ok
 running driver::tests::mytest10 ... ignored
 ... snip ...
 running driver::tests::mytest19 ... ok
 
-result: ok. 11 passed; 0 failed; 1 ignored
+result: ok. 13 passed; 0 failed; 1 ignored
 ~~~
 
 # Microbenchmarking
```


```diff
@@ -16,8 +16,8 @@ A simple wrapper over the platform's dynamic library facilities
 */
 
 use c_str::ToCStr;
-use iter::Iterator;
 use mem;
 use ops::*;
 use option::*;
@@ -25,7 +25,7 @@ use os;
 use path::GenericPath;
 use path;
 use result::*;
-use slice::{Vector,OwnedVector};
+use slice::Vector;
 use str;
 use vec::Vec;
@@ -85,10 +85,12 @@ impl DynamicLibrary {
            } else {
                ("LD_LIBRARY_PATH", ':' as u8)
            };
-           let newenv = os::getenv_as_bytes(envvar).unwrap_or(box []);
-           let mut newenv = newenv.move_iter().collect::<Vec<_>>();
-           newenv.push_all(&[sep]);
-           newenv.push_all(path.as_vec());
+           let mut newenv = Vec::from_slice(path.as_vec());
+           newenv.push(sep);
+           match os::getenv_as_bytes(envvar) {
+               Some(bytes) => newenv.push_all(bytes),
+               None => {}
+           }
            os::setenv(envvar, str::from_utf8(newenv.as_slice()).unwrap());
        }
```


```diff
@@ -37,6 +37,7 @@
 extern crate collections;
 extern crate getopts;
+extern crate regex;
 extern crate serialize;
 extern crate term;
 extern crate time;
@@ -45,6 +46,7 @@ use collections::TreeMap;
 use stats::Stats;
 use time::precise_time_ns;
 use getopts::{OptGroup, optflag, optopt};
+use regex::Regex;
 use serialize::{json, Decodable};
 use serialize::json::{Json, ToJson};
 use term::Terminal;
@@ -53,6 +55,7 @@ use term::color::{Color, RED, YELLOW, GREEN, CYAN};
 use std::cmp;
 use std::f64;
 use std::fmt;
+use std::fmt::Show;
 use std::from_str::FromStr;
 use std::io::stdio::StdWriter;
 use std::io::{File, ChanReader, ChanWriter};
@@ -85,14 +88,19 @@ pub enum TestName {
     StaticTestName(&'static str),
     DynTestName(StrBuf)
 }
-impl fmt::Show for TestName {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+impl TestName {
+    fn as_slice<'a>(&'a self) -> &'a str {
         match *self {
-            StaticTestName(s) => f.buf.write_str(s),
-            DynTestName(ref s) => f.buf.write_str(s.as_slice()),
+            StaticTestName(s) => s,
+            DynTestName(ref s) => s.as_slice()
         }
     }
 }
+impl Show for TestName {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.as_slice().fmt(f)
+    }
+}
 
 #[deriving(Clone)]
 enum NamePadding { PadNone, PadOnLeft, PadOnRight }
@@ -100,7 +108,7 @@ enum NamePadding { PadNone, PadOnLeft, PadOnRight }
 impl TestDesc {
     fn padded_name(&self, column_count: uint, align: NamePadding) -> StrBuf {
         use std::num::Saturating;
-        let mut name = StrBuf::from_str(self.name.to_str());
+        let mut name = StrBuf::from_str(self.name.as_slice());
         let fill = column_count.saturating_sub(name.len());
         let mut pad = StrBuf::from_owned_str(" ".repeat(fill));
         match align {
@@ -257,7 +265,7 @@ pub fn test_main_static_x(args: &[~str], tests: &[TestDescAndFn]) {
 }
 
 pub struct TestOpts {
-    pub filter: Option<StrBuf>,
+    pub filter: Option<Regex>,
     pub run_ignored: bool,
     pub run_tests: bool,
     pub run_benchmarks: bool,
@@ -312,14 +320,12 @@ fn optgroups() -> Vec<getopts::OptGroup> {
                      task, allow printing directly"))
 }
 
-fn usage(binary: &str, helpstr: &str) {
+fn usage(binary: &str) {
     let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
-    println!("{}", getopts::usage(message, optgroups().as_slice()));
-    println!("");
-    if helpstr == "help" {
-        println!("{}", "\
-The FILTER is matched against the name of all tests to run, and if any tests
-have a substring match, only those tests are run.
+    println!(r"{usage}
+
+The FILTER regex is tested against the name of all tests to run, and
+only those tests that match are run.
 
 By default, all tests are run in parallel. This can be altered with the
 RUST_TEST_TASKS environment variable when running tests (set it to 1).
@@ -330,18 +336,18 @@ environment variable. Logging is not captured by default.
 
 Test Attributes:
 
-    #[test]        - Indicates a function is a test to be run. This function
+    \#[test]        - Indicates a function is a test to be run. This function
                      takes no arguments.
-    #[bench]       - Indicates a function is a benchmark to be run. This
+    \#[bench]       - Indicates a function is a benchmark to be run. This
                      function takes one argument (test::Bencher).
-    #[should_fail] - This function (also labeled with #[test]) will only pass if
+    \#[should_fail] - This function (also labeled with \#[test]) will only pass if
                      the code causes a failure (an assertion failure or fail!)
-    #[ignore]      - When applied to a function which is already attributed as a
+    \#[ignore]      - When applied to a function which is already attributed as a
                      test, then the test runner will ignore these tests during
                      normal test runs. Running with --ignored will run these
-                     tests. This may also be written as #[ignore(cfg(...))] to
-                     ignore the test on certain configurations.");
-    }
+                     tests. This may also be written as \#[ignore(cfg(...))] to
+                     ignore the test on certain configurations.",
+             usage = getopts::usage(message, optgroups().as_slice()));
 }
 
 // Parses command line arguments into test options
@@ -357,21 +363,17 @@ pub fn parse_opts(args: &[StrBuf]) -> Option<OptRes> {
         Err(f) => return Some(Err(f.to_err_msg().to_strbuf()))
     };
 
-    if matches.opt_present("h") {
-        usage(args[0].as_slice(), "h");
-        return None;
-    }
-    if matches.opt_present("help") {
-        usage(args[0].as_slice(), "help");
-        return None;
-    }
+    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
 
-    let filter =
-        if matches.free.len() > 0 {
-            Some((*matches.free.get(0)).to_strbuf())
-        } else {
-            None
-        };
+    let filter = if matches.free.len() > 0 {
+        let s = matches.free.get(0).as_slice();
+        match Regex::new(s) {
+            Ok(re) => Some(re),
+            Err(e) => return Some(Err(format_strbuf!("could not parse /{}/: {}", s, e)))
+        }
+    } else {
+        None
+    };
 
     let run_ignored = matches.opt_present("ignored");
@@ -590,7 +592,7 @@ impl<T: Writer> ConsoleTestState<T> {
                     TrIgnored => "ignored".to_strbuf(),
                     TrMetrics(ref mm) => fmt_metrics(mm),
                     TrBench(ref bs) => fmt_bench_samples(bs)
-                }, test.name.to_str());
+                }, test.name.as_slice());
                 o.write(s.as_bytes())
             }
         }
@@ -604,7 +606,7 @@ impl<T: Writer> ConsoleTestState<T> {
                     failures.push(f.name.to_str());
                     if stdout.len() > 0 {
                         fail_out.push_str(format!("---- {} stdout ----\n\t",
-                                                  f.name.to_str()));
+                                                  f.name.as_slice()));
                         let output = str::from_utf8_lossy(stdout.as_slice());
                         fail_out.push_str(output.as_slice().replace("\n", "\n\t"));
                         fail_out.push_str("\n");
@@ -618,7 +620,7 @@ impl<T: Writer> ConsoleTestState<T> {
             try!(self.write_plain("\nfailures:\n"));
             failures.as_mut_slice().sort();
             for name in failures.iter() {
-                try!(self.write_plain(format!(" {}\n", name.to_str())));
+                try!(self.write_plain(format!(" {}\n", name.as_slice())));
            }
            Ok(())
        }
@@ -753,7 +755,7 @@ pub fn run_tests_console(opts: &TestOpts,
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
-                       let tname = test.name.to_str();
+                       let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics
@@ -764,7 +766,7 @@ pub fn run_tests_console(opts: &TestOpts,
                        st.measured += 1
                    }
                    TrBench(bs) => {
-                       st.metrics.insert_metric(test.name.to_str(),
+                       st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
@@ -782,12 +784,12 @@ pub fn run_tests_console(opts: &TestOpts,
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0u,
-           PadOnLeft | PadOnRight => t.desc.name.to_str().len(),
+           PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t|len_if_padded(*t)) {
        Some(t) => {
-           let n = t.desc.name.to_str();
+           let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        },
        None => {}
@@ -939,26 +941,12 @@ pub fn filter_tests(
    let mut filtered = tests;
 
    // Remove tests that don't match the test filter
-   filtered = if opts.filter.is_none() {
-       filtered
-   } else {
-       let filter_str = match opts.filter {
-           Some(ref f) => (*f).clone(),
-           None => "".to_strbuf()
-       };
-
-       fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
-           Option<TestDescAndFn> {
-           if test.desc.name.to_str().contains(filter_str) {
-               return Some(test);
-           } else {
-               return None;
-           }
-       }
-
-       filtered.move_iter()
-           .filter_map(|x| filter_fn(x, filter_str.as_slice()))
-           .collect()
+   filtered = match opts.filter {
+       None => filtered,
+       Some(ref re) => {
+           filtered.move_iter()
+               .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
+       }
    };
 
    // Maybe pull out the ignored test and unignore them
@@ -980,7 +968,7 @@ pub fn filter_tests(
    };
 
    // Sort the tests alphabetically
-   filtered.sort_by(|t1, t2| t1.desc.name.to_str().cmp(&t2.desc.name.to_str()));
+   filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(&t2.desc.name.as_slice()));
 
    // Shard the remaining tests, if sharding requested.
    match opts.test_shard {
@@ -1445,12 +1433,12 @@ mod tests {
    #[test]
    fn first_free_arg_should_be_a_filter() {
-       let args = vec!("progname".to_strbuf(), "filter".to_strbuf());
+       let args = vec!("progname".to_strbuf(), "some_regex_filter".to_strbuf());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
        };
-       assert!("filter" == opts.filter.clone().unwrap().as_slice());
+       assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
    }
 
    #[test]
@@ -1549,6 +1537,37 @@ mod tests {
        }
    }
 
+   #[test]
+   pub fn filter_tests_regex() {
+       let mut opts = TestOpts::new();
+       opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());
+
+       let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
+                        "no::XYZ", "no::abc"];
+       names.sort();
+
+       fn test_fn() {}
+       let tests = names.iter().map(|name| {
+           TestDescAndFn {
+               desc: TestDesc {
+                   name: DynTestName(name.to_strbuf()),
+                   ignore: false,
+                   should_fail: false
+               },
+               testfn: DynTestFn(test_fn)
+           }
+       }).collect();
+       let filtered = filter_tests(&opts, tests);
+
+       let expected: Vec<&str> =
+           names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();
+
+       assert_eq!(filtered.len(), expected.len());
+       for (test, expected_name) in filtered.iter().zip(expected.iter()) {
+           assert_eq!(test.desc.name.as_slice(), *expected_name);
+       }
+   }
+
    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
```
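
For readers who want to try the new matching semantics outside the harness, here is a small standalone sketch that mirrors what `filter_tests` now does and what the `filter_tests_regex` test above checks. It uses the `regex` crate's public `is_match`; `filter_names` is a hypothetical helper for illustration, not part of libtest:

```rust
extern crate regex;
use regex::Regex;

// Keep only the test names that the filter regex matches (unanchored match),
// mirroring the new `filter_tests` logic.
fn filter_names<'a>(names: &[&'a str], re: &Regex) -> Vec<&'a str> {
    names.iter().map(|&n| n).filter(|n| re.is_match(n)).collect()
}

fn main() {
    let re = Regex::new("a.*b.+c").unwrap();
    let names = ["yes::abXc", "yes::aXXXbXXXXc", "no::XYZ", "no::abc"];
    // "no::abc" is rejected because `b.+c` requires at least one character
    // between the `b` and the `c`.
    assert_eq!(filter_names(&names, &re),
               vec!["yes::abXc", "yes::aXXXbXXXXc"]);
}
```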