def main(args=None):
    if args is None:
        args = sys.argv[1:]

    options = get_options(args)
    options.skip_dirs = []

    # read user prefs from ~/.testflo file.
    # create one if it doesn't exist
    homedir = os.path.expanduser('~')
    rcfile = os.path.join(homedir, '.testflo')
    if not os.path.isfile(rcfile):
        with open(rcfile, 'w') as f:
            f.write("""[testflo]
skip_dirs=site-packages, dist-packages, build, contrib
""")

    read_config_file(rcfile, options)
    if options.cfg:
        read_config_file(options.cfg, options)

    tests = options.tests
    if options.testfile:
        tests += list(read_test_file(options.testfile))

    if not tests:
        tests = [os.getcwd()]

    def dir_exclude(d):
        for skip in options.skip_dirs:
            if fnmatch(os.path.basename(d), skip):
                return True
        return False

    setup_coverage(options)

    if options.benchmark:
        options.num_procs = 1
        options.isolated = True
        discoverer = TestDiscoverer(module_pattern=six.text_type('benchmark*.py'),
                                    func_pattern=six.text_type('benchmark*'),
                                    dir_exclude=dir_exclude)
        benchmark_file = open(options.benchmarkfile, 'a')
    else:
        discoverer = TestDiscoverer(dir_exclude=dir_exclude)
        benchmark_file = open(os.devnull, 'a')

    retval = 0

    if options.isolated or not options.nompi:
        # create a distributed queue and get a proxy to it
        manager, queue = get_server_queue()
    else:
        manager, queue = (None, None)

    with open(options.outfile, 'w') as report, benchmark_file as bdata:
        pipeline = [
            discoverer.get_iter,
        ]

        if options.dryrun:
            pipeline.append(dryrun)
        else:
            if options.pre_announce:
                options.num_procs = 1

            runner = ConcurrentTestRunner(options, queue)
            pipeline.append(runner.get_iter)

            if options.benchmark:
                pipeline.append(BenchmarkWriter(stream=bdata).get_iter)

            if options.compact:
                verbose = -1
            else:
                verbose = int(options.verbose)

            pipeline.extend([
                ResultPrinter(verbose=verbose).get_iter,
                ResultSummary(options).get_iter,
            ])

            if not options.noreport:
                # print verbose results and summary to a report file
                pipeline.extend([
                    ResultPrinter(report, verbose=1).get_iter,
                    ResultSummary(options, stream=report).get_iter,
                ])

            if options.maxtime > 0:
                pipeline.append(TimeFilter(options.maxtime).get_iter)

            if options.save_fails:
                pipeline.append(FailFilter().get_iter)

        retval = run_pipeline(tests, pipeline)
        finalize_coverage(options)

    if manager is not None:
        manager.shutdown()

    return retval
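
# For reference, the generated ~/.testflo file uses standard ConfigParser
# syntax; read_config_file() maps each entry in the [testflo] section onto
# the corresponding attribute of `options` (that mapping detail is inferred
# from how the file is used above, not shown in this snippet):
#
#   [testflo]
#   skip_dirs=site-packages, dist-packages, build, contrib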
import sys

from testflo.util import elapsed_str
from testflo.options import get_options

options = get_options()

_result_map = {
    ('FAIL', False): 'F',
    ('FAIL', True): 'X',   # expected failure
    ('SKIP', False): 'S',
    ('SKIP', True): 'S',
    ('OK', False): '.',
    ('OK', True): 'U',     # unexpected success
}


class ResultPrinter(object):
    """Prints the status and error message (if any) of each Test object
    after its test has been run, if verbose is nonzero. Otherwise, it
    displays a dot for each successful test, but skipped and failed tests
    are still displayed in verbose form.
    """

    def __init__(self, options, stream=sys.stdout, verbose=0):
        self.stream = stream
        self.options = options
        self.verbose = verbose

    def get_iter(self, input_iter):
        for result in input_iter:
            self._print_result(result)
            # pass the result along so downstream pipeline stages see it
            yield result
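
    # NOTE: _print_result is referenced by get_iter above but was not part
    # of this snippet. A minimal sketch of what it plausibly does, driven by
    # _result_map and the elapsed_str import; `result.spec`, `result.elapsed`
    # and `result.expected_fail` are assumed attribute names, not confirmed
    # by this snippet:
    def _print_result(self, result):
        if self.verbose > 0:
            # long form: test spec, elapsed time, status, and any error text
            self.stream.write("%s (%s) %s\n" % (result.spec,
                                                elapsed_str(result.elapsed),
                                                result.status))
            if result.err_msg:
                self.stream.write(result.err_msg + "\n")
        else:
            # short form: one character per test, e.g. '.'=OK, 'F'=FAIL,
            # 'S'=SKIP, 'X'=expected failure, 'U'=unexpected success
            self.stream.write(_result_map[(result.status,
                                           result.expected_fail)])
        self.stream.flush()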
from fnmatch import fnmatch

from testflo.runner import ConcurrentTestRunner, TestRunner
from testflo.test import Test
from testflo.printer import ResultPrinter
from testflo.benchmark import BenchmarkWriter
from testflo.summary import ResultSummary
from testflo.discover import TestDiscoverer
from testflo.filters import TimeFilter, FailFilter
from testflo.util import read_config_file, read_test_file, _get_parser
from testflo.cover import setup_coverage, finalize_coverage
from testflo.options import get_options
from testflo.qman import get_server_queue

options = get_options()


def dryrun(input_iter):
    """Iterator added to the pipeline when the user only wants a dry run,
    listing all of the discovered tests but not actually running them.
    """
    for tests in input_iter:
        for test in tests:
            if test.status is None:
                test.status = 'OK'
            print(test)
            yield test


def run_pipeline(source, pipe):
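    # NOTE: the body of run_pipeline was truncated in this snippet. A minimal
    # sketch of what it does, inferred from how main() uses it: each entry in
    # `pipe` is a get_iter callable that wraps the iterator produced by the
    # previous stage, and the return value becomes the process exit code.
    # The exact status handling is an assumption:
    iters = [pipe[0](source)]
    for stage in pipe[1:]:
        # each stage consumes the iterator produced by the stage before it
        iters.append(stage(iters[-1]))

    retval = 0
    # exhausting the final iterator drives the entire pipeline
    for result in iters[-1]:
        if result.status == 'FAIL':
            retval = 1
    return retval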
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    options = get_options(args)
    options.skip_dirs = []

    # read user prefs from ~/.testflo file.
    # create one if it doesn't exist
    homedir = os.path.expanduser('~')
    rcfile = os.path.join(homedir, '.testflo')
    if not os.path.isfile(rcfile):
        with open(rcfile, 'w') as f:
            f.write("""[testflo]
skip_dirs=site-packages, dist-packages, build, contrib
""")

    read_config_file(rcfile, options)
    if options.cfg:
        read_config_file(options.cfg, options)

    tests = options.tests
    if options.testfile:
        tests += list(read_test_file(options.testfile))

    if not tests:
        tests = [os.getcwd()]

    def dir_exclude(d):
        for skip in options.skip_dirs:
            if fnmatch(os.path.basename(d), skip):
                return True
        return False

    setup_coverage(options)

    if options.noreport:
        report_file = open(os.devnull, 'a')
    else:
        report_file = open(options.outfile, 'w')

    if not options.test_glob:
        options.test_glob = ['test*']

    def func_matcher(funcname):
        for pattern in options.test_glob:
            if fnmatchcase(funcname, pattern):
                return True
        return False

    if options.benchmark:
        options.num_procs = 1
        options.isolated = True
        discoverer = TestDiscoverer(module_pattern=six.text_type('benchmark*.py'),
                                    func_match=lambda f: fnmatchcase(f, 'benchmark*'),
                                    dir_exclude=dir_exclude)
        benchmark_file = open(options.benchmarkfile, 'a')
    else:
        discoverer = TestDiscoverer(dir_exclude=dir_exclude,
                                    func_match=func_matcher)
        benchmark_file = open(os.devnull, 'a')

    retval = 0

    if options.isolated or not options.nompi:
        # create a distributed queue and get a proxy to it
        manager, queue = get_server_queue()
    else:
        manager, queue = (None, None)

    with report_file as report, benchmark_file as bdata:
        pipeline = [
            discoverer.get_iter,
        ]

        if options.dryrun:
            pipeline.append(dryrun)
        else:
            if options.pre_announce:
                options.num_procs = 1

            runner = ConcurrentTestRunner(options, queue)
            pipeline.append(runner.get_iter)

            if options.benchmark:
                pipeline.append(BenchmarkWriter(stream=bdata).get_iter)

            if options.compact:
                verbose = -1
            else:
                verbose = int(options.verbose)

            pipeline.extend([
                ResultPrinter(verbose=verbose).get_iter,
                ResultSummary(options).get_iter,
            ])

            if not options.noreport:
                # print verbose results and summary to a report file
                pipeline.extend([
                    ResultPrinter(report, verbose=1).get_iter,
                    ResultSummary(options, stream=report).get_iter,
                ])

            if options.maxtime > 0:
                pipeline.append(TimeFilter(options.maxtime).get_iter)

            if options.save_fails:
                pipeline.append(FailFilter().get_iter)

        retval = run_pipeline(tests, pipeline)
        finalize_coverage(options)

    if manager is not None:
        manager.shutdown()

    return retval
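
# This version of main() relies on fnmatchcase, so the module imports would
# need to include it, e.g. `from fnmatch import fnmatch, fnmatchcase`.
# For reference, fnmatchcase does case-sensitive glob matching, so with the
# default test_glob of ['test*']:
#
#   fnmatchcase('test_solver', 'test*')    -> True
#   fnmatchcase('Test_solver', 'test*')    -> False  (case matters)
#   fnmatchcase('benchmark_io', 'test*')   -> False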
import os
import sys
import traceback

from mpi4py import MPI

from testflo.test import Test
from testflo.cover import setup_coverage, save_coverage
from testflo.qman import get_client_queue
from testflo.options import get_options

exitcode = 0  # use 0 for exit code of all ranks != 0 because otherwise,
              # MPI will terminate other processes

queue = get_client_queue()
os.environ['TESTFLO_QUEUE'] = ''

setup_coverage(get_options())

try:
    try:
        comm = MPI.COMM_WORLD
        test = Test(sys.argv[1])
        test.nocapture = True  # so we don't lose stdout
        test.run()
    except:
        print(traceback.format_exc())
        test.status = 'FAIL'
        test.err_msg = traceback.format_exc()

    # collect results
    results = comm.gather(test, root=0)
    if comm.rank == 0:
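        # NOTE: the rank 0 branch and the rest of the outer try block were
        # truncated in this snippet. A minimal sketch of what plausibly
        # follows: rank 0 folds the per-rank results into a single result
        # (a failure on any rank fails the test), saves coverage data, and
        # hands the result back to the parent testflo process through the
        # client queue. The exact consolidation and cleanup logic is an
        # assumption:
        for r in results:
            if r.status == 'FAIL':
                # report the first failing rank's result; a nonzero exit
                # code only matters on rank 0 (see comment on exitcode above)
                test = r
                exitcode = 1
                break
        save_coverage()
        queue.put(test)
finally:
    sys.exit(exitcode)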
def main(args=None):
    # FIXME: get rid of this
    if args is None:
        args = sys.argv[1:]

    options = get_options(args)
    options.skip_dirs = []

    # read user prefs from ~/.testflo file.
    # create one if it doesn't exist
    homedir = os.path.expanduser('~')
    rcfile = os.path.join(homedir, '.testflo')
    if not os.path.isfile(rcfile):
        with open(rcfile, 'w') as f:
            f.write("""[testflo]
skip_dirs=site-packages, dist-packages, build, contrib
""")

    read_config_file(rcfile, options)
    if options.cfg:
        read_config_file(options.cfg, options)

    tests = options.tests
    if options.testfile:
        tests += list(read_test_file(options.testfile))

    if not tests:
        tests = [os.getcwd()]

    def dir_exclude(d):
        for skip in options.skip_dirs:
            if fnmatch(os.path.basename(d), skip):
                return True
        return False

    setup_coverage(options)
    setup_profile(options)

    if options.benchmark:
        options.num_procs = 1
        options.isolated = True
        discoverer = TestDiscoverer(module_pattern=six.text_type('benchmark*.py'),
                                    func_pattern=six.text_type('benchmark*'),
                                    dir_exclude=dir_exclude)
        benchmark_file = open(options.benchmarkfile, 'a')
    else:
        discoverer = TestDiscoverer(dir_exclude=dir_exclude)
        benchmark_file = open(os.devnull, 'a')

    try:
        retval = 0
        server_proc = None

        if options.isolated or not options.nompi:
            addr = get_open_address()
            authkey = 'foo'
            cmd = [sys.executable,
                   os.path.join(os.path.dirname(__file__), 'qman.py')]
            if sys.platform == 'win32':
                cmd.extend((addr, authkey))
            else:
                cmd.extend((addr[0], str(addr[1]), authkey))
            server_proc = subprocess.Popen(cmd, env=os.environ)

            # make sure the server is up before we continue onward
            retries = 10
            man = None
            while retries:
                try:
                    man = get_client_manager(addr, authkey)
                    break
                except:
                    msg = traceback.format_exc()
                    time.sleep(0.5)
                    retries -= 1

            if man is None:
                raise ConnectionRefusedError("Can't connect to queue server: %s" % msg)
            del man
        else:
            addr = authkey = None

        with open(options.outfile, 'w') as report, benchmark_file as bdata:
            pipeline = [
                discoverer.get_iter,
            ]

            if options.dryrun:
                pipeline.extend([
                    dryrun,
                ])
            else:
                runner = ConcurrentTestRunner(options, addr, authkey)
                pipeline.append(runner.get_iter)

                if options.benchmark:
                    pipeline.append(BenchmarkWriter(stream=bdata).get_iter)

                pipeline.extend([
                    ResultPrinter(verbose=options.verbose).get_iter,
                    ResultSummary(options).get_iter,
                ])

                if not options.noreport:
                    # print verbose results and summary to a report file
                    pipeline.extend([
                        ResultPrinter(report, verbose=True).get_iter,
                        ResultSummary(options, stream=report).get_iter,
                    ])

                if options.maxtime > 0:
                    pipeline.append(TimeFilter(options.maxtime).get_iter)

                if options.save_fails:
                    pipeline.append(FailFilter().get_iter)

            retval = run_pipeline(tests, pipeline)
            finalize_coverage(options)
            finalize_profile(options)
    finally:
        if server_proc is not None and (options.isolated or not options.nompi):
            try:
                server_proc.terminate()
            except:
                # send msg to stdout instead of stderr to avoid failures when
                # testing under PowerShell.
                print("failed to terminate queue server")

    return retval
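
# A minimal, hypothetical way to exercise this version of main() directly.
# testflo normally exposes main() through an installed console script, so
# the guard below is an illustration, not part of the original module:
if __name__ == '__main__':
    sys.exit(main())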