Example #1
 def _load_configs(clazz, search_path, glob_expression):
     result = []
     for next_path in search_path:
         for next_file in file_path.glob(next_path, glob_expression):
             filename = file_util.remove_head(next_file, next_path)
             config = simple_config.from_file(next_file,
                                              ignore_extends=True)
             result.append(
                 clazz._found_config(next_path, filename, next_file,
                                     config))
     return result
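Every example here uses file_util.remove_head(filename, head) from the bes library to turn a full path into one relative to a known prefix. Below is a minimal sketch of that apparent behavior, assuming remove_head() simply strips a leading directory plus the path separator; the helper name remove_head_sketch is hypothetical and the real bes implementation may differ.

import os
from os import path


def remove_head_sketch(filename, head):
    # Hypothetical stand-in for file_util.remove_head(): strip the leading
    # directory prefix "head" (plus its trailing separator) from "filename".
    head = path.normpath(head) + os.sep
    if filename.startswith(head):
        return filename[len(head):]
    return filename


# e.g. remove_head_sketch('/tmp/configs/foo.config', '/tmp/configs') -> 'foo.config'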
Example #2
    def find(clazz,
             root_dir,
             relative=True,
             min_depth=None,
             max_depth=None,
             file_type=FILE):
        if max_depth and min_depth and not (max_depth >= min_depth):
            raise RuntimeError('max_depth needs to be >= min_depth.')

        if min_depth and min_depth < 1:
            raise RuntimeError('min_depth needs to be >= 1.')

        def _in_range(depth, min_depth, max_depth):
            if min_depth and max_depth:
                return depth >= min_depth and depth <= max_depth
            elif min_depth:
                return depth >= min_depth
            elif max_depth:
                return depth <= max_depth
            else:
                return True

        result = []

        root_dir = path.normpath(root_dir)
        root_dir_count = root_dir.count(os.sep)

        for root, dirs, files in clazz.walk_with_depth(root_dir,
                                                       max_depth=max_depth):
            to_check = []
            if clazz._want_file_type(file_type,
                                     clazz.FILE | clazz.LINK | clazz.DEVICE):
                to_check += files
            if clazz._want_file_type(file_type, clazz.DIR):
                to_check += dirs
            else:
                links = [
                    d for d in dirs
                    if path.islink(path.normpath(path.join(root, d)))
                ]
                to_check += links
            for name in to_check:
                f = path.normpath(path.join(root, name))
                depth = f.count(os.sep) - root_dir_count
                if _in_range(depth, min_depth, max_depth):
                    if clazz._match_file_type(f, file_type):
                        if relative:
                            result.append(file_util.remove_head(f, root_dir))
                        else:
                            result.append(f)
        return sorted(result)
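Example #4 below refers to what appears to be this same class as file_find, so a call to find() presumably looks like the sketch below. The file_find name and the FILE constant are inferred from the surrounding examples, not verified against the bes API.

# Hypothetical usage sketch: list regular files one or two levels below a
# root directory; with relative=True the paths come back relative to
# root_dir (via file_util.remove_head) and sorted.
files = file_find.find('/tmp/project',
                       relative=True,
                       min_depth=1,
                       max_depth=2,
                       file_type=file_find.FILE)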
Example #3
 def _test_grep(self, content, text, node_type, word_boundary=False):
     tmp_dir = self._make_temp_content(content)
     options = refactor_options(word_boundary=word_boundary)
     real_result = refactor_ast.grep([tmp_dir],
                                     text,
                                     node_type,
                                     options=options)
     result = []
     for item in real_result:
         filename = file_util.remove_head(item.filename, tmp_dir + os.sep)
         t = (filename, item.snippet, item.snippet_lines)
         result.append(t)
     json = json_util.to_json(result, indent=2)
     return self._test_grep_result(tmp_dir, real_result, json)
Example #4
    def list_dir(self, remote_dir, recursive, options):
        'List entries in a directory.'
        check.check_string(remote_dir)
        check.check_bool(recursive)
        check.check_vfs_file_info_options(options, allow_none=True)

        remote_dir = vfs_path_util.normalize(remote_dir)
        options = options or vfs_file_info_options()

        result = node(self.SEP)
        local_dir_path = self._make_local_dir_path(remote_dir)
        self.log.log_d(
            'list_dir: remote_dir={} recursive={} local_dir_path={}'.format(
                remote_dir, recursive, local_dir_path))
        if not path.exists(local_dir_path):
            raise vfs_error('dir does not exist: {}'.format(remote_dir))

        if not path.isdir(local_dir_path):
            raise vfs_error('not a dir: {}'.format(remote_dir))

        max_depth = None if recursive else 1
        setattr(result, '_remote_filename', self.SEP)
        setattr(result, '_local_filename', self._local_root_dir)
        setattr(result, '_is_file', False)

        num_added = 0
        for root, dirs, files in file_find.walk_with_depth(local_dir_path,
                                                           max_depth=max_depth,
                                                           follow_links=True):
            if root == local_dir_path:
                rel = os.sep
            else:
                rel = file_util.ensure_lsep(
                    file_util.remove_head(root, local_dir_path))
            self.log.log_d(
                'list_dir: next: root={} dirs={} files={} rel={}'.format(
                    root, dirs, files, rel))
            files_set = set(files)
            if not self._should_include_file(rel):
                self.log.log_d('list_dir: skipping {}'.format(rel))
                continue
            for next_file_or_dir in sorted(files + dirs):
                if self._should_include_file(next_file_or_dir):
                    self.log.log_d(
                        'list_dir: rel={} next_file_or_dir={}'.format(
                            rel, next_file_or_dir))
                    local_filename_rel = path.join(rel, next_file_or_dir)
                    remote_filename = local_filename_rel.replace(
                        os.sep, self.SEP)
                    self.log.log_d(
                        'list_dir: local_filename_rel={} remote_filename={}'.
                        format(local_filename_rel, remote_filename))
                    assert local_filename_rel[0] == os.sep
                    assert remote_filename[0] == self.SEP
                    remote_filename = remote_filename[1:]
                    local_filename = path.join(root, next_file_or_dir)
                    parts = remote_filename.split('/')
                    new_node = result.ensure_path(parts)
                    setattr(new_node, '_remote_filename', remote_filename)
                    setattr(new_node, '_local_filename', local_filename)
                    setattr(new_node, '_is_file', next_file_or_dir
                            in files_set)
                    num_added += 1
                else:
                    self.log.log_d(
                        'list_dir: skipping {}'.format(next_file_or_dir))
        if num_added == 0:
            return vfs_file_info_list()
        fs_tree = self._convert_node_to_fs_tree(result, 0, options)
        return fs_tree.children
Example #5
 def relative_filename(self):
   'Return the filename relative to the config root_dir or None if no config was found.'
   if self.config:
     return file_util.remove_head(self.filename, self.config.root_dir)
   else:
     return None
Example #6
def _test_execute(python_exe, test_map, filename, tests, options, index,
                  total_files, cwd, env):
    short_filename = file_util.remove_head(filename, cwd)

    cmd = ['"{}"'.format(python_exe)]

    if options.coverage_output:
        # '-a' appends coverage data across runs instead of overwriting it
        cmd.extend(['run', '-a'])
    else:
        cmd.append('-B')

    if options.profile_output:
        cmd.extend(['-m', 'cProfile', '-o', options.profile_output])

    cmd.append(string_util.quote_if_needed(filename))

    total_unit_tests = len(test_map[filename])

    if tests:
        cmd.extend(['%s.%s' % (test.fixture, test.function) for test in tests])
        wanted_unit_tests = len(
            [test for test in tests if test.filename == filename])
    else:
        wanted_unit_tests = total_unit_tests

    if wanted_unit_tests == total_unit_tests:
        function_count_blurb = '(%d %s)' % (
            total_unit_tests, _make_test_string(total_unit_tests))
    else:
        function_count_blurb = '(%d of %d)' % (wanted_unit_tests,
                                               total_unit_tests)

    output = ''
    try:
        #    if options.stop:
        #      cmd.append('--stop')

        if total_files > 1:
            filename_count_blurb = ' ' + _make_count_blurb(index, total_files)
        else:
            filename_count_blurb = ''

        if options.dry_run:
            label = 'dry-run'
        else:
            label = 'testing'
        longest_python_exe = max(
            [len(path.basename(p)) for p in options.interpreters])
        if len(options.interpreters) > 1:
            python_exe_blurb = path.basename(python_exe).rjust(
                longest_python_exe)
            python_exe_blurb_sep = ' '
        else:
            python_exe_blurb = ''
            python_exe_blurb_sep = ''
        blurb = '%7s:%s%s%s %s - %s ' % (
            label, filename_count_blurb, python_exe_blurb_sep,
            python_exe_blurb, short_filename, function_count_blurb)
        printer.writeln_name(blurb)

        if options.verbose and tests:
            for i, test in enumerate(tests):
                blurb = '%7s:   %s.%s' % ('tests', test.fixture, test.function)
                printer.writeln_name(blurb)

        if options.dry_run:
            return test_result(True, 0, 0.0, None)

        env = copy.deepcopy(env)
        env['BES_TEST_DATA_DIR'] = _test_data_dir(filename)
        if options.verbose:
            env['BES_VERBOSE'] = '1'
        if options.temp_dir:
            env['BES_TEMP_DIR'] = options.temp_dir
        env['HOME'] = options.home_dir
        time_start = time.time()
        _LOG.log_d('cmd=={}'.format(cmd))
        process = subprocess.Popen(' '.join(cmd),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   shell=True,
                                   env=env)
        communicate_args = {}
        if sys.version_info.major >= 3:
            communicate_args['timeout'] = 60.0 * 5
        output = process.communicate(**communicate_args)
        exit_code = process.wait()
        elapsed_time = time.time() - time_start
        decoded_output = output[0].decode('utf-8')
        fixed_output = _fix_output(decoded_output)
        success = exit_code == 0
        writeln_output = not success or options.verbose
        if success:
            label = 'passed'
        else:
            label = 'FAILED'
        if writeln_output:
            printer.writeln_name('%7s: %s' % (label, short_filename))
            try:
                printer.writeln(fixed_output)
            except UnicodeEncodeError as ex:
                fixed_output = decoded_output.encode('ascii', 'replace')
                printer.writeln(fixed_output)
        return test_result(success, wanted_unit_tests, elapsed_time,
                           fixed_output)
    except Exception as ex:
        ex_output = traceback.format_exc()
        printer.writeln_name('Caught exception on {}\n{}'.format(
            filename, ex_output))
        for s in ex_output.split('\n'):
            printer.writeln_name(s)
        return test_result(False, wanted_unit_tests, 0.0, ex_output)
Example #7
def main():
    DEBUG = os.environ.get('DEBUG', False)

    import bes
    vcli = version_cli(bes)
    parser = argparse.ArgumentParser()
    parser.add_argument('files',
                        action='store',
                        nargs='*',
                        help='Files or directories to test')
    vcli.version_add_arguments(parser)
    parser.add_argument('--dry-run',
                        '-n',
                        action='store_true',
                        default=False,
                        help='Only print what files will get tested [ False ]')
    parser.add_argument(
        '--timing',
        '-t',
        action='store_true',
        default=False,
        help='Show the amount of time it takes to run tests [ False ]')
    parser.add_argument('--verbose',
                        '-v',
                        action='store_true',
                        default=False,
                        help='Verbose debug output [ False ]')
    parser.add_argument('--stop',
                        '-s',
                        action='store_true',
                        default=False,
                        help='Stop right after the first failure. [ False ]')
    parser.add_argument(
        '--randomize',
        action='store_true',
        default=False,
        help='Randomize the order in which unit tests run. [ False ]')
    parser.add_argument(
        '--python',
        action='append',
        default=[],
        help=
        'Python executable(s) to use.  Multiple flags can be given to run multiple times with different python versions [ python ]'
    )
    parser.add_argument('--page',
                        '-p',
                        action='store_true',
                        default=False,
                        help='Page output with $PAGER [ False ]')
    parser.add_argument(
        '--profile',
        action='store',
        default=None,
        help=
        'Profile the code with cProfile and store the output in the given argument [ None ]'
    )
    parser.add_argument(
        '--coverage',
        action='store',
        default=None,
        help=
        'Run coverage on the code and store the output in the given argument [ None ]'
    )
    parser.add_argument('--pager',
                        action='store',
                        default=os.environ.get('PAGER', 'more'),
                        help='Pager to use when paging [ %s ]' %
                        (os.environ.get('PAGER', 'more')))
    parser.add_argument('--iterations',
                        '-i',
                        action='store',
                        default=1,
                        type=int,
                        help='Number of iterations to run the tests [ 1 ]')
    parser.add_argument(
        '--git',
        '-g',
        action='store_true',
        default=False,
        help='Use git status to figure out which changed files to test [ False ]')
    parser.add_argument(
        '--commit',
        '-c',
        action='store',
        type=str,
        default=None,
        help='Test only the files affected by the given git commit [ None ]')
    parser.add_argument('--pre-commit',
                        action='store_true',
                        default=False,
                        help='Run pre commit checks [ False ]')
    parser.add_argument('--print-tests',
                        action='store_true',
                        default=False,
                        help='Print the list of unit tests [ False ]')
    parser.add_argument('--print-python',
                        action='store_true',
                        default=False,
                        help='Print the detected python executable [ False ]')
    parser.add_argument('--print-files',
                        action='store_true',
                        default=False,
                        help='Print the list of unit test files [ False ]')
    parser.add_argument(
        '--egg',
        action='store_true',
        default=False,
        help=
        'Make an egg of the package and run the tests against that instead of the live files. [ False ]'
    )
    parser.add_argument(
        '--save-egg',
        action='store_true',
        default=False,
        help='Save the egg in the current directory. [ False ]')
    parser.add_argument('--ignore',
                        action='append',
                        default=[],
                        help='Patterns of filenames to ignore []')
    parser.add_argument(
        '--root-dir',
        action='store',
        default=None,
        help=
        'The root directory for all your projects.  By default it is computed from your git structure.  [ None ]'
    )
    parser.add_argument('--dont-hack-env',
                        action='store_true',
                        default=False,
                        help='Do not hack PATH and PYTHONPATH. [ False ]')
    parser.add_argument(
        '--compile-only',
        action='store_true',
        default=False,
        help='Just compile the files to verify syntax [ False ]')
    parser.add_argument(
        '--print-deps',
        action='store_true',
        default=False,
        help='Print python dependencies for test files [ False ]')
    parser.add_argument('--print-configs',
                        action='store_true',
                        default=False,
                        help='Print testing configs found [ False ]')
    parser.add_argument('--print-root-dir',
                        action='store_true',
                        default=False,
                        help='Print the root dir [ False ]')
    parser.add_argument('--print-path',
                        action='store_true',
                        default=False,
                        help='Print sys.path [ False ]')
    parser.add_argument(
        '--file-ignore-file',
        action='append',
        default=[],
        help=
        'List of file ignore files. [ .bes_test_ignore .bes_test_internal_ignore ]'
    )
    parser.add_argument('--env',
                        action='append',
                        default=[],
                        help='Environment variables to set [ None ]')
    parser.add_argument('--no-env-deps',
                        action='store_true',
                        default=False,
                        help='Do not use env deps. [ False ]')
    parser.add_argument(
        '--temp-dir',
        action='store',
        default=None,
        help=
        'The directory to use for tmp files overriding the system default.  [ None ]'
    )
    parser.add_argument(
        '--keep-side-effects',
        action='store_true',
        default=DEBUG,
        help='Do not delete side effects - for debugging. [ False ]')
    parser.add_argument(
        '--ignore-side-effects',
        action='store_true',
        default=DEBUG,
        help='Do not report side effects as failures - for debugging. [ False ]')

    found_git_exe = git_exe.find_git_exe()
    if not found_git_exe:
        printer.writeln_name(
            'ERROR: No git found.  Git is needed to run bes_test.')
        return 1

    for g in parser._action_groups:
        g._group_actions.sort(key=lambda x: x.dest)

    args = parser.parse_args()

    args.python = _resolve_python_exe_list(args.python)

    if not args.python:
        python_exe = python.find_python_exe()
        if python_exe:
            args.python = [python_exe]

    if not args.python:
        printer.writeln_name(
            'ERROR: No python found.  Python is needed to run bes_test.')
        return 1

    _LOG.log_d('using python={}'.format(args.python))

    if args.git and args.commit:
        printer.writeln_name(
            'ERROR: Only one of --git or --commit can be given.')
        return 1

    if args.temp_dir:
        file_util.mkdir(args.temp_dir)
        tempfile.tempdir = args.temp_dir

    if DEBUG:
        args.verbose = True

    cwd = os.getcwd()

    if args.version:
        vcli.version_print_version()
        return 0

    args.env = _parse_args_env(args.env)

    if not args.files:
        args.files = [cwd]

    if not args.file_ignore_file:
        args.file_ignore_file = [
            '.bes_test_ignore', '.bes_test_internal_ignore'
        ]

    if args.commit:
        if args.commit in ['HEAD', 'last']:
            args.commit = git.last_commit_hash('.')

    ar = argument_resolver(cwd,
                           args.files,
                           root_dir=args.root_dir,
                           file_ignore_filename=args.file_ignore_file,
                           check_git=args.git,
                           git_commit=args.commit,
                           use_env_deps=not args.no_env_deps)
    ar.num_iterations = args.iterations
    ar.randomize = args.randomize

    ar.ignore_with_patterns(args.ignore)

    if args.compile_only:
        total_files = len(ar.all_files)
        for i, f in enumerate(ar.all_files):
            tmp = temp_file.make_temp_file()
            filename_count_blurb = ' ' + _make_count_blurb(i + 1, total_files)
            short_filename = file_util.remove_head(f, cwd)
            blurb = '%7s:%s %s ' % ('compile', filename_count_blurb,
                                    short_filename)
            printer.writeln_name(blurb)
            py_compile.compile(f, cfile=tmp, doraise=True)
        return 0

    if not ar.test_descriptions:
        return 1

    if args.print_python:
        for python_exe in args.python:
            print(python_exe)
        return 0

    if args.print_path:
        for p in sys.path:
            print(p)
        return 0

    if args.print_configs:
        ar.print_configs()
        return 0

    if args.print_root_dir:
        print(ar.root_dir)
        return 0

    if args.print_files:
        ar.print_files()
        return 0

    if args.print_tests:
        ar.print_tests()
        return 0

    if ((args.print_deps or args.pre_commit)
            and not ar.supports_test_dependency_files()):
        printer.writeln_name(
            'ERROR: Cannot figure out dependencies.  snakefood missing.')
        return 1

    if args.print_deps:
        dep_files = ar.test_dependency_files()
        for filename in sorted(dep_files.keys()):
            print(filename)
            for dep_file in dep_files[filename]:
                print('  %s' % (dep_file.filename))
        return 0

    # Read ~/.bes_test/bes_test.config (or use a default config)
    bes_test_config = _read_config_file()
    keep_patterns = bes_test_config.get_value_string_list(
        'environment', 'keep_patterns')

    # Start with a clean environment so unit testing can be deterministic and not subject
    # to whatever the user happened to have exported.  PYTHONPATH and PATH for dependencies
    # are set below by iterating the configs
    keep_keys = bes_test_config.get_value_string_list('environment',
                                                      'keep_keys')
    if args.dont_hack_env:
        keep_keys.extend(['PATH', 'PYTHONPATH'])

    keep_keys.extend(['TMPDIR', 'TEMP', 'TMP'])
    env = os_env.make_clean_env(
        keep_keys=keep_keys,
        keep_func=lambda key: _env_var_should_keep(key, keep_patterns))
    env_var(env, 'PATH').prepend(path.dirname(found_git_exe))
    for python_exe in args.python:
        env_var(env, 'PATH').prepend(path.dirname(python_exe))
    env['PYTHONDONTWRITEBYTECODE'] = 'x'

    variables = {
        'rebuild_dir': path.expanduser('~/.rebuild'),
        'system': host.SYSTEM,
    }

    if not args.dont_hack_env:
        for var in ar.env_dependencies_variables():
            ov = os_env_var(var)
            if ov.is_set:
                value = ov.value
            else:
                value = ''
            variables[var] = value
        ar.update_environment(env, variables)

    # Update env with whatever was given in --env
    env.update(args.env)

    # Use a custom TMP dir so that we can catch temporary side effects and flag them
    tmp_tmp = temp_file.make_temp_dir(prefix='bes_test_',
                                      suffix='.tmp.tmp.dir',
                                      delete=False)
    env.update({
        'TMPDIR': tmp_tmp,
        'TEMP': tmp_tmp,
        'TMP': tmp_tmp,
    })
    side_effects = {}

    num_passed = 0
    num_failed = 0
    num_executed = 0
    num_tests = len(ar.test_descriptions)
    failed_tests = []

    # Remove current dir from sys.path to avoid side effects
    if cwd in sys.path:
        sys.path.remove(cwd)

    if args.egg:
        pythonpath = env_var(env, 'PYTHONPATH')
        pythonpath.remove(cwd)
        for config in ar.env_dependencies_configs:
            setup_dot_py = path.join(config.root_dir, 'setup.py')
            if not path.isfile(setup_dot_py):
                raise RuntimeError('No setup.py found in %s to make the egg.' %
                                   (config.root_dir))
            egg_zip = egg.make(config.root_dir,
                               'master',
                               setup_dot_py,
                               untracked=False)
            pythonpath.prepend(egg_zip)
            printer.writeln_name('using tmp egg: %s' % (egg_zip))
            if args.save_egg:
                file_util.copy(egg_zip, path.join(cwd, path.basename(egg_zip)))

    if args.pre_commit:
        missing_from_git = []
        for filename, dep_files in ar.test_dependency_files().items():
            for dep_file in dep_files:
                if dep_file.config and not dep_file.git_tracked:
                    missing_from_git.append(dep_file.filename)
        if missing_from_git:
            for f in missing_from_git:
                printer.writeln_name('PRE_COMMIT: missing from git: %s' %
                                     (path.relpath(f)))
            return 1
        return 0

    ar.cleanup_python_compiled_files()

    # Do all our work with a temporary working directory to be able to check for side effects
    tmp_cwd = temp_file.make_temp_dir(prefix='bes_test_',
                                      suffix='.tmp.cwd.dir',
                                      delete=False)
    tmp_home = temp_file.make_temp_dir(prefix='bes_test_',
                                       suffix='.tmp.home.dir',
                                       delete=False)
    os.environ['HOME'] = tmp_home
    os.chdir(tmp_cwd)

    # Use what the OS thinks the path is (to deal with symlinks and virtual tmpfs things)
    tmp_cwd = os.getcwd()

    if not args.dry_run and args.page:
        printer.OUTPUT = tempfile.NamedTemporaryFile(prefix='bes_test',
                                                     delete=True,
                                                     mode='w')

    total_tests = _count_tests(ar.inspect_map, ar.test_descriptions)
    total_files = len(ar.test_descriptions)

    total_num_tests = 0

    if args.profile:
        args.profile = path.abspath(args.profile)
        if not _check_program('cprofilev'):
            return 1

    if args.coverage:
        args.coverage = path.abspath(args.coverage)
        coverage_exe = _check_program('coverage')
        if not coverage_exe:
            return 1
        args.python = [coverage_exe]

    if args.profile and args.coverage:
        printer.writeln_name(
            'ERROR: --profile and --coverage are mutually exclusive.')
        return 1

    options = test_options(args.dry_run, args.verbose, args.stop, args.timing,
                           args.profile, args.coverage, args.python,
                           args.temp_dir, tmp_home)

    timings = {}

    total_time_start = time.time()

    stopped = False
    for i, test_desc in enumerate(ar.test_descriptions):
        file_info = test_desc.file_info
        filename = file_info.filename
        if not filename in timings:
            timings[filename] = []
        for python_exe in args.python:
            result = _test_execute(python_exe, ar.inspect_map, filename,
                                   test_desc.tests, options, i + 1,
                                   total_files, cwd, env)
            _collect_side_effects(side_effects, filename, tmp_home, 'home',
                                  args.keep_side_effects)
            _collect_side_effects(side_effects, filename, tmp_tmp, 'tmp',
                                  args.keep_side_effects)
            _collect_side_effects(side_effects, filename, os.getcwd(), 'cwd',
                                  args.keep_side_effects)
            timings[filename].append(result.elapsed_time)
            total_num_tests += result.num_tests_run
            num_executed += 1
            if result.success:
                num_passed += 1
            else:
                num_failed += 1
                failed_tests.append((python_exe, filename, result))
            if args.stop and not result.success:
                stopped = True
        if stopped:
            break
    total_elapsed_time = 1000 * (time.time() - total_time_start)

    if args.dry_run:
        return 0
    num_skipped = num_tests - num_executed
    summary_parts = []

    if total_num_tests == total_tests:
        function_summary = '(%d %s)' % (total_tests,
                                        _make_test_string(total_tests))
    else:
        function_summary = '(%d of %d %s)' % (total_num_tests, total_tests,
                                              _make_test_string(total_tests))

    if num_failed > 0:
        summary_parts.append('%d of %d fixtures FAILED' %
                             (num_failed, num_tests))
    summary_parts.append('%d of %d passed %s' %
                         (num_passed, num_tests, function_summary))
    if num_skipped > 0:
        summary_parts.append('%d of %d skipped' % (num_skipped, num_tests))

    summary = '; '.join(summary_parts)
    printer.writeln_name('%s' % (summary))
    if failed_tests:
        longest_python_exe = max(
            [len(path.basename(p)) for p in options.interpreters])
        for python_exe, filename, result in failed_tests:
            if len(options.interpreters) > 1:
                python_exe_blurb = path.basename(python_exe).rjust(
                    longest_python_exe)
            else:
                python_exe_blurb = ''
            error_status = unit_test_output.error_status(result.output)
            for error in error_status.errors:
                printer.writeln_name('%5s: %s %s :%s.%s' %
                                     (error.error_type, python_exe_blurb,
                                      file_util.remove_head(filename, cwd),
                                      error.fixture, error.function))

    if num_failed > 0:
        rv = 1
    else:
        rv = 0

    if args.timing:
        filenames = sorted(timings.keys())
        num_filenames = len(filenames)
        for i, filename in enumerate(filenames):
            short_filename = file_util.remove_head(filename, cwd)
            all_timings = timings[filename]
            num_timings = len(all_timings)
            avg_ms = _timing_average(all_timings) * 1000.0
            if num_timings > 1:
                run_blurb = '(average of %d runs)' % (num_timings)
            else:
                run_blurb = ''
            if num_filenames > 1:
                count_blurb = '[%s of %s] ' % (i + 1, num_filenames)
            else:
                count_blurb = ''

            printer.writeln_name(
                'timing: %s%s - %2.2f ms %s' %
                (count_blurb, short_filename, avg_ms, run_blurb))
        if total_elapsed_time >= 1000.0:
            printer.writeln_name('total time: %2.2f s' %
                                 (total_elapsed_time / 1000.0))
        else:
            printer.writeln_name('total time: %2.2f ms' % (total_elapsed_time))

    if args.page:
        subprocess.call([args.pager, printer.OUTPUT.name])

    current_cwd = os.getcwd()
    if current_cwd != tmp_cwd:
        rv = 1
        printer.writeln_name(
            'SIDE EFFECT: working directory was changed from %s to %s' %
            (tmp_cwd, current_cwd))

    if not args.ignore_side_effects:
        for test, items in sorted(side_effects.items()):
            for item in items:
                rv = 1
                filename = item.filename
                print('SIDE EFFECT [{}] {} {}'.format(
                    item.label, test.replace(cwd + os.sep, ''), filename))

    os.chdir('/tmp')

    if not args.keep_side_effects:
        file_util.remove(tmp_cwd)
        file_util.remove(tmp_home)
        file_util.remove(tmp_tmp)

    return rv