Example #1
    def test_output_table(self):
        table = [['Header 1', 'Header 2', 'Header 999'],
                 [1, '0000000002', 'foo'], ['bar', 6, 'This is a content.']]
        expected = \
            "Header 1  Header 2    Header 999\n" \
            "--------  ----------  ------------------\n" \
            "1         0000000002  foo\n" \
            "bar       6           This is a content.\n"
        with io.StringIO() as f:
            output.output_table(table, f)
            actual = f.getvalue()
            self.assertEqual(expected, actual)
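The test above pins down the formatting contract of output_table: each column is padded to its widest cell, columns are separated by two spaces, and a dashed rule is printed under the header row. Below is a minimal standalone sketch of the same call writing straight to stdout; the import path is an assumption, since the snippets only reference the module as ``output``:

import sys

from stestr import output  # assumed import path; the snippets only show `output`

table = [['Test id', 'Runtime (s)'],
         ['test_foo', '0.002'],
         ['test_bar', '1.531']]
# Prints the padded, two-space-separated table with a dashed header rule.
output.output_table(table, output=sys.stdout)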
Example #2
def run(args):
    repo = util.get_repo_open(args[0].repo_type, args[0].repo_url)
    try:
        latest_id = repo.latest_id()
    except KeyError:
        return 3
    # what happens when there is no timing info?
    test_times = repo.get_test_times(repo.get_test_ids(latest_id))
    known_times = list(test_times['known'].items())
    known_times.sort(key=itemgetter(1), reverse=True)
    if len(known_times) > 0:
        # By default show 10 rows
        if not args[0].all:
            known_times = known_times[:10]
        known_times = format_times(known_times)
        header = ('Test id', 'Runtime (s)')
        rows = [header] + known_times
        output.output_table(rows)
    return 0
Example #3
def slowest(repo_type='file',
            repo_url=None,
            show_all=False,
            stdout=sys.stdout):
    """Print the slowest times from the last run in the repository

    This function will print to STDOUT the 10 slowest tests in the last run.
    Optionally, using the ``show_all`` argument, it will print all the tests
    instead of just the 10 slowest, sorted by time.

    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param bool show_all: Show timing for all tests.
    :param file stdout: The output file to write all output to. By default
        this is sys.stdout.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """

    repo = util.get_repo_open(repo_type, repo_url)
    try:
        latest_id = repo.latest_id()
    except KeyError:
        return 3
    # what happens when there is no timing info?
    test_times = repo.get_test_times(repo.get_test_ids(latest_id))
    known_times = list(test_times['known'].items())
    known_times.sort(key=itemgetter(1), reverse=True)
    if len(known_times) > 0:
        # By default show 10 rows
        if not show_all:
            known_times = known_times[:10]
        known_times = format_times(known_times)
        header = ('Test id', 'Runtime (s)')
        rows = [header] + known_times
        output.output_table(rows, output=stdout)
    return 0
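Examples #2 and #3 wrap the same table-building logic; the second variant also documents a small API. A hedged usage sketch of slowest(): the signature and return codes are taken from the function above, but the import path is an assumption since the listing does not show which module defines it.

import io
import sys

from stestr.commands.slowest import slowest  # assumed module path

buf = io.StringIO()
ret = slowest(repo_type='file', repo_url=None, show_all=True, stdout=buf)
if ret == 0:
    # buf now holds the 'Test id' / 'Runtime (s)' table built by output_table.
    sys.stdout.write(buf.getvalue())
else:
    # A return of 3 means the repository has no latest run to report on.
    sys.stderr.write('no timing data available\n')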
Example #4
    def bisect_tests(self, spurious_failures):

        test_conflicts = {}
        if not spurious_failures:
            raise ValueError('No failures provided to bisect the cause of')
        for spurious_failure in spurious_failures:
            candidate_causes = self._prior_tests(self.latest_run,
                                                 spurious_failure)
            bottom = 0
            top = len(candidate_causes)
            width = top - bottom
            while width:
                check_width = int(math.ceil(width / 2.0))
                test_ids = candidate_causes[bottom:bottom +
                                            check_width] + [spurious_failure]
                cmd = self.conf.get_run_command(test_ids,
                                                group_regex=self.group_regex,
                                                repo_type=self.repo_type,
                                                repo_url=self.repo_url,
                                                serial=self.serial,
                                                concurrency=self.concurrency,
                                                test_path=self.test_path,
                                                top_dir=self.top_dir)
                self.run_func(cmd,
                              False,
                              True,
                              False,
                              False,
                              pretty_out=False,
                              repo_type=self.repo_type,
                              repo_url=self.repo_url)
                # check that the test we're probing still failed - still
                # awkward.
                found_fail = []

                def find_fail(test_dict):
                    if test_dict['id'] == spurious_failure:
                        found_fail.append(True)

                checker = testtools.StreamToDict(find_fail)
                checker.startTestRun()
                try:
                    self.repo.get_failing().get_test().run(checker)
                finally:
                    checker.stopTestRun()
                if found_fail:
                    # Our conflict is in bottom - clamp the range down.
                    top = bottom + check_width
                    if width == 1:
                        # found the cause
                        test_conflicts[spurious_failure] = candidate_causes[
                            bottom]
                        width = 0
                    else:
                        width = top - bottom
                else:
                    # Conflict in the range we did not run: discard bottom.
                    bottom = bottom + check_width
                    if width == 1:
                        # there will be no more to check, so we didn't
                        # reproduce the failure.
                        width = 0
                    else:
                        width = top - bottom
            if spurious_failure not in test_conflicts:
                # Could not determine cause
                test_conflicts[spurious_failure] = 'unknown - no conflicts'
        if test_conflicts:
            table = [('failing test', 'caused by test')]
            for failure in sorted(test_conflicts):
                causes = test_conflicts[failure]
                table.append((failure, causes))
            output.output_table(table)
            return 3
        return 0
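The while loop above is a half-interval search over the tests that ran before the spurious failure: each iteration re-runs the lower half of the remaining candidates together with the failing test, then either clamps ``top`` down (the failure reproduced, so the cause lies in the slice that ran) or raises ``bottom`` (the cause, if any, lies in the slice that was skipped). Here is a self-contained sketch of just that narrowing logic, with a hypothetical ``reproduces()`` callable standing in for the run-and-check-the-failing-stream machinery:

import math


def bisect_cause(candidate_causes, reproduces):
    """Return the candidate test that makes the failure reproduce.

    ``reproduces(subset)`` is a hypothetical callable that re-runs
    ``subset`` plus the failing test and reports whether the failure
    occurred again.  Returns None when the failure never reproduces.
    """
    bottom = 0
    top = len(candidate_causes)
    width = top - bottom
    while width:
        check_width = int(math.ceil(width / 2.0))
        if reproduces(candidate_causes[bottom:bottom + check_width]):
            # Failure reproduced: the cause is inside the slice we just ran.
            top = bottom + check_width
            if width == 1:
                return candidate_causes[bottom]
        else:
            # Failure did not reproduce: discard the slice we just ran.
            bottom = bottom + check_width
            if width == 1:
                # Nothing left to narrow down; the failure was not reproduced.
                return None
        width = top - bottom
    return None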
Example #5
def run(arguments):
    args = arguments[0]
    filters = arguments[1] or None
    try:
        repo = util.get_repo_open(args.repo_type, args.repo_url)
    # If a repo is not found but a testr config exists, just create the repo
    except repository.RepositoryNotFound:
        if not os.path.isfile(args.config):
            raise
        repo = util.get_repo_initialise(args.repo_type, args.repo_url)
    if args.no_discover:
        ids = args.no_discover
        if ids.find('/') != -1:
            root, _ = os.path.splitext(ids)
            ids = root.replace('/', '.')
        run_cmd = 'python -m subunit.run ' + ids

        def run_tests():
            run_proc = [('subunit', output.ReturnCodeToSubunit(
                subprocess.Popen(run_cmd, shell=True,
                                 stdout=subprocess.PIPE)))]
            return load.load((None, None), in_streams=run_proc,
                             partial=args.partial, subunit_out=args.subunit,
                             repo_type=args.repo_type,
                             repo_url=args.repo_url)

        if not args.until_failure:
            return run_tests()
        else:
            result = run_tests()
            while not result:
                result = run_tests()
            return result

    if args.failing or args.analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if args.load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(args.load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    conf = config_file.TestrConf(args.config)
    if not args.analyze_isolation:
        cmd = conf.get_run_command(args, ids, filters)
        if args.isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command(args, [test_id], filters)
                run_result = _run_tests(cmd, args.failing,
                                        args.analyze_isolation,
                                        args.isolated,
                                        args.until_failure,
                                        subunit_out=args.subunit)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd, args.failing, args.analyze_isolation,
                              args.isolated, args.until_failure,
                              subunit_out=args.subunit)
    else:
        # Where do we source data about the cause of conflicts?
        # XXX: Should instead capture the run id in with the failing test
        # data so that we can deal with failures split across many partial
        # runs.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command(args, [test_id])
            if not _run_tests(cmd):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly; why not just tell the system that
                # a pass here isn't a real pass? (So that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.)
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        # spurious-failure -> cause.
        test_conflicts = {}
        for spurious_failure in spurious_failures:
            candidate_causes = _prior_tests(
                latest_run, spurious_failure)
            bottom = 0
            top = len(candidate_causes)
            width = top - bottom
            while width:
                check_width = int(ceil(width / 2.0))
                # TODO(mtreinish): Add regex
                cmd = conf.get_run_command(
                    args,
                    candidate_causes[bottom:bottom + check_width]
                    + [spurious_failure])
                _run_tests(cmd)
                # check that the test we're probing still failed - still
                # awkward.
                found_fail = []

                def find_fail(test_dict):
                    if test_dict['id'] == spurious_failure:
                        found_fail.append(True)

                checker = testtools.StreamToDict(find_fail)
                checker.startTestRun()
                try:
                    repo.get_failing().get_test().run(checker)
                finally:
                    checker.stopTestRun()
                if found_fail:
                    # Our conflict is in bottom - clamp the range down.
                    top = bottom + check_width
                    if width == 1:
                        # found the cause
                        test_conflicts[
                            spurious_failure] = candidate_causes[bottom]
                        width = 0
                    else:
                        width = top - bottom
                else:
                    # Conflict in the range we did not run: discard bottom.
                    bottom = bottom + check_width
                    if width == 1:
                        # there will be no more to check, so we didn't
                        # reproduce the failure.
                        width = 0
                    else:
                        width = top - bottom
            if spurious_failure not in test_conflicts:
                # Could not determine cause
                test_conflicts[spurious_failure] = 'unknown - no conflicts'
        if test_conflicts:
            table = [('failing test', 'caused by test')]
            for failure, causes in test_conflicts.items():
                table.append((failure, causes))
            output.output_table(table)
            return 3
        return 0