Example #1
def _get_run_details(stream_file, stdout):
    stream = subunit.ByteStreamToStreamResult(stream_file,
                                              non_subunit_name='stdout')
    global start_times
    global stop_times
    start_times = []
    stop_times = []

    def collect_data(stream, test):
        global start_times
        global stop_times
        start_times.append(test['timestamps'][0])
        stop_times.append(test['timestamps'][1])

    outcomes = testtools.StreamToDict(functools.partial(collect_data, stdout))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    successful = results.wasSuccessful(summary)
    if start_times and stop_times:
        start_time = min(start_times)
        stop_time = max(stop_times)
        run_time = subunit_trace.get_duration([start_time, stop_time])
    else:
        run_time = '---'
        successful = '---'
        start_time = '---'
    return {'passed': successful, 'runtime': run_time, 'start': start_time}
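For orientation, the sketch below drives _get_run_details with a small in-memory subunit v2 stream. It is a minimal, hypothetical usage example: it assumes the function's module-level imports (subunit, testtools, functools, and stestr's results and subunit_trace helpers) are already in scope, exactly as in the excerpt above.

# Minimal sketch: feed _get_run_details an in-memory subunit v2 stream
# containing a single passing test (hypothetical test id).
import io
import sys
from datetime import datetime, timezone

import subunit

raw = io.BytesIO()
writer = subunit.StreamResultToBytes(raw)
writer.startTestRun()
writer.status(test_id='demo.test_ok', test_status='inprogress',
              timestamp=datetime.now(timezone.utc))
writer.status(test_id='demo.test_ok', test_status='success',
              timestamp=datetime.now(timezone.utc))
writer.stopTestRun()
raw.seek(0)

details = _get_run_details(raw, sys.stdout)
print(details)  # e.g. {'passed': True, 'runtime': ..., 'start': ...}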
Example #2
def _run_tests(cmd,
               until_failure,
               subunit_out=False,
               combine_id=None,
               repo_type='file',
               repo_url=None,
               pretty_out=True,
               color=False,
               stdout=sys.stdout,
               abbreviate=False,
               suppress_attachments=False,
               all_attachments=False):
    """Run the tests cmd was parameterised with."""
    cmd.setUp()
    try:

        def run_tests():
            run_procs = [('subunit', output.ReturnCodeToSubunit(proc))
                         for proc in cmd.run_tests()]
            if not run_procs:
                stdout.write("The specified regex doesn't match with anything")
                return 1
            return load.load((None, None),
                             in_streams=run_procs,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments,
                             all_attachments=all_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    repo = util.get_repo_open(repo_type, repo_url)
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result
    finally:
        cmd.cleanUp()
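As a rough illustration, _run_tests expects a test command object such as the one returned by stestr's config_file.TestrConf.get_run_command (the same call pattern used in the run_command examples further down). A minimal, hypothetical driver, assuming a .stestr.conf and an initialised repository exist in the current directory:

import sys

from stestr import config_file

conf = config_file.TestrConf('.stestr.conf')
# The regex filter below is hypothetical; pass None to run everything.
cmd = conf.get_run_command(None, regexes=[r'^mypackage\.tests\.'])
ret = _run_tests(cmd, until_failure=False, pretty_out=True, stdout=sys.stdout)
print('exit code:', ret)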
Example #3
def trace(stdin,
          stdout,
          print_failures=False,
          failonly=False,
          enable_diff=False,
          abbreviate=False,
          color=False,
          post_fails=False,
          no_summary=False,
          suppress_attachments=False,
          all_attachments=False,
          show_binary_attachments=False):
    stream = subunit.ByteStreamToStreamResult(stdin, non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome,
                          stdout,
                          print_failures=print_failures,
                          failonly=failonly,
                          enable_diff=enable_diff,
                          abbreviate=abbreviate,
                          enable_color=color,
                          suppress_attachments=suppress_attachments,
                          all_attachments=all_attachments,
                          show_binary_attachments=show_binary_attachments))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    start_times = []
    stop_times = []
    for worker in RESULTS:
        start_times += [x['timestamps'][0] for x in RESULTS[worker]]
        stop_times += [x['timestamps'][1] for x in RESULTS[worker]]
    start_time = min(start_times)
    stop_time = max(stop_times)
    elapsed_time = stop_time - start_time

    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests", file=sys.stderr)
        return 1
    if post_fails:
        print_fails(stdout)
    if not no_summary:
        print_summary(stdout, elapsed_time)

    # NOTE(mtreinish): Ideally this should live in testtools streamSummary
    # this is just in place until the behavior lands there (if it ever does)
    if count_tests('status', '^success$') == 0:
        print("\nNo tests were successful during the run", file=sys.stderr)
        return 1
    return 0 if results.wasSuccessful(summary) else 1
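A minimal usage sketch for trace(), reading a previously captured subunit v2 stream from disk (hypothetical file name; assumes the module-level pieces the function relies on, such as show_outcome, RESULTS, count_tests, print_fails and print_summary from stestr's subunit_trace, are in scope as in the excerpt above):

import sys

# 'last_run.subunit' is a hypothetical path to a raw subunit v2 stream.
with open('last_run.subunit', 'rb') as stream:
    exit_code = trace(stream, sys.stdout,
                      print_failures=True,
                      color=False,
                      no_summary=False)
sys.exit(exit_code)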
Example #4
        output_result = results.CLITestResult(inserter.get_id, stdout,
                                              previous_run)
        summary_result = output_result.get_summary()
    result = testtools.CopyStreamResult([inserter, output_result])
    result.startTestRun()
    try:
        case.run(result)
    finally:
        result.stopTestRun()
    if pretty_out and not subunit_out:
        start_times = []
        stop_times = []
        for worker in subunit_trace.RESULTS:
            for test in subunit_trace.RESULTS[worker]:
                if not test['timestamps'][0] or not test['timestamps'][1]:
                    continue
                start_times.append(test['timestamps'][0])
                stop_times.append(test['timestamps'][1])
        if not start_times or not stop_times:
            sys.stderr.write("\nNo tests were successful during the run")
            return 1
        start_time = min(start_times)
        stop_time = max(stop_times)
        elapsed_time = stop_time - start_time
        subunit_trace.print_fails(stdout)
        subunit_trace.print_summary(stdout, elapsed_time)
    if not results.wasSuccessful(summary_result):
        return 1
    else:
        return 0
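The timestamp aggregation in the pretty-output branch above can be seen in isolation below: given per-worker lists of test dicts with (start, stop) timestamp pairs, the wall-clock elapsed time is max(stop) - min(start). The data here is synthetic, not stestr's real RESULTS structure:

from datetime import datetime, timedelta, timezone

now = datetime.now(timezone.utc)
results_by_worker = {
    0: [{'timestamps': [now, now + timedelta(seconds=2)]}],
    1: [{'timestamps': [now + timedelta(seconds=1), now + timedelta(seconds=5)]}],
}
start_times = [t['timestamps'][0] for w in results_by_worker
               for t in results_by_worker[w]]
stop_times = [t['timestamps'][1] for w in results_by_worker
              for t in results_by_worker[w]]
print(max(stop_times) - min(start_times))  # 0:00:05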
Example #5
def run_command(config='.stestr.conf',
                repo_type='file',
                repo_url=None,
                test_path=None,
                top_dir=None,
                group_regex=None,
                failing=False,
                serial=False,
                concurrency=0,
                load_list=None,
                partial=False,
                subunit_out=False,
                until_failure=False,
                analyze_isolation=False,
                isolated=False,
                worker_path=None,
                blacklist_file=None,
                whitelist_file=None,
                black_regex=None,
                no_discover=False,
                random=False,
                combine=False,
                filters=None,
                pretty_out=True,
                color=False,
                stdout=sys.stdout,
                abbreviate=False,
                suppress_attachments=False):
    """Function to execute the run command

    This function implements the run command. It runs the tests selected by the
    parameters, based on the provided config file and/or the arguments, in the
    manner those arguments specify. The results are printed to STDOUT and
    loaded into the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool partial: DEPRECATED: Only some tests will be run. Implied by
        `--failing`. This flag is deprecated because it doesn't do anything;
        it will be removed in a future release.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str blacklist_file: Path to a blacklist file; this file contains a
        separate exclusion regex on each line.
    :param str whitelist_file: Path to a whitelist file; this file contains a
        separate regex on each line.
    :param str black_regex: Test rejection regex. If a test case's name matches
        on a re.search() operation, it will be removed from the final test list.
    :param str no_discover: Takes in a single test_id to bypass test
        discovery and just execute the specified test. A file name may be used
        in place of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    if partial:
        warnings.warn('The partial flag is deprecated and has no effect '
                      'anymore')
    try:
        repo = util.get_repo_open(repo_type, repo_url)
    # If a repo is not found, and a testr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            msg = ("No config file found and --test-path not specified. "
                   "Either create or specify a .stestr.conf or use "
                   "--test-path ")
            stdout.write(msg)
            exit(1)
        repo = util.get_repo_initialise(repo_type, repo_url)
    combine_id = None
    if combine:
        latest_id = repo.latest_id()
        combine_id = six.text_type(latest_id)
    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        run_cmd = 'python -m subunit.run ' + ids

        def run_tests():
            run_proc = [('subunit',
                         output.ReturnCodeToSubunit(
                             subprocess.Popen(run_cmd,
                                              shell=True,
                                              stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(ids,
                                   regexes=filters,
                                   group_regex=group_regex,
                                   repo_type=repo_type,
                                   repo_url=repo_url,
                                   serial=serial,
                                   worker_path=worker_path,
                                   concurrency=concurrency,
                                   blacklist_file=blacklist_file,
                                   whitelist_file=whitelist_file,
                                   black_regex=black_regex,
                                   top_dir=top_dir,
                                   test_path=test_path,
                                   randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command([test_id],
                                           filters,
                                           group_regex=group_regex,
                                           repo_type=repo_type,
                                           repo_url=repo_url,
                                           serial=serial,
                                           worker_path=worker_path,
                                           concurrency=concurrency,
                                           blacklist_file=blacklist_file,
                                           whitelist_file=whitelist_file,
                                           black_regex=black_regex,
                                           randomize=random,
                                           test_path=test_path,
                                           top_dir=top_dir)

                run_result = _run_tests(
                    cmd,
                    until_failure,
                    subunit_out=subunit_out,
                    combine_id=combine_id,
                    repo_type=repo_type,
                    repo_url=repo_url,
                    pretty_out=pretty_out,
                    color=color,
                    abbreviate=abbreviate,
                    stdout=stdout,
                    suppress_attachments=suppress_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd,
                              until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_type=repo_type,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments)
    else:
        # Where do we source data about the cause of conflicts.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command([test_id],
                                       group_regex=group_regex,
                                       repo_type=repo_type,
                                       repo_url=repo_url,
                                       serial=serial,
                                       worker_path=worker_path,
                                       concurrency=concurrency,
                                       blacklist_file=blacklist_file,
                                       whitelist_file=whitelist_file,
                                       black_regex=black_regex,
                                       randomize=random,
                                       test_path=test_path,
                                       top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly, why not just tell the system that
                # a pass here isn't a real pass? (So that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.)
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(latest_run,
                                                       conf,
                                                       _run_tests,
                                                       repo,
                                                       test_path=test_path,
                                                       top_dir=top_dir,
                                                       group_regex=group_regex,
                                                       repo_type=repo_type,
                                                       repo_url=repo_url,
                                                       serial=serial,
                                                       concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
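A minimal, hypothetical invocation of this run_command variant (the filter regex and paths are placeholders; the function is assumed importable from the surrounding stestr-based module):

import sys

ret = run_command(config='.stestr.conf',
                  filters=[r'^mypackage\.tests\.'],  # hypothetical filter
                  concurrency=0,      # autodetect CPU count
                  pretty_out=True,
                  color=False,
                  stdout=sys.stdout)
sys.exit(ret)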
Example #6
    case = latest_run.get_test()
    try:
        if repo_type == 'file':
            previous_run = repo.get_test_run(repo.latest_id() - 1)
        # TODO(mtreinish): add a repository api to get the previous_run to
        # unify this logic
        else:
            previous_run = None
    except KeyError:
        previous_run = None
    failed = False
    if not pretty_out:
        output_result = results.CLITestResult(latest_run.get_id, stdout,
                                              previous_run)
        summary = output_result.get_summary()
        output_result.startTestRun()
        try:
            case.run(output_result)
        finally:
            output_result.stopTestRun()
        failed = not results.wasSuccessful(summary)
    else:
        stream = latest_run.get_subunit_stream()
        failed = subunit_trace.trace(stream, stdout, post_fails=True,
                                     color=color,
                                     suppress_attachments=suppress_attachments)
    if failed:
        return 1
    else:
        return 0
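The pretty-output branch above, replaying the most recent run through subunit_trace, can be sketched standalone like this (an assumption-laden sketch: it presumes stestr's repository util and subunit_trace modules as referenced in these excerpts, and a file repository already initialised in the current directory; repo_url=None falls back to the default repository path):

import sys

from stestr import subunit_trace
from stestr.repository import util

repo = util.get_repo_open(repo_url=None)
stream = repo.get_latest_run().get_subunit_stream()
failed = subunit_trace.trace(stream, sys.stdout, post_fails=True, color=False)
sys.exit(1 if failed else 0)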
Example #7
def run_command(config='.stestr.conf',
                repo_url=None,
                test_path=None,
                top_dir=None,
                group_regex=None,
                failing=False,
                serial=False,
                concurrency=0,
                load_list=None,
                subunit_out=False,
                until_failure=False,
                analyze_isolation=False,
                isolated=False,
                worker_path=None,
                exclude_list=None,
                include_list=None,
                exclude_regex=None,
                no_discover=False,
                random=False,
                combine=False,
                filters=None,
                pretty_out=True,
                color=False,
                stdout=sys.stdout,
                abbreviate=False,
                suppress_attachments=False,
                all_attachments=False,
                show_binary_attachments=True,
                pdb=False):
    """Function to execute the run command

    This function implements the run command. It runs the tests selected by the
    parameters, based on the provided config file and/or the arguments, in the
    manner those arguments specify. The results are printed to STDOUT and
    loaded into the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str exclude_list: Path to an exclusion list file; this file
        contains a separate exclusion regex on each line.
    :param str include_list: Path to an inclusion list file; this file
        contains a separate regex on each line.
    :param str exclude_regex: Test rejection regex. If a test case's name
        matches on a re.search() operation, it will be removed from the final
        test list.
    :param str no_discover: Takes in a single test_id to bypass test
        discovery and just execute the specified test. A file name may be used
        in place of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.
    :param bool all_attachments: When set true subunit_trace will print all
        text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.
    :param str pdb: Takes in a single test_id to bypass test
        discovery and just execute the specified test without launching any
        additional processes. A file name may be used in place of a test name.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    # If a repo is not found, and a stestr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            # If there is no config and no test-path
            if os.path.isfile('tox.ini'):
                tox_conf = configparser.SafeConfigParser()
                tox_conf.read('tox.ini')
                if not tox_conf.has_section('stestr'):
                    msg = ("No file found, --test-path not specified, and "
                           "stestr section not found in tox.ini. Either "
                           "create or specify a .stestr.conf, use "
                           "--test-path, or add an stestr section to the "
                           "tox.ini")
                    stdout.write(msg)
                    exit(1)
            else:
                msg = ("No config file found and --test-path not specified. "
                       "Either create or specify a .stestr.conf or use "
                       "--test-path ")
                stdout.write(msg)
                exit(1)
        try:
            repo = util.get_repo_initialise(repo_url=repo_url)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            repo_path = repo_url or './stestr'
            stdout.write('The specified repository directory %s already '
                         'exists. Please check if the repository already '
                         'exists or select a different path\n' % repo_path)
            return 1

    combine_id = None
    concurrency = _to_int(concurrency)

    if concurrency and concurrency < 0:
        msg = ("The provided concurrency value: %s is not valid. An integer "
               ">= 0 must be used.\n" % concurrency)
        stdout.write(msg)
        return 2
    if combine:
        latest_id = repo.latest_id()
        combine_id = str(latest_id)
    if no_discover and pdb:
        msg = ("--no-discover and --pdb are mutually exclusive options, "
               "only specify one at a time")
        stdout.write(msg)
        return 2
    if pdb and until_failure:
        msg = ("pdb mode does not function with the --until-failure flag, "
               "only specify one at a time")
        stdout.write(msg)
        return 2

    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        stestr_python = sys.executable
        if os.environ.get('PYTHON'):
            python_bin = os.environ.get('PYTHON')
        elif stestr_python:
            python_bin = stestr_python
        else:
            raise RuntimeError("The Python interpreter was not found and "
                               "PYTHON is not set")
        run_cmd = python_bin + ' -m stestr.subunit_runner.run ' + ids

        def run_tests():
            run_proc = [('subunit',
                         output.ReturnCodeToSubunit(
                             subprocess.Popen(run_cmd,
                                              shell=True,
                                              stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments,
                             all_attachments=all_attachments,
                             show_binary_attachments=show_binary_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

    if pdb:
        ids = pdb
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        runner = subunit_run.SubunitTestRunner
        stream = io.BytesIO()
        program.TestProgram(module=None,
                            argv=['stestr', ids],
                            testRunner=functools.partial(runner,
                                                         stdout=stream))
        stream.seek(0)
        run_proc = [('subunit', stream)]
        return load.load(in_streams=run_proc,
                         subunit_out=subunit_out,
                         repo_url=repo_url,
                         run_id=combine_id,
                         pretty_out=pretty_out,
                         color=color,
                         stdout=stdout,
                         abbreviate=abbreviate,
                         suppress_attachments=suppress_attachments,
                         all_attachments=all_attachments,
                         show_binary_attachments=show_binary_attachments)

    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    if config and os.path.isfile(config):
        conf = config_file.TestrConf(config)
    elif os.path.isfile('tox.ini'):
        conf = config_file.TestrConf('tox.ini', section='stestr')
    else:
        conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(ids,
                                   regexes=filters,
                                   group_regex=group_regex,
                                   repo_url=repo_url,
                                   serial=serial,
                                   worker_path=worker_path,
                                   concurrency=concurrency,
                                   exclude_list=exclude_list,
                                   include_list=include_list,
                                   exclude_regex=exclude_regex,
                                   top_dir=top_dir,
                                   test_path=test_path,
                                   randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command([test_id],
                                           filters,
                                           group_regex=group_regex,
                                           repo_url=repo_url,
                                           serial=serial,
                                           worker_path=worker_path,
                                           concurrency=concurrency,
                                           exclude_list=exclude_list,
                                           include_list=include_list,
                                           exclude_regex=exclude_regex,
                                           randomize=random,
                                           test_path=test_path,
                                           top_dir=top_dir)

                run_result = _run_tests(
                    cmd,
                    until_failure,
                    subunit_out=subunit_out,
                    combine_id=combine_id,
                    repo_url=repo_url,
                    pretty_out=pretty_out,
                    color=color,
                    abbreviate=abbreviate,
                    stdout=stdout,
                    suppress_attachments=suppress_attachments,
                    all_attachments=all_attachments,
                    show_binary_attachments=show_binary_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd,
                              until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments,
                              all_attachments=all_attachments,
                              show_binary_attachments=show_binary_attachments)
    else:
        # Where do we source data about the cause of conflicts.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command([test_id],
                                       group_regex=group_regex,
                                       repo_url=repo_url,
                                       serial=serial,
                                       worker_path=worker_path,
                                       concurrency=concurrency,
                                       exclude_list=exclude_list,
                                       include_list=include_list,
                                       exclude_regex=exclude_regex,
                                       randomize=random,
                                       test_path=test_path,
                                       top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly, why not just tell the system that
                # a pass here isn't a real pass? (So that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.)
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(latest_run,
                                                       conf,
                                                       _run_tests,
                                                       repo,
                                                       test_path=test_path,
                                                       top_dir=top_dir,
                                                       group_regex=group_regex,
                                                       repo_url=repo_url,
                                                       serial=serial,
                                                       concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
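And a minimal, hypothetical call of the newer run_command variant, selecting tests with an exclusion-list file (placeholder paths and regexes; the function is assumed importable from the surrounding stestr-based module):

import sys

ret = run_command(config='.stestr.conf',
                  exclude_list='exclude-list.txt',  # one regex per line
                  filters=[r'^mypackage\.tests\.unit\.'],
                  concurrency=4,
                  color=True,
                  stdout=sys.stdout)
sys.exit(ret)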