Example #1
def run_command(config='.stestr.conf',
                repo_type='file',
                repo_url=None,
                test_path=None,
                top_dir=None,
                group_regex=None,
                failing=False,
                serial=False,
                concurrency=0,
                load_list=None,
                partial=False,
                subunit_out=False,
                until_failure=False,
                analyze_isolation=False,
                isolated=False,
                worker_path=None,
                blacklist_file=None,
                whitelist_file=None,
                black_regex=None,
                no_discover=False,
                random=False,
                combine=False,
                filters=None,
                pretty_out=True,
                color=False,
                stdout=sys.stdout,
                abbreviate=False,
                suppress_attachments=False):
    """Function to execute the run command

    This function implements the run command. It will run the tests specified
    by the provided config file and/or arguments, in the manner specified by
    those arguments. The results will be printed to STDOUT and loaded into
    the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool partial: DEPRECATED: Only some tests will be run. Implied by
        `--failing`. This flag is deprecated because it doesn't do anything;
        it will be removed in a future release.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str blacklist_file: Path to a blacklist file; this file contains a
        separate exclude regex on each line.
    :param str whitelist_file: Path to a whitelist file; this file contains a
        separate regex on each line.
    :param str black_regex: Test rejection regex. If a test case's name
        matches on the re.search() operation, it will be removed from the
        final test list.
    :param str no_discover: Takes in a single test_id to bypass test
        discovery and just execute the specified test. A file name may be
        used in place of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    if partial:
        warnings.warn('The partial flag is deprecated and has no effect '
                      'anymore')
    try:
        repo = util.get_repo_open(repo_type, repo_url)
    # If a repo is not found and a testr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            msg = ("No config file found and --test-path not specified. "
                   "Either create or specify a .stestr.conf or use "
                   "--test-path ")
            stdout.write(msg)
            exit(1)
        repo = util.get_repo_initialise(repo_type, repo_url)
    combine_id = None
    if combine:
        latest_id = repo.latest_id()
        combine_id = six.text_type(latest_id)
    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        run_cmd = 'python -m subunit.run ' + ids

        def run_tests():
            run_proc = [('subunit',
                         output.ReturnCodeToSubunit(
                             subprocess.Popen(run_cmd,
                                              shell=True,
                                              stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(ids,
                                   regexes=filters,
                                   group_regex=group_regex,
                                   repo_type=repo_type,
                                   repo_url=repo_url,
                                   serial=serial,
                                   worker_path=worker_path,
                                   concurrency=concurrency,
                                   blacklist_file=blacklist_file,
                                   whitelist_file=whitelist_file,
                                   black_regex=black_regex,
                                   top_dir=top_dir,
                                   test_path=test_path,
                                   randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command([test_id],
                                           filters,
                                           group_regex=group_regex,
                                           repo_type=repo_type,
                                           repo_url=repo_url,
                                           serial=serial,
                                           worker_path=worker_path,
                                           concurrency=concurrency,
                                           blacklist_file=blacklist_file,
                                           whitelist_file=whitelist_file,
                                           black_regex=black_regex,
                                           randomize=random,
                                           test_path=test_path,
                                           top_dir=top_dir)

                run_result = _run_tests(
                    cmd,
                    until_failure,
                    subunit_out=subunit_out,
                    combine_id=combine_id,
                    repo_type=repo_type,
                    repo_url=repo_url,
                    pretty_out=pretty_out,
                    color=color,
                    abbreviate=abbreviate,
                    stdout=stdout,
                    suppress_attachments=suppress_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd,
                              until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_type=repo_type,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments)
    else:
        # Where do we source data about the cause of conflicts.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command([test_id],
                                       group_regex=group_regex,
                                       repo_type=repo_type,
                                       repo_url=repo_url,
                                       serial=serial,
                                       worker_path=worker_path,
                                       concurrency=concurrency,
                                       blacklist_file=blacklist_file,
                                       whitelist_file=whitelist_file,
                                       black_regex=black_regex,
                                       randomize=random,
                                       test_path=test_path,
                                       top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly; why not just tell the system that
                # a pass here isn't a real pass? (So that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.)
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(latest_run,
                                                       conf,
                                                       _run_tests,
                                                       repo,
                                                       test_path=test_path,
                                                       top_dir=top_dir,
                                                       group_regex=group_regex,
                                                       repo_type=repo_type,
                                                       repo_url=repo_url,
                                                       serial=serial,
                                                       concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
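
For context, a minimal usage sketch of calling this function directly, assuming it is importable as stestr.commands.run.run_command as in the stestr source this example appears to be taken from; the filter value and keyword arguments shown are illustrative, not prescriptive:

# Minimal sketch: run tests whose ids match a regex and record the results
# in the local file repository. Assumes a .stestr.conf (or a test_path
# argument) is available; the filter regex is illustrative.
import sys

from stestr.commands.run import run_command

exit_code = run_command(filters=['tests.unit'],  # list of regex filters
                        concurrency=0,           # 0 = autodetect CPU count
                        pretty_out=True,
                        stdout=sys.stdout)
sys.exit(exit_code)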
Example #2
                    stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
                    for line in res:
                        line = line.encode('utf8')
                        stream.write("%s\n" % line)
                    stream.write('\n\n')
                    ADDPROP_FAIL.append(test)
                    break
        else:
            FAILS.append(test)
    elif status == 'success' or status == 'xfail':
        SUCCESS.append(test)
    elif status == 'skip':
        SKIPS.append(test)


stream = subunit.ByteStreamToStreamResult(sys.stdin, non_subunit_name='stdout')
outcome = testtools.StreamToDict(functools.partial(show_outcome, sys.stdout))
summary = testtools.StreamSummary()
result = testtools.CopyStreamResult([outcome, summary])
result.startTestRun()
try:
    stream.run(result)
finally:
    result.stopTestRun()

print("\n\n------------------------------------------------------------------")
print("%s Tests Failed" % len(FAILS))
print("%s Tests Failed with AdditionalProperties" % len(ADDPROP_FAIL))
print("%s Tests Skipped" % len(SKIPS))
print("%s Tests Passed" % len(SUCCESS))
print("To see the full details run this subunit stream through subunit-trace")
Example #3
            case = subunit.ByteStreamToStreamResult(stream,
                                                    non_subunit_name='stdout')
            decorate = functools.partial(mktagger, pos)
            case = testtools.DecorateTestCaseResult(case, decorate)
            yield (case, str(pos))

    if not run_id:
        inserter = repo.get_inserter()
    else:
        inserter = repo.get_inserter(run_id=run_id)

    retval = 0
    if serial:
        for stream in streams:
            # Calls StreamResult API.
            case = subunit.ByteStreamToStreamResult(stream,
                                                    non_subunit_name='stdout')
            result = _load_case(inserter, repo, case, subunit_out, pretty_out,
                                color, stdout, abbreviate,
                                suppress_attachments, all_attachments)
            if result or retval:
                retval = 1
            else:
                retval = 0
    else:
        case = testtools.ConcurrentStreamTestSuite(make_tests)
        retval = _load_case(inserter, repo, case, subunit_out, pretty_out,
                            color, stdout, abbreviate, suppress_attachments,
                            all_attachments)

    return retval
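
The fragment above is the tail of the load() function that Examples #1 and #4 invoke as load.load(...). A hedged sketch of calling it directly to import a stored subunit stream into the repository, assuming it is importable as stestr.commands.load.load and using only keyword arguments that appear in those calls (the file name is hypothetical):

# Sketch: import an existing subunit result stream into the stestr
# repository via load(); kwargs mirror the load.load() calls above.
import sys

from stestr.commands.load import load

with open('results.subunit', 'rb') as stream:  # hypothetical file name
    exit_code = load(in_streams=[('subunit', stream)],
                     subunit_out=False,
                     pretty_out=True,
                     color=False,
                     stdout=sys.stdout)
sys.exit(exit_code)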
Example #4
def run_command(config='.stestr.conf',
                repo_url=None,
                test_path=None,
                top_dir=None,
                group_regex=None,
                failing=False,
                serial=False,
                concurrency=0,
                load_list=None,
                subunit_out=False,
                until_failure=False,
                analyze_isolation=False,
                isolated=False,
                worker_path=None,
                exclude_list=None,
                include_list=None,
                exclude_regex=None,
                no_discover=False,
                random=False,
                combine=False,
                filters=None,
                pretty_out=True,
                color=False,
                stdout=sys.stdout,
                abbreviate=False,
                suppress_attachments=False,
                all_attachments=False,
                show_binary_attachments=True,
                pdb=False):
    """Function to execute the run command

    This function implements the run command. It will run the tests specified
    by the provided config file and/or arguments, in the manner specified by
    those arguments. The results will be printed to STDOUT and loaded into
    the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str exclude_list: Path to an exclusion list file; this file
        contains a separate exclude regex on each line.
    :param str include_list: Path to an inclusion list file; this file
        contains a separate regex on each line.
    :param str exclude_regex: Test rejection regex. If a test case's name
        matches on the re.search() operation, it will be removed from the
        final test list.
    :param str no_discover: Takes in a single test_id to bypass test
        discovery and just execute the specified test. A file name may be
        used in place of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.
    :param bool all_attachments: When set true subunit_trace will print all
        text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.
    :param str pdb: Takes in a single test_id to bypass test discovery and
        just execute the specified test without launching any additional
        processes. A file name may be used in place of a test name.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    # If a repo is not found and a stestr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            # If there is no config and no test-path
            if os.path.isfile('tox.ini'):
                tox_conf = configparser.SafeConfigParser()
                tox_conf.read('tox.ini')
                if not tox_conf.has_section('stestr'):
                    msg = ("No file found, --test-path not specified, and "
                           "stestr section not found in tox.ini. Either "
                           "create or specify a .stestr.conf, use "
                           "--test-path, or add an stestr section to the "
                           "tox.ini")
                    stdout.write(msg)
                    exit(1)
            else:
                msg = ("No config file found and --test-path not specified. "
                       "Either create or specify a .stestr.conf or use "
                       "--test-path ")
                stdout.write(msg)
                exit(1)
        try:
            repo = util.get_repo_initialise(repo_url=repo_url)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            repo_path = repo_url or './stestr'
            stdout.write('The specified repository directory %s already '
                         'exists. Please check if the repository already '
                         'exists or select a different path\n' % repo_path)
            return 1

    combine_id = None
    concurrency = _to_int(concurrency)

    if concurrency and concurrency < 0:
        msg = ("The provided concurrency value: %s is not valid. An integer "
               ">= 0 must be used.\n" % concurrency)
        stdout.write(msg)
        return 2
    if combine:
        latest_id = repo.latest_id()
        combine_id = str(latest_id)
    if no_discover and pdb:
        msg = ("--no-discover and --pdb are mutually exclusive options, "
               "only specify one at a time")
        stdout.write(msg)
        return 2
    if pdb and until_failure:
        msg = ("pdb mode does not function with the --until-failure flag, "
               "only specify one at a time")
        stdout.write(msg)
        return 2

    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        stestr_python = sys.executable
        if os.environ.get('PYTHON'):
            python_bin = os.environ.get('PYTHON')
        elif stestr_python:
            python_bin = stestr_python
        else:
            raise RuntimeError("The Python interpreter was not found and "
                               "PYTHON is not set")
        run_cmd = python_bin + ' -m stestr.subunit_runner.run ' + ids

        def run_tests():
            run_proc = [('subunit',
                         output.ReturnCodeToSubunit(
                             subprocess.Popen(run_cmd,
                                              shell=True,
                                              stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments,
                             all_attachments=all_attachments,
                             show_binary_attachments=show_binary_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

    if pdb:
        ids = pdb
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        runner = subunit_run.SubunitTestRunner
        stream = io.BytesIO()
        program.TestProgram(module=None,
                            argv=['stestr', ids],
                            testRunner=functools.partial(runner,
                                                         stdout=stream))
        stream.seek(0)
        run_proc = [('subunit', stream)]
        return load.load(in_streams=run_proc,
                         subunit_out=subunit_out,
                         repo_url=repo_url,
                         run_id=combine_id,
                         pretty_out=pretty_out,
                         color=color,
                         stdout=stdout,
                         abbreviate=abbreviate,
                         suppress_attachments=suppress_attachments,
                         all_attachments=all_attachments,
                         show_binary_attachments=show_binary_attachments)

    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    if config and os.path.isfile(config):
        conf = config_file.TestrConf(config)
    elif os.path.isfile('tox.ini'):
        conf = config_file.TestrConf('tox.ini', section='stestr')
    else:
        conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(ids,
                                   regexes=filters,
                                   group_regex=group_regex,
                                   repo_url=repo_url,
                                   serial=serial,
                                   worker_path=worker_path,
                                   concurrency=concurrency,
                                   exclude_list=exclude_list,
                                   include_list=include_list,
                                   exclude_regex=exclude_regex,
                                   top_dir=top_dir,
                                   test_path=test_path,
                                   randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command([test_id],
                                           filters,
                                           group_regex=group_regex,
                                           repo_url=repo_url,
                                           serial=serial,
                                           worker_path=worker_path,
                                           concurrency=concurrency,
                                           exclude_list=exclude_list,
                                           include_list=include_list,
                                           exclude_regex=exclude_regex,
                                           randomize=random,
                                           test_path=test_path,
                                           top_dir=top_dir)

                run_result = _run_tests(
                    cmd,
                    until_failure,
                    subunit_out=subunit_out,
                    combine_id=combine_id,
                    repo_url=repo_url,
                    pretty_out=pretty_out,
                    color=color,
                    abbreviate=abbreviate,
                    stdout=stdout,
                    suppress_attachments=suppress_attachments,
                    all_attachments=all_attachments,
                    show_binary_attachments=show_binary_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd,
                              until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments,
                              all_attachments=all_attachments,
                              show_binary_attachments=show_binary_attachments)
    else:
        # Where do we source data about the cause of conflicts.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command([test_id],
                                       group_regex=group_regex,
                                       repo_url=repo_url,
                                       serial=serial,
                                       worker_path=worker_path,
                                       concurrency=concurrency,
                                       exclude_list=exclude_list,
                                       include_list=include_list,
                                       exclude_regex=exclude_regex,
                                       randomize=random,
                                       test_path=test_path,
                                       top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly; why not just tell the system that
                # a pass here isn't a real pass? (So that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.)
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(latest_run,
                                                       conf,
                                                       _run_tests,
                                                       repo,
                                                       test_path=test_path,
                                                       top_dir=top_dir,
                                                       group_regex=group_regex,
                                                       repo_url=repo_url,
                                                       serial=serial,
                                                       concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
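
For comparison with Example #1, here is a minimal sketch of calling this newer signature (again assuming stestr.commands.run.run_command), showing the renamed exclusion option; the filter value and path are illustrative:

# Sketch: run tests matching a filter while excluding ids matched by the
# regexes listed in an exclusion list file (renamed from blacklist_file).
import sys

from stestr.commands.run import run_command

exit_code = run_command(filters=['unit'],
                        exclude_list='exclude-list.txt',  # illustrative path
                        concurrency=4,
                        random=True,
                        stdout=sys.stdout)
sys.exit(exit_code)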
Example #5
    def check_events(self, events):
        self.subunit.seek(0)
        eventstream = StreamResult()
        subunit.ByteStreamToStreamResult(self.subunit).run(eventstream)
        self.assertEqual(events, eventstream._events)
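
A hedged, standalone version of the same check: replay a stored subunit byte stream into a recording StreamResult double (assuming the StreamResult above is testtools.testresult.doubles.StreamResult, which records status() calls in _events):

# Sketch: replay a subunit v2 byte stream into a recording double and
# inspect the captured events, as the test method above does.
import io

import subunit
from testtools.testresult.doubles import StreamResult

raw = io.BytesIO(b'')      # a captured subunit v2 byte stream would go here
recorder = StreamResult()  # records every status() call in ._events
subunit.ByteStreamToStreamResult(raw).run(recorder)
print(recorder._events)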
Example #6
def trace(stdin,
          stdout,
          print_failures=False,
          failonly=False,
          enable_diff=False,
          abbreviate=False,
          color=False,
          post_fails=False,
          no_summary=False,
          suppress_attachments=False,
          all_attachments=False,
          show_binary_attachments=False):
    stream = subunit.ByteStreamToStreamResult(stdin, non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome,
                          stdout,
                          print_failures=print_failures,
                          failonly=failonly,
                          enable_diff=enable_diff,
                          abbreviate=abbreviate,
                          enable_color=color,
                          suppress_attachments=suppress_attachments,
                          all_attachments=all_attachments,
                          show_binary_attachments=show_binary_attachments))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    start_times = []
    stop_times = []
    for worker in RESULTS:
        start_times += [
            x['timestamps'][0] for x in RESULTS[worker]
            if x['timestamps'][0] is not None
        ]
        stop_times += [
            x['timestamps'][1] for x in RESULTS[worker]
            if x['timestamps'][1] is not None
        ]
    if not start_times:
        print("The test run didn't actually run any tests", file=sys.stderr)
        return 1
    start_time = min(start_times)
    stop_time = max(stop_times)
    elapsed_time = stop_time - start_time

    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests", file=sys.stderr)
        return 1
    if post_fails:
        print_fails(stdout)
    if not no_summary:
        print_summary(stdout, elapsed_time)

    # NOTE(mtreinish): Ideally this should live in testtools streamSummary
    # this is just in place until the behavior lands there (if it ever does)
    if count_tests('status', '^success$') == 0:
        print("\nNo tests were successful during the run", file=sys.stderr)
        return 1
    in_progress = get_stuck_in_progress()
    if in_progress:
        print(
            "\nThe following tests exited without returning a status\n"
            "and likely segfaulted or crashed Python:",
            file=sys.stderr)
        for test in in_progress:
            print("\n\t* %s" % test, file=sys.stderr)
        return 1
    return 0 if results.wasSuccessful(summary) else 1
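
To round off the listing, a hedged sketch of driving this trace() function directly, assuming it is stestr.subunit_trace.trace (whose signature matches the one above); the stream file name is hypothetical:

# Sketch: render a stored subunit stream as a human-readable trace using
# the trace() function shown above.
import sys

from stestr.subunit_trace import trace

with open('last-run.subunit', 'rb') as stream:  # hypothetical file name
    exit_code = trace(stream, sys.stdout, color=True)
sys.exit(exit_code)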