def test_shows_last_run_first_run(self):
     ui, cmd = self.get_test_ui_and_cmd()
     cmd.repository_factory = memory.RepositoryFactory()
     repo = cmd.repository_factory.initialise(ui.here)
     inserter = repo.get_inserter()
     inserter.startTestRun()
     inserter.status(test_id='failing', test_status='fail')
     inserter.status(test_id='ok', test_status='success')
     inserter.stopTestRun()
     id = inserter.get_id()
     self.assertEqual(1, cmd.execute())
     # We should have seen test outputs (of the failure) and summary data.
     self.assertEqual([('results', Wildcard),
                       ('summary', False, 2, None, Wildcard, Wildcard, [
                           ('id', id, None), ('failures', 1, None)
                       ])], ui.outputs)
     suite = ui.outputs[0][1]
     result = testtools.StreamSummary()
     result.startTestRun()
     try:
         suite.run(result)
     finally:
         result.stopTestRun()
     self.assertEqual(1, len(result.errors))
     self.assertEqual(2, result.testsRun)
 def test_load_new_shows_test_failure_details(self):
     if v2_avail:
         buffer = BytesIO()
         stream = subunit.StreamResultToBytes(buffer)
         stream.status(test_id='foo', test_status='inprogress')
         stream.status(test_id='foo',
                       test_status='fail',
                       file_name="traceback",
                       mime_type='text/plain;charset=utf8',
                       file_bytes=b'arg\n')
         subunit_bytes = buffer.getvalue()
     else:
         subunit_bytes = b'test: foo\nfailure: foo [\narg\n]\n'
     ui = UI([('subunit', subunit_bytes)])
     cmd = load.load(ui)
     ui.set_command(cmd)
     cmd.repository_factory = memory.RepositoryFactory()
     cmd.repository_factory.initialise(ui.here)
     self.assertEqual(1, cmd.execute())
     suite = ui.outputs[0][1]
     self.assertEqual([('results', Wildcard),
                       ('summary', False, 1, None, Wildcard, None, [
                           ('id', 0, None), ('failures', 1, None)
                       ])], ui.outputs)
     result = testtools.StreamSummary()
     result.startTestRun()
     try:
         suite.run(result)
     finally:
         result.stopTestRun()
     self.assertEqual(1, result.testsRun)
     self.assertEqual(1, len(result.errors))
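Both tests above exercise the same consumption pattern: serialize a few subunit v2 status events, replay them through a stream parser, and let testtools.StreamSummary tally the outcomes. A minimal standalone sketch of that pattern, assuming only the subunit and testtools packages (the demo test ids are illustrative):

from io import BytesIO

import subunit
import testtools

buf = BytesIO()
writer = subunit.StreamResultToBytes(buf)
writer.status(test_id='demo.ok', test_status='inprogress')
writer.status(test_id='demo.ok', test_status='success')
writer.status(test_id='demo.bad', test_status='inprogress')
writer.status(test_id='demo.bad', test_status='fail')

buf.seek(0)
summary = testtools.StreamSummary()
summary.startTestRun()
try:
    subunit.ByteStreamToStreamResult(buf).run(summary)
finally:
    summary.stopTestRun()

print(summary.testsRun)         # 2
print(len(summary.errors))      # 1 -- the 'fail' status lands in .errors
print(summary.wasSuccessful())  # False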
Example #3
def _run_tests(cmd,
               failing,
               analyze_isolation,
               isolated,
               until_failure,
               subunit_out=False,
               combine_id=None,
               repo_type='file',
               repo_url=None,
               pretty_out=True,
               color=False,
               stdout=sys.stdout,
               abbreviate=False,
               suppress_attachments=False):
    """Run the tests cmd was parameterised with."""
    cmd.setUp()
    try:

        def run_tests():
            run_procs = [('subunit', output.ReturnCodeToSubunit(proc))
                         for proc in cmd.run_tests()]
            if not run_procs:
                stdout.write("The specified regex doesn't match with anything")
                return 1
            return load.load((None, None),
                             in_streams=run_procs,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    repo = util.get_repo_open(repo_type, repo_url)
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not summary.wasSuccessful():
                        result = 1
                if result:
                    return result
    finally:
        cmd.cleanUp()
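The comment inside the loop is the key point: with subunit output enabled, load() always returns 0, so the loop re-reads the latest run from the repository and summarizes it itself. That check reads naturally as a small helper; a sketch, assuming a repository object exposing the get_latest_run() API used above:

import subunit
import testtools


def latest_run_failed(repo):
    """Return True if the most recent run stored in repo has failures."""
    summary = testtools.StreamSummary()
    stream = subunit.ByteStreamToStreamResult(
        repo.get_latest_run().get_subunit_stream())
    summary.startTestRun()
    try:
        stream.run(summary)
    finally:
        summary.stopTestRun()
    return not summary.wasSuccessful()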
Example #4
 def _make_result(self, repo):
     testcommand = self.command_factory(self.ui, repo)
     if self.ui.options.list:
         list_result = testtools.StreamSummary()
         return list_result, list_result
     else:
         return self.ui.make_result(repo.latest_id, testcommand)
Example #5
 def __init__(self,
              stream_file,
              attachments=False,
              attr_regex=None,
              targets=None,
              use_wall_time=False,
              non_subunit_name=None):
     if targets is None:
         targets = []
     else:
         targets = targets[:]
     self.use_wall_time = use_wall_time
     self.stream_file = stream_file
     self.stream = subunit.ByteStreamToStreamResult(
         self.stream_file, non_subunit_name=non_subunit_name)
     starts = testtools.StreamResult()
     summary = testtools.StreamSummary()
     outcomes = testtools.StreamToDict(functools.partial(
         self.parse_outcome))
     targets.extend([starts, outcomes, summary])
     self.result = testtools.CopyStreamResult(targets)
     self.results = {}
     self.attachments = attachments
     if attr_regex:
         self.attr_regex = re.compile(attr_regex)
     # NOTE(mtreinish): Default to the previous implicit regex if None is
     # specified for backwards compat
     else:
          self.attr_regex = re.compile(r'\[(.*)\]')
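The constructor above fans one event stream out to several consumers with testtools.CopyStreamResult: a plain StreamResult, a StreamToDict that calls back with one dict per completed test, and a StreamSummary. The fan-out works without any byte stream at all, which makes it easy to see what each consumer receives; a minimal sketch, assuming only testtools (the test id and the collect() callback are illustrative):

import functools

import testtools

collected = []


def collect(test_dict):
    # StreamToDict hands over one dict per test, with 'id', 'status',
    # 'timestamps', 'tags' and 'details' keys.
    collected.append(test_dict)


summary = testtools.StreamSummary()
outcomes = testtools.StreamToDict(functools.partial(collect))
result = testtools.CopyStreamResult([outcomes, summary])

result.startTestRun()
try:
    result.status(test_id='demo.case', test_status='inprogress')
    result.status(test_id='demo.case', test_status='success')
finally:
    result.stopTestRun()

print(collected[0]['status'])   # 'success'
print(summary.wasSuccessful())  # True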
Example #6
def _get_run_details(stream_file, stdout):
    stream = subunit.ByteStreamToStreamResult(stream_file,
                                              non_subunit_name='stdout')
    global start_times
    global stop_times
    start_times = []
    stop_times = []

    def collect_data(stream, test):
        global start_times
        global stop_times
        start_times.append(test['timestamps'][0])
        stop_times.append(test['timestamps'][1])

    outcomes = testtools.StreamToDict(functools.partial(collect_data, stdout))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    successful = results.wasSuccessful(summary)
    if start_times and stop_times:
        start_time = min(start_times)
        stop_time = max(stop_times)
        run_time = subunit_trace.get_duration([start_time, stop_time])
    else:
        run_time = '---'
        successful = '---'
        start_time = '---'
    return {'passed': successful, 'runtime': run_time, 'start': start_time}
def main():
    args = parse_args()
    stream = subunit.ByteStreamToStreamResult(sys.stdin,
                                              non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome,
                          sys.stdout,
                          print_failures=args.print_failures,
                          failonly=args.failonly))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(sys.stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    start_time = datetime.datetime.utcnow()
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    stop_time = datetime.datetime.utcnow()
    elapsed_time = stop_time - start_time

    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        exit(1)
    if args.post_fails:
        print_fails(sys.stdout)
    print_summary(sys.stdout, elapsed_time)
    exit(0 if summary.wasSuccessful() else 1)
Example #8
def make_result(get_id, output=sys.stdout):
    serializer = subunit.StreamResultToBytes(output)
    # Bypass user transforms - just forward it all,
    result = serializer
    # and interpret everything as success.
    summary = testtools.StreamSummary()
    summary.startTestRun()
    summary.stopTestRun()
    return result, summary
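In this pass-through mode the returned summary is started and stopped immediately and never receives an event, so it always reports success; callers are expected to judge the run from the forwarded subunit bytes instead. A small usage sketch (the BytesIO sink and the test id are illustrative; get_id is unused in this mode):

from io import BytesIO

sink = BytesIO()
result, summary = make_result(get_id=lambda: 0, output=sink)
result.status(test_id='demo.case', test_status='inprogress')
result.status(test_id='demo.case', test_status='fail')

print(summary.wasSuccessful())    # True -- the summary saw no events
print(len(sink.getvalue()) > 0)   # True -- the events went out as subunit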
Example #9
def trace(stdin,
          stdout,
          print_failures=False,
          failonly=False,
          enable_diff=False,
          abbreviate=False,
          color=False,
          post_fails=False,
          no_summary=False,
          suppress_attachments=False,
          all_attachments=False,
          show_binary_attachments=False):
    stream = subunit.ByteStreamToStreamResult(stdin, non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome,
                          stdout,
                          print_failures=print_failures,
                          failonly=failonly,
                          enable_diff=enable_diff,
                          abbreviate=abbreviate,
                          enable_color=color,
                          suppress_attachments=suppress_attachments,
                          all_attachments=all_attachments,
                          show_binary_attachments=show_binary_attachments))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    start_times = []
    stop_times = []
    for worker in RESULTS:
        start_times += [x['timestamps'][0] for x in RESULTS[worker]]
        stop_times += [x['timestamps'][1] for x in RESULTS[worker]]
    start_time = min(start_times)
    stop_time = max(stop_times)
    elapsed_time = stop_time - start_time

    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests", file=sys.stderr)
        return 1
    if post_fails:
        print_fails(stdout)
    if not no_summary:
        print_summary(stdout, elapsed_time)

    # NOTE(mtreinish): Ideally this should live in testtools StreamSummary;
    # this is just in place until the behavior lands there (if it ever does)
    if count_tests('status', '^success$') == 0:
        print("\nNo tests were successful during the run", file=sys.stderr)
        return 1
    return 0 if results.wasSuccessful(summary) else 1
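trace() consumes a subunit v2 byte stream and prints a human-readable report, so it is normally wired straight to binary stdin in a console entry point. A minimal sketch of such an entry point, assuming trace() and its module-level helpers (RESULTS, count_tests, print_fails, print_summary, show_outcome) live in the same module:

import sys


def main():
    return trace(sys.stdin.buffer, sys.stdout,
                 print_failures=True,
                 abbreviate=False,
                 color=False)


if __name__ == '__main__':
    sys.exit(main())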
Example #10
 def get_failing(self, repo):
     """Analyze a failing stream from repo and return it."""
     run = repo.get_failing()
     analyzer = testtools.StreamSummary()
     analyzer.startTestRun()
     try:
         run.get_test().run(analyzer)
     finally:
         analyzer.stopTestRun()
     return analyzer
Example #11
 def get_last_run(self, repo):
     """Return the results from a stream."""
     run = repo.get_test_run(repo.latest_id())
     analyzer = testtools.StreamSummary()
     analyzer.startTestRun()
     try:
         run.get_test().run(analyzer)
     finally:
         analyzer.stopTestRun()
     return analyzer
Example #12
def _make_result(repo, list_tests=False):
    if list_tests:
        list_result = testtools.StreamSummary()
        return list_result, list_result
    else:

        def _get_id():
            return repo.get_latest_run().get_id()

        output_result = results.CLITestResult(_get_id, sys.stdout, None)
        summary_result = output_result.get_summary()
        return output_result, summary_result
Example #13
def main():
    stream = subunit.ByteStreamToStreamResult(sys.stdin,
                                              non_subunit_name='stdout')
    starts = Starts(sys.stdout)
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome, sys.stdout))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([starts, outcomes, summary])
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    print_summary(sys.stdout)
    return (0 if summary.wasSuccessful() else 1)
Example #14
    def _check_subunit(self, output_stream):
        stream = subunit_lib.ByteStreamToStreamResult(output_stream)
        starts = testtools.StreamResult()
        summary = testtools.StreamSummary()
        tests = []

        def _add_dict(test):
            tests.append(test)

        outcomes = testtools.StreamToDict(functools.partial(_add_dict))
        result = testtools.CopyStreamResult([starts, outcomes, summary])
        result.startTestRun()
        try:
            stream.run(result)
        finally:
            result.stopTestRun()
        self.assertThat(len(tests), testtools.matchers.GreaterThan(0))
Example #15
def trace(stdin,
          stdout,
          print_failures=False,
          failonly=False,
          enable_diff=False,
          abbreviate=False,
          color=False,
          post_fails=False,
          no_summary=False):
    stream = subunit.ByteStreamToStreamResult(stdin, non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome,
                          stdout,
                          print_failures=print_failures,
                          failonly=failonly,
                          enable_diff=enable_diff,
                          abbreviate=abbreviate,
                          enable_color=color))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    start_time = datetime.datetime.utcnow()
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    stop_time = datetime.datetime.utcnow()
    elapsed_time = stop_time - start_time

    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        return 1
    if post_fails:
        print_fails(stdout)
    if not no_summary:
        print_summary(stdout, elapsed_time)

    # NOTE(mtreinish): Ideally this should live in testtools StreamSummary;
    # this is just in place until the behavior lands there (if it ever does)
    if count_tests('status', '^success$') == 0:
        print("\nNo tests were successful during the run")
        return 1
    return 0 if summary.wasSuccessful() else 1
Example #16
 def test_get_test_from_test_run(self):
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('testrepository.tests.test_repository.Case.method', True).run(legacy_result)
     legacy_result.stopTestRun()
     inserted = result.get_id()
     run = repo.get_test_run(inserted)
     test = run.get_test()
     result = testtools.StreamSummary()
     result.startTestRun()
     try:
         test.run(result)
     finally:
         result.stopTestRun()
     self.assertEqual(1, result.testsRun)
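The decorator used above, testtools.ExtendedToStreamDecorator, bridges the classic TestResult API to the streaming one, which means a plain unittest TestCase can be run straight into a StreamSummary. A minimal sketch of that bridge on its own (the PassingCase class is illustrative):

import unittest

import testtools


class PassingCase(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)


summary = testtools.StreamSummary()
legacy = testtools.ExtendedToStreamDecorator(summary)
legacy.startTestRun()
try:
    PassingCase('test_ok').run(legacy)
finally:
    legacy.stopTestRun()

print(summary.testsRun)         # 1
print(summary.wasSuccessful())  # True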
Example #17
    def assertRunExit(self, cmd, expected, subunit=False, stdin=None):
        if stdin:
            p = subprocess.Popen("%s" % cmd,
                                 shell=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            out, err = p.communicate(stdin)
        else:
            p = subprocess.Popen("%s" % cmd,
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            out, err = p.communicate()

        if not subunit:
            self.assertEqual(p.returncode, expected,
                             "Stdout: %s; Stderr: %s" % (out, err))
            return (out, err)
        else:
            self.assertEqual(
                p.returncode, expected,
                "Expected return code: %s doesn't match actual "
                "return code of: %s" % (expected, p.returncode))
            output_stream = io.BytesIO(out)
            stream = subunit_lib.ByteStreamToStreamResult(output_stream)
            starts = testtools.StreamResult()
            summary = testtools.StreamSummary()
            tests = []

            def _add_dict(test):
                tests.append(test)

            outcomes = testtools.StreamToDict(functools.partial(_add_dict))
            result = testtools.CopyStreamResult([starts, outcomes, summary])
            result.startTestRun()
            try:
                stream.run(result)
            finally:
                result.stopTestRun()
            self.assertThat(len(tests), testtools.matchers.GreaterThan(0))
            return (out, err)
Example #18
 def make_result(self, get_id, test_command, previous_run=None):
     if getattr(self.options, 'subunit', False):
         serializer = subunit.StreamResultToBytes(self._stdout)
          # Bypass user transforms - just forward it all,
         result = serializer
         # and interpret everything as success.
         summary = testtools.StreamSummary()
         summary.startTestRun()
         summary.stopTestRun()
         return result, summary
     else:
         # Apply user defined transforms.
         filter_tags = test_command.get_filter_tags()
         output = CLITestResult(self,
                                get_id,
                                self._stdout,
                                previous_run,
                                filter_tags=filter_tags)
         summary = output._summary
     return output, summary
def main():
    args = parse_args()
    stream = subunit.ByteStreamToStreamResult(sys.stdin,
                                              non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome,
                          sys.stdout,
                          print_failures=args.print_failures))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        return 1
    if args.post_fails:
        print_fails(sys.stdout)
    print_summary(sys.stdout)
    return (0 if summary.wasSuccessful() else 1)
Example #20
def main():
    parser = optparse.OptionParser(description=__doc__)
    parser.add_option(
        "--times",
        action="store_true",
        help="list the time each test took (requires a timestamped stream)",
        default=False)
    parser.add_option(
        "--exists",
        action="store_true",
        help="list tests that are reported as existing (as well as ran)",
        default=False)
    parser.add_option("--no-passthrough",
                      action="store_true",
                      help="Hide all non subunit input.",
                      default=False,
                      dest="no_passthrough")
    (options, args) = parser.parse_args()
    test = pysubunit.ByteStreamToStreamResult(filters.find_stream(
        sys.stdin, args),
                                              non_subunit_name="stdout")
    result = test_results.TestIdPrintingResult(sys.stdout, options.times,
                                               options.exists)
    if not options.no_passthrough:
        result = testtools.StreamResultRouter(result)
        cat = test_results.CatFiles(sys.stdout)
        result.add_rule(cat, 'test_id', test_id=None)
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([result, summary])
    result.startTestRun()
    test.run(result)
    result.stopTestRun()
    if summary.wasSuccessful():
        exit_code = 0
    else:
        exit_code = 1
    sys.exit(exit_code)
Example #21

def _load_case(inserter, repo, case, subunit_out, pretty_out, color, stdout,
               abbreviate, suppress_attachments, all_attachments):
    if subunit_out:
        output_result, summary_result = output.make_result(inserter.get_id,
                                                           output=stdout)
    elif pretty_out:
        outcomes = testtools.StreamToDict(
            functools.partial(subunit_trace.show_outcome,
                              stdout,
                              enable_color=color,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments,
                              all_attachments=all_attachments))
        summary_result = testtools.StreamSummary()
        output_result = testtools.CopyStreamResult([outcomes, summary_result])
        output_result = testtools.StreamResultRouter(output_result)
        cat = subunit.test_results.CatFiles(stdout)
        output_result.add_rule(cat, 'test_id', test_id=None)
    else:
        try:
            previous_run = repo.get_latest_run()
        except KeyError:
            previous_run = None
        output_result = results.CLITestResult(inserter.get_id, stdout,
                                              previous_run)
        summary_result = output_result.get_summary()
    result = testtools.CopyStreamResult([inserter, output_result])
    result.startTestRun()
    try:
        case.run(result)
    finally:
        result.stopTestRun()
Example #22
def run_command(config='.stestr.conf',
                repo_type='file',
                repo_url=None,
                test_path=None,
                top_dir=None,
                group_regex=None,
                failing=False,
                serial=False,
                concurrency=0,
                load_list=None,
                partial=False,
                subunit_out=False,
                until_failure=False,
                analyze_isolation=False,
                isolated=False,
                worker_path=None,
                blacklist_file=None,
                whitelist_file=None,
                black_regex=None,
                no_discover=False,
                random=False,
                combine=False,
                filters=None,
                pretty_out=True,
                color=False,
                stdout=sys.stdout,
                abbreviate=False,
                suppress_attachments=False):
    """Function to execute the run command

    This function implements the run command. It will run the tests specified
    in the parameters based on the provided config file and/or arguments
    specified in the way specified by the arguments. The results will be
    printed to STDOUT and loaded into the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool partial: DEPRECATED: Only some tests will be run. Implied by
        `--failing`. This flag is deprecated because it doesn't do anything;
        it will be removed in a future release.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str blacklist_file: Path to a blacklist file, this file contains a
        separate regex exclude on each newline.
    :param str whitelist_file: Path to a whitelist file, this file contains a
        separate regex on each newline.
    :param str black_regex: Test rejection regex. If a test case's name
        matches on a re.search() operation, it will be removed from the final
        test list.
    :param str no_discover: Takes in a single test_id to bypass test
        discovery and just execute the specified test. A file name may be used
        in place of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    if partial:
        warnings.warn('The partial flag is deprecated and has no effect '
                      'anymore')
    try:
        repo = util.get_repo_open(repo_type, repo_url)
    # If a repo is not found and a testr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            msg = ("No config file found and --test-path not specified. "
                   "Either create or specify a .stestr.conf or use "
                   "--test-path ")
            stdout.write(msg)
            exit(1)
        repo = util.get_repo_initialise(repo_type, repo_url)
    combine_id = None
    if combine:
        latest_id = repo.latest_id()
        combine_id = six.text_type(latest_id)
    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        run_cmd = 'python -m subunit.run ' + ids

        def run_tests():
            run_proc = [('subunit',
                         output.ReturnCodeToSubunit(
                             subprocess.Popen(run_cmd,
                                              shell=True,
                                              stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(ids,
                                   regexes=filters,
                                   group_regex=group_regex,
                                   repo_type=repo_type,
                                   repo_url=repo_url,
                                   serial=serial,
                                   worker_path=worker_path,
                                   concurrency=concurrency,
                                   blacklist_file=blacklist_file,
                                   whitelist_file=whitelist_file,
                                   black_regex=black_regex,
                                   top_dir=top_dir,
                                   test_path=test_path,
                                   randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command([test_id],
                                           filters,
                                           group_regex=group_regex,
                                           repo_type=repo_type,
                                           repo_url=repo_url,
                                           serial=serial,
                                           worker_path=worker_path,
                                           concurrency=concurrency,
                                           blacklist_file=blacklist_file,
                                           whitelist_file=whitelist_file,
                                           black_regex=black_regex,
                                           randomize=random,
                                           test_path=test_path,
                                           top_dir=top_dir)

                run_result = _run_tests(
                    cmd,
                    until_failure,
                    subunit_out=subunit_out,
                    combine_id=combine_id,
                    repo_type=repo_type,
                    repo_url=repo_url,
                    pretty_out=pretty_out,
                    color=color,
                    abbreviate=abbreviate,
                    stdout=stdout,
                    suppress_attachments=suppress_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd,
                              until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_type=repo_type,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments)
    else:
        # Where do we source data about the cause of conflicts.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtrienish): Add regex
            cmd = conf.get_run_command([test_id],
                                       group_regex=group_regex,
                                       repo_type=repo_type,
                                       repo_url=repo_url,
                                       serial=serial,
                                       worker_path=worker_path,
                                       concurrency=concurrency,
                                       blacklist_file=blacklist_file,
                                       whitelist_file=whitelist_file,
                                       black_regex=black_regex,
                                       randomize=random,
                                       test_path=test_path,
                                       top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly, why not just tell the system that
                # a pass here isn't a real pass? [so that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.]
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(latest_run,
                                                       conf,
                                                       _run_tests,
                                                       repo,
                                                       test_path=test_path,
                                                       top_dir=top_dir,
                                                       group_regex=group_regex,
                                                       repo_type=repo_type,
                                                       repo_url=repo_url,
                                                       serial=serial,
                                                       concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
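A minimal sketch of invoking this run command directly from Python rather than through the CLI, using the no_discover shortcut (the test module path is illustrative, and an initialised repository or .stestr.conf is assumed to be present):

import sys

# Run a single test module without discovery, writing results to the repo.
return_code = run_command(no_discover='myproject/tests/test_demo.py',
                          pretty_out=True,
                          color=False)
sys.exit(return_code)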
if args.list:
    data = open("test/testcases.js").read()
    for test in re.compile("(?<=').+(?=')").findall(data):
        print(test[:-5])
    sys.exit(-1)
    sys.exit(-1)

if args.load_list:
    tests = list(
        set(
            x.split(':')[0].strip() + '.html'
            for x in args.load_list.readlines()))
else:
    tests = []

# Collect summary of all the individual test runs
summary = testtools.StreamSummary()

# Output information to stdout
if not args.subunit:
    # Human readable test output
    pertest = testtools.StreamToExtendedDecorator(
        testtools.MultiTestResult(
            # Individual test progress
            unittest.TextTestResult(
                unittest.runner._WritelnDecorator(sys.stdout), False, 2),
            # End of run, summary of failures.
            testtools.TextTestResult(sys.stdout),
        ))
else:
    from subunit.v2 import StreamResultToBytes
    pertest = StreamResultToBytes(sys.stdout)
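The script above prepares two consumers, the per-test reporter and the run-wide StreamSummary; the usual way to drive both from a single subunit stream is a CopyStreamResult fan-out. A hedged sketch of that wiring, assuming the events arrive on binary stdin:

import sys

import subunit
import testtools

result = testtools.CopyStreamResult([pertest, summary])
stream = subunit.ByteStreamToStreamResult(sys.stdin.buffer,
                                          non_subunit_name='stdout')
result.startTestRun()
try:
    stream.run(result)
finally:
    result.stopTestRun()
sys.exit(0 if summary.wasSuccessful() else 1)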
Example #24
def run_command(config='.stestr.conf',
                repo_url=None,
                test_path=None,
                top_dir=None,
                group_regex=None,
                failing=False,
                serial=False,
                concurrency=0,
                load_list=None,
                subunit_out=False,
                until_failure=False,
                analyze_isolation=False,
                isolated=False,
                worker_path=None,
                exclude_list=None,
                include_list=None,
                exclude_regex=None,
                no_discover=False,
                random=False,
                combine=False,
                filters=None,
                pretty_out=True,
                color=False,
                stdout=sys.stdout,
                abbreviate=False,
                suppress_attachments=False,
                all_attachments=False,
                show_binary_attachments=True,
                pdb=False):
    """Function to execute the run command

    This function implements the run command. It will run the tests specified
    in the parameters based on the provided config file and/or arguments
    specified in the way specified by the arguments. The results will be
    printed to STDOUT and loaded into the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str exclude_list: Path to an exclusion list file, this file
        contains a separate regex exclude on each newline.
    :param str include_list: Path to a inclusion list file, this file
        contains a separate regex on each newline.
    :param str exclude_regex: Test rejection regex. If a test case's name
        matches on a re.search() operation, it will be removed from the final
        test list.
    :param str no_discover: Takes in a single test_id to bypass test
        discovery and just execute the specified test. A file name may be used
        in place of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.
    :param bool all_attachments: When set true subunit_trace will print all
        text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.
    :param str pdb: Takes in a single test_id to bypass test
        discovery and just execute the specified test without launching any
        additional processes. A file name may be used in place of a test name.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    # If a repo is not found and a stestr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            # If there is no config and no test-path
            if os.path.isfile('tox.ini'):
                tox_conf = configparser.ConfigParser()
                tox_conf.read('tox.ini')
                if not tox_conf.has_section('stestr'):
                    msg = ("No file found, --test-path not specified, and "
                           "stestr section not found in tox.ini. Either "
                           "create or specify a .stestr.conf, use "
                           "--test-path, or add an stestr section to the "
                           "tox.ini")
                    stdout.write(msg)
                    exit(1)
            else:
                msg = ("No config file found and --test-path not specified. "
                       "Either create or specify a .stestr.conf or use "
                       "--test-path ")
                stdout.write(msg)
                exit(1)
        try:
            repo = util.get_repo_initialise(repo_url=repo_url)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            repo_path = repo_url or './stestr'
            stdout.write('The specified repository directory %s already '
                         'exists. Please check if the repository already '
                         'exists or select a different path\n' % repo_path)
            return 1

    combine_id = None
    concurrency = _to_int(concurrency)

    if concurrency and concurrency < 0:
        msg = ("The provided concurrency value: %s is not valid. An integer "
               ">= 0 must be used.\n" % concurrency)
        stdout.write(msg)
        return 2
    if combine:
        latest_id = repo.latest_id()
        combine_id = str(latest_id)
    if no_discover and pdb:
        msg = ("--no-discover and --pdb are mutually exclusive options, "
               "only specify one at a time")
        stdout.write(msg)
        return 2
    if pdb and until_failure:
        msg = ("pdb mode does not function with the --until-failure flag, "
               "only specify one at a time")
        stdout.write(msg)
        return 2

    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        stestr_python = sys.executable
        if os.environ.get('PYTHON'):
            python_bin = os.environ.get('PYTHON')
        elif stestr_python:
            python_bin = stestr_python
        else:
            raise RuntimeError("The Python interpreter was not found and "
                               "PYTHON is not set")
        run_cmd = python_bin + ' -m stestr.subunit_runner.run ' + ids

        def run_tests():
            run_proc = [('subunit',
                         output.ReturnCodeToSubunit(
                             subprocess.Popen(run_cmd,
                                              shell=True,
                                              stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments,
                             all_attachments=all_attachments,
                             show_binary_attachments=show_binary_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

    if pdb:
        ids = pdb
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        runner = subunit_run.SubunitTestRunner
        stream = io.BytesIO()
        program.TestProgram(module=None,
                            argv=['stestr', ids],
                            testRunner=functools.partial(runner,
                                                         stdout=stream))
        stream.seek(0)
        run_proc = [('subunit', stream)]
        return load.load(in_streams=run_proc,
                         subunit_out=subunit_out,
                         repo_url=repo_url,
                         run_id=combine_id,
                         pretty_out=pretty_out,
                         color=color,
                         stdout=stdout,
                         abbreviate=abbreviate,
                         suppress_attachments=suppress_attachments,
                         all_attachments=all_attachments,
                         show_binary_attachments=show_binary_attachments)

    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    if config and os.path.isfile(config):
        conf = config_file.TestrConf(config)
    elif os.path.isfile('tox.ini'):
        conf = config_file.TestrConf('tox.ini', section='stestr')
    else:
        conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(ids,
                                   regexes=filters,
                                   group_regex=group_regex,
                                   repo_url=repo_url,
                                   serial=serial,
                                   worker_path=worker_path,
                                   concurrency=concurrency,
                                   exclude_list=exclude_list,
                                   include_list=include_list,
                                   exclude_regex=exclude_regex,
                                   top_dir=top_dir,
                                   test_path=test_path,
                                   randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command([test_id],
                                           filters,
                                           group_regex=group_regex,
                                           repo_url=repo_url,
                                           serial=serial,
                                           worker_path=worker_path,
                                           concurrency=concurrency,
                                           exclude_list=exclude_list,
                                           include_list=include_list,
                                           exclude_regex=exclude_regex,
                                           randomize=random,
                                           test_path=test_path,
                                           top_dir=top_dir)

                run_result = _run_tests(
                    cmd,
                    until_failure,
                    subunit_out=subunit_out,
                    combine_id=combine_id,
                    repo_url=repo_url,
                    pretty_out=pretty_out,
                    color=color,
                    abbreviate=abbreviate,
                    stdout=stdout,
                    suppress_attachments=suppress_attachments,
                    all_attachments=all_attachments,
                    show_binary_attachments=show_binary_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd,
                              until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments,
                              all_attachments=all_attachments,
                              show_binary_attachments=show_binary_attachments)
    else:
        # Where do we source data about the cause of conflicts.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtrienish): Add regex
            cmd = conf.get_run_command([test_id],
                                       group_regex=group_regex,
                                       repo_url=repo_url,
                                       serial=serial,
                                       worker_path=worker_path,
                                       concurrency=concurrency,
                                       exclude_list=exclude_list,
                                       include_list=include_list,
                                       exclude_regex=exclude_regex,
                                       randomize=random,
                                       test_path=test_path,
                                       top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly, why not just tell the system that
                # a pass here isn't a real pass? [so that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.]
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(latest_run,
                                                       conf,
                                                       _run_tests,
                                                       repo,
                                                       test_path=test_path,
                                                       top_dir=top_dir,
                                                       group_regex=group_regex,
                                                       repo_url=repo_url,
                                                       serial=serial,
                                                       concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
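As with the earlier variant, this run command can be driven from Python as well as from the CLI. A small sketch using the newer filtering options, assuming a project with a .stestr.conf (the regex values are illustrative):

import sys

return_code = run_command(filters=[r'^myproject\.tests\.'],
                          exclude_regex='slow',
                          concurrency=0,
                          pretty_out=True,
                          color=False)
sys.exit(return_code)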