Example #1
 def test_empty_with_pretty_out(self):
     stream = io.BytesIO()
     output = io.BytesIO()
     res = load.load(in_streams=[('subunit', stream)],
                     pretty_out=True,
                     stdout=output)
     self.assertEqual(1, res)
Example #2
 def run_tests():
     run_proc = [('subunit', output.ReturnCodeToSubunit(
         subprocess.Popen(run_cmd, shell=True,
                          stdout=subprocess.PIPE)))]
     return load.load((None, None), in_streams=run_proc,
                      partial=args.partial, subunit_out=args.subunit,
                      repo_type=args.repo_type,
                      repo_url=args.repo_url)
Example #3
 def run_tests():
     run_proc = [('subunit', output.ReturnCodeToSubunit(
         subprocess.Popen(run_cmd, shell=True,
                          stdout=subprocess.PIPE)))]
     return load.load(in_streams=run_proc,
                      subunit_out=subunit_out,
                      repo_type=repo_type,
                      repo_url=repo_url, run_id=combine_id,
                      pretty_out=pretty_out,
                      color=color, stdout=stdout, abbreviate=abbreviate,
                      suppress_attachments=suppress_attachments)
Example #4
 def run_tests():
     run_procs = [('subunit',
                   output.ReturnCodeToSubunit(
                       proc)) for proc in cmd.run_tests()]
     partial = False
     if (failing or analyze_isolation or isolated):
         partial = True
     if not run_procs:
         print("The specified regex doesn't match with anything.")
         return 0
     return load.load((None, None), in_streams=run_procs,
                      partial=partial, subunit_out=subunit_out,
                      repo_type=cmd.options.repo_type,
                      repo_url=cmd.options.repo_url)
Example #5
 def run_tests():
     run_procs = [('subunit',
                   output.ReturnCodeToSubunit(
                       proc)) for proc in cmd.run_tests()]
     if not run_procs:
         stdout.write("The specified regex doesn't match with anything")
         return 1
     return load.load((None, None), in_streams=run_procs,
                      subunit_out=subunit_out,
                      repo_type=repo_type,
                      repo_url=repo_url, run_id=combine_id,
                      pretty_out=pretty_out, color=color, stdout=stdout,
                      abbreviate=abbreviate,
                      suppress_attachments=suppress_attachments)
Example #6
File: run.py  Project: arun-n2020/stestr
def run_command(config='.stestr.conf', repo_type='file',
                repo_url=None, test_path=None, top_dir=None, group_regex=None,
                failing=False, serial=False, concurrency=0, load_list=None,
                partial=False, subunit_out=False, until_failure=False,
                analyze_isolation=False, isolated=False, worker_path=None,
                blacklist_file=None, whitelist_file=None, black_regex=None,
                no_discover=False, random=False, combine=False, filters=None,
                pretty_out=True, color=False, stdout=sys.stdout,
                abbreviate=False, suppress_attachments=False,
                all_attachments=False, show_binary_attachments=True,
                pdb=False):
    """Function to execute the run command

    This function implements the run command. It will run the tests selected
    by the provided config file and/or the arguments, in the manner specified
    by those arguments. The results will be printed to STDOUT and loaded into
    the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file (if one is present in
        the config file).
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: "How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool partial: DEPRECATED: Only some tests will be run. Implied by
        `--failing`. This flag is deprecated because it doesn't do anything,
        and it will be removed in a future release.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str blacklist_file: Path to a blacklist file; this file contains a
        separate exclusion regex on each line.
    :param str whitelist_file: Path to a whitelist file; this file contains a
        separate regex on each line.
    :param str black_regex: Test rejection regex. If a test case's name matches
        on a re.search() operation, it will be removed from the final test list.
    :param str no_discover: Takes in a single test_id to bypass test discovery
        and just execute the specified test. A file name may be used in place
        of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply to
        the test list. Tests that match any of the regexes will be used,
        provided they also pass any other filtering specified.
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.
    :param bool all_attachments: When set true, subunit_trace will print all
        text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.
    :param str pdb: Takes in a single test_id to bypass test discovery and just
        execute the specified test without launching any additional processes.
        A file name may be used in place of a test name.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    if partial:
        warnings.warn('The partial flag is deprecated and has no effect '
                      'anymore')
    try:
        repo = util.get_repo_open(repo_type, repo_url)
    # If a repo is not found but a config file exists, just create the repo
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            msg = ("No config file found and --test-path not specified. "
                   "Either create or specify a .stestr.conf or use "
                   "--test-path ")
            stdout.write(msg)
            exit(1)
        try:
            repo = util.get_repo_initialise(repo_type, repo_url)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            repo_path = repo_url or './stestr'
            stdout.write('The specified repository directory %s already '
                         'exists. Please check if the repository already '
                         'exists or select a different path\n' % repo_path)
            return 1

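    # Resolve the run id to combine with (if requested) and sanity-check
    # option combinations before running anything.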
    combine_id = None
    concurrency = _to_int(concurrency)

    if concurrency and concurrency < 0:
        msg = ("The provided concurrency value: %s is not valid. An integer "
               ">= 0 must be used.\n" % concurrency)
        stdout.write(msg)
        return 2
    if combine:
        latest_id = repo.latest_id()
        combine_id = str(latest_id)
    if no_discover and pdb:
        msg = ("--no-discover and --pdb are mutually exclusive options, "
               "only specify one at a time")
        stdout.write(msg)
        return 2
    if pdb and until_failure:
        msg = ("pdb mode does not function with the --until-failure flag, "
               "only specify one at a time")
        stdout.write(msg)
        return 2

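    # --no-discover: skip test discovery and hand the single specified test id
    # (or file path) straight to the subunit runner in a subprocess.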
    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        stestr_python = sys.executable
        if os.environ.get('PYTHON'):
            python_bin = os.environ.get('PYTHON')
        elif stestr_python:
            python_bin = stestr_python
        else:
            raise RuntimeError("The Python interpreter was not found and "
                               "PYTHON is not set")
        run_cmd = python_bin + ' -m stestr.subunit_runner.run ' + ids

        def run_tests():
            run_proc = [('subunit', output.ReturnCodeToSubunit(
                subprocess.Popen(run_cmd, shell=True,
                                 stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url, run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color, stdout=stdout, abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments,
                             all_attachments=all_attachments,
                             show_binary_attachments=show_binary_attachments)

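        # Run once, or with --until-failure keep re-running until a failing
        # run is recorded.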
        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

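    # --pdb: run the single specified test in this process, without launching
    # any additional processes, then load the captured subunit stream.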
    if pdb:
        ids = pdb
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        runner = subunit_run.SubunitTestRunner
        stream = io.BytesIO()
        program.TestProgram(module=None, argv=['stestr', ids],
                            testRunner=functools.partial(runner,
                                                         stdout=stream))
        stream.seek(0)
        run_proc = [('subunit', stream)]
        return load.load(in_streams=run_proc,
                         subunit_out=subunit_out,
                         repo_type=repo_type,
                         repo_url=repo_url, run_id=combine_id,
                         pretty_out=pretty_out,
                         color=color, stdout=stdout, abbreviate=abbreviate,
                         suppress_attachments=suppress_attachments,
                         all_attachments=all_attachments,
                         show_binary_attachments=show_binary_attachments)

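    # Build the initial set of test ids: only tests known to be failing when
    # requested, otherwise leave the selection unrestricted; optionally narrow
    # it further with a load list.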
    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text... currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

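    # Normal path: build the test run command(s) from the config file and the
    # supplied filters, then either run them directly, one runner per test id
    # (--isolated), or bisect for isolation interactions (--analyze-isolation).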
    conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(
            ids, regexes=filters, group_regex=group_regex, repo_type=repo_type,
            repo_url=repo_url, serial=serial, worker_path=worker_path,
            concurrency=concurrency, blacklist_file=blacklist_file,
            whitelist_file=whitelist_file, black_regex=black_regex,
            top_dir=top_dir, test_path=test_path, randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command(
                    [test_id], filters, group_regex=group_regex,
                    repo_type=repo_type, repo_url=repo_url, serial=serial,
                    worker_path=worker_path, concurrency=concurrency,
                    blacklist_file=blacklist_file,
                    whitelist_file=whitelist_file, black_regex=black_regex,
                    randomize=random, test_path=test_path, top_dir=top_dir)

                run_result = _run_tests(
                    cmd, until_failure,
                    subunit_out=subunit_out, combine_id=combine_id,
                    repo_type=repo_type, repo_url=repo_url,
                    pretty_out=pretty_out, color=color, abbreviate=abbreviate,
                    stdout=stdout, suppress_attachments=suppress_attachments,
                    all_attachments=all_attachments,
                    show_binary_attachments=show_binary_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd, until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_type=repo_type,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments,
                              all_attachments=all_attachments,
                              show_binary_attachments=show_binary_attachments)
    else:
        # Where do we source data about the cause of conflicts?
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command(
                [test_id], group_regex=group_regex, repo_type=repo_type,
                repo_url=repo_url, serial=serial, worker_path=worker_path,
                concurrency=concurrency, blacklist_file=blacklist_file,
                whitelist_file=whitelist_file, black_regex=black_regex,
                randomize=random, test_path=test_path,
                top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly: why not just tell the system that
                # a pass here isn't a real pass, so that when we find a
                # test that is spuriously failing we don't forget that it
                # is actually failing?
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with, etc.
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(
            latest_run, conf, _run_tests, repo, test_path=test_path,
            top_dir=top_dir, group_regex=group_regex, repo_type=repo_type,
            repo_url=repo_url, serial=serial, concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
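
Usage sketch (not taken from the project above): run_command can also be
driven directly from Python rather than through the CLI. The import path below
is an assumption inferred from the file shown above (run.py in the stestr
project); adjust it to your installation. The call uses only parameters from
the signature documented above and assumes a .stestr.conf exists in the
current working directory.

import sys

# Hypothetical import path, inferred from "File: run.py  Project: stestr".
from stestr import run

if __name__ == '__main__':
    # Run the whole suite with subunit-trace output; run_command returns the
    # exit code documented above (0 for success, > 0 for failures).
    return_code = run.run_command(
        config='.stestr.conf',   # stestr config file to discover tests from
        concurrency=0,           # 0 autodetects the CPU count
        pretty_out=True,         # filter output through subunit-trace
        stdout=sys.stdout,
    )
    sys.exit(return_code)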