Example No. 1
def run(arguments):
    args = arguments[0]
    repo = util.get_repo_open(args.repo_type, args.repo_url)
    latest_run = repo.get_latest_run()
    if args.subunit:
        stream = latest_run.get_subunit_stream()
        output.output_stream(stream)
        # Exits 0 if we successfully wrote the stream.
        return 0
    case = latest_run.get_test()
    try:
        if args.repo_type == 'file':
            previous_run = repo.get_test_run(repo.latest_id() - 1)
        # TODO(mtreinish): add a repository api to get the previous_run to
        # unify this logic
        else:
            previous_run = None
    except KeyError:
        previous_run = None
    failed = False
    output_result = results.CLITestResult(latest_run.get_id, sys.stdout,
                                          previous_run)
    summary = output_result.get_summary()
    output_result.startTestRun()
    try:
        case.run(output_result)
    finally:
        output_result.stopTestRun()
Example No. 2
def failing(repo_url=None, list_tests=False, subunit=False, stdout=sys.stdout):
    """Print the failing tests from the most recent run in the repository

    This function will print to STDOUT whether there are any tests that failed
    in the last run. It optionally will print the test_ids for the failing
    tests if ``list_tests`` is true. If ``subunit`` is true a subunit stream
    with just the failed tests will be printed to STDOUT.

    Note this function depends on the cwd for the repository: if `repo_url` is
    not specified, it will use the repository located at CWD/.stestr

    :param str repo_url: The url of the repository to use.
    :param bool list_tests: Show only a list of failing tests.
    :param bool subunit: Show output as a subunit stream.
    :param file stdout: The output file to write all output to. By default
        this is sys.stdout

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    repo = util.get_repo_open(repo_url=repo_url)
    run = repo.get_failing()
    if subunit:
        return _show_subunit(run)
    case = run.get_test()
    failed = False
    result, summary = _make_result(repo, list_tests=list_tests)
    result.startTestRun()
    try:
        case.run(result)
    finally:
        result.stopTestRun()
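
A minimal usage sketch for the ``failing`` API above. The import path is an assumption (adjust it to wherever ``failing`` is defined); the report is captured in a buffer instead of written straight to the terminal:

import io
import sys

from stestr.commands import failing  # assumed import path

buf = io.StringIO()
ret = failing(list_tests=True, stdout=buf)  # 0 on success, > 0 on failures
sys.stdout.write(buf.getvalue())
sys.exit(ret)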
Example No. 3
File: run.py Project: zaneb/stestr
def _run_tests(cmd,
               failing,
               analyze_isolation,
               isolated,
               until_failure,
               subunit_out=False,
               combine_id=None,
               repo_type='file',
               repo_url=None,
               pretty_out=True,
               color=False,
               stdout=sys.stdout,
               abbreviate=False,
               suppress_attachments=False):
    """Run the tests cmd was parameterised with."""
    cmd.setUp()
    try:

        def run_tests():
            run_procs = [('subunit', output.ReturnCodeToSubunit(proc))
                         for proc in cmd.run_tests()]
            if not run_procs:
                stdout.write("The specified regex doesn't match with anything")
                return 1
            return load.load((None, None),
                             in_streams=run_procs,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    repo = util.get_repo_open(repo_type, repo_url)
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not summary.wasSuccessful():
                        result = 1
                if result:
                    return result
    finally:
        cmd.cleanUp()
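
The repository check inside the ``until_failure`` loop above boils down to replaying the stored subunit stream through a ``testtools.StreamSummary``. A standalone sketch of that pattern, using an in-memory byte stream for illustration:

import io

import subunit
import testtools


def stream_was_successful(raw_bytes):
    """Return True if the subunit byte stream records no failures."""
    summary = testtools.StreamSummary()
    stream = subunit.ByteStreamToStreamResult(io.BytesIO(raw_bytes))
    summary.startTestRun()
    try:
        stream.run(summary)
    finally:
        summary.stopTestRun()
    return summary.wasSuccessful()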
Example No. 4
    def get_run_command(self, options, test_ids=None, regexes=None):
        """Get a test_listing_fixture.TestListingFixture for this config file

        :param options: An argparse Namespace object of the cli options that
            were used in the invocation of the original CLI command that
            needs a TestListingFixture
        :param list test_ids: an optional list of test_ids to use when running
            tests
        :param list regexes: an optional list of regex strings to use for
            filtering the tests to run. See the test_filters parameter in
            TestListingFixture to see how this is used.
        :returns: a TestListingFixture object for the specified config file and
            any arguments passed into this function
        :rtype: test_listing_fixture.TestListingFixture
        """

        if options.test_path:
            test_path = options.test_path
        elif self.parser.has_option('DEFAULT', 'test_path'):
            test_path = self.parser.get('DEFAULT', 'test_path')
        else:
            print("no test_path can be found in either the command line "
                  "options nor in config file {0}.  Are you running stestr "
                  "from an unexpected location?".format(self.config_file))
            sys.exit(1)
        top_dir = './'
        if options.top_dir:
            top_dir = options.top_dir
        elif self.parser.has_option('DEFAULT', 'top_dir'):
            top_dir = self.parser.get('DEFAULT', 'top_dir')
        command = "${PYTHON:-python} -m subunit.run discover -t" \
                  " %s %s $LISTOPT $IDOPTION" % (top_dir, test_path)
        listopt = "--list"
        idoption = "--load-list $IDFILE"
        # If the command contains $IDOPTION read that command from config
        # Use a group regex if one is defined
        group_regex = None
        if options.group_regex:
            group_regex = options.group_regex
        elif self.parser.has_option('DEFAULT', 'group_regex'):
            group_regex = self.parser.get('DEFAULT', 'group_regex')
        if group_regex:
            def group_callback(test_id, regex=re.compile(group_regex)):
                match = regex.match(test_id)
                if match:
                    return match.group(0)
        else:
            group_callback = None

        # Handle the results repository
        repository = util.get_repo_open(options.repo_type, options.repo_url)
        return test_listing_fixture.TestListingFixture(
            test_ids, options, command, listopt, idoption, repository,
            test_filters=regexes, group_callback=group_callback)
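
The ``group_callback`` built above simply maps a test id to the leading portion matched by ``group_regex``, so tests sharing that prefix are scheduled in the same worker. A small illustration using the class-grouping regex that appears later in these examples:

import re

# Group by everything up to and including the class name.
group_regex = re.compile(r'([^.]*\.)*')


def group_callback(test_id, regex=group_regex):
    match = regex.match(test_id)
    if match:
        return match.group(0)


# Both ids map to the same group key, so they run in the same worker.
print(group_callback('pkg.module.TestClass.test_one'))  # pkg.module.TestClass.
print(group_callback('pkg.module.TestClass.test_two'))  # pkg.module.TestClass.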
Example No. 5
def run(arguments):
    args = arguments[0]
    repo = util.get_repo_open(args.repo_type, args.repo_url)
    run = repo.get_failing()
    if args.subunit:
        return _show_subunit(run)
    case = run.get_test()
    failed = False
    result, summary = _make_result(repo, list_tests=args.list)
    result.startTestRun()
    try:
        case.run(result)
    finally:
        result.stopTestRun()
Example No. 6
def history_list(repo_url=None, show_metadata=False, stdout=sys.stdout):
    """Show a list of runs in a repository

    Note this function depends on the cwd for the repository: if `repo_url` is
    not specified, it will use the repository located at CWD/.stestr

    :param str repo_url: The url of the repository to use.
    :param bool show_metadata: If set to ``True`` a column with any metadata
        for a run will be included in the output.
    :param file stdout: The output file to write all output to. By default
         this is sys.stdout

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """

    field_names = ()
    if show_metadata:
        field_names = ('Run ID', 'Passed', 'Runtime', 'Date', 'Metadata')
    else:
        field_names = ('Run ID', 'Passed', 'Runtime', 'Date')
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    except abstract.RepositoryNotFound as e:
        stdout.write(str(e) + '\n')
        return 1
    try:
        run_ids = repo.get_run_ids()
    except KeyError as e:
        stdout.write(str(e) + '\n')
        return 1
    rows = []
    for run_id in run_ids:
        run = repo.get_test_run(run_id)
        stream = run.get_subunit_stream()
        data = _get_run_details(stream, stdout)
        if show_metadata:
            rows.append(
                (run_id, data['passed'], data['runtime'], data['start'],
                 run.get_metadata()))
        else:
            rows.append(
                (run_id, data['passed'], data['runtime'], data['start']))

    return (field_names, rows)
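
A usage sketch for ``history_list``. The import path is an assumption; the call consumes the returned field names and rows directly (on error an integer exit code is returned instead):

from stestr.commands import history_list  # assumed import path

result = history_list(show_metadata=False)
if isinstance(result, int):
    raise SystemExit(result)  # repository missing or empty
field_names, rows = result
print(' | '.join(field_names))
for row in rows:
    print(' | '.join(str(col) for col in row))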
Example No. 7
def run(args):
    repo = util.get_repo_open(args[0].repo_type, args[0].repo_url)
    try:
        latest_id = repo.latest_id()
    except KeyError:
        return 3
    # what happens when there is no timing info?
    test_times = repo.get_test_times(repo.get_test_ids(latest_id))
    known_times = list(test_times['known'].items())
    known_times.sort(key=itemgetter(1), reverse=True)
    if len(known_times) > 0:
        # By default show 10 rows
        if not args[0].all:
            known_times = known_times[:10]
        known_times = format_times(known_times)
        header = ('Test id', 'Runtime (s)')
        rows = [header] + known_times
        output.output_table(rows)
    return 0
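
The report above is a sort-and-slice over the ``{test_id: duration}`` mapping returned by the repository; the same step in isolation, with made-up timing data:

from operator import itemgetter

known_times = {
    'tests.test_a.TestA.test_fast': 0.2,
    'tests.test_b.TestB.test_slow': 4.7,
    'tests.test_c.TestC.test_medium': 1.3,
}

# Sort by duration, slowest first, and keep at most 10 rows.
rows = sorted(known_times.items(), key=itemgetter(1), reverse=True)[:10]
for test_id, duration in rows:
    print('%s: %.1fs' % (test_id, duration))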
Example No. 8
def slowest(repo_type='file',
            repo_url=None,
            show_all=False,
            stdout=sys.stdout):
    """Print the slowest times from the last run in the repository

    This function will print to STDOUT the 10 slowest tests from the last run,
    sorted by time. Optionally, using the ``show_all`` argument, it will print
    all the tests instead of just 10.

    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param bool show_all: Show timing for all tests.
    :param file stdout: The output file to write all output to. By default
        this is sys.stdout

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """

    repo = util.get_repo_open(repo_type, repo_url)
    try:
        latest_id = repo.latest_id()
    except KeyError:
        return 3
    # what happens when there is no timing info?
    test_times = repo.get_test_times(repo.get_test_ids(latest_id))
    known_times = list(test_times['known'].items())
    known_times.sort(key=itemgetter(1), reverse=True)
    if len(known_times) > 0:
        # By default show 10 rows
        if not show_all:
            known_times = known_times[:10]
        known_times = format_times(known_times)
        header = ('Test id', 'Runtime (s)')
        rows = [header] + known_times
        output.output_table(rows, output=stdout)
    return 0
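
A usage sketch for the ``slowest`` API above (the import path is an assumption). It writes the timing table to the supplied file object and returns an exit code, 3 meaning the repository has no runs yet:

import io
import sys

from stestr.commands import slowest  # assumed import path

buf = io.StringIO()
ret = slowest(show_all=True, stdout=buf)
if ret == 3:
    sys.stderr.write('No runs found in the repository\n')
else:
    sys.stdout.write(buf.getvalue())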
Example No. 9
def history_remove(run_id, repo_url=None, stdout=sys.stdout):
    """Remove a run from a repository

    Note this function depends on the cwd for the repository: if `repo_url` is
    not specified, it will use the repository located at CWD/.stestr

    :param str run_id: The run id to remove from the repository. It can also be
        set to ``all``, which will remove all runs from the repository.
    :param str repo_url: The url of the repository to use.
    :param file stdout: The output file to write all output to. By default
         this is sys.stdout

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    except abstract.RepositoryNotFound as e:
        stdout.write(str(e) + '\n')
        return 1
    if run_id == 'all':
        try:
            run_ids = repo.get_run_ids()
        except KeyError as e:
            stdout.write(str(e) + '\n')
            return 1
        for run_id in run_ids:
            repo.remove_run_id(run_id)
    else:
        try:
            repo.remove_run_id(run_id)
        except KeyError as e:
            stdout.write(str(e) + '\n')
            return 1
    return 0
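
A usage sketch for ``history_remove`` (assumed import path). It returns 0 on success and 1 when the run or the repository cannot be found:

from stestr.commands import history_remove  # assumed import path

# Remove a single run by id.
ret = history_remove('3')
if ret != 0:
    raise SystemExit(ret)

# Or clear the whole history.
history_remove('all')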
Example No. 10
def run_command(config='.stestr.conf',
                repo_url=None,
                test_path=None,
                top_dir=None,
                group_regex=None,
                failing=False,
                serial=False,
                concurrency=0,
                load_list=None,
                subunit_out=False,
                until_failure=False,
                analyze_isolation=False,
                isolated=False,
                worker_path=None,
                exclude_list=None,
                include_list=None,
                exclude_regex=None,
                no_discover=False,
                random=False,
                combine=False,
                filters=None,
                pretty_out=True,
                color=False,
                stdout=sys.stdout,
                abbreviate=False,
                suppress_attachments=False,
                all_attachments=False,
                show_binary_attachments=True,
                pdb=False):
    """Function to execute the run command

    This function implements the run command. It will run the tests specified
    by the provided config file and/or the given arguments. The results will be
    printed to STDOUT and loaded into the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str exclude_list: Path to an exclusion list file, this file
        contains a separate regex exclude on each newline.
    :param str include_list: Path to an inclusion list file, this file
        contains a separate regex on each newline.
    :param str exclude_regex: Test rejection regex. If a test case's name
        matches in a re.search() operation, it will be removed from the final
        test list.
    :param str no_discover: Takes a single test_id to bypass test discovery
        and just execute the specified test. A file name may be used in place
        of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.
    :param bool all_attachments: When set true, subunit_trace will print all
        text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.
    :param str pdb: Takes a single test_id to bypass test discovery and just
        execute the specified test without launching any additional processes.
        A file name may be used in place of a test name.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    # If a repo is not found and a stestr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            # If there is no config and no test-path
            if os.path.isfile('tox.ini'):
                tox_conf = configparser.SafeConfigParser()
                tox_conf.read('tox.ini')
                if not tox_conf.has_section('stestr'):
                    msg = ("No file found, --test-path not specified, and "
                           "stestr section not found in tox.ini. Either "
                           "create or specify a .stestr.conf, use "
                           "--test-path, or add an stestr section to the "
                           "tox.ini")
                    stdout.write(msg)
                    exit(1)
            else:
                msg = ("No config file found and --test-path not specified. "
                       "Either create or specify a .stestr.conf or use "
                       "--test-path ")
                stdout.write(msg)
                exit(1)
        try:
            repo = util.get_repo_initialise(repo_url=repo_url)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            repo_path = repo_url or './stestr'
            stdout.write('The specified repository directory %s already '
                         'exists. Please check if the repository already '
                         'exists or select a different path\n' % repo_path)
            return 1

    combine_id = None
    concurrency = _to_int(concurrency)

    if concurrency and concurrency < 0:
        msg = ("The provided concurrency value: %s is not valid. An integer "
               ">= 0 must be used.\n" % concurrency)
        stdout.write(msg)
        return 2
    if combine:
        latest_id = repo.latest_id()
        combine_id = str(latest_id)
    if no_discover and pdb:
        msg = ("--no-discover and --pdb are mutually exclusive options, "
               "only specify one at a time")
        stdout.write(msg)
        return 2
    if pdb and until_failure:
        msg = ("pdb mode does not function with the --until-failure flag, "
               "only specify one at a time")
        stdout.write(msg)
        return 2

    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        stestr_python = sys.executable
        if os.environ.get('PYTHON'):
            python_bin = os.environ.get('PYTHON')
        elif stestr_python:
            python_bin = stestr_python
        else:
            raise RuntimeError("The Python interpreter was not found and "
                               "PYTHON is not set")
        run_cmd = python_bin + ' -m stestr.subunit_runner.run ' + ids

        def run_tests():
            run_proc = [('subunit',
                         output.ReturnCodeToSubunit(
                             subprocess.Popen(run_cmd,
                                              shell=True,
                                              stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments,
                             all_attachments=all_attachments,
                             show_binary_attachments=show_binary_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

    if pdb:
        ids = pdb
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        runner = subunit_run.SubunitTestRunner
        stream = io.BytesIO()
        program.TestProgram(module=None,
                            argv=['stestr', ids],
                            testRunner=functools.partial(runner,
                                                         stdout=stream))
        stream.seek(0)
        run_proc = [('subunit', stream)]
        return load.load(in_streams=run_proc,
                         subunit_out=subunit_out,
                         repo_url=repo_url,
                         run_id=combine_id,
                         pretty_out=pretty_out,
                         color=color,
                         stdout=stdout,
                         abbreviate=abbreviate,
                         suppress_attachments=suppress_attachments,
                         all_attachments=all_attachments,
                         show_binary_attachments=show_binary_attachments)

    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    if config and os.path.isfile(config):
        conf = config_file.TestrConf(config)
    elif os.path.isfile('tox.ini'):
        conf = config_file.TestrConf('tox.ini', section='stestr')
    else:
        conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(ids,
                                   regexes=filters,
                                   group_regex=group_regex,
                                   repo_url=repo_url,
                                   serial=serial,
                                   worker_path=worker_path,
                                   concurrency=concurrency,
                                   exclude_list=exclude_list,
                                   include_list=include_list,
                                   exclude_regex=exclude_regex,
                                   top_dir=top_dir,
                                   test_path=test_path,
                                   randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command([test_id],
                                           filters,
                                           group_regex=group_regex,
                                           repo_url=repo_url,
                                           serial=serial,
                                           worker_path=worker_path,
                                           concurrency=concurrency,
                                           exclude_list=exclude_list,
                                           include_list=include_list,
                                           exclude_regex=exclude_regex,
                                           randomize=random,
                                           test_path=test_path,
                                           top_dir=top_dir)

                run_result = _run_tests(
                    cmd,
                    until_failure,
                    subunit_out=subunit_out,
                    combine_id=combine_id,
                    repo_url=repo_url,
                    pretty_out=pretty_out,
                    color=color,
                    abbreviate=abbreviate,
                    stdout=stdout,
                    suppress_attachments=suppress_attachments,
                    all_attachments=all_attachments,
                    show_binary_attachments=show_binary_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd,
                              until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments,
                              all_attachments=all_attachments,
                              show_binary_attachments=show_binary_attachments)
    else:
        # Where do we source data about the cause of conflicts.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command([test_id],
                                       group_regex=group_regex,
                                       repo_url=repo_url,
                                       serial=serial,
                                       worker_path=worker_path,
                                       concurrency=concurrency,
                                       exclude_list=exclude_list,
                                       include_list=include_list,
                                       exclude_regex=exclude_regex,
                                       randomize=random,
                                       test_path=test_path,
                                       top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly, why not just tell the system that
                # a pass here isn't a real pass? [so that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(latest_run,
                                                       conf,
                                                       _run_tests,
                                                       repo,
                                                       test_path=test_path,
                                                       top_dir=top_dir,
                                                       group_regex=group_regex,
                                                       repo_url=repo_url,
                                                       serial=serial,
                                                       concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
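
A usage sketch for ``run_command`` (assumed import path). This is roughly the programmatic equivalent of ``stestr run --failing``: it re-runs only the tests recorded as failing, prints the report to the given file object, and returns an exit code:

import sys

from stestr.commands import run_command  # assumed import path

ret = run_command(
    config='.stestr.conf',
    failing=True,      # re-run only the tests that failed last time
    pretty_out=True,
    stdout=sys.stdout,
)
sys.exit(ret)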
Example No. 11
def load(force_init=False,
         in_streams=None,
         partial=False,
         subunit_out=False,
         repo_type='file',
         repo_url=None,
         run_id=None,
         streams=None,
         pretty_out=False,
         color=False,
         stdout=sys.stdout,
         abbreviate=False,
         suppress_attachments=False,
         serial=False,
         all_attachments=False):
    """Load subunit streams into a repository

    This function will load subunit streams into the repository. It will
    output to STDOUT the results from the input stream. Internally this is
    used by the run command to both output the results as well as store the
    result in the repository.

    :param bool force_init: Initialize the specified repository if it hasn't
        been created.
    :param list in_streams: A list of file objects that will be saved into the
        repository
    :param bool partial: DEPRECATED: Specify the input is a partial stream.
        This option is deprecated and no longer does anything. It will be
        removed in the future.
    :param bool subunit_out: Output the subunit stream to stdout
    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param run_id: The optional run id to save the subunit stream to.
    :param list streams: A list of file paths to read for the input streams.
    :param bool pretty_out: Use the subunit-trace output filter for the loaded
        stream.
    :param bool color: Enabled colorized subunit-trace output
    :param file stdout: The output file to write all output to. By default
        this is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.
    :param bool all_attachments: When set true, subunit_trace will print all
        text attachments on successful test execution.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    if partial:
        warnings.warn('The partial flag is deprecated and has no effect '
                      'anymore')

    try:
        repo = util.get_repo_open(repo_type, repo_url)
    except repository.RepositoryNotFound:
        if force_init:
            repo = util.get_repo_initialise(repo_type, repo_url)
        else:
            raise
    # Not a full implementation of TestCase, but we only need to iterate
    # back to it. Needs to be a callable - it's a head fake for
    # testsuite.add.
    if in_streams:
        streams = utils.iter_streams(in_streams, 'subunit')
    elif streams:
        opener = functools.partial(open, mode='rb')
        streams = map(opener, streams)
    else:
        streams = [sys.stdin]

    def mktagger(pos, result):
        return testtools.StreamTagger([result], add=['worker-%d' % pos])

    def make_tests():
        for pos, stream in enumerate(streams):
            # Calls StreamResult API.
            case = subunit.ByteStreamToStreamResult(stream,
                                                    non_subunit_name='stdout')
            decorate = functools.partial(mktagger, pos)
            case = testtools.DecorateTestCaseResult(case, decorate)
            yield (case, str(pos))
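
A usage sketch for ``load`` (assumed import path), loading previously captured subunit files from disk and initialising the repository if it does not exist yet; the file names are illustrative:

from stestr.commands import load  # assumed import path

ret = load(force_init=True,
           streams=['worker-0.subunit', 'worker-1.subunit'],
           pretty_out=True)
raise SystemExit(ret)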
Example No. 12
def run_command(config='.stestr.conf',
                repo_type='file',
                repo_url=None,
                test_path=None,
                top_dir=None,
                group_regex=None,
                failing=False,
                serial=False,
                concurrency=0,
                load_list=None,
                partial=False,
                subunit_out=False,
                until_failure=False,
                analyze_isolation=False,
                isolated=False,
                worker_path=None,
                blacklist_file=None,
                whitelist_file=None,
                black_regex=None,
                no_discover=False,
                random=False,
                combine=False,
                filters=None,
                pretty_out=True,
                color=False,
                stdout=sys.stdout,
                abbreviate=False,
                suppress_attachments=False):
    """Function to execute the run command

    This function implements the run command. It will run the tests specified
    by the provided config file and/or the given arguments. The results will be
    printed to STDOUT and loaded into the repository.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set this value will be used.
    :param bool failing: Run only tests known to be failing.
    :param bool serial: Run tests serially
    :param int concurrency: How many processes to use. The default (0)
        autodetects your CPU count and uses that.
    :param str load_list: The path to a list of test_ids. If specified only
        tests listed in the named file will be run.
    :param bool partial: DEPRECATED: Only some tests will be run. Implied by
        `--failing`. This flag is deprecated and no longer does anything; it
        will be removed in a future release.
    :param bool subunit_out: Display results in subunit format.
    :param bool until_failure: Repeat the run again and again until failure
        occurs.
    :param bool analyze_isolation: Search the last test run for 2-test test
        isolation interactions.
    :param bool isolated: Run each test id in a separate test runner.
    :param str worker_path: Optional path of a manual worker grouping file
        to use for the run.
    :param str blacklist_file: Path to a blacklist file, this file contains a
        separate regex exclude on each newline.
    :param str whitelist_file: Path to a whitelist file, this file contains a
        separate regex on each newline.
    :param str black_regex: Test rejection regex. If a test case's name matches
        in a re.search() operation, it will be removed from the final test list.
    :param str no_discover: Takes a single test_id to bypass test discovery
        and just execute the specified test. A file name may be used in place
        of a test name.
    :param bool random: Randomize the test order after they are partitioned
        into separate workers
    :param bool combine: Combine the results from the test run with the
        last run in the repository
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param bool pretty_out: Use the subunit-trace output filter
    :param bool color: Enable colorized output in subunit-trace
    :param file stdout: The file object to write all output to. By default this
        is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    if partial:
        warnings.warn('The partial flag is deprecated and has no effect '
                      'anymore')
    try:
        repo = util.get_repo_open(repo_type, repo_url)
    # If a repo is not found and a testr config exists, just create it
    except repository.RepositoryNotFound:
        if not os.path.isfile(config) and not test_path:
            msg = ("No config file found and --test-path not specified. "
                   "Either create or specify a .stestr.conf or use "
                   "--test-path ")
            stdout.write(msg)
            exit(1)
        repo = util.get_repo_initialise(repo_type, repo_url)
    combine_id = None
    if combine:
        latest_id = repo.latest_id()
        combine_id = six.text_type(latest_id)
    if no_discover:
        ids = no_discover
        if '::' in ids:
            ids = ids.replace('::', '.')
        if ids.find('/') != -1:
            root = ids.replace('.py', '')
            ids = root.replace('/', '.')
        run_cmd = 'python -m subunit.run ' + ids

        def run_tests():
            run_proc = [('subunit',
                         output.ReturnCodeToSubunit(
                             subprocess.Popen(run_cmd,
                                              shell=True,
                                              stdout=subprocess.PIPE)))]
            return load.load(in_streams=run_proc,
                             subunit_out=subunit_out,
                             repo_type=repo_type,
                             repo_url=repo_url,
                             run_id=combine_id,
                             pretty_out=pretty_out,
                             color=color,
                             stdout=stdout,
                             abbreviate=abbreviate,
                             suppress_attachments=suppress_attachments)

        if not until_failure:
            return run_tests()
        else:
            while True:
                result = run_tests()
                # If we're using subunit output we want to make sure to check
                # the result from the repository because load() returns 0
                # always on subunit output
                if subunit_out:
                    summary = testtools.StreamSummary()
                    last_run = repo.get_latest_run().get_subunit_stream()
                    stream = subunit.ByteStreamToStreamResult(last_run)
                    summary.startTestRun()
                    try:
                        stream.run(summary)
                    finally:
                        summary.stopTestRun()
                    if not results.wasSuccessful(summary):
                        result = 1
                if result:
                    return result

    if failing or analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if load_list:
        list_ids = set()
        # Should perhaps be text.. currently does its own decode.
        with open(load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    conf = config_file.TestrConf(config)
    if not analyze_isolation:
        cmd = conf.get_run_command(ids,
                                   regexes=filters,
                                   group_regex=group_regex,
                                   repo_type=repo_type,
                                   repo_url=repo_url,
                                   serial=serial,
                                   worker_path=worker_path,
                                   concurrency=concurrency,
                                   blacklist_file=blacklist_file,
                                   whitelist_file=whitelist_file,
                                   black_regex=black_regex,
                                   top_dir=top_dir,
                                   test_path=test_path,
                                   randomize=random)
        if isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command([test_id],
                                           filters,
                                           group_regex=group_regex,
                                           repo_type=repo_type,
                                           repo_url=repo_url,
                                           serial=serial,
                                           worker_path=worker_path,
                                           concurrency=concurrency,
                                           blacklist_file=blacklist_file,
                                           whitelist_file=whitelist_file,
                                           black_regex=black_regex,
                                           randomize=random,
                                           test_path=test_path,
                                           top_dir=top_dir)

                run_result = _run_tests(
                    cmd,
                    until_failure,
                    subunit_out=subunit_out,
                    combine_id=combine_id,
                    repo_type=repo_type,
                    repo_url=repo_url,
                    pretty_out=pretty_out,
                    color=color,
                    abbreviate=abbreviate,
                    stdout=stdout,
                    suppress_attachments=suppress_attachments)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd,
                              until_failure,
                              subunit_out=subunit_out,
                              combine_id=combine_id,
                              repo_type=repo_type,
                              repo_url=repo_url,
                              pretty_out=pretty_out,
                              color=color,
                              stdout=stdout,
                              abbreviate=abbreviate,
                              suppress_attachments=suppress_attachments)
    else:
        # Where do we source data about the cause of conflicts.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command([test_id],
                                       group_regex=group_regex,
                                       repo_type=repo_type,
                                       repo_url=repo_url,
                                       serial=serial,
                                       worker_path=worker_path,
                                       concurrency=concurrency,
                                       blacklist_file=blacklist_file,
                                       whitelist_file=whitelist_file,
                                       black_regex=black_regex,
                                       randomize=random,
                                       test_path=test_path,
                                       top_dir=top_dir)
            if not _run_tests(cmd, until_failure):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly, why not just tell the system that
                # a pass here isn't a real pass? [so that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing.
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        bisect_runner = bisect_tests.IsolationAnalyzer(latest_run,
                                                       conf,
                                                       _run_tests,
                                                       repo,
                                                       test_path=test_path,
                                                       top_dir=top_dir,
                                                       group_regex=group_regex,
                                                       repo_type=repo_type,
                                                       repo_url=repo_url,
                                                       serial=serial,
                                                       concurrency=concurrency)
        # spurious-failure -> cause.
        return bisect_runner.bisect_tests(spurious_failures)
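
The ``no_discover`` branches above normalise a pytest-style or path-style test identifier into the dotted form that ``subunit.run`` expects. The same transformation in isolation:

def normalize_test_id(ids):
    """Turn 'path/to/test_mod.py::TestClass::test_m' into a dotted id."""
    if '::' in ids:
        ids = ids.replace('::', '.')
    if '/' in ids:
        ids = ids.replace('.py', '').replace('/', '.')
    return ids


print(normalize_test_id('tests/unit/test_foo.py::TestFoo::test_bar'))
# tests.unit.test_foo.TestFoo.test_bar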
Example No. 13
def history_show(run_id,
                 repo_url=None,
                 subunit_out=False,
                 pretty_out=True,
                 color=False,
                 stdout=sys.stdout,
                 suppress_attachments=False,
                 all_attachments=False,
                 show_binary_attachments=False):
    """Show a run loaded into a repository

    This function will print the results for the specified run (or the latest
    run if ``run_id`` is not set) to STDOUT. It can optionally print the
    subunit stream for that run instead, if ``subunit_out`` is set to true.

    Note this function depends on the cwd for the repository: if `repo_url` is
    not specified, it will use the repository located at CWD/.stestr

    :param str run_id: The run id to show
    :param str repo_url: The url of the repository to use.
    :param bool subunit_out: Show output as a subunit stream.
    :param bool pretty_out: Use the subunit-trace output filter.
    :param bool color: Enable colorized output with the subunit-trace output
        filter.
    :param file stdout: The output file to write all output to. By default
         this is sys.stdout
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.
    :param bool all_attachments: When set true, subunit_trace will print all
        text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    except abstract.RepositoryNotFound as e:
        stdout.write(str(e) + '\n')
        return 1
    try:
        if run_id:
            run = repo.get_test_run(run_id)
        else:
            run = repo.get_latest_run()
    except KeyError as e:
        stdout.write(str(e) + '\n')
        return 1

    if subunit_out:
        stream = run.get_subunit_stream()
        output.output_stream(stream, output=stdout)
        # Exits 0 if we successfully wrote the stream.
        return 0
    case = run.get_test()
    try:
        if run_id:
            previous_run = repo.get_test_run(int(run_id) - 1)
        else:
            previous_run = repo.get_test_run(repo.latest_id() - 1)
    except KeyError:
        previous_run = None
    failed = False
    if not pretty_out:
        output_result = results.CLITestResult(run.get_id, stdout, previous_run)
        summary = output_result.get_summary()
        output_result.startTestRun()
        try:
            case.run(output_result)
        finally:
            output_result.stopTestRun()
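
A usage sketch for ``history_show`` (assumed import path), printing one specific run from the repository in the current working directory:

import sys

from stestr.commands import history_show  # assumed import path

ret = history_show('2', pretty_out=True, stdout=sys.stdout)
sys.exit(ret)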
Example No. 14
def last(repo_type='file', repo_url=None, subunit_out=False, pretty_out=True,
         color=False, stdout=sys.stdout, suppress_attachments=False):
    """Show the last run loaded into a a repository

    This function will print the results from the last run in the repository
    to STDOUT. It can optionally print the subunit stream for the last run
    to STDOUT if the ``subunit_out`` option is set to true.

    Note this function depends on the cwd for the repository: if `repo_type` is
    set to file and `repo_url` is not specified, it will use the repository
    located at CWD/.stestr

    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param bool subunit_out: Show output as a subunit stream.
    :param bool pretty_out: Use the subunit-trace output filter.
    :param bool color: Enable colorized output with the subunit-trace output
        filter.
    :param file stdout: The output file to write all output to. By default
         this is sys.stdout
    :param bool suppress_attachments: When set true, subunit_trace will not
        print attachments on successful test execution.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_type, repo_url)
    except abstract.RepositoryNotFound as e:
        stdout.write(str(e) + '\n')
        return 1

    try:
        latest_run = repo.get_latest_run()
    except KeyError as e:
        stdout.write(str(e) + '\n')
        return 1

    if subunit_out:
        stream = latest_run.get_subunit_stream()
        output.output_stream(stream, output=stdout)
        # Exits 0 if we successfully wrote the stream.
        return 0
    case = latest_run.get_test()
    try:
        if repo_type == 'file':
            previous_run = repo.get_test_run(repo.latest_id() - 1)
        # TODO(mtreinish): add a repository api to get the previous_run to
        # unify this logic
        else:
            previous_run = None
    except KeyError:
        previous_run = None
    failed = False
    if not pretty_out:
        output_result = results.CLITestResult(latest_run.get_id, stdout,
                                              previous_run)
        summary = output_result.get_summary()
        output_result.startTestRun()
        try:
            case.run(output_result)
        finally:
            output_result.stopTestRun()
Example No. 15
    def get_run_command(self,
                        test_ids=None,
                        regexes=None,
                        test_path=None,
                        top_dir=None,
                        group_regex=None,
                        repo_type='file',
                        repo_url=None,
                        serial=False,
                        worker_path=None,
                        concurrency=0,
                        blacklist_file=None,
                        whitelist_file=None,
                        black_regex=None,
                        randomize=False,
                        parallel_class=None):
        """Get a test_processor.TestProcessorFixture for this config file

        Any parameters about running tests will be used to initialize the
        output fixture so the settings are correct when that fixture is used
        to run tests. Parameters will take precedence over values in the config
        file.

        :param list test_ids: an optional list of test_ids to use when running
            tests
        :param list regexes: an optional list of regex strings to use for
            filtering the tests to run. See the test_filters parameter in
            TestProcessorFixture to see how this is used.
        :param str test_path: Set the test path to use for unittest discovery.
            If both this and the corresponding config file option are set, this
            value will be used.
        :param str top_dir: The top dir to use for unittest discovery. This
            takes precedence over the value in the config file. (if one is
            present in the config file)
        :param str group_regex: Set a group regex to use for grouping tests
            together in the stestr scheduler. If both this and the
            corresponding config file option are set this value will be used.
        :param str repo_type: This is the type of repository to use. Valid
            choices are 'file' and 'sql'.
        :param str repo_url: The url of the repository to use.
        :param bool serial: If tests are run from the returned fixture, they
            will be run serially
        :param str worker_path: Optional path of a manual worker grouping file
            to use for the run.
        :param int concurrency: How many processes to use. The default (0)
            autodetects your CPU count and uses that.
        :param str blacklist_file: Path to a blacklist file, this file contains
            a separate regex exclude on each newline.
        :param str whitelist_file: Path to a whitelist file, this file contains
            a separate regex on each newline.
        :param str black_regex: Test rejection regex. If a test case's name
            matches in a re.search() operation, it will be removed from the
            final test list.
        :param bool randomize: Randomize the test order after they are
            partitioned into separate workers
        :param bool parallel_class: Set the flag to group tests together in the
            stestr scheduler by class. If both this and the corresponding
            config file option which includes `group-regex` are set, this value
            will be used.

        :returns: a TestProcessorFixture object for the specified config file
            and any arguments passed into this function
        :rtype: test_processor.TestProcessorFixture
        """

        if not test_path and self.parser.has_option('DEFAULT', 'test_path'):
            test_path = self.parser.get('DEFAULT', 'test_path')
        elif not test_path:
            sys.exit("No test_path can be found in either the command line "
                     "options nor in the specified config file {0}.  Please "
                     "specify a test path either in the config file or via "
                     "the --test-path argument".format(self.config_file))
        if not top_dir and self.parser.has_option('DEFAULT', 'top_dir'):
            top_dir = self.parser.get('DEFAULT', 'top_dir')
        elif not top_dir:
            top_dir = './'

        stestr_python = sys.executable
        # let's try to be explicit, even if it means a longer set of ifs
        if sys.platform == 'win32':
            # it may happen, albeit rarely
            if not stestr_python:
                raise RuntimeError("The Python interpreter was not found")
            python = stestr_python
        else:
            if os.environ.get('PYTHON'):
                python = '${PYTHON}'
            elif stestr_python:
                python = stestr_python
            else:
                raise RuntimeError("The Python interpreter was not found and "
                                   "PYTHON is not set")

        command = '%s -m subunit.run discover -t "%s" "%s" ' \
                  '$LISTOPT $IDOPTION' % (python, top_dir, test_path)
        listopt = "--list"
        idoption = "--load-list $IDFILE"
        # $IDOPTION in the command above is replaced with the --load-list
        # option when a list of test ids is passed to the fixture.
        # Use a group regex if one is defined.
        if parallel_class:
            group_regex = r'([^\.]*\.)*'
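            # e.g. this maps 'pkg.module.TestClass.test_a' to the group key
            # 'pkg.module.TestClass.', so tests from the same class are
            # scheduled on the same worker.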
        if not group_regex \
                and self.parser.has_option('DEFAULT', 'parallel_class') \
                and self.parser.getboolean('DEFAULT', 'parallel_class'):
            group_regex = r'([^\.]*\.)*'
        if not group_regex and self.parser.has_option('DEFAULT',
                                                      'group_regex'):
            group_regex = self.parser.get('DEFAULT', 'group_regex')
        if group_regex:

            def group_callback(test_id, regex=re.compile(group_regex)):
                match = regex.match(test_id)
                if match:
                    return match.group(0)
        else:
            group_callback = None
        # Handle the results repository
        repository = util.get_repo_open(repo_type, repo_url)
        return test_processor.TestProcessorFixture(
            test_ids,
            command,
            listopt,
            idoption,
            repository,
            test_filters=regexes,
            group_callback=group_callback,
            serial=serial,
            worker_path=worker_path,
            concurrency=concurrency,
            blacklist_file=blacklist_file,
            black_regex=black_regex,
            whitelist_file=whitelist_file,
            randomize=randomize)
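
A minimal usage sketch (not from the stestr source) for the signature shown
above. It assumes an existing repository and a .stestr.conf file in the
current directory, and only uses fixture methods that appear in the other
examples here (setUp, list_tests, cleanUp):

from stestr import config_file

conf = config_file.TestrConf('.stestr.conf')
cmd = conf.get_run_command(regexes=['smoke'], concurrency=2,
                           parallel_class=True)
cmd.setUp()
try:
    # $LISTOPT is expanded so the discovered test ids can be inspected or
    # partitioned before a real run.
    test_ids = cmd.list_tests()
finally:
    cmd.cleanUp()
print('%d tests matched' % len(test_ids))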
Example No. 16
0
def run(arguments):
    args = arguments[0]
    filters = arguments[1] or None
    try:
        repo = util.get_repo_open(args.repo_type, args.repo_url)
    # If a repo is not found and a config file exists, just create the repo.
    except repository.RepositoryNotFound:
        if not os.path.isfile(args.config):
            raise
        repo = util.get_repo_initialise(args.repo_type, args.repo_url)
    if args.no_discover:
        ids = args.no_discover
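        # A test file path (e.g. 'stestr/tests/test_foo.py') is converted
        # below into a dotted module name ('stestr.tests.test_foo') so it
        # can be passed to ``python -m subunit.run``.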
        if ids.find('/') != -1:
            root, _ = os.path.splitext(ids)
            ids = root.replace('/', '.')
        run_cmd = 'python -m subunit.run ' + ids

        def run_tests():
            run_proc = [('subunit', output.ReturnCodeToSubunit(
                subprocess.Popen(run_cmd, shell=True,
                                 stdout=subprocess.PIPE)))]
            return load.load((None, None), in_streams=run_proc,
                             partial=args.partial, subunit_out=args.subunit,
                             repo_type=args.repo_type,
                             repo_url=args.repo_url)

        if not args.until_failure:
            return run_tests()
        else:
            result = run_tests()
            while not result:
                result = run_tests()
            return result

    if args.failing or args.analyze_isolation:
        ids = _find_failing(repo)
    else:
        ids = None
    if args.load_list:
        list_ids = set()
        # Should perhaps be text; currently parse_list does its own decode.
        with open(args.load_list, 'rb') as list_file:
            list_ids = set(parse_list(list_file.read()))
        if ids is None:
            # Use the supplied list verbatim
            ids = list_ids
        else:
            # We have some already limited set of ids, just reduce to ids
            # that are both failing and listed.
            ids = list_ids.intersection(ids)

    conf = config_file.TestrConf(args.config)
    if not args.analyze_isolation:
        cmd = conf.get_run_command(args, ids, filters)
        if args.isolated:
            result = 0
            cmd.setUp()
            try:
                ids = cmd.list_tests()
            finally:
                cmd.cleanUp()
            for test_id in ids:
                # TODO(mtreinish): add regex
                cmd = conf.get_run_command(args, [test_id], filters)
                run_result = _run_tests(cmd, args.failing,
                                        args.analyze_isolation,
                                        args.isolated,
                                        args.until_failure,
                                        subunit_out=args.subunit)
                if run_result > result:
                    result = run_result
            return result
        else:
            return _run_tests(cmd, args.failing, args.analyze_isolation,
                              args.isolated, args.until_failure,
                              subunit_out=args.subunit)
    else:
        # Where do we source data about the cause of conflicts.
        # XXX: Should instead capture the run id in with the failing test
        # data so that we can deal with failures split across many partial
        # runs.
        latest_run = repo.get_latest_run()
        # Stage one: reduce the list of failing tests (possibly further
        # reduced by testfilters) to eliminate fails-on-own tests.
        spurious_failures = set()
        for test_id in ids:
            # TODO(mtreinish): Add regex
            cmd = conf.get_run_command(args, [test_id])
            if not _run_tests(cmd):
                # If the test was filtered, it won't have been run.
                if test_id in repo.get_test_ids(repo.latest_id()):
                    spurious_failures.add(test_id)
                # This is arguably ugly: why not just tell the system that
                # a pass here isn't a real pass, so that when we find a
                # test that is spuriously failing, we don't forget
                # that it is actually failing?
                # Alternatively, perhaps this is a case for data mining:
                # when a test starts passing, keep a journal, and allow
                # digging back in time to see that it was a failure,
                # what it failed with etc...
                # The current solution is to just let it get marked as
                # a pass temporarily.
        if not spurious_failures:
            # All done.
            return 0
        # spurious-failure -> cause.
        test_conflicts = {}
        for spurious_failure in spurious_failures:
            candidate_causes = _prior_tests(
                latest_run, spurious_failure)
            bottom = 0
            top = len(candidate_causes)
            width = top - bottom
            while width:
                check_width = int(ceil(width / 2.0))
                # TODO(mtreinish): Add regex
                cmd = conf.get_run_command(
                    args,
                    candidate_causes[bottom:bottom + check_width]
                    + [spurious_failure])
                _run_tests(cmd)
                # check that the test we're probing still failed - still
                # awkward.
                found_fail = []

                def find_fail(test_dict):
                    if test_dict['id'] == spurious_failure:
                        found_fail.append(True)

                checker = testtools.StreamToDict(find_fail)
                checker.startTestRun()
                try:
                    repo.get_failing().get_test().run(checker)
                finally:
                    checker.stopTestRun()
                if found_fail:
                    # Our conflict is in bottom - clamp the range down.
                    top = bottom + check_width
                    if width == 1:
                        # found the cause
                        test_conflicts[
                            spurious_failure] = candidate_causes[bottom]
                        width = 0
                    else:
                        width = top - bottom
                else:
                    # Conflict in the range we did not run: discard bottom.
                    bottom = bottom + check_width
                    if width == 1:
                        # there will be no more to check, so we didn't
                        # reproduce the failure.
                        width = 0
                    else:
                        width = top - bottom
            if spurious_failure not in test_conflicts:
                # Could not determine cause
                test_conflicts[spurious_failure] = 'unknown - no conflicts'
        if test_conflicts:
            table = [('failing test', 'caused by test')]
            for failure, causes in test_conflicts.items():
                table.append((failure, causes))
            output.output_table(table)
            return 3
        return 0
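
The analyze_isolation branch above narrows the list of tests that ran before
a spuriously failing test by bisection until a single conflicting test
remains. An illustrative standalone sketch of that bisection (not stestr
code; fails_with stands in for re-running a candidate slice together with
the failing test and checking whether it still fails):

from math import ceil


def find_conflicting_test(candidate_causes, fails_with):
    """Return the candidate that makes the probed test fail, or None."""
    bottom, top = 0, len(candidate_causes)
    while top - bottom > 1:
        check_width = int(ceil((top - bottom) / 2.0))
        if fails_with(candidate_causes[bottom:bottom + check_width]):
            # The failure reproduced: the culprit is inside this slice.
            top = bottom + check_width
        else:
            # The failure did not reproduce: discard the slice we just ran.
            bottom += check_width
    if top - bottom == 1 and fails_with(candidate_causes[bottom:top]):
        return candidate_causes[bottom]
    return None


# 'c' poisons the probed test whenever it is scheduled alongside it.
print(find_conflicting_test(['a', 'b', 'c', 'd'],
                            lambda tests: 'c' in tests))  # prints: c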
Example No. 17
0
def run(args):
    repo = util.get_repo_open(args[0].repo_type, args[0].repo_url)
    sys.stdout.write('%s=%s\n' % ('runs', repo.count()))
    return 0
Example No. 18
0
def load(force_init=False,
         in_streams=None,
         subunit_out=False,
         repo_url=None,
         run_id=None,
         streams=None,
         pretty_out=False,
         color=False,
         stdout=sys.stdout,
         abbreviate=False,
         suppress_attachments=False,
         serial=False,
         all_attachments=False,
         show_binary_attachments=False):
    """Load subunit streams into a repository

    This function will load subunit streams into the repository. It will
    output the results from the input stream to STDOUT. Internally this is
    used by the run command both to output the results and to store them
    in the repository.

    :param bool force_init: Initialize the specified repository if it hasn't
        been created.
    :param list in_streams: A list of file objects that will be saved into the
        repository
    :param bool subunit_out: Output the subunit stream to stdout
    :param str repo_url: The url of the repository to use.
    :param run_id: The optional run id to save the subunit stream to.
    :param list streams: A list of file paths to read for the input streams.
    :param bool pretty_out: Use the subunit-trace output filter for the loaded
        stream.
    :param bool color: Enable colorized subunit-trace output
    :param file stdout: The output file to write all output to. By default
        this is sys.stdout
    :param bool abbreviate: Use abbreviated output if set true
    :param bool suppress_attachments: When set to true, subunit_trace will
        not print attachments on successful test execution.
    :param bool all_attachments: When set to true, subunit_trace will print
        all text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.

    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    except repository.RepositoryNotFound:
        if force_init:
            try:
                repo = util.get_repo_initialise(repo_url=repo_url)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
                repo_path = repo_url or './stestr'
                stdout.write('The specified repository directory %s already '
                             'exists. Please check if the repository already '
                             'exists or select a different path\n' % repo_path)
                exit(1)
        else:
            raise
    # Not a full implementation of TestCase, but we only need to iterate
    # back to it. Needs to be a callable - it's a head fake for
    # testsuite.add.
    if in_streams:
        streams = utils.iter_streams(in_streams, 'subunit')
    elif streams:
        opener = functools.partial(open, mode='rb')
        streams = map(opener, streams)
    else:
        streams = [sys.stdin]

    def mktagger(pos, result):
        return testtools.StreamTagger([result], add=['worker-%d' % pos])

    def make_tests():
        for pos, stream in enumerate(streams):
            # Calls StreamResult API.
            case = subunit.ByteStreamToStreamResult(stream,
                                                    non_subunit_name='stdout')
            decorate = functools.partial(mktagger, pos)
            case = testtools.DecorateTestCaseResult(case, decorate)
            yield (case, str(pos))