def run(args):
    _args = args[0]
    ids = None
    filters = None
    if args[1]:
        filters = args[1]
    conf = config_file.TestrConf(_args.config)
    cmd = conf.get_run_command(_args, ids, filters)
    not_filtered = filters is None and _args.blacklist_file is None\
        and _args.whitelist_file is None and _args.black_regex is None
    try:
        cmd.setUp()
        # List tests if the fixture has not already needed to filter.
        if not_filtered:
            ids = cmd.list_tests()
        else:
            ids = cmd.test_ids
        stream = BytesIO()
        for id in ids:
            stream.write(('%s\n' % id).encode('utf8'))
        stream.seek(0)
        output.output_stream(stream)
        return 0
    finally:
        cmd.cleanUp()
def list_command(config='.stestr.conf', repo_type='file', repo_url=None,
                 test_path=None, top_dir=None, group_regex=None,
                 blacklist_file=None, whitelist_file=None, black_regex=None,
                 filters=None, stdout=sys.stdout):
    """Print a list of test_ids for a project

    This function will print the test_ids for tests in a project. You can
    filter the output just like with the run command to see exactly what
    will be run.

    :param str config: The path to the stestr config file. Must be a string.
    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param str test_path: Set the test path to use for unittest discovery.
        If both this and the corresponding config file option are set, this
        value will be used.
    :param str top_dir: The top dir to use for unittest discovery. This takes
        precedence over the value in the config file. (if one is present in
        the config file)
    :param str group_regex: Set a group regex to use for grouping tests
        together in the stestr scheduler. If both this and the corresponding
        config file option are set, this value will be used.
    :param str blacklist_file: Path to a blacklist file; this file contains a
        separate regex exclude on each newline.
    :param str whitelist_file: Path to a whitelist file; this file contains a
        separate regex on each newline.
    :param str black_regex: Test rejection regex. If a test case's name
        matches on a re.search() operation, it will be removed from the final
        test list.
    :param list filters: A list of string regex filters to initially apply on
        the test list. Tests that match any of the regexes will be used.
        (assuming any other filtering specified also uses it)
    :param file stdout: The output file to write all output to. By default
        this is sys.stdout
    """
    ids = None
    conf = config_file.TestrConf(config)
    cmd = conf.get_run_command(
        regexes=filters, repo_type=repo_type, repo_url=repo_url,
        group_regex=group_regex, blacklist_file=blacklist_file,
        whitelist_file=whitelist_file, black_regex=black_regex,
        test_path=test_path, top_dir=top_dir)
    not_filtered = filters is None and blacklist_file is None\
        and whitelist_file is None and black_regex is None
    try:
        cmd.setUp()
        # List tests if the fixture has not already needed to filter.
        if not_filtered:
            ids = cmd.list_tests()
        else:
            ids = cmd.test_ids
        stream = BytesIO()
        for id in ids:
            stream.write(('%s\n' % id).encode('utf8'))
        stream.seek(0)
        output.output_stream(stream, output=stdout)
        return 0
    finally:
        cmd.cleanUp()
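# A minimal usage sketch for calling ``list_command`` from Python (illustrative
# only; it assumes the current working directory contains a valid
# ``.stestr.conf`` pointing at the project's tests):
#
#     # Print every test id the project's config discovers, mirroring
#     # ``stestr list`` on the command line:
#     list_command()
#
#     # Print only the test ids matching a regex, mirroring
#     # ``stestr list <regex>``:
#     list_command(filters=['compute'])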
def run(arguments):
    args = arguments[0]
    repo = util.get_repo_open(args.repo_type, args.repo_url)
    latest_run = repo.get_latest_run()
    if args.subunit:
        stream = latest_run.get_subunit_stream()
        output.output_stream(stream)
        # Exits 0 if we successfully wrote the stream.
        return 0
    case = latest_run.get_test()
    try:
        if args.repo_type == 'file':
            previous_run = repo.get_test_run(repo.latest_id() - 1)
            # TODO(mtreinish): add a repository api to get the previous_run
            # to unify this logic
        else:
            previous_run = None
    except KeyError:
        previous_run = None
    failed = False
    output_result = results.CLITestResult(latest_run.get_id, sys.stdout,
                                          previous_run)
    summary = output_result.get_summary()
    output_result.startTestRun()
    try:
        case.run(output_result)
def last(repo_type='file', repo_url=None, subunit_out=False, pretty_out=True,
         color=False, stdout=sys.stdout, suppress_attachments=False):
    """Show the last run loaded into a repository

    This function will print the results from the last run in the repository
    to STDOUT. It can optionally print the subunit stream for the last run
    to STDOUT if the ``subunit_out`` option is set to true.

    Note this function depends on the cwd for the repository: if `repo_type`
    is set to 'file' and `repo_url` is not specified, it will use the
    repository located at CWD/.stestr

    :param str repo_type: This is the type of repository to use. Valid choices
        are 'file' and 'sql'.
    :param str repo_url: The url of the repository to use.
    :param bool subunit_out: Show output as a subunit stream.
    :param pretty_out: Use the subunit-trace output filter.
    :param color: Enable colorized output with the subunit-trace output
        filter.
    :param file stdout: The output file to write all output to. By default
        this is sys.stdout
    :param bool suppress_attachments: When set to true, subunit_trace will not
        print attachments on successful test execution.
    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_type, repo_url)
    except abstract.RepositoryNotFound as e:
        stdout.write(str(e) + '\n')
        return 1
    try:
        latest_run = repo.get_latest_run()
    except KeyError as e:
        stdout.write(str(e) + '\n')
        return 1
    if subunit_out:
        stream = latest_run.get_subunit_stream()
        output.output_stream(stream, output=stdout)
        # Exits 0 if we successfully wrote the stream.
        return 0
    case = latest_run.get_test()
    try:
        if repo_type == 'file':
            previous_run = repo.get_test_run(repo.latest_id() - 1)
            # TODO(mtreinish): add a repository api to get the previous_run
            # to unify this logic
        else:
            previous_run = None
    except KeyError:
        previous_run = None
    failed = False
    if not pretty_out:
        output_result = results.CLITestResult(latest_run.get_id, stdout,
                                              previous_run)
        summary = output_result.get_summary()
        output_result.startTestRun()
        try:
            case.run(output_result)
        finally:
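# Illustrative sketch of driving ``last`` directly from Python (assumes a
# populated ``.stestr`` repository in the current working directory, i.e. at
# least one ``stestr run`` has completed there):
#
#     # Replay the most recent run through the subunit-trace pretty printer:
#     exit_code = last()
#
#     # Or emit the raw subunit stream for the same run, e.g. for piping
#     # into another subunit consumer:
#     last(subunit_out=True)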
def history_show(run_id, repo_url=None, subunit_out=False, pretty_out=True,
                 color=False, stdout=sys.stdout, suppress_attachments=False,
                 all_attachments=False, show_binary_attachments=False):
    """Show a run loaded into a repository

    This function will print the results from the specified run (or the
    latest run, if ``run_id`` is not provided) in the repository to STDOUT.
    It can optionally print the subunit stream for that run to STDOUT if the
    ``subunit_out`` option is set to true.

    Note this function depends on the cwd for the repository: if `repo_url`
    is not specified, it will use the repository located at CWD/.stestr

    :param str run_id: The run id to show
    :param str repo_url: The url of the repository to use.
    :param bool subunit_out: Show output as a subunit stream.
    :param pretty_out: Use the subunit-trace output filter.
    :param color: Enable colorized output with the subunit-trace output
        filter.
    :param file stdout: The output file to write all output to. By default
        this is sys.stdout
    :param bool suppress_attachments: When set to true, subunit_trace will not
        print attachments on successful test execution.
    :param bool all_attachments: When set to true, subunit_trace will print
        all text attachments on successful test execution.
    :param bool show_binary_attachments: When set to true, subunit_trace will
        print binary attachments in addition to text attachments.
    :return return_code: The exit code for the command. 0 for success and > 0
        for failures.
    :rtype: int
    """
    try:
        repo = util.get_repo_open(repo_url=repo_url)
    except abstract.RepositoryNotFound as e:
        stdout.write(str(e) + '\n')
        return 1
    try:
        if run_id:
            run = repo.get_test_run(run_id)
        else:
            run = repo.get_latest_run()
    except KeyError as e:
        stdout.write(str(e) + '\n')
        return 1
    if subunit_out:
        stream = run.get_subunit_stream()
        output.output_stream(stream, output=stdout)
        # Exits 0 if we successfully wrote the stream.
        return 0
    case = run.get_test()
    try:
        if run_id:
            previous_run = int(run_id) - 1
        else:
            previous_run = repo.get_test_run(repo.latest_id() - 1)
    except KeyError:
        previous_run = None
    failed = False
    if not pretty_out:
        output_result = results.CLITestResult(run.get_id, stdout,
                                              previous_run)
        summary = output_result.get_summary()
        output_result.startTestRun()
        try:
            case.run(output_result)
        finally:
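# Illustrative sketch of ``history_show`` (assumes the ``.stestr`` repository
# already holds the referenced runs; the run ids are assumed to be the same
# strings that ``stestr history list`` reports):
#
#     # Show one specific historical run with pretty output:
#     history_show('1')
#
#     # A falsy run_id falls back to the most recent run, like ``last``:
#     history_show(None)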