class RefguideCheck(Task):
    """:wrench: Run refguide check"""
    ctx = CONTEXT

    submodule = Option(
        ['--submodule', '-s'], default=None, metavar='SUBMODULE',
        help="Submodule whose tests to run (cluster, constants, ...)")
    verbose = Option(
        ['--verbose', '-v'], default=False, is_flag=True, help="verbosity")

    @classmethod
    def task_meta(cls, **kwargs):
        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        args = Args(**kwargs)
        dirs = Dirs(args)

        cmd = [str(dirs.root / 'tools' / 'refguide_check.py'), '--doctests']
        if args.verbose:
            cmd += ['-vvv']
        if args.submodule:
            cmd += [args.submodule]
        cmd_str = ' '.join(cmd)
        return {
            'actions': [f'env PYTHONPATH={dirs.site} {cmd_str}'],
            'task_dep': ['build'],
            'io': {'capture': False},
        }
def decorator(f: Command) -> Command:
    f.params.append(
        Argument(("yamlfile", ),
                 type=click.Path(exists=True, dir_okay=False)))
    f.params.append(
        Option(("--format", "-f"), type=click.Choice(g.valid_formats),
               help="Output format", default=g.valid_formats[0]))
    f.params.append(
        Option(("--metadata/--no-metadata", ), default=True,
               help="Include metadata in output"))
    f.params.append(
        Option(("--useuris/--metauris", ), default=True,
               help="Use class and slot URIs over model URIs"))
    f.params.append(
        Option(("--importmap", "-im"), type=click.File(),
               help="Import mapping file"))
    f.params.append(
        Option(("--log_level", ), type=click.Choice(_LOG_LEVEL_STRINGS),
               help=f"Logging level (default={DEFAULT_LOG_LEVEL})",
               default=DEFAULT_LOG_LEVEL))
    return f
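
# A hedged sketch of how a param-appending decorator like the one above is
# applied: it must receive an already-constructed click Command, so it goes
# *above* @click.command() in the decorator stack. `shared_params` and the
# `generate` command here are hypothetical stand-ins.
import click
from click import Command, Option

def shared_params(f: Command) -> Command:
    f.params.append(
        Option(("--verbose", "-v"), is_flag=True, help="Verbose output"))
    return f

@shared_params
@click.command()
def generate(verbose):
    click.echo(f"verbose={verbose}")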
def test_command_multi_registration(basicApp):
    def _test_command(arg):
        print(arg)

    plugin = basicApp.plugins.get("CommandPlugin")
    with pytest.raises(CommandExistException):
        plugin.commands.register("test", "my test command", _test_command,
                                 params=[Option(("--arg", "-a"))])
    plugin.commands.unregister("test")
    plugin.commands.register("test", "my test command", _test_command,
                             params=[Option(("--arg", "-a"))])
    assert len(basicApp.commands.get()) == 1
    plugin.commands.register("test2", "my test2 command", _test_command,
                             params=[Option(("--arg", "-a"))])
    assert len(basicApp.commands.get()) == 2

    basicApp.plugins.deactivate(["CommandPlugin"])
    print(basicApp.commands.get().keys())
    assert len(basicApp.commands.get().keys()) == 0
class Doc(Task):
    """:wrench: Build documentation

    TARGETS: Sphinx build targets [default: 'html-scipyorg']
    """
    ctx = CONTEXT

    args = Argument(['args'], nargs=-1, metavar='TARGETS', required=False)
    list_targets = Option(
        ['--list-targets', '-t'], default=False, is_flag=True,
        help='List doc targets',
    )
    parallel = Option(
        ['--parallel', '-j'], default=1, metavar='PARALLEL',
        help="Number of parallel jobs")

    @classmethod
    def task_meta(cls, list_targets, parallel, args, **kwargs):
        if list_targets:
            # list MAKE targets, remove default target
            task_dep = []
            targets = ''
        else:
            task_dep = ['build']
            targets = ' '.join(args) if args else 'html-scipyorg'

        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        build_args = Args(**kwargs)
        dirs = Dirs(build_args)

        make_params = [f'PYTHON="{sys.executable}"']
        if parallel:
            make_params.append(f'SPHINXOPTS="-j{parallel}"')

        return {
            'actions': [
                # move to doc/ so local scipy does not get imported
                (f'cd doc; env PYTHONPATH="{dirs.site}" '
                 f'make {" ".join(make_params)} {targets}'),
            ],
            'task_dep': task_dep,
            'io': {'capture': False},
        }
class Python():
    """:wrench: Start a Python shell with PYTHONPATH set"""
    ctx = CONTEXT

    pythonpath = Option(
        ['--pythonpath', '-p'], metavar='PYTHONPATH', default=None,
        help='Paths to prepend to PYTHONPATH')
    extra_argv = Argument(
        ['extra_argv'], nargs=-1, metavar='ARGS', required=False)

    @classmethod
    def _setup(cls, pythonpath, **kwargs):
        vals = Build.opt_defaults()
        vals.update(kwargs)
        Build.run(add_path=True, **vals)
        if pythonpath:
            for p in reversed(pythonpath.split(os.pathsep)):
                sys.path.insert(0, p)

    @classmethod
    def run(cls, pythonpath, extra_argv=None, **kwargs):
        cls._setup(pythonpath, **kwargs)
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            sys.modules['__main__'] = new_module('__main__')
            ns = dict(__name__='__main__', __file__=extra_argv[0])
            exec(script, ns)
        else:
            import code
            code.interact()
def add_yaml_option(app):
    help = "Celery configuration in a YAML file."
    if celery_version.major < 5:
        def add_preload_arguments(parser):
            parser.add_argument("--yaml", default=None, help=help)

        app.user_options["preload"].add(add_preload_arguments)
    else:
        from click import Option
        from celery import bootsteps

        app.user_options["preload"].add(
            Option(["--yaml"], required=True, help=help))

        class YamlBootstep(bootsteps.Step):
            def __init__(self, parent, yaml: str = "", **options):
                try:
                    loader = YamlLoader(app, config=yaml)
                    loader.read_configuration()
                    loader.import_default_modules()
                except Exception as err:
                    print(err)
                    sys.exit(-1)
                super().__init__(parent, **options)

        app.steps["worker"].add(YamlBootstep)
def activate(self):
    # Argument for our command, which stores the csv file path.
    path_argument = Argument(("csv_file", ), required=True, type=str)
    interval_option = Option(
        ("-i", "--interval"), type=int, default=10,
        help="Sets the time between two checks in seconds")

    self.commands.register("csv_watch", "Monitors csv files",
                           self.csv_watcher_command,
                           params=[path_argument, interval_option])
    self.signals.connect(receiver="csv_change_receiver",
                         signal="csv_watcher_change",
                         function=self.csv_change_monitor,
                         description="Gets called for each csv change")

    csv_files_by_config = self.app.config.get("CSV_FILES", [])
    csv_interval_by_config = self.app.config.get("CSV_INTERVAL", 5)
    for csv_file in csv_files_by_config:
        self.csv_watcher_command(csv_file, csv_interval_by_config)
def test_command_flag_count(basicApp):
    """
    This test case registers a command with a countable flag option. The
    assigned command handler raises an exception if the flag count is not
    equal to 3. The test then invokes the command with the flag repeated
    three times (`-fff`) and asserts an exit code of 0, proving that the
    flag count was passed correctly to the handler; otherwise the handler's
    exception would make the CliRunner report a non-zero exit code.
    """
    def _test_command(*args, **kwargs):
        print(args)
        print(kwargs)
        assert kwargs['flag'] == 3

    plugin = basicApp.plugins.get("CommandPlugin")
    plugin.commands.unregister("test")

    # register a command with a countable flag
    plugin.commands.register("test", "my test command", _test_command,
                             params=[Option(["-f", "--flag"], count=True)])

    # call command with -fff -> count == 3 is asserted in the command handler
    result = CliRunner().invoke(
        basicApp.commands.get("test").click_command, ['-fff'])
    assert result.exit_code == 0
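
# Plain-click counterpart of the count-flag test above -- a minimal sketch
# with no plugin framework assumed: `-fff` parses to flag == 3.
import click
from click.testing import CliRunner

@click.command()
@click.option("-f", "--flag", count=True)
def counted(flag):
    click.echo(f"flag={flag}")

result = CliRunner().invoke(counted, ["-fff"])
assert result.exit_code == 0 and "flag=3" in result.output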
def test_command_flag_on_off(basicApp):
    """
    This test case registers a command with a flag option. The assigned
    command handler raises an exception if the flag is not set. The test
    then invokes the command twice: first with the flag set, then with the
    flag not set. It is asserted that the first call returns 0 and the
    second one 1 - the exit code click's CliRunner returns when an unhandled
    exception is raised in the command handler.
    """
    def _test_command(*args, **kwargs):
        print(args)
        print(kwargs)
        if not kwargs['flag_on']:
            raise Exception

    plugin = basicApp.plugins.get("CommandPlugin")
    plugin.commands.unregister("test")

    # register a command with an on/off flag; the command raises an
    # exception if the flag is off
    plugin.commands.register("test", "my test command", _test_command,
                             params=[Option(["--flag-on/--flag-off"])])

    # call command with --flag-on (True) -> expect ok
    result = CliRunner().invoke(
        basicApp.commands.get("test").click_command, ['--flag-on'])
    assert result.exit_code == 0

    # call command with --flag-off (False) -> expect error
    result = CliRunner().invoke(
        basicApp.commands.get("test").click_command, ['--flag-off'])
    assert result.exit_code == 1
def test_command_path_option(basicApp):
    """
    This test case registers a command with a path option whose value must
    be an existing path. The test then invokes the command twice: first with
    an existing path (the path of this source file), then with a
    non-existent one. It is asserted that the first call returns exit code 0
    and the second one exit code 2 (click's usage-error code).
    """
    def _test_command(*args, **kwargs):
        print(args)
        print(kwargs)

    plugin = basicApp.plugins.get("CommandPlugin")
    plugin.commands.unregister("test")

    # register a command with a path option, whose path must exist
    plugin.commands.register(
        "test", "my test command", _test_command,
        params=[Option(["--path"], type=click.Path(exists=True))])

    # call command with existing path as option -> expect ok
    result = CliRunner().invoke(
        basicApp.commands.get("test").click_command, ['--path', __file__])
    assert result.exit_code == 0

    # call command with non-existing path as option -> expect error
    result = CliRunner().invoke(
        basicApp.commands.get("test").click_command,
        ['--path', 'no such path'])
    assert result.exit_code == 2
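
# Plain-click counterpart of the path test above: click treats a failing
# click.Path(exists=True) check as a usage error, which CliRunner reports
# with exit code 2.
import click
from click.testing import CliRunner

@click.command()
@click.option("--path", type=click.Path(exists=True))
def checked(path):
    pass

assert CliRunner().invoke(checked, ["--path", "no such path"]).exit_code == 2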
def test_command_optional_option(basicApp):
    """
    This test case registers a command with an optional option. The test
    then invokes the command twice: first with the option provided, then
    with it missing. It is asserted that both calls return exit code 0.
    """
    def _test_command(*args, **kwargs):
        print(args)
        print(kwargs)

    plugin = basicApp.plugins.get("CommandPlugin")
    plugin.commands.unregister("test")

    # register a command with an optional option
    plugin.commands.register("test", "my test command", _test_command,
                             params=[Option(["--opt-opt"], required=False)])

    # call command with option --opt-opt -> expect ok
    result = CliRunner().invoke(
        basicApp.commands.get("test").click_command, ["--opt-opt", 123])
    assert result.exit_code == 0

    # call command with no option -> expect ok
    result = CliRunner().invoke(
        basicApp.commands.get("test").click_command, [])
    assert result.exit_code == 0
def get_help_option(self, ctx):
    """Override for showing formatted main help via --help and -h options"""
    from ..core import format_help
    help_options = self.get_help_option_names(ctx)
    if not help_options or not self.add_help_option:
        return

    def show_help(ctx, param, value):
        if value and not ctx.resilient_parsing:
            if not ctx.invoked_subcommand:
                # main help
                echo(format_help(ctx.get_help()))
            else:
                # sub-command help
                echo(ctx.get_help(), color=ctx.color)
            ctx.exit()

    return Option(
        help_options,
        is_flag=True,
        is_eager=True,
        expose_value=False,
        callback=show_help,
        help="Show this message and exit.",
    )
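
# Hedged sketch of wiring an override like get_help_option above into a
# click.Command subclass; CustomHelpCommand is hypothetical, and -h is
# enabled via help_option_names in context_settings.
import click

class CustomHelpCommand(click.Command):
    def get_help_option(self, ctx):
        opt = super().get_help_option(ctx)
        if opt is not None:
            # Reuse click's built-in help option, only swapping the help text.
            opt.help = "Show formatted help and exit."
        return opt

@click.command(cls=CustomHelpCommand,
               context_settings={"help_option_names": ["-h", "--help"]})
def cli():
    pass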
def create_print_wrapper(f):
    """Creates a print wrapper for commands

    This adds the necessary options for formatting results as well as
    wrapping the command to handle those formatting options.

    :param f: the function to wrap
    :type f: function
    :returns: the wrapped function
    :rtype: function
    """
    def new_func(*args, **kwargs):
        response_format = kwargs.pop('format')
        response = f(*args, **kwargs)
        echo(format_output(response, response_format))
        return response

    new_func = update_wrapper(new_func, f)
    insert_click_param(
        new_func,
        Option(
            ['--format'],
            type=Choice(['json_pp', 'json', 'yaml', 'column']),
            default='column',
            help='Specify how responses should be formatted and echoed to '
                 'the terminal.'))
    return new_func
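
# The wrapper above depends on a helper named insert_click_param; a minimal
# sketch of what such a helper could look like (an assumption, not the
# original): click's own decorators queue pending parameters on the wrapped
# function in a __click_params__ list until the Command object is built.
def insert_click_param(f, param):
    if not hasattr(f, "__click_params__"):
        f.__click_params__ = []
    f.__click_params__.append(param)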
def activate(self):
    # Argument for our command, which stores the csv file path.
    path_argument = Argument(("csv_file",), required=True, type=str)
    interval_option = Option(
        ("-i", "--interval"), type=int, default=10,
        help="Sets the time between two checks in seconds")

    self.commands.register("csv_watcher_list", "Shows all csv watchers",
                           self.csv_watcher_list)
    self.commands.register("csv_watcher_add", "Adds a permanent watcher",
                           self.csv_watcher_add,
                           params=[path_argument, interval_option])
    self.commands.register("csv_watcher_delete",
                           "Removes a permanent watcher",
                           self.csv_watcher_delete, params=[path_argument])

    self.setup_db()
    self.load_watchers()
class Lint():
    """:dash: run flake8, and check PEP 8 compliance on branch diff."""
    output_file = Option(
        ['--output-file'], default=None, help='Redirect report to a file')

    def run(output_file):
        opts = {'output_file': output_file}
        run_doit_task({'flake8': opts, 'pep8-diff': {}})
def presentOptionHelp(ctx: Context, option: Option) -> XMLContent:
    yield option.help
    if not option.is_flag:
        default = option.get_default(ctx)
        if default is not None:
            value: XMLContent = str(default)
            if isinstance(default, Path) and value == '.':
                value = 'current directory'
            else:
                value = xhtml.code[value]
            yield xhtml.br, xhtml.em['default:'], ' ', value
def test_executor_factory_type():
    tp = ExecutorFactoryType()
    param = Option(["--executor"])
    assert tp.get_metavar(param) == "[process|thread]"
    assert tp.get_missing_message(param) == (
        "Choose from:\n\tprocess,\n\tthread")
    assert tp.convert("process", None, None) is PROCESS_POOL_FACTORY
    assert tp.convert("Thread", None, None) is THREAD_POOL_FACTORY
    assert tp.convert(PROCESS_POOL_FACTORY, None, None) is PROCESS_POOL_FACTORY
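
# Hedged sketch of a choice-like ParamType consistent with the test above
# (and with test_level_type below); the implementation is an assumption --
# only the observable behavior is taken from the assertions.
import click
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor

PROCESS_POOL_FACTORY = ProcessPoolExecutor  # placeholder factories
THREAD_POOL_FACTORY = ThreadPoolExecutor

class ExecutorFactoryType(click.ParamType):
    name = "executor"
    _choices = {"process": PROCESS_POOL_FACTORY, "thread": THREAD_POOL_FACTORY}

    def get_metavar(self, param):
        return "[" + "|".join(self._choices) + "]"

    def get_missing_message(self, param):
        return "Choose from:\n\t" + ",\n\t".join(self._choices)

    def convert(self, value, param, ctx):
        if not isinstance(value, str):
            return value  # already a factory object
        try:
            return self._choices[value.lower()]
        except KeyError:
            self.fail(f"invalid executor: {value!r}", param, ctx)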
def sample_value(opt: click.Option):
    if sample_get_value:
        value = sample_get_value(opt)
        if value is not None:
            return value
    if isinstance(opt.default, (int, str)):
        return opt.default
    elif isinstance(opt.default, pathlib.Path):
        return str(opt.default)
    else:
        return opt.make_metavar()
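
# Hedged usage sketch for sample_value: collecting documentation-friendly
# sample values for each option of a command. sample_get_value is assumed to
# be an optional module-level hook, as implied above; the command and its
# options are hypothetical.
import pathlib
import click

sample_get_value = None  # no custom hook in this sketch

@click.command()
@click.option("--retries", default=3)
@click.option("--out", type=click.Path(), default=pathlib.Path("out.txt"))
def job(retries, out):
    pass

samples = {p.name: sample_value(p) for p in job.params
           if isinstance(p, click.Option)}
# -> {'retries': 3, 'out': 'out.txt'}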
def test_level_type():
    tp = LevelType()
    param = Option(["--level"])
    assert tp.get_metavar(param) == "[skipped|success|unstable|failure]"
    assert tp.get_missing_message(param) == (
        "Choose from:\n\tskipped,\n\tsuccess,\n\tunstable,\n\tfailure")
    assert tp.convert("skipped", None, None) == Status.SKIPPED
    assert tp.convert("SUCCESS", None, None) == Status.SUCCESS
    assert tp.convert("UnStable", None, None) == Status.UNSTABLE
    assert tp.convert("FAILURE", None, None) == Status.FAILURE
    assert tp.convert(Status.SUCCESS, None, None) == Status.SUCCESS
def task_from_dir(__ctx: click.Context, __param: click.Option,
                  __value: bool) -> None:
    """Override task name default using name of current directory.

    Args:
        __ctx: Current command context
        __param: Parameter being processed
        __value: True if flag given
    """
    if not __value or __ctx.resilient_parsing:
        return
    __param = [p for p in __ctx.command.params if p.name == 'task'][0]
    __param.default = os.path.basename(os.path.abspath(os.curdir))
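
# Hedged sketch of how a callback like task_from_dir is typically attached:
# as an eager, value-less flag that rewrites the default of the `task`
# option before it is resolved. Option names here are assumptions.
import click

@click.command()
@click.option("--from-dir", is_flag=True, is_eager=True, expose_value=False,
              callback=task_from_dir,
              help="Use the current directory name as the task name.")
@click.option("--task", default="default-task")
def run(task):
    click.echo(task)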
def activate(self):
    # Argument for our command, which stores the csv file path.
    path_argument = Argument(("csv_file", ), required=True, type=str)
    interval_option = Option(
        ("-i", "--interval"), type=int, default=10,
        help="Sets the time between two checks in seconds")

    self.commands.register("csv_watch", "Monitors csv files",
                           self.csv_watcher_command,
                           params=[path_argument, interval_option])
def test_command_multi_plugin_registration(basicApp, EmptyCommandPlugin):
    def _test_command(arg):
        print(arg)

    plugin = basicApp.plugins.get("CommandPlugin")
    plugin2 = EmptyCommandPlugin(app=basicApp, name="CommandPlugin2")
    plugin2.activate()
    plugin2.commands.register("test2", "my test2 command", _test_command,
                              params=[Option(("--arg", "-a"))])
    assert len(basicApp.commands.get()) == 2
    assert len(plugin.commands.get()) == 1
    assert len(plugin2.commands.get()) == 1

    basicApp.plugins.deactivate(["CommandPlugin2"])
    assert len(basicApp.commands.get()) == 1
    assert len(plugin.commands.get()) == 1
    assert len(plugin2.commands.get()) == 0
def read_black_config(src: Path,
                      value: Optional[str]) -> Dict[str, Union[bool, int]]:
    """Read the black configuration from pyproject.toml"""
    command = Command("main")

    context = Context(command)
    context.params["src"] = (str(src), )

    parameter = Option(["--config"])

    read_pyproject_toml(context, parameter, value)

    return {
        key: value
        for key, value in (context.default_map or {}).items()
        if key in ["line_length", "skip_string_normalization"]
    }
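
# Hedged usage sketch for read_black_config above; the source path is
# hypothetical. If black's read_pyproject_toml finds no configuration for
# the path, the default map stays empty and an empty dict comes back.
from pathlib import Path

config = read_black_config(Path("src/example.py"), value=None)
line_length = config.get("line_length", 88)  # 88 is black's default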
def __init__(self, path, help_msg):
    """Initializes the command."""
    super().__init__(help='Subcommands are loaded from a '
                          'plugin folder dynamically')
    opt_help = dict(help_option_names=['-h', '--help'])
    opt_version = Option(['--version', '-v'],
                         is_flag=True,
                         callback=print_version,
                         expose_value=False,
                         is_eager=True,
                         help='Prints PyMLST version.')
    self.params.append(opt_version)
    self.context_settings.update(opt_help)
    self.help = help_msg
    self.path = path
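
# Hedged sketch of the print_version callback referenced above -- the
# standard click eager-callback shape; the version lookup is a placeholder,
# not the original implementation.
import click

def print_version(ctx, param, value):
    if not value or ctx.resilient_parsing:
        return
    click.echo("version: <placeholder>")
    ctx.exit()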
def activate(self):
    self.commands.register(
        "resources", "Prints used resources", self._print_resources,
        params=[
            Argument(("resource", ), required=False),
            Option(("--description", "-d"),
                   required=False,
                   help="Will also print a short description of each value",
                   default=False,
                   is_flag=True)
        ])
    self.commands.register("resources_list",
                           "Prints a list of all available resources",
                           self._print_resources_list)
def get_show_all_commands_help_option(self, ctx):
    """Returns the help option object."""
    help_options = self.get_show_all_commands_help_names(ctx)
    # TODO: add an `add_show_all_commands` attribute to the class, then
    # re-enable this guard:
    # if not help_options or not self.add_help_option:
    #     return

    def show_all_commands_help(ctx, param, value):
        if value and not ctx.resilient_parsing:
            echo(self.show_all_commands(ctx), color=ctx.color)
            ctx.exit()

    return Option(help_options,
                  is_flag=True,
                  is_eager=True,
                  expose_value=False,
                  callback=show_all_commands_help,
                  help='Show this message and exit.')
def activate(self):
    # Argument for our command, which stores the csv file path.
    path_argument = Argument(("csv_file", ), required=True, type=str)
    interval_option = Option(
        ("-i", "--interval"), type=int, default=10,
        help="Sets the time between two checks in seconds")

    self.commands.register("csv_watcher_list", "Shows all csv watchers",
                           self.csv_watcher_list)
    self.commands.register("csv_watcher_add", "Adds a permanent watcher",
                           self.csv_watcher_add,
                           params=[path_argument, interval_option])
    self.commands.register("csv_watcher_delete",
                           "Removes a permanent watcher",
                           self.csv_watcher_delete, params=[path_argument])

    self.setup_db()
    self.load_watchers()

    self.web.db.register(self.Watcher, self.db.session)

    try:
        menu_csv = self.web.menus.register(name="CSV", link="#")
    except Exception:
        menu_csv = self.web.menus.get("CSV")

    with self.app.web.flask.app_context():
        # Will be http://127.0.0.1:5000/admin/admin_csvwatchers/
        menu_csv.register(name="Watchers",
                          link=url_for("admin_csvwatchers.index_view"))

    self.web.rest.register(self.Watcher, self.db.session)
    with self.app.web.flask.app_context():
        menu_csv.register(name="REST CsvWatchers",
                          link=rest_url_for(self.Watcher))
class CommandField(Field):
    def set_fields(self, fields):
        if fields is None:
            from fxdayu_sinta.IO.config import get_codes, STOCK
            super(CommandField, self).set_fields(get_codes()[STOCK])
        else:
            super(CommandField, self).set_fields(fields.split(','))


from click import Option, STRING

FIELD_OPTION = Option(
    ['-f', '--fields'], type=STRING, default=None, required=False,
    help="Specify fields of stock like 000001.XSHE,000002.XSHE .")
START_OPTION = Option(["-s", "--start"], type=STRING, default=None,
                      required=False, help="Time format: yyyy-mm-dd")
END_OPTION = Option(["-e", "--end"], type=STRING, default=None,
                    required=False, help="Time format: yyyy-mm-dd")
# The original snippet was truncated here; the remaining keyword arguments
# are completed to match the sibling options above.
END_OPTION_EMPTY = Option(["-e", "--end"], type=STRING, default="",
                          required=False, help="Time format: yyyy-mm-dd")
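
# Hedged sketch showing how module-level Option objects such as FIELD_OPTION
# and START_OPTION above can be shared across commands by appending them to
# each command's params; the `dump` command is hypothetical.
import click

@click.command()
def dump(fields, start, end):
    click.echo((fields, start, end))

dump.params.extend([FIELD_OPTION, START_OPTION, END_OPTION])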
class Bench(Task):
    """:wrench: Run benchmarks

    Examples:

        $ python do.py bench -t integrate.SolveBVP
        $ python do.py bench -t linalg.Norm
        $ python do.py bench --compare main
    """
    ctx = CONTEXT
    TASK_META = {
        'task_dep': ['build'],
    }

    submodule = Option(
        ['--submodule', '-s'], default=None, metavar='SUBMODULE',
        help="Submodule whose tests to run (cluster, constants, ...)")
    tests = Option(
        ['--tests', '-t'], default=None, multiple=True, metavar='TESTS',
        help='Specify tests to run')
    compare = Option(
        ['--compare', '-c'], default=None, metavar='COMPARE', multiple=True,
        help=("Compare benchmark results of current HEAD to BEFORE. "
              "Use an additional --bench COMMIT to override HEAD with COMMIT. "
              "Note that you need to commit your changes first!"))

    @staticmethod
    def run_asv(dirs, cmd):
        EXTRA_PATH = [
            '/usr/lib/ccache', '/usr/lib/f90cache',
            '/usr/local/lib/ccache', '/usr/local/lib/f90cache'
        ]
        bench_dir = dirs.root / 'benchmarks'
        sys.path.insert(0, str(bench_dir))
        # Always use ccache, if installed
        env = dict(os.environ)
        env['PATH'] = os.pathsep.join(
            EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
        # Control BLAS/LAPACK threads
        env['OPENBLAS_NUM_THREADS'] = '1'
        env['MKL_NUM_THREADS'] = '1'

        # Limit memory usage
        from benchmarks.common import set_mem_rlimit
        try:
            set_mem_rlimit()
        except (ImportError, RuntimeError):
            pass
        try:
            return subprocess.call(cmd, env=env, cwd=bench_dir)
        except OSError as err:
            if err.errno == errno.ENOENT:
                cmd_str = " ".join(cmd)
                print(f"Error when running '{cmd_str}': {err}\n")
                print("You need to install Airspeed Velocity "
                      "(https://airspeed-velocity.github.io/asv/)")
                print("to run Scipy benchmarks")
                return 1
            raise

    @classmethod
    def scipy_bench(cls, args):
        dirs = Dirs(args)
        dirs.add_sys_path()
        print(f"SciPy from development installed path at: {dirs.site}")
        with working_dir(dirs.site):
            runner, version, mod_path = get_test_runner(PROJECT_MODULE)
            extra_argv = []
            if args.tests:
                extra_argv.append(args.tests)
            if args.submodule:
                extra_argv.append([args.submodule])

            bench_args = []
            for a in extra_argv:
                bench_args.extend(['--bench', ' '.join(str(x) for x in a)])
            if not args.compare:
                print("Running benchmarks for Scipy version %s at %s"
                      % (version, mod_path))
                cmd = [
                    'asv', 'run', '--dry-run', '--show-stderr',
                    '--python=same', '--quick'
                ] + bench_args
                retval = cls.run_asv(dirs, cmd)
                sys.exit(retval)
            else:
                if len(args.compare) == 1:
                    commit_a = args.compare[0]
                    commit_b = 'HEAD'
                elif len(args.compare) == 2:
                    commit_a, commit_b = args.compare
                else:
                    print("Too many commits to compare benchmarks for")
                    sys.exit(1)

                # Check for uncommitted files
                if commit_b == 'HEAD':
                    r1 = subprocess.call(
                        ['git', 'diff-index', '--quiet', '--cached', 'HEAD'])
                    r2 = subprocess.call(['git', 'diff-files', '--quiet'])
                    if r1 != 0 or r2 != 0:
                        print("*" * 80)
                        print("WARNING: you have uncommitted changes --- "
                              "these will NOT be benchmarked!")
                        print("*" * 80)

                # Fix commit ids (HEAD is local to current repo)
                p = subprocess.Popen(['git', 'rev-parse', commit_b],
                                     stdout=subprocess.PIPE)
                out, err = p.communicate()
                commit_b = out.strip().decode('ascii')

                p = subprocess.Popen(['git', 'rev-parse', commit_a],
                                     stdout=subprocess.PIPE)
                out, err = p.communicate()
                commit_a = out.strip().decode('ascii')

                cmd_compare = [
                    'asv', 'continuous', '--show-stderr', '--factor', '1.05',
                    commit_a, commit_b
                ] + bench_args
                retval = cls.run_asv(dirs, cmd_compare)
                sys.exit(retval)

    @classmethod
    def run(cls, **kwargs):
        """run benchmark"""
        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        args = Args(**kwargs)
        cls.scipy_bench(args)
class Test(Task):
    """:wrench: Run tests

    Examples:

        $ python do.py test -s {SAMPLE_SUBMODULE}
        $ python do.py test -t scipy.optimize.tests.test_minimize_constrained
        $ python do.py test -s stats -- --tb=line
    """
    ctx = CONTEXT

    verbose = Option(
        ['--verbose', '-v'], default=False, is_flag=True,
        help="more verbosity")
    # removed doctests as currently not supported by _lib/_testutils.py
    # doctests = Option(['--doctests'], default=False)
    coverage = Option(
        ['--coverage'], default=False, is_flag=True,
        help=("report coverage of project code. "
              "HTML output goes under build/coverage"))
    submodule = Option(
        ['--submodule', '-s'], default=None, metavar='SUBMODULE',
        help="Submodule whose tests to run (cluster, constants, ...)")
    tests = Option(
        ['--tests', '-t'], default=None, multiple=True, metavar='TESTS',
        help='Specify tests to run')
    mode = Option(
        ['--mode', '-m'], default='fast', metavar='MODE', show_default=True,
        help=("'fast', 'full', or something that could be passed to "
              "`pytest -m` as a marker expression"))
    parallel = Option(
        ['--parallel', '-j'], default=1, metavar='PARALLEL',
        help="Number of parallel jobs for testing")
    pytest_args = Argument(
        ['pytest_args'], nargs=-1, metavar='PYTEST-ARGS', required=False)

    TASK_META = {
        'task_dep': ['build'],
    }

    @classmethod
    def scipy_tests(cls, args, pytest_args):
        dirs = Dirs(args)
        dirs.add_sys_path()
        print(f"SciPy from development installed path at: {dirs.site}")

        # FIXME: support pos-args with doit
        extra_argv = pytest_args[:] if pytest_args else []
        if extra_argv and extra_argv[0] == '--':
            extra_argv = extra_argv[1:]

        if args.coverage:
            dst_dir = dirs.root / args.build_dir / 'coverage'
            fn = dst_dir / 'coverage_html.js'
            if dst_dir.is_dir() and fn.is_file():
                shutil.rmtree(dst_dir)
            extra_argv += ['--cov-report=html:' + str(dst_dir)]
            shutil.copyfile(dirs.root / '.coveragerc',
                            dirs.site / '.coveragerc')

        # convert options to test selection
        if args.submodule:
            tests = [PROJECT_MODULE + "." + args.submodule]
        elif args.tests:
            tests = args.tests
        else:
            tests = None

        runner, version, mod_path = get_test_runner(PROJECT_MODULE)
        # FIXME: changing CWD is not a good practice
        with working_dir(dirs.site):
            print("Running tests for {} version:{}, installed at:{}".format(
                PROJECT_MODULE, version, mod_path))
            # runner verbosity - convert bool to int
            verbose = int(args.verbose) + 1
            result = runner(  # scipy._lib._testutils:PytestTester
                args.mode,
                verbose=verbose,
                extra_argv=extra_argv,
                doctests=False,
                coverage=args.coverage,
                tests=tests,
                parallel=args.parallel)
        return result

    @classmethod
    def run(cls, pytest_args, **kwargs):
        """run unit-tests"""
        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        args = Args(**kwargs)
        return cls.scipy_tests(args, pytest_args)