def test_history_only_failures(dirty_start_small):
    runner = CliRunner()
    result = runner.invoke(app, ["history"])
    assert "bad" in result.output
    result = runner.invoke(app, ["history", "--only-failures"])
    assert "bad" in result.output
    assert "buz" not in result.output

def test_compose_commands(cli: typer.Typer, cli_runner: CliRunner):
    # NOTE: this test is mostly here to raise awareness about what options
    # are exposed in the CLI so we can add tests if there is any update
    #
    result = cli_runner.invoke(cli, ["--help"], catch_exceptions=False)
    print(result.stdout)
    assert result.exit_code == 0, result

    # first command
    result = cli_runner.invoke(cli, ["run", "--help"], catch_exceptions=False)
    print(result.stdout)
    assert result.exit_code == 0, result

    # settings command
    result = cli_runner.invoke(cli, ["settings", "--help"], catch_exceptions=False)
    print(result.stdout)
    assert "--compact" in result.stdout
    assert result.exit_code == 0, result

    def extract_lines(text):
        lines = [line.strip() for line in text.split("\n") if line.strip()]
        return lines

    assert extract_lines(HELP) == extract_lines(result.stdout)

def test_command_line_interface(self):
    """Test the CLI."""
    runner = CliRunner()
    result = runner.invoke(cli.main)
    assert result.exit_code == 0
    assert '{{ cookiecutter.project_slug }}.cli.main' in result.output
    help_result = runner.invoke(cli.main, ['--help'])
    assert help_result.exit_code == 0
    assert '--help  Show this message and exit.' in help_result.output

def test_command_line_interface():
    """Test the CLI."""
    runner = CliRunner()
    result = runner.invoke(app)
    assert result.exit_code == 0
    assert "Welcome to Eve Esi Jobs" in result.output
    help_result = runner.invoke(app, ["--help"])
    assert help_result.exit_code == 0
    assert "[OPTIONS] COMMAND [ARGS]" in help_result.output

def test_schema():
    runner = CliRunner()
    result = runner.invoke(app, ["schema"])
    assert result.exit_code == 0
    assert "Usage: eve-esi schema [OPTIONS] COMMAND [ARGS]" in result.output
    help_result = runner.invoke(app, ["schema", "--help"])
    assert help_result.exit_code == 0
    assert "Usage: eve-esi schema [OPTIONS] COMMAND [ARGS]" in help_result.output
    print(os.getenv("PFMSOFT_eve_esi_jobs_TESTING", "Not set"))

def test_download(test_app_dir, monkeypatch):
    # set_env(test_app_dir, monkeypatch)
    runner = CliRunner()
    result = runner.invoke(app, ["schema", "download"])
    print(result.output)
    assert result.exit_code == 0
    assert "Schema saved to" in result.output
    help_result = runner.invoke(app, ["schema", "download", "--help"])
    assert help_result.exit_code == 0
    assert "eve-esi schema download [OPTIONS]" in help_result.output

def test_bridge_discover(self, api_mock):
    runner = CliRunner()
    res = runner.invoke(cli.app, ["bridge", "discover", "--help"])
    assert res.exit_code == 0
    assert "Discover online Bridges in the local network" in res.output
    res = runner.invoke(cli.app, ["bridge", "discover"])
    assert res.exit_code == 0
    assert api_mock.call_count == 1
    assert res.output == "Hue Bridges Discovered:\n{}\n"

def test_bridge_info(self, api_mock):
    runner = CliRunner()
    res = runner.invoke(cli.app, ["bridge", "info", "--help"])
    assert res.exit_code == 0
    assert "List all the information about a Hue Bridge" in res.output
    res = runner.invoke(cli.app, ["bridge", "info", "-i", self.ip, "-u", self.user])
    assert res.exit_code == 0
    assert api_mock.call_count == 1
    assert res.output == f"[{self.ip}] Bridge Info:\n{{}}\n"

def test_bridge_config(self, api_mock):
    runner = CliRunner()
    res = runner.invoke(cli.app, ["bridge", "get", "--help"])
    assert res.exit_code == 0
    assert "Get the config of a Bridge" in res.output
    res = runner.invoke(cli.app, ["bridge", "get", "-i", self.ip, "-u", self.user])
    assert res.exit_code == 0
    assert api_mock.call_count == 1
    assert res.output == f"[{self.ip}] Bridge Config:\n{{}}\n"

def test_main_cli(cli_runner: CliRunner):
    result = cli_runner.invoke(main, "--help")
    assert "settings" in result.stdout
    assert "run" in result.stdout
    assert result.exit_code == 0
    result = cli_runner.invoke(main, ["settings", "--help"])
    assert result.exit_code == 0
    result = cli_runner.invoke(main, ["run", "--help"])
    assert result.exit_code == 0

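# --- Hypothetical helper (not part of the original tests) -------------------
# Several tests above receive a ``cli_runner`` fixture instead of constructing
# a runner inline. A minimal conftest.py sketch, assuming Typer's test runner;
# the fixture name matches the parameter the tests already use:
import pytest
from typer.testing import CliRunner


@pytest.fixture
def cli_runner() -> CliRunner:
    # A fresh runner per test keeps captured output isolated between tests.
    return CliRunner()
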
def test_light_info_failure(self, api_mock):
    runner = CliRunner()
    res = runner.invoke(cli.app, ["light", "info"])
    assert res.exit_code == 2
    assert api_mock.call_count == 0
    assert "Error: Missing option '--ip'" in res.output
    res = runner.invoke(cli.app, ["light", "info", "-i", self.ip])
    assert res.exit_code == 2
    assert api_mock.call_count == 0
    assert "Error: Missing option '--user'" in res.output

def test_create(esi_schema: FileResource):
    """Test the CLI."""
    runner = CliRunner()
    result = runner.invoke(app)
    assert result.exit_code == 0
    assert "Welcome to Eve Esi Jobs" in result.output
    help_result = runner.invoke(
        app, ["-s", str(esi_schema.file_path), "create", "jobs", "--help"]
    )
    print(help_result.output)
    assert help_result.exit_code == 0
    assert "eve-esi create jobs [OPTIONS] OP_ID" in help_result.output

def test_bridge_config_failure(self, api_mock):
    runner = CliRunner()
    res = runner.invoke(cli.app, ["bridge", "get"])
    assert res.exit_code == 2
    assert api_mock.call_count == 0
    assert "Error: Missing option '--ip'" in res.output
    res = runner.invoke(cli.app, ["bridge", "get", "-i", self.ip])
    assert res.exit_code == 2
    assert api_mock.call_count == 0
    assert "Error: Missing option '--user'" in res.output

def test_jobs_run_via_schedule(dirty_start_via_schedule):
    runner = CliRunner()
    result = runner.invoke(app, ["history"])
    assert "good-job" in result.output
    result = runner.invoke(app, ["history", "--only-failures"])
    assert "bad-job" in result.output
    assert "good-job" not in result.output
    printer_path = skedulord_path() / "printer"
    logfile = next(printer_path.glob("*.txt"))
    printer_logs = pathlib.Path(logfile).read_text()
    assert "--this that" in printer_logs
    assert "--one two" in printer_logs
    assert "--three 3" in printer_logs

def test_light_power_on(self, api_mock):
    runner = CliRunner()
    res = runner.invoke(cli.app, ["light", "on", "--help"])
    assert res.exit_code == 0
    assert "Power on a light" in res.output
    res = runner.invoke(
        cli.app, ["light", "on", str(self.num), "-i", self.ip, "-u", self.user]
    )
    assert res.exit_code == 0
    assert api_mock.call_count == 1
    assert res.output == f"[{self.ip}] Light {self.num} On:\n{{}}\n"

def test_light_state(self, api_mock):
    runner = CliRunner()
    res = runner.invoke(cli.app, ["light", "get", "--help"])
    assert res.exit_code == 0
    assert "Get the state of a Light" in res.output
    res = runner.invoke(
        cli.app, ["light", "get", str(self.num), "-i", self.ip, "-u", self.user]
    )
    assert res.exit_code == 0
    assert api_mock.call_count == 1
    assert res.output == f"[{self.ip}] Light {self.num} State:\n{{}}\n"

def test_light_info(self, api_mock):
    runner = CliRunner()
    res = runner.invoke(cli.app, ["light", "info", "--help"])
    assert res.exit_code == 0
    assert "List all the information about a Hue Light" in res.output
    res = runner.invoke(
        cli.app, ["light", "info", str(self.num), "-i", self.ip, "-u", self.user]
    )
    assert res.exit_code == 0
    assert api_mock.call_count == 1
    assert res.output == f"[{self.ip}] Light {self.num}:\n{{}}\n"

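# --- Hypothetical sketch (not part of the original tests) -------------------
# The Hue bridge/light tests above take ``(self, api_mock)``, which is the
# signature produced by a class-level ``unittest.mock.patch`` decorator. A
# sketch of the assumed setup; the patch target "hue.api.request" and the
# attribute values are illustrative only, and ``return_value={}`` matches the
# literal ``{}`` the tests expect in the CLI output:
from unittest import mock


@mock.patch("hue.api.request", return_value={})
class TestHueCli:
    ip = "192.168.1.10"
    user = "test-user"
    num = 1
    # ... the test_bridge_* / test_light_* methods above would live here,
    # each receiving the patched mock as its ``api_mock`` argument ...
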
def test_search(self):
    runner = CliRunner()
    path = get_test_resource("inchis.txt")
    result = runner.invoke(cli, ["search", "atc", str(path)])
    if result.exception is not None:
        raise result.exception
    assert result.stdout == ""

def test_print_config(self, cli_runner: CliRunner, mocker: MockerFixture) -> None:
    patched_settings = mocker.patch("opcua_webhmi_bridge.main.Settings")
    patched_settings.return_value.__str__.return_value = "settings instance"
    result = cli_runner.invoke(app, "--config")
    assert "settings instance" in result.output
    assert result.exit_code == 0

def test_invalid_export_format():
    runner = CliRunner()
    result = runner.invoke(app, ["configure", "-f", "invalid-output-format"])
    assert result.exit_code == 2
    assert "Error" in result.output
    assert "Invalid value for '--export-format' / '-f'" in result.output
    assert "invalid-output-format" in result.output

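# --- Hypothetical sketch (not part of the original tests) -------------------
# Exit code 2 plus "Invalid value for '--export-format' / '-f'" is what
# Typer/Click emit when an Enum-backed option rejects its value. The
# ``configure`` command under test could be declared roughly like this; the
# enum members and command body are assumptions:
import enum

import typer

app = typer.Typer()


class ExportFormat(str, enum.Enum):
    JSON = "json"
    YAML = "yaml"


@app.command()
def configure(
    export_format: ExportFormat = typer.Option(
        ExportFormat.JSON, "--export-format", "-f"
    )
) -> None:
    # Click validates the value against the enum before the body runs, so an
    # unknown value never reaches this line.
    typer.echo(f"Export format: {export_format.value}")
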
def test_integration(tmp_path):
    addons = {
        "a": {"version": "13.0.1.0.0", "depends": ["b", "c"]},
        "b": {"depends": ["base", "mail"]},
        "c": {"depends": ["account", "b"]},
        "account": {"depends": ["base"]},
        "base": {},
    }
    populate_addons_dir(tmp_path, addons)
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        app,
        ["--select=a", f"--addons-path={tmp_path}", "tree"],
        catch_exceptions=False,
    )
    assert not result.exception
    assert result.exit_code == 0, result.stderr
    assert result.stdout == textwrap.dedent(
        """\
        a (13.0.1.0.0)
        ├── b (no version)
        │   └── mail (✘ not installed)
        └── c (no version)
            ├── account (13.0+c)
            └── b ⬆
        """
    )

def test_build(runner: CliRunner, temp_cwd: Path) -> None:
    workdir = Path("article")
    shutil.copytree(
        Path(__file__).parent.joinpath("data/article-yaml-md"), "article"
    )
    source_path = workdir / "article.tex"
    pdf_path = workdir / "article.pdf"
    result = runner.invoke(
        app,
        [
            "build",
            "--source",
            str(source_path),
            "--pdf",
            str(pdf_path),
            "--parser",
            "article",
            "--theme",
            "minimalist",
        ],
    )
    print(result.stdout)
    assert result.exit_code == 0

    metadata_output = Path("_build/metadata.json")
    metadata = json.loads(metadata_output.read_text())
    print(json.dumps(metadata, indent=2))

    # From "canonical_url" field in lander.yaml
    assert (
        metadata["canonical_url"] == "https://example.com/example-article-document/"
    )
    # From "metadata.license_identifier" field in lander.yaml
    assert metadata["license_identifier"] == "MIT"

def test_no_command():
    """Test the CLI errors with no command."""
    runner = CliRunner()
    result = runner.invoke(app)
    assert result.exit_code > 0
    assert "Error" in result.output
    assert "Missing command." in result.output

def test_create_workorder(
    test_app_dir, jobs: Dict[str, FileResource], esi_schema: FileResource
):
    runner = CliRunner()
    output_path = test_app_dir / Path("create_workorder_test_result/")
    keys = list(jobs.keys())
    path_in = jobs[keys[0]].file_path.parent
    result = runner.invoke(
        app,
        [
            "-s",
            str(esi_schema.file_path),
            "create",
            "workorder",
            str(path_in),
            str(output_path),
        ],
        catch_exceptions=False,
    )
    print(result.output)
    assert result.exit_code == 0
    sub_dir = test_app_dir / Path("create_workorder_test_result")
    # NOTE: Expects to find only one workorder json file in sub directories.
    json_files: List[Path] = list(sub_dir.glob("**/*.json"))
    assert len(json_files) == 1
    for file in json_files:
        assert file.stat().st_size > 10
        workorder_string = file.read_text()
        workorder = EsiWorkOrder.deserialize_json(workorder_string)
        assert len(workorder.jobs) == 6
        yaml_string = workorder.serialize_yaml()
        print(yaml_string)
        workorder_yaml = EsiWorkOrder.deserialize_yaml(yaml_string)
        assert len(workorder_yaml.jobs) == 6
        assert workorder == workorder_yaml

def test_error_when_path_does_not_exist_and_creation_disabled(
    self, runner: CliRunner, sources: pathlib.Path
) -> None:
    async_folder = sources / "_async"
    result = runner.invoke(cli, ["--no-folders", str(async_folder)])
    assert result.exit_code == 2

def test_sample_size(mocker, temp_sqlite_path, args):
    mocker.patch("piicatcher.command_line.scan_database")
    mocker.patch.object(Catalog, "get_source")
    mocker.patch("piicatcher.command_line.str_output")
    extended_args = args + [
        "--sample-size",
        "10",
    ]
    catalog_args = ["--catalog-path", temp_sqlite_path]
    runner = CliRunner()
    result = runner.invoke(app, catalog_args + extended_args)
    print(result.stdout)
    assert result.exit_code == 0
    piicatcher.command_line.scan_database.assert_called_once_with(
        catalog=ANY,
        source=ANY,
        scan_type=ScanTypeEnum.metadata,
        incremental=True,
        output_format=OutputFormat.tabular,
        list_all=False,
        exclude_schema_regex=(),
        exclude_table_regex=(),
        include_schema_regex=(),
        include_table_regex=(),
        sample_size=10,
    )
    piicatcher.command_line.str_output.assert_called_once()
    Catalog.get_source.assert_called_once_with("db_cli")

def test_train():
    with TemporaryDirectory() as path:
        # generate training dataset
        for n_channels in [1, 2]:
            TRAIN_CONFIG["n_channels"] = n_channels
            generate_fake_training_dataset(
                path, n_channels=n_channels, fs=TRAIN_CONFIG["sample_rate"]
            )
            # set training command arguments
            runner = CliRunner()
            model_dir = join(path, f'model_{n_channels}')
            train_dir = join(path, 'train')
            cache_dir = join(path, f'cache_{n_channels}')
            TRAIN_CONFIG['train_csv'] = join(train_dir, 'train.csv')
            TRAIN_CONFIG['validation_csv'] = join(train_dir, 'train.csv')
            TRAIN_CONFIG['model_dir'] = model_dir
            TRAIN_CONFIG['training_cache'] = join(cache_dir, 'training')
            TRAIN_CONFIG['validation_cache'] = join(cache_dir, 'validation')
            with open('useless_config.json', 'w') as stream:
                json.dump(TRAIN_CONFIG, stream)
            # execute training
            result = runner.invoke(
                spleeter,
                ['train', '-p', 'useless_config.json', '-d', path, '--verbose'],
            )
            # assert that model checkpoint was created.
            assert os.path.exists(join(model_dir, 'model.ckpt-10.index'))
            assert os.path.exists(join(model_dir, 'checkpoint'))
            assert os.path.exists(join(model_dir, 'model.ckpt-0.meta'))
            assert result.exit_code == 0

def test_unsupported_env_format(
    cli: typer.Typer,
    settings_cls: Type[BaseCustomSettings],
    cli_runner: CliRunner,
    mocked_settings_cls_env: str,
    mocked_environment: Callable[[str], ContextManager[None]],
) -> None:
    with mocked_environment(mocked_settings_cls_env):
        settings_object = settings_cls.create_from_envs()
        assert settings_object

        setting_env_content_compact = cli_runner.invoke(
            cli,
            ["settings", "--compact"],
        ).stdout

    print(setting_env_content_compact)

    # The compact format is not parsable directly by Pydantic.
    # Also removed compact and mixed compact mocks .env files
    with pytest.raises(ValidationError):
        # if support for this test is ever added (meaning this test will fail)
        # please redefine the below files inside the mocks directory:
        # ".env-compact", ".env-granular", ".env-fails", ".env-mixed", ".env-sample"
        # removed by https://github.com/ITISFoundation/osparc-simcore/pull/2438

        # parse compact format
        with mocked_environment(setting_env_content_compact):
            settings_object = settings_cls.create_from_envs()
            assert settings_object

def run_e2e_test(extra_args: List[str], expected_differences: Dict[Path, str]):
    runner = CliRunner()
    openapi_path = Path(__file__).parent / "openapi.json"
    config_path = Path(__file__).parent / "config.yml"
    gr_path = Path(__file__).parent / "golden-record"
    output_path = Path.cwd() / "my-test-api-client"
    shutil.rmtree(output_path, ignore_errors=True)

    args = ["generate", f"--config={config_path}", f"--path={openapi_path}"]
    if extra_args:
        args.extend(extra_args)
    result = runner.invoke(app, args)

    if result.exit_code != 0:
        raise result.exception

    # Use absolute paths for expected differences for easier comparisons
    expected_differences = {
        output_path.joinpath(key): value
        for key, value in expected_differences.items()
    }
    _compare_directories(gr_path, output_path, expected_differences=expected_differences)

    import mypy.api

    out, err, status = mypy.api.run([str(output_path), "--strict"])
    assert status == 0, f"Type checking client failed: {out}"

    shutil.rmtree(output_path)

def test_do_example_workorder(
    esi_schema, workorders: Dict[str, FileResource], test_app_dir: Path
):
    runner = CliRunner()
    ewo_path = workorders["example_workorder.json"].file_path
    output_path = test_app_dir / Path("test_do_example_workorder")
    result = runner.invoke(
        app,
        [
            "-s",
            str(esi_schema.file_path),
            "do",
            "workorder",
            str(ewo_path),
            str(output_path),
        ],
        catch_exceptions=False,
    )
    print(result.output)
    assert result.exit_code == 0
    json_files: List[Path] = list(output_path.glob("**/*.json"))
    assert len(json_files) == 5
    for file in json_files:
        assert file.stat().st_size > 10
    assert "Uncertain Result" not in result.output
    assert "Failed" not in result.output