def test_cli_validate_exec(
    trace_gdf,
    assume_error: rules.ValidationResults,
    pandera_valid: bool,
    database,
    tmp_path_factory,
    metadata_json: Path,
):
    """
    Test tracerepo validate command with a set up of invalidated data.
    """
    assert metadata_json.exists() and metadata_json.is_file()
    area_gdf: gpd.GeoDataFrame = tests.kb11_area
    tmp_path = tmp_path_factory.mktemp(
        basename="test_cli_validate_exec", numbered=True
    )
    assert len(list(tmp_path.glob("*"))) == 0

    # Make default directories
    repo.scaffold(tmp_path)

    organizer = tests.set_up_repo_with_invalids_organized(
        database=database,
        trace_gdf=trace_gdf,
        area_gdf=area_gdf,
        tracerepository_path=tmp_path,
    )

    database_csv_path: Path = tmp_path / rules.DATABASE_CSV
    if database_csv_path.exists():
        database_csv_path.unlink()
    repo.write_database_csv(path=database_csv_path, database=organizer.database)

    # Test that all column headers are in the csv
    database_text = database_csv_path.read_text()
    database_first_line = database_text.splitlines()[0]
    for column_enum in rules.ColumnNames:
        assert column_enum.value in database_first_line.split(",")

    result = runner.invoke(
        app=app,
        args=[
            "validate",
            "--report",
            f"--metadata-json={metadata_json}",
            f"--tracerepository-path={tmp_path}",
        ],
    )

    reports_path = Path(tmp_path) / Path(rules.PathNames.REPORTS.value)

    if not pandera_valid:
        assert reports_path.exists()
        assert reports_path.is_dir()
        assert len(list(reports_path.glob("*.html"))) > 0

        # TODO: Inconsistent results here
        if "html" not in result.output:
            warn("Unknown error, html not found in result.output.")

    tests.click_error_print(result)

    # TODO: Inconsistent results here
    if assume_error.value not in database_csv_path.read_text():
        warn(f"Expected {assume_error.value} to be in {database_csv_path}.")
def test_fractopo_network_cli_basic(traces_path, area_path, tmp_path):
    """
    Test fractopo network cli entrypoint with default options.
    """
    tmp_path.mkdir(exist_ok=True)
    result = typer_cli_runner.invoke(
        cli.app,
        [
            "network",
            str(traces_path),
            str(area_path),
            "--general-output",
            str(tmp_path),
        ],
    )
    click_error_print(result)

    output_files = list(tmp_path.glob("*"))

    assert len(output_files) > 0
    assert "branches" in str(output_files)
    assert "nodes" in str(output_files)
    assert len(list(tmp_path.glob("*.svg"))) > 0
def test_fractopo_network_cli(
    traces_path, area_path, determine_branches_nodes, tmp_path
):
    """
    Test fractopo network cli entrypoint.
    """
    tmp_path.mkdir(exist_ok=True)
    result = typer_cli_runner.invoke(
        cli.app,
        [
            "network",
            str(traces_path),
            str(area_path),
            "--general-output",
            str(tmp_path),
        ]
        + ([] if determine_branches_nodes else ["--no-determine-branches-nodes"]),
    )
    click_error_print(result)

    output_files = list(tmp_path.glob("*"))
    output_files_names = [path.name for path in output_files]

    assert len(output_files) > 0
    assert "branches" in str(output_files) or not determine_branches_nodes
    assert "nodes" in str(output_files) or not determine_branches_nodes
    assert len(list(tmp_path.glob("*.svg"))) > 0
    assert "additional_numerical_data.json" in output_files_names

    # Sanity check
    assert "adddditional_numerical_data.json" not in output_files_names
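# For context, a minimal sketch of what the `click_error_print` helper used by
# these CLI tests is assumed to do: pass silently on a zero exit code and
# otherwise surface the captured CLI output before re-raising. The real helper
# lives in the test utilities of the respective package; the name
# `_click_error_print_sketch` and the exact behaviour shown here are
# illustrative assumptions, not the actual implementation.
def _click_error_print_sketch(result) -> None:
    # Successful invocation: nothing to report.
    if result.exit_code == 0:
        return
    # Failed invocation: show what the CLI printed, then re-raise the cause.
    print(result.output)
    if result.exception is not None:
        raise result.exception
    raise RuntimeError(f"CLI invocation failed with exit code {result.exit_code}.")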
def test_cli_app_help(subcommand: str):
    """
    Test tracerepo cli help cmd.
    """
    args = ["--help"]
    if len(subcommand) > 0:
        args.insert(0, subcommand)
    assert isinstance(subcommand, str)

    result: Result = runner.invoke(app=app, args=args)

    tests.click_error_print(result=result)
def test_tracevalidate_only_area(args, tmp_path):
    """
    Test tracevalidate script with --only-area-validation.
    """
    outputs_cmds = ["--output", str(tmp_path / "output_traces")]
    clirunner = TyperCliRunner()
    result = clirunner.invoke(cli.app, ["tracevalidate"] + args + outputs_cmds)

    # Check that exit code is 0 (i.e. ran successfully.)
    click_error_print(result)

    assert Path(outputs_cmds[1]).exists()
    assert Validation.ERROR_COLUMN in gpd.read_file(outputs_cmds[1]).columns
def test_fractopo_callback(logging_level_str: str):
    """
    Test fractopo cli callback with a valid logging level.
    """
    result = typer_cli_runner.invoke(
        cli.app,
        [
            "--logging-level",
            logging_level_str,
            "info",
        ],
    )
    click_error_print(result=result)
def test_fractopo_callback_error(logging_level_str: str):
    """
    Test fractopo cli callback with an invalid logging level.
    """
    with pytest.raises(Exception):
        result = typer_cli_runner.invoke(
            cli.app,
            [
                "--logging-level",
                logging_level_str,
                "info",
            ],
        )
        click_error_print(result=result)
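# Illustrative (assumed) parameter values for the two callback tests above:
# the callback should accept standard logging level names and fail on anything
# else. The exact values parametrized in the real suite may differ.
EXAMPLE_VALID_LOGGING_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
EXAMPLE_INVALID_LOGGING_LEVELS = ("debugging", "not-a-level", "")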
def test_cli_organize(database, trace_gdf, other_args, tmp_path_factory):
    """
    Test cli_organize click entrypoint.
    """
    area_gdf: gpd.GeoDataFrame = tests.kb11_area
    tmp_path = tmp_path_factory.mktemp(basename="test_cli_organize", numbered=True)
    other_args.append(f"--tracerepository-path={tmp_path}")
    args = ["organize"] + other_args

    # Make default directories
    repo.scaffold(tmp_path)

    organizer = tests.set_up_repo_with_invalids_organized(
        database=database,
        trace_gdf=trace_gdf,
        area_gdf=area_gdf,
        organized=False,
        tracerepository_path=tmp_path,
    )
    repo.write_database_csv(
        path=tmp_path / rules.DATABASE_CSV, database=organizer.database
    )

    result = runner.invoke(app=app, args=args)

    if "--simulate" not in args:
        organizer.check()

    if "--report" in args:
        assert len(result.stdout) > 0
    elif "--no-report" in args:
        assert len(result.stdout) == 0

    tests.click_error_print(result)
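# Illustrative (assumed) examples of the `other_args` parametrization consumed
# by test_cli_organize above; the flags are those the test itself inspects,
# but the real suite's combinations may differ.
EXAMPLE_ORGANIZE_ARGS = (
    ["--report"],
    ["--no-report"],
    ["--simulate", "--report"],
)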
def test_tracevalidate_typer(
    trace_path: Path,
    area_path: Path,
    auto_fix: str,
    tmp_path: Path,
    snap_threshold: float,
):
    """
    Test tracevalidate typer functionality.
    """
    clirunner = TyperCliRunner()
    output_file = tmp_path / f"{trace_path.stem}{trace_path.suffix}"
    cli_args = [
        "tracevalidate",
        str(trace_path),
        str(area_path),
        auto_fix,
        "--output",
        str(output_file),
        "--summary",
        "--snap-threshold",
        # should be valid for both 0.01 and 0.001
        str(snap_threshold),
    ]
    result = clirunner.invoke(cli.app, cli_args)

    # Check that exit code is 0 (i.e. ran successfully.)
    click_error_print(result)

    # Check that output was saved
    assert output_file.exists()
    output_gdf = gpd.read_file(output_file)
    assert isinstance(output_gdf, gpd.GeoDataFrame)
    assert output_gdf.crs == gpd.read_file(trace_path).crs
    assert Validation.ERROR_COLUMN in output_gdf.columns
    if "--summary" in cli_args:
        assert "Out of" in result.output
        assert "There were" in result.output
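# A compact, hypothetical helper mirroring the output checks above for use
# outside pytest: confirm the validated file exists, that its CRS matches the
# input traces, and that the validation error column was added. The helper
# name is illustrative; the checks follow directly from the test assertions.
def _validated_output_ok_sketch(trace_path: Path, output_path: Path) -> bool:
    if not output_path.exists():
        return False
    output_gdf = gpd.read_file(output_path)
    crs_matches = output_gdf.crs == gpd.read_file(trace_path).crs
    return crs_matches and Validation.ERROR_COLUMN in output_gdf.columns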
def test_all_cli(ready_tracerepository: Path):
    """
    Test all cli tools in a ready-made tracerepository.
    """
    metadata_json_path = ready_tracerepository / Path(rules.PathNames.METADATA.value)
    database_csv_path = ready_tracerepository / Path(rules.DATABASE_CSV)
    reports_path = ready_tracerepository / Path(rules.PathNames.REPORTS.value)

    assert metadata_json_path.exists()
    assert database_csv_path.exists()

    # Read database.csv before execution
    csv_text_before = database_csv_path.read_text()

    # Assert that what we expect is in the database
    assert rules.ValidationResults.VALID.value in csv_text_before
    assert rules.ValidationResults.INVALID.value in csv_text_before
    assert rules.ValidationResults.CRITICAL.value not in csv_text_before

    # Run help and subcommands without arguments
    for cmd in ("--help", "check", "organize", "format-geojson"):
        help_result = runner.invoke(
            app=app,
            args=[
                cmd,
                f"--tracerepository-path={ready_tracerepository}"
                if "--help" not in cmd
                else "",
            ],
        )
        tests.click_error_print(help_result)

    # Run tracerepo validate
    # Validate kb* and hastholmen infinity traces
    validate_result = runner.invoke(
        app=app,
        args=[
            "validate",
            "--traces-filter=kb",
            "--traces-filter=hastholmen",
            "--report",
            f"--metadata-json={metadata_json_path}",
            f"--tracerepository-path={ready_tracerepository}",
        ],
    )

    # Make sure pandera error was caught
    assert "Reported" in validate_result.stdout
    assert "html" in validate_result.stdout
    assert reports_path.exists()
    assert len(list(reports_path.glob("*.html"))) > 0

    # Test that there were no changes to the database
    csv_text_after = database_csv_path.read_text()
    assert csv_text_after == csv_text_before
    assert rules.ValidationResults.VALID.value in csv_text_after
    assert rules.ValidationResults.INVALID.value in csv_text_after
    assert rules.ValidationResults.CRITICAL.value not in csv_text_after

    # Run tracerepo export
    export_result = runner.invoke(
        app=app,
        args=[
            "export",
            str(ready_tracerepository),
            f"--tracerepository-path={ready_tracerepository}",
        ],
    )

    # Find export directory and check contents
    found = []
    for directory in ready_tracerepository.glob(f"{utils.EXPORT_DIR_PREFIX}*"):
        found.append(directory)
        if directory.is_dir():
            # Verify contents
            assert len(list(directory.rglob("*.shp"))) > 0
    assert len(found) > 0