def test_submit_for_inference(test_output_dirs: OutputFolderForTests) -> None:
    """
    Execute the submit_for_inference script on the model that was recently trained.
    This starts an AzureML job and downloads the segmentation. Then check if the
    segmentation was actually produced.

    :param test_output_dirs: Test output directories.
    """
    model = get_most_recent_model(fallback_run_id_for_local_execution=FALLBACK_SINGLE_RUN)
    image_file = fixed_paths_for_tests.full_ml_test_data_path() / "train_and_test_data" / "id1_channel1.nii.gz"
    assert image_file.exists(), f"Image file not found: {image_file}"
    settings_file = fixed_paths.SETTINGS_YAML_FILE
    assert settings_file.exists(), f"Settings file not found: {settings_file}"
    # Read the name of the branch from the environment, so that the inference experiment is also listed
    # alongside all other AzureML runs that belong to the current PR.
    build_branch = os.environ.get("BUILD_BRANCH", None)
    experiment_name = to_azure_friendly_string(build_branch) if build_branch else "model_inference"
    args = ["--image_file", str(image_file),
            "--model_id", model.id,
            "--settings", str(settings_file),
            "--download_folder", str(test_output_dirs.root_dir),
            "--cluster", "training-nc12",
            "--experiment", experiment_name]
    seg_path = test_output_dirs.root_dir / DEFAULT_RESULT_IMAGE_NAME
    assert not seg_path.exists(), f"Result file {seg_path} should not yet exist"
    submit_for_inference.main(args, project_root=fixed_paths.repository_root_directory())
    assert seg_path.exists(), f"Result file {seg_path} was not created"
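# For context, a minimal sketch of what to_azure_friendly_string (used above) needs to achieve.
# This is an illustrative stand-in, not the InnerEye implementation: AzureML experiment names may
# only contain letters, digits, underscores and dashes, so branch names such as "user/my-feature"
# must be sanitized before they can be used as an experiment name.
import re

def to_azure_friendly_string_sketch(name: str) -> str:
    """Replace every character that AzureML rejects in experiment names with an underscore."""
    return re.sub(r"[^a-zA-Z0-9_-]", "_", name)

# Example: to_azure_friendly_string_sketch("user/my-feature") == "user_my-feature"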
def test_submit_for_inference(test_output_dirs: OutputFolderForTests) -> None:
    """
    Execute the submit_for_inference script on the model that was recently trained.
    This starts an AzureML job and downloads the segmentation. Then check if the
    segmentation was actually produced.

    :param test_output_dirs: Test output directories.
    """
    model = get_most_recent_model(fallback_run_id_for_local_execution=FALLBACK_SINGLE_RUN)
    assert PYTHON_ENVIRONMENT_NAME in model.tags, "Environment name not present in model properties"
    # Both parts of this test rely on the same model that was trained in a previous run. If these tests were
    # executed independently (via pytest.mark.parametrize), get_most_recent_model would pick up the AML run
    # that the previously executed part of this test submitted.
    for use_dicom in [False, True]:
        if use_dicom:
            size = (64, 64, 64)
            spacing = (1., 1., 2.5)
            image_file = test_output_dirs.root_dir / "temp_pack_dicom_series" / "dicom_series.zip"
            scratch_folder = test_output_dirs.root_dir / "temp_dicom_series"
            zip_random_dicom_series(size, spacing, image_file, scratch_folder)
        else:
            image_file = fixed_paths_for_tests.full_ml_test_data_path() / "train_and_test_data" / "id1_channel1.nii.gz"
        assert image_file.exists(), f"Image file not found: {image_file}"
        settings_file = fixed_paths.SETTINGS_YAML_FILE
        assert settings_file.exists(), f"Settings file not found: {settings_file}"
        args = ["--image_file", str(image_file),
                "--model_id", model.id,
                "--settings", str(settings_file),
                "--download_folder", str(test_output_dirs.root_dir),
                "--cluster", "training-nc12",
                "--experiment", get_experiment_name_from_environment() or "model_inference",
                "--use_dicom", str(use_dicom)]
        download_file = DEFAULT_RESULT_ZIP_DICOM_NAME if use_dicom else DEFAULT_RESULT_IMAGE_NAME
        seg_path = test_output_dirs.root_dir / download_file
        assert not seg_path.exists(), f"Result file {seg_path} should not yet exist"
        submit_for_inference.main(args, project_root=fixed_paths.repository_root_directory())
        assert seg_path.exists(), f"Result file {seg_path} was not created"
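# A sketch of the get_experiment_name_from_environment helper used above, assuming it wraps the
# inline logic from the other versions of this test: read the branch name from the BUILD_BRANCH
# environment variable (set on PR builds) and convert it into a valid AzureML experiment name, so
# that the inference run is listed alongside the other runs of the same PR. The suffixed function
# name is hypothetical, for illustration only.
import os
from typing import Optional

def get_experiment_name_from_environment_sketch() -> Optional[str]:
    build_branch = os.environ.get("BUILD_BRANCH", None)
    return to_azure_friendly_string(build_branch) if build_branch else None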
from pathlib import Path

# DEFAULT_MODEL_ID_NUMERIC, DEFAULT_RESULT_IMAGE_NAME and main are assumed to be imported from the
# submit_for_inference script and the test constants; the exact import paths depend on the repository layout.

def test_submit_for_inference() -> None:
    args = ["--image_file", "Tests/ML/test_data/train_and_test_data/id1_channel1.nii.gz",
            "--model_id", DEFAULT_MODEL_ID_NUMERIC,
            "--settings", "InnerEye/settings.yml",
            "--download_folder", "."]
    seg_path = Path(DEFAULT_RESULT_IMAGE_NAME)
    # Remove any result file left over from a previous run, so the final assertion is meaningful.
    if seg_path.exists():
        seg_path.unlink()
    main(args)
    assert seg_path.exists()
# Run the test for both the NIfTI and the DICOM code paths; use_dicom is supplied via parametrize.
@pytest.mark.parametrize("use_dicom", [False, True])
def test_submit_for_inference(use_dicom: bool, test_output_dirs: OutputFolderForTests) -> None:
    """
    Execute the submit_for_inference script on the model that was recently trained.
    This starts an AzureML job and downloads the segmentation. Then check if the
    segmentation was actually produced.

    :param use_dicom: True to test DICOM in/out, False otherwise.
    :param test_output_dirs: Test output directories.
    """
    model = get_most_recent_model(fallback_run_id_for_local_execution=FALLBACK_SINGLE_RUN)
    assert PYTHON_ENVIRONMENT_NAME in model.tags, "Environment name not present in model properties"
    if use_dicom:
        size = (64, 64, 64)
        spacing = (1., 1., 2.5)
        image_file = test_output_dirs.root_dir / "temp_pack_dicom_series" / "dicom_series.zip"
        scratch_folder = test_output_dirs.root_dir / "temp_dicom_series"
        zip_random_dicom_series(size, spacing, image_file, scratch_folder)
    else:
        image_file = fixed_paths_for_tests.full_ml_test_data_path() / "train_and_test_data" / "id1_channel1.nii.gz"
    assert image_file.exists(), f"Image file not found: {image_file}"
    settings_file = fixed_paths.SETTINGS_YAML_FILE
    assert settings_file.exists(), f"Settings file not found: {settings_file}"
    # Read the name of the branch from the environment, so that the inference experiment is also listed
    # alongside all other AzureML runs that belong to the current PR.
    build_branch = os.environ.get("BUILD_BRANCH", None)
    experiment_name = to_azure_friendly_string(build_branch) if build_branch else "model_inference"
    args = ["--image_file", str(image_file),
            "--model_id", model.id,
            "--settings", str(settings_file),
            "--download_folder", str(test_output_dirs.root_dir),
            "--cluster", "training-nc12",
            "--experiment", experiment_name,
            "--use_dicom", str(use_dicom)]
    download_file = DEFAULT_RESULT_ZIP_DICOM_NAME if use_dicom else DEFAULT_RESULT_IMAGE_NAME
    seg_path = test_output_dirs.root_dir / download_file
    assert not seg_path.exists(), f"Result file {seg_path} should not yet exist"
    submit_for_inference.main(args, project_root=fixed_paths.repository_root_directory())
    assert seg_path.exists(), f"Result file {seg_path} was not created"