Example #1
def test_score_image_dicom_mock_none(
        test_output_dirs: OutputFolderForTests) -> None:
    """
    Test that DICOM in and DICOM-RT out works.

    Unlike test_score_image_dicom_mock_all below, nothing is mocked here: full image scoring is run using the PassThroughModel.

    :param test_output_dirs: Test output directories.
    """
    model_config = PassThroughModel()
    model_config.set_output_to(test_output_dirs.root_dir)
    checkpoint_path = model_config.checkpoint_folder / "checkpoint.ckpt"
    create_model_and_store_checkpoint(model_config, checkpoint_path)

    azure_config = AzureConfig()
    project_root = Path(__file__).parent.parent
    ml_runner = MLRunner(model_config=model_config,
                         azure_config=azure_config,
                         project_root=project_root)
    model_folder = test_output_dirs.root_dir / "final"
    ml_runner.copy_child_paths_to_folder(model_folder=model_folder,
                                         checkpoint_paths=[checkpoint_path])

    zipped_dicom_series_path = zip_dicom_series(model_folder)

    score_pipeline_config = ScorePipelineConfig(
        data_folder=zipped_dicom_series_path.parent,
        model_folder=str(model_folder),
        image_files=[str(zipped_dicom_series_path)],
        result_image_name=HNSEGMENTATION_FILE.name,
        use_gpu=False,
        use_dicom=True)

    segmentation = score_image(score_pipeline_config)
    assert_zip_file_contents(segmentation, HN_DICOM_RT_ZIPPED, model_folder)
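
The zip_dicom_series helper used above is defined elsewhere in the test suite. As a rough sketch of what such a helper might do, the following packs a DICOM series folder into a zip file using only the standard library; the function body and the source folder "test_data/dicom_series" are assumptions for illustration, not the project's actual implementation.

import zipfile
from pathlib import Path


def zip_dicom_series_sketch(model_folder: Path) -> Path:
    """Hypothetical sketch: pack all .dcm files of a known test series
    into a single zip file inside model_folder."""
    # The series location is an assumption for illustration.
    series_folder = Path("test_data") / "dicom_series"
    zip_path = model_folder / "dicom_series.zip"
    with zipfile.ZipFile(zip_path, "w") as zip_file:
        for dcm_file in sorted(series_folder.glob("*.dcm")):
            zip_file.write(dcm_file, arcname=dcm_file.name)
    return zip_path
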
Example #2
def test_convert_nifti_to_zipped_dicom_rt(
        test_output_dirs: OutputFolderForTests) -> None:
    """
    Test calling convert_nifti_to_zipped_dicom_rt.

    :param test_output_dirs: Test output directories.
    """
    model_folder = test_output_dirs.root_dir / "final"
    model_folder.mkdir()

    zipped_dicom_series_path = test_output_dirs.root_dir / "temp_pack_dicom_series" / "dicom_series.zip"
    zip_known_dicom_series(zipped_dicom_series_path)
    reference_series_folder = model_folder / "temp_extraction"
    nifti_filename = model_folder / "temp_nifti.nii.gz"
    convert_zipped_dicom_to_nifti(zipped_dicom_series_path,
                                  reference_series_folder, nifti_filename)
    model_config = PassThroughModel()
    result_dst = convert_nifti_to_zipped_dicom_rt(
        HNSEGMENTATION_FILE,
        reference_series_folder,
        model_folder,
        model_config,
        DEFAULT_RESULT_ZIP_DICOM_NAME,
        model_id="test_model:1")
    assert_zip_file_contents(result_dst, HN_DICOM_RT_ZIPPED, model_folder)
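
assert_zip_file_contents is a shared test helper whose body is not shown here. A minimal sketch of such an assertion, assuming the expected-contents constant is a list of file names (this signature is an assumption, not the helper's actual interface):

import zipfile
from pathlib import Path
from typing import List


def assert_zip_file_contents_sketch(zip_path: Path,
                                    expected_file_names: List[str],
                                    scratch_folder: Path) -> None:
    # Hypothetical sketch: extract the zip into a scratch folder and check
    # that exactly the expected file names are present.
    extract_folder = scratch_folder / "zip_contents"
    with zipfile.ZipFile(zip_path, "r") as zip_file:
        zip_file.extractall(extract_folder)
    actual = sorted(p.name for p in extract_folder.rglob("*") if p.is_file())
    assert actual == sorted(expected_file_names)
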
Example #3
def test_score_image_dicom_mock_all(
        test_output_dirs: OutputFolderForTests) -> None:
    """
    Test that DICOM in and DICOM-RT out works, by mocking out the functions that do most of the work.

    This mocks out init_from_model_inference_json, run_inference and store_as_ubyte_nifti so that
    only the skeleton of the logic is tested, particularly the final conversion to DICOM-RT.

    :param test_output_dirs: Test output directories.
    """
    mock_pipeline_base = {'mock_pipeline_base': True}
    model_config = PassThroughModel()
    mock_segmentation = {'mock_segmentation': True}

    model_folder = test_output_dirs.root_dir / "final"
    model_folder.mkdir()

    zipped_dicom_series_path = test_output_dirs.root_dir / "temp_pack_dicom_series" / "dicom_series.zip"
    zip_known_dicom_series(zipped_dicom_series_path)

    score_pipeline_config = ScorePipelineConfig(
        data_folder=zipped_dicom_series_path.parent,
        model_folder=str(model_folder),
        image_files=[str(zipped_dicom_series_path)],
        result_image_name=HNSEGMENTATION_FILE.name,
        use_gpu=False,
        use_dicom=True,
        model_id="Dummy:1")

    with mock.patch('score.init_from_model_inference_json',
                    return_value=(
                        mock_pipeline_base,
                        model_config)) as mock_init_from_model_inference_json:
        with mock.patch('score.run_inference',
                        return_value=mock_segmentation) as mock_run_inference:
            with mock.patch('score.store_as_ubyte_nifti',
                            return_value=HNSEGMENTATION_FILE
                            ) as mock_store_as_ubyte_nifti:
                segmentation = score_image(score_pipeline_config)
                assert_zip_file_contents(segmentation, HN_DICOM_RT_ZIPPED,
                                         model_folder)

    mock_init_from_model_inference_json.assert_called_once_with(
        Path(score_pipeline_config.model_folder),
        score_pipeline_config.use_gpu)
    mock_run_inference.assert_called()
    mock_store_as_ubyte_nifti.assert_called()
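
The three nested with mock.patch(...) blocks can be flattened with contextlib.ExitStack; this is purely a readability change with identical behavior, and the mock assertions after the block stay the same:

from contextlib import ExitStack
from unittest import mock

with ExitStack() as stack:
    # Enter the same three patches as the nested form above.
    mock_init_from_model_inference_json = stack.enter_context(
        mock.patch('score.init_from_model_inference_json',
                   return_value=(mock_pipeline_base, model_config)))
    mock_run_inference = stack.enter_context(
        mock.patch('score.run_inference', return_value=mock_segmentation))
    mock_store_as_ubyte_nifti = stack.enter_context(
        mock.patch('score.store_as_ubyte_nifti',
                   return_value=HNSEGMENTATION_FILE))
    segmentation = score_image(score_pipeline_config)
    assert_zip_file_contents(segmentation, HN_DICOM_RT_ZIPPED, model_folder)
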
Example #4
def test_convert_nifti_to_zipped_dicom_rt(
        test_output_dirs: OutputFolderForTests) -> None:
    """
    Test calling convert_nifti_to_zipped_dicom_rt.

    :param test_output_dirs: Test output directories.
    """
    model_folder = test_output_dirs.root_dir / "final"
    model_folder.mkdir()

    zipped_dicom_series_path = zip_dicom_series(model_folder)
    nifti_filename, reference_series_folder = convert_zipped_dicom_to_nifti(
        zipped_dicom_series_path, model_folder)
    model_config = PassThroughModel()
    result_dst = convert_nifti_to_zipped_dicom_rt(HNSEGMENTATION_FILE,
                                                  reference_series_folder,
                                                  model_folder, model_config)
    assert_zip_file_contents(result_dst, HN_DICOM_RT_ZIPPED, model_folder)
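
Note that Examples #2 and #4 are the same test written against two revisions of the conversion helpers: in Example #4, convert_zipped_dicom_to_nifti chooses and returns the NIfTI path and the reference-series folder itself, and convert_nifti_to_zipped_dicom_rt takes neither a result zip name nor a model_id, whereas in Example #2 the caller supplies all of these explicitly. Which signature applies depends on the version of the codebase in use.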
Example #5
def run_model_inference_train_and_test(
        test_output_dirs: OutputFolderForTests,
        perform_cross_validation: bool,
        inference_on_train_set: Optional[bool] = None,
        inference_on_val_set: Optional[bool] = None,
        inference_on_test_set: Optional[bool] = None,
        ensemble_inference_on_train_set: Optional[bool] = None,
        ensemble_inference_on_val_set: Optional[bool] = None,
        ensemble_inference_on_test_set: Optional[bool] = None,
        model_proc: ModelProcessing = ModelProcessing.DEFAULT) -> None:
    """
    Test that running inference produces the expected output metrics, files, and folders, and the expected calls to upload_folder.

    :param test_output_dirs: Test output directories.
    :param perform_cross_validation: Whether to test with cross validation.
    :param inference_on_train_set: Override for inference on train data sets.
    :param inference_on_val_set: Override for inference on validation data sets.
    :param inference_on_test_set: Override for inference on test data sets.
    :param ensemble_inference_on_train_set: Override for ensemble inference on train data sets.
    :param ensemble_inference_on_val_set: Override for ensemble inference on validation data sets.
    :param ensemble_inference_on_test_set: Override for ensemble inference on test data sets.
    :param model_proc: Model processing to test.
    :return: None.
    """
    dummy_model = DummyModel()

    config = PassThroughModel()
    # Copy settings from DummyModel
    config.image_channels = dummy_model.image_channels
    config.ground_truth_ids = dummy_model.ground_truth_ids
    config.ground_truth_ids_display_names = dummy_model.ground_truth_ids_display_names
    config.colours = dummy_model.colours
    config.fill_holes = dummy_model.fill_holes
    config.roi_interpreted_types = dummy_model.roi_interpreted_types

    config.test_crop_size = (16, 16, 16)
    config.number_of_cross_validation_splits = 2 if perform_cross_validation else 0
    config.inference_on_train_set = inference_on_train_set
    config.inference_on_val_set = inference_on_val_set
    config.inference_on_test_set = inference_on_test_set
    config.ensemble_inference_on_train_set = ensemble_inference_on_train_set
    config.ensemble_inference_on_val_set = ensemble_inference_on_val_set
    config.ensemble_inference_on_test_set = ensemble_inference_on_test_set
    # Plotting crashes with random TCL errors on Windows, so disable it for Windows PR builds.
    config.is_plotting_enabled = common_util.is_linux()

    config.set_output_to(test_output_dirs.root_dir)
    train_and_test_data_small_dir = test_output_dirs.root_dir / "train_and_test_data_small"
    config.local_dataset = create_train_and_test_data_small_dataset(
        config.test_crop_size, full_ml_test_data_path(), "train_and_test_data",
        train_and_test_data_small_dir, "data")

    checkpoint_path = config.checkpoint_folder / LAST_CHECKPOINT_FILE_NAME_WITH_SUFFIX
    create_model_and_store_checkpoint(config, checkpoint_path)
    checkpoint_handler = get_default_checkpoint_handler(
        model_config=config, project_root=test_output_dirs.root_dir)
    checkpoint_handler.additional_training_done()

    mock_upload_path = test_output_dirs.root_dir / "mock_upload"
    mock_upload_path.mkdir()

    run = create_mock_run(mock_upload_path, config)

    # Mock out AzureConfig so that fetch_run returns the mock run created
    # above; no real AzureML access happens in this test.
    azure_config = Mock(name='mock_azure_config')
    azure_config.fetch_run.return_value = run

    runner = MLRunner(model_config=config, azure_config=azure_config)

    with mock.patch("InnerEye.ML.model_testing.PARENT_RUN_CONTEXT", run):
        metrics = runner.model_inference_train_and_test(
            checkpoint_paths=checkpoint_handler.get_checkpoints_to_test(),
            model_proc=model_proc)

    if model_proc == ModelProcessing.ENSEMBLE_CREATION:
        # Create a fake ensemble dataset.csv
        dataset_df = create_dataset_df()
        dataset_df.to_csv(config.outputs_folder / DATASET_CSV_FILE_NAME)

        with mock.patch.object(PlotCrossValidationConfig,
                               'azure_config',
                               return_value=azure_config):
            with mock.patch("InnerEye.Azure.azure_util.PARENT_RUN_CONTEXT",
                            run):
                with mock.patch("InnerEye.ML.run_ml.PARENT_RUN_CONTEXT", run):
                    runner.plot_cross_validation_and_upload_results()
                    runner.generate_report(ModelProcessing.ENSEMBLE_CREATION)

    if model_proc == ModelProcessing.DEFAULT:
        named_metrics = {
            ModelExecutionMode.TRAIN: inference_on_train_set,
            ModelExecutionMode.TEST: inference_on_test_set,
            ModelExecutionMode.VAL: inference_on_val_set
        }
    else:
        named_metrics = {
            ModelExecutionMode.TRAIN: ensemble_inference_on_train_set,
            ModelExecutionMode.TEST: ensemble_inference_on_test_set,
            ModelExecutionMode.VAL: ensemble_inference_on_val_set
        }

    error = ''
    expected_upload_folder_count = 0
    for mode, flag in named_metrics.items():
        if mode in metrics:
            metric = metrics[mode]
            assert isinstance(metric, InferenceMetricsForSegmentation)

        if flag is None:
            # No override supplied, calculate the expected default:
            if model_proc == ModelProcessing.DEFAULT:
                if not perform_cross_validation:
                    # For a normal run, inference defaults to the test set only.
                    flag = mode == ModelExecutionMode.TEST
                else:
                    # For an ensemble child run, inference defaults to off for all sets.
                    flag = False
            else:
                # For an ensemble, inference defaults to the test set only.
                flag = mode == ModelExecutionMode.TEST

        if mode in metrics and not flag:
            error = error + f"Error: unexpected inference metrics for {mode.value}."
        elif mode not in metrics and flag:
            error = error + f"Error: missing inference metrics for {mode.value}."
        results_folder = config.outputs_folder / get_best_epoch_results_path(
            mode, model_proc)
        folder_exists = results_folder.is_dir()
        assert folder_exists == flag
        if flag and model_proc == ModelProcessing.ENSEMBLE_CREATION:
            expected_upload_folder_count += 1
            expected_name = get_best_epoch_results_path(
                mode, ModelProcessing.DEFAULT)
            run.upload_folder.assert_any_call(name=str(expected_name),
                                              path=str(results_folder))
    if error:
        raise ValueError(error)

    if model_proc == ModelProcessing.ENSEMBLE_CREATION:
        # The generated report should also have been uploaded via the mock run.
        expected_upload_folder_count += 1

    assert run.upload_folder.call_count == expected_upload_folder_count
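
run_model_inference_train_and_test is a parametrizable helper rather than a test in its own right. A sketch of how a test could drive it via pytest.mark.parametrize; the test name and the chosen parameter values are assumptions for illustration, and OutputFolderForTests is assumed to be importable as in the examples above:

from typing import Optional

import pytest


@pytest.mark.parametrize("perform_cross_validation", [False, True])
@pytest.mark.parametrize("inference_on_test_set", [None, False, True])
def test_model_inference_train_and_test(
        test_output_dirs: OutputFolderForTests,
        perform_cross_validation: bool,
        inference_on_test_set: Optional[bool]) -> None:
    # Exercise the helper across default and overridden test-set settings.
    run_model_inference_train_and_test(
        test_output_dirs,
        perform_cross_validation,
        inference_on_test_set=inference_on_test_set)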