Example #1
    def post_process(
            self,
            results: InferencePipeline.Result) -> InferencePipeline.Result:
        """
        Perform connected component analysis to update segmentation with largest
        connected component based on the configurations
        :param results: inference results to post-process
        :return: post-processed version of results
        """
        if self.model_config.posterior_smoothing_mm:
            posteriors = gaussian_smooth_posteriors(
                posteriors=results.posteriors,
                kernel_size_mm=self.model_config.posterior_smoothing_mm,
                voxel_spacing_mm=results.voxel_spacing_mm)

            results = InferencePipeline.Result(
                epoch=results.epoch,
                patient_id=results.patient_id,
                posteriors=posteriors,
                segmentation=posteriors_to_segmentation(posteriors),
                voxel_spacing_mm=results.voxel_spacing_mm)

        if self.model_config.summed_probability_rules and not self.model_config.disable_extra_postprocessing:
            assert isinstance(self.model_config, SegmentationModelBase)
            results = results.with_new_segmentation(
                image_util.apply_summed_probability_rules(
                    self.model_config, results.posteriors,
                    results.segmentation))

        if self.model_config.largest_connected_component_foreground_classes is not None:
            # get indices for classes to restrict
            restrict_class_indices_and_thresholds = []
            for name, idx in self.model_config.class_and_index_with_background(
            ).items():
                for name2, threshold in self.model_config.largest_connected_component_foreground_classes:
                    if name2 == name:
                        restrict_class_indices_and_thresholds.append(
                            (idx, threshold))
            results = results.with_new_segmentation(
                image_util.extract_largest_foreground_connected_component(
                    multi_label_array=results.segmentation,
                    # mypy gets confused below because List is invariant. Sequence is covariant
                    # but does not allow "append".
                    restrictions=restrict_class_indices_and_thresholds)
            )  # type: ignore

        if self.model_config.slice_exclusion_rules and not self.model_config.disable_extra_postprocessing:
            results = results.with_new_segmentation(
                image_util.apply_slice_exclusion_rules(self.model_config,
                                                       results.segmentation))

        return results
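
For reference, here is a minimal sketch of the "largest connected component" idea that post_process applies per foreground class, assuming scipy is available. This is an illustration of the technique only, not the image_util.extract_largest_foreground_connected_component implementation, and keep_largest_component is a hypothetical helper name.

import numpy as np
from scipy import ndimage


def keep_largest_component(segmentation: np.ndarray, class_index: int) -> np.ndarray:
    """Keep only the largest connected component of `class_index`; smaller
    components of that class are reset to background (0)."""
    result = segmentation.copy()
    labelled, num_components = ndimage.label(segmentation == class_index)
    if num_components <= 1:
        return result
    sizes = np.bincount(labelled.ravel())[1:]  # component sizes, skipping background
    largest_label = int(np.argmax(sizes)) + 1
    result[(labelled > 0) & (labelled != largest_label)] = 0
    return result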
Example #2
def test_posterior_smoothing() -> None:
    def _load_and_scale_image(name: str) -> ImageWithHeader:
        image_with_header = load_nifti_image(full_ml_test_data_path(name))
        scaled_image = LinearTransform.transform(data=image_with_header.image,
                                                 input_range=(0, 255),
                                                 output_range=(0, 1))
        return ImageWithHeader(image=scaled_image, header=image_with_header.header)

    original_posterior_fg = _load_and_scale_image("posterior_bladder.nii.gz")
    expected_posterior_fg = _load_and_scale_image(
        "smoothed_posterior_bladder.nii.gz").image

    # create the BG/FG posterior pair from single posterior and apply smoothing
    smoothed_posterior_fg = gaussian_smooth_posteriors(
        posteriors=np.stack(
            [1 - original_posterior_fg.image, original_posterior_fg.image],
            axis=0),
        kernel_size_mm=(2, 2, 2),
        voxel_spacing_mm=original_posterior_fg.header.spacing)[1]

    # check the smoothed posterior against the expected one; tolerance is required due to the (0, 255) <=> (0, 1) scaling
    assert np.allclose(expected_posterior_fg, smoothed_posterior_fg, atol=1e-2)
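
To make the test above easier to follow, here is a rough sketch of what per-class Gaussian smoothing of a posterior stack can look like, assuming scipy; the exact mm-to-sigma conversion and any renormalisation inside gaussian_smooth_posteriors may differ, and smooth_posteriors_sketch is a hypothetical name.

import numpy as np
from scipy.ndimage import gaussian_filter


def smooth_posteriors_sketch(posteriors: np.ndarray,
                             kernel_size_mm: tuple,
                             voxel_spacing_mm: tuple) -> np.ndarray:
    """Smooth each class channel of a (classes, Z, Y, X) posterior array,
    converting the kernel size in mm to a per-axis sigma in voxels."""
    sigma_voxels = [k / s for k, s in zip(kernel_size_mm, voxel_spacing_mm)]
    smoothed = np.stack([gaussian_filter(channel, sigma=sigma_voxels)
                         for channel in posteriors], axis=0)
    # Renormalise so the class channels still sum to 1 at every voxel.
    return smoothed / np.clip(smoothed.sum(axis=0, keepdims=True), 1e-8, None)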
Example #3
def inference_identity(
        test_output_dirs: OutputFolderForTests,
        image_size: Any = (4, 5, 8),
        crop_size: Any = (5, 5, 5),
        shrink_by: Any = (0, 0, 0),
        num_classes: int = 5,
        create_mask: bool = True,
        extract_largest_foreground_connected_component: bool = False,
        is_ensemble: bool = False,
        posterior_smoothing_mm: Any = None) -> None:
    """
    Test to make sure inference pipeline is identity preserving, ie: we can recreate deterministic
    model output, ensuring the patching and stitching is robust.
    """
    # fix random seed
    np.random.seed(0)

    ground_truth_ids = list(map(str, range(num_classes)))
    # image to run inference on: The mock model passes the input image through, hence the input
    # image must have as many channels as we have classes (plus background), such that the output is
    # also a valid posterior.
    num_channels = num_classes + 1
    image_channels = np.random.randn(num_channels, *list(image_size))
    # create a random mask if required
    mask = np.round(np.random.uniform(
        size=image_size)).astype(int) if create_mask else None
    config = InferenceIdentityModel(shrink_by=shrink_by)
    config.crop_size = crop_size
    config.test_crop_size = crop_size
    config.image_channels = list(map(str, range(num_channels)))
    config.ground_truth_ids = ground_truth_ids
    config.posterior_smoothing_mm = posterior_smoothing_mm

    # We have to set largest_connected_component_foreground_classes after creating the model config,
    # because this parameter is not overridable and hence will not be set by GenericConfig's constructor.
    if extract_largest_foreground_connected_component:
        config.largest_connected_component_foreground_classes = [
            (c, None) for c in ground_truth_ids
        ]
    # set expected posteriors
    expected_posteriors = torch.nn.functional.softmax(
        torch.tensor(image_channels), dim=0).numpy()
    # apply the mask if required
    if mask is not None:
        expected_posteriors = image_util.apply_mask_to_posteriors(
            expected_posteriors, mask)
    if posterior_smoothing_mm is not None:
        expected_posteriors = image_util.gaussian_smooth_posteriors(
            posteriors=expected_posteriors,
            kernel_size_mm=posterior_smoothing_mm,
            voxel_spacing_mm=(1, 1, 1))
    # compute expected segmentation
    expected_segmentation = image_util.posteriors_to_segmentation(
        expected_posteriors)
    if extract_largest_foreground_connected_component:
        largest_component = image_util.extract_largest_foreground_connected_component(
            multi_label_array=expected_segmentation)
        # make sure the test data contains more than one connected component, so that the extraction actually changes the segmentation
        assert not np.array_equal(largest_component, expected_segmentation)
        expected_segmentation = largest_component

    # instantiate the model
    checkpoint = test_output_dirs.root_dir / "checkpoint.ckpt"
    create_model_and_store_checkpoint(config, checkpoint_path=checkpoint)

    # create single or ensemble inference pipeline
    inference_pipeline = InferencePipeline.create_from_checkpoint(
        path_to_checkpoint=checkpoint, model_config=config)
    assert inference_pipeline is not None
    full_image_inference_pipeline = EnsemblePipeline([inference_pipeline], config) \
        if is_ensemble else inference_pipeline

    # compute full image inference results
    inference_result = full_image_inference_pipeline \
        .predict_and_post_process_whole_image(image_channels=image_channels, mask=mask, voxel_spacing_mm=(1, 1, 1))

    # Segmentation must have the same size as the input image
    assert inference_result.segmentation.shape == image_size
    assert inference_result.posteriors.shape == (num_classes +
                                                 1, ) + image_size
    # check that the posteriors and segmentation match the expected values
    assert np.allclose(inference_result.posteriors, expected_posteriors)
    assert np.array_equal(inference_result.segmentation, expected_segmentation)
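
For context, here is a short sketch of how the expected outputs in this test are derived, assuming (as an illustration, not a statement about image_util) that the mock model returns its input and that posteriors_to_segmentation amounts to an argmax over the class axis; expected_outputs_sketch is a hypothetical name.

import numpy as np
import torch


def expected_outputs_sketch(image_channels: np.ndarray) -> tuple:
    """image_channels has shape (classes + 1, Z, Y, X): softmax over the channel
    axis gives the expected posteriors, argmax gives the expected segmentation."""
    posteriors = torch.softmax(torch.tensor(image_channels), dim=0).numpy()
    segmentation = np.argmax(posteriors, axis=0)
    return posteriors, segmentation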