Example 1
def test_apply_summed_probability_rules_no_change(
        model_config: SegmentationModelBase, is_batched: bool) -> None:
    """
    Test `apply_summed_probability_rules` with valid inputs and no expected change
    """
    posteriors = np.zeros(shape=(2, 4, 3, 3, 3))
    posteriors[:, 3] = 0.6
    posteriors[:, 1, :2] = 0.2
    posteriors[:, 2, :2] = 0.2

    # class 1 and class 2 together do not have a larger probability than external (class 3).
    expected_segmentation = np.full(shape=(2, 3, 3, 3), fill_value=3)

    if not is_batched:
        posteriors = posteriors[0]
        expected_segmentation = expected_segmentation[0]

    segmentation = image_util.posteriors_to_segmentation(posteriors)

    # test for both np arrays and tensors
    # we make a copy of segmentation as apply_summed_probability_rules modifies it in place
    assert np.array_equal(
        expected_segmentation,
        image_util.apply_summed_probability_rules(model_config, posteriors,
                                                  np.copy(segmentation)))
    # noinspection PyTypeChecker
    assert torch.equal(
        torch.from_numpy(expected_segmentation).type(
            torch.LongTensor),  # type: ignore
        image_util.apply_summed_probability_rules(
            model_config, torch.from_numpy(posteriors),
            torch.from_numpy(np.copy(segmentation))))
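
For context, the behaviour these tests exercise can be sketched in plain NumPy. The helper below is hypothetical: the project's apply_summed_probability_rules reads its rules from model_config, accepts batched arrays and torch tensors, and (as the test comments note) modifies the segmentation in place, whereas this sketch covers only a single rule on an unbatched array and returns a copy. The idea it illustrates: where the segmentation currently shows the external class but two foreground classes together outweigh it, the voxel is reassigned to the more probable of the two.

import numpy as np

def summed_probability_rule_sketch(posteriors: np.ndarray,
                                   segmentation: np.ndarray,
                                   class_a: int,
                                   class_b: int,
                                   external: int) -> np.ndarray:
    # posteriors: (classes, X, Y, Z); segmentation: (X, Y, Z).
    # Voxels labelled as the external class where the two foreground classes
    # together have a higher posterior than the external class itself.
    change = ((segmentation == external)
              & (posteriors[class_a] + posteriors[class_b] > posteriors[external]))
    # Within that region, keep whichever of the two classes is more probable.
    winner = np.where(posteriors[class_a] >= posteriors[class_b], class_a, class_b)
    result = segmentation.copy()
    result[change] = winner[change]
    return result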
Example 2
def test_apply_summed_probability_rules_incorrect_input(
        model_config: SegmentationModelBase) -> None:
    """
    Test `apply_summed_probability_rules` with invalid inputs
    """
    posteriors = np.zeros(shape=(2, 4, 3, 3, 3))
    segmentation = image_util.posteriors_to_segmentation(posteriors)

    with pytest.raises(ValueError):
        # noinspection PyTypeChecker
        image_util.apply_summed_probability_rules(model_config,
                                                  posteriors=posteriors,
                                                  segmentation=None)

    with pytest.raises(ValueError):
        # noinspection PyTypeChecker
        image_util.apply_summed_probability_rules(model_config,
                                                  posteriors=None,
                                                  segmentation=segmentation)

    with pytest.raises(ValueError):
        image_util.apply_summed_probability_rules(
            model_config,
            posteriors=posteriors,
            segmentation=np.zeros(shape=(3, 3, 3)))

    with pytest.raises(ValueError):
        image_util.apply_summed_probability_rules(
            model_config,
            posteriors=posteriors,
            segmentation=np.zeros(shape=(3, 3, 3, 3)))
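
The ValueError cases above hint at the kind of validation the function performs on its inputs. Below is a minimal sketch of such checks, assuming (as in the tests) that posteriors are (B, C, X, Y, Z) or (C, X, Y, Z) and the segmentation must have the same shape with the class dimension dropped; this is illustrative only, not the library's actual validation code.

import numpy as np

def check_inputs_sketch(posteriors: np.ndarray, segmentation: np.ndarray) -> None:
    # Both inputs must be provided.
    if posteriors is None or segmentation is None:
        raise ValueError("Both posteriors and segmentation must be provided")
    # Dropping the class dimension of the posteriors must leave exactly the
    # segmentation shape: (B, C, X, Y, Z) -> (B, X, Y, Z), or (C, X, Y, Z) -> (X, Y, Z).
    expected_shape = posteriors.shape[:-4] + posteriors.shape[-3:]
    if segmentation.shape != expected_shape:
        raise ValueError(f"Shape mismatch: posteriors {posteriors.shape}, "
                         f"segmentation {segmentation.shape}")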
Example 3
    def post_process(
            self,
            results: InferencePipeline.Result) -> InferencePipeline.Result:
        """
        Perform connected component analysis to update segmentation with largest
        connected component based on the configurations
        :param results: inference results to post-process
        :return: post-processed version of results
        """
        if self.model_config.posterior_smoothing_mm:
            posteriors = gaussian_smooth_posteriors(
                posteriors=results.posteriors,
                kernel_size_mm=self.model_config.posterior_smoothing_mm,
                voxel_spacing_mm=results.voxel_spacing_mm)

            results = InferencePipeline.Result(
                epoch=results.epoch,
                patient_id=results.patient_id,
                posteriors=posteriors,
                segmentation=posteriors_to_segmentation(posteriors),
                voxel_spacing_mm=results.voxel_spacing_mm)

        if self.model_config.summed_probability_rules and not self.model_config.disable_extra_postprocessing:
            assert isinstance(self.model_config, SegmentationModelBase)
            results = results.with_new_segmentation(
                image_util.apply_summed_probability_rules(
                    self.model_config, results.posteriors,
                    results.segmentation))

        if self.model_config.largest_connected_component_foreground_classes is not None:
            # get indices for classes to restrict
            restrict_class_indices_and_thresholds = []
            for name, idx in self.model_config.class_and_index_with_background(
            ).items():
                for name2, threshold in self.model_config.largest_connected_component_foreground_classes:
                    if name2 == name:
                        restrict_class_indices_and_thresholds.append(
                            (idx, threshold))
            results = results.with_new_segmentation(
                image_util.extract_largest_foreground_connected_component(
                    multi_label_array=results.segmentation,
                    # mypy gets confused below because List is invariant. Sequence is covariant
                    # but does not allow "append".
                    restrictions=restrict_class_indices_and_thresholds)
            )  # type: ignore

        if self.model_config.slice_exclusion_rules and not self.model_config.disable_extra_postprocessing:
            results = results.with_new_segmentation(
                image_util.apply_slice_exclusion_rules(self.model_config,
                                                       results.segmentation))

        return results
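
The nested loop in post_process that builds restrict_class_indices_and_thresholds pairs each configured class name with its label index. Assuming class_and_index_with_background() returns a name-to-index mapping, as its use above implies, the same list can be built more directly; the helper below is an illustrative sketch, not the project's code.

from typing import Dict, List, Optional, Sequence, Tuple

def build_restrictions_sketch(
        name_to_index: Dict[str, int],
        foreground_classes: Sequence[Tuple[str, Optional[float]]]
) -> List[Tuple[int, Optional[float]]]:
    # Pair each configured class name with its label index, skipping names
    # that are not in the mapping (mirroring the name2 == name check above).
    return [(name_to_index[name], threshold)
            for name, threshold in foreground_classes
            if name in name_to_index]

# Example: build_restrictions_sketch({"background": 0, "lung": 1}, [("lung", 0.5)])
# returns [(1, 0.5)].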
Example 4
def test_apply_summed_probability_rules_change(
        model_config: SegmentationModelBase, is_batched: bool) -> None:
    """
    Test `apply_summed_probability_rules` with valid inputs and an expected change
    """
    posteriors = np.zeros(shape=(2, 4, 3, 3, 3))
    posteriors[:, 3] = 0.4
    posteriors[:, 1, :1] = 0.35
    posteriors[:, 2, :1] = 0.25
    posteriors[:, 1, 1:2] = 0.25
    posteriors[:, 2, 1:2] = 0.35

    # class 1 and class 2 together have a larger probability than class 3 in the first
    # two slices along the first spatial axis. In this region, we replace external
    # pixels (class 3) with class 1 or class 2, whichever has the higher probability.
    expected_segmentation = np.full(shape=(2, 3, 3, 3), fill_value=3)
    expected_segmentation[:, :1] = 1
    expected_segmentation[:, 1:2] = 2

    if not is_batched:
        posteriors = posteriors[0]
        expected_segmentation = expected_segmentation[0]

    segmentation = image_util.posteriors_to_segmentation(posteriors)

    # test for both np arrays and tensors
    # we make a copy of segmentation as apply_summed_probability_rules modifies it in place
    assert np.array_equal(
        expected_segmentation,
        image_util.apply_summed_probability_rules(model_config, posteriors,
                                                  np.copy(segmentation)))
    # noinspection PyTypeChecker
    assert torch.equal(
        torch.from_numpy(expected_segmentation).type(
            torch.LongTensor),  # type: ignore
        image_util.apply_summed_probability_rules(
            model_config, torch.from_numpy(posteriors),
            torch.from_numpy(np.copy(segmentation))))
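
All of the tests above build their starting segmentation with image_util.posteriors_to_segmentation. Conceptually this is an argmax over the class dimension; the function below is a minimal NumPy-only sketch, assuming the (B, C, X, Y, Z) and (C, X, Y, Z) layouts used in these tests (the project's helper may additionally handle torch tensors and dtype conversion).

import numpy as np

def posteriors_to_segmentation_sketch(posteriors: np.ndarray) -> np.ndarray:
    # The class axis sits four positions from the end: axis 1 for a batched
    # (B, C, X, Y, Z) array, axis 0 for an unbatched (C, X, Y, Z) one.
    return np.argmax(posteriors, axis=posteriors.ndim - 4)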