Example #1
0
def test_apply_mask_to_posteriors(image_size: List[int]) -> None:
    """
    Test to make sure masks are being applied as expected.
    """
    image = np.ones(image_size)
    # A 5-dimensional image keeps its leading (batch) dimension in the mask;
    # otherwise the mask covers only the trailing 3 spatial dimensions.
    if len(image_size) == 5:
        mask_size = image_size[:1] + image_size[-3:]
    else:
        mask_size = image_size[-3:]
    mask = np.zeros(shape=mask_size)
    mask[0] = 1

    # Every combination with a None argument, or an image used as a mask,
    # must raise.
    for bad_posteriors, bad_mask in [(None, None), (image, None),
                                     (None, image), (image, image)]:
        with pytest.raises(Exception):
            # noinspection PyTypeChecker
            image_util.apply_mask_to_posteriors(posteriors=bad_posteriors,
                                                mask=bad_mask)

    image = image_util.apply_mask_to_posteriors(posteriors=image, mask=mask)
    assert np.all(image[:, 0, ...] == 1)
    assert np.all(image[1:, 1:, ...] == 0)
Example #2
0
    def post_process(self) -> InferenceBatch:
        """
        Run post-processing over the computed outputs of a single pipeline pass.

        Steps performed:
        1) the mask is applied to the posteriors, when a mask is present;
        2) an argmax over the (possibly masked) posteriors produces a
           multi-label segmentation.
        Both the post-processed posteriors and the segmentation are stored
        back onto the batch, which is returned to allow chaining.
        """
        mask = self.get_component(InferenceBatch.Components.Mask)
        posteriors = self.get_component(InferenceBatch.Components.Posteriors)
        if mask is None:
            masked_posteriors = posteriors
        else:
            masked_posteriors = image_util.apply_mask_to_posteriors(
                posteriors=posteriors, mask=mask)

        # Argmax over the posterior probabilities yields the segmentation.
        segmentation = image_util.posteriors_to_segmentation(masked_posteriors)

        # Persist the post-processed posteriors and the new segmentation.
        self.set_component(component=InferenceBatch.Components.Posteriors,
                           data=masked_posteriors)
        self.set_component(component=InferenceBatch.Components.Segmentation,
                           data=segmentation)

        return self
Example #3
0
    def _forward_pass(
            self,
            patches: torch.Tensor,
            mask: torch.Tensor = None,
            labels: torch.Tensor = None) -> SegmentationForwardPass.Result:
        """
        Run one forward pass of the segmentation model over a batch of patches.

        In training mode, the loss is required and a single optimizer step is
        taken. Outside training mode, the optional mask is applied to the
        softmax posteriors before the segmentation is derived.

        :param patches: Input image patches; moved to GPU if possible.
        :param mask: Optional mask; applied to posteriors only outside training mode.
        :param labels: Optional ground truth labels, used for loss computation.
            Required when in training mode.
        :return: Result holding numpy posteriors, segmentations, and the scalar
            loss (None if no loss was computed).
        :raises ValueError: In training mode, if no loss could be computed
            (i.e. labels were missing).
        """
        # ensure that we always have float tensors as the model is defined over floats
        # and transfer the tensors to the GPU if possible before the forward pass
        patches = self.config.get_gpu_tensor_if_possible(patches)
        if mask is not None:
            mask = self.config.get_gpu_tensor_if_possible(mask)

        logits, loss = self._compute_loss(patches, labels)

        if self.in_training_mode:
            if loss is None:
                raise ValueError(
                    "When running training, the labels must be present for loss computation."
                )
            assert self.optimizer is not None  # for mypy
            # Backprop and apply one optimizer step (with gradient scaling for AMP).
            single_optimizer_step(loss, self.optimizer, self.gradient_scaler)

        # Aggregate data parallel logits if multiple hardware are used in forward pass
        if isinstance(logits, list):
            # When using multiple GPUs, logits is a list of tensors. Gather will concatenate them
            # across the first dimension, and move them to GPU0.
            logits = torch.nn.parallel.gather(logits, target_device=0)

        # apply Softmax on dimension 1 (Class) to map model output into a posterior probability distribution [0,1]
        posteriors = torch.nn.functional.softmax(logits, dim=1)

        # apply mask if required
        if not self.in_training_mode and mask is not None:
            posteriors = image_util.apply_mask_to_posteriors(
                posteriors=posteriors, mask=mask)

        # post process posteriors to compute result; move results to CPU numpy arrays
        segmentations = image_util.posteriors_to_segmentation(
            posteriors=posteriors).data.cpu().numpy()
        posteriors = posteriors.data.cpu().numpy()

        return SegmentationForwardPass.Result(
            posteriors=posteriors,
            segmentations=segmentations,
            loss=loss.item() if loss is not None else None)
Example #4
0
    def post_process_posteriors(
            self,
            posteriors: np.ndarray,
            mask: np.ndarray = None) -> Tuple[np.ndarray, np.ndarray]:
        """
        Post-process the computed posteriors of a single pipeline pass.

        When a mask is supplied it is applied to the posteriors first; the
        segmentation is then obtained via an argmax over the posterior
        probabilities.

        :param posteriors: Posterior probabilities per class.
        :param mask: Optional mask to apply to the posteriors.
        :return: Tuple of (post-processed posteriors, segmentation).
        """
        masked = posteriors if mask is None else \
            image_util.apply_mask_to_posteriors(posteriors=posteriors,
                                                mask=mask)

        # Argmax over the posterior probabilities gives the multi-label segmentation.
        segmentation = image_util.posteriors_to_segmentation(masked)
        return masked, segmentation
Example #5
0
    def training_or_validation_step(self, sample: Dict[str,
                                                       Any], batch_index: int,
                                    is_training: bool) -> torch.Tensor:
        """
        Runs training for a single minibatch of training or validation data, and computes all metrics.
        :param is_training: If true, the method is called from `training_step`, otherwise it is called from
        `validation_step`.
        :param sample: The batched sample on which the model should be trained.
        :param batch_index: The index of the present batch (supplied only for diagnostics).
        """
        cropped_sample: CroppedSample = CroppedSample.from_dict(sample=sample)
        # The model can produce an output smaller than the input image (crop), so only
        # the center crop of the labels tensor is compared against the prediction.
        labels = cropped_sample.labels_center_crop

        # The mask is only used during training; gradients are only tracked then too.
        mask = None
        if is_training:
            mask = cropped_sample.mask_center_crop
            logits = self.model(cropped_sample.image)
        else:
            with torch.no_grad():
                logits = self.model(cropped_sample.image)
        loss = self.loss_fn(logits, labels)

        # Softmax over dimension 1 (Class) turns model outputs into posterior probabilities in [0,1].
        posteriors = self.logits_to_posterior(logits)

        # Restrict the posteriors to the mask when one is present.
        if mask is not None:
            posteriors = image_util.apply_mask_to_posteriors(
                posteriors=posteriors, mask=mask)  # type: ignore

        # Derive the segmentation from the posteriors and compute all metrics on it.
        segmentation = image_util.posteriors_to_segmentation(
            posteriors=posteriors)  # type: ignore
        self.compute_metrics(cropped_sample, segmentation,
                             is_training)  # type: ignore

        self.write_loss(is_training, loss)
        return loss
Example #6
0
def inference_identity(
        test_output_dirs: OutputFolderForTests,
        image_size: Any = (4, 5, 8),
        crop_size: Any = (5, 5, 5),
        shrink_by: Any = (0, 0, 0),
        num_classes: int = 5,
        create_mask: bool = True,
        extract_largest_foreground_connected_component: bool = False,
        is_ensemble: bool = False,
        posterior_smoothing_mm: Any = None) -> None:
    """
    Test to make sure inference pipeline is identity preserving, ie: we can recreate deterministic
    model output, ensuring the patching and stitching is robust.

    :param test_output_dirs: Fixture providing the folder where the checkpoint is stored.
    :param image_size: Spatial size of the test image.
    :param crop_size: Crop size used for training and test-time inference.
    :param shrink_by: Amount by which the mock model shrinks its output.
    :param num_classes: Number of foreground classes.
    :param create_mask: If True, a random binary mask is applied to the posteriors.
    :param extract_largest_foreground_connected_component: If True, only the largest
        foreground connected component of the segmentation is kept.
    :param is_ensemble: If True, wrap the single pipeline in an ensemble pipeline.
    :param posterior_smoothing_mm: Optional Gaussian smoothing kernel size in mm.
    """
    # fix random seed so the generated test data is deterministic
    np.random.seed(0)

    ground_truth_ids = list(map(str, range(num_classes)))
    # image to run inference on: The mock model passes the input image through, hence the input
    # image must have as many channels as we have classes (plus background), such that the output is
    # also a valid posterior.
    num_channels = num_classes + 1
    image_channels = np.random.randn(num_channels, *list(image_size))
    # create a random binary mask if required. Use the builtin int here: the np.int
    # alias was deprecated in NumPy 1.20 and removed in NumPy 1.24, so astype(np.int)
    # raises AttributeError on current NumPy versions.
    mask = np.round(np.random.uniform(
        size=image_size)).astype(int) if create_mask else None
    config = InferenceIdentityModel(shrink_by=shrink_by)
    config.crop_size = crop_size
    config.test_crop_size = crop_size
    config.image_channels = list(map(str, range(num_channels)))
    config.ground_truth_ids = ground_truth_ids
    config.posterior_smoothing_mm = posterior_smoothing_mm

    # We have to set largest_connected_component_foreground_classes after creating the model config,
    # because this parameter is not overridable and hence will not be set by GenericConfig's constructor.
    if extract_largest_foreground_connected_component:
        config.largest_connected_component_foreground_classes = [
            (c, None) for c in ground_truth_ids
        ]
    # set expected posteriors: softmax over the channel dimension of the raw image
    expected_posteriors = torch.nn.functional.softmax(
        torch.tensor(image_channels), dim=0).numpy()
    # apply the mask if required
    if mask is not None:
        expected_posteriors = image_util.apply_mask_to_posteriors(
            expected_posteriors, mask)
    if posterior_smoothing_mm is not None:
        expected_posteriors = image_util.gaussian_smooth_posteriors(
            posteriors=expected_posteriors,
            kernel_size_mm=posterior_smoothing_mm,
            voxel_spacing_mm=(1, 1, 1))
    # compute expected segmentation
    expected_segmentation = image_util.posteriors_to_segmentation(
        expected_posteriors)
    if extract_largest_foreground_connected_component:
        largest_component = image_util.extract_largest_foreground_connected_component(
            multi_label_array=expected_segmentation)
        # make sure the test data is accurate by checking if more than one component exists
        assert not np.array_equal(largest_component, expected_segmentation)
        expected_segmentation = largest_component

    # instantiate the model
    checkpoint = test_output_dirs.root_dir / "checkpoint.ckpt"
    create_model_and_store_checkpoint(config, checkpoint_path=checkpoint)

    # create single or ensemble inference pipeline
    inference_pipeline = InferencePipeline.create_from_checkpoint(
        path_to_checkpoint=checkpoint, model_config=config)
    assert inference_pipeline is not None
    full_image_inference_pipeline = EnsemblePipeline([inference_pipeline], config) \
        if is_ensemble else inference_pipeline

    # compute full image inference results
    inference_result = full_image_inference_pipeline \
        .predict_and_post_process_whole_image(image_channels=image_channels, mask=mask, voxel_spacing_mm=(1, 1, 1))

    # Segmentation must have same size as input image
    assert inference_result.segmentation.shape == image_size
    assert inference_result.posteriors.shape == (num_classes +
                                                 1, ) + image_size
    # check that the posteriors and segmentations are as expected. Flatten to a list so that the error
    # messages are more informative.
    assert np.allclose(inference_result.posteriors, expected_posteriors)
    assert np.array_equal(inference_result.segmentation, expected_segmentation)