    def test(self):
        """
        The test function. Performs inference on the validation images and calculates the loss.
        """
        print('Testing...')

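        # channel axis of the multichannel network output depends on the data format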
        channel_axis = 0
        if self.data_format == 'channels_last':
            channel_axis = 3

        landmarks = {}
        num_entries = self.dataset_val.num_entries()
        for i in range(num_entries):
            dataset_entry = self.dataset_val.get_next()
            current_id = dataset_entry['id']['image_id']
            datasources = dataset_entry['datasources']
            input_image = datasources['image']

            image, prediction, transformation = self.test_full_image(dataset_entry)
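            # resample the network output back into the geometry of the
            # original input image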
            predictions_sitk = utils.sitk_image.transform_np_output_to_sitk_input(output_image=prediction,
                                                                                  output_spacing=self.image_spacing,
                                                                                  channel_axis=channel_axis,
                                                                                  input_image_sitk=input_image,
                                                                                  transform=transformation,
                                                                                  interpolator='linear',
                                                                                  output_pixel_type=sitk.sitkUInt8)
            if self.save_output_images:
                origin = transformation.TransformPoint(np.zeros(3, np.float64))
                heatmap_normalization_mode = (0, 1)
                utils.io.image.write_multichannel_np(
                    image,
                    self.output_file_for_current_iteration(current_id + '_input.mha'),
                    normalization_mode='min_max',
                    split_channel_axis=True,
                    sitk_image_mode='default',
                    data_format=self.data_format,
                    image_type=np.uint8,
                    spacing=self.image_spacing,
                    origin=origin)
                utils.io.image.write_multichannel_np(
                    prediction,
                    self.output_file_for_current_iteration(current_id + '_prediction.mha'),
                    normalization_mode=heatmap_normalization_mode,
                    split_channel_axis=True,
                    data_format=self.data_format,
                    image_type=np.uint8,
                    spacing=self.image_spacing,
                    origin=origin)
                utils.io.image.write(predictions_sitk[0], self.output_file_for_current_iteration(current_id + '_prediction_original.mha'))

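            # the predicted landmark is the physical-space center of mass of
            # the resampled heatmap (numpy indices are zyx, hence the reversal)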
            predictions_com = input_image.TransformContinuousIndexToPhysicalPoint(
                list(
                    reversed(
                        utils.np_image.center_of_mass(
                            utils.sitk_np.sitk_to_np_no_copy(
                                predictions_sitk[0])))))
            landmarks[current_id] = [Landmark(predictions_com)]
            print_progress_bar(i, num_entries, prefix='Testing ', suffix=' complete')

        utils.io.landmark.save_points_csv(landmarks, self.output_file_for_current_iteration('points.csv'))

        # finalize loss values
        if self.has_validation_groundtruth:
            self.val_loss_aggregator.finalize(self.current_iter)
Example #2
    def test(self):
        """
        The test function. Performs inference on the validation images and calculates the loss.
        """
        print('Testing...')

        channel_axis = 0
        if self.data_format == 'channels_last':
            channel_axis = 3
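        # set up label fusion (SegmentationTest) and per-image evaluation
        # metrics (Dice and Hausdorff distance)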
        labels = list(range(self.num_labels_all))
        segmentation_test = SegmentationTest(labels,
                                             channel_axis=channel_axis,
                                             largest_connected_component=False,
                                             all_labels_are_connected=False)
        segmentation_statistics = SegmentationStatistics(
            labels,
            self.output_folder_for_current_iteration(),
            metrics=OrderedDict([('dice', DiceMetric()),
                                 ('h', HausdorffDistanceMetric())]))
        filter_largest_cc = True

        # iterate over all images
        for i, image_id in enumerate(self.test_id_list):
            first = True
            prediction_resampled_np = None
            input_image = None
            groundtruth = None
            # iterate over all valid landmarks
            for landmark_id in self.valid_landmarks[image_id]:
                dataset_entry = self.dataset_val.get({
                    'image_id': image_id,
                    'landmark_id': landmark_id
                })
                datasources = dataset_entry['datasources']
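                # on the first valid landmark of this image, allocate the
                # multi-channel accumulator in the original image geometry
                # (float16 to save memory, one channel per label, channel 0
                # initialized to a 0.5 background prior)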
                if first:
                    input_image = datasources['image']
                    if self.has_validation_groundtruth:
                        groundtruth = datasources['labels']
                    prediction_resampled_np = np.zeros(
                        [self.num_labels_all] +
                        list(reversed(input_image.GetSize())),
                        dtype=np.float16)
                    prediction_resampled_np[0, ...] = 0.5
                    first = False

                image, prediction, transformation = self.test_full_image(
                    dataset_entry)

                if filter_largest_cc:
                    # keep only the largest connected component of the
                    # thresholded prediction: zero all above-threshold
                    # responses that lie outside of it
                    prediction_thresh_np = (prediction > 0.5).astype(np.uint8)
                    largest_connected_component = utils.np_image.largest_connected_component(
                        prediction_thresh_np[0])
                    prediction_thresh_np[largest_connected_component[None, ...] == 1] = 0
                    prediction[prediction_thresh_np == 1] = 0

                if self.save_output_images:
                    if self.save_output_images_as_uint:
                        image_normalization = 'min_max'
                        label_normalization = (0, 1)
                        output_image_type = np.uint8
                    else:
                        image_normalization = None
                        label_normalization = None
                        output_image_type = np.float32
                    origin = transformation.TransformPoint(
                        np.zeros(3, np.float64))
                    utils.io.image.write_multichannel_np(
                        image,
                        self.output_file_for_current_iteration(image_id + '_' +
                                                               landmark_id +
                                                               '_input.mha'),
                        output_normalization_mode=image_normalization,
                        data_format=self.data_format,
                        image_type=output_image_type,
                        spacing=self.image_spacing,
                        origin=origin)
                    utils.io.image.write_multichannel_np(
                        prediction,
                        self.output_file_for_current_iteration(
                            image_id + '_' + landmark_id + '_prediction.mha'),
                        output_normalization_mode=label_normalization,
                        data_format=self.data_format,
                        image_type=output_image_type,
                        spacing=self.image_spacing,
                        origin=origin)

                prediction_resampled_sitk = utils.sitk_image.transform_np_output_to_sitk_input(
                    output_image=prediction,
                    output_spacing=self.image_spacing,
                    channel_axis=channel_axis,
                    input_image_sitk=input_image,
                    transform=transformation,
                    interpolator='linear',
                    output_pixel_type=sitk.sitkFloat32)
                #utils.io.image.write(prediction_resampled_sitk[0],  self.output_file_for_current_iteration(image_id + '_' + landmark_id + '_resampled.mha'))
                # write the resampled probability map into this landmark's
                # channel (channel 0 holds the background prior)
                prediction_resampled_channel_np = utils.sitk_np.sitk_to_np(
                    prediction_resampled_sitk[0])
                if self.data_format == 'channels_first':
                    prediction_resampled_np[int(landmark_id) + 1, ...] = prediction_resampled_channel_np
                else:
                    prediction_resampled_np[..., int(landmark_id) + 1] = prediction_resampled_channel_np
            prediction_labels = segmentation_test.get_label_image(
                prediction_resampled_np, reference_sitk=input_image)
            # delete to save memory
            del prediction_resampled_np
            utils.io.image.write(
                prediction_labels,
                self.output_file_for_current_iteration(image_id + '.mha'))

            if self.has_validation_groundtruth:
                segmentation_statistics.add_labels(image_id, prediction_labels,
                                                   groundtruth)

            print_progress_bar(i,
                               len(self.test_id_list),
                               prefix='Testing ',
                               suffix=' complete')

        # finalize loss values
        if self.has_validation_groundtruth:
            segmentation_statistics.finalize()
            dice_list = segmentation_statistics.get_metric_mean_list('dice')
            mean_dice = np.nanmean(dice_list)
            dice_list = [mean_dice] + dice_list
            hausdorff_list = segmentation_statistics.get_metric_mean_list('h')
            mean_hausdorff = np.mean(hausdorff_list)
            hausdorff_list = [mean_hausdorff] + hausdorff_list
            summary_values = OrderedDict(
                list(zip(self.dice_names, dice_list)) +
                list(zip(self.hausdorff_names, hausdorff_list)))
            self.val_loss_aggregator.finalize(self.current_iter,
                                              summary_values=summary_values)
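
    # Note: SegmentationTest.get_label_image above fuses the per-label
    # probability channels into a single label image, conceptually an argmax
    # over the channel axis, e.g. (hypothetical sketch):
    #   labels_np = np.argmax(prediction_resampled_np, axis=0).astype(np.uint8)
    #   labels_sitk = sitk.GetImageFromArray(labels_np)
    #   labels_sitk.CopyInformation(input_image)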
Example #3
    def test(self):
        """
        The test function. Performs inference on the validation images and calculates the loss.
        """
        print('Testing...')

        channel_axis = 0
        if self.data_format == 'channels_last':
            channel_axis = 3
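        # with spine postprocessing, the heatmap test collects multiple local
        # maxima per channel, which SpinePostprocessing resolves into one
        # consistent landmark sequence; otherwise a single maximum per channel
        # is used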
        if self.use_spine_postprocessing:
            heatmap_maxima = HeatmapTest(channel_axis,
                                         False,
                                         return_multiple_maxima=True,
                                         min_max_distance=7,
                                         min_max_value=0.25,
                                         multiple_min_max_value_factor=0.1)
            spine_postprocessing = SpinePostprocessing(
                num_landmarks=self.num_landmarks,
                image_spacing=self.image_spacing)
        else:
            heatmap_maxima = HeatmapTest(channel_axis, False)

        landmark_statistics = LandmarkStatistics()
        landmarks = {}
        num_entries = self.dataset_val.num_entries()
        for i in range(num_entries):
            dataset_entry = self.dataset_val.get_next()
            current_id = dataset_entry['id']['image_id']
            datasources = dataset_entry['datasources']
            input_image = datasources['image']
            target_landmarks = datasources['landmarks']

            image, prediction, transformation = self.test_cropped_image(
                dataset_entry)

            if self.save_output_images:
                if self.save_output_images_as_uint:
                    image_normalization = 'min_max'
                    heatmap_normalization = (0, 1)
                    output_image_type = np.uint8
                else:
                    image_normalization = None
                    heatmap_normalization = None
                    output_image_type = np.float32
                origin = transformation.TransformPoint(np.zeros(3, np.float64))
                utils.io.image.write_multichannel_np(
                    image,
                    self.output_file_for_current_iteration(current_id +
                                                           '_input.mha'),
                    output_normalization_mode=image_normalization,
                    data_format=self.data_format,
                    image_type=output_image_type,
                    spacing=self.image_spacing,
                    origin=origin)
                utils.io.image.write_multichannel_np(
                    prediction,
                    self.output_file_for_current_iteration(current_id +
                                                           '_prediction.mha'),
                    output_normalization_mode=heatmap_normalization,
                    data_format=self.data_format,
                    image_type=output_image_type,
                    spacing=self.image_spacing,
                    origin=origin)

            if self.use_spine_postprocessing:
                local_maxima_landmarks = heatmap_maxima.get_landmarks(
                    prediction, input_image, self.image_spacing,
                    transformation)
                landmark_sequence = spine_postprocessing.postprocess_landmarks(
                    local_maxima_landmarks, prediction.shape)
            else:
                landmark_sequence = heatmap_maxima.get_landmarks(
                    prediction, input_image, self.image_spacing,
                    transformation)
            landmarks[current_id] = landmark_sequence

            if self.has_validation_groundtruth:
                landmark_statistics.add_landmarks(current_id,
                                                  landmark_sequence,
                                                  target_landmarks)

            print_progress_bar(i,
                               num_entries,
                               prefix='Testing ',
                               suffix=' complete')

        utils.io.landmark.save_points_csv(
            landmarks, self.output_file_for_current_iteration('points.csv'))

        # finalize loss values
        if self.has_validation_groundtruth:
            print(landmark_statistics.get_pe_overview_string())
            print(landmark_statistics.get_correct_id_string(20.0))
            summary_values = OrderedDict(
                zip(
                    self.point_statistics_names,
                    list(landmark_statistics.get_pe_statistics()) +
                    [landmark_statistics.get_correct_id(20)]))

            # finalize loss values
            self.val_loss_aggregator.finalize(self.current_iter,
                                              summary_values)
            overview_string = landmark_statistics.get_overview_string(
                [2, 2.5, 3, 4, 10, 20], 10, 20.0)
            utils.io.text.save_string_txt(
                overview_string,
                self.output_file_for_current_iteration('eval.txt'))
Example #4
    def test(self):
        """
        The test function. Performs inference on the validation images and calculates the loss.
        """
        print('Testing...')

        channel_axis = 0
        if self.data_format == 'channels_last':
            channel_axis = 3

        landmark_statistics = LandmarkStatistics()
        landmarks = {}
        num_entries = self.dataset_val.num_entries()
        for i in range(num_entries):
            dataset_entry = self.dataset_val.get_next()
            current_id = dataset_entry['id']['image_id']
            datasources = dataset_entry['datasources']
            if self.has_validation_groundtruth:
                groundtruth_landmarks = datasources['landmarks']
                # reduce the (possibly multiple) annotations to their mean landmark
                groundtruth_landmark = [get_mean_landmark(groundtruth_landmarks)]
            input_image = datasources['image']

            image, prediction, transformation = self.test_full_image(
                dataset_entry)
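            # resample the network output back into the geometry of the
            # original input image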
            predictions_sitk = utils.sitk_image.transform_np_output_to_sitk_input(
                output_image=prediction,
                output_spacing=self.image_spacing,
                channel_axis=channel_axis,
                input_image_sitk=input_image,
                transform=transformation,
                interpolator='linear',
                output_pixel_type=sitk.sitkFloat32)
            if self.save_output_images:
                if self.save_output_images_as_uint:
                    image_normalization = 'min_max'
                    heatmap_normalization = (0, 1)
                    output_image_type = np.uint8
                else:
                    image_normalization = None
                    heatmap_normalization = None
                    output_image_type = np.float32
                origin = transformation.TransformPoint(np.zeros(3, np.float64))
                utils.io.image.write_multichannel_np(
                    image,
                    self.output_file_for_current_iteration(current_id +
                                                           '_input.mha'),
                    output_normalization_mode=image_normalization,
                    data_format=self.data_format,
                    image_type=output_image_type,
                    spacing=self.image_spacing,
                    origin=origin)
                utils.io.image.write_multichannel_np(
                    prediction,
                    self.output_file_for_current_iteration(current_id +
                                                           '_prediction.mha'),
                    output_normalization_mode=heatmap_normalization,
                    data_format=self.data_format,
                    image_type=output_image_type,
                    spacing=self.image_spacing,
                    origin=origin)
                #utils.io.image.write(predictions_sitk[0], self.output_file_for_current_iteration(current_id + '_prediction_original.mha'))

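            # the predicted landmark is the physical-space center of mass of
            # the resampled heatmap (numpy indices are zyx, hence the reversal)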
            predictions_com = input_image.TransformContinuousIndexToPhysicalPoint(
                list(
                    reversed(
                        utils.np_image.center_of_mass(
                            utils.sitk_np.sitk_to_np_no_copy(
                                predictions_sitk[0])))))
            current_landmark = [Landmark(predictions_com)]
            landmarks[current_id] = current_landmark

            if self.has_validation_groundtruth:
                landmark_statistics.add_landmarks(current_id, current_landmark,
                                                  groundtruth_landmark)

            print_progress_bar(i,
                               num_entries,
                               prefix='Testing ',
                               suffix=' complete')

        utils.io.landmark.save_points_csv(
            landmarks, self.output_file_for_current_iteration('points.csv'))

        # finalize loss values
        if self.has_validation_groundtruth:
            print(landmark_statistics.get_pe_overview_string())
            summary_values = OrderedDict(
                zip(self.point_statistics_names,
                    list(landmark_statistics.get_pe_statistics())))

            # finalize loss values
            self.val_loss_aggregator.finalize(self.current_iter,
                                              summary_values)
            overview_string = landmark_statistics.get_overview_string(
                [2, 2.5, 3, 4, 10, 20], 10)
            utils.io.text.save_string_txt(
                overview_string,
                self.output_file_for_current_iteration('eval.txt'))
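
# For reference: a minimal, self-contained sketch of the center-of-mass
# landmark extraction used in the examples above. Only public SimpleITK and
# NumPy API is used; the helper itself is hypothetical and stands in for
# utils.np_image.center_of_mass combined with
# TransformContinuousIndexToPhysicalPoint.
import numpy as np
import SimpleITK as sitk


def heatmap_center_of_mass_point(heatmap_sitk):
    """Return the physical-space center of mass of a single-channel heatmap."""
    heatmap_np = sitk.GetArrayViewFromImage(heatmap_sitk)  # numpy order: zyx
    coords = np.indices(heatmap_np.shape).reshape(heatmap_np.ndim, -1)
    weights = heatmap_np.reshape(-1).astype(np.float64)
    com_zyx = (coords * weights).sum(axis=1) / max(weights.sum(), 1e-8)
    # SimpleITK expects xyz index order, hence the reversal
    return heatmap_sitk.TransformContinuousIndexToPhysicalPoint(
        list(reversed(com_zyx.tolist())))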