def postprocessing_random(self, image):
    """
    Performs random augmentations of a grayscale image.

    Applies, in order: a random gamma correction, a random intensity
    shift/scale over the whole image (per video), and — when the image is a
    4D stack of video frames — a smaller random shift/scale per frame.
    :param image: The grayscale image to augment.
    :return: The augmented grayscale image.
    """
    gamma = float_uniform(0.6, 1.4)
    image = change_gamma_unnormalized(image, gamma)
    image = ShiftScaleClamp(random_shift=0.65, random_scale=0.65)(image)
    if len(image.shape) == 4:
        stack_axis = self.video_frame_stack_axis
        for frame_index in range(image.shape[stack_axis]):
            # Build an index tuple that selects a single frame along the
            # stack axis while keeping the other (leading) axes intact.
            frame_slice = [slice(None), slice(None)]
            frame_slice.insert(stack_axis, slice(frame_index, frame_index + 1))
            frame_slice = tuple(frame_slice)
            per_frame_augment = ShiftScaleClamp(random_shift=0.1, random_scale=0.1)
            image[frame_slice] = per_frame_augment(image[frame_slice])
    return image
def intensity_postprocessing_mr(self, image):
    """
    Intensity postprocessing for MR input.

    Robustly normalizes the image, then clamps all values below -1.0.
    :param image: The np input image.
    :return: The processed image.
    """
    normalized = normalize_robust(image)
    clamp = ShiftScaleClamp(clamp_min=-1.0)
    return clamp(normalized)
def binary_labels(self, image):
    """
    Converts an instance label image into a binary label. All instances will be set to 1.
    :param image: The instance label image.
    :return: A list of np arrays. First, is the background. Second is the foreground.
    """
    # Clamping to [0, 1] collapses every instance id > 0 onto 1.
    binarized = ShiftScaleClamp(clamp_min=0, clamp_max=1)(image)
    return split_label_image(binarized, [0, 1])
def intensity_postprocessing_augmented(self, image):
    """
    Intensity postprocessing. Random augmentation version.

    Robustly normalizes the image, then applies a random intensity
    shift/scale.
    :param image: The np input image.
    :return: The processed image.
    """
    augment = ShiftScaleClamp(random_shift=0.15, random_scale=0.15)
    return augment(normalize_robust(image))
def intensity_postprocessing_mr_random(self, image):
    """
    Intensity postprocessing for MR input. Random augmentation version.

    Robustly normalizes the image, then applies a random intensity
    shift/scale and clamps values below -1.0.
    :param image: The np input image.
    :return: The processed image.
    """
    normalized = normalize_robust(image)
    augment = ShiftScaleClamp(random_shift=0.2,
                              random_scale=0.4,
                              clamp_min=-1.0)
    return augment(normalized)
def intensity_postprocessing(self, image):
    """
    Intensity postprocessing.

    Rescales intensities by 1/2048 and clamps the result to [-1, 1].
    :param image: The np input image.
    :return: The processed image.
    """
    transform = ShiftScaleClamp(shift=0,
                                scale=1 / 2048,
                                clamp_min=-1.0,
                                clamp_max=1.0)
    return transform(image)
def intensity_postprocessing_mr_random(self, image):
    """
    Intensity postprocessing for MR input. Random augmentation version.

    Applies, in order: a random gamma correction, robust normalization,
    and a random intensity shift/scale with a lower clamp of -1.0.
    :param image: The np input image.
    :return: The processed image.
    """
    gamma = float_uniform(0.5, 1.5)
    image = change_gamma_unnormalized(image, gamma)
    image = normalize_robust(image, consideration_factors=(0.1, 0.1))
    augment = ShiftScaleClamp(random_shift=0.6,
                              random_scale=0.6,
                              clamp_min=-1.0)
    return augment(image)
def intensity_postprocessing_augmented(self, image):
    """
    Intensity postprocessing. Random augmentation version.

    Rescales intensities by 1/2048, applies a random shift/scale, and
    clamps the result to [-1, 1].
    :param image: The np input image.
    :return: The processed image.
    """
    transform = ShiftScaleClamp(shift=0,
                                scale=1 / 2048,
                                clamp_min=-1.0,
                                clamp_max=1.0,
                                random_shift=0.15,
                                random_scale=0.15)
    return transform(image)
def intensity_postprocessing_ct_random(self, image):
    """
    Intensity postprocessing for CT input. Random augmentation version.

    Rescales intensities by 1/2048, applies the configured random
    shift/scale, and clamps the result to [-1, 1].
    :param image: The np input image.
    :return: The processed image.
    """
    transform = ShiftScaleClamp(shift=0,
                                scale=1 / 2048,
                                random_shift=self.random_intensity_shift,
                                random_scale=self.random_intensity_scale,
                                clamp_min=-1.0,
                                clamp_max=1.0)
    return transform(image)
def intensity_postprocessing_ct(self, image):
    """
    Intensity postprocessing for CT input.

    Either normalizes to zero mean / unit variance, or rescales by 1/2048
    and clamps to [-1, 1], depending on
    ``self.normalize_zero_mean_unit_variance``.
    :param image: The np input image.
    :return: The processed image.
    """
    if self.normalize_zero_mean_unit_variance:
        return normalize_zero_mean_unit_variance(image)
    return ShiftScaleClamp(shift=0,
                           scale=1 / 2048,
                           clamp_min=-1.0,
                           clamp_max=1.0)(image)
def intensity_postprocessing_ct_random(self, image):
    """
    Intensity postprocessing for CT input. Random augmentation version.

    Applies a random gamma correction, then either normalizes to zero
    mean / unit variance, or rescales by 1/2048 with a random shift/scale
    and clamps to [-1, 1], depending on
    ``self.normalize_zero_mean_unit_variance``.
    :param image: The np input image.
    :return: The processed image.
    """
    # The gamma augmentation was duplicated verbatim in both branches;
    # it is hoisted here once — behavior is unchanged.
    random_lambda = float_uniform(0.9, 1.1)
    image = change_gamma_unnormalized(image, random_lambda)
    if self.normalize_zero_mean_unit_variance:
        return normalize_zero_mean_unit_variance(image)
    return ShiftScaleClamp(shift=0,
                           scale=1 / 2048,
                           random_shift=self.random_intensity_shift,
                           random_scale=self.random_intensity_scale,
                           clamp_min=-1.0,
                           clamp_max=1.0)(image)
def test(self):
    """
    Evaluate the model on the validation dataset.

    For each validation entry: predicts heatmaps (full-image or cropped,
    depending on ``self.cropped_training``), writes the heatmaps and input
    image as .mha files, extracts landmark coordinates from the heatmap
    maxima, and accumulates point-error statistics. Afterwards prints the
    statistics, finalizes the validation loss summary, and saves the
    predicted points (.csv) and an evaluation overview (.txt).
    """
    print('Testing...')
    # Channel axis position in the np heatmap volume depends on the data format.
    if self.data_format == 'channels_first':
        np_channel_index = 0
    else:
        np_channel_index = 3
    heatmap_maxima = HeatmapTest(np_channel_index, False)
    landmark_statistics = LandmarkStatistics()
    # image_id -> predicted landmarks, collected for the points.csv output.
    landmarks = {}
    for i in range(self.dataset_val.num_entries()):
        dataset_entry = self.dataset_val.get_next()
        current_id = dataset_entry['id']['image_id']
        datasources = dataset_entry['datasources']
        image_datasource = datasources['image_datasource']
        landmarks_datasource = datasources['landmarks_datasource']
        if not self.cropped_training:
            image, heatmaps, heatmap_transform = self.test_full_image(
                dataset_entry)
        else:
            image, heatmaps, heatmap_transform = self.test_cropped_image(
                dataset_entry)
        # Rescale heatmaps to [0, 255] uint8 for visualization output.
        utils.io.image.write_np(
            ShiftScaleClamp(scale=255, clamp_min=0, clamp_max=255)(heatmaps).astype(np.uint8),
            self.output_file_for_current_iteration(current_id + '_heatmaps.mha'))
        utils.io.image.write_np(
            image,
            self.output_file_for_current_iteration(current_id + '_image.mha'))
        # Landmark positions come from the heatmap maxima, mapped back to
        # image coordinates via heatmap_transform.
        predicted_landmarks = heatmap_maxima.get_landmarks(
            heatmaps, image_datasource, self.image_spacing, heatmap_transform)
        landmarks[current_id] = predicted_landmarks
        landmark_statistics.add_landmarks(current_id, predicted_landmarks,
                                          landmarks_datasource)
        tensorflow_train.utils.tensorflow_util.print_progress_bar(
            i, self.dataset_val.num_entries(),
            prefix='Testing ', suffix=' complete')
    # NOTE(review): loop boundary reconstructed from a collapsed source line —
    # the final progress-bar call and everything below are taken to run once,
    # after the loop; confirm against the original formatting.
    tensorflow_train.utils.tensorflow_util.print_progress_bar(
        self.dataset_val.num_entries(), self.dataset_val.num_entries(),
        prefix='Testing ', suffix=' complete')
    print(landmark_statistics.get_pe_overview_string())
    print(landmark_statistics.get_correct_id_string(20.0))
    summary_values = OrderedDict(
        zip(
            self.point_statistics_names,
            list(landmark_statistics.get_pe_statistics()) +
            [landmark_statistics.get_correct_id(20)]))
    # finalize loss values
    self.val_loss_aggregator.finalize(self.current_iter, summary_values)
    utils.io.landmark.save_points_csv(
        landmarks, self.output_file_for_current_iteration('points.csv'))
    overview_string = landmark_statistics.get_overview_string(
        [2, 2.5, 3, 4, 10, 20], 10, 20.0)
    utils.io.text.save_string_txt(
        overview_string, self.output_file_for_current_iteration('eval.txt'))