    def dataset_val(self):
        """
        Returns the validation dataset. No random augmentation is performed.
        :return: The validation dataset.
        """
        iterator = IdListIterator(self.val_id_list_file_name,
                                  random=False,
                                  keys=['image_id'])
        data_sources = self.data_sources(iterator, False)
        if self.translate_by_random_factor:
            image_size = self.image_size[:2] + [None]
        else:
            image_size = self.image_size
        image_transformation = self.spatial_transformation(
            data_sources['image_datasource'], image_size)
        data_generators = self.data_generators(
            data_sources['image_datasource'],
            data_sources['landmarks_datasource'], image_transformation,
            self.intensity_postprocessing, image_size)
        dataset = GraphDataset(
            data_sources=list(data_sources.values()),
            data_generators=list(data_generators.values()),
            transformations=[image_transformation],
            iterator=iterator,
            debug_image_folder='debug_val' if self.save_debug_images else None)
        return dataset
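All of the examples on this page follow the same dataset-graph pattern (the class names match Christian Payer's MedicalDataAugmentationTool): an iterator yields id dicts, datasources load the raw images and annotations, a spatial transformation resamples them into a common reference frame, generators produce the network inputs, and a GraphDataset wires everything together. A minimal sketch of the imports the snippets rely on; the module paths are an assumption based on that framework's layout and may differ in your checkout:

# Assumed import paths (MedicalDataAugmentationTool-style layout); verify against
# your checkout. ImageSizeGenerator and VideoFrameList are project-specific helpers.
import numpy as np

from datasets.graph_dataset import GraphDataset
from graph.node import LambdaNode
from iterators.id_list_iterator import IdListIterator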
Example 2
    def dataset_val(self):
        """
        Returns the validation dataset. No random augmentation is performed.
        :return: The validation dataset.
        """
        if self.cv == 'inference':
            # In inference mode, a placeholder name stands in for the id-list iterator.
            iterator = 'iterator'
        else:
            iterator = self.iterator(self.test_file, False)
        sources = self.datasources(iterator, False)
        if self.translate_by_random_factor:
            image_size = self.image_size[:2] + [None]
        else:
            image_size = self.image_size
        reference_transformation = self.spatial_transformation(
            iterator, sources, image_size)
        generators = self.data_generators(iterator, sources,
                                          reference_transformation,
                                          self.postprocessing, False,
                                          image_size)
        if self.generate_single_vertebrae and not self.generate_labels:
            del generators['labels']

        return GraphDataset(
            data_generators=list(generators.values()),
            data_sources=list(sources.values()),
            transformations=[reference_transformation],
            iterator=iterator,
            debug_image_folder='debug_val' if self.save_debug_images else None)
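Once built, a GraphDataset is typically consumed entry by entry. A hedged usage sketch, assuming the framework's get_next() interface, which returns a dict with 'id', 'datasources', and 'generators' entries (check this against your GraphDataset version; 'loader' is a placeholder for an instance of the class defining dataset_val()):

# Hedged usage sketch; 'loader' is a placeholder name.
dataset = loader.dataset_val()
entry = dataset.get_next()               # next id dict from the iterator
image = entry['generators']['image']     # generated network input
print(entry['id'], image.shape)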
Example 3
    def dataset_val(self):
        """
        Returns the validation dataset. No random augmentation is performed.
        :return: The validation dataset.
        """
        if self.cv == 'inference':
            iterator = 'iterator'
        else:
            iterator = self.iterator(self.test_file, False)
        sources = self.datasources(iterator, True, True, self.preprocessing, 2048)
        if self.use_variable_image_size:
            valid_output_sizes = [[32, 64, 96, 128],
                                  [32, 64, 96, 128],
                                  [32 + i * 32 for i in range(20)]]
            if self.load_spine_bbs:
                image_size = ImageSizeGenerator(self.dim, [None] * 3, self.image_spacing,
                                                valid_output_sizes=valid_output_sizes,
                                                name='output_size',
                                                kwparents={'extent': sources['landmarks_bb_extent']})
            else:
                image_size = ImageSizeGenerator(self.dim, [None] * 3, self.image_spacing,
                                                valid_output_sizes=valid_output_sizes,
                                                name='output_size',
                                                kwparents={'image': sources['image']})
        else:
            image_size = LambdaNode(lambda: self.image_size, name='output_size')
        reference_transformation = self.spatial_transformation(iterator, sources, image_size)
        generators = self.data_generators(iterator, sources, reference_transformation,
                                          self.postprocessing, False, image_size, False)
        generators['image_id'] = LambdaNode(lambda d: np.array(d['image_id']),
                                            name='image_id', parents=[iterator])

        return GraphDataset(data_generators=list(generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[reference_transformation],
                            iterator=iterator,
                            debug_image_folder='debug_val' if self.save_debug_images else None)
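ImageSizeGenerator picks, per axis, an output size from valid_output_sizes that covers the image (or bounding-box) extent at the given spacing; a None entry means the size is chosen at runtime. A self-contained sketch of that selection rule, under the assumption that the generator picks the smallest valid size covering extent / spacing (an assumption about its internals, not the library's code):

# Hedged sketch of the assumed size-selection rule; not the library implementation.
import math

def pick_output_size(extent, spacing, valid_output_sizes):
    """Per axis, return the smallest valid size covering extent/spacing voxels."""
    sizes = []
    for ext, sp, valid in zip(extent, spacing, valid_output_sizes):
        needed = int(math.ceil(ext / sp))
        # fall back to the largest valid size if none covers the extent
        sizes.append(next((s for s in sorted(valid) if s >= needed), max(valid)))
    return sizes

# e.g. a spine volume of extent 180 x 150 x 600 mm at 2 mm spacing
print(pick_output_size([180, 150, 600], [2.0, 2.0, 2.0],
                       [[32, 64, 96, 128], [32, 64, 96, 128],
                        [32 + i * 32 for i in range(20)]]))
# -> [96, 96, 320]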
Example 4
    def dataset_val(self):
        """
        Returns the validation dataset for videos. No random augmentation is performed.
        :return: The validation dataset.
        """
        dim = 3
        full_video_frame_list_image = VideoFrameList(
            self.video_frame_list_file_name,
            self.num_frames - 1,
            0,
            border_mode='valid',
            random_start=False,
            random_skip_probability=0.0)
        iterator = 'image_ids'
        iterator_postprocessing = LambdaNode(
            lambda x: full_video_frame_list_image.get_id_dict_list(
                x['video_id'], x['frame_id']),
            parents=[iterator])

        sources = self.datasources(iterator_postprocessing)
        image_key = 'image'
        image_transformation = self.spatial_transformation_volumetric(
            sources[image_key])
        generators = self.data_generators(dim, sources, image_transformation,
                                          None)
        final_generators = self.all_generators_post_processing(
            generators, False)

        return GraphDataset(data_generators=list(final_generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[image_transformation],
                            iterator=iterator,
                            debug_image_folder='debug_val'
                            if self.save_debug_images else None)
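VideoFrameList expands one (video_id, frame_id) pair into a temporal window of num_frames consecutive frame ids; border_mode='valid' keeps the window inside the video, and random_start/random_skip are disabled at validation time. A standalone sketch of that windowing, assuming the second and third constructor arguments are the number of frames before and after the current frame (an assumption about the helper's signature):

# Hedged sketch of the assumed frame-window expansion; not the library code.
def frame_window(frame_ids, index, frames_before, frames_after):
    """Return a window of consecutive frame ids, clamped to the valid range."""
    window = frames_before + frames_after + 1
    start = max(0, min(index - frames_before, len(frame_ids) - window))
    return frame_ids[start:start + window]

frames = [f'{i:04d}' for i in range(10)]
# num_frames=4 -> frames_before=3, frames_after=0, as in the call above
print(frame_window(frames, 1, 3, 0))   # clamped: ['0000', '0001', '0002', '0003']
print(frame_window(frames, 8, 3, 0))   # ['0005', '0006', '0007', '0008']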
Example 5
    def dataset_val_single_frame(self):
        """
        Returns the validation dataset for single frames. No random augmentation is performed.
        :return: The validation dataset.
        """
        sources = self.datasources_single_frame('id_dict')
        image_key = 'image'
        image_transformation = self.spatial_transformation(sources[image_key])
        generators = self.data_generators_single_frame(2, sources, image_transformation, None)

        return GraphDataset(data_generators=list(generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[image_transformation],
                            iterator='id_dict',
                            debug_image_folder=self.debug_image_folder)
Example 6
    def dataset_val_single_frame(self):
        """
        Returns the validation dataset for single frames. No random augmentation is performed.
        :return: The validation dataset.
        """
        sources = self.datasources_single_frame('id_dict')
        image_key = 'merged' if self.pad_image or self.crop_image_size is not None else 'image'
        image_transformation = self.spatial_transformation(sources[image_key])
        generators = self.data_generators_single_frame(2, sources, image_transformation, None)
        final_generators = self.all_generators_post_processing(generators, False)

        return GraphDataset(data_generators=list(final_generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[image_transformation],
                            iterator='id_dict',
                            debug_image_folder='debug_val' if self.save_debug_images else None)
Example 7
    def dataset_single_frame(self):
        """
        Returns the training dataset for single frames. Random augmentation is performed.
        :return: The training dataset.
        """
        iterator = 'iterator'
        sources = self.datasources_single_frame(iterator)
        image_key = 'merged'
        image_transformation = self.spatial_transformation(sources[image_key])
        generators = self.data_generators_single_frame(2, sources,
                                                       image_transformation)

        return GraphDataset(data_generators=list(generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[image_transformation],
                            iterator=iterator,
                            debug_image_folder=None)
Example 8
    def dataset_val_all_frames(self):
        """
        Returns the validation dataset for videos. No random augmentation is performed.
        :return: The validation dataset.
        """
        dim = 3
        iterator = 'image_ids'
        sources = self.datasources(iterator)
        image_key = 'merged' if self.pad_image or self.crop_image_size is not None else 'image'
        image_transformation = self.spatial_transformation_volumetric(sources[image_key])
        generators = self.data_generators(dim, sources, image_transformation, None)
        final_generators = self.all_generators_post_processing(generators, False)

        return GraphDataset(data_generators=list(final_generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[image_transformation],
                            iterator=iterator,
                            debug_image_folder='debug_val' if self.save_debug_images else None)
Example 9
    def dataset_train_single_frame(self):
        """
        Returns the training dataset for single frames. Random augmentation is performed.
        :return: The training dataset.
        """
        iterator = IdListIterator(self.train_id_list_file_name, random=True, keys=['video_id', 'frame_id'])

        sources = self.datasources_single_frame(iterator)
        image_key = 'merged' if self.pad_image or self.crop_image_size is not None else 'image'
        image_transformation = self.spatial_transformation_augmented(sources[image_key])
        generators = self.data_generators_single_frame(2, sources, image_transformation, self.postprocessing_random)
        final_generators = self.all_generators_post_processing(generators, False)

        return GraphDataset(data_generators=list(final_generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[image_transformation],
                            iterator=iterator,
                            debug_image_folder='debug_train' if self.save_debug_images else None)
Example 10
    def dataset_val(self):
        """
        Returns the validation dataset. No random augmentation is performed.
        :return: The validation dataset.
        """
        iterator = IdListIterator(self.val_id_list_file_name,
                                  random=False,
                                  keys=['image_id'])
        data_sources = self.data_sources(False, iterator)
        image_transformation = self.spatial_transformation(data_sources)
        data_generators = self.data_generators(data_sources,
                                               image_transformation,
                                               self.intensity_postprocessing)
        return GraphDataset(
            data_generators=list(data_generators.values()),
            data_sources=list(data_sources.values()),
            transformations=[image_transformation],
            iterator=iterator,
            debug_image_folder='debug_val' if self.save_debug_images else None)
Example 11
    def dataset_train(self):
        """
        Returns the training dataset. Random augmentation is performed.
        :return: The training dataset.
        """
        iterator = IdListIterator(self.train_file,
                                  random=True,
                                  keys=['image_id'],
                                  name='iterator')
        sources = self.datasources(iterator, True)
        reference_transformation = self.spatial_transformation_augmented(
            sources)
        generators = self.data_generators(sources, reference_transformation,
                                          self.postprocessing_random)

        return GraphDataset(data_generators=list(generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[reference_transformation],
                            iterator=iterator,
                            debug_image_folder='debug_train'
                            if self.save_debug_images else None)
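The train variants differ from the val variants mainly in the transformation/postprocessing pair: spatial_transformation_augmented adds random rotation, scale, and translation on top of the deterministic centering used at validation time. A hedged sketch of what such a method commonly composes in this framework; the module and class names are assumptions from MedicalDataAugmentationTool, and the parameter values are placeholders:

# Hedged sketch; import paths, signatures, and parameter values are assumptions.
from transformations.spatial import composite, rotation, scale, translation

def spatial_transformation_augmented(self, sources):
    """Center the input at the origin, perturb it randomly, then center the output."""
    return composite.Composite(self.dim,
                               [translation.InputCenterToOrigin(self.dim),
                                rotation.Random(self.dim, [0.2] * self.dim),
                                scale.RandomUniform(self.dim, 0.2),
                                translation.Random(self.dim, [10.0] * self.dim),
                                translation.OriginToOutputCenter(self.dim,
                                                                 self.image_size,
                                                                 self.image_spacing)],
                               kwparents={'image': sources['image']})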
Example 12
    def dataset_train(self):
        """
        Returns the training dataset. Random augmentation is performed.
        :return: The training dataset.
        """
        iterator = IdListIterator(self.train_id_list_file_name,
                                  random=True,
                                  keys=['image_id'])
        data_sources = self.data_sources(iterator, True)
        image_transformation = self.spatial_transformation_augmented(
            data_sources['image_datasource'])
        data_generators = self.data_generators(
            data_sources['image_datasource'],
            data_sources['landmarks_datasource'], image_transformation,
            self.intensity_postprocessing_augmented, self.image_size)
        dataset = GraphDataset(data_sources=list(data_sources.values()),
                               data_generators=list(data_generators.values()),
                               transformations=[image_transformation],
                               iterator=iterator,
                               debug_image_folder='debug_train'
                               if self.save_debug_images else None)
        return dataset
Example 13
    def dataset_train(self):
        """
        Returns the training dataset for videos. Random augmentation is performed.
        :return: The training dataset.
        """
        dim = 3
        full_video_frame_list_image = VideoFrameList(self.video_frame_list_file_name, self.num_frames - 1, 0,
                                                     border_mode='valid', random_start=True, random_skip_probability=self.random_skip_probability)
        iterator = IdListIterator(self.train_id_list_file_name, random=True, keys=['video_id', 'frame_id'],
                                        postprocessing=lambda x: full_video_frame_list_image.get_id_dict_list(x['video_id'], x['frame_id']))

        sources = self.datasources(iterator)
        image_key = 'merged' if self.pad_image or self.crop_image_size is not None else 'image'
        image_transformation = self.spatial_transformation_volumetric_augmented(sources[image_key])
        generators = self.data_generators(dim, sources, image_transformation, self.postprocessing_random)
        final_generators = self.all_generators_post_processing(generators, False)

        return GraphDataset(data_generators=list(final_generators.values()),
                            # data_sources=list(sources.values()),
                            # transformations=[image_transformation],
                            iterator=iterator,
                            debug_image_folder='debug_train' if self.save_debug_images else None)
Example 14
    def dataset_train(self):
        """
        Returns the training dataset. Random augmentation is performed.
        :return: The training dataset.
        """
        iterator = self.iterator(self.train_file, True)
        sources = self.datasources(iterator, False, False, self.preprocessing_random, 8192)
        if self.use_variable_image_size:
            image_size = ImageSizeGenerator(self.dim, [None] * 3, self.image_spacing,
                                            valid_output_sizes=[self.valid_output_sizes_x,
                                                                self.valid_output_sizes_y,
                                                                self.valid_output_sizes_z],
                                            name='output_size',
                                            kwparents={'extent': sources['landmarks_bb_extent']})
            if self.crop_randomly_smaller:
                image_size = LambdaNode(self.crop_randomly_smaller_image_size,
                                        name='output_size', parents=[image_size])
        else:
            image_size = LambdaNode(lambda: self.image_size, name='output_size')
        reference_transformation = self.spatial_transformation_augmented(
            iterator, sources, image_size)
        generators = self.data_generators(iterator, sources, reference_transformation,
                                          self.postprocessing_random, True, image_size,
                                          self.crop_image_top_bottom)
        generators['image_id'] = LambdaNode(lambda d: np.array(d['image_id']),
                                            name='image_id', parents=[iterator])

        return GraphDataset(data_generators=list(generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[reference_transformation],
                            iterator=iterator,
                            debug_image_folder='debug_train' if self.save_debug_images else None)
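In the example above, crop_randomly_smaller_image_size is wrapped in a LambdaNode whose parent is the generated output size, so it receives the computed size and may return a smaller one. Purely as illustration (a guess at the intent, not the authors' implementation), such a function could randomly truncate the z extent to a smaller valid size:

# Purely illustrative guess at crop_randomly_smaller_image_size; not the authors' code.
import random

def crop_randomly_smaller_image_size(self, image_size):
    """Randomly shrink the z output size to a smaller valid size, if one exists."""
    valid = [s for s in self.valid_output_sizes_z if s <= image_size[2]]
    if not valid:
        return image_size
    return image_size[:2] + [random.choice(valid)]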
Example 15
    def dataset_train(self):
        """
        Returns the training dataset. Random augmentation is performed.
        :return: The training dataset.
        """
        iterator = self.iterator(self.train_file, True)
        sources = self.datasources(iterator, True)
        image_size = self.image_size
        reference_transformation = self.spatial_transformation_augmented(
            iterator, sources, image_size)
        generators = self.data_generators(iterator, sources,
                                          reference_transformation,
                                          self.postprocessing_random, True,
                                          image_size)
        if self.generate_single_vertebrae and not self.generate_labels:
            del generators['labels']

        return GraphDataset(data_generators=list(generators.values()),
                            data_sources=list(sources.values()),
                            transformations=[reference_transformation],
                            iterator=iterator,
                            debug_image_folder='debug_train'
                            if self.save_debug_images else None)
Example 16
    def dataset_val(self):
        """
        Returns the validation dataset. No random augmentation is performed.
        :return: The validation dataset.
        """
        iterator = IdListIterator(self.test_file,
                                  random=False,
                                  keys=['image_id'],
                                  name='iterator')
        sources = self.datasources(iterator, False)
        reference_transformation = self.spatial_transformation(sources)
        generators = self.data_generators(sources, reference_transformation,
                                          self.postprocessing)

        if self.cv == 0:
            del sources['landmarks']
            del generators['landmarks']

        return GraphDataset(
            data_generators=list(generators.values()),
            data_sources=list(sources.values()),
            transformations=[reference_transformation],
            iterator=iterator,
            debug_image_folder='debug_val' if self.save_debug_images else None)
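Across all the variants above, the train/val pair is consumed the same way: the training dataset feeds randomly augmented entries to the optimizer, while the validation dataset is iterated once per evaluation pass without augmentation. A hedged end-to-end sketch; 'loader', 'num_train_iterations', 'train_step', and 'evaluate' are placeholders, and the num_entries() accessor is an assumption about the iterator interface:

# Hedged consumption sketch; all lower-case helper names below are placeholders.
dataset_train = loader.dataset_train()
dataset_val = loader.dataset_val()

for _ in range(num_train_iterations):
    entry = dataset_train.get_next()        # randomly augmented entry
    train_step(entry['generators'])         # your optimizer update

for _ in range(dataset_val.num_entries()):  # assumes a num_entries() accessor
    entry = dataset_val.get_next()          # deterministic validation entry
    evaluate(entry['id'], entry['generators'])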