Example #1
    def dataset_val(self):
        """
        Returns the validation dataset for videos. No random augmentation is performed.
        :return: The validation dataset.
        """
        iterator_val = IdListIterator(self.val_id_list_file_name,
                                      random=False,
                                      keys=['video_id', 'frame_id'])

        sources = self.datasources()
        generator_sources = self.data_generator_sources()
        generators_val = self.data_generators(None)
        image_transformation = self.spatial_transformation()

        image_key = 'merged' if self.pad_image or self.crop_image_size is not None else 'image'
        dataset_val = ReferenceTransformationDataset(
            dim=self.dim,
            reference_datasource_keys={'image': image_key},
            reference_transformation=image_transformation,
            datasources=sources,
            data_generators=generators_val,
            data_generator_sources=generator_sources,
            iterator=iterator_val,
            all_generators_post_processing=lambda x: self.all_generators_post_processing(x, False),
            debug_image_folder=os.path.join(self.debug_folder_prefix, 'debug_val') if self.save_debug_images else None,
            use_only_first_reference_datasource_entry=True)

        return dataset_val
Example #2
    def dataset_val_single_frame(self):
        """
        Returns the validation dataset for single frames. No random augmentation is performed.
        :return: The validation dataset.
        """
        sources = self.datasources_single_frame()
        generator_sources = self.data_generator_sources()
        generators_val = self.data_generators_single_frame(None)
        image_transformation = self.spatial_transformation()

        image_key = 'merged' if self.pad_image or self.crop_image_size is not None else 'image'
        dataset_val = ReferenceTransformationDataset(
            dim=self.dim,
            reference_datasource_keys={'image': image_key},
            reference_transformation=image_transformation,
            datasources=sources,
            data_generators=generators_val,
            data_generator_sources=generator_sources,
            iterator=None,
            all_generators_post_processing=lambda x: self.all_generators_post_processing(x, False),
            debug_image_folder=os.path.join(self.debug_folder_prefix, 'debug_val') if self.save_debug_images else None)

        return dataset_val
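
This variant is constructed with iterator=None, so an entry has to be requested explicitly by id rather than being driven by an attached iterator. Below is a minimal consumption sketch, assuming the ReferenceTransformationDataset API from MedicalDataAugmentationTool (a get(id_dict) call that assembles one preprocessed sample); loader and the id values are hypothetical:

# Hypothetical usage sketch; 'loader' stands for an instance of the surrounding dataset class.
dataset = loader.dataset_val_single_frame()
id_dict = {'video_id': 'vid_001', 'frame_id': '00042'}  # hypothetical ids matching the id list layout
entry = dataset.get(id_dict)  # assumed API: builds one preprocessed, non-augmented sample for this id
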
Example #3
    def dataset_train_single_frame(self):
        """
        Returns the training dataset for single frames. Random augmentation is performed.
        :return: The training dataset.
        """
        iterator_train = IdListIterator(self.train_id_list_file_name,
                                        random=True,
                                        keys=['video_id', 'frame_id'])

        sources = self.datasources_single_frame()
        generator_sources = self.data_generator_sources()
        generators_train = self.data_generators_single_frame(
            self.postprocessing_random)
        image_transformation = self.spatial_transformation_augmented()

        image_key = 'merged' if self.pad_image or self.crop_image_size is not None else 'image'
        dataset_train = ReferenceTransformationDataset(
            dim=self.dim,
            reference_datasource_keys={'image': image_key},
            reference_transformation=image_transformation,
            datasources=sources,
            data_generators=generators_train,
            data_generator_sources=generator_sources,
            iterator=iterator_train,
            all_generators_post_processing=lambda x: self.all_generators_post_processing(x, True),
            debug_image_folder=os.path.join(self.debug_folder_prefix, 'debug_train') if self.save_debug_images else None)

        return dataset_train
Example #4
    def dataset_val(self):
        """
        Returns the validation dataset. No random augmentation is performed.
        :return: The validation dataset.
        """
        iterator = IdListIterator(self.test_file,
                                  random=False,
                                  keys=['image_id'])
        sources = self.datasources()
        generator_sources = self.data_generator_sources()
        generators = self.data_generators(self.postprocessing,
                                          self.split_labels)
        reference_transformation = self.spatial_transformation()

        if self.cv == 0:
            del sources['mask']
            del generator_sources['mask']
            del generators['mask']

        return ReferenceTransformationDataset(
            dim=self.dim,
            reference_datasource_keys={
                'image': 'image',
                'landmarks': 'landmarks'
            },
            reference_transformation=reference_transformation,
            datasources=sources,
            data_generators=generators,
            data_generator_sources=generator_sources,
            iterator=iterator,
            debug_image_folder='debug_val' if self.save_debug_images else None)
Example #5
    def dataset_train(self):
        """
        Returns the training dataset. Random augmentation is performed.
        :return: The training dataset.
        """
        iterator = IdListIterator(self.train_file,
                                  random=True,
                                  keys=['image_id'])
        sources = self.datasources()
        generator_sources = self.data_generator_sources()
        generators = self.data_generators(self.postprocessing_random,
                                          self.split_labels)
        reference_transformation = self.spatial_transformation_augmented()

        return ReferenceTransformationDataset(
            dim=self.dim,
            reference_datasource_keys={
                'image': 'image',
                'landmarks': 'landmarks'
            },
            reference_transformation=reference_transformation,
            datasources=sources,
            data_generators=generators,
            data_generator_sources=generator_sources,
            iterator=iterator,
            debug_image_folder='debug_train' if self.save_debug_images else None)
Example #6
    def dataset_val_single_frame(self):
        """
        Returns the validation dataset for single frames. No random augmentation is performed.
        :return: The validation dataset.
        """
        sources = self.datasources()
        generator_sources = self.data_generator_sources()
        generators_val = self.data_generators()
        image_transformation = self.image_transformation_val()

        dataset_val = ReferenceTransformationDataset(
            dim=self.dim,
            reference_datasource_keys={'image': 'image'},
            reference_transformation=image_transformation,
            datasources=sources,
            data_generators=generators_val,
            data_generator_sources=generator_sources,
            iterator=None,
            all_generators_post_processing=None,
            debug_image_folder=os.path.join(self.debug_folder_prefix, 'debug_val') if self.save_debug_images else None)

        return dataset_val
Example #7
    def dataset_train(self):
        """
        Returns the training dataset for videos. Random augmentation is performed.
        :return: The training dataset.
        """
        full_video_frame_list_image = VideoFrameList(
            self.video_frame_list_file_name,
            int(self.num_frames / 2),
            int(self.num_frames / 2) - 1,
            border_mode='valid',
            random_start=True,
            random_skip_probability=self.random_skip_probability)
        iterator_train = IdListIterator(
            self.train_id_list_file_name,
            random=True,
            keys=['video_id', 'frame_id'],
            postprocessing=lambda x: full_video_frame_list_image.get_id_dict_list(x['video_id'], x['frame_id']))

        sources = self.datasources()
        generator_sources = self.data_generator_sources()
        generators_train = self.data_generators(self.postprocessing_random)
        image_transformation = self.spatial_transformation_augmented()

        image_key = 'merged' if self.pad_image or self.crop_image_size is not None else 'image'
        dataset_train = ReferenceTransformationDataset(
            dim=self.dim,
            reference_datasource_keys={'image': image_key},
            reference_transformation=image_transformation,
            datasources=sources,
            data_generators=generators_train,
            data_generator_sources=generator_sources,
            iterator=iterator_train,
            all_generators_post_processing=lambda x: self.all_generators_post_processing(x, True),
            debug_image_folder=os.path.join(self.debug_folder_prefix, 'debug_train') if self.save_debug_images else None,
            use_only_first_reference_datasource_entry=True)

        return dataset_train
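
The two integer arguments passed to VideoFrameList above appear to define the temporal window around each sampled anchor frame: int(self.num_frames / 2) frames before it and int(self.num_frames / 2) - 1 after it, which together with the anchor frame gives num_frames frames per training sample. A small arithmetic sketch (the num_frames value is hypothetical, and the before/after interpretation of the two arguments is an assumption):

# Frame-window arithmetic mirroring the VideoFrameList call above.
num_frames = 8                            # hypothetical value
frames_before = int(num_frames / 2)       # 4 frames preceding the anchor frame (assumed meaning of the 2nd argument)
frames_after = int(num_frames / 2) - 1    # 3 frames following it (assumed meaning of the 3rd argument)
assert frames_before + 1 + frames_after == num_frames  # anchor frame included
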
Example #8
    def dataset_val(self):
        """
        Returns the validation dataset. No random augmentation is performed.
        :return: The validation dataset.
        """
        data_sources = self.data_sources(False)
        data_generator_sources = self.data_generator_sources()
        data_generators = self.data_generators(self.intensity_postprocessing)
        image_transformation = self.spatial_transformation()
        iterator = IdListIterator(self.val_id_list_file_name,
                                  random=False,
                                  keys=['image_id'])
        dataset = ReferenceTransformationDataset(dim=self.dim,
                                                 reference_datasource_keys={'image': 'image_datasource'},
                                                 reference_transformation=image_transformation,
                                                 datasources=data_sources,
                                                 data_generators=data_generators,
                                                 data_generator_sources=data_generator_sources,
                                                 iterator=iterator,
                                                 debug_image_folder='debug_val' if self.save_debug_images else None)
        return dataset
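
Because this dataset is wired to an IdListIterator, entries can also be pulled sequentially. A hedged loop sketch, assuming a get_next() method that advances the attached iterator and assembles one sample, as in MedicalDataAugmentationTool; loader is hypothetical:

# Hypothetical validation loop; 'loader' stands for an instance of the surrounding dataset class.
dataset = loader.dataset_val()
for _ in range(4):              # pull a few validation entries
    entry = dataset.get_next()  # assumed API: fetches the next image_id from the iterator and builds its sample
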