Example #1
    def _get_sinogram_mct(self):
        from ..raw.mCT import Dataset
        from dxpy.learn.model.normalizer import FixWhite
        from ..super_resolution import random_crop_multiresolution
        from ...utils.tensor import shape_as_list
        with tf.variable_scope('mCT_dataset'):
            dataset_origin = Dataset(self.name / 'mCT')
            self.register_node('id', dataset_origin['id'])
            self.register_node('phantom', dataset_origin['phantom'])
            dataset = dict()
            keys = [
                'noise/image{}x'.format(2**i)
                for i in range(self.param('nb_down_sample') + 1)
            ]
            for i, k in enumerate(keys):
                with tf.variable_scope('normalization_{}x'.format(2**i)):
                    fwn = FixWhite(
                        self.name / 'normalization_{}x'.format(2**i),
                        inputs=dataset_origin['sinogram{}x'.format(2**i)],
                        mean=dataset_origin.MEAN * (4.0**i),
                        std=dataset_origin.STD * (4.0**i))
                    dataset['noise/image{}x'.format(2**i)] = fwn.as_tensor()
                    # dataset[k] = dataset_origin['sinogram{}x'.format(2**i)]
            crop_keys = ['image{}x'.format(2**i) for i in range(len(keys))]
            images = {
                'image{}x'.format(2**i): dataset[k]
                for i, k in enumerate(keys)
            }
            images_cropped = random_crop_multiresolution(
                images, [shape_as_list(images['image1x'])[0]] +
                list(self.param('target_shape')) + [1])

            result = dict()
            for ki, ko in zip(crop_keys, keys):
                result[ko] = images_cropped[ki]
            return result
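
Note on Example #1: `random_crop_multiresolution` is imported but not shown. From its call site (a dict keyed 'image1x', 'image2x', ... plus an NHWC target shape at full resolution) it presumably draws a single random offset and crops every level at that offset scaled by its downsample ratio, so the crops stay spatially aligned. A minimal NumPy sketch under those assumptions, not the library implementation:

import numpy as np

def random_crop_multiresolution(images, target_shape):
    # Hypothetical sketch. `images` maps 'image{r}x' to NHWC arrays where
    # level r is downsampled r-fold; `target_shape` is NHWC at full
    # resolution, with spatial dims divisible by the coarsest ratio.
    ratios, r = [], 1
    while 'image{}x'.format(r) in images:
        ratios.append(r)
        r *= 2
    base, coarsest = images['image1x'], ratios[-1]
    # Draw the offset on the coarsest grid so every level crops on whole pixels.
    off_h = np.random.randint((base.shape[1] - target_shape[1]) // coarsest + 1) * coarsest
    off_w = np.random.randint((base.shape[2] - target_shape[2]) // coarsest + 1) * coarsest
    return {
        'image{}x'.format(r): images['image{}x'.format(r)][
            :, off_h // r:(off_h + target_shape[1]) // r,
            off_w // r:(off_w + target_shape[2]) // r, :]
        for r in ratios
    }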
Example #2
 def _get_phantom_aps(self):
     from ..raw.analytical_phantom_sinogram import Dataset
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     fields = ['phantom', 'id'] + [
         'recon{}x'.format(2**i)
         for i in range(self.param('nb_down_sample') + 1)
     ]
     with tf.name_scope('aps_phantom_dataset'):
         dataset_origin = Dataset(self.name / 'analytical_phantom_sinogram',
                                  fields=fields)
         self.register_node('id', dataset_origin['id'])
         self.register_node('phantom', dataset_origin['phantom'])
         label = dataset_origin['phantom']
         keys = [
             'recon{}x'.format(2**i)
             for i in range(self.param('nb_down_sample') + 1)
         ]
         dataset = dataset_origin
         if self.param('log_scale'):
             stat = dataset.LOG_RECON_STAT
         else:
             stat = dataset.RECON_STAT
         dataset = {k: dataset[k] for k in keys}
         for i, k in enumerate(keys):
             if i == 0:
                 continue
             dataset[k] = tf.nn.avg_pool(
                 dataset[k], [1] + [2**i, 2**i] + [1],
                 padding='SAME',
                 strides=[1] + [2**i, 2**i] + [1]) * (2.0**i * 2.0**i)
         if self.param('log_scale'):
             for k in keys:
                 dataset[k] = tf.log(dataset[k] + 1.0)
             label = tf.log(label + 1.0)
         if self.param('with_white_normalization'):
             for i, k in enumerate(keys):
                 dataset[k] = FixWhite(name=self.name / 'fix_white' / k,
                                       inputs=dataset[k],
                                       mean=stat['mean'],
                                       std=stat['std']).as_tensor()
             label = FixWhite(name=self.name / 'fix_white' / 'phantom',
                              inputs=label,
                              mean=stat['mean'],
                              std=stat['std']).as_tensor()
         result = dict()
         for i, k in enumerate(keys):
             result['noise/image{}x'.format(2**i)] = dataset[k]
         result['label/phantom'] = label
         return result
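
Note on Example #2: the `tf.nn.avg_pool(...) * (2.0**i * 2.0**i)` step is average pooling with window and stride 2**i scaled back by the window area, i.e. a sum pool. It preserves the total counts of the reconstruction across scales, which is also why a single set of statistics can serve every level. A small NumPy check of that equivalence (divisible shapes assumed):

import numpy as np

def sum_preserving_downsample(x, ratio):
    # Equivalent of avg_pool(window=stride=ratio) * ratio**2 on an NHWC
    # array whose spatial dims are divisible by `ratio`.
    n, h, w, c = x.shape
    blocks = x.reshape(n, h // ratio, ratio, w // ratio, ratio, c)
    return blocks.mean(axis=(2, 4)) * (ratio * ratio)

x = np.random.poisson(5.0, size=(2, 8, 8, 1)).astype(np.float64)
y = sum_preserving_downsample(x, 4)
assert np.allclose(x.sum(), y.sum())  # total counts are unchanged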
Example #3
 def _get_recons_ms(self):
     from ..raw.analytical_phantom_sinogram import Dataset
     from dxpy.learn.model.normalizer import FixWhite
     ratios = [2**i for i in range(self.param('nb_down_sample') + 1)]
     fields = ['phantom', 'id'] + ['clean{}x'.format(r) for r in ratios] + [
         'noise{}x'.format(r) for r in ratios
     ]
     with tf.name_scope('dataset_recon_ms'):
         dataset = Dataset(self.name / 'analytical_phantom_sinogram',
                           fields=fields)
         self.register_node('id', dataset['id'])
         self.register_node('phantom', dataset['phantom'])
         keys_noise = ['noise{}x'.format(r) for r in ratios]
         keys_clean = ['clean{}x'.format(r) for r in ratios]
         stat = dataset.RECON_MS_STAT
         cleans = dict()
         noises = dict()
         if self.param('with_white_normalization'):
             with tf.name_scope('normalization_clean'):
                 for i, k in enumerate(keys_clean):
                     cleans[k] = FixWhite(name=self.name / 'fix_white',
                                          inputs=dataset[k],
                                          mean=stat['mean'] * (4.0)**i,
                                          std=stat['std'] *
                                          (4.0)**i).as_tensor()
             with tf.name_scope('normalization_noise'):
                 for i, k in enumerate(keys_noise):
                     noises[k] = FixWhite(name=self.name / 'fix_white',
                                          inputs=dataset[k],
                                          mean=stat['mean'] * (4.0)**i,
                                          std=stat['std'] *
                                          (4.0)**i).as_tensor()
         # for i, r in ratios:
         #     self.register_node('clean/image{}x'.format(r), cleans[keys_clean[i]])
         #     self.register_node('noise/image{}x'.format(r), noises[keys_noise[i]])
         # if self.param('with_poission_noise'):
         #     images = {'input/image{}x'.format(r): noises['noise{}x'.format(r)] for r in ratios}
         # else:
         #     images = {'input/image{}x'.format(r): cleans['clean{}x'.format(r)] for r in ratios}
         # if self.param('with_noise_lable'):
         #     labels = {'label/image{}x'.format(r): noises['noise{}x'.format(r)] for r in ratios}
         # else:
         #     labels = {'label/image{}x'.format(r): cleans['clean{}x'.format(r)] for r in ratios}
         result = dict()
         result.update(cleans)
         result.update(noises)
         return result
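
Note on Example #3: every loader here normalizes through `FixWhite`, whose source is not shown. From its call pattern (fixed `mean`/`std` arguments and an `.as_tensor()` accessor) it presumably applies plain whitening with precomputed constants; the `* (4.0)**i` scaling of the statistics in Examples #1 and #3 then matches the roughly fourfold growth of pixel values per sum-preserving downsampling level. A hypothetical sketch:

import tensorflow as tf

class FixWhite:
    # Hypothetical sketch of the normalizer used throughout these examples:
    # whitening with fixed, precomputed statistics rather than batch moments.
    def __init__(self, name, inputs, mean, std):
        self.name = name
        with tf.name_scope('fix_white'):
            self._tensor = (inputs - mean) / std

    def as_tensor(self):
        return self._tensor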
Example #4
    def _process_recons(self, dataset):
        from ...model.normalizer.normalizer import ReduceSum, FixWhite
        from ..super_resolution import SuperResolutionDataset
        from ...utils.tensor import shape_as_list
        keys = [
            'recon{}x'.format(2**i)
            for i in range(self.param('nb_down_sample') + 1)
        ]
        if self.param('log_scale'):
            stat = dataset.LOG_RECON_STAT
        else:
            stat = dataset.RECON_STAT
        phantom = dataset['phantom']
        dataset = {k: dataset[k] for k in keys}
        for i, k in enumerate(keys):
            if i == 0:
                continue
            dataset[k] = tf.nn.avg_pool(
                dataset[k], [1] + [2**i, 2**i] + [1],
                padding='SAME',
                strides=[1] + [2**i, 2**i] + [1]) * (2.0**i * 2.0**i)

        # dataset = {k: ReduceSum(self.name / 'reduce_sum' / k, dataset[k], fixed_summation_value=1e6).as_tensor() for k in keys}
        if self.param('log_scale'):
            for k in keys:
                dataset[k] = tf.log(dataset[k] + 1.0)
            phantom = tf.log(phantom + 1.0)
        if self.param('with_white_normalization'):
            for i, k in enumerate(keys):
                dataset[k] = FixWhite(name=self.name / 'fix_white' / k,
                                      inputs=dataset[k],
                                      mean=stat['mean'],
                                      std=stat['std']).as_tensor()
            phantom = FixWhite(name=self.name / 'fix_white' / 'phantom',
                               inputs=phantom,
                               mean=stat['mean'],
                               std=stat['std']).as_tensor()

        result = dict()
        for i, k in enumerate(keys):
            result['input/image{}x'.format(2**i)] = dataset[k]
            result['label/image{}x'.format(
                2**i)] = result['input/image{}x'.format(2**i)]
        result['label/phantom'] = phantom
        return result
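
Note on Example #4: `ReduceSum` is imported here (and in Example #8) but only used in commented-out code, called as `ReduceSum(name, tensor, fixed_summation_value=1e6).as_tensor()`. Judging by that call and by the inline count normalization in Example #6, it presumably rescales a tensor to a fixed total; a hypothetical sketch:

import tensorflow as tf

class ReduceSum:
    # Hypothetical sketch: rescale the tensor so its total equals a fixed
    # value, the same count normalization done inline in Example #6.
    def __init__(self, name, inputs, fixed_summation_value=1e6):
        self.name = name
        self._tensor = inputs / tf.reduce_sum(inputs) * fixed_summation_value

    def as_tensor(self):
        return self._tensor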
Example #5
 def _get_sinogram_external(self):
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     from ...utils.tensor import shape_as_list
     with tf.name_scope('external_dataset'):
         dataset = self._make_input_place_holder()
         self.register_node('external_place_holder', dataset)
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=self.param('mean'),
                                std=self.param('std')).as_tensor()
         if self.param('with_random_crop'):
             dataset = tf.random_crop(dataset, self._target_shape(dataset))
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
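
Note on Example #5: Examples #5 through #8 share one pattern: the Poisson-noised copy is concatenated onto the clean batch along axis 0 so both halves pass through identical normalization and cropping ops, then recovered by slicing at half the static batch size. A minimal sketch of `shape_as_list` (assumed to return the static shape as a Python list) and the split:

import tensorflow as tf

def shape_as_list(tensor):
    # Assumed behavior of the helper used above.
    return tensor.shape.as_list()

def split_noise_clean(batch):
    # After tf.concat([noise, clean], axis=0), the first half of the batch
    # is the noised copy and the second half is the clean original.
    half = shape_as_list(batch)[0] // 2
    return batch[:half, ...], batch[half:, ...]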
Example #6
 def _get_sinogram_mice(self):
     from ..raw.mice import Dataset
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     from ...utils.tensor import shape_as_list
     with tf.name_scope('mice_sinogram_dataset'):
         dataset = Dataset(self.name / 'mice_sinogram')
         self.register_node('id', dataset['id'])
         bs = dataset.param('batch_size')
         stat = {'mean': dataset.MEAN, 'std': dataset.STD}
         dataset = dataset['sinogram']
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 if self.param('low_dose'):
                     ratio = self.param('low_dose_ratio')
                     ratio_norm = 4e6 * bs / ratio
                     dataset = dataset / tf.reduce_sum(dataset) * ratio_norm
                     stat['mean'] = stat['mean'] / ratio
                     stat['std'] = stat['std'] / ratio
                 else:
                     dataset = dataset / tf.reduce_sum(dataset) * 4e6 * bs
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=stat['mean'],
                                std=stat['std']).as_tensor()
         dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                                  list(self.param('target_shape')) + [1])
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
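
Note on Example #6: the batch is rescaled to a fixed total of 4e6 * bs counts before Poisson sampling, and `low_dose` divides that total by `low_dose_ratio`. Since Poisson noise has standard deviation equal to the square root of the mean, fewer counts means proportionally stronger relative noise; the whitening statistics are divided by the same ratio so the normalized range stays comparable. A small NumPy check of that relationship (values hypothetical):

import numpy as np

bs, ratio = 32, 10.0  # hypothetical batch size and low-dose ratio
x = np.random.rand(bs, 64, 64, 1)
full_dose = x / x.sum() * (4e6 * bs)
low_dose = x / x.sum() * (4e6 * bs / ratio)
# Relative Poisson noise scales as 1/sqrt(counts), so the low-dose
# simulation is noisier by a factor of sqrt(ratio).
rel_full = 1.0 / np.sqrt(full_dose.mean())
rel_low = 1.0 / np.sqrt(low_dose.mean())
assert np.isclose(rel_low / rel_full, np.sqrt(ratio))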
Example #7
 def _get_sinogram_aps(self):
     from ..raw.analytical_phantom_sinogram import Dataset
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     from ...utils.tensor import shape_as_list
     fields = ['sinogram', 'id', 'phantom']
     with tf.name_scope('aps_sinogram_dataset'):
         dataset = Dataset(self.name / 'analytical_phantom_sinogram',
                           fields=fields)
         self.register_node('id', dataset['id'])
         self.register_node('phantom', dataset['phantom'])
         if self.param('log_scale'):
             stat = dataset.LOG_SINO_STAT
         else:
             stat = dataset.SINO_STAT
         dataset = dataset['sinogram']
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
         if self.param('log_scale'):
             dataset = tf.log(dataset + 0.4)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=stat['mean'],
                                std=stat['std']).as_tensor()
         dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                                  list(self.param('target_shape')) + [1])
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
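
Note on Example #7: like Example #8, it applies `tf.log(dataset + 0.4)` before whitening; the 0.4 offset presumably keeps the log finite on zero-count sinogram bins (and the LOG_SINO_STAT constants are chosen for data in that log domain). Reading network outputs back in count space means undoing the two steps in reverse order; a sketch, with `stat` standing in for the same 'mean'/'std' constants used above:

import tensorflow as tf

def to_counts(normalized, stat):
    # Invert FixWhite, then invert the log transform used above.
    log_sino = normalized * stat['std'] + stat['mean']
    return tf.exp(log_sino) - 0.4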
Example #8
 def _process_sinogram(self, dataset):
     from ...model.normalizer.normalizer import ReduceSum, FixWhite
     from ..super_resolution import SuperResolutionDataset
     from ...utils.tensor import shape_as_list
     if self.param('log_scale'):
         stat = dataset.LOG_SINO_STAT
     else:
         stat = dataset.SINO_STAT
     # dataset = ReduceSum(self.name / 'reduce_sum', dataset['sinogram'],
     # fixed_summation_value=1e6).as_tensor()
     if 'phantom' in dataset:
         phan = dataset['phantom']
     else:
         phan = None
     dataset = dataset['sinogram']
     if self.param('with_poission_noise'):
         with tf.name_scope('add_with_poission_noise'):
             noise = tf.random_poisson(dataset, shape=[])
             dataset = tf.concat([noise, dataset], axis=0)
     if self.param('log_scale'):
         dataset = tf.log(dataset + 0.4)
     if self.param('with_white_normalization'):
         dataset = FixWhite(name=self.name / 'fix_white',
                            inputs=dataset,
                            mean=stat['mean'],
                            std=stat['std']).as_tensor()
     # random phase shift
     # if self.param('with_phase_shift'):
     #     phase_view = tf.random_uniform(
     #         [], 0, shape_as_list(dataset)[1], dtype=tf.int64)
     #     dataset_l = dataset[:, phase_view:, :, :]
     #     dataset_r = dataset[:, :phase_view, :, :]
     #     dataset = tf.concat([dataset_l, dataset_r], axis=1)
     dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                              list(self.param('target_shape')) + [1])
     dataset = SuperResolutionDataset(
         self.name / 'super_resolution',
         lambda: {'image': dataset},
         input_key='image',
         nb_down_sample=self.param('nb_down_sample'))
     keys = [
         'image{}x'.format(2**i)
         for i in range(dataset.param('nb_down_sample') + 1)
     ]
     if self.param('with_poission_noise'):
         result = {
             'input/' + k: dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
             for k in keys
         }
         result.update({
             'label/' + k: dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
             for k in keys
         })
         # The noise/clean halves only exist after the concat above.
         result.update({
             'noise/' + k: dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
             for k in keys
         })
         result.update({
             'clean/' + k: dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
             for k in keys
         })
     else:
         result = {'input/' + k: dataset[k] for k in keys}
         result.update({'label/' + k: dataset[k] for k in keys})
         result.update({'noise/' + k: dataset[k] for k in keys})
         result.update({'clean/' + k: dataset[k] for k in keys})
     if phan is not None:
         result.update({'phantom': phan})
     return result
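
Note on Example #8: the returned dictionary exposes each scale under four prefixes: 'input/' and 'label/' for training, plus 'noise/' and 'clean/' (which coincide with them when Poisson noise is enabled). A hedged sketch of how a super-resolution step might pick its training pair out of it (consumer code hypothetical, key names as above):

def training_pair(tensors, nb_down_sample):
    # Hypothetical consumer: coarsest scale as network input,
    # full-resolution copy as the regression target.
    low = tensors['input/image{}x'.format(2 ** nb_down_sample)]
    high = tensors['label/image1x']
    return low, high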