Example #1
 def _get_sinogram_mice(self):
     from ..raw.mice import Dataset
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     with tf.name_scope('mice_sinogram_dataset'):
         dataset = Dataset(self.name / 'mice_sinogram')
         self.register_node('id', dataset['id'])
         bs = dataset.param('batch_size')
         stat = {'mean': dataset.MEAN, 'std': dataset.STD}
         dataset = dataset['sinogram']
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 if self.param('low_dose'):
                     # Scale the total counts down by the low-dose ratio
                     # and adjust the whitening statistics to match.
                     ratio = self.param('low_dose_ratio')
                     ratio_norm = 4e6 * bs / ratio
                     dataset = dataset / tf.reduce_sum(dataset) * ratio_norm
                     stat['mean'] = stat['mean'] / ratio
                     stat['std'] = stat['std'] / ratio
                 else:
                     # Normalize to a fixed total of 4e6 counts per sample.
                     dataset = dataset / tf.reduce_sum(dataset) * 4e6 * bs
                 # Stack a Poisson realization on top of the clean batch:
                 # first half noisy, second half clean.
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=stat['mean'],
                                std=stat['std']).as_tensor()
         dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                                  list(self.param('target_shape')) + [1])
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
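
Example #1 normalizes the batch to a fixed photon count and then doubles it along the batch axis, noisy half first. A minimal standalone sketch of that pairing pattern, assuming TensorFlow 1.x (tf.random_poisson and this call signature are not available in TF 2) and a hypothetical add_poisson_pair helper name:

import tensorflow as tf

def add_poisson_pair(sinogram, total_counts=4e6):
    # Scale the whole batch so it sums to total_counts per sample, then
    # stack a Poisson realization on top of the clean batch.
    batch_size = sinogram.shape.as_list()[0]
    scaled = sinogram / tf.reduce_sum(sinogram) * total_counts * batch_size
    # With shape=[], tf.random_poisson draws one sample per rate entry,
    # i.e. a tensor the same shape as `scaled`.
    noisy = tf.random_poisson(scaled, shape=[])
    return tf.concat([noisy, scaled], axis=0)  # first half noisy, second half clean

pair = add_poisson_pair(tf.ones([8, 320, 320, 1]))
print(pair.shape)  # (16, 320, 320, 1)
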
Example #2
 def _get_sinogram_aps(self):
     from ..raw.analytical_phantom_sinogram import Dataset
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     fields = ['sinogram', 'id', 'phantom']
     with tf.name_scope('aps_sinogram_dataset'):
         dataset = Dataset(self.name / 'analytical_phantom_sinogram',
                           fields=fields)
         self.register_node('id', dataset['id'])
         self.register_node('phantom', dataset['phantom'])
         if self.param('log_scale'):
             stat = dataset.LOG_SINO_STAT
         else:
             stat = dataset.SINO_STAT
         dataset = dataset['sinogram']
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
         if self.param('log_scale'):
             # Applied whether or not noise was added, to match the
             # LOG_SINO_STAT statistics chosen above; the offset keeps
             # the log finite for zero-count bins.
             dataset = tf.log(dataset + 0.4)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=stat['mean'],
                                std=stat['std']).as_tensor()
         dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                                  list(self.param('target_shape')) + [1])
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
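
Example #2 optionally moves the sinogram to log scale before whitening, choosing the matching statistics (LOG_SINO_STAT vs. SINO_STAT) up front. A sketch of the combined transform, assuming FixWhite reduces to the usual (x - mean) / std with fixed statistics (its internals are not shown in these examples):

import tensorflow as tf

LOG_OFFSET = 0.4  # the same offset the example adds before tf.log

def log_whiten(sinogram, mean, std):
    # Offset keeps the log finite for zero-count bins.
    logged = tf.log(sinogram + LOG_OFFSET)
    # Assumed FixWhite behaviour: whitening with fixed statistics.
    return (logged - mean) / std
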
Example #3
    def _get_sinogram_mct(self):
        from ..raw.mCT import Dataset
        from dxpy.learn.model.normalizer import FixWhite
        from ..super_resolution import random_crop_multiresolution
        with tf.variable_scope('mCT_dataset'):
            dataset_origin = Dataset(self.name / 'mCT')
            self.register_node('id', dataset_origin['id'])
            self.register_node('phantom', dataset_origin['phantom'])
            dataset = dict()
            keys = [
                'noise/image{}x'.format(2**i)
                for i in range(self.param('nb_down_sample') + 1)
            ]
            for i, k in enumerate(keys):
                with tf.variable_scope('normalization_{}x'.format(2**i)):
                    fwn = FixWhite(
                        self.name / 'normalization_{}x'.format(2**i),
                        inputs=dataset_origin['sinogram{}x'.format(2**i)],
                        mean=dataset_origin.MEAN * (4.0**i),
                        std=dataset_origin.STD * (4.0**i))
                    dataset['noise/image{}x'.format(2**i)] = fwn.as_tensor()
                    # dataset[k] = dataset_origin['sinogram{}x'.format(2**i)]
            # random_crop_multiresolution expects keys 'image{0,1,2,...}x',
            # so re-index the 'noise/image{1,2,4,...}x' entries by level.
            crop_keys = ['image{}x'.format(i) for i in range(len(keys))]
            images = {
                'image{}x'.format(i): dataset[k]
                for i, k in enumerate(keys)
            }
            images_cropped = random_crop_multiresolution(
                images, [shape_as_list(images['image1x'])[0]] +
                list(self.param('target_shape')) + [1])

            result = dict()
            for ki, ko in zip(crop_keys, keys):
                result[ko] = images_cropped[ki]
            return result
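
Example #3 whitens each resolution level with statistics scaled by 4**i. If each down-sampling step sums 2x2 blocks, the per-pixel mean grows exactly 4x per level, which is consistent with those factors (whether STD scales the same way depends on pixel correlations; the dataset's constants are taken as given here). A quick numpy check of the mean scaling:

import numpy as np

x = np.random.rand(64, 64)
# One 2x2 block-sum down-sampling level.
x2 = x.reshape(32, 2, 32, 2).sum(axis=(1, 3))
print(4 * x.mean(), x2.mean())  # identical up to floating point
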
Example #4
 def _get_sinogram_external(self):
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     with tf.name_scope('external_dataset'):
         dataset = self._make_input_place_holder()
         self.register_node('external_place_holder', dataset)
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=self.param('mean'),
                                std=self.param('std')).as_tensor()
         if self.param('with_random_crop'):
             dataset = tf.random_crop(dataset, self._target_shape(dataset))
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
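
Example #4 reads its input from a placeholder created by _make_input_place_holder, which is not shown. A minimal sketch of driving such a graph, assuming the helper returns an ordinary tf.placeholder (TensorFlow 1.x graph mode):

import numpy as np
import tensorflow as tf

# Hypothetical stand-in for self._make_input_place_holder().
external = tf.placeholder(tf.float32, shape=[None, 320, 320, 1],
                          name='external_place_holder')
noisy_pair = tf.concat([tf.random_poisson(external, shape=[]), external],
                       axis=0)

with tf.Session() as sess:
    batch = np.random.rand(4, 320, 320, 1).astype(np.float32)
    out = sess.run(noisy_pair, feed_dict={external: batch})
    print(out.shape)  # (8, 320, 320, 1)
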
Example #5
def random_crop_multiresolution(images, target_shape_high_resolution, nb_down_sample=None):
    """
    Args:
        images: dict-like object. Containing 'image{0,1,2,3,...}x'.
    """
    from dxpy.learn.utils.tensor import shape_as_list
    import tensorflow as tf
    if nb_down_sample is None:
        nb_down_sample = len(images) - 1

    with tf.name_scope('random_crop_multi_resolution'):
        shapes = [shape_as_list(images['image{}x'.format(i)])
                  for i in range(nb_down_sample + 1)]
        down_sample_ratios = [sh // sl for sh, sl in zip(shapes[0], shapes[1])]
        for i in range(len(shapes) - 1):
            for sh, sl, dr in zip(shapes[i], shapes[i + 1], down_sample_ratios):
                if sl * dr != sh:
                    raise ValueError(
                        "Invalid inputs shapes: {}".format(shapes))
        for sh, st in zip(shapes[0], target_shape_high_resolution):
            if sh < st:
                raise ValueError("Can not perfrom random crop on inputs with shape {} with target shape {}.".format(
                    shapes, target_shape_high_resolution))
        target_shapes = []
        for i in range(nb_down_sample + 1):
            target_shapes.append([s // (dr**i)
                                  for s, dr in zip(target_shape_high_resolution, down_sample_ratios)])
        # Draw the crop offset at the lowest resolution, then scale it up
        # level by level so every resolution crops the same region.
        max_offsets = [sl - tl
                       for sl, tl in zip(shapes[-1], target_shapes[-1])]
        offset = []
        for mo in max_offsets:
            if mo == 0:
                offset.append(0)
            else:
                # maxval is exclusive; mo + 1 allows the full valid
                # offset range [0, mo].
                offset.append(tf.random_uniform(
                    [], 0, mo + 1, dtype=tf.int64))
        offsets = [offset]
        for i in range(nb_down_sample):
            offset_new = [o * dr
                          for o, dr in zip(offsets[-1], down_sample_ratios)]
            offsets.append(offset_new)
        offsets = offsets[::-1]  # offsets[0] now corresponds to 'image0x'
        results = dict()
        for i in range(nb_down_sample + 1):
            k = 'image{}x'.format(i)
            results[k] = tf.slice(images[k], offsets[i], target_shapes[i])
        return results
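
A usage sketch for random_crop_multiresolution with two levels and a 2x spatial down-sampling ratio (shapes are illustrative):

import tensorflow as tf

images = {
    'image0x': tf.zeros([8, 640, 640, 1]),  # full resolution
    'image1x': tf.zeros([8, 320, 320, 1]),  # 2x down-sampled
}
crops = random_crop_multiresolution(images, [8, 320, 320, 1])
print(crops['image0x'].shape)  # (8, 320, 320, 1)
print(crops['image1x'].shape)  # (8, 160, 160, 1), same region, half scale
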
Example #6
 def _process_sinogram(self, dataset):
     from ...model.normalizer.normalizer import ReduceSum, FixWhite
     from ..super_resolution import SuperResolutionDataset
     from ...utils.tensor import shape_as_list
     if self.param('log_scale'):
         stat = dataset.LOG_SINO_STAT
     else:
         stat = dataset.SINO_STAT
     # dataset = ReduceSum(self.name / 'reduce_sum', dataset['sinogram'],
     # fixed_summation_value=1e6).as_tensor()
     if 'phantom' in dataset:
         phan = dataset['phantom']
     else:
         phan = None
     dataset = dataset['sinogram']
     if self.param('with_poission_noise'):
         with tf.name_scope('add_with_poission_noise'):
             noise = tf.random_poisson(dataset, shape=[])
             dataset = tf.concat([noise, dataset], axis=0)
     if self.param('log_scale'):
         dataset = tf.log(dataset + 0.4)
     if self.param('with_white_normalization'):
         dataset = FixWhite(name=self.name / 'fix_white',
                            inputs=dataset,
                            mean=stat['mean'],
                            std=stat['std']).as_tensor()
     # random phase shift
     # if self.param('with_phase_shift'):
     #     phase_view = tf.random_uniform(
     #         [], 0, shape_as_list(dataset)[1], dtype=tf.int64)
     #     dataset_l = dataset[:, phase_view:, :, :]
     #     dataset_r = dataset[:, :phase_view, :, :]
     #     dataset = tf.concat([dataset_l, dataset_r], axis=1)
     dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                              list(self.param('target_shape')) + [1])
     dataset = SuperResolutionDataset(
         self.name / 'super_resolution',
         lambda: {'image': dataset},
         input_key='image',
         nb_down_sample=self.param('nb_down_sample'))
     keys = [
         'image{}x'.format(2**i)
         for i in range(dataset.param('nb_down_sample') + 1)
     ]
     if self.param('with_poission_noise'):
         # Batch was doubled above: first half is the Poisson realization,
         # second half is the clean data.
         result = {
             'input/' + k: dataset[k][:shape_as_list(dataset[k])[0] // 2,
                                      ...]
             for k in keys
         }
         result.update({
             'label/' + k: dataset[k][shape_as_list(dataset[k])[0] // 2:,
                                      ...]
             for k in keys
         })
         result.update({
             'noise/' + k: dataset[k][:shape_as_list(dataset[k])[0] // 2,
                                      ...]
             for k in keys
         })
         result.update({
             'clean/' + k: dataset[k][shape_as_list(dataset[k])[0] // 2:,
                                      ...]
             for k in keys
         })
     else:
         # No noise was added, so the batch is not doubled; expose the
         # same tensors under every alias.
         result = {'input/' + k: dataset[k] for k in keys}
         result.update({'label/' + k: dataset[k] for k in keys})
         result.update({'noise/' + k: dataset[k] for k in keys})
         result.update({'clean/' + k: dataset[k] for k in keys})
     if phan is not None:
         result.update({'phantom': phan})
     return result
Example #7
 def _target_shape(self, dataset):
     batch_size = shape_as_list(dataset)[0]
     return [batch_size] + list(self.param('target_shape')) + [1]
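
shape_as_list itself comes from dxpy.learn.utils.tensor and is not shown in these examples. A plausible minimal version, assuming it returns static dimensions where they are known and dynamic ones otherwise:

import tensorflow as tf

def shape_as_list(tensor):
    # Static dims where known, dynamic tf.shape() entries where not.
    static = tensor.shape.as_list()
    dynamic = tf.shape(tensor)
    return [s if s is not None else dynamic[i] for i, s in enumerate(static)]
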