def __init__(self,
              reader,
              window_sizes,
              batch_size=10,
              n_interpolations=10,
              queue_length=10,
              name='linear_interpolation_sampler'):
     ImageWindowDataset.__init__(self,
                                 reader,
                                 window_sizes=window_sizes,
                                 batch_size=batch_size,
                                 queue_length=queue_length,
                                 shuffle=False,
                                 epoch=1,
                                 smaller_final_batch_mode='drop',
                                 name=name)
     self.n_interpolations = n_interpolations
     # use only the first spatial shape available from the reader
     image_spatial_shape = list(self.reader.shapes.values())[0][:3]
     self.window.set_spatial_shape(image_spatial_shape)
     tf.logging.info("initialised linear interpolation sampler %s ",
                     self.window.shapes)
     assert not self.window.has_dynamic_shapes, \
         "dynamic shapes not supported, please specify " \
         "spatial_window_size = (1, 1, 1)"
Example #2
    def __init__(self,
                 reader,
                 window_sizes,
                 batch_size=1,
                 spatial_window_size=None,
                 window_border=None,
                 queue_length=10,
                 smaller_final_batch_mode='pad',
                 name='grid_sampler'):

        # override any spatial window sizes defined in the input
        # modalities sections;
        # this is useful when doing inference with a spatial window
        # that differs from the training specifications
        ImageWindowDataset.__init__(
            self,
            reader=reader,
            window_sizes=spatial_window_size or window_sizes,
            batch_size=batch_size,
            windows_per_image=1,
            queue_length=queue_length,
            shuffle=False,
            epoch=1,
            smaller_final_batch_mode=smaller_final_batch_mode,
            name=name)

        self.border_size = window_border or (0, 0, 0)
        assert isinstance(self.border_size, (list, tuple)), \
            "window_border should be a list or tuple"
        while len(self.border_size) < N_SPATIAL:
            self.border_size = tuple(self.border_size) + \
                               (self.border_size[-1],)
        self.border_size = self.border_size[:N_SPATIAL]
        tf.logging.info('initialised window instance')
        tf.logging.info("initialised grid sampler %s", self.window.shapes)
Example #3
    def __init__(self,
                 names=('vector', ),
                 vector_size=(100, ),
                 batch_size=10,
                 n_interpolations=10,
                 mean=0.0,
                 stddev=1.0,
                 repeat=1,
                 queue_length=10,
                 name='random_vector_sampler'):
        # repeat=None for infinite loops
        self.n_interpolations = max(n_interpolations, 1)
        self.mean = mean
        self.stddev = stddev
        self.repeat = repeat
        self.names = names

        ImageWindowDataset.__init__(
            self,
            reader=None,
            window_sizes={names[0]: {
                              'spatial_window_size': vector_size
                          }},
            batch_size=batch_size,
            queue_length=queue_length,
            shuffle=False,
            epoch=1,
            smaller_final_batch_mode='drop',
            name=name)
        self.window = ImageWindow(shapes={names[0]: vector_size},
                                  dtypes={names[0]: tf.float32})
        tf.logging.info("initialised sampler output %s ", self.window.shapes)
Example #4
 def __init__(self,
              reader,
              window_sizes,
              batch_size=1,
              spatial_window_size=None,
              windows_per_image=1,
              shuffle=True,
              queue_length=10,
              smaller_final_batch_mode='pad',
              name='resize_sampler_v2'):
     tf.logging.info('reading size of preprocessed images')
     ImageWindowDataset.__init__(
         self,
         reader=reader,
         window_sizes=window_sizes,
         batch_size=batch_size,
         windows_per_image=windows_per_image,
         queue_length=queue_length,
         shuffle=shuffle,
         epoch=-1 if shuffle else 1,
         smaller_final_batch_mode=smaller_final_batch_mode,
         name=name)
     if spatial_window_size:
         # override any spatial window sizes defined in the input
         # modalities sections;
         # this is useful when doing inference with a spatial window
         # that differs from the training specifications
         self.window.set_spatial_shape(spatial_window_size)
     tf.logging.info("initialised resize sampler %s ", self.window.shapes)
Example #5
 def __init__(self,
              reader,
              csv_reader=None,
              window_sizes=None,
              batch_size=10,
              windows_per_image=1,
              shuffle=True,
              queue_length=10,
              num_threads=4,
              epoch=-1,
              smaller_final_batch_mode='pad',
              name='random_vector_sampler'):
     self.csv_reader = csv_reader
     print("assigned csv_reader")
     ImageWindowDataset.__init__(
         self,
         reader=reader,
         window_sizes=window_sizes,
         batch_size=batch_size,
         windows_per_image=windows_per_image,
         shuffle=shuffle,
         queue_length=queue_length,
         epoch=epoch,
         smaller_final_batch_mode=smaller_final_batch_mode,
         name=name)
     print("initialised IWD")
     self.set_num_threads(num_threads)
Example #6
 def run_dataset(self, n_iters, n_threads, **kwargs):
     sampler = ImageWindowDataset(**kwargs)
     sampler.set_num_threads(n_threads)
     with self.cached_session() as sess:
         true_iters = 0
         next_element = sampler.pop_batch_op()
         windows = []
         try:
             for _ in range(min(n_iters, 100)):
                 windows.append(sess.run(next_element)['mr_location'])
                 true_iters = true_iters + 1
         except (tf.errors.OutOfRangeError, EOFError):
             pass
         assert true_iters <= 100, 'keep the test smaller than 100 iters'
     return true_iters, np.concatenate(windows, 0)
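A sketch of how this helper might be invoked from another test in the same class; the reader factory `get_2d_reader()`, the `'mr'` source name it implies, and the chosen parameters are assumptions based on the other tests on this page.

def test_window_locations(self):
    # hypothetical test using the helper above
    true_iters, locations = self.run_dataset(n_iters=5,
                                             n_threads=2,
                                             reader=get_2d_reader(),
                                             batch_size=2,
                                             epoch=1)
    # each completed iteration contributes batch_size location rows
    self.assertEqual(locations.shape[0], true_iters * 2)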
Example #7
 def test_epoch(self):
     reader = get_2d_reader()
     batch_size = 3
     sampler = ImageWindowDataset(reader=reader,
                                  batch_size=batch_size,
                                  epoch=1)
     with self.cached_session() as sess:
         next_element = sampler.pop_batch_op()
         iters = 0
         try:
             for _ in range(400):
                 window = sess.run(next_element)
                 iters = iters + 1
         except tf.errors.OutOfRangeError:
             pass
         # batch size 3, 40 images in total: expect ceil(40 / 3) = 14 iterations
         self.assertEqual(
             np.ceil(reader.num_subjects / np.float(batch_size)), iters)
Example #8
    def __init__(self,
                 reader,
                 window_sizes,
                 batch_size=1,
                 windows_per_image=1,
                 queue_length=10,
                 name='uniform_sampler_v2'):
        ImageWindowDataset.__init__(self,
                                    reader=reader,
                                    window_sizes=window_sizes,
                                    batch_size=batch_size,
                                    windows_per_image=windows_per_image,
                                    queue_length=queue_length,
                                    shuffle=True,
                                    epoch=-1,
                                    smaller_final_batch_mode='drop',
                                    name=name)

        tf.logging.info("initialised uniform sampler %s ", self.window.shapes)
        self.window_centers_sampler = rand_spatial_coordinates
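`rand_spatial_coordinates` supplies the random window corners; below is a plain-NumPy illustration of uniform window placement (it mimics the idea only and is not the library function's signature).

import numpy as np

img_spatial_size = np.array([256, 256, 128])
win_spatial_size = np.array([64, 64, 64])
n_samples = 100
# draw each window's starting corner so the window stays inside the image
max_start = img_spatial_size - win_spatial_size
starts = np.stack(
    [np.random.randint(0, upper + 1, size=n_samples) for upper in max_start],
    axis=1)
print(starts.shape)  # (100, 3)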
Example #9
 def __init__(self,
              reader,
              csv_reader=None,
              window_sizes=None,
              batch_size=10,
              windows_per_image=1,
              shuffle=True,
              queue_length=10,
              epoch=-1,
              smaller_final_batch_mode='pad',
              name='random_vector_sampler'):
     self.csv_reader = csv_reader
     ImageWindowDataset.__init__(
         self,
         reader=reader,
         window_sizes=window_sizes,
         batch_size=batch_size,
         windows_per_image=windows_per_image,
         shuffle=shuffle,
         queue_length=queue_length,
         epoch=epoch,
         smaller_final_batch_mode=smaller_final_batch_mode,
         name=name)
Example #10
 def __init__(self, *args, **kwargs):
     ImageWindowDataset.__init__(self, *args, **kwargs)
Example #11
 def test_windows_per_image(self):
     sampler = ImageWindowDataset(reader=get_2d_reader(),
                                  batch_size=2,
                                  windows_per_image=2)
     self.assert_window(sampler())
Example #12
 def test_window_size_dict(self):
     sampler = ImageWindowDataset(reader=get_2d_reader(),
                                  window_sizes={'mr': (0, 0, 0)},
                                  batch_size=2)
     self.assert_tf_window(sampler)
     self.assert_window(sampler())
Example #13
 def test_batch_size(self):
     # batch size doesn't change the numpy interface
     sampler = ImageWindowDataset(reader=get_2d_reader(), batch_size=2)
     self.assert_tf_window(sampler)
     self.assert_window(sampler())
Example #14
 def test_simple(self):
     sampler = ImageWindowDataset(reader=get_2d_reader())
     self.assert_tf_window(sampler)
     self.assert_window(sampler())
Example #15
 def test_window_size(self):
     sampler = ImageWindowDataset(reader=get_3d_reader(),
                                  window_sizes=(0, 0, 0),
                                  batch_size=2)
     self.assert_tf_window(sampler)
     self.assert_window(sampler())
data_param = \
    {'MR': {
        'path_to_search': '~/Desktop/useful_scripts/visualise_windows',
        'filename_contains': 'example.png'}}

###
# create an image reader
###
reader = ImageReader().initialise(data_param)
reader.add_preprocessing_layers(  # add volume padding layer
    [PadLayer(image_name=['MR'], border=volume_padding_size, mode='constant')])

###
# show 'volume' -- without window sampling
###
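# NiftyNet windows are indexed as [batch, x, y, z, time, modality] (assumed
# layout); the slice below keeps one 2-D plane of the single padded image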
image_2d = ImageWindowDataset(reader)()['MR'][0, :, :, 0, 0, 0]
vis_coordinates(image_2d, saving_name='output/image.png')

###
# create & show uniform random samples
###
uniform_sampler = UniformSampler(reader,
                                 spatial_window_size,
                                 windows_per_image=100)
next_window = uniform_sampler.pop_batch_op()
coords = []
with tf.Session() as sess:
    for _ in range(20):
        uniform_windows = sess.run(next_window)
        coords.append(uniform_windows['MR_location'])
coords = np.concatenate(coords, axis=0)
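A small follow-up check, not part of the original script: each row of the collected 'MR_location' array is expected to identify a window by subject index plus start and end spatial coordinates.

# expected columns: [subject_idx, x_start, y_start, z_start, x_end, y_end, z_end]
print(coords.shape)
print(coords[:3])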