def __init__(self,
             reader,
             data_param,
             batch_size,
             spatial_window_size=(),
             window_border=(),
             queue_length=10):
    self.batch_size = batch_size
    self.reader = reader
    Layer.__init__(self, name='input_buffer')
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed inputs')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    if spatial_window_size:
        # override all spatial windows defined in the input
        # modalities sections; this is useful when doing inference
        # with a spatial window that differs from the training
        # specifications
        self.window.set_spatial_shape(spatial_window_size)
    self.border_size = window_border
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=1,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
def __init__(self,
             reader,
             data_param,
             batch_size=10,
             n_interpolations=10,
             queue_length=10,
             name='linear_interpolation_sampler'):
    self.n_interpolations = n_interpolations
    self.reader = reader
    Layer.__init__(self, name=name)
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed images')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    # only try to use the first spatial shape available
    image_spatial_shape = list(self.reader.shapes.values())[0][:3]
    self.window.set_spatial_shape(image_spatial_shape)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=self.n_interpolations,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
    assert not self.window.has_dynamic_shapes, \
        "dynamic shapes not supported, please specify " \
        "spatial_window_size = (1, 1, 1)"
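# Usage sketch (not from the original source) of how the "first spatial
# shape available" is picked above. The reader.shapes contents are
# hypothetical; only the first three dims are treated as spatial.
example_shapes = {'image': (64, 64, 64, 1, 2), 'label': (64, 64, 64, 1, 1)}
example_spatial_shape = list(example_shapes.values())[0][:3]
assert example_spatial_shape == (64, 64, 64)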
def __init__(self,
             names=('vector',),
             vector_size=(100,),
             batch_size=10,
             n_interpolations=10,
             mean=0.0,
             stddev=1.0,
             repeat=1,
             queue_length=10):
    self.n_interpolations = max(n_interpolations, 1)
    self.mean = mean
    self.stddev = stddev
    self.repeat = repeat
    Layer.__init__(self, name='input_buffer')
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed images')
    self.names = names
    vector_shapes = {names[0]: vector_size}
    vector_dtypes = {names[0]: tf.float32}
    self.window = ImageWindow(names=tuple(vector_shapes),
                              shapes=vector_shapes,
                              dtypes=vector_dtypes)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=self.n_interpolations,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
def __init__(self,
             reader,
             data_param,
             batch_size,
             windows_per_image,
             queue_length=10):
    self.reader = reader
    Layer.__init__(self, name='input_buffer')
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=True)
    tf.logging.info('reading size of preprocessed images')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=windows_per_image,
                               dequeue_size=batch_size)
    tf.logging.info(
        "initialised sampler output %s [-1 for dynamic size]",
        self.window.shapes)
    self.spatial_coordinates_generator = rand_spatial_coordinates
def __init__(self,
             reader,
             data_param,
             batch_size,
             spatial_window_size=None,
             window_border=None,
             queue_length=10,
             name='grid_sampler'):
    self.batch_size = batch_size
    self.border_size = window_border or (0, 0, 0)
    self.reader = reader
    Layer.__init__(self, name=name)
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed inputs')
    # ``spatial_window_size`` overrides all spatial windows defined in
    # the input modalities sections; this is useful when doing
    # inference with a spatial window that differs from the training
    # specifications
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        spatial_window_size or data_param)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=1,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
def __init__(self,
             reader=None,
             window_sizes=None,
             batch_size=1,
             windows_per_image=1,
             queue_length=10,
             shuffle=True,
             epoch=-1,
             name='image_dataset'):
    Layer.__init__(self, name=name)
    self.dataset = None
    self.iterator = None
    self.reader = reader
    self.batch_size = batch_size
    self.queue_length = queue_length
    self.n_subjects = 1
    self.from_generator = inspect.isgeneratorfunction(self.layer_op)
    self.shuffle = shuffle
    self.epoch = epoch
    self.window = None
    if reader is not None:
        self.window = ImageWindow.from_data_reader_properties(
            reader.input_sources,
            reader.shapes,
            reader.tf_dtypes,
            window_sizes or (-1, -1, -1))
        self.n_subjects = reader.num_subjects
        self.window.n_samples = \
            1 if self.from_generator else windows_per_image
def __init__(self,
             reader,
             data_param,
             batch_size=10,
             n_interpolations=10,
             queue_length=10):
    self.n_interpolations = n_interpolations
    self.reader = reader
    Layer.__init__(self, name='input_buffer')
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed images')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    # only try to use the first spatial shape available
    image_spatial_shape = list(self.reader.shapes.values())[0][:3]
    self.window.set_spatial_shape(image_spatial_shape)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=self.n_interpolations,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
    assert not self.window.has_dynamic_shapes, \
        "dynamic shapes not supported, please specify " \
        "spatial_window_size = (1, 1, 1)"
def __init__(self,
             reader,
             data_param,
             batch_size,
             spatial_window_size=(),
             windows_per_image=1,
             shuffle_buffer=True,
             queue_length=10,
             name='resize_sampler'):
    self.reader = reader
    self.windows_per_image = windows_per_image
    self.shuffle = bool(shuffle_buffer)
    Layer.__init__(self, name=name)
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=self.shuffle)
    tf.logging.info('reading size of preprocessed images')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    if spatial_window_size:
        # override all spatial windows defined in the input
        # modalities sections; this is useful when doing inference
        # with a spatial window that differs from the training
        # specifications
        self.window.set_spatial_shape(spatial_window_size)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=1,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
def __init__(self,
             names=('vector',),
             vector_size=(100,),
             batch_size=10,
             n_interpolations=10,
             mean=0.0,
             stddev=1.0,
             repeat=1,
             queue_length=10,
             name='random_vector_sampler'):
    # repeat=None for infinite loops
    Layer.__init__(self, name=name)
    self.n_interpolations = max(n_interpolations, 1)
    self.mean = mean
    self.stddev = stddev
    self.repeat = repeat
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed images')
    vector_shapes = {names[0]: vector_size}
    vector_dtypes = {names[0]: tf.float32}
    self.window = ImageWindow(shapes=vector_shapes,
                              dtypes=vector_dtypes)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=self.n_interpolations,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
def __init__(self,
             spatial_axis=0,
             do_cropping=True,
             name='spatial_gradient'):
    Layer.__init__(self, name=name)
    self.spatial_axis = int(spatial_axis)
    self.do_cropping = do_cropping
def __init__(self, reader_0, reader_1, data_param, batch_size=1):
    Layer.__init__(self, name='pairwise_sampler_resize')
    # reader for the fixed images
    self.reader_0 = reader_0
    # reader for the moving images
    self.reader_1 = reader_1

    # TODO:
    # 0) check that the two readers have file lists of the same length
    # 1) detect window shape mismatches or default the
    #    windows to the fixed image reader properties
    # 2) reshape images to (supporting multi-modal data)
    #    [batch, x, y, channels] or [batch, x, y, z, channels]
    # 3) infer spatial rank
    # 4) make ``label`` optional
    self.batch_size = int(batch_size)
    assert self.batch_size > 0, "batch size must be greater than 0"
    self.spatial_rank = 3
    self.window = ImageWindow.from_data_reader_properties(
        self.reader_0.input_sources,
        self.reader_0.shapes,
        self.reader_0.tf_dtypes,
        data_param)
    if self.window.has_dynamic_shapes:
        tf.logging.fatal('Dynamic shapes not supported.\nPlease specify '
                         'all spatial dims of the input data via the '
                         'spatial_window_size parameter.')
        raise NotImplementedError
    # TODO: check that spatial dims are the same across input modalities
    self.image_shape = \
        self.reader_0.shapes['fixed_image'][:self.spatial_rank]
    self.moving_image_shape = \
        self.reader_1.shapes['moving_image'][:self.spatial_rank]
    self.window_size = self.window.shapes['fixed_image'][1:]

    # initialise a dataset prefetching pairs of image and label volumes
    n_subjects = len(self.reader_0.output_list)
    int_seq = list(range(n_subjects))
    # make the length of the id sequence divisible by the batch size
    while len(int_seq) > 0 and len(int_seq) % self.batch_size != 0:
        int_seq.append(int_seq[-1])

    image_dataset = tf.data.Dataset.from_tensor_slices(int_seq)
    # map each integer id to four volumes (moving/fixed x image/label)
    # via a tf.py_func wrapper of ``get_pairwise_inputs``
    image_dataset = image_dataset.map(
        lambda image_id: tuple(tf.py_func(
            self.get_pairwise_inputs, [image_id],
            [tf.int32, tf.float32, tf.float32, tf.int32, tf.int32])),
        num_parallel_calls=4)  # supported by tf 1.4?
    # TODO: sequential and no repetition
    image_dataset = image_dataset.batch(self.batch_size)
    self.iterator = image_dataset.make_initializable_iterator()
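# Minimal sketch (illustrative values, not part of the class) of padding
# the subject-id sequence so its length divides evenly by the batch size,
# as done above: the last id is repeated until the remainder is zero.
batch_size = 4
int_seq = list(range(10))            # ten subjects: 0..9
while len(int_seq) > 0 and len(int_seq) % batch_size != 0:
    int_seq.append(int_seq[-1])      # repeat the last id as padding
assert len(int_seq) == 12 and int_seq[-3:] == [9, 9, 9]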
def __init__(self, sigma=1, truncate=3.0, type_str='gaussian'):
    """
    :param sigma: standard deviation of the smoothing kernel
    :param truncate: truncate the filter at this many standard deviations
    :param type_str: type of kernel
    """
    Layer.__init__(self, name='approximated_smoothing')
    self.kernel_func = look_up_operations(
        type_str.lower(), SUPPORTED_KERNELS)
    self.sigma = sigma
    self.truncate = truncate
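# Illustrative NumPy sketch (assumed, not the layer's own kernel_func) of
# what sigma and truncate mean for a 1-D Gaussian kernel: the filter is
# cut off at ``truncate`` standard deviations on each side.
import numpy as np

sigma, truncate = 1.0, 3.0
radius = int(truncate * sigma + 0.5)          # half-width of the filter
x = np.arange(-radius, radius + 1)
kernel = np.exp(-0.5 * (x / sigma) ** 2)
kernel /= kernel.sum()                        # normalise to sum to one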
def __init__(self, reader_0, reader_1, data_param, batch_size=1):
    Layer.__init__(self, name='pairwise_sampler_uniform')
    # reader for the fixed images
    self.reader_0 = reader_0
    # reader for the moving images
    self.reader_1 = reader_1

    # TODO:
    # 0) check that the two readers have file lists of the same length
    # 1) detect window shape mismatches or default the
    #    windows to the fixed image reader properties
    # 2) reshape images to (supporting multi-modal data)
    #    [batch, x, y, channels] or [batch, x, y, z, channels]
    # 3) infer spatial rank
    # 4) make ``label`` optional
    self.batch_size = batch_size
    self.spatial_rank = 3
    self.window = ImageWindow.from_data_reader_properties(
        self.reader_0.input_sources,
        self.reader_0.shapes,
        self.reader_0.tf_dtypes,
        data_param)
    if self.window.has_dynamic_shapes:
        tf.logging.fatal('Dynamic shapes not supported.\nPlease specify '
                         'all spatial dims of the input data via the '
                         'spatial_window_size parameter.')
        raise NotImplementedError
    # TODO: check that spatial dims are the same across input modalities
    self.image_shape = \
        self.reader_0.shapes['fixed_image'][:self.spatial_rank]
    self.moving_image_shape = \
        self.reader_1.shapes['moving_image'][:self.spatial_rank]
    self.window_size = self.window.shapes['fixed_image']

    # initialise a dataset prefetching pairs of image and label volumes
    n_subjects = len(self.reader_0.output_list)
    rand_ints = np.random.randint(n_subjects, size=[n_subjects])
    image_dataset = Dataset.from_tensor_slices(rand_ints)
    # map each random integer id to four volumes
    # (moving/fixed x image/label) via a tf.py_func wrapper of
    # ``get_pairwise_inputs``
    image_dataset = image_dataset.map(
        lambda image_id: tuple(tf.py_func(
            self.get_pairwise_inputs, [image_id],
            [tf.int64, tf.float32, tf.float32, tf.int32, tf.int32])),
        num_threads=4)  # supported by tf 1.4?
    image_dataset = image_dataset.repeat()  # num_epochs can be a param
    image_dataset = image_dataset.shuffle(
        buffer_size=self.batch_size * 20)
    image_dataset = image_dataset.batch(self.batch_size)
    self.iterator = image_dataset.make_initializable_iterator()
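# Self-contained sketch (TF 1.x style, dummy data) of the same pipeline
# pattern used above: ids -> py_func mapping -> repeat -> shuffle -> batch.
# ``load_pair`` is a hypothetical stand-in for ``get_pairwise_inputs``.
import numpy as np
import tensorflow as tf

def load_pair(image_id):
    # return a dummy fixed/moving pair for the given subject id
    return np.float32(image_id), np.float32(image_id + 1)

ids = np.random.randint(10, size=[10])
dataset = tf.data.Dataset.from_tensor_slices(ids)
dataset = dataset.map(
    lambda i: tuple(tf.py_func(load_pair, [i], [tf.float32, tf.float32])),
    num_parallel_calls=4)
dataset = dataset.repeat().shuffle(buffer_size=40).batch(2)
iterator = dataset.make_initializable_iterator()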
def __init__(self,
             reader=None,
             window_sizes=None,
             batch_size=1,
             windows_per_image=1,
             queue_length=10,
             shuffle=True,
             epoch=-1,
             smaller_final_batch_mode='pad',
             seed=None,
             name='image_dataset'):
    Layer.__init__(self, name=name)
    self._num_threads = 1
    self._enqueuer = None
    self._seed = seed

    self.dataset = None
    self.iterator = None
    self.reader = reader

    self.batch_size = batch_size
    self.queue_length = int(max(queue_length, round(batch_size * 5.0)))
    if self.queue_length > queue_length:
        tf.logging.warning(
            'sampler queue_length should be at least batch_size * 5, '
            'defaulting to %s.', self.queue_length)

    self.from_generator = inspect.isgeneratorfunction(self.layer_op)
    self.shuffle = shuffle
    self.epoch = 1 if self.from_generator else epoch

    self.smaller_final_batch_mode = look_up_operations(
        smaller_final_batch_mode.lower(), SMALLER_FINAL_BATCH_MODE)

    self.n_subjects = 1
    self.window = None
    if reader is not None:
        self.window = ImageWindow.from_data_reader_properties(
            reader.input_sources,
            reader.shapes,
            reader.tf_dtypes,
            window_sizes or (-1, -1, -1))
        self.n_subjects = reader.num_subjects
        self.window.n_samples = windows_per_image
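# Minimal sketch (hypothetical values) of the queue-length rule above:
# a requested queue_length smaller than batch_size * 5 is silently
# raised, and the warning above reports the value actually used.
batch_size = 8
requested = 10
queue_length = int(max(requested, round(batch_size * 5.0)))
assert queue_length == 40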
def __init__(self,
             scale,
             interpolation='linear',
             boundary='zero',
             transform=None,
             name='AffineAugmentation'):
    """
    :param scale: how extreme the perturbation is, with 0.0 meaning
        no perturbation and 1.0 giving the largest perturbations
    :param interpolation: the image value interpolation used by
        the resampling
    :param boundary: the boundary handling used by the resampling
    :param transform: an optional precomputed affine transform to apply
    :param name: string name of the layer
    """
    Layer.__init__(self, name=name)
    self.scale = min(max(float(scale), 0.0), 1.0)
    self.interpolation = interpolation
    self.boundary = boundary
    self._transform = None
    if transform is not None:
        self._transform = transform
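# Quick sketch (assumed inputs) of the scale clamping above: values
# outside [0.0, 1.0] are silently clipped into that range.
for raw, clamped in [(-0.3, 0.0), (0.5, 0.5), (2.0, 1.0)]:
    assert min(max(float(raw), 0.0), 1.0) == clamped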
def __init__(self, name='crop_concat'):
    Layer.__init__(self, name=name)