def __init__(self, names=('vector', ), vector_size=(100, ),
             batch_size=10, n_interpolations=10,
             mean=0.0, stddev=1.0, repeat=1,
             queue_length=10, name='random_vector_sampler'):
    """Queue-backed sampler producing batches of random vectors.

    Builds an ``ImageWindow`` describing a single float32 vector output
    (keyed by ``names[0]``) and wires it to a FIFO batch queue.
    NOTE(review): ``repeat=None`` presumably requests infinite looping
    in the sampling op — confirm against the consuming layer.
    """
    Layer.__init__(self, name=name)
    # guarantee at least one interpolation step per enqueue
    self.n_interpolations = max(n_interpolations, 1)
    self.mean = mean
    self.stddev = stddev
    self.repeat = repeat
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed images')
    primary = names[0]
    self.window = ImageWindow(shapes={primary: vector_size},
                              dtypes={primary: tf.float32})
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=self.n_interpolations,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s ", self.window.shapes)
def __init__(self, reader, data_param, batch_size,
             spatial_window_size=(), window_border=(), queue_length=10):
    """Input buffer pairing an image reader with a FIFO window queue.

    A non-empty ``spatial_window_size`` overrides every spatial window
    declared in the input-modality sections of ``data_param`` — useful
    when inference needs a window size different from the training
    specification.
    """
    self.reader = reader
    self.batch_size = batch_size
    Layer.__init__(self, name='input_buffer')
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed inputs')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    if spatial_window_size:
        # replace the per-modality spatial shapes wholesale
        self.window.set_spatial_shape(spatial_window_size)
    self.border_size = window_border
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=1,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
def __init__(self, reader, data_param, batch_size,
             spatial_window_size=(), windows_per_image=1,
             shuffle_buffer=True, queue_length=10, name='resize_sampler'):
    """Sampler that resizes inputs to a fixed window and queues them.

    A non-empty ``spatial_window_size`` overrides every spatial window
    declared in the input-modality sections — useful when inference
    uses a window size different from the training specification.
    """
    self.reader = reader
    self.windows_per_image = windows_per_image
    # normalise to a plain bool; drives RandomShuffleQueue vs FIFOQueue
    self.shuffle = bool(shuffle_buffer)
    Layer.__init__(self, name=name)
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=self.shuffle)
    tf.logging.info('reading size of preprocessed images')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    if spatial_window_size:
        self.window.set_spatial_shape(spatial_window_size)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=1,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s ", self.window.shapes)
def __init__(self, reader, data_param, batch_size=10,
             n_interpolations=10, queue_length=10,
             name='linear_interpolation_sampler'):
    """Sampler enqueueing ``n_interpolations`` windows per image pair.

    The window's spatial shape is pinned to the first reader shape
    available, so dynamic spatial shapes are rejected below.
    """
    self.reader = reader
    self.n_interpolations = n_interpolations
    Layer.__init__(self, name=name)
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed images')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    # only try to use the first spatial shape available
    first_shape = next(iter(self.reader.shapes.values()))
    self.window.set_spatial_shape(first_shape[:3])
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=self.n_interpolations,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s ", self.window.shapes)
    assert not self.window.has_dynamic_shapes, \
        "dynamic shapes not supported, please specify " \
        "spatial_window_size = (1, 1, 1)"
def __init__(self, reader, data_param, batch_size,
             windows_per_image, queue_length=10):
    """Shuffling window sampler fed by random spatial coordinates.

    Enqueues ``windows_per_image`` windows per image and dequeues
    ``batch_size``-sized batches from a RandomShuffleQueue.
    """
    self.reader = reader
    Layer.__init__(self, name='input_buffer')
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=True)
    tf.logging.info('reading size of preprocessed images')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=windows_per_image,
                               dequeue_size=batch_size)
    tf.logging.info(
        "initialised sampler output %s "
        " [-1 for dynamic size]", self.window.shapes)
    # pluggable coordinate strategy; defaults to uniform-random windows
    self.spatial_coordinates_generator = rand_spatial_coordinates
def __init__(self, names=('vector',), vector_size=(100,),
             batch_size=10, n_interpolations=10,
             mean=0.0, stddev=1.0, repeat=1, queue_length=10):
    """Queue-backed sampler producing batches of random vectors.

    A single float32 vector output keyed by ``names[0]`` is described
    by an ``ImageWindow`` and wired to a FIFO batch queue.
    """
    # guarantee at least one interpolation step per enqueue
    self.n_interpolations = max(n_interpolations, 1)
    self.mean = mean
    self.stddev = stddev
    self.repeat = repeat
    Layer.__init__(self, name='input_buffer')
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed images')
    self.names = names
    key = names[0]
    self.window = ImageWindow(names=(key,),
                              shapes={key: vector_size},
                              dtypes={key: tf.float32})
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=self.n_interpolations,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s ", self.window.shapes)
def __init__(self, reader, data_param, batch_size,
             spatial_window_size=None, window_border=None,
             queue_length=10, name='grid_sampler'):
    """Deterministic grid sampler over whole images.

    When ``spatial_window_size`` is given it replaces ``data_param``
    entirely when constructing the window — this overrides all spatial
    windows defined in the input-modality sections, which is useful for
    inference with a window different from the training specification.
    """
    self.reader = reader
    self.batch_size = batch_size
    # default to a zero border in each spatial dimension
    self.border_size = window_border or (0, 0, 0)
    Layer.__init__(self, name=name)
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed inputs')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        spatial_window_size or data_param)
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=1,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s", self.window.shapes)
def __init__(self, reader, data_param, batch_size=10,
             n_interpolations=10, queue_length=10):
    """Sampler enqueueing ``n_interpolations`` windows per image pair.

    The window's spatial shape is pinned to the first reader shape
    available, so dynamic spatial shapes are rejected below.
    """
    self.reader = reader
    self.n_interpolations = n_interpolations
    Layer.__init__(self, name='input_buffer')
    InputBatchQueueRunner.__init__(
        self, capacity=queue_length, shuffle=False)
    tf.logging.info('reading size of preprocessed images')
    self.window = ImageWindow.from_data_reader_properties(
        self.reader.input_sources,
        self.reader.shapes,
        self.reader.tf_dtypes,
        data_param)
    # only try to use the first spatial shape available
    first_shape = next(iter(self.reader.shapes.values()))
    self.window.set_spatial_shape(first_shape[:3])
    tf.logging.info('initialised window instance')
    self._create_queue_and_ops(self.window,
                               enqueue_size=self.n_interpolations,
                               dequeue_size=batch_size)
    tf.logging.info("initialised sampler output %s ", self.window.shapes)
    assert not self.window.has_dynamic_shapes, \
        "dynamic shapes not supported, please specify " \
        "spatial_window_size = (1, 1, 1)"
def test_dynamic_window_enqueue(self):
    """Enqueue/dequeue round trip through a dynamically shaped window.

    A dynamic window collapses each enqueue to a single queue element,
    so the queue size goes 0 -> 1 -> 0 across enqueue and pop.
    """
    enqueue_size, dequeue_size = 3, 2
    test_buffer = InputBatchQueueRunner(capacity=10, shuffle=False)
    window = get_dynamic_image_window()
    test_buffer._create_queue_and_ops(window, enqueue_size, dequeue_size)

    dynamic_image_size = {'image': (5, 5, 5, 1, 2),
                          'label': (5, 5, 3, 1, 2)}
    window_shape = window.match_image_shapes(dynamic_image_size)
    feed = {}
    for source in ('image', 'label'):
        data_ph = window.image_data_placeholder(source)
        feed[data_ph] = np.zeros((1,) + window_shape[source])
        coord_ph = window.coordinates_placeholder(source)
        feed[coord_ph] = np.zeros(coord_ph.shape.as_list())

    with self.test_session() as sess:
        # empty before any enqueue
        self.assertAllEqual(
            0, sess.run(test_buffer._query_queue_size_op))
        sess.run(test_buffer._enqueue_op, feed_dict=feed)
        # dynamic window: one element regardless of enqueue_size
        self.assertAllEqual(
            1, sess.run(test_buffer._query_queue_size_op))
        sess.run(test_buffer.pop_batch_op())
        # drained again after the pop
        self.assertAllEqual(
            0, sess.run(test_buffer._query_queue_size_op))
        sess.run(test_buffer._close_queue_op)
def test_dynamic_window_init(self):
    """Queue construction for a dynamically shaped window.

    Checks that shuffle=True yields a RandomShuffleQueue and
    shuffle=False a FIFOQueue (with correspondingly named enqueue
    ops), that a non-window argument is rejected, and that calling
    the runner directly is not implemented.
    """
    window = get_dynamic_image_window()
    self.assertAllEqual(window.has_dynamic_shapes, True)

    test_buffer = InputBatchQueueRunner(capacity=10, shuffle=True)
    # a plain string has no window interface -> AttributeError
    with self.assertRaisesRegexp(AttributeError, ""):
        test_buffer._create_queue_and_ops('test')
    test_buffer._create_queue_and_ops(window)
    self.assertIsInstance(test_buffer._queue, tf.RandomShuffleQueue)
    # fix: the original asserted the same op name twice (copy-paste);
    # one assertion per queue flavour is sufficient
    self.assertAllEqual(test_buffer._enqueue_op.name,
                        'shuffled_queue_enqueue')

    test_buffer = InputBatchQueueRunner(capacity=10, shuffle=False)
    test_buffer._create_queue_and_ops(window)
    self.assertIsInstance(test_buffer._queue, tf.FIFOQueue)
    self.assertAllEqual(test_buffer._enqueue_op.name,
                        'FIFO_queue_enqueue')

    with self.assertRaisesRegexp(NotImplementedError, ""):
        test_buffer()
def test_dynamic_window_enqueue(self):
    """Enqueue/dequeue round trip through a dynamically shaped window.

    A dynamic window collapses each enqueue to a single queue element,
    so the observed queue size is 0, then 1, then 0 again.
    """
    enqueue_size, dequeue_size = 3, 2
    test_buffer = InputBatchQueueRunner(capacity=10, shuffle=False)
    window = get_dynamic_image_window()
    test_buffer._create_queue_and_ops(window, enqueue_size, dequeue_size)

    dynamic_image_size = {'image': (5, 5, 5, 1, 2),
                          'label': (5, 5, 3, 1, 2)}
    window_shape = window.match_image_shapes(dynamic_image_size)
    feed = {}
    for source in ('image', 'label'):
        data_ph = window.image_data_placeholder(source)
        feed[data_ph] = np.zeros((1,) + window_shape[source])
        coord_ph = window.coordinates_placeholder(source)
        feed[coord_ph] = np.zeros(coord_ph.shape.as_list())

    with self.test_session() as sess:
        # empty before any enqueue
        self.assertAllEqual(
            0, sess.run(test_buffer._query_queue_size_op))
        sess.run(test_buffer._enqueue_op, feed_dict=feed)
        # dynamic window: a single element regardless of enqueue_size
        self.assertAllEqual(
            1, sess.run(test_buffer._query_queue_size_op))
        sess.run(test_buffer.pop_batch_op())
        # drained again after the pop
        self.assertAllEqual(
            0, sess.run(test_buffer._query_queue_size_op))
        sess.run(test_buffer._close_queue_op)
def test_static_window_enqueue(self):
    """Queue-size accounting for a statically shaped window.

    Each enqueue adds ``enqueue_size`` elements; three enqueues fill
    the queue to 9 and one pop removes ``dequeue_size`` (9 -> 7).
    """
    enqueue_size, dequeue_size = 3, 2
    test_buffer = InputBatchQueueRunner(capacity=10, shuffle=True)
    window = get_static_image_window()
    test_buffer._create_queue_and_ops(window,
                                      enqueue_size=enqueue_size,
                                      dequeue_size=dequeue_size)

    # static shapes: every placeholder already knows its full shape
    feed = {}
    for source in ('image', 'label'):
        for ph in (window.image_data_placeholder(source),
                   window.coordinates_placeholder(source)):
            feed[ph] = np.zeros(ph.shape.as_list())

    with self.test_session() as sess:
        # empty before any enqueue
        self.assertAllEqual(
            0, sess.run(test_buffer._query_queue_size_op))
        sess.run(test_buffer._enqueue_op, feed_dict=feed)
        self.assertAllEqual(
            enqueue_size, sess.run(test_buffer._query_queue_size_op))
        # two more enqueues: 3 + 3 + 3 = 9 elements
        for _ in range(2):
            sess.run(test_buffer._enqueue_op, feed_dict=feed)
        self.assertAllEqual(
            9, sess.run(test_buffer._query_queue_size_op))
        # one pop removes dequeue_size elements
        sess.run(test_buffer.pop_batch_op())
        self.assertAllEqual(
            7, sess.run(test_buffer._query_queue_size_op))
        sess.run(test_buffer._close_queue_op)