def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(
    self):
  with self.test_session() as sess:
    batch_size = 3
    num_batches = 2
    examples = tf.Variable(tf.constant(2, dtype=tf.int32))
    counter = examples.count_up_to(num_batches * batch_size + 2)
    boxes = tf.tile(tf.reshape(tf.range(4), [1, 4]),
                    tf.stack([counter, tf.constant(1)]))
    batch_queue = batcher.BatchQueue(
        tensor_dict={'boxes': boxes},
        batch_size=batch_size,
        batch_queue_capacity=100,
        num_batch_queue_threads=1,
        prefetch_queue_capacity=100)
    batch = batch_queue.dequeue()

    for tensor_dict in batch:
      for tensor in tensor_dict.values():
        self.assertAllEqual([None, 4], tensor.get_shape().as_list())

    tf.initialize_all_variables().run()
    with slim.queues.QueueRunners(sess):
      i = 2
      for _ in range(num_batches):
        batch_np = sess.run(batch)
        for tensor_dict in batch_np:
          for tensor in tensor_dict.values():
            self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1)))
        i += 1
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(batch)
def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self):
  with self.test_session() as sess:
    batch_size = 3
    num_batches = 2
    examples = tf.Variable(tf.constant(1, dtype=tf.int32))
    counter = examples.count_up_to(num_batches * batch_size + 1)
    image = tf.reshape(tf.range(1, 13), [4, 3]) * counter
    batch_queue = batcher.BatchQueue(
        tensor_dict={'image': image},
        batch_size=batch_size,
        batch_queue_capacity=100,
        num_batch_queue_threads=1,
        prefetch_queue_capacity=100)
    batch = batch_queue.dequeue()

    for tensor_dict in batch:
      for tensor in tensor_dict.values():
        self.assertAllEqual([4, 3], tensor.get_shape().as_list())

    tf.initialize_all_variables().run()
    with slim.queues.QueueRunners(sess):
      i = 1
      for _ in range(num_batches):
        batch_np = sess.run(batch)
        for tensor_dict in batch_np:
          for tensor in tensor_dict.values():
            self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i)
        i += 1
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(batch)
def test_batcher_when_batch_size_is_one(self):
  with self.test_session() as sess:
    batch_size = 1
    num_batches = 2
    examples = tf.Variable(tf.constant(2, dtype=tf.int32))
    counter = examples.count_up_to(num_batches * batch_size + 2)
    image = tf.reshape(tf.range(counter * counter),
                       tf.stack([counter, counter]))
    batch_queue = batcher.BatchQueue(
        tensor_dict={'image': image},
        batch_size=batch_size,
        batch_queue_capacity=100,
        num_batch_queue_threads=1,
        prefetch_queue_capacity=100)
    batch = batch_queue.dequeue()

    for tensor_dict in batch:
      for tensor in tensor_dict.values():
        self.assertAllEqual([None, None], tensor.get_shape().as_list())

    tf.initialize_all_variables().run()
    with slim.queues.QueueRunners(sess):
      i = 2
      for _ in range(num_batches):
        batch_np = sess.run(batch)
        for tensor_dict in batch_np:
          for tensor in tensor_dict.values():
            self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
        i += 1
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(batch)
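# The tests above exercise BatchQueue one tensor at a time. A minimal
# standalone sketch of the same pattern outside the test harness, with a
# multi-key tensor_dict as used by the input-queue builders further down.
# It assumes the TF 1.x queue-runner API and the same imports the tests use
# (object_detection's batcher module, slim, numpy); the shapes and capacities
# are illustrative only.
import tensorflow as tf
import tensorflow.contrib.slim as slim

from object_detection.core import batcher

with tf.Graph().as_default(), tf.Session() as sess:
  examples = tf.Variable(tf.constant(2, dtype=tf.int32))
  counter = examples.count_up_to(8)  # raises OutOfRangeError when exhausted
  # One "example": a fixed-shape image plus a variable-length boxes tensor.
  image = tf.reshape(tf.range(12), [4, 3])
  boxes = tf.tile(tf.reshape(tf.range(4), [1, 4]),
                  tf.stack([counter, tf.constant(1)]))

  batch_queue = batcher.BatchQueue(
      tensor_dict={'image': image, 'boxes': boxes},
      batch_size=3,
      batch_queue_capacity=100,
      num_batch_queue_threads=1,
      prefetch_queue_capacity=100)
  batch = batch_queue.dequeue()  # a list of tensor_dicts, one per example

  tf.initialize_all_variables().run()
  with slim.queues.QueueRunners(sess):
    for tensor_dict in sess.run(batch):
      # Any padding added for batching has been stripped again by dequeue.
      print(tensor_dict['image'].shape, tensor_dict['boxes'].shape)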
def create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
                       batch_queue_capacity, num_batch_queue_threads,
                       prefetch_queue_capacity, data_augmentation_options):
  """Sets up reader, prefetcher and returns input queue.

  Args:
    batch_size_per_clone: batch size to use per clone.
    create_tensor_dict_fn: function to create tensor dictionary.
    batch_queue_capacity: maximum number of elements to store within a queue.
    num_batch_queue_threads: number of threads to use for batching.
    prefetch_queue_capacity: maximum capacity of the queue used to prefetch
      assembled batches.
    data_augmentation_options: a list of tuples, where each tuple contains a
      data augmentation function and a dictionary containing arguments and
      their values (see preprocessor.py).

  Returns:
    input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
      (which hold images, boxes and targets). To get a batch of tensor_dicts,
      call input_queue.dequeue().
  """
  tensor_dict = create_tensor_dict_fn()
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tensor_dict[fields.InputDataFields.image], 0)

  images = tensor_dict[fields.InputDataFields.image]
  float_images = tf.to_float(images)
  tensor_dict[fields.InputDataFields.image] = float_images

  # For audio input.
  tensor_dict[fields.InputDataFields.audio] = tf.expand_dims(
      tensor_dict[fields.InputDataFields.audio], 0)
  audios = tensor_dict[fields.InputDataFields.audio]
  float_audios = tf.to_float(audios)
  tensor_dict[fields.InputDataFields.audio] = float_audios

  include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks
                            in tensor_dict)
  include_keypoints = (fields.InputDataFields.groundtruth_keypoints
                       in tensor_dict)
  if data_augmentation_options:
    tensor_dict = preprocessor.preprocess(
        tensor_dict, data_augmentation_options,
        func_arg_map=preprocessor.get_default_func_arg_map(
            include_instance_masks=include_instance_masks,
            include_keypoints=include_keypoints))

  input_queue = batcher.BatchQueue(
      tensor_dict,
      batch_size=batch_size_per_clone,
      batch_queue_capacity=batch_queue_capacity,
      num_batch_queue_threads=num_batch_queue_threads,
      prefetch_queue_capacity=prefetch_queue_capacity)
  return input_queue
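# A hedged consumption sketch for the function above: how the returned queue
# might be drained in a TF 1.x training loop. Only input_queue.dequeue() is
# taken from the code above; `my_create_tensor_dict_fn` is a hypothetical
# reader that must yield both the image and audio fields this variant
# expects, and the capacity values are placeholders.
input_queue = create_input_queue(
    batch_size_per_clone=8,
    create_tensor_dict_fn=my_create_tensor_dict_fn,  # hypothetical reader fn
    batch_queue_capacity=150,
    num_batch_queue_threads=8,
    prefetch_queue_capacity=10,
    data_augmentation_options=[])

tensor_dicts = input_queue.dequeue()  # one tensor_dict per example
images = [d[fields.InputDataFields.image] for d in tensor_dicts]
audios = [d[fields.InputDataFields.audio] for d in tensor_dicts]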
def _create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
                        batch_queue_capacity, num_batch_queue_threads,
                        prefetch_queue_capacity, data_augmentation_options):
  """Sets up reader, prefetcher and returns input queue.

  Args:
    batch_size_per_clone: batch size to use per clone.
    create_tensor_dict_fn: function to create tensor dictionary.
    batch_queue_capacity: maximum number of elements to store within a queue.
    num_batch_queue_threads: number of threads to use for batching.
    prefetch_queue_capacity: maximum capacity of the queue used to prefetch
      assembled batches.
    data_augmentation_options: a list of tuples, where each tuple contains a
      data augmentation function and a dictionary containing arguments and
      their values (see preprocessor.py).

  Returns:
    input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
      (which hold images, boxes and targets). To get a batch of tensor_dicts,
      call input_queue.dequeue().
  """
  tensor_dict = create_tensor_dict_fn()

  # The image arrives as a sparse tensor in this variant; densify it before
  # prepending the batch dimension.
  img = tensor_dict[fields.InputDataFields.image]
  img = tf.sparse_tensor_to_dense(img)
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(img, 0)

  images = tensor_dict[fields.InputDataFields.image]
  float_images = tf.to_float(images)
  tensor_dict[fields.InputDataFields.image] = float_images

  if data_augmentation_options:
    tensor_dict = preprocessor.preprocess(tensor_dict,
                                          data_augmentation_options)

  input_queue = batcher.BatchQueue(
      tensor_dict,
      batch_size=batch_size_per_clone,
      batch_queue_capacity=batch_queue_capacity,
      num_batch_queue_threads=num_batch_queue_threads,
      prefetch_queue_capacity=prefetch_queue_capacity)
  return input_queue
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(
    self, verbose=False):
  print('\n=========================================================='
        '================')
  print('test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension')
  with self.test_session() as sess:
    batch_size = 3
    num_batches = 2
    examples = tf.Variable(tf.constant(2, dtype=tf.int32))
    # Increments 'ref' until it reaches 'limit'.
    counter = examples.count_up_to(limit=num_batches * batch_size + 2)
    if verbose:
      counter = tf.Print(counter, [counter])
    boxes = tf.tile(input=tf.reshape(tf.range(4), [1, 4]),
                    multiples=tf.stack([counter, tf.constant(1)]))
    if verbose:
      boxes = tf.Print(boxes, [boxes])
      boxes = tf.Print(boxes, [tf.shape(boxes)])
    batch_queue = batcher.BatchQueue(
        tensor_dict={'boxes': boxes},
        batch_size=batch_size,
        batch_queue_capacity=100,
        num_batch_queue_threads=1,
        prefetch_queue_capacity=100)
    batch = batch_queue.dequeue()

    for tensor_dict in batch:
      for tensor in tensor_dict.values():
        self.assertAllEqual([None, 4], tensor.get_shape().as_list())

    tf.initialize_all_variables().run()
    with slim.queues.QueueRunners(sess):
      i = 2
      for _ in range(num_batches):
        batch_np = sess.run(batch)
        for tensor_dict in batch_np:
          for tensor in tensor_dict.values():
            self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1)))
        i += 1
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(batch)
def _create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
                        batch_queue_capacity, num_batch_queue_threads,
                        prefetch_queue_capacity, data_augmentation_options):
  """Sets up reader, prefetcher and returns input queue.

  Args:
    batch_size_per_clone: batch size to use per clone.
    create_tensor_dict_fn: function to create tensor dictionary.
    batch_queue_capacity: maximum number of elements to store within a queue.
    num_batch_queue_threads: number of threads to use for batching.
    prefetch_queue_capacity: maximum capacity of the queue used to prefetch
      assembled batches.
    data_augmentation_options: a list of tuples, where each tuple contains a
      data augmentation function and a dictionary containing arguments and
      their values (see preprocessor.py).

  Returns:
    input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
      (which hold images, boxes and targets). To get a batch of tensor_dicts,
      call input_queue.dequeue().
  """
  tensor_dict = create_tensor_dict_fn()
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tensor_dict[fields.InputDataFields.image], 0)

  images = tensor_dict[fields.InputDataFields.image]
  float_images = tf.to_float(images)
  tensor_dict[fields.InputDataFields.image] = float_images

  next_images = tensor_dict.get(fields.InputDataFields.next_image)
  if next_images is not None:
    next_float_images = tf.to_float(next_images)
    tensor_dict[fields.InputDataFields.next_image] = next_float_images

  if data_augmentation_options:
    # TODO: handle next_image, depth and flow to re-enable augmentations.
    tensor_dict = preprocessor.preprocess(tensor_dict,
                                          data_augmentation_options)

  input_queue = batcher.BatchQueue(
      tensor_dict,
      batch_size=batch_size_per_clone,
      batch_queue_capacity=batch_queue_capacity,
      num_batch_queue_threads=num_batch_queue_threads,
      prefetch_queue_capacity=prefetch_queue_capacity)
  return input_queue
def _create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
                        batch_queue_capacity, num_batch_queue_threads,
                        prefetch_queue_capacity, data_augmentation_options):
  """Sets up reader, prefetcher and returns input queue.

  Args:
    batch_size_per_clone: batch size to use per clone.
    create_tensor_dict_fn: function to create tensor dictionary.
    batch_queue_capacity: maximum number of elements to store within a queue.
    num_batch_queue_threads: number of threads to use for batching.
    prefetch_queue_capacity: maximum capacity of the queue used to prefetch
      assembled batches.
    data_augmentation_options: a list of tuples, where each tuple contains a
      data augmentation function and a dictionary containing arguments and
      their values (see preprocessor.py).

  Returns:
    input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
      (which hold images, boxes and targets). To get a batch of tensor_dicts,
      call input_queue.dequeue().
  """
  tensor_dict = create_tensor_dict_fn()
  # Prepend a batch dimension to the image tensor.
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tensor_dict[fields.InputDataFields.image], 0)

  images = tensor_dict[fields.InputDataFields.image]
  # Cast the image data to float and store it back in the tensor dict.
  float_images = tf.to_float(images)
  tensor_dict[fields.InputDataFields.image] = float_images

  if data_augmentation_options:
    # Preprocess returns a tensor_dict containing the augmented images,
    # bounding boxes, etc.
    tensor_dict = preprocessor.preprocess(tensor_dict,
                                          data_augmentation_options)

  input_queue = batcher.BatchQueue(
      tensor_dict,
      batch_size=batch_size_per_clone,
      batch_queue_capacity=batch_queue_capacity,
      num_batch_queue_threads=num_batch_queue_threads,
      prefetch_queue_capacity=prefetch_queue_capacity)
  return input_queue
def _create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
                        batch_queue_capacity, num_batch_queue_threads,
                        prefetch_queue_capacity, data_augmentation_options,
                        ignore_options=None, mtl_window=False,
                        mtl_edgemask=False):
  """Sets up reader, prefetcher and returns input queue.

  Args:
    batch_size_per_clone: batch size to use per clone.
    create_tensor_dict_fn: function to create tensor dictionary.
    batch_queue_capacity: maximum number of elements to store within a queue.
    num_batch_queue_threads: number of threads to use for batching.
    prefetch_queue_capacity: maximum capacity of the queue used to prefetch
      assembled batches.
    data_augmentation_options: a list of tuples, where each tuple contains a
      data augmentation function and a dictionary containing arguments and
      their values (see preprocessor.py).
    ignore_options: exception conditions for the training loss (see
      preprocessor.make_ignore_list).

  Returns:
    input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
      (which hold images, boxes and targets). To get a batch of tensor_dicts,
      call input_queue.dequeue().
  """
  tensor_dict = create_tensor_dict_fn()
  tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
      tensor_dict[fields.InputDataFields.image], 0)

  images = tensor_dict[fields.InputDataFields.image]
  float_images = tf.to_float(images)
  tensor_dict[fields.InputDataFields.image] = float_images

  preprocessor.make_ignore_list(tensor_dict, ignore_options)

  # Pass the extra MTL tensors as arguments to the random_horizontal_flip
  # option so they are transformed together with the image.
  if mtl_window:
    for option in data_augmentation_options:
      if 'random_horizontal_flip' in option[0].func_name:
        option[1][fields.InputDataFields.window_boxes] = tensor_dict[
            fields.InputDataFields.window_boxes]

  if mtl_edgemask:
    for option in data_augmentation_options:
      if 'random_horizontal_flip' in option[0].func_name:
        option[1][fields.InputDataFields.groundtruth_edgemask_masks] = (
            tensor_dict[fields.InputDataFields.groundtruth_edgemask_masks])

  if data_augmentation_options:
    tensor_dict = preprocessor.preprocess(tensor_dict,
                                          data_augmentation_options,
                                          mtl_window=mtl_window,
                                          mtl_edgemask=mtl_edgemask)

  input_queue = batcher.BatchQueue(
      tensor_dict,
      batch_size=batch_size_per_clone,
      batch_queue_capacity=batch_queue_capacity,
      num_batch_queue_threads=num_batch_queue_threads,
      prefetch_queue_capacity=prefetch_queue_capacity)
  return input_queue
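# Every variant above takes data_augmentation_options in the same shape: a
# list of (function, kwargs) tuples from preprocessor.py. A small illustrative
# list is sketched below; random_horizontal_flip is the name the MTL branches
# above test for, while random_crop_image and its min_object_covered argument
# are assumptions about the available preprocessor functions.
from object_detection.core import preprocessor

data_augmentation_options = [
    (preprocessor.random_horizontal_flip, {}),  # flip image and boxes together
    (preprocessor.random_crop_image, {'min_object_covered': 1.0}),
]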