Example #1
 def do_dequeue_func():
     # Dequeue closure from TensorFlow's batching helpers; `queue`,
     # `batch_size`, `allow_smaller_final_batch`, `name`, `sparse_info`,
     # and `tensors_list` are captured from the enclosing scope.
     if allow_smaller_final_batch:
         dequeued = queue.dequeue_up_to(batch_size, name=name)
     else:
         dequeued = queue.dequeue_many(batch_size, name=name)
     # Re-inflate any sparse tensors flattened before enqueueing, then
     # restore the caller's original structure (dict or list).
     dequeued = _restore_sparse_tensors(dequeued, sparse_info)
     return _as_original_type(tensors_list[0], dequeued)
Example #2
 def testDictInputs(self):
     # Dict values are flattened in sorted-key order
     # ("a", "aa", "b", "bb", "c", "cc"); _as_original_type
     # rebuilds the original dict from that flat list.
     d = {"a": 1, "b": 2, "c": 3, "aa": 11, "bb": 22, "cc": 33}
     l = inp._as_tensor_list(d)
     self.assertEqual([1, 11, 2, 22, 3, 33], l)
     d2 = inp._as_original_type(d, l)
     self.assertEqual(d, d2)
Example #3
 def testListInputs(self):
     # Lists pass through _as_tensor_list unchanged, and
     # _as_original_type returns them as-is.
     l = [1, 2, 3, 11, 22, 33]
     l2 = inp._as_tensor_list(l)
     self.assertEqual(l, l2)
     l3 = inp._as_original_type(l, l2)
     self.assertEqual(l, l3)
Example #4
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import input as tf_input


def _custom_shuffle_batch(tensors,
                          batch_size,
                          capacity,
                          min_after_dequeue,
                          keep_input,
                          num_threads=1,
                          seed=None,
                          enqueue_many=False,
                          shapes=None,
                          allow_smaller_final_batch=False,
                          shared_name=None,
                          name=None,
                          shuffle=False):
    """Custom variant of `shuffle_batch`: when `shuffle=False`, examples are
    batched through a FIFOQueue instead of a RandomShuffleQueue."""

    if context.executing_eagerly():
        raise ValueError(
            "Input pipelines based on Queues are not supported when eager execution"
            " is enabled. Please use tf.data to ingest data into your model"
            " instead.")
    tensor_list = tf_input._as_tensor_list(tensors)
    with ops.name_scope(name, "shuffle_batch",
                        list(tensor_list) + [keep_input]) as name:
        if capacity <= min_after_dequeue:
            raise ValueError(
                "capacity %d must be bigger than min_after_dequeue %d." %
                (capacity, min_after_dequeue))
        tensor_list = tf_input._validate(tensor_list)
        keep_input = tf_input._validate_keep_input(keep_input, enqueue_many)
        tensor_list, sparse_info = tf_input._store_sparse_tensors(
            tensor_list, enqueue_many, keep_input)
        types = tf_input._dtypes([tensor_list])
        shapes = tf_input._shapes([tensor_list], shapes, enqueue_many)

        # Custom part: pick the queue type according to the `shuffle` flag.
        if shuffle:
            queue = data_flow_ops.RandomShuffleQueue(
                capacity=capacity,
                min_after_dequeue=min_after_dequeue,
                seed=seed,
                dtypes=types,
                shapes=shapes,
                shared_name=shared_name)
        else:
            # A FIFOQueue dequeues in enqueue order, so no shuffling happens.
            queue = data_flow_ops.FIFOQueue(capacity=capacity,
                                            dtypes=types,
                                            shapes=shapes,
                                            shared_name=shared_name)

        tf_input._enqueue(queue, tensor_list, num_threads, enqueue_many,
                          keep_input)
        # Fraction of the usable (above-min_after_dequeue) capacity that is
        # occupied; exported as a summary for monitoring queue health.
        full = (math_ops.to_float(
            math_ops.maximum(0, queue.size() - min_after_dequeue)) *
                (1. / (capacity - min_after_dequeue)))

        summary_name = ("fraction_over_%d_of_%d_full" %
                        (min_after_dequeue, capacity - min_after_dequeue))
        summary.scalar(summary_name, full)

        if allow_smaller_final_batch:
            dequeued = queue.dequeue_up_to(batch_size, name=name)
        else:
            dequeued = queue.dequeue_many(batch_size, name=name)

        # Re-inflate any stored sparse tensors, then return the batch in the
        # caller's original structure (dict or list).
        dequeued = tf_input._restore_sparse_tensors(dequeued, sparse_info)
        return tf_input._as_original_type(tensors, dequeued)
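
A minimal usage sketch for the helper above, assuming TF 1.x graph mode; the random input tensor and all batching parameters below are illustrative choices, not from the original:

import tensorflow as tf

# Hypothetical input op standing in for a real reader/parser pipeline.
example = tf.random_uniform([4], seed=42)

batch = _custom_shuffle_batch([example],
                              batch_size=8,
                              capacity=64,
                              min_after_dequeue=16,
                              keep_input=tf.constant(True),
                              shuffle=False)  # FIFOQueue path: deterministic order

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    # Start the enqueue threads registered by tf_input._enqueue.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(batch))  # array of shape (8, 4)
    coord.request_stop()
    coord.join(threads)

Because `_as_original_type` mirrors the structure of `tensors`, passing a dict of tensors here would instead yield a dict of batched tensors with the same keys.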