Example #1
import tensorflow as tf


def test_priority_queue():

    priority = tf.placeholder(shape=(None), dtype=tf.int64)
    image_path = tf.placeholder(shape=(None), dtype=tf.string)
    image_label = tf.placeholder(shape=(None), dtype=tf.int64)

    # The int64 priority component is implicit for a PriorityQueue;
    # types/shapes describe only the value components (path and label).
    queue = tf.PriorityQueue(capacity=10,
                             types=[tf.string, tf.int64],
                             shapes=[(), ()])

    enqueue_op = queue.enqueue_many([priority, image_path, image_label])
    dequeue_op = queue.dequeue()

    session = tf.Session()

    session.run(enqueue_op,
                feed_dict={
                    priority: [2, 1, 4, 3],
                    image_path: ['item2', 'item1', 'item4', 'item3'],
                    image_label: [0, 1, 1, 0]
                })

    output = session.run(dequeue_op)
    # Lowest priority is dequeued first: [1, b'item1', 1]

    print(output)

    return
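For contrast, here is a self-contained sketch (TF 1.x assumed, the names and feed values are illustrative only) showing that dequeue_many also returns elements in ascending priority order:

import tensorflow as tf

priority = tf.placeholder(dtype=tf.int64)
value = tf.placeholder(dtype=tf.string)

queue = tf.PriorityQueue(capacity=10, types=[tf.string], shapes=[()])
enqueue_op = queue.enqueue_many([priority, value])
dequeue_all = queue.dequeue_many(4)

with tf.Session() as session:
    session.run(enqueue_op, feed_dict={priority: [2, 1, 4, 3],
                                       value: ['b', 'a', 'd', 'c']})
    # Elements come back sorted by priority:
    # priorities [1, 2, 3, 4], values [b'a', b'b', b'c', b'd']
    print(session.run(dequeue_all))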
Example #2
def build_queues(N, num_worker):
    """Create the queues shared between the worker tasks and the reducer.

    Every queue is placed on /job:reducer/task:0/cpu:0 and given a
    shared_name so that other graphs can attach to the same queue.
    """
    size_per_task = int(N / num_worker)

    with tf.variable_scope('reducer-0'):
        with tf.device('/job:reducer/task:0/cpu:0'):
            partial_alpha_queue = tf.FIFOQueue(
                capacity=num_worker,
                dtypes=tf.float64,
                shapes=[],
                shared_name='partial_alpha_shared_queue')
            alpha_queue = tf.FIFOQueue(capacity=num_worker,
                                       dtypes=tf.float64,
                                       shapes=[],
                                       shared_name='alpha_shared_queue')
            partial_delta_queue = tf.FIFOQueue(
                capacity=num_worker,
                dtypes=tf.float64,
                shapes=[],
                shared_name='partial_delta_shared_queue')
            beta_queue = tf.FIFOQueue(capacity=num_worker,
                                      dtypes=tf.float64,
                                      shapes=[],
                                      shared_name='beta_shared_queue')
            p_queue = tf.PriorityQueue(capacity=num_worker,
                                       types=(tf.float64, tf.float64),
                                       shapes=[[size_per_task, 1],
                                               [size_per_task, 1]],
                                       shared_name='p_shared_queue')
            p_result_queue = tf.FIFOQueue(capacity=num_worker,
                                          dtypes=(tf.float64),
                                          shapes=[N, 1],
                                          shared_name='p_result_shared_queue')

    return partial_alpha_queue, alpha_queue, partial_delta_queue, beta_queue, p_queue, p_result_queue
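The worker side is not shown in this example. The sketch below is only an assumption based on the usual TF 1.x shared_name pattern: a queue op constructed on the same device with the same shared_name (and matching attributes) resolves to the same underlying queue. The helper name and the idea of pushing a partial alpha value are illustrative, not part of the original code.

import tensorflow as tf

def worker_enqueue_partial_alpha(partial_alpha, num_worker):
    # Hypothetical worker-side helper: re-declare the queue on the reducer
    # device with the same shared_name to attach to the existing queue.
    with tf.device('/job:reducer/task:0/cpu:0'):
        partial_alpha_queue = tf.FIFOQueue(
            capacity=num_worker,
            dtypes=tf.float64,
            shapes=[],
            shared_name='partial_alpha_shared_queue')
    return partial_alpha_queue.enqueue(partial_alpha)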
Example #3
def get_queue(nodes,
              queue_type='fifo',
              batch_size=256,
              capacity=None,
              min_after_dequeue=None,
              shape_flag=True,
              seed=0):
    """ A generic queue for reading data
        Built on top of https://indico.io/blog/tensorflow-data-input-part2-extensions/
    """
    if capacity is None:
        capacity = 2 * batch_size
    if min_after_dequeue is None:
        min_after_dequeue = capacity // 2

    names = []
    dtypes = []
    shapes = []

    for name in nodes.keys():
        names.append(name)
        dtypes.append(nodes[name].dtype)
        if shape_flag:
            shapes.append(nodes[name].get_shape()[1:])
        else:
            shapes.append(nodes[name].get_shape())

    if batch_size == 1:
        shapes = None

    if queue_type == 'random':
        queue = tf.RandomShuffleQueue(capacity=capacity,
                                      min_after_dequeue=min_after_dequeue,
                                      dtypes=dtypes,
                                      shapes=shapes,
                                      names=names,
                                      seed=seed)
    elif queue_type == 'fifo':
        queue = tf.FIFOQueue(capacity=capacity,
                             dtypes=dtypes,
                             shapes=shapes,
                             names=names)
    elif queue_type == 'padding_fifo':
        queue = tf.PaddingFIFOQueue(capacity=capacity,
                                    dtypes=dtypes,
                                    shapes=shapes,
                                    names=names)
    elif queue_type == 'priority':
        queue = tf.PriorityQueue(capacity=capacity,
                                 types=dtypes,
                                 shapes=shapes,
                                 names=names)
    else:
        raise Exception('Queue type %s not recognized' % queue_type)

    return queue
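Because the queue is built with names, its enqueue and dequeue ops work with dictionaries keyed by node name. A usage sketch under assumed inputs (the 'image'/'label' placeholders and their shapes are illustrative, not from the original):

import tensorflow as tf

# Hypothetical input nodes; the names and shapes are illustrative only.
nodes = {
    'image': tf.placeholder(tf.float32, shape=(None, 28, 28, 1)),
    'label': tf.placeholder(tf.int64, shape=(None,)),
}

queue = get_queue(nodes, queue_type='random', batch_size=32)

# With `names` set, enqueue_many takes a dict and dequeue_many returns one.
enqueue_op = queue.enqueue_many(nodes)
batch = queue.dequeue_many(32)  # {'image': ..., 'label': ...}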
Example #4
    def __init__(self, session, queue_size, data_type, share_name, ordered):
        self.session = session
        self.priority = tf.placeholder(data_type)
        self.data = tf.placeholder(data_type)
        if ordered:
            self.queue = tf.PriorityQueue(queue_size, (data_type), (()),
                                          shared_name=share_name)
            self.enqueue_op = self.queue.enqueue((self.priority, self.data))
        else:
            print "creating fifo"
            self.queue = tf.FIFOQueue(queue_size, (data_type, data_type),
                                      shared_name=share_name)
            self.enqueue_op = self.queue.enqueue((self.priority, self.data))
        self.val1 = tf.placeholder(tf.int64)
        self.val2 = tf.placeholder(tf.int64)
        self.compare_op = tf.equal(self.val1, self.val2)

        self.dequeue_op = self.queue.dequeue()
        self.close_op = self.queue.close()
        self.ordered = ordered
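Only the constructor of the wrapper class is shown above. A hedged usage sketch follows, assuming the enclosing class is called SharedQueue (a hypothetical name) and that data_type is tf.int64, so both the priority and the payload are scalar integers:

import tensorflow as tf

session = tf.Session()
# 'SharedQueue' is a hypothetical name for the class whose __init__ is shown above.
q = SharedQueue(session, queue_size=10, data_type=tf.int64,
                share_name='demo_queue', ordered=True)

# Enqueue a few (priority, value) pairs, then dequeue in priority order.
for prio, val in [(3, 30), (1, 10), (2, 20)]:
    session.run(q.enqueue_op, feed_dict={q.priority: prio, q.data: val})

print(session.run(q.dequeue_op))  # lowest priority first: (1, 10)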
Example #5
import tensorflow as tf

priority = tf.placeholder(shape=(None), dtype=tf.int64)
image_path = tf.placeholder(shape=(None), dtype=tf.string)
image_label = tf.placeholder(shape=(None), dtype=tf.int64)

queue = tf.PriorityQueue(capacity=10,
                         types=[tf.string, tf.int64],
                         shapes=[(), ()])

enqueue_op = queue.enqueue_many([priority, image_path, image_label])