def create_dequeue_ops(host_id):
  """Create outfeed dequeue ops."""
  dequeue_ops = []
  tensor_dtypes = []
  tensor_shapes = []
  for v in self.outfeed_tensors:
    tensor_dtypes.append(v.dtype)
    tensor_shapes.append(v.shape)
  with tf.device(utils.device_for_host(self._get_host(host_id))):
    for i in range(FLAGS.num_shards_per_host):
      outfeed = tpu.outfeed_dequeue_tuple(
          dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
      if len(outfeed) == 2:
        # Identify the detections tensor by its rank; the other tensor is the
        # per-example padding indicator.
        if outfeed[0].shape.ndims == 3:
          detections, is_pad = outfeed
        else:
          is_pad, detections = outfeed
        # Drop padded examples before concatenating across shards.
        num_non_pad = tf.shape(is_pad)[0] - tf.reduce_sum(
            tf.cast(is_pad, tf.int32))
        dequeue_ops.append(
            tf.slice(detections, [0, 0, 0], [num_non_pad, -1, -1]))
      else:
        dequeue_ops.append(outfeed)
  dequeue_ops = tf.concat(dequeue_ops, axis=0)
  return dequeue_ops
def create_dequeue_ops():
  """Create outfeed dequeue ops."""
  dequeue_ops = []
  tensor_dtypes = []
  tensor_shapes = []
  for v in self.outfeed_tensors:
    dequeue_ops.append([])
    tensor_dtypes.append(v.dtype)
    tensor_shapes.append(v.shape)
  for i in range(FLAGS.num_shards):
    with tf.device(utils.device_for_host(self._get_host(0))):
      outfeed_tensors = tpu.outfeed_dequeue_tuple(
          dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
      for j, item in enumerate(outfeed_tensors):
        dequeue_ops[j].append(item)
  for j in range(len(outfeed_tensors)):
    dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
  return dequeue_ops
def create_dequeue_ops(host_id):
  """Create outfeed dequeue ops."""
  dequeue_ops = []
  tensor_dtypes = []
  tensor_shapes = []
  for v in self.eval_tensors:
    dequeue_ops.append([])
    tensor_dtypes.append(v.dtype)
    tensor_shapes.append(v.shape)
  for i in range(FLAGS.tpu_cores_per_host):
    with tf.device(device_for_host(self.get_host(host_id))):
      outfeed_tensors = tpu.outfeed_dequeue_tuple(
          dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
      for j, item in enumerate(outfeed_tensors):
        dequeue_ops[j].append(item)
  for j in range(len(outfeed_tensors)):
    dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
  return dequeue_ops
def create_dequeue_ops():
  """Create outfeed dequeue ops."""
  dequeue_ops = []
  tensor_dtypes = []
  tensor_shapes = []
  for v in self.eval_tensors:
    dequeue_ops.append([])
    tensor_dtypes.append(v.dtype)
    tensor_shapes.append(v.shape)
    tf.logging.info("appending %s" % v.name)
  for i in range(FLAGS.num_cores):
    with tf.device(device_for_host()):
      outfeed_tensors = tpu.outfeed_dequeue_tuple(
          dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
      for j, item in enumerate(outfeed_tensors):
        dequeue_ops[j].append(item)
  for j in range(len(outfeed_tensors)):
    dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
  return dequeue_ops
def create_dequeue_ops():
  """Create outfeed dequeue ops."""
  dequeue_ops = []
  tensor_dtypes = []
  tensor_shapes = []
  for v in self.outfeed_tensors:
    dequeue_ops.append([])
    tensor_dtypes.append(v.dtype)
    tensor_shapes.append(v.shape)
  # Currently working only on a donut; change this later to support
  # distributed eval.
  for i in range(FLAGS.tpu_num_shards_per_host):
    with tf.device(low_level_utils.device_for_host(self._get_host(0))):
      outfeed_tensors = tpu.outfeed_dequeue_tuple(
          dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
      for j, item in enumerate(outfeed_tensors):
        dequeue_ops[j].append(item)
  for j in range(len(outfeed_tensors)):
    dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
  return dequeue_ops
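# The sketch below is not part of the original snippets; it is a minimal,
# hypothetical example of how dequeue ops built by one of the variants above
# might be drained on the host. It assumes the TF 1.x Session API and that the
# caller knows how many device steps were enqueued; the names `drain_outfeed`,
# `sess`, and `num_steps` are illustrative assumptions, not existing code.
def drain_outfeed(sess, dequeue_ops, num_steps):
  """Runs the dequeue ops once per enqueued step and collects the results."""
  results = []
  for _ in range(num_steps):
    # Each sess.run drains one tuple (or concatenated batch) from the outfeed.
    results.append(sess.run(dequeue_ops))
  return results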