def eval_loop(self):
  """Builds the TPU evaluation loop.

  Runs `self.eval_step` for `self.eval_steps` iterations via
  `training_loop.repeat`, accumulating per-step predictions into a buffer
  that is finally pushed to the host through the TPU outfeed.

  Returns:
    A `tf.no_op()` whose execution is gated on the outfeed enqueue.
  """
  # Each replica processes an equal slice of the global eval batch.
  replica_batch = self.eval_batch_size // self.num_replicas
  tf.get_variable_scope().reuse_variables()
  # Buffer shape: one [replica_batch, 2] prediction tensor per eval step.
  # NOTE(review): the trailing dim of 2 presumably matches eval_step's
  # output layout — confirm against eval_step.
  pred_buffer = tf.zeros([self.eval_steps, replica_batch, 2])
  # The loop carries (step_counter, prediction_buffer); only the filled
  # buffer is needed afterwards.
  _, pred_buffer = training_loop.repeat(
      int(self.eval_steps), self.eval_step,
      [tf.constant(0), pred_buffer])
  enqueue = tpu_ops.outfeed_enqueue_tuple([pred_buffer])
  with tf.control_dependencies([enqueue]):
    return tf.no_op()
def _DecodeStep():
  """Decode call to be compiled for TPU.

  Dequeues one input batch from the TPU infeed, runs the task's Decode(),
  stashes the resulting metrics NestedMap on `self`, and enqueues the
  flattened metrics onto the outfeed for the host to consume.

  Returns:
    A single-element list holding the outfeed enqueue op.
  """
  batch = self._model_task.input_generator.TpuDequeueBatch()
  decode_metrics = self._model_task.Decode(batch)
  # Kept on self so the host-side dequeue loop can recover the structure
  # when unflattening the outfeed tensors.
  self.metrics_nm = py_utils.NestedMap(decode_metrics)
  # Under SPMD partitioning the enqueue must be pinned to core 0;
  # otherwise leave device placement to the compiler.
  target_device = tpu.core(0) if self.spmd else ''
  with tf.device(target_device):
    enqueue_op = tpu_ops.outfeed_enqueue_tuple(self.metrics_nm.Flatten())
    return [enqueue_op]
def _DecodeStep():
  """Decode call to be compiled for TPU.

  Instantiates the decode model/task under opportunistic variable reuse
  (sharing variables with the training graph) and eval-mode cluster
  settings, then dequeues a batch, runs Decode(), and enqueues the
  flattened decode metrics onto the TPU outfeed.

  Returns:
    A single-element list holding the outfeed enqueue op.
  """
  # Reuse already-created variables where possible so the decode graph
  # shares weights with the training graph; SetEval(True) puts the
  # cluster into evaluation mode for model construction.
  with py_utils.OpportunisticVariableReuseScope(True), \
       cluster_factory.SetEval(True):
    self._decode_model = self._decode_task_params.Instantiate()
    self._decode_model_task = self._decode_model.GetTask()
    self._decode_model_task.AddChild('input', self._decode_input)
    batch = self._decode_model_task.input_generator.TpuDequeueBatch()
    decode_metrics = self._decode_model_task.Decode(batch)
    # Saved on self so the host-side loop can unflatten the outfeed.
    self.metrics_nm = py_utils.NestedMap(decode_metrics)
    # Pin the enqueue to core 0 under SPMD; otherwise let placement float.
    target_device = tpu.core(0) if self.spmd else ''
    with tf.device(target_device):
      enqueue_op = tpu_ops.outfeed_enqueue_tuple(self.metrics_nm.Flatten())
      return [enqueue_op]
def tpu_eval_step():
  """Generate the TPU graph.

  Dequeues one batch from the eval infeed, repacks it into the feature
  structure, runs `model_fn` in PREDICT mode, records the prediction
  names/tensors on `self`, and enqueues the predictions to the outfeed.

  Returns:
    A `tf.no_op()` gated on the outfeed enqueue.
  """
  flat_values = self.eval_infeed_queue[0].generate_dequeue_op(tpu_device=0)
  inputs = data_nest.pack_sequence_as(self.eval_feature_structure,
                                      flat_values)
  spec = model_fn(inputs["features"], None,
                  tf.estimator.ModeKeys.PREDICT, params)
  # Record name/tensor pairs in matching order so the host dequeue loop
  # can rebuild the prediction dict.
  for name, tensor in six.iteritems(spec.predictions):
    self.outfeed_names.append(name)
    self.outfeed_tensors.append(tensor)
  # NOTE(review): collapsed source is ambiguous on whether the
  # control_dependencies scope nests inside the device scope; placing it
  # outside is graph-equivalent for the gated no_op.
  with tf.device(
      device_for_tpu_core(get_host(self.resolver, self.hparams))):
    enqueue_ops = tpu_ops.outfeed_enqueue_tuple(self.outfeed_tensors)
  with tf.control_dependencies([enqueue_ops]):
    return tf.no_op()
def eval_step(self):
  """One evaluation step.

  Dequeues the eval infeed, trims each tensor to the shape declared by the
  eval feature structure, repacks into (features, labels), runs the model
  in eval mode, and enqueues the flattened predictions to the outfeed.

  Returns:
    A single-element list holding the outfeed enqueue op.
  """
  dequeued = self.infeed_op[False].generate_dequeue_op()
  flat_spec = tf.nest.flatten(self.feature_structure[False])
  # Slice each dequeued tensor down to its declared spec shape (the
  # infeed may deliver padded tensors).
  dequeued = [
      tf.slice(tensor, [0] * tensor.shape.ndims, spec.shape)
      for tensor, spec in zip(dequeued, flat_spec)
  ]
  if self.eval_has_labels:
    features, labels = tf.nest.pack_sequence_as(
        self.feature_structure[False], dequeued)
  else:
    features = tf.nest.pack_sequence_as(self.feature_structure[False],
                                        dequeued)
    labels = None
  self.maybe_add_embedding_features(features, False)
  # model_fn(..., False) => eval mode; only the predictions are kept.
  _, self.predict_output = self.model_fn(features, labels, False)
  # Reserve one host-side dequeue slot per prediction tensor.
  for _ in self.predict_output:
    self.dequeue_ops.append([])
  with tf.device(device_for_tpu_core(self.get_host(0))):
    flat_predictions = tf.nest.flatten(self.predict_output)
    return [tpu_ops.outfeed_enqueue_tuple(flat_predictions)]
def _OutfeedEnqueue(self, per_example_tensors):
  """Enqueues per-example tensors onto the TPU outfeed.

  Args:
    per_example_tensors: A dict-like collection of per-example tensors;
      may be empty.

  Returns:
    The outfeed enqueue op, or `tf.no_op()` when there is nothing to send.
  """
  # Nothing to ship to the host — emit a placeholder op instead.
  if not per_example_tensors:
    return tf.no_op()
  tensors_nm = py_utils.NestedMap(per_example_tensors)
  return tpu_ops.outfeed_enqueue_tuple(tensors_nm.Flatten())