def testEvaluateWithEvalFeedDict(self):
  # Create a checkpoint.
  checkpoint_dir = tempfile.mkdtemp('evaluate_with_eval_feed_dict')
  self._train_model(checkpoint_dir, num_steps=1)

  # We need a variable that the saver will try to restore.
  variables.get_or_create_global_step()

  # Create a variable and an eval op that increments it with a placeholder.
  my_var = variables.local_variable(0.0, name='my_var')
  increment = array_ops.placeholder(dtype=dtypes.float32)
  eval_ops = state_ops.assign_add(my_var, increment)

  increment_value = 3
  num_evals = 5
  expected_value = increment_value * num_evals
  final_values = evaluation.evaluate_repeatedly(
      checkpoint_dir=checkpoint_dir,
      eval_ops=eval_ops,
      feed_dict={increment: increment_value},
      final_ops={'my_var': array_ops.identity(my_var)},
      hooks=[
          evaluation.StopAfterNEvalsHook(num_evals),
      ],
      max_number_of_evaluations=1)
  self.assertEqual(final_values['my_var'], expected_value)
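# The evaluation tests in this file call a self._train_model helper that is
# not shown here. The sketch below is an assumption about its shape, inferred
# from how the tests use it: it fits the same logistic_classifier model and
# leaves a checkpoint in checkpoint_dir for the evaluation calls to restore.
# The real helper may differ in detail.
def _train_model(self, checkpoint_dir, num_steps):
  """Trains a simple classifier and writes a checkpoint to checkpoint_dir."""
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = logistic_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    total_loss = losses.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer)

    # training.train saves checkpoints into checkpoint_dir; StopAtStepHook
    # ends training after num_steps global steps.
    training.train(
        train_op,
        checkpoint_dir,
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=num_steps)])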
def testTrainWithLocalVariable(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    local_multiplier = variables_lib.local_variable(1.0)
    tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
    losses.log_loss(tf_labels, tf_predictions)
    total_loss = losses.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer)

    loss = training.train(
        train_op,
        None,
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
        save_summaries_steps=None,
        save_checkpoint_secs=None)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
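# logistic_classifier is likewise assumed rather than shown. Given that the
# tests multiply its output elementwise and feed it to log_loss, a minimal
# sketch is a single fully connected layer with a sigmoid, producing one
# prediction in [0, 1] per input row (the real helper may differ):
def logistic_classifier(inputs):
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)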
def testEvalOpAndFinalOp(self):
  checkpoint_dir = tempfile.mkdtemp('eval_ops_and_final_ops')

  # Train a model for a single step to get a checkpoint.
  self._train_model(checkpoint_dir, num_steps=1)
  checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

  # Create the model so we have something to restore.
  inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  logistic_classifier(inputs)

  num_evals = 5
  final_increment = 9.0

  my_var = variables.local_variable(0.0, name='MyVar')
  eval_ops = state_ops.assign_add(my_var, 1.0)
  final_ops = array_ops.identity(my_var) + final_increment

  final_ops_values = evaluation.evaluate_once(
      checkpoint_path=checkpoint_path,
      eval_ops=eval_ops,
      final_ops={'value': final_ops},
      hooks=[
          evaluation.StopAfterNEvalsHook(num_evals),
      ])
  self.assertEqual(final_ops_values['value'], num_evals + final_increment)
def testOnlyFinalOp(self):
  checkpoint_dir = tempfile.mkdtemp('only_final_ops')

  # Train a model for a single step to get a checkpoint.
  self._train_model(checkpoint_dir, num_steps=1)
  checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

  # Create the model so we have something to restore.
  inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  logistic_classifier(inputs)

  final_increment = 9.0

  my_var = variables.local_variable(0.0, name='MyVar')
  final_ops = array_ops.identity(my_var) + final_increment

  final_ops_values = evaluation.evaluate_once(
      checkpoint_path=checkpoint_path,
      final_ops={'value': final_ops})
  self.assertEqual(final_ops_values['value'], final_increment)
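# Both evaluate_once tests above block on evaluation.wait_for_new_checkpoint
# so they only run once _train_model has actually written a checkpoint. The
# same call also supports continuous evaluation: pass the previously seen
# path as last_checkpoint and it blocks until a newer one appears. A sketch
# of that pattern (the loop and the _continuous_eval name are illustrative,
# not part of the tests above):
def _continuous_eval(self, checkpoint_dir, eval_ops, num_evals):
  last_checkpoint = None
  while True:
    # Returns the path of a checkpoint newer than last_checkpoint, or None
    # if no new checkpoint shows up within the timeout.
    last_checkpoint = evaluation.wait_for_new_checkpoint(
        checkpoint_dir, last_checkpoint=last_checkpoint, timeout=60)
    if last_checkpoint is None:
      break
    evaluation.evaluate_once(
        checkpoint_path=last_checkpoint,
        eval_ops=eval_ops,
        hooks=[evaluation.StopAfterNEvalsHook(num_evals)])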
def testTrainWithLocalVariable(self):
  logdir = tempfile.mkdtemp('tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    local_multiplier = variables_lib2.local_variable(1.0)
    tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
    loss_ops.log_loss(tf_labels, tf_predictions)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op, logdir, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
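# The slim variant above binds the same functionality to different local
# names (variables_lib2, loss_ops, learning). A plausible import block for
# it, inferred from how those names are used; the aliases and exact contrib
# paths are assumptions:
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.slim.python.slim import learning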