def test_tpu_estimator_simple_lifecycle(self, use_tpu):
  config = tf.contrib.tpu.RunConfig(master="", tf_random_seed=42)
  estimator = TPUEstimator(
      head=tu.head(),
      subnetwork_generator=SimpleGenerator(
          [_DNNBuilder("dnn", use_tpu=use_tpu)]),
      max_iteration_steps=200,
      mixture_weight_initializer=tf.zeros_initializer(),
      use_bias=True,
      model_dir=self.test_subdirectory,
      config=config,
      use_tpu=use_tpu,
      train_batch_size=64 if use_tpu else 0)
  max_steps = 300

  xor_features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
  xor_labels = [[1.], [0.], [1.], [0.]]
  train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)

  # Train.
  estimator.train(
      input_fn=train_input_fn, steps=None, max_steps=max_steps, hooks=None)

  # Evaluate.
  eval_results = estimator.evaluate(
      input_fn=train_input_fn, steps=10, hooks=None)

  # Predict.
  # TODO: skip predictions on TF versions 1.11 and 1.12 since
  # some TPU hooks seem to be failing on predict.
  predictions = []
  tf_version = LooseVersion(tf.VERSION)
  if (tf_version != LooseVersion("1.11.0") and
      tf_version != LooseVersion("1.12.0")):
    predictions = estimator.predict(
        input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))

  # Export SavedModel.
  def serving_input_fn():
    """Input fn for serving export, starting from serialized example."""
    serialized_example = tf.placeholder(
        dtype=tf.string, shape=(None), name="serialized_example")
    return tf.estimator.export.ServingInputReceiver(
        features={"x": tf.constant([[0., 0.]], name="serving_x")},
        receiver_tensors=serialized_example)

  # Prefer export_saved_model when available; fall back to the deprecated
  # export_savedmodel on older TF versions.
  export_saved_model_fn = getattr(estimator, "export_saved_model", None)
  if not callable(export_saved_model_fn):
    export_saved_model_fn = estimator.export_savedmodel
  export_saved_model_fn(
      export_dir_base=estimator.model_dir,
      serving_input_receiver_fn=serving_input_fn)

  self.assertAlmostEqual(0.32416, eval_results["loss"], places=3)
  self.assertEqual(max_steps, eval_results["global_step"])
  for prediction in predictions:
    self.assertIsNotNone(prediction["predictions"])

def test_tpu_estimator_simple_lifecycle(self, use_tpu, subnetwork_generator,
                                        want_loss):
  config = tf_compat.v1.estimator.tpu.RunConfig(master="", tf_random_seed=42)
  estimator = TPUEstimator(
      head=tf.contrib.estimator.regression_head(
          loss_reduction=tf_compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE),
      subnetwork_generator=subnetwork_generator,
      max_iteration_steps=10,
      model_dir=self.test_subdirectory,
      config=config,
      use_tpu=use_tpu,
      train_batch_size=64 if use_tpu else 0)
  max_steps = 30

  xor_features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
  xor_labels = [[1.], [0.], [1.], [0.]]
  train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)

  # Train.
  estimator.train(
      input_fn=train_input_fn, steps=None, max_steps=max_steps, hooks=None)

  # Evaluate.
  eval_results = estimator.evaluate(
      input_fn=train_input_fn, steps=1, hooks=None)

  # Predict.
  predictions = estimator.predict(
      input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))

  # Export SavedModel.
  def serving_input_fn():
    """Input fn for serving export, starting from serialized example."""
    serialized_example = tf.placeholder(
        dtype=tf.string, shape=(None), name="serialized_example")
    return tf.estimator.export.ServingInputReceiver(
        features={"x": tf.constant([[0., 0.]], name="serving_x")},
        receiver_tensors=serialized_example)

  # Prefer export_saved_model when available; fall back to the deprecated
  # export_savedmodel on older TF versions.
  export_saved_model_fn = getattr(estimator, "export_saved_model", None)
  if not callable(export_saved_model_fn):
    export_saved_model_fn = estimator.export_savedmodel
  export_saved_model_fn(
      export_dir_base=estimator.model_dir,
      serving_input_receiver_fn=serving_input_fn)

  self.assertAlmostEqual(want_loss, eval_results["loss"], places=2)
  self.assertEqual(max_steps, eval_results["global_step"])
  self.assertEqual(2, eval_results["iteration"])
  for prediction in predictions:
    self.assertIsNotNone(prediction["predictions"])

def test_tpu_estimator_simple_lifecycle(self, use_tpu, subnetwork_generator,
                                        want_loss):
  config = tf.compat.v1.estimator.tpu.RunConfig(master="", tf_random_seed=42)
  estimator = TPUEstimator(
      # TODO: Add test with estimator Head v2.
      head=make_regression_head(use_tpu),
      subnetwork_generator=subnetwork_generator,
      max_iteration_steps=10,
      model_dir=self.test_subdirectory,
      config=config,
      use_tpu=use_tpu,
      train_batch_size=64 if use_tpu else 0)
  max_steps = 30

  xor_features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
  xor_labels = [[1.], [0.], [1.], [0.]]
  train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)

  # Train.
  estimator.train(
      input_fn=train_input_fn, steps=None, max_steps=max_steps, hooks=None)

  # Evaluate.
  eval_results = estimator.evaluate(
      input_fn=train_input_fn, steps=1, hooks=None)

  # Predict.
  predictions = estimator.predict(
      input_fn=tu.dataset_input_fn(features=[0., 0.], return_dataset=True))

  # We need to iterate over all the predictions before moving on, otherwise
  # the TPU will not be shut down.
  for prediction in predictions:
    self.assertIsNotNone(prediction["predictions"])

  # Export SavedModel.
  def serving_input_fn():
    """Input fn for serving export, starting from serialized example."""
    serialized_example = tf.compat.v1.placeholder(
        dtype=tf.string, shape=(None), name="serialized_example")
    return tf.estimator.export.ServingInputReceiver(
        features={"x": tf.constant([[0., 0.]], name="serving_x")},
        receiver_tensors=serialized_example)

  estimator.export_saved_model(
      export_dir_base=estimator.model_dir,
      serving_input_receiver_fn=serving_input_fn)

  self.assertAlmostEqual(want_loss, eval_results["loss"], places=2)
  self.assertEqual(max_steps, eval_results["global_step"])
  self.assertEqual(2, eval_results["iteration"])

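# The lifecycle tests above all feed data through tu.dummy_input_fn, whose
# definition lives in the shared test utilities. As a rough sketch only (the
# name, the "x" feature key, and the lack of batching/shuffling here are
# assumptions, not the canonical implementation), such a helper just returns
# an Estimator-style input_fn that yields the fixed features and labels as
# constant tensors:
def dummy_input_fn_sketch(features, labels):
  """Hypothetical stand-in for tu.dummy_input_fn."""

  def _input_fn(params=None):
    # TPUEstimator invokes input_fn with a `params` dict; unused here.
    del params
    return {
        "x": tf.constant(features, name="features")
    }, tf.constant(labels, name="labels")

  return _input_fn
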
def test_tpu_estimator_summaries(self):
  config = tf.contrib.tpu.RunConfig(tf_random_seed=42)
  estimator = TPUEstimator(
      head=tu.head(),
      subnetwork_generator=SimpleGenerator([_DNNBuilder("dnn")]),
      max_iteration_steps=200,
      model_dir=self.test_subdirectory,
      config=config)
  train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])

  with fake_run_on_tpu():
    estimator.train(input_fn=train_input_fn, max_steps=3)
  estimator.evaluate(input_fn=train_input_fn, steps=3)

  self.assertFalse(
      _summaries_exist(self.test_subdirectory + "/candidate/t0_dnn"))
  self.assertTrue(
      _summaries_exist(self.test_subdirectory + "/candidate/t0_dnn/eval"))

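# The test above exercises the TPU code path on CPU via fake_run_on_tpu and
# checks event files via _summaries_exist, both defined elsewhere in this
# file. Minimal sketches of what such helpers could look like, assuming
# fake_run_on_tpu works by forcing a nonzero shard count in TF's TPU context
# (the tpu_function import path varies across TF versions, and the real
# helpers may do more):
import contextlib
import glob

from tensorflow.python.tpu import tpu_function  # Internal TF API.


@contextlib.contextmanager
def fake_run_on_tpu_sketch():
  """Pretends the enclosed code runs inside a TPU shard context."""
  original_shards = tpu_function.get_tpu_context().number_of_shards
  tpu_function.get_tpu_context().set_number_of_shards(2)
  try:
    yield
  finally:
    tpu_function.get_tpu_context().set_number_of_shards(original_shards)


def summaries_exist_sketch(dir_):
  """Hypothetical stand-in for _summaries_exist: any event files present?"""
  return bool(glob.glob(os.path.join(dir_, "events.out.tfevents*")))
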
def test_tpu_estimator_summaries(self, use_tpu, want_loss, want_adanet_loss,
                                 want_eval_summary_loss, want_predictions):
  config = tf.contrib.tpu.RunConfig(
      tf_random_seed=42, save_summary_steps=100, log_step_count_steps=100)
  assert config.log_step_count_steps

  def metric_fn(predictions):
    return {
        "predictions": tf_compat.v1.metrics.mean(predictions["predictions"])
    }

  max_steps = 100
  estimator = TPUEstimator(
      head=tu.head(),
      subnetwork_generator=SimpleGenerator(
          [_DNNBuilder("dnn", use_tpu=use_tpu)]),
      max_iteration_steps=max_steps,
      model_dir=self.test_subdirectory,
      metric_fn=metric_fn,
      config=config,
      use_tpu=use_tpu,
      train_batch_size=64 if use_tpu else 0)
  xor_features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
  xor_labels = [[1.], [0.], [1.], [0.]]
  train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)

  estimator.train(input_fn=train_input_fn, max_steps=max_steps)
  eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
  self.assertAlmostEqual(want_loss, eval_results["loss"], places=2)
  self.assertEqual(max_steps, eval_results["global_step"])
  self.assertEqual(0, eval_results["iteration"])

  subnetwork_subdir = os.path.join(self.test_subdirectory,
                                   "subnetwork/t0_dnn")
  ensemble_subdir = os.path.join(
      self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")

  # TODO: Why is the adanet_loss written to 'loss'?
  self.assertAlmostEqual(
      want_adanet_loss,
      _get_summary_value("loss", self.test_subdirectory),
      places=1)
  self.assertEqual(
      0.,
      _get_summary_value("iteration/adanet/iteration",
                         self.test_subdirectory))
  self.assertAlmostEqual(
      3., _get_summary_value("scalar", subnetwork_subdir), places=3)
  self.assertEqual((3, 3, 1),
                   _get_summary_value("image/image/0", subnetwork_subdir))
  self.assertAlmostEqual(
      5., _get_summary_value("nested/scalar", subnetwork_subdir), places=3)
  self.assertAlmostEqual(
      want_adanet_loss,
      _get_summary_value("adanet_loss/adanet/adanet_weighted_ensemble",
                         ensemble_subdir),
      places=1)
  self.assertAlmostEqual(
      0.,
      _get_summary_value(
          "complexity_regularization/adanet/adanet_weighted_ensemble",
          ensemble_subdir),
      places=1)
  self.assertAlmostEqual(
      1.,
      _get_summary_value(
          "mixture_weight_norms/adanet/"
          "adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
      places=1)

  # Eval metric summaries are always written out during eval.
  subnetwork_eval_subdir = os.path.join(subnetwork_subdir, "eval")
  self.assertAlmostEqual(
      want_eval_summary_loss,
      _get_summary_value("loss", subnetwork_eval_subdir),
      places=1)
  self.assertAlmostEqual(
      want_eval_summary_loss,
      _get_summary_value("average_loss", subnetwork_eval_subdir),
      places=1)
  self.assertAlmostEqual(
      want_predictions,
      _get_summary_value("predictions", subnetwork_eval_subdir),
      places=3)

  eval_subdir = os.path.join(self.test_subdirectory, "eval")
  ensemble_eval_subdir = os.path.join(ensemble_subdir, "eval")
  for subdir in [ensemble_eval_subdir, eval_subdir]:
    self.assertEqual([b"| dnn |"],
                     _get_summary_value("architecture/adanet/ensembles/0",
                                        subdir))
    if subdir == eval_subdir:
      self.assertAlmostEqual(
          want_loss, _get_summary_value("loss", subdir), places=1)
    self.assertAlmostEqual(
        want_eval_summary_loss,
        _get_summary_value("average_loss", subdir),
        places=1)

def test_tpu_estimator_summaries(self, use_tpu):
  config = tf.contrib.tpu.RunConfig(
      tf_random_seed=42, save_summary_steps=2, log_step_count_steps=1)
  assert config.log_step_count_steps
  estimator = TPUEstimator(
      head=tu.head(),
      subnetwork_generator=SimpleGenerator(
          [_DNNBuilder("dnn", use_tpu=use_tpu)]),
      max_iteration_steps=200,
      model_dir=self.test_subdirectory,
      config=config,
      use_tpu=use_tpu,
      train_batch_size=64 if use_tpu else 0)
  xor_features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
  xor_labels = [[1.], [0.], [1.], [0.]]
  train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)

  estimator.train(input_fn=train_input_fn, max_steps=3)
  estimator.evaluate(input_fn=train_input_fn, steps=3)

  ensemble_loss = .5
  self.assertAlmostEqual(
      ensemble_loss,
      _check_eventfile_for_keyword("loss", self.test_subdirectory),
      places=1)
  self.assertIsNotNone(
      _check_eventfile_for_keyword("global_step/sec",
                                   self.test_subdirectory))
  eval_subdir = os.path.join(self.test_subdirectory, "eval")
  self.assertAlmostEqual(
      ensemble_loss,
      _check_eventfile_for_keyword("loss", eval_subdir),
      places=1)
  self.assertEqual(
      0.,
      _check_eventfile_for_keyword("iteration/adanet/iteration",
                                   self.test_subdirectory))

  candidate_subdir = os.path.join(self.test_subdirectory, "candidate/t0_dnn")
  self.assertAlmostEqual(
      3., _check_eventfile_for_keyword("scalar", candidate_subdir), places=3)
  self.assertEqual(
      (3, 3, 1),
      _check_eventfile_for_keyword("image/image/0", candidate_subdir))
  self.assertAlmostEqual(
      5.,
      _check_eventfile_for_keyword("nested/scalar", candidate_subdir),
      places=1)
  self.assertAlmostEqual(
      ensemble_loss,
      _check_eventfile_for_keyword(
          "adanet_loss/adanet/adanet_weighted_ensemble", candidate_subdir),
      places=1)
  self.assertAlmostEqual(
      0.,
      _check_eventfile_for_keyword(
          "complexity_regularization/adanet/adanet_weighted_ensemble",
          candidate_subdir),
      places=1)
  self.assertAlmostEqual(
      1.,
      _check_eventfile_for_keyword(
          "mixture_weight_norms/adanet/"
          "adanet_weighted_ensemble/subnetwork_0", candidate_subdir),
      places=1)

def test_tpu_estimator_summaries(self, use_tpu):
  config = tf.contrib.tpu.RunConfig(
      tf_random_seed=42, save_summary_steps=2, log_step_count_steps=1)
  assert config.log_step_count_steps
  estimator = TPUEstimator(
      head=tu.head(),
      subnetwork_generator=SimpleGenerator(
          [_DNNBuilder("dnn", use_tpu=use_tpu)]),
      max_iteration_steps=200,
      model_dir=self.test_subdirectory,
      config=config,
      use_tpu=use_tpu,
      train_batch_size=64 if use_tpu else 0)
  xor_features = [[1., 0.], [0., 0.], [0., 1.], [1., 1.]]
  xor_labels = [[1.], [0.], [1.], [0.]]
  train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)

  estimator.train(input_fn=train_input_fn, max_steps=3)
  estimator.evaluate(input_fn=train_input_fn, steps=3)

  subnetwork_subdir = os.path.join(self.test_subdirectory,
                                   "subnetwork/t0_dnn")
  ensemble_loss = .5
  ensemble_subdir = os.path.join(self.test_subdirectory,
                                 "ensemble/t0_dnn_complexity_regularized")

  self.assertAlmostEqual(
      ensemble_loss,
      _get_summary_value("loss", self.test_subdirectory),
      places=1)
  self.assertEqual(
      0.,
      _get_summary_value("iteration/adanet/iteration",
                         self.test_subdirectory))
  self.assertAlmostEqual(
      3., _get_summary_value("scalar", subnetwork_subdir), places=3)
  self.assertEqual((3, 3, 1),
                   _get_summary_value("image/image/0", subnetwork_subdir))
  self.assertAlmostEqual(
      5., _get_summary_value("nested/scalar", subnetwork_subdir), places=3)
  self.assertAlmostEqual(
      ensemble_loss,
      _get_summary_value("adanet_loss/adanet/adanet_weighted_ensemble",
                         ensemble_subdir),
      places=1)
  self.assertAlmostEqual(
      0.,
      _get_summary_value(
          "complexity_regularization/adanet/adanet_weighted_ensemble",
          ensemble_subdir),
      places=1)
  self.assertAlmostEqual(
      1.,
      _get_summary_value(
          "mixture_weight_norms/adanet/"
          "adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
      places=1)

  # Eval metric summaries are always written out during eval.
  subnetwork_eval_subdir = os.path.join(subnetwork_subdir, "eval")
  if use_tpu:
    # TODO: Why is subnetwork eval loss 0.0 when use_tpu=False?
    self.assertAlmostEqual(
        ensemble_loss,
        _get_summary_value("loss", subnetwork_eval_subdir),
        places=1)
    self.assertAlmostEqual(
        ensemble_loss,
        _get_summary_value("average_loss", subnetwork_eval_subdir),
        places=1)

  eval_subdir = os.path.join(self.test_subdirectory, "eval")
  ensemble_eval_subdir = os.path.join(ensemble_subdir, "eval")
  for subdir in [ensemble_eval_subdir, eval_subdir]:
    self.assertEqual([b"| dnn |"],
                     _get_summary_value("architecture/adanet/ensembles/0",
                                        subdir))
    if subdir == eval_subdir:
      self.assertAlmostEqual(
          ensemble_loss, _get_summary_value("loss", subdir), places=1)
    self.assertAlmostEqual(
        ensemble_loss, _get_summary_value("average_loss", subdir), places=1)

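# The summary tests above read values back out of TensorBoard event files via
# _get_summary_value (and the older _check_eventfile_for_keyword). A minimal
# sketch, assuming the helper scans the event files written under a directory
# and returns the last simple_value recorded for the given tag; this is a
# hypothetical stand-in that only handles scalar summaries, whereas the real
# helper evidently also decodes image shapes and tensor summaries, given the
# image/image/0 and architecture assertions above:
import glob


def get_summary_value_sketch(keyword, dir_):
  """Returns the latest scalar summary value for `keyword` under `dir_`."""
  value = None
  for events_file in sorted(
      glob.glob(os.path.join(dir_, "events.out.tfevents*"))):
    for event in tf.compat.v1.train.summary_iterator(events_file):
      for v in event.summary.value:
        if v.tag == keyword:
          value = v.simple_value
  return value
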