def test_garbage_collect_exports(self):
    """Exceeding `exports_to_keep` prunes the oldest timestamped exports."""
    export_dir_base = tempfile.mkdtemp()
    tf.compat.v1.gfile.MkDir(export_dir_base)
    tf.compat.v1.gfile.MkDir(export_dir_base + "/export")
    tf.compat.v1.gfile.MkDir(export_dir_base + "/eval")

    # Four timestamped export directories, created oldest-first.
    export_dirs = [_create_test_export_dir(export_dir_base) for _ in range(4)]
    for export_dir in export_dirs:
        self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

    def _serving_input_receiver_fn():
        return tf.constant([1]), None

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        exports_to_keep=2)
    estimator = tf.compat.v1.test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.model_dir = export_dir_base

    # Garbage collect all but the most recent 2 exports,
    # where recency is determined based on the timestamp directory names.
    exporter.export(estimator, export_dir_base, None, None, False)

    # The two oldest exports are gone; the two newest survive.
    for export_dir in export_dirs[:2]:
        self.assertFalse(tf.compat.v1.gfile.Exists(export_dir))
    for export_dir in export_dirs[2:]:
        self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_best_export_is_saved(self):
    """Only evaluations that improve on the best loss trigger an export."""

    def _serving_input_receiver_fn():
        pass

    export_dir_base = tempfile.mkdtemp()
    tf.compat.v1.gfile.MkDir(export_dir_base)
    tf.compat.v1.gfile.MkDir(export_dir_base + "/export")
    tf.compat.v1.gfile.MkDir(export_dir_base + "/eval")

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=1)

    estimator = tf.compat.v1.test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.export_saved_model.return_value = "export_result_path"
    estimator.model_dir = export_dir_base

    # First evaluation: there is no previous best, so the model is exported.
    result = exporter.export(estimator, export_dir_base, "checkpoint_path",
                             {"loss": 0.5}, False)
    self.assertTrue(estimator.export_saved_model.called)
    self.assertEqual("export_result_path", result)

    # Worse loss than the current best (0.5): nothing is exported.
    result = exporter.export(estimator, export_dir_base, "checkpoint_path",
                             {"loss": 0.6}, False)
    self.assertIsNone(result)

    # Better loss than the current best: exported again.
    result = exporter.export(estimator, export_dir_base, "checkpoint_path",
                             {"loss": 0.4}, False)
    self.assertEqual("export_result_path", result)
def test_the_first_export(self):
    """A first export succeeds even when its results match a prior event file."""

    def _serving_input_receiver_fn():
        pass

    export_dir_base = tempfile.mkdtemp()
    tf.compat.v1.gfile.MkDir(export_dir_base)
    tf.compat.v1.gfile.MkDir(export_dir_base + "/export")
    tf.compat.v1.gfile.MkDir(export_dir_base + "/eval")

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        event_file_pattern="eval_continuous/*.tfevents.*",
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=1)

    estimator = tf.compat.v1.test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.model_dir = export_dir_base
    estimator.export_saved_model.return_value = "export_result_path"

    # Note that evaluation occurs before export.
    with context.graph_mode():
        eval_dir = os.path.join(export_dir_base, "eval_continuous")
        first_eval_results = {"loss": 60}
        estimator_lib._write_dict_to_summary(eval_dir, first_eval_results, 1)

    # Export the model with the same results computed in the first evaluation.
    exported = exporter.export(estimator, export_dir_base, "checkpoint_path",
                               first_eval_results, False)
    self.assertEqual("export_result_path", exported)
def test_best_exporter_with_empty_event(self):
    """An empty summary event in the eval dir does not break best tracking."""

    def _serving_input_receiver_fn():
        pass

    export_dir_base = tempfile.mkdtemp()
    tf.compat.v1.gfile.MkDir(export_dir_base)
    tf.compat.v1.gfile.MkDir(export_dir_base + "/export")
    tf.compat.v1.gfile.MkDir(export_dir_base + "/eval")

    # Write an empty summary followed by a real one; the empty event must
    # be skipped when the exporter recovers the best metric.
    eval_dir = os.path.join(export_dir_base, "eval_continuous")
    estimator_lib._write_dict_to_summary(eval_dir, {}, 1)
    estimator_lib._write_dict_to_summary(eval_dir, {"loss": 60}, 2)

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        event_file_pattern="eval_continuous/*.tfevents.*",
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=1)

    estimator = tf.compat.v1.test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.model_dir = export_dir_base
    estimator.export_saved_model.return_value = "export_result_path"

    exported = exporter.export(estimator, export_dir_base, "checkpoint_path",
                               {"loss": 100}, False)
    self.assertEqual("export_result_path", exported)

    exported = exporter.export(estimator, export_dir_base, "checkpoint_path",
                               {"loss": 10}, False)
    self.assertEqual("export_result_path", exported)
def test_best_exporter(self):
    """export() forwards its arguments to Estimator.export_saved_model."""

    def _serving_input_receiver_fn():
        pass

    export_dir_base = tempfile.mkdtemp()
    gfile.MkDir(export_dir_base)
    gfile.MkDir(export_dir_base + "/export")
    gfile.MkDir(export_dir_base + "/eval")

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=5)

    mock_estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    mock_estimator.export_saved_model.return_value = "export_result_path"
    mock_estimator.model_dir = export_dir_base

    # The mocked export result is passed straight through.
    self.assertEqual(
        "export_result_path",
        exporter.export(mock_estimator, export_dir_base, "checkpoint_path",
                        {}, False))

    # The exporter must forward its configuration verbatim.
    mock_estimator.export_saved_model.assert_called_with(
        export_dir_base,
        _serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        checkpoint_path="checkpoint_path")
def test_error_out_if_exports_to_keep_is_zero(self):
    """BestExporter must reject a non-positive `exports_to_keep`."""

    def _serving_input_receiver_fn():
        pass

    # `assertRaisesRegex` replaces the `assertRaisesRegexp` alias, which is
    # deprecated since Python 3.2 and removed in Python 3.12.
    with self.assertRaisesRegex(ValueError, "positive number"):
        exporter = exporter_lib.BestExporter(
            name="best_exporter",
            serving_input_receiver_fn=_serving_input_receiver_fn,
            exports_to_keep=0)
        # Unreachable when the constructor raises as expected; kept inside
        # the `with` block so a missing ValueError fails the assertion
        # above rather than crashing on an unbound `exporter` name.
        self.assertEqual("best_exporter", exporter.name)
def test_best_exporter_with_preemption(self):
    """Eval events written before a preemption seed the recovered best loss."""

    def _serving_input_receiver_fn():
        pass

    export_dir_base = tempfile.mkdtemp()
    gfile.MkDir(export_dir_base)
    gfile.MkDir(export_dir_base + "/export")
    gfile.MkDir(export_dir_base + "/eval")
    eval_dir_base = os.path.join(export_dir_base, "eval_continuous")

    # _write_dict_to_summary is only called internally within graph mode.
    with context.graph_mode():
        estimator_lib._write_dict_to_summary(eval_dir_base, {"loss": 50}, 1)
        estimator_lib._write_dict_to_summary(eval_dir_base, {"loss": 60}, 2)

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        event_file_pattern="eval_continuous/*.tfevents.*",
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=1)

    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.model_dir = export_dir_base
    # Fix: the sibling tests configure and assert against
    # `export_saved_model`, so that is the method the exporter calls; the
    # old `export_savedmodel` spelling left the mocked call returning a
    # bare Mock instead of "export_result_path".
    estimator.export_saved_model.return_value = "export_result_path"

    # 100 is worse than the best recovered from the event files (50).
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 100}, False)
    self.assertEqual(None, export_result)

    # 10 beats the best seen so far, so the model is exported.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 10}, False)
    self.assertEqual("export_result_path", export_result)

    # 20 is worse than the new best (10): no export.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 20}, False)
    self.assertEqual(None, export_result)