def test_the_first_export(self):

  def _serving_input_receiver_fn():
    pass

  export_dir_base = tempfile.mkdtemp()
  tf.compat.v1.gfile.MkDir(export_dir_base)
  tf.compat.v1.gfile.MkDir(export_dir_base + "/export")
  tf.compat.v1.gfile.MkDir(export_dir_base + "/eval")

  exporter = exporter_lib.BestExporter(
      name="best_exporter",
      serving_input_receiver_fn=_serving_input_receiver_fn,
      event_file_pattern="eval_continuous/*.tfevents.*",
      assets_extra={"from/path": "to/path"},
      as_text=False,
      exports_to_keep=1)

  estimator = tf.compat.v1.test.mock.Mock(spec=estimator_lib.Estimator)
  estimator.model_dir = export_dir_base
  estimator.export_saved_model.return_value = "export_result_path"

  # Note that evaluation occurs before export.
  with context.graph_mode():
    eval_dir_base = os.path.join(export_dir_base, "eval_continuous")
    first_evaluation_results = {"loss": 60}
    estimator_lib._write_dict_to_summary(eval_dir_base,
                                         first_evaluation_results, 1)

  # Export the model with the same results computed in the first evaluation.
  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path",
                                  first_evaluation_results, False)
  self.assertEqual("export_result_path", export_result)

def test_best_exporter_with_empty_event(self):

  def _serving_input_receiver_fn():
    pass

  export_dir_base = tempfile.mkdtemp()
  tf.compat.v1.gfile.MkDir(export_dir_base)
  tf.compat.v1.gfile.MkDir(export_dir_base + "/export")
  tf.compat.v1.gfile.MkDir(export_dir_base + "/eval")

  eval_dir_base = os.path.join(export_dir_base, "eval_continuous")
  estimator_lib._write_dict_to_summary(eval_dir_base, {}, 1)
  estimator_lib._write_dict_to_summary(eval_dir_base, {"loss": 60}, 2)

  exporter = exporter_lib.BestExporter(
      name="best_exporter",
      serving_input_receiver_fn=_serving_input_receiver_fn,
      event_file_pattern="eval_continuous/*.tfevents.*",
      assets_extra={"from/path": "to/path"},
      as_text=False,
      exports_to_keep=1)

  estimator = tf.compat.v1.test.mock.Mock(spec=estimator_lib.Estimator)
  estimator.model_dir = export_dir_base
  estimator.export_saved_model.return_value = "export_result_path"

  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path", {"loss": 100}, False)
  self.assertEqual("export_result_path", export_result)

  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path", {"loss": 10}, False)
  self.assertEqual("export_result_path", export_result)

def _write_events(eval_dir, params):
  """Test helper to write events to summary files."""
  for steps, loss, accuracy in params:
    estimator_lib._write_dict_to_summary(eval_dir, {
        'loss': loss,
        'accuracy': accuracy,
    }, steps)

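# Illustrative usage of _write_events (the step/metric values below are
# hypothetical, not taken from the real test suite): each tuple is
# (global_step, loss, accuracy), e.g.
#   _write_events(eval_dir, [(100, 0.9, 0.5), (200, 0.4, 0.8)])
# writes one summary event per tuple, which an exporter can later read back
# through its event_file_pattern when recovering state after preemption.
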
def test_steps_exporter_with_preemption(self):

  def _serving_input_receiver_fn():
    pass

  export_dir_base = tempfile.mkdtemp()
  gfile.MkDir(export_dir_base)
  gfile.MkDir(export_dir_base + "/export")
  gfile.MkDir(export_dir_base + "/eval")

  eval_dir_base = os.path.join(export_dir_base, "eval_continuous")
  estimator_lib._write_dict_to_summary(eval_dir_base, {}, 1)
  estimator_lib._write_dict_to_summary(eval_dir_base, {}, 2)

  exporter = exporter_lib.StepsExporter(
      name="steps_exporter",
      serving_input_receiver_fn=_serving_input_receiver_fn,
      event_file_pattern="eval_continuous/*.tfevents.*",
      assets_extra={"from/path": "to/path"},
      as_text=False,
      steps_to_keep=[1, 2, 6, 8])

  estimator = test.mock.Mock(spec=estimator_lib.Estimator)
  estimator.model_dir = export_dir_base
  estimator.export_savedmodel.return_value = "export_result_path"

  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path", {"global_step": 3}, False)
  self.assertEqual(None, export_result)

  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path", {"global_step": 6}, False)
  self.assertEqual("export_result_path", export_result)

  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path", {"global_step": 7}, False)
  self.assertEqual(None, export_result)

  shutil.rmtree(export_dir_base, ignore_errors=True)

def test_best_exporter_with_preemption(self):

  def _serving_input_receiver_fn():
    pass

  export_dir_base = tempfile.mkdtemp()
  gfile.MkDir(export_dir_base)
  gfile.MkDir(export_dir_base + "/export")
  gfile.MkDir(export_dir_base + "/eval")

  eval_dir_base = os.path.join(export_dir_base, "eval_continuous")
  # _write_dict_to_summary is only called internally within graph mode.
  with context.graph_mode():
    estimator_lib._write_dict_to_summary(eval_dir_base, {"loss": 50}, 1)
    estimator_lib._write_dict_to_summary(eval_dir_base, {"loss": 60}, 2)

  exporter = exporter_lib.BestExporter(
      name="best_exporter",
      serving_input_receiver_fn=_serving_input_receiver_fn,
      event_file_pattern="eval_continuous/*.tfevents.*",
      assets_extra={"from/path": "to/path"},
      as_text=False,
      exports_to_keep=1)

  estimator = test.mock.Mock(spec=estimator_lib.Estimator)
  estimator.model_dir = export_dir_base
  estimator.export_savedmodel.return_value = "export_result_path"

  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path", {"loss": 100}, False)
  self.assertEqual(None, export_result)

  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path", {"loss": 10}, False)
  self.assertEqual("export_result_path", export_result)

  export_result = exporter.export(estimator, export_dir_base,
                                  "checkpoint_path", {"loss": 20}, False)
  self.assertEqual(None, export_result)