  def test_log_evaluation_result_with_invalid_type(self):
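    # A string is not a valid eval-result dict, so no metric file should be written.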
    eval_result = "{'loss': 0.46237424, 'global_step': 207082}"
    log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    log = logger.BenchmarkFileLogger(log_dir)
    log.log_evaluation_result(eval_result)

    metric_log = os.path.join(log_dir, "metric.log")
    self.assertFalse(tf.io.gfile.exists(metric_log))

  def test_log_non_number_value(self):
    log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    log = logger.BenchmarkFileLogger(log_dir)
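    # A Tensor is not a plain Python number, so the metric should be dropped.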
    const = tf.constant(1)
    log.log_metric("accuracy", const)

    metric_log = os.path.join(log_dir, "metric.log")
    self.assertFalse(tf.io.gfile.exists(metric_log))

  def test_log_metric(self):
    log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    log = logger.BenchmarkFileLogger(log_dir)
    log.log_metric("accuracy", 0.999, global_step=1e4, extras={"name": "value"})

    metric_log = os.path.join(log_dir, "metric.log")
    self.assertTrue(tf.io.gfile.exists(metric_log))
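    # Each logged metric is serialized as a single JSON object per line.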
    with tf.io.gfile.GFile(metric_log) as f:
      metric = json.loads(f.readline())
      self.assertEqual(metric["name"], "accuracy")
      self.assertEqual(metric["value"], 0.999)
      self.assertEqual(metric["unit"], None)
      self.assertEqual(metric["global_step"], 1e4)
      self.assertEqual(metric["extras"], [{"name": "name", "value": "value"}])

  # Assumes `mock` (unittest.mock) is imported at module scope and that the
  # logger module implements a _gather_run_info helper; patching it lets the
  # test control the gathered run info.
  @mock.patch.object(logger, "_gather_run_info")
  def test_log_run_info(self, mock_gather_run_info):
    log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    log = logger.BenchmarkFileLogger(log_dir)
    run_info = {"model_name": "model_name",
                "dataset_": "dataset_name",
                "run_info": "run_value"}
    mock_gather_run_info.return_value = run_info
    log.log_run_info("model_name", "dataset_name", {})

    run_log = os.path.join(log_dir, "benchmark_run.log")
    self.assertTrue(tf.io.gfile.exists(run_log))
    with tf.io.gfile.GFile(run_log) as f:
      run_info = json.loads(f.readline())
      self.assertEqual(run_info["model_name"], "model_name")
      self.assertEqual(run_info["dataset_"], "dataset_name")
      self.assertEqual(run_info["run_info"], "run_value")

  def test_log_evaluation_result(self):
    eval_result = {"loss": 0.46237424,
                   "global_step": 207082,
                   "accuracy": 0.9285}
    log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
    log = logger.BenchmarkFileLogger(log_dir)
    log.log_evaluation_result(eval_result)

    metric_log = os.path.join(log_dir, "metric.log")
    self.assertTrue(tf.io.gfile.exists(metric_log))
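    # Metrics are expected in sorted key order ("accuracy" before "loss"), with
    # "global_step" attached to each entry rather than logged as its own metric.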
    with tf.io.gfile.GFile(metric_log) as f:
      accuracy = json.loads(f.readline())
      self.assertEqual(accuracy["name"], "accuracy")
      self.assertEqual(accuracy["value"], 0.9285)
      self.assertEqual(accuracy["unit"], None)
      self.assertEqual(accuracy["global_step"], 207082)

      loss = json.loads(f.readline())
      self.assertEqual(loss["name"], "loss")
      self.assertEqual(loss["value"], 0.46237424)
      self.assertEqual(loss["unit"], None)
      self.assertEqual(loss["global_step"], 207082)

  def test_create_logging_dir(self):
    non_exist_temp_dir = os.path.join(self.get_temp_dir(), "unknown_dir")
    self.assertFalse(tf.io.gfile.isdir(non_exist_temp_dir))

    # Constructing the logger should create the missing directory.
    logger.BenchmarkFileLogger(non_exist_temp_dir)
    self.assertTrue(tf.io.gfile.isdir(non_exist_temp_dir))