# Example #1
    def test_log_multiple_metrics(self):
        log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
        log = logger.BenchmarkFileLogger(log_dir)
        log.log_metric("accuracy",
                       0.999,
                       global_step=1e4,
                       extras={"name": "value"})
        log.log_metric("loss", 0.02, global_step=1e4)

        metric_log = os.path.join(log_dir, "metric.log")
        self.assertTrue(tf.gfile.Exists(metric_log))
        with tf.gfile.GFile(metric_log) as f:
            accuracy = json.loads(f.readline())
            self.assertEqual(accuracy["name"], "accuracy")
            self.assertEqual(accuracy["value"], 0.999)
            self.assertEqual(accuracy["unit"], None)
            self.assertEqual(accuracy["global_step"], 1e4)
            self.assertEqual(accuracy["extras"], [{
                "name": "name",
                "value": "value"
            }])

            loss = json.loads(f.readline())
            self.assertEqual(loss["name"], "loss")
            self.assertEqual(loss["value"], 0.02)
            self.assertEqual(loss["unit"], None)
            self.assertEqual(loss["global_step"], 1e4)
            self.assertEqual(loss["extras"], [])
  def test_log_evaluation_result_with_invalid_type(self):
    """A string (non-dict) eval result must be rejected without logging."""
    bad_result = "{'loss': 0.46237424, 'global_step': 207082}"
    directory = tempfile.mkdtemp(dir=self.get_temp_dir())
    benchmark_logger = logger.BenchmarkFileLogger(directory)
    benchmark_logger.log_evaluation_result(bad_result)

    # Nothing should have been written for the rejected input.
    self.assertFalse(
        tf.io.gfile.exists(os.path.join(directory, "metric.log")))
  def test_log_non_number_value(self):
    """A metric value that is not a plain number is dropped silently."""
    directory = tempfile.mkdtemp(dir=self.get_temp_dir())
    benchmark_logger = logger.BenchmarkFileLogger(directory)
    # A tf.constant is not a Python number, so no metric should be recorded.
    benchmark_logger.log_metric("accuracy", tf.constant(1))

    self.assertFalse(
        tf.io.gfile.exists(os.path.join(directory, "metric.log")))
  def test_log_run_info(self, mock_gather_run_info):
    """log_run_info dumps the gathered run info as one JSON line."""
    directory = tempfile.mkdtemp(dir=self.get_temp_dir())
    benchmark_logger = logger.BenchmarkFileLogger(directory)
    stub_info = {"model_name": "model_name",
                 "dataset": "dataset_name",
                 "run_info": "run_value"}
    # The mocked gatherer (patched by a decorator outside this view)
    # supplies the dict that should end up on disk.
    mock_gather_run_info.return_value = stub_info
    benchmark_logger.log_run_info("model_name", "dataset_name", {})

    run_log = os.path.join(directory, "benchmark_run.log")
    self.assertTrue(tf.io.gfile.exists(run_log))
    with tf.io.gfile.GFile(run_log) as f:
      written = json.loads(f.readline())
      self.assertEqual(written["model_name"], "model_name")
      self.assertEqual(written["dataset"], "dataset_name")
      self.assertEqual(written["run_info"], "run_value")
  def test_log_evaluation_result(self):
    """A dict eval result becomes per-metric JSON lines in metric.log."""
    directory = tempfile.mkdtemp(dir=self.get_temp_dir())
    benchmark_logger = logger.BenchmarkFileLogger(directory)
    benchmark_logger.log_evaluation_result(
        {"loss": 0.46237424,
         "global_step": 207082,
         "accuracy": 0.9285})

    metric_log = os.path.join(directory, "metric.log")
    self.assertTrue(tf.io.gfile.exists(metric_log))
    with tf.io.gfile.GFile(metric_log) as f:
      # Accuracy is written first, then loss; both carry the shared
      # global_step and a null unit.
      first = json.loads(f.readline())
      self.assertEqual(first["name"], "accuracy")
      self.assertEqual(first["value"], 0.9285)
      self.assertEqual(first["unit"], None)
      self.assertEqual(first["global_step"], 207082)

      second = json.loads(f.readline())
      self.assertEqual(second["name"], "loss")
      self.assertEqual(second["value"], 0.46237424)
      self.assertEqual(second["unit"], None)
      self.assertEqual(second["global_step"], 207082)
  def test_create_logging_dir(self):
    """Constructing the logger must create a missing log directory."""
    target = os.path.join(self.get_temp_dir(), "unknown_dir")
    self.assertFalse(tf.io.gfile.isdir(target))

    # The constructor is expected to create the directory as a side effect.
    logger.BenchmarkFileLogger(target)
    self.assertTrue(tf.io.gfile.isdir(target))