Example #1
 def test_multitask_evaluator(self, distribution):
   with distribution.scope():
     tasks = [
         MockTask(params=cfg.TaskConfig(), name="bar"),
         MockTask(params=cfg.TaskConfig(), name="foo")
     ]
     model = MockModel()
     test_evaluator = evaluator.MultiTaskEvaluator(
         eval_tasks=tasks, model=model)
     results = test_evaluator.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))
   self.assertContainsSubset(["validation_loss", "acc"], results["bar"].keys())
   self.assertContainsSubset(["validation_loss", "acc"], results["foo"].keys())
   self.assertEqual(results["bar"]["validation_loss"], 0.0)
   self.assertEqual(results["foo"]["validation_loss"], 1.0)
Example #2
 def test_multitask_evaluator_numpy_metrics(self, distribution):
   with distribution.scope():
     tasks = [
         MockTask(params=cfg.TaskConfig(), name="bar"),
         MockTask(params=cfg.TaskConfig(), name="foo")
     ]
     model = MockModel()
     test_evaluator = evaluator.MultiTaskEvaluator(
         eval_tasks=tasks, model=model)
     results = test_evaluator.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
   self.assertEqual(results["bar"]["counter"],
                    5. * distribution.num_replicas_in_sync)
   self.assertEqual(results["foo"]["counter"],
                    5. * distribution.num_replicas_in_sync)
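The expected values above imply that the mock task's counter metric adds one per evaluation step on every replica, so the aggregated result is the step count times num_replicas_in_sync. A hypothetical metric with that behavior might look like the sketch below (this is only an illustration, not the MockTask implementation from the test file):

import tensorflow as tf

class CounterMetric(tf.keras.metrics.Metric):
  """Counts update_state() calls; per-replica counts are summed on read."""

  def __init__(self, name="counter", **kwargs):
    super().__init__(name=name, **kwargs)
    self._count = self.add_weight(name="count", initializer="zeros")

  def update_state(self, *args, **kwargs):
    # One increment per call; each replica updates its own copy of the
    # variable, and Keras metric weights are sum-aggregated across replicas.
    self._count.assign_add(1.0)

  def result(self):
    return self._count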
Example #3
def get_exp_config():
  return cfg.ExperimentConfig(
      task=cfg.TaskConfig(model=bert.PretrainerConfig()),
      trainer=trainer_lib.ProgressiveTrainerConfig(
          export_checkpoint=True,
          export_checkpoint_interval=1,
          export_only_final_stage_ckpt=False))
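get_exp_config builds an ordinary ExperimentConfig, so the nested trainer settings can be read back as plain attributes; a small usage sketch:

exp_config = get_exp_config()
assert exp_config.trainer.export_checkpoint
assert exp_config.trainer.export_checkpoint_interval == 1
assert not exp_config.trainer.export_only_final_stage_ckpt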
Example #4
@dataclasses.dataclass
class MultiEvalExperimentConfig(base_config.Config):
  """An experiment config for single-task training and multi-task evaluation.

  Attributes:
    task: the single-stream training task.
    eval_tasks: individual evaluation tasks.
    trainer: the trainer configuration.
    runtime: the runtime configuration.
  """
  task: cfg.TaskConfig = cfg.TaskConfig()
  eval_tasks: MultiTaskConfig = MultiTaskConfig()
  trainer: cfg.TrainerConfig = cfg.TrainerConfig()
  runtime: cfg.RuntimeConfig = cfg.RuntimeConfig()
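A minimal construction sketch for this config, reusing the field names declared above; the BERT pretrainer model is only an illustrative choice borrowed from the other examples in this section, and the trainer/runtime sections are left at their defaults:

experiment = MultiEvalExperimentConfig(
    task=cfg.TaskConfig(model=bert.PretrainerConfig()),
    eval_tasks=MultiTaskConfig())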
Example #5
  def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
    config = cfg.ExperimentConfig(
        task=cfg.TaskConfig(
            model=bert.PretrainerConfig()),
        runtime=cfg.RuntimeConfig(
            mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),
        trainer=trainer_lib.ProgressiveTrainerConfig(
            export_checkpoint=True,
            export_checkpoint_interval=1,
            export_only_final_stage_ckpt=False))
    task = TestPolicy(None, config.task)
    trainer = trainer_lib.ProgressiveTrainer(config, task, self.get_temp_dir())
    if mixed_precision_dtype != 'float16':
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
    elif mixed_precision_dtype == 'float16' and loss_scale is None:
      self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)

    metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
    self.assertIn('training_loss', metrics)
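The test above only asserts the plain-SGD branches. A hedged sketch of the remaining case, where float16 is paired with an explicit loss scale and the optimizer would presumably be wrapped for loss scaling (this assertion is an assumption, not part of the original test):

    if mixed_precision_dtype == 'float16' and loss_scale is not None:
      # Assumption: with an explicit loss scale configured, the trainer wraps
      # the base optimizer in a Keras LossScaleOptimizer.
      self.assertIsInstance(trainer.optimizer,
                            tf.keras.mixed_precision.LossScaleOptimizer)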