  def test_train_save_full_model(self):
    if context.num_gpus() >= 2:
      self.skipTest(
          'No need to test 2+ GPUs without a distribution strategy.')
    FLAGS.save_weights_only = False
    t = transformer_main.TransformerTask(FLAGS)
    t.train()

  def test_predict_fp16(self):
    if context.num_gpus() >= 2:
      self.skipTest(
          'No need to test 2+ GPUs without a distribution strategy.')
    self._prepare_files_and_flags('--dtype=fp16')
    t = transformer_main.TransformerTask(FLAGS)
    t.predict()
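Note: these tests mutate a module-level FLAGS object before building the task. A minimal sketch of that pattern using absl flags directly; the DEFINE_* calls below are illustrative stand-ins for the model's real flag definitions, not the actual ones.

from absl import flags

FLAGS = flags.FLAGS

# Illustrative stand-ins for a few of the flags the examples touch.
flags.DEFINE_bool('save_weights_only', True,
                  'Save weights instead of the full model.')
flags.DEFINE_string('dtype', 'fp32', 'Computation dtype, e.g. fp32 or fp16.')
flags.DEFINE_integer('train_steps', 1, 'Number of training steps.')

if __name__ == '__main__':
  FLAGS(['transformer_main_test'])  # Mark flags as parsed so they can be set.
  FLAGS.dtype = 'fp16'              # Tests then override flags like this.
  print(FLAGS.dtype, FLAGS.train_steps)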
Example 3
 def test_eval(self):
   if context.num_gpus() >= 2:
     self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
   if 'test_xla' in sys.argv[0]:
     self.skipTest('TODO(xla): Make this test faster under XLA.')
   self._prepare_files_and_flags()
   t = transformer_main.TransformerTask(FLAGS)
   t.eval()
Example 4
 def test_train_2_gpu(self):
   if context.num_gpus() < 2:
     self.skipTest(
         '{} GPUs are not available for this test. {} GPUs are available'
         .format(2, context.num_gpus()))
   FLAGS.distribution_strategy = 'mirrored'
   FLAGS.num_gpus = 2
   FLAGS.param_set = 'base'
   t = transformer_main.TransformerTask(FLAGS)
   t.train()
Example 5
 def test_train_static_batch(self):
   if context.num_gpus() >= 2:
     self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
   FLAGS.distribution_strategy = 'one_device'
   if tf.test.is_built_with_cuda():
     FLAGS.num_gpus = 1
   else:
     FLAGS.num_gpus = 0
   FLAGS.static_batch = True
   t = transformer_main.TransformerTask(FLAGS)
   t.train()
Example 6
 def test_train_no_dist_strat(self):
   if context.num_gpus() >= 2:
     self.skipTest('No need to test 2+ GPUs without a distribution strategy.')
   t = transformer_main.TransformerTask(FLAGS)
   t.train()
Example 7
 def test_train_fp16(self):
   FLAGS.distribution_strategy = 'one_device'
   FLAGS.dtype = 'fp16'
   t = transformer_main.TransformerTask(FLAGS)
   t.train()
Example 8
 def test_train_1_gpu_with_dist_strat(self):
   FLAGS.distribution_strategy = 'one_device'
   t = transformer_main.TransformerTask(FLAGS)
   t.train()
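The distribution_strategy and num_gpus flags set in the examples above select a tf.distribute strategy before training. A minimal sketch of that mapping, assuming a helper named get_distribution_strategy (the repo's own utility may differ):

import tensorflow as tf

def get_distribution_strategy(distribution_strategy, num_gpus):
  """Maps the flag values used above onto tf.distribute strategies."""
  if distribution_strategy == 'mirrored':
    # Replicate the model across all visible GPUs.
    return tf.distribute.MirroredStrategy()
  if distribution_strategy == 'one_device':
    device = '/gpu:0' if num_gpus else '/cpu:0'
    return tf.distribute.OneDeviceStrategy(device)
  return None  # No distribution strategy for any other value.

# strategy = get_distribution_strategy(FLAGS.distribution_strategy, FLAGS.num_gpus)
# if strategy:
#   with strategy.scope():
#     ...  # build and train the model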
    def _run_and_report_benchmark(self,
                                  bleu_max=None,
                                  bleu_min=None,
                                  log_steps=None,
                                  total_batch_size=None,
                                  warmup=1):
        """Report benchmark results by writing to local protobuf file.

    Args:
      bleu_max: highest passing level for bleu score.
      bleu_min: lowest passing level for bleu score.
      log_steps: How often the log was created for stats['step_timestamp_log'].
      total_batch_size: Global batch-size.
      warmup: number of entries in stats['step_timestamp_log'] to ignore.
    """
        start_time_sec = time.time()
        task = transformer_main.TransformerTask(FLAGS)
        stats = task.train()
        wall_time_sec = time.time() - start_time_sec

        metrics = []
        if 'bleu_uncased' in stats:
            if 'bleu_uncased_history' in stats:
                bleu_uncased_best = max(stats['bleu_uncased_history'],
                                        key=lambda x: x[1])
                metrics.append({
                    'name': 'bleu_uncased',
                    'value': bleu_uncased_best[1],
                    'min_value': bleu_min,
                    'max_value': bleu_max
                })
                metrics.append({
                    'name': 'bleu_best_score_iteration',
                    'value': bleu_uncased_best[0]
                })
                metrics.append({
                    'name': 'bleu_uncased_last',
                    'value': stats['bleu_uncased']
                })
            else:
                metrics.append({
                    'name': 'bleu_uncased',
                    'value': stats['bleu_uncased'],
                    'min_value': bleu_min,
                    'max_value': bleu_max
                })

        if (warmup and 'step_timestamp_log' in stats
                and len(stats['step_timestamp_log']) > warmup):
            # The first entry in time_log marks the start of the first logged
            # step; each later entry marks the end of a logging interval.
            time_log = stats['step_timestamp_log']
            elapsed = time_log[-1].timestamp - time_log[warmup].timestamp
            num_examples = (total_batch_size * log_steps *
                            (len(time_log) - warmup - 1))
            examples_per_sec = num_examples / elapsed
            metrics.append({
                'name': 'exp_per_second',
                'value': examples_per_sec
            })

        if 'avg_exp_per_second' in stats:
            metrics.append({
                'name': 'avg_exp_per_second',
                'value': stats['avg_exp_per_second']
            })

        flags_str = flags_core.get_nondefault_flags_as_str()
        self.report_benchmark(iters=-1,
                              wall_time=wall_time_sec,
                              metrics=metrics,
                              extras={'flags': flags_str})
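For reference, a self-contained sketch of the exp_per_second computation above, run on a fake step_timestamp_log. BatchTimestamp here is a stand-in namedtuple, and the numbers are made up purely to show the arithmetic.

import collections

BatchTimestamp = collections.namedtuple('BatchTimestamp',
                                        ['batch_index', 'timestamp'])

def examples_per_second(time_log, total_batch_size, log_steps, warmup=1):
  # Drop the first `warmup` entries, then time the remaining logging
  # intervals; each interval covers `log_steps` steps of `total_batch_size`
  # examples.
  elapsed = time_log[-1].timestamp - time_log[warmup].timestamp
  num_examples = total_batch_size * log_steps * (len(time_log) - warmup - 1)
  return num_examples / elapsed

# Fake log: an entry every 10 steps, 2 seconds apart, global batch size 4096.
fake_log = [BatchTimestamp(i * 10, 100.0 + 2.0 * i) for i in range(6)]
print(examples_per_second(fake_log, total_batch_size=4096, log_steps=10))
# 4096 * 10 * 4 / 8.0 = 20480.0 examples per second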