Code Example #1
File: librispeech.py Project: thzll2001/lingvo
 def ProgramSchedule(self):
     return program.SimpleProgramScheduleForTask(
         train_dataset_name='Train',
         train_steps_per_loop=50,
         eval_dataset_names=['Test'],
         eval_steps_per_loop=5,
         decode_steps_per_loop=0)
Code Example #2
 def ProgramSchedule(self):
   """Returns a schedule for the Executor."""
   return program_lib.SimpleProgramScheduleForTask(
       'Train',
       train_steps_per_loop=self.Task().train.tpu_steps_per_loop,
       eval_dataset_names=[],
       eval_steps_per_loop=0,
       decode_steps_per_loop=0)
Code Example #3
 def ProgramSchedule(self):
     # Only needed if --use_tpu_executor.
     p = program.SimpleProgramScheduleForTask(train_dataset_name='Train',
                                              train_steps_per_loop=1000,
                                              eval_dataset_names=[],
                                              eval_steps_per_loop=0,
                                              decode_steps_per_loop=0)
     p.train_executions_per_eval = 0
     return p
Code Example #4
 def ProgramSchedule(self):
     return program.SimpleProgramScheduleForTask(
         train_dataset_name='Train',
         train_steps_per_loop=100,
         # I want to compute WER...
         eval_dataset_names=['Dev'],
         eval_steps_per_loop=1,
         decode_steps_per_loop=1,
     )
Code Example #5
 def ProgramSchedule(self):
     p = program.SimpleProgramScheduleForTask(
         train_dataset_name='Train',
         train_steps_per_loop=1000,
         eval_dataset_names=['Dev', 'Test'],
         eval_steps_per_loop=1,
         decode_steps_per_loop=1)
     p.train_executions_per_eval = 0
     return p
Code Example #6
 def ProgramSchedule(self):
     # Note: `train_steps_per_loop`, `eval_decode_steps_per_loop`, and
     # `max_train_steps` are not defined in this snippet; in the original
     # source they are captured from the enclosing scope.
     p = program.SimpleProgramScheduleForTask(
         train_dataset_name='Train',
         train_steps_per_loop=train_steps_per_loop,
         eval_dataset_names=['Test'],
         eval_steps_per_loop=eval_decode_steps_per_loop,
         decode_steps_per_loop=eval_decode_steps_per_loop)
     if max_train_steps == 0:
         p.train_executions_per_eval = 0
     return p
Code Example #7
 def ProgramSchedule(self):
     p = program.SimpleProgramScheduleForTask(
         train_dataset_name='Train',
         train_steps_per_loop=100,
         eval_dataset_names=['Train'],
         eval_steps_per_loop=100,
         decode_steps_per_loop=0,
     )
     p.train_program.spmd = True
     # Run eval after every 5 train executions (5 loops * 100 steps = 500 steps).
     p.train_executions_per_eval = 5
     return p
Code Example #8
File: wiki_bert.py Project: tensorflow/lingvo
    def ProgramSchedule(self):
        p = program.SimpleProgramScheduleForTask(
            train_dataset_name='Train',
            train_steps_per_loop=self.TRAIN_STEPS_PER_LOOP,
            eval_dataset_names=['Test'],
            eval_steps_per_loop=10,
            decode_steps_per_loop=0,
        )
        p.train_program.spmd = True
        p.train_executions_per_eval = self.TRAIN_EXES_PER_EVAL

        # For compliance logging.
        p.ml_perf.benchmark_name = 'bert'
        p.ml_perf.submission_metadata = {
            'global_batch_size': self.BATCH_SIZE,
            'submission_org': 'Google',
            'submission_platform': 'tpu-v4-4096',
            'submission_division': 'open',
            'submission_status': 'cloud',
            'submission_benchmark': p.ml_perf.benchmark_name,
            'submission_model': 'lingvo',
            'cache_clear': None,
            'train_samples': 156725653,
            'eval_samples': 10000
        }

        # For BERT, we log the number of examples trained so far as the
        # "epoch". Since epoch_num = global_step / steps_per_epoch, and we
        # want epoch_num = num_examples_trained = global_step * examples_per_step,
        #   steps_per_epoch = global_step / (global_step * examples_per_step)
        #                   = 1 / examples_per_step
        examples_per_step = self.BATCH_SIZE
        p.ml_perf.steps_per_epoch = 1 / examples_per_step

        p.ml_perf.decoder_metric_name = 'acc1'
        p.ml_perf.decoder_metric_success_threshold = 0.6
        p.ml_perf.max_steps_to_train = 31790

        return p
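
Taken together, these examples follow one pattern: build the schedule with program.SimpleProgramScheduleForTask, then optionally adjust fields on the returned params object (train_program.spmd, train_executions_per_eval, ml_perf.*) before returning it. Below is a minimal composite sketch, assuming lingvo is installed; the class name MyExperiment, the dataset names, and the step counts are illustrative placeholders, not taken from any of the projects above.

 from lingvo.core import base_model_params
 from lingvo.core import program


 class MyExperiment(base_model_params.SingleTaskModelParams):
   """Hypothetical experiment params combining the patterns shown above."""

   def ProgramSchedule(self):
     # Build the base schedule: one train program, plus eval/decode
     # programs for each dataset listed in eval_dataset_names.
     p = program.SimpleProgramScheduleForTask(
         train_dataset_name='Train',
         train_steps_per_loop=1000,
         eval_dataset_names=['Dev', 'Test'],
         eval_steps_per_loop=1,
         decode_steps_per_loop=1)
     # Optional tweaks seen in examples #7 and #8: enable SPMD partitioning
     # for the train program, and run eval every 5 train executions.
     p.train_program.spmd = True
     p.train_executions_per_eval = 5
     return p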