Example #1
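A small-scale schedule, apparently intended for testing: 20 training steps per loop, a global batch size of 64, and a dummy decoder-metric threshold of -10.0 so the run_stop/success event always fires. The snippet assumes program_lib refers to Lingvo's program module (e.g. from lingvo.core import program as program_lib) and that self.WARMUP_STEPS is defined on the enclosing params class.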
  def ProgramSchedule(self):
    p = program_lib.MLPerfProgramScheduleForTask(
        train_dataset_name='Train',
        train_steps_per_loop=20,
        decode_dataset_name='Test',
        decode_steps_per_loop=1,
        num_epochs_per_session_run=4,
        warmup_seconds=0)

    p.train_executions_per_eval = 1

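    # For compliance logging (same block as in Example #2).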
    p.ml_perf.benchmark_name = 'transformer'
    p.ml_perf.steps_per_epoch = 1
    p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
    # Dummy value just to see run_stop/success.
    p.ml_perf.decoder_metric_success_threshold = -10.0
    p.ml_perf.max_sequence_length = 80
    p.ml_perf.global_batch_size = 64
    p.ml_perf.optimizer_name = 'adam'
    p.ml_perf.opt_adam_beta_1 = 0.9
    p.ml_perf.opt_adam_beta_2 = 0.98
    p.ml_perf.opt_adam_epsilon = 1e-9
    p.ml_perf.base_learning_rate = 2.0
    p.ml_perf.warmup_steps = self.WARMUP_STEPS
    p.ml_perf.train_samples = 566340
    p.ml_perf.eval_samples = 3003
    return p
Example #2
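A full-scale schedule for the same benchmark: 1107 training steps per loop, a global batch size of 512, and a decoder BLEU success threshold of 0.25 (presumably the MLPerf quality target of 25 BLEU expressed as a fraction). STEPS_PER_EPOCH, LEARNING_RATE, and WARMUP_STEPS are assumed to be class-level constants on the enclosing params class.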
  def ProgramSchedule(self):
    p = program_lib.MLPerfProgramScheduleForTask(
        train_dataset_name='Train',
        train_steps_per_loop=1107,
        decode_dataset_name='Test',
        decode_steps_per_loop=1,
        num_epochs_per_session_run=4)
    p.train_executions_per_eval = 1

    # For compliance logging.
    p.ml_perf.benchmark_name = 'transformer'
    p.ml_perf.steps_per_epoch = self.STEPS_PER_EPOCH
    p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
    p.ml_perf.decoder_metric_success_threshold = 0.25

    p.ml_perf.max_sequence_length = 80
    p.ml_perf.global_batch_size = 512
    p.ml_perf.optimizer_name = 'adam'
    p.ml_perf.opt_adam_beta_1 = 0.9
    p.ml_perf.opt_adam_beta_2 = 0.98
    p.ml_perf.opt_adam_epsilon = 1e-9
    p.ml_perf.base_learning_rate = self.LEARNING_RATE
    p.ml_perf.warmup_steps = self.WARMUP_STEPS
    p.ml_perf.train_samples = 566340
    p.ml_perf.eval_samples = 3003
    return p
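Apart from loop sizing, batch size, warmup/learning-rate constants, and the success threshold, the two schedules are identical: both use the Adam hyperparameters commonly used for Transformer training (beta_1 = 0.9, beta_2 = 0.98, epsilon = 1e-9), a maximum sequence length of 80, and the same train/eval sample counts (566,340 and 3,003).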