Code Example #1
File: Engine.py Project: chagge/returnn
 def init_train_from_config(self, config, train_data, dev_data=None, eval_data=None):
   """
   :type config: Config.Config
   :type train_data: Dataset.Dataset
   :type dev_data: Dataset.Dataset | None
   :type eval_data: Dataset.Dataset | None
   """
   self.train_data = train_data
   self.dev_data = dev_data
   self.eval_data = eval_data
   self.start_epoch, self.start_batch = self.get_train_start_epoch_batch(config)
   self.batch_size = config.int('batch_size', 1)
   self.shuffle_batches = config.bool('shuffle_batches', True)
   self.update_batch_size = config.int('update_batch_size', 0)
   self.model_filename = config.value('model', None)
   self.save_model_epoch_interval = config.int('save_interval', 1)
   self.save_epoch1_initial_model = config.bool('save_epoch1_initial_model', False)
   self.learning_rate_control = loadLearningRateControlFromConfig(config)
   self.learning_rate = self.learning_rate_control.defaultLearningRate
   self.initial_learning_rate = self.learning_rate
   self.pretrain_learning_rate = config.float('pretrain_learning_rate', self.learning_rate)
   self.final_epoch = self.config_get_final_epoch(config)  # Inclusive.
   self.max_seqs = config.int('max_seqs', -1)
   self.updater = Updater.initFromConfig(config)
   self.ctc_prior_file = config.value('ctc_prior_file', None)
   self.exclude = config.int_list('exclude', [])
   self.init_train_epoch_posthook = config.value('init_train_epoch_posthook', None)
   self.share_batches = config.bool('share_batches', False)
   self.batch_variance = config.float('batch_variance', 0.0)
   self.max_seq_length = config.float('max_seq_length', 0)
   self.inc_seq_length = config.float('inc_seq_length', 0)
   if self.max_seq_length == 0:
     self.max_seq_length = sys.maxsize  # i.e. effectively no length limit
   # And also initialize the network. That depends on some vars here such as pretrain.
   self.init_network_from_config(config)
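A rough usage sketch, not taken from the project above: given an `engine` instance and loaded datasets, the method is driven by config entries like the ones below. The values, and the `learning_rate`/`num_epochs` key names, are illustrative assumptions.

config = Config()
config.update({
    "batch_size": 5000,       # frames per batch, read above via config.int
    "max_seqs": 40,           # max parallel sequences per batch
    "learning_rate": 0.0005,  # assumed key consumed by loadLearningRateControlFromConfig
    "num_epochs": 100,        # assumed key consumed by config_get_final_epoch
})
engine.init_train_from_config(config, train_data=train, dev_data=dev)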
Code Example #2
File: TFEngine.py Project: ZhangAustin/returnn
 def init_train_from_config(self, config, train_data, dev_data, eval_data):
   """
   :type config: Config.Config
   :type train_data: Dataset.Dataset
   :type dev_data: Dataset.Dataset | None
   :type eval_data: Dataset.Dataset | None
   """
   self.use_dynamic_train_flag = True
   self.train_data = train_data
   self.dev_data = dev_data
   self.eval_data = eval_data
   self.start_epoch, self.start_batch = self.get_train_start_epoch_batch(config)
   self.batch_size = config.int('batch_size', 1)
   self.shuffle_batches = config.bool('shuffle_batches', True)
   self.update_batch_size = config.int('update_batch_size', 0)
   self.save_model_epoch_interval = config.int('save_interval', 1)
   self.save_epoch1_initial_model = config.bool('save_epoch1_initial_model', False)
   self.learning_rate_control = loadLearningRateControlFromConfig(config)
   self.learning_rate = self.learning_rate_control.defaultLearningRate
   self.initial_learning_rate = self.learning_rate
   self.pretrain_learning_rate = config.float('pretrain_learning_rate', self.learning_rate)
   self.final_epoch = self.config_get_final_epoch(config)  # Inclusive.
   self.max_seqs = config.int('max_seqs', -1)
   self.ctc_prior_file = config.value('ctc_prior_file', None)
   self.exclude = config.int_list('exclude', [])
   self.init_train_epoch_posthook = config.value('init_train_epoch_posthook', None)
   self.share_batches = config.bool('share_batches', False)
   self.seq_drop = config.float('seq_drop', 0.0)
   self.seq_drop_freq = config.float('seq_drop_freq', 10)
   self.max_seq_length = config.float('max_seq_length', 0)
   self.inc_seq_length = config.float('inc_seq_length', 0)
   if self.max_seq_length == 0:
     self.max_seq_length = sys.maxsize
   # And also initialize the network. That depends on some vars here such as pretrain.
   self.init_network_from_config(config)
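Note that, unlike Code Example #1, dev_data and eval_data here have no default values, so a caller without held-out data has to pass them explicitly (sketch):

engine.init_train_from_config(config, train_data, dev_data=None, eval_data=None)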
Code Example #3
# assumed imports, following returnn's flat module layout:
from Config import Config
from LearningRateControl import loadLearningRateControlFromConfig, NewbobRelative
from nose.tools import assert_equal


def test_init_error_new():
    config = Config()
    config.update({
        "learning_rate_control": "newbob",
        "learning_rate_control_error_measure": "dev_score"
    })
    lrc = loadLearningRateControlFromConfig(config)
    assert isinstance(lrc, NewbobRelative)
    lrc.getLearningRateForEpoch(1)
    lrc.setEpochError(1, {"train_score": {'cost:output': 1.9344199658230012}})
    lrc.setEpochError(1, {
        "dev_score": {
            'cost:output': 1.99
        },
        "dev_error": {
            'error:output': 0.6
        }
    })
    error = lrc.getEpochErrorDict(1)
    assert "train_score" in error
    assert "dev_score" in error
    assert "dev_error" in error
    assert_equal(lrc.getErrorKey(1), "dev_score")
    lrc.getLearningRateForEpoch(2)
    lrc.setEpochError(2, {"train_score": {'cost:output': 1.8}})
    lrc.setEpochError(2, {
        "dev_score": {
            'cost:output': 1.9
        },
        "dev_error": {
            'error:output': 0.5
        }
    })
    lrc.getLearningRateForEpoch(3)
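In this "new" format, setEpochError receives a dict per output layer. With only a single output, the value is apparently stored under the plain key (the asserts above check key presence, not values), so reading it back might look like this sketch:

error = lrc.getEpochErrorDict(1)
print(error["dev_score"])  # expected 1.99, the single 'cost:output' value set above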
Code Example #4
 def init_train_from_config(self,
                            config,
                            train_data,
                            dev_data=None,
                            eval_data=None):
     """
 :type config: Config.Config
 :type train_data: Dataset.Dataset
 :type dev_data: Dataset.Dataset | None
 :type eval_data: Dataset.Dataset | None
 """
     self.train_data = train_data
     self.dev_data = dev_data
     self.eval_data = eval_data
     self.start_epoch, self.start_batch = self.get_train_start_epoch_batch(
         config)
     self.batch_size = config.int('batch_size', 1)
     self.shuffle_batches = config.bool('shuffle_batches', False)
     self.update_batch_size = config.int('update_batch_size', 0)
     self.batch_size_eval = config.int('batch_size_eval',
                                       self.update_batch_size)
     self.model_filename = config.value('model', None)
     self.save_model_epoch_interval = config.int('save_interval', 1)
     self.save_epoch1_initial_model = config.bool(
         'save_epoch1_initial_model', False)
     self.learning_rate_control = loadLearningRateControlFromConfig(config)
     self.learning_rate = self.learning_rate_control.defaultLearningRate
     self.initial_learning_rate = self.learning_rate
     self.pretrain_learning_rate = config.float('pretrain_learning_rate',
                                                self.learning_rate)
     self.final_epoch = self.config_get_final_epoch(config)  # Inclusive.
     self.max_seqs = config.int('max_seqs', -1)
     self.max_seqs_eval = config.int('max_seqs_eval', self.max_seqs)
     self.updater = Updater.initFromConfig(config)
     self.ctc_prior_file = config.value('ctc_prior_file', None)
     self.exclude = config.int_list('exclude', [])
     self.init_train_epoch_posthook = config.value(
         'init_train_epoch_posthook', None)
     self.share_batches = config.bool('share_batches', False)
     self.seq_drop = config.float('seq_drop', 0.0)
     self.seq_drop_freq = config.float('seq_drop_freq', 10)
     self.max_seq_length = config.float('max_seq_length', 0)
     self.inc_seq_length = config.float('inc_seq_length', 0)
     self.max_seq_length_eval = config.int('max_seq_length_eval', 2e31)
     self.output_precision = config.int('output_precision', 12)
     self.reduction_rate = config.float('reduction_rate', 1.0)
     self.batch_pruning = config.float('batch_pruning', 0.0)
     if self.max_seq_length == 0:
         self.max_seq_length = sys.maxsize
     if config.is_typed("seq_train_parallel"):
         self.seq_train_parallel = SeqTrainParallelControl(
             engine=self,
             config=config,
             **config.typed_value("seq_train_parallel"))
     else:
         self.seq_train_parallel = None
     # And also initialize the network. That depends on some vars here such as pretrain.
     self.init_network_from_config(config)
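The seq_train_parallel branch forwards a typed config dict as keyword arguments to SeqTrainParallelControl. A hypothetical way to enable it with defaults; whether Config.update registers dict values as typed is an assumption here:

config.update({"seq_train_parallel": {}})  # hypothetical: empty dict, so only engine and config are passed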
Code Example #5
# imports as in Code Example #3
def test_init_error_old():
  config = Config()
  config.update({"learning_rate_control": "newbob", "learning_rate_control_error_measure": "dev_score"})
  lrc = loadLearningRateControlFromConfig(config)
  assert isinstance(lrc, NewbobRelative)
  lrc.getLearningRateForEpoch(1)
  lrc.setEpochError(1, {"train_score": 1.9344199658230012})
  lrc.setEpochError(1, {"dev_score": 1.99, "dev_error": 0.6})
  error = lrc.getEpochErrorDict(1)
  assert "train_score" in error
  assert "dev_score" in error
  assert "dev_error" in error
  assert_equal(lrc.getErrorKey(1), "dev_score")
  lrc.getLearningRateForEpoch(2)
  lrc.setEpochError(2, {"train_score": 1.8})
  lrc.setEpochError(2, {"dev_score": 1.9, "dev_error": 0.5})
  lrc.getLearningRateForEpoch(3)
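Compared with Code Example #3, this "old" format stores plain floats, so the keys come back without per-output suffixes (sketch; values from the epoch-2 calls above):

error = lrc.getEpochErrorDict(2)
# expected: {"train_score": 1.8, "dev_score": 1.9, "dev_error": 0.5}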
Code Example #6
# imports as in Code Example #3
def test_init_error_multiple_out():
  config = Config()
  config.update({"learning_rate_control": "newbob", "learning_rate_control_error_measure": "dev_score"})
  lrc = loadLearningRateControlFromConfig(config)
  assert isinstance(lrc, NewbobRelative)
  lrc.getLearningRateForEpoch(1)
  lrc.setEpochError(1, {"train_score": {'cost:output': 1.95, "cost:out2": 2.95}})
  lrc.setEpochError(1, {"dev_score": {'cost:output': 1.99, "cost:out2": 2.99},
                        "dev_error": {'error:output': 0.6, "error:out2": 0.7}})
  error = lrc.getEpochErrorDict(1)
  assert "train_score_output" in error
  assert "train_score_out2" in error
  assert "dev_score_output" in error
  assert "dev_score_out2" in error
  assert "dev_error_output" in error
  assert "dev_error_out2" in error
  assert_equal(lrc.getErrorKey(1), "dev_score_output")
  lrc.getLearningRateForEpoch(2)
  lrc.setEpochError(2, {"train_score": {'cost:output': 1.8, "cost:out2": 2.8}})
  lrc.setEpochError(2, {"dev_score": {'cost:output': 1.9, "cost:out2": 2.9},
                        "dev_error": {'error:output': 0.5, "error:out2": 0.6}})
  lrc.getLearningRateForEpoch(3)
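After the epoch-2 updates, the flattened per-output keys should mirror the epoch-1 asserts (sketch; values from the setEpochError calls above):

error = lrc.getEpochErrorDict(2)
# expected, given the flattening seen for epoch 1:
# {"train_score_output": 1.8, "train_score_out2": 2.8,
#  "dev_score_output": 1.9, "dev_score_out2": 2.9,
#  "dev_error_output": 0.5, "dev_error_out2": 0.6}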