# Excerpted method; assumes module-level `import time`, `import numpy as np`,
# and the `is_last_best()` helper sketched after this function.
def __do_validation(self):
    """Do early-stopping validation."""
    if self.ectr >= self.valid_start:
        self.vctr += 1

        # Compute validation loss with dropout disabled
        self.model.set_dropout(False)
        cur_loss = self.model.val_loss()
        self.model.set_dropout(True)

        # Record val_loss
        self.valid_metrics['loss'].append(cur_loss)

        # Print validation loss
        self.__print("Validation %2d - LOSS = %.3f (PPL: %.3f)"
                     % (self.vctr, cur_loss, np.exp(cur_loss)))

        #############################
        # Are we doing beam search? #
        #############################
        if self.beam_metrics:
            beam_results = None

            # Save beam search results?
            f_valid_out = None
            if self.valid_save_hyp:
                f_valid_out = "{0}.{1:03d}".format(self.valid_save_prefix, self.vctr)

            self.__print('Calling beam-search process')
            beam_time = time.time()
            beam_results = self.model.run_beam_search(beam_size=self.beam_size,
                                                      n_jobs=self.njobs,
                                                      metric=self.beam_metrics,
                                                      f_valid_out=f_valid_out)
            beam_time = time.time() - beam_time
            self.__print('Beam-search ended, took %.5f minutes.' % (beam_time / 60.))

            if beam_results:
                # beam_results: {name: (metric_str, metric_float)}
                # Names are as defined in metrics/*.py, e.g. BLEU, METEOR,
                # but we use lowercase names in conf files.
                self.__send_stats(self.vctr, **beam_results)
                for name, (metric_str, metric_value) in beam_results.items():
                    self.__print("Validation %2d - %s" % (self.vctr, metric_str))
                    self.valid_metrics[name.lower()].append(metric_value)
            else:
                self.__print('Skipping this validation since nmt-translate probably failed.')
                # Return to the training loop since nmt-translate did not run
                # correctly. This lets you fix your model's build_sampler
                # while training continues.
                return

        # Is this the best evaluation based on the early-stop metric?
        if is_last_best(self.early_metric, self.valid_metrics[self.early_metric],
                        self.patience_delta):
            self.__save_best_model()
            self.early_bad = 0
        else:
            self.early_bad += 1

        self.__print("Early stopping patience: %d validation(s) left"
                     % (self.patience - self.early_bad))
        self.__dump_val_summary()
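
# `is_last_best()` is defined elsewhere in the codebase. Below is a minimal
# sketch of the comparison it presumably performs; the default `delta` and the
# lower-is-better handling of 'loss' are assumptions, not source code.
def is_last_best(metric, history, delta=0.):
    """Hypothetical sketch: is the newest entry in `history` the best so far?

    For 'loss' lower is better; for BLEU/METEOR-style metrics higher is
    better. `delta` is the minimum improvement needed to count as a new best.
    """
    if len(history) == 1:
        return True  # the first validation is trivially the best
    if metric == 'loss':
        return history[-1] < min(history[:-1]) - delta
    return history[-1] > max(history[:-1]) + delta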
# Variant with a discriminator-accuracy check for GAN training; the dropout
# toggling is commented out in this version.
def __do_validation(self):
    """Do early-stopping validation."""
    if self.ectr >= self.valid_start:
        self.vctr += 1

        # Compute validation loss
        # self.model.set_dropout(False)
        cur_loss = self.model.val_loss()
        # self.model.set_dropout(True)

        # Record val_loss
        self.valid_metrics['loss'].append(cur_loss)

        # Print validation accuracy (here accuracy = 1 - loss)
        self.__print("Validation %2d - ACC = %.3f (LOSS: %.3f)"
                     % (self.vctr, 1.0 - cur_loss, cur_loss))

        f_valid_out = None
        if self.valid_save_hyp:
            f_valid_out = "{0}.{1:03d}".format(self.valid_save_prefix, self.vctr)

        if is_last_best('loss', self.valid_metrics['loss']):
            if self.valid_save_hyp:
                # Create a link towards the best hypothesis file
                force_symlink(f_valid_out, '%s.BEST' % self.valid_save_prefix,
                              relative=True)
            self.__save_best_model()
            self.early_bad = 0
        else:
            self.early_bad += 1

        self.__print("Early stopping patience: %d validation(s) left"
                     % (self.patience - self.early_bad))
        self.__dump_val_summary()

        # Khoa: GAN setup - stop discriminator pre-training once it reaches
        # the target initial accuracy `max_acc`.
        if cur_loss < (1 - self.max_acc):
            self.__print("Reached maximum accuracy %.3f: current accuracy = %.3f"
                         % (self.max_acc, 1 - cur_loss))
            return False
        else:
            return True

    return True
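
# Since this variant returns a boolean, the caller can gate discriminator
# pre-training on it. A hypothetical driver loop for illustration;
# `__train_epoch` and the overall flow are assumptions, not source code.
def pretrain_discriminator(self):
    """Hypothetical sketch: pre-train until __do_validation() says stop."""
    while True:
        self.__train_epoch()  # assumed per-epoch training routine
        if not self.__do_validation():
            # Discriminator is accurate enough; switch to the adversarial
            # generator/discriminator phase.
            break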
# Variant that takes a target `min_loss` and reports whether training
# should continue.
def __do_validation(self, min_loss):
    """Do early-stopping validation."""
    if self.ectr >= self.valid_start:
        self.vctr += 1

        # Compute validation loss with dropout disabled
        self.model.set_dropout(False)
        cur_loss = self.model.val_loss()
        self.model.set_dropout(True)

        # Record val_loss
        self.valid_metrics['loss'].append(cur_loss)

        # Print validation loss
        self.__print("Validation %2d - LOSS = %.3f (PPL: %.3f)"
                     % (self.vctr, cur_loss, np.exp(cur_loss)))

        f_valid_out = None
        if self.valid_save_hyp:
            f_valid_out = "{0}.{1:03d}".format(self.valid_save_prefix, self.vctr)

        if is_last_best('loss', self.valid_metrics['loss']):
            if self.valid_save_hyp:
                # Create a link towards the best hypothesis file
                force_symlink(f_valid_out, '%s.BEST' % self.valid_save_prefix,
                              relative=True)
            self.__save_best_model()
            self.early_bad = 0
        else:
            self.early_bad += 1

        self.__print("Early stopping patience: %d validation(s) left"
                     % (self.patience - self.early_bad))
        self.__dump_val_summary()

        # Stop once the validation loss drops below the target
        if cur_loss < min_loss:
            return False

    # Explicitly signal "keep training" instead of falling through to None
    return True
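
# `force_symlink()` is another helper defined elsewhere. A plausible sketch;
# the `relative=True` behavior (linking by basename, assuming link and target
# share a directory) is an assumption, not source code.
import os

def force_symlink(target, link_name, relative=False):
    """Hypothetical sketch: (re)create link_name -> target, replacing any
    existing link."""
    if relative:
        target = os.path.basename(target)
    try:
        os.symlink(target, link_name)
    except FileExistsError:
        os.remove(link_name)
        os.symlink(target, link_name)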