Example #1
    def __save_best_model(self):
        """Saves best N models to disk."""
        if self.save_best_n > 0:
            # Get the score of the system that will be saved
            cur_score = self.valid_metrics[self.early_metric][-1]

            # Custom filename with metric score
            cur_fname = "%s-val%3.3d-%s_%.3f.npz" % (
                self.model.save_path, self.vctr, self.early_metric, cur_score)

            # Stack not full yet, just append the new model
            if len(self.best_models) < self.save_best_n:
                self.best_models.append((cur_score, cur_fname))

            # Stack is full, replace the worst model
            else:
                os.unlink(self.best_models[self.next_prune_idx][1])
                self.best_models[self.next_prune_idx] = (cur_score, cur_fname)

            self.__print('Saving model with best validation %s' %
                         self.early_metric.upper())
            self.model.save(cur_fname)

            # Create a .BEST symlink
            force_symlink(cur_fname, ('%s.BEST.npz' % self.model.save_path),
                          relative=True)

            # Pick the index that will be pruned from the list/disk the
            # next time a better model arrives (metric-specific comparator)
            where = comparators[self.early_metric][-1]
            self.next_prune_idx = sorted(
                range(len(self.best_models)),
                key=self.best_models.__getitem__)[where]
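
The snippet above leans on two helpers that are not shown: a module-level comparators table (indexed with [-1] to pick which entry to prune) and force_symlink. A minimal sketch of plausible implementations, assuming an ascending sort in which the worst loss model lands last and the worst higher-is-better model lands first; the tuple layout and the relative-link handling are assumptions, not the original source:

    import os

    # Last element = index of the worst entry in an ascending sort
    comparators = {
        'loss': (min, -1),    # lower is better -> worst sorts last
        'bleu': (max, 0),     # higher is better -> worst sorts first
        'meteor': (max, 0),
    }

    def force_symlink(target, link_name, relative=False):
        """Create link_name -> target, replacing any existing link."""
        if relative:
            # Assume the link lives next to the target file
            target = os.path.basename(target)
        try:
            os.symlink(target, link_name)
        except FileExistsError:
            os.unlink(link_name)
            os.symlink(target, link_name)

    # Quick check of the pruning index for a lower-is-better metric:
    best_models = [(0.42, 'a.npz'), (0.35, 'b.npz'), (0.51, 'c.npz')]
    where = comparators['loss'][-1]
    print(sorted(range(len(best_models)),
                 key=best_models.__getitem__)[where])  # -> 2 (worst loss)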
Example #2
    def __do_validation(self):
        """Do early-stopping validation."""
        if self.ectr >= self.valid_start:
            self.vctr += 1

            # Compute validation loss
            # self.model.set_dropout(False)
            cur_loss = self.model.val_loss()
            # self.model.set_dropout(True)

            # Add val_loss
            self.valid_metrics['loss'].append(cur_loss)

            # Print validation loss
            self.__print("Validation %2d - ACC = %.3f (LOSS: %.3f)" %
                         (self.vctr, 1.0 - cur_loss, cur_loss))

            f_valid_out = None
            if self.valid_save_hyp:
                f_valid_out = "{0}.{1:03d}".format(self.valid_save_prefix,
                                                   self.vctr)

            if is_last_best('loss', self.valid_metrics['loss']):
                if self.valid_save_hyp:
                    # Create a link towards best hypothesis file
                    force_symlink(f_valid_out,
                                  '%s.BEST' % self.valid_save_prefix,
                                  relative=True)

                self.__save_best_model()
                self.early_bad = 0
            else:
                self.early_bad += 1
                self.__print("Early stopping patience: %d validation left" %
                             (self.patience - self.early_bad))

            self.__dump_val_summary()
            # Khoa: stop early once the Discriminator reaches the target
            # accuracy (GAN setup)
            if cur_loss < (1 - self.max_acc):
                self.__print(
                    "Reached maximum accuracy %.3f: current accuracy = %.3f" %
                    (self.max_acc, 1 - cur_loss))
                return False
                return False
            else:
                return True

        return True
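
Both __do_validation variants gate the best-model logic on is_last_best. A plausible sketch consistent with how it is called here, assuming 'loss' is lower-is-better and every other metric is higher-is-better:

    def is_last_best(metric, history):
        """Return True if history[-1] is the best value seen so far."""
        if len(history) == 1:
            return True
        if metric == 'loss':
            return history[-1] < min(history[:-1])
        return history[-1] > max(history[:-1])

    assert is_last_best('loss', [0.9, 0.7, 0.5])
    assert not is_last_best('bleu', [27.4, 26.9])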
Example #3
    def __do_validation(self, min_loss):
        """Do early-stopping validation."""
        if self.ectr >= self.valid_start:
            self.vctr += 1

            # Compute validation loss
            self.model.set_dropout(False)
            cur_loss = self.model.val_loss()
            self.model.set_dropout(True)

            # Add val_loss
            self.valid_metrics['loss'].append(cur_loss)

            # Print validation loss
            self.__print("Validation %2d - LOSS = %.3f (PPL: %.3f)" %
                         (self.vctr, cur_loss, np.exp(cur_loss)))

            f_valid_out = None
            if self.valid_save_hyp:
                f_valid_out = "{0}.{1:03d}".format(self.valid_save_prefix,
                                                   self.vctr)

            if is_last_best('loss', self.valid_metrics['loss']):
                if self.valid_save_hyp:
                    # Create a link towards best hypothesis file
                    force_symlink(f_valid_out,
                                  '%s.BEST' % self.valid_save_prefix,
                                  relative=True)

                self.__save_best_model()
                self.early_bad = 0
            else:
                self.early_bad += 1
                self.__print("Early stopping patience: %d validation left" %
                             (self.patience - self.early_bad))

            self.__dump_val_summary()

            if cur_loss < min_loss:
                return False

        return True
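
Unlike Example #2, which logs 1 - loss as an accuracy, this variant reports perplexity as exp(loss). A standalone check of that relationship for a cross-entropy loss in nats:

    import numpy as np

    cur_loss = 2.302585  # ~ ln(10)
    print("LOSS = %.3f (PPL: %.3f)" % (cur_loss, np.exp(cur_loss)))
    # -> LOSS = 2.303 (PPL: 10.000)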
Example #4
    def __do_validation(self):
        """Do early-stopping validation."""
        if self.ectr >= self.valid_start:
            self.vctr += 1

            # Compute validation loss
            self.model.set_dropout(False)
            cur_loss = self.model.val_loss()
            self.model.set_dropout(True)

            # Add val_loss
            self.valid_metrics['loss'].append(cur_loss)

            # Print validation loss
            self.__print("Validation %2d - LOSS = %.3f (PPL: %.3f)" %
                         (self.vctr, cur_loss, np.exp(cur_loss)))

            # Hypothesis file for this validation; defined before the beam
            # block so the best-model symlink below works even when beam
            # search is disabled
            f_valid_out = None
            if self.valid_save_hyp:
                f_valid_out = "{0}.{1:03d}".format(self.valid_save_prefix,
                                                   self.vctr)

            #############################
            # Are we doing beam search? #
            #############################
            if self.beam_metrics:
                beam_results = None

                self.__print('Calling beam-search process')
                beam_time = time.time()
                beam_results = self.model.run_beam_search(
                    beam_size=self.beam_size,
                    n_jobs=self.njobs,
                    metric=self.beam_metrics,
                    mode='beamsearch',
                    valid_mode=self.valid_mode,
                    f_valid_out=f_valid_out)
                beam_time = time.time() - beam_time
                self.__print('Beam-search ended, took %.5f minutes.' %
                             (beam_time / 60.))

                if beam_results:
                    # beam_results: {name: (metric_str, metric_float)}
                    # names are as defined in metrics/*.py like BLEU, METEOR
                    # but we use lowercase names in conf files.
                    self.__send_stats(self.vctr, **beam_results)
                    for name, (metric_str,
                               metric_value) in beam_results.items():
                        self.__print("Validation %2d - %s" %
                                     (self.vctr, metric_str))
                        self.valid_metrics[name.lower()].append(metric_value)
                else:
                    self.__print(
                        'Skipping this validation since nmt-translate probably failed.'
                    )
                    # Return to the training loop since nmt-translate did not
                    # run correctly; this lets you fix the model's
                    # build_sampler while training continues.
                    return

            # Is this the best evaluation based on early-stop metric?
            if is_last_best(self.early_metric,
                            self.valid_metrics[self.early_metric]):
                if self.valid_save_hyp:
                    # Create a link towards best hypothesis file
                    force_symlink(f_valid_out,
                                  '%s.BEST' % self.valid_save_prefix,
                                  relative=True)

                self.__save_best_model()
                self.early_bad = 0
            else:
                self.early_bad += 1
                self.__print("Early stopping patience: %d validation left" %
                             (self.patience - self.early_bad))

            self.__dump_val_summary()
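
The inline comment above documents beam_results as {name: (metric_str, metric_float)} with uppercase metric names as defined in metrics/*.py. A minimal sketch of that consumption loop; the concrete scores are made up for illustration:

    beam_results = {
        'BLEU': ('BLEU = 27.43 (BP=1.000)', 27.43),
        'METEOR': ('METEOR = 0.312', 0.312),
    }

    valid_metrics = {'bleu': [], 'meteor': []}
    for name, (metric_str, metric_value) in beam_results.items():
        print("Validation %2d - %s" % (1, metric_str))
        valid_metrics[name.lower()].append(metric_value)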