# Example 1
 def setup_after_stop_training(self, model, final_evaluation):
     """Restore the best model seen so far and, when running the final
     evaluation, install a stop criterion targeting the pre-stop train loss.

     Parameters
     ----------
     model : the network whose parameters are reset to the remembered best
     final_evaluation : bool
         If True, replace ``self.stop_criterion`` so training continues
         until the validation loss drops below the last recorded train
         loss, bounded by the configured ``final_max_epochs``.
     """
     self.rememberer.reset_to_best_model(self.epochs_df, model,
                                         self.optimizer)
     if not final_evaluation:
         return
     # Target: push valid_loss below the train loss reached before the stop.
     target = float(self.epochs_df['train_loss'].iloc[-1])
     criteria = [
         MaxEpochs(max_epochs=global_vars.get('final_max_epochs')),
         ColumnBelow(column_name='valid_loss', target_value=target),
     ]
     self.stop_criterion = Or(stop_criteria=criteria)
# Example 2
 def setup_after_stop_training(self):
     """Prepare the second training phase after the first stop.

     Saves a copy of the monitoring dataframe, restores the best model
     parameters, and swaps in a new stop criterion: run until the
     validation loss falls below the best train loss, capped at twice
     the best epoch number.
     """
     # Keep the pre-stop monitor values; they are put back into the
     # monitor channels once the experiment has finished.
     self.before_stop_df = deepcopy(self.epochs_df)
     self.rememberer.reset_to_best_model(self.epochs_df, self.model,
                                         self.optimizer)
     target_loss = float(self.epochs_df['train_loss'].iloc[-1])
     criteria = [
         MaxEpochs(max_epochs=self.rememberer.best_epoch * 2),
         ColumnBelow(column_name='valid_loss', target_value=target_loss),
     ]
     self.stop_criterion = Or(stop_criteria=criteria)
     log.info("Train loss to reach {:.5f}".format(target_loss))
# Example 3
    def setup_after_stop_training(self):
        """Configure the continuation run that follows the first stop.

        Snapshots the monitoring history, resets parameters to the best
        ones remembered so far, and installs a combined stop criterion:
        an epoch cap of twice the best epoch OR the validation loss
        dropping below the last train loss.
        """
        # Snapshot of pre-stop monitor channels; restored into the
        # monitor channels after the experiment completes.
        self.before_stop_df = deepcopy(self.epochs_df)
        self.rememberer.reset_to_best_model(self.epochs_df, self.model,
                                            self.optimizer)
        loss_goal = float(self.epochs_df['train_loss'].iloc[-1])
        epoch_cap = MaxEpochs(max_epochs=self.rememberer.best_epoch * 2)
        loss_target = ColumnBelow(column_name='valid_loss',
                                  target_value=loss_goal)
        self.stop_criterion = Or(stop_criteria=[epoch_cap, loss_target])
        log.info("Train loss to reach {:.5f}".format(loss_goal))


# def exp_lr_scheduler(optimizer, global_step, init_lr, decay_steps, decay_rate, lr_clip, staircase=True):
#     if staircase:
#         lr = init_lr * decay_rate
#     else:
#         lr = init_lr * decay_rate**(global_step / decay_steps)
#     lr = max(lr, lr_clip)
#     #print(lr)
#     if global_step % decay_steps == 0:
#         print('LR is set to {}'.format(lr))
#     lr=0.1
#     for param_group in optimizer.param_groups:
#         param_group['lr'] = lr

#     return optimizer

# def adjust_lr(optimizer, epoch):
#     lr = init_lr * (0.1 ** (epoch // 20))
#     for param_group in optimizer.param_groups:
#         param_group['lr'] = lr
#     return optimizer