# LambdaLR and ReduceLROnPlateau come from torch.optim.lr_scheduler
def _build_lr_scheduler(self):
    if self.args.force_anneal > 0:
        def anneal(e):
            if e < self.args.force_anneal:
                # keep the initial LR until the anneal point
                return 1
            else:
                # then shrink it exponentially each epoch
                return self.args.lrshrink ** (e + 1 - self.args.force_anneal)
        lr_scheduler = LambdaLR(self.optimizer, anneal)
        lr_scheduler.best = None
    else:
        # decay the LR by a factor of 0.1 (the default) every time the validation loss plateaus
        lr_scheduler = ReduceLROnPlateau(self.optimizer, patience=0)
    return lr_scheduler
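Whichever branch runs, the caller has to drive the two scheduler types differently: LambdaLR is stepped once per epoch, while ReduceLROnPlateau must be stepped with the validation loss so it can detect a plateau. A minimal standalone sketch of that interaction (the SGD optimizer, parameters, and constant val_loss below are placeholders, not part of the original trainer):

import torch
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau

# Toy stand-in for self.optimizer; the real trainer wraps the model's parameters.
optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.25)
lr_scheduler = ReduceLROnPlateau(optimizer, patience=0, factor=0.1)

for epoch in range(5):
    val_loss = 1.0  # placeholder: a non-improving loss makes the plateau scheduler cut the LR
    if isinstance(lr_scheduler, ReduceLROnPlateau):
        lr_scheduler.step(val_loss)  # plateau scheduler needs the validation metric
    else:
        lr_scheduler.step()          # LambdaLR just advances its epoch counter
    print(epoch, optimizer.param_groups[0]['lr'])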
Example #2
    def _build_lr_scheduler(self):
        if len(self.args.lr) > 1 or self.args.force_anneal > 0:
            lrs = self.args.lr

            def anneal(e):
                if e < self.args.force_anneal:
                    # use fixed LR schedule
                    next_lr = lrs[min(e, len(lrs) - 1)]
                else:
                    next_lr = lrs[-1] * self.args.lrshrink ** (e + 1 - self.args.force_anneal)
                return next_lr / lrs[0]  # correct for scaling from LambdaLR

            lr_scheduler = LambdaLR(self.optimizer, anneal)
            lr_scheduler.best = None
        else:
            # decay the LR by a factor every time the validation loss plateaus
            lr_scheduler = ReduceLROnPlateau(self.optimizer, patience=0,
                                             factor=self.args.lrshrink)
        return lr_scheduler
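The division by lrs[0] in the second example matters because LambdaLR multiplies each parameter group's initial LR by the lambda's return value; returning next_lr / lrs[0] therefore makes the effective LR exactly next_lr. A small sketch under assumed values (the toy optimizer and the lrs / force_anneal / lrshrink numbers below are illustrative, not the trainer's real arguments):

import torch
from torch.optim.lr_scheduler import LambdaLR

lrs = [0.25, 0.1, 0.05]          # stands in for self.args.lr
force_anneal, lrshrink = 3, 0.5  # stands in for --force-anneal / --lrshrink

optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=lrs[0])

def anneal(e):
    if e < force_anneal:
        next_lr = lrs[min(e, len(lrs) - 1)]                      # fixed per-epoch schedule
    else:
        next_lr = lrs[-1] * lrshrink ** (e + 1 - force_anneal)   # exponential shrink
    return next_lr / lrs[0]  # LambdaLR scales the initial LR (lrs[0]) by this value

scheduler = LambdaLR(optimizer, anneal)
for epoch in range(6):
    print(epoch, optimizer.param_groups[0]['lr'])  # prints 0.25, 0.1, 0.05, 0.025, ...
    scheduler.step()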