Example #1
    def on_epoch_end(self, epoch, logs=None):
        """Per-epoch logic for managing the learning rate and early stopping."""
        stop_training = False
        # Track the best loss; advance the scheduler stage once patience is exhausted.
        if isinstance(logs, dict):
            loss = logs['val_loss']
        else:
            loss = logs
        if loss <= self.best_loss:
            self.best_loss = loss
            self.wait = 0
        else:
            self.wait += 1
            if self.wait > self.patience:
                self.scheduler_stage += 1
                self.wait = 0

        # calculate and set learning rate
        lr = self.lr * np.power(self.drop, self.scheduler_stage)
        K.set_value(self.lr_tensor, lr)

        # Built-in early stop once the learning rate has decayed too far to matter.
        if lr <= 1e-7:
            stop_training = True

        # Propagate the stop flag to the Keras training loop.
        if hasattr(self, 'model') and self.model is not None:
            self.model.stop_training = stop_training

        return stop_training
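The state read by this method (lr, drop, patience, best_loss, wait, scheduler_stage, lr_tensor) is initialized elsewhere in the callback. A minimal sketch of how such a callback might be constructed and attached to training, with an assumed class name and constructor, could look like this:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

class StepDecayEarlyStopping(tf.keras.callbacks.Callback):
    # Hypothetical constructor; it only illustrates the state used by on_epoch_end.
    def __init__(self, lr=1e-3, drop=0.5, patience=5):
        super().__init__()
        self.lr = lr                      # base learning rate
        self.drop = drop                  # multiplicative decay per scheduler stage
        self.patience = patience          # epochs without improvement before decaying
        self.best_loss = np.inf
        self.wait = 0
        self.scheduler_stage = 0
        self.lr_tensor = K.variable(lr)   # variable the training graph reads its lr from

# Needs validation data, since on_epoch_end reads logs['val_loss']:
# model.fit(x, y, validation_data=(x_val, y_val),
#           callbacks=[StepDecayEarlyStopping(lr=1e-3, drop=0.5, patience=5)])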
Example #2
    def on_batch_begin(self, batch, logs=None):
        # Bail out once past the first epoch if steps_per_epoch is still unknown.
        if self.epochs > 0 and self.steps_per_epoch is None:
            return
        elif self.steps_per_epoch is not None:
            # Global step index across all epochs seen so far.
            self.steps = self.epochs * self.steps_per_epoch + batch

        # While still inside the warmup window, override the optimizer's learning rate.
        if self.steps < self.warmup_steps:
            lr = self.warmup_schedual(self.steps)
            K.set_value(getattr(self.model.optimizer, self.lr_attr_name), lr)
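Here warmup_schedual, warmup_steps, steps_per_epoch, and lr_attr_name are set up elsewhere in the callback. As an assumption (not the author's actual schedule), a simple linear warmup that could back warmup_schedual might be:

def linear_warmup(step, base_lr=1e-3, warmup_steps=1000):
    # Ramp the learning rate linearly from near zero up to base_lr over warmup_steps.
    return base_lr * float(step + 1) / float(warmup_steps)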
Example #3
    def on_epoch_begin(self, epoch, logs=None):
        self.epochs = epoch
        # Resolve the optimizer's learning-rate attribute name on the first call.
        if self.lr_attr_name is None:
            if not hasattr(self.model.optimizer, 'lr'):
                if not hasattr(self.model.optimizer, 'learning_rate'):
                    raise ValueError(
                        'Optimizer must have an "lr" or "learning_rate" attribute.'
                    )
                self.lr_attr_name = 'learning_rate'
            else:
                self.lr_attr_name = 'lr'

        # Query the schedule for this epoch and push the value to the optimizer.
        lr = self.epoch_schedual(epoch)
        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be a float.')
        K.set_value(getattr(self.model.optimizer, self.lr_attr_name), lr)
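The schedule passed in as epoch_schedual must return a plain float (or NumPy float), otherwise the type check above raises. An illustrative step-decay schedule that satisfies this contract, with made-up default values:

def step_decay(epoch, base_lr=1e-3, drop=0.5, epochs_per_drop=10):
    # Multiply the base rate by `drop` once every `epochs_per_drop` epochs,
    # returning a plain float so the isinstance check passes.
    return float(base_lr * (drop ** (epoch // epochs_per_drop)))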
Example #4
    def on_epoch_end(self, epoch, logs=None):
        """For managing learning rate, early stopping, and temperature."""
        stop_training = False
        min_tem = self.min_tem
        anneal_rate = 0.00003
        # Anneal the Gumbel-softmax temperature every 20 epochs, never below min_tem.
        if self.gumble and epoch % 20 == 0:
            self.tau = np.maximum(self.tau * np.exp(-anneal_rate * epoch),
                                  min_tem)
            K.set_value(self.tau_tensor, self.tau)
        # Track the best loss; advance the scheduler stage once patience is exhausted.
        if isinstance(logs, dict):
            loss = logs['loss']
        else:
            loss = logs
        if loss <= self.best_loss:
            self.best_loss = loss
            self.wait = 0
        else:
            self.wait += 1
            if self.wait > self.patience:
                self.scheduler_stage += 1
                self.wait = 0
        # Stop immediately if the loss has diverged to NaN.
        if math.isnan(loss):
            stop_training = True
        # calculate and set learning rate
        lr = self.lr * np.power(self.drop, self.scheduler_stage)
        K.set_value(self.lr_tensor, lr)

        # Built-in early stop once the learning rate has decayed too far to matter.
        if lr <= 1e-9:
            stop_training = True

        # Propagate the stop flag to the Keras training loop.
        if hasattr(self, 'model') and self.model is not None:
            self.model.stop_training = stop_training

        return stop_training
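For intuition on the temperature branch: with anneal_rate = 0.00003 and an update only every 20 epochs, the Gumbel-softmax temperature decays very slowly toward min_tem. A small sketch of the same update rule, assuming tau starts at 1.0 and min_tem is 0.5 (illustrative values only):

import numpy as np

tau, min_tem, anneal_rate = 1.0, 0.5, 0.00003
for epoch in range(0, 101, 20):
    # Same multiplicative decay, clipped below at min_tem, as in on_epoch_end.
    tau = float(np.maximum(tau * np.exp(-anneal_rate * epoch), min_tem))
    print(epoch, round(tau, 6))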