def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    # Expose the current learning rate in the epoch logs.
    logs['lr'] = K.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
        warnings.warn(
            'Learning Rate Plateau Reducing requires %s available!' %
            self.monitor, RuntimeWarning)
    else:
        # During cooldown, count the remaining epochs off and hold patience.
        if self.in_cooldown():
            self.cooldown_counter -= 1
            self.wait = 0

        if self.monitor_op(current, self.best):
            # Monitored metric improved: record it and reset patience.
            self.best = current
            self.wait = 0
        elif not self.in_cooldown():
            # No improvement: reduce the LR once patience is exhausted.
            if self.wait >= self.patience:
                old_lr = float(K.get_value(self.model.optimizer.lr))
                if old_lr > self.min_lr + self.lr_epsilon:
                    new_lr = old_lr * self.factor
                    new_lr = max(new_lr, self.min_lr)
                    K.set_value(self.model.optimizer.lr, new_lr)
                    if self.verbose > 0:
                        print(
                            '\nEpoch %05d: reducing learning rate to %s.' %
                            (epoch, new_lr))
                    self.cooldown_counter = self.cooldown
                    self.wait = 0
            self.wait += 1
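This hook is the heart of Keras's ReduceLROnPlateau callback. A minimal usage sketch, assuming a compiled model and training/validation arrays already exist:

from keras.callbacks import ReduceLROnPlateau

# Halve the LR after 5 stagnant epochs of val_loss, never below 1e-5.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=5, min_lr=1e-5, verbose=1)
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          epochs=50, callbacks=[reduce_lr])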
Example #2
  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    logs['lr'] = K.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Reduce LR on plateau conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s' %
                      (self.monitor, ','.join(list(logs.keys()))))
    else:
      if self.in_cooldown():
        self.cooldown_counter -= 1
        self.wait = 0

      if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
      elif not self.in_cooldown():
        if self.wait >= self.patience:
          old_lr = float(K.get_value(self.model.optimizer.lr))
          if old_lr > self.min_lr + self.lr_epsilon:
            new_lr = old_lr * self.factor
            new_lr = max(new_lr, self.min_lr)
            K.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose > 0:
              print('\nEpoch %05d: reducing learning rate to %s.' % (epoch,
                                                                     new_lr))
            self.cooldown_counter = self.cooldown
            self.wait = 0
        self.wait += 1
Example #3
def get_config(self):
    # Serializable hyperparameters of the Adagrad optimizer.
    config = {
        'lr': float(K.get_value(self.lr)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adagrad, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
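For context, a sketch of how such a config round-trips through Keras's serialization machinery (the same pattern applies to the SGD and Nadam variants below); assumes the standalone keras package:

from keras.optimizers import Adagrad

opt = Adagrad(lr=0.01, decay=1e-6)
config = opt.get_config()                # plain dict of floats/bools
restored = Adagrad.from_config(config)   # equivalent optimizer rebuilt from it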
Example #4
def get_config(self):
    # Serializable hyperparameters of the SGD optimizer.
    config = {
        'lr': float(K.get_value(self.lr)),
        'momentum': float(K.get_value(self.momentum)),
        'decay': float(K.get_value(self.decay)),
        'nesterov': self.nesterov
    }
    base_config = super(SGD, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
Example #5
def get_config(self):
    # Serializable hyperparameters of the Nadam optimizer.
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'epsilon': self.epsilon,
        'schedule_decay': self.schedule_decay
    }
    base_config = super(Nadam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
Example #6
def on_epoch_begin(self, epoch, logs=None):
    """Called at the beginning of each epoch during training.

    :param epoch: epoch number
    :param logs: dictionary of logs
    """
    old_lr = K.get_value(self.model.optimizer.lr)
    # Every third epoch (after the first), scale the learning rate down.
    if epoch % 3 == 0 and epoch > 0:
        new_lr = self.decay * old_lr
        K.set_value(self.model.optimizer.lr, new_lr)
        logging.info("New value for the learning rate: {}".format(
            K.get_value(self.model.optimizer.lr)))
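A sketch of how this hook might be hosted; the StepDecay class name and its decay attribute are hypothetical stand-ins for the surrounding callback:

import logging
from keras import backend as K
from keras.callbacks import Callback

class StepDecay(Callback):
    """Hypothetical host class for the hook above."""

    def __init__(self, decay=0.5):
        super(StepDecay, self).__init__()
        self.decay = decay

    # on_epoch_begin as defined above goes here.

# model.fit(x_train, y_train, epochs=10, callbacks=[StepDecay(decay=0.5)])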
Example #7
def convert_all_kernels_in_model(model):
    """Converts all convolution kernels in a model from Theano to TensorFlow.

    Also works from TensorFlow to Theano.

    Arguments:
        model: target model for the conversion.
    """
    # Note: SeparableConvolution not included
    # since only supported by TF.
    conv_classes = {
        'Conv1D',
        'Conv2D',
        'Conv3D',
        'Conv2DTranspose',
    }
    to_assign = []
    for layer in model.layers:
        if layer.__class__.__name__ in conv_classes:
            original_kernel = K.get_value(layer.kernel)
            converted_kernel = convert_kernel(original_kernel)
            to_assign.append((layer.kernel, converted_kernel))
    K.batch_set_value(to_assign)
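A sketch of the intended use, e.g. reusing weights trained under the other backend; the model and the weights file name are assumed:

model.load_weights('theano_trained_weights.h5')  # assumed file saved under Theano
convert_all_kernels_in_model(model)              # flips conv kernels in place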