Example #1
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            logging.warning(
                'Reduce LR on plateau conditioned on metric `%s` '
                'which is not available. Available metrics are: %s',
                self.monitor, ','.join(list(logs.keys())))

        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0

            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                if self.wait >= self.patience:
                    old_lr = float(K.get_value(self.model.optimizer.lr))
                    if old_lr > self.min_lr:
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print(
                                '\nEpoch %05d: ReduceLROnPlateau reducing learning '
                                'rate to %s.' % (epoch + 1, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
                self.wait += 1
Example #2
  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    logs['lr'] = K.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Reduce LR on plateau conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))

    else:
      if self.in_cooldown():
        self.cooldown_counter -= 1
        self.wait = 0

      if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
      elif not self.in_cooldown():
        if self.wait >= self.patience:
          old_lr = float(K.get_value(self.model.optimizer.lr))
          if old_lr > self.min_lr:
            new_lr = old_lr * self.factor
            new_lr = max(new_lr, self.min_lr)
            K.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose > 0:
              print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                    'rate to %s.' % (epoch + 1, new_lr))
            self.cooldown_counter = self.cooldown
            self.wait = 0
        self.wait += 1
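
Examples #1 and #2 show the same ReduceLROnPlateau.on_epoch_end implementation at two indentation widths. As a minimal, hedged sketch (assuming the public tf.keras API; the model and data below are placeholders, not part of the source above), this is how the callback is typically attached to training:

import numpy as np
import tensorflow as tf

# Each parameter maps onto a branch of on_epoch_end above.
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss',  # read via logs.get(self.monitor)
    factor=0.5,          # new_lr = old_lr * factor
    patience=3,          # epochs without improvement before reducing
    cooldown=2,          # epochs skipped by in_cooldown() after a reduction
    min_lr=1e-6)         # floor enforced by max(new_lr, min_lr)

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
x, y = np.random.rand(64, 4), np.random.rand(64, 1)
model.fit(x, y, validation_split=0.25, epochs=10, callbacks=[reduce_lr])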
Example #3
 def get_config(self):
     config = {
         'lr': float(K.get_value(self.lr)),
         'decay': float(K.get_value(self.decay)),
         'epsilon': self.epsilon
     }
     base_config = super(Adagrad, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))
Example #4
 def get_config(self):
   config = {
       'lr': float(K.get_value(self.lr)),
       'decay': float(K.get_value(self.decay)),
       'epsilon': self.epsilon
   }
   base_config = super(Adagrad, self).get_config()
   return dict(list(base_config.items()) + list(config.items()))
Example #5
 def get_config(self):
     config = {
         'lr': float(K.get_value(self.lr)),
         'momentum': float(K.get_value(self.momentum)),
         'decay': float(K.get_value(self.decay)),
         'nesterov': self.nesterov
     }
     base_config = super(SGD, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))
Example #6
 def get_config(self):
   config = {
       'lr': float(K.get_value(self.lr)),
       'momentum': float(K.get_value(self.momentum)),
       'decay': float(K.get_value(self.decay)),
       'nesterov': self.nesterov
   }
   base_config = super(SGD, self).get_config()
   return dict(list(base_config.items()) + list(config.items()))
Example #7
 def get_config(self):
     config = {
         'lr': float(K.get_value(self.lr)),
         'beta_1': float(K.get_value(self.beta_1)),
         'beta_2': float(K.get_value(self.beta_2)),
         'epsilon': self.epsilon,
         'schedule_decay': self.schedule_decay
     }
     base_config = super(Nadam, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))
Example #8
 def get_config(self):
   config = {
       'lr': float(K.get_value(self.lr)),
       'beta_1': float(K.get_value(self.beta_1)),
       'beta_2': float(K.get_value(self.beta_2)),
       'epsilon': self.epsilon,
       'schedule_decay': self.schedule_decay
   }
   base_config = super(Nadam, self).get_config()
   return dict(list(base_config.items()) + list(config.items()))
Example #9
 def get_config(self):
     config = {
         'lr': float(K.get_value(self.lr)),
         'beta_1': float(K.get_value(self.beta_1)),
         'beta_2': float(K.get_value(self.beta_2)),
         'decay': float(K.get_value(self.decay)),
         'epsilon': self.epsilon,
         'amsgrad': self.amsgrad
     }
     base_config = super(Adam, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))
Example #10
 def get_config(self):
   config = {
       'lr': float(K.get_value(self.lr)),
       'beta_1': float(K.get_value(self.beta_1)),
       'beta_2': float(K.get_value(self.beta_2)),
       'decay': float(K.get_value(self.decay)),
       'epsilon': self.epsilon,
       'amsgrad': self.amsgrad
   }
   base_config = super(Adam, self).get_config()
   return dict(list(base_config.items()) + list(config.items()))
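
Examples #3 through #10 all follow the same serialization pattern: read each hyperparameter tensor back into a plain Python scalar with K.get_value, merge it with the parent class's config, and return one dict. Below is a hedged sketch of the round trip this enables; keyword names (learning_rate vs. the legacy lr used in these snippets) vary across Keras versions, so treat it as illustrative:

from tensorflow.keras import optimizers

opt = optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
config = opt.get_config()                  # plain dict of Python scalars
restored = optimizers.SGD.from_config(config)
assert restored.get_config() == config     # expected to round-trip losslessly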
Example #11
def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in a model from Theano to TensorFlow.

  Also works from TensorFlow to Theano.

  Arguments:
      model: target model for the conversion.
  """
  # Note: SeparableConvolution not included
  # since only supported by TF.
  conv_classes = {
      'Conv1D',
      'Conv2D',
      'Conv3D',
      'Conv2DTranspose',
  }
  to_assign = []
  for layer in model.layers:
    if layer.__class__.__name__ in conv_classes:
      original_kernel = K.get_value(layer.kernel)
      converted_kernel = convert_kernel(original_kernel)
      to_assign.append((layer.kernel, converted_kernel))
  K.batch_set_value(to_assign)
Example #12
def convert_all_kernels_in_model(model):
    """Converts all convolution kernels in a model from Theano to TensorFlow.

  Also works from TensorFlow to Theano.

  Arguments:
      model: target model for the conversion.
  """
    # Note: SeparableConvolution not included
    # since only supported by TF.
    conv_classes = {
        'Conv1D',
        'Conv2D',
        'Conv3D',
        'Conv2DTranspose',
    }
    to_assign = []
    for layer in model.layers:
        if layer.__class__.__name__ in conv_classes:
            original_kernel = K.get_value(layer.kernel)
            converted_kernel = convert_kernel(original_kernel)
            to_assign.append((layer.kernel, converted_kernel))
    K.batch_set_value(to_assign)
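
The heavy lifting in Examples #11 and #12 is done by convert_kernel, which flips the spatial axes of each kernel (Theano-style convolution and TensorFlow-style correlation differ by exactly this flip). A hedged, self-contained sketch of that transform; convert_kernel_sketch is an illustrative stand-in, not the library function:

import numpy as np

def convert_kernel_sketch(kernel):
    # Reverse every spatial axis; for a 2D kernel shaped
    # (rows, cols, in_channels, out_channels) that is axes 0 and 1.
    flip = (slice(None, None, -1),) * (kernel.ndim - 2)
    keep = (slice(None), slice(None))  # channel axes left untouched
    return kernel[flip + keep].copy()

k = np.arange(9, dtype=np.float32).reshape(3, 3, 1, 1)
assert convert_kernel_sketch(k)[0, 0, 0, 0] == k[-1, -1, 0, 0]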
Example #13
def iterator_predict_loop(model, inputs, steps, verbose=0):
  """Predict function for eager execution when input is dataset iterator.

  Arguments:
      model: Instance of `Model`.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          `_predict_loop` finished.
      verbose: Verbosity mode.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  outs = []
  if verbose == 1:
    progbar = generic_utils.Progbar(target=steps)
  for step_index in range(steps):
    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; '
          'interrupting prediction. Make sure that your '
          'dataset can generate at least `steps` '
          'batches (in this case, %d batches).', steps)
      break

    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
      raise ValueError(
          'Please provide data as a list or tuple of 2 elements '
          '- input and target pair. Received %s. We do not use the '
          '`target` value here.' % next_element)
    x, _ = next_element

    # Validate and standardize data.
    x, _, _ = model._standardize_user_data(x)

    if model._expects_training_arg:
      batch_outs = model.call(x[0] if len(x) == 1 else x, training=False)
    else:
      batch_outs = model.call(x[0] if len(x) == 1 else x)
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]

    # We collect the results from every step and then concatenate them once
    # in the end. This is an expensive process. We are doing this because we
    # do not know the number of samples beforehand.
    if step_index == 0:
      for _ in batch_outs:
        outs.append([])
    for i, batch_out in enumerate(batch_outs):
      outs[i].append(backend.get_value(batch_out))

    if verbose == 1:
      progbar.update(step_index + 1)
  for i, out in enumerate(outs):
    outs[i] = np.concatenate(tuple(out), axis=0)
  if len(outs) == 1:
    return outs[0]
  return outs
Example #14
def iterator_predict_loop(model, inputs, steps, verbose=0):
    """Predict function for eager execution when input is dataset iterator.

  Arguments:
      model: Instance of `Model`.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          `_predict_loop` finished.
      verbose: Verbosity mode.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
    assert isinstance(inputs, iterator_ops.EagerIterator)
    outs = []
    if verbose == 1:
        progbar = generic_utils.Progbar(target=steps)
    for step_index in range(steps):
        # Get data from the iterator.
        try:
            next_element = inputs.get_next()
        except errors.OutOfRangeError:
            logging.warning(
                'Your dataset iterator ran out of data; '
                'interrupting prediction. Make sure that your '
                'dataset can generate at least `steps` '
                'batches (in this case, %d batches).', steps)
            break

        if not isinstance(next_element,
                          (list, tuple)) or len(next_element) != 2:
            raise ValueError(
                'Please provide data as a list or tuple of 2 elements '
                '- input and target pair. Received %s. We do not use the '
                '`target` value here.' % next_element)
        x, _ = next_element

        # Validate and standardize data.
        x, _, _ = model._standardize_user_data(x)

        if model._expects_training_arg:
            batch_outs = model.call(x[0] if len(x) == 1 else x, training=False)
        else:
            batch_outs = model.call(x[0] if len(x) == 1 else x)
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]

        # We collect the results from every step and then concatenate them once
        # in the end. This is an expensive process. We are doing this because we
        # do not know the number of samples beforehand.
        if step_index == 0:
            for _ in batch_outs:
                outs.append([])
        for i, batch_out in enumerate(batch_outs):
            outs[i].append(backend.get_value(batch_out))

        if verbose == 1:
            progbar.update(step_index + 1)
    for i, out in enumerate(outs):
        outs[i] = np.concatenate(tuple(out), axis=0)
    if len(outs) == 1:
        return outs[0]
    return outs
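
Examples #13 and #14 expect the iterator to yield (input, target) pairs and discard the target at predict time. A hedged sketch of that calling convention using the public tf.data API (the dataset below is illustrative; EagerIterator itself is a TensorFlow-internal type):

import numpy as np
import tensorflow as tf

x = np.random.rand(32, 4).astype(np.float32)
y = np.zeros((32, 1), dtype=np.float32)   # dummy targets, ignored by the loop
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(8)

# Under eager execution each element is the 2-tuple the loop unpacks:
for batch_x, _ in dataset.take(1):
    print(batch_x.shape)  # (8, 4)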