Example #1
 def __call__(self, x):
   regularization = 0.
   if self.l1:
     regularization += K.sum(self.l1 * K.abs(x))
   if self.l2:
     regularization += K.sum(self.l2 * K.square(x))
   return regularization
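For reference, the penalty this regularizer produces is l1 * sum(|x|) + l2 * sum(x^2). A plain-NumPy sketch of the same formula (illustration only; the scalar factors l1 and l2 below are assumed values, not taken from the snippet):

import numpy as np

def l1_l2_penalty(x, l1=0.01, l2=0.01):
  # Illustrative NumPy version of the penalty: l1 * sum(|x|) + l2 * sum(x^2)
  return l1 * np.sum(np.abs(x)) + l2 * np.sum(np.square(x))

x = np.array([[1.0, -2.0], [0.5, 0.0]])
print(l1_l2_penalty(x))  # 0.01 * 3.5 + 0.01 * 5.25 = 0.0875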
Example #2
  def get_gradients(self, loss, params):
    """Returns gradients of `loss` with respect to `params`.

    Arguments:
        loss: Loss tensor.
        params: List of variables.

    Returns:
        List of gradient tensors.

    Raises:
        ValueError: In case any gradient cannot be computed (e.g. if gradient
          function not implemented).
    """
    grads = K.gradients(loss, params)
    if None in grads:
      raise ValueError('An operation has `None` for gradient. '
                       'Please make sure that all of your ops have a '
                       'gradient defined (i.e. are differentiable). '
                       'Common ops without gradient: '
                       'K.argmax, K.round, K.eval.')
    if hasattr(self, 'clipnorm') and self.clipnorm > 0:
      norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
      grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
    if hasattr(self, 'clipvalue') and self.clipvalue > 0:
      grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
    return grads
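The clip_norm helper is not shown above; conceptually it rescales a gradient whenever the global norm exceeds clipnorm. A rough NumPy sketch of that behaviour (an illustration of the idea, not the actual Keras helper):

import numpy as np

def clip_norm_sketch(g, clipnorm, norm):
  # Scale g down by clipnorm / norm when the global norm exceeds clipnorm.
  return g * clipnorm / norm if norm > clipnorm else g

grads = [np.array([3.0, 0.0]), np.array([0.0, 4.0])]
norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads))  # global norm = 5.0
clipped = [clip_norm_sketch(g, 1.0, norm) for g in grads]  # each gradient scaled by 1/5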
Example #3
 def __call__(self, w):
   norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
   desired = (
       self.rate * K.clip(norms, self.min_value, self.max_value) +
       (1 - self.rate) * norms)
   w *= (desired / (K.epsilon() + norms))
   return w
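Tracing the same update with NumPy shows how the constraint pulls per-axis norms into [min_value, max_value], with rate interpolating between the clipped and the original norm. The concrete values below are assumptions for the sake of the illustration:

import numpy as np

def min_max_norm_sketch(w, min_value=0.5, max_value=1.0, rate=1.0, axis=0):
  norms = np.sqrt(np.sum(np.square(w), axis=axis, keepdims=True))
  desired = rate * np.clip(norms, min_value, max_value) + (1 - rate) * norms
  return w * desired / (1e-7 + norms)

w = np.array([[3.0, 0.1], [4.0, 0.2]])  # column norms: 5.0 and ~0.22
print(np.linalg.norm(min_max_norm_sketch(w), axis=0))  # ~[1.0, 0.5]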
Example #4
 def get_gradients(self, loss, params):
   grads = K.gradients(loss, params)
   if hasattr(self, 'clipnorm') and self.clipnorm > 0:
     norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
     grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
   if hasattr(self, 'clipvalue') and self.clipvalue > 0:
     grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
   return grads
Example #5
 def get_initial_state(self, inputs):
   # build an all-zero tensor of shape (samples, output_dim)
   initial_state = K.zeros_like(inputs)  # (samples, timesteps, input_dim)
   initial_state = K.sum(initial_state, axis=(1, 2))  # (samples,)
   initial_state = K.expand_dims(initial_state)  # (samples, 1)
   initial_state = K.tile(initial_state, [1,
                                          self.units])  # (samples, output_dim)
   initial_state = [initial_state for _ in range(len(self.states))]
   return initial_state
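The zeros_like / sum / expand_dims / tile chain is a backend-agnostic way to build an all-zero (samples, units) tensor whose batch size matches a symbolic input. The same shape manipulation in NumPy (illustration only, with assumed concrete sizes):

import numpy as np

inputs = np.random.rand(8, 5, 3)      # (samples, timesteps, input_dim)
units = 4
state = np.zeros_like(inputs)         # (samples, timesteps, input_dim)
state = np.sum(state, axis=(1, 2))    # (samples,)
state = np.expand_dims(state, -1)     # (samples, 1)
state = np.tile(state, [1, units])    # (samples, units), still all zeros
print(state.shape)                    # (8, 4)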
Example #6
  def get_initial_state(self, inputs):
    # (samples, timesteps, rows, cols, filters)
    initial_state = K.zeros_like(inputs)
    # (samples, rows, cols, filters)
    initial_state = K.sum(initial_state, axis=1)
    shape = list(self.kernel_shape)
    shape[-1] = self.filters
    initial_state = self.input_conv(
        initial_state, K.zeros(tuple(shape)), padding=self.padding)

    initial_states = [initial_state for _ in range(2)]
    return initial_states
Example #7
  def get_constants(self, inputs, training=None):
    constants = []
    if self.implementation == 0 and 0 < self.dropout < 1:
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones += 1

      def dropped_inputs():
        return K.dropout(ones, self.dropout)

      dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.recurrent_dropout < 1:
      shape = list(self.kernel_shape)
      shape[-1] = self.filters
      ones = K.zeros_like(inputs)
      ones = K.sum(ones, axis=1)
      ones = self.input_conv(ones, K.zeros(shape), padding=self.padding)
      ones += 1.

      def dropped_inputs():  # pylint: disable=function-redefined
        return K.dropout(ones, self.recurrent_dropout)

      rec_dp_mask = [
          K.in_train_phase(dropped_inputs, ones, training=training)
          for _ in range(4)
      ]
      constants.append(rec_dp_mask)
    else:
      constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
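The dropout masks are built by applying dropout to an all-ones tensor; K.dropout performs inverted dropout, so surviving entries are rescaled by 1 / (1 - rate). A rough NumPy sketch of one such mask (illustration only, not the backend implementation):

import numpy as np

def dropout_mask_sketch(shape, rate, rng=np.random.default_rng(0)):
  # Inverted dropout: zero entries with probability `rate`, rescale the rest.
  keep = (rng.random(shape) >= rate).astype(float)
  return keep / (1.0 - rate)

ones = np.ones((2, 3))
mask = dropout_mask_sketch(ones.shape, rate=0.5)
print(ones * mask)  # surviving entries become 2.0, dropped entries become 0.0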
Example #8
  def get_initial_state(self, inputs):
    # (samples, timesteps, rows, cols, filters)
    initial_state = K.zeros_like(inputs)
    # (samples, rows, cols, filters)
    initial_state = K.sum(initial_state, axis=1)
    shape = list(self.cell.kernel_shape)
    shape[-1] = self.cell.filters
    initial_state = self.cell.input_conv(initial_state,
                                         K.zeros(tuple(shape)),
                                         padding=self.cell.padding)

    if hasattr(self.cell.state_size, '__len__'):
      return [initial_state for _ in self.cell.state_size]
    else:
      return [initial_state]
Example #9
def softmax(x, axis=-1):
  """Softmax activation function.

  Arguments:
      x: Tensor.
      axis: Integer, axis along which the softmax normalization is applied.

  Returns:
      Tensor, output of softmax transformation.

  Raises:
      ValueError: In case `dim(x) == 1`.
  """
  ndim = K.ndim(x)
  if ndim == 2:
    return K.softmax(x)
  elif ndim > 2:
    e = K.exp(x - K.max(x, axis=axis, keepdims=True))
    s = K.sum(e, axis=axis, keepdims=True)
    return e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor that is 1D')
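For inputs of rank greater than 2, the function falls back to the explicit exp / sum form, subtracting the per-axis maximum for numerical stability. A NumPy check of the same computation (illustrative):

import numpy as np

def softmax_np(x, axis=-1):
  e = np.exp(x - np.max(x, axis=axis, keepdims=True))  # max-shift for stability
  return e / np.sum(e, axis=axis, keepdims=True)

x = np.array([[[1.0, 2.0, 3.0], [1.0, 1.0, 1.0]]])  # rank-3 tensor
print(softmax_np(x).sum(axis=-1))  # every slice along the last axis sums to 1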
Example #10
 def __call__(self, w):
   norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
   desired = K.clip(norms, 0, self.max_value)
   w *= (desired / (K.epsilon() + norms))
   return w
Example #11
 def __call__(self, w):
   return w / (
       K.epsilon() + K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True)))
Example #12
def cosine_proximity(y_true, y_pred):
  y_true = K.l2_normalize(y_true, axis=-1)
  y_pred = K.l2_normalize(y_pred, axis=-1)
  return -K.sum(y_true * y_pred, axis=-1)
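Both tensors are L2-normalized first, so the result is the negative cosine similarity: -1 when prediction and target point in the same direction, 0 when they are orthogonal. A NumPy check (illustrative):

import numpy as np

def cosine_proximity_np(y_true, y_pred):
  y_true = y_true / np.linalg.norm(y_true, axis=-1, keepdims=True)
  y_pred = y_pred / np.linalg.norm(y_pred, axis=-1, keepdims=True)
  return -np.sum(y_true * y_pred, axis=-1)

print(cosine_proximity_np(np.array([1.0, 0.0]), np.array([2.0, 0.0])))  # -1.0
print(cosine_proximity_np(np.array([1.0, 0.0]), np.array([0.0, 3.0])))  # -0.0 (orthogonal)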
Example #13
def cosine_proximity(y_true, y_pred):
  y_true = K.l2_normalize(y_true, axis=-1)
  y_pred = K.l2_normalize(y_pred, axis=-1)
  return -K.sum(y_true * y_pred, axis=-1)
Example #14
def kullback_leibler_divergence(y_true, y_pred):
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
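Clipping both distributions to [epsilon, 1] keeps the logarithm well defined when either contains zeros; the sum over the last axis matches the usual definition of KL divergence. A NumPy check (illustrative):

import numpy as np

def kl_divergence_np(y_true, y_pred, eps=1e-7):
  y_true = np.clip(y_true, eps, 1)
  y_pred = np.clip(y_pred, eps, 1)
  return np.sum(y_true * np.log(y_true / y_pred), axis=-1)

print(kl_divergence_np(np.array([0.5, 0.5]), np.array([0.9, 0.1])))  # ~0.51
print(kl_divergence_np(np.array([0.5, 0.5]), np.array([0.5, 0.5])))  # 0.0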
Example #15
def categorical_hinge(y_true, y_pred):
  pos = K.sum(y_true * y_pred, axis=-1)
  neg = K.max((1. - y_true) * y_pred, axis=-1)
  return K.maximum(neg - pos + 1., 0.)
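With a one-hot y_true, pos is the score of the correct class and neg is the best score among the remaining classes, so the loss is zero once the correct class wins by a margin of at least 1. A NumPy check (illustrative):

import numpy as np

def categorical_hinge_np(y_true, y_pred):
  pos = np.sum(y_true * y_pred, axis=-1)         # score of the true class
  neg = np.max((1. - y_true) * y_pred, axis=-1)  # best score among the others
  return np.maximum(neg - pos + 1., 0.)

y_true = np.array([[0., 1., 0.]])
print(categorical_hinge_np(y_true, np.array([[0.1, 2.0, 0.3]])))  # [0.] margin satisfied
print(categorical_hinge_np(y_true, np.array([[0.9, 1.0, 0.3]])))  # [0.9]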
Example #16
def kullback_leibler_divergence(y_true, y_pred):
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return K.sum(y_true * K.log(y_true / y_pred), axis=-1)
Example #17
 def __call__(self, w):
     return w / (K.epsilon() +
                 K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True)))
Example #18
def categorical_hinge(y_true, y_pred):
    pos = K.sum(y_true * y_pred, axis=-1)
    neg = K.max((1. - y_true) * y_pred, axis=-1)
    return K.maximum(neg - pos + 1., 0.)
Example #19
 def __call__(self, w):
     norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
     desired = K.clip(norms, 0, self.max_value)
     w *= (desired / (K.epsilon() + norms))
     return w
Example #20
 def __call__(self, w):
     norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
     desired = (self.rate * K.clip(norms, self.min_value, self.max_value) +
                (1 - self.rate) * norms)
     w *= (desired / (K.epsilon() + norms))
     return w