Code Example #1
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr / (1. - K.pow(self.beta_1, t))

        shapes = [K.int_shape(p) for p in params]
        # zero init of 1st moment
        ms = [K.zeros(shape) for shape in shapes]
        # zero init of exponentially weighted infinity norm
        us = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + us

        for p, g, m, u in zip(params, grads, ms, us):

            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            u_t = K.maximum(self.beta_2 * u, K.abs(g))
            p_t = p - lr_t * m_t / (u_t + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(u, u_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
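The update built above is an Adamax-style rule: an exponential moving average of the gradient combined with an exponentially weighted infinity norm, with the learning rate bias-corrected by 1/(1 - beta_1^t). Below is a minimal NumPy sketch, not taken from the original project, of a single parameter step under assumed hyperparameters (lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8):

import numpy as np

def adamax_style_step(p, g, m, u, t, lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
    # t is the 1-based step count, so 1 - beta_1**t never hits zero.
    lr_t = lr / (1. - beta_1 ** t)              # bias-corrected learning rate
    m_t = beta_1 * m + (1. - beta_1) * g        # 1st moment estimate
    u_t = np.maximum(beta_2 * u, np.abs(g))     # exponentially weighted infinity norm
    p_t = p - lr_t * m_t / (u_t + epsilon)      # parameter update
    return p_t, m_t, u_t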
Code Example #2
File: optimizers.py  Project: jiayouwyhit/tensorflow
  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                            K.dtype(self.decay))))

    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = lr / (1. - K.pow(self.beta_1, t))

    shapes = [K.int_shape(p) for p in params]
    # zero init of 1st moment
    ms = [K.zeros(shape) for shape in shapes]
    # zero init of exponentially weighted infinity norm
    us = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + us

    for p, g, m, u in zip(params, grads, ms, us):

      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      u_t = K.maximum(self.beta_2 * u, K.abs(g))
      p_t = p - lr_t * m_t / (u_t + self.epsilon)

      self.updates.append(K.update(m, m_t))
      self.updates.append(K.update(u, u_t))
      new_p = p_t

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates
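This version differs from Example #1 mainly in that the iteration counter is cast to the appropriate dtype before it enters the decay factor and the bias correction. The toy loop below is illustrative only; it assumes the counter reads 0, 1, 2, ... when the decay is applied and uses assumed values lr=0.002, decay=0.01, beta_1=0.9 to show how the two scalars evolve over the first few steps:

lr, decay, beta_1 = 0.002, 0.01, 0.9
for step in range(5):
    decayed = lr * (1. / (1. + decay * step))   # lr *= 1 / (1 + decay * iterations)
    t = step + 1
    lr_t = decayed / (1. - beta_1 ** t)         # lr_t = lr / (1 - beta_1 ** t)
    print(step, round(decayed, 6), round(lr_t, 6))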
Code Example #3
 def __call__(self, x):
   regularization = 0.
   if self.l1:
     regularization += K.sum(self.l1 * K.abs(x))
   if self.l2:
     regularization += K.sum(self.l2 * K.square(x))
   return regularization
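A small NumPy illustration, not from the original source, of the penalty this __call__ returns for a given weight tensor, with assumed coefficients l1=0.01 and l2=0.001:

import numpy as np

x = np.array([[0.5, -1.0], [2.0, -0.25]])
l1, l2 = 0.01, 0.001
penalty = l1 * np.abs(x).sum() + l2 * np.square(x).sum()   # l1 * sum|x| + l2 * sum(x^2)
print(penalty)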
Code Example #4
File: advanced_activations.py  Project: lengjia/RRL
 def call(self, inputs, mask=None):
     pos = K.relu(inputs)
     if K.backend() == 'theano':
         neg = (K.pattern_broadcast(self.alpha, self.param_broadcast) *
                (inputs - K.abs(inputs)) * 0.5)
     else:
         neg = -self.alpha * K.relu(-inputs)
     return pos + neg
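Both branches compute the same piecewise-linear activation: (inputs - |inputs|) * 0.5 equals min(inputs, 0), and so does -relu(-inputs). A NumPy sketch (illustrative, not from the original project) checking that pos + neg matches the usual parametric-ReLU form with a per-element alpha:

import numpy as np

def relu(x):
    return np.maximum(x, 0.)

x = np.array([-2.0, -0.5, 0.0, 1.5])
alpha = 0.25
pos = relu(x)
neg_theano = alpha * (x - np.abs(x)) * 0.5   # alpha * min(x, 0)
neg_other = -alpha * relu(-x)                # same quantity
assert np.allclose(neg_theano, neg_other)
assert np.allclose(pos + neg_other, np.where(x > 0, x, alpha * x))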
Code Example #5
def sample_mean_absolute_percentage_error(y_true, y_pred):
    diff = K.abs((y_true - y_pred) /
                 K.clip(K.abs(y_true) + K.abs(y_pred), K.epsilon(), None))
    return 200. * K.mean(diff, axis=-1)
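This variant normalizes by |y_true| + |y_pred| and scales by 200 (a symmetric-MAPE-style measure); by the triangle inequality each per-element term is at most 1, so the result never exceeds 200. A quick NumPy check, illustrative only:

import numpy as np

eps = 1e-7
y_true = np.array([3.0, -0.5, 2.0])
y_pred = np.array([2.5, 0.0, 2.0])
diff = np.abs(y_true - y_pred) / np.clip(np.abs(y_true) + np.abs(y_pred), eps, None)
print(200. * diff.mean())   # ~72.7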
Code Example #6
File: losses.py  Project: AutumnQYN/tensorflow
def mean_absolute_percentage_error(y_true, y_pred):
  # Absolute error scaled by |y_true|, which is clipped away from zero to avoid division by zero.
  diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
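A short NumPy illustration, not from the original project, of the same computation; clipping |y_true| away from zero is what prevents a literal division by zero for zero-valued targets:

import numpy as np

eps = 1e-7
y_true = np.array([3.0, -0.5, 2.0])
y_pred = np.array([2.5, 0.0, 2.0])
diff = np.abs(y_true - y_pred) / np.clip(np.abs(y_true), eps, None)
print(100. * diff.mean())   # ~38.9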
Code Example #7
File: losses.py  Project: AutumnQYN/tensorflow
def mean_absolute_error(y_true, y_pred):
  return K.mean(K.abs(y_pred - y_true), axis=-1)
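For comparison on the same toy data as above, a minimal sketch (illustrative only) of the mean absolute error, which averages the raw |y_pred - y_true| with no scaling by the target magnitude:

import numpy as np

y_true = np.array([3.0, -0.5, 2.0])
y_pred = np.array([2.5, 0.0, 2.0])
print(np.abs(y_pred - y_true).mean())   # (0.5 + 0.5 + 0.0) / 3 = 0.333...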