Example #1
  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    accumulators = [K.zeros(shape) for shape in shapes]
    delta_accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators + delta_accumulators
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                            K.dtype(self.decay))))

    for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * K.square(g)
      self.updates.append(K.update(a, new_a))

      # use the new accumulator and the *old* delta_accumulator
      update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
      new_p = p - lr * update

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))

      # update delta_accumulator
      new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
      self.updates.append(K.update(d_a, new_d_a))
    return self.updates
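For reference, the same Adadelta rule in plain NumPy. This is a minimal single-parameter sketch; the function name and default hyperparameters are illustrative, not part of Keras:

import numpy as np

def adadelta_step(p, g, a, d_a, rho=0.95, lr=1.0, epsilon=1e-7):
    # Decaying average of squared gradients.
    a = rho * a + (1. - rho) * g ** 2
    # Scale the gradient by the RMS of past updates over the RMS of gradients.
    update = g * np.sqrt(d_a + epsilon) / np.sqrt(a + epsilon)
    p = p - lr * update
    # Decaying average of squared updates, used by the *next* step.
    d_a = rho * d_a + (1. - rho) * update ** 2
    return p, a, d_a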
Example #2
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.int_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        delta_accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators + delta_accumulators
        self.updates = []

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        for p, g, a, d_a in zip(params, grads, accumulators,
                                delta_accumulators):
            # update accumulator
            new_a = self.rho * a + (1. - self.rho) * K.square(g)
            self.updates.append(K.update(a, new_a))

            # use the new accumulator and the *old* delta_accumulator
            update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a +
                                                             self.epsilon)

            new_p = p - lr * update
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))

            # update delta_accumulator
            new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
            self.updates.append(K.update(d_a, new_d_a))
        return self.updates
Example #3
  def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    accumulators = [K.zeros(shape) for shape in shapes]
    delta_accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators + delta_accumulators
    self.updates = []

    lr = self.lr
    if self.initial_decay > 0:
      lr *= (1. / (1. + self.decay * self.iterations))
      self.updates.append(K.update_add(self.iterations, 1))

    for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * K.square(g)
      self.updates.append(K.update(a, new_a))

      # use the new accumulator and the *old* delta_accumulator
      update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)

      new_p = p - lr * update
      # apply constraints
      if p in constraints:
        c = constraints[p]
        new_p = c(new_p)
      self.updates.append(K.update(p, new_p))

      # update delta_accumulator
      new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
      self.updates.append(K.update(d_a, new_d_a))
    return self.updates
Example #4
  def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      lr *= (1. / (1. + self.decay * self.iterations))

    t = self.iterations + 1
    lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                 (1. - K.pow(self.beta_1, t)))

    shapes = [K.int_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
      p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

      self.updates.append(K.update(m, m_t))
      self.updates.append(K.update(v, v_t))

      new_p = p_t
      # apply constraints
      if p in constraints:
        c = constraints[p]
        new_p = c(new_p)
      self.updates.append(K.update(p, new_p))
    return self.updates
Example #5
  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                            K.dtype(self.decay))))

    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                 (1. - K.pow(self.beta_1, t)))

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
      p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

      self.updates.append(K.update(m, m_t))
      self.updates.append(K.update(v, v_t))
      new_p = p_t

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates
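The factor lr_t folds both bias corrections into the step size. A minimal NumPy sketch of one Adam step under the same formulation (names and defaults are illustrative):

import numpy as np

def adam_step(p, g, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7):
    # t is the 1-based step count; lr_t carries both bias corrections.
    lr_t = lr * np.sqrt(1. - beta_2 ** t) / (1. - beta_1 ** t)
    m = beta_1 * m + (1. - beta_1) * g        # first-moment (mean) estimate
    v = beta_2 * v + (1. - beta_2) * g ** 2   # second-moment estimate
    p = p - lr_t * m / (np.sqrt(v) + epsilon)
    return p, m, v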
Example #6
 def __call__(self, x):
   regularization = 0.
   if self.l1:
     regularization += K.sum(self.l1 * K.abs(x))
   if self.l2:
     regularization += K.sum(self.l2 * K.square(x))
   return regularization
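This penalty is rarely called directly; it is attached to a layer and added to the model loss. A usage sketch, assuming the Keras 2 layer API (layer size and coefficients are arbitrary):

from keras import regularizers
from keras.layers import Dense

# __call__ above is invoked on the kernel tensor when the loss is built.
dense = Dense(64, kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4))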
Example #7
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))

        shapes = [K.int_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
Example #8
  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    accumulators = [
        K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params
    ]
    self.weights = accumulators
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                            K.dtype(self.decay))))

    for p, g, a in zip(params, grads, accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * K.square(g)
      self.updates.append(K.update(a, new_a))
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(K.update(p, new_p))
    return self.updates
Example #9
 def get_gradients(self, loss, params):
   grads = K.gradients(loss, params)
   if hasattr(self, 'clipnorm') and self.clipnorm > 0:
     norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
     grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
   if hasattr(self, 'clipvalue') and self.clipvalue > 0:
     grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
   return grads
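clip_norm is a helper defined elsewhere in keras.optimizers. A minimal NumPy sketch of the rescaling it performs, assuming the usual global-norm clipping behavior:

import numpy as np

def clip_norm_sketch(g, c, norm):
    # Rescale g so the global gradient norm is at most c.
    if norm > c:
        g = g * c / norm
    return g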
Example #10
 def get_gradients(self, loss, params):
     grads = K.gradients(loss, params)
     if hasattr(self, 'clipnorm') and self.clipnorm > 0:
         norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
         grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
     if hasattr(self, 'clipvalue') and self.clipvalue > 0:
         grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
     return grads
Example #11
    def f_lrn(X):
        samples, rows, cols, channels = X.get_shape()
        half_n = n // 2
        X_square = K.square(X)

        # pad with zeros (half_n channels)
        X_square_padded = K.spatial_2d_padding(
            K.permute_dimensions(X_square, (1, 0, 3, 2)),
            ((0, 0), (half_n, half_n)))  # pad 2nd and 3rd dim
        X_square_padded = K.permute_dimensions(X_square_padded, (1, 0, 3, 2))

        # sum runs over n "adjacent" kernel maps at the same spatial position
        window_sum = 0
        for i in range(n):
            window_sum += X_square_padded[:, :, :, i:(i + int(channels))]
        scale = (k + alpha * window_sum)**beta
        return X / scale
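f_lrn implements local response normalization across channels: each activation is divided by (k + alpha * sum of n neighboring squared channel values) ** beta, with n, k, alpha, and beta taken from the enclosing scope. A naive NumPy reference for checking it, assuming channels-last input and AlexNet-style defaults:

import numpy as np

def lrn_reference(x, n=5, k=2., alpha=1e-4, beta=0.75):
    # x has shape (samples, rows, cols, channels); zero-pad the channel axis.
    half_n = n // 2
    x_sq = np.pad(x ** 2, ((0, 0), (0, 0), (0, 0), (half_n, half_n)),
                  mode='constant')
    # Sum each window of n adjacent channel maps at the same spatial position.
    window_sum = sum(x_sq[..., i:i + x.shape[-1]] for i in range(n))
    return x / (k + alpha * window_sum) ** beta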
Example #12
  def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    t = self.iterations + 1

    # Due to the recommendations in [2], i.e. warming momentum schedule
    momentum_cache_t = self.beta_1 * (1. - 0.5 *
                                      (K.pow(0.96, t * self.schedule_decay)))
    momentum_cache_t_1 = self.beta_1 * (1. - 0.5 *
                                        (K.pow(0.96,
                                               (t + 1) * self.schedule_decay)))
    m_schedule_new = self.m_schedule * momentum_cache_t
    m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
    self.updates.append((self.m_schedule, m_schedule_new))

    shapes = [K.int_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]

    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
      # the following equations given in [1]
      g_prime = g / (1. - m_schedule_new)
      m_t = self.beta_1 * m + (1. - self.beta_1) * g
      m_t_prime = m_t / (1. - m_schedule_next)
      v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
      v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
      m_t_bar = (
          1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

      self.updates.append(K.update(m, m_t))
      self.updates.append(K.update(v, v_t))

      p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
      new_p = p_t

      # apply constraints
      if p in constraints:
        c = constraints[p]
        new_p = c(new_p)
      self.updates.append(K.update(p, new_p))
    return self.updates
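The momentum_cache_t schedule warms the effective momentum toward beta_1 over training, per the Nadam report it cites. A quick standalone look at the schedule values (the constants are the usual defaults, shown for illustration):

import numpy as np

beta_1, schedule_decay = 0.9, 0.004
mu = [beta_1 * (1. - 0.5 * 0.96 ** (t * schedule_decay)) for t in (1, 10, 100, 1000)]
print(np.round(mu, 4))  # climbs slowly toward beta_1 as t grows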
Example #13
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.int_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators
        self.updates = []

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        for p, g, a in zip(params, grads, accumulators):
            new_a = a + K.square(g)  # update accumulator
            self.updates.append(K.update(a, new_a))
            new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
Example #14
  def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators
    self.updates = []

    lr = self.lr
    if self.initial_decay > 0:
      lr *= (1. / (1. + self.decay * self.iterations))
      self.updates.append(K.update_add(self.iterations, 1))

    for p, g, a in zip(params, grads, accumulators):
      new_a = a + K.square(g)  # update accumulator
      self.updates.append(K.update(a, new_a))
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
      # apply constraints
      if p in constraints:
        c = constraints[p]
        new_p = c(new_p)
      self.updates.append(K.update(p, new_p))
    return self.updates
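Unlike RMSprop and Adadelta, Adagrad's accumulator never decays, so the effective step size only shrinks. The same rule in NumPy (a minimal sketch; names and defaults are illustrative):

import numpy as np

def adagrad_step(p, g, a, lr=0.01, epsilon=1e-7):
    a = a + g ** 2                            # accumulate all past squared gradients
    p = p - lr * g / (np.sqrt(a) + epsilon)   # per-parameter adaptive step
    return p, a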
Example #15
 def __call__(self, w):
   norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
   desired = (self.rate * K.clip(norms, self.min_value, self.max_value) +
              (1 - self.rate) * norms)
   w *= (desired / (K.epsilon() + norms))
   return w
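This is the min-max norm constraint: weight-vector norms are pulled into [min_value, max_value], with rate interpolating between leaving them unchanged (0) and clipping them outright (1). A usage sketch, assuming the Keras 2 constraints API:

from keras.constraints import MinMaxNorm
from keras.layers import Dense

# Constraints are applied to the kernel after each gradient update.
dense = Dense(64, kernel_constraint=MinMaxNorm(min_value=0.5, max_value=2.0, rate=1.0))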
Example #16
 def __call__(self, w):
     return w / (K.epsilon() +
                 K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True)))
Example #17
 def __call__(self, w):
   norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
   desired = K.clip(norms, 0, self.max_value)
   w *= (desired / (K.epsilon() + norms))
   return w
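This max-norm constraint caps each weight vector's norm at max_value, a common companion to dropout (Srivastava et al., 2014). A usage sketch, assuming the Keras 2 constraints API:

from keras.constraints import max_norm
from keras.layers import Dense

dense = Dense(64, kernel_constraint=max_norm(3.0))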
Example #18
 def __call__(self, w):
     norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
     desired = (self.rate * K.clip(norms, self.min_value, self.max_value) +
                (1 - self.rate) * norms)
     w *= (desired / (K.epsilon() + norms))
     return w
Example #19
 def __call__(self, w):
     norms = K.sqrt(K.sum(K.square(w), axis=self.axis, keepdims=True))
     desired = K.clip(norms, 0, self.max_value)
     w *= (desired / (K.epsilon() + norms))
     return w
Example #20
 def _huber_loss(self, target, prediction):
     # pseudo-Huber loss (delta = 1): sqrt(1 + error^2) - 1
     error = prediction - target
     return K.mean(K.sqrt(1 + K.square(error)) - 1, axis=-1)
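Despite the method name, this is the pseudo-Huber loss with delta = 1: quadratic near zero and asymptotically linear for large errors. A quick numeric check:

import numpy as np

err = np.array([0.1, 1.0, 10.0])
print(np.sqrt(1. + err ** 2) - 1.)  # approx [0.005, 0.414, 9.050]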
Example #21
def mean_squared_error(y_true, y_pred):
  return K.mean(K.square(y_pred - y_true), axis=-1)
Example #22
def normalize(x):
    """
    Used in heatmap function - normalises a tensor by its L2 norm.
    """
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
Example #23
def squared_hinge(y_true, y_pred):
  return K.mean(K.square(K.maximum(1. - y_true * y_pred, 0.)), axis=-1)
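Hinge-family losses assume targets encoded as -1/+1 rather than 0/1. A quick check with hand-picked values:

import numpy as np

y_true = np.array([1., -1.])
y_pred = np.array([0.3, 0.8])
# margins are 0.7 and 1.8; mean of squares is 1.865
print(np.mean(np.maximum(1. - y_true * y_pred, 0.) ** 2))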
Example #24
def mean_squared_logarithmic_error(y_true, y_pred):
  first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(K.square(first_log - second_log), axis=-1)
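The clip keeps predictions positive before the shifted log, so MSLE compares log-space (i.e. relative) differences; log(x + 1) is log1p. A quick check showing that equal relative errors at different scales give similar per-element terms:

import numpy as np

y_true = np.array([10., 1000.])
y_pred = np.array([15., 1500.])
print((np.log1p(y_pred) - np.log1p(y_true)) ** 2)  # similar magnitudes at both scales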
Example #25
def balanced_squared_hinge(y_true, y_pred):
  positive = K.cumsum(y_true - 0.)
  negative = K.cumsum(1. - y_true)
  pos_rate = positive / (positive + negative)
  neg_rate = negative / (positive + negative)
  return (K.mean(K.square(K.maximum((1. - y_pred) * y_true, 0.)), axis=-1) * neg_rate +
          K.mean(K.square(K.maximum((1. - y_true) * y_pred, 0.)), axis=-1) * pos_rate)