Example #1
def disc_mutual_info_loss(c_disc, aux_dist):
    """
    Mutual Information lower bound loss for discrete distribution.
    """
    reg_disc_dim = aux_dist.get_shape().as_list()[-1]
    cross_ent = -K.mean(K.sum(K.log(aux_dist + EPSILON) * c_disc, axis=1))
    ent = -K.mean(K.sum(K.log(1. / reg_disc_dim + EPSILON) * c_disc, axis=1))

    return -(ent - cross_ent)
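A minimal usage sketch (assumed, not part of the original snippet): K is taken to be the TensorFlow Keras backend, EPSILON a small constant, c_disc a one-hot latent code and aux_dist the auxiliary head's softmax output.

from tensorflow.keras import backend as K

EPSILON = 1e-8  # assumed; not defined in the snippet above

# one-hot latent codes and the auxiliary network's softmax predictions
c_disc = K.constant([[1., 0., 0.], [0., 1., 0.]])
aux_dist = K.constant([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])
print(K.eval(disc_mutual_info_loss(c_disc, aux_dist)))  # scalar close to -0.81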
Example #2
def mean_absolute_percentage_error(y_true, y_pred):
  # Like MAE, but expressed as a percentage of the true values, which is often easier to interpret.
  diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
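A quick sanity check (a sketch, assuming the TensorFlow Keras backend is imported as K): an error of 10% on every element should give a MAPE of 10.

from tensorflow.keras import backend as K

y_true = K.constant([[100., 200.]])
y_pred = K.constant([[110., 180.]])
# |100-110|/100 = 0.1 and |200-180|/200 = 0.1, so the result is [10.]
print(K.eval(mean_absolute_percentage_error(y_true, y_pred)))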
Example #3
def mean_squared_error(y_true, y_pred):
  return K.mean(K.square(y_pred - y_true), axis=-1)
Example #4
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
def mean_pred(y_true, y_pred):  # score_array = fn(y_true, y_pred): a metric must take 2 args
    return K.mean(y_pred)
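A hedged usage sketch: a custom metric such as mean_pred is passed to model.compile next to built-in metrics; the tiny model below is purely illustrative.

from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, activation='sigmoid', input_shape=(4,))])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy', mean_pred])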
Example #6
 def call(self, inputs):
   # global average pooling over the steps (temporal) axis
   return K.mean(inputs, axis=1)
 def call(self, inputs):
   # global average pooling over the spatial axes; which axes depends on data_format
   if self.data_format == 'channels_last':
     return K.mean(inputs, axis=[1, 2, 3])
   else:
     return K.mean(inputs, axis=[2, 3, 4])
Example #8
def poisson(y_true, y_pred):
  return K.mean(y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1)
Example #9
def logcosh(y_true, y_pred):

  def cosh(x):
    return (K.exp(x) + K.exp(-x)) / 2

  return K.mean(K.log(cosh(y_pred - y_true)), axis=-1)
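The cosh helper above overflows K.exp for large errors. A numerically safer but mathematically equivalent sketch (using the identity log(cosh(x)) = x + softplus(-2x) - log(2); the name logcosh_stable is illustrative):

import math
from tensorflow.keras import backend as K

def logcosh_stable(y_true, y_pred):
  x = y_pred - y_true
  # same value as log(cosh(x)) but without computing exp(x) directly
  return K.mean(x + K.softplus(-2. * x) - math.log(2.), axis=-1)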
Example #10
def hinge(y_true, y_pred):
  # assumes labels y_true are encoded as -1 or +1
  return K.mean(K.maximum(1. - y_true * y_pred, 0.), axis=-1)
Example #11
def squared_hinge(y_true, y_pred):
  return K.mean(K.square(K.maximum(1. - y_true * y_pred, 0.)), axis=-1)
Example #12
def mean_squared_logarithmic_error(y_true, y_pred):
  first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(K.square(first_log - second_log), axis=-1)
Example #13
def mean_absolute_percentage_error(y_true, y_pred):
  # Like MAE, but expressed as a percentage of the true values, which is often easier to interpret.
  diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)
Example #14
def mean_absolute_error(y_true, y_pred):
  return K.mean(K.abs(y_pred - y_true), axis=-1)
Example #15
def squared_hinge(y_true, y_pred):
  return K.mean(K.square(K.maximum(1. - y_true * y_pred, 0.)), axis=-1)
Example #16
def hinge(y_true, y_pred):
  return K.mean(K.maximum(1. - y_true * y_pred, 0.), axis=-1)
Example #17
def binary_crossentropy(y_true, y_pred):
  # note: Keras 2 expects K.binary_crossentropy(target, output); this snippet
  # passes (y_pred, y_true), the argument order of the older backend API
  return K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)
Example #18
def mean_squared_error(y_true, y_pred):
  return K.mean(K.square(y_pred - y_true), axis=-1)
Example #19
def poisson(y_true, y_pred):
  return K.mean(y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1)
Example #20
def cosine_proximity(y_true, y_pred):
  y_true = K.l2_normalize(y_true, axis=-1)
  y_pred = K.l2_normalize(y_pred, axis=-1)
  # negative mean cosine similarity: minimising the loss maximises alignment
  return -K.mean(y_true * y_pred, axis=-1)
Example #21
def risk_estimation(y_true, y_pred):
    return -100. * K.mean((y_true - 0.0002) * y_pred)
Example #22
 def call(self, inputs):
   if self.data_format == 'channels_last':
     return K.mean(inputs, axis=[1, 2, 3])
   else:
     return K.mean(inputs, axis=[2, 3, 4])
Example #23
def logcosh(y_true, y_pred):
    def cosh(x):
        return (K.exp(x) + K.exp(-x)) / 2

    return K.mean(K.log(cosh(y_pred - y_true)), axis=-1)
Example #24
def binary_accuracy(y_true, y_pred):
    return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
Example #25
 def _huber_loss(self, target, prediction):
     # pseudo-Huber: sqrt(1 + error^2) - 1, quadratic for small errors, ~linear for large ones
     error = prediction - target
     return K.mean(K.sqrt(1 + K.square(error)) - 1, axis=-1)
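A standalone sketch of the same pseudo-Huber formula for a quick numeric check (names are illustrative; assumes the TensorFlow Keras backend):

from tensorflow.keras import backend as K

def pseudo_huber(target, prediction):
    error = prediction - target
    return K.mean(K.sqrt(1. + K.square(error)) - 1., axis=-1)

# elementwise terms are ~0.005 (quadratic regime) and ~9.05 (linear regime);
# the printed value is their mean, roughly 4.53
print(K.eval(pseudo_huber(K.constant([0., 0.]), K.constant([0.1, 10.]))))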
Example #26
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    return K.mean(K.in_top_k(y_pred, K.cast(K.max(y_true, axis=-1), 'int32'),
                             k),
                  axis=-1)
Example #27
def sample_mean_absolute_percentage_error(y_true, y_pred):
    diff = K.abs((y_true - y_pred) /
                 K.clip(K.abs(y_true) + K.abs(y_pred), K.epsilon(), None))
    return 200. * K.mean(diff, axis=-1)
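This is the symmetric MAPE (sMAPE): the error is scaled by the sum of the magnitudes, and the factor 200 puts it on a 0-200 scale. A quick check (sketch, assuming the TensorFlow Keras backend):

from tensorflow.keras import backend as K

y_true = K.constant([[100.]])
y_pred = K.constant([[50.]])
# |100 - 50| / (100 + 50) = 1/3, so the result is about 66.7
print(K.eval(sample_mean_absolute_percentage_error(y_true, y_pred)))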
Example #28
def mean_absolute_error(y_true, y_pred):
  return K.mean(K.abs(y_pred - y_true), axis=-1)
Example #29
def normalize(x):
    """
    Used in the heatmap function - rescales a tensor by its root-mean-square (RMS) value.
    """
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
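A quick numeric check (sketch, assuming the TensorFlow Keras backend): dividing by the RMS rescales the tensor so its mean squared value is approximately 1.

from tensorflow.keras import backend as K

x = K.constant([3., 4.])
# RMS = sqrt((9 + 16) / 2) ≈ 3.54, so the result is roughly [0.85, 1.13]
print(K.eval(normalize(x)))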
Example #30
def mean_squared_logarithmic_error(y_true, y_pred):
  first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(K.square(first_log - second_log), axis=-1)
Example #31
def binary_accuracy(y_true, y_pred):
  return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
Example #32
def balanced_squared_hinge(y_true, y_pred):
  # class-balanced squared hinge: each class's term is weighted by the other class's frequency
  positive = K.cumsum(y_true - 0.)
  negative = K.cumsum(1. - y_true)
  pos_rate = positive / (positive + negative)
  neg_rate = negative / (positive + negative)
  return (K.mean(K.square(K.maximum((1. - y_pred) * y_true, 0.)), axis=-1) * neg_rate +
          K.mean(K.square(K.maximum((1. - y_true) * y_pred, 0.)), axis=-1) * pos_rate)
Example #33
def top_k_categorical_accuracy(y_true, y_pred, k=5):
  return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
Example #34
def binary_crossentropy(y_true, y_pred):
  return K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)
 def call(self, inputs):
   return K.mean(inputs, axis=1)
Example #36
def cosine_proximity(y_true, y_pred):
  y_true = K.l2_normalize(y_true, axis=-1)
  y_pred = K.l2_normalize(y_pred, axis=-1)
  return -K.mean(y_true * y_pred, axis=-1)
Example #37
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
  return K.mean(K.in_top_k(y_pred,
                           K.cast(K.max(y_true, axis=-1), 'int32'), k), axis=-1)