from keras import backend as K

# EPSILON is referenced below but not defined in this snippet; a small
# constant such as K.epsilon() is assumed.
EPSILON = K.epsilon()


def disc_mutual_info_loss(c_disc, aux_dist):
    """Mutual information lower bound loss for a discrete latent distribution."""
    reg_disc_dim = aux_dist.get_shape().as_list()[-1]
    cross_ent = -K.mean(K.sum(K.log(aux_dist + EPSILON) * c_disc, axis=1))
    # Entropy of the uniform prior over the discrete code; constant w.r.t. the model.
    ent = -K.mean(K.sum(K.log(1. / reg_disc_dim + EPSILON) * c_disc, axis=1))
    return -(ent - cross_ent)
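# A minimal sanity check, assuming a TensorFlow backend where K.eval works on
# constant tensors: for a 3-way one-hot code matched almost perfectly by the
# auxiliary distribution, the loss approaches -log(3) ~= -1.0986.
import numpy as np

c = K.constant(np.array([[1., 0., 0.], [0., 1., 0.]]))
q = K.constant(np.array([[0.98, 0.01, 0.01], [0.01, 0.98, 0.01]]))
print(K.eval(disc_mutual_info_loss(c, q)))  # ~ -1.078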
def mean_absolute_percentage_error(y_true, y_pred):
    # Scale-free relative of MAE: absolute error divided by |y_true|, in percent.
    diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
    return 100. * K.mean(diff, axis=-1)
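# Hedged numeric check: the K.clip guard keeps the ratio finite when y_true is
# zero, at the cost of a very large (but bounded) percentage term.
y_t = K.constant([[100., 0.]])
y_p = K.constant([[110., 1.]])
print(K.eval(mean_absolute_percentage_error(y_t, y_p)))  # huge, ~5e8 with the default epsilon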
def mean_squared_error(y_true, y_pred):
    return K.mean(K.square(y_pred - y_true), axis=-1)
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
def mean_pred(y_true, y_pred):
    # Custom metrics must accept (y_true, y_pred), even if one argument is unused.
    return K.mean(y_pred)
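# Hedged usage sketch: passing the custom metric to model.compile, where
# `model` is a hypothetical compile-ready Keras model. Keras invokes the
# metric as fn(y_true, y_pred), which is why both arguments are required.
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy', mean_pred])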
def call(self, inputs):
    # Average over axis 1 (e.g. the steps axis of a sequence input).
    return K.mean(inputs, axis=1)
def call(self, inputs):
    # Global average over the three spatial axes, respecting data_format.
    if self.data_format == 'channels_last':
        return K.mean(inputs, axis=[1, 2, 3])
    else:
        return K.mean(inputs, axis=[2, 3, 4])
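# Context sketch (assumption: the call() above belongs to a global-pooling
# style layer). A minimal custom layer around it, with a hypothetical name:
from keras.layers import Layer

class GlobalAvgPool3DSketch(Layer):
    def __init__(self, data_format='channels_last', **kwargs):
        super(GlobalAvgPool3DSketch, self).__init__(**kwargs)
        self.data_format = data_format

    def call(self, inputs):
        if self.data_format == 'channels_last':
            return K.mean(inputs, axis=[1, 2, 3])
        return K.mean(inputs, axis=[2, 3, 4])

    def compute_output_shape(self, input_shape):
        channel_dim = input_shape[-1] if self.data_format == 'channels_last' else input_shape[1]
        return (input_shape[0], channel_dim)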
def poisson(y_true, y_pred):
    return K.mean(y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1)
def logcosh(y_true, y_pred):
    # Note: computing cosh directly via K.exp can overflow for large |x|.
    def cosh(x):
        return (K.exp(x) + K.exp(-x)) / 2
    return K.mean(K.log(cosh(y_pred - y_true)), axis=-1)
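# A numerically stable alternative sketch: log(cosh(x)) == x + softplus(-2x) - log(2),
# which avoids the overflow noted above (this is the form later Keras releases adopted).
import math

def logcosh_stable(y_true, y_pred):
    def _log_cosh(x):
        return x + K.softplus(-2. * x) - math.log(2.)
    return K.mean(_log_cosh(y_pred - y_true), axis=-1)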
def hinge(y_true, y_pred):
    return K.mean(K.maximum(1. - y_true * y_pred, 0.), axis=-1)
def squared_hinge(y_true, y_pred):
    return K.mean(K.square(K.maximum(1. - y_true * y_pred, 0.)), axis=-1)
def mean_squared_logarithmic_error(y_true, y_pred):
    first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
    second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
    return K.mean(K.square(first_log - second_log), axis=-1)
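# Hedged numeric check: for targets well above 1, MSLE depends mainly on the
# ratio y_pred / y_true rather than the absolute gap.
print(K.eval(mean_squared_logarithmic_error(K.constant([[100.]]), K.constant([[200.]]))))    # ~0.47
print(K.eval(mean_squared_logarithmic_error(K.constant([[1000.]]), K.constant([[2000.]]))))  # ~0.48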
def mean_absolute_error(y_true, y_pred):
    return K.mean(K.abs(y_pred - y_true), axis=-1)
def binary_crossentropy(y_true, y_pred):
    # K.binary_crossentropy expects (target, output) in this order.
    return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)
def cosine_proximity(y_true, y_pred):
    y_true = K.l2_normalize(y_true, axis=-1)
    y_pred = K.l2_normalize(y_pred, axis=-1)
    return -K.mean(y_true * y_pred, axis=-1)
def risk_estimation(y_true, y_pred):
    # Negative mean of position-weighted returns; the 0.0002 offset presumably
    # models a fixed per-step cost (e.g. transaction fees).
    return -100. * K.mean((y_true - 0.0002) * y_pred)
def binary_accuracy(y_true, y_pred):
    return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
def _huber_loss(self, target, prediction):
    # Pseudo-Huber loss: sqrt(1 + error^2) - 1, a smooth approximation of Huber.
    error = prediction - target
    return K.mean(K.sqrt(1. + K.square(error)) - 1., axis=-1)
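# Quick numeric intuition (plain-Python sketch): pseudo-Huber behaves like
# error^2 / 2 for small errors and like |error| for large ones.
for e in (0.1, 1.0, 10.0):
    print(e, math.sqrt(1. + e ** 2) - 1.)  # ~0.005, ~0.414, ~9.05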
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    return K.mean(K.in_top_k(y_pred, K.cast(K.max(y_true, axis=-1), 'int32'), k), axis=-1)
def sample_mean_absolute_percentage_error(y_true, y_pred):
    # Symmetric MAPE: error relative to |y_true| + |y_pred|, scaled to 0-200%.
    diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true) + K.abs(y_pred), K.epsilon(), None))
    return 200. * K.mean(diff, axis=-1)
def normalize(x):
    """Used in the heatmap function - normalizes a tensor by its root-mean-square value."""
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def balanced_squared_hinge(y_true, y_pred):
    # Class-balanced squared hinge for 0/1 labels: each class's term is
    # weighted by the opposite class's overall frequency.
    positive = K.sum(y_true)
    negative = K.sum(1. - y_true)
    posrate = positive / (positive + negative)
    negrate = negative / (positive + negative)
    pos_term = K.mean(K.square(K.maximum((1. - y_pred) * y_true, 0.)), axis=-1)
    neg_term = K.mean(K.square(K.maximum((1. - y_true) * y_pred, 0.)), axis=-1)
    return pos_term * negrate + neg_term * posrate
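# Hedged usage sketch: wiring the balanced loss into a binary classifier.
# `model` is a hypothetical Keras model with a sigmoid output and 0/1 labels.
model.compile(optimizer='adam', loss=balanced_squared_hinge)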