import numpy as np
from keras import backend as K


def weighted_masked_objective(fn):
    '''Wraps an elementwise loss `fn` with masking and sample-weighting
    support. In the original source, `weighted` is a closure and `fn` is
    supplied by this enclosing factory (Keras calls it
    _weighted_masked_objective in engine/training.py).
    '''
    def weighted(y_true, y_pred, weights, mask=None):
        # score_array has ndim >= 2
        score_array = fn(y_true, y_pred)
        if mask is not None:
            # Cast the mask to floatx to avoid float64 upcasting in Theano
            mask = K.cast(mask, K.floatx())
            # mask should have the same shape as score_array
            score_array *= mask
            # the loss per batch should be proportional
            # to the number of unmasked samples
            score_array /= K.mean(mask)
        # reduce score_array to 1D
        ndim = K.ndim(score_array)
        for _ in range(ndim - 1):
            score_array = K.mean(score_array, axis=-1)
        if weights is not None:
            score_array *= weights
        return K.mean(score_array)
    return weighted

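# Usage sketch (illustrative, not part of the original source): wrap a plain
# elementwise loss, then evaluate it with a padding mask. The lambda stands in
# for any objective below; K.variable/K.eval are standard Keras backend helpers.
masked_mae = weighted_masked_objective(
    lambda yt, yp: K.mean(K.abs(yp - yt), axis=-1))
y_true = K.variable(np.array([[[1.], [2.], [0.]],
                              [[3.], [4.], [0.]]]))  # (batch, time, dim)
y_pred = K.variable(np.array([[[1.], [3.], [9.]],
                              [[3.], [5.], [9.]]]))
mask = K.variable(np.array([[1., 1., 0.],
                            [1., 1., 0.]]))          # last timestep is padding
print(K.eval(masked_mae(y_true, y_pred, weights=None, mask=mask)))  # ~0.5
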
def output(self, train=False):
    X = self.get_input('input')(train)
    if self.mode == 'ave':
        return K.mean(X, axis=1)
    elif self.mode == 'sum':
        return K.sum(X, axis=1)
    elif self.mode == 'mul':
        return K.prod(X, axis=1)
    else:
        raise Exception('Unknown merge mode')

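# Illustrative check of the three merge modes over the time axis (axis=1); the
# layer plumbing above is not needed to see the math. Keras 1.x backend API.
X = K.variable(np.array([[[1., 2.], [3., 4.]]]))  # (batch=1, time=2, dim=2)
print(K.eval(K.mean(X, axis=1)))  # 'ave' -> [[2., 3.]]
print(K.eval(K.sum(X, axis=1)))   # 'sum' -> [[4., 6.]]
print(K.eval(K.prod(X, axis=1)))  # 'mul' -> [[3., 8.]]
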
def mean_absolute_percentage_error(y_true, y_pred):
    diff = K.abs((y_true - y_pred) /
                 K.clip(K.abs(y_true), K.epsilon(), np.inf))
    return 100. * K.mean(diff, axis=-1)

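# Worked example (values illustrative): predicting 90 for a true 100 and 220
# for a true 200 are both 10% errors, so MAPE comes out to 10.
y_true = K.variable(np.array([[100., 200.]]))
y_pred = K.variable(np.array([[90., 220.]]))
print(K.eval(mean_absolute_percentage_error(y_true, y_pred)))  # [10.]
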
def mean_absolute_error(y_true, y_pred):
    return K.mean(K.abs(y_pred - y_true), axis=-1)

def cosine_proximity(y_true, y_pred):
    assert K.ndim(y_true) == 2
    assert K.ndim(y_pred) == 2
    y_true = K.l2_normalize(y_true, axis=1)
    y_pred = K.l2_normalize(y_pred, axis=1)
    return -K.mean(y_true * y_pred, axis=1)

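# Sanity check: because the snippet averages (rather than sums) the products of
# the normalized vectors, identical directions score -1/dim instead of -1; the
# ranking between candidates is unchanged, only the scale.
a = K.variable(np.array([[1., 0.], [1., 0.]]))
b = K.variable(np.array([[2., 0.], [0., 3.]]))
print(K.eval(cosine_proximity(a, b)))  # [-0.5, 0.] for dim=2
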
def binary_crossentropy(y_true, y_pred):
    return K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)

def hinge(y_true, y_pred):
    return K.mean(K.maximum(1. - y_true * y_pred, 0.), axis=-1)

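# Worked example: hinge expects labels in {-1, +1}. A correct score with margin
# >= 1 contributes 0; anything short of the margin is penalized linearly, so
# here the losses are max(1 - 0.5, 0) = 0.5 and max(1 + 0.5, 0) = 1.5.
y_true = K.variable(np.array([[1., -1.]]))
y_pred = K.variable(np.array([[0.5, 0.5]]))
print(K.eval(hinge(y_true, y_pred)))  # [(0.5 + 1.5) / 2] = [1.]
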
def mean_categorical_crossentropy(y_true, y_pred):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''
    return K.mean(K.categorical_crossentropy(y_pred, y_true), axis=-1)

def squared_hinge(y_true, y_pred):
    return K.mean(K.square(K.maximum(1. - y_true * y_pred, 0.)), axis=-1)

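# Same inputs as the hinge example above: margin violations are squared, so the
# wrong-signed prediction dominates: (0.5**2 + 1.5**2) / 2 = 1.25.
print(K.eval(squared_hinge(K.variable(np.array([[1., -1.]])),
                           K.variable(np.array([[0.5, 0.5]])))))  # [1.25]
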
def mean_squared_logarithmic_error(y_true, y_pred):
    first_log = K.log(K.clip(y_pred, K.epsilon(), np.inf) + 1.)
    second_log = K.log(K.clip(y_true, K.epsilon(), np.inf) + 1.)
    return K.mean(K.square(first_log - second_log), axis=-1)

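# Worked example: MSLE compares log(1 + x), so it measures relative rather than
# absolute error; the clip guards the log against non-positive inputs. Values
# chosen so the two log1p terms are exactly 1 and 2.
y_true = K.variable(np.array([[np.e - 1.]]))      # log1p -> 1
y_pred = K.variable(np.array([[np.e**2 - 1.]]))   # log1p -> 2
print(K.eval(mean_squared_logarithmic_error(y_true, y_pred)))  # [(2 - 1)**2] = [1.]
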
def poisson(y_true, y_pred):
    return K.mean(y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1)

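# Illustrative check: up to a term constant in y_pred, this is the negative
# log-likelihood of counts y_true under a Poisson rate y_pred, so it is
# minimized when the predicted rate matches the observed count.
y_true = K.variable(np.array([[2.]]))
for rate in (1., 2., 3.):
    print(rate, K.eval(poisson(y_true, K.variable(np.array([[rate]])))))
# rate 2.0 gives the smallest of the three values (~0.614)
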
def root_mean_squared_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

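# Quick check (illustrative values): errors of 3 and 4 give sqrt((9 + 16) / 2)
# ~= 3.536; unlike MSE, the result is in the same units as the targets.
y_true = K.variable(np.array([[0., 0.]]))
y_pred = K.variable(np.array([[3., 4.]]))
print(K.eval(root_mean_squared_error(y_true, y_pred)))  # [~3.536]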