def cos(x, y):
  """Cosine similarity between `x` and `y`.

  Computes dot(x, y^T) divided by the product of the Euclidean norms of
  `x` and `y`; epsilon is added to the denominator for numerical safety.
  """
  # Product of the two L2 norms, kept away from zero with epsilon.
  norm_product = model_ops.sqrt(
      model_ops.sum(tf.square(x)) * model_ops.sum(tf.square(y)))
  return model_ops.dot(x, tf.transpose(y)) / (norm_product +
                                              model_ops.epsilon())
def poisson(y_true, y_pred):
  """Poisson loss: mean(y_pred - y_true * log(y_pred)) over the last axis.

  Epsilon is added inside the log to avoid log(0).
  """
  log_pred = tf.log(y_pred + model_ops.epsilon())
  return model_ops.mean(y_pred - y_true * log_pred, axis=-1)
def cos(x, y):
  """Cosine similarity between `x` and `y`.

  Returns dot(x, transpose(y)) normalized by the product of the L2 norms
  of both inputs, with epsilon added to the denominator for stability.
  """
  denom = (model_ops.sqrt(
      model_ops.sum(tf.square(x)) * model_ops.sum(tf.square(y))) +
           model_ops.epsilon())
  return model_ops.dot(x, tf.transpose(y)) / denom
def mean_squared_logarithmic_error(y_true, y_pred):
  """Mean squared logarithmic error over the last axis.

  Both inputs are clipped below at epsilon before log(1 + value), so the
  log is always defined.
  """
  log_pred = tf.log(model_ops.clip(y_pred, model_ops.epsilon(), None) + 1.)
  log_true = tf.log(model_ops.clip(y_true, model_ops.epsilon(), None) + 1.)
  return model_ops.mean(tf.square(log_pred - log_true), axis=-1)
def kullback_leibler_divergence(y_true, y_pred):
  """KL divergence sum(y_true * log(y_true / y_pred)) over the last axis.

  Both distributions are clipped into [epsilon, 1] so the ratio and its
  log stay finite.
  """
  clipped_true = model_ops.clip(y_true, model_ops.epsilon(), 1)
  clipped_pred = model_ops.clip(y_pred, model_ops.epsilon(), 1)
  ratio = clipped_true / clipped_pred
  return model_ops.sum(clipped_true * tf.log(ratio), axis=-1)
def __call__(self, p):
  """Rescale `p` to unit L2 norm along `self.axis`.

  Epsilon is added to the norm so an all-zero slice does not divide by
  zero. Presumably used as a Keras-style weight constraint — confirm
  against the enclosing class.
  """
  norm = model_ops.sqrt(
      model_ops.sum(tf.square(p), axis=self.axis, keepdims=True))
  return p / (model_ops.epsilon() + norm)
def mean_absolute_percentage_error(y_true, y_pred):
  """Mean absolute percentage error over the last axis, as a percentage.

  |y_true| is clipped below at epsilon so division by zero cannot occur.
  """
  scale = model_ops.clip(tf.abs(y_true), model_ops.epsilon(), None)
  relative_error = tf.abs((y_true - y_pred) / scale)
  return 100. * model_ops.mean(relative_error, axis=-1)
def poisson(y_true, y_pred):
  """Poisson loss averaged over the last axis.

  Computes mean(y_pred - y_true * log(y_pred + epsilon)); the epsilon
  term keeps the log finite when predictions reach zero.
  """
  per_element = y_pred - y_true * tf.log(y_pred + model_ops.epsilon())
  return model_ops.mean(per_element, axis=-1)
def __call__(self, p):
  """Clip the L2 norm of `p` along `self.axis` to at most `self.m`.

  Slices whose norm already satisfies the bound are scaled by ~1;
  epsilon in the denominator guards against a zero norm. Presumably a
  Keras-style max-norm weight constraint — confirm against the
  enclosing class.
  """
  norms = model_ops.sqrt(
      model_ops.sum(tf.square(p), axis=self.axis, keepdims=True))
  # Target norms: unchanged when <= m, capped at m otherwise.
  desired = model_ops.clip(norms, 0, self.m)
  scale = desired / (model_ops.epsilon() + norms)
  return p * scale
def mean_absolute_percentage_error(y_true, y_pred):
  """Mean absolute percentage error (in percent) over the last axis.

  The denominator |y_true| is clipped below at epsilon to avoid
  division by zero.
  """
  diff = tf.abs(
      (y_true - y_pred) /
      model_ops.clip(tf.abs(y_true), model_ops.epsilon(), None))
  return 100. * model_ops.mean(diff, axis=-1)
def __call__(self, p):
  """Constrain the L2 norm of `p` along `self.axis` to at most `self.m`.

  Each slice is multiplied by desired_norm / actual_norm, where the
  desired norm is the actual norm clipped into [0, m]; epsilon keeps
  the division safe for zero-norm slices.
  """
  actual = model_ops.sqrt(
      model_ops.sum(tf.square(p), axis=self.axis, keepdims=True))
  capped = model_ops.clip(actual, 0, self.m)
  p *= (capped / (model_ops.epsilon() + actual))
  return p