def __call__(self, x):
  regularization = 0
  if self.l1:
    regularization += model_ops.sum(self.l1 * tf.abs(x))
  if self.l2:
    regularization += model_ops.sum(self.l2 * tf.square(x))
  return regularization
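# A minimal NumPy sketch of the same penalty, to show what the regularizer
# returns for a weight tensor: l1 * sum(|w|) + l2 * sum(w^2). The names
# `l1_l2_penalty`, `l1`, `l2`, and `w` below are illustrative, not part of
# the original code.
import numpy as np

def l1_l2_penalty(w, l1=0.01, l2=0.01):
  penalty = 0.0
  if l1:
    penalty += np.sum(l1 * np.abs(w))
  if l2:
    penalty += np.sum(l2 * np.square(w))
  return penalty

# Example: a small 2x2 weight matrix.
w = np.array([[1.0, -2.0], [0.5, 0.0]])
print(l1_l2_penalty(w))  # 0.01 * 3.5 + 0.01 * 5.25 = 0.0875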
def softmax(x):
  ndim = get_ndim(x)
  if ndim == 2:
    return tf.nn.softmax(x)
  elif ndim == 3:
    e = tf.exp(x - model_ops.max(x, axis=-1, keepdims=True))
    s = model_ops.sum(e, axis=-1, keepdims=True)
    return e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor '
                     'that is not 2D or 3D. '
                     'Here, ndim=' + str(ndim))
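# A minimal NumPy sketch of the 3D branch above: subtracting the per-row max
# before exponentiating keeps exp() from overflowing, and dividing by the sum
# over the last axis yields probabilities along that axis. Purely illustrative.
import numpy as np

def softmax_last_axis(x):
  e = np.exp(x - np.max(x, axis=-1, keepdims=True))
  return e / np.sum(e, axis=-1, keepdims=True)

x = np.array([[[1.0, 2.0, 3.0], [1000.0, 1000.0, 1000.0]]])  # shape (1, 2, 3)
p = softmax_last_axis(x)
print(p.shape)                           # (1, 2, 3)
print(np.allclose(p.sum(axis=-1), 1.0))  # True, even for the large-value row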
def cos(x, y):
  denom = (
      model_ops.sqrt(model_ops.sum(tf.square(x)) * model_ops.sum(tf.square(y)))
      + model_ops.epsilon())
  return model_ops.dot(x, tf.transpose(y)) / denom
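# A NumPy sketch mirroring the computation above: the dot product of x with
# y transposed, divided by the product of the two tensors' global L2 norms,
# plus a small epsilon to avoid division by zero. The name `cos_similarity`
# and the epsilon value are illustrative assumptions.
import numpy as np

def cos_similarity(x, y, eps=1e-7):
  denom = np.sqrt(np.sum(np.square(x)) * np.sum(np.square(y))) + eps
  return np.dot(x, y.T) / denom

x = np.array([[1.0, 0.0]])
y = np.array([[1.0, 0.0]])
print(cos_similarity(x, y))  # ~[[1.]] for identical unit vectors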
def kullback_leibler_divergence(y_true, y_pred):
  y_true = model_ops.clip(y_true, model_ops.epsilon(), 1)
  y_pred = model_ops.clip(y_pred, model_ops.epsilon(), 1)
  return model_ops.sum(y_true * tf.log(y_true / y_pred), axis=-1)
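# A NumPy sketch of the clipped KL divergence above: both distributions are
# clipped into [epsilon, 1] so the log and the ratio stay finite, then
# sum(y_true * log(y_true / y_pred)) is taken over the last axis. The
# function name and epsilon default are illustrative.
import numpy as np

def kl_divergence(y_true, y_pred, eps=1e-7):
  y_true = np.clip(y_true, eps, 1.0)
  y_pred = np.clip(y_pred, eps, 1.0)
  return np.sum(y_true * np.log(y_true / y_pred), axis=-1)

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(kl_divergence(p, p))  # 0.0 for identical distributions
print(kl_divergence(p, q))  # > 0 when the distributions differ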
def __call__(self, p):
  return p / (model_ops.epsilon() + model_ops.sqrt(
      model_ops.sum(tf.square(p), axis=self.axis, keepdims=True)))
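# A NumPy sketch of the unit-norm constraint above: each slice along `axis`
# is divided by its L2 norm (plus epsilon), so the constrained weights have
# norm ~1 along that axis. The axis choice and epsilon below are illustrative.
import numpy as np

def unit_norm(p, axis=0, eps=1e-7):
  return p / (eps + np.sqrt(np.sum(np.square(p), axis=axis, keepdims=True)))

p = np.array([[3.0, 0.0], [4.0, 2.0]])
q = unit_norm(p, axis=0)
print(np.sqrt(np.sum(np.square(q), axis=0)))  # ~[1. 1.]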
def __call__(self, p):
  norms = model_ops.sqrt(
      model_ops.sum(tf.square(p), axis=self.axis, keepdims=True))
  desired = model_ops.clip(norms, 0, self.m)
  p *= (desired / (model_ops.epsilon() + norms))
  return p
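# A NumPy sketch of the max-norm constraint above: norms along `axis` are
# clipped to a ceiling `m`, and the weights are rescaled by the ratio of the
# clipped norm to the actual norm, so vectors within the limit stay (nearly)
# unchanged while larger ones are shrunk onto the ball of radius m. The
# function name and defaults below are illustrative.
import numpy as np

def max_norm(p, m=2.0, axis=0, eps=1e-7):
  norms = np.sqrt(np.sum(np.square(p), axis=axis, keepdims=True))
  desired = np.clip(norms, 0, m)
  return p * (desired / (eps + norms))

p = np.array([[3.0, 1.0], [4.0, 0.0]])        # column norms: 5.0 and 1.0
q = max_norm(p, m=2.0, axis=0)
print(np.sqrt(np.sum(np.square(q), axis=0)))  # ~[2. 1.] after constraining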