Example #1
 def __call__(self, x):
   # Accumulate L1 and/or L2 penalties over the tensor x;
   # self.l1 and self.l2 are the penalty coefficients.
   regularization = 0
   if self.l1:
     regularization += model_ops.sum(self.l1 * tf.abs(x))
   if self.l2:
     regularization += model_ops.sum(self.l2 * tf.square(x))
   return regularization
Example #2
 def __call__(self, x):
   regularization = 0
   if self.l1:
     regularization += model_ops.sum(self.l1 * tf.abs(x))
   if self.l2:
     regularization += model_ops.sum(self.l2 * tf.square(x))
   return regularization
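Examples #1 and #2 are the `__call__` method of an L1/L2 weight regularizer. As a minimal, hypothetical sketch of how such a class fits together, assuming `model_ops.sum` behaves like a reduce-sum (approximated here with `tf.reduce_sum` from TensorFlow 2.x):

import tensorflow as tf

class L1L2Regularizer:
  """Hypothetical wrapper for the __call__ shown above."""

  def __init__(self, l1=0.0, l2=0.0):
    self.l1 = l1  # L1 penalty coefficient
    self.l2 = l2  # L2 penalty coefficient

  def __call__(self, x):
    regularization = 0
    if self.l1:
      regularization += tf.reduce_sum(self.l1 * tf.abs(x))
    if self.l2:
      regularization += tf.reduce_sum(self.l2 * tf.square(x))
    return regularization

# Usage: penalize a weight matrix.
w = tf.constant([[1.0, -2.0], [3.0, -4.0]])
print(L1L2Regularizer(l1=0.01, l2=0.01)(w).numpy())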
Example #3
def softmax(x):
  ndim = get_ndim(x)
  if ndim == 2:
    return tf.nn.softmax(x)
  elif ndim == 3:
    # Numerically stable softmax along the last axis:
    # subtract the per-row max before exponentiating.
    e = tf.exp(x - model_ops.max(x, axis=-1, keepdims=True))
    s = model_ops.sum(e, axis=-1, keepdims=True)
    return e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor '
                     'that is not 2D or 3D. '
                     'Here, ndim=' + str(ndim))
Example #4
def softmax(x):
    ndim = get_ndim(x)
    if ndim == 2:
        return tf.nn.softmax(x)
    elif ndim == 3:
        e = tf.exp(x - model_ops.max(x, axis=-1, keepdims=True))
        s = model_ops.sum(e, axis=-1, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor '
                         'that is not 2D or 3D. '
                         'Here, ndim=' + str(ndim))
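The same max-subtraction trick, written as a self-contained NumPy sketch (`get_ndim` and `model_ops` are assumed to be thin wrappers over the usual rank check and reductions):

import numpy as np

def softmax_lastaxis(x):
  """Numerically stable softmax along the last axis (any rank)."""
  e = np.exp(x - np.max(x, axis=-1, keepdims=True))
  return e / np.sum(e, axis=-1, keepdims=True)

x = np.array([[1.0, 2.0, 3.0], [1000.0, 1000.0, 1000.0]])
print(softmax_lastaxis(x))  # rows sum to 1; no overflow on the large row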
Example #5
def cos(x, y):
  # Cosine similarity; the epsilon term guards against division
  # by zero when either input is all zeros.
  denom = (
      model_ops.sqrt(model_ops.sum(tf.square(x)) *
                     model_ops.sum(tf.square(y))) + model_ops.epsilon())
  return model_ops.dot(x, tf.transpose(y)) / denom
Example #6
def cos(x, y):
    denom = (model_ops.sqrt(
        model_ops.sum(tf.square(x)) * model_ops.sum(tf.square(y))) +
             model_ops.epsilon())
    return model_ops.dot(x, tf.transpose(y)) / denom
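A minimal NumPy rendering of the same cosine computation, assuming `model_ops.dot` is a matrix product and `model_ops.epsilon()` a small constant on the order of 1e-7:

import numpy as np

EPSILON = 1e-7  # stand-in for model_ops.epsilon()

def cos(x, y):
  denom = np.sqrt(np.sum(np.square(x)) * np.sum(np.square(y))) + EPSILON
  return np.dot(x, y.T) / denom

x = np.array([[1.0, 0.0]])
y = np.array([[1.0, 1.0]])
print(cos(x, y))  # ~0.707: cosine of 45 degrees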
Example #7
def kullback_leibler_divergence(y_true, y_pred):
    # Clip both distributions away from zero so the log stays finite.
    y_true = model_ops.clip(y_true, model_ops.epsilon(), 1)
    y_pred = model_ops.clip(y_pred, model_ops.epsilon(), 1)
    return model_ops.sum(y_true * tf.log(y_true / y_pred), axis=-1)
Example #8
 def __call__(self, p):
   # Rescale p to unit L2 norm along self.axis.
   return p / (model_ops.epsilon() + model_ops.sqrt(
       model_ops.sum(tf.square(p), axis=self.axis, keepdims=True)))
Example #9
 def __call__(self, p):
   # Clip the L2 norm of p along self.axis to at most self.m.
   norms = model_ops.sqrt(model_ops.sum(
       tf.square(p), axis=self.axis, keepdims=True))
   desired = model_ops.clip(norms, 0, self.m)
   p *= (desired / (model_ops.epsilon() + norms))
   return p
Example #10
def kullback_leibler_divergence(y_true, y_pred):
  y_true = model_ops.clip(y_true, model_ops.epsilon(), 1)
  y_pred = model_ops.clip(y_pred, model_ops.epsilon(), 1)
  return model_ops.sum(y_true * tf.log(y_true / y_pred), axis=-1)
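As a standalone NumPy sketch of the divergence above, clipping both inputs to [1e-7, 1] before taking the log, mirroring `model_ops.clip` and `model_ops.epsilon()`:

import numpy as np

EPSILON = 1e-7  # stand-in for model_ops.epsilon()

def kl_divergence(y_true, y_pred):
  y_true = np.clip(y_true, EPSILON, 1.0)
  y_pred = np.clip(y_pred, EPSILON, 1.0)
  return np.sum(y_true * np.log(y_true / y_pred), axis=-1)

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(kl_divergence(p, q))  # > 0; equals 0 only when p == q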
Example #11
 def __call__(self, p):
     return p / (model_ops.epsilon() + model_ops.sqrt(
         model_ops.sum(tf.square(p), axis=self.axis, keepdims=True)))
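Examples #8 and #11 are the `__call__` of a unit-norm weight constraint. A hypothetical sketch of how such a class might be assembled and applied, using NumPy in place of `model_ops`:

import numpy as np

EPSILON = 1e-7  # stand-in for model_ops.epsilon()

class UnitNorm:
  """Hypothetical constraint: rescale to unit L2 norm along an axis."""

  def __init__(self, axis=0):
    self.axis = axis

  def __call__(self, p):
    return p / (EPSILON + np.sqrt(
        np.sum(np.square(p), axis=self.axis, keepdims=True)))

w = np.array([[3.0, 0.0], [4.0, 1.0]])
print(UnitNorm(axis=0)(w))  # each column now has (nearly) unit norm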
Example #12
 def __call__(self, p):
     norms = model_ops.sqrt(
         model_ops.sum(tf.square(p), axis=self.axis, keepdims=True))
     desired = model_ops.clip(norms, 0, self.m)
     p *= (desired / (model_ops.epsilon() + norms))
     return p
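And a matching NumPy sketch of the max-norm constraint in Examples #9 and #12, assuming `self.m` is the norm ceiling:

import numpy as np

EPSILON = 1e-7  # stand-in for model_ops.epsilon()

class MaxNorm:
  """Hypothetical constraint: cap the L2 norm along an axis at m."""

  def __init__(self, m=2.0, axis=0):
    self.m = m
    self.axis = axis

  def __call__(self, p):
    norms = np.sqrt(np.sum(np.square(p), axis=self.axis, keepdims=True))
    desired = np.clip(norms, 0.0, self.m)
    return p * (desired / (EPSILON + norms))

w = np.array([[3.0, 0.1], [4.0, 0.1]])
print(MaxNorm(m=2.0, axis=0)(w))  # first column rescaled from norm 5 to 2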