示例#1
0
 def __call__(self, x):
   """Return the regularization penalty for tensor `x`.

   Adds an L1 term (sum of |x| scaled by self.l1) and/or an L2 term
   (sum of x^2 scaled by self.l2), each only when its coefficient is
   truthy (i.e. nonzero).
   """
   penalty = 0
   # Iterate over (coefficient, elementwise transform) pairs so each
   # active term contributes coeff * transform(x), summed over all elements.
   for coeff, transform in ((self.l1, tf.abs), (self.l2, tf.square)):
     if coeff:
       penalty += model_ops.sum(coeff * transform(x))
   return penalty
示例#2
0
 def __call__(self, x):
   """Compute the combined L1/L2 regularization penalty on `x`.

   Each term is included only when its coefficient (self.l1 / self.l2)
   is truthy; with both coefficients falsy the result is the int 0.
   """
   total = 0
   if self.l1:
     # L1 penalty: l1-scaled sum of absolute values.
     total = total + model_ops.sum(self.l1 * tf.abs(x))
   if self.l2:
     # L2 penalty: l2-scaled sum of squares.
     total = total + model_ops.sum(self.l2 * tf.square(x))
   return total
示例#3
0
def softmax(x):
    """Softmax activation along the last axis of a 2D or 3D tensor.

    Raises:
        ValueError: if `x` is neither 2D nor 3D.
    """
    rank = get_ndim(x)
    # Guard clause: reject unsupported ranks up front.
    if rank != 2 and rank != 3:
        raise ValueError('Cannot apply softmax to a tensor '
                         'that is not 2D or 3D. '
                         'Here, ndim=' + str(rank))
    if rank == 2:
        return tf.nn.softmax(x)
    # rank == 3: subtract the per-row max before exponentiating for
    # numerical stability, then normalize along the last axis.
    exps = tf.exp(x - model_ops.max(x, axis=-1, keepdims=True))
    return exps / model_ops.sum(exps, axis=-1, keepdims=True)
示例#4
0
def softmax(x):
  """Apply softmax over the last dimension of a 2D or 3D tensor.

  2D inputs are delegated to tf.nn.softmax; 3D inputs use an explicit,
  numerically-stabilized exp/normalize. Any other rank raises ValueError.
  """
  ndim = get_ndim(x)
  if ndim == 2:
    return tf.nn.softmax(x)
  if ndim == 3:
    # Shift by the per-slice max so exp() cannot overflow, then divide
    # each exponential by the sum over the last axis.
    stabilized = tf.exp(x - model_ops.max(x, axis=-1, keepdims=True))
    return stabilized / model_ops.sum(stabilized, axis=-1, keepdims=True)
  raise ValueError('Cannot apply softmax to a tensor '
                   'that is not 2D or 3D. '
                   'Here, ndim=' + str(ndim))