Example #1
def _apply_activation_with_summaries(x, activation_fn):
  """Returns activation_fn(x).

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    x: The tensor to apply activation to.
    activation_fn: An activation function.

  Returns:
    A tensor with activation applied to x.
  """
  if activation_fn is None:
    return x
  y = activation_fn(x)
  if activation_fn in (nn.relu, nn.softplus, nn.relu6):
    # Using x for comparison to avoid floating point equality and/or epsilons.
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.less(
            x, 0.0))), '%s/zeros' % y.op.name)
  if activation_fn is nn.relu6:
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.greater(
            x, 6.0))), '%s/sixes' % y.op.name)
  if activation_fn is nn.l2_normalize:
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.sqrt(standard_ops.reduce_sum(
            standard_ops.square(x), 1))), '%s/length' % y.op.name)
  _add_histogram_summary(y, '%s/activations' % y.op.name)
  return y
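For intuition, the '%s/zeros' summary above is just the fraction of pre-activation values below zero (the inputs a ReLU-family function clips), and '%s/sixes' the fraction above six. A minimal NumPy sketch of the same statistics (the function name is illustrative, not from the snippet):

import numpy as np

def activation_saturation_stats(x):
    """Fraction of pre-activations a ReLU-family activation saturates.

    Mirrors the scalar summaries above: 'zeros' is the share of inputs
    below 0 (zeroed by relu/relu6), 'sixes' the share above 6 (capped
    by relu6).
    """
    x = np.asarray(x, dtype=np.float32)
    return {
        'zeros': float(np.mean(x < 0.0)),  # reduce_mean(to_float(less(x, 0)))
        'sixes': float(np.mean(x > 6.0)),  # reduce_mean(to_float(greater(x, 6)))
    }

print(activation_saturation_stats([-1.0, 0.5, 7.0, 3.0]))
# {'zeros': 0.25, 'sixes': 0.25}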
Example #2
def _apply_activation_with_summaries(x, activation_fn):
    """Returns activation_fn(x).

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    x: The tensor to apply activation to.
    activation_fn: An activation function.
  Returns:
    A tensor with activation applied to x.
  """
    if activation_fn is None:
        return x
    y = activation_fn(x)
    if activation_fn in (nn.relu, nn.softplus, nn.relu6):
        # Using x for comparison to avoid floating point equality and/or epsilons.
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(standard_ops.less(x, 0.0))),
            '%s/zeros' % y.op.name)
    if activation_fn is nn.relu6:
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(standard_ops.greater(x, 6.0))),
            '%s/sixes' % y.op.name)
    if activation_fn is nn.l2_normalize:
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.sqrt(
                    standard_ops.reduce_sum(standard_ops.square(x), 1))),
            '%s/length' % y.op.name)
    _add_histogram_summary(y, '%s/activations' % y.op.name)
    return y
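The same instrumentation ports to TF 2.x with tf.summary; a hedged sketch (the function name and explicit step argument are my additions, and the calls must run under a default summary writer such as one from tf.summary.create_file_writer):

import tensorflow as tf

def apply_activation_with_summaries_v2(x, activation_fn, name, step):
    """TF 2.x sketch: apply the activation and record the same
    saturation summaries via tf.summary."""
    if activation_fn is None:
        return x
    y = activation_fn(x)
    if activation_fn in (tf.nn.relu, tf.nn.softplus, tf.nn.relu6):
        tf.summary.scalar('%s/zeros' % name,
                          tf.reduce_mean(tf.cast(x < 0.0, tf.float32)),
                          step=step)
    if activation_fn is tf.nn.relu6:
        tf.summary.scalar('%s/sixes' % name,
                          tf.reduce_mean(tf.cast(x > 6.0, tf.float32)),
                          step=step)
    tf.summary.histogram('%s/activations' % name, y, step=step)
    return y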
Example #3
 def lo(weights, name='lo_regularizer'):
     """Applies group column regularization to weights."""
     with tf.name_scope(name) as scope:
          my_scale = ops.convert_to_tensor(
              scale, dtype=weights.dtype.base_dtype, name='scale')
         # if tf.__version__ <= '0.12':
         #     standard_ops_fn = standard_ops.mul
         # else:
         standard_ops_fn = standard_ops.multiply
          return standard_ops_fn(
              my_scale,
              standard_ops.reduce_sum(standard_ops.sqrt(
                  standard_ops.reduce_sum(tf.square(weights), 0))),
              name=scope)
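Note that scale is a free variable here: lo/li functions like these are closures returned by a factory that binds the regularization strength (e.g. TensorLayer's lo_regularizer(scale)). A minimal sketch of such a factory, using the TF 1.x-era imports the snippet assumes (the validation and usage lines are illustrative):

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops

def lo_regularizer(scale):
    """Returns the lo closure above with scale bound in."""
    if scale < 0.0:
        raise ValueError('scale must be non-negative, got %g' % scale)

    def lo(weights, name='lo_regularizer'):
        """Applies group column regularization to weights."""
        with tf.name_scope(name) as scope:
            my_scale = ops.convert_to_tensor(
                scale, dtype=weights.dtype.base_dtype, name='scale')
            return standard_ops.multiply(
                my_scale,
                standard_ops.reduce_sum(standard_ops.sqrt(
                    standard_ops.reduce_sum(tf.square(weights), 0))),
                name=scope)

    return lo

# TF 1.x usage: get_variable adds the regularizer's output to
# tf.GraphKeys.REGULARIZATION_LOSSES.
w = tf.get_variable('w', shape=[128, 64], regularizer=lo_regularizer(0.001))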
Example #4
 def li(weights, name=None):
   """Applies li regularization to weights."""
   with ops.op_scope([weights], name, 'li_regularizer') as scope:
     my_scale = ops.convert_to_tensor(scale,
                                      dtype=weights.dtype.base_dtype,
                                      name='scale')
     return standard_ops.mul(
         my_scale,
         standard_ops.reduce_sum(standard_ops.sqrt(
             standard_ops.reduce_sum(tf.square(weights), 1))),
         name=scope)
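Concretely, li computes scale * sum_i sqrt(sum_j w_ij^2): an L2 norm over each row (axis 1) followed by an L1 sum across rows, which pushes whole rows of the weight matrix to zero. A quick NumPy check of the formula (the function name is mine):

import numpy as np

def li_norm(weights, scale):
    """scale times the sum of row-wise Euclidean norms."""
    weights = np.asarray(weights, dtype=np.float64)
    return scale * np.sum(np.sqrt(np.sum(np.square(weights), axis=1)))

w = np.array([[3.0, 4.0],    # row norm 5
              [0.0, 0.0]])   # row norm 0: a fully pruned input unit
print(li_norm(w, scale=0.1))  # 0.5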
Example #5
 def lo(weights, name=None):
   """Applies group column regularization to weights."""
   with ops.op_scope([weights], name, 'lo_regularizer') as scope:
     my_scale = ops.convert_to_tensor(scale,
                                      dtype=weights.dtype.base_dtype,
                                      name='scale')
     return standard_ops.mul(
         my_scale,
          standard_ops.reduce_sum(standard_ops.sqrt(
              standard_ops.reduce_sum(tf.square(weights), 0))),
         name=scope)
Example #6
 def lo(weights, name='lo_regularizer'):
     """Applies group column regularization to weights."""
     with tf.name_scope(name) as scope:
          my_scale = ops.convert_to_tensor(
              scale, dtype=weights.dtype.base_dtype, name='scale')
         # if tf.__version__ <= '0.12':
         #     standard_ops_fn = standard_ops.mul
         # else:
         standard_ops_fn = standard_ops.multiply
         return standard_ops_fn(
              my_scale,
              standard_ops.reduce_sum(standard_ops.sqrt(
                  standard_ops.reduce_sum(tf.square(weights), 0))),
             name=scope
         )
Example #7
 def li(weights, name=None):
   """Applies li regularization to weights."""
    # tf.op_scope(values, name, default_name) is deprecated;
    # use tf.name_scope(name, default_name, values) instead.
    try:  # TF12
        with ops.name_scope(name, 'li_regularizer', [weights]) as scope:
           my_scale = ops.convert_to_tensor(scale,
                                          dtype=weights.dtype.base_dtype,
                                          name='scale')
           return standard_ops.mul(
             my_scale,
              standard_ops.reduce_sum(standard_ops.sqrt(
                  standard_ops.reduce_sum(tf.square(weights), 1))),
             name=scope)
    except Exception:  # TF11
       with ops.op_scope([weights], name, 'li_regularizer') as scope:
           my_scale = ops.convert_to_tensor(scale,
                                          dtype=weights.dtype.base_dtype,
                                          name='scale')
           return standard_ops.mul(
             my_scale,
              standard_ops.reduce_sum(standard_ops.sqrt(
                  standard_ops.reduce_sum(tf.square(weights), 1))),
             name=scope)
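The try/except above bridges the signature change from tf.op_scope(values, name, default_name) to tf.name_scope(name, default_name, values) around TF 0.12. The same fallback can be isolated in a helper that catches only the expected failure; this is a sketch of one way to restructure it, assuming the old API raises TypeError when called with the new argument order:

from tensorflow.python.framework import ops

def _regularizer_scope(weights, name, default_name):
    """Hypothetical helper: open the name scope on either API generation."""
    try:
        # TF >= 0.12: name_scope(name, default_name, values)
        return ops.name_scope(name, default_name, [weights])
    except TypeError:
        # TF <= 0.11: op_scope(values, name, default_name)
        return ops.op_scope([weights], name, default_name)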
Example #8
 def group(weights):
     """Applies group regularization to weights."""
     with tf.name_scope('group_regularizer') as scope:
         my_scale = ops.convert_to_tensor(scale,
                                          dtype=weights.dtype.base_dtype,
                                          name='scale')
         standard_ops_fn = standard_ops.multiply
         return standard_ops_fn(my_scale,
                                standard_ops.reduce_sum(
                                    standard_ops.sqrt(
                                        standard_ops.reduce_sum(
                                            tf.square(weights), 1))),
                                name=scope)
Example #9
 def li(weights, name=None):
   """Applies li regularization to weights."""
   with tf.name_scope('li_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale,
                                         dtype=weights.dtype.base_dtype,
                                         name='scale')
        if tf.__version__ <= '0.12':
            standard_ops_fn = standard_ops.mul
        else:
            standard_ops_fn = standard_ops.multiply
        return standard_ops_fn(
            my_scale,
            standard_ops.reduce_sum(standard_ops.sqrt(
                standard_ops.reduce_sum(tf.square(weights), 1))),
            name=scope)
Example #10
 def li(weights, name=None):
   """Applies li regularization to weights."""
   with tf.name_scope('li_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale,
                                         dtype=weights.dtype.base_dtype,
                                         name='scale')
        if tf.__version__ <= '0.12':
            standard_ops_fn = standard_ops.mul
        else:
            standard_ops_fn = standard_ops.multiply
        return standard_ops_fn(
            my_scale,
            standard_ops.reduce_sum(standard_ops.sqrt(
                standard_ops.reduce_sum(tf.square(weights), 1))),
            name=scope)
Example #11
 def lo(weights, name=None):
   """Applies group column regularization to weights."""
   with ops.op_scope([weights], name, 'lo_regularizer') as scope:
     my_scale = ops.convert_to_tensor(scale,
                                      dtype=weights.dtype.base_dtype,
                                      name='scale')
    #   return standard_ops.mul(
    #       my_scale,
    #       standard_ops.reduce_sum(standard_ops.sqrt(
    #           standard_ops.reduce_sum(weights**2, 0))),
    #       name=scope)
     return standard_ops.mul(
         my_scale,
          standard_ops.reduce_sum(standard_ops.sqrt(
              standard_ops.reduce_sum(tf.square(weights), 0))),
          # standard_ops.reduce_mean(standard_ops.sqrt(
          #     standard_ops.reduce_mean(tf.square(weights), 0))),
         name=scope)
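The only difference between li and lo is the reduction axis: li takes the L2 norm over axis 1 (grouping the outgoing weights of each input row), while lo reduces over axis 0 (grouping each output column). A small NumPy illustration on one matrix:

import numpy as np

w = np.array([[3.0, 0.0],
              [4.0, 0.0]])

li = np.sum(np.sqrt(np.sum(w**2, axis=1)))  # row groups: 3 + 4 = 7
lo = np.sum(np.sqrt(np.sum(w**2, axis=0)))  # column groups: 5 + 0 = 5
print(li, lo)  # 7.0 5.0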
Example #12
 def li(weights, name=None):
     """Applies li regularization to weights."""
     with ops.op_scope([weights], name, 'li_regularizer') as scope:
         my_scale = ops.convert_to_tensor(scale,
                                          dtype=weights.dtype.base_dtype,
                                          name='scale')
         # return standard_ops.mul(
         #     my_scale,
         #     standard_ops.reduce_sum(standard_ops.sqrt(
         #         standard_ops.reduce_sum(weights**2, 1))),
         #     name=scope)
         return standard_ops.mul(
             my_scale,
             standard_ops.reduce_sum(
                 standard_ops.sqrt(
                     standard_ops.reduce_sum(tf.square(weights), 1))),
             # standard_ops.reduce_mean(standard_ops.sqrt(
             #     standard_ops.reduce_mean(tf.square(weights), 1))),
             name=scope)
Example #13
 def _apply_variational_kernel(self, inputs):
     if (not isinstance(self.kernel_posterior, independent_lib.Independent)
             or not isinstance(self.kernel_posterior.distribution,
                               normal_lib.Normal)):
         raise TypeError(
             "`DenseLocalReparameterization` requires "
             "`kernel_posterior_fn` produce an instance of "
             "`tf.distributions.Independent(tf.distributions.Normal)` "
             "(saw: \"{}\").".format(self.kernel_posterior.name))
     self.kernel_posterior_affine = normal_lib.Normal(
         loc=self._matmul(inputs, self.kernel_posterior.distribution.loc),
         scale=standard_ops.sqrt(
             self._matmul(
                 standard_ops.square(inputs),
                 standard_ops.square(
                     self.kernel_posterior.distribution.scale))))
     self.kernel_posterior_affine_tensor = (self.kernel_posterior_tensor_fn(
         self.kernel_posterior_affine))
     self.kernel_posterior_tensor = None
     return self.kernel_posterior_affine_tensor
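The closed form used for kernel_posterior_affine follows from the linearity of Gaussians: with independent entries W_ij ~ Normal(mu_ij, sigma_ij^2), the product x @ W is Normal with mean x @ mu and variance x^2 @ sigma^2 (summed over the contraction axis), so the layer can sample activations directly instead of weights. A NumPy sanity check of that moment propagation (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(1, 3))
loc = rng.normal(size=(3, 2))
scale = rng.uniform(0.1, 1.0, size=(3, 2))

# Monte Carlo: sample many weight matrices, then multiply.
samples = x @ (loc + scale * rng.normal(size=(100000, 3, 2)))

# Closed form used by the layer: loc' = x @ loc, scale'^2 = x^2 @ scale^2.
loc_out = x @ loc
scale_out = np.sqrt(np.square(x) @ np.square(scale))

print(np.allclose(samples.mean(axis=0), loc_out, atol=0.02))   # True
print(np.allclose(samples.std(axis=0), scale_out, atol=0.02))  # True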
Example #14
 def _apply_variational_kernel(self, inputs):
   if not self.kernel_use_local_reparameterization:
     self.kernel.posterior_tensor = self.kernel.posterior_tensor_fn(
         self.kernel.posterior)
     self.kernel.posterior_affine = None
     self.kernel.posterior_affine_tensor = None
     return self._matmul(inputs, self.kernel.posterior_tensor)
   if not isinstance(self.kernel.posterior, normal_lib.Normal):
     raise TypeError("`kernel_use_local_reparameterization=True` requires "
                     "`kernel_posterior_fn` produce an instance of "
                     "`tf.distributions.Normal` (saw: \"{}\").".format(
                         type(self.kernel.posterior).__name__))
   self.kernel.posterior_affine = normal_lib.Normal(
       loc=self._matmul(inputs, self.kernel.posterior.loc),
       scale=standard_ops.sqrt(self._matmul(
           standard_ops.square(inputs),
           standard_ops.square(self.kernel.posterior.scale))))
   self.kernel.posterior_affine_tensor = (
       self.kernel.posterior_tensor_fn(self.kernel.posterior_affine))
   self.kernel.posterior_tensor = None
   return self.kernel.posterior_affine_tensor
Example #15
 def _apply_variational_kernel(self, inputs):
     if not self.kernel_use_local_reparameterization:
         self.kernel.posterior_tensor = self.kernel.posterior_tensor_fn(
             self.kernel.posterior)
         self.kernel.posterior_affine = None
         self.kernel.posterior_affine_tensor = None
         return self._matmul(inputs, self.kernel.posterior_tensor)
     if not isinstance(self.kernel.posterior, normal_lib.Normal):
         raise TypeError(
             "`kernel_use_local_reparameterization=True` requires "
             "`kernel_posterior_fn` produce an instance of "
             "`tf.distributions.Normal` (saw: \"{}\").".format(
                 type(self.kernel.posterior).__name__))
     self.kernel.posterior_affine = normal_lib.Normal(
         loc=self._matmul(inputs, self.kernel.posterior.loc),
         scale=standard_ops.sqrt(
             self._matmul(standard_ops.square(inputs),
                          standard_ops.square(
                              self.kernel.posterior.scale))))
     self.kernel.posterior_affine_tensor = (self.kernel.posterior_tensor_fn(
         self.kernel.posterior_affine))
     self.kernel.posterior_tensor = None
     return self.kernel.posterior_affine_tensor
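For contrast with the kernel_use_local_reparameterization=False branch above: plain reparameterization draws one weight matrix shared by the whole batch, while the local variant draws fresh pre-activation noise per example, which typically lowers gradient variance at the same cost. A compact sketch of the two strategies (names are illustrative):

import numpy as np

rng = np.random.default_rng(1)

def sample_weight_space(x, loc, scale):
    """Plain reparameterization: one weight draw shared by the batch."""
    w = loc + scale * rng.normal(size=loc.shape)
    return x @ w

def sample_local(x, loc, scale):
    """Local reparameterization: sample the pre-activations directly
    from Normal(x @ loc, sqrt(x^2 @ scale^2)), fresh noise per row."""
    out_loc = x @ loc
    out_scale = np.sqrt(np.square(x) @ np.square(scale))
    return out_loc + out_scale * rng.normal(size=out_loc.shape)

x = rng.normal(size=(4, 3))
loc = rng.normal(size=(3, 2))
scale = rng.uniform(0.1, 1.0, size=(3, 2))
print(sample_weight_space(x, loc, scale).shape)  # (4, 2)
print(sample_local(x, loc, scale).shape)         # (4, 2)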