Example 1
def summarize_activation(op):
    """Summarize an activation.

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    op: The tensor to summarize (assumed to be a layer activation).
  Returns:
    The summary op created to summarize `op`.
  """
    if op.op.type in ('Relu', 'Softplus', 'Relu6'):
        # Using inputs to avoid floating point equality and/or epsilons.
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(
                    standard_ops.less(
                        op.op.inputs[0],
                        standard_ops.cast(0.0, op.op.inputs[0].dtype)))),
            '%s/zeros' % op.op.name)
    if op.op.type == 'Relu6':
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(
                    standard_ops.greater(
                        op.op.inputs[0],
                        standard_ops.cast(6.0, op.op.inputs[0].dtype)))),
            '%s/sixes' % op.op.name)
    return _add_histogram_summary(op, '%s/activation' % op.op.name)
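The two helpers used above, _add_scalar_summary and _add_histogram_summary, are not shown on this page. Below is a minimal sketch of what they might look like, assuming the TF1-style tf.summary API; the actual helpers in tensorflow.contrib.layers may differ in details such as default tags and summary collections.

import tensorflow as tf

def _add_scalar_summary(tensor, tag=None):
    # Record a single scalar value; the tag defaults to the op name.
    tag = tag or '%s_summary' % tensor.op.name
    return tf.summary.scalar(tag, tensor)

def _add_histogram_summary(tensor, tag=None):
    # Record the full distribution of the tensor's values.
    tag = tag or '%s_summary' % tensor.op.name
    return tf.summary.histogram(tag, tensor)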
Example 2
def _apply_activation_with_summaries(x, activation_fn):
    """Returns activation_fn(x).

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    x: The tensor to apply activation to.
    activation_fn: An activation function.
  Returns:
    A tensor with activation applied to x.
  """
    if activation_fn is None:
        return x
    y = activation_fn(x)
    if activation_fn in (nn.relu, nn.softplus, nn.relu6):
        # Using x for comparison to avoid floating point equality and/or epsilons.
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(standard_ops.less(x, 0.0))),
            '%s/zeros' % y.op.name)
    if activation_fn is nn.relu6:
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(standard_ops.greater(x, 6.0))),
            '%s/sixes' % y.op.name)
    if activation_fn is nn.l2_normalize:
        # Summarize the average pre-normalization L2 norm of each row.
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.sqrt(
                    standard_ops.reduce_sum(standard_ops.square(x), 1))),
            '%s/length' % y.op.name)
    _add_histogram_summary(y, '%s/activations' % y.op.name)
    return y
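A hypothetical usage in a TF1 graph; inputs, w, and b are invented for this sketch:

import tensorflow as tf
from tensorflow.python.ops import nn

inputs = tf.placeholder(tf.float32, [None, 128])
w = tf.Variable(tf.truncated_normal([128, 64], stddev=0.1))
b = tf.Variable(tf.zeros([64]))

# The pre-activation goes in; the zeros/sixes/length summaries are attached
# as a side effect, and the activated tensor comes back.
hidden = _apply_activation_with_summaries(tf.matmul(inputs, w) + b, nn.relu)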
Example 3
def _apply_activation_with_summaries(x, activation_fn):
  """Returns activation_fn(x).

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    x: The tensor to apply activation to.
    activation_fn: An activation function.
  Returns:
    A tensor with activation applied to x.
  """
  if activation_fn is None:
    return x
  y = activation_fn(x)
  if activation_fn in (nn.relu, nn.softplus, nn.relu6):
    # Using x for comparison to avoid floating point equality and/or epsilons.
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.less(
            x, 0.0))), '%s/zeros' % y.op.name)
  if activation_fn is nn.relu6:
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.greater(
            x, 6.0))), '%s/sixes' % y.op.name)
  if activation_fn is nn.l2_normalize:
    # Summarize the average pre-normalization L2 norm of each row.
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.sqrt(standard_ops.reduce_sum(
            standard_ops.square(x), 1))), '%s/length' % y.op.name)
  _add_histogram_summary(y, '%s/activations' % y.op.name)
  return y
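The zeros branch works because comparing the pre-activation x to 0.0 yields a boolean tensor; casting it to float and averaging gives the fraction of units the ReLU zeroed out. A quick numeric check, assuming TF1 graph execution (the values are made up):

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 1.0, 3.0])
zeros_fraction = tf.reduce_mean(tf.to_float(tf.less(x, 0.0)))

with tf.Session() as sess:
    # Two of the five pre-activations are negative, so this prints 0.4.
    print(sess.run(zeros_fraction))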
Example 4
def summarize_activation(op):
    """Summarize an activation.

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    op: The tensor to summarize (assumed to be a layer activation).
  Returns:
    The summary op created to summarize `op`.
  """
    if op.op.type in ("Relu", "Softplus", "Relu6"):
        # Using inputs to avoid floating point equality and/or epsilons.
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(
                    standard_ops.less(
                        op.op.inputs[0],
                        standard_ops.cast(0.0, op.op.inputs[0].dtype),
                    )
                )
            ),
            "%s/zeros" % op.op.name,
        )
    if op.op.type == "Relu6":
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(
                    standard_ops.greater(
                        op.op.inputs[0],
                        standard_ops.cast(6.0, op.op.inputs[0].dtype),
                    )
                )
            ),
            "%s/sixes" % op.op.name,
        )
    return _add_histogram_summary(op, "%s/activation" % op.op.name)
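A hypothetical usage in a TF1 graph: summarize_activation dispatches on op.op.type, so it must be called on the activation output itself, not on the pre-activation.

import tensorflow as tf

pre = tf.placeholder(tf.float32, [None, 64])
act = tf.nn.relu6(pre)  # act.op.type == 'Relu6'

# Adds the zeros and sixes scalar summaries as side effects and returns
# the histogram summary op.
summary_op = summarize_activation(act)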