Example #1
def summarize_activation(op):
    """Summarize an activation.

  This adds useful summaries specific to the given activation op.

  Args:
    op: The tensor to summarize (assumed to be a layer activation).
  Returns:
    The summary op created to summarize `op`.
  """
    if op.op.type in ('Relu', 'Softplus', 'Relu6'):
        # Using inputs to avoid floating point equality and/or epsilons.
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(
                    standard_ops.less(
                        op.op.inputs[0],
                        standard_ops.cast(0.0, op.op.inputs[0].dtype)))),
            '%s/zeros' % op.op.name)
    if op.op.type == 'Relu6':
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(
                    standard_ops.greater(
                        op.op.inputs[0],
                        standard_ops.cast(6.0, op.op.inputs[0].dtype)))),
            '%s/sixes' % op.op.name)
    return _add_histogram_summary(op, '%s/activation' % op.op.name)
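The '%s/zeros' scalar tracks the fraction of pre-activation inputs that the ReLU family clips to zero, a quick signal for dying units. A minimal NumPy sketch of the same statistic (the values here are made up for illustration):

import numpy as np

# Hypothetical pre-activation values for one layer.
pre_act = np.array([-1.5, -0.2, 0.0, 0.7, 2.3], dtype=np.float32)

# The zeros fraction is the mean of a 0/1 indicator for inputs strictly
# below zero, i.e. units a ReLU would silence.
zeros_fraction = np.mean((pre_act < 0.0).astype(np.float32))
print(zeros_fraction)  # 0.4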
Example #2
def _apply_activation_with_summaries(x, activation_fn):
  """Returns activation_fn(x).

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    x: The tensor to apply activation to.
    activation_fn: An activation function.
  Returns:
    A tensor with activation applied to x.
  """
  if activation_fn is None:
    return x
  y = activation_fn(x)
  if activation_fn in (nn.relu, nn.softplus, nn.relu6):
    # Using x for comparison to avoid floating point equality and/or epsilons.
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.less(
            x, 0.0))), '%s/zeros' % y.op.name)
  if activation_fn is nn.relu6:
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.to_float(standard_ops.greater(
            x, 6.0))), '%s/sixes' % y.op.name)
  if activation_fn is nn.l2_normalize:
    _add_scalar_summary(
        standard_ops.reduce_mean(standard_ops.sqrt(standard_ops.reduce_sum(
            standard_ops.square(x), 1))), '%s/length' % y.op.name)
  _add_histogram_summary(y, '%s/activations' % y.op.name)
  return y
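For nn.l2_normalize, the '%s/length' scalar is the mean L2 norm of the un-normalized inputs over the batch. The same statistic in NumPy (batch values made up for illustration):

import numpy as np

# Hypothetical batch of two vectors, prior to normalization.
x = np.array([[3.0, 4.0], [6.0, 8.0]])

# Mean Euclidean length over the batch: (5.0 + 10.0) / 2 = 7.5.
mean_length = np.mean(np.sqrt(np.sum(np.square(x), axis=1)))
print(mean_length)  # 7.5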
Example #3
def _apply_activation_with_summaries(x, activation_fn):
    """Returns activation_fn(x).

  This applies the given activation and adds useful summaries specific to the
  activation.

  Args:
    x: The tensor to apply activation to.
    activation_fn: An activation function.
  Returns:
    A tensor with activation applied to x.
  """
    if activation_fn is None:
        return x
    y = activation_fn(x)
    if activation_fn in (nn.relu, nn.softplus, nn.relu6):
        # Using x for comparison to avoid floating point equality and/or epsilons.
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(standard_ops.less(x, 0.0))),
            '%s/zeros' % y.op.name)
    if activation_fn is nn.relu6:
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(standard_ops.greater(x, 6.0))),
            '%s/sixes' % y.op.name)
    if activation_fn is nn.l2_normalize:
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.sqrt(
                    standard_ops.reduce_sum(standard_ops.square(x), 1))),
            '%s/length' % y.op.name)
    _add_histogram_summary(y, '%s/activations' % y.op.name)
    return y
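A sketch of a typical call site, assuming TensorFlow 1.x and that _apply_activation_with_summaries lives in the module defining _add_scalar_summary and _add_histogram_summary (the layer shapes are made up):

import tensorflow as tf  # assuming TensorFlow 1.x

x = tf.placeholder(tf.float32, [None, 128])
w = tf.get_variable('w', [128, 64])

# Wrapping the activation emits the zeros/sixes scalars and the
# activations histogram as a side effect of building the layer.
y = _apply_activation_with_summaries(tf.matmul(x, w), tf.nn.relu)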
Example #4
def summarize_activation(op):
    """Summarize an activation.

  This adds useful summaries specific to the given activation op.

  Args:
    op: The tensor to summarize (assumed to be a layer activation).
  Returns:
    The summary op created to summarize `op`.
  """
    if op.op.type in ("Relu", "Softplus", "Relu6"):
        # Using inputs to avoid floating point equality and/or epsilons.
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(standard_ops.less(op.op.inputs[0], standard_ops.cast(0.0, op.op.inputs[0].dtype)))
            ),
            "%s/zeros" % op.op.name,
        )
    if op.op.type == "Relu6":
        _add_scalar_summary(
            standard_ops.reduce_mean(
                standard_ops.to_float(
                    standard_ops.greater(op.op.inputs[0], standard_ops.cast(6.0, op.op.inputs[0].dtype))
                )
            ),
            "%s/sixes" % op.op.name,
        )
    return _add_histogram_summary(op, "%s/activation" % op.op.name)
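Because this variant dispatches on the op type rather than the Python activation function, it can be called on any existing activation tensor. A hypothetical call, assuming TensorFlow 1.x and the module-local _add_*_summary helpers:

import tensorflow as tf  # assuming TensorFlow 1.x

logits = tf.constant([[-1.0, 2.0], [3.0, -4.0]])
act = tf.nn.relu6(logits, name='layer1/act')

# The op type is 'Relu6', so both the zeros and the sixes fractions are
# summarized before the activation histogram is added.
summarize_activation(act)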
Example #5
 def testIndexedSlicesWithDenseShape(self):
     with self.test_session():
         data = ops.IndexedSlices(tf.constant([1, 2, 3]), tf.constant([0, 1]), dense_shape=tf.constant([3]))
         zero = tf.constant(0)
         one = tf.constant(1)
         less_op = tf.less(zero, one)
         switch_false, switch_true = control_flow_ops.switch(data, less_op)
         self.assertAllEqual([1, 2, 3], switch_true.values.eval())
         self.assertAllEqual([0, 1], switch_true.indices.eval())
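switch routes its input to exactly one of the two returned outputs based on the predicate, so only the taken branch is safe to evaluate; the test reads switch_true because less(0, 1) is True. A stripped-down dense-tensor sketch of the same routing, assuming TensorFlow 1.x:

import tensorflow as tf  # assuming TensorFlow 1.x
from tensorflow.python.ops import control_flow_ops

data = tf.constant([1, 2, 3])
# The predicate is True, so `data` flows to the second output only.
switch_false, switch_true = control_flow_ops.switch(data, tf.constant(True))
with tf.Session() as sess:
    print(sess.run(switch_true))  # [1 2 3]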
Example #6
 def testIndexedSlicesWithDenseShape(self):
   with self.test_session():
     data = ops.IndexedSlices(tf.constant([1, 2, 3]),
                              tf.constant([0, 1]),
                              dense_shape=tf.constant([3]))
     zero = tf.constant(0)
     one = tf.constant(1)
     less_op = tf.less(zero, one)
     switch_false, switch_true = control_flow_ops.switch(data, less_op)
     self.assertAllEqual([1, 2, 3], switch_true.values.eval())
     self.assertAllEqual([0, 1], switch_true.indices.eval())
Example #7
 def testWhileContext(self):
   with self.test_session() as sess:
     i = tf.constant(0)
     c = lambda i: tf.less(i, 10)
     b = lambda i: tf.add(i, 1)
     tf.while_loop(c, b, [i])
     for op in sess.graph.get_operations():
       c = op._get_control_flow_context()
       if c:
         compare.ProtoEq(
             c.to_proto(),
             control_flow_ops.WhileContext.from_proto(c.to_proto()).to_proto())
Example #8
 def testCondContext(self):
   with self.test_session() as sess:
     x = tf.constant(2)
     y = tf.constant(5)
     control_flow_ops.cond(tf.less(x, y),
                           lambda: tf.mul(x, 17),
                           lambda: tf.add(y, 23))
     for op in sess.graph.get_operations():
       c = op._get_control_flow_context()
       if c:
         compare.ProtoEq(
             c.to_proto(),
             control_flow_ops.CondContext.from_proto(c.to_proto()).to_proto())
Example #9
 def testWhileContext(self):
     with self.test_session() as sess:
         i = tf.constant(0)
         c = lambda i: tf.less(i, 10)
         b = lambda i: tf.add(i, 1)
         tf.while_loop(c, b, [i])
         for op in sess.graph.get_operations():
             c = op._get_control_flow_context()
             if c:
                 compare.ProtoEq(
                     c.to_proto(),
                     control_flow_ops.WhileContext.from_proto(
                         c.to_proto()).to_proto())
Example #10
 def testCondContext(self):
     with self.test_session() as sess:
         x = tf.constant(2)
         y = tf.constant(5)
         control_flow_ops.cond(tf.less(x, y), lambda: tf.mul(x, 17),
                               lambda: tf.add(y, 23))
         for op in sess.graph.get_operations():
             c = op._get_control_flow_context()
             if c:
                 compare.ProtoEq(
                     c.to_proto(),
                     control_flow_ops.CondContext.from_proto(
                         c.to_proto()).to_proto())
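Both context tests assert the same invariant: serializing a control-flow context with to_proto and rebuilding it with from_proto must reproduce an identical proto. A condensed sketch of that round trip for the while-loop case, assuming TensorFlow 1.x:

import tensorflow as tf  # assuming TensorFlow 1.x
from tensorflow.python.ops import control_flow_ops

i = tf.constant(0)
tf.while_loop(lambda i: tf.less(i, 10), lambda i: tf.add(i, 1), [i])

for op in tf.get_default_graph().get_operations():
    ctx = op._get_control_flow_context()
    if ctx:
        proto = ctx.to_proto()
        # The round trip through from_proto must be lossless.
        assert proto == control_flow_ops.WhileContext.from_proto(proto).to_proto()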