Example #1
  def testAddDispatchForTypes_With_CppOp(self):
    original_handlers = gen_math_ops.add._tf_dispatchers[:]

    # Override the behavior of gen_math_ops.add.
    @dispatch.dispatch_for_types(gen_math_ops.add, CustomTensor)
    def custom_add(x, y, name=None):  # pylint: disable=unused-variable
      return CustomTensor(gen_math_ops.add(x.tensor, y.tensor, name),
                          (x.score+y.score) / 2.0)
    self.assertEqual(len(math_ops.add._tf_dispatchers),
                     len(original_handlers) + 1)

    # Test that we see the overridden behavior when using CustomTensors.
    x = CustomTensor([1, 2, 3], 2.0)
    y = CustomTensor([7, 8, 2], 0.0)
    x_plus_y = gen_math_ops.add(x, y)
    self.assertAllEqual(self.evaluate(x_plus_y.tensor), [8, 10, 5])
    self.assertNear(x_plus_y.score, 1.0, 0.001)

    # Test that we still get the right behavior when using normal Tensors.
    a = [1, 2, 3]
    b = [4, 5, 6]
    a_plus_b = gen_math_ops.add(a, b)
    self.assertAllEqual(a_plus_b, [5, 7, 9])

    # Test that we still get a TypeError or ValueError if we pass some
    # type that's not supported by any dispatcher.
    with self.assertRaises((TypeError, ValueError)):
      gen_math_ops.add(a, None)

    # Clean up
    gen_math_ops.add._tf_dispatchers = original_handlers
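The test above presupposes a CustomTensor wrapper type that pairs a tensor with a floating-point score, and registers the dispatcher for that type. As a rough sketch only (the class actually defined in TensorFlow's dispatch tests may differ), such a wrapper could look like:

# Hypothetical sketch of the CustomTensor wrapper assumed by the test above;
# the real definition in TensorFlow's dispatch tests may differ.
class CustomTensor(object):
  """Pairs a Tensor with a floating-point score."""

  def __init__(self, tensor, score):
    self.tensor = ops.convert_to_tensor(tensor)
    self.score = score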
Example #2
def create_test_network():
  """Convolutional neural network for test.

  Returns:
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')

  name_to_node = graph_compute_order.parse_graph_nodes(g.as_graph_def())
  return name_to_node
Example #3
    def testAddDispatchForTypes_With_CppOp(self):
        original_handlers = gen_math_ops.add._tf_dispatchers[:]

        # Override the behavior of gen_math_ops.add.
        @dispatch.dispatch_for_types(gen_math_ops.add, CustomTensor)
        def custom_add(x, y, name=None):  # pylint: disable=unused-variable
            return CustomTensor(gen_math_ops.add(x.tensor, y.tensor, name),
                                (x.score + y.score) / 2.0)

        self.assertEqual(len(math_ops.add._tf_dispatchers),
                         len(original_handlers) + 1)

        # Test that we see the overridden behavior when using CustomTensors.
        x = CustomTensor([1, 2, 3], 2.0)
        y = CustomTensor([7, 8, 2], 0.0)
        x_plus_y = gen_math_ops.add(x, y)
        self.assertAllEqual(self.evaluate(x_plus_y.tensor), [8, 10, 5])
        self.assertNear(x_plus_y.score, 1.0, 0.001)

        # Test that we still get the right behavior when using normal Tensors.
        a = [1, 2, 3]
        b = [4, 5, 6]
        a_plus_b = gen_math_ops.add(a, b)
        self.assertAllEqual(a_plus_b, [5, 7, 9])

        # Test that we still get a TypeError or ValueError if we pass some
        # type that's not supported by any dispatcher.
        with self.assertRaises((TypeError, ValueError)):
            gen_math_ops.add(a, None)

        # Clean up
        gen_math_ops.add._tf_dispatchers = original_handlers
Example #4
    def call(self, inputs):
        inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
        rank = common_shapes.rank(inputs)

        if rank > 2:
            # Broadcasting is required for the inputs.
            outputs = standard_ops.tensordot(inputs, self.kernel,
                                             [[rank - 1], [0]])
            # Reshape the output back to the original ndim of the input.
            if not context.executing_eagerly():
                shape = inputs.get_shape().as_list()
                output_shape = shape[:-1] + [self.units]
                outputs.set_shape(output_shape)
        else:
            outputs = gen_math_ops.mat_mul(inputs, self.kernel)

        w_norm = 0.5 * tf.linalg.norm(self.kernel, axis=0)**2
        x_norm = tf.expand_dims(0.5 * tf.linalg.norm(inputs, axis=-1)**2,
                                axis=-1)

        outputs = gen_math_ops.add(outputs, -w_norm)
        outputs = gen_math_ops.add(outputs, -x_norm)
        outputs /= (self.sigma**2)

        return self.activation(outputs)
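Up to the final 1/sigma**2 scaling, this call computes minus one half of the squared distance between the input and each kernel column, since x.w - 0.5*||w||**2 - 0.5*||x||**2 = -0.5*||x - w||**2. A small NumPy check of that identity (illustrative only, not part of the layer):

# NumPy check of the identity behind the layer above (illustrative only):
# x.w - 0.5*||w||^2 - 0.5*||x||^2 == -0.5*||x - w||^2
import numpy as np

x = np.random.randn(3)
w = np.random.randn(3)
lhs = x.dot(w) - 0.5 * np.linalg.norm(w) ** 2 - 0.5 * np.linalg.norm(x) ** 2
rhs = -0.5 * np.linalg.norm(x - w) ** 2
assert np.isclose(lhs, rhs)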
Example #5
def create_test_network():
  """Convolutional neural network for test.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An input test image with unknown spatial resolution.
    x = array_ops.placeholder(
        dtypes.float32, (None, None, None, 1), name='input_image')
    # Left branch before first addition.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    # Right branch before first addition.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
    # First addition.
    l4 = nn.relu(l1 + l3, name='L4_relu')
    # Left branch after first addition.
    l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
    # Right branch after first addition.
    l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
    # Final addition.
    gen_math_ops.add(l5, l6, name='L7_add')

  return g
Example #6
def create_test_network():
    """Convolutional neural network for test.

    Returns:
      g: Tensorflow graph object (Graph proto).
    """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(dtypes.float32, (None, None, None, 1),
                                  name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch before first addition.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]],
                               name='L2_pad')
        l2 = slim.conv2d(l2_pad,
                         1, [3, 3],
                         stride=2,
                         scope='L2',
                         padding='VALID')
        l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
        # First addition.
        l4 = nn.relu(l1 + l3, name='L4_relu')
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
        # Right branch after first addition.
        l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
        # Final addition.
        gen_math_ops.add(l5, l6, name='L7_add')

    return g
Example #7
def create_test_network(placeholder_resolution,
                        convert_variables_to_constants):
    """Convolutional neural network for test.

    Args:
      placeholder_resolution: Resolution to use for input placeholder. Used for
        height and width dimensions.
      convert_variables_to_constants: Whether to convert variables to constants.

    Returns:
      name_to_node: Dict keyed by node name, each entry containing the node's
        NodeDef.
    """
    g = ops.Graph()
    sess = session.Session(graph=g)
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(
            dtypes.float32,
            (1, placeholder_resolution, placeholder_resolution, 1),
            name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch before first addition.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]],
                               name='L2_pad')
        l2 = slim.conv2d(l2_pad,
                         1, [3, 3],
                         stride=2,
                         scope='L2',
                         padding='VALID')
        l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
        # First addition.
        l4 = nn.relu(l1 + l3, name='L4_relu')
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
        # Right branch after first addition.
        l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
        # Final addition.
        gen_math_ops.add(l5, l6, name='L7_add')

        if convert_variables_to_constants:
            sess.run(variables.global_variables_initializer())
            graph_def = graph_util.convert_variables_to_constants(
                sess, g.as_graph_def(), ['L7_add'])
        else:
            graph_def = g.as_graph_def()

    name_to_node = graph_compute_order.parse_graph_nodes(graph_def)
    return name_to_node
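A hypothetical way to call the helper above and inspect the resulting mapping (the resolution value and the printed fields are illustrative, not taken from the original test):

# Hypothetical usage of create_test_network as defined above.
name_to_node = create_test_network(placeholder_resolution=224,
                                   convert_variables_to_constants=True)
# Each entry is a NodeDef; look up the final addition by its node name.
l7_node = name_to_node['L7_add']
print(l7_node.name, l7_node.op)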
Example #8
def fucking_deep_gaze_logsumexp(input_tensor, axis=None, keepdims=False,
                                name=None):
    """Log-sum-exp with an extra log(N) offset, adapted for deep_gaze.

    Adapted from
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/math_ops.py.
    It is the same as the classic logsumexp except that log(N) is subtracted,
    where N is the number of tensors over which the logsumexp is computed (if
    you have 10 readout nets, N=10). I don't know why they do this.
    """
    keepdims = False if keepdims is None else keepdims
    input_tensor = ops.convert_to_tensor(input_tensor)
    with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
        # Subtract the (finite) maximum before exponentiating, for numerical
        # stability; it is added back at the end.
        raw_max = tf.reduce_max(input_tensor, axis=axis, keep_dims=True)
        my_max = array_ops.stop_gradient(
            array_ops.where(
                gen_math_ops.is_finite(raw_max), raw_max,
                array_ops.zeros_like(raw_max)))
        result = gen_math_ops.log(
            # tf.reduce_sum here would give the classic logsumexp;
            # tf.reduce_mean is the deep_gaze modification (output only).
            tf.reduce_mean(
                gen_math_ops.exp(tf.subtract(input_tensor, my_max)),
                axis, keep_dims=keepdims))
        if not keepdims:
            my_max = array_ops.reshape(my_max, array_ops.shape(result))
        result = gen_math_ops.add(result, my_max)
        return result
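The only change relative to the standard logsumexp is the reduce_mean: since a mean is a sum divided by N, the result equals the classic logsumexp minus log(N). A small NumPy check of that claim (illustrative, independent of the TensorFlow code above):

# NumPy check: log(mean(exp(x))) == logsumexp(x) - log(N).
import numpy as np

x = np.array([0.1, 0.5, 2.0, -1.0])
classic = np.log(np.sum(np.exp(x)))
with_mean = np.log(np.mean(np.exp(x)))
assert np.isclose(with_mean, classic - np.log(x.size))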
Example #9
 def test_python_long_loop_unroll_warning(self):
   if __debug__:
     with ops.Graph().as_default():
       out_capturer = six.StringIO()
       with test.mock.patch.object(sys, 'stdout', out_capturer):
         ag_logging.echo_log_to_stdout = True
         sys.stdout = out_capturer
         control_flow.while_stmt(
             test=lambda i, _: i < 10000,
             body=lambda i, _: (i + 1, gen_math_ops.add(i, 1),),
             init_state=(0, None))
       self.assertTrue(re.match(
           r'.*ops.*loop.*large.*iterations.*Add.*', out_capturer.getvalue()))
Example #10
 def call(self, inputs):
     inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
     shape = inputs.get_shape().as_list()
     if len(shape) > 1:
         # Broadcasting is required for the inputs: apply the smoothing
         # matrix smooth_num times, blending with the raw inputs each step.
         outputs1 = []
         outputss = []
         outputst = standard_ops.tensordot(inputs, self.smatrix,
                                           [[len(shape) - 1], [0]])
         outputs1.append(
             standard_ops.scalar_mul(self.kernel[0][0], outputst))
         outputs2 = standard_ops.scalar_mul(1.0 - self.kernel[0][0], inputs)
         outputss.append(standard_ops.add(outputs1[0], outputs2))
         for i in range(1, self.smooth_num):
             outputst = standard_ops.tensordot(outputss[i - 1],
                                               self.smatrix,
                                               [[len(shape) - 1], [0]])
             outputs1.append(
                 standard_ops.scalar_mul(self.kernel[0][0], outputst))
             outputss.append(standard_ops.add(outputs1[i], outputs2))
         outputs = outputss[self.smooth_num - 1]
         # Reshape the output back to the original ndim of the input.
         if not context.executing_eagerly():
             output_shape = shape[:-1] + [self.units]
             outputs.set_shape(output_shape)
     else:
         # Scale the raw inputs by (1 - kernel) with a scalar multiply; a
         # matrix multiply with a scalar operand would not be valid here.
         outputs2 = standard_ops.scalar_mul(1.0 - self.kernel[0][0], inputs)
         outputs = gen_math_ops.mat_mul(inputs, self.smatrix)
         outputs = gen_math_ops.mat_mul(outputs, self.kernel)
         outputs = gen_math_ops.add(outputs, outputs2)
         for i in range(1, self.smooth_num):
             outputs = gen_math_ops.mat_mul(outputs, self.smatrix)
             outputs = gen_math_ops.mat_mul(outputs, self.kernel)
             outputs = gen_math_ops.add(outputs, outputs2)
     if self.use_bias:
         outputs = nn.bias_add(outputs, self.bias)
     if self.activation is not None:
         return self.activation(outputs)  # pylint: disable=not-callable
     return outputs
Example #11
 def test_python_long_loop_unroll_warning(self):
     if __debug__:
         with ops.Graph().as_default():
             out_capturer = six.StringIO()
             with test.mock.patch.object(sys, 'stdout', out_capturer):
                 ag_logging.echo_log_to_stdout = True
                 sys.stdout = out_capturer
                 control_flow.while_stmt(test=lambda i, _: i < 10000,
                                         body=lambda i, _: (
                                             i + 1,
                                             gen_math_ops.add(i, 1),
                                         ),
                                         init_state=(0, None))
             self.assertTrue(
                 re.match(r'.*ops.*loop.*large.*iterations.*Add.*',
                          out_capturer.getvalue()))
Example #12
 def test_python_long_loop_unroll_warning(self):
   if __debug__:
     with test.mock.patch.object(
         control_flow, 'INEFFICIENT_UNROLL_MIN_ITERATIONS', 10):
       with ops.Graph().as_default():
         out_capturer = six.StringIO()
         with test.mock.patch.object(sys, 'stdout', out_capturer):
           ag_logging.echo_log_to_stdout = True
           sys.stdout = out_capturer
           control_flow.while_stmt(
               test=lambda i, _: i < 100,
               body=lambda i, _: (i + 1, gen_math_ops.add(i, 1),),
               get_state=None,
               set_state=None,
               init_vars=(0, None),
               basic_symbol_names=('i',),
               composite_symbol_names=(),
               opts={})
         self.assertTrue(re.match(
             r'.*ops.*loop.*large.*iterations.*Add.*',
             out_capturer.getvalue()))
Example #13
 def call(self, inputs, **kwargs):
     inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
     shape = inputs.get_shape().as_list()
     if len(shape) > 2:
         # Broadcasting is required for the inputs.
         outputs = standard_ops.tensordot(inputs, self.kernel,
                                          [[len(shape) - 1], [0]])
         # Reshape the output back to the original ndim of the input.
         # if not context.executing_eagerly():
         #     output_shape = shape[:-1] + [self.units]
         #     outputs.set_shape(output_shape)
     else:
         # notice: batch product here
         # outputs = reduce_sum(multiply(self.x0, inputs), axis=1, keep_dims=True)
         outputs = matmul(expand_dims(self.x0, axis=2),
                          expand_dims(inputs, axis=2),
                          transpose_a=False,
                          transpose_b=True)
         # the static shape
         # shape_kernel = convert_to_tensor([shape[0], 1, 1])
         # the dynamic shape
         shape_kernel = tf.convert_to_tensor([tf.shape(inputs)[0], 1, 1])
         outputs = matmul(
             outputs,
             tile(expand_dims(self.kernel, axis=0), multiples=shape_kernel))
         shape_outputs = tf.convert_to_tensor(tf.shape(inputs)[0:2])
         outputs = gen_math_ops.add(
             tf.reshape(outputs, shape=shape_outputs), inputs)
         outputs = nn.bias_add(outputs, self.bias)
         outputs = nn.relu(outputs)
         # outputs = gen_math_ops.mat_mul(inputs, self.kernel)
         # print(outputs)
     # if self.use_bias:
     #     outputs = nn.bias_add(outputs, self.bias)
     # if self.activation is not None:
     #     return self.activation(outputs)  # pylint: disable=not-callable
     return outputs
Example #14
 def body():
   nonlocal i
   gen_math_ops.add(i, 1)
   i += 1
Example #15
 def custom_atan2(y, x, name=None):  # pylint: disable=unused-variable
     return CustomTensor(gen_math_ops.add(y.tensor, x.tensor, name),
                         (x.score + y.score) / 2.0)
Example #16
 def fn():
   return gen_math_ops.add(c, 1)
Example #17
 def custom_add(x, y, name=None):  # pylint: disable=unused-variable
   return CustomTensor(gen_math_ops.add(x.tensor, y.tensor, name),
                       (x.score+y.score) / 2.0)
Example #18
 def add_op_to_graph(num_ops):
     with func_graph.FuncGraph("add").as_default():
         a = gen_array_ops.placeholder(dtypes.float32)
         b = gen_array_ops.placeholder(dtypes.float32)
         for _ in range(num_ops):
             gen_math_ops.add(a, b)
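A hypothetical driver for this helper in a micro-benchmark, timing how long graph construction takes (the timing code is an assumption, not part of the original benchmark):

# Hypothetical benchmark driver for add_op_to_graph (not from the original source).
import time

start = time.time()
add_op_to_graph(num_ops=1000)
print('Built 1000 Add ops in %.3f s' % (time.time() - start))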
Example #19
 def fn():
     return gen_math_ops.add(c, 1)
Example #20
 def bench():
   return gen_math_ops.add(x, y)