Example #1
 def testExpandedBatch(self):
     """Test that argument passing to conv1d is handled properly."""
     # double datatype is currently not supported for convolution ops
     # on the ROCm platform
     x = constant_op.constant([1, 2, 3, 4], dtype=dtypes.float32)
     x = array_ops.expand_dims(x, 0)  # Add batch dimension
     x = array_ops.expand_dims(x, 2)  # And depth dimension
     x = array_ops.stack([x, x])  # Make batch shape [2, 1]
     filters = constant_op.constant([2, 1], dtype=dtypes.float32)
     filters = array_ops.expand_dims(filters, 1)  # in_channels
     filters = array_ops.expand_dims(filters, 2)  # out_channels
     # Filters is 2x1x1
     for stride in [1, 2]:
         with self.cached_session(use_gpu=test.is_gpu_available()):
             c = nn_ops.conv1d(x, filters, stride, padding="VALID")
             reduced = array_ops.squeeze(c)  # Squeeze out dims 1 and 3.
             output = self.evaluate(reduced)
             if stride == 1:
                 self.assertAllClose(
                     output,
                     [[2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4],
                      [2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4]])
             else:
                 self.assertAllClose(output,
                                     [[2 * 1 + 1 * 2, 2 * 3 + 1 * 4],
                                      [2 * 1 + 1 * 2, 2 * 3 + 1 * 4]])
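A minimal standalone sketch of the same computation (assuming a TensorFlow 2.x build recent enough for conv1d to accept extra leading batch dimensions, which is exactly what this test exercises):

import tensorflow as tf

x = tf.reshape(tf.constant([1., 2., 3., 4.]), [1, 4, 1])  # [batch, width, channels]
x = tf.stack([x, x])                                      # batch shape becomes [2, 1]
filters = tf.reshape(tf.constant([2., 1.]), [2, 1, 1])    # [width, in_channels, out_channels]
y = tf.nn.conv1d(x, filters, stride=1, padding="VALID")
print(tf.squeeze(y).numpy())                              # [[ 4.  7. 10.] [ 4.  7. 10.]]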
Example #2
    def testBasic(self):
        """Test that argument passing to conv1d is handled properly."""
        dtypes_to_test = [dtypes.float16, dtypes.float32]
        # double datatype is currently not supported for convolution ops
        # on the ROCm platform
        if not test.is_built_with_rocm():
            dtypes_to_test += [dtypes.float64]

        for dtype in dtypes_to_test:
            x = constant_op.constant([1, 2, 3, 4], dtype=dtype)
            x = array_ops.expand_dims(x, 0)  # Add batch dimension
            x = array_ops.expand_dims(x, 2)  # And depth dimension
            filters = constant_op.constant([2, 1], dtype=dtype)
            filters = array_ops.expand_dims(filters, 1)  # in_channels
            filters = array_ops.expand_dims(filters, 2)  # out_channels
            # Filters is 2x1x1
            for stride in [1, 2]:
                with self.cached_session(use_gpu=test.is_gpu_available()):
                    c = nn_ops.conv1d(x, filters, stride, padding="VALID")
                    reduced = array_ops.squeeze(c)
                    output = self.evaluate(reduced)
                    if stride == 1:
                        self.assertEqual(len(output), 3)
                        self.assertAllClose(
                            output,
                            [2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4])
                    else:
                        self.assertEqual(len(output), 2)
                        self.assertAllClose(output,
                                            [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])
Example #3
 def backprop_conv(self, activation, kernel, relevance, strides, padding='SAME'):
     """Redistribute relevance through a conv1d layer with the LRP z+ rule."""
     W_p = tf.maximum(0., kernel)  # keep only the positive weights
     # Forward pass through the positive weights; the small epsilon
     # avoids division by zero.
     z = nn_ops.conv1d(activation, W_p, strides, padding) + 1e-10
     s = relevance / z  # per-output-unit normalized relevance
     # Redistribute the normalized relevance back onto the inputs.
     c = nn_ops.conv1d_transpose(s, W_p, tf.shape(activation), strides, padding)
     return activation * c
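The same z+ redistribution as a self-contained sketch (TensorFlow 2.x assumed; the function name and tensor shapes here are illustrative, not from the original). Dividing by z and scaling by the activations makes the rule conservative, which the final print checks numerically:

import tensorflow as tf

def lrp_zplus_conv1d(activation, kernel, relevance, strides=1, padding="SAME"):
    w_p = tf.maximum(0., kernel)  # positive weights only
    z = tf.nn.conv1d(activation, w_p, stride=strides, padding=padding) + 1e-10
    s = relevance / z
    c = tf.nn.conv1d_transpose(s, w_p, tf.shape(activation), strides, padding)
    return activation * c

activation = tf.random.uniform([1, 8, 3])  # [batch, width, in_channels]
kernel = tf.random.normal([2, 3, 5])       # [width, in_channels, out_channels]
relevance = tf.nn.conv1d(activation, tf.maximum(0., kernel), stride=1, padding="SAME")
out = lrp_zplus_conv1d(activation, kernel, relevance)
print(tf.reduce_sum(relevance).numpy(), tf.reduce_sum(out).numpy())  # approximately equal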
Example #4
 def testBasic(self):
     """Test that argument passing to conv1d is handled properly."""
     # TODO(yongtang): dtypes.float64 can only be enabled once conv2d supports
     # dtypes.float64, as conv1d implicitly calls conv2d after expand_dims.
     for dtype in [dtypes.float16, dtypes.float32]:
         x = constant_op.constant([1, 2, 3, 4], dtype=dtype)
         x = array_ops.expand_dims(x, 0)  # Add batch dimension
         x = array_ops.expand_dims(x, 2)  # And depth dimension
         filters = constant_op.constant([2, 1], dtype=dtype)
         filters = array_ops.expand_dims(filters, 1)  # in_channels
         filters = array_ops.expand_dims(filters, 2)  # out_channels
         # Filters is 2x1x1
         for stride in [1, 2]:
             with self.test_session(use_gpu=test.is_gpu_available()):
                 c = nn_ops.conv1d(x, filters, stride, padding="VALID")
                 reduced = array_ops.squeeze(c)
                 output = reduced.eval()
                 if stride == 1:
                     self.assertEqual(len(output), 3)
                     self.assertAllClose(
                         output,
                         [2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4])
                 else:
                     self.assertEqual(len(output), 2)
                     self.assertAllClose(output,
                                         [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])
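Note that this older variant relies on self.test_session() and Tensor.eval(), the graph-mode TF1-era test APIs; later revisions of the same test (see Example #2 above) switch to self.cached_session() and self.evaluate().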
Example #5
    def backprop_conv_input(self, X, kernel, relevance, strides, padding='SAME', lowest=0., highest=1.):
        """Redistribute relevance onto the input layer with the LRP z^B rule."""
        W_p = tf.maximum(0., kernel)  # positive part of the weights
        W_n = tf.minimum(0., kernel)  # negative part of the weights

        # Constant tensors pinned at the known lower/upper input bounds.
        L = tf.ones_like(X, tf.float32) * lowest
        H = tf.ones_like(X, tf.float32) * highest

        z_o = nn_ops.conv1d(X, kernel, strides, padding)
        z_p = nn_ops.conv1d(L, W_p, strides, padding)
        z_n = nn_ops.conv1d(H, W_n, strides, padding)

        z = z_o - z_p - z_n + 1e-10  # epsilon avoids division by zero
        s = relevance / z

        c_o = nn_ops.conv1d_transpose(s, kernel, tf.shape(X), strides, padding)
        c_p = nn_ops.conv1d_transpose(s, W_p, tf.shape(X), strides, padding)
        c_n = nn_ops.conv1d_transpose(s, W_n, tf.shape(X), strides, padding)

        return X * c_o - L * c_p - H * c_n
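This implements what the LRP literature calls the z^B (bounded-input) rule for the first layer: with inputs known to lie in [lowest, highest] (e.g., normalized pixels), the denominator becomes z = x·w − l·w⁺ − h·w⁻, and the three transposed convolutions redistribute s back through the full, positive, and negative parts of the weights respectively.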
Example #6
    def testBasic(self):
        """Test that argument passing to conv2d is handled properly."""

        x = constant_op.constant([1, 2, 3, 4], dtype=dtypes.float32)
        x = array_ops.expand_dims(x, 0)  # Add batch dimension
        x = array_ops.expand_dims(x, 2)  # And depth dimension
        filters = constant_op.constant([2, 1], dtype=dtypes.float32)
        filters = array_ops.expand_dims(filters, 1)  # in_channels
        filters = array_ops.expand_dims(filters, 2)  # out_channels
        # Filters is 2x1x1
        for stride in [1, 2]:
            with self.test_session():
                c = nn_ops.conv1d(x, filters, stride, padding="VALID")
                reduced = array_ops.squeeze(c)
                output = reduced.eval()
                if stride == 1:
                    self.assertEqual(len(output), 3)
                    self.assertAllClose(
                        output, [2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4])
                else:
                    self.assertEqual(len(output), 2)
                    self.assertAllClose(output, [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])
Example #7
 def testBasic(self):
   """Test that argument passing to conv1d is handled properly."""
   for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
     x = constant_op.constant([1, 2, 3, 4], dtype=dtype)
     x = array_ops.expand_dims(x, 0)  # Add batch dimension
     x = array_ops.expand_dims(x, 2)  # And depth dimension
     filters = constant_op.constant([2, 1], dtype=dtype)
     filters = array_ops.expand_dims(filters, 1)  # in_channels
     filters = array_ops.expand_dims(filters, 2)  # out_channels
     # Filters is 2x1x1
     for stride in [1, 2]:
       with self.test_session(use_gpu=test.is_gpu_available()):
         c = nn_ops.conv1d(x, filters, stride, padding="VALID")
         reduced = array_ops.squeeze(c)
         output = reduced.eval()
         if stride == 1:
           self.assertEqual(len(output), 3)
           self.assertAllClose(output,
                               [2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4])
         else:
           self.assertEqual(len(output), 2)
           self.assertAllClose(output, [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])