Example #1
  def testDilatedConv2D(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, 1, 3, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])

    # Test tuple dilation rate
    layer = conv_layers.Conv2D(32, [3, 3], dilation_rate=(1, 3))
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(), [5, height - 2, 3, 32])
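
A quick check on those asserted shapes: with the default 'valid' padding, a
kernel of size k dilated at rate r spans (k - 1) * r + 1 input positions, so
the output length is in - (k - 1) * r. A minimal standalone sketch (my own
check, not part of the test suite) reproducing the sizes above:

def dilated_valid_out(in_size, k, r):
    # Output length of a 'valid' convolution with dilation rate r.
    return in_size - (k - 1) * r

assert dilated_valid_out(7, 3, 3) == 1  # height with dilation_rate=3
assert dilated_valid_out(9, 3, 3) == 3  # width with dilation_rate=3
assert dilated_valid_out(7, 3, 1) == 5  # height with dilation_rate=(1, 3)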
Example #2
  def testUnknownInputChannels(self):
    images = array_ops.placeholder(dtypes.float32, (5, 7, 9, None))
    layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)

    images = array_ops.placeholder(dtypes.float32, (5, None, 7, 9))
    layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
    with self.assertRaisesRegexp(ValueError,
                                 'The channel dimension of the inputs '
                                 'should be defined. Found `None`.'):
      _ = layer.apply(images)
Example #3
    def testUnknownInputChannels(self):
        images = random_ops.random_uniform((5, 7, 9, 4))
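        # Test-only hack: overwrite the private _shape to hide the channel dim.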
        images._shape = tensor_shape.as_shape((5, 7, 9, None))
        layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
        with self.assertRaisesRegexp(
                ValueError, 'The channel dimension of the inputs '
                'should be defined. Found `None`.'):
            _ = layer.apply(images)

        images = random_ops.random_uniform((5, 4, 7, 9))
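        # Same private-attribute hack, now hiding channels_first's channel dim.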
        images._shape = tensor_shape.as_shape((5, None, 7, 9))
        layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
        with self.assertRaisesRegexp(
                ValueError, 'The channel dimension of the inputs '
                'should be defined. Found `None`.'):
            _ = layer.apply(images)
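
Both snippets fail at build time for the same reason: Conv2D creates a kernel
of shape [k_h, k_w, in_channels, filters], so the channel dimension must be
statically known. One workaround (a sketch of my own, not taken from these
tests) is to pin the channel dimension with Tensor.set_shape before applying
the layer:

images = array_ops.placeholder(dtypes.float32, (5, 7, 9, None))
images.set_shape((5, 7, 9, 4))  # pin the channel dimension statically
layer = conv_layers.Conv2D(32, [3, 3])
output = layer.apply(images)    # now builds: kernel shape is (3, 3, 4, 32)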
Example #4
  def testConv2DPaddingSame(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 32), seed=1)
    layer = conv_layers.Conv2D(64, images.get_shape()[1:3], padding='same')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height, width, 64])
Example #5
    def testConv(self):
        if 'GPU' in self.device:
            # TODO(b/32333178)
            self.skipTest(
                'Current implementation of RandomStandardNormal kernel '
                'is very slow on GPU, and has been blacklisted.')
        with self.test_scope():
            data_format = 'channels_last'
            conv = convolutional.Conv2D(
                filters=1,
                kernel_size=2,
                padding='VALID',
                data_format=data_format,
                activation=nn_ops.relu,
                kernel_initializer=init_ops.ones_initializer(),
                bias_initializer=init_ops.zeros_initializer())
            pool = pooling.MaxPooling2D(2, 2, data_format=data_format)

            def model(x):
                x = conv(x)
                return pool(x)

            model = function.defun(model)

            x = array_ops.ones([1, 4, 4, 1])
            y = model(x)
            self.assertAllEqual(y.numpy(), [[[[4.]]]])
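
The expected output follows from the initializers: every 'valid' 2x2 window
of a 4x4 tensor of ones sums to 4, relu leaves that unchanged, and a 2x2 max
pool with stride 2 over the resulting 3x3 map keeps a single 4, giving the
[1, 1, 1, 1]-shaped result. A plain-numpy sanity check (my own, not part of
the test):

import numpy as np

x = np.ones((4, 4))
conv = np.array([[x[i:i + 2, j:j + 2].sum() for j in range(3)]
                 for i in range(3)])  # 3x3 map, all entries 4.0
pooled = conv[0:2, 0:2].max()         # the only 2x2 pool window that fits
assert pooled == 4.0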
Example #6
  def testConstraints(self):
    # Conv1D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv1d = conv_layers.Conv1D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 5), seed=1)
    conv1d(inputs)
    self.assertEqual(conv1d.kernel_constraint, k_constraint)
    self.assertEqual(conv1d.bias_constraint, b_constraint)

    # Conv2D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv2d = conv_layers.Conv2D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 5), seed=1)
    conv2d(inputs)
    self.assertEqual(conv2d.kernel_constraint, k_constraint)
    self.assertEqual(conv2d.bias_constraint, b_constraint)

    # Conv3D
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    conv3d = conv_layers.Conv3D(2, 3,
                                kernel_constraint=k_constraint,
                                bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3, 3, 3, 5), seed=1)
    conv3d(inputs)
    self.assertEqual(conv3d.kernel_constraint, k_constraint)
    self.assertEqual(conv3d.bias_constraint, b_constraint)
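
The test only checks that the callables are stored on the layer; when
training, constraints of this kind are meant to be applied as projections
after each optimizer update. What these particular constraints compute,
verified in plain numpy (illustrative only, not from the source):

import numpy as np

w = np.array([1.0, 3.0])
assert np.allclose(w / w.sum(), [0.25, 0.75])      # kernel rescaled to sum to 1
assert np.allclose(w / w.max(), [1.0 / 3.0, 1.0])  # bias rescaled to max of 1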
Example #7
  def testCreateConv2DChannelsFirst(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, 4, height, width))
    layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, 32, height - 2, width - 2])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
Example #8
  def testCreateConv2DIntegerKernelSize(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, 3)
    output = layer.apply(images)
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
Example #9
  def testConv2DBiasRegularizer(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    layer = conv_layers.Conv2D(32, [3, 3], bias_regularizer=reg)
    layer.apply(images)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(layer.losses, loss_keys)
Example #10
  def testCreateConv2D(self):
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 4))
    layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
    output = layer.apply(images)
    self.assertEqual(output.op.name, 'conv2d/Relu')
    self.assertListEqual(output.get_shape().as_list(),
                         [5, height - 2, width - 2, 32])
    self.assertListEqual(layer.kernel.get_shape().as_list(), [3, 3, 4, 32])
    self.assertListEqual(layer.bias.get_shape().as_list(), [32])
Example #11
    def testCreateConvWithStrides(self):
        height, width = 6, 8
        # Test strides tuple
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 2), padding='same')
        output = layer.apply(images)
        self.assertListEqual(output.get_shape().as_list(),
                             [5, height // 2, width // 2, 32])

        # Test strides integer
        layer = conv_layers.Conv2D(32, [3, 3], strides=2, padding='same')
        output = layer.apply(images)
        self.assertListEqual(output.get_shape().as_list(),
                             [5, height // 2, width // 2, 32])

        # Test unequal strides
        layer = conv_layers.Conv2D(32, [3, 3], strides=(2, 1), padding='same')
        output = layer.apply(images)
        self.assertListEqual(output.get_shape().as_list(),
                             [5, height // 2, width, 32])
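
With padding='same' the spatial output size depends only on the stride:
out = ceil(in / stride). A standalone check of the shapes asserted above
(my own sketch, not part of the test class):

import math

def same_out(in_size, stride):
    return math.ceil(in_size / stride)

assert same_out(6, 2) == 3  # height with strides (2, 2) and 2
assert same_out(8, 2) == 4  # width with strides (2, 2) and 2
assert same_out(8, 1) == 8  # width is unchanged with strides (2, 1)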
Example #12
    def build_layer_fn(x, w_initializer, b_initializer):
      layer = layers_convolutional.Conv2D(
          filters=3,
          kernel_size=3,
          padding='same',
          kernel_initializer=w_initializer,
          bias_initializer=b_initializer)
      net = layer.apply(x)
      expected_normalized_vars = {'tf.layers.Conv2d.kernel': layer.kernel}
      expected_not_normalized_vars = {'tf.layers.Conv2d.bias': layer.bias}

      return net, expected_normalized_vars, expected_not_normalized_vars
Example #13
    def testLayerInDefun(self):
        conv = convolutional.Conv2D(
            filters=1,
            kernel_size=2,
            kernel_initializer=init_ops.ones_initializer(),
            bias_initializer=init_ops.zeros_initializer())

        @function.defun
        def model(x):
            return conv(x)

        x = array_ops.ones([1, 2, 2, 1])
        y = model(x)
        self.assertAllEqual([[[[4.0]]]], y.numpy())
Example #14
def _residual_core(x,
                   filters,
                   kernel_size=3,
                   stride=1,
                   train=True,
                   wd=0.0,
                   bn_momentum=0.99,
                   bn_epsilon=0.001):
    """ Core function of a residual unit.

    In -> conv -> bn -> relu -> conv

    Note that the normal residual layer has a batch norm and relu before the
    first conv. This is in the residual function which calls this.

    Parameters
    ----------
    x : tf tensor
        Input to be modified
    filters : int
        Number of output filters (will be used for all convolutions in the
        resnet core).
    kernel_size : int
        Size of the filter kernels
    stride : int
        Conv stride
    train : bool or tf boolean tensor
        Whether we are in the train phase or not. Can set to a tensorflow tensor
        so that it can be modified on the fly.
    wd : float
        Weight decay term for the convolutional weights
    bn_momentum : float
        The momentum for the batch normalization layers in the resnet
    bn_epsilon : float
        The epsilon for the batch normalization layers in the resnet
    """

    init = init_ops.VarianceScaling(scale=1.0, mode='fan_out')
    reg = lambda w: real_reg(w, wd, norm=2)
    bn_class = lambda name: normalization.BatchNormalization(
        name=name, momentum=bn_momentum, epsilon=bn_epsilon)
    conv_class = lambda name, stride: convolutional.Conv2D(
        filters,
        kernel_size, (stride, stride),
        use_bias=False,
        padding=('SAME' if stride == 1 else 'VALID'),
        kernel_initializer=init,
        kernel_regularizer=reg,
        name=name)

    with tf.variable_scope('sub1'):
        # As we will do downsampling with strides, need to make sure the output
        # size is the correct format.
        if stride > 1:
            x = fixed_padding(x, kernel_size, 'channels_last')

        conv = conv_class('conv1', stride)
        x = conv.apply(x)

    with tf.variable_scope('sub2'):
        bn = bn_class('between_bn')
        x = bn.apply(x, training=train)
        x = tf.nn.relu(x)
        conv = conv_class('conv2', 1)
        x = conv.apply(x)

    return x
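
fixed_padding and real_reg are helpers defined elsewhere in the project this
snippet comes from. For orientation, here is a sketch of what fixed_padding
conventionally does in ResNet implementations (for example the TensorFlow
official models): it zero-pads explicitly so that the following strided
'VALID' conv keeps the output size independent of the input size. This is an
assumption about the missing helper, not its actual definition:

def fixed_padding(inputs, kernel_size, data_format):
    # Pad by kernel_size - 1 in total, split as evenly as possible, so the
    # following strided 'VALID' convolution is size-correct.  (Assumed
    # implementation, mirroring the TF official ResNet helper.)
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    if data_format == 'channels_first':
        paddings = [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]
    else:
        paddings = [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]
    return tf.pad(inputs, paddings)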