def testUnknownInputChannelsConv1D(self):
  """Conv1D must reject symbolic inputs whose channel dim is undefined."""
  expected_msg = ('The channel dimension of the inputs '
                  'should be defined. Found `None`.')
  # channels_last (default): the channel axis is the last one.
  data = array_ops.placeholder(dtypes.float32, (5, 4, None))
  layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
  with self.assertRaisesRegexp(ValueError, expected_msg):
    _ = layer.apply(data)
  # channels_first: the channel axis is axis 1.
  data = array_ops.placeholder(dtypes.float32, (5, None, 4))
  layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
  with self.assertRaisesRegexp(ValueError, expected_msg):
    _ = layer.apply(data)
def testUnknownInputChannelsConv1D(self):
  """Conv1D must reject concrete tensors whose static channel dim is erased."""
  err_msg = ('The channel dimension of the inputs '
             'should be defined. Found `None`.')
  # channels_last (default): hide the last (channel) dimension.
  data = random_ops.random_uniform((5, 4, 7))
  # Test-only hack: overwrite the private static shape to simulate an
  # unknown channel dimension on an otherwise concrete tensor.
  data._shape = tensor_shape.as_shape((5, 4, None))
  layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
  with self.assertRaisesRegexp(ValueError, err_msg):
    _ = layer.apply(data)
  # channels_first: hide axis 1, the channel dimension.
  data = random_ops.random_uniform((5, 7, 4))
  data._shape = tensor_shape.as_shape((5, None, 4))
  layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
  with self.assertRaisesRegexp(ValueError, err_msg):
    _ = layer.apply(data)
def testConstraints(self):
  """kernel/bias constraints given at construction are stored on the layer."""
  # (layer class, input shape) pairs: 1-D, 2-D and 3-D convolutions.
  cases = [
      (conv_layers.Conv1D, (5, 3, 5)),
      (conv_layers.Conv2D, (5, 3, 3, 5)),
      (conv_layers.Conv3D, (5, 3, 3, 3, 5)),
  ]
  for layer_cls, input_shape in cases:
    # Fresh constraint callables per case, mirroring the per-layer lambdas.
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    layer = layer_cls(2, 3,
                      kernel_constraint=k_constraint,
                      bias_constraint=b_constraint)
    inputs = random_ops.random_uniform(input_shape, seed=1)
    layer(inputs)
    self.assertEqual(layer.kernel_constraint, k_constraint)
    self.assertEqual(layer.bias_constraint, b_constraint)
def __init__(self,
             num_units,
             memory,
             memory_sequence_length=None,
             scale=False,
             probability_fn=None,
             score_mask_value=float("-inf"),
             name="LuongAttention"):
  """Construct a Luong-style attention mechanism with extra trainable layers.

  Args:
    num_units: int, depth of the query/memory projection layers.
    memory: the memory tensor to attend over (passed to the base class).
    memory_sequence_length: optional lengths used by the base class to mask
      scores beyond each sequence's true length.
    scale: bool, stored on the instance; presumably toggles score scaling
      in the score computation elsewhere — TODO confirm.
    probability_fn: callable mapping scores to probabilities; defaults to
      `tf.nn.softmax` when not provided.
    score_mask_value: value used to mask out-of-range scores (default -inf).
    name: name scope for this mechanism.
  """
  if probability_fn is None:
    probability_fn = tf.nn.softmax
  # Base class expects a (score, prev_state) callable; drop the second arg.
  wrapped_probability_fn = lambda score, _: probability_fn(score)
  # NOTE(review): super() is hard-coded to tf.contrib.seq2seq.LuongAttention
  # rather than this class — this skips LuongAttention.__init__ and calls its
  # parent directly; verify this is intentional.
  super(tf.contrib.seq2seq.LuongAttention, self).__init__(
      query_layer=None,
      memory_layer=layers_core.Dense(
          num_units, name="memory_layer", use_bias=False),
      memory=memory,
      probability_fn=wrapped_probability_fn,
      memory_sequence_length=memory_sequence_length,
      score_mask_value=score_mask_value,
      name=name)
  self._num_units = num_units
  self._scale = scale
  self._name = name
  # Additional trainable sub-layers used by the score computation
  # (consumers are outside this view):
  self.query_dense = layers_core.Dense(num_units)
  # Wide 1-D convolution (kernel_size=201, 'same' padding preserves length).
  self.cf_conv = layers_conv.Conv1D(filters=64,
                                    kernel_size=201,
                                    strides=1,
                                    padding='same')
  self.cf_dense = layers_core.Dense(num_units)
  # Projects per-position features down to a scalar score.
  self.score_dense = layers_core.Dense(1)
def testCreateConv1DChannelsFirst(self):
  """channels_first Conv1D yields the expected output/kernel/bias shapes."""
  width = 7
  inputs = random_ops.random_uniform((5, 4, width))
  conv = conv_layers.Conv1D(32, 3, data_format='channels_first')
  outputs = conv.apply(inputs)
  # VALID padding with kernel 3 trims 2 off the spatial (last) axis.
  self.assertListEqual([5, 32, width - 2], outputs.get_shape().as_list())
  self.assertListEqual([3, 4, 32], conv.kernel.get_shape().as_list())
  self.assertListEqual([32], conv.bias.get_shape().as_list())
def testCreateConv1D(self):
  """Default (channels_last) Conv1D: op name and all shapes are as expected."""
  width = 7
  inputs = random_ops.random_uniform((5, width, 4))
  conv = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
  outputs = conv.apply(inputs)
  # The activation supplies the final op in the layer's name scope.
  self.assertEqual('conv1d/Relu', outputs.op.name)
  # VALID padding with kernel 3 trims 2 off the spatial (middle) axis.
  self.assertListEqual([5, width - 2, 32], outputs.get_shape().as_list())
  self.assertListEqual([3, 4, 32], conv.kernel.get_shape().as_list())
  self.assertListEqual([32], conv.bias.get_shape().as_list())