Example #1
 def test_list_and_number_args_equal(self):
     with tf.variable_scope("TEST"):
         conv1, _ = parallel_conv(self.p, self.filtsz, self.embedsz, self.motsz)
     with tf.variable_scope("TEST", reuse=True):
         conv2, _ = parallel_conv(self.p, self.filtsz, self.embedsz, [self.motsz] * len(self.filtsz))
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         np.testing.assert_allclose(conv1.eval({self.p: self.input}), conv2.eval({self.p: self.input}))
Example #2
 def test_conv_called(self):
     with patch('baseline.tf.tfy.tf.nn.conv2d') as conv_mock:
         conv_mock.return_value = tf.zeros(
             (self.batchsz, 1, self.seqsz, self.motsz))
         conv, _ = parallel_conv(self.p, self.filtsz, self.embedsz, self.motsz)
         self.assertEqual(conv_mock.call_count, self.num_filt)
Example #3
    def pool(self, word_embeddings, dsz, init, **kwargs):
        """Do parallel convolutional filtering with varied receptive field widths, followed by max-over-time pooling

        :param word_embeddings: The word embeddings, which are inputs here
        :param dsz: The depth of the word embeddings
        :param init: The tensorflow initializer
        :param kwargs: See below

        :Keyword Arguments:
        * *cmotsz* -- (``int``) The number of convolutional feature maps for each filter
            These are max-over-time (MOT) pooled, leaving this many units per parallel filter
        * *filtsz* -- (``list``) This is a list of filter widths to use

        :return: The concatenated max-over-time features, with dropout applied
        """
        cmotsz = kwargs['cmotsz']
        filtsz = kwargs['filtsz']

        combine, _ = parallel_conv(word_embeddings, filtsz, dsz, cmotsz)
        # Definitely drop out
        with tf.name_scope("dropout"):
            combine = tf.layers.dropout(combine,
                                        self.pdrop_value,
                                        training=TRAIN_FLAG())
        return combine
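
For reference, here is a minimal sketch of the kind of block these examples exercise, assuming TF 1.x graph mode. It is an illustrative stand-in, not the actual parallel_conv from baseline.tf.tfy, but it follows the layout the tests imply: one tf.nn.conv2d call per filter width on a [batch, 1, time, dsz] tensor, max-over-time pooling via reduce_max, the per-filter results concatenated, and both the combined tensor and the per-filter outputs returned.

 def parallel_conv_sketch(input_, filtsz, dsz, motsz):
     """Hypothetical stand-in for parallel_conv, for exposition only."""
     # Accept an int or a per-filter list of feature-map counts,
     # the equivalence asserted in Example #1.
     if not isinstance(motsz, list):
         motsz = [motsz] * len(filtsz)
     expanded = tf.expand_dims(input_, 1)  # [B, T, dsz] -> [B, 1, T, dsz]
     mots = []
     for fsz, nfeat in zip(filtsz, motsz):
         with tf.variable_scope('cmot-%s' % fsz):
             W = tf.get_variable('W', [1, fsz, dsz, nfeat])
             b = tf.get_variable('b', [nfeat], initializer=tf.constant_initializer(0.0))
             conv = tf.nn.conv2d(expanded, W, strides=[1, 1, 1, 1], padding='SAME')
             activation = tf.nn.relu(tf.nn.bias_add(conv, b))
             # Max over time collapses axes 1 and 2, keeping the static
             # shape [None, nfeat] (unlike an axis-less tf.squeeze).
             mots.append(tf.reduce_max(activation, axis=[1, 2]))
     combine = tf.concat(mots, 1)  # [B, sum(motsz)]
     return combine, mots

Under these assumptions, the shape assertions in the tests below fall out directly: axis 0 of combine is the batch size and axis 1 is sum(motsz), which reduces to motsz * len(filtsz) when an int is passed.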
Example #4
 def test_output_feature_shape_int_arg(self):
     conv, _ = parallel_conv(self.p, self.filtsz, self.embedsz, self.motsz)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         self.assertEqual(
             conv.eval({
                 self.p: self.input
             }).shape[1], self.motsz * self.num_filt)
Example #5
 def test_output_feature_shape_list_arg(self):
     motsz = [self.nfeat_factor * fsz for fsz in self.filtsz]
     conv, _ = parallel_conv(self.p, self.filtsz, self.embedsz, motsz)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         self.assertEqual(
             conv.eval({
                 self.p: self.input
             }).shape[1], sum(motsz))
Example #6
 def test_output_batch_shape_list_arg(self):
     motsz = [self.motsz] * len(self.filtsz)
     conv, _ = parallel_conv(self.p, self.filtsz, self.embedsz, motsz)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         self.assertEqual(
             conv.eval({
                 self.p: self.input
             }).shape[0], self.batchsz)
Example #7
 def test_shape_available_int(self):
     """The previous tests test the shape of the actual output tensor. This
     tests the output shape information available when building the graph.
     When tf.squeeze is used on a tensor with a None dimension all shape info
     is lost because it is unknown if the None dimension is 1 and should be
     squeezed out. This caused errors later because layers needed shape information
     to set the right size. This test makes sure that needed size information
     is present. This test would have caught the break in the classify method.
     """
     conv, _ = parallel_conv(self.p, self.filtsz, self.embedsz, self.motsz)
     conv_shape = conv.get_shape().as_list()
     self.assertEqual(conv_shape, [None, self.motsz * len(self.filtsz)])
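
The squeeze pitfall this docstring describes is easy to reproduce in isolation. A minimal sketch, again assuming TF 1.x:

 x = tf.placeholder(tf.float32, shape=[None, 1, 300])
 # Without an explicit axis, squeeze would also drop the None dimension
 # if it happened to be 1 at runtime, so static shape inference gives up.
 print(tf.squeeze(x).get_shape())          # <unknown>
 # Pinning the axis keeps the shape info downstream layers need.
 print(tf.squeeze(x, axis=1).get_shape())  # (?, 300)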
Example #8
 def conv_pooling(embeddings, **kwargs):
     filtsz = kwargs['filtsz']
     cmotsz = kwargs['cmotsz']
     dsz = kwargs['dsz']
     combine, _ = parallel_conv(embeddings, filtsz, dsz, cmotsz)
     return combine
Example #9
 def test_shape_available_list(self):
     """Same as the `test_shape_available_int` commnet."""
     motsz = [self.nfeat_factor * fsz for fsz in self.filtsz]
     conv, _ = parallel_conv(self.p, self.filtsz, self.embedsz, motsz)
     conv_shape = conv.get_shape().as_list()
     self.assertEqual(conv_shape, [None, sum(motsz)])