Code Example #1
    def layer_embedding(self):
        """
        Layer embedding
        Fuses word and char embeddings by default
        """

        self.global_step = tf.Variable(0, name="global_step", trainable=False)

        with tf.variable_scope('embeddings'):
            self.word_mat = tf.get_variable(
                'word_embeddings',
                shape=[self.vocab.word_size(), self.vocab.word_embed_dim],
                initializer=tf.constant_initializer(
                    self.vocab.word_embeddings),
                trainable=False)
            self.char_mat = tf.get_variable(
                'char_embeddings',
                shape=[self.vocab.char_size(), self.vocab.char_embed_dim],
                initializer=tf.constant_initializer(
                    self.vocab.char_embeddings),
                trainable=False)

            # look up char embeddings and flatten to
            # [batch * sent_len, char_len, char_dim] for the char-level conv
            sh_emb = tf.reshape(tf.nn.embedding_lookup(
                self.char_mat, self.sh), [
                    self.config.batch_size * self.config.max_sent_len,
                    self.config.max_char_len, self.vocab.char_embed_dim
                ])
            # project, max-pool over characters, and restore the
            # [batch, sent_len, ...] layout
            sh_emb = Conv()(sh_emb, self.config.num_units, scope="sh_dense")
            sh_emb = tf.reduce_max(sh_emb, axis=1)
            sh_emb = tf.reshape(
                sh_emb, [self.config.batch_size, self.config.max_sent_len, -1])

            # word embeddings plus position information
            s_emb = tf.nn.embedding_lookup(self.word_mat, self.s)
            s_emb += libs.position_embedding(self.s, self.vocab.word_embed_dim)

            # fuse word-level and char-level features
            s_emb = tf.concat([s_emb, sh_emb], axis=2)

            # projection
            self.s_emb = Conv()(s_emb, self.config.num_units, scope="s_proj")

            # self.s_emb = tf.layers.dropout(self.s_emb, 1.0-self.dropout)

            self.s_emb = Highway(activation=tf.nn.relu,
                                 kernel='conv',
                                 dropout=self.dropout)(self.s_emb,
                                                       scope='highway')
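
Conv and Highway are the project's own layer wrappers and their source is not shown in these excerpts. As a rough guide only, a minimal sketch of the Conv interface the call sites above assume (width-1 kernel by default, with optional bias, activation, and kernel_size), built on tf.layers.conv1d, might look like this:

import tensorflow as tf

class Conv:
    """Hypothetical 1-D convolution wrapper matching the call sites above."""

    def __init__(self, activation=None, bias=False, kernel_size=1):
        self.activation = activation
        self.bias = bias
        self.kernel_size = kernel_size

    def __call__(self, inputs, filters, scope='conv'):
        # inputs: [batch, seq_len, dim] -> [batch, seq_len, filters]
        with tf.variable_scope(scope):
            return tf.layers.conv1d(inputs,
                                    filters=filters,
                                    kernel_size=self.kernel_size,
                                    padding='same',
                                    use_bias=self.bias,
                                    activation=self.activation)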
Code Example #2
File: highway.py Project: toughhou/clfzoo
    def _conv(self, inputs):

        with tf.variable_scope(self.scope, reuse=self.reuse):
            size = inputs.shape.as_list()[-1]

            inputs = Conv()(inputs, size, scope="input_projection")

            for i in range(self.num_layers):
                H = Conv(activation=self.activation,
                         bias=True)(inputs, size, scope="activation_%d" % i)
                T = Conv(bias=True, activation=tf.sigmoid)(inputs,
                                                           size,
                                                           scope="gate_%d" % i)
                H = tf.nn.dropout(H, 1.0 - self.dropout)

                inputs = H * T + inputs * (1.0 - T)
            return inputs
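
Each layer computes the standard highway update y = H(x) * T(x) + x * (1 - T(x)), where H is the transform (a relu convolution here) and T is a sigmoid gate; when T is near zero the layer passes its input through unchanged, which keeps deep stacks easy to train. A usage sketch with hypothetical shapes:

# Hypothetical usage: a batch of 32 sentences, 50 tokens, 128-dim features.
inputs = tf.random_normal([32, 50, 128])
outputs = Highway(activation=tf.nn.relu, kernel='conv',
                  dropout=0.1)(inputs, scope='highway')
# The gated residual update preserves the input shape: [32, 50, 128].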
Code Example #3
File: model.py Project: nutalk/clfzoo
    def layer_embedding(self):
        """
        Layer embedding
        Fuses word and char embeddings by default
        """
        with tf.variable_scope('embeddings'):
            self.word_mat = tf.get_variable(
                'word_embeddings',
                shape=[self.vocab.word_size(), self.vocab.word_embed_dim],
                initializer=tf.constant_initializer(
                    self.vocab.word_embeddings),
                trainable=False)
            self.char_mat = tf.get_variable(
                'char_embeddings',
                shape=[self.vocab.char_size(), self.vocab.char_embed_dim],
                initializer=tf.constant_initializer(
                    self.vocab.char_embeddings),
                trainable=False)

            sh_emb = tf.reshape(tf.nn.embedding_lookup(
                self.char_mat, self.sh), [
                    self.config.batch_size * self.config.max_sent_len,
                    self.config.max_char_len, self.vocab.char_embed_dim
                ])
            # projection
            sh_emb = Conv(bias=True)(sh_emb,
                                     self.config.filter_size,
                                     scope="sh_conv")
            sh_emb = tf.reduce_max(sh_emb, axis=1)
            sh_emb = tf.reshape(
                sh_emb, [self.config.batch_size, self.config.max_sent_len, -1])

            s_emb = tf.nn.embedding_lookup(self.word_mat, self.s)
            s_emb = tf.concat([s_emb, sh_emb], axis=2)

            # projection
            s_emb = Conv(bias=True)(s_emb,
                                    self.config.filter_size,
                                    scope="s_proj")
            self.s_emb = Highway(activation=tf.nn.relu,
                                 kernel='conv',
                                 dropout=self.dropout)(s_emb, scope='highway')
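
Unlike Example #1, this variant concatenates the word and char features directly, without a libs.position_embedding term. That helper is not shown in these excerpts; if it follows the usual Transformer-style sinusoidal encoding, a stand-in (an assumption, not the project's actual code) could be:

import numpy as np
import tensorflow as tf

def position_embedding(inputs, dim):
    # inputs: [batch, seq_len] token ids; returns [1, seq_len, dim],
    # which broadcasts over the batch when added to the word embeddings.
    seq_len = inputs.get_shape().as_list()[1]
    pos = np.arange(seq_len, dtype=np.float64)[:, None]   # [seq_len, 1]
    rates = 1.0 / np.power(10000.0,
                           2.0 * (np.arange(dim) // 2) / float(dim))
    angles = pos * rates[None, :]                         # [seq_len, dim]
    angles[:, 0::2] = np.sin(angles[:, 0::2])
    angles[:, 1::2] = np.cos(angles[:, 1::2])
    return tf.constant(angles[None, :, :], dtype=tf.float32)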
Code Example #4
File: model.py Project: toughhou/clfzoo
    def layer_encoder(self):
        """
        Layer Encoder
        Multi-channel convolutions encode the input
        """
        with tf.variable_scope('encoder'):
            inputs = self.s_emb

            conv1 = Conv(bias=True, activation=tf.nn.relu,
                         kernel_size=3)(inputs,
                                        self.config.filter_size,
                                        scope='conv1')
            conv2 = Conv(bias=True, activation=tf.nn.relu,
                         kernel_size=3)(conv1,
                                        self.config.filter_size,
                                        scope='conv2')

            # shortcut
            conv2 = conv2 + inputs

            inputs = conv2

            for i in range(self.config.num_blocks):
                # k-max pooling over time: keep the seq_len // 2 largest
                # activations per channel, halving the sequence each block
                seq_len = inputs.get_shape()[1]
                poolings = tf.transpose(
                    tf.nn.top_k(tf.transpose(inputs, [0, 2, 1]),
                                k=seq_len // 2)[0], [0, 2, 1])

                conv1 = Conv(bias=True, activation=tf.nn.relu,
                             kernel_size=3)(poolings,
                                            self.config.filter_size,
                                            scope='conv1-%d' % i)
                conv2 = Conv(bias=True, activation=tf.nn.relu,
                             kernel_size=3)(conv1,
                                            self.config.filter_size,
                                            scope='conv2-%d' % i)

                inputs = conv2 + poolings

            self.output = tf.reduce_max(inputs, axis=1)
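
The pooling at the head of each block is a per-channel k-max pooling: transposing to [batch, channels, seq_len] lets tf.nn.top_k keep the seq_len // 2 largest activations in each channel, halving the sequence length every block. Note that top_k returns values sorted by magnitude, so temporal order within a channel is not preserved. A toy check with made-up values:

import tensorflow as tf

x = tf.constant([[[1., 5.], [4., 2.], [3., 3.], [2., 4.]]])   # [1, 4, 2]
pooled = tf.transpose(
    tf.nn.top_k(tf.transpose(x, [0, 2, 1]), k=2)[0], [0, 2, 1])
with tf.Session() as sess:
    print(sess.run(pooled))   # [[[4. 5.] [3. 4.]]]: top-2 per channel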