Example #1
    def build(self):
        with tf.variable_scope("Embeddings"):
            self.embeddings = tf.get_variable(
                "emb", [self.config.n_embed, self.config.d_embed],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
        with tf.variable_scope("Model"):
            w_softmax = tf.get_variable(
                "w_softmax", [2 * self.config.dim_sem, self.config.dim_output],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
            b_softmax = tf.get_variable(
                "bias_softmax", [self.config.dim_output],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())

        sent_l = self.t_variables['sent_l']
        max_sent_l = self.t_variables['max_sent_l']
        batch_l = self.t_variables['batch_l']

        tokens_input = tf.nn.embedding_lookup(
            self.embeddings, self.t_variables['token_idxs'][:, :max_sent_l])
        tokens_input = tf.nn.dropout(tokens_input,
                                     self.t_variables['keep_prob'])

        mask_tokens = self.t_variables['mask_tokens'][:, :max_sent_l]
        #mask_sents = self.t_variables['mask_sents'][:]
        [_, _, rnn_size] = tokens_input.get_shape().as_list()
        tokens_input_do = tf.reshape(tokens_input,
                                     [batch_l, max_sent_l, rnn_size])

        sent_l = tf.reshape(sent_l, [batch_l])

        tokens_output, _ = dynamicBiRNN(tokens_input_do,
                                        sent_l,
                                        n_hidden=self.config.dim_hidden,
                                        cell_type=self.config.rnn_cell,
                                        cell_name='Model/sent')

        print("tokens output shape", tokens_output[0].shape)
        tokens_output = tf.concat([tokens_output[0], tokens_output[1]], 2)
        #tokens_output = tokens_output[0]

        print("tokens output shape", tokens_output.shape)
        #print("sents input shape",sents_input.shape)
        ntime_steps = tf.shape(tokens_output)[1]
        context_rep_flat = tf.reshape(tokens_output,
                                      [-1, 2 * self.config.dim_sem])
        pred = tf.matmul(context_rep_flat, w_softmax) + b_softmax
        self.final_output = tf.reshape(
            pred, [-1, ntime_steps, self.config.dim_output])

        #final_output = MLP(tokens_output, 'output', self.t_variables['keep_prob'])
        #self.final_output = tf.matmul(tokens_output, w_softmax) + b_softmax
        print("final output shape", self.final_output.shape)
Example #2
    def build(self):
        with tf.variable_scope("Embeddings"):
            self.embeddings = tf.get_variable("emb", [self.config.n_embed, self.config.d_embed], dtype=tf.float64,
                                         initializer=self.xavier_init)
            embeddings_root = tf.get_variable("emb_root", [1, 1, 2 * self.config.dim_sem], dtype=tf.float64,
                                                  initializer=self.xavier_init)
            embeddings_root_s = tf.get_variable("emb_root_s", [1, 1, 2 * self.config.dim_sem], dtype=tf.float64,
                                                initializer=self.xavier_init)
        with tf.variable_scope("Model"):
            w_comb = tf.get_variable("w_comb", [4 * self.config.dim_sem, 2 * self.config.dim_sem], dtype=tf.float64,
                            initializer=self.xavier_init)
            w_comb_both = tf.get_variable("w_comb_both", [6 * self.config.dim_sem, 2 * self.config.dim_sem], dtype=tf.float64,
                                     initializer=self.xavier_init)
            b_comb = tf.get_variable("bias_comb", [2 * self.config.dim_sem], dtype=tf.float64, initializer=tf.constant_initializer())

            w_comb_s = tf.get_variable("w_comb_s", [4 * self.config.dim_sem, 2 * self.config.dim_sem], dtype=tf.float64,
                            initializer=self.xavier_init)
            b_comb_s = tf.get_variable("bias_comb_s", [2 * self.config.dim_sem], dtype=tf.float64, initializer=tf.constant_initializer())

            w_softmax = tf.get_variable("w_softmax", [2 * self.config.dim_sem, self.config.dim_output], dtype=tf.float64,
                            initializer=self.xavier_init)
            b_softmax = tf.get_variable("bias_softmax", [self.config.dim_output], dtype=tf.float64,
                            initializer=self.xavier_init)

            w_sem_doc = tf.get_variable("w_sem_doc", [2 * self.config.dim_sem, 2 * self.config.dim_sem], dtype=tf.float64,
                                        initializer=self.xavier_init)

            w_str_doc = tf.get_variable("w_str_doc", [2 * self.config.dim_sem, 2 * self.config.dim_str], dtype=tf.float64,
                                        initializer=self.xavier_init)

        with tf.variable_scope("Structure/doc"):
            tf.get_variable("w_parser_p", [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("w_parser_c", [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("w_parser_s", [2 * self.config.dim_str, 2 * self.config.dim_str], dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("bias_parser_p", [2 * self.config.dim_str], dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("bias_parser_c", [2 * self.config.dim_str], dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("w_parser_root", [2 * self.config.dim_str, 1], dtype=tf.float64,
                            initializer=self.xavier_init)
        with tf.variable_scope("Structure/sent"):
            tf.get_variable("w_parser_p", [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("w_parser_c", [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("bias_parser_p", [2 * self.config.dim_str], dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("bias_parser_c", [2 * self.config.dim_str], dtype=tf.float64,
                            initializer=self.xavier_init)

            tf.get_variable("w_parser_s", [2 * self.config.dim_str, 2 * self.config.dim_str], dtype=tf.float64,
                            initializer=self.xavier_init)
            tf.get_variable("w_parser_root", [2 * self.config.dim_str, 1], dtype=tf.float64,
                            initializer=self.xavier_init)

        sent_l = self.t_variables['sent_l']
        doc_l = self.t_variables['doc_l']
        max_sent_l = self.t_variables['max_sent_l']
        max_doc_l = self.t_variables['max_doc_l']
        batch_l = self.t_variables['batch_l']

        tokens_input = tf.nn.embedding_lookup(self.embeddings, self.t_variables['token_idxs'][:, :max_doc_l, :max_sent_l])
        tokens_input = tf.nn.dropout(tokens_input, self.t_variables['keep_prob'])  # [batch_size, doc_l, sent_l, d_embed]

        mask_tokens = self.t_variables['mask_tokens'][:, :max_doc_l, :max_sent_l]
        mask_sents = self.t_variables['mask_sents'][:, :max_doc_l]  # [batch_size, doc_l]

        tokens_input_do = tf.reshape(tokens_input, [batch_l * max_doc_l, max_sent_l, self.config.d_embed])
        sent_l = tf.reshape(sent_l, [batch_l * max_doc_l])
        mask_tokens = tf.reshape(mask_tokens, [batch_l * max_doc_l, -1])
        tokens_output, _ = dynamicBiRNN(tokens_input_do, sent_l, n_hidden=self.config.dim_hidden, xavier_init=self.xavier_init,
                                        cell_type=self.config.rnn_cell, cell_name='Model/sent')
        tokens_sem = tf.concat([tokens_output[0][:, :, :self.config.dim_sem], tokens_output[1][:, :, :self.config.dim_sem]], 2)
        tokens_str = tf.concat([tokens_output[0][:, :, self.config.dim_sem:], tokens_output[1][:, :, self.config.dim_sem:]], 2)

        if self.config.skip_sent_attention:
            tokens_output = LReLu(tf.tensordot(tf.concat([tokens_sem, tokens_input_do], 2), w_comb_s, [[2], [0]]) + b_comb_s)
        else:
            temp1 = tf.zeros([batch_l * max_doc_l, max_sent_l, 1], tf.float64)
            temp2 = tf.zeros([batch_l * max_doc_l, 1, max_sent_l], tf.float64)

            mask1 = tf.ones([batch_l * max_doc_l, max_sent_l, max_sent_l - 1], tf.float64)
            mask2 = tf.ones([batch_l * max_doc_l, max_sent_l - 1, max_sent_l], tf.float64)
            mask1 = tf.concat([temp1, mask1], 2)
            mask2 = tf.concat([temp2, mask2], 1)

            if self.config.skip_mask_bug_fix:
                str_scores_s_, _, LL_tokens = get_structure('sent', tokens_str, mask1, mask2, None, None, None)  # batch_l,  sent_l+1, sent_l
            else:
                # create mask for setting all padded cells to 0
                mask_ll_tokens = tf.expand_dims(mask_tokens, 2)
                mask_ll_tokens_trans = tf.transpose(mask_ll_tokens, perm=[0, 2, 1])
                mask_tokens_mult = mask_ll_tokens * mask_ll_tokens_trans

                # create mask for setting the padded diagonals to 1
                mask_diags = tf.matrix_diag_part(mask_tokens_mult)
                mask_diags_invert = tf.cast(tf.logical_not(tf.cast(mask_diags, tf.bool)), tf.float64)
                zero_matrix = tf.zeros([batch_l * max_doc_l, max_sent_l, max_sent_l], tf.float64)
                mask_tokens_add = tf.matrix_set_diag(zero_matrix, mask_diags_invert)

                str_scores_s_, _, LL_tokens = get_structure(
                    'sent', tokens_str, mask1, mask2, mask_tokens_mult,
                    mask_tokens_add, tf.expand_dims(mask_tokens, 2))  # batch_l, sent_l+1, sent_l

            str_scores_s = tf.matrix_transpose(str_scores_s_)  # soft parent
            tokens_sem_root = tf.concat([tf.tile(embeddings_root_s, [batch_l * max_doc_l, 1, 1]), tokens_sem], 1)
            tokens_output_ = tf.matmul(str_scores_s, tokens_sem_root)
            tokens_output = LReLu(tf.tensordot(tf.concat([tokens_sem, tokens_output_], 2), w_comb_s, [[2], [0]]) + b_comb_s)

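        # Masked pooling over token positions follows: 'sum' and 'mean' zero
        # the padded slots via mask_tokens before reducing, while 'max'
        # subtracts 999 from padded slots so they never win the reduce_max.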
        if (self.config.sent_attention == 'sum'):
            tokens_output = tokens_output * tf.expand_dims(mask_tokens, 2)
            tokens_output = tf.reduce_sum(tokens_output, 1)
        elif (self.config.sent_attention == 'mean'):
            tokens_output = tokens_output * tf.expand_dims(mask_tokens, 2)
            tokens_output = tf.reduce_sum(tokens_output, 1) / tf.expand_dims(tf.cast(sent_l, tf.float64), 1)
        elif (self.config.sent_attention == 'max'):
            tokens_output = tokens_output + tf.expand_dims((mask_tokens - 1) * 999, 2)
            tokens_output = tf.reduce_max(tokens_output, 1)

        # batch_l * max_doc_l, 200
        if self.config.skip_doc_bilstm:
            if self.config.use_positional_encoding:
                tokens_output = tf.reshape(tokens_output, [batch_l, max_doc_l, 2 * self.config.dim_sem])
                tokens_output = self.add_timing_signal(tokens_output, max_doc_l, num_timescales=self.config.dim_sem)
                tokens_output = tf.reshape(tokens_output, [batch_l * max_doc_l, 2 * self.config.dim_sem])

            sents_sem = tf.matmul(tokens_output, w_sem_doc)
            sents_sem = tf.reshape(sents_sem, [batch_l, max_doc_l, 2 * self.config.dim_sem])
            sents_str = tf.matmul(tokens_output, w_str_doc)
            sents_str = tf.reshape(sents_str, [batch_l, max_doc_l, 2 * self.config.dim_str])
        else:
            sents_input = tf.reshape(tokens_output, [batch_l, max_doc_l, 2 * self.config.dim_sem])
            sents_output, _ = dynamicBiRNN(sents_input, doc_l, n_hidden=self.config.dim_hidden, xavier_init=self.xavier_init, 
                                           cell_type=self.config.rnn_cell, cell_name='Model/doc')
            sents_sem = tf.concat([sents_output[0][:, :, :self.config.dim_sem], sents_output[1][:, :, :self.config.dim_sem]], 2)  # [batch_l, doc_l, dim_sem*2]
            sents_str = tf.concat([sents_output[0][:, :, self.config.dim_sem:], sents_output[1][:, :, self.config.dim_sem:]], 2)  # [batch_l, doc_l, dim_str*2]

        if self.config.skip_doc_attention:
            if self.config.skip_doc_bilstm:
                sents_input = tf.reshape(tokens_output, [batch_l, max_doc_l, 2 * self.config.dim_sem])
                sents_output = LReLu(tf.tensordot(tf.concat([sents_sem, sents_input], 2), w_comb, [[2], [0]]) + b_comb)
            else:
                sents_output = LReLu(tf.tensordot(tf.concat([sents_sem, sents_input], 2), w_comb, [[2], [0]]) + b_comb)
        else:
            if self.config.skip_mask_bug_fix:
                str_scores_, str_scores_no_root, LL_sents = get_structure('doc', sents_str, self.t_variables['mask_parser_1'],
                                                                  self.t_variables['mask_parser_2'], None, None, None)  # [batch_size, doc_l+1, doc_l]
            else:
                # create mask for setting all padded cells to 0
                mask_ll_sents = tf.expand_dims(mask_sents, 2)
                mask_ll_sents_trans = tf.transpose(mask_ll_sents, perm=[0, 2, 1])
                mask_sents_mult = mask_ll_sents * mask_ll_sents_trans

                # create mask for setting the padded diagonals to 1
                mask_sents_diags = tf.matrix_diag_part(mask_sents_mult)
                mask_sents_diags_invert = tf.cast(tf.logical_not(tf.cast(mask_sents_diags, tf.bool)), tf.float64)
                zero_matrix_sents = tf.zeros([batch_l, max_doc_l, max_doc_l], tf.float64)
                mask_sents_add = tf.matrix_set_diag(zero_matrix_sents, mask_sents_diags_invert)

                str_scores_, str_scores_no_root, LL_sents = get_structure(
                    'doc', sents_str, self.t_variables['mask_parser_1'],
                    self.t_variables['mask_parser_2'], mask_sents_mult,
                    mask_sents_add, tf.expand_dims(mask_sents, 2))  # [batch_size, doc_l+1, doc_l]

            str_scores = tf.matrix_transpose(str_scores_)
            self.str_scores = str_scores  # shape is [batch_size, doc_l, doc_l+1]

            sents_children = tf.matmul(str_scores_no_root, sents_sem)
            if self.config.tree_percolation == "child":
                sents_output = LReLu(tf.tensordot(tf.concat([sents_sem, sents_children], 2), w_comb, [[2], [0]]) + b_comb)
            else:
                sents_sem_root = tf.concat([tf.tile(embeddings_root, [batch_l, 1, 1]), sents_sem], 1)
                sents_parents = tf.matmul(str_scores, sents_sem_root)
                if self.config.tree_percolation == "parent":
                    sents_output = LReLu(tf.tensordot(tf.concat([sents_sem, sents_parents], 2), w_comb, [[2], [0]]) + b_comb)
                elif self.config.tree_percolation == "both":
                    sents_output = LReLu(tf.tensordot(tf.concat([sents_sem, sents_parents, sents_children], 2), w_comb_both, [[2], [0]]) + b_comb)

            # percolation is only supported for "child" option
            if self.config.tree_percolation_levels > 0:
                count = 0
                while count < self.config.tree_percolation_levels:
                    sents_children_2 = tf.matmul(str_scores_no_root, sents_output)
                    sents_output = LReLu(tf.tensordot(tf.concat([sents_output, sents_children_2], 2), w_comb, [[2], [0]]) + b_comb)
                    count += 1

        if (self.config.doc_attention == 'sum'):
            sents_output = sents_output * tf.expand_dims(mask_sents, 2)  # mask is [batch_size, doc_l, 1]
            sents_output = tf.reduce_sum(sents_output, 1)  # [batch_size, dim_sem*2]
        elif (self.config.doc_attention == 'mean'):
            sents_output = sents_output * tf.expand_dims(mask_sents, 2)
            sents_output = tf.reduce_sum(sents_output, 1) / tf.expand_dims(tf.cast(doc_l, tf.float64), 1)
        elif (self.config.doc_attention == 'max'):
            sents_output = sents_output + tf.expand_dims((mask_sents - 1) * 999, 2)
            sents_output = tf.reduce_max(sents_output, 1)
        elif (self.config.doc_attention == 'weighted_sum'):
            sents_weighted = sents_output * tf.expand_dims(str_scores[:, :, 0], 2)
            sents_output = sents_weighted * tf.expand_dims(mask_sents, 2)  # apply mask
            sents_output = tf.reduce_sum(sents_output, 1)

        final_output = MLP(sents_output, 'output', self.t_variables['keep_prob'], self.config.seed, self.xavier_init)
        self.final_output = tf.matmul(final_output, w_softmax) + b_softmax
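
The padding fix in this example builds two masks: a multiplicative one that zeroes every score involving a padded position, and an additive one that puts 1 on the diagonal of padded positions so the matrix inverted inside get_structure stays invertible. A standalone NumPy check of that construction (values here are illustrative, not from the source):

import numpy as np

doc_l, max_doc_l = 3, 5                       # 3 real sentences, padded to 5
mask = np.zeros(max_doc_l)
mask[:doc_l] = 1.0                            # [1, 1, 1, 0, 0]

mask_mult = mask[:, None] * mask[None, :]     # 1 only where row AND col are real
mask_add = np.diag(1.0 - np.diag(mask_mult))  # 1 on the padded diagonal only

scores = np.exp(np.random.randn(max_doc_l, max_doc_l))
masked = scores * mask_mult + mask_add        # pad block becomes the identity
print(np.round(masked, 2))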
Example #3
    def build(self):
        with tf.variable_scope("Embeddings"):
            self.embeddings = tf.get_variable("emb", [self.config.n_embed, self.config.d_embed], dtype=tf.float32,
                                         initializer=tf.contrib.layers.xavier_initializer())
            embeddings_root = tf.get_variable("emb_root", [1, 1, 2 * self.config.dim_sem], dtype=tf.float32,
                                                  initializer=tf.contrib.layers.xavier_initializer())
            embeddings_root_s = tf.get_variable("emb_root_s", [1, 1, 2 * self.config.dim_sem], dtype=tf.float32,
                                                initializer=tf.contrib.layers.xavier_initializer())
        with tf.variable_scope("Model"):
            w_comb = tf.get_variable("w_comb", [4 * self.config.dim_sem, 2 * self.config.dim_sem], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            b_comb = tf.get_variable("bias_comb", [2 * self.config.dim_sem], dtype=tf.float32, initializer=tf.constant_initializer())

            w_comb_s = tf.get_variable("w_comb_s", [4 * self.config.dim_sem, 2 * self.config.dim_sem], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            b_comb_s = tf.get_variable("bias_comb_s", [2 * self.config.dim_sem], dtype=tf.float32, initializer=tf.constant_initializer())

            w_softmax = tf.get_variable("w_softmax", [2 * self.config.dim_sem, self.config.dim_output], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            b_softmax = tf.get_variable("bias_softmax", [self.config.dim_output], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())

        '''with tf.variable_scope("Structure/doc"):
            tf.get_variable("w_parser_p", [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_c", [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_s", [2 * self.config.dim_str, 2 * self.config.dim_str], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("bias_parser_p", [2 * self.config.dim_str], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("bias_parser_c", [2 * self.config.dim_str], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_root", [2 * self.config.dim_str, 1], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())'''

        with tf.variable_scope("Structure/sent"):
            tf.get_variable("w_parser_p", [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_c", [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("bias_parser_p", [2 * self.config.dim_str], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("bias_parser_c", [2 * self.config.dim_str], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())

            tf.get_variable("w_parser_s", [2 * self.config.dim_str, 2 * self.config.dim_str], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_root", [2 * self.config.dim_str, 1], dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())

        sent_l = self.t_variables['sent_l']
        max_sent_l = self.t_variables['max_sent_l']
        batch_l = self.t_variables['batch_l']

        tokens_input = tf.nn.embedding_lookup(self.embeddings, self.t_variables['token_idxs'][:, :max_sent_l])
        tokens_input = tf.nn.dropout(tokens_input, self.t_variables['keep_prob'])
        verb_indicator = tf.expand_dims(self.t_variables['verb_indicator'], 2)
        tokens_input = tf.concat([tokens_input, verb_indicator], 2)

        mask_tokens = self.t_variables['mask_tokens'][:, :max_sent_l]
        #mask_sents = self.t_variables['mask_sents'][:]
        [_, _, rnn_size] = tokens_input.get_shape().as_list()
        tokens_input_do = tf.reshape(tokens_input, [batch_l, max_sent_l, rnn_size])

        sent_l = tf.reshape(sent_l, [batch_l])
        mask_tokens = tf.reshape(mask_tokens, [batch_l, -1])

        tokens_output, _ = dynamicBiRNN(tokens_input_do, sent_l, n_hidden=self.config.dim_hidden,
                                        cell_type=self.config.rnn_cell, cell_name='Model/sent')
        tokens_sem = tf.concat([tokens_output[0][:, :, :self.config.dim_sem], tokens_output[1][:, :, :self.config.dim_sem]], 2)
        tokens_str = tf.concat([tokens_output[0][:, :, self.config.dim_sem:], tokens_output[1][:, :, self.config.dim_sem:]], 2)
        temp1 = tf.zeros([batch_l, max_sent_l, 1], tf.float32)
        temp2 = tf.zeros([batch_l, 1, max_sent_l], tf.float32)

        mask1 = tf.ones([batch_l, max_sent_l, max_sent_l - 1], tf.float32)
        mask2 = tf.ones([batch_l, max_sent_l - 1, max_sent_l], tf.float32)
        mask1 = tf.concat([temp1, mask1], 2)
        mask2 = tf.concat([temp2, mask2], 1)

        str_scores_s_ = get_structure('sent', tokens_str, max_sent_l, mask1, mask2)  # batch_l,  sent_l+1, sent_l
        str_scores_s = tf.matrix_transpose(str_scores_s_)  # soft parent
        tokens_sem_root = tf.concat([tf.tile(embeddings_root_s, [batch_l, 1, 1]), tokens_sem], 1)
        tokens_output_ = tf.matmul(str_scores_s, tokens_sem_root)
        tokens_output = LReLu(tf.tensordot(tf.concat([tokens_sem, tokens_output_], 2), w_comb_s, [[2], [0]]) + b_comb_s)
        print("tokens output shape", tokens_output.shape)


        '''if (self.config.sent_attention == 'sum'):
            tokens_output = tokens_output * tf.expand_dims(mask_tokens,2)
            tokens_output = tf.reduce_sum(tokens_output, 1)
        elif (self.config.sent_attention == 'mean'):
            tokens_output = tokens_output * tf.expand_dims(mask_tokens,2)
            tokens_output = tf.reduce_sum(tokens_output, 1)/tf.expand_dims(tf.cast(sent_l,tf.float32),1)
        elif (self.config.sent_attention == 'max'):
            tokens_output = tokens_output + tf.expand_dims((mask_tokens-1)*999,2)
            tokens_output = tf.reduce_max(tokens_output, 1)'''


        #sents_input = tf.reshape(tokens_output, [batch_l, 2*self.config.dim_sem])

        print("tokens output shape", tokens_output.shape)
        #print("sents input shape",sents_input.shape)
        ntime_steps = tf.shape(tokens_output)[1]
        context_rep_flat = tf.reshape(tokens_output, [-1, 2 * self.config.dim_sem])
        pred = tf.matmul(context_rep_flat, w_softmax) + b_softmax
        self.final_output = tf.reshape(pred, [-1, ntime_steps, self.config.dim_output])

        #final_output = MLP(tokens_output, 'output', self.t_variables['keep_prob'])
        #self.final_output = tf.matmul(tokens_output, w_softmax) + b_softmax
        print("final output shape", self.final_output.shape)
Example #4
    def build(self):
        with tf.variable_scope("Embeddings"):
            self.embeddings = tf.get_variable(
                "emb", [self.config.n_embed, self.config.d_embed],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
            embeddings_root = tf.get_variable(
                "emb_root", [1, 1, 2 * self.config.dim_sem],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
            embeddings_root_s = tf.get_variable(
                "emb_root_s", [1, 1, 2 * self.config.dim_sem],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
        with tf.variable_scope("Model"):
            w_comb = tf.get_variable(
                "w_comb", [4 * self.config.dim_sem, 2 * self.config.dim_sem],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
            b_comb = tf.get_variable("bias_comb", [2 * self.config.dim_sem],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer())

            w_comb_s = tf.get_variable(
                "w_comb_s", [4 * self.config.dim_sem, 2 * self.config.dim_sem],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
            b_comb_s = tf.get_variable("bias_comb_s",
                                       [2 * self.config.dim_sem],
                                       dtype=tf.float32,
                                       initializer=tf.constant_initializer())

            w_softmax = tf.get_variable(
                "w_softmax", [2 * self.config.dim_sem, self.config.dim_output],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
            b_softmax = tf.get_variable(
                "bias_softmax", [self.config.dim_output],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())

        with tf.variable_scope("Structure/doc"):
            tf.get_variable("w_parser_p",
                            [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_c",
                            [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_s",
                            [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("bias_parser_p", [2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("bias_parser_c", [2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_root", [2 * self.config.dim_str, 1],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())

        with tf.variable_scope("Structure/sent"):
            tf.get_variable("w_parser_p",
                            [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_c",
                            [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("bias_parser_p", [2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("bias_parser_c", [2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())

            tf.get_variable("w_parser_s",
                            [2 * self.config.dim_str, 2 * self.config.dim_str],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())
            tf.get_variable("w_parser_root", [2 * self.config.dim_str, 1],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer())

        # placeholders used inside the model
        sent_l = self.t_variables['sent_l']  # tf.placeholder(tf.int32, [None, None])
        doc_l = self.t_variables['doc_l']  # tf.placeholder(tf.int32, [None])
        max_sent_l = self.t_variables['max_sent_l']  # tf.placeholder(tf.int32)
        max_doc_l = self.t_variables['max_doc_l']  # tf.placeholder(tf.int32)
        batch_l = self.t_variables['batch_l']  # tf.placeholder(tf.int32)

        # embeddings : [self.config.n_embed, self.config.d_embed]
        # t_variables['token_idxs'] = tf.placeholder(tf.int32, [None, None(document), None(sentence)])
        tokens_input = tf.nn.embedding_lookup(
            self.embeddings,
            self.t_variables['token_idxs'][:, :max_doc_l, :max_sent_l])
        tokens_input = tf.nn.dropout(tokens_input,
                                     self.t_variables['keep_prob'])

        mask_tokens = self.t_variables['mask_tokens'][:, :max_doc_l, :max_sent_l]
        mask_sents = self.t_variables['mask_sents'][:, :max_doc_l]
        [_, _, _, rnn_size] = tokens_input.get_shape().as_list()  # extract the shape as a list
        tokens_input_do = tf.reshape(
            tokens_input, [batch_l * max_doc_l, max_sent_l, rnn_size])

        sent_l = tf.reshape(sent_l, [batch_l * max_doc_l])
        mask_tokens = tf.reshape(mask_tokens, [batch_l * max_doc_l, -1])

        tokens_output, _ = dynamicBiRNN(tokens_input_do,
                                        sent_l,
                                        n_hidden=self.config.dim_hidden,
                                        cell_type=self.config.rnn_cell,
                                        cell_name='Model/sent')
        # tokens_output : [batch_size,max_time,depth]
        # fw : tokens_output[0], bw : tokens_output[1]
        tokens_sem = tf.concat([
            tokens_output[0][:, :, :self.config.dim_sem],
            tokens_output[1][:, :, :self.config.dim_sem]
        ], 2)
        tokens_str = tf.concat([
            tokens_output[0][:, :, self.config.dim_sem:],
            tokens_output[1][:, :, self.config.dim_sem:]
        ], 2)
        temp1 = tf.zeros([batch_l * max_doc_l, max_sent_l, 1],
                         tf.float32)  # shape : (?, ?, 1)
        temp2 = tf.zeros([batch_l * max_doc_l, 1, max_sent_l],
                         tf.float32)  # shape : (?, 1, ?)

        mask1 = tf.ones([batch_l * max_doc_l, max_sent_l, max_sent_l - 1],
                        tf.float32)
        mask2 = tf.ones([batch_l * max_doc_l, max_sent_l - 1, max_sent_l],
                        tf.float32)
        mask1 = tf.concat([temp1, mask1], 2)
        mask2 = tf.concat([temp2, mask2], 1)

        str_scores_s_ = get_structure('sent', tokens_str, max_sent_l, mask1,
                                      mask2)  # batch_l,  sent_l+1, sent_l
        str_scores_s = tf.matrix_transpose(str_scores_s_)  # soft parent
        tokens_sem_root = tf.concat([
            tf.tile(embeddings_root_s, [batch_l * max_doc_l, 1, 1]), tokens_sem
        ], 1)
        tokens_output_ = tf.matmul(str_scores_s, tokens_sem_root)  # (17)
        tokens_output = LReLu(
            tf.tensordot(tf.concat([tokens_sem, tokens_output_], 2), w_comb_s,
                         [[2], [0]]) + b_comb_s)  # (18)

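        # Masked pooling over tokens, as in the other examples: padded slots
        # are zeroed for 'sum'/'mean' and pushed far negative for 'max'.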
        if (self.config.sent_attention == 'sum'):
            tokens_output = tokens_output * tf.expand_dims(mask_tokens, 2)
            tokens_output = tf.reduce_sum(tokens_output, 1)
        elif (self.config.sent_attention == 'mean'):
            tokens_output = tokens_output * tf.expand_dims(mask_tokens, 2)
            tokens_output = tf.reduce_sum(tokens_output, 1) / tf.expand_dims(
                tf.cast(sent_l, tf.float32), 1)
        elif (self.config.sent_attention == 'max'):
            tokens_output = tokens_output + tf.expand_dims(
                (mask_tokens - 1) * 999, 2)
            tokens_output = tf.reduce_max(tokens_output, 1)

        sents_input = tf.reshape(tokens_output,
                                 [batch_l, max_doc_l, 2 * self.config.dim_sem])
        sents_output, _ = dynamicBiRNN(sents_input,
                                       doc_l,
                                       n_hidden=self.config.dim_hidden,
                                       cell_type=self.config.rnn_cell,
                                       cell_name='Model/doc')

        sents_sem = tf.concat([
            sents_output[0][:, :, :self.config.dim_sem],
            sents_output[1][:, :, :self.config.dim_sem]
        ], 2)
        sents_str = tf.concat([
            sents_output[0][:, :, self.config.dim_sem:],
            sents_output[1][:, :, self.config.dim_sem:]
        ], 2)

        str_scores_ = get_structure(
            'doc', sents_str, max_doc_l, self.t_variables['mask_parser_1'],
            self.t_variables['mask_parser_2'])  #batch_l,  sent_l+1, sent_l
        str_scores = tf.matrix_transpose(str_scores_)  # soft parent
        sents_sem_root = tf.concat(
            [tf.tile(embeddings_root, [batch_l, 1, 1]), sents_sem], 1)
        sents_output_ = tf.matmul(str_scores, sents_sem_root)
        sents_output = LReLu(
            tf.tensordot(tf.concat([sents_sem, sents_output_], 2), w_comb,
                         [[2], [0]]) + b_comb)

        if (self.config.doc_attention == 'sum'):
            sents_output = sents_output * tf.expand_dims(mask_sents, 2)
            sents_output = tf.reduce_sum(sents_output, 1)
        elif (self.config.doc_attention == 'mean'):
            sents_output = sents_output * tf.expand_dims(mask_sents, 2)
            sents_output = tf.reduce_sum(sents_output, 1) / tf.expand_dims(
                tf.cast(doc_l, tf.float32), 1)
        elif (self.config.doc_attention == 'max'):
            sents_output = sents_output + tf.expand_dims(
                (mask_sents - 1) * 999, 2)
            sents_output = tf.reduce_max(sents_output, 1)

        final_output = MLP(sents_output, 'output',
                           self.t_variables['keep_prob'])
        self.final_output = tf.matmul(final_output, w_softmax) + b_softmax
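
get_structure is the structured-attention core of these examples: it turns the str features into soft dependency trees in the style of Liu and Lapata's "Learning Structured Text Representations", computing arc marginals with the matrix-tree theorem (Koo et al., 2007). A single-instance NumPy sketch of those marginals, under the assumption that the batched TF version computes the equivalent:

import numpy as np

def soft_parents(f, f_root):
    # f[i, j]: score for token i heading token j; f_root[j]: root score.
    n = f.shape[0]
    A = np.exp(f) * (1.0 - np.eye(n))        # arc weights, no self-loops
    r = np.exp(f_root)                       # root-attachment weights
    L = np.diag(A.sum(axis=0)) - A           # graph Laplacian
    L_bar = np.copy(L)
    L_bar[0, :] = r                          # root-adjusted Laplacian
    Linv = np.linalg.inv(L_bar)
    not0 = (np.arange(n) != 0).astype(float)
    # Arc marginals P[i, j] = P(i is the head of j), Koo et al. (2007)
    P = A * (not0[None, :] * np.diag(Linv)[None, :] - not0[:, None] * Linv.T)
    p_root = r * Linv[:, 0]                  # P(root is the head of j)
    return P, p_root

f = np.random.randn(4, 4)
P, p_root = soft_parents(f, np.random.randn(4))
print(np.round(P.sum(axis=0) + p_root, 6))   # head probabilities per token sum to 1

The transpose applied to the returned scores in the examples (tf.matrix_transpose) then gives each token a distribution over its candidate parents, which is multiplied into the root-augmented semantic vectors.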