Example 1
    def __feedforward(self, inputs, masks, model_dim=None, kernel_size=1, keep_prob=0.5, scope='feed-forward'):
        """Apply a point-wise feed-forward layer followed by dropout."""
        with tf.variable_scope(scope):
            if not model_dim:
                model_dim = inputs.get_shape().as_list()[-1]
            num_units = [4 * model_dim, model_dim]
            outputs = feedforward(inputs, masks, num_units=num_units, kernel_size=kernel_size, scope=scope, reuse=None)
            outputs = tf.nn.dropout(outputs, keep_prob)
            return outputs
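
The method above delegates to an external feedforward helper that is not shown on this page. Below is a minimal sketch of what a compatible helper could look like, assuming it stacks two 1-D convolutions of widths 4*model_dim and model_dim and re-applies the padding mask; this is an illustration under that assumption, not the original implementation.

import tensorflow as tf

def feedforward(inputs, masks, num_units, kernel_size=1, scope='feed-forward', reuse=None):
    # inputs: [batch, seq_len, model_dim]; masks: [batch, seq_len], 1 for real tokens, 0 for padding.
    with tf.variable_scope(scope, reuse=reuse):
        outputs = tf.layers.conv1d(inputs, filters=num_units[0],
                                   kernel_size=kernel_size, activation=tf.nn.relu)
        outputs = tf.layers.conv1d(outputs, filters=num_units[1],
                                   kernel_size=kernel_size, activation=None)
        # Zero out padded positions so they do not leak into later layers.
        outputs *= tf.expand_dims(tf.cast(masks, outputs.dtype), -1)
        return outputs
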
Example 2
    def __init__(self,
                 sequence_length,
                 num_classes,
                 pos_vocab_size,
                 pos_embedding_size,
                 text_embedding_size,
                 filter_sizes,
                 num_heads,
                 num_filters,
                 l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.text_embedded_chars = tf.placeholder(
            tf.float32,
            shape=[None, sequence_length, 768],
            name='text_embedded_chars')
        # self.input_p1 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p1')
        # self.input_p2 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p2')
        self.input_pos = tf.placeholder(tf.int32,
                                        shape=[None, sequence_length],
                                        name='input_pos')
        self.input_y = tf.placeholder(tf.float32,
                                      shape=[None, num_classes],
                                      name='input_y')  #[20 19]
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name='dropout_keep_prob')
        self.emb_dropout_keep_prob = tf.placeholder(
            tf.float32, name='emb_dropout_keep_prob')

        initializer = tf.keras.initializers.glorot_normal

        # Embedding layer
        # with tf.device('/device:GPU:0'), tf.variable_scope("text-embedding"):
        #     # self.W_text = tf.Variable(tf.random_uniform([text_vocab_size, text_embedding_size], -0.25, 0.25), name="W_text")
        #     # self.text_embedded_chars = tf.nn.embedding_lookup(self.W_text, self.input_text) #[800 90 300]
        #     # self.text_embedded_chars = server_bert.get_sentence_embedding(self.input_text) #[800 90 768]
        #     # self.text_embedded_chars_trans = transformer.transformerencoder(self.text_embedded_chars)
        #     self.text_embedded_chars_change = tf.layers.dense(self.text_embedded_chars, units=300,activation=tf.nn.relu,use_bias=True, trainable=True) #[800 90 300]
        #     print("change:",self.text_embedded_chars_change.get_shape())# (?, 90, 300)
        #     self.text_embedded_chars_expanded = tf.expand_dims(self.text_embedded_chars_change, -1) #[800 90 300 1]
        #     print(self.text_embedded_chars_expanded.get_shape())

        with tf.variable_scope("pos-embedding"):
            self.W_pos = tf.get_variable("W_pos",
                                         [pos_vocab_size, pos_embedding_size],
                                         initializer=initializer())
            self.pos_embedded_chars = tf.nn.embedding_lookup(
                self.W_pos, self.input_pos)
            # self.p2_embedded_chars = tf.nn.embedding_lookup(self.W_pos, self.input_p2)
            self.pos_embedded_chars_expanded = tf.expand_dims(
                self.pos_embedded_chars, -1)  #[800 90 50 1]
            # self.p2_embedded_chars_expanded = tf.expand_dims(self.p2_embedded_chars, -1)

        # self.embedded_chars_expanded = tf.concat([self.text_embedded_chars_expanded,
        #                                           self.p1_embedded_chars_expanded,
        #                                           self.p2_embedded_chars_expanded], 2) #[800 90 400 1]

        _embedding_size = 768
        self.text_shape = tf.shape(self.text_embedded_chars)
        # self.text_expand_shape=tf.shape(self.text_embedded_chars_expanded)
        # self.pos_expand_shape=tf.shape(self.p1_embedded_chars_expanded)
        # self.embedd_shape=tf.shape(self.text_embedded_chars_change)
        # self.embedding_size_shape=tf.shape(_embedding_size)

        # Dropout for Word Embedding
        with tf.variable_scope('dropout-embeddings'):
            self.embedded_chars = tf.nn.dropout(self.text_embedded_chars,
                                                self.emb_dropout_keep_prob)
        # self.embedded_chars = tf.layers.dense(self.embedded_chars,units=300,activation=tf.nn.relu,kernel_initializer=initializer())
        # self.embedded_chars_expanded = tf.concat([self.embedded_chars, self.pos_embedded_chars], 2)
        # self-attention
        # with tf.variable_scope("self-attention"):
        #     self.self_attn_output, self.self_alphas = multihead_attention(self.embedded_chars, self.embedded_chars,
        #                                                            num_units=768, num_heads=num_heads)
        #     # print("attention shape:", self.self_attn.get_shape) #(?, 90 ,300)
        #     # self.fnn_output = fnn(self.self_attn_output)
        #     self.self_attn = tf.layers.dense(self.self_attn_output, units=300,
        #                                       activation=tf.nn.relu, use_bias=True,
        # #                                       trainable=True, kernel_initializer=initializer())  # [800 90 300]
        dim_model = 768
        dim_ff = 3072
        num_stack = 1
        ##transformer
        for i in range(num_stack):
            with tf.variable_scope("block-{}".format(i)):
                # Multi-head Attention (self attention)
                with tf.variable_scope("multihead-attention"):
                    self.mh = multihead_attention(query=self.embedded_chars,
                                                  key=self.embedded_chars,
                                                  value=self.embedded_chars,
                                                  dim_model=dim_model,
                                                  num_head=num_heads)
                    # Residual & Layer Normalization
                    self.mh = tf.contrib.layers.layer_norm(
                        self.embedded_chars + self.mh)

                # Position-wise Feed Forward
                with tf.variable_scope("position-wise-feed-forward"):
                    self.ff = feedforward(self.mh, dim_model, dim_ff)
                    # Residual & Layer Normalization
                    self.enc = tf.contrib.layers.layer_norm(self.mh + self.ff)
        # self.enc_expand = tf.expand_dims(self.enc, axis=-1)
        # self.smp = tf.nn.max_pool(value=self.enc_expand, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1],
        #                      padding='SAME')

        # self.self_attn = tf.layers.dense(self.embedded_chars, units=300,
        #                                  activation=tf.nn.relu, use_bias=True,
        #                                  trainable=True, kernel_initializer=initializer())  # [800 90 300]
        # self.enc_change = tf.layers.conv1d(inputs=self.enc, filters=300, kernel_size=1, activation=tf.nn.relu,
        #                                    kernel_initializer=initializer())
        # print("change:", self.enc.get_shape())  # (?, 90, 300)
        self.self_atten_change = tf.expand_dims(self.enc, -1)  # [batch, sequence_length, 768, 1]

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                conv = tf.layers.conv2d(
                    self.self_atten_change,
                    num_filters, [filter_size, _embedding_size],
                    activation=tf.nn.relu,
                    padding="SAME",
                    strides=(1, _embedding_size),
                    name="conv")  # num_filter=128,filter_size=2,3,4,5
                print(conv.get_shape())  # (?, 89, 1, 128); (?, 88, 1, 128); (?, 87, 1, 128); (?, 86, 1, 128)
                # R = tf.squeeze(conv,axis=-2)
                # print(R.get_shape())

                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(conv,
                                        ksize=[1, sequence_length, 1, 1],
                                        strides=[1, sequence_length, 1, 1],
                                        padding='SAME',
                                        name="pool")
                print(pooled.get_shape())  # (?, 1, 1, 128)
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        # print(pooled_outputs.get_shape())
        print(np.array(pooled_outputs).shape)  #(4,)
        self.h_pool = tf.concat(pooled_outputs, 3)
        # print(self.h_pool.get_shape()) #(?,1,1,512)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # print(self.h_pool_flat.get_shape())#(?,512)

        # Add dropout
        with tf.variable_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat,
                                        self.dropout_keep_prob)

        # Final scores and predictions
        with tf.variable_scope("output"):
            self.logits = tf.layers.dense(self.h_drop,
                                          num_classes,
                                          kernel_initializer=initializer())
            print(self.logits.get_shape())  #(?,19)
            self.predictions = tf.argmax(self.logits, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.variable_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=self.logits, labels=self.input_y)
            self.l2 = tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * self.l2

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions,
                                           tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,
                                                   tf.float32),
                                           name="accuracy")
    def __init__(self,
                 sequence_length,
                 num_classes,
                 pos_vocab_size,
                 pos_embedding_size,
                 text_embedding_size,
                 filter_sizes,
                 num_heads,
                 num_filters,
                 l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.text_embedded_chars = tf.placeholder(
            tf.float32,
            shape=[None, sequence_length, 768],
            name='text_embedded_chars')
        # self.input_p1 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p1')
        # self.input_p2 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p2')
        self.e1_index = tf.placeholder(tf.int32, shape=[None], name="e1_index")
        self.e2_index = tf.placeholder(tf.int32, shape=[None], name="e2_index")
        # self.e1_embeded = tf.placeholder(tf.float32, shape=[None, 768], name="e1_embedding")
        # self.e2_embeded = tf.placeholder(tf.float32, shape=[None, 768], name="e2_embedding")
        self.input_pos = tf.placeholder(tf.int32,
                                        shape=[None, sequence_length],
                                        name='input_pos')
        self.input_y = tf.placeholder(tf.float32,
                                      shape=[None, num_classes],
                                      name='input_y')  #[20 19]
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name='dropout_keep_prob')
        self.emb_dropout_keep_prob = tf.placeholder(
            tf.float32, name='emb_dropout_keep_prob')

        initializer = tf.keras.initializers.glorot_normal

        # Embedding layer
        # with tf.device('/device:GPU:0'), tf.variable_scope("text-embedding"):
        #     # self.W_text = tf.Variable(tf.random_uniform([text_vocab_size, text_embedding_size], -0.25, 0.25), name="W_text")
        #     # self.text_embedded_chars = tf.nn.embedding_lookup(self.W_text, self.input_text) #[800 90 300]
        #     # self.text_embedded_chars = server_bert.get_sentence_embedding(self.input_text) #[800 90 768]
        #     # self.text_embedded_chars_trans = transformer.transformerencoder(self.text_embedded_chars)
        #     self.text_embedded_chars_change = tf.layers.dense(self.text_embedded_chars, units=300,activation=tf.nn.relu,use_bias=True, trainable=True) #[800 90 300]
        #     print("change:",self.text_embedded_chars_change.get_shape())# (?, 90, 300)
        #     self.text_embedded_chars_expanded = tf.expand_dims(self.text_embedded_chars_change, -1) #[800 90 300 1]
        #     print(self.text_embedded_chars_expanded.get_shape())

        with tf.variable_scope("pos-embedding"):
            self.W_pos = tf.get_variable("W_pos",
                                         [pos_vocab_size, pos_embedding_size],
                                         initializer=initializer())
            self.pos_embedded_chars = tf.nn.embedding_lookup(
                self.W_pos, self.input_pos)
            # self.p2_embedded_chars = tf.nn.embedding_lookup(self.W_pos, self.input_p2)
            self.pos_embedded_chars_expanded = tf.expand_dims(
                self.pos_embedded_chars, -1)  #[800 90 50 1]
            # self.p2_embedded_chars_expanded = tf.expand_dims(self.p2_embedded_chars, -1)
        # self.text_embedded_chars_re = tf.reshape(self.text_embedded_chars,[tf.shape(self.text_embedded_chars)[0]*tf.shape(self.text_embedded_chars)[1], 768])
        # print(self.text_embedded_chars_re.get_shape())
        # e1_embedded = tf.nn.embedding_lookup(self.text_embedded_chars_re,self.e1_index)
        # e2_embedded = tf.nn.embedding_lookup(self.text_embedded_chars_re, self.e2_index)
        # alpha = attention_2(self.text_embedded_chars, e1_embedded,e2_embedded)

        # self.text_embedded_chars_attention = tf.reduce_sum(tf.multiply(self.text_embedded_chars, tf.expand_dims(alpha, -1)), 1)
        # self.text_embedded_chars_attention = tf.multiply(self.text_embedded_chars, tf.expand_dims(alpha, -1))
        # print("attention_shape:", self.text_embedded_chars_attention.get_shape())
        # self.e1_e2_embeded_2 = tf.expand_dims(self.text_embedded_chars_attention, axis=1)
        # self.text_embedded_chars_2 = tf.concat([self.text_embedded_chars,self.text_embedded_chars_attention], axis=-1)
        # self.text_embedded_chars_2 = tf.add(self.text_embedded_chars, self.text_embedded_chars_attention)
        # self.text_embedded_chars_3 = tf.nn.tanh(self.text_embedded_chars_2)
        # print("e1_shape,",e1_embedded.get_shape())
        # self.embedded_chars_expanded = tf.concat([self.text_embedded_chars_expanded,
        #                                           self.p1_embedded_chars_expanded,
        #                                           self.p2_embedded_chars_expanded], 2) #[800 90 400 1]

        _embedding_size = 768
        self.text_shape = tf.shape(self.text_embedded_chars)
        # self.text_expand_shape=tf.shape(self.text_embedded_chars_expanded)
        # self.pos_expand_shape=tf.shape(self.p1_embedded_chars_expanded)
        # self.embedd_shape=tf.shape(self.text_embedded_chars_change)
        # self.embedding_size_shape=tf.shape(_embedding_size)
        # self.e1_embeded_dense = tf.layers.dense(self.e1_embeded, units=768, activation=tf.nn.tanh)
        # self.e2_embeded_dense = tf.layers.dense(self.e2_embeded, units=768, activation=tf.nn.tanh)
        # self.e1_embeded_dense = tf.nn.tanh(self.e1_embeded)
        # self.e2_embeded_dense = tf.nn.tanh(self.e2_embeded )
        # self.e1_embeded_2 = tf.expand_dims(self.e1_embeded, axis=1)
        # # print(self.e1_embeded.get_shape())
        # self.e2_embeded_2 = tf.expand_dims(self.e2_embeded, axis=1)

        # self.text_embedded_chars_2 = tf.concat([self.text_embedded_chars,self.e1_embeded_2, self.e2_embeded_2], axis=1)
        # print("embedded_char_dim",self.text_embedded_chars_2.get_shape())
        # e1_h = tf.reshape(tf.tile(self.e1_embeded, [1, sequence_length]), [-1, sequence_length, 768])
        # e2_h = tf.reshape(tf.tile(self.e2_embeded, [1, sequence_length]), [-1, sequence_length, 768])
        # self.text_embedded_chars1 = tf.add(self.text_embedded_chars,e1_h)
        # self.text_embedded_chars_2 = tf.add(self.text_embedded_chars1,e2_h)
        # self.text_embedded_chars_dense = tf.layers.dense(self.text_embedded_chars_2, units=768, activation=tf.nn.relu)
        # self.text_embedded_chars_dense = tf.layers.dense(tf.nn.relu(self.text_embedded_chars_2), units=768)
        # print(self.text_embedded_chars_2.get_shape())
        #entity-aware

        # Dropout for Word Embedding
        with tf.variable_scope('dropout-embeddings'):
            # self.embedded_chars_expanded = tf.concat([self.text_embedded_chars, self.pos_embedded_chars], 2)
            self.embedded_chars = tf.nn.dropout(self.text_embedded_chars,
                                                self.emb_dropout_keep_prob)
        # alpha = attention_2(self.embedded_chars, self.e1_embeded, self.e2_embeded)
        # self.embedded_chars = tf.multiply(self.embedded_chars_2, tf.expand_dims(alpha, -1))
        # self.embedded_chars= tf.contrib.layers.layer_norm(self.embedded_chars_3 + self.embedded_chars_2)
        # self.embedded_chars_expanded = tf.concat([self.embedded_chars, self.pos_embedded_chars], 2)
        # self-attention
        # with tf.variable_scope("self-attention"):
        #     self.self_attn_output, self.self_alphas = multihead_attention(self.embedded_chars, self.embedded_chars,
        #                                                            num_units=768, num_heads=num_heads)
        #     # print("attention shape:", self.self_attn.get_shape) #(?, 90 ,300)
        #     # self.fnn_output = fnn(self.self_attn_output)
        #     self.self_attn = tf.layers.dense(self.self_attn_output, units=300,
        #                                       activation=tf.nn.relu, use_bias=True,
        #                                       trainable=True, kernel_initializer=initializer())  # [800 90 300]
        dim_model = 768
        dim_ff = 3072
        num_stack = 1
        ##transformer
        for i in range(num_stack):
            with tf.variable_scope("block-{}".format(i)):
                # Multi-head Attention (self attention)
                with tf.variable_scope("multihead-attention"):
                    self.mh = multihead_attention(query=self.embedded_chars,
                                                  key=self.embedded_chars,
                                                  value=self.embedded_chars,
                                                  dim_model=dim_model,
                                                  num_head=num_heads)
                    # Residual & Layer Normalization
                    self.mh = tf.contrib.layers.layer_norm(
                        self.embedded_chars + self.mh)

                # Position-wise Feed Forward
                with tf.variable_scope("position-wise-feed-forward"):
                    self.ff = feedforward(self.mh, dim_model, dim_ff)
                    # Residual & Layer Normalization
                    self.enc = tf.contrib.layers.layer_norm(self.mh + self.ff)

        # alpha = attention(self.enc)
        # self.batch_size = tf.shape(self.enc)[0]
        # print(self.batch_size)
        # e1_embedd = tf.reduce_sum(self.enc[0][self.e1_start[0]:self.e1_end[0]], axis=0)
        # print(self.enc.shape[0].value)
        # print(tf.size(self.enc).eval())
        # for i in range(1, tf.shape(self.enc)[0].eval()):
        #     e1 = tf.reduce_sum(self.enc[i][self.e1_start[i]:self.e1_end[i] + 1], axis=0)
        #     # print("e1 shape",e1.shape)
        #     # e2 = np.mean(text_embedded_chars[i][train_be2_start[i]:train_be2_end[i] + 1], axis=0)
        #     e1_embedd = np.vstack((e1_embedd, e1))
        #     # e2_embedd = np.vstack((e2_embedd, e2))
        # print(e1_embedd.get_shape())
        # print(e1_embedd.shape)
        # e2_embedd = np.mean(text_embedded_chars[0][train_be2_start[0]:train_be2_end[0] + 1], axis=0)
        # for i in range(1, text_embedded_chars.shape[0]):
        #     e1 = np.mean(text_embedded_chars[i][train_be1_start[i]:train_be1_end[i] + 1], axis=0)
        #     # print("e1 shape",e1.shape)
        #     e2 = np.mean(text_embedded_chars[i][train_be2_start[i]:train_be2_end[i] + 1], axis=0)
        #     e1_embedd = np.vstack((e1_embedd, e1))
        #     e2_embedd = np.vstack((e2_embedd, e2))
        # self.enc_expand = tf.expand_dims(self.enc, axis=-1)
        # self.smp = tf.nn.max_pool(value=self.enc_expand, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1],
        #                      padding='SAME')

        # self.self_attn = tf.layers.dense(self.enc, units=300,
        #                                  activation=tf.nn.relu, use_bias=True,
        #                                  trainable=True, kernel_initializer=initializer())  # [800 90 300]
        # self.enc_change = tf.layers.conv1d(inputs=self.enc, filters=300, kernel_size=1, activation=tf.nn.relu,
        #                                    kernel_initializer=initializer())
        # print("change:", self.enc.get_shape())  # (?, 90, 300)
        self.self_atten_change = tf.expand_dims(self.enc, -1)  # [batch, sequence_length, 768, 1]
        # i=0
        # n = tf.shape(self.enc)[0]
        # def cond(i,n):
        #     return i<n
        # def body(i,n):
        #     e1 =
        # tf.while_loop
        self.embedd_dense = tf.layers.dense(self.embedded_chars,
                                            units=300,
                                            activation=tf.nn.relu,
                                            use_bias=True,
                                            trainable=True,
                                            kernel_initializer=initializer())
        self.text_embedded_chars_re = tf.reshape(self.embedd_dense, [
            tf.shape(self.embedd_dense)[0] * tf.shape(self.embedd_dense)[1],
            300
        ])
        print(self.text_embedded_chars_re.get_shape())
        self.e1_embedded = tf.nn.embedding_lookup(self.text_embedded_chars_re,
                                                  self.e1_index)
        e2_embedded = tf.nn.embedding_lookup(self.text_embedded_chars_re,
                                             self.e2_index)
        e1_output, e2_output = attention_entity(self.embedd_dense,
                                                self.e1_embedded, e2_embedded)
        # e1_embedded_dense = tf.layers.dense(e1_embedded, units=128, use_bias=False, activation=tf.nn.relu,
        #                                     kernel_initializer=initializer())
        # e2_embedded_dense = tf.layers.dense(e2_embedded, units=128, use_bias=False, activation=tf.nn.relu,
        #                                     kernel_initializer=initializer())
        # print("e1_shape:",e1_embedded.get_shape())
        # alpha = attention_2(self.enc, self.e1_index, self.e2_index)
        # print("alpha:", alpha.get_shape())
        # self.enc_attention =  tf.reduce_sum(tf.multiply(self.enc, tf.expand_dims(alpha, -1)),axis=1)
        # print(self.enc_attention.get_shape())
        # # self.e1_e2_embeded_2 = tf.expand_dims(self.enc_attention, axis=1)
        # self.text_embedded_chars_2 = tf.add(self.enc, self.enc_attention)
        # # self.text_embedded_chars_2 = tf.concat([self.enc, self.enc_attention], -1)
        # self.self_atten_change = tf.expand_dims(self.text_embedded_chars_2, -1)
        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                conv = tf.layers.conv2d(
                    self.self_atten_change,
                    num_filters, [filter_size, _embedding_size],
                    kernel_initializer=initializer(),
                    activation=tf.nn.relu,
                    padding="SAME",
                    strides=(1, _embedding_size),
                    name="conv")  # num_filter=128,filter_size=2,3,4,5

                print(conv.get_shape())  # (?, 89, 1, 128); (?, 88, 1, 128); (?, 87, 1, 128); (?, 86, 1, 128)
                # if filter_size==3:

                pooled = tf.nn.max_pool(conv,
                                        ksize=[1, sequence_length, 1, 1],
                                        strides=[1, sequence_length, 1, 1],
                                        padding='SAME',
                                        name="pool")
                print(pooled.get_shape())  # (?, 1, 1, 128)
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        # print(pooled_outputs.get_shape())
        print(np.array(pooled_outputs).shape)  #(4,)
        self.h_pool = tf.concat(pooled_outputs, 2)
        # print(self.h_pool.get_shape()) #(?,1,1,512)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        self.h_pool_flat_2 = tf.concat(
            [self.h_pool_flat, e1_output, e2_output], axis=-1)
        print(self.h_pool_flat.get_shape())  #(?,512)
        print(self.h_pool_flat_2.get_shape())
        # Add dropout
        with tf.variable_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat_2,
                                        self.dropout_keep_prob)
        print(self.h_drop.get_shape())
        # Final scores and predictions
        with tf.variable_scope("output"):
            self.logits = tf.layers.dense(self.h_drop,
                                          num_classes,
                                          kernel_initializer=initializer())
            print(self.logits.get_shape())  #(?,19)
            self.predictions = tf.argmax(self.logits, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.variable_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=self.logits, labels=self.input_y)
            self.l2 = tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * self.l2

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions,
                                           tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,
                                                   tf.float32),
                                           name="accuracy")
    def __init__(self, sequence_length, num_classes,
                 text_vocab_size, text_embedding_size, pos_vocab_size, pos_embedding_size,
                 filter_sizes, num_filters, l2_reg_lambda=0.0, use_elmo=False):

        # Placeholders for input, output and dropout
        self.input_text = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_text')
        self.input_x_text = tf.placeholder(tf.string, shape=[None,], name='input_x_text')
        self.input_p1 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p1')
        self.input_p2 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p2')
        self.input_y = tf.placeholder(tf.float32, shape=[None, num_classes], name='input_y')
        self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')

        initializer = tf.keras.initializers.glorot_normal


        # Embedding layer

        if use_elmo:
            # Contextual Embedding Layer
            with tf.variable_scope("elmo-embeddings"):
                elmo_model = hub.Module("/home/wangyan/relation_extraction/ELMo", trainable=True)
                self.text_embedded_chars = elmo_model(self.input_x_text, signature="default", as_dict=True)["elmo"]
                # print(list(self.input_x_text)[:2])
                print("embedding_shape:", self.text_embedded_chars.get_shape())
                # sequence_length =self.text_embedded_chars.shape[1].value
        else:
            with tf.device('/cpu:0'), tf.variable_scope("text-embedding"):
                self.W_text = tf.Variable(tf.random_uniform([text_vocab_size, text_embedding_size], -0.25, 0.25), name="W_text")
                self.text_embedded_chars = tf.nn.embedding_lookup(self.W_text, self.input_text) #[800 90 300]

        with tf.variable_scope('dropout-embeddings'):
            # self.embedded_chars_expanded = tf.concat([self.text_embedded_chars, self.pos_embedded_chars], 2)
            self.embedded_chars = tf.nn.dropout(self.text_embedded_chars, self.dropout_keep_prob)
        # self.embedded_chars = tf.layers.dense(self.embedded_chars, units=300,
        #                                  activation=tf.nn.relu, use_bias=True,
        #                                  trainable=True, kernel_initializer=initializer())
        dim_model = 100
        dim_ff = 400
        num_stack = 1
        num_heads = 12
        # self.embedded_chars = self.text_embedded_chars
        ##transformer
        for i in range(num_stack):
            with tf.variable_scope("block-{}".format(i)):
                # Multi-head Attention (self attention)
                with tf.variable_scope("multihead-attention"):
                    self.mh = multihead_attention(query=self.embedded_chars, key=self.embedded_chars,
                                                  value=self.embedded_chars,
                                                  dim_model=dim_model, num_head=num_heads)
                    # Residual & Layer Normalization
                    self.mh = tf.contrib.layers.layer_norm(self.embedded_chars + self.mh)

                # Position-wise Feed Forward
                with tf.variable_scope("position-wise-feed-forward"):
                    self.ff = feedforward(self.mh, dim_model, dim_ff)
                    # Residual & Layer Normalization
                    self.enc = tf.contrib.layers.layer_norm(self.mh + self.ff)
        # self.enc_change = tf.layers.conv1d(inputs=self.enc, filters=300, kernel_size=1, activation=tf.nn.relu)
        # self.self_attn = tf.layers.dense(self.enc, units=300,
        #                                  activation=tf.nn.relu, use_bias=True,
        #                                  trainable=True, kernel_initializer=initializer())  # [800 90 300]
        print("change:", self.enc.get_shape())  # (?, 90, 300)
        self.self_atten_change = tf.expand_dims(self.enc, -1)  # [800 90 300 1]

        # with tf.device('/cpu:0'), tf.variable_scope("position-embedding"):
        #     self.W_pos = tf.get_variable("W_pos", [pos_vocab_size, pos_embedding_size], initializer=initializer())
        #     self.p1_embedded_chars = tf.nn.embedding_lookup(self.W_pos, self.input_p1)
        #     self.p2_embedded_chars = tf.nn.embedding_lookup(self.W_pos, self.input_p2)
        #     self.p1_embedded_chars_expanded = tf.expand_dims(self.p1_embedded_chars, -1) #[800 90 50 1]
        #     self.p2_embedded_chars_expanded = tf.expand_dims(self.p2_embedded_chars, -1)

        # self.embedded_chars_expanded = tf.concat([self.text_embedded_chars_expanded,
        #                                           self.p1_embedded_chars_expanded,
        #                                           self.p2_embedded_chars_expanded], 2) #[800 90 400 1]
        _embedding_size = 100
        self.text_shape = tf.shape(self.text_embedded_chars)
        self.text_expand_shape = tf.shape(self.self_atten_change)
        # self.pos_expand_shape=tf.shape(self.p1_embedded_chars_expanded)
        # self.embedd_shape=tf.shape(self.embedded_chars_expanded)
        self.embedding_size_shape = tf.shape(_embedding_size)
        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                conv = tf.layers.conv2d(self.self_atten_change, num_filters, [filter_size, _embedding_size],
                                        kernel_initializer=initializer(), activation=tf.nn.relu, padding="SAME",
                                        strides=(1, _embedding_size),
                                        name="conv")  # num_filter=128,filter_size=2,3,4,5
                print(conv.get_shape())  # (?, 89, 1, 128); (?, 88, 1, 128); (?, 87, 1, 128); (?, 86, 1, 128)
                # R = tf.squeeze(conv,axis=-2)
                # print(R.get_shape())

                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(conv, ksize=[1, sequence_length, 1, 1],
                                        strides=[1, sequence_length, 1, 1], padding='SAME', name="pool")
                print(pooled.get_shape())  # (?, 1, 1, 128)
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        print(np.array(pooled_outputs).shape)  # (4,)
        self.h_pool = tf.concat(pooled_outputs, 3)
        print(self.h_pool.get_shape())  # (?, 1, 1, 512)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        print(self.h_pool_flat.get_shape())  # (?, 512)

        # Add dropout
        with tf.variable_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final scores and predictions
        with tf.variable_scope("output"):
            self.logits = tf.layers.dense(self.h_drop, num_classes, kernel_initializer=initializer())
            self.predictions = tf.argmax(self.logits, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.variable_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y)
            self.l2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * self.l2

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy")
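
All of these constructors rely on an external multihead_attention(query, key, value, dim_model, num_head) that is not reproduced on this page. A minimal sketch of a compatible implementation, scaled dot-product attention with the heads split along the feature axis, is given below; it is an illustration under that signature, not the repository's code.

import tensorflow as tf

def multihead_attention(query, key, value, dim_model, num_head):
    # dim_model must be divisible by num_head.
    q = tf.layers.dense(query, dim_model)   # [batch, len_q, dim_model]
    k = tf.layers.dense(key, dim_model)     # [batch, len_k, dim_model]
    v = tf.layers.dense(value, dim_model)   # [batch, len_k, dim_model]

    # Split into heads and stack them along the batch axis.
    q_ = tf.concat(tf.split(q, num_head, axis=2), axis=0)
    k_ = tf.concat(tf.split(k, num_head, axis=2), axis=0)
    v_ = tf.concat(tf.split(v, num_head, axis=2), axis=0)

    scores = tf.matmul(q_, k_, transpose_b=True)
    scores /= tf.sqrt(tf.cast(dim_model // num_head, tf.float32))
    weights = tf.nn.softmax(scores)
    heads = tf.matmul(weights, v_)          # [batch * num_head, len_q, dim_model / num_head]

    # Merge the heads back to [batch, len_q, dim_model].
    return tf.concat(tf.split(heads, num_head, axis=0), axis=2)
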
    def __init__(self,
                 sequence_length,
                 num_classes,
                 pos_vocab_size,
                 pos_embedding_size,
                 text_embedding_size,
                 filter_sizes,
                 num_heads,
                 num_filters,
                 l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.text_embedded_chars = tf.placeholder(
            tf.float32,
            shape=[None, sequence_length, 768],
            name='text_embedded_chars')
        # self.input_p1 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p1')
        # self.input_p2 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p2')
        self.e1_start = tf.placeholder(tf.int32, shape=[None], name="e1_start")
        self.e2_start = tf.placeholder(tf.int32, shape=[None], name="e2_start")
        self.e1_end = tf.placeholder(tf.int32, shape=[None], name="e1_end")
        self.e2_end = tf.placeholder(tf.int32, shape=[None], name="e2_end")
        self.batch_size_len = tf.placeholder(tf.int32, name="batch_size_len")
        # self.e1_embeded = tf.placeholder(tf.float32, shape=[None, 768], name="e1_embedding")
        # self.e2_embeded = tf.placeholder(tf.float32, shape=[None, 768], name="e2_embedding")
        self.input_pos = tf.placeholder(tf.int32,
                                        shape=[None, sequence_length],
                                        name='input_pos')
        self.input_y = tf.placeholder(tf.float32,
                                      shape=[None, num_classes],
                                      name='input_y')  #[20 19]
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name='dropout_keep_prob')
        self.emb_dropout_keep_prob = tf.placeholder(
            tf.float32, name='emb_dropout_keep_prob')

        initializer = tf.keras.initializers.glorot_normal

        # Embedding layer
        # with tf.device('/device:GPU:0'), tf.variable_scope("text-embedding"):
        #     # self.W_text = tf.Variable(tf.random_uniform([text_vocab_size, text_embedding_size], -0.25, 0.25), name="W_text")
        #     # self.text_embedded_chars = tf.nn.embedding_lookup(self.W_text, self.input_text) #[800 90 300]
        #     # self.text_embedded_chars = server_bert.get_sentence_embedding(self.input_text) #[800 90 768]
        #     # self.text_embedded_chars_trans = transformer.transformerencoder(self.text_embedded_chars)
        #     self.text_embedded_chars_change = tf.layers.dense(self.text_embedded_chars, units=300,activation=tf.nn.relu,use_bias=True, trainable=True) #[800 90 300]
        #     print("change:",self.text_embedded_chars_change.get_shape())# (?, 90, 300)
        #     self.text_embedded_chars_expanded = tf.expand_dims(self.text_embedded_chars_change, -1) #[800 90 300 1]
        #     print(self.text_embedded_chars_expanded.get_shape())

        with tf.variable_scope("pos-embedding"):
            self.W_pos = tf.get_variable("W_pos",
                                         [pos_vocab_size, pos_embedding_size],
                                         initializer=initializer())
            self.pos_embedded_chars = tf.nn.embedding_lookup(
                self.W_pos, self.input_pos)
            # self.p2_embedded_chars = tf.nn.embedding_lookup(self.W_pos, self.input_p2)
            self.pos_embedded_chars_expanded = tf.expand_dims(
                self.pos_embedded_chars, -1)  #[800 90 50 1]
            # self.p2_embedded_chars_expanded = tf.expand_dims(self.p2_embedded_chars, -1)

        _embedding_size = 768

        # Dropout for Word Embedding
        with tf.variable_scope('dropout-embeddings'):
            # self.embedded_chars_expanded = tf.concat([self.text_embedded_chars, self.pos_embedded_chars], 2)
            self.embedded_chars = tf.nn.dropout(self.text_embedded_chars,
                                                self.emb_dropout_keep_prob)

        dim_model = 768
        dim_ff = 3072
        num_stack = 1
        ##transformer
        for i in range(num_stack):
            with tf.variable_scope("block-{}".format(i)):
                # Multi-head Attention (self attention)
                with tf.variable_scope("multihead-attention"):
                    self.mh = multihead_attention(query=self.embedded_chars,
                                                  key=self.embedded_chars,
                                                  value=self.embedded_chars,
                                                  dim_model=dim_model,
                                                  num_head=num_heads)
                    # Residual & Layer Normalization
                    self.mh = tf.contrib.layers.layer_norm(
                        self.embedded_chars + self.mh)

                # Position-wise Feed Forward
                with tf.variable_scope("position-wise-feed-forward"):
                    self.ff = feedforward(self.mh, dim_model, dim_ff)
                    # Residual & Layer Normalization
                    self.enc = tf.contrib.layers.layer_norm(self.mh + self.ff)

        print(self.enc.shape[0].value)
        print(self.batch_size_len)
        m = tf.constant(value=32, dtype=tf.int32)
        print(m)
        print(tf.shape(self.enc)[0])
        print(FLAGS.batch_size_len)
        print("e1_start:", self.e1_start.get_shape())

        def true_func():
            e1_embedd = tf.expand_dims(tf.reduce_mean(
                self.enc[0][self.e1_start[0]:self.e1_end[0] + 1], axis=0),
                                       axis=0)
            e2_embedd = tf.expand_dims(tf.reduce_mean(
                self.enc[0][self.e2_start[0]:self.e2_end[0] + 1], axis=0),
                                       axis=0)
            print("e1 shape", e1_embedd.shape)
            for i in range(1, 32):
                e1 = tf.expand_dims(tf.reduce_mean(
                    self.enc[i][self.e1_start[i]:self.e1_end[i] + 1], axis=0),
                                    axis=0)
                e2 = tf.expand_dims(tf.reduce_mean(
                    self.enc[i][self.e2_start[i]:self.e2_end[i] + 1], axis=0),
                                    axis=0)
                e1_embedd = tf.concat([e1_embedd, e1], axis=0)
                e2_embedd = tf.concat([e2_embedd, e2], axis=0)
            print("embed shape:", e1_embedd.shape)
            # e1_embedd = tf.nn.relu(e1_embedd)
            # e2_embedd = tf.nn.relu(e2_embedd)
            print("embed shape:", e1_embedd.shape)
            return e1_embedd, e2_embedd

        def false_func():
            e1_embedd = tf.expand_dims(tf.reduce_mean(
                self.enc[0][self.e1_start[0]:self.e1_end[0] + 1], axis=0),
                                       axis=0)
            e2_embedd = tf.expand_dims(tf.reduce_mean(
                self.enc[0][self.e2_start[0]:self.e2_end[0] + 1], axis=0),
                                       axis=0)
            print("e1 shape", e1_embedd.shape)
            for i in range(1, 29):
                e1 = tf.expand_dims(tf.reduce_mean(
                    self.enc[i][self.e1_start[i]:self.e1_end[i] + 1], axis=0),
                                    axis=0)
                # print(self.enc.shape[0].value)
                # print("e1 shape",e1.shape)
                # print("i:",FLAGS.batch_size_len)
                e2 = tf.expand_dims(tf.reduce_mean(
                    self.enc[i][self.e2_start[i]:self.e2_end[i] + 1], axis=0),
                                    axis=0)
                e1_embedd = tf.concat([e1_embedd, e1], axis=0)
                e2_embedd = tf.concat([e2_embedd, e2], axis=0)
            print("embed shape 1:", e1_embedd.shape)
            # e1_embedd = tf.nn.relu(e1_embedd)
            # e2_embedd = tf.nn.relu(e2_embedd)
            print("embed shape 1:", e1_embedd.shape)
            return e1_embedd, e2_embedd

        e1_embedd_2, e2_embedd_2 = tf.cond(pred=tf.equal(
            m, self.batch_size_len),
                                           true_fn=true_func,
                                           false_fn=false_func)
        print("embed shape 2:", e1_embedd_2.shape)
        # def extract_entity(x, e):
        #     e_idx = tf.concat([tf.expand_dims(tf.range(tf.shape(e)[0]), axis=-1), tf.expand_dims(e, axis=-1)], axis=-1)
        #     return tf.gather_nd(x, e_idx)
        #
        # # self.enc_dense = tf.layers.dense(self.enc, units=128, activation=tf.nn.relu, kernel_initializer=initializer())
        # e1_end = extract_entity(self.enc, self.e1_end)  # (batch, hidden)
        # e1_start = extract_entity(self.enc, self.e1_start)
        # print("e1_h", e1_end.get_shape())
        # e2_end = extract_entity(self.enc, self.e2_end)  # (batch, hidden)
        # e2_start = extract_entity(self.enc, self.e2_start)
        # e1_h_add = (e1_start+e1_end)/2.0
        # e2_h_add = (e2_start+e2_end)/2.0
        # alpha = attention_4(self.enc , e1_h, e2_h)
        # self.enc_attention = tf.reduce_sum(tf.multiply(self.enc, tf.expand_dims(alpha, -1)), axis=1)
        # self.enc_attention_h = tf.expand_dims(self.enc_attention,1)
        # e1_h = tf.expand_dims(e1_h,1)
        # e2_h = tf.expand_dims(e2_h,1)

        # print(e1_h.get_shape())
        # e1_end_h = tf.expand_dims(e1_end, 1)
        # e2_end_h = tf.expand_dims(e2_end,1)
        # e1_start_h = tf.expand_dims(e1_start, 1)
        # e2_start_h = tf.expand_dims(e2_start, 1)
        # print(e1_end_h.get_shape())
        # e1_h =  (e1_start_h + e1_end_h)/2.0
        # e2_h =  (e2_start_h + e2_end_h)/2.0
        # e1_h = tf.reshape(tf.tile(e1_h_add, [1, sequence_length]), [-1, sequence_length, 128])  # (batch, seq_len, hidden_size)
        # e2_h = tf.reshape(tf.tile(e2_h_add, [1, sequence_length]), [-1, sequence_length, 128])  # (batch, seq_len, hidden_size)
        # e_h = (e1_h + e2_h)/2.0
        e1_h = tf.expand_dims(e1_embedd_2, axis=1)
        e2_h = tf.expand_dims(e2_embedd_2, axis=1)
        input_e1 = tf.concat([self.enc, e1_h, e2_h], axis=1)
        print(input_e1.get_shape())
        self.self_atten_change = tf.expand_dims(input_e1, -1)  # [batch, sequence_length + 2, 768, 1]
        # self.self_atten_change = tf.expand_dims(self.enc,-1)
        self.enc_dense = tf.layers.dense(self.enc,
                                         units=128,
                                         activation=tf.nn.relu)

        def true_func_2():
            e1_embedd_dense = tf.expand_dims(tf.reduce_mean(
                self.enc_dense[0][self.e1_start[0]:self.e1_end[0] + 1],
                axis=0),
                                             axis=0)
            e2_embedd_dense = tf.expand_dims(tf.reduce_mean(
                self.enc_dense[0][self.e2_start[0]:self.e2_end[0] + 1],
                axis=0),
                                             axis=0)
            print("e1 shape", e1_embedd_dense.shape)
            for i in range(1, 32):
                e1 = tf.expand_dims(tf.reduce_mean(
                    self.enc_dense[i][self.e1_start[i]:self.e1_end[i] + 1],
                    axis=0),
                                    axis=0)
                e2 = tf.expand_dims(tf.reduce_mean(
                    self.enc_dense[i][self.e2_start[i]:self.e2_end[i] + 1],
                    axis=0),
                                    axis=0)
                e1_embedd_dense = tf.concat([e1_embedd_dense, e1], axis=0)
                e2_embedd_dense = tf.concat([e2_embedd_dense, e2], axis=0)
            print("embed shape:", e1_embedd_dense.shape)
            return e1_embedd_dense, e2_embedd_dense

        def false_func_2():
            e1_embedd_dense = tf.expand_dims(tf.reduce_mean(
                self.enc_dense[0][self.e1_start[0]:self.e1_end[0] + 1],
                axis=0),
                                             axis=0)
            e2_embedd_dense = tf.expand_dims(tf.reduce_mean(
                self.enc_dense[0][self.e2_start[0]:self.e2_end[0] + 1],
                axis=0),
                                             axis=0)
            print("e1 shape", e1_embedd_dense.shape)
            for i in range(1, 29):
                e1 = tf.expand_dims(tf.reduce_mean(
                    self.enc_dense[i][self.e1_start[i]:self.e1_end[i] + 1],
                    axis=0),
                                    axis=0)
                e2 = tf.expand_dims(tf.reduce_mean(
                    self.enc_dense[i][self.e2_start[i]:self.e2_end[i] + 1],
                    axis=0),
                                    axis=0)
                e1_embedd_dense = tf.concat([e1_embedd_dense, e1], axis=0)
                e2_embedd_dense = tf.concat([e2_embedd_dense, e2], axis=0)
            print("embed shape:", e1_embedd_dense.shape)

            return e1_embedd_dense, e2_embedd_dense

        e1_embedd_dense_2, e2_embedd_dense_2 = tf.cond(pred=tf.equal(
            m, self.batch_size_len),
                                                       true_fn=true_func_2,
                                                       false_fn=false_func_2)
        print("dense:", e1_embedd_dense_2.get_shape())
        alpha = attention_7(self.enc_dense, e1_embedd_dense_2,
                            e2_embedd_dense_2)
        self.enc_attention_1 = tf.reduce_sum(tf.multiply(
            self.enc_dense, tf.expand_dims(alpha, -1)),
                                             axis=1)

        # self.enc_attention_dense = tf.layers.dense(self.enc_attention_1, units=128,
        #                                            activation=tf.nn.relu)
        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                conv = tf.layers.conv2d(
                    self.self_atten_change,
                    num_filters, [filter_size, _embedding_size],
                    activation=tf.nn.relu,
                    padding="SAME",
                    strides=(1, _embedding_size),
                    name="conv")  # num_filter=128,filter_size=2,3,4,5
                print(conv.get_shape())  # (?, 89, 1, 128); (?, 88, 1, 128); (?, 87, 1, 128); (?, 86, 1, 128)

                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(conv,
                                        ksize=[1, sequence_length + 2, 1, 1],
                                        strides=[1, sequence_length + 2, 1, 1],
                                        padding='SAME',
                                        name="pool")
                print(pooled.get_shape())  # (?, 1, 1, 128)
                pooled_outputs.append(pooled)
        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        # print(pooled_outputs.get_shape())
        print(np.array(pooled_outputs).shape)  #(4,)
        self.h_pool = tf.concat(pooled_outputs, 3)
        # print(self.h_pool.get_shape()) #(?,1,1,512)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        self.h_pool_flat_2 = tf.concat(
            [self.h_pool_flat, self.enc_attention_1], axis=-1)
        print(self.h_pool_flat.get_shape())  #(?,512)
        # print(self.h_pool_flat_2.get_shape())
        # Add dropout
        with tf.variable_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat_2,
                                        self.dropout_keep_prob)
        print(self.h_drop.get_shape())
        # Final scores and predictions
        with tf.variable_scope("output"):
            self.logits = tf.layers.dense(self.h_drop,
                                          num_classes,
                                          kernel_initializer=initializer())
            print(self.logits.get_shape())  #(?,19)
            self.predictions = tf.argmax(self.logits, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.variable_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=self.logits, labels=self.input_y)
            self.l2 = tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * self.l2

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions,
                                           tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,
                                                   tf.float32),
                                           name="accuracy")
    def __init__(self, sequence_length, num_classes,
                 vocab_size, embedding_size, pos_vocab_size, pos_embedding_size,
                 hidden_size, num_heads, attention_size,
                 use_elmo=False, l2_reg_lambda=0.0):
        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_x')
        self.input_y = tf.placeholder(tf.float32, shape=[None, num_classes], name='input_y')
        self.input_text = tf.placeholder(tf.string, shape=[None, ], name='input_text')
        self.input_p1 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p1')
        self.input_p2 = tf.placeholder(tf.int32, shape=[None, sequence_length], name='input_p2')
        self.emb_dropout_keep_prob = tf.placeholder(tf.float32, name='emb_dropout_keep_prob')
        self.rnn_dropout_keep_prob = tf.placeholder(tf.float32, name='rnn_dropout_keep_prob')
        self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
        # print("e1 shape:", self.input_e1.get_shape()) #(?,)

        if use_elmo:
            # Contextual Embedding Layer
            with tf.variable_scope("elmo-embeddings"):
                elmo_model = hub.Module("/home/wangyan/relation_extraction/ELMo", trainable=True)
                self.embedded_chars = elmo_model(self.input_text, signature="default", as_dict=True)["elmo"]
                print(self.embedded_chars.get_shape())
        else:
            # Word Embedding Layer
            with tf.device('/cpu:0'), tf.variable_scope("word-embeddings"):
                self.W_text = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -0.25, 0.25), name="W_text")
                self.embedded_chars = tf.nn.embedding_lookup(self.W_text, self.input_x)
                # print("shape:",self.embedded_chars.get_shape()) #(?, 90, 300)

        # Position Embedding Layer
        # with tf.device('/cpu:0'), tf.variable_scope("position-embeddings"):
        #     self.W_pos = tf.get_variable("W_pos", [pos_vocab_size, pos_embedding_size], initializer=initializer())
        #     print("embedding_char:", self.embedded_chars.get_shape()[1])
        #     self.p1 = tf.nn.embedding_lookup(self.W_pos, self.input_p1)[:, :tf.shape(self.embedded_chars)[1]]
        #     self.p2 = tf.nn.embedding_lookup(self.W_pos, self.input_p2)[:, :tf.shape(self.embedded_chars)[1]]
        #     # print("p shape:", self.p1.get_shape()) # (?, ? 50)

        # Dropout for Word Embedding
        with tf.variable_scope('dropout-embeddings'):
            self.embedded_chars = tf.nn.dropout(self.embedded_chars,  self.emb_dropout_keep_prob)

        dim_model = 300
        dim_ff = 1200
        num_stack = 1
        num_heads = 12  # note: overrides the num_heads constructor argument
        # self.embedded_chars = self.text_embedded_chars
        ##transformer
        for i in range(num_stack):
            with tf.variable_scope("block-{}".format(i)):
                # Multi-head Attention (self attention)
                with tf.variable_scope("multihead-attention"):
                    self.mh = multihead_attention(query=self.embedded_chars, key=self.embedded_chars,
                                                  value=self.embedded_chars,
                                                  dim_model=dim_model, num_head=num_heads)
                    # Residual & Layer Normalization
                    self.mh = tf.contrib.layers.layer_norm(self.embedded_chars + self.mh)

                # Position-wise Feed Forward
                with tf.variable_scope("position-wise-feed-forward"):
                    self.ff = feedforward(self.mh, dim_model, dim_ff)
                    # Residual & Layer Normalization
                    self.enc = tf.contrib.layers.layer_norm(self.mh + self.ff)
        # self.enc_change = tf.layers.conv1d(inputs=self.enc, filters=300, kernel_size=1, activation=tf.nn.relu)
        # self.self_attn = tf.layers.dense(self.enc, units=300,
        #                                  activation=tf.nn.relu, use_bias=True,
        #                                  trainable=True, kernel_initializer=initializer())  # [800 90 300]
        print("change:", self.enc.get_shape())  # (?, 90, 300)
        # self.self_atten_change = tf.expand_dims(self.enc, -1)  # [800 90 300 1]dim_model = 1024
        # print("attention shape:", self.self_attn.get_shape) #(?, 90 ,300)
        # Bidirectional LSTM
        with tf.variable_scope("bi-lstm"):
            _fw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size, initializer=initializer())
            fw_cell = tf.nn.rnn_cell.DropoutWrapper(_fw_cell, self.rnn_dropout_keep_prob)
            _bw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size, initializer=initializer())
            bw_cell = tf.nn.rnn_cell.DropoutWrapper(_bw_cell, self.rnn_dropout_keep_prob)
            print("sequence_len:",self._length(self.input_x).get_shape())
            self.rnn_outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_cell,
                                                                  cell_bw=bw_cell,
                                                                  inputs=self.enc,
                                                                  sequence_length=self._length(self.input_x),
                                                                  dtype=tf.float32)

            self.rnn_outputs = tf.concat(self.rnn_outputs, axis=-1)
            # print("rnn_output_shape:", self.rnn_outputs.get_shape()) #(? 90 600)

        # Attention
        with tf.variable_scope('attention'):
            self.attn, self.alphas = attention_output(self.rnn_outputs)

        # print("attn:", self.attn.get_shape()) #(? 600)

        # Dropout
        with tf.variable_scope('dropout'):
            self.h_drop = tf.nn.dropout(self.attn, self.dropout_keep_prob)

        # Fully connected layer
        with tf.variable_scope('output'):
            self.logits = tf.layers.dense(self.h_drop, num_classes, kernel_initializer=initializer())
            print("logit shape:", self.attn.get_shape()) #(? ,600)
            self.predictions = tf.argmax(self.logits, 1, name="predictions")
            print("predit shape:", self.predictions.get_shape()) #(?,)

        # Calculate mean cross-entropy loss
        with tf.variable_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y)
            self.l2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * self.l2

        # Accuracy
        with tf.variable_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy")
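
The bi-LSTM above asks for per-example lengths via self._length(self.input_x), a method not included on this page. A common sketch of such a helper, assuming padding tokens have id 0 (an assumption, not the original method):

import tensorflow as tf

def _length(seq):
    # seq: [batch, seq_len] integer token ids, with 0 used for padding.
    used = tf.sign(tf.abs(seq))                     # 1 for real tokens, 0 for padding
    return tf.cast(tf.reduce_sum(used, axis=1), tf.int32)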