def build_graph(self):
        with self._graph.as_default():
            if self._conf['rand_seed'] is not None:
                rand_seed = self._conf['rand_seed']
                tf.set_random_seed(rand_seed)
                print('set tf random seed: %s' % self._conf['rand_seed'])

            #word embedding
            if self._word_embedding_init is not None:
                word_embedding_initializer = tf.constant_initializer(
                    self._word_embedding_init)
            else:
                word_embedding_initializer = tf.random_normal_initializer(
                    stddev=0.1)

            self._word_embedding = tf.get_variable(
                name='word_embedding',
                shape=[self._conf['vocab_size'] + 1, self._conf['emb_size']],
                dtype=tf.float32,
                initializer=word_embedding_initializer)

            #define placeholders
            #config max_turn_history_num
            self.turns_history = tf.placeholder(
                tf.int32,
                shape=[
                    self._conf["batch_size"],
                    self._conf["max_turn_history_num"],
                    self._conf["max_turn_len"]
                ])

            self.turns = tf.placeholder(tf.int32,
                                        shape=[
                                            self._conf["batch_size"],
                                            self._conf["max_turn_num"],
                                            self._conf["max_turn_len"]
                                        ])

            self.tt_turns_len = tf.placeholder(
                tf.int32, shape=[self._conf["batch_size"]])

            self.every_turn_len = tf.placeholder(
                tf.int32,
                shape=[self._conf["batch_size"], self._conf["max_turn_num"]])

            self.response = tf.placeholder(
                tf.int32,
                shape=[self._conf["batch_size"], self._conf["max_turn_len"]])

            self.response_len = tf.placeholder(
                tf.int32, shape=[self._conf["batch_size"]])

            self.label = tf.placeholder(tf.float32,
                                        shape=[self._conf["batch_size"]])

            #define operations
            #response part
            Hr = tf.nn.embedding_lookup(self._word_embedding, self.response)
            turns_history_embedding = tf.nn.embedding_lookup(
                self._word_embedding, self.turns_history)

            if self._conf['is_positional'] and self._conf['stack_num'] > 0:
                with tf.variable_scope('positional'):
                    Hr = op.positional_encoding_vector(Hr, max_timescale=10)
            Hr_stack = [Hr]

            _batch_size, _turn_nums, _turn_words, _emb_size = turns_history_embedding.get_shape(
            ).as_list()
            turns_history_embedding = tf.reshape(turns_history_embedding,
                                                 [-1, _turn_words, _emb_size])
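            # batch and turn axes are merged here so that every history turn goes
            # through the self-attention stack below independently; the original
            # 4-D shape is restored right after the loop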

            for index in range(self._conf['stack_num']):
                turns_history_embedding, _ = self._multihead(
                    turns_history_embedding, turns_history_embedding,
                    turns_history_embedding)

            turns_history_embedding = tf.reshape(
                turns_history_embedding,
                [_batch_size, _turn_nums, _turn_words, _emb_size])

            for index in range(self._conf['stack_num']):
                with tf.variable_scope('self_stack_' + str(index)):
                    Hr = layers.block(Hr,
                                      Hr,
                                      Hr,
                                      Q_lengths=self.response_len,
                                      K_lengths=self.response_len)
                    Hr_stack.append(Hr)

            with tf.variable_scope('respone_extraction_history'):
                turn_important_inf = []
                #TODO: a fully-connected layer should be added here
                for _t in tf.split(turns_history_embedding,
                                   self._conf['max_turn_history_num'], 1):
                    _t = tf.squeeze(_t, axis=1)  # drop only the split turn axis
                    #_match_result = layers.attention(Hr_stack[-1], _t,  _t, self.response_len, self.response_len)
                    _match_result = layers.attention(
                        self._dense1(Hr_stack[-1]), _t, _t, self.response_len,
                        self.response_len)
                    turn_important_inf.append(tf.expand_dims(_match_result, 1))

            best_turn_match = tf.concat(turn_important_inf, 1)
            with tf.variable_scope('response_extraciton_best_information'):
                #best_information,_ = self._multihead(Hr_stack[-1], best_turn_match, best_turn_match)
                best_information, _ = self._multihead(
                    self._dense2(Hr_stack[-1]), best_turn_match,
                    best_turn_match)
                best_information = layers.FFN(best_information)
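                # best_information: the response representation enriched with the
                # most relevant history-turn content; it is added to Hu_stack in
                # the cross-attention blocks below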

            #context part
            #a list of length max_turn_num, every element is a tensor with shape [batch, max_turn_len]
            list_turn_t = tf.unstack(self.turns, axis=1)
            list_turn_length = tf.unstack(self.every_turn_len, axis=1)

            sim_turns = []
            #for every turn_t calculate matching vector
            for turn_t, t_turn_length in zip(list_turn_t, list_turn_length):
                Hu = tf.nn.embedding_lookup(
                    self._word_embedding,
                    turn_t)  #[batch, max_turn_len, emb_size]

                if self._conf['is_positional'] and self._conf['stack_num'] > 0:
                    with tf.variable_scope('positional', reuse=True):
                        Hu = op.positional_encoding_vector(Hu,
                                                           max_timescale=10)
                Hu_stack = [Hu]

                for index in range(self._conf['stack_num']):

                    with tf.variable_scope('self_stack_' + str(index),
                                           reuse=True):
                        Hu = layers.block(Hu,
                                          Hu,
                                          Hu,
                                          Q_lengths=t_turn_length,
                                          K_lengths=t_turn_length)

                        Hu_stack.append(Hu)

                r_a_t_stack = []
                t_a_r_stack = []
                for index in range(self._conf['stack_num'] + 1):

                    with tf.variable_scope('t_attend_r_' + str(index)):
                        try:
                            t_a_r = layers.block(tf.add(
                                Hu_stack[index], best_information),
                                                 Hr_stack[index],
                                                 Hr_stack[index],
                                                 Q_lengths=t_turn_length,
                                                 K_lengths=self.response_len)
                        except ValueError:
                            tf.get_variable_scope().reuse_variables()
                            t_a_r = layers.block(tf.add(
                                Hu_stack[index], best_information),
                                                 Hr_stack[index],
                                                 Hr_stack[index],
                                                 Q_lengths=t_turn_length,
                                                 K_lengths=self.response_len)

                    with tf.variable_scope('r_attend_t_' + str(index)):
                        try:
                            r_a_t = layers.block(
                                Hr_stack[index],
                                tf.add(Hu_stack[index], best_information),
                                tf.add(Hu_stack[index], best_information),
                                Q_lengths=self.response_len,
                                K_lengths=t_turn_length)
                        except ValueError:
                            tf.get_variable_scope().reuse_variables()
                            r_a_t = layers.block(
                                Hr_stack[index],
                                tf.add(Hu_stack[index], best_information),
                                tf.add(Hu_stack[index], best_information),
                                Q_lengths=self.response_len,
                                K_lengths=t_turn_length)

                    t_a_r_stack.append(t_a_r)
                    r_a_t_stack.append(r_a_t)

                t_a_r_stack.extend(Hu_stack)
                r_a_t_stack.extend(Hr_stack)

                t_a_r = tf.stack(t_a_r_stack, axis=-1)
                r_a_t = tf.stack(r_a_t_stack, axis=-1)

                #calculate similarity matrix
                with tf.variable_scope('similarity'):
                    # sim shape [batch, max_turn_len, max_turn_len, 2*(stack_num+1)]
                    # divide sqrt(200) to prevent gradient explosion
                    sim = tf.einsum('biks,bjks->bijs', t_a_r,
                                    r_a_t) / tf.sqrt(200.0)

                sim_turns.append(sim)

            #cnn and aggregation
            sim = tf.stack(sim_turns, axis=1)
            print('sim shape: %s' % sim.shape)
            with tf.variable_scope('cnn_aggregation'):
                final_info = layers.CNN_3d(sim, 32, 16)
                #final_info_dim = final_info.get_shape().as_list()[-1]
                #for douban
                #final_info = layers.CNN_3d(sim, 16, 16)
                #                 _x = self._conv1d(best_information)
                #                 _x = self._pool1d(_x)
                #final_info = tf.concat([final_info,best_information],-1)

            #loss and train
            with tf.variable_scope('loss'):
                self.loss, self.logits = layers.loss(final_info, self.label)

                self.global_step = tf.Variable(0, trainable=False)
                initial_learning_rate = self._conf['learning_rate']
                self.learning_rate = tf.train.exponential_decay(
                    initial_learning_rate,
                    global_step=self.global_step,
                    decay_steps=400,
                    decay_rate=0.9,
                    staircase=True)

                Optimizer = tf.train.AdamOptimizer(self.learning_rate)
                self.optimizer = Optimizer.minimize(
                    self.loss, global_step=self.global_step)

                self.init = tf.global_variables_initializer()
                self.saver = tf.train.Saver(
                    max_to_keep=self._conf["max_to_keep"])
                self.all_variables = tf.global_variables()
                self.all_operations = self._graph.get_operations()
                self.grads_and_vars = Optimizer.compute_gradients(self.loss)

                for grad, var in self.grads_and_vars:
                    if grad is None:
                        print(var)

                self.capped_gvs = [(tf.clip_by_value(grad, -1, 1), var)
                                   for grad, var in self.grads_and_vars]
                self.g_updates = Optimizer.apply_gradients(
                    self.capped_gvs, global_step=self.global_step)

        return self._graph
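
Example 1 above (and every snippet that follows) relies on `layers.block(Q, K, V, Q_lengths=..., K_lengths=...)`, whose definition is not part of this listing. Below is a minimal, hedged sketch of what a DAM-style attentive module with that call signature typically computes: scaled dot-product attention with key-length masking, followed by a position-wise feed-forward layer, each with a residual connection and layer normalization. The names `scaled_dot_attention` and `attentive_block` are hypothetical; the repository's real `layers.block` may differ in details such as the masking or normalization scheme.

import tensorflow as tf

def scaled_dot_attention(Q, K, V, Q_lengths, K_lengths):
    # Q: [batch, Q_len, emb]; K, V: [batch, K_len, emb]
    # (Q_lengths is kept for signature parity; only key positions are masked here)
    d = float(Q.get_shape().as_list()[-1])
    logits = tf.einsum('bik,bjk->bij', Q, K) / tf.sqrt(d)
    key_mask = tf.sequence_mask(K_lengths, tf.shape(K)[1], dtype=tf.float32)  # [batch, K_len]
    logits += (1.0 - tf.expand_dims(key_mask, 1)) * -1e9   # mask padded keys
    weights = tf.nn.softmax(logits)                        # [batch, Q_len, K_len]
    return tf.einsum('bij,bjk->bik', weights, V)           # [batch, Q_len, emb]

def attentive_block(Q, K, V, Q_lengths, K_lengths):
    # attention sub-layer with residual connection and layer norm
    att = scaled_dot_attention(Q, K, V, Q_lengths, K_lengths)
    x = tf.contrib.layers.layer_norm(Q + att)
    # position-wise feed-forward sub-layer with residual connection and layer norm
    emb_size = x.get_shape().as_list()[-1]
    ffn = tf.layers.dense(tf.layers.dense(x, emb_size, activation=tf.nn.relu), emb_size)
    return tf.contrib.layers.layer_norm(x + ffn)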
Example 2
    def build_graph(self):
        with self._graph.as_default():
            rand_seed = self._conf['rand_seed']
            tf.set_random_seed(rand_seed)

            #word embedding
            if self._word_embedding_init is not None:
                word_embedding_initializer = tf.constant_initializer(
                    self._word_embedding_init)
            else:
                word_embedding_initializer = tf.random_normal_initializer(
                    stddev=0.1)

            self._word_embedding = tf.get_variable(
                name='word_embedding',
                shape=[self._conf['vocab_size'] + 1, self._conf['emb_size']],
                dtype=tf.float32,
                initializer=word_embedding_initializer)

            #define placeholders
            self.turns = tf.placeholder(tf.int32,
                                        shape=[
                                            self._conf["batch_size"],
                                            self._conf["max_turn_num"],
                                            self._conf["max_turn_len"]
                                        ])

            self.tt_turns_len = tf.placeholder(
                tf.int32, shape=[self._conf["batch_size"]])

            self.every_turn_len = tf.placeholder(
                tf.int32,
                shape=[self._conf["batch_size"], self._conf["max_turn_num"]])

            self.response = tf.placeholder(
                tf.int32,
                shape=[self._conf["batch_size"], self._conf["max_turn_len"]])

            self.response_len = tf.placeholder(
                tf.int32, shape=[self._conf["batch_size"]])

            self.label = tf.placeholder(tf.float32,
                                        shape=[self._conf["batch_size"]])

            #define operations
            #response part
            Hr = tf.nn.embedding_lookup(self._word_embedding, self.response)

            if self._conf['is_positional'] and self._conf['stack_num'] > 0:
                with tf.variable_scope('positional'):
                    Hr = op.positional_encoding_vector(Hr, max_timescale=10)

            for index in range(self._conf['stack_num']):
                with tf.variable_scope('self_stack_' + str(index)):
                    Hr = layers.block(Hr,
                                      Hr,
                                      Hr,
                                      Q_lengths=self.response_len,
                                      K_lengths=self.response_len)

            #context part
            #a list of length max_turn_num, every element is a tensor with shape [batch, max_turn_len]
            list_turn_t = tf.unstack(self.turns, axis=1)
            list_turn_length = tf.unstack(self.every_turn_len, axis=1)

            sim_turns = []
            #for every turn_t calculate matching vector
            for turn_t, t_turn_length in zip(list_turn_t, list_turn_length):
                Hu = tf.nn.embedding_lookup(
                    self._word_embedding,
                    turn_t)  #[batch, max_turn_len, emb_size]

                if self._conf['is_positional'] and self._conf['stack_num'] > 0:
                    with tf.variable_scope('positional', reuse=True):
                        Hu = op.positional_encoding_vector(Hu,
                                                           max_timescale=10)

                for index in range(self._conf['stack_num']):

                    with tf.variable_scope('self_stack_' + str(index),
                                           reuse=True):
                        Hu = layers.block(Hu,
                                          Hu,
                                          Hu,
                                          Q_lengths=t_turn_length,
                                          K_lengths=t_turn_length)

                with tf.variable_scope('u_attentd_r_' + str(index)):
                    try:
                        u_a_r = layers.block(Hu,
                                             Hr,
                                             Hr,
                                             Q_lengths=t_turn_length,
                                             K_lengths=self.response_len)
                    except ValueError:
                        tf.get_variable_scope().reuse_variables()
                        u_a_r = layers.block(Hu,
                                             Hr,
                                             Hr,
                                             Q_lengths=t_turn_length,
                                             K_lengths=self.response_len)

                with tf.variable_scope('r_attend_u_' + str(index)):
                    try:
                        r_a_u = layers.block(Hr,
                                             Hu,
                                             Hu,
                                             Q_lengths=self.response_len,
                                             K_lengths=t_turn_length)
                    except ValueError:
                        tf.get_variable_scope().reuse_variables()
                        r_a_u = layers.block(Hr,
                                             Hu,
                                             Hu,
                                             Q_lengths=self.response_len,
                                             K_lengths=t_turn_length)

                u_a_r = tf.stack([u_a_r, Hu], axis=-1)
                r_a_u = tf.stack([r_a_u, Hr], axis=-1)

                #calculate similarity matrix
                with tf.variable_scope('similarity'):
                    # sim shape [batch, max_turn_len, max_turn_len, 2]
                    # divide sqrt(200) to prevent gradient explosion
                    sim = tf.einsum('biks,bjks->bijs', r_a_u,
                                    u_a_r) / tf.sqrt(200.0)

                sim_turns.append(sim)

            #cnn and aggregation
            sim = tf.stack(sim_turns, axis=1)
            print('sim shape: %s' % sim.shape)
            with tf.variable_scope('cnn_aggregation'):
                final_info = layers.CNN_3d(sim, 32, 16)
                #for douban
                #final_info = layers.CNN_3d(sim, 16, 16)

            #loss and train
            with tf.variable_scope('loss'):
                self.loss, self.logits = layers.loss(final_info, self.label)

                self.global_step = tf.Variable(0, trainable=False)
                initial_learning_rate = self._conf['learning_rate']
                self.learning_rate = tf.train.exponential_decay(
                    initial_learning_rate,
                    global_step=self.global_step,
                    decay_steps=400,
                    decay_rate=0.9,
                    staircase=True)

                Optimizer = tf.train.AdamOptimizer(self.learning_rate)
                self.optimizer = Optimizer.minimize(self.loss)

                self.init = tf.global_variables_initializer()
                self.saver = tf.train.Saver(
                    max_to_keep=self._conf["max_to_keep"])
                self.all_variables = tf.global_variables()
                self.all_operations = self._graph.get_operations()
                self.grads_and_vars = Optimizer.compute_gradients(self.loss)

                for grad, var in self.grads_and_vars:
                    if grad is None:
                        print(var)

                self.capped_gvs = [(tf.clip_by_value(grad, -1, 1), var)
                                   for grad, var in self.grads_and_vars]
                self.g_updates = Optimizer.apply_gradients(
                    self.capped_gvs, global_step=self.global_step)

        return self._graph
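
Both graphs above apply `op.positional_encoding_vector(x, max_timescale=10)` before the self-attention stacks. A minimal sketch of the standard sinusoidal (Transformer-style) position signal such a helper is usually expected to add is shown below; the function name `add_positional_encoding` and the `min_timescale` default are assumptions, not the repository's actual implementation.

import math

import tensorflow as tf

def add_positional_encoding(x, min_timescale=1.0, max_timescale=10.0):
    # x: [batch, length, channels]; returns x plus a fixed sinusoidal position signal
    length = tf.shape(x)[1]
    channels = x.get_shape().as_list()[-1]
    position = tf.cast(tf.range(length), tf.float32)
    num_timescales = channels // 2
    log_timescale_increment = (math.log(max_timescale / min_timescale) /
                               max(num_timescales - 1, 1))
    inv_timescales = min_timescale * tf.exp(
        tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment)
    scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    signal = tf.pad(signal, [[0, 0], [0, channels % 2]])  # handle odd channel counts
    return x + tf.expand_dims(signal, 0)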
    def build_graph(self):
        with self._graph.as_default():
            if self._conf['rand_seed'] is not None:
                rand_seed = self._conf['rand_seed']
                tf.set_random_seed(rand_seed)
                print('set tf random seed: %s' % self._conf['rand_seed'])

            # word embedding
            if self._word_embedding_init is not None:
                word_embedding_initializer = tf.constant_initializer(
                    self._word_embedding_init)
            else:
                word_embedding_initializer = tf.random_normal_initializer(
                    stddev=0.1)

            self._word_embedding = tf.get_variable(
                name='word_embedding',
                shape=[self._conf['vocab_size'] + 1, self._conf['emb_size']],
                dtype=tf.float32,
                initializer=word_embedding_initializer)

            # define placeholders
            self.turns = tf.placeholder(
                tf.int32,
                shape=[self._conf["batch_size"], self._conf["max_turn_num"],
                       self._conf["max_turn_len"]])

            self.tt_turns_len = tf.placeholder(  # turn_num
                tf.int32,
                shape=[self._conf["batch_size"]])

            self.every_turn_len = tf.placeholder(
                tf.int32,
                shape=[self._conf["batch_size"], self._conf["max_turn_num"]])

            self.turns_intent = tf.placeholder(
                tf.float32,
                shape=[self._conf["batch_size"], self._conf["max_turn_num"],
                       self._conf["intent_size"]])

            self.response = tf.placeholder(
                tf.int32,
                shape=[self._conf["batch_size"], self._conf["max_turn_len"]])

            self.response_len = tf.placeholder(
                tf.int32,
                shape=[self._conf["batch_size"]])

            self.response_intent = tf.placeholder(
                tf.float32,
                shape=[self._conf["batch_size"], self._conf["intent_size"]])

            self.label = tf.placeholder(
                tf.float32,
                shape=[self._conf["batch_size"]])

            # define operations
            # response part
            Hr = tf.nn.embedding_lookup(self._word_embedding, self.response)
            # [batch_size, max_turn_len, embed_size]

            # print('[after embedding_lookup] Hr shape: %s' % Hr.shape)

            if self._conf['is_positional'] and self._conf['stack_num'] > 0:
                with tf.variable_scope('positional'):
                    Hr = op.positional_encoding_vector(Hr, max_timescale=10)
            Hr_stack = [Hr]  # 1st element of Hr_stack is the original embedding
            # lyang comments: self attention
            for index in range(self._conf['stack_num']):
                # print('[self attention for response] stack index: %d ' % index)
                with tf.variable_scope('self_stack_' + str(index)):
                    # [batch, max_turn_len, emb_size]
                    Hr = layers.block(  # attentive module
                        Hr, Hr, Hr,
                        Q_lengths=self.response_len,
                        K_lengths=self.response_len)
                    # print('[after layers.block] Hr shape: %s' % Hr.shape)
                    # Hr is still [batch_size, max_turn_len, embed_size]
                    Hr_stack.append(Hr)

            # print('[after self attention of response] len(Hr_stack)',
            #       len(Hr_stack))  # 1+stack_num
            # context part
            # a list of length max_turn_num, every element is a tensor with shape [batch, max_turn_len]
            list_turn_t = tf.unstack(self.turns, axis=1)
            list_turn_length = tf.unstack(self.every_turn_len, axis=1)
            list_turn_intent = tf.unstack(self.turns_intent, axis=1)

            sim_turns = []
            attention_turns = [] # intent based attention on each turn
            # for every turn_t calculate matching vector
            turn_index = 0
            for turn_t, t_turn_length, t_intent in zip(list_turn_t, list_turn_length, list_turn_intent):
                print('current turn_index : ', turn_index)
                turn_index += 1
                Hu = tf.nn.embedding_lookup(self._word_embedding,
                                            turn_t)  # [batch, max_turn_len, emb_size]
                # print('[after embedding_lookup] Hu shape: %s' % Hu.shape)

                if self._conf['is_positional'] and self._conf['stack_num'] > 0:
                    with tf.variable_scope('positional', reuse=True):
                        Hu = op.positional_encoding_vector(Hu,
                                                           max_timescale=10)
                Hu_stack = [Hu]  # 1st element of Hu_stack is the original embedding

                # lyang comments: self attention
                for index in range(self._conf['stack_num']):
                    # print('[self attention for context turn] stack index: %d ' % index)
                    with tf.variable_scope('self_stack_' + str(index),
                                           reuse=True):
                        # [batch, max_turn_len, emb_size]
                        Hu = layers.block(  # attentive module
                            Hu, Hu, Hu,
                            Q_lengths=t_turn_length, K_lengths=t_turn_length)
                        # print('[after layers.block] Hu shape: %s' % Hu.shape)
                        Hu_stack.append(Hu)
                # print('[after self attention of context turn] len(Hu_stack)',
                #       len(Hu_stack))  # 1+stack_num

                # lyang comments: cross attention
                # print('[cross attention ...]')
                r_a_t_stack = []
                t_a_r_stack = []
                # cross attention
                for index in range(self._conf['stack_num'] + 1):
                    # print('[cross attention] stack index = ', index)
                    with tf.variable_scope('t_attend_r_' + str(index)):
                        try:
                            # [batch, max_turn_len, emb_size]
                            t_a_r = layers.block(  # attentive module
                                Hu_stack[index], Hr_stack[index],
                                Hr_stack[index],
                                Q_lengths=t_turn_length,
                                K_lengths=self.response_len)
                        except ValueError:
                            tf.get_variable_scope().reuse_variables()
                            t_a_r = layers.block(
                                # [batch, max_turn_len, emb_size]
                                Hu_stack[index], Hr_stack[index],
                                Hr_stack[index],
                                Q_lengths=t_turn_length,
                                K_lengths=self.response_len)
                        # print('[cross attention t_attend_r_] stack index: %d, t_a_r.shape: %s' % (
                        #         index, t_a_r.shape))

                    with tf.variable_scope('r_attend_t_' + str(index)):
                        try:
                            # [batch, max_turn_len, emb_size]
                            r_a_t = layers.block(  # attentive module
                                Hr_stack[index], Hu_stack[index],
                                Hu_stack[index],
                                Q_lengths=self.response_len,
                                K_lengths=t_turn_length)
                        except ValueError:
                            tf.get_variable_scope().reuse_variables()
                            r_a_t = layers.block(
                                Hr_stack[index], Hu_stack[index],
                                Hu_stack[index],
                                Q_lengths=self.response_len,
                                K_lengths=t_turn_length)
                        # print('[cross attention r_a_t_] stack index: %d, r_a_t.shape: %s' % (
                        #         index, r_a_t.shape))

                    t_a_r_stack.append(t_a_r)
                    r_a_t_stack.append(r_a_t)
                    # print('[cross attention] len(t_a_r_stack):', len(t_a_r_stack))
                    # print('[cross attention] len(r_a_t_stack):', len(r_a_t_stack))

                # print('[before extend] len(t_a_r_stack):', len(t_a_r_stack))
                # print('[before extend] len(r_a_t_stack):', len(r_a_t_stack))
                # lyang comments: 3D aggregation
                t_a_r_stack.extend(
                    Hu_stack)  # half from self-attention; half from cross-attention
                r_a_t_stack.extend(
                    Hr_stack)  # half from self-attention; half from cross-attention
                # after extend, len(t_a_r_stack) = 2*(stack_num+1)

                # print('[after extend] len(t_a_r_stack):', len(t_a_r_stack))
                # print('[after extend] len(r_a_t_stack):', len(r_a_t_stack))

                t_a_r = tf.stack(t_a_r_stack, axis=-1)
                r_a_t = tf.stack(r_a_t_stack, axis=-1)

                # print('after stack along the last dimension: ')
                # print('t_a_r shape: %s' % t_a_r.shape)
                # print('r_a_t shape: %s' % r_a_t.shape)
                # after stack, t_a_r and r_a_t are (batch, max_turn_len, embed_size, 2*(stack_num+1))

                with tf.variable_scope('intent_based_attention',
                                       reuse=tf.AUTO_REUSE): # share parameter across different turns
                    # there are 3 different ways to implement intent based attention;
                    # implement these three variations and compare their
                    # effectiveness as a model ablation analysis
                    # let I_u_t and I_r_k be intent vectors of shape [12,1]
                    # 1. dot: w * [I_u_t, I_r_k], where w is [24,1]
                    # 2. bilinear: I_u_t' * w * I_r_k, where w is [12,12]
                    # 3. outprod: I_u_t * I_r_k' -> [12,12] outer product ->
                    #             flatten to [144,1], then w * outprod,
                    #             where w is [1,144]
                    # (a hedged sketch of such a helper follows this build_graph)
                    attention_logits = layers.attention_intent(t_intent,
                                        self.response_intent,
                                        self._conf['intent_attention_type'])
                    # print('[intent_based_attention] attention_logits.shape: %s' % attention_logits.shape)
                    attention_turns.append(attention_logits)

                # calculate similarity matrix
                with tf.variable_scope('similarity'):
                    # sim shape [batch, max_turn_len, max_turn_len, 2*(stack_num+1)]
                    # divide sqrt(200) to prevent gradient explosion
                    # A_biks * B_bjks -> C_bijs
                    sim = tf.einsum('biks,bjks->bijs', t_a_r, r_a_t) / tf.sqrt(
                        200.0)
                    # (batch, max_turn_len, embed_size, 2*(stack_num+1)) *
                    # (batch, max_turn_len, embed_size, 2*(stack_num+1)) ->
                    # [batch, max_turn_len, max_turn_len, 2*(stack_num+1)]
                    # where k is corresponding to the dimension of embed_size,
                    # which can be eliminated by dot product with einsum
                    # print('[similarity] after einsum dot prod sim shape: %s' % sim.shape)
                    # [batch, max_turn_len, max_turn_len, 2*(stack_num+1)]
                    # ! Here we multiply sim by intent based attention weights before
                    # appending sim to sim_turns, in order to generate the weighted
                    # stack in the next step

                sim_turns.append(sim)
                # print('[similarity] after append, len(sim_turns):', len(sim_turns))

            attention_logits = tf.stack(attention_turns, axis=1) # [batch, max_turn_num]
            print('[attention_logits] after stack attention_logits.shape: %s' % attention_logits.shape)
            # add mask in attention following the way in BERT
            # real turn_num is in self.tt_turns_len [batch]
            # return a mask tensor with shape [batch,  conf['max_turn_num']]
            attention_mask = tf.sequence_mask(self.tt_turns_len, self._conf['max_turn_num'],
                                              dtype=tf.float32)
            print('[attention_mask] attention_mask.shape: %s' % attention_mask.shape)
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            adder = (1.0 - attention_mask) * -10000.0
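            # e.g. with tt_turns_len = [2] and max_turn_num = 4:
            #   attention_mask = [1., 1., 0., 0.]
            #   adder          = [0., 0., -10000., -10000.]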

            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_logits += adder
            attention = tf.nn.softmax(attention_logits) # by default softmax along dim=-1 [batch, max_turn_num]
            print('[attention] attention.shape: %s' % attention.shape)
            self.attention = attention # will print it for visualization

            # cnn and aggregation
            # lyang comments aggregation by 3D CNN layer
            # [3d cnn aggregation] sim shape: (32, 9, 180, 180, 10)
            # conv_0 shape: (32, 9, 180, 180, 16)
            # pooling_0 shape: (32, 3, 60, 60, 16)
            # conv_1 shape: (32, 3, 60, 60, 16)
            # pooling_1 shape: (32, 1, 20, 20, 16)
            # [3d cnn aggregation] final_info: (32, 6400) # [batch * feature_size]
            # [batch, max_turn_num, max_turn_len, max_turn_len, 2*(stack_num+1)]
            # (32, 9, 180, 180, 10)
            sim = tf.stack(sim_turns, axis=1)
            # multiply sim by attention score
            sim = tf.einsum('bijks,bi->bijks', sim, attention)
            print('[3d cnn aggregation] sim shape: %s' % sim.shape)
            with tf.variable_scope('cnn_aggregation'):
                final_info = layers.CNN_3d(sim, self._conf['cnn_3d_oc0'],
                                           self._conf['cnn_3d_oc1'])
                # for udc
                # final_info = layers.CNN_3d(sim, 32, 16)
                # for douban
                # final_info = layers.CNN_3d(sim, 16, 16)

            print('[3d cnn aggregation] final_info: %s' % final_info.shape)
            # loss and train
            with tf.variable_scope('loss'):
                self.loss, self.logits = layers.loss(final_info, self.label)

                self.global_step = tf.Variable(0, trainable=False)
                initial_learning_rate = self._conf['learning_rate']
                self.learning_rate = tf.train.exponential_decay(
                    initial_learning_rate,
                    global_step=self.global_step,
                    decay_steps=400,
                    decay_rate=0.9,
                    staircase=True)

                Optimizer = tf.train.AdamOptimizer(self.learning_rate)
                self.optimizer = Optimizer.minimize(
                    self.loss,
                    global_step=self.global_step)

                self.init = tf.global_variables_initializer()
                self.saver = tf.train.Saver(
                    max_to_keep=self._conf["max_to_keep"])
                self.all_variables = tf.global_variables()
                self.all_operations = self._graph.get_operations()
                self.grads_and_vars = Optimizer.compute_gradients(self.loss)

                for grad, var in self.grads_and_vars:
                    if grad is None:
                        print(var)

                self.capped_gvs = [(tf.clip_by_value(grad, -1, 1), var) for
                                   grad, var in self.grads_and_vars]
                self.g_updates = Optimizer.apply_gradients(
                    self.capped_gvs,
                    global_step=self.global_step)

        return self._graph
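
The comment block inside the `intent_based_attention` scope above describes three scoring variants (dot, bilinear, outer product), but `layers.attention_intent` itself is defined elsewhere in the repository. The following is a hedged sketch of how such a helper could look given only those comments; the scope name, weight names, and the `use_bias` choices are assumptions, not the actual implementation.

import tensorflow as tf

def attention_intent(t_intent, r_intent, attention_type):
    # t_intent, r_intent: [batch, intent_size]; returns one attention logit per example
    intent_size = t_intent.get_shape().as_list()[-1]
    with tf.variable_scope('attention_intent', reuse=tf.AUTO_REUSE):
        if attention_type == 'dot':
            # w * [I_u_t, I_r_k], where w is [2*intent_size, 1]
            concat = tf.concat([t_intent, r_intent], axis=-1)
            logits = tf.layers.dense(concat, 1, use_bias=False, name='dot_w')
        elif attention_type == 'bilinear':
            # I_u_t' * W * I_r_k, where W is [intent_size, intent_size]
            W = tf.get_variable('bilinear_w', [intent_size, intent_size], tf.float32)
            logits = tf.reduce_sum(tf.matmul(t_intent, W) * r_intent,
                                   axis=-1, keepdims=True)
        else:  # 'outprod'
            # flatten(I_u_t * I_r_k') to [intent_size * intent_size], then a linear map
            outer = tf.einsum('bi,bj->bij', t_intent, r_intent)
            flat = tf.reshape(outer, [-1, intent_size * intent_size])
            logits = tf.layers.dense(flat, 1, use_bias=False, name='outprod_w')
    return tf.squeeze(logits, axis=-1)  # [batch]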
Example 4
def self_cross_attention_block(config, Hu, every_turn_len, Hr, response_len):
    """

    :param config:
    :param Hu: shape = (batch_size, max_turn_num, sentence_len, emb_size)
    :param every_turn_len: shape = (batch_size, max_turn_num )
    :param Hr: shape = (batch_size, sentence_len, emb_size)
    :param response_len: shape = (batch_size)
    :return:
    """

    if config['is_positional'] and config['stack_num'] > 0:
        with tf.variable_scope('positional', reuse=tf.AUTO_REUSE):
            Hr = op.positional_encoding_vector(Hr, max_timescale=10)
    Hr_stack = [Hr]

    for index in range(config['stack_num']):
        with tf.variable_scope('self_stack_' + str(index),
                               reuse=tf.AUTO_REUSE):
            # Hr.shape = (batch_size, max_turn_len, emb_size)
            Hr = layers.block(Hr,
                              Hr,
                              Hr,
                              Q_lengths=response_len,
                              K_lengths=response_len)
            Hr_stack.append(Hr)

    # context part
    # a list of length max_turn_num, every element is a tensor with shape [batch, max_turn_len, emb_size]
    list_turn_t = tf.unstack(Hu, axis=1)
    list_turn_length = tf.unstack(every_turn_len, axis=1)

    sim_turns = []
    # for every Hu calculate matching vector
    for Hu, t_turn_length in zip(list_turn_t, list_turn_length):
        if config['is_positional'] and config['stack_num'] > 0:
            with tf.variable_scope('positional', reuse=tf.AUTO_REUSE):
                Hu = op.positional_encoding_vector(Hu, max_timescale=10)
        Hu_stack = [Hu]

        for index in range(config['stack_num']):
            with tf.variable_scope('self_stack_' + str(index),
                                   reuse=tf.AUTO_REUSE):
                Hu = layers.block(Hu,
                                  Hu,
                                  Hu,
                                  Q_lengths=t_turn_length,
                                  K_lengths=t_turn_length)

                Hu_stack.append(Hu)

        r_a_t_stack = []
        t_a_r_stack = []
        for index in range(config['stack_num'] + 1):

            with tf.variable_scope('t_attend_r_' + str(index),
                                   reuse=tf.AUTO_REUSE):
                try:
                    t_a_r = layers.block(Hu_stack[index],
                                         Hr_stack[index],
                                         Hr_stack[index],
                                         Q_lengths=t_turn_length,
                                         K_lengths=response_len)
                except ValueError:
                    tf.get_variable_scope().reuse_variables()
                    t_a_r = layers.block(Hu_stack[index],
                                         Hr_stack[index],
                                         Hr_stack[index],
                                         Q_lengths=t_turn_length,
                                         K_lengths=response_len)

            with tf.variable_scope('r_attend_t_' + str(index),
                                   reuse=tf.AUTO_REUSE):
                try:
                    r_a_t = layers.block(Hr_stack[index],
                                         Hu_stack[index],
                                         Hu_stack[index],
                                         Q_lengths=response_len,
                                         K_lengths=t_turn_length)
                except ValueError:
                    tf.get_variable_scope().reuse_variables()
                    r_a_t = layers.block(Hr_stack[index],
                                         Hu_stack[index],
                                         Hu_stack[index],
                                         Q_lengths=response_len,
                                         K_lengths=t_turn_length)

            t_a_r_stack.append(t_a_r)
            r_a_t_stack.append(r_a_t)

        t_a_r_stack.extend(Hu_stack)
        r_a_t_stack.extend(Hr_stack)

        t_a_r = tf.stack(t_a_r_stack, axis=-1)
        r_a_t = tf.stack(r_a_t_stack, axis=-1)

        # calculate similarity matrix
        with tf.variable_scope('similarity', reuse=tf.AUTO_REUSE):
            # sim shape [batch, max_turn_len, max_turn_len, 2*(stack_num+1)]
            # divide sqrt(200) to prevent gradient explosion
            sim = tf.einsum('biks,bjks->bijs', t_a_r, r_a_t) / tf.sqrt(200.0)

        sim_turns.append(sim)

    # cnn and aggregation
    sim = tf.stack(sim_turns, axis=1)
    print('sim shape: %s' % sim.shape)
    with tf.variable_scope('cnn_aggregation', reuse=tf.AUTO_REUSE):
        final_info = layers.CNN_3d(sim, 32, 16)

    with tf.variable_scope('linear', reuse=tf.AUTO_REUSE):
        W = tf.get_variable(name='weights',
                            shape=[final_info.shape[-1], 1],
                            initializer=tf.orthogonal_initializer())
        bias = tf.get_variable(name='bias',
                               shape=[1],
                               initializer=tf.zeros_initializer())

    logits = tf.reshape(tf.matmul(final_info, W) + bias, [-1])

    return logits
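
A hypothetical caller for `self_cross_attention_block` is sketched below: the config keys and placeholder shapes mirror the ones used in the other snippets, and the sigmoid cross-entropy at the end is only an illustrative choice (the other examples use `layers.loss` instead).

import tensorflow as tf

config = {'is_positional': True, 'stack_num': 5, 'batch_size': 32,
          'max_turn_num': 9, 'max_turn_len': 50, 'emb_size': 200,
          'vocab_size': 50000}

word_embedding = tf.get_variable(
    'word_embedding', [config['vocab_size'] + 1, config['emb_size']], tf.float32)

turns = tf.placeholder(tf.int32, [config['batch_size'], config['max_turn_num'],
                                  config['max_turn_len']])
every_turn_len = tf.placeholder(tf.int32,
                                [config['batch_size'], config['max_turn_num']])
response = tf.placeholder(tf.int32, [config['batch_size'], config['max_turn_len']])
response_len = tf.placeholder(tf.int32, [config['batch_size']])
label = tf.placeholder(tf.float32, [config['batch_size']])

Hu = tf.nn.embedding_lookup(word_embedding, turns)     # [batch, max_turn_num, max_turn_len, emb]
Hr = tf.nn.embedding_lookup(word_embedding, response)  # [batch, max_turn_len, emb]

logits = self_cross_attention_block(config, Hu, every_turn_len, Hr, response_len)
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=logits))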
Example 5
File: net.py Project: kifish/IACMN
    def build_graph(self):
        with self._graph.as_default():
            if self._conf['rand_seed'] is not None:
                rand_seed = self._conf['rand_seed']
                tf.set_random_seed(rand_seed)
                print('set tf random seed: %s' % self._conf['rand_seed'])

            #word embedding
            with tf.device('/cpu:0'), tf.name_scope("embedding"):
                self._word_embedding = tf.get_variable(
                    'word_embedding',
                    shape=(self._conf['vocab_size'], self._conf['emb_size']),
                    dtype=tf.float32,
                    trainable=False)

                self.emb_placeholder = tf.placeholder(
                    tf.float32,
                    shape=[self._conf['vocab_size'], self._conf['emb_size']])

                self.emb_init = self._word_embedding.assign(
                    self.emb_placeholder)

            #define placeholders
            self.turns = tf.placeholder(  # context data
                tf.int32,
                shape=[
                    None, self._conf["max_turn_num"],
                    self._conf["max_turn_len"]
                ])

            self.tt_turns_len = tf.placeholder(  # utterance num of context
                tf.int32, shape=[None])

            self.every_turn_len = tf.placeholder(  # length of each utterance in context
                tf.int32,
                shape=[None, self._conf["max_turn_num"]])

            self.response = tf.placeholder(  # response data
                tf.int32,
                shape=[None, self._conf["max_turn_len"]])

            self.response_len = tf.placeholder(  # response len
                tf.int32, shape=[None])

            self.label = tf.placeholder(  # scalar label
                tf.float32, shape=[None])

            self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                    name="dropout_keep_prob")

            #define operations
            #build response embedding
            Hr = tf.nn.embedding_lookup(self._word_embedding, self.response)
            Hr = tf.nn.dropout(Hr, self.dropout_keep_prob)

            if self._conf['is_positional']:
                with tf.variable_scope('positional'):
                    Hr = op.positional_encoding_vector(Hr, max_timescale=10)

            with tf.variable_scope('attention_cnn_block'):
                hr_conv_list = layers.agdr_block(
                    Hr, self._conf['repeat_times'],
                    self._conf['delation_list'],
                    self._conf['dcnn_filter_width'],
                    self._conf['dcnn_channel'], self.dropout_keep_prob)

            list_turn_t = tf.unstack(self.turns, axis=1)
            list_turn_length = tf.unstack(self.every_turn_len, axis=1)

            reuse = None
            sim_turns = []
            #for every turn_t, build embedding and calculate matching vector
            for turn_t, t_turn_length in zip(list_turn_t, list_turn_length):

                Hu = tf.nn.embedding_lookup(self._word_embedding, turn_t)
                Hu = tf.nn.dropout(Hu, self.dropout_keep_prob)

                if self._conf['is_positional']:
                    with tf.variable_scope('positional', reuse=True):
                        Hu = op.positional_encoding_vector(Hu,
                                                           max_timescale=10)

                # multi-level sim matrix of response and each utterance
                sim_matrix = [layers.Word_Sim(Hr, Hu)]

                with tf.variable_scope('attention_cnn_block', reuse=True):
                    hu_conv_list = layers.agdr_block(
                        Hu, self._conf['repeat_times'],
                        self._conf['delation_list'],
                        self._conf['dcnn_filter_width'],
                        self._conf['dcnn_channel'], self.dropout_keep_prob)

                for index in range(len(hu_conv_list)):
                    with tf.variable_scope('segment_sim'):
                        sim_matrix.append(
                            layers.Word_Sim(hr_conv_list[index],
                                            hu_conv_list[index]))

                sim_matrix = tf.stack(sim_matrix,
                                      axis=-1,
                                      name='one_matrix_stack')

                with tf.variable_scope('cnn_aggregation', reuse=tf.AUTO_REUSE):
                    matching_vector = layers.CNN_2d(sim_matrix, 32, 16,
                                                    self.dropout_keep_prob)
                if not reuse:
                    reuse = True

                sim_turns.append(matching_vector)

            #aggregation with a gru
            sim = tf.stack(sim_turns, axis=1, name='matching_stack')

            with tf.variable_scope("sent_rnn"):
                sent_rnn_outputs, _ = layers.bigru_sequence(
                    sim, 64, None, self.dropout_keep_prob)  # TODO:CHECK

            # attention at sentence level:
            sent_atten_inputs = tf.concat(sent_rnn_outputs, 2)

            with tf.variable_scope("sent_atten"):
                rev_outs, alphas_sents = layers.intro_attention(
                    sent_atten_inputs, 50)

            #loss and train
            with tf.variable_scope('loss'):
                self.loss, self.logits = layers.loss(rev_outs,
                                                     self.label,
                                                     is_clip=True)

                self.global_step = tf.Variable(0, trainable=False)
                initial_learning_rate = self._conf['learning_rate']
                self.learning_rate = tf.train.exponential_decay(
                    initial_learning_rate,
                    global_step=self.global_step,
                    decay_steps=5000,
                    decay_rate=0.96,
                    staircase=True)

                Optimizer = tf.train.AdamOptimizer(self.learning_rate)
                self.optimizer = Optimizer.minimize(
                    self.loss, global_step=self.global_step)

                self.init = tf.global_variables_initializer()
                self.saver = tf.train.Saver(
                    max_to_keep=self._conf["max_to_keep"])
                self.all_variables = tf.global_variables()
                self.all_operations = self._graph.get_operations()
                self.grads_and_vars = Optimizer.compute_gradients(self.loss)

                for grad, var in self.grads_and_vars:
                    if grad is None:
                        print(var)

                self.capped_gvs = [(tf.clip_by_value(grad, -1, 1), var)
                                   for grad, var in self.grads_and_vars]
                self.g_updates = Optimizer.apply_gradients(
                    self.capped_gvs, global_step=self.global_step)

            # summary
            grad_summaries = []
            for g, v in self.grads_and_vars:
                if g is not None:
                    grad_hist_summary = tf.summary.histogram(
                        "gradient/{}/hist".format(v.name), g)
                    sparsity_summary = tf.summary.scalar(
                        "gradient/{}/sparsity".format(v.name),
                        tf.nn.zero_fraction(g))
                    grad_summaries.append(grad_hist_summary)
                    grad_summaries.append(sparsity_summary)

            grad_summaries_merged = tf.summary.merge(grad_summaries)

            logit_summary = tf.summary.histogram("{}".format(self.logits.name),
                                                 self.logits)

            # Loss Summaries
            loss_summary = tf.summary.scalar("loss", self.loss)
            # Train, Dev Summaries
            self.train_summary_op = tf.summary.merge(
                [loss_summary, logit_summary, grad_summaries_merged])
            self.dev_summary_op = tf.summary.merge([
                loss_summary,
            ])

        return self._graph
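
Driving the graph above could look roughly like the sketch below; `net` (an instance of the class containing build_graph), `pretrained_emb` (a [vocab_size, emb_size] numpy array), and `batch` (a dict of padded numpy arrays) are all hypothetical names used only for this illustration.

import tensorflow as tf

graph = net.build_graph()
with tf.Session(graph=graph) as sess:
    sess.run(net.init)
    # load pre-trained vectors into the non-trainable embedding table
    sess.run(net.emb_init, feed_dict={net.emb_placeholder: pretrained_emb})

    feed = {
        net.turns: batch['turns'],                    # [batch, max_turn_num, max_turn_len]
        net.tt_turns_len: batch['tt_turns_len'],      # [batch]
        net.every_turn_len: batch['every_turn_len'],  # [batch, max_turn_num]
        net.response: batch['response'],              # [batch, max_turn_len]
        net.response_len: batch['response_len'],      # [batch]
        net.label: batch['label'],                    # [batch]
        net.dropout_keep_prob: 0.8,
    }
    _, step, loss = sess.run([net.g_updates, net.global_step, net.loss],
                             feed_dict=feed)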
def cc_model(input_x, input_x_mask, input_x_len, input_x2, input_x_mask2, input_x_len2, word_emb, conf, con_c):


    #a list of length max_turn_num, every element is a tensor with shape [batch, max_turn_len]
    list_turn_t1 = tf.unstack(input_x, axis=1) 
    list_turn_length1 = tf.unstack(input_x_len, axis=1)
    list_turn_length1 = [tf.sequence_mask(i, conf["max_turn_len"]) for i in list_turn_length1]
    list_turn_length1 = [tf.cast(i, tf.float32) for i in list_turn_length1]

    list_turn_t2 = tf.unstack(input_x2, axis=1) 
    list_turn_length2 = tf.unstack(input_x_len2, axis=1)
    list_turn_length2 = [tf.sequence_mask(i, conf["max_turn_len"]) for i in list_turn_length2]
    list_turn_length2 = [tf.cast(i, tf.float32) for i in list_turn_length2]

    if con_c:
        list_turn_t1 = tf.reshape(input_x, [conf["batch_size"], conf["max_turn_num"]*conf["max_turn_len"]])
        list_turn_t1 = [list_turn_t1]
        list_turn_t2 = tf.reshape(input_x2, [conf["batch_size"], conf["max_turn_num"]*conf["max_turn_len"]])
        list_turn_t2 = [list_turn_t2]
        list_turn_length1 = tf.cast(tf.sequence_mask(input_x_len, conf["max_turn_len"]), tf.float32)
        list_turn_length1 = tf.reshape(list_turn_length1, [conf["batch_size"], conf["max_turn_num"]*conf["max_turn_len"]])
        list_turn_length1 = [list_turn_length1]
        list_turn_length2 = tf.cast(tf.sequence_mask(input_x_len2, conf["max_turn_len"]), tf.float32)
        list_turn_length2 = tf.reshape(list_turn_length2, [conf["batch_size"], conf["max_turn_num"]*conf["max_turn_len"]])
        list_turn_length2 = [list_turn_length2]



    #for every turn_t calculate matching vector
    trans_u1, trans_u2 = [], []
    for turn_t, t_turn_length in zip(list_turn_t1, list_turn_length1):
        Hu = tf.nn.embedding_lookup(word_emb, turn_t) #[batch, max_turn_len, emb_size]
        #Hu = turn_t
        if conf['is_positional'] and conf['stack_num'] > 0:
            with tf.variable_scope('positional_', reuse=tf.AUTO_REUSE):
                Hu = op.positional_encoding_vector(Hu, max_timescale=10)
        for index in range(conf['stack_num']):
            with tf.variable_scope('self_stack_cc' + str(index), reuse=tf.AUTO_REUSE):
                Hu = layers.block(
                    Hu, Hu, Hu,
                    Q_lengths=t_turn_length, K_lengths=t_turn_length, input_mask=True)
        trans_u1.append(Hu)

    for turn_r, r_turn_length in zip(list_turn_t2, list_turn_length2):
        Hu = tf.nn.embedding_lookup(word_emb, turn_r) #[batch, max_turn_len, emb_size]
        #Hu = turn_r
        if conf['is_positional'] and conf['stack_num'] > 0:
            with tf.variable_scope('positional_', reuse=tf.AUTO_REUSE):
                Hu = op.positional_encoding_vector(Hu, max_timescale=10)
        for index in range(conf['stack_num']):
            with tf.variable_scope('self_stack_cc' + str(index), reuse=tf.AUTO_REUSE):
                Hu = layers.block(
                    Hu, Hu, Hu,
                    Q_lengths=r_turn_length, K_lengths=r_turn_length, input_mask=True)
        trans_u2.append(Hu)

    final_info_all = []
    sim_turns_all = []
    for t_index, (turn_t, t_turn_length, Hu) in enumerate(zip(list_turn_t1, list_turn_length1, trans_u1)):
        sim_turns = []
        for r_index, (turn_r, r_turn_length, Hr) in enumerate(zip(list_turn_t2, list_turn_length2, trans_u2)):

            with tf.variable_scope('u_attentd_r_' + str(index)):
                try:
                    u_a_r = layers.block(
                        Hu, Hr, Hr,
                        Q_lengths=t_turn_length, K_lengths=r_turn_length, input_mask=True)
                except ValueError:
                    tf.get_variable_scope().reuse_variables()
                    u_a_r = layers.block(
                        Hu, Hr, Hr,
                        Q_lengths=t_turn_length, K_lengths=r_turn_length, input_mask=True)
                    

            with tf.variable_scope('r_attend_u_' + str(index)):
                try:
                    r_a_u = layers.block(
                        Hr, Hu, Hu,
                        Q_lengths=r_turn_length, K_lengths=t_turn_length, input_mask=True)
                except ValueError:
                    tf.get_variable_scope().reuse_variables()
                    r_a_u = layers.block(
                        Hr, Hu, Hu,
                        Q_lengths=r_turn_length, K_lengths=t_turn_length, input_mask=True)
        
            # u_a_r batch_size turn emb
            u_a_r = tf.stack([u_a_r, Hu], axis=-1)
            r_a_u = tf.stack([r_a_u, Hr], axis=-1)
        
            #calculate similarity matrix
            with tf.variable_scope('similarity', reuse=tf.AUTO_REUSE):
                #sim shape [batch, max_turn_len, max_turn_len, 2]
                sim = tf.einsum('biks,bjks->bijs', r_a_u, u_a_r) / tf.sqrt(200.0)
                sim = layers.CNN_FZX(sim)
            final_info_all.append(sim)

    att_weight_print = None
    if not con_c:
        # final_info_all
        final_info_all = tf.stack(final_info_all, axis=1)  # 100 9 144
        max_nei = 5
        gcn_size = conf["max_turn_num"]*conf["max_turn_num"]
        turn_size = conf["max_turn_num"]
        m1 = [ [] for i in range(gcn_size)]
        m_pos = [ [] for i in range(gcn_size)]
        m1_len = [ 0 for i in range(gcn_size)]
        for i in range(turn_size):
            for j in range(turn_size):
                cur_index = i*turn_size+j
                m1[cur_index].append(cur_index)
                m_pos[cur_index].extend([i,j])
                if cur_index%turn_size!=0:
                    m1[cur_index].append(cur_index-1)
                    m_pos[cur_index].extend([i-1,j])
                if cur_index%turn_size!=turn_size-1:
                    m1[cur_index].append(cur_index+1)
                    m_pos[cur_index].extend([i+1,j])
                if i!=0:
                    m1[cur_index].append(cur_index-turn_size)
                    m_pos[cur_index].extend([i,j-1])
                if i!=turn_size-1:
                    m1[cur_index].append(cur_index+turn_size)
                    m_pos[cur_index].extend([i,j+1])
                m1_len[cur_index] = len(m1[cur_index])
                if m1_len[cur_index]<max_nei:
                    m1[cur_index].extend([cur_index for k in range(max_nei-m1_len[cur_index])])
                    for k in range(max_nei-m1_len[cur_index]): m_pos[cur_index].extend([i,j])
        # m1 25 5
        # m1_len 25

        m1 = tf.constant(m1, dtype=tf.int32) # 25 5
        m1_len = tf.constant(m1_len, dtype=tf.int32)
        m_pos = tf.constant(m_pos, dtype=tf.int32)
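        # e.g. with max_turn_num = 3 (turn_size = 3, gcn_size = 9, max_nei = 5):
        #   m1[0]     = [0, 1, 3, 0, 0]  # self, right, down, then padded with self
        #   m1_len[0] = 3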

        def gan(input_m, adjm, adjm_len, adjm_pos, gcn_size, turn_size, max_nei):
            #return input_m
            batch_size_gnn = tf.shape(input_m)[0]
            mask_value = tf.cast(tf.sequence_mask(adjm_len, max_nei), tf.float32) # 25 5
            res_all = []
            for gan_index in range(4):
                with tf.variable_scope('gan_layer'+str(gan_index), reuse=tf.AUTO_REUSE):
                    role_emb1 = tf.get_variable(name="gnn_role_emb1", shape=[turn_size, conf["role_dim"]], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=1))
                    role_emb2 = tf.get_variable(name="gnn_role_emb2", shape=[turn_size, conf["role_dim"]], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=1))

                    input_m_exp = tf.expand_dims(input_m, axis=2) # bs 25 1 144
                    input_m_exp = tf.tile(input_m_exp, [1, 1, max_nei, 1]) # bs 25 5 144

                    nei_rep = tf.gather(input_m, adjm, axis=1) # bs 25*5 144
                    nei_rep = tf.reshape(nei_rep, [tf.shape(input_m)[0], gcn_size, max_nei, -1]) # bs 25 5 144

                    att1 = tf.layers.dense(nei_rep, 128, kernel_initializer=tf.contrib.layers.xavier_initializer(), name="gcn") # bs 25 5 128
                    att2 = tf.layers.dense(input_m_exp, 128, kernel_initializer=tf.contrib.layers.xavier_initializer(), name="gcn") # bs 25 5 128


                    # Role/position embeddings for the centre node (entries 0/1 of adjm_pos)
                    # and for every neighbour (even/odd entries); these feed only the
                    # commented-out attention variant below.
                    pos_index11 = tf.gather(adjm_pos, [0, ], axis=1)
                    pos_index12 = tf.gather(adjm_pos, [1, ], axis=1)
                    pos_index11 = tf.tile(pos_index11, [1, max_nei])
                    pos_index12 = tf.tile(pos_index12, [1, max_nei])

                    pos_index21 = tf.gather(adjm_pos, [0, 2, 4, 6, 8], axis=1)
                    pos_index22 = tf.gather(adjm_pos, [1, 3, 5, 7, 9], axis=1)

                    pos_index11 = tf.gather(role_emb1, pos_index11)  # [25, 5, role_dim]
                    pos_index12 = tf.gather(role_emb2, pos_index12)  # [25, 5, role_dim]
                    pos_index21 = tf.gather(role_emb1, pos_index21)  # [25, 5, role_dim]
                    pos_index22 = tf.gather(role_emb2, pos_index22)  # [25, 5, role_dim]

                    pos_index11 = tf.tile(tf.expand_dims(pos_index11, axis=0), [batch_size_gnn,1,1,1])
                    pos_index12 = tf.tile(tf.expand_dims(pos_index12, axis=0), [batch_size_gnn,1,1,1])
                    pos_index21 = tf.tile(tf.expand_dims(pos_index21, axis=0), [batch_size_gnn,1,1,1])
                    pos_index22 = tf.tile(tf.expand_dims(pos_index22, axis=0), [batch_size_gnn,1,1,1])


                    att = tf.concat([att1, att2], axis=-1)
                    #att = tf.concat([att1, att2, pos_index11, pos_index12, pos_index21, pos_index22], axis=-1)
                    att = tf.layers.dense(att, 1, kernel_initializer=tf.contrib.layers.xavier_initializer(), name="gcna")  # [bs, 25, 5, 1]
                    att = tf.reshape(att, [-1, gcn_size, max_nei])
                    att = tf.nn.leaky_relu(att)  # [bs, 25, 5]

                    # Zero out the padded neighbour slots both before and after the softmax.
                    att = att * tf.expand_dims(mask_value, axis=0)
                    att = tf.nn.softmax(att, axis=2)  # [bs, 25, 5]
                    att = att * tf.expand_dims(mask_value, axis=0)

                    # Shared "gcnl" value projection for the neighbours and the centre node.
                    nei_rep2 = tf.layers.dense(nei_rep, 128, kernel_initializer=tf.contrib.layers.xavier_initializer(), name="gcnl")   # [bs, 25, 5, 128]
                    nei_rep11 = tf.layers.dense(input_m, 128, kernel_initializer=tf.contrib.layers.xavier_initializer(), name="gcnl")  # [bs, 25, 128]
                    nei_rep2 = nei_rep2 * tf.expand_dims(tf.expand_dims(mask_value, axis=0), axis=-1)

                    # Attention-weighted aggregation over the neighbours.
                    res = tf.einsum('bdik,bdi->bdk', nei_rep2, att)  # [bs, 25, 128]

                    # Gate computed for inspection/printing only; it does not modulate res.
                    att_input = res + nei_rep11
                    att_out = tf.layers.dense(att_input, 1, kernel_initializer=tf.contrib.layers.xavier_initializer(), name="att" + str(gan_index))
                    att_out = tf.nn.sigmoid(att_out)
                    print_weight = att_out

                    # Residual connection; the updated features feed the next layer.
                    res = res + nei_rep11
                    input_m = res
                    res_all.append(res)
            res_all = tf.concat(res_all, axis=-1)

            return res_all, print_weight
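
        # In equations, each gan layer performs a GAT-style update for node v with
        # padded neighbour set N(v) taken from m1:
        #   e_{vu}     = LeakyReLU(a^T [W h_u ; W h_v])         ("gcn" + "gcna")
        #   alpha_{vu} = softmax_u(e_{vu})    (padded slots zeroed before/after)
        #   h'_v       = sum_u alpha_{vu} (V h_u) + V h_v       ("gcnl" + residual)
        # The outputs of the 4 layers are concatenated along the feature axis.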

        gan_res, att_weight_print = gan(final_info_all, m1, m1_len, m_pos, gcn_size, turn_size, max_nei)
        final_info_all = gan_res
        final_info_role = []

        # One learned role embedding per entry of list_turn_t1 / list_turn_t2; their
        # concatenation tags each (t1, t2) pair for the attention pooling below.
        role_emb1 = tf.get_variable(name="role_emb1", shape=[len(list_turn_t1), conf["role_dim"]], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=1))
        role_emb2 = tf.get_variable(name="role_emb2", shape=[len(list_turn_t2), conf["role_dim"]], dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0, stddev=1))
        for i, ii in enumerate(list_turn_t1):
            for j, jj in enumerate(list_turn_t2):
                role_con = tf.concat([role_emb1[i], role_emb2[j]], axis=0)
                final_info_role.append(role_con)
        final_info_role = tf.stack(final_info_role, axis=0)   # [num_pairs, 2*role_dim]
        final_info_role = tf.expand_dims(final_info_role, 0)  # [1, num_pairs, 2*role_dim]
        final_info_role = tf.tile(final_info_role, [tf.shape(final_info_all)[0], 1, 1], name="role_con")
        final_info_all_att = tf.concat([final_info_role, final_info_all], axis=2)

        # Score each (role, matching) vector with a small dense layer and
        # normalise the scores over the pairs.
        final_info_all_att = tf.reshape(final_info_all_att, [-1, final_info_all_att.get_shape()[-1]])  # [bs*num_pairs, role+matching dims]
        final_info_all_att = tf.layers.dense(final_info_all_att, 1, kernel_initializer=tf.contrib.layers.xavier_initializer())
        final_info_all_att = tf.squeeze(final_info_all_att, [1])
        final_info_all_att = tf.reshape(final_info_all_att, [-1, final_info_all.get_shape()[1]])  # [bs, num_pairs]
        final_info_all_att = tf.nn.softmax(final_info_all_att, axis=1)

        # Pool the per-pair vectors three ways and concatenate: mean, max,
        # and the attention-weighted sum.
        final_info_all_att = tf.expand_dims(final_info_all_att, -1)
        final_info_all_max = tf.reduce_max(final_info_all, axis=1)
        final_info_all_mean = tf.reduce_mean(final_info_all, axis=1)
        final_info_all = final_info_all * final_info_all_att
        final_info_all = tf.reduce_sum(final_info_all, axis=1)

        final_info_all = tf.concat([final_info_all_mean, final_info_all_max, final_info_all], axis=1)
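        # final_info_all is now [batch, 3*D] with D = 4 gan layers x 128 = 512:
        # the mean-, max- and attention-pooled copies concatenated.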

    else:
        final_info_all = final_info_all[0]
    return final_info_all, att_weight_print
def dam_model(input_x,
              input_x_mask,
              input_y,
              input_y_mask,
              word_emb,
              keep_rate,
              conf,
              x_len=None,
              y_len=None):
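    """DAM-style matching between a multi-turn context and a response.

    input_x: [batch, max_turn_num, max_turn_len] context word ids; input_y:
    [batch, max_turn_len] response word ids; x_len / y_len hold the per-turn
    and response lengths used for attention masking. Returns one aggregated
    matching vector per example. (input_x_mask, input_y_mask and keep_rate are
    accepted for interface compatibility but are not used in this function.)
    """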

    Hr = tf.nn.embedding_lookup(word_emb, input_y)

    if conf['is_positional'] and conf['stack_num'] > 0:
        with tf.variable_scope('positional'):
            Hr = op.positional_encoding_vector(Hr, max_timescale=10)
    Hr_stack = [Hr]

    for index in range(conf['stack_num']):
        with tf.variable_scope('self_stack_cr_' + str(index)):
            Hr = layers.block(Hr, Hr, Hr, Q_lengths=y_len, K_lengths=y_len)
            Hr_stack.append(Hr)

    #context part
    #a list of length max_turn_num, every element is a tensor with shape [batch, max_turn_len]
    list_turn_t = tf.unstack(input_x, axis=1)
    list_turn_length = tf.unstack(x_len, axis=1)

    sim_turns = []
    #for every turn_t calculate matching vector
    for turn_t, t_turn_length in zip(list_turn_t, list_turn_length):
        Hu = tf.nn.embedding_lookup(word_emb,
                                    turn_t)  #[batch, max_turn_len, emb_size]

        if conf['is_positional'] and conf['stack_num'] > 0:
            with tf.variable_scope('positional', reuse=True):
                Hu = op.positional_encoding_vector(Hu, max_timescale=10)
        Hu_stack = [Hu]

        for index in range(conf['stack_num']):

            with tf.variable_scope('self_stack_cr_' + str(index), reuse=True):
                Hu = layers.block(Hu,
                                  Hu,
                                  Hu,
                                  Q_lengths=t_turn_length,
                                  K_lengths=t_turn_length)

                Hu_stack.append(Hu)

        r_a_t_stack = []
        t_a_r_stack = []
        # Bidirectional cross-attention at every depth; a ValueError from
        # layers.block means the variables already exist (later turns), so
        # switch the scope to reuse and retry.
        for index in range(conf['stack_num'] + 1):

            with tf.variable_scope('t_attend_r_cr_' + str(index)):
                try:
                    t_a_r = layers.block(Hu_stack[index],
                                         Hr_stack[index],
                                         Hr_stack[index],
                                         Q_lengths=t_turn_length,
                                         K_lengths=y_len)
                except ValueError:
                    tf.get_variable_scope().reuse_variables()
                    t_a_r = layers.block(Hu_stack[index],
                                         Hr_stack[index],
                                         Hr_stack[index],
                                         Q_lengths=t_turn_length,
                                         K_lengths=y_len)

            with tf.variable_scope('r_attend_t_cr_' + str(index)):
                try:
                    r_a_t = layers.block(Hr_stack[index],
                                         Hu_stack[index],
                                         Hu_stack[index],
                                         Q_lengths=y_len,
                                         K_lengths=t_turn_length)
                except ValueError:
                    tf.get_variable_scope().reuse_variables()
                    r_a_t = layers.block(Hr_stack[index],
                                         Hu_stack[index],
                                         Hu_stack[index],
                                         Q_lengths=y_len,
                                         K_lengths=t_turn_length)

            t_a_r_stack.append(t_a_r)
            r_a_t_stack.append(r_a_t)

        t_a_r_stack.extend(Hu_stack)
        r_a_t_stack.extend(Hr_stack)

        t_a_r = tf.stack(t_a_r_stack, axis=-1)
        r_a_t = tf.stack(r_a_t_stack, axis=-1)
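        # t_a_r / r_a_t: [batch, max_turn_len, emb_size, 2*(stack_num+1)]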

        #calculate similarity matrix
        with tf.variable_scope('similarity'):
            # sim shape: [batch, max_turn_len, max_turn_len, 2*(stack_num+1)]
            # divide by sqrt(200) to prevent gradient explosion
            sim = tf.einsum('biks,bjks->bijs', t_a_r, r_a_t) / tf.sqrt(200.0)

        sim_turns.append(sim)

    #cnn and aggregation
    sim = tf.stack(sim_turns, axis=1)
    print('sim shape: %s' % sim.shape)
    with tf.variable_scope('cnn_aggregation'):
        final_info = layers.CNN_3d(sim, 32, 16)
        #for douban
        #final_info = layers.CNN_3d(sim, 16, 16)

    return final_info
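
# Note: dam_model mirrors the original DAM network: stacked self-attention over the
# response and every context turn, bidirectional cross-attention at every depth, a
# similarity cube stacked over turns, and a 3-D CNN (layers.CNN_3d) that reduces it
# to the final matching vector. A minimal, hypothetical scoring head on top of it
# (not necessarily the one used elsewhere in this repo) could look like:
#
#     final_info = dam_model(input_x, input_x_mask, input_y, input_y_mask,
#                            word_emb, keep_rate, conf, x_len=x_len, y_len=y_len)
#     logits = tf.squeeze(tf.layers.dense(final_info, 1), -1)            # [batch]
#     loss = tf.reduce_mean(
#         tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=logits))
#
# where y_true would be float labels in {0, 1} with shape [batch].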