    def generate_episode(self, memory, q_vec, fact_vecs, speaker_info, hop_index):
        """Generate episode by applying attention to current fact vectors through a modified GRU"""

        attentions = [tf.squeeze(
            self.get_attention(q_vec, memory, fv, speaker_info, bool(hop_index) or bool(i)), axis=1)  # reuse is false only for 0th hop, and first fv.
            for i, fv in enumerate(tf.unstack(fact_vecs, axis=1))]

        attentions = tf.transpose(tf.stack(attentions))
        self.attentions.append(attentions)
        attentions = tf.nn.softmax(attentions)
        attentions = tf.expand_dims(attentions, axis=-1)

        # print("hello")
        # print(fact_vecs, attentions)
        # print("done")
        if self.config.GRU == "true":
            reuse = hop_index > 0

            if self.config.speaker_gru is not None: 

                speaker_info_sentence = tf.expand_dims(tf.ones([self.max_sentences,1]), 1) * speaker_info
                speaker_info_sentence = tf.transpose(speaker_info_sentence, perm=[1,0,2])  # assigning the speaker to each sentence spoken
                print("speaker_info_sentence", speaker_info_sentence)

                # concatenate fact vectors and attentions for input into attGRU
                gru_inputs = tf.concat([fact_vecs, speaker_info_sentence, attentions], 2)
                # gru_inputs = tf.concat([fact_vecs, attentions], 2)

                with tf.variable_scope('attention_gru', reuse=reuse):
                    _, episode = tf.nn.dynamic_rnn(AttentionGRUCell(2*self.config.hidden_size),
                            gru_inputs,
                            dtype=np.float32,
                            sequence_length=self.input_len_placeholder
                    )
            else:
                # concatenate fact vectors and attentions for input into attGRU
                gru_inputs = tf.concat([fact_vecs, attentions], 2)

                with tf.variable_scope('attention_gru', reuse=reuse):
                    _, episode = tf.nn.dynamic_rnn(AttentionGRUCell(self.config.hidden_size),
                            gru_inputs,
                            dtype=np.float32,
                            sequence_length=self.input_len_placeholder
                    )
        else:
            # soft attention: the episode is the attention-weighted sum of the fact vectors
            fact_trans = tf.transpose(fact_vecs, perm=[0, 2, 1])               # [batch, hidden, num_sents]
            episode = tf.squeeze(tf.matmul(fact_trans, attentions), axis=-1)   # [batch, hidden]
        return episode
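
The non-GRU branch above reduces to plain soft-attention pooling over the facts. A minimal standalone sketch of that reduction, assuming fact vectors of shape [batch, num_sents, hidden] and softmaxed weights of shape [batch, num_sents, 1] (the helper name is illustrative, not from the code above):

import tensorflow as tf

def soft_attention_episode(fact_vecs, attentions):
    # Weighted sum over the sentence axis; equivalent to the transpose/matmul form above.
    return tf.reduce_sum(fact_vecs * attentions, axis=1)  # [batch, hidden]
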
Example #2

    def generate_episode(self, memory, q_vec, fact_vecs, hop_index):
        """Generate episode by applying attention to current fact vectors through a modified GRU"""

        attentions = [tf.squeeze(
            self.get_attention(q_vec, memory, fv, bool(hop_index) or bool(i)), axis=1)
            for i, fv in enumerate(tf.unstack(fact_vecs, axis=1))]

        attentions = tf.transpose(tf.stack(attentions))
        self.attentions.append(attentions)
        attentions = tf.nn.softmax(attentions)
        attentions = tf.expand_dims(attentions, axis=-1)

        reuse = hop_index > 0
        
        # concatenate fact vectors and attentions for input into attGRU
        gru_inputs = tf.concat([fact_vecs, attentions], 2)

        with tf.variable_scope('attention_gru', reuse=reuse):
            _, episode = tf.nn.dynamic_rnn(AttentionGRUCell(self.config.hidden_size),
                    gru_inputs,
                    dtype=np.float32,
                    sequence_length=self.input_len_placeholder
            )

        return episode
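
All of these variants delegate the episode computation to AttentionGRUCell, which is not shown here. A hedged sketch of the single step such a cell typically computes, in the spirit of the DMN+ attention-based GRU (weight names are hypothetical): the scalar attention weight, concatenated as the last input feature above, replaces the learned update gate.

import tensorflow as tf

def attention_gru_step(fact, g, h_prev, W_r, U_r, b_r, W_h, U_h, b_h):
    # Reset gate, as in a standard GRU.
    r = tf.sigmoid(tf.matmul(fact, W_r) + tf.matmul(h_prev, U_r) + b_r)
    # Candidate hidden state.
    h_tilde = tf.tanh(tf.matmul(fact, W_h) + r * tf.matmul(h_prev, U_h) + b_h)
    # The attention weight g gates the update instead of a learned update gate.
    return g * h_tilde + (1.0 - g) * h_prev
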
Example #3
    def generate_episode(self, memory, q_vec, fact_vecs, hop_index):
        attentions = [
            tf.squeeze(self.attention(q_vec, memory, fact_vec,
                                      bool(hop_index) or bool(i)),
                       axis=1)
            for i, fact_vec in enumerate(tf.unstack(fact_vecs, axis=1))
        ]

        attentions = tf.transpose(tf.stack(attentions))
        self.attentions.append(attentions)
        attentions = tf.nn.softmax(attentions)
        attentions = tf.expand_dims(attentions, axis=-1)

        reuse = hop_index > 0

        gru_inputs = tf.concat([fact_vecs, attentions], 2)

        with tf.variable_scope('attention_gru', reuse=reuse):
            _, episode = tf.nn.dynamic_rnn(
                AttentionGRUCell(self.config.hidden_size),
                gru_inputs,
                dtype=np.float32,
                sequence_length=self.input_len_placeholder)

        return episode
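
The attention scores come from self.attention / self.get_attention, also not shown in these examples. A hedged sketch in the spirit of the DMN+ two-layer scorer (the exact interaction features and layer sizes are assumptions):

def get_attention(self, q_vec, prev_memory, fact_vec, reuse):
    with tf.variable_scope("attention", reuse=reuse):
        # Interaction features between the fact, the question and the previous memory.
        features = tf.concat([fact_vec * q_vec,
                              fact_vec * prev_memory,
                              tf.abs(fact_vec - q_vec),
                              tf.abs(fact_vec - prev_memory)], 1)
        hidden = tf.layers.dense(features, self.config.hidden_size,
                                 activation=tf.nn.tanh, name="fc1")
        score = tf.layers.dense(hidden, 1, activation=None, name="fc2")
    return score  # [batch_size, 1]; squeezed and softmaxed by the callers above
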
Example #4
    def generate_episode(self, memory, q, facts, mask, hop_index):
        attentions = [
            tf.squeeze(self.get_attention(q, memory, fact,
                                          bool(hop_index) or bool(i)),
                       axis=1)
            for i, fact in enumerate(tf.unstack(facts, axis=1))
        ]  # list [num_max_sents] * [batch_size,]
        attentions = tf.transpose(tf.stack(attentions)) + (
            mask * -1e9)  # tensor [batch_size, num_max_sents]
        attentions = tf.nn.softmax(attentions)
        self.attentions.append(attentions)  # for visualization
        attentions = tf.expand_dims(
            attentions, axis=-1)  # tensor [batch_size, num_max_sents, 1]

        reuse = hop_index > 0

        gru_inputs = tf.concat(
            [facts, attentions],
            2)  # tensor [batch_size, num_max_sents, hid_size + 1]

        with tf.variable_scope("episode", reuse=reuse):
            _, episode = tf.nn.dynamic_rnn(cell=AttentionGRUCell(
                self.hid_size),
                                           inputs=gru_inputs,
                                           sequence_length=self.num_sents,
                                           dtype=tf.float32)
        return episode
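
Finally, a hedged usage sketch of the hop loop that typically drives generate_episode (the number of hops, the ReLU memory update, and the surrounding variable names are assumptions in the DMN+ style, not taken from the examples above):

prev_memory = q_vec
for hop in range(self.num_hops):
    episode = self.generate_episode(prev_memory, q_vec, facts, mask, hop)
    with tf.variable_scope("hop_%d" % hop):
        # Untied ReLU memory update per hop.
        prev_memory = tf.layers.dense(
            tf.concat([prev_memory, episode, q_vec], 1),
            self.hid_size, activation=tf.nn.relu)
output = prev_memory  # fed onward to the answer module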