def feed_previous_decode(feed_previous_bool):
    """Build the type-decoder sub-graph for one setting of ``feed_previous``.

    Ties the output projection to the type embedding matrix, embeds the
    decoder inputs, concatenates each embedded step with the sentence and
    mention states, and runs the attention decoder.

    Args:
        feed_previous_bool: boolean tensor — True at validation/test time
            (feed the previous prediction back in), False at training time.

    Returns:
        The list of decoder output tensors, one per decoding step.
    """
    # Project decoder weights/biases through the type embedding so the
    # output projection lives in type space.
    proj_w = tf.matmul(self.dec_weights, self.type_embedding,
                       transpose_b=True)
    proj_b = tf.matmul(tf.expand_dims(self.dec_biases, 0),
                       self.type_embedding, transpose_b=True)
    proj_b = array_ops.reshape(proj_b, [self.num_types])
    # embedded has shape self.max_steps_label * batch_size * self.num_type_dim
    embedded, loop_fn = seq2seq.get_decoder_embedding(
        self.decoder_input, self.num_types, self.num_type_dim,
        output_projection=(proj_w, proj_b),
        feed_previous=feed_previous_bool)
    # A list of self.max_steps_label tensors, each of shape
    # batch_size * (self.num_type_dim + self.hidden_size).
    decoder_in = self.dec_concat_ip(
        embedded, sentence_states, sentence_mention_states)
    outputs, _ = self.decode(
        decoder_in, loop_fn, init_state, sentence_states,
        sentence_outputs, sentence_mention_states,
        sentence_mention_outputs, scope, attn_len, attn_size,
        attn_t_given_context_per_level, attn_t_given_mention_per_level,
        initial_state_attention)
    return outputs
def feed_previous_decode(feed_previous_bool):
    """Build the text-decoder sub-graph for one setting of ``feed_previous``.

    Args:
        feed_previous_bool: boolean tensor — True at validation/test time
            (feed the previous prediction back in), False at training time.

    Returns:
        A max_len-sized list of decoder output tensors, each of shape
        batch_size * cell_size.
    """
    # embedded has shape max_len * batch_size * self.text_embedding_size;
    # (weights, biases) come from the enclosing scope.
    embedded, loop_fn = seq2seq.get_decoder_embedding(
        decoder_inputs, self.decoder_words, self.text_embedding_size,
        output_projection=(weights, biases),
        feed_previous=feed_previous_bool)
    # utterance_output has shape batch_size * cell_size; concatenate it
    # onto every embedded decoder step.
    decoder_in = self.get_dec_concat_ip(embedded, utterance_output)
    outputs, _ = self.decode(decoder_in, loop_fn, self.dec_cells_text,
                             init_state, utterance_output, scope,
                             attention_states)
    return outputs
def feed_previous_decode(feed_previous_bool):
    """Build the word-decoder sub-graph for one setting of ``feed_previous``.

    Args:
        feed_previous_bool: boolean tensor — True at validation/test time
            (feed the previous prediction back in), False at training time.

    Returns:
        The list of word-decoder output tensors, one per decoding step.
    """
    # (weight_word, bias_word) come from the enclosing scope and form the
    # output projection for the word vocabulary.
    embedded, loop_fn = seq2seq.get_decoder_embedding(
        decoder_inputs, self.decoder_words, self.text_embedding_size,
        output_projection=(weight_word, bias_word),
        feed_previous=feed_previous_bool)
    # Concatenate the utterance encoding onto each embedded decoder step.
    decoder_in = self.get_dec_concat_ip(embedded, utterance_outputs)
    outputs, _ = self.decode(decoder_in, loop_fn, self.dec_cells_text,
                             init_state, utterance_outputs, scope)
    return outputs
def feed_prev_decode(feed_previous_bool):
    """Build one of two separate decoder graphs keyed on ``feed_previous``.

    One graph is built for training (ground-truth inputs) and one for
    validation/testing (previous prediction fed back in).

    Args:
        feed_previous_bool: boolean tensor — True at validation/test time,
            False at training time.

    Returns:
        dec_output: a list of tensors, each of size batch_size*self.cell_size.
    """
    # See get_decoder_embedding in seq2seq.py; (weights, biases) come from
    # the enclosing scope.
    embedded, loop_fn = seq2seq.get_decoder_embedding(
        decoder_inputs, self.decoder_words, self.embedding_size,
        output_projection=(weights, biases),
        feed_previous=feed_previous_bool)
    # Concatenate the utterance encoding onto each embedded decoder step.
    decoder_in = self.get_dec_concat_ip(embedded, utterance_output)
    outputs, _ = self.decode(decoder_in, loop_fn, dec_cell, init_state,
                             utterance_output, scope)
    return outputs