Code example #1
    def decode_train(cell, embeddings, encoder_state, targets, targets_length, scope='decoder', reuse=None):
        """
        Args:
            cell: An RNNCell object
            embeddings: An embedding matrix with shape
                (vocab_size, word_dim)
            encoder_state: A tensor that contains the encoder state;
                its shape should match that of cell.zero_state
            targets: An int32 tensor with shape (batch, max_len), which
                contains word indices; should start and end with
                the proper <BOS> and <EOS> symbols
            targets_length: An int32 tensor with shape (batch,), which
                contains the length of each sample in a batch
            scope: A VariableScope object or a string which indicates
                the scope
            reuse: A boolean value or None which specifies whether to
                reuse variables already defined in the scope

        Returns:
            decoder_outputs, which is a float32
            (batch, max_len, cell.output_size) tensor that contains
            the cell's hidden state per time step
        """

        with tf.variable_scope(scope, initializer=tf.orthogonal_initializer(), reuse=reuse):
            decoder_fn = seq2seq.simple_decoder_fn_train(encoder_state=encoder_state)
            targets_embed = tf.nn.embedding_lookup(params=embeddings, ids=targets)
            decoder_outputs, _, _ = seq2seq.dynamic_rnn_decoder(cell=cell, decoder_fn=decoder_fn, inputs=targets_embed,
                sequence_length=targets_length, time_major=False, scope='rnn')
        return decoder_outputs
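
A minimal call sketch for the function above, treating decode_train as a standalone helper. The vocabulary size, cell type, and placeholder tensors are illustrative assumptions, not from the original project:

import tensorflow as tf

# Sketch only: TF 1.0-era API, hypothetical sizes.
vocab_size, word_dim, num_units = 10000, 128, 256

embeddings = tf.get_variable('embeddings', [vocab_size, word_dim])
cell = tf.contrib.rnn.GRUCell(num_units)

targets = tf.placeholder(tf.int32, [None, None], name='targets')
targets_length = tf.placeholder(tf.int32, [None], name='targets_length')

# encoder_state would normally come from an encoder RNN; a zero state of
# matching shape stands in for it here, satisfying the docstring's
# requirement that it match cell.zero_state.
encoder_state = cell.zero_state(tf.shape(targets)[0], tf.float32)

decoder_outputs = decode_train(cell, embeddings, encoder_state,
                               targets, targets_length)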
Code example #2
File: model.py Project: derekchen14/cs224n
  def decoder_components(self, stage, component_name, enc_state):

    if stage == "training":

      if toy:  # 'toy' is a flag defined elsewhere in the original project
        components = { "inputs": self.output_placeholder,
          "sequence_length": self.dec_seq_len }
      else:
        answers_batch_embedding = tf.nn.embedding_lookup(self.embedding_matrix, self.answer_ids)
        components = { "inputs": answers_batch_embedding,
          "sequence_length": self.dec_seq_len }
      if self.use_attention:
        keys, values, score_fn, construct_fn = prepare_attention(None,
            attention_option = "luong", num_units=self.n_cells, reuse=False)
        components["function"] = attention_decoder_fn_train(enc_state, keys,
            values, score_fn, construct_fn)
      else:
        components["function"] = seq2seq.simple_decoder_fn_train(enc_state)

    elif stage == "inference":
      output_fn, SOS_id, EOS_id = None, self.SOS_id, self.EOS_id
      components = { "inputs": None, "sequence_length": None }
      if self.use_attention:
        keys, values, score_fn, construct_fn = prepare_attention(None,
            attention_option = "luong", num_units=self.n_cells, reuse=False)
        components["function"] = attention_decoder_fn_inference(output_fn,
            enc_state, keys, values, score_fn, construct_fn, self.embedding_matrix,
            SOS_id, EOS_id, self.max_dec_len, self.vocab_size)
      else:
        components["function"] = seq2seq.simple_decoder_fn_inference(output_fn,
            enc_state, self.embedding_matrix, SOS_id, EOS_id,
              maximum_length=self.max_dec_len, num_decoder_symbols=self.vocab_size)

    return components[component_name]
Code example #3
 def decoder_train_layer(self, encoded_state, token_embedding):
     with tf.variable_scope('decoder'):
         dynamic_fn_train = seq2seq.simple_decoder_fn_train(encoded_state)
         decoder_outputs, state, context = seq2seq.dynamic_rnn_decoder(
             self.decoder_cell, dynamic_fn_train, token_embedding,
             self.sequence_lengths)
     return decoder_outputs
Code example #4
    def _init_decoder(self):
        """
            decoder cell.
            attention적용 시 결과가 좋지 않음.
        """
        with tf.variable_scope("Decoder") as scope:

            def output_fn(outputs):
                return tf.contrib.layers.linear(outputs,
                                                self.vocab_size,
                                                scope=scope)

            decoder_fn_train = seq2seq.simple_decoder_fn_train(
                encoder_state=self.encoder_state)
            decoder_fn_inference = seq2seq.simple_decoder_fn_inference(
                output_fn=output_fn,
                encoder_state=self.encoder_state,
                embeddings=self.embedding_matrix,
                start_of_sequence_id=self.EOS,
                end_of_sequence_id=self.EOS,
                maximum_length=self.len_max,
                num_decoder_symbols=self.vocab_size,
            )

            (self.decoder_outputs_train, self.decoder_state_train,
             self.decoder_context_state_train) = (seq2seq.dynamic_rnn_decoder(
                 cell=self.decoder_cell,
                 decoder_fn=decoder_fn_train,
                 inputs=self.decoder_train_inputs_embedded,
                 sequence_length=[
                     self.len_max for _ in range(self.batch_size)
                 ],
                 time_major=True,
                 scope=scope,
             ))

            self.decoder_logits_train = output_fn(self.decoder_outputs_train)
            self.decoder_prediction_train = tf.argmax(
                self.decoder_logits_train,
                axis=-1,
                name='decoder_prediction_train')

            scope.reuse_variables()

            (self.decoder_logits_inference, self.decoder_state_inference,
             self.decoder_context_state_inference) = (
                 seq2seq.dynamic_rnn_decoder(
                     cell=self.decoder_cell,
                     decoder_fn=decoder_fn_inference,
                     time_major=True,
                     scope=scope,
                 ))
            self.decoder_prediction_inference = tf.argmax(
                self.decoder_logits_inference,
                axis=-1,
                name='decoder_prediction_inference')
Code example #5
File: network3.py Project: shyamalschandra/pdf2latex
 def decoder(self):
     cell = self._rnn_cell()
     initial_state = cell.zero_state(self.model.batch_size,
                                     dtype=tf.float32)
     decoder_fn = seq2seq.simple_decoder_fn_train(initial_state)
     output = seq2seq.dynamic_rnn_decoder(cell,
                                          decoder_fn,
                                          self.convolution,
                                          self.sequence_lengths,
                                          time_major=True)
     return output[0]
Code example #6
 def decoder(self, encoder_state, inputs=None, is_train=True):
     '''
     Decoder.
     '''
     with tf.variable_scope("decoder") as scope:
         if is_train:
             decoder_fn = seq2seq.simple_decoder_fn_train(encoder_state)
             outputs, final_state, final_context_state = seq2seq.dynamic_rnn_decoder(
                 self.decoder_cell, decoder_fn=decoder_fn, inputs=inputs,
                 sequence_length=self.seq_len, time_major=False, scope=scope)
         else:
             tf.get_variable_scope().reuse_variables()
             # At decoding time, compute each word's probability from the
             # decoder embedding and decoder bias.
             output_fn = lambda x: tf.nn.softmax(
                 tf.matmul(x, self.dec_embedding, transpose_b=True) + self.dec_bias)
             decoder_fn = seq2seq.simple_decoder_fn_inference(
                 output_fn=output_fn, encoder_state=encoder_state,
                 embeddings=self.embedding, start_of_sequence_id=0,
                 end_of_sequence_id=0, maximum_length=self.subject_len,
                 num_decoder_symbols=self.vocab_size, dtype=tf.int32)
             outputs, final_state, final_context_state = seq2seq.dynamic_rnn_decoder(
                 self.decoder_cell, decoder_fn=decoder_fn, inputs=None,
                 sequence_length=self.seq_len, time_major=False, scope=scope)

     return outputs, final_state, final_context_state
Code example #7
    def _init_decoder(self):
        with tf.variable_scope("Decoder") as scope:
            def output_fn(outputs):
                self.test_outputs = outputs
                return tf.contrib.layers.linear(outputs, self.decoder_vocab_size, scope=scope)

            if not self.attention:
                decoder_fn_train = seq2seq.simple_decoder_fn_train(encoder_state=self.encoder_state)
                decoder_fn_inference = seq2seq.simple_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    embeddings=self.decoder_embedding_matrix,
                    start_of_sequence_id=self.EOS,
                    end_of_sequence_id=self.EOS,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length) + 100,
                    num_decoder_symbols=self.decoder_vocab_size,
                )
            else:

                # attention_states: size [batch_size, max_time, num_units]
                attention_states = tf.transpose(self.encoder_outputs, [1, 0, 2])

                (attention_keys,
                 attention_values,
                 attention_score_fn,
                 attention_construct_fn) = seq2seq.prepare_attention(
                    attention_states=attention_states,
                    attention_option="bahdanau",
                    num_units=self.decoder_hidden_units,
                )

                decoder_fn_train = seq2seq.attention_decoder_fn_train(
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    name='attention_decoder'
                )

                decoder_fn_inference = seq2seq.attention_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    embeddings=self.decoder_embedding_matrix,
                    start_of_sequence_id=self.EOS,
                    end_of_sequence_id=self.EOS,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length) + 100,
                    num_decoder_symbols=self.decoder_vocab_size,
                )

            (self.decoder_outputs_train,
             self.decoder_state_train,
             self.decoder_context_state_train) = (
                seq2seq.dynamic_rnn_decoder(
                    cell=self.decoder_cell,
                    decoder_fn=decoder_fn_train,
                    inputs=self.decoder_train_inputs_embedded,
                    sequence_length=self.decoder_train_length,
                    time_major=self.time_major,
                    scope=scope,
                )
            )

            self.decoder_logits_train = output_fn(self.decoder_outputs_train)
            self.decoder_prediction_train = tf.argmax(self.decoder_logits_train, axis=-1, name='decoder_prediction_train')

            scope.reuse_variables()

            (self.decoder_logits_inference,
             self.decoder_state_inference,
             self.decoder_context_state_inference) = (
                seq2seq.dynamic_rnn_decoder(
                    cell=self.decoder_cell,
                    decoder_fn=decoder_fn_inference,
                    time_major=self.time_major,
                    scope=scope,
                )
            )
            self.decoder_prediction_inference = tf.argmax(self.decoder_logits_inference, axis=-1, name='decoder_prediction_inference')
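
A natural next step, which code example #14 below actually takes, is to feed these training logits into seq2seq.sequence_loss. A hedged sketch continuing the method above, assuming time-major decoding; decoder_train_targets and loss_weights are assumed to be built elsewhere (code example #11 shows one way to construct both):

            # Sketch only: sequence_loss expects batch-major tensors
            # ([batch, time, vocab] logits, [batch, time] targets), so the
            # time-major decoder outputs are transposed first.
            logits = tf.transpose(self.decoder_logits_train, [1, 0, 2])
            targets = tf.transpose(self.decoder_train_targets, [1, 0])
            self.loss = seq2seq.sequence_loss(logits=logits,
                                              targets=targets,
                                              weights=self.loss_weights)
            self.train_op = tf.train.AdamOptimizer().minimize(self.loss)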
Code example #8
    def _init_decoder(self):
        with tf.variable_scope("Decoder") as scope:

            def output_fn(outputs):
                return tf.contrib.layers.linear(outputs,
                                                self.vocab_size,
                                                scope=scope)

            if not self.attention:
                decoder_fn_train = seq2seq.simple_decoder_fn_train(
                    encoder_state=self.encoder_state)
                decoder_fn_inference = seq2seq.simple_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    embeddings=self.embedding_matrix,
                    start_of_sequence_id=data_utils.GO_ID,
                    end_of_sequence_id=data_utils.EOS_ID,
                    maximum_length=FLAGS.max_inf_target_len,
                    num_decoder_symbols=self.vocab_size,
                )
            else:

                # attention_states: size [batch_size, max_time, num_units]
                attention_states = tf.transpose(self.encoder_outputs,
                                                [1, 0, 2])

                #attention_states = tf.zeros([batch_size, 1, self.decoder_hidden_units])

                (attention_keys, attention_values, attention_score_fn,
                 attention_construct_fn) = seq2seq.prepare_attention(
                     attention_states=attention_states,
                     attention_option="bahdanau",
                     num_units=self.decoder_hidden_units,
                 )

                decoder_fn_train = seq2seq.attention_decoder_fn_train(
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    name='attention_decoder')

                decoder_fn_inference = seq2seq.attention_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    embeddings=self.embedding_matrix,
                    start_of_sequence_id=data_utils.GO_ID,
                    end_of_sequence_id=data_utils.EOS_ID,
                    maximum_length=FLAGS.max_inf_target_len,
                    num_decoder_symbols=self.vocab_size,
                )

            (self.decoder_outputs_train, self.decoder_state_train,
             self.decoder_context_state_train) = (seq2seq.dynamic_rnn_decoder(
                 cell=self.decoder_cell,
                 decoder_fn=decoder_fn_train,
                 inputs=self.decoder_train_inputs_embedded,
                 sequence_length=self.decoder_train_length,
                 time_major=True,
                 scope=scope,
             ))

            # _keep_prob is presumably a dropout keep probability defined
            # elsewhere in the original file.
            self.decoder_outputs_train = tf.nn.dropout(
                self.decoder_outputs_train, _keep_prob)

            self.decoder_logits_train = output_fn(self.decoder_outputs_train)

            # reusing the scope of training to use the same variables for inference
            scope.reuse_variables()

            (self.decoder_logits_inference, self.decoder_state_inference,
             self.decoder_context_state_inference) = (
                 seq2seq.dynamic_rnn_decoder(
                     cell=self.decoder_cell,
                     decoder_fn=decoder_fn_inference,
                     time_major=True,
                     scope=scope,
                 ))

            self.decoder_prediction_inference = tf.argmax(
                self.decoder_logits_inference,
                axis=-1,
                name='decoder_prediction_inference')
Code example #9
    def decoder_adv(self, max_twee_len):
        with self.graph.as_default():
            with tf.variable_scope("Decoder") as scope:
                self.decoder_length = max_twee_len + 3

                def output_fn(outputs):
                    return tf.contrib.layers.linear(outputs,
                                                    self.vocab_size,
                                                    scope=scope)

                # self.decoder_cell = LSTMCell(self.decoder_hidden_nodes)
                self.decoder_cell = GRUCell(self.decoder_hidden_nodes)
                if not self.attention:
                    decoder_train = seq2seq.simple_decoder_fn_train(
                        encoder_state=self.encoder_final_state)
                    decoder_inference = seq2seq.simple_decoder_fn_inference(
                        output_fn=output_fn,
                        encoder_state=self.encoder_final_state,
                        embeddings=self.embed,
                        start_of_sequence_id=self.EOS,
                        end_of_sequence_id=self.EOS,
                        maximum_length=self.decoder_length,
                        num_decoder_symbols=self.vocab_size)
                else:
                    # attention_states: size [batch_size, max_time, num_units]
                    self.attention_states = tf.transpose(
                        self.encoder_output, [1, 0, 2])
                    (self.attention_keys, self.attention_values, self.attention_score_fn, self.attention_construct_fn) = \
                        seq2seq.prepare_attention(attention_states = self.attention_states, attention_option = "bahdanau",
                                                  num_units = self.decoder_hidden_nodes)

                    decoder_fn_train = seq2seq.attention_decoder_fn_train(
                        encoder_state=self.encoder_final_state,
                        attention_keys=self.attention_keys,
                        attention_values=self.attention_values,
                        attention_score_fn=self.attention_score_fn,
                        attention_construct_fn=self.attention_construct_fn,
                        name="attention_decoder")

                    decoder_fn_inference = seq2seq.attention_decoder_fn_inference(
                        output_fn=output_fn,
                        encoder_state=self.encoder_final_state,
                        attention_keys=self.attention_keys,
                        attention_values=self.attention_values,
                        attention_score_fn=self.attention_score_fn,
                        attention_construct_fn=self.attention_construct_fn,
                        embeddings=self.embed,
                        start_of_sequence_id=self.EOS,
                        end_of_sequence_id=self.EOS,
                        maximum_length=23,  # max_twee_len + 3, or tf.reduce_max(self.de_out_len) + 3
                        num_decoder_symbols=self.vocab_size)
                    self.decoder_train_inputs_embedded = tf.nn.embedding_lookup(
                        self.embed, self.decoder_train_input)
                    (self.decoder_outputs_train, self.decoder_state_train,
                     self.decoder_context_state_train) = (
                         seq2seq.dynamic_rnn_decoder(
                             cell=self.decoder_cell,
                             decoder_fn=decoder_fn_train,
                             inputs=self.decoder_train_inputs_embedded,
                             sequence_length=self.decoder_train_length,
                             time_major=True,
                             scope=scope))

                    self.decoder_logits_train = output_fn(
                        self.decoder_outputs_train)
                    self.decoder_prediction_train = tf.argmax(
                        self.decoder_logits_train,
                        axis=-1,
                        name='decoder_prediction_train')

                    scope.reuse_variables()
                    (self.decoder_logits_inference,
                     self.decoder_state_inference,
                     self.decoder_context_state_inference) = (
                         seq2seq.dynamic_rnn_decoder(
                             cell=self.decoder_cell,
                             decoder_fn=decoder_fn_inference,
                             time_major=True,
                             scope=scope))
                    self.decoder_prediction_inference = tf.argmax(
                        self.decoder_logits_inference,
                        axis=-1,
                        name='decoder_prediction_inference')

        return self.de_out, self.de_out_len, self.title_out, self.first_out, self.decoder_logits_train, \
               self.decoder_prediction_train, self.loss_weights, self.decoder_train_targets, \
               self.decoder_train_title, self.decoder_train_first, self.decoder_prediction_inference
Code example #10
    def _init_decoder(self):
        with tf.variable_scope("Decoder") as scope:

            def output_fn(outputs):
                # Projects cell outputs to vocabulary-sized logits; decoding
                # is greedy (argmax over these logits).
                return tf.contrib.layers.linear(
                    outputs, self.vocab_size, scope=scope)

            if not self.attention:
                # Training decoder function, used inside dynamic_rnn_decoder.
                decoder_fn_train = seq2seq.simple_decoder_fn_train(
                    encoder_state=self.encoder_state)

                # Inference function for a sequence-to-sequence model; used
                # when dynamic_rnn_decoder runs in inference mode. See
                # https://github.com/tensorflow/tensorflow/blob/r1.0/tensorflow/contrib/seq2seq/python/ops/decoder_fn.py#L182
                decoder_fn_inference = seq2seq.simple_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    embeddings=self.embedding_matrix,
                    start_of_sequence_id=self.EOS,
                    end_of_sequence_id=self.EOS,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
                    num_decoder_symbols=self.vocab_size,
                )
            else:
                # attention_states: size [batch_size, max_time, num_units];
                # the encoder hidden states serve as the attention states.
                attention_states = tf.transpose(self.encoder_outputs, [1, 0, 2])

                # prepare_attention returns:
                #   attention_keys: the encoder hidden states projected
                #       through a fully connected layer, one per time step;
                #   attention_values: the raw attention (encoder) states;
                #   attention_score_fn: computes the context vector from the
                #       decoder state and the encoder hidden states;
                #   attention_construct_fn: concatenates the attention query
                #       with the context vector to build the next input.
                (attention_keys,
                 attention_values,
                 attention_score_fn,
                 attention_construct_fn) = seq2seq.prepare_attention(
                     attention_states=attention_states,
                     attention_option="bahdanau",
                     num_units=self.decoder_hidden_units,
                 )
                print("decoder hidden units:", self.decoder_hidden_units)
                print("attention keys:", attention_keys)
                print("attention score function:", attention_score_fn)

                # Training decoder function: initializes the decoder input
                # state and attention, then drives dynamic_rnn_decoder. Each
                # step maps (time, cell_state, cell_input, cell_output,
                # context_state) to (done, next state, next input, emitted
                # output, next context state); the emitted output is the cell
                # output after attention and the non-linearity have been
                # applied, and the next input concatenates the cell input
                # with the attention vector.
                decoder_fn_train = seq2seq.attention_decoder_fn_train(
                    encoder_state=self.encoder_state,  # final encoder state (bidirectional states concatenated)
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    name='attention_decoder')

                # Same as above but for inference, which decodes greedily:
                # cell_output = output_fn(cell_output), i.e. logits, and the
                # predicted token is fed back as the next input.
                decoder_fn_inference = seq2seq.attention_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    embeddings=self.embedding_matrix,
                    start_of_sequence_id=self.EOS,
                    end_of_sequence_id=self.EOS,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
                    num_decoder_symbols=self.vocab_size,
                )

            # dynamic_rnn_decoder performs the actual decoding with the
            # decoder_fn built above; training and inference need separate
            # calls. context_state can carry extra decoding state: e.g. a
            # stochastic decoder_fn may store its decoded outputs (which
            # differ from the raw RNN outputs) in a TensorArray there; beam
            # search also modifies it.
            (self.decoder_outputs_train,  # per-step cell outputs, [batch_size, max_time, cell.output_size]
             self.decoder_state_train,    # final state, [batch_size, cell.state_size]
             self.decoder_context_state_train) = seq2seq.dynamic_rnn_decoder(
                 cell=self.decoder_cell,
                 decoder_fn=decoder_fn_train,  # models early stopping, output, state, next input and context
                 inputs=self.decoder_train_inputs_embedded,  # decoder inputs; training only
                 sequence_length=self.decoder_train_length,  # needed for dynamic unrolling when inputs is not None; not needed at test time
                 time_major=True,  # inputs and outputs shaped [max_time, batch_size, ...]
                 scope=scope,
             )

            # Run the final hidden states through the linear layer to get
            # logits, then take the argmax as the training prediction.
            self.decoder_logits_train = output_fn(self.decoder_outputs_train)
            self.decoder_prediction_train = tf.argmax(
                self.decoder_logits_train,
                axis=-1,
                name='decoder_prediction_train')

            scope.reuse_variables()

            # Inference pass: no inputs are provided, so each step feeds the
            # previous prediction back in as the next input.
            (self.decoder_logits_inference,
             self.decoder_state_inference,
             self.decoder_context_state_inference) = seq2seq.dynamic_rnn_decoder(
                 cell=self.decoder_cell,
                 decoder_fn=decoder_fn_inference,
                 time_major=True,
                 scope=scope,
             )
            # Predicted token at each time step.
            self.decoder_prediction_inference = tf.argmax(
                self.decoder_logits_inference,
                axis=-1,
                name='decoder_prediction_inference')
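
Stripped of the commentary, the attention pipeline in the example above reduces to a short chain of calls. A minimal sketch, where encoder_outputs, encoder_state, decoder_cell, decoder_inputs_embedded, decoder_lengths, and decoder_hidden_units are hypothetical stand-ins for the model's attributes:

# Sketch only: the prepare_attention -> decoder_fn -> dynamic_rnn_decoder
# chain, reduced to its essentials.
attention_states = tf.transpose(encoder_outputs, [1, 0, 2])  # to [batch, time, units]

(keys, values, score_fn, construct_fn) = seq2seq.prepare_attention(
    attention_states=attention_states,
    attention_option='bahdanau',      # 'luong' is the other built-in option
    num_units=decoder_hidden_units)

train_fn = seq2seq.attention_decoder_fn_train(
    encoder_state, keys, values, score_fn, construct_fn)

# Training pass: ground-truth embeddings are fed in step by step.
outputs, state, context = seq2seq.dynamic_rnn_decoder(
    cell=decoder_cell,
    decoder_fn=train_fn,
    inputs=decoder_inputs_embedded,   # [max_time, batch, word_dim]
    sequence_length=decoder_lengths,
    time_major=True)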
Code example #11
File: rnn_decoder.py Project: m12sl/TF-seq2seq
    def _build_graph(self):
        # required only for training
        self.targets = tf.placeholder(shape=(None, None),
                                      dtype=tf.int32,
                                      name="decoder_inputs")
        self.targets_length = tf.placeholder(shape=(None, ),
                                             dtype=tf.int32,
                                             name="decoder_inputs_length")
        self.global_step = tf.Variable(0, name="global_step", trainable=False)

        with tf.name_scope("DecoderTrainFeed"):
            sequence_size, batch_size = tf.unstack(tf.shape(self.targets))

            EOS_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.EOS
            PAD_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.PAD

            self.train_inputs = tf.concat([EOS_SLICE, self.targets], axis=0)
            self.train_length = self.targets_length + 1

            train_targets = tf.concat([self.targets, PAD_SLICE], axis=0)
            train_targets_seq_len, _ = tf.unstack(tf.shape(train_targets))
            train_targets_eos_mask = tf.one_hot(self.train_length - 1,
                                                train_targets_seq_len,
                                                on_value=self.EOS,
                                                off_value=self.PAD,
                                                dtype=tf.int32)
            train_targets_eos_mask = tf.transpose(train_targets_eos_mask,
                                                  [1, 0])

            # hacky way using one_hot to put EOS symbol at the end of target sequence
            train_targets = tf.add(train_targets, train_targets_eos_mask)

            self.train_targets = train_targets

            self.loss_weights = tf.ones(
                [batch_size, tf.reduce_max(self.train_length)],
                dtype=tf.float32,
                name="loss_weights")

        with tf.variable_scope("embedding") as scope:
            self.inputs_embedded = tf.nn.embedding_lookup(
                self.embedding_matrix, self.train_inputs)

        with tf.variable_scope("Decoder") as scope:

            def logits_fn(outputs):
                return layers.linear(outputs, self.vocab_size, scope=scope)

            if not self.attention:
                train_fn = seq2seq.simple_decoder_fn_train(
                    encoder_state=self.encoder_state)
                inference_fn = seq2seq.simple_decoder_fn_inference(
                    output_fn=logits_fn,
                    encoder_state=self.encoder_state,
                    embeddings=self.embedding_matrix,
                    start_of_sequence_id=self.EOS,
                    end_of_sequence_id=self.EOS,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
                    num_decoder_symbols=self.vocab_size)
            else:

                # attention_states: size [batch_size, max_time, num_units]
                attention_states = tf.transpose(self.encoder_outputs,
                                                [1, 0, 2])

                (attention_keys, attention_values, attention_score_fn,
                 attention_construct_fn) = seq2seq.prepare_attention(
                     attention_states=attention_states,
                     attention_option="bahdanau",
                     num_units=self.decoder_hidden_units)

                train_fn = seq2seq.attention_decoder_fn_train(
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    name="decoder_attention")

                inference_fn = seq2seq.attention_decoder_fn_inference(
                    output_fn=logits_fn,
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    embeddings=self.embedding_matrix,
                    start_of_sequence_id=self.EOS,
                    end_of_sequence_id=self.EOS,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
                    num_decoder_symbols=self.vocab_size)

            (self.train_outputs, self.train_state,
             self.train_context_state) = seq2seq.dynamic_rnn_decoder(
                 cell=self.cell,
                 decoder_fn=train_fn,
                 inputs=self.inputs_embedded,
                 sequence_length=self.train_length,
                 time_major=True,
                 scope=scope)

            self.train_logits = logits_fn(self.train_outputs)
            self.train_prediction = tf.argmax(self.train_logits,
                                              axis=-1,
                                              name="train_prediction")
            self.train_prediction_probabilities = tf.nn.softmax(
                self.train_logits,
                dim=-1,
                name="train_prediction_probabilities")

            scope.reuse_variables()

            (self.inference_logits, self.inference_state,
             self.inference_context_state) = seq2seq.dynamic_rnn_decoder(
                 cell=self.cell,
                 decoder_fn=inference_fn,
                 time_major=True,
                 scope=scope)

            self.inference_prediction = tf.argmax(self.inference_logits,
                                                  axis=-1,
                                                  name="inference_prediction")
            self.inference_prediction_probabilities = tf.nn.softmax(
                self.inference_logits,
                dim=-1,
                name="inference_prediction_probabilities")
Code example #12
File: Seq2SeqModel.py Project: Lucklyric/NLP-NER-CNN
    def _init_decoder(self):
        with tf.variable_scope("Decoder") as scope:

            def output_fn(outputs):
                return tc.layers.fully_connected(outputs,
                                                 self.output_symbol_size,
                                                 activation_fn=None,
                                                 scope=scope)

            if not self.attention:
                decoder_fn_train = seq2seq.simple_decoder_fn_train(
                    encoder_state=self.encoder_state)
                decoder_fn_inference = seq2seq.simple_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    embeddings=self.embedding_matrix,
                    start_of_sequence_id=self.EOS,
                    end_of_sequence_id=self.EOS,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length),
                    num_decoder_symbols=self.output_symbol_size)
            else:
                (attention_keys, attention_values, attention_score_fn,
                 attention_construct_fn) = seq2seq.prepare_attention(
                     attention_states=self.encoder_outputs,
                     attention_option="bahdanau",
                     num_units=self.decoder_hidden_units,
                 )

                decoder_fn_train = seq2seq.attention_decoder_fn_train(
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    name='attention_decoder')

                decoder_fn_inference = seq2seq.attention_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    embeddings=self.embedding_matrix,
                    start_of_sequence_id=self.EOS,
                    end_of_sequence_id=self.EOS,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length),
                    num_decoder_symbols=self.output_symbol_size,
                )
            if self.is_training:
                (self.decoder_outputs_train, self.decoder_state_train,
                 self.decoder_context_state_train) = (
                     seq2seq.dynamic_rnn_decoder(
                         cell=self.decoder_cell,
                         decoder_fn=decoder_fn_train,
                         inputs=self.decoder_train_inputs_embedded,
                         sequence_length=self.decoder_train_length,
                         time_major=False,
                         scope=scope,
                     ))

                self.decoder_logits_train = output_fn(
                    self.decoder_outputs_train)
                self.decoder_prediction_train = tf.argmax(
                    self.decoder_logits_train,
                    axis=-1,
                    name='decoder_prediction_train')

                scope.reuse_variables()

            (self.decoder_logits_inference, self.decoder_state_inference,
             self.decoder_context_state_inference) = (
                 seq2seq.dynamic_rnn_decoder(
                     cell=self.decoder_cell,
                     decoder_fn=decoder_fn_inference,
                     time_major=False,
                     scope=scope,
                 ))
            self.decoder_prediction_inference = tf.argmax(
                self.decoder_logits_inference,
                axis=-1,
                name='decoder_prediction_inference')
Code example #13
    def __init_decoder(self):
        '''Initializes the decoder part of the model.'''
        with tf.variable_scope('decoder') as scope:
            output_fn = lambda outs: layers.linear(
                outs, self.__get_vocab_size(), scope=scope)

            if self.cfg.get('use_attention'):
                attention_states = tf.transpose(self.encoder_outputs,
                                                [1, 0, 2])

                (attention_keys, attention_values, attention_score_fn,
                 attention_construct_fn) = seq2seq.prepare_attention(
                     attention_states=attention_states,
                     attention_option='bahdanau',
                     num_units=self.decoder_cell.output_size)

                decoder_fn_train = seq2seq.attention_decoder_fn_train(
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    name='attention_decoder')

                decoder_fn_inference = seq2seq.attention_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    attention_keys=attention_keys,
                    attention_values=attention_values,
                    attention_score_fn=attention_score_fn,
                    attention_construct_fn=attention_construct_fn,
                    embeddings=self.embeddings,
                    start_of_sequence_id=Config.EOS_WORD_IDX,
                    end_of_sequence_id=Config.EOS_WORD_IDX,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
                    num_decoder_symbols=self.__get_vocab_size())
            else:
                decoder_fn_train = seq2seq.simple_decoder_fn_train(
                    encoder_state=self.encoder_state)
                decoder_fn_inference = seq2seq.simple_decoder_fn_inference(
                    output_fn=output_fn,
                    encoder_state=self.encoder_state,
                    embeddings=self.embeddings,
                    start_of_sequence_id=Config.EOS_WORD_IDX,
                    end_of_sequence_id=Config.EOS_WORD_IDX,
                    maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
                    num_decoder_symbols=self.__get_vocab_size())

            (self.decoder_outputs_train, self.decoder_state_train,
             self.decoder_context_state_train) = seq2seq.dynamic_rnn_decoder(
                 cell=self.decoder_cell,
                 decoder_fn=decoder_fn_train,
                 inputs=self.decoder_train_inputs_embedded,
                 sequence_length=self.decoder_train_length,
                 time_major=True,
                 scope=scope)

            self.decoder_logits_train = output_fn(self.decoder_outputs_train)
            self.decoder_prediction_train = tf.argmax(
                self.decoder_logits_train,
                axis=-1,
                name='decoder_prediction_train')

            scope.reuse_variables()

            (self.decoder_logits_inference, self.decoder_state_inference,
             self.decoder_context_state_inference
             ) = seq2seq.dynamic_rnn_decoder(cell=self.decoder_cell,
                                             decoder_fn=decoder_fn_inference,
                                             time_major=True,
                                             scope=scope)

            self.decoder_prediction_inference = tf.argmax(
                self.decoder_logits_inference,
                axis=-1,
                name='decoder_prediction_inference')
Code example #14
    def _init_decoder(self, forward_only):
        with tf.variable_scope("decoder") as scope:

            def output_fn(outputs):
                return tf.contrib.layers.linear(outputs,
                                                self.target_vocab_size,
                                                scope=scope)

            self.attention = True  # hard-coded, so the non-attention branch below never runs
            if not self.attention:
                if forward_only:
                    decoder_fn = seq2seq.simple_decoder_fn_inference(
                        output_fn=output_fn,
                        encoder_state=self.encoder_state,
                        embeddings=self.dec_embedding_matrix,
                        start_of_sequence_id=model_config.GO_ID,
                        end_of_sequence_id=model_config.EOS_ID,
                        maximum_length=self.buckets[-1][1],
                        num_decoder_symbols=self.target_vocab_size,
                    )
                    (self.decoder_outputs, self.decoder_state,
                     self.decoder_context_state) = (
                         seq2seq.dynamic_rnn_decoder(
                             cell=self.decoder_cell,
                             decoder_fn=decoder_fn,
                             time_major=True,
                             scope=scope,
                         ))
                else:
                    decoder_fn = seq2seq.simple_decoder_fn_train(
                        encoder_state=self.encoder_state)
                    (self.decoder_outputs, self.decoder_state,
                     self.decoder_context_state) = (
                         seq2seq.dynamic_rnn_decoder(
                             cell=self.decoder_cell,
                             decoder_fn=decoder_fn,
                             inputs=self.decoder_inputs_embedded,
                             sequence_length=self.decoder_inputs_length,
                             time_major=True,
                             scope=scope,
                         ))

            else:
                # attention_states: size [batch_size, max_time, num_units]
                attention_states = tf.transpose(self.encoder_outputs,
                                                [1, 0, 2])

                (attention_keys, attention_values, attention_score_fn,
                 attention_construct_fn) = (seq2seq.prepare_attention(
                     attention_states=attention_states,
                     attention_option="bahdanau",
                     num_units=self.dec_hidden_size))

                if forward_only:
                    decoder_fn = seq2seq.attention_decoder_fn_inference(
                        output_fn=output_fn,
                        encoder_state=self.encoder_state,
                        attention_keys=attention_keys,
                        attention_values=attention_values,
                        attention_score_fn=attention_score_fn,
                        attention_construct_fn=attention_construct_fn,
                        embeddings=self.dec_embedding_matrix,
                        start_of_sequence_id=model_config.GO_ID,
                        end_of_sequence_id=model_config.EOS_ID,
                        maximum_length=self.buckets[-1][1],
                        num_decoder_symbols=self.target_vocab_size,
                    )
                    (self.decoder_outputs, self.decoder_state,
                     self.decoder_context_state) = (
                         seq2seq.dynamic_rnn_decoder(
                             cell=self.decoder_cell,
                             decoder_fn=decoder_fn,
                             time_major=True,
                             scope=scope,
                         ))
                else:
                    decoder_fn = seq2seq.attention_decoder_fn_train(
                        encoder_state=self.encoder_state,
                        attention_keys=attention_keys,
                        attention_values=attention_values,
                        attention_score_fn=attention_score_fn,
                        attention_construct_fn=attention_construct_fn,
                        name='attention_decoder')
                    (self.decoder_outputs, self.decoder_state,
                     self.decoder_context_state) = (
                         seq2seq.dynamic_rnn_decoder(
                             cell=self.decoder_cell,
                             decoder_fn=decoder_fn,
                             inputs=self.decoder_inputs_embedded,
                             sequence_length=self.decoder_inputs_length,
                             time_major=True,
                             scope=scope,
                         ))

            if not forward_only:
                self.decoder_logits = output_fn(self.decoder_outputs)
            else:
                self.decoder_logits = self.decoder_outputs

            self.decoder_prediction = tf.argmax(self.decoder_logits,
                                                axis=-1,
                                                name='decoder_prediction')
            logits = tf.transpose(self.decoder_logits, [1, 0, 2])
            targets = tf.transpose(self.decoder_targets, [1, 0])

            if not forward_only:
                self.loss = seq2seq.sequence_loss(logits=logits,
                                                  targets=targets,
                                                  weights=self.target_weights)