Code Example #1
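All of the snippets on this page use the legacy TensorFlow 0.x seq2seq API, in which rnn_cell and seq2seq were standalone modules rather than part of tf.nn. A minimal sketch of the imports they assume (the module path is an assumption; it moved between early releases, and tensorflow.models.rnn is one historical location):

    # Assumed imports for the snippets on this page (legacy TensorFlow 0.x).
    # The module path is an assumption and varied across early releases.
    import tensorflow as tf
    from tensorflow.models.rnn import rnn_cell, seq2seq
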
    def __load_model(self, num_layers):
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("train_test", reuse=True):
            lstm = rnn_cell.LSTMCell(self.memory_dim)
            # Stacks layers of RNNs to form a stacked decoder
            self.cell = rnn_cell.MultiRNNCell([lstm] * num_layers)

        # embedding model
        if not self.attention:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp, self.dec_inp, self.cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse = True):
                self.dec_outputs_tst, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp, self.dec_inp, self.cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp, self.dec_inp, self.cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse = True):
                self.dec_outputs_tst, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp, self.dec_inp, self.cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
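
In this list-based API, self.enc_inp and self.dec_inp are Python lists holding one int32 tensor per timestep. A hypothetical sketch of the input setup the method assumes (placeholder names and shapes are assumptions, not taken from the original project):

    # Hypothetical input setup assumed by __load_model: one int32
    # placeholder per timestep, batched along the first dimension.
    self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="enc%i" % t)
                    for t in range(self.seq_length)]
    self.dec_inp = [tf.placeholder(tf.int32, shape=(None,), name="dec%i" % t)
                    for t in range(self.seq_length)]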
Code Example #2
    def __load_model(self):
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("train_test", reuse=True):
            self.cell = rnn_cell.LSTMCell(self.memory_dim)

        # embedding model
        if not self.attention:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp, self.dec_inp, self.cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse = True):
                self.dec_outputs_tst, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp, self.dec_inp, self.cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp, self.dec_inp, self.cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse = True):
                self.dec_outputs_tst, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp, self.dec_inp, self.cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
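
Both graphs above are built in the same "train_test" variable scope, so the feed_previous=True copy reuses the trained weights at test time. A hedged sketch of how the training outputs are then typically consumed with this API (labels, weights, and the optimizer choice are assumptions, not from the original project):

    # Hypothetical training wiring: per-timestep cross-entropy,
    # averaged by sequence_loss from the same legacy seq2seq module.
    labels = [tf.placeholder(tf.int32, shape=(None,), name="lab%i" % t)
              for t in range(self.seq_length)]
    weights = [tf.ones_like(lab, dtype=tf.float32) for lab in labels]
    loss = seq2seq.sequence_loss(self.dec_outputs, labels, weights)
    train_op = tf.train.MomentumOptimizer(0.05, 0.9).minimize(loss)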
Code Example #3
    def __load_model(self, num_layers):
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("forward"):
            fw_single_cell = rnn_cell.GRUCell(self.memory_dim)
            # Stacks layers of RNNs to form a stacked decoder
            self.forward_cell = rnn_cell.MultiRNNCell([fw_single_cell] *
                                                      num_layers)

        with tf.variable_scope("backward"):
            bw_single_cell = rnn_cell.GRUCell(self.memory_dim)
            # Stacks layers of RNNs to form a stacked decoder
            self.backward_cell = rnn_cell.MultiRNNCell([bw_single_cell] *
                                                       num_layers)

        # embedding model
        if not self.attention:
            with tf.variable_scope("forward"):
                self.dec_outputs_fwd, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp_fwd, self.dec_inp, self.forward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("forward", reuse=True):
                self.dec_outputs_fwd_tst, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp_fwd, self.dec_inp, self.forward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

            with tf.variable_scope("backward"):
                self.dec_outputs_bwd, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp_bwd, self.dec_inp, self.backward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)

            with tf.variable_scope("backward", reuse=True):
                self.dec_outputs_bwd_tst, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp_bwd, self.dec_inp, self.backward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("forward"):
                self.dec_outputs_fwd, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp_fwd, self.dec_inp, self.forward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("forward", reuse=True):
                self.dec_outputs_fwd_tst, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp_fwd, self.dec_inp, self.forward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

            with tf.variable_scope("backward"):
                self.dec_outputs_bwd, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp_bwd, self.dec_inp, self.backward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)

            with tf.variable_scope("backward", reuse=True):
                self.dec_outputs_bwd_tst, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp_bwd, self.dec_inp, self.backward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
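
This bidirectional variant builds two independent seq2seq models in separate "forward" and "backward" variable scopes. The snippet does not show how enc_inp_bwd is derived; a plausible setup (an assumption, not confirmed by the original project) feeds the backward model the reversed sequence:

    # Assumed relationship between the two encoder inputs: the backward
    # model reads the same tokens in reverse order.
    self.enc_inp_bwd = list(reversed(self.enc_inp_fwd))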
Code Example #4
    def __load_model(self):
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("forward"):
            self.forward_cell = rnn_cell.LSTMCell(self.memory_dim)
        with tf.variable_scope("backward"):
            self.backward_cell = rnn_cell.LSTMCell(self.memory_dim)

        # embedding model
        if not self.attention:
            with tf.variable_scope("forward"):
                self.dec_outputs_fwd, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp_fwd, self.dec_inp, self.forward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("forward", reuse=True):
                self.dec_outputs_fwd_tst, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp_fwd, self.dec_inp, self.forward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

            with tf.variable_scope("backward"):
                self.dec_outputs_bwd, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp_bwd, self.dec_inp, self.backward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)

            with tf.variable_scope("backward", reuse=True):
                self.dec_outputs_bwd_tst, _ = seq2seq.embedding_rnn_seq2seq(\
                                self.enc_inp_bwd, self.dec_inp, self.backward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("forward"):
                self.dec_outputs_fwd, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp_fwd, self.dec_inp, self.forward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("forward", reuse=True):
                self.dec_outputs_fwd_tst, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp_fwd, self.dec_inp, self.forward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

            with tf.variable_scope("backward"):
                self.dec_outputs_bwd, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp_bwd, self.dec_inp, self.backward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length)

            with tf.variable_scope("backward", reuse=True):
                self.dec_outputs_bwd_tst, _ = seq2seq.embedding_attention_seq2seq(\
                                self.enc_inp_bwd, self.dec_inp, self.backward_cell, \
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
Code Example #5
 def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
     return seq2seq.embedding_attention_seq2seq(
         encoder_inputs,
         decoder_inputs,
         cell,
         source_vocab_size,
         target_vocab_size,
         output_projection=output_projection,
         feed_previous=do_decode)
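
Here cell and output_projection are captured from the enclosing scope. A sketch of how the projection pair is commonly constructed, mirroring the proj_w/proj_b variables in Code Example #7 below (size is an assumed name for the cell output dimension):

    # Hypothetical construction of the (w, b) pair passed as
    # output_projection: maps cell outputs back to the target vocabulary.
    w = tf.get_variable("proj_w", [size, target_vocab_size])
    b = tf.get_variable("proj_b", [target_vocab_size])
    output_projection = (w, b)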
Code Example #6
File: seq2seq_test.py  Project: xzm2004260/tensorflow
 def GRUSeq2Seq(enc_inp, dec_inp):
     cell = rnn_cell.MultiRNNCell([rnn_cell.GRUCell(24)] * 2)
     return seq2seq.embedding_attention_seq2seq(
         enc_inp,
         dec_inp,
         cell,
         classes,
         classes,
         output_projection=(w, b))
Code Example #7
  def testEmbeddingAttentionSeq2Seq(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in xrange(2)]
        dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in xrange(3)]
        cell = rnn_cell.BasicLSTMCell(2)
        dec, mem = seq2seq.embedding_attention_seq2seq(
            enc_inp, dec_inp, cell, 2, 5)
        sess.run([tf.initialize_all_variables()])
        res = sess.run(dec)
        self.assertEqual(len(res), 3)
        self.assertEqual(res[0].shape, (2, 5))

        res = sess.run(mem)
        self.assertEqual(len(res), 4)
        self.assertEqual(res[0].shape, (2, 4))

        # Test externally provided output projection.
        w = tf.get_variable("proj_w", [2, 5])
        b = tf.get_variable("proj_b", [5])
        with tf.variable_scope("proj_seq2seq"):
          dec, _ = seq2seq.embedding_attention_seq2seq(
              enc_inp, dec_inp, cell, 2, 5, output_projection=(w, b))
        sess.run([tf.initialize_all_variables()])
        res = sess.run(dec)
        self.assertEqual(len(res), 3)
        self.assertEqual(res[0].shape, (2, 2))

        # Test that previous-feeding model ignores inputs after the first.
        dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in xrange(3)]
        tf.get_variable_scope().reuse_variables()
        d1, _ = seq2seq.embedding_attention_seq2seq(
            enc_inp, dec_inp, cell, 2, 5, feed_previous=True)
        d2, _ = seq2seq.embedding_attention_seq2seq(
            enc_inp, dec_inp2, cell, 2, 5, feed_previous=True)
        d3, _ = seq2seq.embedding_attention_seq2seq(
            enc_inp, dec_inp2, cell, 2, 5, feed_previous=tf.constant(True))
        res1 = sess.run(d1)
        res2 = sess.run(d2)
        res3 = sess.run(d3)
        self.assertAllClose(res1, res2)
        self.assertAllClose(res1, res3)
Code Example #8
 def seq2seq_f(encoder_inputs, decoder_inputs, do_decode=False):
     # feed_previous=do_decode: False uses the supplied decoder inputs
     # (training); True feeds back the previous prediction (decoding).
     return seq2seq.embedding_attention_seq2seq(
         encoder_inputs, decoder_inputs, cell, vocab_size,
         vocab_size, feed_previous=do_decode)
Code Example #9
 def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
     return seq2seq.embedding_attention_seq2seq(
                         encoder_inputs, decoder_inputs,
                         cell, num_input_tokens,
                         num_target_tokens, output_projection=output_projection,
                         feed_previous=do_decode)
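
Wrappers like this seq2seq_f are typically called twice over shared weights, once to build the training graph and once for decoding, matching the reuse pattern in Code Examples #1 through #4. A hedged sketch (scope and variable names are assumptions):

    # Hypothetical use of seq2seq_f: training and decoding graphs
    # share weights via variable scope reuse.
    with tf.variable_scope("model"):
        train_outputs, _ = seq2seq_f(encoder_inputs, decoder_inputs, False)
    with tf.variable_scope("model", reuse=True):
        decode_outputs, _ = seq2seq_f(encoder_inputs, decoder_inputs, True)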