Example #1
 def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
     return rl_seq2seq.embedding_attention_seq2seq(
         encoder_inputs,
         decoder_inputs,
         cell,
         num_encoder_symbols=source_vocab_size,
         num_decoder_symbols=target_vocab_size,
         embedding_size=size,
         output_projection=output_projection,
         feed_previous=do_decode,
         dtype=dtype)
Example #2
 def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
     return rl_seq2seq.embedding_attention_seq2seq(
         encoder_inputs,
         decoder_inputs,
         cell,
         num_encoder_symbols=self.source_vocab_size,
         num_decoder_symbols=self.target_vocab_size,
         embedding_size=self.emb_dim,
         output_projection=output_projection,
         feed_previous=do_decode,  # whether to feed the previous prediction back as the next input, i.e. whether we are in test-time decoding
         mc_search=self.mc_search,  # TODO(Zhu) location: seq2seq._argmax_or_mcsearch. What does this mean?
         dtype=self.dtype)
Example #3
 def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
     return rl_seq2seq.embedding_attention_seq2seq(
         encoder_inputs,
         decoder_inputs,
         cell,
         num_encoder_symbols=source_vocab_size,
         num_decoder_symbols=target_vocab_size,
         embedding_size=emb_dim,
         output_projection=output_projection,
         feed_previous=do_decode,
         mc_search=self.mc_search,
         dtype=tf.float32)
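All three closures follow the pattern of TensorFlow's classic translation model: the enclosing constructor defines the recurrent cell, the vocabulary sizes, and the output projection, and seq2seq_f simply closes over them. Below is a minimal sketch of that enclosing scope, assuming the standard tf.contrib.legacy_seq2seq implementation (TF 1.x) in place of the project-specific rl_seq2seq module; the vocabulary sizes, layer count, and hidden size are illustrative placeholders, not values taken from the examples.

 import tensorflow as tf

 # Assumed hyperparameters (placeholders for illustration only).
 source_vocab_size = 40000   # encoder vocabulary size
 target_vocab_size = 40000   # decoder vocabulary size
 size = 256                  # embedding and hidden size
 num_layers = 2              # depth of the recurrent stack

 # Multi-layer GRU cell shared by encoder and decoder.
 cell = tf.contrib.rnn.MultiRNNCell(
     [tf.contrib.rnn.GRUCell(size) for _ in range(num_layers)])

 # Projection from the cell output back to the target vocabulary,
 # used when the RNN output dimension is smaller than the vocabulary.
 w = tf.get_variable("proj_w", [size, target_vocab_size])
 b = tf.get_variable("proj_b", [target_vocab_size])
 output_projection = (w, b)

 def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
     # Same shape as the examples above, but calling the stock
     # TensorFlow implementation instead of rl_seq2seq.
     return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
         encoder_inputs,
         decoder_inputs,
         cell,
         num_encoder_symbols=source_vocab_size,
         num_decoder_symbols=target_vocab_size,
         embedding_size=size,
         output_projection=output_projection,
         feed_previous=do_decode,
         dtype=tf.float32)

The closure is then typically handed to a bucketed training helper, with feed_previous set to False during training (ground-truth decoder inputs) and True during decoding (the model feeds its own predictions back in); the mc_search flag in Examples #2 and #3 is specific to the rl_seq2seq module and has no counterpart in the stock API.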