def evaluate(encoder, decoder, voc, sentence, beam_size, k, p, max_length=MAX_LENGTH, hidvar=0):
    """Generate a response for a single input sentence.

    Args:
        encoder: seq2seq encoder module.
        decoder: seq2seq decoder module; ``decoder.n_layers`` selects how
            many hidden layers seed the decoder.
        voc: vocabulary mapping words <-> indexes.
        sentence: input sentence (string).
        beam_size: 1 for single-path decoding, >1 for beam search.
        k, p: forwarded to ``decode`` (presumably top-k / top-p sampling
            parameters — confirm against ``decode``'s signature).
        max_length: kept for interface compatibility; not used directly here.
        hidvar: falsy (default 0) for a plain seq2seq; otherwise an object
            exposing ``z_hidden_size``, in which case a random latent z is
            concatenated onto the decoder's initial hidden state.

    Returns:
        Whatever ``decode`` / ``beam_decode`` return.
    """
    # Map the sentence to a batch of word indexes: [1, seq_len]
    indexes_batch = [indexesFromSentence(voc, sentence)]
    lengths = [len(indexes) for indexes in indexes_batch]
    # Transpose to [seq_len, 1]: sequence-first layout expected by the encoder.
    input_batch = torch.LongTensor(indexes_batch).transpose(0, 1)
    input_batch = input_batch.to(device)
    encoder_outputs, encoder_hidden = encoder(input_batch, lengths, None)
    # tutorial: instead in class of decoder
    if hidvar:
        # Sample a latent z and concatenate it with the encoder's final hidden
        # state along the feature dimension (CVAE-style decoder init).
        # torch.autograd.Variable is deprecated since PyTorch 0.4 — plain
        # tensor creation is equivalent and is the supported form.
        z = torch.randn([4, 1, hidvar.z_hidden_size]).to(device)
        decoder_hidden = torch.cat(
            [encoder_hidden[:decoder.n_layers], z[:decoder.n_layers]], 2)
    else:
        decoder_hidden = encoder_hidden[:decoder.n_layers]
    if beam_size == 1:
        return decode(decoder, decoder_hidden, encoder_outputs, voc, k, p)
    return beam_decode(decoder, decoder_hidden, encoder_outputs, voc, beam_size)
def evaluate(encoder, decoder, voc, sentence, beam_size, max_length=MAX_LENGTH):
    """Generate a response for a single input sentence (no-grad inference).

    Args:
        encoder: seq2seq encoder module.
        decoder: seq2seq decoder module; ``decoder.n_layers`` selects how
            many hidden layers seed the decoder.
        voc: vocabulary mapping words <-> indexes.
        sentence: input sentence (string).
        beam_size: 1 for greedy decoding, >1 for beam search.
        max_length: kept for interface compatibility; not used directly here.

    Returns:
        Whatever ``decode`` / ``beam_decode`` return.
    """
    # Map the sentence to a batch of word indexes: [1, seq_len]
    indexes_batch = [indexesFromSentence(voc, sentence)]
    lengths = [len(indexes) for indexes in indexes_batch]
    # BUG FIX: ``Variable(..., volatile=True)`` is deprecated and has had NO
    # effect since PyTorch 0.4, so this code was silently tracking gradients
    # during inference. ``torch.no_grad()`` is the supported replacement.
    with torch.no_grad():
        # [seq_len, 1]: sequence-first layout expected by the encoder.
        input_batch = torch.LongTensor(indexes_batch).transpose(0, 1)
        input_batch = input_batch.cuda() if USE_CUDA else input_batch
        encoder_outputs, encoder_hidden = encoder(input_batch, lengths, None)
        decoder_hidden = encoder_hidden[:decoder.n_layers]
        if beam_size == 1:
            return decode(decoder, decoder_hidden, encoder_outputs, voc)
        return beam_decode(decoder, decoder_hidden, encoder_outputs, voc, beam_size)
def evaluate(encoder, decoder, pinyin_voc, word_voc, sentence, beam_size, max_length=MAX_LENGTH):
    """Decode a word-level response for a pinyin-level input sentence.

    The input is indexed with ``pinyin_voc``, encoded, and then decoded
    against ``word_voc`` either greedily (``beam_size == 1``, with the
    source length forwarded to ``decode``) or via beam search.
    """
    token_ids = indexesFromSentence(pinyin_voc, sentence)
    batch = [token_ids]  # [1, seq_len]
    seq_lengths = [len(seq) for seq in batch]
    # Sequence-first layout for the encoder: [seq_len, 1]
    src = torch.LongTensor(batch).transpose(0, 1).to(device)
    encoder_outputs, encoder_hidden = encoder(src, seq_lengths, None)
    decoder_hidden = encoder_hidden[:decoder.n_layers]
    if beam_size == 1:
        return decode(decoder, decoder_hidden, encoder_outputs, word_voc, len(token_ids))
    return beam_decode(decoder, decoder_hidden, encoder_outputs, word_voc, beam_size)
def evaluate(encoder, decoder, voc, sentence, beam_size, max_length=MAX_LENGTH):
    """Generate one response sentence for *sentence*.

    Converts the sentence to an index sequence, encodes it, seeds the
    decoder with the encoder's final hidden state, and decodes either
    greedily (``beam_size == 1``) or with beam search (``beam_size > 1``).
    """
    # Word -> index conversion: [1, seq_len]
    idx_seqs = [indexesFromSentence(voc, sentence)]
    seq_lens = [len(seq) for seq in idx_seqs]
    # Sequence-first layout for the encoder: [seq_len, 1]
    model_input = torch.LongTensor(idx_seqs).transpose(0, 1)
    model_input = model_input.to(device)
    encoder_outputs, encoder_hidden = encoder(model_input, seq_lens, None)
    decoder_hidden = encoder_hidden[:decoder.n_layers]
    if beam_size == 1:
        return decode(decoder, decoder_hidden, encoder_outputs, voc)
    return beam_decode(decoder, decoder_hidden, encoder_outputs, voc, beam_size)