# Example #1
def evaluate(sentence, encoder, decoder, max_length = loader.MAX_LENGTH):
    """Greedily translate *sentence* with an LSTM encoder/decoder pair.

    Encodes the input token by token, then feeds the decoder its own
    top-1 prediction until EOS or *max_length* steps. The EOS token
    itself is not included in the result.

    Returns:
        List of decoded words (strings).
    """
    with torch.no_grad():
        source = train.tensorFromSentence(input_lang, sentence)
        # LSTM state is a (hidden, cell) pair.
        state = (encoder.initHidden().to(device),
                 encoder.initHidden().to(device))
        enc_outputs = torch.zeros(max_length, encoder.hidden_size).to(device)

        for pos in range(source.size(0)):
            step_out, state = encoder(source[pos], state)
            enc_outputs[pos] = step_out[0, 0]

        token = torch.tensor([[loader.SOS_token]]).to(device)
        words = []
        for _ in range(max_length):
            scores, state = decoder(token, state)
            _, best = scores.data.topk(1)
            if best.item() == loader.EOS_token:
                break
            words.append(output_lang.index2word[best.item()])
            token = best.squeeze().detach()
    return words
# Example #2
def evaluate(encoder, decoder, sentence, max_length):
    """Greedily decode *sentence* with a syntax-aware attentional decoder.

    Tokenizes and dependency-parses the sentence with the module-level
    `nlp` helper, builds a syntax matrix from the dependency tree, and
    passes it to the decoder at every step alongside the encoder outputs.

    Args:
        encoder: encoder module exposing `initHidden()` and `hidden_size`.
        decoder: attention decoder called as
            `decoder(input, hidden, encoder_outputs, n, syntax_matrix)`.
        sentence: raw input sentence (string).
        max_length: maximum number of encoder/decoder steps; assumed >= 1
            and >= the tokenized input length — TODO confirm against callers.

    Returns:
        Tuple `(decoded_words, decoder_attentions)` where `decoded_words`
        ends with '<EOS>' if the end token was produced, and the attention
        matrix is trimmed to one row per decoded step.
    """
    with torch.no_grad():
        word_tokenize = nlp.word_tokenize(sentence)
        syntax_info = nlp.dependency_parse(sentence)
        # Re-join the tokens so the tensorized sentence matches the parse.
        sentence = ' '.join(word_tokenize)
        #print(syntax_info)
        #print(len(word_tokenize))
        tmp_dependency_tree = dependency_tree(sentence, len(word_tokenize),
                                              syntax_info)
        syntax_matrix = tmp_dependency_tree.get_syntax_matrix()
        #print(syntax_matrix)

        input_tensor = tensorFromSentence(input_lang, sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length,
                                      encoder.hidden_size,
                                      device=device)

        # Encode token by token, collecting one output row per position.
        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS

        decoder_hidden = encoder_hidden

        # NOTE(review): h_t_tilde is only used by the commented-out decoder
        # call below; the active call ignores it.
        h_t_tilde = decoder_hidden[0]

        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        # Greedy decoding: feed back the top-1 prediction each step.
        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs,
                len(syntax_matrix), syntax_matrix)
            #decoder_output, decoder_hidden, h_t_tilde, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs, h_t_tilde, len(syntax_matrix), syntax_matrix)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(output_lang.index2word[topi.item()])

            decoder_input = topi.squeeze().detach()

        # Trim the attention matrix to the steps actually taken.
        return decoded_words, decoder_attentions[:di + 1]
def evaluate(sentence, encoder, decoder, max_length=loader.MAX_LENGTH):
    """Translate *sentence* by greedy decoding with an LSTM seq2seq pair.

    The encoder is stepped over the input tokens; the decoder then emits
    one word at a time, feeding back its own top-1 prediction until it
    produces EOS (not appended) or *max_length* words.

    Returns:
        List of decoded words (strings).
    """
    with torch.no_grad():
        src = train.tensorFromSentence(input_lang, sentence)  # (len, 1)
        n_src = src.size(0)

        # (hidden, cell) state pair for the LSTM encoder.
        hidden = (encoder.initHidden().to(device),
                  encoder.initHidden().to(device))
        enc_outputs = torch.zeros(max_length,
                                  encoder.hidden_size).to(device)

        for i in range(n_src):
            step_out, hidden = encoder(src[i], hidden)
            enc_outputs[i] = step_out[0, 0]

        # Decoder starts from SOS and the final encoder state.
        prev = torch.tensor([[loader.SOS_token]]).to(device)
        result = []
        for _ in range(max_length):
            logits, hidden = decoder(prev, hidden)
            # Top-1 greedy choice.
            _, idx = logits.data.topk(1)
            if idx.item() == loader.EOS_token:
                break
            result.append(output_lang.index2word[idx.item()])
            prev = idx.squeeze().detach()

    return result
def evaluate(context, classification, device, sentence, inputs_dict,
             target_dict):
    """Classify *sentence*: encode it with the context RNN, then run the
    classifier head and map the output back to a class label.
    """
    with torch.no_grad():
        tokens = tensorFromSentence(inputs_dict, sentence, device)
        hidden = context.init_hidden()

        # Step the context RNN over every input token.
        for t in range(tokens.size()[0]):
            hidden = context(tokens[t], hidden)

        # First element of the state tuple feeds the classifier.
        logits = classification(hidden[0])
        return classFromTensor(logits, target_dict)
# Example #5
def evaluate(encoder, decoder, sentence, max_length=loader.MAX_LENGTH):
    """Greedily decode *sentence* with a bidirectional LSTM encoder.

    The two directional encoder states are merged (via
    `train.merge_encoder_hiddens`) before decoding. '<EOS>' is appended
    to the output when the end token is produced.

    Returns:
        List of decoded words (strings).
    """
    with torch.no_grad():
        src = train.tensorFromSentence(input_lang, sentence)

        # (hidden, cell) pair; each has one slot per direction.
        hidden = (encoder.initHidden().to(device),
                  encoder.initHidden().to(device))
        enc_outputs = torch.zeros(max_length,
                                  encoder.hidden_size,
                                  device=device)

        for pos in range(src.size(0)):
            step_out, hidden = encoder(src[pos], hidden)
            enc_outputs[pos] += step_out[0, 0]

        # Collapse the bidirectional state into a single decoder state.
        dec_hidden = train.merge_encoder_hiddens(hidden)
        dec_input = torch.tensor([[loader.SOS_token]], device=device)

        words = []
        for _ in range(max_length):
            out, dec_hidden = decoder(dec_input, dec_hidden)
            _, best = out.data.topk(1)
            if best.item() == loader.EOS_token:
                words.append('<EOS>')
                break
            words.append(output_lang.index2word[best.item()])
            dec_input = best.squeeze().detach()

    return words
# Example #6
def evaluate(input_lang,
             output_lang,
             device,
             encoder,
             decoder,
             sentence,
             max_length=MAX_LENGTH):
    """Greedily decode *sentence* with an attentional decoder.

    Returns:
        Tuple `(decoded_words, attentions)` where `decoded_words` ends
        with '<EOS>' if the end token was produced, and `attentions`
        holds one attention row per decoded step.
    """
    with torch.no_grad():
        src = tensorFromSentence(input_lang, sentence, device)
        hidden = encoder.initHidden()

        enc_outputs = torch.zeros(max_length,
                                  encoder.hidden_size,
                                  device=device)

        # Run the encoder over every input token.
        for pos in range(src.size()[0]):
            out, hidden = encoder(src[pos], hidden)
            enc_outputs[pos] += out[0, 0]

        dec_input = torch.tensor([[SOS_token]], device=device)  # SOS
        dec_hidden = hidden

        words = []
        attentions = torch.zeros(max_length, max_length)

        # Greedy decoding, recording attention weights each step.
        for step in range(max_length):
            out, dec_hidden, attn = decoder(dec_input, dec_hidden,
                                            enc_outputs)
            attentions[step] = attn.data
            _, best = out.data.topk(1)
            if best.item() == EOS_token:
                words.append('<EOS>')
                break
            words.append(output_lang.index2word[best.item()])
            dec_input = best.squeeze().detach()

        # Trim to the steps actually decoded.
        return words, attentions[:step + 1]