Code example #1
def main():
    embedding = nn.Embedding(voc.num_words, hidden_size)
    encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
    decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size,
                                  voc.num_words, decoder_n_layers, dropout)

    # Load the checkpoint once and restore both state dicts from it
    checkpoint = torch.load(model_save_pth, map_location=device)
    encoder.load_state_dict(checkpoint['en'])
    decoder.load_state_dict(checkpoint['de'])

    # Move the models to the evaluation device
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    encoder.eval()
    decoder.eval()

    searcher = GreedySearchDecoder(encoder, decoder)

    for sentence in pick_n_valid_sentences(10):
        decoded_words = evaluate(searcher, sentence)
        print('Human: {}'.format(sentence))
        print('Bot: {}'.format(''.join(decoded_words)))
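For context, the 'en' and 'de' entries read above imply the checkpoint is a plain dict of state dicts saved during training. A minimal sketch of such a save call, assuming the same key names (example #3 below also reads an 'embedding' entry); the exact layout depends on the training script:

import torch

# Hypothetical save call producing a checkpoint compatible with the loading
# code above; the 'en'/'de'/'embedding' key names follow the usage in these
# examples, everything else is an assumption.
torch.save({
    'en': encoder.state_dict(),
    'de': decoder.state_dict(),
    'embedding': embedding.state_dict(),
}, model_save_pth)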
Code example #2
    print('checkpoint: ' + str(checkpoint))
    # Load model
    checkpoint = torch.load(checkpoint)
    encoder_sd = checkpoint['en']
    decoder_sd = checkpoint['de']

    print('Building encoder and decoder ...')
    # Initialize encoder & decoder models
    encoder = EncoderRNN(input_lang.n_words, hidden_size, encoder_n_layers, dropout)
    decoder = LuongAttnDecoderRNN(attn_model, hidden_size, output_lang.n_words, decoder_n_layers, dropout)

    encoder.load_state_dict(encoder_sd)
    decoder.load_state_dict(decoder_sd)

    # Use appropriate device
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    print('Models built and ready to go!')

    # Set dropout layers to eval mode
    encoder.eval()
    decoder.eval()

    # Initialize search module
    searcher = GreedySearchDecoder(encoder, decoder)
    for input_sentence, target_sentence in pick_n_valid_sentences(input_lang, output_lang, 10):
        decoded_words = evaluate(searcher, input_sentence, input_lang, output_lang)
        print('> {}'.format(input_sentence))
        print('= {}'.format(target_sentence))
        print('< {}'.format(' '.join(decoded_words)))
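Both examples call an evaluate helper that is not shown. A minimal sketch of what such a helper typically looks like for this kind of seq2seq chatbot, roughly following the PyTorch chatbot tutorial these projects are based on; indexesFromSentence, the device variable, and the exact signature are assumptions (the two examples above pass different arguments):

import torch

MAX_LENGTH = 10  # assumed maximum decoding length

def evaluate(searcher, sentence, voc, device, max_length=MAX_LENGTH):
    # Convert the sentence into a batch of word indexes (batch size 1)
    indexes_batch = [indexesFromSentence(voc, sentence)]  # hypothetical helper
    lengths = torch.tensor([len(indexes) for indexes in indexes_batch])
    # Time-major input of shape (seq_len, 1), as the encoder expects
    input_batch = torch.LongTensor(indexes_batch).transpose(0, 1).to(device)
    # Greedy decoding: the searcher returns token indexes and their scores
    tokens, scores = searcher(input_batch, lengths, max_length)
    # Map indexes back to words
    return [voc.index2word[token.item()] for token in tokens]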
Code example #3
File: main.py  Project: AaronGrainer/pytorch-chatbot
def main():
    corpus_name = "cornell movie-dialogs corpus"
    corpus = os.path.join("data", corpus_name)

    printLines(os.path.join(corpus, "movie_lines.txt"))

    # Define path to new file
    datafile = os.path.join(corpus, "formatted_movie_lines.txt")
    linefile = os.path.join(corpus, "movie_lines.txt")
    conversationfile = os.path.join(corpus, "movie_conversations.txt")

    # Initialize lines dict, conversations list, and field ids
    MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"]
    MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"]

    # Load lines and process conversations
    preprocess = Preprocess(datafile, linefile, conversationfile, MOVIE_LINES_FIELDS, MOVIE_CONVERSATIONS_FIELDS)
    preprocess.loadLines()
    preprocess.loadConversations()
    preprocess.writeCSV()

    # Load/Assemble voc and pairs
    save_dir = os.path.join("data", "save")
    dataset = Dataset(corpus, corpus_name, datafile)
    voc, pairs = dataset.loadPrepareData()
    
    # # Print some pairs to validate
    # print("\npairs:")
    # for pair in pairs[:10]:
    #   print(pair)

    # Trim voc and pairs
    pairs = dataset.trimRareWords(voc, pairs, MIN_COUNT)

    # Example for validation
    small_batch_size = 5
    batches = dataset.batch2TrainData(voc, [random.choice(pairs) for _ in range(small_batch_size)])
    input_variable, lengths, target_variable, mask, max_target_len = batches
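    # In the PyTorch chatbot tutorial this code follows (and presumably in this
    # project's Dataset wrapper as well), batch2TrainData returns:
    #   input_variable  - padded input indexes, shape (max_input_len, batch_size)
    #   lengths         - input sequence lengths, shape (batch_size,)
    #   target_variable - padded target indexes, shape (max_target_len, batch_size)
    #   mask            - padding mask with the same shape as target_variable
    #   max_target_len  - longest target length in the batch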

    print("input_variable:", input_variable)
    print("lengths:", lengths)
    print("target_variable:", target_variable)
    print("mask:", mask)
    print("max_target_len:", max_target_len)

    # Configure models
    model_name = 'cb_model'
    attn_model = 'dot'
    #attn_model = 'general'
    #attn_model = 'concat'
    hidden_size = 500
    encoder_n_layers = 2
    decoder_n_layers = 2
    dropout = 0.1
    batch_size = 64

    # Set checkpoint to load from; set to None if starting from scratch
    loadFilename = None
    checkpoint_iter = 4000
    #loadFilename = os.path.join(save_dir, model_name, corpus_name,
    #                            '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size),
    #                            '{}_checkpoint.tar'.format(checkpoint_iter))

    if loadFilename:
        # If loading on same machine the model was trained on
        checkpoint = torch.load(loadFilename)
        # If loading a model trained on GPU to CPU
        #checkpoint = torch.load(loadFilename, map_location=torch.device('cpu'))
        encoder_sd = checkpoint['en']
        decoder_sd = checkpoint['de']
        encoder_optimizer_sd = checkpoint['en_opt']
        decoder_optimizer_sd = checkpoint['de_opt']
        embedding_sd = checkpoint['embedding']
        voc.__dict__ = checkpoint['voc_dict']

    print('Building encoder and decoder ...')
    # Initialize word embeddings
    embedding = nn.Embedding(voc.num_words, hidden_size)
    if loadFilename:
        embedding.load_state_dict(embedding_sd)
    # Initialize encoder & decoder models
    encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
    decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
    if loadFilename:
        encoder.load_state_dict(encoder_sd)
        decoder.load_state_dict(decoder_sd)
    # Use appropriate device
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    print('Models built and ready to go!')

    # Configure training/optimization
    clip = 50.0
    teacher_forcing_ratio = 1.0
    learning_rate = 0.0001
    decoder_learning_ratio = 5.0
    n_iteration = 4000
    print_every = 1
    save_every = 500

    # Ensure dropout layers are in train mode
    encoder.train()
    decoder.train()

    # Initialize optimizers
    print('Building optimizers ...')
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)
    if loadFilename:
        encoder_optimizer.load_state_dict(encoder_optimizer_sd)
        decoder_optimizer.load_state_dict(decoder_optimizer_sd)

    # Run training iterations
    print("Starting Training!")
    model = Model(dataset.batch2TrainData, teacher_forcing_ratio)
    model.trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
                     embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size,
                     print_every, save_every, clip, corpus_name, loadFilename)

    # Set dropout layers to eval mode
    encoder.eval()
    decoder.eval()

    # Initialize search module
    searcher = GreedySearchDecoder(encoder, decoder)
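Example #3 stops right after constructing the searcher, so the snippet never shows it being used. A minimal, hypothetical continuation of main() in the same style as examples #1 and #2 (pick_n_valid_sentences and the evaluate signature are assumptions, not part of the original file):

    # Hypothetical continuation: greedy-decode a few validation sentences
    for sentence in pick_n_valid_sentences(10):
        decoded_words = evaluate(searcher, sentence, voc, device)
        print('> {}'.format(sentence))
        print('< {}'.format(' '.join(decoded_words)))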