Exemplo n.º 1
0
def run(model, voc):
    """Switch *model* to inference mode, build a search decoder, and start chatting.

    model: trained seq2seq model wrapped for decoding.
    voc:   vocabulary object used to map between words and indices.
    """
    # Dropout layers must be disabled for deterministic inference.
    model.eval()

    # Choose the decoding strategy from the global config flag, then
    # instantiate it around the model.
    decoder_cls = BeamSearchDecoder if config.BEAM_SEARCH_ON else GreedySearchDecoder
    searcher = decoder_cls(model)

    # Hand control to the interactive chat loop.
    evaluateInput(searcher, voc)
Exemplo n.º 2
0
def run(args):
    """Build the encoder/decoder chatbot models and train and/or evaluate them.

    args: parsed CLI namespace; this function reads args.train,
    args.evaluate, args.name, and args.beam.

    NOTE(review): relies on module-level globals that must exist before
    this is called — voc, hidden_size, encoder_n_layers, decoder_n_layers,
    dropout, attn_model, device, learning_rate, decoder_learning_ratio,
    model_name, pairs, save_dir, n_iteration, batch_size, print_every,
    save_every, clip, corpus_name, checkpoint_iter — confirm against the
    rest of the file.
    """

    # Initialize word embeddings (shared by encoder and decoder)
    embedding = nn.Embedding(voc.num_words, hidden_size)
    # Initialize encoder & decoder models
    encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
    decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size,
                                  voc.num_words, decoder_n_layers, dropout)
    # Use appropriate device
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    # Initialize optimizers; the decoder uses a scaled learning rate
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(),
                                   lr=learning_rate * decoder_learning_ratio)

    if (args.train):
        # NOTE(review): loadFilename stays None here, so training always
        # starts from freshly initialized weights; trainIters receives it
        # below presumably to signal "no checkpoint" — confirm.
        loadFilename = None
        print('Building encoder and decoder ...')
        print('Building optimizers ...')
        print('Models built and ready to go!')

        # Ensure dropout layers are in train mode
        encoder.train()
        decoder.train()

        # Run training iterations
        print("Starting Training!")
        trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer,
                   decoder_optimizer, embedding, encoder_n_layers,
                   decoder_n_layers, save_dir, n_iteration, batch_size,
                   print_every, save_every, clip, corpus_name, loadFilename,
                   args.name)

    if (args.evaluate):

        # Build the checkpoint path from the model/corpus naming scheme;
        # torch.load below will raise if this file does not exist.
        loadFilename = os.path.join(
            save_dir, model_name, corpus_name,
            '{}-{}_{}_{}'.format(encoder_n_layers, decoder_n_layers,
                                 hidden_size, args.name),
            '{}_checkpoint.tar'.format(checkpoint_iter))

        # Load model if a loadFilename is provided
        if loadFilename:
            # If loading on same machine the model was trained on
            # checkpoint = torch.load(loadFilename)
            # map_location forces tensors onto CPU, so a GPU-trained
            # checkpoint can be loaded on a CPU-only machine.
            checkpoint = torch.load(loadFilename,
                                    map_location=torch.device('cpu'))
            # Pull the individual state dicts out of the checkpoint.
            encoder_sd = checkpoint['en']
            decoder_sd = checkpoint['de']
            encoder_optimizer_sd = checkpoint['en_opt']
            decoder_optimizer_sd = checkpoint['de_opt']
            embedding_sd = checkpoint['embedding']
            # Restore the vocabulary exactly as it was at training time.
            voc.__dict__ = checkpoint['voc_dict']

        # Apply the restored weights to the freshly built modules.
        if loadFilename:
            embedding.load_state_dict(embedding_sd)
            encoder.load_state_dict(encoder_sd)
            decoder.load_state_dict(decoder_sd)
            encoder_optimizer.load_state_dict(encoder_optimizer_sd)
            decoder_optimizer.load_state_dict(decoder_optimizer_sd)

        # Set dropout layers to eval mode before interactive decoding
        encoder.eval()
        decoder.eval()
        # Begin chatting
        evaluateInput(encoder, decoder, voc, args.beam, args.name)
Exemplo n.º 3
0
def evaluate_mode(word_index_dict):
    """Load the trained encoder/decoder pair and start the interactive chat loop.

    word_index_dict: mapping between words and indices used both to load
    the models and to encode/decode user input — presumably word->index;
    verify against model_manager.get_models.
    """
    encoder, decoder = model_manager.get_models(
        word_index_dict)  # Load the trained model weights.
    searcher = network_model.GreedySearchDecoder(encoder, decoder)  # Decoder that produces replies.
    evaluate.evaluateInput(encoder, decoder, searcher,
                           word_index_dict)  # Start accepting user input.
Exemplo n.º 4
0

# NOTE(review): script fragment — encoder, decoder, embedding, loadFilename,
# the *_sd state dicts, and all hyperparameters (learning_rate, clip, ...)
# must be defined earlier in the file; confirm before reuse.

# Ensure dropout layers are in train mode before training.
encoder.train()
decoder.train()

# Initialize optimizers; decoder learning rate is scaled by
# decoder_learning_ratio relative to the encoder's.
print('Building optimizers ...')
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(),
                               lr=learning_rate * decoder_learning_ratio)
# Resume optimizer state when continuing from a checkpoint.
if loadFilename:
    encoder_optimizer.load_state_dict(encoder_optimizer_sd)
    decoder_optimizer.load_state_dict(decoder_optimizer_sd)

# Run training iterations
print("Starting Training!")
trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer,
           decoder_optimizer, embedding, encoder_n_layers, decoder_n_layers,
           save_dir, n_iteration, batch_size, print_every, save_every, clip,
           corpus_name, loadFilename)

# Set dropout layers to eval mode for inference.
encoder.eval()
decoder.eval()

# Initialize the greedy search decoding module.
searcher = GreedySearchDecoder(encoder, decoder, device)

# Begin chatting (uncomment and run the following line to begin)
evaluateInput(searcher, voc)
Exemplo n.º 5
0
######################################################################
# Run Evaluation
# ~~~~~~~~~~~~~~
#
# To chat with your model, run the following block.
#

# NOTE(review): script fragment — encoder, decoder, voc, and MAX_LENGTH
# must be defined earlier in the file.

# Set dropout layers to eval mode so inference is deterministic.
encoder.eval()
decoder.eval()

# Initialize the greedy search decoding module.
searcher = GreedySearchDecoder(encoder, decoder)

# Begin the interactive chat loop; MAX_LENGTH caps generated responses.
evaluateInput(encoder, decoder, searcher, voc, MAX_LENGTH)

######################################################################
# Conclusion
# ----------
#
# That’s all for this one, folks. Congratulations, you now know the
# fundamentals to building a generative chatbot model! If you’re
# interested, you can try tailoring the chatbot’s behavior by tweaking the
# model and training parameters and customizing the data that you train
# the model on.
#
# Check out the other tutorials for more cool deep learning applications
# in PyTorch!
#