Example No. 1
input_lang, output_lang, pairs = etl.prepare_data(language)

attn_model = 'general'
hidden_size = 500
n_layers = 2
dropout_p = 0.05
teacher_forcing_ratio = .5
clip = 5.
criterion = nn.NLLLoss()

# Initialize models
encoder = EncoderRNN(input_lang.n_words, hidden_size, n_layers)
decoder = AttentionDecoderRNN(attn_model,
                              hidden_size,
                              output_lang.n_words,
                              n_layers,
                              dropout_p=dropout_p)

learning_rate = 1
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)

# Load model parameters
encoder.load_state_dict(torch.load(
    './data/encoder_params_{}'.format(language)))
decoder.load_state_dict(torch.load(
    './data/decoder_params_{}'.format(language)))
decoder.attention.load_state_dict(
    torch.load('./data/attention_params_{}'.format(language)))
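Example No. 1 only sets up the models, the optimizers, and the loading of saved parameters; teacher_forcing_ratio, clip and criterion are only used inside a training step that the excerpt does not show. The following is a minimal sketch of such a step, assuming an init_hidden() helper on EncoderRNN, a SOS_token start index, and an AttentionDecoderRNN forward signature of (input, context, hidden, encoder_outputs); all of those are guesses about the project classes, not code from the source.

import random
import torch

SOS_token = 0  # assumed start-of-sentence index

def train_step(input_tensor, target_tensor):
    # input_tensor / target_tensor: LongTensors of word indices, shape (seq_len, 1)
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    encoder_hidden = encoder.init_hidden()                                   # assumed helper
    encoder_outputs, encoder_hidden = encoder(input_tensor, encoder_hidden)  # assumed signature

    decoder_input = torch.LongTensor([[SOS_token]])
    decoder_context = torch.zeros(1, hidden_size)
    decoder_hidden = encoder_hidden

    loss = 0
    use_teacher_forcing = random.random() < teacher_forcing_ratio
    target_length = target_tensor.size(0)
    for di in range(target_length):
        decoder_output, decoder_context, decoder_hidden, _ = decoder(        # assumed signature
            decoder_input, decoder_context, decoder_hidden, encoder_outputs)
        loss += criterion(decoder_output, target_tensor[di])
        if use_teacher_forcing:
            decoder_input = target_tensor[di].view(1, 1)                     # feed the ground-truth token
        else:
            decoder_input = decoder_output.argmax(dim=1).view(1, 1).detach() # feed the model's own prediction

    loss.backward()
    torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)
    encoder_optimizer.step()
    decoder_optimizer.step()
    return loss.item() / target_length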
Example No. 2
    return loss.item() / target_length


input_lang, output_lang, pairs = etl.prepare_data(args.language)

attn_model = 'general'
hidden_size = 500
n_layers = 2
dropout_p = 0.05

# Initialize models
encoder = EncoderRNN(input_lang.n_words, hidden_size, n_layers)
decoder = AttentionDecoderRNN(attn_model,
                              hidden_size,
                              output_lang.n_words,
                              n_layers,
                              dropout_p=dropout_p)

# Move models to GPU
encoder.cuda()
decoder.cuda()

# Initialize optimizers and criterion
learning_rate = 0.0001
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)
criterion = nn.NLLLoss()

# Configuring training
n_epochs = 100000
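The return statement at the top of Example No. 2 is the tail of a train() function, and n_epochs configures how many times it is called. A minimal sketch of that epoch loop follows, assuming the train() signature, a print_every reporting interval, and an etl.tensors_from_pair helper that turns a (source, target) pair into index tensors on the GPU; all three are assumptions rather than code shown in the source.

import random

print_every = 1000          # assumed reporting interval
print_loss_total = 0

for epoch in range(1, n_epochs + 1):
    # hypothetical helper: convert a random training pair into index tensors
    input_tensor, target_tensor = etl.tensors_from_pair(random.choice(pairs))
    loss = train(input_tensor, target_tensor, encoder, decoder,
                 encoder_optimizer, decoder_optimizer, criterion)  # assumed signature
    print_loss_total += loss

    if epoch % print_every == 0:
        print('epoch %d, average loss %.4f' % (epoch, print_loss_total / print_every))
        print_loss_total = 0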
Example No. 3
# parser.add_argument('language')
# parser.add_argument('input')
# args = parser.parse_args()
language = 'spa'  # argument parsing is commented out above, so the language is hard-coded
helpers.validate_language_params(language)

input_lang, output_lang, pairs = etl.prepare_data(language)
attn_model = 'general'
hidden_size = 500
n_layers = 2
dropout_p = 0.05

# Initialize models
encoder = EncoderRNN(input_lang.n_words, hidden_size, n_layers)
decoder = AttentionDecoderRNN(attn_model,
                              hidden_size,
                              output_lang.n_words,
                              n_layers,
                              dropout_p=dropout_p)

# Load model parameters
encoder.load_state_dict(
    torch.load('../data/encoder_params_{}'.format(language)))
decoder.load_state_dict(
    torch.load('../data/decoder_params_{}'.format(language)))
decoder.attention.load_state_dict(
    torch.load('../data/attention_params_{}'.format(language)))

# Move models to GPU
encoder.cuda()
decoder.cuda()
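Example No. 3 loads already-trained parameters and moves the models to the GPU, so they are presumably used for inference. A short sketch of how they would typically be switched to evaluation mode before translating; this step and the evaluate() call are assumptions, and evaluate() itself only appears later, in Example No. 5.

encoder.eval()   # put dropout layers into inference mode
decoder.eval()

with torch.no_grad():                    # gradients are not needed for translation
    translation = evaluate(pairs[0][0])  # hypothetical call to an evaluate() helper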
Example No. 4
    torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)
    encoder_opt.step()
    decoder_opt.step()

    return loss.item() / target_length


input_lang, output_lang, pairs = etl.prepare_data(language)  # prepare_data expects the target language, assumed defined earlier in the script
hidden_size = 500
n_layers = 2
dropout_p = 0.1

encoder = EncoderRNN(input_lang.n_words, hidden_size, n_layers)
decoder = AttentionDecoderRNN(hidden_size,
                              output_lang.n_words,
                              n_layers,
                              dropout_p=dropout_p)

encoder = encoder.to(device)
decoder = decoder.to(device)

learning_rate = 0.0001
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate)

criterion = nn.NLLLoss()

n_epochs = 100000
plot_every = 20
print_every = 10
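print_every and plot_every suggest that the running loss is averaged over fixed windows, printed, and recorded for plotting. A minimal sketch of that bookkeeping and of drawing the resulting curve, assuming a plot_losses list filled inside the training loop; the matplotlib usage is an assumption, not shown in the source.

import matplotlib.pyplot as plt

plot_losses = []
plot_loss_total = 0

# inside the training loop:
#     plot_loss_total += loss
#     if epoch % plot_every == 0:
#         plot_losses.append(plot_loss_total / plot_every)
#         plot_loss_total = 0

plt.plot(plot_losses)
plt.xlabel('checkpoint (every {} epochs)'.format(plot_every))
plt.ylabel('average NLL loss')
plt.savefig('loss_curve.png')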
Example No. 5
print('input: %s' % args.input)

# Initialize models
encoder = EncoderRNN(
    input_lang.n_words,
    args.embedding_size,
    args.hidden_size,
    args.n_layers,
    args.dropout
)

decoder = AttentionDecoderRNN(
    output_lang.n_words,
    args.embedding_size,
    args.hidden_size,
    args.attn_model,
    args.n_layers,
    args.dropout
)

# Load model parameters
encoder.load_state_dict(torch.load('./data/encoder_params_{}'.format(args.language)))
decoder.load_state_dict(torch.load('./data/decoder_params_{}'.format(args.language)))
decoder.attention.load_state_dict(torch.load('./data/attention_params_{}'.format(args.language)))

# Move models to device
encoder = encoder.to(device)
decoder = decoder.to(device)


def evaluate(sentence, max_len=10):
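    # The body of evaluate() is truncated in the source; the rest of this function is a
    # hedged sketch of a typical greedy-decoding loop. The etl.tensor_from_sentence
    # helper, the SOS_token / EOS_token constants, the index2word mapping, and the
    # decoder forward signature are assumptions, not the project's actual code.
    input_tensor = etl.tensor_from_sentence(input_lang, sentence).to(device)
    with torch.no_grad():
        encoder_hidden = encoder.init_hidden()                        # assumed helper
        encoder_outputs, encoder_hidden = encoder(input_tensor, encoder_hidden)

        decoder_input = torch.tensor([[SOS_token]], device=device)
        decoder_hidden = encoder_hidden
        decoded_words = []

        for _ in range(max_len):
            decoder_output, decoder_hidden, _ = decoder(              # assumed signature
                decoder_input, decoder_hidden, encoder_outputs)
            top_index = decoder_output.argmax(dim=1).item()
            if top_index == EOS_token:
                break
            decoded_words.append(output_lang.index2word[top_index])
            decoder_input = torch.tensor([[top_index]], device=device)

    return ' '.join(decoded_words)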
Example No. 6
    _ = nn.utils.clip_grad_norm_(encoder.parameters(), args.clip)
    _ = nn.utils.clip_grad_norm_(decoder.parameters(), args.clip)
    encoder_opt.step()
    decoder_opt.step()

    return loss.item() / target_length


input_lang, output_lang, pairs = etl.prepare_data(args.language)

# Initialize models
encoder = EncoderRNN(input_lang.n_words, args.embedding_size, args.hidden_size,
                     args.n_layers, args.dropout)

decoder = AttentionDecoderRNN(output_lang.n_words, args.embedding_size,
                              args.hidden_size, args.attn_model, args.n_layers,
                              args.dropout)
# Move models to device
encoder = encoder.to(device)
decoder = decoder.to(device)

# Initialize optimizers and criterion
encoder_optimizer = optim.Adam(encoder.parameters(), lr=args.lr)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=args.lr)
criterion = nn.NLLLoss()

# Keep track of time elapsed and running averages
start = time.time()
plot_losses = []
print_loss_total = 0  # Reset every print_every
plot_loss_total = 0  # Reset every plot_every
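Examples No. 1, 3 and 5 load encoder, decoder and attention parameters from ./data/, so the natural counterpart at the end of a training run like this one is saving those state dicts. The save paths below mirror the load paths above, but the presence of this code in the original script is an assumption.

torch.save(encoder.state_dict(), './data/encoder_params_{}'.format(args.language))
torch.save(decoder.state_dict(), './data/decoder_params_{}'.format(args.language))
torch.save(decoder.attention.state_dict(), './data/attention_params_{}'.format(args.language))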