Example #1
0
# --- SPDG training hyperparameters ---------------------------------------
# Learning rates for the two Adam optimizers below.  NOTE(review): the dual
# rate is 100x the primal rate — presumably deliberate for saddle-point
# (primal-dual) training; confirm against the SPDG implementation.
primal_lr = 1e-6
dual_lr = 1e-4

num_epochs = 1000
log_every = 100  # batches
test_every = 5  # epochs

# Independent Adam optimizers for the model's primal and dual parameter
# groups (model.primal / model.dual are project-defined submodules).
optimizer_primal = torch.optim.Adam(model.primal.parameters(), lr=primal_lr)
optimizer_dual = torch.optim.Adam(model.dual.parameters(), lr=dual_lr)

# Run the primal-dual training loop and collect a metrics history.
# NOTE(review): SPDG is project-local; the meaning of the positional
# arguments is inferred from their names — verify against its definition.
history = SPDG(model,
               optimizer_primal,
               optimizer_dual,
               sequence_loader,
               data_loader,
               test_loader,
               num_epochs,
               log_every,
               test_every,
               eval_predictions_on_data=True,
               show_dual=False)

# %% DUAL TRAINING (CONTINUATION)
# Alternative invocation kept for reference: resume training by passing the
# previous `history` back into SPDG.
# history = SPDG(model, optimizer_primal, optimizer_dual, sequence_loader,
#                data_loader, test_loader, num_epochs=10, log_every=100,
#                test_every=1, eval_predictions_on_data=True, show_dual=True, history=history)

# # %% DUAL TRAINING (CONTINUATION VERY LONG)
# history = SPDG(model, optimizer_primal, optimizer_dual, sequence_loader,
#                test_loader, num_epochs=1000, log_every=50, test_every=1, history=history)
Example #2
0
    history = History()
    for idx in model.dual:
        history['dual ' + str(idx)] = []

# Train in fixed-size chunks with periodic checkpointing: each SPDG call
# presumably runs `save_every` epochs (it occupies the position other
# examples fill with `num_epochs` — TODO confirm against SPDG's signature),
# and a checkpoint is written after every chunk so long runs can resume.
epochs_done = 0
while epochs_done < num_epochs:
    # Pass `history` back in so metrics accumulate across chunks.
    history = SPDG(model,
                   optimizer_primal,
                   optimizer_dual,
                   sequence_loader,
                   data_loader,
                   test_loader,
                   save_every,
                   log_every,
                   test_every,
                   sequence_test_loader=sequence_test_loader,
                   predictions_on_data=predictions_on_data,
                   show_dual=show_dual,
                   predictions_on_sequences=predictions_on_sequences,
                   ngram_data_stats=ngram_data_stats,
                   ngram_test_stats=ngram_test_stats,
                   loss_on_test=loss_on_test,
                   history=history)
    # Persist everything needed to resume (project-local `save` helper).
    save(history, model, ngram, optimizer_primal, optimizer_dual, primal_lr,
         dual_lr)
    # NOTE(review): overshoots num_epochs when it is not a multiple of
    # save_every — confirm that is acceptable.
    epochs_done += save_every

# %% PLOTTING

Example #3
0
                                        lr=primal_lr)
    optimizer_dual = torch.optim.Adam(model.dual.parameters(), lr=dual_lr)

    history = History()
    for idx in model.dual:
        history['dual ' + str(idx)] = []

# Same chunked-training pattern as the other examples: each SPDG call
# presumably runs `save_every` epochs (TODO confirm against SPDG's
# signature), checkpointing after every chunk via the project-local `save`.
epochs_done = 0
while epochs_done < num_epochs:
    # `history` is threaded through each call so metrics accumulate.
    history = SPDG(model,
                   optimizer_primal,
                   optimizer_dual,
                   sequence_loader,
                   data_loader,
                   test_loader,
                   save_every,
                   log_every,
                   test_every,
                   sequence_test_loader=sequence_test_loader,
                   show_dual=False,
                   eval_predictions_on_sequences=True,
                   history=history)
    save(history, model, ngram, optimizer_primal, optimizer_dual, primal_lr,
         dual_lr)
    # NOTE(review): overshoots num_epochs when it is not a multiple of
    # save_every — confirm that is acceptable.
    epochs_done += save_every

# %% PLOTTING TEST

xs = np.arange(len(history['predictions'])) * test_every
ys = [[
    100.0 - preds[i, i] / preds[i].sum() * 100
Example #4
0
File: main.py  Project: gcie/licencjat
# Build and initialize the project-local Model from an n-gram distribution.
model = Model(ngram, output_size=2)
model.to(DEVICE)
model.init_weights()

# Learning rates for the primal/dual optimizers.  NOTE(review): dual rate is
# 100x the primal rate — presumably intentional for saddle-point training.
primal_lr = 1e-6
dual_lr = 1e-4

optimizer_primal = torch.optim.Adam(model.primal.parameters(), lr=primal_lr)
optimizer_dual = torch.optim.Adam(model.dual.parameters(), lr=dual_lr)

# Run primal-dual training.  NOTE(review): sequence_loader is passed twice
# here (in both the sequence-loader and data-loader positions) — confirm
# this is intentional and not a copy/paste slip.
history = SPDG(model,
               optimizer_primal,
               optimizer_dual,
               sequence_loader,
               sequence_loader,
               test_loader,
               num_epochs=1000,
               log_every=100,
               test_every=5,
               eval_predictions_on_data=True,
               show_dual=True)

# %% SAVE
# Checkpoint name and free-form run description.
fname = 't1-1-1'
comment = ''

# NOTE(review): np.save on arbitrary Python objects relies on pickling;
# reading these back requires np.load(..., allow_pickle=True).
np.save(fname + '_hist', history)
np.save(fname + '_model', model)
np.save(fname + '_ngram', ngram)

with open(fname + '_doc', "w+") as doc: