# Ejemplo n.º 1 (Example 1)
# --- Example 1: RNN seq2seq training (M5-style dataset) ---
# NOTE(review): `Adam`, `MSELoss`, `ReduceCosineAnnealingLR`, `Learner`,
# `model`, `train_dl` and `valid_dl` are defined/imported outside this
# snippet — verify they are in scope before running.
opt = Adam(model.parameters(), 0.001)
loss_fn = MSELoss()  # NOTE(review): bound here but not visibly passed to Learner
# Cosine-annealing-style LR schedule (project-specific scheduler; exact
# meaning of the `64` and `gamma` arguments — TODO confirm against its docs).
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=1e-4, gamma=0.998)
model.cuda()  # move parameters to the GPU before training starts
learner = Learner(model,
                  opt,
                  './m5_rnn',  # checkpoint / log directory
                  lr_scheduler=lr_scheduler,
                  verbose=5000)
learner.fit(1000,  # max epochs
            train_dl,
            valid_dl,
            patient=64,  # early-stopping patience (this API spells it "patient")
            start_save=-1,
            early_stopping=True)
# NOTE(review): hard-coded checkpoint epoch — assumes epoch 174 was saved.
learner.load(174)
learner.model.eval()  # switch to inference mode


def predict_submission(model, test_dl):
    """Run the model over the test loader and return stacked predictions.

    Parameters
    ----------
    model : object
        Must provide ``eval()`` and ``predict(**batch)`` returning a tuple
        whose first element is a tensor of predictions.
    test_dl : iterable
        Yields dict batches; must also provide a ``test()`` method that
        switches the loader into test mode (project-specific API — TODO
        confirm exact semantics).

    Returns
    -------
    numpy.ndarray
        All batch predictions concatenated along axis 0 and squeezed.
    """
    import torch  # local import: only needed for the no_grad context

    model.eval()
    test_dl.test()
    preds = []
    # FIX: run inference inside no_grad so autograd does not build a graph
    # per batch — the original relied on detach() alone, which still paid
    # the graph-construction cost and held activations in memory.
    with torch.no_grad():
        for batch in test_dl:
            batch.pop('dec_x')  # decoder inputs are not available at test time
            preds.append(model.predict(**batch)[0].cpu().detach().numpy())
    return np.concatenate(preds, axis=0).squeeze()


# Produce the submission predictions with the restored (epoch-174) model.
preds = predict_submission(learner.model, test_dl)
# Ejemplo n.º 2 (Example 2)
                 num_blocks=1,
                 dropout=0.1,
                 loss_fn=RMSE())
# NOTE(review): the `Wave2Wave(...)` constructor call that binds `wave` is
# truncated above in this snippet — these lines assume `wave`, `lr`,
# `epoch`, `train_dl`, `valid_dl` and `Learner` are already defined.
wave.cuda()  # move the model to GPU before building the optimizer
opt = torch.optim.Adam(wave.parameters(), lr=lr)
wave_learner = Learner(
    wave,
    opt,
    root_dir="./wave",  # checkpoint / log directory
)
wave_learner.fit(max_epochs=epoch,
                 train_dl=train_dl,
                 valid_dl=valid_dl,
                 early_stopping=True,
                 patient=16)  # early-stopping patience
# Restore the weights from the best validation epoch.
wave_learner.load(wave_learner.best_epoch)

# rnn2rnn train
# Build seq2seq data loaders from a 3-D `series` array; indexing the last
# axis suggests a (batch, channel, time) layout — TODO confirm.
train_dl = create_seq2seq_data_loader(series[:, :, train_idx],
                                      enc_len,
                                      dec_len,
                                      sampling_rate=0.1,  # subsample training windows
                                      batch_size=batch_size,
                                      seq_last=False,
                                      device='cuda')
# Validation loader: same geometry, no window subsampling.
valid_dl = create_seq2seq_data_loader(series[:, :, valid_idx],
                                      enc_len,
                                      dec_len,
                                      batch_size=batch_size,
                                      seq_last=False,
                                      device='cuda')
# --- RNN2RNN training on the power dataset ---
opt = Adam(model.parameters(), 0.001)
loss_fn = MSELoss()  # NOTE(review): bound here but not visibly passed to Learner
model.cuda()  # move parameters to GPU before training
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5)
learner = Learner(model,
                  opt,
                  './power_preds',  # checkpoint / log directory
                  verbose=20,
                  lr_scheduler=lr_scheduler)
# FIX: the original called fit(train_frame, valid_frame), but no such names
# are defined in this script — the loaders built above under "rnn2rnn train"
# are `train_dl` / `valid_dl`.
learner.fit(300,  # max epochs
            train_dl,
            valid_dl,
            patient=128,  # early-stopping patience
            start_save=1,
            early_stopping=True)
# FIX: load the best checkpoint instead of the hard-coded final epoch (299),
# which may not exist if early stopping fired before epoch 299; this matches
# the `wave_learner.load(wave_learner.best_epoch)` pattern used elsewhere.
learner.load(learner.best_epoch)
learner.model.eval()  # switch to inference mode

# Collect predictions and ground truth over the validation set.
# NOTE(review): `valid_frame` is not defined in this snippet (the loader
# built above is `valid_dl`); batches here are (inputs_dict, target) pairs —
# TODO confirm against the actual loader implementation.
preds = []
trues = []
for batch in valid_frame:
    batch[0].pop('dec_x')  # drop decoder inputs so the model free-runs
    preds.append(learner.model(**batch[0])[0])
    trues.append(batch[1])

# Concatenate along dim 2 (assumes a (batch, channel, time) layout —
# TODO confirm), then de-normalize with the stored mean/std.
trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean
preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean

k = 42  # index of the series to visualize

# NOTE(review): `plt` is only imported further down in this file — ensure
# matplotlib is imported before this line runs.
plt.plot(trues[k].reshape(-1))
# --- Wave2Wave (WaveNet-style seq2seq) baseline ---
wave = Wave2Wave(target_size=1,  # univariate target
                 num_layers=6,
                 num_blocks=1,
                 dropout=0.1,
                 loss_fn=RMSE())
wave.cuda()  # move the model to GPU before building the optimizer
opt = torch.optim.Adam(wave.parameters(), lr=lr)
wave_learner = Learner(
    wave,
    opt,
    root_dir="./wave",  # checkpoint / log directory
)
wave_learner.fit(max_epochs=epoch,
                 train_dl=train_dl,
                 valid_dl=valid_dl,
                 early_stopping=True,
                 patient=16)  # early-stopping patience
# Restore the weights from the best validation epoch.
wave_learner.load(wave_learner.best_epoch)

# NOTE(review): mid-file import — convention is to import at the top of the
# file; `plt` is already used earlier in this script.
import matplotlib.pyplot as plt
# Free-run forecast: feed history up to the last 12 test steps and predict
# a 12-step horizon.
wave_preds = wave_learner.model.predict(
    torch.tensor(series[:, :, test_idx[:-12]]).float().cuda(),
    12).cpu().numpy().reshape(-1)

plt.plot(series[:, :, -48:-12].reshape(-1))  # recent history for context
# Forecast vs. ground truth over the final 12 steps (x positions 36..47).
plt.plot(np.arange(36, 48), wave_preds, label="wave2wave preds")
plt.plot(np.arange(36, 48),
         series[:, :, test_idx[-12:]].reshape(-1),
         label="target")
plt.legend()