Example #1
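# assumes module-level fixtures (`series`, `train_idx`, `valid_idx`) and the usual
# imports (Adam from torch.optim, plus create_seq2seq_data_loader, RNN2RNN, Learner)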
def test_rnn2rnn():
    train_dl = create_seq2seq_data_loader(series,
                                          enc_len=14,
                                          dec_len=7,
                                          time_idx=train_idx,
                                          batch_size=12,
                                          num_iteration_per_epoch=12,
                                          seq_last=False)
    valid_dl = create_seq2seq_data_loader(series,
                                          enc_len=14,
                                          dec_len=7,
                                          time_idx=valid_idx,
                                          batch_size=12,
                                          num_iteration_per_epoch=12,
                                          seq_last=False)
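    # sliding-window loaders: 14-step encoder input, 7-step decoder horizon,
    # 12 windows per batch, 12 batches per epoch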
    model = RNN2RNN(1,
                    256,
                    64,
                    num_layers=1,
                    attn_heads=1,
                    attn_size=12,
                    rnn_type='LSTM')
    model.cuda()
    opt = Adam(model.parameters(), 0.001)
    learner = Learner(model, opt, ".")
    learner.fit(10, train_dl, valid_dl, early_stopping=False)
Example #2
def test_wave2wave_v1():
    train_dl = create_seq2seq_data_loader(series,
                                          enc_len=14,
                                          dec_len=7,
                                          time_idx=train_idx,
                                          batch_size=12,
                                          num_iteration_per_epoch=12,
                                          seq_last=True)
    valid_dl = create_seq2seq_data_loader(series,
                                          enc_len=14,
                                          dec_len=7,
                                          time_idx=valid_idx,
                                          batch_size=12,
                                          num_iteration_per_epoch=12,
                                          seq_last=True)
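    # seq_last=True presumably keeps the time axis last, i.e. (batch, channel, time),
    # which suits the convolutional Wave2Wave model; the RNN example above uses seq_last=False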
    model = Wave2WaveV1(1)
    model.cuda()
    opt = Adam(model.parameters(), 0.001)
    learner = Learner(model, opt, ".")
    learner.fit(100, train_dl, valid_dl, early_stopping=False)
Example #3
                dec_num_size=21,
                attn_heads=4,
                attn_size=32,
                residual=False,
                dec_cat_size=[(3049, 16), (7, 1), (10, 1), (3, 1), (3, 1),
                              (32, 4), (5, 1), (5, 1), (3, 1)],
                dropout=0.1,
                num_layers=1,
                rnn_type="GRU")
opt = Adam(model.parameters(), 0.001)
loss_fn = MSELoss()
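# note: loss_fn is built here but never handed to Learner in this snippet,
# so the model presumably falls back to its own default loss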
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=1e-4, gamma=0.998)
model.cuda()
learner = Learner(model,
                  opt,
                  './m5_rnn',
                  lr_scheduler=lr_scheduler,
                  verbose=5000)
learner.fit(1000,
            train_dl,
            valid_dl,
            patient=64,
            start_save=-1,
            early_stopping=True)
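# `patient` is the early-stopping patience in epochs; `start_save` presumably
# controls the epoch from which checkpoints begin to be written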
learner.load(174)
learner.model.eval()


def predict_submission(model, test_dl):
    model.eval()
    test_dl.test()
Example #4
import numpy as np
from torch.optim import Adam

batch_size = 16
enc_len = 36
dec_len = 12
series = np.sin(np.arange(0, 1000))
series = series.reshape(1, 1, -1)
train_idx, valid_idx = forward_split(np.arange(series.shape[2]),
                                     enc_len=14,
                                     valid_size=200)
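# hold out the last 200 time steps for validation; enc_len=14 presumably keeps a
# 14-step overlap so the first validation windows still have encoder history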

train_dl = create_seq2seq_data_loader(series,
                                      enc_len=14,
                                      dec_len=7,
                                      time_idx=train_idx,
                                      batch_size=12,
                                      sampling_rate=1.,
                                      seq_last=True)
valid_dl = create_seq2seq_data_loader(series,
                                      enc_len=14,
                                      dec_len=7,
                                      time_idx=valid_idx,
                                      batch_size=12,
                                      sampling_rate=1.,
                                      seq_last=True)
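# Wave2Wave: presumably a WaveNet-style stack of dilated convolutions
# (5 layers in 1 block) mapping the 14-step input window to the 7-step target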
model = Wave2Wave(1, debug=False, num_layers=5, num_blocks=1)
model.cuda()
opt = Adam(model.parameters(), 0.001)
learner = Learner(model, opt, ".")
learner.fit(100, train_dl, valid_dl, early_stopping=False)
Example #5
                dec_cat_size=[(62, 4)],
                residual=True,
                beta1=.0,
                beta2=.0,
                attn_heads=1,
                attn_size=128,
                num_layers=1,
                dropout=0.0,
                rnn_type='GRU')
opt = Adam(model.parameters(), 0.001)
loss_fn = MSELoss()
model.cuda()
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5)
learner = Learner(model,
                  opt,
                  './power_preds',
                  verbose=20,
                  lr_scheduler=lr_scheduler)
learner.fit(300,
            train_frame,
            valid_frame,
            patient=128,
            start_save=1,
            early_stopping=True)
learner.load(299)
learner.model.eval()

preds = []
trues = []
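# drop the decoder-side inputs from each validation batch so the model predicts
# without access to future covariates (batch[0] is presumably the input dict)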
for batch in valid_frame:
    batch[0].pop('dec_x')
Example #6
val_dl = create_seq2seq_data_loader(series,
                                    enc_len=ENC_LEN,
                                    dec_len=DEC_LEN,
                                    time_idx=val_idx,
                                    batch_size=BATCH_SIZE,
                                    num_iteration_per_epoch=4,
                                    features=[series_lags, series_lags_corr],
                                    seq_last=False,
                                    device='cuda',
                                    mode='valid')
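# extra inputs passed via `features=`: lagged copies of the series plus what the
# name suggests are lag-correlation features, fed alongside the raw target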

model = RNN2RNN(1, 128, 128, enc_num_size=8, dec_num_size=8)
opt = Adam(model.parameters(), 0.001)
lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=1e-4, gamma=0.998)
model.cuda()
learner = Learner(model,
                  opt,
                  './m5_rnn',
                  lr_scheduler=lr_scheduler,
                  verbose=100)
learner.fit(500,
            trn_dl,
            val_dl,
            patient=64,
            start_save=-1,
            early_stopping=False)

import torch
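# tensor memory currently allocated on the GPU, converted from bytes to MiB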
torch.cuda.memory_allocated() / 1024**2
Example #7
train_dl = create_seq2seq_data_loader(series[:, :, train_idx],
                                      enc_len,
                                      dec_len,
                                      sampling_rate=0.1,
                                      batch_size=batch_size,
                                      seq_last=True,
                                      device='cuda')

wave = Wave2Wave(target_size=1,
                 num_layers=6,
                 num_blocks=1,
                 dropout=0.1,
                 loss_fn=RMSE())
wave.cuda()
opt = torch.optim.Adam(wave.parameters(), lr=lr)
wave_learner = Learner(
    wave,
    opt,
    root_dir="./wave",
)
wave_learner.fit(max_epochs=epoch,
                 train_dl=train_dl,
                 valid_dl=valid_dl,
                 early_stopping=True,
                 patient=16)
wave_learner.load(wave_learner.best_epoch)
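# reload the checkpoint from the best validation epoch found during early stopping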


import matplotlib.pyplot as plt
wave_preds = wave_learner.model.predict(
    torch.tensor(series[:, :, test_idx[:-12]]).float().cuda(),
    12).cpu().numpy().reshape(-1)
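# condition on the test history (all but the last 12 steps) and forecast 12 steps ahead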

plt.plot(series[:, :, -48:-12].reshape(-1))