# encoding: utf-8 """ @author : zhirui zhou @contact: [email protected] @time : 2020/6/1 16:37 """ from deepseries.data import create_seq2seq_data_loader import numpy as np enc_len = 12 dec_len = 8 series = np.random.rand(1000, 8, 100) data_loader = create_seq2seq_data_loader(series, enc_len, dec_len, time_idx=np.arange(series.shape[2]), batch_size=32, seq_last=True) for i in data_loader: pass i[0]['enc_x'].shape i[1].shape
# Normalize the target series and its lag features, then split the time axis
# into train / valid / test index ranges and build the data loaders.
# This snippet continues from earlier preprocessing: series, series_lags,
# series_lags_corr and the upper-case constants (DROP_BEFORE, ENC_LEN, DEC_LEN,
# VALID_LEN, TEST_LEN, BATCH_SIZE) are assumed to be defined above.
# Import paths below follow the package layout used in these examples; the
# deepseries.functional path is assumed.
import numpy as np
import deepseries.functional as F
from deepseries.data import Value, create_seq2seq_data_loader, forward_split

series, series_mean, series_std = F.normalize(series[:, np.newaxis, DROP_BEFORE:], axis=2)
series_lags = F.normalize(series_lags[:, :, DROP_BEFORE:])[0]
series_lags = Value(series_lags, 'xy_lags')

time_idxes = np.arange(series.shape[2])
trn_idx, val_idx = forward_split(time_idxes, ENC_LEN, VALID_LEN + TEST_LEN)
val_idx, test_idx = forward_split(val_idx, ENC_LEN, TEST_LEN)

trn_dl = create_seq2seq_data_loader(
    series, enc_len=ENC_LEN, dec_len=DEC_LEN, time_idx=trn_idx,
    batch_size=BATCH_SIZE, features=[series_lags, series_lags_corr],
    seq_last=True, device='cuda', mode='train',
    num_workers=0, pin_memory=False)
val_dl = create_seq2seq_data_loader(
    series, enc_len=ENC_LEN, dec_len=DEC_LEN, time_idx=val_idx,
    batch_size=BATCH_SIZE, features=[series_lags, series_lags_corr],
    seq_last=True, device='cuda', mode='valid')
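
# --- Illustration only: the assumed semantics of forward_split. A minimal
# sketch in plain numpy: the last `valid_size` steps form the hold-out target
# range, and the validation index set keeps `enc_len` extra steps of history so
# every validation window still has a full encoder context. `toy_forward_split`
# is a hypothetical stand-in, not the library function.
def toy_forward_split(time_idx, enc_len, valid_size):
    train_idx = time_idx[:-valid_size]               # everything before the hold-out range
    valid_idx = time_idx[-(valid_size + enc_len):]   # hold-out range plus enc_len of history
    return train_idx, valid_idx

t = np.arange(100)
trn, val = toy_forward_split(t, enc_len=12, valid_size=20)
print(trn[-1], val[0], len(val))   # 79 68 32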
# Train a Wave2Wave model on a toy sine wave.
# (Model/Learner import paths are assumed from the deepseries package layout.)
import numpy as np
from torch.optim import Adam
from deepseries.data import create_seq2seq_data_loader, forward_split
from deepseries.models import Wave2Wave
from deepseries.train import Learner

batch_size = 16
enc_len = 36
dec_len = 12

# A single univariate series: shape (num_series=1, dims=1, time_steps=1000).
series = np.sin(np.arange(0, 1000))
series = series.reshape(1, 1, -1)

train_idx, valid_idx = forward_split(np.arange(series.shape[2]), enc_len=enc_len, valid_size=200)
train_dl = create_seq2seq_data_loader(series, enc_len=enc_len, dec_len=dec_len, time_idx=train_idx,
                                      batch_size=batch_size, sampling_rate=1., seq_last=True)
valid_dl = create_seq2seq_data_loader(series, enc_len=enc_len, dec_len=dec_len, time_idx=valid_idx,
                                      batch_size=batch_size, sampling_rate=1., seq_last=True)

model = Wave2Wave(1, debug=False, num_layers=5, num_blocks=1)
model.cuda()
opt = Adam(model.parameters(), 0.001)
learner = Learner(model, opt, ".")
learner.fit(100, train_dl, valid_dl, early_stopping=False)
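
# --- Optional sanity check (an assumption-laden sketch, not library API beyond
# the batch layout shown in the first snippet): a naive last-value persistence
# baseline on the validation loader. Assumes each batch is (inputs, target)
# with the encoder series under inputs['enc_x'] and tensors laid out as
# (batch, dims, seq_len).
import torch

with torch.no_grad():
    sq_err, n = 0.0, 0
    for inputs, target in valid_dl:
        last = inputs['enc_x'][:, :, -1:]        # last observed value per series
        naive = last.expand_as(target)           # repeat it over the decode horizon
        sq_err += ((naive - target) ** 2).sum().item()
        n += target.numel()
print("persistence RMSE:", (sq_err / n) ** 0.5)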
# Split the time axis into train / valid / test, normalize, and build loaders.
# Continues from earlier setup: series, series_len, enc_len, dec_len, valid_size,
# test_size and batch_size are defined above; RMSE is the deepseries loss class
# (import path assumed).
train_idx, valid_idx = forward_split(np.arange(series_len), enc_len=enc_len,
                                     valid_size=valid_size + test_size)
valid_idx, test_idx = forward_split(valid_idx, enc_len, test_size)

# Mask the test range so it is not used for calculating mean/std.
mask = np.zeros_like(series).astype(bool)
mask[:, :, test_idx] = True
series, mu, std = F.normalize(series, axis=2, fillna=True, mask=mask)

# wave2wave train
train_dl = create_seq2seq_data_loader(series[:, :, train_idx], enc_len, dec_len,
                                      sampling_rate=0.1, batch_size=batch_size,
                                      seq_last=True, device='cuda')
valid_dl = create_seq2seq_data_loader(series[:, :, valid_idx], enc_len, dec_len,
                                      batch_size=batch_size, seq_last=True, device='cuda')

wave = Wave2Wave(target_size=1, num_layers=6, num_blocks=1, dropout=0.1, loss_fn=RMSE())
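
# --- Continuation sketch: fitting the model above with the same Learner/Adam
# pattern used in the sine-wave example. The epoch count and learning rate are
# illustrative choices, not values from the original.
from torch.optim import Adam

wave.cuda()
opt = Adam(wave.parameters(), 0.001)
learner = Learner(wave, opt, ".")
learner.fit(100, train_dl, valid_dl, early_stopping=False)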