Example #1
import numpy as np
import deepseries.functional as F
from deepseries.data import Value, create_seq2seq_data_loader, forward_split

# `series` and the constants LAGS, DROP_BEFORE, ENC_LEN, DEC_LEN, VALID_LEN,
# TEST_LEN and BATCH_SIZE are defined in the part of this example that is not shown.
# Zero entries are treated as missing when locating each series' valid range.
series_is_zero = series == 0
starts, ends = F.get_valid_start_end(series_is_zero)
series_lags = F.make_lags(series, LAGS, use_smooth=True)
series_lags_corr = F.batch_autocorr(series, LAGS, starts, ends, threshold=1.05)
series_lags_corr = F.normalize(series_lags_corr, axis=0)[0]
series_lags_corr = Value(series_lags_corr, name='series_lags_corr')
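# series_lags holds smoothed lagged copies of the target, and series_lags_corr
# scores each lag by its autocorrelation over every series' valid range; both
# are wrapped as Value features for the data loaders below.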

series, series_mean, series_std = F.normalize(
    series[:, np.newaxis, DROP_BEFORE:], axis=2)
series_lags = F.normalize(series_lags[:, :, DROP_BEFORE:])[0]
series_lags = Value(series_lags, 'xy_lags')

time_idxes = np.arange(series.shape[2])
trn_idx, val_idx = forward_split(time_idxes, ENC_LEN, VALID_LEN + TEST_LEN)
val_idx, test_idx = forward_split(val_idx, ENC_LEN, TEST_LEN)
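# The two forward_split calls above split the timeline chronologically: the
# first separates the training range from the last VALID_LEN + TEST_LEN steps,
# and the second splits that tail into validation and test indices. Passing
# ENC_LEN presumably keeps that many steps of history in each later split so
# the encoder still has a full input window.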
trn_dl = create_seq2seq_data_loader(series,
                                    enc_len=ENC_LEN,
                                    dec_len=DEC_LEN,
                                    time_idx=trn_idx,
                                    batch_size=BATCH_SIZE,
                                    features=[series_lags, series_lags_corr],
                                    seq_last=True,
                                    device='cuda',
                                    mode='train',
                                    num_workers=0,
                                    pin_memory=False)
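# seq_last=True presumably indicates that the time axis is the last dimension
# of each batch, matching the (series, channel, time) layout built above.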

val_dl = create_seq2seq_data_loader(series,
                                    enc_len=ENC_LEN,
                                    dec_len=DEC_LEN,
                                    time_idx=val_idx,
                                    batch_size=BATCH_SIZE,
                                    features=[series_lags, series_lags_corr],
                                    seq_last=True,
                                    device='cuda',
                                    mode='valid',  # assumed; the remaining arguments mirror the training loader
                                    num_workers=0,
                                    pin_memory=False)
Example #2
"""
@contact: [email protected]
@time   : 2020/5/12 16:33
"""
from deepseries.model import Wave2Wave
from deepseries.train import Learner
from deepseries.data import Value, create_seq2seq_data_loader, forward_split
import numpy as np
from torch.optim import Adam

batch_size = 16
enc_len = 36
dec_len = 12
series = np.sin(np.arange(0, 1000))
series = series.reshape(1, 1, -1)
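# One sine series with shape (num_series, num_channels, time_steps) = (1, 1, 1000).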
train_idx, valid_idx = forward_split(np.arange(series.shape[2]),
                                     enc_len=14,
                                     valid_size=200)

train_dl = create_seq2seq_data_loader(series,
                                      enc_len=14,
                                      dec_len=7,
                                      time_idx=train_idx,
                                      batch_size=12,
                                      sampling_rate=1.,
                                      seq_last=True)
valid_dl = create_seq2seq_data_loader(series,
                                      enc_len=14,
                                      dec_len=7,
                                      time_idx=valid_idx,
                                      batch_size=12,
                                      sampling_rate=1.,
                                      seq_last=True)
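
# The imports above (Wave2Wave, Learner, Adam) are not exercised in this
# truncated snippet. A minimal sketch of how they could be wired together
# follows; the constructor and fit() arguments here are assumptions, not
# deepseries' documented signatures.
model = Wave2Wave(target_dim=1)              # assumed constructor argument
opt = Adam(model.parameters(), lr=0.001)
learner = Learner(model, opt, root_dir=".")  # assumed Learner signature
learner.fit(max_epochs=100,                  # assumed fit() arguments
            train_dl=train_dl,
            valid_dl=valid_dl)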
Example #3
import numpy as np
import deepseries.functional as F
from deepseries.data import create_seq2seq_data_loader, forward_split

batch_size = 16  # assumed value; batch_size is used below but not defined in this snippet
enc_len = 36
dec_len = 12
series_len = 1000

epoch = 100
lr = 0.001

valid_size = 12
test_size = 12

series = np.sin(np.arange(0, series_len)) + np.random.normal(
    0, 0.1, series_len) + np.log2(np.arange(1, series_len + 1))
series = series.reshape(1, 1, -1)

train_idx, valid_idx = forward_split(np.arange(series_len),
                                     enc_len=enc_len,
                                     valid_size=valid_size + test_size)
valid_idx, test_idx = forward_split(valid_idx, enc_len, test_size)

# Mask the test window so it is not used when computing the mean/std
# (assuming a True mask entry marks a position to exclude).
mask = np.zeros_like(series).astype(bool)
mask[:, :, test_idx] = True
series, mu, std = F.normalize(series, axis=2, fillna=True, mask=mask)
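# mu and std can be kept to map predictions back to the original scale later,
# e.g. preds_original = preds * std + mu.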

# wave2wave train
train_dl = create_seq2seq_data_loader(series[:, :, train_idx],
                                      enc_len,
                                      dec_len,
                                      sampling_rate=0.1,
                                      batch_size=batch_size,
                                      seq_last=True)
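
# A matching validation loader would mirror the call above (sketch; the exact
# argument choices are assumptions based on Example #2):
valid_dl = create_seq2seq_data_loader(series[:, :, valid_idx],
                                      enc_len,
                                      dec_len,
                                      sampling_rate=1.,
                                      batch_size=batch_size,
                                      seq_last=True)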