from itertools import islice

from gluonts.dataset.artificial import constant_dataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.mx.distribution import StudentTOutput
from gluonts.mx.trainer import Trainer
from gluonts.mx.util import get_hybrid_forward_input_names

# NOTE: import paths follow the mx-based gluonts layout (0.8-style); older
# releases expose e.g. Trainer as gluonts.trainer.Trainer instead.

# Standard fixture used throughout the gluonts tests: a small constant
# dataset together with its metadata.
ds_info, train_ds, test_ds = constant_dataset()
freq = ds_info.metadata.freq


def test_distribution():
    """
    Makes sure additional tensors can be accessed and have expected shapes.
    """
    prediction_length = ds_info.prediction_length

    # todo adapt loader to anomaly detection use-case
    batch_size = 2
    num_samples = 3

    estimator = DeepAREstimator(
        freq=freq,
        prediction_length=prediction_length,
        trainer=Trainer(epochs=2, num_batches_per_epoch=1),
        distr_output=StudentTOutput(),
        batch_size=batch_size,
    )

    train_output = estimator.train_model(train_ds, test_ds)

    training_data_loader = estimator.create_training_data_loader(
        estimator.create_transformation().apply(train_ds)
    )

    # DeepAR's context_length defaults to prediction_length, so the network
    # sees context_length + prediction_length = 2 * prediction_length steps.
    seq_len = 2 * ds_info.prediction_length

    for data_entry in islice(training_data_loader, 1):
        input_names = get_hybrid_forward_input_names(
            type(train_output.trained_net)
        )
        distr = train_output.trained_net.distribution(
            *[data_entry[k] for k in input_names]
        )

        assert distr.sample(num_samples).shape == (
            num_samples,
            batch_size,
            seq_len,
        )
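# A minimal, self-contained sketch of the shape conventions the test above
# relies on (illustrative assumption, not part of the original suite): a
# DistributionOutput projects network features to distribution arguments,
# and the resulting distribution's statistics and samples follow the
# (num_samples, batch, time) layout checked in test_distribution. The
# function name and feature dimension below are hypothetical.
import mxnet as mx


def check_studentt_shapes(batch_size: int = 2, seq_len: int = 8) -> None:
    distr_output = StudentTOutput()
    args_proj = distr_output.get_args_proj()
    args_proj.initialize()
    # Dummy "network features": the last axis is projected to the
    # distribution arguments (mu, sigma, nu) for every (batch, time) cell.
    features = mx.nd.random.normal(shape=(batch_size, seq_len, 4))
    distr = distr_output.distribution(args_proj(features))
    assert distr.mean.shape == (batch_size, seq_len)
    assert distr.sample(3).shape == (3, batch_size, seq_len)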
import time

import mxnet as mx

from gluonts.dataset.artificial import shuffle_testing_dataset
from gluonts.dataset.loader import TrainDataLoader
from gluonts.distribution.neg_binomial import NegativeBinomialOutput
from gluonts.model.deepar import DeepAREstimator
from gluonts.trainer import Trainer

BATCH_SIZE = 32
NUM_BATCH_PER_EPOCH = 50

n = 300
time_list = []
range_list = range(1, 30)

# Benchmark the loader for increasing shuffle-buffer sizes.
for c in range_list:
    train_ds = shuffle_testing_dataset(n)

    estimator = DeepAREstimator(
        prediction_length=20,
        freq="D",
    )
    transform = estimator.create_transformation()

    loader = TrainDataLoader(
        train_ds,
        transform=transform,
        batch_size=BATCH_SIZE,
        ctx=mx.cpu(),
        num_batches_per_epoch=NUM_BATCH_PER_EPOCH,
        shuffle_buffer_length=c * BATCH_SIZE,
        num_workers=2,
    )

    # Track which items appear in which batch of the epoch.
    hit_list = [[0 for i in range(NUM_BATCH_PER_EPOCH)] for j in range(n)]

    print(f"dataset size: {len(train_ds)}")
    start_time = time.time()
    count = 0
    for batch in loader:
        print(f"item id: {batch['item_id']}")
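        # Hypothetical completion (an assumption, not the original code): the
        # surrounding bookkeeping (time_list, start_time, count) suggests the
        # loop was meant to time one epoch per shuffle-buffer size.
        count += 1

    # Record the wall-clock time for iterating one epoch at this buffer size.
    time_list.append(time.time() - start_time)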