Code example #1
File: estimator.py  Project: yifeim/gluon-ts
    def train_model(
        self,
        training_data: Optional[Dataset] = None,
        validation_data: Optional[Dataset] = None,
        num_workers: Optional[int] = None,
        num_prefetch: Optional[int] = None,
        shuffle_buffer_length: Optional[int] = None,
        cache_data: bool = False,
    ) -> TrainOutput:
        transformation = self.create_transformation()

        transformed_training_data = TransformedDataset(
            training_data, transformation
        )

        training_data_loader = self.create_training_data_loader(
            transformed_training_data
            if not cache_data
            else Cached(transformed_training_data),
            num_workers=num_workers,
            num_prefetch=num_prefetch,
            shuffle_buffer_length=shuffle_buffer_length,
        )

        validation_data_loader = None

        if validation_data is not None:
            transformed_validation_data = TransformedDataset(
                validation_data, transformation
            )

            validation_data_loader = self.create_validation_data_loader(
                transformed_validation_data
                if not cache_data
                else Cached(transformed_validation_data),
                num_workers=num_workers,
            )

        training_network = self.create_training_network()

        self.trainer(
            net=training_network,
            train_iter=training_data_loader,
            validation_iter=validation_data_loader,
        )

        with self.trainer.ctx:
            predictor = self.create_predictor(transformation, training_network)

        return TrainOutput(
            transformation=transformation,
            trained_net=training_network,
            predictor=predictor,
        )
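A minimal usage sketch for train_model above. It assumes a recent gluon-ts release: DeepAREstimator, Trainer, and ListDataset are standard gluon-ts classes, but the exact import paths vary across versions (older releases expose Trainer as gluonts.trainer.Trainer), and the toy dataset values below are purely illustrative.

from gluonts.dataset.common import ListDataset
from gluonts.model.deepar import DeepAREstimator
from gluonts.mx.trainer import Trainer

# Toy daily series, long enough for a 14-step prediction length.
train_ds = ListDataset(
    [{"start": "2020-01-01", "target": [float(i % 7) for i in range(200)]}],
    freq="D",
)

estimator = DeepAREstimator(
    freq="D", prediction_length=14, trainer=Trainer(epochs=1)
)

train_output = estimator.train_model(training_data=train_ds)
# TrainOutput bundles the transformation, the trained network, and a predictor;
# Estimator.train() typically just returns train_output.predictor.
predictor = train_output.predictor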
Code example #2
File: train.py  Project: sahand68/gluon-ts
    def run_test(self, dataset, estimator, predictor):
        test_dataset = TransformedDataset(
            dataset,
            transformations=[
                FilterTransformation(lambda el: el['target'].shape[-1] >
                                     predictor.prediction_length)
            ],
        )

        len_orig = len(dataset)
        len_filtered = len(test_dataset)
        if len_orig > len_filtered:
            logging.warning(
                'Not all time-series in the test-channel have '
                'enough data to be used for evaluation. Proceeding with '
                f'{len_filtered}/{len_orig} '
                f'(~{int(len_filtered/len_orig*100)}%) items.')

        try:
            log.metric('test_dataset_stats', test_dataset.calc_stats())
        except GluonTSDataError as error:
            logging.error(
                f"Failure whilst calculating stats for test dataset: {error}")
            return

        if isinstance(estimator, GluonEstimator) and isinstance(
                predictor, GluonPredictor):
            inference_data_loader = InferenceDataLoader(
                dataset=test_dataset,
                transform=predictor.input_transform,
                batch_size=estimator.trainer.batch_size,
                ctx=estimator.trainer.ctx,
                float_type=estimator.float_type,
            )

            if estimator.trainer.hybridize:
                predictor.hybridize(batch=next(iter(inference_data_loader)))

            if self.hyperparameters.get('use_symbol_block_predictor'):
                predictor = predictor.as_symbol_block_predictor(
                    batch=next(iter(inference_data_loader)))

        num_eval_samples = self.hyperparameters.get('num_eval_samples', 100)
        quantiles = self.hyperparameters.get(
            'quantiles', (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9))

        # we only log aggregate metrics for now as item metrics may be
        # very large
        predictions, input_timeseries = backtest.make_evaluation_predictions(
            test_dataset, predictor, num_eval_samples)
        agg_metrics, _item_metrics = Evaluator(quantiles=quantiles)(
            input_timeseries, predictions, num_series=len_filtered)
        log.metric("agg_metrics", agg_metrics)
Code example #3
def run_test(env: TrainEnv, predictor: Predictor,
             test_dataset: Dataset) -> None:
    len_original = len(test_dataset)

    test_dataset = TransformedDataset(
        base_dataset=test_dataset,
        transformations=[
            FilterTransformation(
                lambda x: x["target"].shape[-1] > predictor.prediction_length)
        ],
    )

    len_filtered = len(test_dataset)

    if len_original > len_filtered:
        logger.warning(
            f"Not all time-series in the test-channel have "
            f"enough data to be used for evaluation. Proceeding with "
            f"{len_filtered}/{len_original} "
            f"(~{int(len_filtered / len_original * 100)}%) items.")

    forecast_it, ts_it = backtest.make_evaluation_predictions(
        dataset=test_dataset, predictor=predictor, num_samples=100)

    agg_metrics, _item_metrics = Evaluator()(
        ts_iterator=ts_it,
        fcst_iterator=forecast_it,
        num_series=len(test_dataset),
    )

    # we only log aggregate metrics for now as item metrics may be very large
    for name, score in agg_metrics.items():
        logger.info(f"#test_score ({env.current_host}, {name}): {score}")
Code example #4
File: loader.py  Project: yifeim/gluon-ts
def TrainDataLoader(
    dataset: Dataset,
    *,
    transform: Transformation,
    batch_size: int,
    stack_fn: Callable,
    num_batches_per_epoch: Optional[int] = None,
    num_workers: Optional[int] = None,
    num_prefetch: Optional[int] = None,
    shuffle_buffer_length: Optional[int] = None,
    decode_fn: Callable = lambda x: x,
):
    transformed_dataset = TransformedDataset(Cyclic(dataset),
                                             transform,
                                             is_train=True)
    data_iterable = (PseudoShuffled(
        transformed_dataset, shuffle_buffer_length=shuffle_buffer_length)
                     if shuffle_buffer_length is not None else
                     transformed_dataset)
    data_loader = DataLoader(
        data_iterable=data_iterable,
        batch_size=batch_size,
        stack_fn=stack_fn,
        num_workers=num_workers,
        num_prefetch=num_prefetch,
        decode_fn=decode_fn,
    )
    return (iter(data_loader) if num_batches_per_epoch is None else
            IterableSlice(iter(data_loader), num_batches_per_epoch))
Code example #5
File: loader.py  Project: pablosteinmetz/gluon-ts
def InferenceDataLoader(
    dataset: Dataset,
    *,
    transform: Transformation,
    batch_size: int,
    stack_fn: Callable,
):
    """Construct an iterator of batches for inference purposes.

    Parameters
    ----------
    dataset
        Data to iterate over.
    transform
        Transformation to be lazily applied as data is being iterated.
        The transformation is applied in "inference mode" (``is_train=False``).
    batch_size
        Number of entries to include in a batch.
    stack_fn
        Function to use to stack data entries into batches.
        This can be used to set a specific array type or the computing device
        the arrays should end up on (CPU, GPU).

    Returns
    -------
    Iterable[DataBatch]
        An iterable sequence of batches.
    """
    return DataLoader(
        data_iterable=TransformedDataset(dataset, transform, is_train=False),
        batch_size=batch_size,
        stack_fn=stack_fn,
    )
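A self-contained sketch of how this loader can be driven. The simple_stack function and the identity AdhocTransform are illustrative stand-ins, not part of the gluon-ts API; in practice one passes predictor.input_transform as the transformation and a backend-specific batchify function as stack_fn.

import numpy as np
from gluonts.transform import AdhocTransform

# Toy dataset: a gluon-ts Dataset is simply an iterable of dict "data entries".
dataset = [
    {"start": "2020-01-01", "target": np.arange(24, dtype=np.float32)},
    {"start": "2020-01-01", "target": 2 * np.arange(24, dtype=np.float32)},
]

def simple_stack(entries):
    # stack_fn receives a list of transformed entries and returns one batch;
    # this naive version assumes all array fields have equal length.
    return {key: np.stack([entry[key] for entry in entries]) for key in entries[0]}

loader = InferenceDataLoader(
    dataset,
    transform=AdhocTransform(lambda entry: entry),  # identity, for illustration
    batch_size=2,
    stack_fn=simple_stack,
)

for batch in loader:
    print(batch["target"].shape)  # (2, 24)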
Code example #6
def run_test(
    env: TrainEnv, predictor: Predictor, test_dataset: Dataset
) -> None:
    len_original = maybe_len(test_dataset)

    test_dataset = TransformedDataset(
        test_dataset,
        FilterTransformation(
            lambda x: x["target"].shape[-1] > predictor.prediction_length
        ),
    )

    len_filtered = len(test_dataset)

    if len_original is not None and len_original > len_filtered:
        logger.warning(
            f"Not all time-series in the test-channel have "
            f"enough data to be used for evaluation. Proceeding with "
            f"{len_filtered}/{len_original} "
            f"(~{int(len_filtered / len_original * 100)}%) items."
        )

    forecast_it, ts_it = backtest.make_evaluation_predictions(
        dataset=test_dataset, predictor=predictor, num_samples=100
    )

    if isinstance(predictor, RepresentableBlockPredictor) and isinstance(
        predictor.forecast_generator, QuantileForecastGenerator
    ):
        quantiles = predictor.forecast_generator.quantiles
        logger.info(f"Using quantiles `{quantiles}` for evaluation.")
        evaluator = Evaluator(quantiles=quantiles)
    else:
        evaluator = Evaluator()

    agg_metrics, item_metrics = evaluator(
        ts_iterator=ts_it,
        fcst_iterator=forecast_it,
        num_series=len(test_dataset),
    )

    # we only log aggregate metrics for now as item metrics may be very large
    for name, score in agg_metrics.items():
        logger.info(f"#test_score ({env.current_host}, {name}): {score}")

    # store metrics
    with open(env.path.model / "agg_metrics.json", "w") as agg_metric_file:
        json.dump(agg_metrics, agg_metric_file)
    with open(env.path.model / "item_metrics.csv", "w") as item_metrics_file:
        item_metrics.to_csv(item_metrics_file, index=False)
Code example #7
File: loader.py  Project: youngsuk0723/gluon-ts
def InferenceDataLoader(
    dataset: Dataset,
    *,
    transform: Transformation,
    batch_size: int,
    stack_fn: Callable,
    num_workers: Optional[int] = None,
    num_prefetch: Optional[int] = None,
    shuffle_buffer_length: Optional[int] = None,
):
    return DataLoader(
        data_iterable=TransformedDataset(dataset, transform, is_train=False),
        batch_size=batch_size,
        stack_fn=stack_fn,
    )
Code example #8
    def __init__(
        self,
        dataset: Dataset,
        transform: Transformation,
        is_train: bool = True,
        shuffle_buffer_length: Optional[int] = None,
        cache_data: bool = False,
    ):
        super().__init__()
        self.shuffle_buffer_length = shuffle_buffer_length

        self.transformed_dataset = TransformedDataset(
            Cyclic(dataset) if not cache_data else Cached(Cyclic(dataset)),
            transform,
            is_train=is_train,
        )
Code example #9
File: _estimator.py  Project: kaleming/gluon-ts
    def create_validation_data_loader(
        self,
        data: Dataset,
        **kwargs,
    ) -> DataLoader:
        validation_transform = (self._create_instance_splitter("validation") +
                                self._create_post_split_transform() +
                                SelectFields(["past_target", "valid_length"]))
        return DataLoader(
            data_iterable=TransformedDataset(data,
                                             validation_transform,
                                             is_train=True),
            batch_size=self.batch_size,
            stack_fn=self._stack_fn(),
            decode_fn=partial(as_in_context, ctx=self.trainer.ctx),
        )
Code example #10
File: loader.py  Project: yifeim/gluon-ts
def ValidationDataLoader(
    dataset: Dataset,
    *,
    transform: Transformation,
    batch_size: int,
    stack_fn: Callable,
    num_workers: Optional[int] = None,
    num_prefetch: Optional[int] = None,
    shuffle_buffer_length: Optional[int] = None,
    decode_fn: Callable = lambda x: x,
):
    return DataLoader(
        data_iterable=TransformedDataset(dataset, transform, is_train=True),
        batch_size=batch_size,
        stack_fn=stack_fn,
        decode_fn=decode_fn,
    )
Code example #11
File: train.py  Project: hanifmahboobi/gluon-ts
def prepare_test_dataset(dataset: Dataset, prediction_length: int) -> Dataset:
    test_dataset = TransformedDataset(
        dataset,
        transformations=[
            FilterTransformation(
                lambda el: el['target'].shape[-1] > prediction_length)
        ],
    )

    len_orig = len(dataset)
    len_filtered = len(test_dataset)
    if len_orig > len_filtered:
        log.logger.warning(
            'Not all time-series in the test-channel have '
            'enough data to be used for evaluation. Proceeding with '
            f'{len_filtered}/{len_orig} '
            f'(~{int(len_filtered / len_orig * 100)}%) items.')
    return test_dataset
Code example #12
File: loader.py  Project: wl935/gluon-ts
def construct_training_iterator(
    dataset: Dataset,
    *,
    transform: Transformation,
    shuffle_buffer_length: Optional[int] = None,
) -> Iterator[DataEntry]:
    transformed_dataset = TransformedDataset(
        cyclic(dataset),
        transform,
        is_train=True,
    )

    if shuffle_buffer_length is None:
        return iter(transformed_dataset)
    else:
        return pseudo_shuffled(
            iter(transformed_dataset),
            shuffle_buffer_length=shuffle_buffer_length,
        )
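The pseudo_shuffled helper used above draws items from a bounded in-memory buffer instead of shuffling the whole stream, which may be infinite because of cyclic(). The following is a minimal sketch of that buffering idea under that assumption; it is not gluon-ts's actual implementation.

import random
from typing import Iterator, List, TypeVar

T = TypeVar("T")

def buffered_shuffle(source: Iterator[T], buffer_length: int) -> Iterator[T]:
    # Keep at most buffer_length items in memory; once the buffer is full,
    # emit a randomly chosen element for every new one that arrives. This
    # approximates shuffling on streams too long (or endless) to materialize.
    buffer: List[T] = []
    for item in source:
        buffer.append(item)
        if len(buffer) >= buffer_length:
            yield buffer.pop(random.randrange(len(buffer)))
    while buffer:
        yield buffer.pop(random.randrange(len(buffer)))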
Code example #13
File: loader.py  Project: wl935/gluon-ts
    def __init__(
        self,
        dataset: Dataset,
        *,
        transform: Transformation,
        batch_size: int,
        stack_fn: Callable,
        # FIXME: the following aren't used
        num_workers: Optional[int] = None,
        num_prefetch: Optional[int] = None,
        shuffle_buffer_length: Optional[int] = None,
    ) -> None:
        self.transformed_dataset = TransformedDataset(
            dataset,
            transform,
            is_train=False,
        )
        self.batch_size = batch_size
        self.stack_fn = stack_fn
Code example #14
File: backtest.py  Project: zhupeiru/gluon-ts
def make_evaluation_predictions(
    dataset: Dataset, predictor: Predictor, num_eval_samples: int
) -> Tuple[Iterator[Forecast], Iterator[pd.Series]]:
    """
    Return predictions for the last prediction_length time units of each
    target. That portion is removed from the input before predicting, so the
    function can be used in evaluations where accuracy is measured on the
    last portion of the target.

    Parameters
    ----------
    dataset
        Dataset where the evaluation will happen. Only the portion of each
        series excluding the last prediction_length values is used when
        making predictions.
    predictor
        Model used to draw predictions.
    num_eval_samples
        Number of samples to draw on the model when evaluating.

    Returns
    -------
    Tuple[Iterator[Forecast], Iterator[pd.Series]]
        A pair of iterators: the first yields the forecasts, the second the
        corresponding ground truth series.
    """

    prediction_length = predictor.prediction_length
    freq = predictor.freq

    def add_ts_dataframe(
        data_iterator: Iterator[DataEntry],
    ) -> Iterator[DataEntry]:
        for data_entry in data_iterator:
            data = data_entry.copy()
            index = pd.date_range(
                start=data["start"],
                freq=freq,
                periods=data["target"].shape[-1],
            )
            data["ts"] = pd.DataFrame(
                index=index, data=data["target"].transpose()
            )
            yield data

    def ts_iter(dataset: Dataset) -> Iterator[pd.DataFrame]:
        for data_entry in add_ts_dataframe(iter(dataset)):
            yield data_entry["ts"]

    def truncate_target(data):
        data = data.copy()
        target = data["target"]
        assert (
            target.shape[-1] >= prediction_length
        )  # handles multivariate case (target_dim, history_length)
        data["target"] = target[..., :-prediction_length]
        return data

    # TODO filter out time series with target shorter than prediction length
    # TODO or fix the evaluator so it supports missing values instead (all
    # TODO the test set may be gone otherwise with such a filtering)

    dataset_trunc = TransformedDataset(
        dataset, transformations=[transform.AdhocTransform(truncate_target)]
    )

    return (
        predictor.predict(dataset_trunc, num_eval_samples=num_eval_samples),
        ts_iter(dataset),
    )
Code example #15
File: loader.py  Project: pablosteinmetz/gluon-ts
def TrainDataLoader(
    dataset: Dataset,
    *,
    transform: Transformation,
    batch_size: int,
    stack_fn: Callable,
    num_batches_per_epoch: Optional[int] = None,
    num_workers: Optional[int] = None,
    num_prefetch: Optional[int] = None,
    shuffle_buffer_length: Optional[int] = None,
    decode_fn: Callable = lambda x: x,
):
    """Construct an iterator of batches for training purposes.

    This function wraps around ``DataLoader`` to offer training-specific
    behaviour and options, as follows:

        1. The provided dataset is iterated cyclically, so that one can go
        over it multiple times in a single epoch.
        2. A transformation must be provided; it is lazily applied as the
        dataset is being iterated. This is useful e.g. to slice random
        instances of fixed length out of each time series in the dataset.
        3. The resulting batches can be iterated in a pseudo-shuffled order.

    The returned object is a stateful iterator, whose length is either
    ``num_batches_per_epoch`` (if not ``None``) or infinite (otherwise).

    Parameters
    ----------
    dataset
        Data to iterate over.
    transform
        Transformation to be lazily applied as data is being iterated.
        The transformation is applied in "training mode" (``is_train=True``).
    batch_size
        Number of entries to include in a batch.
    stack_fn
        Function to use to stack data entries into batches.
        This can be used to set a specific array type or the computing device
        the arrays should end up on (CPU, GPU).
    num_batches_per_epoch
        Length of the iterator. If ``None``, then the iterator is endless.
    num_workers
        Number of worker processes to use. Default: None.
    num_prefetch
        Sets the length of the queue of batches being produced by worker processes.
        (Only meaningful when ``num_workers is not None``).
    shuffle_buffer_length
        Size of the buffer used for shuffling. Default: None, in which case no
        shuffling occurs.
    decode_fn
        A function called on each batch after it's been taken out of the queue.
        (Only meaningful when ``num_workers is not None``).

    Returns
    -------
    Iterator[DataBatch]
        An iterator of batches.
    """
    transformed_dataset = TransformedDataset(
        Cyclic(dataset), transform, is_train=True
    )
    data_iterable = (
        PseudoShuffled(
            transformed_dataset, shuffle_buffer_length=shuffle_buffer_length
        )
        if shuffle_buffer_length is not None
        else transformed_dataset
    )
    data_loader = DataLoader(
        data_iterable=data_iterable,
        batch_size=batch_size,
        stack_fn=stack_fn,
        num_workers=num_workers,
        num_prefetch=num_prefetch,
        decode_fn=decode_fn,
    )
    return (
        iter(data_loader)
        if num_batches_per_epoch is None
        else IterableSlice(iter(data_loader), num_batches_per_epoch)
    )
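A consumption sketch for the iterator returned above, assuming dataset, transform, and stack_fn are defined as in the earlier examples; the batch size, epoch count, and buffer length are arbitrary illustration values.

train_iter = TrainDataLoader(
    dataset,
    transform=transform,
    batch_size=32,
    stack_fn=stack_fn,
    num_batches_per_epoch=50,
    shuffle_buffer_length=256,
)

for epoch in range(10):
    for batch in train_iter:   # yields exactly num_batches_per_epoch batches
        ...                    # forward/backward pass on the batch goes here
    # the iterator is stateful: the next epoch continues from where the
    # underlying cyclic stream left off rather than restarting the dataset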
Code example #16
File: backtest.py  Project: yifeim/gluon-ts
def make_evaluation_predictions(
    dataset: Dataset,
    predictor: Predictor,
    num_samples: int = 100,
) -> Tuple[Iterator[Forecast], Iterator[pd.Series]]:
    """
    Returns predictions for the trailing prediction_length observations of the given
    time series, using the given predictor.

    The predictor will take as input the given time series without the trailing
    prediction_length observations.

    Parameters
    ----------
    dataset
        Dataset where the evaluation will happen. Only the portion of each
        series excluding the last prediction_length values is used when
        making predictions.
    predictor
        Model used to draw predictions.
    num_samples
        Number of samples to draw on the model when evaluating. Only sampling-based
        models will use this.

    Returns
    -------
    Tuple[Iterator[Forecast], Iterator[pd.Series]]
        A pair of iterators, the first one yielding the forecasts, and the second
        one yielding the corresponding ground truth series.
    """

    prediction_length = predictor.prediction_length
    freq = predictor.freq
    lead_time = predictor.lead_time

    def add_ts_dataframe(
        data_iterator: Iterator[DataEntry], ) -> Iterator[DataEntry]:
        for data_entry in data_iterator:
            data = data_entry.copy()
            index = pd.date_range(
                start=data["start"],
                freq=freq,
                periods=data["target"].shape[-1],
            )
            data["ts"] = pd.DataFrame(index=index,
                                      data=data["target"].transpose())
            yield data

    def ts_iter(dataset: Dataset) -> Iterator[pd.DataFrame]:
        for data_entry in add_ts_dataframe(iter(dataset)):
            yield data_entry["ts"]

    def truncate_target(data):
        data = data.copy()
        target = data["target"]
        assert (target.shape[-1] >= prediction_length
                )  # handles multivariate case (target_dim, history_length)
        data["target"] = target[..., :-prediction_length - lead_time]
        return data

    # TODO filter out time series with target shorter than prediction length
    # TODO or fix the evaluator so it supports missing values instead (all
    # TODO the test set may be gone otherwise with such a filtering)

    dataset_trunc = TransformedDataset(
        dataset, transformation=transform.AdhocTransform(truncate_target))

    return (
        predictor.predict(dataset_trunc, num_samples=num_samples),
        ts_iter(dataset),
    )