Example #1
def test_NormalDistributionLoss(center, transformation):
    mean = 1.0
    std = 0.1
    n = 100000
    target = NormalDistributionLoss.distribution_class(loc=mean,
                                                       scale=std).sample((n, ))
    normalizer = TorchNormalizer(center=center, transformation=transformation)
    if transformation in ["log", "log1p", "relu", "softplus"]:
        target = target.abs()
    target = normalizer.inverse_preprocess(target)

    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    scale = torch.ones_like(normalized_target) * normalized_target.std()
    parameters = torch.stack(
        [normalized_target, scale],
        dim=-1,
    )
    loss = NormalDistributionLoss()
    rescaled_parameters = loss.rescale_parameters(parameters,
                                                  target_scale=target_scale,
                                                  encoder=normalizer)
    samples = loss.sample(rescaled_parameters, 1)
    assert torch.isclose(target.mean(), samples.mean(), atol=0.1, rtol=0.5)
    if center:  # if not centered, softplus distorts std too much for testing
        assert torch.isclose(target.std(), samples.std(), atol=0.1, rtol=0.7)
Example #2
def test_NormalDistributionLoss(center, transformation):
    mean = 1000.0
    std = 200.0
    n = 100000
    target = NormalDistributionLoss.distribution_class(loc=mean, scale=std).sample((n,))
    if transformation in ["log", "log1p", "relu", "softplus"]:
        target = target.abs()
    normalizer = TorchNormalizer(center=center, transformation=transformation)
    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    scale = torch.ones_like(normalized_target) * normalized_target.std()
    parameters = torch.stack(
        [normalized_target, scale],
        dim=-1,
    )
    loss = NormalDistributionLoss()
    if transformation in ["logit", "log", "log1p", "softplus", "relu"]:
        with pytest.raises(AssertionError):
            rescaled_parameters = loss.rescale_parameters(parameters, target_scale=target_scale, encoder=normalizer)
    else:
        rescaled_parameters = loss.rescale_parameters(parameters, target_scale=target_scale, encoder=normalizer)
        samples = loss.sample(rescaled_parameters, 1)
        assert torch.isclose(torch.as_tensor(mean), samples.mean(), atol=0.1, rtol=0.2)
        if center:  # if not centered, softplus distorts std too much for testing
            assert torch.isclose(torch.as_tensor(std), samples.std(), atol=0.1, rtol=0.7)
Example #3
def test_ImplicitQuantileNetworkDistributionLoss():
    batch_size = 3
    n_timesteps = 2
    output_size = 5

    target = torch.rand((batch_size, n_timesteps))

    normalizer = TorchNormalizer(center=True, transformation="softplus")
    normalizer.fit(target.reshape(-1))

    loss = ImplicitQuantileNetworkDistributionLoss(input_size=output_size)
    x = torch.rand((batch_size, n_timesteps, output_size))
    target_scale = torch.rand((batch_size, 2))
    pred = loss.rescale_parameters(x,
                                   target_scale=target_scale,
                                   encoder=normalizer)
    assert loss.loss(pred, target).shape == target.shape
    quantiles = loss.to_quantiles(pred)
    assert quantiles.size(-1) == len(loss.quantiles)
    assert quantiles.size(0) == batch_size
    assert quantiles.size(1) == n_timesteps

    point_prediction = loss.to_prediction(pred, n_samples=None)
    assert point_prediction.ndim == loss.to_prediction(pred,
                                                       n_samples=100).ndim
Example #4
def test_MultivariateNormalDistributionLoss(center, transformation):
    normalizer = TorchNormalizer(center=center, transformation=transformation)

    mean = torch.tensor([1.0, 1.0])
    std = torch.tensor([0.2, 0.1])
    cov_factor = torch.tensor([[0.0], [0.0]])
    n = 1000000

    loss = MultivariateNormalDistributionLoss()
    target = loss.distribution_class(loc=mean,
                                     cov_diag=std**2,
                                     cov_factor=cov_factor).sample((n, ))
    target = normalizer.inverse_preprocess(target)
    target = target[:, 0]
    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    scale = torch.ones_like(normalized_target) * normalized_target.std()
    parameters = torch.concat(
        [
            normalized_target[..., None], scale[..., None],
            torch.zeros((1, normalized_target.size(1), loss.rank))
        ],
        dim=-1,
    )

    rescaled_parameters = loss.rescale_parameters(parameters,
                                                  target_scale=target_scale,
                                                  encoder=normalizer)
    samples = loss.sample(rescaled_parameters, 1)
    assert torch.isclose(target.mean(), samples.mean(), atol=3.0, rtol=0.5)
    if center:  # if not centered, softplus distorts std too much for testing
        assert torch.isclose(target.std(), samples.std(), atol=0.1, rtol=0.7)
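A brief aside on the parameterization above: assuming the loss's distribution_class is torch's LowRankMultivariateNormal (as the cov_diag/cov_factor arguments suggest), the covariance matrix is cov_factor @ cov_factor.T + diag(cov_diag), so the zero cov_factor in this test reduces to independent normals. A minimal, illustrative sketch:

import torch
from torch import distributions

mean = torch.tensor([1.0, 1.0])
std = torch.tensor([0.2, 0.1])
cov_factor = torch.tensor([[0.0], [0.0]])  # shape (event_size, rank); zero factor -> no cross-correlation

dist = distributions.LowRankMultivariateNormal(loc=mean, cov_factor=cov_factor, cov_diag=std**2)
print(dist.covariance_matrix)  # diag([0.04, 0.01]) because cov_factor is zero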
Example #5
def test_BetaDistributionLoss(center, transformation):
    initial_mean = 0.1
    initial_shape = 10
    n = 100000
    target = BetaDistributionLoss().map_x_to_distribution(
        torch.tensor([initial_mean, initial_shape])).sample((n, ))
    normalizer = TorchNormalizer(center=center, transformation=transformation)
    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    parameters = torch.stack(
        [normalized_target, 1.0 * torch.ones_like(normalized_target)], dim=-1)
    loss = BetaDistributionLoss()

    if transformation not in ["logit"] or not center:
        with pytest.raises(AssertionError):
            loss.rescale_parameters(parameters,
                                    target_scale=target_scale,
                                    encoder=normalizer)
    else:
        rescaled_parameters = loss.rescale_parameters(
            parameters, target_scale=target_scale, encoder=normalizer)
        samples = loss.sample(rescaled_parameters, 1)
        assert torch.isclose(torch.as_tensor(initial_mean),
                             samples.mean(),
                             atol=0.01,
                             rtol=0.01)  # mean=0.1
        assert torch.isclose(target.std(), samples.std(), atol=0.02,
                             rtol=0.3)  # std=0.09
Example #6
def test_NegativeBinomialDistributionLoss(center, transformation):
    mean = 100.0
    shape = 1.0
    n = 100000
    target = NegativeBinomialDistributionLoss().map_x_to_distribution(
        torch.tensor([mean, shape])).sample_n(n)
    std = target.std()
    normalizer = TorchNormalizer(center=center, transformation=transformation)
    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    parameters = torch.stack(
        [normalized_target, 1.0 * torch.ones_like(normalized_target)], dim=-1)
    loss = NegativeBinomialDistributionLoss()

    if center or transformation in ["logit"]:
        with pytest.raises(AssertionError):
            rescaled_parameters = loss.rescale_parameters(
                parameters, target_scale=target_scale, encoder=normalizer)
    else:
        rescaled_parameters = loss.rescale_parameters(
            parameters, target_scale=target_scale, encoder=normalizer)
        samples = loss.sample_n(rescaled_parameters, 1)
        assert torch.isclose(torch.as_tensor(mean),
                             samples.mean(),
                             atol=0.1,
                             rtol=0.5)
        assert torch.isclose(torch.as_tensor(std),
                             samples.std(),
                             atol=0.1,
                             rtol=0.5)
Example #7
 def map_x_to_distribution(self,
                           x: torch.Tensor) -> distributions.Distribution:
     distr = self.distribution_class(
         picnn=self.picnn,
         hidden_state=x[..., :-2],
         prediction_length=self.prediction_length,
         is_energy_score=self.is_energy_score,
         es_num_samples=self.es_num_samples,
         beta=self.beta,
     )
     # rescale
     loc = x[..., -2][:, None]
     scale = x[..., -1][:, None]
     scaler = distributions.AffineTransform(loc=loc, scale=scale)
     if self._transformation is None:
         return self.transformed_distribution_class(distr, [scaler])
     else:
         return self.transformed_distribution_class(
             distr,
             [
                 scaler,
                 TorchNormalizer.get_transform(
                     self._transformation)["inverse_torch"]
             ],
         )
Example #8
 def map_x_to_distribution(self, x: torch.Tensor) -> distributions.Normal:
     # channels in x: [affine loc, affine scale, normalized mean, normalized std]
     distr = self.distribution_class(loc=x[..., 2], scale=x[..., 3])
     scaler = distributions.AffineTransform(loc=x[..., 0], scale=x[..., 1])
     if self._transformation is None:
         return distributions.TransformedDistribution(distr, [scaler])
     else:
         return distributions.TransformedDistribution(
             distr, [
                 scaler,
                 TorchNormalizer.get_transform(
                     self._transformation)["inverse_torch"]
             ])
Example #9
def test_LogNormalDistributionLoss(log_scale, center, coerce_positive,
                                   log_zero_value):
    mean = 2.0
    std = 0.2
    n = 100000
    target = LogNormalDistributionLoss.distribution_class(
        loc=mean, scale=std).sample_n(n)
    if log_scale and coerce_positive:
        return  # combination invalid for normalizer (tested somewhere else)
    normalizer = TorchNormalizer(log_scale=log_scale,
                                 center=center,
                                 coerce_positive=coerce_positive,
                                 log_zero_value=log_zero_value)
    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    scale = torch.ones_like(normalized_target) * normalized_target.std()
    parameters = torch.stack(
        [normalized_target, scale],
        dim=-1,
    )
    loss = LogNormalDistributionLoss()

    if not log_scale or log_zero_value > -1e9:
        with pytest.raises(AssertionError):
            rescaled_parameters = loss.rescale_parameters(
                parameters, target_scale=target_scale, transformer=normalizer)
    else:
        rescaled_parameters = loss.rescale_parameters(
            parameters, target_scale=target_scale, transformer=normalizer)
        samples = loss.sample_n(rescaled_parameters, 1)
        assert torch.isclose(torch.as_tensor(mean),
                             samples.log().mean(),
                             atol=0.1,
                             rtol=0.2)
        if center:  # if not centered, softplus distorts std too much for testing
            assert torch.isclose(torch.as_tensor(std),
                                 samples.log().std(),
                                 atol=0.1,
                                 rtol=0.7)
Example #10
def test_NegativeBinomialDistributionLoss(log_scale, center, coerce_positive,
                                          log_zero_value):
    mean = 100.0
    shape = 1.0
    n = 100000
    target = NegativeBinomialDistributionLoss().map_x_to_distribution(
        torch.tensor([mean, shape])).sample_n(n)
    std = target.std()
    if log_scale and coerce_positive:
        return  # combination invalid for normalizer (tested somewhere else)
    normalizer = TorchNormalizer(log_scale=log_scale,
                                 center=center,
                                 coerce_positive=coerce_positive,
                                 log_zero_value=log_zero_value)
    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    parameters = torch.stack(
        [normalized_target, 1.0 * torch.ones_like(normalized_target)], dim=-1)
    loss = NegativeBinomialDistributionLoss()

    if center:
        with pytest.raises(AssertionError):
            rescaled_parameters = loss.rescale_parameters(
                parameters, target_scale=target_scale, transformer=normalizer)
    else:
        rescaled_parameters = loss.rescale_parameters(
            parameters, target_scale=target_scale, transformer=normalizer)
        samples = loss.sample_n(rescaled_parameters, 1)
        assert torch.isclose(torch.as_tensor(mean),
                             samples.mean(),
                             atol=0.1,
                             rtol=0.5)
        assert torch.isclose(torch.as_tensor(std),
                             samples.std(),
                             atol=0.1,
                             rtol=0.5)
Example #11
 def map_x_to_distribution(self, x: torch.Tensor) -> distributions.Normal:
     # channels in x: [affine loc, affine scale, distribution loc, cov_diag, cov_factor ...]
     x = x.permute(1, 0, 2)
     distr = self.distribution_class(
         loc=x[..., 2],
         cov_factor=x[..., 4:],
         cov_diag=x[..., 3],
     )
     # affine loc/scale for rescaling are taken from the first time step
     scaler = distributions.AffineTransform(loc=x[0, :, 0],
                                            scale=x[0, :, 1],
                                            event_dim=1)
     if self._transformation is None:
         return distributions.TransformedDistribution(distr, [scaler])
     else:
         return distributions.TransformedDistribution(
             distr, [
                 scaler,
                 TorchNormalizer.get_transform(
                     self._transformation)["inverse_torch"]
             ])
Example #12
def test_MultiNormalizer_fitted():
    data = pd.DataFrame(
        dict(a=[1, 1, 2, 2, 3],
             b=[1.1, 1.1, 1.0, 5.0, 1.1],
             c=[1.1, 1.1, 1.0, 5.0, 1.1]))

    normalizer = MultiNormalizer(
        [GroupNormalizer(groups=["a"]),
         TorchNormalizer()])

    with pytest.raises(NotFittedError):
        check_is_fitted(normalizer)

    normalizer.fit(data, data)

    try:
        check_is_fitted(normalizer.normalizers[0])
        check_is_fitted(normalizer.normalizers[1])
        check_is_fitted(normalizer)
    except NotFittedError:
        pytest.fail(f"{NotFittedError}")
Example #13
    def to_quantiles(self,
                     y_pred: torch.Tensor,
                     quantiles: List[float] = None) -> torch.Tensor:
        """
        Convert network prediction into a quantile prediction.

        Args:
            y_pred: prediction output of network
            quantiles (List[float], optional): quantiles for probability range. Defaults to quantiles
                as defined in the class initialization.

        Returns:
            torch.Tensor: prediction quantiles (last dimension)
        """
        if quantiles is None:
            quantiles = self.quantiles
        quantiles = torch.as_tensor(quantiles, device=y_pred.device)

        # extract parameters
        x = y_pred[..., :-2]
        loc = y_pred[..., -2][..., None]
        scale = y_pred[..., -1][..., None]

        # predict quantiles
        if y_pred.requires_grad:
            predictions = self.quantile_network(x, quantiles)
        else:
            with torch.no_grad():
                predictions = self.quantile_network(x, quantiles)
        # rescale output
        predictions = loc + predictions * scale
        # transform output if required
        if self._transformation is not None:
            transform = TorchNormalizer.get_transform(
                self._transformation)["reverse"]
            predictions = transform(predictions)

        return predictions
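A usage sketch for the method above, mirroring the flow in Example #3; shapes and values are illustrative, and the sketch assumes rescale_parameters appends loc/scale as the last two channels and sets the transformation:

import torch
from pytorch_forecasting.data.encoders import TorchNormalizer
from pytorch_forecasting.metrics import ImplicitQuantileNetworkDistributionLoss

loss = ImplicitQuantileNetworkDistributionLoss(input_size=5)
normalizer = TorchNormalizer(center=True, transformation="softplus")
normalizer.fit(torch.rand(10))

x = torch.rand((4, 3, 5))          # (batch, time, hidden_size)
target_scale = torch.rand((4, 2))  # center and scale per sequence
pred = loss.rescale_parameters(x, target_scale=target_scale, encoder=normalizer)

quantiles = loss.to_quantiles(pred, quantiles=[0.1, 0.5, 0.9])
# quantiles.shape == (4, 3, 3): requested quantiles on the last dimension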
Example #14
        max_encoder_length=5,
        max_prediction_length=2,
        min_prediction_length=1,
        min_encoder_length=1,
        time_varying_known_reals=["price_regular"],
        scalers={"price_regular": EncoderNormalizer()},
    )
    next(iter(dataset.to_dataloader()))


@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        dict(target_normalizer=MultiNormalizer(
            normalizers=[TorchNormalizer(),
                         EncoderNormalizer()]), ),
        dict(add_target_scales=True),
        dict(weight="volume"),
    ],
)
def test_multitarget(test_data, kwargs):
    dataset = TimeSeriesDataSet(
        test_data.assign(volume1=lambda x: x.volume),
        time_idx="time_idx",
        target=["volume", "volume1"],
        group_ids=["agency", "sku"],
        max_encoder_length=5,
        max_prediction_length=2,
        min_prediction_length=1,
        min_encoder_length=1,
        time_varying_known_reals=["price_regular"],
        scalers={"price_regular": EncoderNormalizer()},
        **kwargs,
    )
    next(iter(dataset.to_dataloader()))


Example #15
@pytest.mark.parametrize(
    "kwargs",
    [
        {},
        dict(
            target_normalizer=MultiNormalizer(normalizers=[TorchNormalizer(), EncoderNormalizer()]),
        ),
        dict(add_target_scales=True),
        dict(weight="volume"),
    ],
)
def test_multitarget(test_data, kwargs):
    dataset = TimeSeriesDataSet(
        test_data.assign(volume1=lambda x: x.volume),
        time_idx="time_idx",
        target=["volume", "volume1"],
        group_ids=["agency", "sku"],
        max_encoder_length=5,
        max_prediction_length=2,
        min_prediction_length=1,
        min_encoder_length=1,
Example #16
# %%
from pytorch_forecasting.data.encoders import EncoderNormalizer, MultiNormalizer, TorchNormalizer

# create the dataset from the pandas dataframe
multi_target_dataset = TimeSeriesDataSet(
    multi_target_test_data,
    group_ids=["group"],
    target=["target1", "target2"],  # USING two targets
    time_idx="time_idx",
    min_encoder_length=5,
    max_encoder_length=5,
    min_prediction_length=2,
    max_prediction_length=2,
    time_varying_unknown_reals=["target1", "target2"],
    target_normalizer=MultiNormalizer([
        EncoderNormalizer(), TorchNormalizer()
    ]),  # use a separate normalizer for each target
)

x, y = next(iter(multi_target_dataset.to_dataloader(batch_size=4)))
y[0]  # target values are a list of targets
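
# %%
# A minimal sketch (not part of the dataset example above): with two targets, each
# target typically gets its own metric, combined via MultiLoss; weights are optional.
from pytorch_forecasting.metrics import MAE, SMAPE, MultiLoss

multi_loss = MultiLoss([MAE(), SMAPE()])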

# %%
from typing import List, Union

from pytorch_forecasting.metrics import MAE, SMAPE, MultiLoss
from pytorch_forecasting.utils import to_list


class FullyConnectedMultiTargetModel(BaseModel):
    def __init__(