Example #1
def test_NormalDistributionLoss(center, transformation):
    mean = 1000.0
    std = 200.0
    n = 100000
    target = NormalDistributionLoss.distribution_class(loc=mean, scale=std).sample((n,))
    if transformation in ["log", "log1p", "relu", "softplus"]:
        target = target.abs()
    normalizer = TorchNormalizer(center=center, transformation=transformation)
    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    scale = torch.ones_like(normalized_target) * normalized_target.std()
    parameters = torch.stack(
        [normalized_target, scale],
        dim=-1,
    )
    loss = NormalDistributionLoss()
    if transformation in ["logit", "log", "log1p", "softplus", "relu", "logit"]:
        with pytest.raises(AssertionError):
            rescaled_parameters = loss.rescale_parameters(parameters, target_scale=target_scale, encoder=normalizer)
    else:
        rescaled_parameters = loss.rescale_parameters(parameters, target_scale=target_scale, encoder=normalizer)
        samples = loss.sample(rescaled_parameters, 1)
        assert torch.isclose(torch.as_tensor(mean), samples.mean(), atol=0.1, rtol=0.2)
        if center:  # if not centered, softplus distorts std too much for testing
            assert torch.isclose(torch.as_tensor(std), samples.std(), atol=0.1, rtol=0.7)
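The snippet above omits its imports and the pytest parametrization that supplies `center` and `transformation`. A minimal sketch of how it could be run directly; the import paths and the chosen parameter values are assumptions, not part of the original example:

import pytest
import torch
from pytorch_forecasting.data import TorchNormalizer
from pytorch_forecasting.metrics import NormalDistributionLoss

# call the test with one parameter combination; normally @pytest.mark.parametrize
# would iterate over center in {True, False} and the supported transformations
test_NormalDistributionLoss(center=True, transformation=None)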
Example #2
def test_NormalDistributionLoss(center, transformation):
    mean = 1.0
    std = 0.1
    n = 100000
    target = NormalDistributionLoss.distribution_class(loc=mean, scale=std).sample((n,))
    normalizer = TorchNormalizer(center=center, transformation=transformation)
    if transformation in ["log", "log1p", "relu", "softplus"]:
        target = target.abs()
    target = normalizer.inverse_preprocess(target)

    normalized_target = normalizer.fit_transform(target).view(1, -1)
    target_scale = normalizer.get_parameters().unsqueeze(0)
    scale = torch.ones_like(normalized_target) * normalized_target.std()
    parameters = torch.stack(
        [normalized_target, scale],
        dim=-1,
    )
    loss = NormalDistributionLoss()
    rescaled_parameters = loss.rescale_parameters(parameters,
                                                  target_scale=target_scale,
                                                  encoder=normalizer)
    samples = loss.sample(rescaled_parameters, 1)
    assert torch.isclose(target.mean(), samples.mean(), atol=0.1, rtol=0.5)
    if center:  # if not centered, softplus distorts std too much for testing
        assert torch.isclose(target.std(), samples.std(), atol=0.1, rtol=0.7)
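Compared to the first variant, this one first maps the raw samples through `normalizer.inverse_preprocess`, so fitting and rescaling happen on a target that already lives on the original (untransformed) scale of the chosen transformation. A minimal sketch of the normalizer calls the test relies on; the toy values and import path are assumptions:

import torch
from pytorch_forecasting.data import TorchNormalizer

normalizer = TorchNormalizer(center=True, transformation="log1p")
target = torch.rand(1000) * 2.0 + 0.5                          # toy positive target
original = normalizer.inverse_preprocess(target)               # undo the transformation (expm1 for "log1p")
normalized = normalizer.fit_transform(original).view(1, -1)    # normalize on the original scale, as in the test
target_scale = normalizer.get_parameters().unsqueeze(0)        # center and scale, passed to rescale_parameters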
Example #3
class FullyConnectedForDistributionLossModel(BaseModel):  # we inherit the `from_dataset` method
    def __init__(self, input_size: int, output_size: int, hidden_size: int,
                 n_hidden_layers: int, **kwargs):
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)
        self.network = FullyConnectedMultiOutputModule(
            input_size=self.hparams.input_size,
            output_size=self.hparams.output_size,
            hidden_size=self.hparams.hidden_size,
            n_hidden_layers=self.hparams.n_hidden_layers,
            n_outputs=2,  # <<<<<<<< we predict two outputs for mean and scale of the normal distribution
        )
        self.loss = NormalDistributionLoss()

    @classmethod
    def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
        new_kwargs = {
            "output_size": dataset.max_prediction_length,
            "input_size": dataset.max_encoder_length,
        }
        new_kwargs.update(
            kwargs
        )  # use to pass real hyperparameters and override defaults set by dataset
        # example for dataset validation
        assert dataset.max_prediction_length == dataset.min_prediction_length, "Decoder only supports a fixed length"
        assert dataset.min_encoder_length == dataset.max_encoder_length, "Encoder only supports a fixed length"
        assert (
            len(dataset.time_varying_known_categoricals) == 0
            and len(dataset.time_varying_known_reals) == 0
            and len(dataset.time_varying_unknown_categoricals) == 0
            and len(dataset.static_categoricals) == 0
            and len(dataset.static_reals) == 0
            and len(dataset.time_varying_unknown_reals) == 1
            and dataset.time_varying_unknown_reals[0] == dataset.target
        ), "Only covariate should be the target in 'time_varying_unknown_reals'"

        return super().from_dataset(dataset, **new_kwargs)

    def forward(self,
                x: Dict[str, torch.Tensor],
                n_samples: int = None) -> Dict[str, torch.Tensor]:
        # x is a batch generated based on the TimeSeriesDataset
        network_input = x["encoder_cont"].squeeze(-1)
        prediction = self.network(network_input)  # shape batch_size x n_decoder_steps x 2
        # `self.training` is the standard PyTorch flag indicating whether the module is in
        # training mode (tracking gradients) or evaluation mode
        if self.training or n_samples is None:
            assert n_samples is None, "We need to predict parameters when training"
            output_transformation = True
        else:
            # let's sample from our distribution - first we need to scale the parameters to real space
            scaled_parameters = self.transform_output(
                dict(
                    prediction=prediction,
                    target_scale=x["target_scale"],
                ))
            # and then sample from distribution
            prediction = self.loss.sample(scaled_parameters, n_samples)
            output_transformation = None  # predictions are already re-scaled
        return dict(prediction=prediction,
                    target_scale=x["target_scale"],
                    output_transformation=output_transformation)

    def transform_output(self, out: Dict[str, torch.Tensor]) -> torch.Tensor:
        # this is already implemented in pytorch forecasting but this code demonstrates the point
        # input is forward's output
        # depending on output, transform differently
        if out.get("output_transformation",
                   True) is None:  # samples are already rescaled
            out = out["prediction"]
        else:  # parameters need to be rescaled
            out = self.loss.rescale_parameters(
                out["prediction"],
                target_scale=out["target_scale"],
                encoder=self.output_transformer)
        return out
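A minimal sketch of how this model could be instantiated and queried, following the usual pytorch-forecasting workflow; the toy dataset, column names, and hyperparameter values are assumptions, and the class definitions above (including `FullyConnectedMultiOutputModule`) are assumed to be in scope:

import numpy as np
import pandas as pd
import torch
from pytorch_forecasting import TimeSeriesDataSet

# toy data with a single target and no covariates, matching the asserts in `from_dataset`
data = pd.DataFrame(
    dict(
        value=np.random.rand(300),
        group=np.repeat(np.arange(3), 100),
        time_idx=np.tile(np.arange(100), 3),
    )
)
dataset = TimeSeriesDataSet(
    data,
    group_ids=["group"],
    target="value",
    time_idx="time_idx",
    min_encoder_length=20,
    max_encoder_length=20,
    min_prediction_length=5,
    max_prediction_length=5,
    time_varying_unknown_reals=["value"],
)
model = FullyConnectedForDistributionLossModel.from_dataset(dataset, hidden_size=10, n_hidden_layers=2)

x, y = next(iter(dataset.to_dataloader(batch_size=4)))
model.eval()  # leave training mode so that sampling is allowed in forward
with torch.no_grad():
    parameters = model(x)["prediction"]              # no n_samples -> raw distribution parameters
    samples = model(x, n_samples=100)["prediction"]  # n_samples set -> rescaled samples from the distribution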