import numpy as np
import torch
from torch.distributions import StudentT

# Import path may differ across GluonTS versions
# (e.g. gluonts.torch.distributions in newer releases).
from gluonts.torch.modules.distribution_output import StudentTOutput

# NUM_SAMPLES, TOL, START_TOL_MULTIPLE, inv_softplus and
# maximum_likelihood_estimate_sgd are constants/helpers defined in the
# surrounding test module.


def test_studentT_likelihood(df: float, loc: float, scale: float):

    # Broadcast the scalar parameters to one value per sample.
    dfs = torch.zeros((NUM_SAMPLES,)) + df
    locs = torch.zeros((NUM_SAMPLES,)) + loc
    scales = torch.zeros((NUM_SAMPLES,)) + scale

    # One independent draw per entry of the parameter vectors.
    distr = StudentT(df=dfs, loc=locs, scale=scales)
    samples = distr.sample()

    # Start the SGD fit slightly away from the true parameters;
    # inv_softplus inverts the softplus mapping StudentTOutput applies
    # to keep df > 2 and scale > 0.
    init_bias = [
        inv_softplus(df - 2),
        loc - START_TOL_MULTIPLE * TOL * loc,
        inv_softplus(scale - START_TOL_MULTIPLE * TOL * scale),
    ]

    # Recover the parameters from the samples by maximum likelihood.
    df_hat, loc_hat, scale_hat = maximum_likelihood_estimate_sgd(
        StudentTOutput(),
        samples,
        init_biases=init_bias,
        num_epochs=15,
        learning_rate=1e-3,
    )

    assert (
        np.abs(df_hat - df) < TOL * df
    ), f"df did not match: df = {df}, df_hat = {df_hat}"
    assert (
        np.abs(loc_hat - loc) < TOL * loc
    ), f"loc did not match: loc = {loc}, loc_hat = {loc_hat}"
    assert (
        np.abs(scale_hat - scale) < TOL * scale
    ), f"scale did not match: scale = {scale}, scale_hat = {scale_hat}"
Example #2
    def __init__(
            self,
            freq: str,
            prediction_length: int,
            context_length: Optional[int] = None,
            num_layers: int = 2,
            hidden_size: int = 40,
            dropout_rate: float = 0.1,
            num_feat_dynamic_real: int = 0,
            num_feat_static_cat: int = 0,
            num_feat_static_real: int = 0,
            cardinality: Optional[List[int]] = None,
            embedding_dimension: Optional[List[int]] = None,
            distr_output: DistributionOutput = StudentTOutput(),
            loss: DistributionLoss = NegativeLogLikelihood(),
            scaling: bool = True,
            lags_seq: Optional[List[int]] = None,
            time_features: Optional[List[TimeFeature]] = None,
            num_parallel_samples: int = 100,
            batch_size: int = 32,
            num_batches_per_epoch: int = 50,
            trainer_kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        # User-supplied kwargs override these defaults.
        trainer_kwargs = {
            "max_epochs": 100,
            "gradient_clip_val": 10.0,
            **(trainer_kwargs or {}),
        }
        super().__init__(trainer_kwargs=trainer_kwargs)

        self.freq = freq
        self.context_length = (context_length if context_length is not None
                               else prediction_length)
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        self.loss = loss
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout_rate = dropout_rate
        self.num_feat_dynamic_real = num_feat_dynamic_real
        self.num_feat_static_cat = num_feat_static_cat
        self.num_feat_static_real = num_feat_static_real
        self.cardinality = (cardinality if cardinality
                            and num_feat_static_cat > 0 else [1])
        self.embedding_dimension = embedding_dimension
        self.scaling = scaling
        self.lags_seq = lags_seq
        self.time_features = (time_features if time_features is not None else
                              time_features_from_frequency_str(self.freq))

        self.num_parallel_samples = num_parallel_samples
        self.batch_size = batch_size
        self.num_batches_per_epoch = num_batches_per_epoch

        # On average one training window is drawn per series; both
        # training and validation windows must leave at least
        # prediction_length future values.
        self.train_sampler = ExpectedNumInstanceSampler(
            num_instances=1.0, min_future=prediction_length)
        self.validation_sampler = ValidationSplitSampler(
            min_future=prediction_length)
Example #3
    def __init__(
        self,
        freq: str,
        context_length: int,
        prediction_length: int,
        num_feat_dynamic_real: int,
        num_feat_static_real: int,
        num_feat_static_cat: int,
        cardinality: List[int],
        embedding_dimension: Optional[List[int]] = None,
        num_layers: int = 2,
        hidden_size: int = 40,
        dropout_rate: float = 0.1,
        distr_output: DistributionOutput = StudentTOutput(),
        lags_seq: Optional[List[int]] = None,
        scaling: bool = True,
        num_parallel_samples: int = 100,
    ) -> None:
        super().__init__()
        self.context_length = context_length
        self.prediction_length = prediction_length
        self.distr_output = distr_output
        # Projects RNN outputs to the distribution's parameters.
        self.param_proj = distr_output.get_args_proj(hidden_size)
        self.target_shape = distr_output.event_shape
        self.num_feat_dynamic_real = num_feat_dynamic_real
        self.num_feat_static_cat = num_feat_static_cat
        self.num_feat_static_real = num_feat_static_real
        # Default embedding size: the common (cardinality + 1) // 2
        # heuristic, capped at 50.
        self.embedding_dimension = (
            embedding_dimension
            if embedding_dimension is not None or cardinality is None
            else [min(50, (cat + 1) // 2) for cat in cardinality]
        )
        self.lags_seq = lags_seq or get_lags_for_frequency(freq_str=freq)
        self.num_parallel_samples = num_parallel_samples
        # Enough history is needed to compute the largest lag at every
        # position of the context window.
        self.history_length = self.context_length + max(self.lags_seq)
        self.embedder = FeatureEmbedder(
            cardinalities=cardinality,
            embedding_dims=self.embedding_dimension,
        )
        if scaling:
            self.scaler = MeanScaler(dim=1, keepdim=True)
        else:
            self.scaler = NOPScaler(dim=1, keepdim=True)
        self.lagged_rnn = LaggedLSTM(
            input_size=1,  # TODO fix
            features_size=self._number_of_features,  # property defined elsewhere on the class
            num_layers=num_layers,
            hidden_size=hidden_size,
            dropout_rate=dropout_rate,
            # The RNN consumes the once-shifted target, so lag indices
            # are reduced by one.
            lags_seq=[lag - 1 for lag in self.lags_seq],
        )
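
The history_length arithmetic is worth spelling out: to form the largest lag feature at every position of the context window, the network needs context_length + max(lags_seq) past observations. A small check using GluonTS's get_lags_for_frequency (the exact lag values depend on the library version):

from gluonts.time_feature import get_lags_for_frequency

lags = get_lags_for_frequency(freq_str="H")  # e.g. hourly data
context_length = 48
history_length = context_length + max(lags)  # window the model must be fed
print(max(lags), history_length)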
Example #4
    def __init__(
        self,
        freq: str,
        prediction_length: int,
        context_length: int,
        hidden_dimensions: List[int],
        distr_output=StudentTOutput(),
        batch_norm: bool = False,
        scaling: Callable = mean_abs_scaling,
    ) -> None:
        super().__init__()

        assert prediction_length > 0
        assert context_length > 0
        assert len(hidden_dimensions) > 0

        self.freq = freq
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.hidden_dimensions = hidden_dimensions
        self.distr_output = distr_output
        self.batch_norm = batch_norm
        self.scaling = scaling

        # Layer widths: the input layer consumes the context window.
        dimensions = [context_length] + hidden_dimensions[:-1]

        modules = []
        for in_size, out_size in zip(dimensions[:-1], dimensions[1:]):
            modules += [self.__make_lin(in_size, out_size), nn.ReLU()]
            if batch_norm:
                modules.append(nn.BatchNorm1d(out_size))
        # Final layer emits hidden_dimensions[-1] features per predicted
        # time step, flattened into a single vector.
        modules.append(
            self.__make_lin(
                dimensions[-1], prediction_length * hidden_dimensions[-1]
            )
        )

        self.nn = nn.Sequential(*modules)
        # Maps the per-step features to the output distribution's parameters.
        self.args_proj = self.distr_output.get_args_proj(hidden_dimensions[-1])
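
The private helper __make_lin is referenced but not shown. A plausible definition, assuming it wraps nn.Linear with a small uniform weight initialization (the init range here is an assumption, not from the source):

@staticmethod
def __make_lin(dim_in: int, dim_out: int) -> nn.Linear:
    # Hypothetical sketch of the missing helper.
    lin = nn.Linear(dim_in, dim_out)
    torch.nn.init.uniform_(lin.weight, -0.07, 0.07)
    torch.nn.init.zeros_(lin.bias)
    return lin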