Example #1
0
 def _make_kf():
     """Build an uncompiled KalmanFilter with two linear-model processes on 'y'."""
     procs = [LinearModel(id=pid, predictors=['x1', 'x2']) for pid in ('lm1', 'lm2')]
     return KalmanFilter(processes=procs, measures=['y'], compiled=False)
def name_to_proc(id: str, **kwargs) -> Process:
    """Map a process id to a configured ``Process`` instance by substring match.

    E.g. an id containing ``'local_level'`` yields a ``LocalLevel``. All extra
    keyword arguments are forwarded to the chosen process constructor.

    :param id: Process id; must contain one of the recognized substrings.
    :param kwargs: Forwarded to the process class. The ``'predictors'`` branch
        expects ``covariates`` to be supplied here by the caller.
    :return: The configured process.
    :raises NotImplementedError: If ``id`` matches none of the known patterns.
    """
    season_start = '2010-01-04'

    if 'hour_in_day' in id:
        out = FourierSeasonFixed(id=id,
                                 seasonal_period=24, season_start=season_start, dt_unit='h',
                                 **kwargs)
    elif 'day_in_year' in id:
        # NOTE(review): 24 * 364.25 looks like a typo for 24 * 365.25 (hours per
        # tropical year) — confirm before changing, since it alters the fit.
        out = FourierSeasonFixed(id=id,
                                 seasonal_period=24 * 364.25, season_start=season_start, dt_unit='h',
                                 **kwargs)
    elif 'local_level' in id:
        out = LocalLevel(id=id, **kwargs)
    elif 'local_trend' in id:
        out = LocalTrend(id=id, **kwargs)
    elif 'day_in_week' in id:
        out = Season(id=id,
                     seasonal_period=7, season_duration=24,
                     season_start=season_start, dt_unit='h',
                     **kwargs)
    elif 'nn_predictors' in id:
        out = NN(id=id,
                 add_module_params_to_process=False,  # so we can use a separate parameter group
                 model_mat_kwarg_name='predictors',
                 **kwargs)
    elif 'predictors' in id:
        # Bugfix: the original passed ``covariates=self.predictors``, but there
        # is no ``self`` in this module-level function, so that branch raised
        # NameError at runtime. Callers must supply ``covariates`` via kwargs.
        out = LinearModel(id=id,
                          model_mat_kwarg_name='predictors',
                          **kwargs)
    else:
        raise NotImplementedError(f"Unsure what process to use for `{id}`.")

    return out
Example #3
0
 def test_no_proc_variance(self):
     """With process variance off (the default), the process covariance is all zeros."""
     lm = LinearModel(id='lm', predictors=['x1', 'x2'])
     kf = KalmanFilter(processes=[lm], measures=['y'])
     cov = kf.script_module.process_covariance({}, {})
     # two predictors -> 2x2 state covariance, identically zero
     self.assertEqual(cov.shape[-1], 2)
     self.assertTrue((cov == 0).all())
Example #4
0
    def test_equations_decay(self):
        """Transition matrix F of a decayed LinearModel: diagonal in (.95, 1), off-diagonal zero."""
        inputs = torch.tensor([[-5., 5., 1., 0., 3.]]).unsqueeze(-1)
        nt = inputs.shape[1]

        # kf whose single lm process decays each coefficient toward zero:
        kf = KalmanFilter(
            processes=[LinearModel(id='lm',
                                   predictors=['x1', 'x2', 'x3'],
                                   process_variance=True,
                                   decay=(.95, 1.))],
            measures=['y'])
        parsed = kf._parse_design_kwargs(input=inputs,
                                         out_timesteps=nt,
                                         X=torch.randn(1, nt, 3))
        parsed.pop('init_mean_kwargs')
        design_kwargs = kf.script_module._get_design_kwargs_for_time(time=0, **parsed)
        F, *_ = kf.script_module.get_design_mats(num_groups=1,
                                                 design_kwargs=design_kwargs,
                                                 cache={})
        F = F.squeeze(0)

        diag = torch.diag(F)
        self.assertTrue((diag > .95).all())
        self.assertTrue((diag < 1.00).all())
        # decay is per-element, so the diagonal entries should not all coincide:
        self.assertGreater(len(set(diag.tolist())), 1)
        size = F.shape[-1]
        for r in range(size):
            for c in range(size):
                if r != c:
                    self.assertEqual(F[r, c], 0)
Example #5
0
 def _make_kf():
     """KalmanFilter with one LocalLevel and one LinearModel per measure (uses outer ``ndim``)."""
     measures = [str(i + 1) for i in range(ndim)]
     levels = [LocalLevel(id=f'll{i + 1}', measure=m)
               for i, m in enumerate(measures)]
     lms = [LinearModel(id=f'lm{i + 1}',
                        predictors=['x1', 'x2', 'x3', 'x4', 'x5'],
                        measure=m)
            for i, m in enumerate(measures)]
     return KalmanFilter(processes=levels + lms, measures=measures)
Example #6
0
    def test_lm(self, num_groups: int = 1, num_preds: int = 1):
        """X with a wrong trailing dim raises a shape error; the right dim runs clean."""
        data = torch.zeros((num_groups, 5, 1))
        pred_names = [f"x{i}" for i in range(num_preds)]
        kf = KalmanFilter(processes=[LinearModel(id='lm', predictors=pred_names)],
                          measures=['y'])
        # pick a trailing dimension guaranteed not to equal num_preds:
        wrong_dim = 2 if num_preds <= 1 else 1
        with self.assertRaises((RuntimeError, torch.jit.Error),
                               msg=(num_groups, num_preds)) as cm:
            kf(data, X=torch.zeros((num_groups, 5, wrong_dim)))
        expected = f"produced output with shape [{num_groups}, {wrong_dim}], but expected ({num_preds},) " \
                   f"or (num_groups, {num_preds}). Input had shape [{num_groups}, {wrong_dim}]"
        self.assertIn(expected, str(cm.exception))

        # correctly-shaped X should not raise:
        kf(data, X=torch.ones(num_groups, data.shape[1], num_preds))
Example #7
0
                                               group_colname='station',
                                               time_colname='date',
                                               y_colnames=measures_pp,
                                               X_colnames=predictors_pp)

# split the full dataset into train/validation at the configured date:
dataset_train, dataset_val = dataset_all.train_val_split(dt=SPLIT_DT)

# impute nans (since standardized, imputing w/zeros means imputing w/mean)
for _dataset in (dataset_all, dataset_train, dataset_val):
    _, X = _dataset.tensors
    X[torch.isnan(X)] = 0.0

# +
# one LinearModel process per measure, each reading the shared predictor columns:
kf_pred = KalmanFilter(measures=measures_pp,
                       processes=processes + [
                           LinearModel(id=f'{m}_predictors',
                                       covariates=predictors_pp).add_measure(m)
                           for m in measures_pp
                       ])

# NOTE(review): LBFGS re-evaluates the closure up to max_eval times per step;
# the optimizer is stashed on the model object for use by `closure` below.
kf_pred.opt = LBFGS(kf_pred.parameters(), lr=.20, max_eval=10)


def closure():
    """LBFGS closure: zero grads, compute mean NLL on the training set, backprop."""
    kf_pred.opt.zero_grad()
    train_y, train_X = dataset_train.tensors
    prediction = kf_pred(train_y,
                         predictors=train_X,
                         start_datetimes=dataset_train.start_datetimes)
    nll = -prediction.log_prob(train_y).mean()
    nll.backward()
    return nll