Example #1
        def _train(num_epochs: int = 12):
            kf = KalmanFilter(processes=[
                LocalTrend(id='trend'),
                FourierSeason(id='day_of_week', period=7, dt_unit='D', K=3)
            ],
                              measures=['y'])

            # train:
            optimizer = torch.optim.LBFGS([
                p for n, p in kf.named_parameters()
                if 'measure_covariance' not in n
            ],
                                          lr=.20,
                                          max_iter=10)

            def closure():
                optimizer.zero_grad()
                pred = kf(data, start_datetimes=start_datetimes)
                loss = -pred.log_prob(data).mean()
                loss.backward()
                return loss

            print(f"\nTraining for {num_epochs} epochs...")
            for i in range(num_epochs):
                loss = optimizer.step(closure)
                print("loss:", loss.item())

            return kf
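
A hypothetical usage sketch for the helper above (not part of the original example; it assumes the `data` and `start_datetimes` objects referenced inside `_train` are in scope):

    kf = _train(num_epochs=12)
    with torch.no_grad():
        # `out_timesteps` is the total number of timesteps to produce, so this
        # forecasts 7 steps beyond the observed data:
        pred = kf(data, start_datetimes=start_datetimes,
                  out_timesteps=data.shape[1] + 7)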
Example #2
        def _train(num_epochs: int = 15):
            kf = KalmanFilter(processes=[
                TBATS(id='day_of_week',
                      period=7,
                      dt_unit='D',
                      K=1,
                      process_variance=True,
                      decay=(.85, 1.))
            ],
                              measures=['y'])

            # train:
            optimizer = torch.optim.LBFGS(kf.parameters(), lr=.15, max_iter=10)

            def closure():
                optimizer.zero_grad()
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    pred = kf(dataset.tensors[0],
                              start_datetimes=dataset.start_datetimes)
                loss = -pred.log_prob(dataset.tensors[0]).mean()
                loss.backward()
                return loss

            print(f"\nTraining for {num_epochs} epochs...")
            for i in range(num_epochs):
                loss = optimizer.step(closure)
                print("loss:", loss.item())

            return kf
Example #3
    def test_equations_decay(self):
        data = torch.tensor([[-5., 5., 1., 0., 3.]]).unsqueeze(-1)
        num_times = data.shape[1]

        # make torch kf:
        torch_kf = KalmanFilter(processes=[
            LinearModel(id='lm',
                        predictors=['x1', 'x2', 'x3'],
                        process_variance=True,
                        decay=(.95, 1.))
        ],
                                measures=['y'])
        _kwargs = torch_kf._parse_design_kwargs(input=data,
                                                out_timesteps=num_times,
                                                X=torch.randn(1, num_times, 3))
        _kwargs.pop('init_mean_kwargs')
        design_kwargs = torch_kf.script_module._get_design_kwargs_for_time(
            time=0, **_kwargs)
        F, *_ = torch_kf.script_module.get_design_mats(
            num_groups=1, design_kwargs=design_kwargs, cache={})
        F = F.squeeze(0)

        self.assertTrue((torch.diag(F) > .95).all())
        self.assertTrue((torch.diag(F) < 1.00).all())
        self.assertGreater(len(set(torch.diag(F).tolist())), 1)
        for r in range(F.shape[-1]):
            for c in range(F.shape[-1]):
                if r == c:
                    continue
                self.assertEqual(F[r, c], 0)
Example #4
def simulate(num_groups: int,
             num_timesteps: int,
             season_spec: dict,
             noise: float = 1.0) -> torch.Tensor:
    # make kf:
    processes = [
        LocalLevel(id='local_level').add_measure('y'),
        Season(id='day_in_week', seasonal_period=7, fixed=True,
               **season_spec).add_measure('y'),
        FourierSeasonFixed(id='day_in_month',
                           seasonal_period=30,
                           K=2,
                           **season_spec).add_measure('y')
    ]
    kf = KalmanFilter(measures=['y'], processes=processes)

    # make local-level less aggressive:
    pcov = kf.design.process_covariance.create().data
    pcov[0, 0] *= .1
    kf.design.process_covariance.set(pcov)

    # simulate:
    start_datetimes = np.zeros(
        num_groups, dtype='timedelta64') + season_spec['season_start']
    with torch.no_grad():
        dfb = kf.design.for_batch(num_groups=num_groups,
                                  num_timesteps=num_timesteps,
                                  start_datetimes=start_datetimes)
        initial_state = kf.predict_initial_state(dfb)
        simulated_trajectories = initial_state.simulate_trajectories(dfb)
        sim_data = simulated_trajectories.sample_measurements(eps=noise)

    return sim_data
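
A hedged sketch of calling `simulate`. The keys of `season_spec` are assumptions inferred from how the function reads them: `'season_start'` is consumed directly in the `start_datetimes` computation above, and the remaining keys are forwarded to the seasonal processes:

    season_spec = {
        'season_start': np.datetime64('2019-01-01'),  # assumed: added to the timedelta64 array above
        'dt_unit': 'D',  # assumed: forwarded to Season / FourierSeasonFixed
    }
    sim_data = simulate(num_groups=4, num_timesteps=60, season_spec=season_spec)
    print(sim_data.shape)  # expected: (4, 60, 1), i.e. groups x timesteps x measures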
Example #5
    def test_equations_preds(self, n_step: int):
        from torch_kalman.utils.data import TimeSeriesDataset
        from pandas import DataFrame, Series

        class LinearModelFixed(LinearModel):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.no_icov_state_elements = self.state_elements

        kf = KalmanFilter(
            processes=[LinearModelFixed(id='lm', predictors=['x1', 'x2'])],
            measures=['y'],
            compiled=False)
        kf.script_module._scale_by_measure_var = False
        sd = kf.state_dict()
        sd['script_module.processes.lm.init_mean'][:] = torch.tensor([1.5, -0.5])
        sd['script_module.measure_covariance.cholesky_log_diag'][0] = np.log(.1 ** .5)

        num_times = 100
        df = DataFrame({
            'x1': np.random.randn(num_times),
            'x2': np.random.randn(num_times)
        })
        df['y'] = 1.5 * df['x1'] + -.5 * df['x2'] + .1 * np.random.randn(
            num_times)
        df['time'] = df.index.values
        df['group'] = '1'
        dataset = TimeSeriesDataset.from_dataframe(dataframe=df,
                                                   group_colname='group',
                                                   time_colname='time',
                                                   dt_unit=None,
                                                   X_colnames=['x1', 'x2'],
                                                   y_colnames=['y'])
        y, X = dataset.tensors

        pred = kf(y, X=X, out_timesteps=X.shape[1], n_step=n_step)
        y_series = Series(y.squeeze().numpy())
        for shift in range(-2, 3):
            resid = y_series.shift(shift) - Series(
                pred.means.squeeze().numpy())
            if shift:
                # check there's no misalignment in internal n_step logic (i.e., realigning the input makes things worse)
                self.assertGreater((resid**2).mean(), 1.)
            else:
                self.assertLess((resid**2).mean(), .02)
Example #6
 def test_dtype(self,
                dtype: torch.dtype,
                ndim: int = 2,
                compiled: bool = True):
     data = torch.zeros((2, 5, ndim), dtype=dtype)
     kf = KalmanFilter(processes=[
         LocalLevel(id=f'll{i}', measure=str(i)) for i in range(ndim)
     ],
                       measures=[str(i) for i in range(ndim)],
                       compiled=compiled)
     kf.to(dtype=dtype)
     pred = kf(data)
     self.assertEqual(pred.means.dtype, dtype)
     loss = pred.log_prob(data)
     self.assertEqual(loss.dtype, dtype)
Example #7
 def test_no_proc_variance(self):
     kf = KalmanFilter(
         processes=[LinearModel(id='lm', predictors=['x1', 'x2'])],
         measures=['y'])
     cov = kf.script_module.process_covariance({}, {})
     self.assertEqual(cov.shape[-1], 2)
     self.assertTrue((cov == 0).all())
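
For contrast, a sketch (an assumption mirroring the test above, not part of the original) showing that enabling `process_variance` should yield a non-zero process covariance:

    kf2 = KalmanFilter(
        processes=[LinearModel(id='lm', predictors=['x1', 'x2'], process_variance=True)],
        measures=['y'])
    cov2 = kf2.script_module.process_covariance({}, {})
    assert not (cov2 == 0).all()  # some process noise is now estimated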
Example #8
    def test_predictions(self, ndim: int = 2):
        data = torch.zeros((2, 5, ndim))
        kf = KalmanFilter(processes=[
            LocalLevel(id=f'lm{i}', measure=str(i)) for i in range(ndim)
        ],
                          measures=[str(i) for i in range(ndim)],
                          compiled=False)
        pred = kf(data)
        self.assertEqual(len(tuple(pred)), 2)
        self.assertIsInstance(np.asanyarray(pred), np.ndarray)
        means, covs = pred
        self.assertIsInstance(means, torch.Tensor)
        self.assertIsInstance(covs, torch.Tensor)

        with self.assertRaises(TypeError):
            pred[1]

        with self.assertRaises(TypeError):
            pred[(1, )]

        pred_group2 = pred[[1]]
        self.assertTupleEqual(tuple(pred_group2.covs.shape),
                              (1, 5, ndim, ndim))
        self.assertTrue(
            (pred_group2.state_means == pred.state_means[1, :, :]).all())
        self.assertTrue(
            (pred_group2.state_covs == pred.state_covs[1, :, :, :]).all())

        pred_time3 = pred[:, [2]]
        self.assertTupleEqual(tuple(pred_time3.covs.shape), (2, 1, ndim, ndim))
        self.assertTrue(
            (pred_time3.state_means == pred.state_means[:, 2, :]).all())
        self.assertTrue(
            (pred_time3.state_covs == pred.state_covs[:, 2, :, :]).all())
Example #9
 def _make_kf():
     return KalmanFilter(processes=[
         LinearModel(id='lm1', predictors=['x1', 'x2']),
         LinearModel(id='lm2', predictors=['x1', 'x2'])
     ],
                         measures=['y'],
                         compiled=False)
Example #10
    def test_process_caching(self, compiled: bool):
        from typing import Optional  # needed for the forward() annotation below

        class CallCounter(torch.nn.Module):
            def __init__(self):
                super(CallCounter, self).__init__()
                self.call_count = 0

            def forward(self, input: Optional[torch.Tensor]) -> torch.Tensor:
                self.call_count += 1
                return torch.ones(1) * self.call_count

        data = torch.tensor([[-5., 5., 1., 0., 3.]]).unsqueeze(-1)
        torch_kf = KalmanFilter(processes=[
            Process(id='call_counter',
                    state_elements=['position'],
                    h_module=CallCounter(),
                    f_tensors={'position->position': torch.ones(1)})
        ],
                                measures=['y'],
                                compiled=compiled)
        # with cache enabled, only called once
        pred = torch_kf(data)
        self.assertTrue((pred.H == 1.).all())

        # without the cache, the module is called data.shape[1] + 1 times (once per timestep, plus once for the initial state):
        pred = torch_kf(data, _disable_cache=True)
        self.assertListEqual(pred.H.squeeze().tolist(),
                             [float(x) for x in range(3, 8)])
Example #11
    def test_equations(self):
        data = Tensor([[-50., 50., 1.]])[:, :, None]

        #
        _design = simple_mv_velocity_design(dims=1)
        torch_kf = KalmanFilter(processes=_design.processes.values(),
                                measures=_design.measures)
        batch_design = torch_kf.design.for_batch(1, 1)
        pred = torch_kf(data)

        #
        filter_kf = filterpy_KalmanFilter(dim_x=2, dim_z=1)
        filter_kf.x = batch_design.initial_mean.detach().numpy().T
        filter_kf.P = batch_design.initial_covariance.detach().numpy().squeeze(
            0)

        filter_kf.F = batch_design.F(0)[0].detach().numpy()
        filter_kf.H = batch_design.H(0)[0].detach().numpy()
        filter_kf.R = batch_design.R(0)[0].detach().numpy()
        filter_kf.Q = batch_design.Q(0)[0].detach().numpy()
        filter_kf.states = []
        for t in range(data.shape[1]):
            filter_kf.states.append(filter_kf.x)
            filter_kf.update(data[:, t, :])
            filter_kf.predict()
        filterpy_states = np.stack(filter_kf.states).squeeze()
        kf_states = pred.means.detach().numpy().squeeze()

        for r, c in product(*[range(x) for x in kf_states.shape]):
            self.assertAlmostEqual(filterpy_states[r, c],
                                   kf_states[r, c],
                                   places=3)
Example #12
    def test_nans(self, ndim: int = 3, n_step: int = 1):
        ntimes = 4 + n_step
        data = torch.ones((5, ntimes, ndim)) * 10
        data[0, 2, 0:(ndim - 1)] = float('nan')
        data[2, 2, 0] = float('nan')

        # test the critical helper function:
        get_nan_groups2 = torch.jit.script(get_nan_groups)
        nan_groups = {2}
        if ndim > 1:
            nan_groups.add(0)
        for t in range(ntimes):
            for group_idx, valid_idx in get_nan_groups2(torch.isnan(data[:,
                                                                         t])):
                if t == 2:
                    if valid_idx is None:
                        self.assertEqual(len(group_idx),
                                         data.shape[0] - len(nan_groups))
                        self.assertFalse(
                            bool(
                                set(group_idx.tolist()).intersection(
                                    nan_groups)))
                    else:
                        self.assertLess(len(valid_idx), ndim)
                        self.assertGreater(len(valid_idx), 0)
                        if len(valid_idx) == 1:
                            if ndim == 2:
                                self.assertSetEqual(set(valid_idx.tolist()),
                                                    {1})
                                self.assertSetEqual(set(group_idx.tolist()),
                                                    nan_groups)
                            else:
                                self.assertSetEqual(set(valid_idx.tolist()),
                                                    {ndim - 1})
                                self.assertSetEqual(set(group_idx.tolist()),
                                                    {0})
                        else:
                            self.assertSetEqual(set(valid_idx.tolist()),
                                                {1, 2})
                            self.assertSetEqual(set(group_idx.tolist()), {2})
                else:
                    self.assertIsNone(valid_idx)

        # test `update`
        # TODO: measure dim vs. state-dim

        # test integration:
        # TODO: make missing dim highly correlated with observed dims. upward trend in observed should get reflected in
        #       unobserved state
        kf = KalmanFilter(processes=[
            LocalLevel(id=f'lm{i}', measure=str(i)) for i in range(ndim)
        ],
                          measures=[str(i) for i in range(ndim)],
                          compiled=True)
        obs_means, obs_covs = kf(data, n_step=n_step)
        self.assertFalse(torch.isnan(obs_means).any())
        self.assertFalse(torch.isnan(obs_covs).any())
        self.assertEqual(tuple(obs_means.shape), (5, ntimes, ndim))
Example #13
def _simulate(num_groups: int, num_timesteps: int, dt_unit: str, noise: float = 1.0) -> torch.Tensor:
    # make kf:
    processes = [
        LocalLevel(id='local_level').add_measure('y'),
        Season(id='day_in_week', seasonal_period=7, fixed=True, dt_unit=dt_unit).add_measure('y'),
        FourierSeason(id='day_in_year', seasonal_period=365.25, K=2, fixed=True, dt_unit=dt_unit).add_measure('y')
    ]
    kf = KalmanFilter(measures=['y'], processes=processes)

    # simulate:
    start_datetimes = np.zeros(num_groups, dtype='timedelta64') + DEFAULT_START_DT
    with torch.no_grad():
        dfb = kf.design.for_batch(num_groups=num_groups, num_timesteps=num_timesteps, start_datetimes=start_datetimes)
        initial_state = kf._predict_initial_state(dfb)
        simulated_trajectories = initial_state.simulate_trajectories(dfb)
        sim_data = simulated_trajectories.sample_measurements(eps=noise)

    return sim_data
Example #14
 def _make_kf():
     return KalmanFilter(processes=[
         LocalLevel(id=f'll{i + 1}', measure=str(i + 1))
         for i in range(ndim)
     ] + [
         LinearModel(id=f'lm{i + 1}',
                     predictors=['x1', 'x2', 'x3', 'x4', 'x5'],
                     measure=str(i + 1)) for i in range(ndim)
     ],
                         measures=[str(i + 1) for i in range(ndim)])
Example #15
    def test_complex_kf_init(self):
        proc_specs = {'hour_in_day': {'K': 3},
                      'day_in_year': {'K': 3},
                      'local_level': {'decay': (.33, .95)},
                      'local_trend': {'decay_position': (0.95, 1.00), 'decay_velocity': (0.90, 1.00)}
                      }
        processes = []
        for id, pkwargs in proc_specs.items():
            processes.append(name_to_proc(id, **pkwargs))
            processes[-1].add_measure('measure')

        kf = KalmanFilter(measures=['measure'], processes=processes)
Example #16
 def test_gaussian_log_prob(self, ndim: int = 1):
     data = torch.zeros((2, 5, ndim))
     kf = KalmanFilter(processes=[
         LocalLevel(id=f'lm{i}', measure=str(i)) for i in range(ndim)
     ],
                       measures=[str(i) for i in range(ndim)])
     pred = kf(data)
     log_lik1 = kf.kf_step.log_prob(data, *pred)
     from torch.distributions import MultivariateNormal
     mv = MultivariateNormal(*pred)
     log_lik2 = mv.log_prob(data)
     self.assertAlmostEqual(log_lik1.sum().item(), log_lik2.sum().item())
Example #17
 def test_fourier_season(self):
     series = torch.sin(2. * 3.1415 * torch.arange(0., 7.) / 7.)
     data = torch.stack([series.roll(-i).repeat(3)
                         for i in range(6)]).unsqueeze(-1)
     start_datetimes = np.array([
         np.datetime64('2019-04-18') + np.timedelta64(i, 'D')
         for i in range(6)
     ])
     kf = KalmanFilter(processes=[
         FourierSeason(id='day_of_week', period=7, dt_unit='D', K=3)
     ],
                       measures=['y'])
     kf.script_module._scale_by_measure_var = False
     kf.state_dict(
     )['script_module.processes.day_of_week.init_mean'][:] = torch.tensor(
         [1., 0., 0., 0., 0., 0.])
     kf.state_dict(
     )['script_module.measure_covariance.cholesky_log_diag'] -= 2
     pred = kf(data, start_datetimes=start_datetimes)
     for g in range(6):
         self.assertLess(torch.abs(pred.means[g] - data[g]).mean(), .01)
Example #18
    def _train_kf(self, data: torch.Tensor, num_epochs: int = 8):
        kf = KalmanFilter(
            measures=['y'],
            processes=[
                LocalLevel(id='local_level').add_measure('y'),
                Season(id='day_in_week',
                       seasonal_period=7,
                       **self.config['season_spec']).add_measure('y'),
                FourierSeasonDynamic(
                    id='day_in_month',
                    seasonal_period=30,
                    K=2,
                    **self.config['season_spec']).add_measure('y')
            ])
        kf.opt = LBFGS(kf.parameters())

        start_datetimes = (
            np.zeros(self.config['num_groups'], dtype='timedelta64') +
            self.config['season_spec']['season_start'])

        def closure():
            kf.opt.zero_grad()
            pred = kf(data, start_datetimes=start_datetimes)
            loss = -pred.log_prob(data).mean()
            loss.backward()
            return loss

        print(f"Will train for {num_epochs} epochs...")
        loss = float('nan')
        for i in range(num_epochs):
            new_loss = kf.opt.step(closure)
            print(
                f"EPOCH {i}, LOSS {new_loss.item()}, DELTA {loss - new_loss.item()}"
            )
            loss = new_loss.item()

        return kf(data, start_datetimes=start_datetimes).predictions
Example #19
    def test_current_time(self):
        _state = {}

        def make_season(current_timestep: torch.Tensor):
            _state['call_counter'] += 1
            return current_timestep % 7

        class Season(Process):
            def __init__(self, id: str):
                super(Season, self).__init__(id=id,
                                             h_module=make_season,
                                             state_elements=['x'],
                                             f_tensors={'x->x': torch.ones(1)})
                self.h_kwarg = 'current_timestep'
                self.time_varying_kwargs = ['current_timestep']

        kf = KalmanFilter(processes=[Season(id='s1')],
                          measures=['y'],
                          compiled=False)
        kf.script_module._scale_by_measure_var = False
        data = torch.arange(7).view(1, -1, 1)
        for init_state in [0., 1.]:
            kf.state_dict(
            )['script_module.processes.s1.init_mean'][:] = torch.ones(
                1) * init_state
            _state['call_counter'] = 0
            pred = kf(data)
            # make sure make_season was called at every timestep:
            # +1 b/c we make an extra call to get_design_mats when getting initial state
            self.assertEqual(_state['call_counter'], data.shape[1] + 1)

            # more suited to a season test but we'll check anyways:
            if init_state == 1.:
                self.assertTrue((pred.state_means == 1.).all())
            else:
                self.assertGreater(pred.state_means[:, -1],
                                   pred.state_means[:, 0])
Example #20
    def test_jit(self):
        from torch_kalman.kalman_filter.predictions import Predictions

        # compile-able:
        h_module = SingleOutput()
        f_modules = torch.nn.ModuleDict()
        f_modules['position->position'] = SingleOutput()

        compilable = Process(id='compilable',
                             state_elements=['position'],
                             h_module=h_module,
                             f_modules=f_modules)

        torch_kf = KalmanFilter(processes=[compilable], measures=['y'])
        # runs:
        self.assertIsInstance(
            torch_kf(torch.tensor([[-5., 5., 1.]]).unsqueeze(-1)), Predictions)

        # not compile-able:
        not_compilable = Process(
            id='not_compilable',
            state_elements=['position'],
            h_module=lambda x=None: h_module(x),
            f_tensors={'position->position': torch.ones(1)})
        with self.assertRaises(RuntimeError) as cm:
            torch_kf = KalmanFilter(processes=[not_compilable], measures=['y'])
        the_exception = cm.exception
        self.assertIn('failed to compile', str(the_exception))
        self.assertIn('TorchScript', str(the_exception))

        # but we can skip compilation:
        torch_kf = KalmanFilter(processes=[not_compilable],
                                measures=['y'],
                                compiled=False)
        self.assertIsInstance(
            torch_kf(torch.tensor([[-5., 5., 1.]]).unsqueeze(-1)), Predictions)
Example #21
    def test_lm(self, num_groups: int = 1, num_preds: int = 1):
        data = torch.zeros((num_groups, 5, 1))
        kf = KalmanFilter(processes=[
            LinearModel(id='lm',
                        predictors=[f"x{i}" for i in range(num_preds)])
        ],
                          measures=['y'])
        wrong_dim = 1 if num_preds > 1 else 2
        with self.assertRaises((RuntimeError, torch.jit.Error),
                               msg=(num_groups, num_preds)) as cm:
            kf(data, X=torch.zeros((num_groups, 5, wrong_dim)))
        expected = f"produced output with shape [{num_groups}, {wrong_dim}], but expected ({num_preds},) " \
                   f"or (num_groups, {num_preds}). Input had shape [{num_groups}, {wrong_dim}]"
        self.assertIn(expected, str(cm.exception))

        kf(data, X=torch.ones(num_groups, data.shape[1], num_preds))
Example #22
    def test_log_prob_with_missings(self,
                                    ndim: int = 1,
                                    num_groups: int = 1,
                                    num_times: int = 5):
        data = torch.randn((num_groups, num_times, ndim))
        mask = torch.randn_like(data) > 1.
        while mask.all() or not mask.any():
            mask = torch.randn_like(data) > 1.
        data[mask.nonzero(as_tuple=True)] = float('nan')
        kf = KalmanFilter(processes=[
            LocalTrend(id=f'lm{i}', measure=str(i)) for i in range(ndim)
        ],
                          measures=[str(i) for i in range(ndim)])
        pred = kf(data)
        lp_method1 = pred.log_prob(data)
        lp_method1_sum = lp_method1.sum().item()

        lp_method2_sum = 0
        for g in range(num_groups):
            data_g = data[[g]]
            pred_g = kf(data_g)
            for t in range(num_times):
                pred_gt = pred_g[:, [t]]
                data_gt = data_g[:, [t]]
                isvalid_gt = ~torch.isnan(data_gt).squeeze(0).squeeze(0)
                if not isvalid_gt.any():
                    continue
                if isvalid_gt.all():
                    lp_gt = kf.kf_step.log_prob(data_gt, *pred_gt).item()
                else:
                    pred_gtm = pred_gt.observe(
                        state_means=pred_gt.state_means,
                        state_covs=pred_gt.state_covs,
                        R=pred_gt.R[..., isvalid_gt, :][..., isvalid_gt],
                        H=pred_gt.H[..., isvalid_gt, :])
                    lp_gt = kf.kf_step.log_prob(data_gt[..., isvalid_gt],
                                                *pred_gtm).item()
                self.assertAlmostEqual(lp_method1[g, t].item(),
                                       lp_gt,
                                       places=4)
                lp_method2_sum += lp_gt
        self.assertAlmostEqual(lp_method1_sum, lp_method2_sum, places=3)
Example #23
processes = []
for measure in measures_pp:
    processes.extend([
        LocalTrend(id=f'{measure}_trend', multi=.01).add_measure(measure),
        LocalLevel(id=f'{measure}_local_level',
                   decay=(.90, 1.00)).add_measure(measure),
        FourierSeason(id=f'{measure}_day_in_year',
                      seasonal_period=365.25 / 7.,
                      dt_unit='W',
                      K=2,
                      fixed=True).add_measure(measure)
    ])
kf_first = KalmanFilter(measures=measures_pp,
                        processes=processes,
                        measure_var_predict=('seasonal',
                                             dict(K=2,
                                                  period='yearly',
                                                  dt_unit='W')))

# Here we're showing off a few useful features of `torch-kalman`:
#
# - We are training on a multivariate time-series: that is, our time-series has two measures (SO2 and PM10) and our model will capture correlations across these.
# - We are going to train on, and predict for, multiple time-series (i.e. multiple stations) at once.
# - We are allowing the amount of noise in the measure (i.e., the measure variance) to vary with the seasons, by passing the 'seasonal' alias to `measure_var_predict`. (The `measure_var_predict` argument takes any `torch.nn.Module` that can be used for prediction, but 'seasonal' is an alias that tells the `KalmanFilter` to use a seasonal NN.)
#
# #### Train our Model
#
# When we call our KalmanFilter, we get predictions (a `StateBeliefOverTime`) which come with a mean and covariance, and so can be evaluated against the actual data using a (negative) log-probability criterion.

# +
kf_first.opt = LBFGS(kf_first.parameters(), lr=.20, max_eval=10)
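
# Below is a minimal sketch of the training loop the text above describes. The
# tensor `y` and the `start_datetimes` array are placeholders standing in for
# the notebook's training data, which isn't shown in this excerpt:

def closure():
    kf_first.opt.zero_grad()
    pred = kf_first(y, start_datetimes=start_datetimes)
    loss = -pred.log_prob(y).mean()
    loss.backward()
    return loss

for epoch in range(10):
    loss = kf_first.opt.step(closure)
    print(f"EPOCH {epoch}, LOSS {loss.item()}")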
Example #24
    def test_equations(self, n_step: int):
        data = torch.tensor([[-5., 5., 1., 0., 3.]]).unsqueeze(-1)
        num_times = data.shape[1]

        # make torch kf:
        torch_kf = KalmanFilter(processes=[
            LocalTrend(id='lt',
                       decay_velocity=None,
                       measure='y',
                       velocity_multi=1.)
        ],
                                measures=['y'],
                                compiled=n_step > 0)
        expectedF = torch.tensor([[1., 1.], [0., 1.]])
        expectedH = torch.tensor([[1., 0.]])
        _kwargs = torch_kf._parse_design_kwargs(input=data,
                                                out_timesteps=num_times)
        init_mean_kwargs = _kwargs.pop('init_mean_kwargs')
        design_kwargs = torch_kf.script_module._get_design_kwargs_for_time(
            time=0, **_kwargs)
        F, H, Q, R = torch_kf.script_module.get_design_mats(
            num_groups=1, design_kwargs=design_kwargs, cache={})
        assert torch.isclose(expectedF, F).all()
        assert torch.isclose(expectedH, H).all()

        # make filterpy kf:
        filter_kf = filterpy_KalmanFilter(dim_x=2, dim_z=1)
        filter_kf.x, filter_kf.P = torch_kf.script_module.get_initial_state(
            data, init_mean_kwargs, {}, measure_cov=R)
        filter_kf.x = filter_kf.x.detach().numpy().T
        filter_kf.P = filter_kf.P.detach().numpy().squeeze(0)
        filter_kf.Q = Q.numpy().squeeze(0)
        filter_kf.R = R.numpy().squeeze(0)
        filter_kf.F = F.numpy().squeeze(0)
        filter_kf.H = H.numpy().squeeze(0)

        # compare:
        if n_step == 0:
            with self.assertRaises(AssertionError):
                torch_kf(data, n_step=n_step)
            return
        else:
            sb = torch_kf(data, n_step=n_step)

        #
        filter_kf.state_means = []
        filter_kf.state_covs = []
        for t in range(num_times):
            if t >= n_step:
                filter_kf.update(data[:, t - n_step, :])
                # 1step:
                filter_kf.predict()
            # n_step:
            filter_kf_copy = copy.deepcopy(filter_kf)
            for i in range(1, n_step):
                filter_kf_copy.predict()
            filter_kf.state_means.append(filter_kf_copy.x)
            filter_kf.state_covs.append(filter_kf_copy.P)

        assert np.isclose(sb.state_means.numpy().squeeze(),
                          np.stack(filter_kf.state_means).squeeze(),
                          rtol=1e-4).all()
        assert np.isclose(sb.state_covs.numpy().squeeze(),
                          np.stack(filter_kf.state_covs).squeeze(),
                          rtol=1e-4).all()
Example #25
 def test_nn(self):
     y = torch.zeros((2, 5, 1))
     proc = NN(id='nn',
               nn=nn.Linear(in_features=10, out_features=2, bias=False))
     kf = KalmanFilter(processes=[proc], measures=['y'])