Example #1
    def simulate_trajectories(
            self,
            design_for_batch: Design,
            progress: bool = False,
            eps: Optional[Tensor] = None,
            ntry_diag_incr: int = 1000,
            compute_measurements: bool = True) -> 'StateBeliefOverTime':

        progress = progress or identity
        if progress is True:
            progress = tqdm
        times = progress(range(design_for_batch.num_timesteps))

        state = self.copy()
        states = []
        for t in times:
            if t > 0:
                # move sim forward one step:
                state = state.predict(F=design_for_batch.F(t - 1),
                                      Q=design_for_batch.Q(t - 1))

            # realize the state:
            state._realize(ntry=ntry_diag_incr,
                           eps=eps[:, t, :] if eps is not None else None)

            # measure the state:
            if compute_measurements:
                state.compute_measurement(H=design_for_batch.H(t),
                                          R=design_for_batch.R(t))

            states.append(state)

        return type(self).concatenate_over_time(state_beliefs=states,
                                                design=design_for_batch)
Example #2
    def test_discrete_seasons(self):
        # test seasons without durations
        season = Season(id='day_of_week',
                        seasonal_period=7,
                        season_duration=1,
                        season_start='2018-01-01',
                        dt_unit='D')
        season.add_measure('measure')

        # `start_datetimes` is required because `dt_unit` was specified above, so omitting it raises:
        with self.assertRaises(ValueError) as cm:
            season.for_batch(1, 1)
        self.assertEqual(
            cm.exception.args[0],
            'Must pass `start_datetimes` to process `day_of_week`.')

        design = Design(processes=[season], measures=['measure'])
        process_kwargs = {
            'day_of_week': {
                'start_datetimes': array([datetime64('2018-01-01')])
            }
        }
        batch_season = design.for_batch(1, 1, process_kwargs=process_kwargs)

        # test transitions manually:
        state_mean = torch.arange(0.0, 7.0)[:, None]
        state_mean[0] = -state_mean[1:].sum()
        for i in range(10):
            state_mean_last = state_mean
            state_mean = torch.mm(batch_season.F(0)[0], state_mean)
            self.assertTrue((state_mean[1:] == state_mean_last[:-1]).all())

        self.assertListEqual(
            batch_season.H(0)[0].tolist(),
            [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
Example #3
    def test_design_h_batch_process(self):
        # two processes each tied to their own measure, plus a common process tied to both measures:
        vel_1 = LocalTrend(id='vel_1')
        vel_1.add_measure('measure_1')

        vel_2 = LocalTrend(id='vel_2')
        vel_2.add_measure('measure_2')

        vel_common = LocalTrend(id='vel_common')
        vel_common.add_measure('measure_1')
        vel_common.ses_to_measures[('measure_1',
                                    'position')] = lambda: Tensor([1.0, 0.0])
        vel_common.add_measure('measure_2')
        vel_common.ses_to_measures[('measure_2',
                                    'position')] = lambda: Tensor([0.0, 1.0])

        design = Design(processes=[vel_1, vel_2, vel_common],
                        measures=['measure_1', 'measure_2'])
        batch_design = design.for_batch(num_groups=2, num_timesteps=1)

        design_H = batch_design.H(0)

        self.assertListEqual(list1=design_H[0].tolist(),
                             list2=[[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                                    [0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])

        self.assertListEqual(list1=design_H[1].tolist(),
                             list2=[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                                    [0.0, 0.0, 1.0, 0.0, 1.0, 0.0]])
Example #4
    def test_fourier_season2(self):
        season = FourierSeason2(id='season', seasonal_period=24, K=2, decay=False, dt_unit=None)
        season.add_measure('measure')
        design = Design(processes=[season], measures=['measure'])
        for_batch = design.for_batch(1, 24 * 2)

        positions = []
        state = torch.randn(5)
        for i in range(for_batch.num_timesteps):
            state = for_batch.F(i)[0].matmul(state)
            positions.append(round(state[-1].item() * 100) / 100.)

        self.assertListEqual(positions[0:24], positions[-24:])
Example #5
    def test_tbats_season(self):
        K = 3
        season = TBATS(id='season', seasonal_period=24, K=K, decay=False, dt_unit=None)
        season.add_measure('measure')
        design = Design(processes=[season], measures=['measure'])
        for_batch = design.for_batch(1, 24 * 7)

        positions = []
        state = torch.randn(int(K * 2))
        for i in range(for_batch.num_timesteps):
            state = for_batch.F(i)[0].matmul(state)
            pos = for_batch.H(i)[0].matmul(state)
            positions.append(round(pos.item() * 100) / 100.)

        self.assertListEqual(positions[0:24], positions[-24:])
Example #6
    def test_design_attrs(self):
        with self.assertRaises(ValueError) as cm:
            Design(measures=['same', 'same'], processes=[LocalTrend('test')])
        self.assertIn("Duplicates", cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            Design(processes=[LocalTrend(id='same'),
                              LocalTrend(id='same')],
                   measures=['test'])
        self.assertEqual(cm.exception.args[0], "Duplicate process-ids: same.")

        with self.assertRaises(ValueError) as cm:
            Design(processes=[LocalTrend(id='1')], measures=['1'])
        self.assertIn(
            "The following `measures` are not in any of the `processes`:\n{'1'}",
            cm.exception.args[0])
Example #7
def simple_mv_velocity_design(dims=2):
    processes, measures = [], []
    for i in range(dims):
        process = LocalTrend(id=str(i), decay_velocity=False)
        measure = str(i)
        process.add_measure(measure=measure)
        processes.append(process)
        measures.append(measure)
    return Design(processes=processes, measures=measures)
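The helper above wires one `LocalTrend` per measure into a single `Design`. As a minimal sketch (not part of the test-suite, assuming only the `for_batch`/`F`/`H` API exercised in the examples above), its output can be stepped like this:

import torch

design = simple_mv_velocity_design(dims=2)
batch = design.for_batch(num_groups=1, num_timesteps=5)

# two LocalTrend processes -> four state dimensions (position + velocity for each):
state = torch.randn(4, 1)
state = torch.mm(batch.F(0)[0], state)        # advance the state one timestep
measurement = torch.mm(batch.H(0)[0], state)  # project the state onto the two measures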
Example #8
    def test_velocity(self):
        # no decay:
        lt = LocalTrend(id='test', decay_velocity=False)
        lt.add_measure('measure')
        design = Design(processes=[lt], measures=['measure'])
        batch_vel = design.for_batch(2, 1)

        # check F:
        self.assertListEqual(list1=batch_vel.F(0)[0].tolist(), list2=[[1., 1.], [0., 1.]])
        state_mean = Tensor([[1.], [-.5]])
        for i in range(3):
            state_mean = torch.mm(batch_vel.F(0)[0], state_mean)
            self.assertEqual(state_mean[0].item(), 1 - .5 * (i + 1.))
            self.assertEqual(state_mean[1].item(), -.5)

        # with decay:
        lt = LocalTrend(id='test', decay_velocity=(.50, 1.00))
        lt.add_measure('measure')
        design = Design(processes=[lt], measures=['measure'])
        batch_vel = design.for_batch(2, 1)
        self.assertLess(batch_vel.F(0)[0][1, 1], 1.0)
        self.assertGreater(batch_vel.F(0)[0][1, 1], 0.5)
        decay = design.processes['test'].decayed_transitions['velocity'].get_value()
        self.assertEqual(decay, batch_vel.F(0)[0][1, 1])

        state_mean = Tensor([[0.], [1.0]])
        for i in range(3):
            state_mean = torch.mm(batch_vel.F(0)[0], state_mean)
            self.assertEqual(decay ** (i + 1), state_mean[1].item())
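A small illustrative sketch of the decay behaviour asserted above; the concrete decay value here is hypothetical (chosen inside the `(.50, 1.00)` bounds), and the transition matrix mirrors the no-decay `F` checked at the top of the test:

import torch

d = 0.75  # hypothetical decay value within the (.50, 1.00) bounds used above
F = torch.tensor([[1.0, 1.0],
                  [0.0, d]])
state = torch.tensor([[0.0], [1.0]])
for i in range(3):
    state = torch.mm(F, state)
    # the velocity component shrinks geometrically, matching the loop above:
    assert abs(state[1].item() - d ** (i + 1)) < 1e-6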
Example #9
    def test_design_h_batch_process(self):
        # two processes each tied to their own measure, plus a common process tied to both measures:
        vel_1 = LocalTrend(id='vel_1')
        vel_1.add_measure('measure_1')

        vel_2 = LocalTrend(id='vel_2')
        vel_2.add_measure('measure_2')

        vel_common = LocalTrend(id='vel_common')
        vel_common.add_measure('measure_1')
        vel_common._set_measure('measure_1', 'position', 1., force=True)
        vel_common.add_measure('measure_2')
        vel_common._set_measure('measure_2', 'position', 5., force=True)

        design = Design(processes=[vel_1, vel_2, vel_common],
                        measures=['measure_1', 'measure_2'])
        batch_design = design.for_batch(num_groups=1, num_timesteps=1)

        design_H = batch_design.H(0)

        self.assertListEqual(list1=design_H[0].tolist(),
                             list2=[[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                                    [0.0, 0.0, 1.0, 0.0, 5.0, 0.0]])
Example #10
    def test_discrete_seasons(self):
        # test seasons without durations
        season = Season(
            id='day_of_week', seasonal_period=7, season_duration=1, dt_unit='D'
        )
        season.add_measure('measure')

        # `start_datetimes` is required because `dt_unit` was specified above, so omitting it raises:
        with self.assertRaises(TypeError) as cm:
            season.for_batch(1, 1)
        self.assertIn('Missing argument `start_datetimes`', cm.exception.args[0])

        design = Design(processes=[season], measures=['measure'])
        batch_season = design.for_batch(1, 1, start_datetimes=np.array([np.datetime64('2018-01-01')]))

        # test transitions manually:
        state_mean = torch.arange(0.0, 7.0)[:, None]
        state_mean[0] = -state_mean[1:].sum()
        for i in range(10):
            state_mean_last = state_mean
            state_mean = torch.mm(batch_season.F(0)[0], state_mean)
            self.assertTrue((state_mean[1:] == state_mean_last[:-1]).all())

        self.assertListEqual(batch_season.H(0)[0].tolist(), [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
Example #11
    def _init_design(self, *args, **kwargs) -> None:
        self.design = Design(*args, **kwargs)
Example #12
class KalmanFilter(torch.nn.Module):
    def __init__(self,
                 measures: Sequence[str],
                 processes: Sequence[Process],
                 device: Optional[torch.device] = None,
                 **kwargs):

        super().__init__()
        self.design: Design = None
        self._init_design(measures=measures,
                          processes=processes,
                          device=device,
                          **kwargs)

        # parameters from design:
        self.design_parameters = ParameterList()
        for param in self.design.parameters():
            self.design_parameters.append(param)

        # the StateBelief family, implemented by property (default gaussian)
        self._family = None

        self.to(device=self.design.device)

    def _init_design(self, *args, **kwargs) -> None:
        self.design = Design(*args, **kwargs)

    @property
    def measure_size(self) -> int:
        return self.design.measure_size

    @property
    def family(self) -> TypeVar('Gaussian'):
        if self._family is None:
            self._family = Gaussian
        return self._family

    def predict_initial_state(self,
                              design_for_batch: DesignForBatch) -> 'Gaussian':
        return self.family(
            means=design_for_batch.initial_mean,
            covs=design_for_batch.initial_covariance,
            # we consider this a one-step-ahead prediction, so last measured one step ago:
            last_measured=torch.ones(design_for_batch.num_groups,
                                     dtype=torch.int))

    def design_for_batch(self, num_groups: int, num_timesteps: int,
                         **kwargs) -> DesignForBatch:
        return self.design.for_batch(num_groups=num_groups,
                                     num_timesteps=num_timesteps,
                                     **kwargs)

    # noinspection PyShadowingBuiltins
    def forward(self,
                input: Tensor,
                initial_state: Optional[StateBelief] = None,
                progress: Union[tqdm, bool] = False,
                **kwargs) -> StateBeliefOverTime:
        """
        :param input: The multivariate time-series to be fit by the kalman-filter. A Tensor where the first dimension
        represents the groups, the second dimension represents the time-points, and the third dimension represents the
        measures.
        :param initial_state: If a StateBelief, this is used as the prediction for time=0; if None then each process
        generates initial values.
        :param progress: Should progress-bar be generated?
        :param kwargs: Other kwargs that will be passed to the `design_for_batch` method.
        :return: A StateBeliefOverTime consisting of one-step-ahead predictions.
        """

        num_groups, num_timesteps, num_measures = input.shape
        if num_measures != self.measure_size:
            raise ValueError(
                f"This KalmanFilter has {self.measure_size} measurement-dimensions; but the input shape is "
                f"{(num_groups, num_timesteps, num_measures)} (last dim should == measure-size)."
            )

        design_for_batch = self.design_for_batch(num_groups=num_groups,
                                                 num_timesteps=num_timesteps,
                                                 **kwargs)

        # initial state of the system:
        if initial_state is None:
            state_prediction = self.predict_initial_state(design_for_batch)
        else:
            state_prediction = initial_state

        progress = progress or identity
        if progress is True:
            progress = tqdm
        iterator = progress(range(num_timesteps))

        # generate one-step-ahead predictions:
        state_predictions = []
        for t in iterator:
            if t > 0:
                # take state-prediction of previous t (now t-1), correct it according to what was actually measured at t-1
                state_belief = state_prediction.update(obs=input[:, t - 1, :])

                # predict the state for t, from information from t-1
                # F at t-1 is transition *from* t-1 *to* t
                F = design_for_batch.F(t - 1)
                Q = design_for_batch.Q(t - 1)
                state_prediction = state_belief.predict(F=F, Q=Q)

            # compute how state-prediction at t translates into measurement-prediction at t
            H = design_for_batch.H(t)
            R = design_for_batch.R(t)
            state_prediction.compute_measurement(H=H, R=R)

            # append to output:
            state_predictions.append(state_prediction)

        return self.family.concatenate_over_time(
            state_beliefs=state_predictions, design=self.design)

    def smooth(self, states: StateBeliefOverTime):
        raise NotImplementedError

    def simulate(self,
                 states: Union[StateBeliefOverTime, StateBelief],
                 horizon: int,
                 num_iter: int,
                 progress: bool = False,
                 from_times: Optional[Sequence[int]] = None,
                 state_to_measured: Optional[Callable] = None,
                 white_noise: Optional[Tuple[Tensor, Tensor]] = None,
                 ntry_diag_incr: int = 1000,
                 **kwargs) -> List[Tensor]:

        assert horizon > 0

        # forecast-from time:
        if from_times is None:
            if isinstance(states, StateBelief):
                initial_state = states
            else:
                # a StateBeliefOverTime was passed, but no from_times, so just pick the last one
                initial_state = states.last_prediction()
        else:
            # from_times will be used to pick the slice
            initial_state = states.get_state_belief(from_times)

        initial_state = initial_state.__class__(
            means=initial_state.means.repeat((num_iter, 1)),
            covs=initial_state.covs.repeat((num_iter, 1, 1)),
            last_measured=initial_state.last_measured.repeat(num_iter))

        design_for_batch = self.design_for_batch(
            num_groups=initial_state.num_groups,
            num_timesteps=horizon,
            **kwargs)

        if white_noise is None:
            process_wn, measure_wn = None, None
        else:
            process_wn, measure_wn = white_noise
        trajectories = initial_state.simulate_state_trajectories(
            design_for_batch=design_for_batch,
            progress=progress,
            ntry_diag_incr=ntry_diag_incr,
            eps=process_wn)
        if state_to_measured is None:
            sim = trajectories.measurement_distribution.deterministic_sample(
                eps=measure_wn)
        else:
            sim = state_to_measured(trajectories)

        return torch.chunk(sim, num_iter)

    def forecast(self,
                 states: Union[StateBeliefOverTime, StateBelief],
                 horizon: int,
                 from_times: Optional[Sequence[int]] = None,
                 progress: bool = False,
                 **kwargs) -> StateBeliefOverTime:

        assert horizon > 0

        # forecast-from time:
        if from_times is None:
            if isinstance(states, StateBelief):
                state_prediction = states
            else:
                # a StateBeliefOverTime was passed, but no from_times, so just pick the last one
                state_prediction = states.last_prediction()
        else:
            # from_times will be used to pick the slice
            state_prediction = states.get_state_belief(from_times)

        design_for_batch = self.design_for_batch(
            num_groups=state_prediction.num_groups,
            num_timesteps=horizon,
            **kwargs)

        progress = progress or identity
        if progress is True:
            progress = tqdm
        iterator = progress(range(design_for_batch.num_timesteps))

        forecasts = []
        for t in iterator:
            if t > 0:
                # predict the state for t, from information from t-1
                # F at t-1 is transition *from* t-1 *to* t
                F = design_for_batch.F(t - 1)
                Q = design_for_batch.Q(t - 1)
                state_prediction = state_prediction.predict(F=F, Q=Q)

            # compute how state-prediction at t translates into measurement-prediction at t
            H = design_for_batch.H(t)
            R = design_for_batch.R(t)
            state_prediction.compute_measurement(H=H, R=R)

            # append to output:
            forecasts.append(state_prediction)

        return self.family.concatenate_over_time(state_beliefs=forecasts,
                                                 design=self.design)
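A hedged end-to-end sketch (not taken from the source) of how this class is driven; it assumes only the constructor arguments, the `(groups, timesteps, measures)` input shape documented in `forward`, and the `forecast` signature above:

import torch

lt = LocalTrend(id='trend')
lt.add_measure('measure')
kf = KalmanFilter(measures=['measure'], processes=[lt])

# input: 3 groups, 50 timesteps, 1 measure
y = torch.randn(3, 50, 1)
predictions = kf(y)  # StateBeliefOverTime of one-step-ahead predictions

# forecast 10 steps past the end of the observed series:
forecasts = kf.forecast(states=predictions, horizon=10)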