Example #1
    def test_Filters(self):
        for model in [self.model, self.mvnmodel]:
            x, y = model.sample(500)

            for filter_, props in [(SISR, {'particles': 500}), (APF, {'particles': 500}), (UKF, {})]:
                filt = filter_(model, **props).initialize()

                filt = filt.longfilter(y)

                assert len(filt.s_mx) > 0

                filtmeans = filt.filtermeans.numpy()

                # ===== Run Kalman ===== #
                if model is self.model:
                    kf = pykalman.KalmanFilter(transition_matrices=1., observation_matrices=1.)
                else:
                    kf = pykalman.KalmanFilter(transition_matrices=[[0.5, 1 / 3], [0, 1.]], observation_matrices=[1, 2])

                filterestimates = kf.filter(y.numpy())

                if filtmeans.ndim < 2:
                    filtmeans = filtmeans[:, None]

                rel_error = np.median(np.abs((filtmeans - filterestimates[0]) / filterestimates[0]))

                ll = kf.loglikelihood(y.numpy())

                rel_ll_error = np.abs((ll - np.array(filt.s_ll).sum()) / ll)

                assert rel_error < 0.05 and rel_ll_error < 0.05
Example #2
    def test_Filters(self):
        for model in [self.model, self.mvnmodel]:
            x, y = model.sample_path(500)

            for filter_type, props in [
                (SISR, {"particles": 500}),
                (APF, {"particles": 500}),
                (UKF, {}),
                (SISR, {"particles": 50, "proposal": prop.Unscented()}),
            ]:
                filt = filter_type(model, **props)
                result = filt.longfilter(y, record_states=True)

                filtmeans = result.filter_means.numpy()

                # ===== Run Kalman ===== #
                if model is self.model:
                    kf = pykalman.KalmanFilter(transition_matrices=1.0, observation_matrices=1.0)
                else:
                    kf = pykalman.KalmanFilter(
                        transition_matrices=[[0.5, 1 / 3], [0, 1.0]], observation_matrices=[1, 2]
                    )

                f_mean, _ = kf.filter(y.numpy())

                if model.hidden_ndim < 1 and not isinstance(filt, UKF):
                    f_mean = f_mean[:, 0]

                rel_error = np.median(np.abs((filtmeans - f_mean) / f_mean))

                ll = kf.loglikelihood(y.numpy())
                rel_ll_error = np.abs((ll - result.loglikelihood.numpy()) / ll)

                assert rel_error < 0.05 and rel_ll_error < 0.05
Example #3
    def __init__(self, init_state: Optional[np.ndarray] = None):
        """
        Create a new Kalman Filter.

        :param init_state: the initial state of the object to track. Will be the zero vector if set to None.
        """

        object.__init__(self)

        self.__state = init_state if init_state is not None else np.zeros(
            shape=(4, ))
        self.__covar = np.eye(4) * 1e-3
        self.__transition_covar = np.eye(4) * 1e-4
        self.__observation_covar = np.eye(2) * 1e-1 + np.random.randn(2,
                                                                      2) * 1e-1

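        # Constant-velocity model: the state is (x, y, vx, vy) and only the
        # (x, y) position is observed.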
        transition_matrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0],
                                      [0, 0, 0, 1]])
        observation_matrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])

        self.__internal_filter = pykalman.KalmanFilter(
            transition_matrices=transition_matrix,
            observation_matrices=observation_matrix,
            initial_state_mean=self.__state,
            initial_state_covariance=self.__covar,
            transition_covariance=self.__transition_covar,
            observation_covariance=self.__observation_covar)
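
The constructor above only builds the tracker; as a minimal usage sketch (the kalman_step helper is an assumption, not part of the original class), pykalman's filter_update is the usual way to advance such a filter by one (x, y) measurement:

import numpy as np
import pykalman

def kalman_step(kf: pykalman.KalmanFilter, state_mean, state_covar, measurement_xy):
    # One online update: fold a single (x, y) observation into the running estimate.
    new_mean, new_covar = kf.filter_update(state_mean, state_covar,
                                           observation=np.asarray(measurement_xy))
    return new_mean, new_covar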
Example #4
def _train_predict_dlm(args):
    """
    Helper function to train and predict the dynamic linear model for a train and test set. Separated from the main
    class to enable the use of the multiprocessing module. This should not be called directly.
    """
    delta, X, y, ntrain, loss = args
    print(delta)
    dlm = DynamicLinearModel(include_constant=False)

    # first fit using the training data
    dlm.fit(X[:ntrain], y[:ntrain], delta=delta, method='filter')

    # now run the filter on the whole data set
    ntime, pfeat = X.shape
    observation_matrix = X.reshape((ntime, 1, pfeat))
    k = dlm.kalman
    kalman = pykalman.KalmanFilter(
        transition_matrices=k.transition_matrices,
        observation_matrices=observation_matrix,
        observation_offsets=k.observation_offsets,
        transition_offsets=k.transition_offsets,
        observation_covariance=k.observation_covariance,
        transition_covariance=k.transition_covariance,
        initial_state_mean=k.initial_state_mean,
        initial_state_covariance=k.initial_state_covariance)

    beta, bcov = kalman.filter(y)

    # predict the y-values in the test set
    yfit = np.sum(beta[ntrain - 1:-1] * X[ntrain - 1:-1], axis=1)

    test_error = loss(y[ntrain:], yfit)

    return test_error
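
Because the docstring notes the helper was separated out so it can be used with the multiprocessing module, a minimal driver sketch could look like the following (grid_search_delta and the delta grid are assumptions, not part of the original code):

import multiprocessing

import numpy as np

def grid_search_delta(X, y, ntrain, deltas, loss):
    # Evaluate each candidate delta in its own process and return the best one.
    args = [(delta, X, y, ntrain, loss) for delta in deltas]
    with multiprocessing.Pool() as pool:
        test_errors = pool.map(_train_predict_dlm, args)
    return deltas[int(np.argmin(test_errors))]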
Example #5

    def kpredict(self, performance_df):
        """

        :param performance_df:
        :return:
        """

        transition_matrices = np.array(1)
        observation_matrices = np.array(1)
        transition_covariance = None
        observation_covariance = None
        transition_offsets = None
        observation_offsets = None
        initial_state_mean = None
        initial_state_covariance = None
        random_state = None
        em_vars = [
            'transition_covariance', 'observation_covariance',
            'initial_state_mean', 'initial_state_covariance'
        ]

        n_dim_state = None
        n_dim_obs = None
        kf = pykalman.KalmanFilter(transition_matrices, observation_matrices,
                                   transition_covariance,
                                   observation_covariance, transition_offsets,
                                   observation_offsets, initial_state_mean,
                                   initial_state_covariance, random_state,
                                   em_vars, n_dim_state, n_dim_obs)
        mu, sigma = kf.filter(performance_df.values)
        next_filtered_state_mean, next_filtered_state_covariance = kf.filter_update(
            mu[-1], sigma[-1], performance_df.values[-1])

        return np.sign(next_filtered_state_mean - performance_df.values[-1])
Example #6

    def kfilter(self, df):
        transition_matrices = 1
        observation_matrices = 1
        transition_covariance = None
        observation_covariance = None
        transition_offsets = None
        observation_offsets = None
        initial_state_mean = None
        initial_state_covariance = None
        random_state = None
        em_vars = [
            'transition_covariance', 'observation_covariance',
            'initial_state_mean', 'initial_state_covariance'
        ]

        n_dim_state = None
        n_dim_obs = None
        kf = pykalman.KalmanFilter(transition_matrices, observation_matrices,
                                   transition_covariance,
                                   observation_covariance, transition_offsets,
                                   observation_offsets, initial_state_mean,
                                   initial_state_covariance, random_state,
                                   em_vars, n_dim_state, n_dim_obs)

        df_mu, df_sigma = kf.filter(df.values)

        return DataFrame(df_mu, index=df.index, columns=df.columns)
Example #7
 def smooth2(self, x: Feature, smoother: dict):
     if smoother['type'].startswith("gauss"):
         import scipy.signal
         sigma = smoother['sigma_ms'] / x.frame_shift_ms
         if sigma < 1:
             return x
         cutoff = sigma * smoother['cutoff_sigma']
         # 4 x sigma contains 99.9% of values
         window = scipy.signal.gaussian(int(round(sigma * 2 * 4)), sigma).astype(np.float32)
         # cut off only on left side (after convolution this is the future)
         window = window[int(np.round(len(window) / 2 - cutoff)):]
         window = window / sum(window)
         x = Feature(np.array([scipy.signal.convolve(row, window)[:row.size] for row in x.T]).T, infofrom=x)
     elif smoother['type'].startswith("exponential"):
         factor = smoother['factor']
         facs = (factor * (1 - factor) ** np.arange(1000, dtype=np.float32))
         x = Feature(np.array([np.convolve(row, facs, mode='full')[:row.size] for row in x.T]).T, infofrom=x)
     elif smoother['type'] == 'kalman':
         # todo
         import pykalman
         kf = pykalman.KalmanFilter(n_dim_obs=x.shape[1])
         res, _ = kf.filter(x)  # features.gaussian_blur(x, 300)
         return None  # NumFeature(res.astype(np.float32))
     else:
         raise Exception(f"unknown method {smoother['type']}")
     return x
Example #8
def run_kalman(path, mu_0, sigma_0, A, C, sigma_a, sigma_c, frame_time=2000):

    kalman_filter = pykalman.KalmanFilter(transition_matrices=A,
                                          observation_matrices=C,
                                          transition_covariance=sigma_a,
                                          observation_covariance=sigma_c,
                                          initial_state_mean=mu_0,
                                          initial_state_covariance=sigma_0)

    src = tkanvas.TKanvas(draw_fn=kalman_draw, frame_time=frame_time)
    mean, cov = mu_0, sigma_0

    obs = path[0]
    # initial update
    new_mean, new_cov = kalman_filter.filter_update(mean, cov, observation=obs)
    src.mean = mean
    src.cov = cov
    src.new_mean = new_mean
    src.new_cov = new_cov
    src.A = A
    src.C = C
    src.sigma_a = sigma_a
    src.sigma_c = sigma_c
    src.kalman_filter = kalman_filter
    src.track = True
    src.obs_path = []
    src.track_path = []
    src.obs = obs
    src.path = path
    src.time_step = 1
    src.kalman_iter = draw_kalman_filter(src)
Example #9
def kalman_filter_em2(foh, fhv, fvh, o_array, v_array):

    trans_mat = get_state_transition_matrix2(foh, fhv, fvh)
    obser_mat = get_observation_matrix2()

    o_array_var = numpy.diff(o_array).var()
    v_array_var = numpy.diff(v_array).var()

    obser_cov = numpy.cov(numpy.array([o_array, v_array]))

    init_state = numpy.zeros((trans_mat.shape[0], ))
    init_state_cov = numpy.diag(numpy.ones((trans_mat.shape[0], )))

    kalman = pykalman.KalmanFilter(
        transition_matrices=trans_mat,
        observation_matrices=obser_mat,
        initial_state_mean=init_state,
        initial_state_covariance=init_state_cov,
        observation_covariance=obser_cov,
        em_vars=['transition_covariance'],
    )

    n = o_array.shape[0]
    data = numpy.zeros((n, 2))
    data[:, 0] = o_array
    data[:, 1] = v_array
    state_filt, state_cov = kalman.em(data).smooth(data)
Example #10
def kalman(measurements):
    translation = []
    orientation = []
    magnetometer = []
    for measurement in measurements:
        translation.append([measurement.translation.x, measurement.translation.y, measurement.translation.z])
        orientation.append([measurement.orientation.roll, measurement.orientation.pitch, measurement.orientation.yaw])
        magnetometer.append([measurement.orientation.roll, measurement.orientation.pitch, measurement.orientation.yaw])

    k_filter = pykalman.KalmanFilter(transition_matrices=[[1, 1, 1], [0, 1, 1], [1, 1, 1]],
                                     observation_matrices=[[0.1, 0.5, 0.0], [-0.3, 0.0, 0.0], [1, 1, 1]])
    filtered_translation = k_filter.em(translation, n_iter=5)
    filtered_orientation = k_filter.em(np.asarray(orientation), n_iter=5)
    filtered_magnetometer = k_filter.em(np.asarray(magnetometer), n_iter=5)
    (smoothed_translation_means, smoothed_translation_covariance) = filtered_translation.smooth(translation)
    (smoothed_orientation_means, smoothed_orientation_covariance) = filtered_orientation.smooth(orientation)
    (smoothed_magnetometer_means, smoothed_magnetometer_covariance) = filtered_magnetometer.smooth(magnetometer)

    msg = ninedof()
    msg.translation.x = smoothed_translation_means[4][0]
    msg.translation.y = smoothed_translation_means[4][1]
    msg.translation.z = smoothed_translation_means[4][2]

    msg.orientation.roll = smoothed_orientation_means[4][0]
    msg.orientation.pitch = smoothed_orientation_means[4][1]
    msg.orientation.yaw = smoothed_orientation_means[4][2]

    msg.magnetometer.roll = smoothed_magnetometer_means[4][0]
    msg.magnetometer.pitch = smoothed_magnetometer_means[4][1]
    msg.magnetometer.yaw = smoothed_magnetometer_means[4][2]

    global Publisher
    Publisher.publish(msg)
Example #11
    def __init__(self, p, w_range=1.0, max_x=1.0):
        super(ArPredicter, self).__init__()
        self.p = p
        self.min_ob = self.p + 1
        self.max_x = max_x
        self.state_cor = np.matrix(np.zeros((self.p, self.p)))
        self.state = np.matrix(np.zeros(self.p)).T
        '''
        self.train_matrix = [[0.0 for i in range(p)]]
        for i in range(p-1):
            new_p = [0.0 for i in range(p)]
            new_p[i] = 1
            self.train_matrix.append(new_p)
        '''
        self.train_matrix = np.matrix(np.zeros((p, p)))
        self.ob_matrix = np.matrix(np.zeros(p))
        self.ob_matrix[0, 0] = 1.0
        self.em_vars = [
            "transition_matrices",
            "transition_covariance",
        ]

        self.kalman = pykalman.KalmanFilter(
            em_vars=[
                "transition_matrices",
                "transition_covariance",
            ],
            transition_matrices=self.train_matrix)
Example #12

    def LogLikelihoodWrapper(parameters, y_data, x_data, stage, lambda_g,
                             lambda_z, xi_00, P_00):

        if stage == 1:
            stage, A, H, R, Q, F, x_data, y_data = Rstar.UnpackStage1(
                parameters, y_data, x_data)

        elif stage == 2:
            stage, A, H, R, Q, F = Rstar.UnpackStage2(parameters, lambda_g)

        elif stage == 3:
            stage, A, H, R, Q, F = Rstar.UnpackStage3(parameters, lambda_g,
                                                      lambda_z)

        else:
            raise ValueError("invalid stage {}; must be 1, 2 or 3".format(stage))

        exogenous_variables = (A.dot(x_data.T)).transpose()

        loglike = pykalman.KalmanFilter(
            transition_matrices=F,
            transition_offsets=np.zeros((xi_00.shape[0])),
            transition_covariance=Q,
            observation_matrices=H,
            observation_offsets=exogenous_variables,
            observation_covariance=R,
            initial_state_mean=xi_00.reshape(xi_00.shape[0]),
            initial_state_covariance=P_00).loglikelihood(y_data)

        return loglike
Example #13
def kalman_filter_sm(foh, fhv, fvh, o_array, v_array, smooth_param=300.0):

    trans_mat = get_state_transition_matrix(foh, fhv, fvh)
    obser_mat = get_observation_matrix()

    trans_cov = 1.0 * numpy.diag(numpy.ones((trans_mat.shape[0], )))
    # DEVEL
    # -------------------------------------------
    for i in range(1, trans_mat.shape[0]):
        trans_cov[i] = 0.0
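    # note: `trans_cov[i] = 0.0` zeroes the whole i-th row, so after this loop only
    # the first state keeps a nonzero process-noise variance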
    # -------------------------------------------
    obser_cov = smooth_param * numpy.diag(numpy.ones((obser_mat.shape[0], )))

    init_state = numpy.zeros((trans_mat.shape[0], ))
    init_state_cov = numpy.diag(numpy.ones((trans_mat.shape[0], )))

    kalman = pykalman.KalmanFilter(transition_matrices=trans_mat,
                                   observation_matrices=obser_mat,
                                   transition_covariance=trans_cov,
                                   observation_covariance=obser_cov,
                                   initial_state_mean=init_state,
                                   initial_state_covariance=init_state_cov)

    n = o_array.shape[0]
    data = numpy.zeros((n, 2))
    data[:, 0] = o_array
    data[:, 1] = v_array
    state_filt, state_cov = kalman.smooth(data)

    return extract_kalman_data(foh, state_filt, state_cov)
Example #14
def predict(paths, predict_all=False):
    multimodal_outputs = {}
    neighbours_tracks = []

    ## Primary Prediction
    if not predict_all:
        paths = paths[0:1]

    for i, path in enumerate(paths):
        path = paths[i]
        initial_state_mean = [path[0].x, 0, path[0].y, 0]

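        # Constant-velocity model: the state is (x, vx, y, vy); the observation
        # matrix picks out the x and y positions only.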
        transition_matrix = [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1],
                             [0, 0, 0, 1]]

        observation_matrix = [[1, 0, 0, 0], [0, 0, 1, 0]]

        kf = pykalman.KalmanFilter(transition_matrices=transition_matrix,
                                   observation_matrices=observation_matrix,
                                   transition_covariance=1e-5 * np.eye(4),
                                   observation_covariance=0.05**2 * np.eye(2),
                                   initial_state_mean=initial_state_mean)
        # kf.em([(r.x, r.y) for r in path[:9]], em_vars=['transition_matrices',
        #                                                'observation_matrices'])
        kf.em([(r.x, r.y) for r in path[:9]])
        observed_states, _ = kf.smooth([(r.x, r.y) for r in path[:9]])

        # prepare predictions
        frame_diff = path[1].frame - path[0].frame
        first_frame = path[8].frame + frame_diff
        ped_id = path[8].pedestrian

        # sample predictions (first sample corresponds to last state)
        # average 5 sampled predictions
        predictions = None
        for _ in range(5):
            _, pred = kf.sample(12 + 1, initial_state=observed_states[-1])
            if predictions is None:
                predictions = pred
            else:
                predictions += pred
        predictions /= 5.0

        #write
        if i == 0:
            primary_track = [
                trajnettools.TrackRow(first_frame + j * frame_diff, ped_id, x,
                                      y)
                for j, (x, y) in enumerate(predictions[1:])
            ]
        else:
            neighbours_tracks.append([
                trajnettools.TrackRow(first_frame + j * frame_diff, ped_id, x,
                                      y)
                for j, (x, y) in enumerate(predictions[1:])
            ])

    ## Unimodal Output
    multimodal_outputs[0] = primary_track, neighbours_tracks
    return multimodal_outputs
Example #15
def run_kf_smooth(A, H, Q, R, Z, x_hat_0, P_0):
    kf = pykalman.KalmanFilter(transition_matrices=A,
                               observation_matrices=H,
                               transition_covariance=Q,
                               observation_covariance=R,
                               initial_state_mean=x_hat_0,
                               initial_state_covariance=P_0)
    smooth_means, smooth_covs = kf.smooth(Z)
    return smooth_means
Example #16
def get_data(transition_matrix,
             observation_matrix,
             transition_covariance,
             observation_covariance,
             T=100,
             random_state=None):
    if random_state is None:
        random_state = np.random.RandomState()
    kf = pykalman.KalmanFilter(transition_matrix, observation_matrix,
                               transition_covariance, observation_covariance)
    sample = kf.sample(T, random_state=random_state)
    return sample[1].data.astype(np.float32), kf
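
A short usage sketch with scalar parameters for a 1-D state and observation (the numbers are illustrative only):

data, kf = get_data(transition_matrix=0.9,
                    observation_matrix=1.0,
                    transition_covariance=0.1,
                    observation_covariance=0.5,
                    T=50)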
Example #17
 def __init__(self, p, w_range=1.0, max_x=1.0):
     super(ArPredicter, self).__init__()
     self.p = p
     self.filter_state_mean = [0 for i in range(self.p)]
     self.row = [0 for i in range(self.p)]
      self.filter_state_covariance = np.array([copy.deepcopy(self.row) for i in range(self.p)])
     for index, row in enumerate(self.filter_state_covariance):
         row[index] = 99999999.0
     self.filter_state_covariance = 1000000000.0
     self.w_range = w_range
     self.kalman = pykalman.KalmanFilter(em_vars=['initial_state_mean','initial_state_covariance'])
     self.min_ob = self.p
     self.max_x = max_x
Example #18
def lgssm_true_posterior(observations, initial_loc, initial_scale,
                         transition_mult, transition_bias, transition_scale,
                         emission_mult, emission_bias, emission_scale):
    kf = pykalman.KalmanFilter(initial_state_mean=[initial_loc],
                               initial_state_covariance=[[initial_scale**2]],
                               transition_matrices=[[transition_mult]],
                               transition_offsets=[transition_bias],
                               transition_covariance=[[transition_scale**2]],
                               observation_matrices=[[emission_mult]],
                               observation_offsets=[emission_bias],
                               observation_covariance=[[emission_scale**2]])

    return kf.smooth(observations)
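
A usage sketch with made-up scalar parameters; kf.smooth returns the posterior state means and covariances of the 1-D linear-Gaussian model:

means, covariances = lgssm_true_posterior(
    observations=[0.1, 0.3, -0.2],
    initial_loc=0.0, initial_scale=1.0,
    transition_mult=0.9, transition_bias=0.0, transition_scale=0.5,
    emission_mult=1.0, emission_bias=0.0, emission_scale=0.3)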
Example #19
    def _kalman_filter(self, trajectory):
        """
		smooth the  time series data with Kalman filter
		:param trajectory: numpy-array
		:return: numpy-array, smoothed time-series data
		"""
        time_step, dim = trajectory.shape[:2]

        self.kf = pykalman.KalmanFilter(n_dim_obs=dim,
                                        n_dim_state=dim,
                                        initial_state_mean=trajectory[0])
        state_mean, state_covariance = self.kf.filter(trajectory)
        filt_traj = state_mean
        return filt_traj
Example #20
def get_data(transition_matrix,
             observation_matrix,
             transition_covariance,
             observation_covariance,
             T=100,
             random_state=None):
    if random_state is None:
        random_state = np.random.RandomState()
    kf = pykalman.KalmanFilter(transition_matrix, observation_matrix,
                               transition_covariance, observation_covariance)
    sample = kf.sample(T, random_state=random_state)
    data = sample[1].data.astype(np.float32)

    return data.reshape(T, 1, 1, -1), kf_loglikelihood(kf, data)
Example #21
 def __init__(self, include_constant=True):
     """
     Constructor for linear regression model with dynamic coefficients.
     """
     self.delta_grid = np.zeros(10)
     self.test_grid = np.zeros(10)
     self.delta = 1e-4
     self.test_error_ = 1.0
     self.kalman = pykalman.KalmanFilter()
     self.beta = np.zeros(2)
     self.beta_cov = np.identity(2)
     self.current_beta = np.zeros(2)
     self.current_bcov = np.identity(2)
     self.include_constant = include_constant
Example #22

    def KalmanStatesWrapper(parameters, y_data, x_data, stage, lambda_g,
                            lambda_z, xi_00, P_00):

        if stage == 1:
            stage, A, H, R, Q, F, x_data, y_data = Rstar.UnpackStage1(
                parameters, y_data, x_data)

        elif stage == 2:
            stage, A, H, R, Q, F = Rstar.UnpackStage2(parameters, lambda_g)

        elif stage == 3:
            stage, A, H, R, Q, F = Rstar.UnpackStage3(parameters, lambda_g,
                                                      lambda_z)

        else:
            raise ValueError("invalid stage {}; must be 1, 2 or 3".format(stage))

        exogenous_variables = (A.dot(
            x_data.T)).transpose()  # NEED TO CHECK THE DIMENSION

        kf = pykalman.KalmanFilter(
            transition_matrices=F,
            transition_offsets=np.zeros((xi_00.shape[0])),
            transition_covariance=Q,
            observation_matrices=H,
            observation_offsets=exogenous_variables,
            observation_covariance=R,
            initial_state_mean=xi_00.reshape(xi_00.shape[0]),
            initial_state_covariance=P_00)

        # SHOULD I PUT THIS ON A DATAFRAME?
        filtered_states, filtered_cov = kf.filter(y_data)
        smoothed_states, smoothed_cov = kf.smooth(y_data)

        # If we are on the first stage, we should retrend the estimated states
        if stage == 1:
            T = filtered_states.shape[0]
            trend = parameters[4] * np.concatenate(
                (np.arange(1, T + 1).reshape((T, 1)),
                 np.arange(0, T).reshape((T, 1)),
                 np.arange(-1, T - 1).reshape((T, 1))),
                axis=1)

            filtered_states = filtered_states + trend
            smoothed_states = smoothed_states + trend

        return filtered_states, filtered_cov, smoothed_states, smoothed_cov
Example #23
    def nextstart(self):
        self._k1 = self._dlast[0]
        self._c1 = self.p.initial_state_covariance

        self._kf = pykalman.KalmanFilter(
            transition_matrices=[1],
            observation_matrices=[1],
            observation_covariance=self.p.observation_covariance,
            transition_covariance=self.p.transition_covariance,
            initial_state_mean=self._k1,
            initial_state_covariance=self._c1,
        )

        self.next()
Example #24
def do_filter_kalman_2D(X):
    initial_state_mean = [X[0, 0], 0, X[0, 1], 0]

    transition_matrix = [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1],
                         [0, 0, 0, 1]]

    observation_matrix = [[1, 0, 0, 0], [0, 0, 1, 0]]

    KF = pykalman.KalmanFilter(transition_matrices=transition_matrix,
                               observation_matrices=observation_matrix,
                               initial_state_mean=initial_state_mean)

    KF = KF.em(X, n_iter=5)
    (smoothed_state_means, smoothed_state_covariances) = KF.smooth(X)
    return smoothed_state_means[:, [0, 2]]
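
A usage sketch on a noisy synthetic 2-D track (the data below is purely illustrative):

import numpy as np

t = np.arange(100, dtype=float)
noisy_xy = np.stack([t + np.random.randn(100), 0.5 * t + np.random.randn(100)], axis=1)
smoothed_xy = do_filter_kalman_2D(noisy_xy)  # smoothed (x, y) positions, shape (100, 2)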
Example #25
def main():
    model = np.genfromtxt('model.csv', delimiter=',')
    initial_mean, initial_variance, transition_multiplier, transition_offset, \
        transition_variance, emission_multiplier, emission_offset, \
        emission_variance = model

    kf = pykalman.KalmanFilter(initial_state_mean=[initial_mean],
                               initial_state_covariance=[[initial_variance]],
                               transition_matrices=[[transition_multiplier]],
                               transition_offsets=[transition_offset],
                               transition_covariance=[[transition_variance]],
                               observation_matrices=[[emission_multiplier]],
                               observation_offsets=[emission_offset],
                               observation_covariance=[[emission_variance]])

    num_timesteps_list = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]

    amplitude = 4
    period = 20
    noise_variance = 0.1

    fig, axs = plt.subplots(10, 2)
    fig.set_size_inches(8, 20)
    for i, num_timesteps in enumerate(num_timesteps_list):
        _, observations = kf.sample(num_timesteps)
        observations = np.reshape(observations, (-1))
        observations_reshaped = np.reshape(observations, (1, -1))
        np.savetxt('data_{}.csv'.format(i + 1),
                   observations_reshaped,
                   delimiter=',')

        axs[i][0].set_title('Data {}'.format(i + 1))
        axs[i][0].plot(observations)

        # sin
        observations = amplitude * \
            np.sin(np.arange(num_timesteps) * 2 * np.pi / period) + \
            np.random.randn(num_timesteps) * np.sqrt(noise_variance)
        observations_reshaped = np.reshape(observations, (1, -1))
        np.savetxt('data_{}.csv'.format(i + 11),
                   observations_reshaped,
                   delimiter=',')

        axs[i][1].set_title('Data {}'.format(i + 11))
        axs[i][1].plot(observations)

    fig.tight_layout()
    fig.savefig('data.pdf', bbox_inches='tight')
Example #26
    def get_kalman_filter(self, x0, dt):
        try:
            ndim = x0.shape[0]
        except AttributeError:
            ndim = len(x0)

        kalman_filter = pykalman.KalmanFilter(
            transition_matrices=get_state_transition_matrix(ndim, dt),
            observation_matrices=get_observation_matrix(ndim),
            transition_covariance=get_state_transition_covariance_matrix(
                ndim, dt, self.qval),
            observation_covariance=get_observation_covariance_matrix(
                ndim, self.rval),
            initial_state_mean=get_initial_state_mean(x0),
            initial_state_covariance=get_initial_state_covariance_matrix(ndim),
        )
        return kalman_filter
Example #27

def pykalmanIteration(Net, k_filter=True, k_smoother=False):
    kf = pk.KalmanFilter()
    kf.transition_matrices = Net.A_Matrix.todense()
    kf.observation_matrices = Net.H_Matrix
    kf.transition_covariance = Net.Q_Matrix.todense()
    kf.observation_covariance = Net.R_Matrix
    kf.initial_state_mean = Net.Initial_X
    kf.initial_state_covariance = Net.Initial_P

    if k_filter:
        Net.filtered_state_estimates, Net.filtered_state_covariances = kf.filter(
            Net.MeasurementData.T)
    if k_smoother:
        Net.smoothed_state_estimates, Net.smoothed_state_covariances = kf.smooth(
            Net.MeasurementData.T)

    return kf
Example #28
    def setUpClass(self):
        super(TestInfer, self).setUpClass()

        # Synthetic data
        self.num_timesteps = 100
        self.x = np.linspace(0, 3 * np.pi, self.num_timesteps)
        self.observations = 40 * (np.sin(self.x) +
                                  0.2 * np.random.randn(self.num_timesteps))

        kf = pykalman.KalmanFilter(
            transition_matrices=[[1]],
            transition_covariance=0.01 * np.eye(1),
        )

        # EM to fit parameters
        kf = kf.em(self.observations)

        # Inference using Kalman filter
        self.kalman_smoothed_state_means, \
            self.kalman_smoothed_state_variances = kf.smooth(self.observations)

        # Prepare state space model
        # self.batch_size = 1
        self.num_particles = 1000
        self.observations_tensor = torch.from_numpy(self.observations).\
            unsqueeze(-1).float()

        self.my_initial_distribution = Initial(
            initial_mean=float(kf.initial_state_mean[0]),
            initial_variance=float(kf.initial_state_covariance[0][0]))
        self.my_transition_distribution = Transition(
            transition_matrix=float(kf.transition_matrices[0][0]),
            transition_covariance=float(kf.transition_covariance[0][0]),
            transition_offset=float(kf.transition_offsets[0]))
        self.my_emission_distribution = Emission(
            emission_matrix=float(kf.observation_matrices[0][0]),
            emission_covariance=float(kf.observation_covariance[0][0]),
            emission_offset=float(kf.observation_offsets[0]))
        self.my_proposal_distribution = Proposal(
            initial_mean=float(kf.initial_state_mean[0]),
            initial_variance=float(kf.initial_state_covariance[0][0]),
            transition_matrix=float(kf.transition_matrices[0][0]),
            transition_covariance=float(kf.transition_covariance[0][0]),
            transition_offset=float(kf.transition_offsets[0]))
Example #29

def main():
    model = np.genfromtxt('model.csv', delimiter=',')
    initial_mean, initial_variance, transition_multiplier, transition_offset, \
        transition_variance, emission_multiplier, emission_offset, \
        emission_variance = model

    kf = pykalman.KalmanFilter(initial_state_mean=[initial_mean],
                               initial_state_covariance=[[initial_variance]],
                               transition_matrices=[[transition_multiplier]],
                               transition_offsets=[transition_offset],
                               transition_covariance=[[transition_variance]],
                               observation_matrices=[[emission_multiplier]],
                               observation_offsets=[emission_offset],
                               observation_covariance=[[emission_variance]])

    num_samples = 100
    num_timesteps = 100
    alpha = 0.2

    latents_list = []
    observations_list = []
    for _ in range(num_samples):
        latents, observations = kf.sample(num_timesteps)
        latents_list.append(np.reshape(latents, (-1)))
        observations_list.append(np.reshape(observations, (-1)))

    fig, axs = plt.subplots(2, 1)

    line1 = axs[0].plot(latents_list[0], label='latents', alpha=alpha)
    line2 = axs[0].plot(observations_list[0],
                        label='observations',
                        alpha=alpha)
    for n in range(1, num_samples):
        axs[0].plot(latents_list[n], color=line1[0].get_color(), alpha=alpha)
        axs[0].plot(observations_list[n],
                    color=line2[0].get_color(),
                    alpha=alpha)

    axs[1].plot(latents_list[0], label='latents')
    axs[1].plot(observations_list[0], label='observations')
    axs[1].legend()

    fig.savefig('prior.pdf', bbox_inches='tight')
Example #30
def predict_modified(xy,
                     file_name=None,
                     sample_rate=None,
                     pixel_scale=None,
                     scene_funcs=None,
                     timestamp=None,
                     store_image=0):

    n_frames = 15
    n_obs = 5
    initial_state_mean = [xy[0, 0], 0, xy[0, 1], 0]

    transition_matrix = [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1],
                         [0, 0, 0, 1]]

    observation_matrix = [[1, 0, 0, 0], [0, 0, 1, 0]]

    kf = pykalman.KalmanFilter(transition_matrices=transition_matrix,
                               observation_matrices=observation_matrix,
                               transition_covariance=1e-5 * np.eye(4),
                               observation_covariance=0.05**2 * np.eye(2),
                               initial_state_mean=initial_state_mean)
    # kf.em([(r.x, r.y) for r in path[:9]], em_vars=['transition_matrices',
    #                                                'observation_matrices'])
    #pdb.set_trace()
    kf.em([(r[0], r[1]) for r in xy[1:n_obs].cpu().numpy()])
    observed_states, _ = kf.smooth([(r[0], r[1])
                                    for r in xy[1:n_obs].cpu().numpy()])

    # sample predictions (first sample corresponds to last state)
    # average 3 sampled predictions
    predictions = None
    for _ in range(3):
        # the first returned sample corresponds to the last observed state, so one
        # extra step is drawn here and dropped by the predictions[1:] slice at the end
        _, pred = kf.sample(n_frames - n_obs + 1, initial_state=observed_states[-1])
        if predictions is None:
            predictions = pred
        else:
            predictions += pred
    predictions /= 3.0
    #pdb.set_trace()
    return torch.tensor(predictions[1:].data).cuda()