Example #1
import numpy as np
import matplotlib.pyplot as plt
# The BayesPy helper imports below are assumptions based on the demo context;
# the exact module paths vary between BayesPy versions
from bayespy import utils
import bayespy.plot.plotting as bpplt


def run():
    # Create some data
    N = 500
    D = 2
    # Initial state
    x0 = np.array([0.5, -0.5])
    # Dynamics (time varying)
    A0 = np.array([[.9, -.4], [.4, .9]])
    A1 = np.array([[.98, -.1], [.1, .98]])
    l = np.linspace(0, 1, N).reshape((-1, 1, 1))
    A = (1 - l) * A0 + l * A1
    # Innovation covariance matrix
    V = np.array([[1, 0], [0, 1]])
    # Observation noise covariance matrix
    C0 = 10 * np.array([[1, 0], [0, 1]])
    C1 = 0.01 * np.array([[1, 0], [0, 1]])
    C = (1 - l)**2 * C0 + l**2 * C1

    X = np.empty((N, D))
    Y = np.empty((N, D))

    # Simulate data
    x = x0
    for n in range(N):
        x = np.dot(A[n, :, :], x) + np.random.multivariate_normal(
            np.zeros(D), V)
        X[n, :] = x
        Y[n, :] = x + np.random.multivariate_normal(np.zeros(D), C[n, :, :])

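    # Information-form observations: U[n] = C[n]^{-1} is the observation
    # precision and UY[n] = C[n]^{-1} y[n] is the precision-weighted observation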
    U = np.empty((N, D, D))
    UY = np.empty((N, D))
    for n in range(N):
        U[n, :, :] = np.linalg.inv(C[n, :, :])
        UY[n, :] = np.linalg.solve(C[n, :, :], Y[n, :])

    # Wrap the static innovation covariance matrix as a length-N sequence
    V = N * (V,)

    (Xh, CovXh) = utils.kalman_filter(UY, U, A, V, np.zeros(D),
                                      10 * np.identity(D))
    (Xh, CovXh) = utils.rts_smoother(Xh, CovXh, A, V)

    plt.clf()
    for d in range(D):
        plt.subplot(D, 1, d + 1)  # subplot indices are 1-based
        bpplt.errorplot(Xh[:, d], error=2 * np.sqrt(CovXh[:, d, d]))
        plt.plot(X[:, d], 'r-')
        plt.plot(Y[:, d], '.')
    plt.show()
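For reference, the following is a minimal NumPy sketch of what the two helpers above are assumed to compute: a forward Kalman filter that takes observations in information form (U[n] = C[n]^{-1}, UY[n] = C[n]^{-1} y[n]) followed by a backward Rauch-Tung-Striebel smoother. The function names and argument conventions here are illustrative assumptions, not BayesPy's actual implementation; they only spell out the recurrences the demo relies on.

import numpy as np

def kalman_filter_info(UY, U, A, V, mu0, Cov0):
    # Forward pass with information-form observations:
    #   posterior precision  P_post^{-1} = P^{-1} + U[n]
    #   posterior mean       m_post = P_post (P^{-1} m + UY[n])
    N, D = UY.shape
    mu = np.empty((N, D))
    Cov = np.empty((N, D, D))
    m, P = mu0, Cov0
    for n in range(N):
        if n > 0:
            # Predict from step n-1 to n through the time-varying dynamics
            m = A[n - 1] @ m
            P = A[n - 1] @ P @ A[n - 1].T + V[n - 1]
        P_post = np.linalg.inv(np.linalg.inv(P) + U[n])
        m = P_post @ (np.linalg.solve(P, m) + UY[n])
        P = P_post
        mu[n], Cov[n] = m, P
    return mu, Cov

def rts_smoother_ref(mu, Cov, A, V):
    # Backward Rauch-Tung-Striebel pass over the filtered estimates
    N = len(mu)
    mu_s = mu.copy()
    Cov_s = Cov.copy()
    for n in range(N - 2, -1, -1):
        P_pred = A[n] @ Cov[n] @ A[n].T + V[n]        # predicted covariance
        G = np.linalg.solve(P_pred, A[n] @ Cov[n]).T  # smoother gain
        mu_s[n] = mu[n] + G @ (mu_s[n + 1] - A[n] @ mu[n])
        Cov_s[n] = Cov[n] + G @ (Cov_s[n + 1] - P_pred) @ G.T
    return mu_s, Cov_s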
Example #2
    def test_smoothing(self):
        """
        Test the posterior estimation of GaussianMarkovChain.

        Create time-variant dynamics and compare the results of BayesPy VB
        inference and standard Kalman filtering & smoothing.

        This is not that useful anymore, because the moments are checked much
        better in another test method.
        """

        #
        # Set up an artificial system
        #

        # Dimensions
        N = 500
        D = 2
        # Dynamics (time varying)
        A0 = np.array([[0.9, -0.4], [0.4, 0.9]])
        A1 = np.array([[0.98, -0.1], [0.1, 0.98]])
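        # Interpolate N - 1 transition matrices: one per transition in a
        # chain of N states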
        l = np.linspace(0, 1, N - 1).reshape((-1, 1, 1))
        A = (1 - l) * A0 + l * A1
        # Innovation covariance matrix (time varying)
        v = np.random.rand(D)
        V = np.diag(v)
        # Observation noise covariance matrix
        C = np.identity(D)

        #
        # Simulate data
        #

        X = np.empty((N, D))
        Y = np.empty((N, D))

        x = np.array([0.5, -0.5])
        X[0, :] = x
        Y[0, :] = x + np.random.multivariate_normal(np.zeros(D), C)
        for n in range(N - 1):
            x = np.dot(A[n, :, :], x) + np.random.multivariate_normal(np.zeros(D), V)
            X[n + 1, :] = x
            Y[n + 1, :] = x + np.random.multivariate_normal(np.zeros(D), C)

        #
        # BayesPy inference
        #

        # Construct VB model
        Xh = GaussianMarkovChain(np.zeros(D), np.identity(D), A, 1 / v, n=N)
        Yh = Gaussian(Xh, np.identity(D), plates=(N,))
        # Put data
        Yh.observe(Y)
        # Run inference
        Xh.update()
        # Store results
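        # u[0] holds the posterior means <x_n> and u[1] the second moments
        # <x_n x_n^T>, so subtracting the outer products of the means below
        # gives the posterior covariances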
        Xh_vb = Xh.u[0]
        CovXh_vb = Xh.u[1] - Xh_vb[..., np.newaxis, :] * Xh_vb[..., :, np.newaxis]

        #
        # "The ground truth" using standard Kalman filter and RTS smoother
        #
        V = N * (V,)
        # Because C is the identity matrix, the information-form inputs reduce
        # to UY[n] = C^{-1} y[n] = y[n] and U[n] = C^{-1} = C
        UY = Y
        U = N * (C,)
        (Xh, CovXh) = utils.kalman_filter(UY, U, A, V, np.zeros(D), np.identity(D))
        (Xh, CovXh) = utils.rts_smoother(Xh, CovXh, A, V)

        #
        # Check results
        #
        self.assertTrue(np.allclose(Xh_vb, Xh))
        self.assertTrue(np.allclose(CovXh_vb, CovXh))
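The identity-covariance shortcut above does not generalize: for an arbitrary observation covariance C the information-form inputs would have to be built explicitly, as in Example #1. A minimal sketch, assuming the same array shapes as in the test:

    U = N * (np.linalg.inv(C),)     # observation precision C^{-1}
    UY = np.linalg.solve(C, Y.T).T  # precision-weighted observations C^{-1} y[n]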