Example #1
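These snippets come from BayesPy test and demo code, so their imports are not shown and the nested check functions still refer to the enclosing unittest-style test case through self.assertAllClose. A minimal set of imports along the following lines is assumed in order to run them standalone; the exact module paths vary between BayesPy releases, so treat them as assumptions rather than the definitive API:

import numpy as np
import matplotlib.pyplot as plt

# Model nodes
from bayespy.nodes import Gaussian, GaussianARD, GaussianMarkovChain, SumMultiply

# Rotation transformations for parameter-expanded VB inference
from bayespy.inference.vmp.transformations import (RotateGaussianARD,
                                                   RotateGaussianMarkovChain,
                                                   RotateVaryingMarkovChain)

# Gradient checking, array utilities and plotting helpers
from bayespy.utils import optimize
from bayespy.utils import misc as utils   # older releases: bayespy.utils.utils
import bayespy.plot as bpplt              # older releases: bayespy.plot.plotting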
        def check(D, N, K, mu=None, Lambda=None, rho=None):

            if mu is None:
                mu = np.zeros(D)
            if Lambda is None:
                Lambda = np.identity(D)
            if rho is None:
                rho = np.ones(D)

            V = np.identity(D) + np.ones((D, D))

            # Construct model
            B = GaussianARD(3, 5, shape=(D, K), plates=(1, D))
            S = GaussianARD(2, 4, shape=(K, ), plates=(N, 1))
            A = SumMultiply('dk,k->d', B, S)
            X = GaussianMarkovChain(mu,
                                    Lambda,
                                    A,
                                    rho,
                                    n=N + 1,
                                    initialize=False)
            Y = Gaussian(X, V, initialize=False)

            # Posterior estimation
            Y.observe(np.random.randn(N + 1, D))
            X.update()
            B.update()
            S.update()
            # Update the hyperparameter nodes only if they are random
            # variables; plain NumPy arrays have no update() method.
            try:
                mu.update()
            except AttributeError:
                pass
            try:
                Lambda.update()
            except AttributeError:
                pass
            try:
                rho.update()
            except AttributeError:
                pass

            # Construct rotator
            rotB = RotateGaussianARD(B, axis=-2)
            rotX = RotateVaryingMarkovChain(X, B, S, rotB)
            rotX.setup()

            # Check gradient with respect to R
            R = np.random.randn(D, D)

            def cost(r):
                (b, dr) = rotX.bound(np.reshape(r, np.shape(R)))
                return (b, np.ravel(dr))

            err = optimize.check_gradient(cost, np.ravel(R), verbose=False)
            self.assertAllClose(err, 0, atol=1e-6, msg="Gradient incorrect")

            return
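The enclosing test method (not shown in this listing) calls check with concrete dimensions; a hypothetical invocation, with illustrative values only, would look like this. Passing BayesPy nodes for mu, Lambda or rho instead of leaving them as fixed arrays exercises the corresponding update() calls above.

        # Hypothetical call; D=2, N=5, K=3 are illustrative values only.
        check(2, 5, 3)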
Example #2
        def check(D, N, mu=None, Lambda=None, rho=None, A=None):
            if mu is None:
                mu = np.zeros(D)
            if Lambda is None:
                Lambda = np.identity(D)
            if rho is None:
                rho = np.ones(D)
            if A is None:
                A = GaussianARD(3, 5, shape=(D, ), plates=(D, ))

            V = np.identity(D) + np.ones((D, D))

            # Construct model
            X = GaussianMarkovChain(mu,
                                    Lambda,
                                    A,
                                    rho,
                                    n=N + 1,
                                    initialize=False)
            Y = Gaussian(X, V, initialize=False)

            # Posterior estimation
            Y.observe(np.random.randn(*(Y.get_shape(0))))
            X.update()
            # Update the parent nodes only if they are random variables;
            # plain NumPy arrays have no update() method.
            try:
                A.update()
            except AttributeError:
                pass
            try:
                mu.update()
            except AttributeError:
                pass
            try:
                Lambda.update()
            except AttributeError:
                pass
            try:
                rho.update()
            except AttributeError:
                pass

            # Construct rotator
            rotA = RotateGaussianARD(A, axis=-1)
            rotX = RotateGaussianMarkovChain(X, rotA)
            rotX.setup()

            # Check gradient with respect to R
            R = np.random.randn(D, D)

            def cost(r):
                (b, dr) = rotX.bound(np.reshape(r, np.shape(R)))
                return (b, np.ravel(dr))

            err = optimize.check_gradient(cost, np.ravel(R), verbose=False)
            self.assertAllClose(err, 0, atol=1e-5, msg="Gradient incorrect")

            return
def run():
    # Create some data
    N = 500
    D = 2
    # Initial state
    x0 = np.array([0.5, -0.5])
    # Dynamics (time varying)
    A0 = np.array([[.9, -.4], [.4, .9]])
    A1 = np.array([[.98, -.1], [.1, .98]])
    l = np.linspace(0, 1, N - 1).reshape((-1, 1, 1))
    A = (1 - l) * A0 + l * A1
    # Innovation covariance matrix
    V = np.identity(D)
    # Observation noise covariance matrix
    C = np.tile(np.identity(D), (N, 1, 1))
    ## C0 = 10*np.array([[1, 0], [0, 1]])
    ## C1 = 0.01*np.array([[1, 0], [0, 1]])
    ## C = (1-l)**2*C0 + l**2*C1

    X = np.empty((N, D))
    Y = np.empty((N, D))

    # Simulate data
    x = x0
    X[0, :] = x
    Y[0, :] = x + np.random.multivariate_normal(np.zeros(D), C[0, :, :])
    for n in range(N - 1):
        x = np.dot(A[n, :, :], x) + np.random.multivariate_normal(
            np.zeros(D), V)
        X[n + 1, :] = x
        Y[n + 1, :] = x + np.random.multivariate_normal(
            np.zeros(D), C[n + 1, :, :])

    # Invert observation noise covariance to observation precision matrices
    U = np.empty((N, D, D))
    UY = np.empty((N, D))
    for n in range(N):
        U[n, :, :] = np.linalg.inv(C[n, :, :])
        UY[n, :] = np.linalg.solve(C[n, :, :], Y[n, :])

    # Construct VB model
    Xh = GaussianMarkovChain(np.zeros(D), np.identity(D), A, np.ones(D), n=N)
    Yh = Gaussian(Xh.as_gaussian(), np.identity(D), plates=(N, ))
    Yh.observe(Y)
    Xh.update()

    xh = Xh.u[0]
    varxh = utils.diagonal(Xh.u[1]) - xh**2
    #err = 2 * np.sqrt(varxh)
    plt.figure(1)
    plt.clf()
    for d in range(D):
        plt.subplot(D, 1, d + 1)  # subplot indices are 1-based
        bpplt.errorplot(xh[:, d], error=2 * np.sqrt(varxh[:, d]))
        plt.plot(X[:, d], 'r-')
        plt.plot(Y[:, d], '.')
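The demo only draws the figure; to run it as a standalone script, an entry point along these lines (not part of the original listing) would be used:

if __name__ == '__main__':
    run()
    plt.show()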
Example #5
        def check(D, N, K, mu=None, Lambda=None, rho=None):

            if mu is None:
                mu = np.zeros(D)
            if Lambda is None:
                Lambda = np.identity(D)
            if rho is None:
                rho = np.ones(D)

            V = np.identity(D) + np.ones((D, D))

            # Construct model
            B = GaussianARD(3, 5, shape=(D, K), plates=(1, D))
            S = GaussianARD(2, 4, shape=(K, ), plates=(N, 1))
            A = SumMultiply('dk,k->d', B, S)
            X = GaussianMarkovChain(mu,
                                    Lambda,
                                    A,
                                    rho,
                                    n=N + 1,
                                    initialize=False)
            Y = Gaussian(X, V, initialize=False)

            # Posterior estimation
            Y.observe(np.random.randn(N + 1, D))
            X.update()
            B.update()
            S.update()
            # Update the hyperparameter nodes only if they are random
            # variables; plain NumPy arrays have no update() method.
            try:
                mu.update()
            except AttributeError:
                pass
            try:
                Lambda.update()
            except AttributeError:
                pass
            try:
                rho.update()
            except AttributeError:
                pass

            # Construct rotator
            rotB = RotateGaussianARD(B, axis=-2)
            rotX = RotateVaryingMarkovChain(X, B, S, rotB)

            # Rotation
            true_cost0 = X.lower_bound_contribution()
            rotX.setup()
            I = np.identity(D)
            R = np.random.randn(D, D)
            rot_cost0 = rotX.get_bound_terms(I)
            rot_cost1 = rotX.get_bound_terms(R)
            self.assertAllClose(sum(rot_cost0.values()),
                                rotX.bound(I)[0],
                                msg="Bound terms and total bound differ")
            self.assertAllClose(sum(rot_cost1.values()),
                                rotX.bound(R)[0],
                                msg="Bound terms and total bound differ")
            rotX.rotate(R)
            true_cost1 = X.lower_bound_contribution()
            self.assertAllClose(true_cost1 - true_cost0,
                                rot_cost1[X] - rot_cost0[X],
                                msg="Incorrect rotation cost for X")

            return
Example #6
        def check(D, N, mu=None, Lambda=None, rho=None, A=None):
            if mu is None:
                mu = np.zeros(D)
            if Lambda is None:
                Lambda = np.identity(D)
            if rho is None:
                rho = np.ones(D)
            if A is None:
                A = GaussianARD(3, 5, shape=(D, ), plates=(D, ))

            V = np.identity(D) + np.ones((D, D))

            # Construct model
            X = GaussianMarkovChain(mu,
                                    Lambda,
                                    A,
                                    rho,
                                    n=N + 1,
                                    initialize=False)
            Y = Gaussian(X, V, initialize=False)

            # Posterior estimation
            Y.observe(np.random.randn(*(Y.get_shape(0))))
            X.update()
            # Update the parent nodes only if they are random variables;
            # plain NumPy arrays have no update() method.
            try:
                A.update()
            except AttributeError:
                pass
            try:
                mu.update()
            except AttributeError:
                pass
            try:
                Lambda.update()
            except AttributeError:
                pass
            try:
                rho.update()
            except AttributeError:
                pass

            # Construct rotator
            rotA = RotateGaussianARD(A, axis=-1)
            rotX = RotateGaussianMarkovChain(X, rotA)

            # Rotation
            true_cost0 = X.lower_bound_contribution()
            rotX.setup()
            I = np.identity(D)
            R = np.random.randn(D, D)
            rot_cost0 = rotX.get_bound_terms(I)
            rot_cost1 = rotX.get_bound_terms(R)
            self.assertAllClose(sum(rot_cost0.values()),
                                rotX.bound(I)[0],
                                msg="Bound terms and total bound differ")
            self.assertAllClose(sum(rot_cost1.values()),
                                rotX.bound(R)[0],
                                msg="Bound terms and total bound differ")
            rotX.rotate(R)
            true_cost1 = X.lower_bound_contribution()
            self.assertAllClose(true_cost1 - true_cost0,
                                rot_cost1[X] - rot_cost0[X],
                                msg="Incorrect rotation cost for X")

            return
        def check(D, N, mu=None, Lambda=None, rho=None, A=None):
            if mu is None:
                mu = np.zeros(D)
            if Lambda is None:
                Lambda = np.identity(D)
            if rho is None:
                rho = np.ones(D)
            if A is None:
                A = GaussianARD(3, 5,
                                shape=(D,),
                                plates=(D,))
                
            V = np.identity(D) + np.ones((D,D))

            # Construct model
            X = GaussianMarkovChain(mu,
                                    Lambda,
                                    A,
                                    rho,
                                    n=N+1,
                                    initialize=False)
            Y = Gaussian(X,
                         V,
                         initialize=False)

            # Posterior estimation
            Y.observe(np.random.randn(*(Y.get_shape(0))))
            X.update()
            # Update the parent nodes only if they are random variables;
            # plain NumPy arrays have no update() method.
            try:
                A.update()
            except AttributeError:
                pass
            try:
                mu.update()
            except AttributeError:
                pass
            try:
                Lambda.update()
            except AttributeError:
                pass
            try:
                rho.update()
            except AttributeError:
                pass

            # Construct rotator
            rotA = RotateGaussianARD(A, axis=-1)
            rotX = RotateGaussianMarkovChain(X, rotA)
            rotX.setup()

            # Check gradient with respect to R
            R = np.random.randn(D, D)
            def cost(r):
                (b, dr) = rotX.bound(np.reshape(r, np.shape(R)))
                return (b, np.ravel(dr))

            err = optimize.check_gradient(cost, 
                                          np.ravel(R), 
                                          verbose=False)[1]
            self.assertAllClose(err, 0, 
                                atol=1e-5,
                                msg="Gradient incorrect")
            
            return
        def check(D, N, K,
                  mu=None,
                  Lambda=None,
                  rho=None):

            if mu is None:
                mu = np.zeros(D)
            if Lambda is None:
                Lambda = np.identity(D)
            if rho is None:
                rho = np.ones(D)

            V = np.identity(D) + np.ones((D,D))

            # Construct model
            B = GaussianARD(3, 5,
                            shape=(D,K),
                            plates=(1,D))
            S = GaussianARD(2, 4,
                            shape=(K,),
                            plates=(N,1))
            A = SumMultiply('dk,k->d', B, S)
            X = GaussianMarkovChain(mu,
                                    Lambda,
                                    A,
                                    rho,
                                    n=N+1,
                                    initialize=False)
            Y = Gaussian(X,
                         V,
                         initialize=False)

            # Posterior estimation
            Y.observe(np.random.randn(N+1,D))
            X.update()
            B.update()
            S.update()
            # Update the hyperparameter nodes only if they are random
            # variables; plain NumPy arrays have no update() method.
            try:
                mu.update()
            except AttributeError:
                pass
            try:
                Lambda.update()
            except AttributeError:
                pass
            try:
                rho.update()
            except AttributeError:
                pass

            # Construct rotator
            rotB = RotateGaussianARD(B, axis=-2)
            rotX = RotateVaryingMarkovChain(X, B, S, rotB)
            rotX.setup()

            # Check gradient with respect to R
            R = np.random.randn(D, D)
            def cost(r):
                (b, dr) = rotX.bound(np.reshape(r, np.shape(R)))
                return (b, np.ravel(dr))

            err = optimize.check_gradient(cost, 
                                          np.ravel(R), 
                                          verbose=False)[1]
            self.assertAllClose(err, 0, 
                                atol=1e-6,
                                msg="Gradient incorrect")
            
            return