Example #1
    def test_moments(self):
        """
        Test the moments of Poisson nodes.
        """

        # Simple test
        X = Poisson(12.8)
        u = X._message_to_child()
        self.assertEqual(len(u), 1)
        self.assertAllClose(u[0], 12.8)

        # Test plates in rate
        X = Poisson(12.8 * np.ones((2, 3)))
        u = X._message_to_child()
        self.assertAllClose(u[0], 12.8 * np.ones((2, 3)))

        # Test with gamma prior
        alpha = Gamma(5, 2)
        r = np.exp(alpha._message_to_child()[1])
        X = Poisson(alpha)
        u = X._message_to_child()
        self.assertAllClose(u[0], r)

        # Test with broadcasted plates in parents
        X = Poisson(Gamma(5, 2, plates=(2, 3)))
        u = X._message_to_child()
        self.assertAllClose(u[0] * np.ones((2, 3)), r * np.ones((2, 3)))

        pass
Example #2
    def test_init(self):
        """
        Test the creation of Poisson nodes.
        """

        # Some simple initializations
        X = Poisson(12.8)
        X = Poisson(Gamma(43, 24))

        # Check that plates are correct
        X = Poisson(np.ones((2, 3)))
        self.assertEqual(X.plates, (2, 3))
        X = Poisson(Gamma(1, 1, plates=(2, 3)))
        self.assertEqual(X.plates, (2, 3))

        # Invalid rate
        self.assertRaises(ValueError, Poisson, -0.1)

        # Inconsistent plates
        self.assertRaises(ValueError, Poisson, np.ones(3), plates=(2, ))

        # Explicit plates too small
        self.assertRaises(ValueError, Poisson, np.ones(3), plates=(1, ))

        pass
Example #3
def model(M=10, N=100, D=3):
    """
    Construct linear state-space model.

    See, for instance, the following publication:
    "Fast variational Bayesian linear state-space model"
    Luttinen (ECML 2013)
    """

    # Dynamics matrix with ARD
    alpha = Gamma(1e-5, 1e-5, plates=(D, ), name='alpha')
    A = GaussianARD(0,
                    alpha,
                    shape=(D, ),
                    plates=(D, ),
                    plotter=bpplt.GaussianHintonPlotter(rows=0,
                                                        cols=1,
                                                        scale=0),
                    name='A')
    A.initialize_from_value(np.identity(D))

    # Latent states with dynamics
    X = GaussianMarkovChain(
        np.zeros(D),  # mean of x0
        1e-3 * np.identity(D),  # prec of x0
        A,  # dynamics
        np.ones(D),  # innovation
        n=N,  # time instances
        plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
        name='X')
    X.initialize_from_value(np.random.randn(N, D))

    # Mixing matrix from latent space to observation space using ARD
    gamma = Gamma(1e-5, 1e-5, plates=(D, ), name='gamma')
    gamma.initialize_from_value(1e-2 * np.ones(D))
    C = GaussianARD(0,
                    gamma,
                    shape=(D, ),
                    plates=(M, 1),
                    plotter=bpplt.GaussianHintonPlotter(rows=0,
                                                        cols=2,
                                                        scale=0),
                    name='C')
    C.initialize_from_value(np.random.randn(M, 1, D))

    # Observation noise
    tau = Gamma(1e-5, 1e-5, name='tau')
    tau.initialize_from_value(1e2)

    # Underlying noiseless function
    F = SumMultiply('i,i', C, X, name='F')

    # Noisy observations
    Y = GaussianARD(F, tau, name='Y')

    Q = VB(Y, F, C, gamma, X, A, alpha, tau)

    return Q
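
A hedged usage sketch (not part of the original example): data stands in for an (M, N) observation array, and indexing the VB object by node name follows BayesPy's convention.

Q = model(M=10, N=100, D=3)
Q['Y'].observe(data)   # attach observations to the 'Y' node
Q.update(repeat=50)    # run VB-EM for a fixed number of iterations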
Example #4
    def test_lower_bound_contribution(self):

        a = 15
        b = 21
        y = 4
        x = Gamma(a, b)
        x.observe(y)
        testing.assert_allclose(
            x.lower_bound_contribution(),
            (
                a * np.log(b) +
                (a - 1) * np.log(y) -
                b * y -
                special.gammaln(a)
            )
        )

        # Just one latent node, so we'll get the exact marginal likelihood
        #
        # p(Y) = p(Y,X)/p(X|Y) = p(Y|X) * p(X) / p(X|Y)
        a = 2.3
        b = 4.1
        x = 1.9
        y = 4.8
        tau = Gamma(a, b)
        Y = GaussianARD(x, tau)
        Y.observe(y)
        mu = x
        nu = 2 * a
        s2 = b / a
        a_post = a + 0.5
        b_post = b + 0.5*(y - x)**2
        tau.update()
        testing.assert_allclose(
            [-b_post, a_post],
            tau.phi
        )
        testing.assert_allclose(
            Y.lower_bound_contribution() + tau.lower_bound_contribution(),
            (
                special.gammaln((nu+1)/2)
                - special.gammaln(nu/2)
                - 0.5 * np.log(nu)
                - 0.5 * np.log(np.pi)
                - 0.5 * np.log(s2)
                - 0.5 * (nu + 1) * np.log(
                    1 + (y - mu)**2 / (nu * s2)
                )
            )
        )

        return
Example #5
    def test_message_to_child(self):
        """
        Test the message to child of the Mixture node.
        """

        K = 3

        #
        # Estimate moments from parents only
        #

        # Simple case
        mu = GaussianARD([0,2,4], 1,
                         ndim=0,
                         plates=(K,))
        alpha = Gamma(1, 1,
                      plates=(K,))
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, mu, alpha)
        self.assertEqual(X.plates, ())
        self.assertEqual(X.dims, ( (), () ))
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            2)
        self.assertAllClose(u[1],
                            2**2+1)

        # Broadcasting the moments on the cluster axis
        mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        alpha = Gamma(1, 1,
                      plates=(K,))
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, mu, alpha)
        self.assertEqual(X.plates, ())
        self.assertEqual(X.dims, ( (), () ))
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            2)
        self.assertAllClose(u[1],
                            2**2+1)

        #
        # Estimate moments with observed children
        #
        
        pass
Example #6
    def test_mask_to_parent(self):
        """
        Test the mask handling in the Mixture node.
        """

        K = 3
        Z = Categorical(np.ones(K)/K,
                        plates=(4,5,1))
        Mu = GaussianARD(0, 1,
                         shape=(2,),
                         plates=(4,K,5))
        Alpha = Gamma(1, 1,
                      plates=(4,K,5,2))
        X = Mixture(Z, GaussianARD, Mu, Alpha, cluster_plate=-3)
        Y = GaussianARD(X, 1, ndim=1)
        mask = np.reshape((np.mod(np.arange(4*5), 2) == 0),
                          (4,5))
        Y.observe(np.ones((4,5,2)),
                  mask=mask)
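        # The observation mask is aligned to each parent's plates by inserting
        # broadcasting (unit) axes, e.g. at the cluster axis for Mu and Alpha;
        # the assertions below spell out the expected alignment.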
        self.assertArrayEqual(Z.mask,
                              mask[:,:,None])
        self.assertArrayEqual(Mu.mask,
                              mask[:,None,:])
        self.assertArrayEqual(Alpha.mask,
                              mask[:,None,:,None])

        pass
Example #7
def _setup_linear_regression():
    """
    Setup code for the pdf and contour tests.

    This code is from http://www.bayespy.org/examples/regression.html
    """
    np.random.seed(1)
    k = 2  # slope
    c = 5  # bias
    s = 2  # noise standard deviation

    x = np.arange(10)
    y = k * x + c + s * np.random.randn(10)
    X = np.vstack([x, np.ones(len(x))]).T

    B = GaussianARD(0, 1e-6, shape=(2, ))

    F = SumMultiply('i,i', B, X)

    tau = Gamma(1e-3, 1e-3)
    Y = GaussianARD(F, tau)
    Y.observe(y)

    Q = VB(Y, B, tau)
    Q.update(repeat=1000)
    xh = np.linspace(-5, 15, 100)
    Xh = np.vstack([xh, np.ones(len(xh))]).T
    Fh = SumMultiply('i,i', B, Xh)

    return locals()
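
A hedged sketch of how the returned locals() can feed the pdf and contour tests (assumes bayespy.plot imported as bpplt, as on the linked page):

env = _setup_linear_regression()
bpplt.plot(env['Fh'], x=env['xh'], scale=2)  # posterior of the regression function
bpplt.plot(env['y'], x=env['x'], color='r', marker='x', linestyle='None')
bpplt.pyplot.show()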
Example #8
    def fit(self, X, y):
        self.weights = GaussianARD(0, 1e-6, shape=(X.shape[-1], ))
        y_mean = SumMultiply('i,i', self.weights, X)
        precision = Gamma(1, .1)
        y_obs = GaussianARD(y_mean, precision)
        y_obs.observe(y)

        Q = VB(y_obs, self.weights, precision)
        Q.update(repeat=self.n_iter, tol=self.tolerance, verbose=False)
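
A matching predict method is not part of the original; a minimal sketch under the same attribute names (hypothetical helper):

    def predict(self, X):
        # Posterior mean of the noiseless output; get_moments()[0] is E[F]
        F = SumMultiply('i,i', self.weights, X)
        return F.get_moments()[0]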
Example #9
def pca():

    np.random.seed(41)

    M = 10
    N = 3000
    D = 5

    # Construct the PCA model
    alpha = Gamma(1e-3, 1e-3, plates=(D, ), name='alpha')
    W = GaussianARD(0, alpha, plates=(M, 1), shape=(D, ), name='W')
    X = GaussianARD(0, 1, plates=(1, N), shape=(D, ), name='X')
    tau = Gamma(1e-3, 1e-3, name='tau')
    W.initialize_from_random()
    F = SumMultiply('d,d->', W, X)
    Y = GaussianARD(F, tau, name='Y')

    # Observe data
    data = np.sum(np.random.randn(M, 1, D - 1) * np.random.randn(1, N, D - 1),
                  axis=-1) + 1e-1 * np.random.randn(M, N)
    Y.observe(data)

    # Initialize VB engine
    Q = VB(Y, X, W, alpha, tau)

    # Take one update step (so phi is ok)
    Q.update(repeat=1)
    Q.save()

    # Run VB-EM
    Q.update(repeat=200)
    bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'k-')

    # Restore the state
    Q.load()

    # Run Riemannian conjugate gradient
    #Q.optimize(X, alpha, maxiter=100, collapsed=[W, tau])
    Q.optimize(W, tau, maxiter=100, collapsed=[X, alpha])
    bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')

    bpplt.pyplot.show()
Example #10
    def test_init(self):
        """
        Test the creation of the Mixture node.
        """

        # Do not accept a non-negative cluster plate axis
        z = Categorical(np.random.dirichlet([1, 1]))
        self.assertRaises(ValueError,
                          Mixture,
                          z,
                          GaussianARD,
                          GaussianARD(0, 1, plates=(2, )),
                          Gamma(1, 1, plates=(2, )),
                          cluster_plate=0)

        # Try constructing a mixture without any of the parents having the
        # cluster plate axis
        z = Categorical(np.random.dirichlet([1, 1]))
        self.assertRaises(ValueError, Mixture, z, GaussianARD,
                          GaussianARD(0, 1, plates=()), Gamma(1, 1, plates=()))
Example #11
    def test_message_to_parents(self):
        """ Check gradient passed to inputs parent node """
        D = 3

        X = Gaussian(np.random.randn(D), random.covariance(D))
        a = Gamma(np.random.rand(D), np.random.rand(D))

        Y = GaussianARD(X, a)
        Y.observe(np.random.randn(D))

        self.assert_message_to_parent(Y, X)
        self.assert_message_to_parent(Y, a)

        pass
Example #12
    def fit(self, X, y):
        self._init_weights()

        self.tau = Gamma(self.prior_a, self.prior_b)
        F = SumMultiply('i,i', self.weights, X)
        y_obs = GaussianARD(F, self.tau)
        y_obs.observe(y)

        Q = VB(y_obs, self.weights)
        Q.update(repeat=10, tol=1e-4, verbose=False)
Example #13
def model(M, N, D, K):
    """
    Construct the linear state-space model with time-varying dynamics

    For reference, see the following publication:
    (TODO)
    """

    #
    # The model block for the latent mixing weight process
    #

    # Dynamics matrix with ARD
    # beta : (K) x ()
    beta = Gamma(1e-5, 1e-5, plates=(K, ), name='beta')
    # B : (K) x (K)
    B = GaussianARD(np.identity(K),
                    beta,
                    shape=(K, ),
                    plates=(K, ),
                    name='B',
                    plotter=bpplt.GaussianHintonPlotter(rows=0,
                                                        cols=1,
                                                        scale=0),
                    initialize=False)
    B.initialize_from_value(np.identity(K))

    # Mixing weight process, that is, the weights in the linear combination of
    # state dynamics matrices
    # S : () x (N,K)
    S = GaussianMarkovChain(np.ones(K),
                            1e-6 * np.identity(K),
                            B,
                            np.ones(K),
                            n=N,
                            name='S',
                            plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
                            initialize=False)
    s = 10 * np.random.randn(N, K)
    s[:, 0] = 10
    S.initialize_from_value(s)

    #
    # The model block for the latent states
    #

    # Projection matrix of the dynamics matrix
    # alpha : (K) x ()
    alpha = Gamma(1e-5, 1e-5, plates=(D, K), name='alpha')
    alpha.initialize_from_value(1 * np.ones((D, K)))
    # A : (D) x (D,K)
    A = GaussianARD(0,
                    alpha,
                    shape=(D, K),
                    plates=(D, ),
                    name='A',
                    plotter=bpplt.GaussianHintonPlotter(rows=0,
                                                        cols=1,
                                                        scale=0),
                    initialize=False)

    # Initialize S and A such that A*S is almost an identity matrix
    a = np.zeros((D, D, K))
    a[:, :, 0] = np.identity(D) / s[0, 0]
    a[:, :, 1:] = 0.1 / s[0, 0] * np.random.randn(D, D, K - 1)
    A.initialize_from_value(a)

    # Latent states with dynamics
    # X : () x (N,D)
    X = VaryingGaussianMarkovChain(
        np.zeros(D),  # mean of x0
        1e-3 * np.identity(D),  # prec of x0
        A,  # dynamics matrices
        S._convert(GaussianMoments)[1:],  # temporal weights
        np.ones(D),  # innovation
        n=N,  # time instances
        name='X',
        plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
        initialize=False)
    X.initialize_from_value(np.random.randn(N, D))

    #
    # The model block for observations
    #

    # Mixing matrix from latent space to observation space using ARD
    # gamma : (D) x ()
    gamma = Gamma(1e-5, 1e-5, plates=(D, ), name='gamma')
    gamma.initialize_from_value(1e-2 * np.ones(D))
    # C : (M,1) x (D)
    C = GaussianARD(0,
                    gamma,
                    shape=(D, ),
                    plates=(M, 1),
                    name='C',
                    plotter=bpplt.GaussianHintonPlotter(rows=0,
                                                        cols=2,
                                                        scale=0))
    C.initialize_from_value(np.random.randn(M, 1, D))

    # Noiseless process
    # F : (M,N) x ()
    F = SumMultiply('d,d', C, X, name='F')

    # Observation noise
    # tau : () x ()
    tau = Gamma(1e-5, 1e-5, name='tau')
    tau.initialize_from_value(1e2)

    # Observations
    # Y: (M,N) x ()
    Y = GaussianARD(F, tau, name='Y')

    # Construct inference machine
    Q = VB(Y, F, C, gamma, X, A, alpha, tau, S, B, beta)

    return Q
Example #14
    def test_message_to_child(self):
        """
        Test the message to child of the GaussianGammaISO node.
        """

        # Simple test
        mu = np.array([1, 2, 3])
        Lambda = np.identity(3)
        a = 2
        b = 10
        X_alpha = GaussianGammaISO(mu, Lambda, a, b)
        u = X_alpha._message_to_child()
        self.assertEqual(len(u), 4)
        tau = np.array(a / b)
        self.assertAllClose(u[0], tau[..., None] * mu)
        self.assertAllClose(
            u[1],
            (linalg.inv(Lambda) + tau[..., None, None] * linalg.outer(mu, mu)))
        self.assertAllClose(u[2], tau)
        self.assertAllClose(u[3], -np.log(b) + special.psi(a))

        # Test with unknown parents
        mu = Gaussian(np.arange(3), 10 * np.identity(3))
        Lambda = Wishart(10, np.identity(3))
        a = 2
        b = Gamma(3, 15)
        X_alpha = GaussianGammaISO(mu, Lambda, a, b)
        u = X_alpha._message_to_child()
        (mu, mumu) = mu._message_to_child()
        Cov_mu = mumu - linalg.outer(mu, mu)
        (Lambda, _) = Lambda._message_to_child()
        (b, _) = b._message_to_child()
        (tau, logtau) = Gamma(
            a, b + 0.5 * np.sum(Lambda * Cov_mu))._message_to_child()
        self.assertAllClose(u[0], tau[..., None] * mu)
        self.assertAllClose(
            u[1],
            (linalg.inv(Lambda) + tau[..., None, None] * linalg.outer(mu, mu)))
        self.assertAllClose(u[2], tau)
        self.assertAllClose(u[3], logtau)

        # Test with plates
        mu = Gaussian(np.reshape(np.arange(3 * 4), (4, 3)),
                      10 * np.identity(3),
                      plates=(4, ))
        Lambda = Wishart(10, np.identity(3))
        a = 2
        b = Gamma(3, 15)
        X_alpha = GaussianGammaISO(mu, Lambda, a, b, plates=(4, ))
        u = X_alpha._message_to_child()
        (mu, mumu) = mu._message_to_child()
        Cov_mu = mumu - linalg.outer(mu, mu)
        (Lambda, _) = Lambda._message_to_child()
        (b, _) = b._message_to_child()
        (tau, logtau) = Gamma(
            a, b +
            0.5 * np.sum(Lambda * Cov_mu, axis=(-1, -2)))._message_to_child()
        self.assertAllClose(u[0] * np.ones((4, 1)),
                            np.ones((4, 1)) * tau[..., None] * mu)
        self.assertAllClose(
            u[1] * np.ones((4, 1, 1)),
            np.ones((4, 1, 1)) *
            (linalg.inv(Lambda) + tau[..., None, None] * linalg.outer(mu, mu)))
        self.assertAllClose(u[2] * np.ones(4), np.ones(4) * tau)
        self.assertAllClose(u[3] * np.ones(4), np.ones(4) * logtau)

        pass
Example #15
    def test_init(self):
        """
        Test the constructor of GaussianARD
        """
        def check_init(true_plates, true_shape, mu, alpha, **kwargs):
            X = GaussianARD(mu, alpha, **kwargs)
            self.assertEqual(X.dims, (true_shape, true_shape + true_shape),
                             msg="Constructed incorrect dimensionality")
            self.assertEqual(X.plates,
                             true_plates,
                             msg="Constructed incorrect plates")

        #
        # Create from constant parents
        #

        # Use ndim=0 for constant mu
        check_init((), (), 0, 1)
        check_init((3, 2), (), np.zeros((3, 2)), np.ones((2, )))
        check_init((4, 2, 2, 3), (), np.zeros((2, 1, 3)), np.ones((4, 1, 2, 3)))
        # Use ndim
        check_init((4, 2), (2, 3),
                   np.zeros((2, 1, 3)),
                   np.ones((4, 1, 2, 3)),
                   ndim=2)
        # Use shape
        check_init((4, 2), (2, 3),
                   np.zeros((2, 1, 3)),
                   np.ones((4, 1, 2, 3)),
                   shape=(2, 3))
        # Use ndim and shape
        check_init((4, 2), (2, 3),
                   np.zeros((2, 1, 3)),
                   np.ones((4, 1, 2, 3)),
                   ndim=2,
                   shape=(2, 3))

        #
        # Create from node parents
        #

        # Infer ndim from parent mu
        check_init((3, ), (), GaussianARD(0, 1, plates=(3, )),
                   Gamma(1, 1, plates=(3, )))

        # Infer ndim from mu, take broadcasted shape
        check_init((4, ), (2, 2, 3),
                   GaussianARD(np.zeros((2, 1, 3)), np.ones((2, 1, 3)),
                               ndim=3),
                   Gamma(np.ones((4, 1, 2, 3)), np.ones((4, 1, 2, 3))))
        # Use ndim
        check_init((4, ), (2, 2, 3),
                   GaussianARD(np.zeros((4, 1, 2, 3)),
                               np.ones((4, 1, 2, 3)),
                               ndim=2),
                   Gamma(np.ones((4, 2, 1, 3)), np.ones((4, 2, 1, 3))),
                   ndim=3)
        # Use shape
        check_init((4, ), (2, 2, 3),
                   GaussianARD(np.zeros((4, 1, 2, 3)),
                               np.ones((4, 1, 2, 3)),
                               ndim=2),
                   Gamma(np.ones((4, 2, 1, 3)), np.ones((4, 2, 1, 3))),
                   shape=(2, 2, 3))
        # Use ndim and shape
        check_init((4, 2), (2, 3),
                   GaussianARD(np.zeros((2, 1, 3)), np.ones((2, 1, 3)),
                               ndim=2),
                   Gamma(np.ones((4, 1, 2, 3)), np.ones((4, 1, 2, 3))),
                   ndim=2,
                   shape=(2, 3))

        # Test for a found bug
        check_init((), (3, ), np.ones(3), 1, ndim=1)

        # Add axes if necessary
        check_init((), (1, 2, 3),
                   GaussianARD(np.zeros((2, 3)), np.ones((2, 3)), ndim=2),
                   1,
                   ndim=3)

        #
        # Errors
        #

        # Inconsistent shapes
        self.assertRaises(ValueError,
                          GaussianARD,
                          GaussianARD(np.zeros((2, 3)),
                                      np.ones((2, 3)),
                                      ndim=1),
                          np.ones((4, 3)),
                          ndim=2)

        # Inconsistent dims of mu and alpha
        self.assertRaises(ValueError, GaussianARD, np.zeros((2, 3)),
                          np.ones((2, )))
        # Inconsistent plates of mu and alpha
        self.assertRaises(ValueError,
                          GaussianARD,
                          GaussianARD(np.zeros((3, 2, 3)),
                                      np.ones((3, 2, 3)),
                                      ndim=2),
                          np.ones((3, 4, 2, 3)),
                          ndim=3)
        # Inconsistent ndim and shape
        self.assertRaises(ValueError,
                          GaussianARD,
                          np.zeros((2, 3)),
                          np.ones((2, )),
                          shape=(2, 3),
                          ndim=1)
        # Parent mu has more axes
        self.assertRaises(ValueError,
                          GaussianARD,
                          GaussianARD(np.zeros((2, 3)),
                                      np.ones((2, 3)),
                                      ndim=2),
                          np.ones((2, 3)),
                          ndim=1)
        # Incorrect shape
        self.assertRaises(ValueError,
                          GaussianARD,
                          GaussianARD(np.zeros((2, 3)),
                                      np.ones((2, 3)),
                                      ndim=2),
                          np.ones((2, 3)),
                          shape=(2, 2))

        pass
Example #16
    def test_message_to_parent(self):
        """
        Test the message to parents of the Mixture node.
        """

        K = 3

        # Broadcasting the moments on the cluster axis
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1,
                      plates=(K,))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(m[0] * np.ones(K),
                            random.gaussian_logpdf(xx*alpha,
                                                   x*alpha*mu,
                                                   mumu*alpha,
                                                   logalpha,
                                                   0)
                            * np.ones(K))
        m = Mu._message_from_children()
        self.assertAllClose(m[0],
                            1/K * (alpha*x) * np.ones(3))
        self.assertAllClose(m[1],
                            -0.5 * 1/K * alpha * np.ones(3))

        # Some parameters do not have cluster plate axis
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1) # Note: no cluster plate axis!
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(m[0] * np.ones(K),
                            random.gaussian_logpdf(xx*alpha,
                                                   x*alpha*mu,
                                                   mumu*alpha,
                                                   logalpha,
                                                   0)
                            * np.ones(K))
                                                   
        m = Mu._message_from_children()
        self.assertAllClose(m[0],
                            1/K * (alpha*x) * np.ones(3))
        self.assertAllClose(m[1],
                            -0.5 * 1/K * alpha * np.ones(3))

        # Cluster assignments do not have as many plate axes as parameters.
        M = 2
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,M))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1,
                      plates=(K,M))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha, cluster_plate=-2)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5 * np.ones(M)
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(m[0]*np.ones(K),
                            np.sum(random.gaussian_logpdf(xx*alpha,
                                                          x*alpha*mu,
                                                          mumu*alpha,
                                                          logalpha,
                                                          0) *
                                   np.ones((K,M)),
                                   axis=-1))
                                                   
        m = Mu._message_from_children()
        self.assertAllClose(m[0] * np.ones((K,M)),
                            1/K * (alpha*x) * np.ones((K,M)))
        self.assertAllClose(m[1] * np.ones((K,M)),
                            -0.5 * 1/K * alpha * np.ones((K,M)))
        

        # Mixed distribution broadcasts g
        # This tests for a found bug. The bug caused an error.
        Z = Categorical([0.3, 0.5, 0.2])
        X = Mixture(Z, Categorical, [[0.2,0.8], [0.1,0.9], [0.3,0.7]])
        m = Z._message_from_children()

        #
        # Test nested mixtures
        #
        t1 = [1, 1, 0, 3, 3]
        t2 = [2]
        p = Dirichlet([1, 1], plates=(4, 3))
        X = Mixture(t1, Mixture, t2, Categorical, p)
        X.observe([1, 1, 0, 0, 0])
        p.update()
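        # phi[0] of the Dirichlet holds its posterior concentration: the prior
        # counts (all ones) plus the observations routed to each plate by the
        # nested mixture assignments (t1, t2).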
        self.assertAllClose(
            p.phi[0],
            [
                [[1, 1], [1, 1], [2, 1]],
                [[1, 1], [1, 1], [1, 3]],
                [[1, 1], [1, 1], [1, 1]],
                [[1, 1], [1, 1], [3, 1]],
            ]
        )

        # Test sample plates in nested mixtures
        t1 = Categorical([0.3, 0.7], plates=(5,))
        t2 = [[1], [1], [0], [3], [3]]
        t3 = 2
        p = Dirichlet([1, 1], plates=(2, 4, 3))
        X = Mixture(t1, Mixture, t2, Mixture, t3, Categorical, p)
        X.observe([1, 1, 0, 0, 0])
        p.update()
        self.assertAllClose(
            p.phi[0],
            [
                [
                    [[1, 1], [1, 1], [1.3, 1]],
                    [[1, 1], [1, 1], [1, 1.6]],
                    [[1, 1], [1, 1], [1, 1]],
                    [[1, 1], [1, 1], [1.6, 1]],
                ],
                [
                    [[1, 1], [1, 1], [1.7, 1]],
                    [[1, 1], [1, 1], [1, 2.4]],
                    [[1, 1], [1, 1], [1, 1]],
                    [[1, 1], [1, 1], [2.4, 1]],
                ]
            ]
        )

        # Check that Gate and nested Mixture are equal
        t1 = Categorical([0.3, 0.7], plates=(5,))
        t2 = Categorical([0.1, 0.3, 0.6], plates=(5, 1))
        p = Dirichlet([1, 2, 3, 4], plates=(2, 3))
        X = Mixture(t1, Mixture, t2, Categorical, p)
        X.observe([3, 3, 1, 2, 2])
        t1_msg = t1._message_from_children()
        t2_msg = t2._message_from_children()
        p_msg = p._message_from_children()
        t1 = Categorical([0.3, 0.7], plates=(5,))
        t2 = Categorical([0.1, 0.3, 0.6], plates=(5, 1))
        p = Dirichlet([1, 2, 3, 4], plates=(2, 3))
        X = Categorical(Gate(t1, Gate(t2, p)))
        X.observe([3, 3, 1, 2, 2])
        t1_msg2 = t1._message_from_children()
        t2_msg2 = t2._message_from_children()
        p_msg2 = p._message_from_children()
        self.assertAllClose(t1_msg[0], t1_msg2[0])
        self.assertAllClose(t2_msg[0], t2_msg2[0])
        self.assertAllClose(p_msg[0], p_msg2[0])

        pass
Example #17
    def test_init(self):
        """
        Test the creation of the GaussianGammaISO node.
        """

        # Simple construction
        X_alpha = GaussianGammaISO([1, 2, 3], np.identity(3), 2, 10)
        self.assertEqual(X_alpha.plates, ())
        self.assertEqual(X_alpha.dims, ((3, ), (3, 3), (), ()))

        # Plates
        X_alpha = GaussianGammaISO([1, 2, 3],
                                   np.identity(3),
                                   2,
                                   10,
                                   plates=(4, ))
        self.assertEqual(X_alpha.plates, (4, ))
        self.assertEqual(X_alpha.dims, ((3, ), (3, 3), (), ()))

        # Plates in mu
        X_alpha = GaussianGammaISO(np.ones((4, 3)), np.identity(3), 2, 10)
        self.assertEqual(X_alpha.plates, (4, ))
        self.assertEqual(X_alpha.dims, ((3, ), (3, 3), (), ()))

        # Plates in Lambda
        X_alpha = GaussianGammaISO(np.ones(3),
                                   np.ones((4, 3, 3)) * np.identity(3), 2, 10)
        self.assertEqual(X_alpha.plates, (4, ))
        self.assertEqual(X_alpha.dims, ((3, ), (3, 3), (), ()))

        # Plates in a
        X_alpha = GaussianGammaISO(np.ones(3), np.identity(3), np.ones(4), 10)
        self.assertEqual(X_alpha.plates, (4, ))
        self.assertEqual(X_alpha.dims, ((3, ), (3, 3), (), ()))

        # Plates in b
        X_alpha = GaussianGammaISO(np.ones(3), np.identity(3), 2, np.ones(4))
        self.assertEqual(X_alpha.plates, (4, ))
        self.assertEqual(X_alpha.dims, ((3, ), (3, 3), (), ()))

        # Inconsistent plates
        self.assertRaises(ValueError,
                          GaussianGammaISO,
                          np.ones((4, 3)),
                          np.identity(3),
                          2,
                          10,
                          plates=())

        # Inconsistent plates
        self.assertRaises(ValueError,
                          GaussianGammaISO,
                          np.ones((4, 3)),
                          np.identity(3),
                          2,
                          10,
                          plates=(5, ))

        # Unknown parameters
        mu = Gaussian(np.zeros(3), np.identity(3))
        Lambda = Wishart(10, np.identity(3))
        b = Gamma(1, 1)
        X_alpha = GaussianGammaISO(mu, Lambda, 2, b)
        self.assertEqual(X_alpha.plates, ())
        self.assertEqual(X_alpha.dims, ((3, ), (3, 3), (), ()))

        # mu is Gaussian-gamma
        mu_tau = GaussianGammaISO(np.ones(3), np.identity(3), 5, 5)
        X_alpha = GaussianGammaISO(mu_tau, np.identity(3), 5, 5)
        self.assertEqual(X_alpha.plates, ())
        self.assertEqual(X_alpha.dims, ((3, ), (3, 3), (), ()))

        pass
Example #18
            y = np.mean(xx)
            firstline = False
            continue
        y = np.vstack([y, (k * np.mean(xx) + c + s * np.random.randn(1))])

    y = y.reshape(y.shape[0], )

    X = x2.reshape(x2.shape[0], 1)

    from bayespy.nodes import GaussianARD, SumMultiply, Gamma
    from bayespy.inference import VB

    B = GaussianARD(0, 1e-6, shape=(X.shape[1], ))
    F = SumMultiply('i,i', B, X)

    tau = Gamma(1e-3, 1e-3)
    Y = GaussianARD(F, tau)
    Y.observe(y)
    Q = VB(Y, B, tau)
    Q.update(repeat=100)

    # F.get_moments() returns the moments [E[F], E[F**2]] of the noiseless
    # output, not a (min, max) interval; the posterior mean of each output
    # is the first moment.
    moments = F.get_moments()
    result = list(moments[0])
Example #19
    def test_gradient(self):
        """Test standard gradient of a Gamma node."""
        D = 3

        np.random.seed(42)

        #
        # Without observations
        #

        # Construct model
        a = np.random.rand(D)
        b = np.random.rand(D)
        tau = Gamma(a, b)
        Q = VB(tau)
        # Random initialization
        tau.initialize_from_parameters(np.random.rand(D),
                                       np.random.rand(D))
        # Initial parameters
        phi0 = tau.phi
        # Gradient
        rg = tau.get_riemannian_gradient()
        g = tau.get_gradient(rg)
        # Numerical gradient
        eps = 1e-8
        p0 = tau.get_parameters()
        l0 = Q.compute_lowerbound(ignore_masked=False)
        g_num = [np.zeros(D), np.zeros(D)]
        for i in range(D):
            e = np.zeros(D)
            e[i] = eps
            p1 = p0[0] + e
            tau.set_parameters([p1, p0[1]])
            l1 = Q.compute_lowerbound(ignore_masked=False)
            g_num[0][i] = (l1 - l0) / eps
        for i in range(D):
            e = np.zeros(D)
            e[i] = eps
            p1 = p0[1] + e
            tau.set_parameters([p0[0], p1])
            l1 = Q.compute_lowerbound(ignore_masked=False)
            g_num[1][i] = (l1 - l0) / eps

        # Check
        self.assertAllClose(g[0],
                            g_num[0])
        self.assertAllClose(g[1],
                            g_num[1])

        #
        # With observations
        #

        # Construct model
        a = np.random.rand(D)
        b = np.random.rand(D)
        tau = Gamma(a, b)
        mu = np.random.randn(D)
        Y = GaussianARD(mu, tau)
        Y.observe(np.random.randn(D))
        Q = VB(Y, tau)
        # Random initialization
        tau.initialize_from_parameters(np.random.rand(D),
                                       np.random.rand(D))
        # Initial parameters
        phi0 = tau.phi
        # Gradient
        rg = tau.get_riemannian_gradient()
        g = tau.get_gradient(rg)
        # Numerical gradient
        eps = 1e-8
        p0 = tau.get_parameters()
        l0 = Q.compute_lowerbound(ignore_masked=False)
        g_num = [np.zeros(D), np.zeros(D)]
        for i in range(D):
            e = np.zeros(D)
            e[i] = eps
            p1 = p0[0] + e
            tau.set_parameters([p1, p0[1]])
            l1 = Q.compute_lowerbound(ignore_masked=False)
            g_num[0][i] = (l1 - l0) / eps
        for i in range(D):
            e = np.zeros(D)
            e[i] = eps
            p1 = p0[1] + e
            tau.set_parameters([p0[0], p1])
            l1 = Q.compute_lowerbound(ignore_masked=False)
            g_num[1][i] = (l1 - l0) / eps

        # Check
        self.assertAllClose(g[0],
                            g_num[0])
        self.assertAllClose(g[1],
                            g_num[1])

        pass
Example #20
    def test_message_to_parent_alpha(self):
        """
        Test the message from GaussianARD to the 2nd parent (alpha).
        """

        # Check formula with uncertain parent mu
        mu = GaussianARD(1,1)
        tau = Gamma(0.5*1e10, 1e10)
        X = GaussianARD(mu,
                        tau)
        X.observe(3)
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0,
                            -0.5*(3**2 - 2*3*1 + 1**2+1))
        self.assertAllClose(m1,
                            0.5)

        # Check formula with uncertain node
        tau = Gamma(1e10, 1e10)
        X = GaussianARD(2, tau)
        Y = GaussianARD(X, 1)
        Y.observe(5)
        X.update()
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0,
                            -0.5*(1/(1+1)+3.5**2 - 2*3.5*2 + 2**2))
        self.assertAllClose(m1,
                            0.5)

        # Check alpha larger than mu
        alpha = Gamma(np.ones((3,2,3))*1e10, 1e10)
        X = GaussianARD(np.ones((2,3)),
                        alpha,
                        ndim=3)
        X.observe(2*np.ones((3,2,3)))
        (m0, m1) = alpha._message_from_children()
        self.assertAllClose(m0 * np.ones((3,2,3)),
                            -0.5*(2**2 - 2*2*1 + 1**2) * np.ones((3,2,3)))
        self.assertAllClose(m1*np.ones((3,2,3)),
                            0.5*np.ones((3,2,3)))

        # Check mu larger than alpha
        tau = Gamma(np.ones((2,3))*1e10, 1e10)
        X = GaussianARD(np.ones((3,2,3)),
                        tau,
                        ndim=3)
        X.observe(2*np.ones((3,2,3)))
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0,
                            -0.5*(2**2 - 2*2*1 + 1**2) * 3 * np.ones((2,3)))
        self.assertAllClose(m1 * np.ones((2,3)),
                            0.5 * 3 * np.ones((2,3)))

        # Check node larger than mu and alpha
        tau = Gamma(np.ones((3,))*1e10, 1e10)
        X = GaussianARD(np.ones((2,3)),
                        tau,
                        shape=(3,2,3))
        X.observe(2*np.ones((3,2,3)))
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0 * np.ones(3),
                            -0.5*(2**2 - 2*2*1 + 1**2) * 6 * np.ones((3,)))
        self.assertAllClose(m1 * np.ones(3),
                            0.5 * 6 * np.ones(3))

        # Check plates for smaller mu than node
        tau = Gamma(np.ones((4,1,2,3))*1e10, 1e10)
        X = GaussianARD(GaussianARD(1, 1, 
                                    shape=(3,),
                                    plates=(4,1,1)),
                        tau,
                        shape=(2,3),
                        plates=(4,5))
        X.observe(2*np.ones((4,5,2,3)))
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0 * np.ones((4,1,2,3)),
                            (-0.5 * (2**2 - 2*2*1 + 1**2+1)
                             * 5*np.ones((4,1,2,3))))
        self.assertAllClose(m1 * np.ones((4,1,2,3)),
                            5*0.5 * np.ones((4,1,2,3)))

        # Check mask
        tau = Gamma(np.ones((4,3))*1e10, 1e10)
        X = GaussianARD(np.ones(3),
                        tau,
                        shape=(3,),
                        plates=(2,4,))
        X.observe(2*np.ones((2,4,3)), mask=[[True, False, True, False],
                                            [False, True, True, False]])
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0 * np.ones((4,3)),
                            (-0.5 * (2**2 - 2*2*1 + 1**2) 
                             * np.ones((4,3)) 
                             * np.array([[1], [1], [2], [0]])))
        self.assertAllClose(m1 * np.ones((4,3)),
                            0.5 * np.array([[1], [1], [2], [0]]) * np.ones((4,3)))
        
        # Check non-ARD Gaussian child
        mu = np.array([1,2])
        alpha = np.array([3,4])
        Alpha = Gamma(alpha*1e10, 1e10)
        Lambda = np.array([[1, 0.5],
                          [0.5, 1]])
        X = GaussianARD(mu, Alpha, ndim=1)
        Y = Gaussian(X, Lambda)
        y = np.array([5,6])
        Y.observe(y)
        X.update()
        (m0, m1) = Alpha._message_from_children()
        Cov = np.linalg.inv(np.diag(alpha)+Lambda)
        mean = np.dot(Cov, np.dot(np.diag(alpha), mu)
                           + np.dot(Lambda, y))
        self.assertAllClose(m0 * np.ones(2),
                            -0.5 * np.diag(
                                np.outer(mean, mean) + Cov
                                - np.outer(mean, mu)
                                - np.outer(mu, mean)
                                + np.outer(mu, mu)))
        self.assertAllClose(m1 * np.ones(2),
                            0.5 * np.ones(2))
        
        pass
Example #21
    def test_message_to_parent(self):
        """
        Test the message to parents of the Mixture node.
        """

        K = 3

        # Broadcasting the moments on the cluster axis
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1,
                      plates=(K,))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = X._message_to_parent(0)
        self.assertAllClose(m[0],
                            random.gaussian_logpdf(xx*alpha,
                                                   x*alpha*mu,
                                                   mumu*alpha,
                                                   logalpha,
                                                   0))
                                                   
        m = X._message_to_parent(1)
        self.assertAllClose(m[0],
                            1/K * (alpha*x) * np.ones(3))
        self.assertAllClose(m[1],
                            -0.5 * 1/K * alpha * np.ones(3))

        # Some parameters do not have cluster plate axis
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1) # Note: no cluster plate axis!
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = X._message_to_parent(0)
        self.assertAllClose(m[0],
                            random.gaussian_logpdf(xx*alpha,
                                                   x*alpha*mu,
                                                   mumu*alpha,
                                                   logalpha,
                                                   0))
                                                   
        m = X._message_to_parent(1)
        self.assertAllClose(m[0],
                            1/K * (alpha*x) * np.ones(3))
        self.assertAllClose(m[1],
                            -0.5 * 1/K * alpha * np.ones(3))

        # Cluster assignments do not have as many plate axes as parameters.
        M = 2
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,M))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1,
                      plates=(K,M))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha, cluster_plate=-2)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5 * np.ones(M)
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = X._message_to_parent(0)
        self.assertAllClose(m[0]*np.ones(K),
                            np.sum(random.gaussian_logpdf(xx*alpha,
                                                          x*alpha*mu,
                                                          mumu*alpha,
                                                          logalpha,
                                                          0) *
                                   np.ones((K,M)),
                                   axis=-1))
                                                   
        m = X._message_to_parent(1)
        self.assertAllClose(m[0] * np.ones((K,M)),
                            1/K * (alpha*x) * np.ones((K,M)))
        self.assertAllClose(m[1] * np.ones((K,M)),
                            -0.5 * 1/K * alpha * np.ones((K,M)))
        

        pass
Example #22
def lssm(M, N, D, K=1, drift_C=False, drift_A=False):

    if (drift_C or drift_A) and K <= 0:
        raise ValueError("K must be a positive integer when using drift")

    # Drift weights
    if drift_A or drift_C:
        # Dynamics matrix with ARD
        # beta : (K) x ()
        beta = Gamma(1e-5,
                     1e-5,
                     plates=(K,),
                     name='beta')
        # B : (K) x (K)
        B = GaussianArrayARD(np.identity(K),
                             beta,
                             shape=(K,),
                             plates=(K,),
                             name='B',
                             plotter=bpplt.GaussianHintonPlotter(rows=0, 
                                                                 cols=1,
                                                                 scale=0),
                             initialize=False)
        B.initialize_from_value(np.identity(K))

        # State of the drift, that is, temporal weights for dynamics matrices
        # S : () x (N,K)
        S = GaussianMarkovChain(np.ones(K),
                                1e-6*np.identity(K),
                                B,
                                np.ones(K),
                                n=N,
                                name='S',
                                plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
                                initialize=False)
        s = np.random.randn(N,K)
        s[:,0] = 10
        S.initialize_from_value(s)

    if not drift_A:

        # Dynamic matrix
        # alpha: (D) x ()
        alpha = Gamma(1e-5,
                      1e-5,
                      plates=(D,),
                      name='alpha')
        # A : (D) x (D)
        A = GaussianArrayARD(0,
                             alpha,
                             shape=(D,),
                             plates=(D,),
                             name='A',
                             plotter=bpplt.GaussianHintonPlotter(rows=0, 
                                                                 cols=1,
                                                                 scale=0),
                             initialize=False)
        A.initialize_from_value(np.identity(D))

        # Latent states with dynamics
        # X : () x (N,D)
        X = GaussianMarkovChain(np.zeros(D),         # mean of x0
                                1e-3*np.identity(D), # prec of x0
                                A,                   # dynamics
                                np.ones(D),          # innovation
                                n=N,                 # time instances
                                name='X',
                                plotter=bpplt.GaussianMarkovChainPlotter(),
                                initialize=False)
        X.initialize_from_value(np.random.randn(N,D))

    else:
        
        # Projection matrix of the dynamics matrix
        # alpha : (K) x ()
        alpha = Gamma(1e-5,
                      1e-5,
                      plates=(D,K),
                      name='alpha')
        # A : (D) x (D,K)
        A = GaussianArrayARD(0,
                             alpha,
                             shape=(D,K),
                             plates=(D,),
                             name='A',
                             plotter=bpplt.GaussianHintonPlotter(rows=0, 
                                                                 cols=1,
                                                                 scale=0),
                             initialize=False)

        # Initialize S and A such that A*S is almost an identity matrix
        a = np.zeros((D,D,K))
        a[:,:,0] = np.identity(D) / s[0,0]
        a[:,:,1:] = 0.1/s[0,0]*np.random.randn(D,D,K-1)
        A.initialize_from_value(a)

        # Latent states with dynamics
        # X : () x (N,D)
        X = DriftingGaussianMarkovChain(np.zeros(D),         # mean of x0
                                        1e-3*np.identity(D), # prec of x0
                                        A,                   # dynamics matrices
                                        S.as_gaussian()[1:], # temporal weights
                                        np.ones(D),          # innovation
                                        n=N,                 # time instances
                                        name='X',
                                        plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
                                        initialize=False)
        X.initialize_from_value(np.random.randn(N,D))

    if not drift_C:
        # Mixing matrix from latent space to observation space using ARD
        # gamma : (D) x ()
        gamma = Gamma(1e-5,
                      1e-5,
                      plates=(D,),
                      name='gamma')
        # C : (M,1) x (D)
        C = GaussianArrayARD(0,
                             gamma,
                             shape=(D,),
                             plates=(M,1),
                             name='C',
                             plotter=bpplt.GaussianHintonPlotter(rows=0,
                                                                 cols=2,
                                                                 scale=0))
        C.initialize_from_value(np.random.randn(M,1,D))

        # Noiseless process
        # F : (M,N) x ()
        F = SumMultiply('d,d',
                        C,
                        X.as_gaussian(),
                        name='F')
    else:
        # Mixing matrix from latent space to observation space using ARD
        # gamma : (D,K) x ()
        gamma = Gamma(1e-5,
                      1e-5,
                      plates=(D,K),
                      name='gamma')
        # C : (M,1) x (D,K)
        C = GaussianArrayARD(0,
                             gamma,
                             shape=(D,K),
                             plates=(M,1),
                             name='C',
                             plotter=bpplt.GaussianHintonPlotter(rows=0,
                                                                 cols=2,
                                                                 scale=0))
        C.initialize_from_random()

        # Noiseless process
        # F : (M,N) x ()
        F = SumMultiply('dk,d,k',
                        C,
                        X.as_gaussian(),
                        S.as_gaussian(),
                        name='F')

    # Observation noise
    # tau : () x ()
    tau = Gamma(1e-5,
                1e-5,
                name='tau')
    tau.initialize_from_value(1e2)

    # Observations
    # Y: (M,N) x ()
    Y = GaussianArrayARD(F,
                         tau,
                         name='Y')

    # Construct inference machine
    if drift_C or drift_A:
        Q = VB(Y, F, C, gamma, X, A, alpha, tau, S, B, beta)
    else:
        Q = VB(Y, F, C, gamma, X, A, alpha, tau)

    return Q
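
A hedged call sketch exercising the drift flags (sizes and data are placeholders, not from the original):

Q = lssm(30, 400, 5, K=3, drift_A=True)
Q['Y'].observe(data)   # data: (M, N) array, assumed available
Q.update(repeat=100)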
Example #23
    def test_riemannian_gradient(self):
        """Test Riemannian gradient of a Gamma node."""

        #
        # Without observations
        #

        # Construct model
        a = np.random.rand()
        b = np.random.rand()
        tau = Gamma(a, b)
        # Random initialization
        tau.initialize_from_parameters(np.random.rand(),
                                       np.random.rand())
        # Initial parameters
        phi0 = tau.phi
        # Gradient
        g = tau.get_riemannian_gradient()
        # Parameters after VB-EM update
        tau.update()
        phi1 = tau.phi
        # Check
        self.assertAllClose(g[0],
                            phi1[0] - phi0[0])
        self.assertAllClose(g[1],
                            phi1[1] - phi0[1])

        #
        # With observations
        #

        # Construct model
        a = np.random.rand()
        b = np.random.rand()
        tau = Gamma(a, b)
        mu = np.random.randn()
        Y = GaussianARD(mu, tau)
        Y.observe(np.random.randn())
        # Random initialization
        tau.initialize_from_parameters(np.random.rand(),
                                       np.random.rand())
        # Initial parameters
        phi0 = tau.phi
        # Gradient
        g = tau.get_riemannian_gradient()
        # Parameters after VB-EM update
        tau.update()
        phi1 = tau.phi
        # Check
        self.assertAllClose(g[0],
                            phi1[0] - phi0[0])
        self.assertAllClose(g[1],
                            phi1[1] - phi0[1])

        pass
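The property this test exercises: for a conjugate-exponential node, the
Riemannian (natural) gradient of the lower bound with respect to the natural
parameters phi equals the VB-EM fixed-point update minus the current value, so
a unit step along the gradient reproduces one update. A standalone sketch of
the same check (assuming phi and get_riemannian_gradient behave as used
above):

    import numpy as np
    from bayespy.nodes import Gamma
    tau = Gamma(3.0, 2.0)
    phi0 = [np.copy(p) for p in tau.phi]    # parameters before the step
    g = tau.get_riemannian_gradient()
    tau.update()                            # one VB-EM update
    # now tau.phi[i] equals phi0[i] + g[i] for i = 0, 1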
Exemplo n.º 26
0
import numpy as np

np.random.seed(1)
data = np.random.normal(5, 10, size=(10, ))
from bayespy.nodes import GaussianARD, Gamma

mu = GaussianARD(0, 1e-6)
tau = Gamma(1e-6, 1e-6)
y = GaussianARD(mu, tau, plates=(10, ))
y.observe(data)
from bayespy.inference import VB

Q = VB(mu, tau, y)
Q.update(repeat=20)
import bayespy.plot as bpplt

bpplt.pyplot.subplot(2, 1, 1)
bpplt.pdf(mu, np.linspace(-10, 20, num=100), color='k', name=r'\mu')
bpplt.pyplot.subplot(2, 1, 2)
bpplt.pdf(tau, np.linspace(1e-6, 0.08, num=100), color='k', name=r'\tau')
bpplt.pyplot.tight_layout()
bpplt.pyplot.show()
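Beyond the plots, the fitted posteriors can be inspected numerically; a small
sketch using the public moment accessor:

    print(mu.get_moments()[0])    # posterior mean E[mu]
    print(tau.get_moments()[0])   # posterior mean E[tau]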
Exemplo n.º 27
0
def model(M, N, D, K):
    """
    Construct the linear state-space model with time-varying dynamics

    For reference, see the following publication:
    (TODO)
    """

    #
    # The model block for the latent mixing weight process
    #
    
    # Dynamics matrix with ARD
    # beta : (K) x ()
    beta = Gamma(1e-5,
                 1e-5,
                 plates=(K,),
                 name='beta')
    # B : (K) x (K)
    B = GaussianARD(np.identity(K),
                    beta,
                    shape=(K,),
                    plates=(K,),
                    name='B',
                    plotter=bpplt.GaussianHintonPlotter(rows=0, 
                                                        cols=1,
                                                        scale=0),
                    initialize=False)
    B.initialize_from_value(np.identity(K))

    # Mixing weight process, that is, the weights in the linear combination of
    # state dynamics matrices
    # S : () x (N,K)
    S = GaussianMarkovChain(np.ones(K),
                            1e-6*np.identity(K),
                            B,
                            np.ones(K),
                            n=N,
                            name='S',
                            plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
                            initialize=False)
    s = 10*np.random.randn(N,K)
    s[:,0] = 10
    S.initialize_from_value(s)

    #
    # The model block for the latent states
    #
        
    # Projection matrix of the dynamics matrix
    # alpha : (D,K) x ()
    alpha = Gamma(1e-5,
                  1e-5,
                  plates=(D,K),
                  name='alpha')
    alpha.initialize_from_value(1*np.ones((D,K)))
    # A : (D) x (D,K)
    A = GaussianARD(0,
                    alpha,
                    shape=(D,K),
                    plates=(D,),
                    name='A',
                    plotter=bpplt.GaussianHintonPlotter(rows=0, 
                                                        cols=1,
                                                        scale=0),
                    initialize=False)

    # Initialize S and A such that A*S is almost an identity matrix
    a = np.zeros((D,D,K))
    a[np.arange(D),np.arange(D),np.zeros(D,dtype=int)] = 1
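    # Note: the unit diagonal set on component 0 above is immediately
    # overwritten by the scaled identity on the next line.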
    a[:,:,0] = np.identity(D) / s[0,0]
    a[:,:,1:] = 0.1/s[0,0]*np.random.randn(D,D,K-1)
    A.initialize_from_value(a)

    # Latent states with dynamics
    # X : () x (N,D)
    X = VaryingGaussianMarkovChain(np.zeros(D),         # mean of x0
                                   1e-3*np.identity(D), # prec of x0
                                   A,                   # dynamics matrices
                                   S._convert(GaussianMoments)[1:], # temporal weights
                                   np.ones(D),          # innovation
                                   n=N,                 # time instances
                                   name='X',
                                   plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
                                   initialize=False)
    X.initialize_from_value(np.random.randn(N,D))

    #
    # The model block for observations
    #

    # Mixing matrix from latent space to observation space using ARD
    # gamma : (D) x ()
    gamma = Gamma(1e-5,
                  1e-5,
                  plates=(D,),
                  name='gamma')
    gamma.initialize_from_value(1e-2*np.ones(D))
    # C : (M,1) x (D)
    C = GaussianARD(0,
                    gamma,
                    shape=(D,),
                    plates=(M,1),
                    name='C',
                    plotter=bpplt.GaussianHintonPlotter(rows=0,
                                                        cols=2,
                                                        scale=0))
    C.initialize_from_value(np.random.randn(M,1,D))

    # Noiseless process
    # F : (M,N) x ()
    F = SumMultiply('d,d',
                    C,
                    X,
                    name='F')
                  
    # Observation noise
    # tau : () x ()
    tau = Gamma(1e-5,
                1e-5,
                name='tau')
    tau.initialize_from_value(1e2)

    # Observations
    # Y: (M,N) x ()
    Y = GaussianARD(F,
                    tau,
                    name='Y')

    # Construct inference machine
    Q = VB(Y, F, C, gamma, X, A, alpha, tau, S, B, beta)

    return Q
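Because the nodes above were given plotters, the fitted model can be
visualized after inference; a sketch (bpplt.plot dispatches on a node's
plotter, as elsewhere in these examples):

    Q = model(M=20, N=200, D=3, K=2)
    # ... observe data via Q['Y'].observe(...) and run Q.update(repeat=...) ...
    bpplt.plot(Q['X'])     # plot the posterior of the latent states
    bpplt.pyplot.show()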
Exemplo n.º 28
0
    def test_message_to_parent(self):
        """
        Test the message to parents of Mixture node.
        """

        K = 3

        # Broadcasting the moments on the cluster axis
        Mu = GaussianARD(2, 1, ndim=0, plates=(K, ))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1, plates=(K, ))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K) / K)
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(
            m[0] * np.ones(K),
            random.gaussian_logpdf(xx * alpha, x * alpha * mu, mumu * alpha,
                                   logalpha, 0) * np.ones(K))
        m = Mu._message_from_children()
        self.assertAllClose(m[0], 1 / K * (alpha * x) * np.ones(3))
        self.assertAllClose(m[1], -0.5 * 1 / K * alpha * np.ones(3))

        # Some parameters do not have cluster plate axis
        Mu = GaussianARD(2, 1, ndim=0, plates=(K, ))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1)  # Note: no cluster plate axis!
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K) / K)
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(
            m[0] * np.ones(K),
            random.gaussian_logpdf(xx * alpha, x * alpha * mu, mumu * alpha,
                                   logalpha, 0) * np.ones(K))

        m = Mu._message_from_children()
        self.assertAllClose(m[0], 1 / K * (alpha * x) * np.ones(3))
        self.assertAllClose(m[1], -0.5 * 1 / K * alpha * np.ones(3))

        # Cluster assignments do not have as many plate axes as parameters.
        M = 2
        Mu = GaussianARD(2, 1, ndim=0, plates=(K, M))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1, plates=(K, M))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K) / K)
        X = Mixture(z, GaussianARD, Mu, Alpha, cluster_plate=-2)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5 * np.ones(M)
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(
            m[0] * np.ones(K),
            np.sum(random.gaussian_logpdf(xx * alpha, x * alpha * mu,
                                          mumu * alpha, logalpha, 0) * np.ones(
                                              (K, M)),
                   axis=-1))

        m = Mu._message_from_children()
        self.assertAllClose(m[0] * np.ones((K, M)),
                            1 / K * (alpha * x) * np.ones((K, M)))
        self.assertAllClose(m[1] * np.ones((K, M)),
                            -0.5 * 1 / K * alpha * np.ones((K, M)))

        # Mixed distribution broadcasts g
        # This tests for a found bug. The bug caused an error.
        Z = Categorical([0.3, 0.5, 0.2])
        X = Mixture(Z, Categorical, [[0.2, 0.8], [0.1, 0.9], [0.3, 0.7]])
        m = Z._message_from_children()

        pass
Exemplo n.º 29
0
    def test_message_to_child(self):
        """
        Test the message to child of GaussianGamma node.
        """

        # Simple test
        mu = np.array([1,2,3])
        Lambda = np.identity(3)
        a = 2
        b = 10
        X_alpha = GaussianGamma(mu, Lambda, a, b)
        u = X_alpha._message_to_child()
        self.assertEqual(len(u), 4)
        tau = np.array(a/b)
        self.assertAllClose(u[0],
                            tau[...,None] * mu)
        self.assertAllClose(u[1],
                            (linalg.inv(Lambda) 
                             + tau[...,None,None] * linalg.outer(mu, mu)))
        self.assertAllClose(u[2],
                            tau)
        self.assertAllClose(u[3],
                            -np.log(b) + special.psi(a))

        # Test with unknown parents
        mu = Gaussian(np.arange(3), 10*np.identity(3))
        Lambda = Wishart(10, np.identity(3))
        a = 2
        b = Gamma(3, 15)
        X_alpha = GaussianGamma(mu, Lambda, a, b)
        u = X_alpha._message_to_child()
        (mu, mumu) = mu._message_to_child()
        Cov_mu = mumu - linalg.outer(mu, mu)
        (Lambda, _) = Lambda._message_to_child()
        (b, _) = b._message_to_child()
        (tau, logtau) = Gamma(a, b + 0.5*np.sum(Lambda*Cov_mu))._message_to_child()
        self.assertAllClose(u[0],
                            tau[...,None] * mu)
        self.assertAllClose(u[1],
                            (linalg.inv(Lambda)
                             + tau[...,None,None] * linalg.outer(mu, mu)))
        self.assertAllClose(u[2],
                            tau)
        self.assertAllClose(u[3],
                            logtau)

        # Test with plates
        mu = Gaussian(np.reshape(np.arange(3*4), (4,3)),
                      10*np.identity(3),
                      plates=(4,))
        Lambda = Wishart(10, np.identity(3))
        a = 2
        b = Gamma(3, 15)
        X_alpha = GaussianGamma(mu, Lambda, a, b, plates=(4,))
        u = X_alpha._message_to_child()
        (mu, mumu) = mu._message_to_child()
        Cov_mu = mumu - linalg.outer(mu, mu)
        (Lambda, _) = Lambda._message_to_child()
        (b, _) = b._message_to_child()
        (tau, logtau) = Gamma(a, 
                              b + 0.5*np.sum(Lambda*Cov_mu, 
                                             axis=(-1,-2)))._message_to_child()
        self.assertAllClose(u[0] * np.ones((4,1)),
                            np.ones((4,1)) * tau[...,None] * mu)
        self.assertAllClose(u[1] * np.ones((4,1,1)),
                            np.ones((4,1,1)) * (linalg.inv(Lambda)
                                                + tau[...,None,None] * linalg.outer(mu, mu)))
        self.assertAllClose(u[2] * np.ones(4),
                            np.ones(4) * tau)
        self.assertAllClose(u[3] * np.ones(4),
                            np.ones(4) * logtau)
        
        pass
Exemplo n.º 30
0
def model(M=20, N=100, D=10, K=3):
    """
    Construct the linear state-space model with switching dynamics.
    """

    #
    # Switching dynamics (HMM)
    #

    # Prior for initial state probabilities
    rho = Dirichlet(1e-3 * np.ones(K), name='rho')

    # Prior for state transition probabilities
    V = Dirichlet(1e-3 * np.ones(K), plates=(K, ), name='V')
    v = 10 * np.identity(K) + 1 * np.ones((K, K))
    v /= np.sum(v, axis=-1, keepdims=True)
    V.initialize_from_value(v)

    # Hidden states (with unknown initial state probabilities and state
    # transition probabilities)
    Z = CategoricalMarkovChain(rho,
                               V,
                               states=N - 1,
                               name='Z',
                               plotter=bpplt.CategoricalMarkovChainPlotter(),
                               initialize=False)
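    # Initialize the moments of Z directly: u[0] is the marginal of the first
    # state and u[1] holds the pairwise transition moments of the remaining
    # steps (an interpretation of CategoricalMarkovChain's moment list).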
    Z.u[0] = np.random.dirichlet(np.ones(K))
    Z.u[1] = np.reshape(
        np.random.dirichlet(0.5 * np.ones(K * K), size=(N - 2)), (N - 2, K, K))

    #
    # Linear state-space models
    #

    # Dynamics matrix with ARD
    # alpha : (K,1,D) x ()
    alpha = Gamma(1e-5, 1e-5, plates=(K, 1, D), name='alpha')
    # A : (K,D) x (D)
    A = GaussianARD(0,
                    alpha,
                    shape=(D, ),
                    plates=(K, D),
                    name='A',
                    plotter=bpplt.GaussianHintonPlotter())
    A.initialize_from_value(
        np.identity(D) * np.ones((K, D, D)) + 0.1 * np.random.randn(K, D, D))

    # Latent states with dynamics
    # X : () x (N,D)
    X = SwitchingGaussianMarkovChain(
        np.zeros(D),  # mean of x0
        1e-3 * np.identity(D),  # prec of x0
        A,  # dynamics
        Z,  # dynamics selection
        np.ones(D),  # innovation
        n=N,  # time instances
        name='X',
        plotter=bpplt.GaussianMarkovChainPlotter())
    X.initialize_from_value(10 * np.random.randn(N, D))

    # Mixing matrix from latent space to observation space using ARD
    # gamma : (D) x ()
    gamma = Gamma(1e-5, 1e-5, plates=(D, ), name='gamma')
    # C : (M,1) x (D)
    C = GaussianARD(0,
                    gamma,
                    shape=(D, ),
                    plates=(M, 1),
                    name='C',
                    plotter=bpplt.GaussianHintonPlotter(rows=-3, cols=-1))
    C.initialize_from_value(np.random.randn(M, 1, D))

    # Underlying noiseless function
    # F : (M,N) x ()
    F = SumMultiply('i,i', C, X, name='F')

    #
    # Mixing the models
    #

    # Observation noise
    tau = Gamma(1e-5, 1e-5, name='tau')
    tau.initialize_from_value(1e2)

    # Emission/observation distribution
    Y = GaussianARD(F, tau, name='Y')

    Q = VB(Y, F, Z, rho, V, C, gamma, X, A, alpha, tau)

    return Q
Exemplo n.º 31
0
    def test_message_to_parent_mu(self):
        """
        Test that GaussianARD computes the message to the 1st parent correctly.
        """

        # Check formula with uncertain parent alpha
        mu = GaussianARD(0, 1)
        alpha = Gamma(2, 1)
        X = GaussianARD(mu, alpha)
        X.observe(3)
        (m0, m1) = mu._message_from_children()
        #(m0, m1) = X._message_to_parent(0)
        self.assertAllClose(m0, 2 * 3)
        self.assertAllClose(m1, -0.5 * 2)

        # Check formula with uncertain node
        mu = GaussianARD(1, 1e10)
        X = GaussianARD(mu, 2)
        Y = GaussianARD(X, 1)
        Y.observe(5)
        X.update()
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(m0, 2 * 1 / (2 + 1) * (2 * 1 + 1 * 5))
        self.assertAllClose(m1, -0.5 * 2)

        # Check alpha larger than mu
        mu = GaussianARD(np.zeros((2, 3)), 1e10, shape=(2, 3))
        X = GaussianARD(mu, 2 * np.ones((3, 2, 3)))
        X.observe(3 * np.ones((3, 2, 3)))
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(m0, 2 * 3 * 3 * np.ones((2, 3)))
        self.assertAllClose(m1, -0.5 * 3 * 2 * misc.identity(2, 3))

        # Check mu larger than alpha
        mu = GaussianARD(np.zeros((3, 2, 3)), 1e10, shape=(3, 2, 3))
        X = GaussianARD(mu, 2 * np.ones((2, 3)))
        X.observe(3 * np.ones((3, 2, 3)))
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(m0, 2 * 3 * np.ones((3, 2, 3)))
        self.assertAllClose(m1, -0.5 * 2 * misc.identity(3, 2, 3))

        # Check node larger than mu and alpha
        mu = GaussianARD(np.zeros((2, 3)), 1e10, shape=(2, 3))
        X = GaussianARD(mu, 2 * np.ones((3, )), shape=(3, 2, 3))
        X.observe(3 * np.ones((3, 2, 3)))
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(m0, 2 * 3 * 3 * np.ones((2, 3)))
        self.assertAllClose(m1, -0.5 * 2 * 3 * misc.identity(2, 3))

        # Check broadcasting of dimensions
        mu = GaussianARD(np.zeros((2, 1)), 1e10, shape=(2, 1))
        X = GaussianARD(mu, 2 * np.ones((2, 3)), shape=(2, 3))
        X.observe(3 * np.ones((2, 3)))
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(m0, 2 * 3 * 3 * np.ones((2, 1)))
        self.assertAllClose(m1, -0.5 * 2 * 3 * misc.identity(2, 1))

        # Check plates for smaller mu than node
        mu = GaussianARD(0, 1, shape=(3, ), plates=(4, 1, 1))
        X = GaussianARD(mu, 2 * np.ones((3, )), shape=(2, 3), plates=(4, 5))
        X.observe(3 * np.ones((4, 5, 2, 3)))
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(m0 * np.ones((4, 1, 1, 3)),
                            2 * 3 * 5 * 2 * np.ones((4, 1, 1, 3)))
        self.assertAllClose(
            m1 * np.ones((4, 1, 1, 3, 3)),
            -0.5 * 2 * 5 * 2 * misc.identity(3) * np.ones((4, 1, 1, 3, 3)))

        # Check mask
        mu = GaussianARD(np.zeros((2, 1, 3)), 1e10, shape=(3, ))
        X = GaussianARD(mu,
                        2 * np.ones((2, 4, 3)),
                        shape=(3, ),
                        plates=(2, 4))
        X.observe(3 * np.ones((2, 4, 3)),
                  mask=[[True, True, True, False], [False, True, False, True]])
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(m0, (2 * 3 * np.ones(
            (2, 1, 3)) * np.array([[[3]], [[2]]])))
        self.assertAllClose(m1, (-0.5 * 2 * misc.identity(3) * np.ones(
            (2, 1, 1, 1)) * np.array([[[[3]]], [[[2]]]])))

        # Check mask with different shapes
        mu = GaussianARD(np.zeros((2, 1, 3)), 1e10, shape=())
        X = GaussianARD(mu,
                        2 * np.ones((2, 4, 3)),
                        shape=(3, ),
                        plates=(2, 4))
        mask = np.array([[True, True, True, False], [False, True, False,
                                                     True]])
        X.observe(3 * np.ones((2, 4, 3)), mask=mask)
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(
            m0, 2 * 3 * np.sum(
                np.ones((2, 4, 3)) * mask[..., None], axis=-2, keepdims=True))
        self.assertAllClose(m1, (-0.5 * 2 * np.sum(
            np.ones((2, 4, 3)) * mask[..., None], axis=-2, keepdims=True)))

        # Check non-ARD Gaussian child
        mu = np.array([1, 2])
        Mu = GaussianARD(mu, 1e10, shape=(2, ))
        alpha = np.array([3, 4])
        Lambda = np.array([[1, 0.5], [0.5, 1]])
        X = GaussianARD(Mu, alpha)
        Y = Gaussian(X, Lambda)
        y = np.array([5, 6])
        Y.observe(y)
        X.update()
        (m0, m1) = Mu._message_from_children()
        mean = np.dot(np.linalg.inv(np.diag(alpha) + Lambda),
                      np.dot(np.diag(alpha), mu) + np.dot(Lambda, y))
        self.assertAllClose(m0, np.dot(np.diag(alpha), mean))
        self.assertAllClose(m1, -0.5 * np.diag(alpha))

        # Check broadcasted variable axes
        mu = GaussianARD(np.zeros(1), 1e10, shape=(1, ))
        X = GaussianARD(mu, 2, shape=(3, ))
        X.observe(3 * np.ones(3))
        (m0, m1) = mu._message_from_children()
        self.assertAllClose(m0,
                            2 * 3 * np.sum(np.ones(3), axis=-1, keepdims=True))
        self.assertAllClose(
            m1,
            -0.5 * 2 * np.sum(np.identity(3), axis=(-1, -2), keepdims=True))

        pass
Exemplo n.º 32
0
import numpy as np
np.random.seed(1)
from bayespy.nodes import GaussianARD, GaussianMarkovChain, Gamma, Dot
M = 30
N = 400
D = 10
alpha = Gamma(1e-5, 1e-5, plates=(D, ), name='alpha')
A = GaussianARD(0, alpha, shape=(D, ), plates=(D, ), name='A')
X = GaussianMarkovChain(np.zeros(D),
                        1e-3 * np.identity(D),
                        A,
                        np.ones(D),
                        n=N,
                        name='X')
gamma = Gamma(1e-5, 1e-5, plates=(D, ), name='gamma')
C = GaussianARD(0, gamma, shape=(D, ), plates=(M, 1), name='C')
F = Dot(C, X, name='F')
C.initialize_from_random()
tau = Gamma(1e-5, 1e-5, name='tau')
Y = GaussianARD(F, tau, name='Y')
from bayespy.inference import VB
Q = VB(X, C, gamma, A, alpha, tau, Y)
w = 0.3
a = np.array([[np.cos(w), -np.sin(w), 0, 0],
              [np.sin(w),  np.cos(w), 0, 0],
              [0,          0,         1, 0],
              [0,          0,         0, 0]])
c = np.random.randn(M, 4)
x = np.empty((N, 4))
f = np.empty((M, N))
y = np.empty((M, N))
x[0] = 10 * np.random.randn(4)
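The snippet is cut off here; a hedged reconstruction of the likely
continuation (simulate the rotation dynamics a, mix with c, add noise, then
fit the model defined above):

    for n in range(N - 1):
        x[n + 1] = np.dot(a, x[n]) + np.random.randn(4)   # latent dynamics
    f = np.einsum('ik,jk->ij', c, x)                      # noiseless signal, (M, N)
    y = f + np.random.randn(M, N)                         # noisy observations
    Y.observe(y)
    Q.update(repeat=10)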
Exemplo n.º 33
0
    def test_message_to_parent_alpha(self):
        """
        Test the message from GaussianARD to the 2nd parent (alpha).
        """

        # Check formula with uncertain parent mu
        mu = GaussianARD(1, 1)
        tau = Gamma(0.5 * 1e10, 1e10)
        X = GaussianARD(mu, tau)
        X.observe(3)
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0, -0.5 * (3**2 - 2 * 3 * 1 + 1**2 + 1))
        self.assertAllClose(m1, 0.5)

        # Check formula with uncertain node
        tau = Gamma(1e10, 1e10)
        X = GaussianARD(2, tau)
        Y = GaussianARD(X, 1)
        Y.observe(5)
        X.update()
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0,
                            -0.5 * (1 / (1 + 1) + 3.5**2 - 2 * 3.5 * 2 + 2**2))
        self.assertAllClose(m1, 0.5)

        # Check alpha larger than mu
        alpha = Gamma(np.ones((3, 2, 3)) * 1e10, 1e10)
        X = GaussianARD(np.ones((2, 3)), alpha, ndim=3)
        X.observe(2 * np.ones((3, 2, 3)))
        (m0, m1) = alpha._message_from_children()
        self.assertAllClose(
            m0 * np.ones((3, 2, 3)),
            -0.5 * (2**2 - 2 * 2 * 1 + 1**2) * np.ones((3, 2, 3)))
        self.assertAllClose(m1 * np.ones((3, 2, 3)), 0.5 * np.ones((3, 2, 3)))

        # Check mu larger than alpha
        tau = Gamma(np.ones((2, 3)) * 1e10, 1e10)
        X = GaussianARD(np.ones((3, 2, 3)), tau, ndim=3)
        X.observe(2 * np.ones((3, 2, 3)))
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(
            m0, -0.5 * (2**2 - 2 * 2 * 1 + 1**2) * 3 * np.ones((2, 3)))
        self.assertAllClose(m1 * np.ones((2, 3)), 0.5 * 3 * np.ones((2, 3)))

        # Check node larger than mu and alpha
        tau = Gamma(np.ones((3, )) * 1e10, 1e10)
        X = GaussianARD(np.ones((2, 3)), tau, shape=(3, 2, 3))
        X.observe(2 * np.ones((3, 2, 3)))
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(
            m0 * np.ones(3), -0.5 * (2**2 - 2 * 2 * 1 + 1**2) * 6 * np.ones(
                (3, )))
        self.assertAllClose(m1 * np.ones(3), 0.5 * 6 * np.ones(3))

        # Check plates for smaller mu than node
        tau = Gamma(np.ones((4, 1, 2, 3)) * 1e10, 1e10)
        X = GaussianARD(GaussianARD(1, 1, shape=(3, ), plates=(4, 1, 1)),
                        tau,
                        shape=(2, 3),
                        plates=(4, 5))
        X.observe(2 * np.ones((4, 5, 2, 3)))
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0 * np.ones(
            (4, 1, 2, 3)), (-0.5 * (2**2 - 2 * 2 * 1 + 1**2 + 1) * 5 * np.ones(
                (4, 1, 2, 3))))
        self.assertAllClose(m1 * np.ones((4, 1, 2, 3)), 5 * 0.5 * np.ones(
            (4, 1, 2, 3)))

        # Check mask
        tau = Gamma(np.ones((4, 3)) * 1e10, 1e10)
        X = GaussianARD(np.ones(3), tau, shape=(3, ), plates=(2, 4))
        X.observe(2 * np.ones((2, 4, 3)),
                  mask=[[True, False, True, False], [False, True, True,
                                                     False]])
        (m0, m1) = tau._message_from_children()
        self.assertAllClose(m0 * np.ones((4, 3)),
                            (-0.5 * (2**2 - 2 * 2 * 1 + 1**2) * np.ones(
                                (4, 3)) * np.array([[1], [1], [2], [0]])))
        self.assertAllClose(
            m1 * np.ones((4, 3)),
            0.5 * np.array([[1], [1], [2], [0]]) * np.ones((4, 3)))

        # Check non-ARD Gaussian child
        mu = np.array([1, 2])
        alpha = np.array([3, 4])
        Alpha = Gamma(alpha * 1e10, 1e10)
        Lambda = np.array([[1, 0.5], [0.5, 1]])
        X = GaussianARD(mu, Alpha, ndim=1)
        Y = Gaussian(X, Lambda)
        y = np.array([5, 6])
        Y.observe(y)
        X.update()
        (m0, m1) = Alpha._message_from_children()
        Cov = np.linalg.inv(np.diag(alpha) + Lambda)
        mean = np.dot(Cov, np.dot(np.diag(alpha), mu) + np.dot(Lambda, y))
        self.assertAllClose(
            m0 * np.ones(2), -0.5 * np.diag(
                np.outer(mean, mean) + Cov - np.outer(mean, mu) -
                np.outer(mu, mean) + np.outer(mu, mu)))
        self.assertAllClose(m1 * np.ones(2), 0.5 * np.ones(2))

        pass
Exemplo n.º 34
0
# Bayesian networks with BayesPy
# Example model: principal component analysis (PCA)

import numpy as np
from bayespy.nodes import Gaussian, GaussianARD, Wishart, Gamma
from bayespy.nodes import Dot
import bayespy.plot as bpplt

# -----Creating nodes-----
# Dimensionality of the latent space
D = 3
# Define each node
X = GaussianARD(0, 1, shape=(D, ), plates=(1, 100), name='X')
alpha = Gamma(1e-3, 1e-3, plates=(D, ), name='alpha')
C = GaussianARD(0, alpha, shape=(D, ), plates=(10, 1), name='C')
F = Dot(C, X)  # inner product
tau = Gamma(1e-3, 1e-3, name='tau')
Y = GaussianARD(F, tau, name='Y')

# -----Performing inference------
# 1: Observe some nodes
c = np.random.randn(10, 2)
x = np.random.randn(2, 100)
data = np.dot(c, x) + 0.1 * np.random.randn(10, 100)
# data:10×100

Y.observe(data)
# Alternatively, observe with missing values: the mask marks which plates are
# observed (True) and which are missing (False)
Y.observe(data,
          mask=[[True], [False], [False], [True], [True], [False], [True],
                [True], [True], [False]])
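The example stops before inference; a minimal continuation sketch in the style
of the other examples in this collection:

    from bayespy.inference import VB
    C.initialize_from_random()    # break symmetry before updating
    Q = VB(Y, C, X, alpha, tau)
    Q.update(repeat=100)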
Exemplo n.º 35
0
def model(M=20, N=100, D=10, K=3):
    """
    Construct the linear state-space model with switching dynamics.
    """

    #
    # Switching dynamics (HMM)
    #

    # Prior for initial state probabilities
    rho = Dirichlet(1e-3*np.ones(K),
                    name='rho')

    # Prior for state transition probabilities
    V = Dirichlet(1e-3*np.ones(K),
                  plates=(K,),
                  name='V')
    v = 10*np.identity(K) + 1*np.ones((K,K))
    v /= np.sum(v, axis=-1, keepdims=True)
    V.initialize_from_value(v)

    # Hidden states (with unknown initial state probabilities and state
    # transition probabilities)
    Z = CategoricalMarkovChain(rho, V,
                               states=N-1,
                               name='Z',
                               plotter=bpplt.CategoricalMarkovChainPlotter(),
                               initialize=False)
    Z.u[0] = np.random.dirichlet(np.ones(K))
    Z.u[1] = np.reshape(np.random.dirichlet(0.5*np.ones(K*K), size=(N-2)),
                        (N-2, K, K))

    #
    # Linear state-space models
    #

    # Dynamics matrix with ARD
    # alpha : (K,1,D) x ()
    alpha = Gamma(1e-5,
                  1e-5,
                  plates=(K,1,D),
                  name='alpha')
    # A : (K,D) x (D)
    A = GaussianARD(0,
                    alpha,
                    shape=(D,),
                    plates=(K,D),
                    name='A',
                    plotter=bpplt.GaussianHintonPlotter())
    A.initialize_from_value(np.identity(D)*np.ones((K,D,D))
                            + 0.1*np.random.randn(K,D,D))

    # Latent states with dynamics
    # X : () x (N,D)
    X = SwitchingGaussianMarkovChain(np.zeros(D),         # mean of x0
                                     1e-3*np.identity(D), # prec of x0
                                     A,                   # dynamics
                                     Z,                   # dynamics selection
                                     np.ones(D),          # innovation
                                     n=N,                 # time instances
                                     name='X',
                                     plotter=bpplt.GaussianMarkovChainPlotter())
    X.initialize_from_value(10*np.random.randn(N,D))

    # Mixing matrix from latent space to observation space using ARD
    # gamma : (D) x ()
    gamma = Gamma(1e-5,
                  1e-5,
                  plates=(D,),
                  name='gamma')
    # C : (M,1) x (D)
    C = GaussianARD(0,
                    gamma,
                    shape=(D,),
                    plates=(M,1),
                    name='C',
                    plotter=bpplt.GaussianHintonPlotter(rows=-3,cols=-1))
    C.initialize_from_value(np.random.randn(M,1,D))

    # Underlying noiseless function
    # F : (M,N) x ()
    F = SumMultiply('i,i', 
                    C, 
                    X,
                    name='F')
    
    #
    # Mixing the models
    #

    # Observation noise
    tau = Gamma(1e-5,
                1e-5,
                name='tau')
    tau.initialize_from_value(1e2)

    # Emission/observation distribution
    Y = GaussianARD(F, tau,
                    name='Y')

    Q = VB(Y, F,
           Z, rho, V,
           C, gamma, X, A, alpha,
           tau)

    return Q
Exemplo n.º 36
0
import numpy as np
np.random.seed(1)
M = 20
N = 100
x = np.random.randn(N, 2)
w = np.random.randn(M, 2)
f = np.einsum('ik,jk->ij', w, x)
y = f + 0.1 * np.random.randn(M, N)
D = 10
from bayespy.nodes import GaussianARD, Gamma, SumMultiply
X = GaussianARD(0, 1, plates=(1, N), shape=(D, ))
alpha = Gamma(1e-5, 1e-5, plates=(D, ))
C = GaussianARD(0, alpha, plates=(M, 1), shape=(D, ))
F = SumMultiply('d,d->', X, C)
tau = Gamma(1e-5, 1e-5)
Y = GaussianARD(F, tau)
Y.observe(y)
from bayespy.inference import VB
Q = VB(Y, X, C, alpha, tau)
C.initialize_from_random()
from bayespy.inference.vmp.transformations import RotateGaussianARD
rot_X = RotateGaussianARD(X)
rot_C = RotateGaussianARD(C, alpha)
from bayespy.inference.vmp.transformations import RotationOptimizer
R = RotationOptimizer(rot_X, rot_C, D)
Q.set_callback(R.rotate)
Q.update(repeat=1000)
import bayespy.plot as bpplt
bpplt.plot(F)
bpplt.plot(f, color='r', marker='x', linestyle='None')
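On a non-interactive matplotlib backend the figures are not shown until an
explicit call (an assumption about the execution environment):

    bpplt.pyplot.show()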
Exemplo n.º 37
0
    def test_message_to_parent(self):
        """
        Test the message to parents of Mixture node.
        """

        K = 3

        # Broadcasting the moments on the cluster axis
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1,
                      plates=(K,))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(m[0] * np.ones(K),
                            random.gaussian_logpdf(xx*alpha,
                                                   x*alpha*mu,
                                                   mumu*alpha,
                                                   logalpha,
                                                   0)
                            * np.ones(K))
        m = Mu._message_from_children()
        self.assertAllClose(m[0],
                            1/K * (alpha*x) * np.ones(3))
        self.assertAllClose(m[1],
                            -0.5 * 1/K * alpha * np.ones(3))

        # Some parameters do not have cluster plate axis
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1) # Note: no cluster plate axis!
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(m[0] * np.ones(K),
                            random.gaussian_logpdf(xx*alpha,
                                                   x*alpha*mu,
                                                   mumu*alpha,
                                                   logalpha,
                                                   0)
                            * np.ones(K))
                                                   
        m = Mu._message_from_children()
        self.assertAllClose(m[0],
                            1/K * (alpha*x) * np.ones(3))
        self.assertAllClose(m[1],
                            -0.5 * 1/K * alpha * np.ones(3))

        # Cluster assignments do not have as many plate axes as parameters.
        M = 2
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,M))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1,
                      plates=(K,M))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K)/K)
        X = Mixture(z, GaussianARD, Mu, Alpha, cluster_plate=-2)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5 * np.ones(M)
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = z._message_from_children()
        self.assertAllClose(m[0]*np.ones(K),
                            np.sum(random.gaussian_logpdf(xx*alpha,
                                                          x*alpha*mu,
                                                          mumu*alpha,
                                                          logalpha,
                                                          0) *
                                   np.ones((K,M)),
                                   axis=-1))
                                                   
        m = Mu._message_from_children()
        self.assertAllClose(m[0] * np.ones((K,M)),
                            1/K * (alpha*x) * np.ones((K,M)))
        self.assertAllClose(m[1] * np.ones((K,M)),
                            -0.5 * 1/K * alpha * np.ones((K,M)))
        

        # Mixed distribution broadcasts g
        # This tests for a found bug. The bug caused an error.
        Z = Categorical([0.3, 0.5, 0.2])
        X = Mixture(Z, Categorical, [[0.2,0.8], [0.1,0.9], [0.3,0.7]])
        m = Z._message_from_children()

        pass
Exemplo n.º 38
0
    def test_init(self):
        """
        Test the creation of Concatenate nodes.
        """

        # One parent only
        X = GaussianARD(0, 1, plates=(3, ), shape=())
        Y = Concatenate(X)
        self.assertEqual(Y.plates, (3, ))
        self.assertEqual(Y.dims, ((), ()))

        X = GaussianARD(0, 1, plates=(3, ), shape=(2, 4))
        Y = Concatenate(X)
        self.assertEqual(Y.plates, (3, ))
        self.assertEqual(Y.dims, ((2, 4), (2, 4, 2, 4)))

        # Two parents
        X1 = GaussianARD(0, 1, plates=(2, ), shape=())
        X2 = GaussianARD(0, 1, plates=(3, ), shape=())
        Y = Concatenate(X1, X2)
        self.assertEqual(Y.plates, (5, ))
        self.assertEqual(Y.dims, ((), ()))

        # Two parents with shapes
        X1 = GaussianARD(0, 1, plates=(2, ), shape=(4, 6))
        X2 = GaussianARD(0, 1, plates=(3, ), shape=(4, 6))
        Y = Concatenate(X1, X2)
        self.assertEqual(Y.plates, (5, ))
        self.assertEqual(Y.dims, ((4, 6), (4, 6, 4, 6)))

        # Two parents with non-default axis
        X1 = GaussianARD(0, 1, plates=(2, 4), shape=())
        X2 = GaussianARD(0, 1, plates=(3, 4), shape=())
        Y = Concatenate(X1, X2, axis=-2)
        self.assertEqual(Y.plates, (5, 4))
        self.assertEqual(Y.dims, ((), ()))

        # Three parents
        X1 = GaussianARD(0, 1, plates=(2, ), shape=())
        X2 = GaussianARD(0, 1, plates=(3, ), shape=())
        X3 = GaussianARD(0, 1, plates=(4, ), shape=())
        Y = Concatenate(X1, X2, X3)
        self.assertEqual(Y.plates, (9, ))
        self.assertEqual(Y.dims, ((), ()))

        # Constant parent
        X1 = [7.2, 3.5]
        X2 = GaussianARD(0, 1, plates=(3, ), shape=())
        Y = Concatenate(X1, X2)
        self.assertEqual(Y.plates, (5, ))
        self.assertEqual(Y.dims, ((), ()))

        # Different moments
        X1 = GaussianARD(0, 1, plates=(3, ))
        X2 = Gamma(1, 1, plates=(4, ))
        self.assertRaises(ValueError, Concatenate, X1, X2)

        # Incompatible shapes
        X1 = GaussianARD(0, 1, plates=(3, ), shape=(2, ))
        X2 = GaussianARD(0, 1, plates=(2, ), shape=())
        self.assertRaises(ValueError, Concatenate, X1, X2)

        # Incompatible plates
        X1 = GaussianARD(0, 1, plates=(4, 3), shape=())
        X2 = GaussianARD(0, 1, plates=(5, 2), shape=())
        self.assertRaises(ValueError, Concatenate, X1, X2)

        pass
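A small usage sketch of the node under test: concatenation joins parents along
a plate axis, so the result can act as a single plated parent (names and
shapes here are illustrative only):

    from bayespy.nodes import GaussianARD, Concatenate
    X1 = GaussianARD(0, 1, plates=(2,), shape=())
    X2 = GaussianARD(0, 1, plates=(3,), shape=())
    X = Concatenate(X1, X2)     # plates (5,), as the tests above verify
    Y = GaussianARD(X, 1)       # one child observation per concatenated plate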
Exemplo n.º 39
0
    def test_messages(self):

        D = 2
        M = 3

        np.random.seed(42)

        def check(mu, Lambda, alpha, beta, ndim):

            X = GaussianGamma(
                mu,
                (
                    Lambda if isinstance(Lambda._moments, WishartMoments) else
                    Lambda.as_wishart(ndim=ndim)
                ),
                alpha,
                beta,
                ndim=ndim
            )

            self.assert_moments(
                X,
                postprocess=lambda u: [
                    u[0],
                    u[1] + linalg.transpose(u[1], ndim=ndim),
                    u[2],
                    u[3]
                ],
                rtol=1e-5,
                atol=1e-6,
                eps=1e-8
            )

            X.observe(
                (
                    np.random.randn(*(X.plates + X.dims[0])),
                    np.random.rand(*X.plates)
                )
            )

            self.assert_message_to_parent(X, mu)
            self.assert_message_to_parent(
                X,
                Lambda,
                postprocess=lambda m: [
                    m[0] + linalg.transpose(m[0], ndim=ndim),
                    m[1],
                ]
            )
            self.assert_message_to_parent(X, beta)

        check(
            Gaussian(np.random.randn(M, D), random.covariance(D), plates=(M,)),
            Wishart(D + np.random.rand(M), random.covariance(D), plates=(M,)),
            np.random.rand(M),
            Gamma(np.random.rand(M), np.random.rand(M), plates=(M,)),
            ndim=1
        )

        check(
            GaussianARD(np.random.randn(M, D), np.random.rand(M, D), ndim=0),
            Gamma(np.random.rand(M, D), np.random.rand(M, D)),
            np.random.rand(M, D),
            Gamma(np.random.rand(M, D), np.random.rand(M, D)),
            ndim=0
        )

        pass