Example 1
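These snippets appear to come from GPflow's tests for analytic kernel expectations (GPflow 0.x/1.x on TensorFlow 1.x): each setUp builds matched pairs of quadrature kernels (kernels.*) and closed-form expectation kernels (ekernels.*). A minimal sketch of the imports they assume; the module paths, in particular the location of TriDiagonalBlockRep, are assumptions rather than verified against a specific release:

import numpy as np
import tensorflow as tf
import gpflow
from gpflow import kernels, ekernels  # quadrature vs. closed-form expectation kernels
from gpflow.etransforms import TriDiagonalBlockRep  # block-tridiagonal covariance transform (assumed path)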
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)

            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov = t.forward(unconstrained)

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]  # expectations in closed form, with active_dims slicing

            k1 = ekernels.RBF(2, variance)
            k2 = ekernels.RBF(2, variance)
            klin = ekernels.Linear(2, variance)
            self.pekernels = [k1, k2, klin]  # expectations in closed form, without slicing

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(2, variance)
            klin = kernels.Linear(2, variance)
            self.pkernels = [k1, klin]
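No test body is shown above; a hedged sketch of how such a fixture is typically exercised, assuming the GPflow 1.x autoflow wrappers (compute_eKdiag and friends) and that self.Xcov[0] holds the per-point covariance blocks (cf. Example 2):

    def test_eKdiag(self):
        # Hypothetical body: compare the quadrature fallback (kernels.RBF)
        # against the closed form (ekernels.RBF); the method names are
        # assumed from the GPflow 1.x API.
        with self.test_session():
            k, ek = self.kernels[0], self.ekernels[0]  # matching RBFs on active_dims=[0]
            quad = k.compute_eKdiag(self.Xmu, self.Xcov[0])
            exact = ek.compute_eKdiag(self.Xmu, self.Xcov[0])
            np.testing.assert_allclose(quad, exact, rtol=1e-4)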
Example 2
    def setUp(self):
        self.test_graph = tf.Graph()
        with self.test_context():
            self.rng = np.random.RandomState(0)

            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov_pairwise = t.forward(unconstrained)
            self.Xcov = self.Xcov_pairwise[0]  # no cross-covariances

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]

            k1 = ekernels.RBF(2, variance)
            k2 = ekernels.RBF(2, variance)
            klin = ekernels.Linear(2, variance)
            self.pekernels = [k1, k2, klin]

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(2, variance)
            klin = kernels.Linear(2, variance)
            self.pkernels = [k1, klin]
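The difference from Example 1 is that the full block representation is kept as Xcov_pairwise, which is what the pairwise cross-expectation <x_{t+1} K(x_t, Z)> consumes. A hedged sketch, with the method name compute_exKxz_pairwise assumed from GPflow 1.x:

    def test_exKxz_pairwise(self):
        # Hypothetical body; the pairwise representation carries the
        # cross-covariances between consecutive points that this
        # expectation needs.
        with self.test_context():
            k, ek = self.kernels[0], self.ekernels[0]  # matching RBFs on active_dims=[0]
            quad = k.compute_exKxz_pairwise(self.Z, self.Xmu, self.Xcov_pairwise)
            exact = ek.compute_exKxz_pairwise(self.Z, self.Xmu, self.Xcov_pairwise)
            np.testing.assert_allclose(quad, exact, rtol=1e-4)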
Example 3
    def setUp(self):
        self.test_graph = tf.Graph()
        with self.test_context():
            self.N = 4
            self.D = 2
            self.rng = np.random.RandomState(0)
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov = t.forward(unconstrained)

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]  # expectations in closed form, with active_dims slicing

            k1 = ekernels.RBF(2, variance)
            k2 = ekernels.RBF(2, variance)
            klin = ekernels.Linear(2, variance)
            self.pekernels = [k1, k2, klin]  # expectations in closed form, without slicing

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(2, variance)
            klin = kernels.Linear(2, variance)
            self.pkernels = [k1, klin]
Example 4
    def setUp(self):
        self.test_graph = tf.Graph()
        self.rng = np.random.RandomState(1)  # this seed works with 60 GH points
        self.N = 4
        self.D = 2
        self.Xmu = self.rng.rand(self.N, self.D)
        self.Z = self.rng.rand(2, self.D)

        unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
        t = TriDiagonalBlockRep()
        self.Xcov = t.forward(unconstrained)

        # Set up "normal" kernels
        ekernel_classes = [ekernels.RBF, ekernels.Linear]
        kernel_classes = [kernels.RBF, kernels.Linear]
        params = [(self.D, 0.3 + self.rng.rand(),
                   self.rng.rand(2) + [0.5, 1.5], None, True),
                  (self.D, 0.3 + self.rng.rand(), None)]
        self.ekernels = [c(*p) for c, p in zip(ekernel_classes, params)]
        self.kernels = [c(*p) for c, p in zip(kernel_classes, params)]

        # Test summed kernels, non-overlapping
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernels.append(
            kernels.Add([
                kernels.RBF(1, rbfvariance, rbfard, [1], False),
                kernels.Linear(1, linvariance, [0])
            ]))
        self.kernels[-1].input_size = self.kernels[-1].input_dim
        for k in self.kernels[-1].kern_list:
            k.input_size = self.kernels[-1].input_size
        self.ekernels.append(
            ekernels.Add([
                ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                ekernels.Linear(1, linvariance, [0])
            ]))
        self.ekernels[-1].input_size = self.ekernels[-1].input_dim
        for k in self.ekernels[-1].kern_list:
            k.input_size = self.ekernels[-1].input_size

        # Test summed kernels, overlapping
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernels.append(
            kernels.Add([
                kernels.RBF(self.D, rbfvariance, rbfard, active_dims=[0, 1]),
                kernels.Linear(self.D, linvariance, active_dims=[0, 1])
            ]))
        self.ekernels.append(
            ekernels.Add([
                ekernels.RBF(self.D, rbfvariance, rbfard, active_dims=[0, 1]),
                ekernels.Linear(self.D, linvariance, active_dims=[0, 1])
            ]))

        self.assertTrue(self.ekernels[-2].on_separate_dimensions)
        self.assertFalse(self.ekernels[-1].on_separate_dimensions)
Example 5
    def test_2d(self):
        with self.test_context():
            # test default Z on 2D example
            Q = 2  # latent dimensions
            X_mean = gpflow.models.PCA_reduce(self.Y, Q)
            k = ekernels.RBF(Q, ARD=False)
            m = gpflow.models.BayesianGPLVM(
                X_mean=X_mean,
                X_var=np.ones((self.N, Q)),
                Y=self.Y,
                kern=k,
                M=self.M)
            linit = m.compute_log_likelihood()
            opt = gpflow.train.ScipyOptimizer()
            opt.minimize(m, maxiter=2)
            self.assertTrue(m.compute_log_likelihood() > linit)

            # test prediction
            Xtest = self.rng.randn(10, Q)
            mu_f, var_f = m.predict_f(Xtest)
            mu_fFull, var_fFull = m.predict_f_full_cov(Xtest)
            self.assertTrue(np.allclose(mu_fFull, mu_f))
            # check full covariance diagonal
            for i in range(self.D):
                self.assertTrue(np.allclose(var_f[:, i], np.diag(var_fFull[:, :, i])))
Example 6
    def setUp(self):
        with self.test_session():
            self.D = 2
            self.rng = np.random.RandomState(0)
            self.Xmu = self.rng.rand(10, self.D)
            self.Z = self.rng.rand(4, self.D)
            self.Xcov = np.zeros((self.Xmu.shape[0], self.D, self.D))
            self.Xcovc = np.zeros((self.Xmu.shape[0], self.D, self.D))
            k1 = ekernels.RBF(self.D, ARD=True)
            k1.lengthscales = self.rng.rand(2) + [0.5, 1.5]
            k1.variance = 0.3 + self.rng.rand()
            k2 = ekernels.RBF(self.D)
            k2.lengthscales = self.rng.rand(1) + [0.5]
            k2.variance = 0.3 + self.rng.rand()
            klin = ekernels.Linear(self.D, variance=0.3 + self.rng.rand())
            self.kernels = [k1, klin, k2]
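Both covariance arrays here are all zeros, i.e. the input distribution is degenerate, so every expectation should collapse to a plain kernel evaluation. A hedged check under the same API assumptions as above:

    def test_degenerate_eKxz(self):
        # With zero input covariance, <K(x, Z)> reduces to K(Xmu, Z);
        # compute_eKxz / compute_K are assumed autoflow wrappers.
        with self.test_session():
            for k in self.kernels:
                expected = k.compute_K(self.Xmu, self.Z)
                actual = k.compute_eKxz(self.Z, self.Xmu, self.Xcov)
                np.testing.assert_allclose(actual, expected, rtol=1e-6)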
Example 7
    def setUp(self):
        with self.test_session():
            self._threshold = 0.5
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2

            # Test summed kernels, non-overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()

            self.kernel = kernels.Prod([
                kernels.RBF(1, rbfvariance, rbfard, [1], False),
                kernels.Linear(1, linvariance, [0])
            ])

            self.ekernel = ekernels.Prod([
                ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                ekernels.Linear(1, linvariance, [0])
            ])

            self.Xmu = self.rng.rand(self.N, self.D)
            self.Xcov = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)
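setUp stores a loose relative threshold (0.5) but no test body. A hedged sketch of how the analytic product might be compared against quadrature, assuming the diagonal N x D covariance is accepted directly and the threshold bounds the relative error:

    def test_prod_eKdiag(self):
        # Hypothetical body; compute_eKdiag is assumed API, and the loose
        # threshold reflects that the quadrature product is approximate.
        with self.test_session():
            quad = self.kernel.compute_eKdiag(self.Xmu, self.Xcov)
            exact = self.ekernel.compute_eKdiag(self.Xmu, self.Xcov)
            rel_err = np.max(np.abs(quad - exact) / np.abs(exact))
            self.assertTrue(rel_err < self._threshold)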
Example 8
    def test_1d(self):
        with self.test_session():
            Q = 1  # latent dimensions
            k = ekernels.RBF(Q)
            Z = np.linspace(0, 1, self.M)
            Z = np.expand_dims(Z, Q)  # inducing points
            m = gpflow.gplvm.BayesianGPLVM(X_mean=np.zeros((self.N, Q)),
                                           X_var=np.ones((self.N, Q)),
                                           Y=self.Y,
                                           kern=k,
                                           M=self.M,
                                           Z=Z)
            linit = m.compute_log_likelihood()
            m.optimize(maxiter=2)
            self.assertTrue(m.compute_log_likelihood() > linit)
Example 9
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2

            self.rbf = ekernels.RBF(self.D, ARD=True)
            self.rbf.lengthscales = self.rng.rand(2) + [0.5, 1.5]
            self.rbf.variance = 0.3 + self.rng.rand()
            self.lin = ekernels.Linear(self.D)
            self.lin.variance = 0.3 + self.rng.rand()
            self.add = ekernels.Add([self.rbf, self.lin])

            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov = t.forward(unconstrained)[0, :, :, :]  # keep only the diagonal blocks (no cross-covariances)
Example 10
    def test_kernelsActiveDims(self):
        ''' Test sum and product compositional kernels '''
        with self.test_context():
            Q = 2  # latent dimensions
            X_mean = gpflow.models.PCA_reduce(self.Y, Q)
            kernsQuadratu = [
                kernels.RBF(1, active_dims=[0]) + kernels.Linear(1, active_dims=[1]),
                kernels.RBF(1, active_dims=[0]) + kernels.Periodic(1, active_dims=[1]),
                kernels.RBF(1, active_dims=[0]) * kernels.Linear(1, active_dims=[1]),
                kernels.RBF(Q)+kernels.Linear(Q)]  # non-overlapping
            kernsAnalytic = [
                ekernels.Sum([
                    ekernels.RBF(1, active_dims=[0]),
                    ekernels.Linear(1, active_dims=[1])]),
                ekernels.Sum([
                    ekernels.RBF(1, active_dims=[0]),
                    kernels.Periodic(1, active_dims=[1])]),
                ekernels.Product([
                    ekernels.RBF(1, active_dims=[0]),
                    ekernels.Linear(1, active_dims=[1])]),
                ekernels.Sum([
                    ekernels.RBF(Q),
                    ekernels.Linear(Q)])
            ]
            fOnSeparateDims = [True, True, True, False]
            Z = np.random.permutation(X_mean.copy())[:self.M]
            # Also test default N(0,1) is used
            X_prior_mean = np.zeros((self.N, Q))
            X_prior_var = np.ones((self.N, Q))
            Xtest = self.rng.randn(10, Q)

        for kq, ka, sepDims in zip(kernsQuadratu, kernsAnalytic, fOnSeparateDims):
            with self.test_context():
                kq.num_gauss_hermite_points = 20  # speed up quadrature for tests
                # RBF should throw error if quadrature is used
                ka.kern_list[0].num_gauss_hermite_points = 0
                if sepDims:
                    self.assertTrue(
                        ka.on_separate_dimensions,
                        'analytic kernel must not use quadrature')
                mq = gpflow.models.BayesianGPLVM(
                    X_mean=X_mean,
                    X_var=np.ones((self.N, Q)),
                    Y=self.Y,
                    kern=kq,
                    M=self.M,
                    Z=Z,
                    X_prior_mean=X_prior_mean,
                    X_prior_var=X_prior_var)
                ma = gpflow.models.BayesianGPLVM(
                    X_mean=X_mean,
                    X_var=np.ones((self.N, Q)),
                    Y=self.Y,
                    kern=ka,
                    M=self.M,
                    Z=Z)
                ql = mq.compute_log_likelihood()
                al = ma.compute_log_likelihood()
                self.assertTrue(np.allclose(ql, al, atol=1e-2),
                                'Likelihood not equal %f<>%f' % (ql, al))
                mu_f_a, var_f_a = ma.predict_f(Xtest)
                mu_f_q, var_f_q = mq.predict_f(Xtest)
                self.assertTrue(np.allclose(mu_f_a, mu_f_q, atol=1e-4),
                                ('Posterior means different', mu_f_a-mu_f_q))
                self.assertTrue(np.allclose(var_f_a, var_f_q, atol=1e-4),
                                ('Posterior vars different', var_f_a-var_f_q))
Example 11
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)

            self.Xcov_diag = 0.05 + self.rng.rand(self.N, self.D)
            self.Xcov = np.zeros(
                (self.Xcov_diag.shape[0], self.Xcov_diag.shape[1],
                 self.Xcov_diag.shape[1]))
            self.Xcov[
                (np.s_[:], ) +
                np.diag_indices(self.Xcov_diag.shape[1])] = self.Xcov_diag

            # Set up "normal" kernels
            ekernel_classes = [ekernels.RBF, ekernels.Linear]
            kernel_classes = [kernels.RBF, kernels.Linear]
            params = [(self.D, 0.3 + self.rng.rand(),
                       self.rng.rand(2) + [0.5, 1.5], None, True),
                      (self.D, 0.3 + self.rng.rand(), None)]
            self.ekernels = [c(*p) for c, p in zip(ekernel_classes, params)]
            self.kernels = [c(*p) for c, p in zip(kernel_classes, params)]

            # Test summed kernels, non-overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()
            self.kernels.append(
                kernels.Add([
                    kernels.RBF(1, rbfvariance, rbfard, [1], False),
                    kernels.Linear(1, linvariance, [0])
                ]))
            self.kernels[-1].input_size = self.kernels[-1].input_dim
            for k in self.kernels[-1].kern_list:
                k.input_size = self.kernels[-1].input_size
            self.ekernels.append(
                ekernels.Add([
                    ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                    ekernels.Linear(1, linvariance, [0])
                ]))
            self.ekernels[-1].input_size = self.ekernels[-1].input_dim
            for k in self.ekernels[-1].kern_list:
                k.input_size = self.ekernels[-1].input_size

            # Test summed kernels, overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()
            self.kernels.append(
                kernels.Add([
                    kernels.RBF(self.D, rbfvariance, rbfard),
                    kernels.Linear(self.D, linvariance)
                ]))
            self.ekernels.append(
                ekernels.Add([
                    ekernels.RBF(self.D, rbfvariance, rbfard),
                    ekernels.Linear(self.D, linvariance)
                ]))

            self.assertTrue(self.ekernels[-2].on_separate_dimensions)
            self.assertFalse(self.ekernels[-1].on_separate_dimensions)
Example 12
M = 20
N = Y.shape[0]
Q = 5  # latent dimensions; the comments below assume 5 principal components
# PCA reduction: use the first 5 principal components as the basis (100*5)
X_mean = gpflow.gplvm.PCA_reduce(Y, Q)

print('X_mean: ', X_mean.shape)
# permutation: randomly shuffle the rows, then keep the first 20 (20*5)
# the so-called inducing points are presumably just points assumed to live in the latent space
Z = np.random.permutation(X_mean.copy())[:M]

print('Z: ', Z.shape)

# slice(0, 3): takes dims 0, 1, 2 (a 3-element slice)
# slice(3, 5): takes dims 3, 4 (a 2-element slice); 5 dims in total
k = ekernels.Add([
    ekernels.RBF(3, ARD=False, active_dims=[0, 1, 2]),
    ekernels.Linear(2, ARD=False, active_dims=[3, 4])
])
#k = ekernels.RBF(5, ARD=False, active_dims=[0,1,2,3,4])

m = gpflow.gplvm.BayesianGPLVM(X_mean=X_mean,
                               X_var=0.1 * np.ones((N, Q)),
                               Y=Y,
                               kern=k,
                               M=M,
                               Z=Z)

linit = m.compute_log_likelihood()
m.optimize(maxiter=4)

assert (m.compute_log_likelihood() > linit)
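A hedged follow-up, assuming the GPflow 0.x parameter API used above: after optimizing, one would typically read off the learned latent embedding and predict through the model.

# Hypothetical follow-up; the .value attribute access is assumed from GPflow 0.x
X_learned = m.X_mean.value            # optimized latent means, N x Q
mu_f, var_f = m.predict_f(X_learned)  # posterior of f at the learned latent means
print('embedding:', X_learned.shape, 'prediction:', mu_f.shape)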