Example #1
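These snippets appear to be drawn from the GPflow 0.x test suite. They all assume a shared import preamble along these lines (the etransforms path for TriDiagonalBlockRep is an assumption, not confirmed by the snippets themselves):

import numpy as np
import tensorflow as tf
import gpflow
from gpflow import kernels, ekernels
from gpflow.etransforms import TriDiagonalBlockRep  # module path assumed for GPflow 0.x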
    def setUp(self):
        self.test_graph = tf.Graph()
        self.rng = np.random.RandomState(1)  # this seed works with 60 GH points
        self.N = 4
        self.D = 2
        self.Xmu = self.rng.rand(self.N, self.D)
        self.Z = self.rng.rand(2, self.D)

        unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
        t = TriDiagonalBlockRep()
        self.Xcov = t.forward(unconstrained)

        # Set up "normal" kernels
        ekernel_classes = [ekernels.RBF, ekernels.Linear]
        kernel_classes = [kernels.RBF, kernels.Linear]
        params = [(self.D, 0.3 + self.rng.rand(),
                   self.rng.rand(2) + [0.5, 1.5], None, True),
                  (self.D, 0.3 + self.rng.rand(), None)]
        self.ekernels = [c(*p) for c, p in zip(ekernel_classes, params)]
        self.kernels = [c(*p) for c, p in zip(kernel_classes, params)]

        # Test summed kernels, non-overlapping
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernels.append(
            kernels.Add([
                kernels.RBF(1, rbfvariance, rbfard, [1], False),
                kernels.Linear(1, linvariance, [0])
            ]))
        self.kernels[-1].input_size = self.kernels[-1].input_dim
        for k in self.kernels[-1].kern_list:
            k.input_size = self.kernels[-1].input_size
        self.ekernels.append(
            ekernels.Add([
                ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                ekernels.Linear(1, linvariance, [0])
            ]))
        self.ekernels[-1].input_size = self.ekernels[-1].input_dim
        for k in self.ekernels[-1].kern_list:
            k.input_size = self.ekernels[-1].input_size

        # Test summed kernels, overlapping
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernels.append(
            kernels.Add([
                kernels.RBF(self.D, rbfvariance, rbfard, active_dims=[0, 1]),
                kernels.Linear(self.D, linvariance, active_dims=[0, 1])
            ]))
        self.ekernels.append(
            ekernels.Add([
                ekernels.RBF(self.D, rbfvariance, rbfard, active_dims=[0, 1]),
                ekernels.Linear(self.D, linvariance, active_dims=[0, 1])
            ]))

        self.assertTrue(self.ekernels[-2].on_separate_dimensions)
        self.assertFalse(self.ekernels[-1].on_separate_dimensions)
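A setUp like this is typically followed by tests that compare the quadrature path of the plain kernels against the closed-form ekernels. A minimal sketch of such a companion test, assuming the GPflow 0.x AutoFlow wrapper compute_eKdiag (the method name, argument order, and the use of Xcov[0] for the diagonal covariance blocks are all assumptions):

    def test_eKdiag(self):
        # hypothetical companion test, not part of the original snippet
        for k, ek in zip(self.kernels, self.ekernels):
            quad = k.compute_eKdiag(self.Xmu, self.Xcov[0])    # Gauss-Hermite quadrature
            exact = ek.compute_eKdiag(self.Xmu, self.Xcov[0])  # closed-form expectation
            np.testing.assert_allclose(quad, exact, rtol=1e-4)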
Example #2
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2

            self.rbf = ekernels.RBF(self.D, ARD=True)
            self.rbf.lengthscales = self.rng.rand(2) + [0.5, 1.5]
            self.rbf.variance = 0.3 + self.rng.rand()
            self.lin = ekernels.Linear(self.D)
            self.lin.variance = 0.3 + self.rng.rand()
            self.add = ekernels.Add([self.rbf, self.lin])

            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov = t.forward(unconstrained)[0, :, :, :]
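Because expectations are linear, the Add kernel built here should reproduce the sum of its parts. A hypothetical check along those lines, again assuming a 0.x-style compute_eKxz wrapper:

    def test_add_is_sum_of_parts(self):
        # hypothetical check, not part of the original snippet:
        # E[Kxz] of a sum kernel is the sum of the component expectations
        total = self.add.compute_eKxz(self.Z, self.Xmu, self.Xcov)
        parts = (self.rbf.compute_eKxz(self.Z, self.Xmu, self.Xcov) +
                 self.lin.compute_eKxz(self.Z, self.Xmu, self.Xcov))
        np.testing.assert_allclose(total, parts, rtol=1e-5)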
Example #3
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)

            self.Xcov_diag = 0.05 + self.rng.rand(self.N, self.D)
            self.Xcov = np.zeros(
                (self.Xcov_diag.shape[0], self.Xcov_diag.shape[1],
                 self.Xcov_diag.shape[1]))
            self.Xcov[
                (np.s_[:], ) +
                np.diag_indices(self.Xcov_diag.shape[1])] = self.Xcov_diag

            # Set up "normal" kernels
            ekernel_classes = [ekernels.RBF, ekernels.Linear]
            kernel_classes = [kernels.RBF, kernels.Linear]
            params = [(self.D, 0.3 + self.rng.rand(),
                       self.rng.rand(2) + [0.5, 1.5], None, True),
                      (self.D, 0.3 + self.rng.rand(), None)]
            self.ekernels = [c(*p) for c, p in zip(ekernel_classes, params)]
            self.kernels = [c(*p) for c, p in zip(kernel_classes, params)]

            # Test summed kernels, non-overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()
            self.kernels.append(
                kernels.Add([
                    kernels.RBF(1, rbfvariance, rbfard, [1], False),
                    kernels.Linear(1, linvariance, [0])
                ]))
            self.kernels[-1].input_size = self.kernels[-1].input_dim
            for k in self.kernels[-1].kern_list:
                k.input_size = self.kernels[-1].input_size
            self.ekernels.append(
                ekernels.Add([
                    ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                    ekernels.Linear(1, linvariance, [0])
                ]))
            self.ekernels[-1].input_size = self.ekernels[-1].input_dim
            for k in self.ekernels[-1].kern_list:
                k.input_size = self.ekernels[-1].input_size

            # Test summed kernels, overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()
            self.kernels.append(
                kernels.Add([
                    kernels.RBF(self.D, rbfvariance, rbfard),
                    kernels.Linear(self.D, linvariance)
                ]))
            self.ekernels.append(
                ekernels.Add([
                    ekernels.RBF(self.D, rbfvariance, rbfard),
                    ekernels.Linear(self.D, linvariance)
                ]))

            self.assertTrue(self.ekernels[-2].on_separate_dimensions)
            self.assertFalse(self.ekernels[-1].on_separate_dimensions)
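The diagonal-fill indexing used for self.Xcov above deserves a note: combining np.s_[:] with np.diag_indices writes an (N, D) array of variances onto the diagonals of an (N, D, D) stack of otherwise-zero covariance matrices. A self-contained numpy illustration:

import numpy as np

N, D = 4, 2
diag = 0.05 + np.random.rand(N, D)            # per-point, per-dimension variances
cov = np.zeros((N, D, D))                     # stack of N covariance matrices
cov[(np.s_[:],) + np.diag_indices(D)] = diag  # cov[n, i, i] = diag[n, i]
assert np.allclose(cov[1], np.diag(diag[1]))  # each slice is a diagonal matrix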
Example #4
 def test_kernelsActiveDims(self):
     ''' Test sum and product compositional kernels '''
     with self.test_session():
         Q = 2  # latent dimensions
         X_mean = gpflow.gplvm.PCA_reduce(self.Y, Q)
         kernsQuadratu = [
             kernels.RBF(1, active_dims=[0]) +
             kernels.Linear(1, active_dims=[1]),
             kernels.RBF(1, active_dims=[0]) +
             kernels.PeriodicKernel(1, active_dims=[1]),
             kernels.RBF(1, active_dims=[0]) *
             kernels.Linear(1, active_dims=[1]),
             kernels.RBF(Q) + kernels.Linear(Q)
         ]  # non-overlapping
         kernsAnalytic = [
             ekernels.Add([
                 ekernels.RBF(1, active_dims=[0]),
                 ekernels.Linear(1, active_dims=[1])
             ]),
             ekernels.Add([
                 ekernels.RBF(1, active_dims=[0]),
                 kernels.PeriodicKernel(1, active_dims=[1])
             ]),
             ekernels.Prod([
                 ekernels.RBF(1, active_dims=[0]),
                 ekernels.Linear(1, active_dims=[1])
             ]),
             ekernels.Add([ekernels.RBF(Q),
                           ekernels.Linear(Q)])
         ]
         fOnSeparateDims = [True, True, True, False]
         Z = np.random.permutation(X_mean.copy())[:self.M]
         # mq gets an explicit N(0, 1) prior below; ma relies on the default, which should match
         X_prior_mean = np.zeros((self.N, Q))
         X_prior_var = np.ones((self.N, Q))
         Xtest = self.rng.randn(10, Q)
         for kq, ka, sepDims in zip(kernsQuadratu, kernsAnalytic,
                                    fOnSeparateDims):
             kq.num_gauss_hermite_points = 20  # fewer GH points to speed up quadrature in tests
             # the analytic kernel must never fall back to quadrature;
             # zero GH points turns any accidental fallback into an error
             ka.kern_list[0].num_gauss_hermite_points = 0
             if sepDims:
                 self.assertTrue(ka.on_separate_dimensions,
                                 'analytic kernel must not use quadrature')
             mq = gpflow.gplvm.BayesianGPLVM(X_mean=X_mean,
                                             X_var=np.ones((self.N, Q)),
                                             Y=self.Y,
                                             kern=kq,
                                             M=self.M,
                                             Z=Z,
                                             X_prior_mean=X_prior_mean,
                                             X_prior_var=X_prior_var)
             ma = gpflow.gplvm.BayesianGPLVM(X_mean=X_mean,
                                             X_var=np.ones((self.N, Q)),
                                             Y=self.Y,
                                             kern=ka,
                                             M=self.M,
                                             Z=Z)
             mq.compile()
             ma.compile()
             ql = mq.compute_log_likelihood()
             al = ma.compute_log_likelihood()
             self.assertTrue(np.allclose(ql, al, atol=1e-2),
                             'Likelihood not equal %f<>%f' % (ql, al))
             mu_f_a, var_f_a = ma.predict_f(Xtest)
             mu_f_q, var_f_q = mq.predict_f(Xtest)
             self.assertTrue(np.allclose(mu_f_a, mu_f_q, atol=1e-4),
                             ('Posterior means different', mu_f_a - mu_f_q))
             self.assertTrue(
                 np.allclose(var_f_a, var_f_q, atol=1e-4),
                 ('Posterior vars different', var_f_a - var_f_q))
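num_gauss_hermite_points controls how finely the quadrature fallback approximates Gaussian expectations of the kernel, which is why the test caps it at 20 for speed. A pure-numpy sketch of the underlying idea for a one-dimensional RBF-style integrand (illustrative only, not GPflow code; all values are made up):

import numpy as np

mu, s2 = 0.3, 0.2   # mean and variance of x ~ N(mu, s2)
z, ell = -0.1, 0.7  # kernel centre and lengthscale
nodes, weights = np.polynomial.hermite.hermgauss(20)
x = mu + np.sqrt(2.0 * s2) * nodes  # change of variables for E[f(x)]
approx = (weights / np.sqrt(np.pi)) @ np.exp(-0.5 * (x - z) ** 2 / ell ** 2)
exact = np.sqrt(ell ** 2 / (ell ** 2 + s2)) * np.exp(-0.5 * (mu - z) ** 2 / (ell ** 2 + s2))
assert np.isclose(approx, exact, rtol=1e-6)  # 20 points suffice for this smooth integrand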
Example #5
Q = 5   # Q is set outside the original snippet; 5 matches the PCA comments below
M = 20  # number of inducing points
N = Y.shape[0]
# PCA dimensionality reduction: keep the first 5 components as the basis, giving 100 x 5
X_mean = gpflow.gplvm.PCA_reduce(Y, Q)

print('X_mean: ', X_mean.shape)
# permutation: shuffle the rows, then take the first 20, giving 20 x 5
# the so-called inducing points are presumably just points assumed to live in the latent space
Z = np.random.permutation(X_mean.copy())[:M]

print('Z: ', Z.shape)

# slice(0, 3): indices 0, 1, 2 -- a slice of 3 elements
# slice(3, 5): indices 3, 4 -- a slice of 2 elements, 5 dimensions in total
k = ekernels.Add([
    ekernels.RBF(3, ARD=False, active_dims=[0, 1, 2]),
    ekernels.Linear(2, ARD=False, active_dims=[3, 4])  # dims 3 and 4, matching the slice(3, 5) comment above
])
#k = ekernels.RBF(5, ARD=False, active_dims=[0,1,2,3,4])

m = gpflow.gplvm.BayesianGPLVM(X_mean=X_mean,
                               X_var=0.1 * np.ones((N, Q)),
                               Y=Y,
                               kern=k,
                               M=M,
                               Z=Z)

linit = m.compute_log_likelihood()
m.optimize(maxiter=4)

assert m.compute_log_likelihood() > linit  # a few optimiser steps should improve the bound
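After the short optimisation run, one might inspect how far the latent means moved from their PCA initialisation. A hypothetical follow-up, assuming the GPflow 0.x Param API where trained values are read back via .value:

X_learned = m.X_mean.value  # (N, Q) latent means after optimisation
print('mean shift from PCA init:', np.abs(X_learned - X_mean).mean())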