def setUp(self):
    self.test_graph = tf.Graph()
    with self.test_context():
        self._threshold = 0.5
        self.rng = np.random.RandomState(0)
        self.N = 4
        self.D = 2

        # Product kernel over non-overlapping active dimensions:
        # quadrature-based kernels.Product vs. analytic ekernels.Product.
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernel = kernels.Product([
            kernels.RBF(1, rbfvariance, rbfard, [1], False),
            kernels.Linear(1, linvariance, [0])
        ])
        self.ekernel = ekernels.Product([
            ekernels.RBF(1, rbfvariance, rbfard, [1], False),
            ekernels.Linear(1, linvariance, [0])
        ])

        self.Xmu = self.rng.rand(self.N, self.D)
        self.Xcov = self.rng.rand(self.N, self.D)
        self.Z = self.rng.rand(2, self.D)
def rbf_lin_prod_kern():
    return kernels.Product([
        kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand() + 1., active_dims=[0]),
        kernels.Linear(1, variance=rng.rand(), active_dims=[1])
    ])
                                   lengthscales=rng.rand() + 1.0),
        kernels.Linear(variance=rng.rand()),
    ]),
    "rbf_lin_sum2": kernels.Sum([
        kernels.Linear(variance=rng.rand()),
        kernels.SquaredExponential(variance=rng.rand(), lengthscales=rng.rand() + 1.0),
        kernels.Linear(variance=rng.rand()),
        kernels.SquaredExponential(variance=rng.rand(), lengthscales=rng.rand() + 1.0),
    ]),
    "rbf_lin_prod": kernels.Product([
        kernels.SquaredExponential(variance=rng.rand(), lengthscales=rng.rand() + 1.0,
                                   active_dims=[0]),
        kernels.Linear(variance=rng.rand(), active_dims=[1]),
    ]),
}


def kerns(*args):
    return [_kerns[k] for k in args]


def distrs(*args):
    return [_distrs[k] for k in args]


def means(*args):
    return [_means[k] for k in args]
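# Usage sketch (not part of the original fixtures): the kerns/distrs/means helpers
# above are presumably used to parametrize tests that compare GPflow's analytic
# kernel expectations against Gauss-Hermite quadrature. The helper below is a
# minimal, self-contained illustration of that comparison for <diag K(x, x)>_p.
# gpflow.expectations.expectation / quadrature_expectation and
# gpflow.probability_distributions.Gaussian are real GPflow 2 entry points, but
# the helper name, the input distribution, dimensions, and tolerances are
# assumptions made for illustration only.
import numpy as np
from gpflow.expectations import expectation, quadrature_expectation
from gpflow.probability_distributions import Gaussian


def check_eKdiag_matches_quadrature(kernel, num_data=5, dim=2, seed=0):
    rng = np.random.RandomState(seed)
    Xmu = rng.randn(num_data, dim)
    L = rng.randn(num_data, dim, dim)
    Xcov = L @ np.transpose(L, (0, 2, 1))  # batch of PSD covariance matrices
    p = Gaussian(Xmu, Xcov)
    analytic = expectation(p, kernel)         # analytic expectation where registered
    quad = quadrature_expectation(p, kernel)  # same integral via quadrature
    np.testing.assert_allclose(analytic, quad, rtol=1e-2, atol=1e-8)


# e.g.:
# for kernel in kerns("rbf_lin_sum2", "rbf_lin_prod"):
#     check_eKdiag_matches_quadrature(kernel)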
def rbf_prod_seperate_dims():
    return kernels.Product([
        kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand(), active_dims=[0]),
        kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand(), active_dims=[1])
    ])
class Data:
    rng = np.random.RandomState(1)
    num_data = 5
    num_ind = 4
    D_in = 2
    D_out = 2

    Xmu = rng.randn(num_data, D_in)
    L = gen_L(rng, num_data, D_in, D_in)
    Xvar = np.array([l @ l.T for l in L])
    Z = rng.randn(num_ind, D_in)

    # distributions don't need to be compiled (no Parameter objects),
    # but the members should be Tensors created in the same graph
    graph = tf.Graph()
    with test_util.session_context(graph) as sess:
        gauss = Gaussian(tf.constant(Xmu), tf.constant(Xvar))
        dirac = Gaussian(tf.constant(Xmu),
                         tf.constant(np.zeros((num_data, D_in, D_in))))
        gauss_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(rng.rand(num_data, D_in)))
        dirac_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(np.zeros((num_data, D_in))))
        dirac_markov_gauss = MarkovGaussian(
            tf.constant(Xmu),
            tf.constant(np.zeros((2, num_data, D_in, D_in))))

        # create the covariance for the pairwise markov-gaussian
        # (a consistency check on this construction is sketched after the class)
        dummy_gen = lambda rng, n, *shape: np.array(
            [rng.randn(*shape) for _ in range(n)])
        L_mg = dummy_gen(rng, num_data, D_in, 2 * D_in)  # N+1 x D x 2D
        LL = np.concatenate((L_mg[:-1], L_mg[1:]), 1)  # N x 2D x 2D
        Xcov = LL @ np.transpose(LL, (0, 2, 1))
        Xc = np.concatenate((Xcov[:, :D_in, :D_in],
                             Xcov[-1:, D_in:, D_in:]), 0)  # N+1 x D x D
        Xcross = np.concatenate((Xcov[:, :D_in, D_in:],
                                 np.zeros((1, D_in, D_in))), 0)  # N+1 x D x D
        Xcc = np.stack([Xc, Xcross])  # 2 x N+1 x D x D
        markov_gauss = MarkovGaussian(Xmu, Xcc)

    with gpflow.decors.defer_build():
        # features
        ip = features.InducingPoints(Z)

        # kernels
        rbf_prod_seperate_dims = kernels.Product([
            kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand(), active_dims=[0]),
            kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand(), active_dims=[1])
        ])
        rbf_lin_sum = kernels.Sum([
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.Linear(D_in, variance=rng.rand())
        ])
        rbf = kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand())
        lin_kern = kernels.Linear(D_in, variance=rng.rand())

        # mean functions
        lin = mean_functions.Linear(rng.rand(D_in, D_out), rng.rand(D_out))
        iden = mean_functions.Identity(D_in)  # Note: Identity can only be used if D_in == D_out
        zero = mean_functions.Zero(output_dim=D_out)
        const = mean_functions.Constant(rng.rand(D_out))
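# Consistency-check sketch (not part of the original fixtures) for the pairwise
# Markov-Gaussian covariance built in Data above: because consecutive joint factors
# LL[n] and LL[n + 1] share the block L_mg[n + 1], the bottom-right D x D block of
# joint covariance n must equal the top-left block of joint covariance n + 1, i.e.
# consecutive pairwise covariances agree on the shared marginal. The helper name
# and tolerance are assumptions made for illustration.
def check_markov_gauss_consistency():
    joint = Data.Xcov  # N x 2D x 2D joint covariances of (x_n, x_{n+1})
    D = Data.D_in
    for n in range(len(joint) - 1):
        np.testing.assert_allclose(joint[n, D:, D:], joint[n + 1, :D, :D], atol=1e-12)


# e.g.: check_markov_gauss_consistency()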