# Imports assumed by these excerpts (repo-local helpers such as
# SupportedBaseKernels, subroutine and allclose are defined elsewhere
# in the test suite):
import tensorflow as tf
from gpflow import config as gpflow_config
from gpflow import covariances, kernels, mean_functions
from gpflow.config import default_float as floatx
from gpflow.inducing_variables import (InducingPoints,
                                       SeparateIndependentInducingVariables,
                                       SharedIndependentInducingVariables)
from gpflow.models import GPR, SVGP


def main(config):
    assert config is not None, ValueError
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (config.input_dims ** 0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims ** 0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        # Zero q_sqrt: the variational posterior collapses onto its mean.
        q_sqrt = tf.zeros([1] + 2 * [config.num_cond], dtype=floatx())
        kern = cls(lengthscales=lenscales, variance=config.kernel_variance)
        Z = InducingPoints(tf.random.uniform(Z_shape, dtype=floatx()))

        const = tf.random.normal([1], dtype=floatx())
        model = SVGP(kernel=kern,
                     likelihood=None,
                     inducing_variable=Z,
                     mean_function=mean_functions.Constant(c=const),
                     q_sqrt=q_sqrt)

        # Compare sample-based moments against the analytic predictive.
        mf, Sff = subroutine(config, model, X)
        mg, Sgg = model.predict_f(X, full_cov=True)

        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
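# For reference: a hypothetical sketch of the `allclose` and `subroutine`
# helpers the tests rely on. Names and signatures are inferred from the call
# sites, not taken from the source repo; `config.num_samples` is an assumed
# attribute. The real subroutine presumably draws pathwise samples from the
# model and returns their empirical moments.
import numpy as np

def allclose(a, b, rtol, atol):
    # Elementwise comparison with relative and absolute tolerances,
    # mirroring np.allclose semantics.
    return np.allclose(np.asarray(a), np.asarray(b), rtol=rtol, atol=atol)

def subroutine(config, model, X):
    # Estimate the predictive mean/covariance empirically from model samples.
    samples = model.predict_f_samples(X, num_samples=config.num_samples)  # [S, N, L]
    mean = tf.reduce_mean(samples, axis=0)
    resid = samples - mean
    cov = tf.einsum("snl,sml->lnm", resid, resid) / (config.num_samples - 1)
    return mean, cov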
def main(config):
    assert config is not None, ValueError
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    allK = []
    allZ = []
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (config.input_dims ** 0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims ** 0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())
        rel_variance = tf.random.uniform(shape=[],
                                         minval=0.9,
                                         maxval=1.1,
                                         dtype=floatx())

        allK.append(cls(lengthscales=lenscales,
                        variance=config.kernel_variance * rel_variance))
        allZ.append(InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))

    # One base kernel and one set of inducing points per latent GP.
    kern = kernels.SeparateIndependent(allK)
    Z = SeparateIndependentInducingVariables(allZ)

    # Kuu is [L, M, M] here, so q_sqrt is a per-latent rescaled Cholesky.
    Kuu = covariances.Kuu(Z, kern, jitter=gpflow_config.default_jitter())
    q_sqrt = tf.linalg.cholesky(Kuu) * tf.random.uniform(
        shape=[kern.num_latent_gps, 1, 1],
        minval=0.0,
        maxval=0.5,
        dtype=floatx())

    const = tf.random.normal([len(kern.kernels)], dtype=floatx())
    model = SVGP(kernel=kern,
                 likelihood=None,
                 inducing_variable=Z,
                 mean_function=mean_functions.Constant(c=const),
                 q_sqrt=q_sqrt,
                 whiten=False,
                 num_latent_gps=len(allK))

    mf, Sff = subroutine(config, model, X)
    mg, Sgg = model.predict_f(X, full_cov=True)

    tol = config.error_tol
    assert allclose(mf, mg, tol, tol)
    assert allclose(Sff, Sgg, tol, tol)
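# In GPflow's multioutput API, Kuu for SeparateIndependentInducingVariables
# plus a SeparateIndependent kernel is a stacked [L, M, M] tensor (one block
# per latent GP), which is why the per-latent rescaled Cholesky above works.
# A minimal self-contained check with arbitrary toy sizes:
import numpy as np
from gpflow import covariances, kernels
from gpflow.inducing_variables import (InducingPoints,
                                       SeparateIndependentInducingVariables)

L, M, D = 3, 5, 2  # latent GPs, inducing points, input dims
toy_kern = kernels.SeparateIndependent(
    [kernels.SquaredExponential() for _ in range(L)])
toy_Z = SeparateIndependentInducingVariables(
    [InducingPoints(np.random.rand(M, D)) for _ in range(L)])
assert covariances.Kuu(toy_Z, toy_kern, jitter=1e-6).shape == (L, M, M)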
def main(config):
    assert config is not None, ValueError
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    Z_shape = config.num_cond, config.input_dims
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (config.input_dims ** 0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims ** 0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        base = cls(lengthscales=lenscales, variance=config.kernel_variance)
        kern = kernels.SharedIndependent(base, output_dim=2)
        Z = SharedIndependentInducingVariables(
            InducingPoints(tf.random.uniform(Z_shape, dtype=floatx())))

        # Kuu is shared across outputs ([M, M]); give the first latent GP a
        # deterministic posterior (zero Cholesky) and the second the prior's.
        Kuu = covariances.Kuu(Z, kern, jitter=gpflow_config.default_jitter())
        q_sqrt = tf.stack([
            tf.zeros(2 * [config.num_cond], dtype=floatx()),
            tf.linalg.cholesky(Kuu),
        ])

        const = tf.random.normal([2], dtype=floatx())
        model = SVGP(kernel=kern,
                     likelihood=None,
                     inducing_variable=Z,
                     mean_function=mean_functions.Constant(c=const),
                     q_sqrt=q_sqrt,
                     whiten=False,
                     num_latent_gps=2)

        mf, Sff = subroutine(config, model, X)
        mg, Sgg = model.predict_f(X, full_cov=True)

        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
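# By contrast, SharedIndependentInducingVariables with a SharedIndependent
# kernel yields a single [M, M] Kuu shared by all outputs, which is why the
# test above can stack two hand-built [M, M] factors into q_sqrt. A minimal
# check with toy sizes:
import numpy as np
from gpflow import covariances, kernels
from gpflow.inducing_variables import (InducingPoints,
                                       SharedIndependentInducingVariables)

M, D = 5, 2
toy_kern = kernels.SharedIndependent(kernels.SquaredExponential(), output_dim=2)
toy_Z = SharedIndependentInducingVariables(InducingPoints(np.random.rand(M, D)))
assert covariances.Kuu(toy_Z, toy_kern, jitter=1e-6).shape == (M, M)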
def main(config):
    assert config is not None, ValueError
    tf.random.set_seed(config.seed)
    gpflow_config.set_default_float(config.floatx)
    gpflow_config.set_default_jitter(config.jitter)

    X = tf.random.uniform([config.num_cond, config.input_dims], dtype=floatx())
    Xnew = tf.random.uniform([config.num_test, config.input_dims], dtype=floatx())
    for cls in SupportedBaseKernels:
        minval = config.rel_lengthscales_min * (config.input_dims ** 0.5)
        maxval = config.rel_lengthscales_max * (config.input_dims ** 0.5)
        lenscales = tf.random.uniform(shape=[config.input_dims],
                                      minval=minval,
                                      maxval=maxval,
                                      dtype=floatx())

        kern = cls(lengthscales=lenscales, variance=config.kernel_variance)
        const = tf.random.normal([1], dtype=floatx())

        # Draw noisy targets from the prior so the GPR posterior is exact.
        K = kern(X, full_cov=True)
        K = tf.linalg.set_diag(K, tf.linalg.diag_part(K) + config.noise_variance)
        L = tf.linalg.cholesky(K)
        y = L @ tf.random.normal([L.shape[-1], 1], dtype=floatx()) + const

        model = GPR(kernel=kern,
                    noise_variance=config.noise_variance,
                    data=(X, y),
                    mean_function=mean_functions.Constant(c=const))

        mf, Sff = subroutine(config, model, Xnew)
        mg, Sgg = model.predict_f(Xnew, full_cov=True)

        tol = config.error_tol
        assert allclose(mf, mg, tol, tol)
        assert allclose(Sff, Sgg, tol, tol)
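# To run any of the `main` functions by hand, a simple namespace with the
# attributes they read suffices. The values below are illustrative, not the
# repo's defaults, and `subroutine` may read further fields (e.g. a sample
# count) not shown here.
from types import SimpleNamespace

config = SimpleNamespace(seed=0,
                         floatx="float64",
                         jitter=1e-6,
                         num_cond=8,
                         num_test=16,
                         input_dims=3,
                         kernel_variance=1.0,
                         rel_lengthscales_min=0.1,
                         rel_lengthscales_max=1.0,
                         noise_variance=1e-2,
                         error_tol=1e-3)
main(config)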
def const_mean():
    return mean_functions.Constant(c=rng.randn(Data.D_out))
def markov_gauss():
    cov_params = rng.randn(num_data + 1, D_in, 2 * D_in) / 2.0  # (N+1)xDx2D
    Xcov = cov_params @ np.transpose(cov_params, (0, 2, 1))  # (N+1)xDxD
    Xcross = cov_params[:-1] @ np.transpose(cov_params[1:], (0, 2, 1))  # NxDxD
    Xcross = np.concatenate((Xcross, np.zeros((1, D_in, D_in))), 0)  # (N+1)xDxD
    Xcov = np.stack([Xcov, Xcross])  # 2x(N+1)xDxD
    return MarkovGaussian(Xmu_markov, ctt(Xcov))


_means = {
    "lin": mf.Linear(A=rng.randn(D_in, D_out), b=rng.randn(D_out)),
    "identity": mf.Identity(input_dim=D_in),
    "const": mf.Constant(c=rng.randn(D_out)),
    "zero": mf.Zero(output_dim=D_out),
}

_distrs = {
    "gauss": Gaussian(Xmu, Xcov),
    "dirac_gauss": Gaussian(Xmu, np.zeros((num_data, D_in, D_in))),
    "gauss_diag": DiagonalGaussian(Xmu, rng.rand(num_data, D_in)),
    "dirac_diag": DiagonalGaussian(Xmu, np.zeros((num_data, D_in))),
    "dirac_markov_gauss": MarkovGaussian(Xmu_markov,
                                         np.zeros((2, num_data + 1, D_in, D_in))),
    "markov_gauss": markov_gauss(),
}
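# For orientation: MarkovGaussian packs its covariance as [2, N+1, D, D],
# with slot 0 holding the time-wise marginals cov(x_n, x_n) and slot 1 the
# one-step cross-covariances cov(x_n, x_{n+1}), zero-padded at the end.
# A small numpy sketch of that layout (toy sizes, no GPflow needed):
import numpy as np

N, D = 4, 2
toy_rng = np.random.RandomState(0)
params = toy_rng.randn(N + 1, D, 2 * D) / 2.0
marginals = params @ np.transpose(params, (0, 2, 1))          # (N+1, D, D)
cross = params[:-1] @ np.transpose(params[1:], (0, 2, 1))     # (N, D, D)
cross = np.concatenate([cross, np.zeros((1, D, D))], axis=0)  # pad to N+1
packed = np.stack([marginals, cross])
assert packed.shape == (2, N + 1, D, D)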
def const():
    return mean_functions.Constant(rng.rand(Data.D_out))
class Data:
    rng = np.random.RandomState(1)
    num_data = 5
    num_ind = 4
    D_in = 2
    D_out = 2

    Xmu = rng.randn(num_data, D_in)
    L = gen_L(rng, num_data, D_in, D_in)
    Xvar = np.array([l @ l.T for l in L])
    Z = rng.randn(num_ind, D_in)

    # distributions don't need to be compiled (no Parameter objects),
    # but the members should be Tensors created in the same graph
    graph = tf.Graph()
    with test_util.session_context(graph) as sess:
        gauss = Gaussian(tf.constant(Xmu), tf.constant(Xvar))
        dirac = Gaussian(tf.constant(Xmu),
                         tf.constant(np.zeros((num_data, D_in, D_in))))
        gauss_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(rng.rand(num_data, D_in)))
        dirac_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(np.zeros((num_data, D_in))))
        dirac_markov_gauss = MarkovGaussian(
            tf.constant(Xmu),
            tf.constant(np.zeros((2, num_data, D_in, D_in))))

        # create the covariance for the pairwise markov-gaussian
        dummy_gen = lambda rng, n, *shape: np.array(
            [rng.randn(*shape) for _ in range(n)])
        L_mg = dummy_gen(rng, num_data, D_in, 2 * D_in)  # N x D x 2D
        LL = np.concatenate((L_mg[:-1], L_mg[1:]), 1)  # (N-1) x 2D x 2D
        Xcov = LL @ np.transpose(LL, (0, 2, 1))
        Xc = np.concatenate((Xcov[:, :D_in, :D_in],
                             Xcov[-1:, D_in:, D_in:]), 0)  # N x D x D
        Xcross = np.concatenate((Xcov[:, :D_in, D_in:],
                                 np.zeros((1, D_in, D_in))), 0)  # N x D x D
        Xcc = np.stack([Xc, Xcross])  # 2 x N x D x D
        markov_gauss = MarkovGaussian(Xmu, Xcc)

    with gpflow.decors.defer_build():
        # features
        ip = features.InducingPoints(Z)

        # kernels
        rbf_prod_seperate_dims = kernels.Product([
            kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand(),
                        active_dims=[0]),
            kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand(),
                        active_dims=[1]),
        ])
        rbf_lin_sum = kernels.Sum([
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.Linear(D_in, variance=rng.rand()),
        ])
        rbf = kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand())
        lin_kern = kernels.Linear(D_in, variance=rng.rand())

        # mean functions
        lin = mean_functions.Linear(rng.rand(D_in, D_out), rng.rand(D_out))
        iden = mean_functions.Identity(D_in)  # Identity requires D_in == D_out
        zero = mean_functions.Zero(output_dim=D_out)
        const = mean_functions.Constant(rng.rand(D_out))
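# These fixtures appear to feed GPflow 1.x's `gpflow.expectations.expectation`
# dispatcher, which evaluates analytic Gaussian expectations (the psi
# statistics). A sketch of typical call sites, assuming the TF1-era API and
# evaluation within the same graph/session as Data above:
from gpflow.expectations import expectation

eKdiag = expectation(Data.gauss, Data.rbf)            # E_p[k(x, x)],  [N]
eKxz = expectation(Data.gauss, (Data.rbf, Data.ip))   # psi1,          [N, M]
eKzxKxz = expectation(Data.gauss, (Data.rbf, Data.ip),
                      (Data.rbf, Data.ip))            # psi2,          [N, M, M]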