def test_monte_carlo_2_din(white, mean):
    """Moment-matching uncertain prediction agrees with Monte-Carlo estimates (2-D input).

    Fixes vs. previous version:
    - `num_latent` -> `num_latent_gps`: matches the GPflow 2.x SVGP constructor
      keyword used by the sibling tests in this file.
    - replaced the hand-rolled `@tf.function` negative-log-marginal-likelihood
      closure with `model.training_loss_closure(...)` + `compile=True`, the
      idiom the other tests here already use.
    """
    kernel = gpflow.kernels.SquaredExponential(variance=rng.rand())
    mean_function = mean_function_factory(mean, DataMC2.D_in, DataMC2.D_out)
    model = MomentMatchingSVGP(
        kernel,
        gpflow.likelihoods.Gaussian(),
        num_latent_gps=DataMC2.D_out,
        mean_function=mean_function,
        inducing_variable=DataMC2.X.copy(),
        whiten=white,
    )
    # Full output covariance is required for the moment-matching comparison.
    model.full_output_cov = True
    training_loop(
        model.training_loss_closure(DataMC2.data),
        optimizer=tf.optimizers.Adam(),
        var_list=model.trainable_variables,
        maxiter=100,
        compile=True,
    )
    mean1, var1 = model.uncertain_predict_f_moment_matching(
        *map(tf.convert_to_tensor, [DataMC2.Xnew_mu, DataMC2.Xnew_covar])
    )
    # Compare against per-point Monte-Carlo predictions; DataMC2.L holds the
    # Cholesky factors of the input covariances.
    for n in range(DataMC2.N_new):
        mean2, var2 = model.uncertain_predict_f_monte_carlo(
            DataMC2.Xnew_mu[n, ...], DataMC2.L[n, ...]
        )
        assert_allclose(mean1[n, ...], mean2, atol=1e-2)
        assert_allclose(var1[n, ...], var2, atol=1e-2)
def test_monte_carlo_1_din(white, mean):
    """Moment-matching uncertain prediction agrees with Monte-Carlo estimates (1-D input)."""
    sq_exp = gpflow.kernels.SquaredExponential(variance=rng.rand())
    mf = mean_function_factory(mean, DataMC1.D_in, DataMC1.D_out)
    model = MomentMatchingSVGP(
        sq_exp,
        gpflow.likelihoods.Gaussian(),
        num_latent_gps=DataMC1.D_out,
        mean_function=mf,
        inducing_variable=DataMC1.X.copy(),
        whiten=white,
    )
    # The moment-matching path needs the full output covariance.
    model.full_output_cov = True
    training_loop(
        model.training_loss_closure(DataMC1.data),
        optimizer=tf.optimizers.Adam(),
        var_list=model.trainable_variables,
        maxiter=200,
        compile=True,
    )
    mm_mean, mm_var = model.uncertain_predict_f_moment_matching(
        tf.convert_to_tensor(DataMC1.Xnew_mu),
        tf.convert_to_tensor(DataMC1.Xnew_covar),
    )
    # Per test point: Monte-Carlo prediction takes the input standard deviation
    # (sqrt of the 1-D covariance) and should match the analytic moments.
    for idx in range(DataMC1.N_new):
        mc_mean, mc_var = model.uncertain_predict_f_monte_carlo(
            DataMC1.Xnew_mu[idx, ...], DataMC1.Xnew_covar[idx, ...] ** 0.5
        )
        assert_allclose(mm_mean[idx, ...], mc_mean, atol=1e-3, rtol=1e-1)
        assert_allclose(mm_var[idx, ...], mc_var, atol=1e-2, rtol=1e-1)
def test_no_uncertainty(white, mean):
    """With (near-)zero input uncertainty, uncertain prediction matches plain predict_f."""
    mf = mean_function_factory(mean, Data.D_in, Data.D_out)
    sq_exp = gpflow.kernels.SquaredExponential(variance=rng.rand())
    model = MomentMatchingSVGP(
        sq_exp,
        gpflow.likelihoods.Gaussian(),
        num_latent_gps=Data.D_out,
        mean_function=mf,
        inducing_variable=Data.X.copy(),
        whiten=white,
    )
    # Marginal (per-output) variances only; no cross-output covariance here.
    model.full_output_cov = False
    training_loop(
        model.training_loss_closure(Data.data),
        optimizer=tf.optimizers.Adam(),
        var_list=model.trainable_variables,
        maxiter=100,
        compile=True,
    )
    plain_mean, plain_var = model.predict_f(Data.Xnew_mu)
    mm_mean, mm_var = model.uncertain_predict_f_moment_matching(
        tf.convert_to_tensor(Data.Xnew_mu),
        tf.convert_to_tensor(Data.Xnew_covar),
    )
    assert_allclose(plain_mean, mm_mean)
    for idx in range(Data.N_new):
        assert_allclose(plain_var[idx, :], mm_var[idx, ...])
def test_training_loop_converges():
    """Adam-driven training_loop reaches the same optimum as a Scipy minimize."""
    adam_model = create_model()
    reference = create_model()
    # Reference optimum via the second-order Scipy optimizer.
    gpflow.optimizers.Scipy().minimize(reference.training_loss, reference.trainable_variables)
    training_loop(
        adam_model.training_loss,
        tf.optimizers.Adam(learning_rate=0.01),
        adam_model.trainable_variables,
        maxiter=5000,
        compile=True,
    )
    assert_models_close(adam_model, reference, rtol=1e-5)
def test_training_loop_compiles():
    """Compiled and eager training_loop runs yield equivalent models."""
    compiled_model = create_model()
    eager_model = create_model()
    # Train two identical models, differing only in graph compilation.
    for model, use_compile in ((compiled_model, True), (eager_model, False)):
        training_loop(
            model.training_loss,
            tf.optimizers.Adam(),
            model.trainable_variables,
            maxiter=50,
            compile=use_compile,
        )
    assert_models_close(compiled_model, eager_model)