def test_lengthscale(self):
  """Checked calculations by hand, e.g.,

  np.exp(-((2.0 - 1.5)**2 / (2.0**2) + (2.5 - 2.0)**2 / (1.5**2)) / 2)
  np.exp(-((2.0 - 3.1)**2 / (2.0**2) + (2.5 - 4.2)**2 / (1.5**2)) / 2)
  np.exp(-((4.1 - 1.5)**2 / (2.0**2) + (5.0 - 2.0)**2 / (1.5**2)) / 2)
  np.exp(-((4.1 - 3.1)**2 / (2.0**2) + (5.0 - 4.2)**2 / (1.5**2)) / 2)
  """
  with self.test_session():
    X = tf.constant([[2.0, 2.5], [4.1, 5.0]])
    X2 = tf.constant([[1.5, 2.0], [3.1, 4.2]])
    lengthscale1 = tf.constant(2.0)
    lengthscale2 = tf.constant([2.0, 2.0])
    lengthscale3 = tf.constant([2.0, 1.5])
    self.assertAllClose(rbf(X, X2, lengthscale1).eval(),
                        [[0.939413, 0.598996], [0.139456, 0.814647]],
                        atol=1e-5, rtol=1e-5)
    self.assertAllClose(rbf(X, X2, lengthscale2).eval(),
                        [[0.939413, 0.598996], [0.139456, 0.814647]],
                        atol=1e-5, rtol=1e-5)
    self.assertAllClose(rbf(X, X2, lengthscale3).eval(),
                        [[0.916855, 0.452271], [0.058134, 0.765502]],
                        atol=1e-5, rtol=1e-5)
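# For reference: a minimal NumPy sketch of the ARD RBF kernel these tests
# exercise, assuming the semantics implied by the docstring above,
# k(x, x2) = variance * exp(-0.5 * sum_d (x_d - x2_d)**2 / lengthscale_d**2).
# `rbf_reference` is a hypothetical helper, not part of Edward.
import numpy as np


def rbf_reference(X, X2, lengthscale=1.0, variance=1.0):
  """Kernel matrix between the rows of X and X2."""
  X = np.asarray(X) / lengthscale
  X2 = np.asarray(X2) / lengthscale
  # Pairwise squared Euclidean distances, shape (N1, N2).
  sq_dist = ((X[:, None, :] - X2[None, :, :]) ** 2).sum(-1)
  return variance * np.exp(-0.5 * sq_dist)


# e.g. rbf_reference([[2.0, 2.5]], [[1.5, 2.0]], lengthscale=[2.0, 1.5])
# reproduces the hand-checked 0.916855 above.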
def test_x(self):
  with self.test_session():
    X = tf.constant([[0.0], [0.0]])
    X2 = tf.constant([[0.0], [0.0]])
    self.assertAllClose(rbf(X).eval(), [[1.0, 1.0], [1.0, 1.0]])
    self.assertAllClose(rbf(X, X2).eval(), [[1.0, 1.0], [1.0, 1.0]])
def test_x2(self):
  with self.test_session():
    X = tf.constant([[10.0], [2.0]])
    X2 = tf.constant([[2.0], [10.0]])
    self.assertAllClose(rbf(X, X2).eval(),
                        [[1.266417e-14, 1.0], [1.0, 1.266417e-14]])
    self.assertAllClose(rbf(X2, X).eval(),
                        [[1.266417e-14, 1.0], [1.0, 1.266417e-14]])

    X = tf.constant([[2.0, 2.5], [4.1, 5.0]])
    X2 = tf.constant([[1.5, 2.0], [3.1, 4.2]])
    self.assertAllClose(rbf(X, X2).eval(),
                        [[0.778800, 0.128734], [0.000378, 0.440431]],
                        atol=1e-5, rtol=1e-5)
def test_rbf_1d(self):
  with self.test_session():
    x = tf.constant([0.0])
    self.assertAllClose(rbf(x).eval(), [1.0])

    x = tf.constant([10.0])
    y = tf.constant([2.0])
    self.assertAllClose(rbf(x, y=y).eval(), [1.26e-14])

    x = tf.constant([0.0])
    y = tf.constant([1.0])
    sigma = tf.constant(10.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma).eval(), [60.6530685])

    x = tf.constant([0.0])
    y = tf.constant([1.0])
    sigma = tf.constant(10.0)
    l = tf.constant(5.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma, l=l).eval(), [98.01986694])

    x = tf.constant([0.0, 1.0])
    self.assertAllClose(rbf(x).eval(), [1.0, 0.606530666])

    x = tf.constant([10.0, 3.0])
    y = tf.constant([2.0, 3.0])
    self.assertAllClose(rbf(x, y=y).eval(), [1.266417e-14, 1.0])

    x = tf.constant([0.0, 1.0])
    y = tf.constant([1.0, 2.0])
    sigma = tf.constant(10.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma).eval(),
                        [60.6530685, 60.6530685])

    x = tf.constant([0.0, -23.0])
    y = tf.constant([1.0, -93.0])
    sigma = tf.constant(10.0)
    l = tf.constant(50.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma, l=l).eval(),
                        [99.980003, 37.531109])
def test_rbf_2d(self):
  with self.test_session():
    x = tf.constant([[0.0], [0.0]])
    self.assertAllClose(rbf(x).eval(), [[1.0], [1.0]])

    x = tf.constant([[10.0], [2.0]])
    y = tf.constant([[2.0], [10.0]])
    self.assertAllClose(rbf(x, y=y).eval(),
                        [[1.266417e-14], [1.266417e-14]])

    x = tf.constant([[0.0], [10.0]])
    y = tf.constant([[1.0], [1.0]])
    sigma = tf.constant(10.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma).eval(),
                        [[6.065307e+01], [2.576757e-16]])

    x = tf.constant([[0.0], [10.0]])
    y = tf.constant([[1.0], [1.0]])
    sigma = tf.constant(10.0)
    l = tf.constant(5.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma, l=l).eval(),
                        [[98.019867], [19.789869]])

    x = tf.constant([[10.0, 3.0], [10.0, 3.0]])
    self.assertAllClose(rbf(x, y=y).eval(),
                        [[2.576757e-18, 1.353353e-01],
                         [2.576757e-18, 1.353353e-01]])

    x = tf.constant([[10.0, 3.0], [10.0, 3.0]])
    y = tf.constant([[2.0, 3.0], [2.0, 3.0]])
    self.assertAllClose(rbf(x, y=y).eval(),
                        [[1.266417e-14, 1.0], [1.266417e-14, 1.0]])

    x = tf.constant([[0.0, 1.0], [10.0, -3.0]])
    y = tf.constant([[1.0, 2.0], [1.0, 2.0]])
    sigma = tf.constant(10.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma).eval(),
                        [[6.065307e+01, 6.065307e+01],
                         [2.576757e-16, 3.726653e-04]])

    x = tf.constant([[10.0, 3.0], [10.0, 3.0]])
    y = tf.constant([[2.0, 3.0], [2.0, 3.0]])
    sigma = tf.constant(10.0)
    l = tf.constant(5.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma, l=l).eval(),
                        [[27.80373, 100], [27.80373, 100]])
def gaussian_process_classification_example():
  ed.set_seed(42)

  data, metadata = crabs('~/data')
  X_train = data[:100, 3:]
  y_train = data[:100, 1]

  N = X_train.shape[0]  # Number of data points.
  D = X_train.shape[1]  # Number of features.
  print('Number of data points: {}'.format(N))
  print('Number of features: {}'.format(D))

  # --------------------
  # Model.
  X = tf.placeholder(tf.float32, [N, D])
  f = MultivariateNormalTriL(loc=tf.zeros(N), scale_tril=tf.cholesky(rbf(X)))
  y = Bernoulli(logits=f)

  # --------------------
  # Inference.
  # Perform variational inference.
  qf = Normal(loc=tf.get_variable('qf/loc', [N]),
              scale=tf.nn.softplus(tf.get_variable('qf/scale', [N])))

  inference = ed.KLqp({f: qf}, data={X: X_train, y: y_train})
  inference.run(n_iter=5000)
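# A possible criticism step for gaussian_process_classification_example,
# sketched after the pattern in the varbvs snippet further below; `X_test`
# and `y_test` are assumed held-out arrays with the same N as the
# placeholders:
#
#   y_post = ed.copy(y, {f: qf})
#   print(ed.evaluate('binary_accuracy', data={X: X_test, y_post: y_test}))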
def test_rbf_0d(self):
  with self.test_session():
    x = tf.constant(0.0)
    self.assertAllClose(rbf(x).eval(), 1.0)

    x = tf.constant(10.0)
    y = tf.constant(2.0)
    self.assertAllClose(rbf(x, y=y).eval(), 1.26e-14)

    x = tf.constant(0.0)
    y = tf.constant(1.0)
    sigma = tf.constant(10.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma).eval(), 60.6530685)

    x = tf.constant(0.0)
    y = tf.constant(1.0)
    sigma = tf.constant(10.0)
    l = tf.constant(5.0)
    self.assertAllClose(rbf(x, y=y, sigma=sigma, l=l).eval(), 98.01986694)
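# The test_rbf_0d/1d/2d cases exercise an older elementwise
# rbf(x, y, sigma, l) signature. A minimal NumPy sketch consistent with the
# asserted values, assuming k(x, y) = sigma**2 * exp(-(x - y)**2 / (2 * l**2))
# applied elementwise (`rbf_elementwise` is a hypothetical helper, not the
# Edward API):
import numpy as np


def rbf_elementwise(x, y=0.0, sigma=1.0, l=1.0):
  x, y = np.asarray(x), np.asarray(y)
  return sigma ** 2 * np.exp(-(x - y) ** 2 / (2.0 * l ** 2))


# e.g. rbf_elementwise(0.0, y=1.0, sigma=10.0) -> 60.6530..., matching the
# assertion above.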
def main(_):
  ed.set_seed(42)

  # DATA
  x_data = build_toy_dataset(FLAGS.N, FLAGS.V)

  # MODEL
  x_ph = tf.placeholder(tf.float32, [FLAGS.N, FLAGS.V])

  # Form (N, V, V) covariance, one matrix per data point.
  # Note: the tf.diag jitter assumes V == 2.
  K = tf.stack([rbf(tf.reshape(xn, [FLAGS.V, 1])) + tf.diag([1e-6, 1e-6])
                for xn in tf.unstack(x_ph)])
  f = MultivariateNormalTriL(loc=tf.zeros([FLAGS.N, FLAGS.V]),
                             scale_tril=tf.cholesky(K))
  x = Poisson(rate=tf.exp(f))

  # INFERENCE
  qf = Normal(loc=tf.get_variable("qf/loc", [FLAGS.N, FLAGS.V]),
              scale=tf.nn.softplus(
                  tf.get_variable("qf/scale", [FLAGS.N, FLAGS.V])))

  inference = ed.KLqp({f: qf}, data={x: x_data, x_ph: x_data})
  inference.run(n_iter=5000)
def converge(n_train):
  mse = []
  for n in n_train:
    training_x = np.linspace(90, 110, n)
    training_y = []
    for S in training_x:
      training_y.append(
          HestonVega(S, K, T, r, sigma, lmbda, meanV, v0, rho, 'C', N=nInt))

    testing_x = np.linspace(95, 105, n)
    testing_y = []
    for S in testing_x:
      testing_y.append(
          HestonVega(S, K, T, r, sigma, lmbda, meanV, v0, rho, 'C', N=nInt))

    train32_x = np.array(training_x, dtype='float32').reshape(n, 1)
    test32_x = np.array(testing_x, dtype='float32').reshape(n, 1)

    Ker = rbf(train32_x).eval()
    # Noise on the diagonal; np.eye(n), not np.eye(n_train), since n_train
    # is the list of training sizes.
    K_noise = Ker + np.eye(n)  # * 0.1
    k_s = rbf(test32_x, train32_x).eval()

    L = np.linalg.cholesky(K_noise)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, training_y))
    predict_mean = np.dot(k_s, alpha)
    mse.append(np.mean((predict_mean - testing_y) ** 2))
  return mse
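# The linear algebra in `converge` (and in the interpolation snippets below)
# follows the standard Cholesky-based GP regression recipe (Rasmussen &
# Williams, Algorithm 2.1):
#
#   L = cholesky(K + noise * I)
#   alpha = solve(L.T, solve(L, y))  # posterior mean:     mu_* = K_* @ alpha
#   v = solve(L, K_*.T)              # posterior variance: V_* = K_** - v.T @ v
#
# A hypothetical driver, assuming the Heston globals (K, T, r, sigma, lmbda,
# meanV, v0, rho, nInt) are defined as in the surrounding script:
#
#   mse = converge([10, 20, 40, 80])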
def test_variance(self):
  with self.test_session():
    X = tf.constant([[2.0, 2.5], [4.1, 5.0]])
    X2 = tf.constant([[1.5, 2.0], [3.1, 4.2]])
    variance = tf.constant(1.4)
    self.assertAllClose(rbf(X, X2, variance=variance).eval(),
                        [[1.090321, 0.180228], [0.000529, 0.616604]],
                        atol=1e-5, rtol=1e-5)
def test_all(self):
  with self.test_session():
    X = tf.constant([[2.0, 2.5], [4.1, 5.0]])
    X2 = tf.constant([[1.5, 2.0], [3.1, 4.2]])
    lengthscale = tf.constant([2.0, 1.5])
    variance = tf.constant(1.4)
    self.assertAllClose(rbf(X, X2, lengthscale, variance).eval(),
                        [[1.283597, 0.633180], [0.081387, 1.071704]],
                        atol=1e-5, rtol=1e-5)
def test_raises(self):
  with self.test_session():
    X1 = tf.constant([[0.0]])
    X2 = tf.constant([[0.0]])
    lengthscale = tf.constant(-5.0)
    variance = tf.constant(-1.0)
    with self.assertRaisesOpError('Condition'):
      rbf(X1, X2, variance=variance).eval()
      rbf(X1, X2, lengthscale).eval()
      rbf(X1, X2, lengthscale, variance).eval()
def define_posterior_predictive(X, K, f, sigma_signal, sigma_noise,
                                lengthscale):
  """Define the posterior predictive mean and variance.

  Parameters
  ----------
  X : tf.placeholder, shape (N, D)
    A placeholder for the input data.
  K : tf.Tensor, shape (N, N)
    The covariance matrix.
  f : edward.RandomVariable, shape (N,)
    The Gaussian process prior.
  sigma_signal : float
    The signal variance.
  sigma_noise : float
    The noise variance.
  lengthscale : float or array-like
    The lengthscale parameter. Can either be a scalar or vector of size D,
    where D is the number of dimensions of the input space.

  Returns
  -------
  x : tf.placeholder, shape (None, D)
    A placeholder for the future data.
  mu : tf.Tensor, shape (None,)
    The mean function.
  var : tf.Tensor, shape (None,)
    The variance function.
  """
  N, D = X.shape
  x = tf.placeholder(tf.float32, [None, D])
  k = rbf(X, x, variance=sigma_signal, lengthscale=lengthscale)
  K_inv = tf.matrix_inverse(K)
  mu = tf.reduce_sum(tf.matmul(tf.transpose(k), K_inv) * f, axis=1)
  c = sigma_signal + sigma_noise
  var = c - tf.reduce_sum(
      tf.matmul(tf.transpose(k), K_inv) * tf.transpose(k), axis=1)
  return x, mu, var
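# `define_posterior_predictive` implements the usual GP posterior predictive
# equations, with K = K_ff + sigma_noise * I as built by `define_prior` below:
#
#   mu(x)  = k(x, X)^T K^{-1} f
#   var(x) = (sigma_signal + sigma_noise) - k(x, X)^T K^{-1} k(x, X)
#
# Note the explicit tf.matrix_inverse; for larger N a Cholesky-based solve
# (tf.cholesky_solve) would be the numerically safer choice.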
def define_prior(N, D, sigma_noise, sigma_signal, lengthscale):
  """Define a Gaussian process prior.

  Parameters
  ----------
  N : int
    The number of observations.
  D : int
    The number of input dimensions.
  sigma_noise : float
    The noise variance.
  sigma_signal : float
    The signal variance.
  lengthscale : float or array-like
    The lengthscale parameter. Can either be a scalar or vector of size D,
    where D is the number of dimensions of the input space.

  Returns
  -------
  X : tf.placeholder, shape (N, D)
    A placeholder for the input data.
  K : tf.Tensor, shape (N, N)
    The covariance matrix.
  f : edward.RandomVariable, shape (N,)
    The Gaussian process prior.
  """
  # Define the model.
  X = tf.placeholder(tf.float32, [N, D])
  K = (rbf(X, variance=sigma_signal, lengthscale=lengthscale) +
       np.eye(N) * sigma_noise)
  f = MultivariateNormalTriL(loc=tf.zeros(N), scale_tril=tf.cholesky(K))

  # Check dimensions.
  assert X.shape == (N, D)
  assert K.shape == (N, N)
  assert f.shape == (N,)
  return X, K, f
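# A minimal usage sketch tying the two helpers together; the hyperparameter
# values here are illustrative, not from the original script:
X, K, f = define_prior(N=50, D=1, sigma_noise=0.1, sigma_signal=1.0,
                       lengthscale=1.0)
x_star, mu, var = define_posterior_predictive(X, K, f, sigma_signal=1.0,
                                              sigma_noise=0.1,
                                              lengthscale=1.0)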
import edward as ed
import tensorflow as tf

from edward.models import Bernoulli, MultivariateNormalTriL, Normal
from edward.util import rbf

# `df` is assumed to be a pandas DataFrame of the Iris data, with an
# 'index' column holding the binary labels.
ys = df['index'].values
xs = df[['sepal length (cm)', 'sepal width (cm)',
         'petal length (cm)', 'petal width (cm)']].values

N = xs.shape[0]
D = xs.shape[1]
print("Number of data points: {}".format(N))
print("Number of features: {}".format(D))

X = tf.placeholder(tf.float32, [N, D])
f = MultivariateNormalTriL(loc=tf.zeros(N), scale_tril=tf.cholesky(rbf(X)))
y = Bernoulli(logits=f)

qf = Normal(loc=tf.get_variable("qf/loc", [N]),
            scale=tf.nn.softplus(tf.get_variable("qf/scale", [N])))

inference = ed.KLqp({f: qf}, data={X: xs, y: ys})
inference.run(n_iter=5000)
  # (Tail of build_toy_dataset(N, V); x, K, N, and V are defined earlier
  # in the function.)
  for n in range(N):
    f_n = multivariate_normal.rvs(cov=K, size=1)
    for v in range(V):
      x[n, v] = poisson.rvs(mu=np.exp(f_n[v]), size=1)
  return x


ed.set_seed(42)

N = 308  # number of NBA players
V = 2  # number of shot locations

# DATA
x_data = build_toy_dataset(N, V)

# MODEL
x_ph = tf.placeholder(tf.float32, [N, V])  # inputs to Gaussian process
# Form (N, V, V) covariance, one matrix per data point.
K = tf.stack([rbf(tf.reshape(xn, [V, 1])) + tf.diag([1e-6, 1e-6])
              for xn in tf.unstack(x_ph)])
f = MultivariateNormalTriL(loc=tf.zeros([N, V]), scale_tril=tf.cholesky(K))
x = Poisson(rate=tf.exp(f))

# INFERENCE
qf = Normal(loc=tf.Variable(tf.random_normal([N, V])),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([N, V]))))

inference = ed.KLqp({f: qf}, data={x: x_data, x_ph: x_data})
inference.run(n_iter=5000)
sess = ed.get_session()

# Change training_number to see how training quality varies with the amount
# of training data.
training_number = 11

train_x = np.array(np.linspace(90, 110, training_number),
                   dtype='float32').reshape(training_number, 1)
train_y = []
for S in train_x:
  train_y.append(
      HestonVega(S, K, T, r, sigma, lmbda, meanV, v0, rho, 'C', N=nInt))

test_x = np.array(np.linspace(90, 110, 200), dtype='float32').reshape(200, 1)
test_y = []
for S in test_x:
  test_y.append(
      HestonVega(S, K, T, r, sigma, lmbda, meanV, v0, rho, 'C', N=nInt))

Kernel = rbf(train_x).eval()
# Without noise, the covariance band collapses to 0 at the training points.
K_noise = Kernel + np.eye(training_number) * 0.01
k_s = rbf(test_x, train_x).eval()

L = np.linalg.cholesky(K_noise)
alpha = np.linalg.solve(L.T, np.linalg.solve(L, train_y))
predict_mean = np.dot(k_s, alpha)

# Covariance.
v = np.linalg.solve(L, k_s.T)
var = rbf(test_x).eval() - np.dot(v.T, v)

# Plot with the covariance band.
data = make_pinwheel(radial_std=0.3, tangential_std=0.05, num_classes=5,
                     num_per_class=30, rate=0.4)
N = data.shape[0]
D = 2  # number of features
K = 2  # number of latent dimensions
H1 = 2
H2 = 2

# Model: deep/shallow GP (generative model).
X = Normal(loc=tf.zeros([N, K]), scale=tf.ones([N, K]))

Kernel = rbf(X) + tf.eye(N) * 1e-6  # jitter for a stable Cholesky
cholesky = tf.tile(tf.reshape(tf.cholesky(Kernel), [1, N, N]), [H1, 1, 1])
h1 = MultivariateNormalTriL(loc=tf.zeros([H1, N]), scale_tril=cholesky)

Kernel1 = rbf(tf.transpose(h1)) + tf.eye(N) * 1e-6
cholesky1 = tf.tile(tf.reshape(tf.cholesky(Kernel1), [1, N, N]), [H2, 1, 1])
h2 = MultivariateNormalTriL(loc=tf.zeros([H2, N]), scale_tril=cholesky1)

Kernel2 = rbf(tf.transpose(h2)) + tf.eye(N) * 1e-6
cholesky2 = tf.tile(tf.reshape(tf.cholesky(Kernel2), [1, N, N]), [D, 1, 1])
Y = MultivariateNormalTriL(loc=tf.zeros([D, N]), scale_tril=cholesky2)

# Inference (recognition model).
qX = Normal(loc=tf.Variable(tf.random_normal([N, K])),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([N, K]))))
# Doing this because it seems the test and training data need to have the
# same N for the evaluation to work.
X_test, y_test = X_test[:-1, :], y_test[:-1]

# Unfortunately not sure how to make the linear kernel work at this moment.
N, P = X_train.shape

X_tf = tf.placeholder(tf.float32, [N, P])

# Latent stochastic function: the loc position is where we get
# (x *element-wise* b).
b = Bernoulli(varbvs_prior, dtype=np.float32)  # prior from varbvs
gp_mu = tf.reduce_mean(
    tf.multiply(X_tf, tf.reshape(tf.tile(b, [N]), [N, P])), 1)

# Mean for the prior over the GP; uses the RBF kernel for the covariance
# for now.
f = MultivariateNormalTriL(loc=gp_mu, scale_tril=tf.cholesky(rbf(X_tf)))

qf = Normal(loc=tf.get_variable("qf/loc", [N]),
            scale=tf.nn.softplus(tf.get_variable("qf/scale", [N])))

# Response.
y_tf = Bernoulli(logits=f)

# Inference.
infer = ed.KLqp({f: qf}, data={X_tf: X_train, y_tf: y_train})
infer.run(n_samples=3, n_iter=5000)

# Criticism.
y_post = ed.copy(y_tf, {f: qf})
ed.evaluate('binary_accuracy', data={X_tf: X_test, y_post: y_test})
def test_constraint_raises(self):
  with self.test_session():
    x = tf.constant(0.0)
    y = tf.constant(1.0)
    sigma = tf.constant(-1.0)
    l = tf.constant(-5.0)
    with self.assertRaisesOpError('Condition'):
      rbf(x, y=y, sigma=sigma).eval()
      rbf(x, y=y, l=l).eval()
      rbf(x, y=y, sigma=sigma, l=l).eval()

    x = np.inf * tf.constant(1.0)
    y = tf.constant(1.0)
    sigma = tf.constant(1.0)
    l = tf.constant(5.0)
    with self.assertRaisesOpError('Inf'):
      rbf(x).eval()
      rbf(x, y=y).eval()
      rbf(x, y=y, sigma=sigma).eval()
      rbf(x, y=y, l=l).eval()
      rbf(x, y=y, sigma=sigma, l=l).eval()

    x = tf.constant(0.0)
    y = np.nan * tf.constant(1.0)
    sigma = tf.constant(1.0)
    l = tf.constant(5.0)
    with self.assertRaisesOpError('NaN'):
      rbf(x, y=y).eval()
      rbf(x, y=y, sigma=sigma).eval()
      rbf(x, y=y, l=l).eval()
      rbf(x, y=y, sigma=sigma, l=l).eval()
sess = ed.get_session()

training_number = 17
idx = np.linspace(0, len(x) - 1, training_number, dtype='int')
x_train = np.array(x[idx], dtype='float32').reshape(training_number, 1)
y_train = np.array(y[idx], dtype='float32').reshape(training_number, 1)
x_test = np.array(x, dtype='float32').reshape(len(x), 1)
y_test = np.array(y, dtype='float32').reshape(len(x), 1)

# Mean.
Kernel = rbf(x_train).eval()
# Without noise, the covariance band collapses to 0 at the training points.
K_noise = Kernel + np.eye(training_number) * 0.01
k_s = rbf(x_test, x_train).eval()

L = np.linalg.cholesky(K_noise)
alpha = np.linalg.solve(L.T, np.linalg.solve(L, y_train))
predict_mean = np.dot(k_s, alpha)

# Covariance.
v = np.linalg.solve(L, k_s.T)
var = rbf(x_test).eval() - np.dot(v.T, v)

# Plot with the covariance band.