Example #1
 def test_data(self):
     tf.InteractiveSession()
     x = Normal(mu=0.0, sigma=1.0)
     y = 2.0 * x
     x_data = tf.constant(0.0)
     y_data = tf.constant(0.0)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data}, n_samples=1)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[y]), {y: y_data}, n_samples=1)
Example #2
 def test_n_samples(self):
   with self.test_session():
     x = Normal(loc=0.0, scale=1.0)
     x_data = tf.constant(0.0)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data}, n_samples=1)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data}, n_samples=5)
     self.assertRaises(TypeError, ed.ppc, lambda xs, zs: tf.reduce_mean(xs[x]),
                       {x: x_data}, n_samples='1')
Example #3
def critique_glm(posterior_pred, x, x_test, y_test, w, b, qw, qb):
    def t_max(data_dict, latent):
        return tf.reduce_max(data_dict[posterior_pred])

    def t_min(data_dict, latent):
        return tf.reduce_min(data_dict[posterior_pred])

    def t_mean(data_dict, latent):
        return tf.reduce_mean(data_dict[posterior_pred])

    ppc_1 = ed.ppc(t_max,
                   data={x: x_test.as_matrix(),
                         posterior_pred: np.reshape(y_test.as_matrix(), (y_test.shape[0]))},
                   latent_vars={w: qw, b: qb})
    ed.ppc_stat_hist_plot(ppc_1[1][1],
                          ppc_1[0],
                          stat_name=r'$T \equiv max$',
                          bins=10)
    plt.show()
    ppc_2 = ed.ppc(t_min,
                   data={x: x_test.as_matrix(),
                         posterior_pred: np.reshape(y_test.as_matrix(), (y_test.shape[0]))},
                   latent_vars={w: qw, b: qb})
    ed.ppc_stat_hist_plot(ppc_2[1][1],
                          ppc_2[0],
                          stat_name=r'$T \equiv min$',
                          bins=10)
    plt.show()
    ppc_3 = ed.ppc(t_mean,
                   data={x: x_test.as_matrix(),
                         posterior_pred: np.reshape(y_test.as_matrix(), (y_test.shape[0]))},
                   latent_vars={w: qw, b: qb})
    ed.ppc_stat_hist_plot(ppc_3[1][1],
                          ppc_3[0],
                          stat_name=r'$T \equiv mean$',
                          bins=10)
    plt.show()
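For context, critique_glm is only defined above and never called here. A minimal usage sketch follows, assuming an Edward GLM likelihood y = Normal(loc=ed.dot(x, w) + b, ...) has already been fit so that x, w, b and their posterior approximations qw, qb exist (those names are assumptions, not part of the original example):

# Hypothetical call: ed.copy re-points the likelihood's weights at the inferred
# posterior, giving the posterior-predictive node the PPCs are run against.
posterior_pred = ed.copy(y, {w: qw, b: qb})
critique_glm(posterior_pred, x, x_test, y_test, w, b, qw, qb)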
Example #4
 def test_n_samples(self):
     with self.test_session():
         x = Normal(mu=0.0, sigma=1.0)
         x_data = tf.constant(0.0)
         ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data},
                n_samples=1)
         ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data},
                n_samples=5)
         self.assertRaises(TypeError,
                           ed.ppc,
                           lambda xs, zs: tf.reduce_mean(xs[x]),
                           {x: x_data},
                           n_samples='1')
Example #5
 def test_latent_vars(self):
     with self.test_session():
         x = Normal(mu=0.0, sigma=1.0)
         y = 2.0 * x
         z = Normal(mu=0.0, sigma=1.0)
         x_data = tf.constant(0.0)
         y_data = tf.constant(0.0)
         ed.ppc(
             lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[z]),
             {x: x_data}, {z: z},
             n_samples=1)
         ed.ppc(
             lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[z]),
             {x: x_data}, {z: y},
             n_samples=1)
         ed.ppc(
             lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[y]),
             {x: x_data}, {y: y},
             n_samples=1)
         ed.ppc(
             lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[y]),
             {x: x_data}, {y: z},
             n_samples=1)
         self.assertRaises(TypeError,
                           ed.ppc,
                           lambda xs, zs: tf.reduce_mean(xs[x]),
                           {x: x_data}, {'y': z},
                           n_samples=1)
Example #6
 def test_data(self):
     with self.test_session():
         x = Normal(loc=0.0, scale=1.0)
         y = 2.0 * x
         x_data = tf.constant(0.0)
         y_data = tf.constant(0.0)
         ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data},
                n_samples=1)
         ed.ppc(lambda xs, zs: tf.reduce_mean(xs[y]), {y: y_data},
                n_samples=1)
         self.assertRaises(TypeError,
                           ed.ppc,
                           lambda xs, zs: tf.reduce_mean(xs[y]),
                           {'y': y_data},
                           n_samples=1)
Example #7
def main(_):
  ed.set_seed(42)

  # DATA
  x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

  # MODEL
  p = Beta(1.0, 1.0)
  x = Bernoulli(probs=p, sample_shape=10)

  # INFERENCE
  qp = Empirical(params=tf.get_variable(
      "qp/params", [1000], initializer=tf.constant_initializer(0.5)))

  proposal_p = Beta(3.0, 9.0)

  inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
  inference.run()

  # CRITICISM
  # exact posterior has mean 0.25 and std 0.12
  sess = ed.get_session()
  mean, stddev = sess.run([qp.mean(), qp.stddev()])
  print("Inferred posterior mean:")
  print(mean)
  print("Inferred posterior stddev:")
  print(stddev)

  x_post = ed.copy(x, {p: qp})
  tx_rep, tx = ed.ppc(
      lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32)),
      data={x_post: x_data})
  ed.ppc_stat_hist_plot(
      tx[0], tx_rep, stat_name=r'$T \equiv$mean', bins=10)
  plt.show()
Example #8
def plot_samples(y_post, y_test, num_bins=20):
    """Plot y samples from the posterior alongside the actual y values."""
    def T(data, latent_params):
        return tf.reduce_mean(tf.cast(data[y_post], tf.float32))

    Ty_rep, Ty = ed.ppc(T, data={y_post: y_test})
    ed.ppc_stat_hist_plot(Ty, Ty_rep, bins=num_bins)
    plt.show()
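A minimal sketch of how plot_samples might be wired up, assuming a regression likelihood y with latent weights w, b and fitted approximations qw, qb (these names are assumptions, not taken from the original snippet):

# Hypothetical setup: swap the prior weights for their posterior approximations,
# then compare the posterior-predictive mean of y against the held-out mean.
y_post = ed.copy(y, {w: qw, b: qb})
plot_samples(y_post, y_test, num_bins=30)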
Example #9
    def ppc(self, T_type):
        if T_type == 'mean':

            def T1(ys, xs):
                return tf.reduce_mean(self.y_post[0])

            def T2(ys, xs):
                return tf.reduce_mean(self.y_post[1])

        else:

            def T1(ys, xs):
                return tf.reduce_mean(self.y_post[0])

            def T2(ys, xs):
                return tf.reduce_mean(self.y_post[1])

        stats1 = ed.ppc(T1,
                        data={
                            self.X1: self.xs1,
                            self.X2: self.xs2,
                            self.y_post[0]: self.ys[:, 0]
                        },
                        latent_vars={
                            self.w1: self.qw1,
                            self.b1: self.qb1
                        },
                        n_samples=1000)
        stats2 = ed.ppc(T2,
                        data={
                            self.X1: self.xs1,
                            self.X2: self.xs2,
                            self.y_post[1]: self.ys[:, 1]
                        },
                        latent_vars={
                            self.w2: self.qw2,
                            self.b2: self.qb2
                        },
                        n_samples=1000)
        return (stats1[0], stats2[0])
Example #10
 def test_latent_vars(self):
   with self.test_session():
     x = Normal(loc=0.0, scale=1.0)
     y = 2.0 * x
     z = Normal(loc=0.0, scale=1.0)
     x_data = tf.constant(0.0)
     y_data = tf.constant(0.0)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[z]),
            {x: x_data}, {z: z}, n_samples=1)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[z]),
            {x: x_data}, {z: y}, n_samples=1)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[y]),
            {x: x_data}, {y: y}, n_samples=1)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[y]),
            {x: x_data}, {y: z}, n_samples=1)
     self.assertRaises(TypeError, ed.ppc, lambda xs, zs: tf.reduce_mean(xs[x]),
                       {x: x_data}, {'y': z}, n_samples=1)
Example #11
 def test_latent_vars(self):
     tf.InteractiveSession()
     x = Normal(mu=0.0, sigma=1.0)
     y = 2.0 * x
     z = Normal(mu=0.0, sigma=1.0)
     x_data = tf.constant(0.0)
     y_data = tf.constant(0.0)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[z]),
            {x: x_data}, {z: z},
            n_samples=1)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[z]),
            {x: x_data}, {z: y},
            n_samples=1)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[y]),
            {x: x_data}, {y: y},
            n_samples=1)
     ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[y]),
            {x: x_data}, {y: z},
            n_samples=1)
Example #12
    def ppc(self, T_type):
        if T_type == 'mean':

            def T1(ys, xs):
                return tf.reduce_mean(self.y_post)
        else:

            def T1(ys, xs):
                return tf.reduce_mean(self.y_post)

        stats1 = ed.ppc(T1,
                        data={
                            self.X: self.xs,
                            self.y_post: self.ys
                        },
                        latent_vars={self.w1: self.qw1},
                        n_samples=1000)
        return stats1
Example #13
    log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
    log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
    return log_lik + log_prior

  def sample_prior(self):
    """p ~ p(p)"""
    return {'p': beta.sample(a=1.0, b=1.0)}

  def sample_likelihood(self, zs):
    """x | p ~ p(x | p)"""
    return {'x': bernoulli.sample(p=tf.ones(10) * zs['p'])}


def T(xs, zs):
  return tf.reduce_mean(tf.cast(xs['x'], tf.float32))


ed.set_seed(42)
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

model = BetaBernoulli()

qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

inference = ed.KLqp({'p': qp}, data, model)
inference.run(n_iter=200)

print(ed.ppc(T, data, model_wrapper=model))
Example #14
        log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
        log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
        return log_lik + log_prior

    def sample_prior(self):
        """p ~ p(p)"""
        return {'p': beta.sample(a=1.0, b=1.0)}

    def sample_likelihood(self, zs):
        """x | p ~ p(x | p)"""
        return {'x': bernoulli.sample(p=tf.ones(10) * zs['p'])}


def T(xs, zs):
    return tf.reduce_mean(tf.cast(xs['x'], tf.float32))


ed.set_seed(42)
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

model = BetaBernoulli()

qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

inference = ed.MFVI({'p': qp}, data, model)
inference.run(n_iter=200)

print(ed.ppc(T, data, model_wrapper=model))
Example #15
def save(arr,xdata,ydata):
	tf.reset_default_graph()

	trainSetNumber = round(FLAGS.T* 0.8)

	x_train = xdata[:trainSetNumber]
	y_train = ydata[:trainSetNumber]
	x_test = xdata[trainSetNumber:]
	y_test = ydata[trainSetNumber:]

	x_train = np.asarray(x_train)
	x_test = np.asarray(x_test)
	# print(x_test)
	# print(y_test)
	pos = 0
	name = arr[pos]
	pos +=1
	H1 = int(arr[pos])
	pos+=1
	H2 = int(arr[pos])
	pos+=1
	param1 = float(arr[pos])
	pos += 1
	param2 = float(arr[pos])

	graph1 = tf.Graph()
	with graph1.as_default():
		with tf.name_scope("model"):
			W_0 = Normal(loc=tf.zeros([FLAGS.D, H1]), scale=param1*tf.ones([FLAGS.D,H1 ]),name="W_0")
			W_1 = Normal(loc=tf.zeros([H1, H2]), scale=param2*tf.ones([H1, H2]), name="W_1")
			W_2 = Normal(loc=tf.zeros([H2, FLAGS.O]), scale=param2*tf.ones([H2, FLAGS.O]), name="W_2")
			b_0 = Normal(loc=tf.zeros(H1), scale=param1 *tf.ones(H1), name="b_0")
			b_1 = Normal(loc=tf.zeros(H2), scale=param2* tf.ones(H2), name="b_1")
			b_2 = Normal(loc=tf.zeros(FLAGS.O), scale=param2* tf.ones(FLAGS.O), name="b_2")

			X = tf.placeholder(tf.float32, [trainSetNumber, FLAGS.D], name="X")
			y = Normal(loc=neural_network(x_train,W_0, W_1, W_2, b_0, b_1, b_2, trainSetNumber), scale=0.1*tf.ones([trainSetNumber,FLAGS.O]), name="y")
		
		with tf.variable_scope("posterior",reuse=tf.AUTO_REUSE):
			with tf.variable_scope("qW_0",reuse=tf.AUTO_REUSE):
			    loc = tf.get_variable("loc", [FLAGS.D, H1])
			    scale = param1*tf.nn.softplus(tf.get_variable("scale", [FLAGS.D, H1]))
			    qW_0 = Normal(loc=loc, scale=scale)
			with tf.variable_scope("qW_1",reuse=tf.AUTO_REUSE):
			    loc = tf.get_variable("loc", [H1, H2])
			    scale = param2*tf.nn.softplus(tf.get_variable("scale", [H1, H2]))
			    qW_1 = Normal(loc=loc, scale=scale)
			with tf.variable_scope("qW_2",reuse=tf.AUTO_REUSE):
			    loc = tf.get_variable("loc", [H2, FLAGS.O])
			    scale = param2*tf.nn.softplus(tf.get_variable("scale", [H2, FLAGS.O]))
			    qW_2 = Normal(loc=loc, scale=scale)
			with tf.variable_scope("qb_0",reuse=tf.AUTO_REUSE):
			    loc = tf.get_variable("loc", [H1])
			    scale =param1 * tf.nn.softplus(tf.get_variable("scale", [H1]))
			    qb_0 = Normal(loc=loc, scale=scale)
			with tf.variable_scope("qb_1",reuse=tf.AUTO_REUSE):
			    loc = tf.get_variable("loc", [H2])
			    scale =param2 * tf.nn.softplus(tf.get_variable("scale", [H2]))
			    qb_1 = Normal(loc=loc, scale=scale)
			with tf.variable_scope("qb_2",reuse=tf.AUTO_REUSE):
			    loc = tf.get_variable("loc", [FLAGS.O])
			    scale =param2 * tf.nn.softplus(tf.get_variable("scale", [FLAGS.O]))
			    qb_2 = Normal(loc=loc, scale=scale)
	#inference
	with tf.Session(graph=graph1) as sess:
		# Set up the inference method, mapping the prior to the posterior variables
		inference = ed.KLqp({W_0: qW_0, b_0: qb_0,W_1: qW_1, b_1: qb_1,W_2: qW_2, b_2: qb_2}, data={X: x_train, y: y_train})
		# Set up the adam optimizer
		global_step = tf.Variable(0, trainable=False)
		starter_learning_rate = 0.1
		learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,100, 0.3, staircase=True)
		optimizer = tf.train.AdamOptimizer(learning_rate)

		# Run the inference method
		pos += 1
		iter1 = int(arr[pos])
		inference.run(n_iter=iter1, optimizer=optimizer, n_samples=5)

		#Run the test data through the neural network
		infered = neural_network(x_test, qW_0, qW_1, qW_2, qb_0, qb_1, qb_2, len(x_test))
		inferedList = infered.eval()

		#Accuracy checks on the data (The test data)
		# In order to work with PPC and other metrics, the prediction must be a random variable.
		# Normal creates this random variable by placing a normal distribution over the posterior predictions.
		NormalTest =Normal(loc=neural_network(x_test, qW_0, qW_1, qW_2, qb_0, qb_1, qb_2,len(x_test)), scale=0.1*tf.ones([len(x_test),FLAGS.O]), name="y_other") 
		NormalTestList = NormalTest.eval()
		
		# Change the graph so that the posterior points to the output
		y_post = ed.copy(NormalTest, {W_0: qW_0, b_0: qb_0,W_1: qW_1, b_1: qb_1,W_2: qW_2, b_2: qb_2})
		X = tf.placeholder(tf.float32, [len(x_test), FLAGS.D], name="X")
		y_test_tensor = tf.convert_to_tensor(y_test)
		MSE = ed.evaluate('mean_squared_error', data={X: x_test, NormalTest: y_test_tensor})
		MAE =ed.evaluate('mean_absolute_error', data={X: x_test, NormalTest: y_test_tensor})
		# PPC calculation
		PPCMean = ed.ppc(lambda xs, zs: tf.reduce_mean(xs[y_post]), data={y_post:  y_test, X:x_test}, latent_vars={W_0: qW_0, b_0: qb_0,W_1: qW_1, b_1: qb_1,W_2: qW_2, b_2: qb_2}, n_samples=5)
		# Change the graph again; this is needed for the epistemic uncertainty calculations
		posterior = ed.copy(NormalTest, dict_swap={W_0: qW_0.mean(), b_0: qb_0.mean(),W_1: qW_1.mean(), b_1: qb_1.mean(),W_2: qW_2.mean(), b_2: qb_2.mean()})
		Y_post1 = sess.run(posterior.sample(len(x_test)), feed_dict={X: x_test, posterior: y_test})
		mean_prob_over_samples=np.mean(Y_post1, axis=0) ## prediction means
		prediction_variances = np.apply_along_axis(predictive_entropy, axis=1, arr=mean_prob_over_samples)
		
		# Run analysis on test data, to see how many records were correct
		classes, actualClass, cor, firsts, seconds, thirds, fails, perCorrect = Analysis(inferedList, y_test)
		# Save the model through TF saver
		saver = tf.train.Saver()
		dir_path = os.path.dirname(os.path.realpath(__file__))
		save_path = saver.save(sess, dir_path +"/"+name+"/model.ckpt")
		print("Model saved in path: %s" % save_path)

		file = open(dir_path+"/"+name +"/"+name+".csv",'w')
		file.write("MSE = " + str(MSE))
		file.write("\nMAE = " + str(MAE))
		file.write("\nPPC mean = " + str(PPCMean))
		file.write("; Predicted First;Predicted Second; Predicted Third; Predicted Fail \n")
		classNames = ['First','Second', 'Third', 'Fail']
		for x in range(len(firsts)):
			file.write(classNames[x] + ";" + str(firsts[x]) + ";" + str(seconds[x])+ ";" + str(thirds[x])+ ";" + str(fails[x]) + "\n")
		file.write("Num;Class 1;Class 2;Class 3;Class 4;Epi;Predicted Class;Correct Class\n ")
		for x in range(len(inferedList)):
			line = str(x) 
			for i in range(len(inferedList[x])):
				line += ";" + str(round(inferedList[x][i],2))
			line += ";" + str(round(prediction_variances[x],2)) + ";" + str(classes[x]+1) + ";" + str(actualClass[x]+1) + "\n"
			file.write(line) 
		file.close()

		return perCorrect
Example #16
from edward.models import Bernoulli, Beta

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(a=1.0, b=1.0)
x = Bernoulli(p=tf.ones(10) * p)

# INFERENCE
qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

data = {x: x_data}
inference = ed.MFVI({p: qp}, data)
inference.run(n_iter=500)

# CRITICISM
x_post = ed.copy(x, {p: qp})


def T(xs, zs):
  return tf.reduce_mean(tf.cast(xs[x_post], tf.float32))


print(ed.ppc(T, data={x_post: x_data}))
Example #17
    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
        log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
        return log_lik + log_prior

    def sample_prior(self):
        """p ~ p(p)"""
        return {'p': beta.sample(a=1.0, b=1.0)}

    def sample_likelihood(self, zs):
        """x | p ~ p(x | p)"""
        return {'x': bernoulli.sample(p=tf.ones(10) * zs['p'])}


ed.set_seed(42)
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

model = BetaBernoulli()

qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

inference = ed.KLqp({'p': qp}, data, model)
inference.run(n_iter=200)

print(
    ed.ppc(lambda xs, zs: tf.reduce_mean(tf.cast(xs['x'], tf.float32)),
           data,
           model_wrapper=model))
Example #18
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(1.0, 1.0)
x = Bernoulli(tf.ones(10) * p)

# INFERENCE
qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(qp_a, qp_b)

inference = ed.KLqp({p: qp}, data={x: x_data})
inference.run(n_iter=500)

# CRITICISM
x_post = ed.copy(x, {p: qp})

print(ed.ppc(lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32)),
             data={x_post: x_data}))
Example #19
class BetaBernoulli:
    """p(x, p) = Bernoulli(x | p) * Beta(p | 1, 1)"""
    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
        log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
        return log_lik + log_prior

    def sample_likelihood(self, zs):
        """x | p ~ p(x | p)"""
        return {'x': bernoulli.sample(p=tf.ones(10) * zs['p'])}


ed.set_seed(42)
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

model = BetaBernoulli()

qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

inference = ed.KLqp({'p': qp}, data, model)
inference.run(n_iter=200)

print(
    ed.ppc(lambda xs, zs: tf.reduce_mean(tf.cast(xs['x'], tf.float32)),
           data,
           latent_vars={'p': qp},
           model_wrapper=model))
Example #20
    """
    def __init__(self):
        self.num_vars = 1

    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack(
            [tf.reduce_sum(bernoulli.logpmf(xs, z)) for z in tf.unpack(zs)])
        return log_lik + log_prior

    def sample_likelihood(self, zs, size):
        """x | z ~ p(x | z)"""
        out = np.zeros((zs.shape[0], size))
        for s in range(zs.shape[0]):
            out[s, :] = bernoulli.rvs(zs[s, :], size=size)

        return out


ed.set_seed(42)
model = BetaBernoulli()
variational = Variational()
variational.add(Beta(model.num_vars))
data = ed.Data(tf.constant((0, 1, 0, 0, 0, 0, 0, 0, 0, 1), dtype=tf.float32))

inference = ed.MFVI(model, variational, data)
sess = inference.run(n_iter=200)

T = lambda y, z=None: tf.reduce_mean(y)
print(ed.ppc(model, variational, data, T, sess=sess))
Example #21
class BetaBernoulli:
  """p(x, p) = Bernoulli(x | p) * Beta(p | 1, 1)"""
  def log_prob(self, xs, zs):
    log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
    log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
    return log_lik + log_prior

  def sample_likelihood(self, zs):
    """x | p ~ p(x | p)"""
    return {'x': bernoulli.sample(p=tf.ones(10) * zs['p'])}


def T(xs, zs):
  return tf.reduce_mean(tf.cast(xs['x'], tf.float32))


ed.set_seed(42)
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

model = BetaBernoulli()

qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

inference = ed.KLqp({'p': qp}, data, model)
inference.run(n_iter=200)

print(ed.ppc(T, data, latent_vars={'p': qp}, model_wrapper=model))
Example #22
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(1.0, 1.0)
x = Bernoulli(probs=p, sample_shape=10)

# INFERENCE
qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(qp_a, qp_b)

inference = ed.KLqp({p: qp}, data={x: x_data})
inference.run(n_iter=500)

# CRITICISM
x_post = ed.copy(x, {p: qp})

print(ed.ppc(lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32)),
             data={x_post: x_data}))
Example #23
from edward.models import Bernoulli, Beta

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(a=1.0, b=1.0)
x = Bernoulli(p=tf.ones(10) * p)

# INFERENCE
qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

data = {x: x_data}
inference = ed.KLqp({p: qp}, data)
inference.run(n_iter=500)

# CRITICISM
x_post = ed.copy(x, {p: qp})


def T(xs, zs):
  return tf.reduce_mean(tf.cast(xs[x_post], tf.float32))


print(ed.ppc(T, data={x_post: x_data}))
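ed.ppc returns a pair of arrays, the replicated test statistics and the realized ones, each with n_samples elements. A small sketch of turning them into a posterior predictive p-value; this summary is an illustration, not part of the original example:

Ty_rep, Ty = ed.ppc(T, data={x_post: x_data}, n_samples=100)
# Empirical tail probability: fraction of replicated statistics at least as
# large as the realized statistic.
p_value = np.mean(np.array(Ty_rep) >= np.array(Ty))
print(p_value)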
Example #24
    def __init__(self):
        self.n_vars = 1

    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack([
            tf.reduce_sum(bernoulli.logpmf(xs['x'], z)) for z in tf.unpack(zs)
        ])
        return log_lik + log_prior

    def sample_likelihood(self, zs, n):
        """x | z ~ p(x | z)"""
        out = []
        for s in range(zs.shape[0]):
            out += [{'x': bernoulli.rvs(zs[s, :], size=n).reshape((n, ))}]

        return out


ed.set_seed(42)
model = BetaBernoulli()
variational = Variational()
variational.add(Beta(model.n_vars))
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=200)

T = lambda x, z=None: tf.reduce_mean(tf.cast(x['x'], tf.float32))
print(ed.ppc(model, variational, data, T))
Example #25
class BetaBernoulli:
    """p(x, p) = Bernoulli(x | p) * Beta(p | 1, 1)"""
    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs['p'], a=1.0, b=1.0)
        log_lik = tf.reduce_sum(bernoulli.logpmf(xs['x'], p=zs['p']))
        return log_lik + log_prior

    def sample_likelihood(self, zs):
        """x | p ~ p(x | p)"""
        return {'x': bernoulli.sample(p=tf.ones(10) * zs['p'])}


def T(xs, zs):
    return tf.reduce_mean(tf.cast(xs['x'], tf.float32))


ed.set_seed(42)
data = {'x': np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])}

model = BetaBernoulli()

qp_a = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp_b = tf.nn.softplus(tf.Variable(tf.random_normal([])))
qp = Beta(a=qp_a, b=qp_b)

inference = ed.MFVI({'p': qp}, data, model)
inference.run(n_iter=200)

print(ed.ppc(T, data, latent_vars={'p': qp}, model_wrapper=model))
Example #26
    p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)
    """
    def __init__(self):
        self.num_vars = 1

    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z))
                           for z in tf.unpack(zs)])
        return log_lik + log_prior

    def sample_likelihood(self, zs, size):
        """x | z ~ p(x | z)"""
        out = np.zeros((zs.shape[0], size))
        for s in range(zs.shape[0]):
            out[s,:] = bernoulli.rvs(zs[s,:], size=size)

        return out

ed.set_seed(42)
model = BetaBernoulli()
variational = Variational()
variational.add(Beta(model.num_vars))
data = ed.Data(tf.constant((0, 1, 0, 0, 0, 0, 0, 0, 0, 1), dtype=tf.float32))

inference = ed.MFVI(model, variational, data)
sess = inference.run(n_iter=200)

T = lambda y, z=None: tf.reduce_mean(y)
print(ed.ppc(model, variational, data, T, sess=sess))
Example #27
import edward as ed
import tensorflow as tf

# Point-estimate evaluation
x_post = ed.copy(x, {z: qz})  # qz is the learned posterior for z; x's dependence on z is replaced by x_post's dependence on qz
# y_post is the posterior predictive distribution over y; y_train is the observed data
ed.evaluate('categorical_accuracy', data={y_post: y_train, x: x_train})
ed.evaluate('mean_absolute_error', data={y_post: y_train, x: x_train})
# Likelihood evaluation
ed.evaluate('log_likelihood', data={x_post: x_train})
# Splitting into training and validation sets
from edward.models import Categorical
qz_test = Categorical(logits=tf.Variable(tf.zeros([N_test, K])))
inference_test = ed.Inference({z: qz_test}, data={x: x_test, beta: qbeta})  # substitute a concrete algorithm, e.g. ed.KLqp
inference_test.run()  # inference on the held-out set is finished
x_post = ed.copy(x, {z: qz_test, beta: qbeta})
ed.evaluate('log_likelihood', data={x_post: x_valid})  # x_valid is the data used to measure held-out performance

# Posterior predictive check (PPC)
x_post = ed.copy(x, {z: qz})
ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x_post]), data={x_post: x_train})
ed.ppc(lambda xs, zs: tf.reduce_max(zs[z]),
       data={y_post: y_train, x_ph: x_train},
       latent_vars={z: qz, beta: qbeta})  # explicitly specify the posterior latent variables
Example #28
    p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)
    """
    def __init__(self):
        self.num_vars = 1

    def log_prob(self, xs, zs):
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z))
                           for z in tf.unpack(zs)])
        return log_lik + log_prior

    def sample_likelihood(self, zs, size):
        """x | z ~ p(x | z)"""
        out = np.zeros((zs.shape[0], size))
        for s in range(zs.shape[0]):
            out[s,:] = bernoulli.rvs(zs[s,:], size=size).reshape((size,))

        return out

ed.set_seed(42)
model = BetaBernoulli()
variational = Variational()
variational.add(Beta(model.num_vars))
data = ed.Data(tf.constant((0, 1, 0, 0, 0, 0, 0, 0, 0, 1), dtype=tf.float32))

inference = ed.MFVI(model, variational, data)
inference.run(n_iter=200)

T = lambda y, z=None: tf.reduce_mean(y)
print(ed.ppc(model, variational, data, T))