def test_hmc_custom(self):
        with self.test_session() as sess:
            x = TransformedDistribution(
                distribution=Normal(1.0, 1.0),
                bijector=tf.contrib.distributions.bijectors.Softplus())
            x.support = 'nonnegative'
            qx = Empirical(tf.Variable(tf.random_normal([1000])))

            inference = ed.HMC({x: qx})
            inference.initialize(auto_transform=True, step_size=0.8)
            tf.global_variables_initializer().run()
            for _ in range(inference.n_iter):
                info_dict = inference.update()

            # Check approximation on constrained space has same moments as
            # target distribution.
            n_samples = 10000
            x_unconstrained = inference.transformations[x]
            qx_constrained = Empirical(
                x_unconstrained.bijector.inverse(qx.params))
            x_mean, x_var = tf.nn.moments(x.sample(n_samples), 0)
            qx_mean, qx_var = tf.nn.moments(qx_constrained.params[500:], 0)
            stats = sess.run([x_mean, qx_mean, x_var, qx_var])
            self.assertAllClose(stats[0], stats[1], rtol=1e-1, atol=1e-1)
            self.assertAllClose(stats[2], stats[3], rtol=1e-1, atol=1e-1)
Example #2
  def _test_normal_normal(self, default, dtype):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=tf.constant(0.0, dtype=dtype),
                  scale=tf.constant(1.0, dtype=dtype))
      x = Normal(loc=mu, scale=tf.constant(1.0, dtype=dtype),
                 sample_shape=50)

      n_samples = 2000
      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
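      # (Normal-Normal conjugacy: posterior precision = prior precision 1 +
      # 50 observations * likelihood precision 1 = 51; the posterior mean is 0
      # since every observation equals 0.)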
      if not default:
        qmu = Empirical(params=tf.Variable(tf.ones(n_samples, dtype=dtype)))
        inference = ed.HMC({mu: qmu}, data={x: x_data})
      else:
        inference = ed.HMC([mu], data={x: x_data})
        qmu = inference.latent_vars[mu]
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-1, atol=1e-1)

      old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
      if not default:
        self.assertEqual(old_t, n_samples)
      else:
        self.assertEqual(old_t, 1e4)
      self.assertGreater(old_n_accept, 0.1)
      sess.run(inference.reset)
      new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
      self.assertEqual(new_t, 0)
      self.assertEqual(new_n_accept, 0)
Example #3
    def test_hmc(self):
        with self.test_session():
            N, D, W_1, W_2, W_3, b_1, b_2, X, y, X_train, y_train = \
                self._test()

            T = 1  # number of MCMC samples
            qW_1 = Empirical(params=tf.Variable(tf.random_normal([T, D, 20])))
            qW_2 = Empirical(params=tf.Variable(tf.random_normal([T, 20, 15])))
            qW_3 = Empirical(params=tf.Variable(tf.random_normal([T, 15, 1])))
            qb_1 = Empirical(params=tf.Variable(tf.random_normal([T, 20])))
            qb_2 = Empirical(params=tf.Variable(tf.random_normal([T, 15])))

            inference = ed.HMC(
                {
                    W_1: qW_1,
                    b_1: qb_1,
                    W_2: qW_2,
                    b_2: qb_2,
                    W_3: qW_3
                },
                data={
                    y: y_train,
                    X: X_train
                })
            inference.run()
Example #4
def main(_):
  # data
  J = 8
  data_y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
  data_sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18])

  # model definition
  mu = Normal(0., 10.)
  logtau = Normal(5., 1.)
  theta_prime = Normal(tf.zeros(J), tf.ones(J))
  sigma = tf.placeholder(tf.float32, J)
  y = Normal(mu + tf.exp(logtau) * theta_prime, sigma * tf.ones([J]))

  data = {y: data_y, sigma: data_sigma}

  # ed.KLqp inference
  with tf.variable_scope('q_logtau'):
    q_logtau = Normal(tf.get_variable('loc', []),
                      tf.nn.softplus(tf.get_variable('scale', [])))

  with tf.variable_scope('q_mu'):
    q_mu = Normal(tf.get_variable('loc', []),
                  tf.nn.softplus(tf.get_variable('scale', [])))

  with tf.variable_scope('q_theta_prime'):
    q_theta_prime = Normal(tf.get_variable('loc', [J]),
                           tf.nn.softplus(tf.get_variable('scale', [J])))

  inference = ed.KLqp({logtau: q_logtau, mu: q_mu,
                      theta_prime: q_theta_prime}, data=data)
  inference.run(n_samples=15, n_iter=60000)
  print("====  ed.KLqp inference ====")
  print("E[mu] = %f" % (q_mu.mean().eval()))
  print("E[logtau] = %f" % (q_logtau.mean().eval()))
  print("E[theta_prime]=")
  print((q_theta_prime.mean().eval()))
  print("====  end ed.KLqp inference ====")
  print("")
  print("")

  # HMC inference
  S = 400000
  burn = S // 2

  hq_logtau = Empirical(tf.get_variable('hq_logtau', [S]))
  hq_mu = Empirical(tf.get_variable('hq_mu', [S]))
  hq_theta_prime = Empirical(tf.get_variable('hq_thetaprime', [S, J]))

  inference = ed.HMC({logtau: hq_logtau, mu: hq_mu,
                     theta_prime: hq_theta_prime}, data=data)
  inference.run()

  print("====  ed.HMC inference ====")
  print("E[mu] = %f" % (hq_mu.params.eval()[burn:].mean()))
  print("E[logtau] = %f" % (hq_logtau.params.eval()[burn:].mean()))
  print("E[theta_prime]=")
  print(hq_theta_prime.params.eval()[burn:].mean(0))
  print("====  end ed.HMC inference ====")
  print("")
  print("")
Example #5
  def _test_normal_normal(self, default, dtype):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=tf.constant(0.0, dtype=dtype),
                  scale=tf.constant(1.0, dtype=dtype))
      x = Normal(loc=mu, scale=tf.constant(1.0, dtype=dtype),
                 sample_shape=50)

      n_samples = 2000
      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
      if not default:
        qmu = Empirical(params=tf.Variable(tf.ones(n_samples, dtype=dtype)))
        inference = ed.Gibbs({mu: qmu}, data={x: x_data})
      else:
        inference = ed.Gibbs([mu], data={x: x_data})
        qmu = inference.latent_vars[mu]
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-1, atol=1e-1)

      old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
      if not default:
        self.assertEqual(old_t, n_samples)
      else:
        self.assertEqual(old_t, 1e4)
      self.assertGreater(old_n_accept, 0.1)
      sess.run(inference.reset)
      new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
      self.assertEqual(new_t, 0)
      self.assertEqual(new_n_accept, 0)
Example #6
    def test_metropolis_hastings(self):
        N, D, W_1, W_2, W_3, b_1, b_2, x_ph, y, X_train, y_train = self._test()

        T = 1  # number of MCMC samples
        qW_1 = Empirical(params=tf.Variable(tf.random_normal([T, D, 20])))
        qW_2 = Empirical(params=tf.Variable(tf.random_normal([T, 20, 15])))
        qW_3 = Empirical(params=tf.Variable(tf.random_normal([T, 15, 1])))
        qb_1 = Empirical(params=tf.Variable(tf.random_normal([T, 20])))
        qb_2 = Empirical(params=tf.Variable(tf.random_normal([T, 15])))

        inference = ed.MetropolisHastings(
            {
                W_1: qW_1,
                b_1: qb_1,
                W_2: qW_2,
                b_2: qb_2,
                W_3: qW_3
            }, {
                W_1: W_1,
                b_1: b_1,
                W_2: W_2,
                b_2: b_2,
                W_3: W_3
            },
            data={
                y: y_train,
                x_ph: X_train
            })
        inference.run()
Example #7
def main(_):
  ed.set_seed(42)

  # MODEL
  z = MultivariateNormalTriL(
      loc=tf.ones(2),
      scale_tril=tf.cholesky(tf.constant([[1.0, 0.8], [0.8, 1.0]])))

  # INFERENCE
  qz = Empirical(params=tf.get_variable("qz/params", [1000, 2]))

  inference = ed.HMC({z: qz})
  inference.run()

  # CRITICISM
  sess = ed.get_session()
  mean, stddev = sess.run([qz.mean(), qz.stddev()])
  print("Inferred posterior mean:")
  print(mean)
  print("Inferred posterior stddev:")
  print(stddev)

  fig, ax = plt.subplots()
  trace = sess.run(qz.params)
  ax.scatter(trace[:, 0], trace[:, 1], marker=".")
  mvn_plot_contours(z, ax=ax)
  plt.show()
Example #8
    def test_normalnormal_float32(self):
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float32)

            mu = Normal(loc=0.0, scale=1.0)
            x = Normal(loc=mu, scale=1.0, sample_shape=50)

            n_samples = 2000
            qmu = Empirical(params=tf.Variable(tf.ones(n_samples)))

            # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
            inference = ed.MetropolisHastings({mu: qmu}, {mu: mu},
                                              data={x: x_data})
            inference.run()

            self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
            self.assertAllClose(qmu.stddev().eval(),
                                np.sqrt(1 / 51),
                                rtol=1e-1,
                                atol=1e-1)

            old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
            self.assertEqual(old_t, n_samples)
            self.assertGreater(old_n_accept, 0.1)
            sess.run(inference.reset)
            new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
            self.assertEqual(new_t, 0)
            self.assertEqual(new_n_accept, 0)
Example #9
def main(_):
  ed.set_seed(42)

  # DATA
  x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

  # MODEL
  p = Beta(1.0, 1.0)
  x = Bernoulli(probs=p, sample_shape=10)

  # INFERENCE
  qp = Empirical(params=tf.get_variable(
      "qp/params", [1000], initializer=tf.constant_initializer(0.5)))

  proposal_p = Beta(3.0, 9.0)

  inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
  inference.run()

  # CRITICISM
  # exact posterior has mean 0.25 and std 0.12
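  # (Beta(1, 1) prior with 2 successes and 8 failures gives a Beta(3, 9)
  # posterior: mean = 3 / 12 = 0.25, std = sqrt(3 * 9 / (12 ** 2 * 13)) ~= 0.12.)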
  sess = ed.get_session()
  mean, stddev = sess.run([qp.mean(), qp.stddev()])
  print("Inferred posterior mean:")
  print(mean)
  print("Inferred posterior stddev:")
  print(stddev)

  x_post = ed.copy(x, {p: qp})
  tx_rep, tx = ed.ppc(
      lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32)),
      data={x_post: x_data})
  ed.ppc_stat_hist_plot(
      tx[0], tx_rep, stat_name=r'$T \equiv$mean', bins=10)
  plt.show()
Example #10
  def test_normalnormal_run(self):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=0.0, scale=1.0)
      x = Normal(loc=tf.ones(50) * mu, scale=1.0)

      n_samples = 2000
      qmu = Empirical(params=tf.Variable(tf.ones(n_samples)))
      proposal_mu = Normal(loc=0.0, scale=1.0)

      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
      inference = ed.MetropolisHastings({mu: qmu},
                                        {mu: proposal_mu},
                                        data={x: x_data})
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-1, atol=1e-1)

      old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
      self.assertEqual(old_t, n_samples)
      self.assertGreater(old_n_accept, 0.1)
      sess.run(inference.reset)
      new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
      self.assertEqual(new_t, 0)
      self.assertEqual(new_n_accept, 0)
Example #11
 def collapsed_gibbs(self, wordIds, S, T):
     K = self.K
     V = self.V
     D = self.D
     N = self.N
     latent_vars = {}
     training_data = {}
     qbeta = Empirical(tf.Variable(tf.zeros([S, K, V]) + 0.01))
     latent_vars[self.beta] = qbeta
     qtheta = [None] * D
     qz = [None] * D
     for d in range(D):
         qtheta[d] = Empirical(tf.Variable(tf.zeros([S, K]) + 0.1))
         latent_vars[self.theta[d]] = qtheta[d]
         qz[d] = Empirical(tf.Variable(tf.zeros([S, N[d]], dtype=tf.int32)))
         latent_vars[self.z[d]] = qz[d]
         training_data[self.w[d]] = wordIds[d]
     self.latent_vars = latent_vars
     proposal_vars = {}
     proposal_vars[self.beta] = ed.complete_conditional(self.beta)
     cond_set = set(self.w + self.z)
     for d in range(D):
         proposal_vars[self.theta[d]] = \
             ed.complete_conditional(self.theta[d])
         proposal_vars[self.z[d]] = \
             ed.complete_conditional(self.z[d], cond_set)
     self.inference = ed.Gibbs(latent_vars, proposal_vars, training_data)
     print("collapsed gibbs setup finished")
     self.inference.initialize(n_iter=T, n_print=1)
     print("initialize finished")
     self.__run_inference__(T)
     self.qbeta_sample = qbeta.eval()
Example #12
def main(_):
  ed.set_seed(42)

  # DATA
  x_data = build_toy_dataset(FLAGS.N)

  # MODEL
  pi = Dirichlet(concentration=tf.ones(FLAGS.K))
  mu = Normal(0.0, 1.0, sample_shape=[FLAGS.K, FLAGS.D])
  sigma = InverseGamma(concentration=1.0, rate=1.0,
                       sample_shape=[FLAGS.K, FLAGS.D])
  c = Categorical(logits=tf.log(pi) - tf.log(1.0 - pi), sample_shape=FLAGS.N)
  x = Normal(loc=tf.gather(mu, c), scale=tf.gather(sigma, c))

  # INFERENCE
  qpi = Empirical(params=tf.get_variable(
      "qpi/params",
      [FLAGS.T, FLAGS.K],
      initializer=tf.constant_initializer(1.0 / FLAGS.K)))
  qmu = Empirical(params=tf.get_variable("qmu/params",
                                         [FLAGS.T, FLAGS.K, FLAGS.D],
                                         initializer=tf.zeros_initializer()))
  qsigma = Empirical(params=tf.get_variable("qsigma/params",
                                            [FLAGS.T, FLAGS.K, FLAGS.D],
                                            initializer=tf.ones_initializer()))
  qc = Empirical(params=tf.get_variable("qc/params",
                                        [FLAGS.T, FLAGS.N],
                                        initializer=tf.zeros_initializer(),
                                        dtype=tf.int32))

  gpi = Dirichlet(concentration=tf.constant([1.4, 1.6]))
  gmu = Normal(loc=tf.constant([[1.0, 1.0], [-1.0, -1.0]]),
               scale=tf.constant([[0.5, 0.5], [0.5, 0.5]]))
  gsigma = InverseGamma(concentration=tf.constant([[1.1, 1.1], [1.1, 1.1]]),
                        rate=tf.constant([[1.0, 1.0], [1.0, 1.0]]))
  gc = Categorical(logits=tf.zeros([FLAGS.N, FLAGS.K]))

  inference = ed.MetropolisHastings(
      latent_vars={pi: qpi, mu: qmu, sigma: qsigma, c: qc},
      proposal_vars={pi: gpi, mu: gmu, sigma: gsigma, c: gc},
      data={x: x_data})

  inference.initialize()

  sess = ed.get_session()
  tf.global_variables_initializer().run()

  for _ in range(inference.n_iter):
    info_dict = inference.update()
    inference.print_progress(info_dict)

    t = info_dict['t']
    if t == 1 or t % inference.n_print == 0:
      qpi_mean, qmu_mean = sess.run([qpi.mean(), qmu.mean()])
      print("")
      print("Inferred membership probabilities:")
      print(qpi_mean)
      print("Inferred cluster means:")
      print(qmu_mean)
Example #13
  def _test_linear_regression(self, default, dtype):
    def build_toy_dataset(N, w, noise_std=0.1):
      D = len(w)
      x = np.random.randn(N, D)
      y = np.dot(x, w) + np.random.normal(0, noise_std, size=N)
      return x, y

    with self.test_session() as sess:
      N = 40  # number of data points
      D = 10  # number of features

      w_true = np.random.randn(D)
      X_train, y_train = build_toy_dataset(N, w_true)
      X_test, y_test = build_toy_dataset(N, w_true)

      X = tf.placeholder(dtype, [N, D])
      w = Normal(loc=tf.zeros(D, dtype=dtype), scale=tf.ones(D, dtype=dtype))
      b = Normal(loc=tf.zeros(1, dtype=dtype), scale=tf.ones(1, dtype=dtype))
      y = Normal(loc=ed.dot(X, w) + b, scale=0.1 * tf.ones(N, dtype=dtype))

      proposal_w = Normal(loc=w, scale=0.5 * tf.ones(D, dtype=dtype))
      proposal_b = Normal(loc=b, scale=0.5 * tf.ones(1, dtype=dtype))

      n_samples = 2000
      if not default:
        qw = Empirical(tf.Variable(tf.zeros([n_samples, D], dtype=dtype)))
        qb = Empirical(tf.Variable(tf.zeros([n_samples, 1], dtype=dtype)))
        inference = ed.ReplicaExchangeMC(
            {w: qw, b: qb}, {w: proposal_w, b: proposal_b},
            data={X: X_train, y: y_train})
      else:
        inference = ed.ReplicaExchangeMC(
            [w, b], {w: proposal_w, b: proposal_b},
            data={X: X_train, y: y_train})
        qw = inference.latent_vars[w]
        qb = inference.latent_vars[b]
      inference.run()

      self.assertAllClose(qw.mean().eval(), w_true, rtol=5e-1, atol=5e-1)
      self.assertAllClose(qb.mean().eval(), [0.0], rtol=5e-1, atol=5e-1)

      old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
      if not default:
        self.assertEqual(old_t, n_samples)
      else:
        self.assertEqual(old_t, 1e4)
      self.assertGreater(old_n_accept, 0.1)
      sess.run(inference.reset)
      new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
      self.assertEqual(new_t, 0)
      self.assertEqual(new_n_accept, 0)
Example #14
def ed_graph_2(disc=1):
	# Priors
	if str(sys.argv[4]) == 'laplace':
		W_0 = Laplace(loc=tf.zeros([D, n_hidden]), scale=(std**2/D)*tf.ones([D, n_hidden]))
		W_1 = Laplace(loc=tf.zeros([n_hidden, K]), scale=(std**2/n_hidden)*tf.ones([n_hidden, K]))
		b_0 = Laplace(loc=tf.zeros(n_hidden), scale=(std**2/D)*tf.ones(n_hidden))
		b_1 = Laplace(loc=tf.zeros(K), scale=(std**2/n_hidden)*tf.ones(K))

	if str(sys.argv[4]) == 'normal':
		W_0 = Normal(loc=tf.zeros([D, n_hidden]), scale=std*D**(-.5)*tf.ones([D, n_hidden]))
		W_1 = Normal(loc=tf.zeros([n_hidden, K]), scale=std*n_hidden**(-.5)*tf.ones([n_hidden, K]))
		b_0 = Normal(loc=tf.zeros(n_hidden), scale=std*D**(-.5)*tf.ones(n_hidden))
		b_1 = Normal(loc=tf.zeros(K), scale=std*n_hidden**(-.5)*tf.ones(K))

	if str(sys.argv[4]) == 'T':
		W_0 = StudentT(df=df*tf.ones([D, n_hidden]), loc=tf.zeros([D, n_hidden]), scale=std**2/D*tf.ones([D, n_hidden]))
		W_1 = StudentT(df=df*tf.ones([n_hidden, K]), loc=tf.zeros([n_hidden, K]), scale=std**2/n_hidden*tf.ones([n_hidden, K]))
		b_0 = StudentT(df=df*tf.ones([n_hidden]), loc=tf.zeros(n_hidden), scale=std**2/D*tf.ones(n_hidden))
		b_1 = StudentT(df=df*tf.ones([K]), loc=tf.zeros(K), scale=std**2/n_hidden*tf.ones(K))

	x = tf.placeholder(tf.float32, [None, None])
	y = Categorical(logits=nn(x, W_0, b_0, W_1, b_1))
	# We use a placeholder for the labels in anticipation of the training data.
	y_ph = tf.placeholder(tf.int32, [None])

	# Use a placeholder for the pre-trained posteriors
	w0 = tf.placeholder(tf.float32, [n_samp, D, n_hidden])
	w1 = tf.placeholder(tf.float32, [n_samp, n_hidden, K])
	b0 = tf.placeholder(tf.float32, [n_samp, n_hidden])
	b1 = tf.placeholder(tf.float32, [n_samp, K])

	# Empirical distribution 
	qW_0 = Empirical(params=tf.Variable(w0))
	qW_1 = Empirical(params=tf.Variable(w1))
	qb_0 = Empirical(params=tf.Variable(b0))
	qb_1 = Empirical(params=tf.Variable(b1))
	
	if str(sys.argv[3]) == 'hmc':	
		inference = ed.HMC({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1}, data={y: y_ph})
	if str(sys.argv[3]) == 'sghmc':	
		inference = ed.SGHMC({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1}, data={y: y_ph})

	# Initialise the inference variables
	if str(sys.argv[3]) == 'hmc':
		inference.initialize(step_size = disc*leap_size, n_steps = step_no, n_print=100)
	if str(sys.argv[3]) == 'sghmc':
		inference.initialize(step_size = disc*leap_size, friction=disc**2*0.1, n_print=100)
	
	return ((x, y), y_ph, W_0, b_0, W_1, b_1, qW_0, qb_0, qW_1, qb_1, inference,
		w0, w1, b0, b1)
Example #15
  def test_normalnormal_run(self):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=0.0, scale=1.0)
      x = Normal(loc=tf.ones(50) * mu, scale=1.0)

      qmu = Empirical(params=tf.Variable(tf.ones(2000)))

      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
      inference = ed.HMC({mu: qmu}, data={x: x_data})
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-2, atol=1e-2)
Example #16
  def test_normalnormal_run(self):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(mu=0.0, sigma=1.0)
      x = Normal(mu=tf.ones(50) * mu, sigma=1.0)

      qmu = Empirical(params=tf.Variable(tf.ones(5000)))

      # analytic solution: N(mu=0.0, sigma=\sqrt{1/51}=0.140)
      inference = ed.SGLD({mu: qmu}, data={x: x_data})
      inference.run(step_size=0.2)

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
      self.assertAllClose(qmu.std().eval(), np.sqrt(1 / 51),
                          rtol=5e-2, atol=5e-2)
Example #17
  def __init__(self, latent_vars=None, data=None):
    """Create an inference algorithm.

    Args:
      latent_vars: list or dict, optional.
        Collection of random variables (of type `RandomVariable` or
        `tf.Tensor`) to perform inference on. If list, each random
        variable will be approximated using an `Empirical` random
        variable that is defined internally (with unconstrained
        support). If dictionary, each value in the dictionary must be an
        `Empirical` random variable.
      data: dict, optional.
        Data dictionary which binds observed variables (of type
        `RandomVariable` or `tf.Tensor`) to their realizations (of
        type `tf.Tensor`). It can also bind placeholders (of type
        `tf.Tensor`) used in the model to their realizations.
    """
    if isinstance(latent_vars, list):
      with tf.variable_scope(None, default_name="posterior"):
        latent_vars = {z: Empirical(params=tf.Variable(tf.zeros(
            [1e4] + z.batch_shape.concatenate(z.event_shape).as_list())))
            for z in latent_vars}
    elif isinstance(latent_vars, dict):
      for qz in six.itervalues(latent_vars):
        if not isinstance(qz, Empirical):
          raise TypeError("Posterior approximation must consist of only "
                          "Empirical random variables.")
        elif len(qz.sample_shape) != 0:
          raise ValueError("Empirical posterior approximations must have "
                           "a scalar sample shape.")

    super(MonteCarlo, self).__init__(latent_vars, data)
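# A minimal sketch of the two accepted forms of `latent_vars` described above,
# using ed.HMC as a concrete MonteCarlo subclass; the Normal-Normal model is
# illustrative and mirrors the tests earlier in this listing.
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Empirical, Normal

x_data = np.array([0.0] * 50, dtype=np.float32)
mu = Normal(loc=0.0, scale=1.0)
x = Normal(loc=mu, scale=1.0, sample_shape=50)

# dict form: every value must itself be an Empirical random variable.
qmu = Empirical(params=tf.Variable(tf.zeros(1000)))
inference = ed.HMC({mu: qmu}, data={x: x_data})

# list form: an Empirical holding 1e4 samples is created internally.
inference = ed.HMC([mu], data={x: x_data})
qmu = inference.latent_vars[mu]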
Example #18
  def __init__(self, latent_vars, proposal_vars, data=None,
               inverse_temperatures=np.logspace(0, -2, 5), exchange_freq=0.1):
    """Create an inference algorithm.

    Args:
      proposal_vars: dict of RandomVariable to RandomVariable.
        Collection of random variables to perform inference on; each is
        bound to a proposal distribution $g(z' \mid z)$.
      inverse_temperatures: list of inverse temperatures.
      exchange_freq: frequency of exchanging replicas.
    """
    check_latent_vars(proposal_vars)
    self.proposal_vars = proposal_vars

    self.n_replica = len(inverse_temperatures)
    if inverse_temperatures[0] != 1:
      raise ValueError("inverse_temperatures[0] must be 1.")
    self.inverse_temperatures = [tf.convert_to_tensor(inverse_temperature,
                                 dtype=list(latent_vars.values())[0].dtype)
                                 for inverse_temperature in
                                 inverse_temperatures]

    # Make replica.
    self.replica_vars = []
    for inverse_temperature in self.inverse_temperatures:
      self.replica_vars.append({z: Empirical(params=tf.Variable(tf.zeros(
          qz.params.shape, dtype=latent_vars[z].dtype))) for z, qz in
          six.iteritems(latent_vars)})

    self.exchange_freq = exchange_freq

    super(ReplicaExchangeMC, self).__init__(latent_vars, data)
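# A minimal usage sketch for ReplicaExchangeMC, mirroring the linear-regression
# test in Example #13 above; the random-walk proposal is illustrative, and the
# inverse_temperatures and exchange_freq values shown are simply the defaults
# from the signature above.
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Empirical, Normal

N, D, n_samples = 40, 10, 2000
X_train = np.random.randn(N, D).astype(np.float32)
y_train = X_train.dot(np.random.randn(D)).astype(np.float32)

X = tf.placeholder(tf.float32, [N, D])
w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
y = Normal(loc=ed.dot(X, w), scale=0.1 * tf.ones(N))

proposal_w = Normal(loc=w, scale=0.5 * tf.ones(D))  # random walk centered at the current state
qw = Empirical(tf.Variable(tf.zeros([n_samples, D])))

inference = ed.ReplicaExchangeMC(
    {w: qw}, {w: proposal_w}, data={X: X_train, y: y_train},
    inverse_temperatures=np.logspace(0, -2, 5),  # first entry must be 1 (the target chain)
    exchange_freq=0.1)                           # frequency of exchanging replicas
inference.run()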
Example #19
  def test_data_tensor(self):
    with self.test_session() as sess:
      x_data = tf.zeros(50)

      mu = Normal(0.0, 1.0)
      x = Normal(mu, 1.0, sample_shape=50)

      qmu = Empirical(tf.Variable(tf.ones(1000)))

      # analytic solution: N(mu=0.0, sigma=\sqrt{1/51}=0.140)
      inference = ed.Gibbs({mu: qmu}, data={x: x_data})
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-2, atol=1e-2)
Example #20
    def init_posterior(self, positive=True, empirical=True, n_samples=1000):

        if empirical:
            if positive:
                self.posterior = Empirical(params=tf.nn.softplus(
                    tf.Variable(tf.random_normal([n_samples]))))
            else:
                self.posterior = Empirical(
                    params=tf.Variable(tf.random_normal([
                        n_samples,
                    ])))

        else:
            if positive:
                self.posterior = Normal(
                    loc=tf.Variable(tf.random_normal([1])),
                    scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
Example #21
    def test_normalnormal_float32(self):
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float32)

            mu = Normal(loc=0.0, scale=1.0)
            x = Normal(loc=mu, scale=1.0, sample_shape=50)

            qmu = Empirical(params=tf.Variable(tf.ones(2000)))

            # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
            inference = ed.HMC({mu: qmu}, data={x: x_data})
            inference.run()

            self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
            self.assertAllClose(qmu.stddev().eval(),
                                np.sqrt(1 / 51),
                                rtol=1e-2,
                                atol=1e-2)
Example #22
  def test_beta_bernoulli(self):
    with self.test_session() as sess:
      x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

      p = Beta(1.0, 1.0)
      x = Bernoulli(probs=p, sample_shape=10)

      qp = Empirical(tf.Variable(tf.zeros(1000)))
      inference = ed.Gibbs({p: qp}, data={x: x_data})
      inference.run()

      true_posterior = Beta(3.0, 9.0)

      val_est, val_true = sess.run([qp.mean(), true_posterior.mean()])
      self.assertAllClose(val_est, val_true, rtol=1e-2, atol=1e-2)

      val_est, val_true = sess.run([qp.variance(), true_posterior.variance()])
      self.assertAllClose(val_est, val_true, rtol=1e-2, atol=1e-2)
Example #23
    def __init__(self, latent_vars=None, empirical_vars=None, **kwargs):
        """__init__
    Parameters
    ----------
    latent_vars : list or dict, optional
      Latent variables of the probabilistic model to approximate with
      diagonal Gaussians. If a dict, the given variables are used as the
      approximations.
    empirical_vars : dict, optional
      Variables holding the current SGLD state, which act as the model
      parameters of the evaluated model during sampling. Unlike standard
      SGLD, only the last state is kept (rather than the full chain) to
      save memory.
    """
        if isinstance(latent_vars, list):
            with tf.variable_scope("posterior"):
                latent_vars = {
                    rv:
                    Normal(mu=tf.Variable(tf.zeros(rv.get_batch_shape())),
                           sigma=tf.Variable(tf.zeros(rv.get_batch_shape())))
                    for rv in latent_vars
                }
        elif isinstance(latent_vars, dict):
            for qz in six.itervalues(latent_vars):
                if not (isinstance(qz, Normal)
                        or isinstance(qz, NormalWithSoftplusSigma)):
                    raise TypeError(
                        "Posterior approximation must consist of only "
                        "Normal random variables.")
        if isinstance(empirical_vars, dict):
            for ez in six.itervalues(empirical_vars):
                if not (isinstance(ez, tf.Variable)):
                    raise TypeError("Empirical vals must be tf.Variables.")
            empirical_vals = empirical_vars
        else:
            with tf.variable_scope("empirical"):
                empirical_vals = {
                    rv: tf.Variable(
                        tf.random_normal(rv.get_batch_shape().as_list(),
                                         stddev=0.1))
                    for rv, _ in six.iteritems(latent_vars)
                }
        with tf.variable_scope("empirical"):
            empiricals = {
                rv: Empirical(params=tf.expand_dims(k, 0))
                for rv, k in six.iteritems(empirical_vals)
            }

        # Build variables needed for online calculation of mean and variance
        with tf.variable_scope("deltas"):
            deltas = {
                rv: tf.Variable(tf.zeros(rv.get_batch_shape().as_list()))
                for rv, _ in six.iteritems(latent_vars)
            }
        self.empirical_vals = empirical_vals
        self.approximations = latent_vars
        self.deltas = deltas
        self.update_iters = tf.Variable(tf.constant(1, tf.int32))
        super(VariationalGaussAdam, self).__init__(latent_vars=empiricals,
                                                   **kwargs)
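# A heavily hedged usage sketch for the VariationalGaussAdam class above. It
# assumes the class subclasses an SGLD-style Edward inference (so it accepts
# the usual `data` keyword and exposes `run()`); the model below is purely
# illustrative and uses the same old-style Normal(mu=..., sigma=...) API as
# the class itself.
import tensorflow as tf
from edward.models import Normal

w = Normal(mu=tf.zeros(5), sigma=tf.ones(5))
x = Normal(mu=w, sigma=tf.ones(5))
x_data = tf.zeros(5)

# list form: diagonal-Gaussian approximations and the empirical state
# variables are created internally, as in the constructor above.
inference = VariationalGaussAdam([w], data={x: x_data})
inference.run()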
Example #24
    def test_indexedslices(self):
        """Test that gradients accumulate when tf.gradients doesn't return
    tf.Tensor (IndexedSlices)."""
        with self.test_session() as sess:
            N = 10  # number of data points
            K = 2  # number of clusters
            T = 1  # number of MCMC samples

            x_data = np.zeros(N, dtype=np.float32)

            mu = Normal(0.0, 1.0, sample_shape=K)
            c = Categorical(logits=tf.zeros(N))
            x = Normal(tf.gather(mu, c), tf.ones(N))

            qmu = Empirical(params=tf.Variable(tf.ones([T, K])))
            qc = Empirical(params=tf.Variable(tf.ones([T, N])))

            inference = ed.HMC({mu: qmu}, data={x: x_data})
            inference.initialize()
Example #25
def main(_):
  ed.set_seed(42)

  # MODEL
  z = MultivariateNormalTriL(
      loc=tf.ones(2),
      scale_tril=tf.cholesky(tf.constant([[1.0, 0.8], [0.8, 1.0]])))

  # INFERENCE
  qz = Empirical(params=tf.get_variable("qz/params", [2000, 2]))

  inference = ed.SGLD({z: qz})
  inference.run(step_size=5.0)

  # CRITICISM
  sess = ed.get_session()
  mean, stddev = sess.run([qz.mean(), qz.stddev()])
  print("Inferred posterior mean:")
  print(mean)
  print("Inferred posterior stddev:")
  print(stddev)
Example #26
def main(_):
    ed.set_seed(42)

    # MODEL
    z = MultivariateNormalTriL(loc=tf.ones(2),
                               scale_tril=tf.cholesky(
                                   tf.constant([[1.0, 0.8], [0.8, 1.0]])))

    # INFERENCE
    qz = Empirical(params=tf.get_variable("qz/params", [2000, 2]))

    inference = ed.SGLD({z: qz})
    inference.run(step_size=5.0)

    # CRITICISM
    sess = ed.get_session()
    mean, stddev = sess.run([qz.mean(), qz.stddev()])
    print("Inferred posterior mean:")
    print(mean)
    print("Inferred posterior stddev:")
    print(stddev)
Example #27
def main(_):
  # DATA
  trait_true = np.random.normal(size=[FLAGS.nsubj, 1])
  thresh_true = np.random.normal(size=[1, FLAGS.nitem])
  X_data = np.random.binomial(1, expit(trait_true - thresh_true))

  # MODEL
  trait = Normal(loc=0.0, scale=1.0, sample_shape=[FLAGS.nsubj, 1])
  thresh = Normal(loc=0.0, scale=1.0, sample_shape=[1, FLAGS.nitem])
  X = Bernoulli(logits=trait - thresh)

  # INFERENCE
  q_trait = Empirical(params=tf.get_variable("q_trait/params",
                                             [FLAGS.T, FLAGS.nsubj, 1]))
  q_thresh = Empirical(params=tf.get_variable("q_thresh/params",
                                              [FLAGS.T, 1, FLAGS.nitem]))

  inference = ed.HMC({trait: q_trait, thresh: q_thresh}, data={X: X_data})
  inference.run(step_size=0.1)

  # Alternatively, use variational inference.
  # q_trait = Normal(
  #     loc=tf.get_variable("q_trait/loc", [FLAGS.nsubj, 1]),
  #     scale=tf.nn.softplus(
  #         tf.get_variable("q_trait/scale", [FLAGS.nsubj, 1])))
  # q_thresh = Normal(
  #     loc=tf.get_variable("q_thresh/loc", [1, FLAGS.nitem]),
  #     scale=tf.nn.softplus(
  #         tf.get_variable("q_thresh/scale", [1, FLAGS.nitem])))

  # inference = ed.KLqp({trait: q_trait, thresh: q_thresh}, data={X: X_data})
  # inference.run(n_iter=2500, n_samples=10)

  # CRITICISM
  # Check that the inferred posterior mean captures the true traits.
  plt.scatter(trait_true, q_trait.mean().eval())
  plt.show()

  print("MSE between true traits and inferred posterior mean:")
  print(np.mean(np.square(trait_true - q_trait.mean().eval())))
Example #28
 def __init__(self, num_data, num_cluster, vector_dim, num_mcmc_sample):
     self.K = num_cluster
     self.D = vector_dim
     self.N = num_data
     self.pi = Dirichlet(tf.ones(self.K))
     self.mu = Normal(tf.zeros(self.D),
                      tf.ones(self.D),
                      sample_shape=self.K)
     self.sigmasq = InverseGamma(tf.ones(self.D),
                                 tf.ones(self.D),
                                 sample_shape=self.K)
     self.x = ParamMixture(self.pi, {
         'loc': self.mu,
         'scale_diag': tf.sqrt(self.sigmasq)
     },
                           MultivariateNormalDiag,
                           sample_shape=self.N)
     self.z = self.x.cat
     self.T = num_mcmc_sample  # number of MCMC samples
     self.qpi = Empirical(tf.Variable(tf.ones([self.T, self.K]) / self.K))
     self.qmu = Empirical(tf.Variable(tf.zeros([self.T, self.K, self.D])))
     self.qsigmasq = Empirical(
         tf.Variable(tf.ones([self.T, self.K, self.D])))
     self.qz = Empirical(
         tf.Variable(tf.zeros([self.T, self.N], dtype=tf.int32)))
Example #29
  def test_normalnormal_float32(self):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=tf.constant(0.0, dtype=tf.float64),
                  scale=tf.constant(1.0, dtype=tf.float64))
      x = Normal(loc=mu,
                 scale=tf.constant(1.0, dtype=tf.float64),
                 sample_shape=50)

      n_samples = 2000
      qmu = Empirical(params=tf.Variable(tf.ones(n_samples, dtype=tf.float64)))

      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
      inference = ed.MetropolisHastings({mu: qmu},
                                        {mu: mu},
                                        data={x: x_data})
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-1, atol=1e-1)
Example #30
 def gibbs(self, wordIds, S, T):
     K = self.K
     V = self.V
     D = self.D
     N = self.N
     latent_vars = {}
     training_data = {}
     qbeta = Empirical(tf.Variable(tf.zeros([S, K, V]) + 0.01))
     latent_vars[self.beta] = qbeta
     qtheta = [None] * D
     qz = [None] * D
     for d in range(D):
         qtheta[d] = Empirical(tf.Variable(tf.zeros([S, K]) + 0.1))
         latent_vars[self.theta[d]] = qtheta[d]
         qz[d] = Empirical(tf.Variable(tf.zeros([S, N[d]], dtype=tf.int32)))
         latent_vars[self.z[d]] = qz[d]
         training_data[self.w[d]] = wordIds[d]
     self.latent_vars = latent_vars
     self.inference = ed.Gibbs(latent_vars, data=training_data)
     print("gibbs setup finished")
     self.inference.initialize(n_iter=T, n_print=1)
     self.__run_inference__(T)
     self.qbeta_sample = qbeta.eval()
Example #31
def main(_):
  ed.set_seed(42)

  # DATA
  x_data = np.array([0.0] * 50)

  # MODEL: Normal-Normal with known variance
  mu = Normal(loc=0.0, scale=1.0)
  x = Normal(loc=mu, scale=1.0, sample_shape=50)

  # INFERENCE
  qmu = Empirical(params=tf.get_variable("qmu/params", [1000],
                                         initializer=tf.zeros_initializer()))

  # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
  inference = ed.HMC({mu: qmu}, data={x: x_data})
  inference.run()

  # CRITICISM
  sess = ed.get_session()
  mean, stddev = sess.run([qmu.mean(), qmu.stddev()])
  print("Inferred posterior mean:")
  print(mean)
  print("Inferred posterior stddev:")
  print(stddev)

  # Check convergence with visual diagnostics.
  samples = sess.run(qmu.params)

  # Plot histogram.
  plt.hist(samples, bins='auto')
  plt.show()

  # Trace plot.
  plt.plot(samples)
  plt.show()
Example #32
def main(_):
  # Data generation (known mean)
  xn_data = np.random.normal(FLAGS.loc, FLAGS.scale, FLAGS.N)
  print("scale: {}".format(FLAGS.scale))

  # Prior definition
  alpha = 0.5
  beta = 0.7

  # Posterior inference
  # Probabilistic model
  ig = InverseGamma(alpha, beta)
  xn = Normal(FLAGS.loc, tf.sqrt(ig), sample_shape=FLAGS.N)

  # Inference
  qig = Empirical(params=tf.get_variable(
      "qig/params", [1000], initializer=tf.constant_initializer(0.5)))
  proposal_ig = InverseGamma(2.0, 2.0)
  inference = ed.MetropolisHastings({ig: qig},
                                    {ig: proposal_ig}, data={xn: xn_data})
  inference.run()

  sess = ed.get_session()
  print("Inferred scale: {}".format(sess.run(tf.sqrt(qig.mean()))))
Example #33
def main(_):
    # Data generation (known mean)
    xn_data = np.random.normal(FLAGS.loc, FLAGS.scale, FLAGS.N)
    print("scale: {}".format(FLAGS.scale))

    # Prior definition
    alpha = 0.5
    beta = 0.7

    # Posterior inference
    # Probabilistic model
    ig = InverseGamma(alpha, beta)
    xn = Normal(FLAGS.loc, tf.sqrt(ig), sample_shape=FLAGS.N)

    # Inference
    qig = Empirical(params=tf.get_variable(
        "qig/params", [1000], initializer=tf.constant_initializer(0.5)))
    proposal_ig = InverseGamma(2.0, 2.0)
    inference = ed.MetropolisHastings({ig: qig}, {ig: proposal_ig},
                                      data={xn: xn_data})
    inference.run()

    sess = ed.get_session()
    print("Inferred scale: {}".format(sess.run(tf.sqrt(qig.mean()))))
Example #34
    def _test_linear_regression(self, default, dtype):
        def build_toy_dataset(N, w, noise_std=0.1):
            D = len(w)
            x = np.random.randn(N, D)
            y = np.dot(x, w) + np.random.normal(0, noise_std, size=N)
            return x, y

        with self.test_session() as sess:
            N = 40  # number of data points
            D = 10  # number of features

            w_true = np.random.randn(D)
            X_train, y_train = build_toy_dataset(N, w_true)
            X_test, y_test = build_toy_dataset(N, w_true)

            X = tf.placeholder(dtype, [N, D])
            w = Normal(loc=tf.zeros(D, dtype=dtype),
                       scale=tf.ones(D, dtype=dtype))
            b = Normal(loc=tf.zeros(1, dtype=dtype),
                       scale=tf.ones(1, dtype=dtype))
            y = Normal(loc=ed.dot(X, w) + b,
                       scale=0.1 * tf.ones(N, dtype=dtype))

            n_samples = 2000
            if not default:
                qw = Empirical(
                    tf.Variable(tf.zeros([n_samples, D], dtype=dtype)))
                qb = Empirical(
                    tf.Variable(tf.zeros([n_samples, 1], dtype=dtype)))
                inference = ed.SGLD({w: qw, b: qb},
                                    data={X: X_train, y: y_train})
            else:
                inference = ed.SGLD([w, b], data={X: X_train, y: y_train})
                qw = inference.latent_vars[w]
                qb = inference.latent_vars[b]
            inference.run(step_size=0.001)

            self.assertAllClose(qw.mean().eval(), w_true, rtol=5e-1, atol=5e-1)
            self.assertAllClose(qb.mean().eval(), [0.0], rtol=5e-1, atol=5e-1)

            old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
            if not default:
                self.assertEqual(old_t, n_samples)
            else:
                self.assertEqual(old_t, 1e4)
            self.assertGreater(old_n_accept, 0.1)
            sess.run(inference.reset)
            new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
            self.assertEqual(new_t, 0)
            self.assertEqual(new_n_accept, 0)
Example #35
class Emp:
    """A very simple wrapper around the Empirical class of Edward. Once
    the model is built with some samples, one can then draw additional
    samples from it through the evaluate method.

    """
    def __init__(self, model_id):
        self.model_id = model_id        

    def build(self, Y):
        self.var = Empirical(Y)
        return self

    def getVar(self):
        return self.var

    def evaluate(self, num_samples):
        return self.var.sample([num_samples]).eval(session=ed.get_session())
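# A minimal usage sketch for the Emp wrapper above; the sample values and
# model_id are illustrative.
import numpy as np
import tensorflow as tf

samples = tf.constant(np.random.randn(500).astype(np.float32))  # e.g. a stored trace
emp = Emp(model_id="weight_0").build(samples)
draws = emp.evaluate(num_samples=100)  # draws 100 resamples from the empirical distribution
print(draws.shape)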
Example #36
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import tensorflow as tf

from edward.models import Empirical, MultivariateNormalFull

ed.set_seed(42)

# MODEL
z = MultivariateNormalFull(
    mu=tf.ones(2),
    sigma=tf.constant([[1.0, 0.8], [0.8, 1.0]]))

# INFERENCE
qz = Empirical(params=tf.Variable(tf.random_normal([2000, 2])))

inference = ed.SGLD({z: qz})
inference.run(step_size=5.0)

# CRITICISM
sess = ed.get_session()
mean, std = sess.run([qz.mean(), qz.std()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior std:")
print(std)
Example #37
import edward as ed
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Normal, Empirical
from scipy.special import expit

# DATA
nsubj = 200
nitem = 25
trait_true = np.random.normal(size=[nsubj, 1])
thresh_true = np.random.normal(size=[1, nitem])
X_data = np.random.binomial(1, expit(trait_true - thresh_true))

# MODEL
trait = Normal(loc=tf.zeros([nsubj, 1]), scale=tf.ones([nsubj, 1]))
thresh = Normal(loc=tf.zeros([1, nitem]), scale=tf.ones([1, nitem]))
X = Bernoulli(logits=trait - thresh)

# INFERENCE
T = 5000  # number of posterior samples
q_trait = Empirical(params=tf.Variable(tf.zeros([T, nsubj, 1])))
q_thresh = Empirical(params=tf.Variable(tf.zeros([T, 1, nitem])))

inference = ed.HMC({trait: q_trait, thresh: q_thresh}, data={X: X_data})
inference.run(step_size=0.1)

# CRITICISM
# Check that the inferred posterior mean captures the true traits.
plt.scatter(trait_true, q_trait.mean().eval())
plt.show()

print("MSE between true traits and inferred posterior mean:")
print(np.mean(np.square(trait_true - q_trait.mean().eval())))
Example #38
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Empirical, Normal

ed.set_seed(42)

# DATA
x_data = np.array([0.0] * 50, dtype=np.float32)

# MODEL: Normal-Normal with known variance
mu = Normal(mu=0.0, sigma=1.0)
x = Normal(mu=tf.ones(50) * mu, sigma=1.0)

# INFERENCE
qmu = Empirical(params=tf.Variable(tf.zeros([1000])))

proposal_mu = Normal(mu=0.0, sigma=tf.sqrt(1.0 / 51.0))

# analytic solution: N(mu=0.0, sigma=\sqrt{1/51}=0.140)
inference = ed.MetropolisHastings({mu: qmu}, {mu: proposal_mu},
                                  data={x: x_data})
inference.run()

# CRITICISM
# Check convergence with visual diagnostics.
sess = ed.get_session()
mean, std = sess.run([qmu.mean(), qmu.std()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior std:")
Example #39
 def _test(self, params, n):
   x = Empirical(params=params)
   val_est = x.sample(n).shape.as_list()
   val_true = n + tf.convert_to_tensor(params).shape.as_list()[1:]
   self.assertEqual(val_est, val_true)
Example #40
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta, Empirical

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(1.0, 1.0)
x = Bernoulli(tf.ones(10) * p)

# INFERENCE
qp = Empirical(params=tf.Variable(tf.zeros([1000]) + 0.5))

proposal_p = Beta(3.0, 9.0)

inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
inference.run()

# CRITICISM
# exact posterior has mean 0.25 and std 0.12
sess = ed.get_session()
mean, stddev = sess.run([qp.mean(), qp.stddev()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior stddev:")
print(stddev)
Example #41
def main(_):
  ed.set_seed(42)

  # DATA
  X_train, y_train = build_toy_dataset(FLAGS.N)
  X_test, y_test = build_toy_dataset(FLAGS.N)

  # MODEL
  X = tf.placeholder(tf.float32, [FLAGS.N, FLAGS.D])
  w = Normal(loc=tf.zeros(FLAGS.D), scale=tf.ones(FLAGS.D))
  b = Normal(loc=tf.zeros(1), scale=tf.ones(1))
  y = Normal(loc=ed.dot(X, w) + b, scale=tf.ones(FLAGS.N))

  # INFERENCE
  qw = Empirical(params=tf.get_variable("qw/params", [FLAGS.T, FLAGS.D]))
  qb = Empirical(params=tf.get_variable("qb/params", [FLAGS.T, 1]))

  inference = ed.SGHMC({w: qw, b: qb}, data={X: X_train, y: y_train})
  inference.run(step_size=1e-3)

  # CRITICISM

  # Plot posterior samples.
  sns.jointplot(qb.params.eval()[FLAGS.nburn:FLAGS.T:FLAGS.stride],
                qw.params.eval()[FLAGS.nburn:FLAGS.T:FLAGS.stride])
  plt.show()

  # Posterior predictive checks.
  y_post = ed.copy(y, {w: qw, b: qb})
  # This is equivalent to
  # y_post = Normal(loc=ed.dot(X, qw) + qb, scale=tf.ones(FLAGS.N))

  print("Mean squared error on test data:")
  print(ed.evaluate('mean_squared_error', data={X: X_test, y_post: y_test}))

  print("Displaying prior predictive samples.")
  n_prior_samples = 10

  w_prior = w.sample(n_prior_samples).eval()
  b_prior = b.sample(n_prior_samples).eval()

  plt.scatter(X_train, y_train)

  inputs = np.linspace(-1, 10, num=400)
  for ns in range(n_prior_samples):
      output = inputs * w_prior[ns] + b_prior[ns]
      plt.plot(inputs, output)

  plt.show()

  print("Displaying posterior predictive samples.")
  n_posterior_samples = 10

  w_post = qw.sample(n_posterior_samples).eval()
  b_post = qb.sample(n_posterior_samples).eval()

  plt.scatter(X_train, y_train)

  inputs = np.linspace(-1, 10, num=400)
  for ns in range(n_posterior_samples):
      output = inputs * w_post[ns] + b_post[ns]
      plt.plot(inputs, output)

  plt.show()
Example #42
# DATA
X_train, y_train = build_toy_dataset(N)
X_test, y_test = build_toy_dataset(N)

# MODEL
X = tf.placeholder(tf.float32, [N, D])
w = Normal(loc=tf.zeros(D), scale=tf.ones(D))
b = Normal(loc=tf.zeros(1), scale=tf.ones(1))
y = Normal(loc=ed.dot(X, w) + b, scale=tf.ones(N))

# INFERENCE
T = 5000                        # Number of samples.
nburn = 100                     # Number of burn-in samples.
stride = 10                    # Frequency with which to plot samples.
qw = Empirical(params=tf.Variable(tf.random_normal([T, D])))
qb = Empirical(params=tf.Variable(tf.random_normal([T, 1])))

inference = ed.SGHMC({w: qw, b: qb}, data={X: X_train, y: y_train})
inference.run(step_size=1e-3)


# CRITICISM

# Plot posterior samples.
sns.jointplot(qb.params.eval()[nburn:T:stride],
              qw.params.eval()[nburn:T:stride])
plt.show()

# Posterior predictive checks.
y_post = ed.copy(y, {w: qw, b: qb})
Example #43
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import tensorflow as tf

from edward.models import Empirical, MultivariateNormalTriL

ed.set_seed(42)

# MODEL
z = MultivariateNormalTriL(
    loc=tf.ones(2),
    scale_tril=tf.cholesky(tf.constant([[1.0, 0.8], [0.8, 1.0]])))

# INFERENCE
qz = Empirical(params=tf.Variable(tf.random_normal([2000, 2])))

inference = ed.SGLD({z: qz})
inference.run(step_size=5.0)

# CRITICISM
sess = ed.get_session()
mean, stddev = sess.run([qz.mean(), qz.stddev()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior stddev:")
print(stddev)
Example #44
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta, Empirical

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(a=1.0, b=1.0)
x = Bernoulli(p=tf.ones(10) * p)

# INFERENCE
qp = Empirical(params=tf.Variable(tf.zeros([1000]) + 0.5))

proposal_p = Beta(a=3.0, b=9.0)

data = {x: x_data}
inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data)
inference.run()

# CRITICISM
# exact posterior has mean 0.25 and std 0.12
sess = ed.get_session()
mean, std = sess.run([qp.mean(), qp.std()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior std:")
print(std)
Example #45
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import tensorflow as tf

from edward.models import Empirical, MultivariateNormalFull

ed.set_seed(42)

# MODEL
z = MultivariateNormalFull(
    mu=tf.ones(2),
    sigma=tf.constant([[1.0, 0.8], [0.8, 1.0]]))

# INFERENCE
qz = Empirical(params=tf.Variable(tf.random_normal([2000, 2])))

inference = ed.SGLD({z: qz})
inference.run(step_size=5.0)

# CRITICISM
sess = ed.get_session()
mean, std = sess.run([qz.mean(), qz.std()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior std:")
print(std)
Example #46
def _test(params, n):
  x = Empirical(params=params)
  val_est = get_dims(x.sample(n))
  val_true = n + get_dims(params)[1:]
  assert val_est == val_true
Example #47
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Empirical, Normal

ed.set_seed(42)

# DATA
x_data = np.array([0.0] * 50)

# MODEL: Normal-Normal with known variance
mu = Normal(loc=0.0, scale=1.0)
x = Normal(loc=tf.ones(50) * mu, scale=1.0)

# INFERENCE
qmu = Empirical(params=tf.Variable(tf.zeros([1000])))

proposal_mu = Normal(loc=0.0, scale=tf.sqrt(1.0 / 51.0))

# analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
inference = ed.MetropolisHastings({mu: qmu}, {mu: proposal_mu},
                                  data={x: x_data})
inference.run()

# CRITICISM
# Check convergence with visual diagnostics.
sess = ed.get_session()
mean, stddev = sess.run([qmu.mean(), qmu.stddev()])
print("Inferred posterior mean:")
print(mean)
print("Inferred posterior stddev:")
Example #48
N = 40  # number of data points
D = 1  # number of features

# DATA
X_train, y_train = build_toy_dataset(N)

# MODEL
X = tf.placeholder(tf.float32, [N, D])
w = Normal(loc=tf.zeros(D), scale=3.0 * tf.ones(D))
b = Normal(loc=tf.zeros([]), scale=3.0 * tf.ones([]))
y = Bernoulli(logits=ed.dot(X, w) + b)

# INFERENCE
T = 5000  # number of samples
qw = Empirical(params=tf.Variable(tf.random_normal([T, D])))
qb = Empirical(params=tf.Variable(tf.random_normal([T])))

inference = ed.HMC({w: qw, b: qb}, data={X: X_train, y: y_train})
inference.initialize(n_print=10, step_size=0.6)

tf.global_variables_initializer().run()

# Set up figure.
fig = plt.figure(figsize=(8, 8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)

# Build samples from inferred posterior.
n_samples = 50
Example #49
import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import InverseGamma, Normal, Empirical

N = 1000

# Data generation (known mean)
loc = 7.0
scale = 0.7
xn_data = np.random.normal(loc, scale, N)
print('scale={}'.format(scale))

# Prior definition
alpha = tf.Variable(0.5, trainable=False)
beta = tf.Variable(0.7, trainable=False)

# Posterior inference
# Probabilistic model
ig = InverseGamma(alpha, beta)
xn = Normal(loc, tf.ones([N]) * tf.sqrt(ig))

# Inference
qig = Empirical(params=tf.Variable(tf.zeros(1000) + 0.5))
proposal_ig = InverseGamma(2.0, 2.0)
inference = ed.MetropolisHastings({ig: qig},
                                  {ig: proposal_ig}, data={xn: xn_data})
inference.run()

sess = ed.get_session()
print('Inferred sigma={}'.format(sess.run(tf.sqrt(qig.mean()))))