Example #1
  def _test_normal_normal(self, default, dtype):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=tf.constant(0.0, dtype=dtype),
                  scale=tf.constant(1.0, dtype=dtype))
      x = Normal(loc=mu, scale=tf.constant(1.0, dtype=dtype),
                 sample_shape=50)

      n_samples = 2000
      # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
      if not default:
        qmu = Empirical(params=tf.Variable(tf.ones(n_samples, dtype=dtype)))
        inference = ed.Gibbs({mu: qmu}, data={x: x_data})
      else:
        inference = ed.Gibbs([mu], data={x: x_data})
        qmu = inference.latent_vars[mu]
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-1, atol=1e-1)

      old_t, old_n_accept = sess.run([inference.t, inference.n_accept])
      if not default:
        self.assertEqual(old_t, n_samples)
      else:
        self.assertEqual(old_t, 1e4)
      self.assertGreater(old_n_accept, 0.1)
      sess.run(inference.reset)
      new_t, new_n_accept = sess.run([inference.t, inference.n_accept])
      self.assertEqual(new_t, 0)
      self.assertEqual(new_n_accept, 0)
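
A minimal sketch (plain NumPy, not part of the test) of the conjugate normal-normal arithmetic behind the `# analytic solution` comment above:

import numpy as np

# Prior: mu ~ N(0, 1); likelihood: 50 observations x_i ~ N(mu, 1), all zero.
prior_precision = 1.0                 # 1 / prior_scale**2
likelihood_precision = 50 * 1.0       # n / likelihood_scale**2
posterior_precision = prior_precision + likelihood_precision            # 51
posterior_mean = (prior_precision * 0.0 + 50 * 0.0) / posterior_precision  # 0.0
posterior_scale = np.sqrt(1.0 / posterior_precision)                    # ~0.140
print(posterior_mean, posterior_scale)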
Example #2
 def collapsed_gibbs(self, wordIds, S, T):
     K = self.K
     V = self.V
     D = self.D
     N = self.N
     latent_vars = {}
     training_data = {}
     qbeta = Empirical(tf.Variable(tf.zeros([S, K, V]) + 0.01))
     latent_vars[self.beta] = qbeta
     qtheta = [None] * D
     qz = [None] * D
     for d in range(D):
         qtheta[d] = Empirical(tf.Variable(tf.zeros([S, K]) + 0.1))
         latent_vars[self.theta[d]] = qtheta[d]
         qz[d] = Empirical(tf.Variable(tf.zeros([S, N[d]], dtype=tf.int32)))
         latent_vars[self.z[d]] = qz[d]
         training_data[self.w[d]] = wordIds[d]
     self.latent_vars = latent_vars
     proposal_vars = {}
     proposal_vars[self.beta] = ed.complete_conditional(self.beta)
     cond_set = set(self.w + self.z)
     for d in range(D):
         proposal_vars[self.theta[d]] = \
             ed.complete_conditional(self.theta[d])
         proposal_vars[self.z[d]] = \
             ed.complete_conditional(self.z[d], cond_set)
     self.inference = ed.Gibbs(latent_vars, proposal_vars, training_data)
     print("collapsed gibbs setup finished")
     self.inference.initialize(n_iter=T, n_print=1)
     print("initialize finished")
     self.__run_inference__(T)
     self.qbeta_sample = qbeta.eval()
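
The `proposal_vars` passed above come from Edward's conjugacy analysis. A minimal, self-contained illustration of what `ed.complete_conditional` returns, using an assumed toy Beta-Bernoulli pair unrelated to the LDA class above:

import edward as ed
from edward.models import Bernoulli, Beta

# Toy conjugate pair, only to show the shape of the result.
pi = Beta(1.0, 1.0)
x = Bernoulli(probs=pi, sample_shape=10)

# Conjugacy analysis yields a Beta random variable whose parameters are
# functions of x; ed.Gibbs uses such conditionals as its proposal distributions.
pi_cond = ed.complete_conditional(pi)
print(pi_cond)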
Example #3
    def fit(self, x_train):
        self.inference = ed.Gibbs(
            {
                self.pi: self.qpi,
                self.mu: self.qmu,
                self.sigmasq: self.qsigmasq,
                self.z: self.qz
            },
            data={self.x: x_train})
        self.inference.initialize()

        sess = ed.get_session()

        tf.global_variables_initializer().run()

        t_ph = tf.placeholder(tf.int32, [])
        running_cluster_means = tf.reduce_mean(self.qmu.params[:t_ph], 0)

        for _ in range(self.inference.n_iter):
            info_dict = self.inference.update()
            self.inference.print_progress(info_dict)
            t = info_dict['t']
            if t % self.inference.n_print == 0:
                print("\nInferred cluster means:")
                print(sess.run(running_cluster_means, {t_ph: t - 1}))
Example #4
  def test_data_tensor(self):
    with self.test_session() as sess:
      x_data = tf.zeros(50)

      mu = Normal(0.0, 1.0)
      x = Normal(mu, 1.0, sample_shape=50)

      qmu = Empirical(tf.Variable(tf.ones(1000)))

      # analytic solution: N(mu=0.0, sigma=\sqrt{1/51}=0.140)
      inference = ed.Gibbs({mu: qmu}, data={x: x_data})
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-2, atol=1e-2)
Example #5
  def test_normal_normal(self):
    with self.test_session() as sess:
      x_data = np.array([0.0] * 50, dtype=np.float32)

      mu = Normal(loc=0.0, scale=1.0)
      x = Normal(loc=mu, scale=1.0, sample_shape=50)

      qmu = Empirical(params=tf.Variable(tf.ones(1000)))

      # analytic solution: N(mu=0.0, sigma=\sqrt{1/51}=0.140)
      inference = ed.Gibbs({mu: qmu}, data={x: x_data})
      inference.run()

      self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
      self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                          rtol=1e-2, atol=1e-2)
Example #6
  def test_beta_bernoulli(self):
    with self.test_session() as sess:
      x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

      p = Beta(1.0, 1.0)
      x = Bernoulli(probs=p, sample_shape=10)

      qp = Empirical(tf.Variable(tf.zeros(1000)))
      inference = ed.Gibbs({p: qp}, data={x: x_data})
      inference.run()

      true_posterior = Beta(3.0, 9.0)

      val_est, val_true = sess.run([qp.mean(), true_posterior.mean()])
      self.assertAllClose(val_est, val_true, rtol=1e-2, atol=1e-2)

      val_est, val_true = sess.run([qp.variance(), true_posterior.variance()])
      self.assertAllClose(val_est, val_true, rtol=1e-2, atol=1e-2)
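
A short sketch (NumPy only, not part of the test) of the Beta-Bernoulli conjugate update that yields `Beta(3.0, 9.0)` above: a Beta(1, 1) prior combined with the 2 ones and 8 zeros in `x_data`:

import numpy as np

x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
a = 1.0 + x_data.sum()              # concentration1 = 3.0
b = 1.0 + (1 - x_data).sum()        # concentration0 = 9.0
posterior_mean = a / (a + b)                               # 0.25
posterior_variance = a * b / ((a + b) ** 2 * (a + b + 1))  # ~0.0144
print(a, b, posterior_mean, posterior_variance)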
Example #7
 def gibbs(self, wordIds, S, T):
     K = self.K
     V = self.V
     D = self.D
     N = self.N
     latent_vars = {}
     training_data = {}
     qbeta = Empirical(tf.Variable(tf.zeros([S, K, V]) + 0.01))
     latent_vars[self.beta] = qbeta
     qtheta = [None] * D
     qz = [None] * D
     for d in range(D):
         qtheta[d] = Empirical(tf.Variable(tf.zeros([S, K]) + 0.1))
         latent_vars[self.theta[d]] = qtheta[d]
         qz[d] = Empirical(tf.Variable(tf.zeros([S, N[d]], dtype=tf.int32)))
         latent_vars[self.z[d]] = qz[d]
         training_data[self.w[d]] = wordIds[d]
     self.latent_vars = latent_vars
     self.inference = ed.Gibbs(latent_vars, data=training_data)
     print("gibbs setup finished")
     self.inference.initialize(n_iter=T, n_print=1)
     self.__run_inference__(T)
     self.qbeta_sample = qbeta.eval()
Example #8
qpi = Empirical(
    tf.get_variable("qpi/params", [T, K],
                    initializer=tf.constant_initializer(1.0 / K)))
qmu = Empirical(
    tf.get_variable("qmu/params", [T, K, D],
                    initializer=tf.zeros_initializer()))
qsigmasq = Empirical(
    tf.get_variable("qsigmasq/params", [T, K, D],
                    initializer=tf.ones_initializer()))
qz = Empirical(
    tf.get_variable("qz/params", [T, N],
                    initializer=tf.zeros_initializer(),
                    dtype=tf.int32))

inference = ed.Gibbs({
    pi: qpi,
    mu: qmu,
    sigmasq: qsigmasq,
    z: qz
},
                     data={x: x_train})
inference.initialize()

sess = ed.get_session()
tf.global_variables_initializer().run()

t_ph = tf.placeholder(tf.int32, [])
running_cluster_means = tf.reduce_mean(qmu.params[:t_ph], 0)

for _ in range(inference.n_iter):
    info_dict = inference.update()
    inference.print_progress(info_dict)
    t = info_dict['t']
Example #9
    qmu = Empirical(
        tf.get_variable("qmu/params", [T, K, D],
                        initializer=tf.zeros_initializer()))
    qsigma = Empirical(
        tf.get_variable("qsigma/params", [T, K, D],
                        initializer=tf.ones_initializer()))
    qz = Empirical(
        tf.get_variable("qz/params", [T, N],
                        initializer=tf.zeros_initializer(),
                        dtype=tf.int32))

print("Running Gibbs Sampling...")
Gibbs_inference_startTime = time.time()
current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
inference = ed.Gibbs({
    pi: qpi,
    mu: qmu,
    sigma: qsigma,
    z: qz
},
                     data={x: train_img})
print("Sampling Done.")

inference.initialize(n_print=200,
                     logdir='log/IMG={}_K={}_T={}'.format(img_no, K, T))
sess = ed.get_session()
tf.global_variables_initializer().run()
t_ph = tf.placeholder(tf.int32, [])
running_cluster_means = tf.reduce_mean(qmu.params[:t_ph], 0)

learning_curve = []
for _ in range(inference.n_iter):
    info_dict = inference.update()
Example #10
 def train(self,
           filename,
           total_batches=10,
           discrete_batch_iters=1000,
           continus_batch_iters=10000):
     sess = tf.Session()
     restorer = tf.train.import_meta_graph(filename, clear_devices=True)
     print("<meta graph imported>")
     [
         tf.add_to_collection(
             'd_pi_q',
             Empirical(tf.Variable(tf.zeros(tf.shape(var))),
                       name='Empirical_d_pi_q_' +
                       str.split(str.split(var.name, '/')[0], '_')[-2]))
         for var in tf.get_collection('d_pi')
     ]
     for var in tf.get_collection('c_w'):
         idx = str.split(str.split(var.name, '/')[0], '_')[-2]
         tf.add_to_collection(
             'c_w_q',
             Empirical(tf.Variable(tf.zeros(tf.shape(var))),
                       name='Empirical_c_w_q_' + idx))
         print(var.get_shape().as_list())
         tf.add_to_collection(
             'c_b_q',
             Empirical(tf.Variable(tf.zeros(
                 var.get_shape().as_list()[:-1])),
                       name='Empirical_c_b_q_' + idx))
         tf.add_to_collection(
             'c_sigma_q',
             Empirical(tf.Variable(tf.zeros([1])),
                       name='Empirical_c_sigma_q_' + idx))
     print("<variables collected>")
     variable_map = dict(
         zip(
             tf.get_collection('d') + tf.get_collection('c'),
             self.design_matrix[:,
                                tuple(np.arange(self.num_discrete_variables)
                                      )].flatten('F').tolist() +
             self.design_matrix[:, self.continus_variable_idxs].flatten(
                 'F').tolist()))
     discrete_prior_map = dict(
         zip(tf.get_collection('d_pi'), tf.get_collection('d_pi_q')))
     continus_prior_map = dict(
         zip(
             tf.get_collection('c_w') + tf.get_collection('c_b') +
             tf.get_collection('c_sigma'),
             tf.get_collection('c_w_q') + tf.get_collection('c_b_q') +
             tf.get_collection('c_sigma_q')))
     print("<running inference>")
      inference_d = ed.Gibbs(discrete_prior_map,
                             data=dict(list(variable_map.items()) +
                                       list(continus_prior_map.items())))
      inference_c = ed.HMC(continus_prior_map,
                           data=dict(list(variable_map.items()) +
                                     list(discrete_prior_map.items())))
     inference_d.initialize(n_iter=discrete_batch_iters)
     inference_c.initialize(n_iter=continus_batch_iters)
     sess.run(tf.global_variables_initializer())
     for _ in range(total_batches):
         for _ in range(inference_d.n_iter):
             info_dict = inference_d.update()
             inference_d.print_progress(info_dict)
         inference_d.n_iter += discrete_batch_iters
         inference_d.n_print = int(discrete_batch_iters / 10)
         inference_d.progbar = Progbar(inference_d.n_iter)
         for _ in range(inference_c.n_iter):
             info_dict = inference_c.update()
             inference_c.print_progress(info_dict)
         inference_c.n_iter += continus_batch_iters
         inference_c.n_print = int(continus_batch_iters / 10)
         inference_c.progbar = Progbar(inference_c.n_iter)
     inference_d.finalize()
     inference_c.finalize()
      filename = '.'.join(str.split(filename, '.')[:-1]) + '_trained_model'
     saver = tf.train.Saver()
     saver.save(sess, filename)
     tf.train.export_meta_graph(
         filename + '.meta',
         as_text=True,
         collection_list=['d_pi', 'd', 'c_w', 'c_b', 'c_sigma', 'c'])
Example #11
phi = Normal(loc=m, scale=s)
mu = tf.gather(phi, z)
x = Normal(loc=mu, scale=[1.0] * N)

print(x)
# INFERENCE
qpi = Dirichlet(tf.nn.softplus(tf.Variable(tf.random_normal([K]))))
qz_var = tf.Variable(tf.zeros((N, K)) + 0.1)
qz = Categorical(logits=qz_var)
qphi = Normal(loc=tf.Variable(tf.zeros((K,))), scale=[1.0] * K)
#

qpi = Empirical(tf.Variable(tf.ones([K]) / K))
qphi = Empirical(tf.Variable(tf.zeros([K])))
qz = Empirical(tf.Variable(tf.zeros([N, K])))

#qz = Empirical(tf.Variable(tf.zeros([N, K], dtype=tf.int32)))
#qz = PointMass(params=tf.Variable(tf.zeros([N], dtype=tf.int32)))

learning_rate = 0.0001
#inference = ed.KLqp({pi: qpi, phi: qphi, z: qz}, data={x: x_data})
#inference = ed.MAP({pi: qpi, phi: qphi}, data={x: x_data})
inference = ed.Gibbs({pi: qpi, z: qz, phi: qphi}, data={x: x_data})
#inference.run(n_iter=1500, n_samples=100,optimizer = tf.train.AdamOptimizer(learning_rate))

inference.run(n_iter=15000)

sess = ed.get_session()
print('Inferred phi={}'.format(sess.run(qphi.mean())))
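
Note that for `ed.Gibbs` the `Empirical` approximations normally carry an explicit leading dimension of retained samples, as in the other examples on this page. A hedged sketch of how `qpi`, `qphi`, and `qz` could be declared that way; `T` and the integer dtype for `qz` are assumptions (following the commented-out line above):

T = 5000  # number of Gibbs samples to keep (assumed)

qpi = Empirical(tf.Variable(tf.ones([T, K]) / K))
qphi = Empirical(tf.Variable(tf.zeros([T, K])))
qz = Empirical(tf.Variable(tf.zeros([T, N], dtype=tf.int32)))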

Example #12
    def __init__(self, n, xdim, n_mixtures=5, mc_samples=500):
        # Compute the shape dynamically from placeholders
        self.x_ph = tf.placeholder(tf.float32, [None, xdim])
        self.k = k = n_mixtures
        self.batch_size = n
        self.d = d = xdim
        self.sample_size = tf.placeholder(tf.int32, ())

        # Build the priors over membership probabilities and mixture parameters
        with tf.variable_scope("priors"):
            pi = Dirichlet(tf.ones(k))

            mu = Normal(tf.zeros(d), tf.ones(d), sample_shape=k)
            sigmasq = InverseGamma(tf.ones(d), tf.ones(d), sample_shape=k)

        # Build the conditional mixture model
        with tf.variable_scope("likelihood"):
            x = ParamMixture(pi, {'loc': mu, 'scale_diag': tf.sqrt(sigmasq)},
                             MultivariateNormalDiag,
                             sample_shape=n)
            z = x.cat

        # Build approximate posteriors as Empirical samples
        t = mc_samples
        with tf.variable_scope("posteriors_samples"):
            qpi = Empirical(tf.get_variable(
                "qpi/params", [t, k],
                initializer=tf.constant_initializer(1.0 / k)))
            qmu = Empirical(tf.get_variable(
                "qmu/params", [t, k, d],
                initializer=tf.zeros_initializer()))
            qsigmasq = Empirical(tf.get_variable(
                "qsigmasq/params", [t, k, d],
                initializer=tf.ones_initializer()))
            qz = Empirical(tf.get_variable(
                "qz/params", [t, n],
                initializer=tf.zeros_initializer(),
                dtype=tf.int32))

        # Build inference graph using Gibbs and conditionals
        with tf.variable_scope("inference"):
            self.inference = ed.Gibbs({
                pi: qpi,
                mu: qmu,
                sigmasq: qsigmasq,
                z: qz
            }, data={
                x: self.x_ph
            })
            self.inference.initialize()

        # Build predictive posterior graph by taking samples
        n_samples = self.sample_size
        with tf.variable_scope("posterior"):
            mu_smpl = qmu.sample(n_samples)  # shape: [n_samples, k, d]
            sigmasq_smpl = qsigmasq.sample(n_samples)

            x_post = Normal(
                loc=tf.ones((n, 1, 1, 1)) * mu_smpl,
                scale=tf.ones((n, 1, 1, 1)) * tf.sqrt(sigmasq_smpl)
            )
            # NOTE: x_ph has shape [n, d]
            x_broadcasted = tf.tile(
                tf.reshape(self.x_ph, (n, 1, 1, d)),
                (1, n_samples, k, 1)
            )

            x_ll = x_post.log_prob(x_broadcasted)
            x_ll = tf.reduce_sum(x_ll, axis=3)
            x_ll = tf.reduce_mean(x_ll, axis=1)

        self.sample_t_ph = tf.placeholder(tf.int32, ())
        self.eval_ops = {
            'generative_post': x_post,
            'qmu': qmu,
            'qsigmasq': qsigmasq,
            'post_running_mu': tf.reduce_mean(
                qmu.params[:self.sample_t_ph],
                axis=0
            ),
            'post_log_prob': x_ll
        }
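
A possible driver for the constructor above, shown only as a sketch: the class name `GMMGibbs`, the synthetic `x_train`, and the argument values are assumptions, not part of the original code.

import numpy as np
import tensorflow as tf
import edward as ed

# Hypothetical wrapper class name for the __init__ shown above.
n, xdim = 500, 2
x_train = np.random.randn(n, xdim).astype(np.float32)  # synthetic data

model = GMMGibbs(n, xdim, n_mixtures=3, mc_samples=200)

sess = ed.get_session()
tf.global_variables_initializer().run()

for _ in range(model.inference.n_iter):
    # data={x: self.x_ph} above means x_ph must be fed on every update.
    info_dict = model.inference.update(feed_dict={model.x_ph: x_train})
    model.inference.print_progress(info_dict)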