Example no. 1
 def collapsed_gibbs(self, wordIds, S, T):
     K = self.K
     V = self.V
     D = self.D
     N = self.N
     # Empirical approximations store S Gibbs samples per latent variable.
     latent_vars = {}
     training_data = {}
     qbeta = Empirical(tf.Variable(tf.zeros([S, K, V]) + 0.01))
     latent_vars[self.beta] = qbeta
     qtheta = [None] * D
     qz = [None] * D
     for d in range(D):
         qtheta[d] = Empirical(tf.Variable(tf.zeros([S, K]) + 0.1))
         latent_vars[self.theta[d]] = qtheta[d]
         qz[d] = Empirical(tf.Variable(tf.zeros([S, N[d]], dtype=tf.int32)))
         latent_vars[self.z[d]] = qz[d]
         training_data[self.w[d]] = wordIds[d]
     self.latent_vars = latent_vars
     # Analytic complete conditionals serve as the Gibbs proposal distributions.
     proposal_vars = {}
     proposal_vars[self.beta] = ed.complete_conditional(self.beta)
     # Condition each z[d] only on the words and the other topic assignments.
     cond_set = set(self.w + self.z)
     for d in range(D):
         proposal_vars[self.theta[d]] = \
             ed.complete_conditional(self.theta[d])
         proposal_vars[self.z[d]] = \
             ed.complete_conditional(self.z[d], cond_set)
     self.inference = ed.Gibbs(latent_vars, proposal_vars, training_data)
     print("collapsed gibbs setup finished")
     self.inference.initialize(n_iter=T, n_print=1)
     print("initialize finished")
     self.__run_inference__(T)
     self.qbeta_sample = qbeta.eval()
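For context, collapsed_gibbs assumes the enclosing class has already defined an LDA-style joint model on self.beta, self.theta, self.z, and self.w. A minimal sketch of such a model, written at module level for brevity; the sizes K, V, D, N and the hyperparameters alpha, eta below are illustrative assumptions, not values taken from the original class:

import tensorflow as tf
from edward.models import Categorical, Dirichlet

K, V, D = 5, 1000, 3    # topics, vocabulary size, documents (assumed)
N = [100, 80, 120]      # words per document (assumed)
alpha, eta = 0.1, 0.01  # Dirichlet hyperparameters (assumed)

beta = Dirichlet(tf.zeros([K, V]) + eta)  # topic-word distributions
theta = [Dirichlet(tf.zeros(K) + alpha) for _ in range(D)]  # per-document topic weights
z = [Categorical(probs=theta[d], sample_shape=N[d]) for d in range(D)]  # topic assignments
w = [Categorical(probs=tf.gather(beta, z[d])) for d in range(D)]  # observed word ids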
Example no. 2
    def test_mog(self):
        x_val = np.array([1.1, 1.2, 2.1, 4.4, 5.5, 7.3, 6.8], np.float32)
        z_val = np.array([0, 0, 0, 1, 1, 2, 2], np.int32)
        pi_val = np.array([0.2, 0.3, 0.5], np.float32)
        mu_val = np.array([1.0, 5.0, 7.0], np.float32)

        N = x_val.shape[0]
        K = z_val.max() + 1

        pi_alpha = 1.3 + np.zeros(K, dtype=np.float32)
        mu_sigma = 4.0
        sigmasq = 2.0**2

        pi = rvs.Dirichlet(pi_alpha)
        mu = rvs.Normal(0.0, mu_sigma, sample_shape=[K])

        x = rvs.ParamMixture(pi, {
            'loc': mu,
            'scale': tf.sqrt(sigmasq)
        },
                             rvs.Normal,
                             sample_shape=N)
        z = x.cat

        mu_cond = ed.complete_conditional(mu)
        pi_cond = ed.complete_conditional(pi)
        z_cond = ed.complete_conditional(z)

        with self.test_session() as sess:
            pi_cond_alpha, mu_cond_mu, mu_cond_sigma, z_cond_p = (sess.run(
                [
                    pi_cond.concentration, mu_cond.loc, mu_cond.scale,
                    z_cond.probs
                ], {
                    z: z_val,
                    x: x_val,
                    pi: pi_val,
                    mu: mu_val
                }))

        true_pi = pi_alpha + np.unique(z_val, return_counts=True)[1]
        self.assertAllClose(pi_cond_alpha, true_pi)
        for k in range(K):
            sigmasq_true = (1.0 / mu_sigma**2 + 1.0 / sigmasq *
                            (z_val == k).sum())**-1
            mu_true = sigmasq_true * (1.0 / sigmasq * x_val[z_val == k].sum())
            self.assertAllClose(np.sqrt(sigmasq_true), mu_cond_sigma[k])
            self.assertAllClose(mu_true, mu_cond_mu[k])
        true_log_p_z = np.log(pi_val) - 0.5 / sigmasq * (x_val[:, np.newaxis] -
                                                         mu_val)**2
        true_log_p_z -= true_log_p_z.max(1, keepdims=True)
        true_p_z = np.exp(true_log_p_z)
        true_p_z /= true_p_z.sum(1, keepdims=True)
        self.assertAllClose(z_cond_p, true_p_z)
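The assertions above encode the standard conjugate updates for a Gaussian mixture with known component variance sigma^2 and prior mu_k ~ N(0, sigma_mu^2); in LaTeX, with n_k the number of points assigned to component k:

\begin{aligned}
\pi \mid z &\sim \mathrm{Dirichlet}(\alpha_1 + n_1, \dots, \alpha_K + n_K), \\
\mu_k \mid x, z &\sim \mathcal{N}\Big(\hat\sigma_k^2 \sum_{n: z_n = k} x_n / \sigma^2,\ \hat\sigma_k^2\Big),
\qquad \hat\sigma_k^2 = \big(\sigma_\mu^{-2} + n_k\,\sigma^{-2}\big)^{-1}, \\
p(z_n = k \mid x_n, \pi, \mu) &\propto \pi_k \exp\!\big(-(x_n - \mu_k)^2 / (2\sigma^2)\big).
\end{aligned}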
Example no. 3
  def test_blanket_changes(self):
    pi = rvs.Dirichlet(tf.ones(3))
    mu = rvs.Normal(0.0, 1.0)
    z = rvs.Categorical(p=pi)

    pi1_cond = ed.complete_conditional(pi, [z, pi])
    pi2_cond = ed.complete_conditional(pi, [z, mu, pi])

    self.assertIsInstance(pi1_cond, rvs.Dirichlet)
    self.assertIsInstance(pi2_cond, rvs.Dirichlet)

    with self.test_session() as sess:
      alpha1_val, alpha2_val = sess.run([pi1_cond.alpha, pi2_cond.alpha])

    self.assertAllClose(alpha1_val, alpha2_val)
Example no. 4
def main(_):
    ed.set_seed(42)

    # DATA
    x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

    # MODEL
    p = Beta(1.0, 1.0)
    x = Bernoulli(probs=p, sample_shape=10)

    # COMPLETE CONDITIONAL
    p_cond = ed.complete_conditional(p)

    sess = ed.get_session()

    print('p(probs | x) type:', p_cond.parameters['name'])
    param_vals = sess.run(
        {
            key: val
            for key, val in six.iteritems(p_cond.parameters)
            if isinstance(val, tf.Tensor)
        }, {x: x_data})
    print('parameters:')
    for key, val in six.iteritems(param_vals):
        print('%s:\t%.3f' % (key, val))
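Since the Beta prior is conjugate to the Bernoulli likelihood, the printed parameters can be checked by hand; with two ones among the ten observations:

p(\text{probs} \mid x) = \mathrm{Beta}\big(1 + \textstyle\sum_n x_n,\ 1 + \sum_n (1 - x_n)\big) = \mathrm{Beta}(3, 9).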
Example no. 5
  def test_blanket_changes(self):
    pi = rvs.Dirichlet(tf.ones(3))
    mu = rvs.Normal(0.0, 1.0)
    z = rvs.Categorical(probs=pi)

    pi1_cond = ed.complete_conditional(pi, [z, pi])
    pi2_cond = ed.complete_conditional(pi, [z, mu, pi])

    self.assertIsInstance(pi1_cond, rvs.Dirichlet)
    self.assertIsInstance(pi2_cond, rvs.Dirichlet)

    with self.test_session() as sess:
      conc1_val, conc2_val = sess.run([pi1_cond.concentration,
                                       pi2_cond.concentration])

    self.assertAllClose(conc1_val, conc2_val)
Example no. 6
  def test_missing_blanket(self):
    N = 10
    z = rvs.Bernoulli(probs=0.75, sample_shape=N)
    z_cond = ed.complete_conditional(z)
    self.assertIsInstance(z_cond, rvs.Bernoulli)

    with self.test_session() as sess:
      p_val = sess.run(z_cond.probs)

    self.assertAllClose(p_val, 0.75 + np.zeros(N, np.float32))
Example no. 7
    def test_basic_bernoulli(self):
        N = 10
        z = rvs.Bernoulli(probs=0.75, sample_shape=N)
        z_cond = ed.complete_conditional(z, [z])
        self.assertIsInstance(z_cond, rvs.Bernoulli)

        with self.test_session() as sess:
            p_val = sess.run(z_cond.probs)

        self.assertAllClose(p_val, 0.75 + np.zeros(N, np.float32))
Example no. 8
  def test_missing_blanket(self):
    N = 10
    z = rvs.Bernoulli(p=0.75, sample_shape=N)
    z_cond = ed.complete_conditional(z)
    self.assertIsInstance(z_cond, rvs.Bernoulli)

    with self.test_session() as sess:
      p_val = sess.run(z_cond.p)

    self.assertAllClose(p_val, 0.75 + np.zeros(N, np.float32))
Example no. 9
  def test_mog(self):
    x_val = np.array([1.1, 1.2, 2.1, 4.4, 5.5, 7.3, 6.8], np.float32)
    z_val = np.array([0, 0, 0, 1, 1, 2, 2], np.int32)
    pi_val = np.array([0.2, 0.3, 0.5], np.float32)
    mu_val = np.array([1.0, 5.0, 7.0], np.float32)

    N = x_val.shape[0]
    K = z_val.max() + 1

    pi_alpha = 1.3 + np.zeros(K, dtype=np.float32)
    mu_sigma = 4.0
    sigmasq = 2.0**2

    pi = rvs.Dirichlet(pi_alpha)
    mu = rvs.Normal(0.0, mu_sigma, sample_shape=[K])

    x = rvs.ParamMixture(pi, {'loc': mu, 'scale': tf.sqrt(sigmasq)},
                         rvs.Normal, sample_shape=N)
    z = x.cat

    mu_cond = ed.complete_conditional(mu)
    pi_cond = ed.complete_conditional(pi)
    z_cond = ed.complete_conditional(z)

    with self.test_session() as sess:
      pi_cond_alpha, mu_cond_mu, mu_cond_sigma, z_cond_p = (
          sess.run([pi_cond.concentration, mu_cond.loc,
                    mu_cond.scale, z_cond.probs],
                   {z: z_val, x: x_val, pi: pi_val, mu: mu_val}))

    true_pi = pi_alpha + np.unique(z_val, return_counts=True)[1]
    self.assertAllClose(pi_cond_alpha, true_pi)
    for k in range(K):
      sigmasq_true = (1.0 / mu_sigma**2 + 1.0 / sigmasq * (z_val == k).sum())**-1
      mu_true = sigmasq_true * (1.0 / sigmasq * x_val[z_val == k].sum())
      self.assertAllClose(np.sqrt(sigmasq_true), mu_cond_sigma[k])
      self.assertAllClose(mu_true, mu_cond_mu[k])
    true_log_p_z = np.log(pi_val) - 0.5 / sigmasq * (x_val[:, np.newaxis] -
                                                     mu_val)**2
    true_log_p_z -= true_log_p_z.max(1, keepdims=True)
    true_p_z = np.exp(true_log_p_z)
    true_p_z /= true_p_z.sum(1, keepdims=True)
    self.assertAllClose(z_cond_p, true_p_z)
Example no. 10
  def test_dirichlet_multinomial(self):
    x_data = np.array([4, 3, 2, 1], np.int32)
    N = x_data.sum()
    D = x_data.shape[0]

    alpha = np.zeros(D).astype(np.float32) + 2.0

    theta = rvs.Dirichlet(alpha)
    x = rvs.Multinomial(total_count=tf.cast(N, tf.float32), probs=theta)

    theta_cond = ed.complete_conditional(theta, [theta, x])

    with self.test_session() as sess:
      alpha_val = sess.run(theta_cond.concentration, {x: x_data})

    self.assertAllClose(alpha_val, np.array([6.0, 5.0, 4.0, 3.0], np.float32))
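The expected concentration follows from Dirichlet-multinomial conjugacy, which adds the observed counts to the prior concentration:

\theta \mid x \sim \mathrm{Dirichlet}(\alpha + x) = \mathrm{Dirichlet}(2 + 4,\ 2 + 3,\ 2 + 2,\ 2 + 1).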
Example no. 11
  def test_dirichlet_categorical(self):
    x_data = np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3], np.int32)
    N = x_data.shape[0]
    D = x_data.max() + 1

    alpha = np.zeros(D).astype(np.float32) + 2.0

    theta = rvs.Dirichlet(alpha)
    x = rvs.Categorical(probs=theta, sample_shape=N)

    theta_cond = ed.complete_conditional(theta, [theta, x])

    with self.test_session() as sess:
      alpha_val = sess.run(theta_cond.concentration, {x: x_data})

    self.assertAllClose(alpha_val, np.array([6.0, 5.0, 4.0, 3.0], np.float32))
Example no. 12
  def test_dirichlet_categorical(self):
    x_data = np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3], np.int32)
    N = x_data.shape[0]
    D = x_data.max() + 1

    alpha = np.zeros(D).astype(np.float32) + 2.0

    theta = rvs.Dirichlet(alpha)
    x = rvs.Categorical(p=theta, sample_shape=N)

    theta_cond = ed.complete_conditional(theta, [theta, x])

    with self.test_session() as sess:
      alpha_val = sess.run(theta_cond.alpha, {x: x_data})

    self.assertAllClose(alpha_val, np.array([6.0, 5.0, 4.0, 3.0], np.float32))
Example no. 13
  def test_dirichlet_multinomial(self):
    x_data = np.array([4, 3, 2, 1], np.int32)
    N = x_data.sum()
    D = x_data.shape[0]

    alpha = np.zeros(D).astype(np.float32) + 2.0

    theta = rvs.Dirichlet(alpha)
    x = rvs.Multinomial(n=tf.cast(N, tf.float32), p=theta)

    theta_cond = ed.complete_conditional(theta, [theta, x])

    with self.test_session() as sess:
      alpha_val = sess.run(theta_cond.alpha, {x: x_data})

    self.assertAllClose(alpha_val, np.array([6.0, 5.0, 4.0, 3.0], np.float32))
Example no. 14
  def test_beta_bernoulli(self):
    x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

    a0 = 0.5
    b0 = 1.5
    pi = rvs.Beta(a=a0, b=b0)
    x = rvs.Bernoulli(p=pi, sample_shape=10)

    pi_cond = ed.complete_conditional(pi, [pi, x])

    self.assertIsInstance(pi_cond, rvs.Beta)

    with self.test_session() as sess:
      a_val, b_val = sess.run([pi_cond.a, pi_cond.b], {x: x_data})

    self.assertAllClose(a_val, a0 + x_data.sum())
    self.assertAllClose(b_val, b0 + (1 - x_data).sum())
Example no. 15
    def test_gamma_gamma(self):
        x_data = np.array([0.1, 0.5, 3.3, 2.7])

        alpha0 = 0.5
        beta0 = 1.75
        alpha_likelihood = 2.3
        beta = rvs.Gamma(alpha0, beta0)
        x = rvs.Gamma(alpha_likelihood, beta, sample_shape=4)

        beta_cond = ed.complete_conditional(beta, [beta, x])

        self.assertIsInstance(beta_cond, rvs.Gamma)

        with self.test_session() as sess:
            alpha_val, beta_val = sess.run(
                [beta_cond.concentration, beta_cond.rate], {x: x_data})
        self.assertAllClose(alpha_val, alpha0 + alpha_likelihood * len(x_data))
        self.assertAllClose(beta_val, beta0 + x_data.sum())
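The closed form being checked: for x_i ~ Gamma(alpha_lik, beta) with a Gamma(alpha_0, beta_0) prior on the rate,

\beta \mid x \sim \mathrm{Gamma}\big(\alpha_0 + n\,\alpha_{\mathrm{lik}},\ \beta_0 + \textstyle\sum_i x_i\big).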
Example no. 16
  def test_gamma_exponential(self):
    x_data = np.array([0.1, 0.5, 3.3, 2.7])

    alpha0 = 0.5
    beta0 = 1.75
    lam = rvs.Gamma(alpha=alpha0, beta=beta0)
    x = rvs.Exponential(lam=lam, sample_shape=4)

    lam_cond = ed.complete_conditional(lam, [lam, x])

    self.assertIsInstance(lam_cond, rvs.Gamma)

    with self.test_session() as sess:
      alpha_val, beta_val = sess.run(
          [lam_cond.alpha, lam_cond.beta], {x: x_data})

    self.assertAllClose(alpha_val, alpha0 + len(x_data))
    self.assertAllClose(beta_val, beta0 + x_data.sum())
Example no. 17
  def test_gamma_exponential(self):
    x_data = np.array([0.1, 0.5, 3.3, 2.7])

    alpha0 = 0.5
    beta0 = 1.75
    lam = rvs.Gamma(alpha0, beta0)
    x = rvs.Exponential(lam, sample_shape=4)

    lam_cond = ed.complete_conditional(lam, [lam, x])

    self.assertIsInstance(lam_cond, rvs.Gamma)

    with self.test_session() as sess:
      alpha_val, beta_val = sess.run(
          [lam_cond.concentration, lam_cond.rate], {x: x_data})

    self.assertAllClose(alpha_val, alpha0 + len(x_data))
    self.assertAllClose(beta_val, beta0 + x_data.sum())
Example no. 18
  def test_mul_rate_gamma(self):
    x_data = np.array([0.1, 0.5, 3.3, 2.7])

    alpha0 = 0.5
    beta0 = 1.75
    alpha_likelihood = 2.3
    beta = rvs.Gamma(alpha0, beta0)
    x = rvs.Gamma(alpha_likelihood, alpha_likelihood * beta, sample_shape=4)

    beta_cond = ed.complete_conditional(beta, [beta, x])

    self.assertIsInstance(beta_cond, rvs.Gamma)

    with self.test_session() as sess:
      alpha_val, beta_val = sess.run([beta_cond.concentration, beta_cond.rate],
                                     {x: x_data})
    self.assertAllClose(alpha_val, alpha0 + alpha_likelihood * len(x_data))
    self.assertAllClose(beta_val, beta0 + alpha_likelihood * x_data.sum())
Example no. 19
  def test_beta_bernoulli(self):
    x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

    a0 = 0.5
    b0 = 1.5
    pi = rvs.Beta(a0, b0)
    x = rvs.Bernoulli(probs=pi, sample_shape=10)

    pi_cond = ed.complete_conditional(pi, [pi, x])

    self.assertIsInstance(pi_cond, rvs.Beta)

    with self.test_session() as sess:
      a_val, b_val = sess.run([pi_cond.concentration1,
                               pi_cond.concentration0], {x: x_data})

    self.assertAllClose(a_val, a0 + x_data.sum())
    self.assertAllClose(b_val, b0 + (1 - x_data).sum())
Example no. 20
  def test_gamma_poisson(self):
    x_data = np.array([0, 1, 0, 7, 0, 0, 2, 0, 0, 1])

    alpha0 = 0.5
    beta0 = 1.75
    lam = rvs.Gamma(alpha=alpha0, beta=beta0)
    # use value since cannot sample
    x = rvs.Poisson(lam=lam, value=tf.zeros(10), sample_shape=10)

    lam_cond = ed.complete_conditional(lam, [lam, x])

    self.assertIsInstance(lam_cond, rvs.Gamma)

    with self.test_session() as sess:
      alpha_val, beta_val = sess.run(
          [lam_cond.alpha, lam_cond.beta], {x: x_data})

    self.assertAllClose(alpha_val, alpha0 + x_data.sum())
    self.assertAllClose(beta_val, beta0 + len(x_data))
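This is the Gamma-Poisson conjugate update:

\lambda \mid x \sim \mathrm{Gamma}\big(\alpha_0 + \textstyle\sum_i x_i,\ \beta_0 + n\big).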
Example no. 21
  def test_beta_binomial(self):
    n_data = 10
    x_data = 2

    a0 = 0.5
    b0 = 1.5
    pi = rvs.Beta(a=a0, b=b0)
    # use value since cannot sample
    x = rvs.Binomial(n=n_data, p=pi, value=0.0)

    pi_cond = ed.complete_conditional(pi, [pi, x])

    self.assertIsInstance(pi_cond, rvs.Beta)

    with self.test_session() as sess:
      a_val, b_val = sess.run([pi_cond.a, pi_cond.b], {x: x_data})

    self.assertAllClose(a_val, a0 + x_data)
    self.assertAllClose(b_val, b0 + n_data - x_data)
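This is the Binomial analogue of the Beta-Bernoulli update, with x successes out of n trials:

\pi \mid x \sim \mathrm{Beta}(a_0 + x,\ b_0 + n - x).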
Example no. 22
  def test_gamma_poisson(self):
    x_data = np.array([0, 1, 0, 7, 0, 0, 2, 0, 0, 1])

    alpha0 = 0.5
    beta0 = 1.75
    lam = rvs.Gamma(alpha0, beta0)
    # use value since cannot sample
    x = rvs.Poisson(lam, value=tf.zeros(10), sample_shape=10)

    lam_cond = ed.complete_conditional(lam, [lam, x])

    self.assertIsInstance(lam_cond, rvs.Gamma)

    with self.test_session() as sess:
      alpha_val, beta_val = sess.run(
          [lam_cond.concentration, lam_cond.rate], {x: x_data})

    self.assertAllClose(alpha_val, alpha0 + x_data.sum())
    self.assertAllClose(beta_val, beta0 + len(x_data))
Example no. 23
    def test_inverse_gamma_normal(self):
        x_data = np.array([0.1, 0.5, 3.3, 2.7])

        sigmasq_conc = 1.3
        sigmasq_rate = 2.1
        x_loc = 0.3

        sigmasq = rvs.InverseGamma(sigmasq_conc, sigmasq_rate)
        x = rvs.Normal(x_loc, tf.sqrt(sigmasq), sample_shape=len(x_data))

        sigmasq_cond = ed.complete_conditional(sigmasq, [sigmasq, x])
        self.assertIsInstance(sigmasq_cond, rvs.InverseGamma)

        with self.test_session() as sess:
            conc_val, rate_val = sess.run(
                [sigmasq_cond.concentration, sigmasq_cond.rate], {x: x_data})

        self.assertAllClose(conc_val, sigmasq_conc + 0.5 * len(x_data))
        self.assertAllClose(rate_val, sigmasq_rate + 0.5 * np.sum(
            (x_data - x_loc)**2))
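The test checks the inverse-gamma update for a normal likelihood with known mean mu, where c_0 and r_0 are the prior concentration and rate:

\sigma^2 \mid x \sim \mathrm{InvGamma}\Big(c_0 + \tfrac{n}{2},\ r_0 + \tfrac{1}{2}\textstyle\sum_i (x_i - \mu)^2\Big).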
Example no. 24
  def test_beta_binomial(self):
    n_data = 10
    x_data = 2

    a0 = 0.5
    b0 = 1.5
    pi = rvs.Beta(a0, b0)
    # use value since cannot sample
    x = rvs.Binomial(total_count=n_data, probs=pi, value=0.0)

    pi_cond = ed.complete_conditional(pi, [pi, x])

    self.assertIsInstance(pi_cond, rvs.Beta)

    with self.test_session() as sess:
      a_val, b_val = sess.run([pi_cond.concentration1,
                               pi_cond.concentration0], {x: x_data})

    self.assertAllClose(a_val, a0 + x_data)
    self.assertAllClose(b_val, b0 + n_data - x_data)
Example no. 25
  def test_inverse_gamma_normal(self):
    x_data = np.array([0.1, 0.5, 3.3, 2.7])

    sigmasq_conc = 1.3
    sigmasq_rate = 2.1
    x_loc = 0.3

    sigmasq = rvs.InverseGamma(sigmasq_conc, sigmasq_rate)
    x = rvs.Normal(x_loc, tf.sqrt(sigmasq), sample_shape=len(x_data))

    sigmasq_cond = ed.complete_conditional(sigmasq, [sigmasq, x])
    self.assertIsInstance(sigmasq_cond, rvs.InverseGamma)

    with self.test_session() as sess:
      conc_val, rate_val = sess.run(
          [sigmasq_cond.concentration, sigmasq_cond.rate], {x: x_data})

    self.assertAllClose(conc_val, sigmasq_conc + 0.5 * len(x_data))
    self.assertAllClose(rate_val,
                        sigmasq_rate + 0.5 * np.sum((x_data - x_loc)**2))
Example no. 26
  def test_normal_normal(self):
    x_data = np.array([0.1, 0.5, 3.3, 2.7])

    mu0 = 0.3
    sigma0 = 2.1
    sigma_likelihood = 1.2

    mu = rvs.Normal(mu0, sigma0)
    x = rvs.Normal(mu, sigma_likelihood, sample_shape=len(x_data))

    mu_cond = ed.complete_conditional(mu, [mu, x])
    self.assertIsInstance(mu_cond, rvs.Normal)

    with self.test_session() as sess:
      mu_val, sigma_val = sess.run([mu_cond.mu, mu_cond.sigma], {x: x_data})

    self.assertAllClose(sigma_val, (1.0 / sigma0**2 +
                                    len(x_data) / sigma_likelihood**2) ** -0.5)
    self.assertAllClose(mu_val,
                        sigma_val**2 * (mu0 / sigma0**2 +
                                        (1.0 / sigma_likelihood**2 *
                                         x_data.sum())))
Example no. 27
  def test_normal_normal(self):
    x_data = np.array([0.1, 0.5, 3.3, 2.7])

    mu0 = 0.3
    sigma0 = 2.1
    sigma_likelihood = 1.2

    mu = rvs.Normal(mu0, sigma0)
    x = rvs.Normal(mu, sigma_likelihood, sample_shape=len(x_data))

    mu_cond = ed.complete_conditional(mu, [mu, x])
    self.assertIsInstance(mu_cond, rvs.Normal)

    with self.test_session() as sess:
      mu_val, sigma_val = sess.run([mu_cond.loc, mu_cond.scale], {x: x_data})

    self.assertAllClose(sigma_val, (1.0 / sigma0**2 +
                                    len(x_data) / sigma_likelihood**2) ** -0.5)
    self.assertAllClose(mu_val,
                        sigma_val**2 * (mu0 / sigma0**2 +
                                        (1.0 / sigma_likelihood**2 *
                                         x_data.sum())))
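These assertions restate the normal-normal conjugate update with known likelihood scale sigma:

\hat\sigma^2 = \big(\sigma_0^{-2} + n\,\sigma^{-2}\big)^{-1}, \qquad
\mu \mid x \sim \mathcal{N}\big(\hat\sigma^2\,(\mu_0/\sigma_0^2 + \textstyle\sum_i x_i/\sigma^2),\ \hat\sigma^2\big).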
Example no. 28
def main(_):
  ed.set_seed(42)

  # DATA
  x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

  # MODEL
  p = Beta(1.0, 1.0)
  x = Bernoulli(probs=p, sample_shape=10)

  # COMPLETE CONDITIONAL
  p_cond = ed.complete_conditional(p)

  sess = ed.get_session()

  print('p(probs | x) type:', p_cond.parameters['name'])
  param_vals = sess.run({key: val for
                         key, val in six.iteritems(p_cond.parameters)
                         if isinstance(val, tf.Tensor)}, {x: x_data})
  print('parameters:')
  for key, val in six.iteritems(param_vals):
    print('%s:\t%.3f' % (key, val))
Example no. 29
G2 = ed.models.Bernoulli(probs=p_nextg2)
G3 = ed.models.Bernoulli(probs=p_nextg3)

mean_x2 = tf.where(tf.cast(G2, tf.bool), 60., 50.)
mean_x3 = tf.where(tf.cast(G3, tf.bool), 60., 50.)
sig = np.float32(np.sqrt(10))
X2 = ed.models.Normal(loc=mean_x2, scale=sig)
X3 = ed.models.Normal(loc=mean_x3, scale=sig)


### run for prob(g1=2|x2=50)
# I treat 2==1 and 1==0
#part1 = ed.models.Bernoulli(probs=tf.nn.sigmoid(tf.Variable(tf.random_normal([]))))
#ed.get_session()
#inf = ed.KLpq({G1:part1},data={X2:tf.constant(50,dtype=tf.float32)})

#inf.run(n_samples=200)
#print(part1.probs.eval())


### run for prob(x3=50|x2=50)
ed.get_session()
cond = ed.complete_conditional(X3, {X2: tf.constant(50, dtype=tf.float32)})
# Monte Carlo estimate of P(round(X3) == 50 | X2 = 50)
probs = 0
for _ in range(100000):
    s = round(cond.eval())
    if s == 50:
        probs += 1
probs = probs / 100000
print(probs)
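The same estimate can be drawn in a single batched sample instead of 100,000 separate eval calls; a minimal sketch, assuming the cond variable and the active session created above:

import numpy as np

samples = cond.sample(100000).eval()       # one batched draw from the conditional
print(np.mean(np.round(samples) == 50.0))  # estimate of P(round(X3) == 50 | X2 = 50)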
Example no. 30
# Prior hyperparameters
pi_alpha = np.ones(K, dtype=np.float32)
mu_sigma = np.std(true_mu)
sigmasq_alpha = 1.0
sigmasq_beta = 2.0

# Model
pi = Dirichlet(pi_alpha)
mu = Normal(0.0, mu_sigma, sample_shape=K)
sigmasq = InverseGamma(sigmasq_alpha, sigmasq_beta, sample_shape=K)
x = ParamMixture(pi, {'loc': mu, 'scale': tf.sqrt(sigmasq)}, Normal,
                 sample_shape=N)
z = x.cat

# Conditionals
mu_cond = ed.complete_conditional(mu)
sigmasq_cond = ed.complete_conditional(sigmasq)
pi_cond = ed.complete_conditional(pi)
z_cond = ed.complete_conditional(z)

sess = ed.get_session()

# Initialize randomly
pi_est, mu_est, sigmasq_est, z_est = sess.run([pi, mu, sigmasq, z])

print('Initial parameters:')
print('pi:', pi_est)
print('mu:', mu_est)
print('sigmasq:', sigmasq_est)
print()
Example no. 31
sigmasq_beta = 2.0

# Model
pi = Dirichlet(pi_alpha)
mu = Normal(0.0, mu_sigma, sample_shape=K)
sigmasq = InverseGamma(sigmasq_alpha, sigmasq_beta, sample_shape=K)
x = ParamMixture(pi, {
    'mu': mu,
    'sigma': tf.sqrt(sigmasq)
},
                 Normal,
                 sample_shape=N)
z = x.cat

# Conditionals
mu_cond = ed.complete_conditional(mu)
sigmasq_cond = ed.complete_conditional(sigmasq)
pi_cond = ed.complete_conditional(pi)
z_cond = ed.complete_conditional(z)

sess = ed.get_session()

# Initialize randomly
pi_est, mu_est, sigmasq_est, z_est = sess.run([pi, mu, sigmasq, z])

print('Initial parameters:')
print('pi:', pi_est)
print('mu:', mu_est)
print('sigmasq:', sigmasq_est)
print()
Example no. 32
import edward as ed
import numpy as np
import six
import tensorflow as tf

from edward.models import Bernoulli, Beta

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
pi = Beta(1.0, 1.0)
x = Bernoulli(probs=pi, sample_shape=10)

# COMPLETE CONDITIONAL
pi_cond = ed.complete_conditional(pi)

sess = ed.get_session()
tf.global_variables_initializer().run()

print('p(pi | x) type:', pi_cond.parameters['name'])
param_vals = sess.run({key: val for
                       key, val in six.iteritems(pi_cond.parameters)
                       if isinstance(val, tf.Tensor)}, {x: x_data})
print('parameters:')
for key, val in six.iteritems(param_vals):
  print('%s:\t%.3f' % (key, val))
Example no. 33
def main(_):
    # Generate data
    true_mu = np.array([-1.0, 0.0, 1.0], np.float32) * 10
    true_sigmasq = np.array([1.0**2, 2.0**2, 3.0**2], np.float32)
    true_pi = np.array([0.2, 0.3, 0.5], np.float32)
    N = 10000
    K = len(true_mu)
    true_z = np.random.choice(np.arange(K), size=N, p=true_pi)
    x_data = true_mu[true_z] + np.random.randn(N) * np.sqrt(
        true_sigmasq[true_z])

    # Prior hyperparameters
    pi_alpha = np.ones(K, dtype=np.float32)
    mu_sigma = np.std(true_mu)
    sigmasq_alpha = 1.0
    sigmasq_beta = 2.0

    # Model
    pi = Dirichlet(pi_alpha)
    mu = Normal(0.0, mu_sigma, sample_shape=K)
    sigmasq = InverseGamma(sigmasq_alpha, sigmasq_beta, sample_shape=K)
    x = ParamMixture(pi, {
        'loc': mu,
        'scale': tf.sqrt(sigmasq)
    },
                     Normal,
                     sample_shape=N)
    z = x.cat

    # Conditionals
    mu_cond = ed.complete_conditional(mu)
    sigmasq_cond = ed.complete_conditional(sigmasq)
    pi_cond = ed.complete_conditional(pi)
    z_cond = ed.complete_conditional(z)

    sess = ed.get_session()

    # Initialize randomly
    pi_est, mu_est, sigmasq_est, z_est = sess.run([pi, mu, sigmasq, z])

    print('Initial parameters:')
    print('pi:', pi_est)
    print('mu:', mu_est)
    print('sigmasq:', sigmasq_est)
    print()

    # Gibbs sampler
    cond_dict = {
        pi: pi_est,
        mu: mu_est,
        sigmasq: sigmasq_est,
        z: z_est,
        x: x_data
    }
    t0 = time()
    T = 500
    for t in range(T):
        z_est = sess.run(z_cond, cond_dict)
        cond_dict[z] = z_est
        pi_est, mu_est = sess.run([pi_cond, mu_cond], cond_dict)
        cond_dict[pi] = pi_est
        cond_dict[mu] = mu_est
        sigmasq_est = sess.run(sigmasq_cond, cond_dict)
        cond_dict[sigmasq] = sigmasq_est
    print('took %.3f seconds to run %d iterations' % (time() - t0, T))

    print()
    print('Final sample for parameters:')
    print('pi:', pi_est)
    print('mu:', mu_est)
    print('sigmasq:', sigmasq_est)
    print()

    print()
    print('True parameters:')
    print('pi:', true_pi)
    print('mu:', true_mu)
    print('sigmasq:', true_sigmasq)
    print()

    plt.figure(figsize=[10, 10])
    plt.subplot(2, 1, 1)
    plt.hist(x_data, 50)
    plt.title('Empirical Distribution of $x$')
    plt.xlabel('$x$')
    plt.ylabel('frequency')
    xl = plt.xlim()
    plt.subplot(2, 1, 2)
    plt.hist(sess.run(x, {pi: pi_est, mu: mu_est, sigmasq: sigmasq_est}), 50)
    plt.title(r"Predictive distribution $p(x \mid \mathrm{inferred }\ "
              r"\pi, \mu, \sigma^2)$")
    plt.xlabel('$x$')
    plt.ylabel('frequency')
    plt.xlim(xl)
    plt.show()
Example no. 34
# Basic model
# p(x, z, beta) = Normal(x | beta, I) Categorical(z | pi) Normal(beta | 0, I)

# Variational inference
# Approximate the posterior of beta with a parameterized normal and the posterior of z with a parameterized Categorical.
from edward.models import Categorical, Normal
qbeta = Normal(loc=tf.Variable(tf.zeros([K, D])),
               scale=tf.exp(tf.Variable(tf.zeros([K, D]))))  # define the approximating posteriors
qz = Categorical(logits=tf.Variable(tf.zeros([N, K])))
inference = ed.VariationalInference({beta: qbeta, z: qz}, data={x: x_train})
# Inference with MAP; these inference methods all inherit from VariationalInference.
from edward.models import PointMass
qbeta = PointMass(params=tf.Variable(tf.zeros([K, D])))
qz = PointMass(params=tf.Variable(tf.zeros(N)))
inference = ed.MAP({beta: qbeta, z: qz}, data={x: x_train})

# Monte Carlo inference
# Use empirical distributions over samples of beta and z to approximate the posterior.
from edward.models import Empirical
T = 10000  # number of samples
qbeta = Empirical(params=tf.Variable(tf.zeros([T, K, D])))  # Empirical is the empirical distribution
qz = Empirical(params=tf.Variable(tf.zeros([T, N])))
inference = ed.MonteCarlo({beta: qbeta, z: qz}, data={x: x_train})

# Exact inference
from edward.models import Bernoulli, Beta
pi = Beta(1.0, 1.0)
x = Bernoulli(probs=pi, sample_shape=10)
pi_cond = ed.complete_conditional(pi)  # exact closed-form expression for pi's posterior
sess = ed.get_session()
sess.run(pi_cond, {x: np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])})  # draws a sample from Beta(3, 9)
Example no. 35
import edward as ed
from deepx import T
import tensorflow as tf
from edward.models import Normal, Bernoulli

mu = Normal(tf.constant(0.0), tf.constant(0.00001))
x = Normal(mu, tf.constant(0.0001))

mu_x = ed.complete_conditional(mu)

sess = T.interactive_session()
Example no. 36
def main(_):
  # Generate data
  true_mu = np.array([-1.0, 0.0, 1.0], np.float32) * 10
  true_sigmasq = np.array([1.0**2, 2.0**2, 3.0**2], np.float32)
  true_pi = np.array([0.2, 0.3, 0.5], np.float32)
  N = 10000
  K = len(true_mu)
  true_z = np.random.choice(np.arange(K), size=N, p=true_pi)
  x_data = true_mu[true_z] + np.random.randn(N) * np.sqrt(true_sigmasq[true_z])

  # Prior hyperparameters
  pi_alpha = np.ones(K, dtype=np.float32)
  mu_sigma = np.std(true_mu)
  sigmasq_alpha = 1.0
  sigmasq_beta = 2.0

  # Model
  pi = Dirichlet(pi_alpha)
  mu = Normal(0.0, mu_sigma, sample_shape=K)
  sigmasq = InverseGamma(sigmasq_alpha, sigmasq_beta, sample_shape=K)
  x = ParamMixture(pi, {'loc': mu, 'scale': tf.sqrt(sigmasq)}, Normal,
                   sample_shape=N)
  z = x.cat

  # Conditionals
  mu_cond = ed.complete_conditional(mu)
  sigmasq_cond = ed.complete_conditional(sigmasq)
  pi_cond = ed.complete_conditional(pi)
  z_cond = ed.complete_conditional(z)

  sess = ed.get_session()

  # Initialize randomly
  pi_est, mu_est, sigmasq_est, z_est = sess.run([pi, mu, sigmasq, z])

  print('Initial parameters:')
  print('pi:', pi_est)
  print('mu:', mu_est)
  print('sigmasq:', sigmasq_est)
  print()

  # Gibbs sampler
  cond_dict = {pi: pi_est, mu: mu_est, sigmasq: sigmasq_est,
               z: z_est, x: x_data}
  t0 = time()
  T = 500
  for t in range(T):
    z_est = sess.run(z_cond, cond_dict)
    cond_dict[z] = z_est
    pi_est, mu_est = sess.run([pi_cond, mu_cond], cond_dict)
    cond_dict[pi] = pi_est
    cond_dict[mu] = mu_est
    sigmasq_est = sess.run(sigmasq_cond, cond_dict)
    cond_dict[sigmasq] = sigmasq_est
  print('took %.3f seconds to run %d iterations' % (time() - t0, T))

  print()
  print('Final sample for parameters:')
  print('pi:', pi_est)
  print('mu:', mu_est)
  print('sigmasq:', sigmasq_est)
  print()

  print()
  print('True parameters:')
  print('pi:', true_pi)
  print('mu:', true_mu)
  print('sigmasq:', true_sigmasq)
  print()

  plt.figure(figsize=[10, 10])
  plt.subplot(2, 1, 1)
  plt.hist(x_data, 50)
  plt.title('Empirical Distribution of $x$')
  plt.xlabel('$x$')
  plt.ylabel('frequency')
  xl = plt.xlim()
  plt.subplot(2, 1, 2)
  plt.hist(sess.run(x, {pi: pi_est, mu: mu_est, sigmasq: sigmasq_est}), 50)
  plt.title(r"Predictive distribution $p(x \mid \mathrm{inferred }\ "
            r"\pi, \mu, \sigma^2)$")
  plt.xlabel('$x$')
  plt.ylabel('frequency')
  plt.xlim(xl)
  plt.show()