def test_persistent_state(self):
  with self.test_session() as sess:
    dp = DirichletProcess(0.1, Normal(loc=0.0, scale=1.0))
    x = dp.sample(5)
    y = dp.sample(5)
    x_data, y_data, locs = sess.run([x, y, dp.locs])
    # Draws from separate sample() calls should reuse the DP's persistent
    # set of atom locations.
    for sample in x_data:
      self.assertTrue(sample in locs)
    for sample in y_data:
      self.assertTrue(sample in locs)
def test_persistent_state(self):
  with self.test_session() as sess:
    dp = DirichletProcess(0.1, Normal(mu=0.0, sigma=1.0))
    x = dp.sample(5)
    y = dp.sample(5)
    x_data, y_data, theta = sess.run([x, y, dp.theta])
    for sample in x_data:
      self.assertTrue(sample in theta)
    for sample in y_data:
      self.assertTrue(sample in theta)
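For quick experimentation outside the test harness, a standalone version of the same persistent-state check might look like the sketch below; it assumes the newer argument names from the first variant above (Normal(loc=..., scale=...) and the dp.locs attribute) and the usual edward.models imports.

# Standalone sketch of the persistent-state behavior (assumptions: newer
# loc/scale Normal arguments and the dp.locs attribute, as in the first test).
import tensorflow as tf
from edward.models import DirichletProcess, Normal

dp = DirichletProcess(0.1, Normal(loc=0.0, scale=1.0))
x = dp.sample(5)
y = dp.sample(5)

with tf.Session() as sess:
  x_data, y_data, locs = sess.run([x, y, dp.locs])

# With such a small concentration, both batches of draws should reuse
# atoms from the DP's persistent set of locations.
assert all(sample in locs for sample in x_data)
assert all(sample in locs for sample in y_data)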
dp = dirichlet_process(alpha=10.0)

# The number of sticks broken is dynamic, changing across evaluations.
sess = tf.Session()
print(sess.run(dp))
print(sess.run(dp))

# Demo of the DirichletProcess random variable in Edward.
# It is associated with a sample tensor, which in turn is associated with
# one of its atoms (base distributions).
base_cls = Normal
kwargs = {'mu': 0.0, 'sigma': 1.0}

# Highly concentrated DP.
alpha = 1.0
dp = DirichletProcess(alpha, base_cls, **kwargs)
x = dp.sample(1000)
samples = sess.run(x)
plt.hist(samples, bins=100, range=(-3.0, 3.0))
plt.title("DP({0}, N(0, 1))".format(alpha))
plt.show()

# More spread out DP.
alpha = 50.0
dp = DirichletProcess(alpha, base_cls, **kwargs)
x = dp.sample(1000)
samples = sess.run(x)
plt.hist(samples, bins=100, range=(-3.0, 3.0))
plt.title("DP({0}, N(0, 1))".format(alpha))
plt.show()
def _test(n, alpha, base_cls, *args, **kwargs):
  x = DirichletProcess(alpha=alpha, base_cls=base_cls, *args, **kwargs)
  base = base_cls(*args, **kwargs)
  val_est = get_dims(x.sample(n))
  val_true = n + get_dims(alpha) + get_dims(base)
  assert val_est == val_true
def _test(self, n, concentration, base):
  x = DirichletProcess(concentration=concentration, base=base)
  val_est = x.sample(n).shape.as_list()
  val_true = n + tf.convert_to_tensor(concentration).shape.as_list() + \
      tf.convert_to_tensor(base).shape.as_list()
  self.assertEqual(val_est, val_true)
  # (tail of the stick-breaking helper; it returns the number of sticks broken)
  return stick_num


dp = dirichlet_process(alpha=10.0)

# The number of sticks broken is dynamic, changing across evaluations.
sess = tf.Session()
print(sess.run(dp))
print(sess.run(dp))

# Demo of the DirichletProcess random variable in Edward.
base = Normal(mu=0.0, sigma=1.0)

# Highly concentrated DP.
alpha = 1.0
dp = DirichletProcess(alpha, base)
x = dp.sample(1000)
samples = sess.run(x)
plt.hist(samples, bins=100, range=(-3.0, 3.0))
plt.title("DP({0}, N(0, 1))".format(alpha))
plt.show()

# More spread out DP.
alpha = 50.0
dp = DirichletProcess(alpha, base)
x = dp.sample(1000)
samples = sess.run(x)
plt.hist(samples, bins=100, range=(-3.0, 3.0))
plt.title("DP({0}, N(0, 1))".format(alpha))
plt.show()
def test_no_support(self):
  with self.test_session():
    x = DirichletProcess(1.0, Normal(0.0, 1.0))
    # DirichletProcess defines no support, so ed.transform should raise.
    with self.assertRaises(AttributeError):
      y = ed.transform(x)
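By contrast, a hypothetical companion check (not in the original tests; the test name and the Gamma example are assumptions) could confirm that ed.transform succeeds for a random variable that does define a support:

def test_transform_with_support(self):
  # Hypothetical companion test: Gamma has nonnegative support, so
  # ed.transform should return a transformed random variable rather
  # than raise AttributeError. Assumes `from edward.models import Gamma`.
  with self.test_session() as sess:
    x = Gamma(1.0, 1.0)
    y = ed.transform(x)
    sess.run(y)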
def _test(self, n, alpha, base):
  x = DirichletProcess(alpha=alpha, base=base)
  val_est = x.sample(n).shape.as_list()
  val_true = n + tf.convert_to_tensor(alpha).shape.as_list() + \
      tf.convert_to_tensor(base).shape.as_list()
  self.assertEqual(val_est, val_true)
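A hypothetical extra case for this helper, reusing the non-scalar concentration and base from the demo below: by the helper's own arithmetic, the expected sample shape is n + alpha.shape + base.shape.

def test_batch_shapes(self):
  # Hypothetical case (name and values assumed, mirroring the demo below):
  # expected sample shape is [10] + [3] + [5, 2] = [10, 3, 5, 2].
  # Assumes `from edward.models import Exponential`.
  alpha = tf.constant([0.1, 0.6, 0.4])
  base = Exponential(rate=tf.ones([5, 2]))
  self._test([10], alpha, base)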
def main(_):
  dp = dirichlet_process(10.0)

  # The number of sticks broken is dynamic, changing across evaluations.
  sess = tf.Session()
  print(sess.run(dp))
  print(sess.run(dp))

  # Demo of the DirichletProcess random variable in Edward.
  base = Normal(0.0, 1.0)

  # Highly concentrated DP.
  alpha = 1.0
  dp = DirichletProcess(alpha, base)
  x = dp.sample(1000)
  samples = sess.run(x)
  plt.hist(samples, bins=100, range=(-3.0, 3.0))
  plt.title("DP({0}, N(0, 1))".format(alpha))
  plt.show()

  # More spread out DP.
  alpha = 50.0
  dp = DirichletProcess(alpha, base)
  x = dp.sample(1000)
  samples = sess.run(x)
  plt.hist(samples, bins=100, range=(-3.0, 3.0))
  plt.title("DP({0}, N(0, 1))".format(alpha))
  plt.show()

  # States persist across calls to sample() in a DP.
  alpha = 1.0
  dp = DirichletProcess(alpha, base)
  x = dp.sample(50)
  y = dp.sample(75)
  samples_x, samples_y = sess.run([x, y])
  plt.subplot(211)
  plt.hist(samples_x, bins=100, range=(-3.0, 3.0))
  plt.title("DP({0}, N(0, 1)) across two calls to sample()".format(alpha))
  plt.subplot(212)
  plt.hist(samples_y, bins=100, range=(-3.0, 3.0))
  plt.show()

  # `theta` is the distribution indirectly returned by the DP.
  # Fetching theta is the same as fetching the Dirichlet process.
  dp = DirichletProcess(alpha, base)
  theta = Normal(0.0, 1.0, value=tf.cast(dp, tf.float32))
  print(sess.run([dp, theta]))
  print(sess.run([dp, theta]))

  # DirichletProcess can also take in non-scalar concentrations and bases.
  alpha = tf.constant([0.1, 0.6, 0.4])
  base = Exponential(rate=tf.ones([5, 2]))
  dp = DirichletProcess(alpha, base)
  print(dp)
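main() above calls a dirichlet_process helper whose body is not shown in these fragments (only its `return stick_num` tail appears elsewhere). One plausible sketch of such a truncation-free stick-breaking count, built from a tf.while_loop over Beta draws and Bernoulli coin flips, is below; the names and the exact stopping rule are assumptions, not the original example's code.

from edward.models import Bernoulli, Beta

def dirichlet_process(alpha):
  """Hypothetical sketch: break sticks until a coin flip fails and return
  how many were broken; the count is random and changes per evaluation."""
  def cond(k, beta_k):
    # Continue breaking sticks with probability beta_k.
    flip = Bernoulli(probs=beta_k)
    return tf.cast(flip, tf.bool)

  def body(k, beta_k):
    beta_k = Beta(concentration1=1.0, concentration0=alpha)
    return k + 1, beta_k

  k = tf.constant(0)
  beta_k = Beta(concentration1=1.0, concentration0=alpha)
  stick_num, _ = tf.while_loop(cond, body, loop_vars=[k, beta_k])
  return stick_num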
  return stick_num


dp = dirichlet_process(10.0)

# The number of sticks broken is dynamic, changing across evaluations.
sess = tf.Session()
print(sess.run(dp))
print(sess.run(dp))

# Demo of the DirichletProcess random variable in Edward.
base = Normal(0.0, 1.0)

# Highly concentrated DP.
alpha = 1.0
dp = DirichletProcess(alpha, base)
x = dp.sample(1000)
samples = sess.run(x)
plt.hist(samples, bins=100, range=(-3.0, 3.0))
plt.title("DP({0}, N(0, 1))".format(alpha))
plt.show()

# More spread out DP.
alpha = 50.0
dp = DirichletProcess(alpha, base)
x = dp.sample(1000)
samples = sess.run(x)
plt.hist(samples, bins=100, range=(-3.0, 3.0))
plt.title("DP({0}, N(0, 1))".format(alpha))
plt.show()