 def test_abs(self):
   with self.test_session() as sess:
     x = Normal(0.0, 1.0)
     z = abs(x)
     z_value = abs(x.value())
     z_eval, z_value_eval = sess.run([z, z_value])
     self.assertAllEqual(z_eval, z_value_eval)
 def test_getitem(self):
   with self.test_session() as sess:
     x = Normal(tf.zeros([3, 4]), tf.ones([3, 4]))
     z = x[0:2, 2:3]
     z_value = x.value()[0:2, 2:3]
     z_eval, z_value_eval = sess.run([z, z_value])
     self.assertAllEqual(z_eval, z_value_eval)
 def test_neg(self):
   with self.test_session() as sess:
     x = Normal(0.0, 1.0)
     z = -x
     z_value = -x.value()
     z_eval, z_value_eval = sess.run([z, z_value])
     self.assertAllEqual(z_eval, z_value_eval)
Example No. 6
 def test_abs(self):
     with self.test_session() as sess:
         x = Normal(mu=0.0, sigma=1.0)
         z = abs(x)
         z_value = abs(x.value())
         z_eval, z_value_eval = sess.run([z, z_value])
         self.assertAllEqual(z_eval, z_value_eval)
Example No. 7
 def test_neg(self):
     with self.test_session() as sess:
         x = Normal(mu=0.0, sigma=1.0)
         z = -x
         z_value = -x.value()
         z_eval, z_value_eval = sess.run([z, z_value])
         assert np.allclose(z_eval, z_value_eval)
 def test_div(self):
   with self.test_session() as sess:
     x = Normal(0.0, 1.0)
     y = 5.0
     z = x / y
     z_value = x.value() / y
     z_eval, z_value_eval = sess.run([z, z_value])
     self.assertAllEqual(z_eval, z_value_eval)
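These operator tests (and the ones that follow) all check the same mechanism: arithmetic on an Edward random variable is forwarded to its sampled value() tensor, so random variables compose directly with plain tensors and with each other. A minimal sketch of that usage outside a test harness (a standard TensorFlow session is assumed):

import tensorflow as tf
from edward.models import Normal

x = Normal(0.0, 1.0)
y = Normal(2.0, 1.0)
z = 3.0 * x + y / 2.0  # a tf.Tensor built from x.value() and y.value()
with tf.Session() as sess:
  print(sess.run(z))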
Example No. 9
 def test_mul(self):
     with self.test_session() as sess:
         x = Normal(mu=0.0, sigma=1.0)
         y = 5.0
         z = x * y
         z_value = x.value() * y
         z_eval, z_value_eval = sess.run([z, z_value])
         self.assertAllEqual(z_eval, z_value_eval)
Example No. 10
 def test_rfloordiv(self):
     with self.test_session() as sess:
         x = Normal(mu=0.0, sigma=1.0)
         y = 5.0
         z = y // x
         z_value = y // x.value()
         z_eval, z_value_eval = sess.run([z, z_value])
         self.assertAllEqual(z_eval, z_value_eval)
Example No. 11
 def test_rmul(self):
     with self.test_session() as sess:
         x = Normal(mu=0.0, sigma=1.0)
         y = 5.0
         z = y * x
         z_value = y * x.value()
         z_eval, z_value_eval = sess.run([z, z_value])
         assert np.allclose(z_eval, z_value_eval)
Example No. 12
 def test_floordiv(self):
     with self.test_session() as sess:
         x = Normal(mu=0.0, sigma=1.0)
         y = 5.0
         z = x // y
         z_value = x.value() // y
         z_eval, z_value_eval = sess.run([z, z_value])
         assert np.allclose(z_eval, z_value_eval)
 def test_rfloordiv(self):
   with self.test_session() as sess:
     x = Normal(0.0, 1.0)
     y = 5.0
     z = y // x
     z_value = y // x.value()
     z_eval, z_value_eval = sess.run([z, z_value])
     self.assertAllEqual(z_eval, z_value_eval)
 def test_floordiv(self):
     with self.test_session() as sess:
         x = Normal(0.0, 1.0)
         y = 5.0
         z = x // y
         z_value = x.value() // y
         z_eval, z_value_eval = sess.run([z, z_value])
         self.assertAllEqual(z_eval, z_value_eval)
 def test_rmul(self):
     with self.test_session() as sess:
         x = Normal(0.0, 1.0)
         y = 5.0
         z = y * x
         z_value = y * x.value()
         z_eval, z_value_eval = sess.run([z, z_value])
         self.assertAllEqual(z_eval, z_value_eval)
Example No. 16
 def test_swap_rv_tensor(self):
     with self.test_session():
         ed.set_seed(289362)
         x = Normal(mu=0.0, sigma=0.1)
         y = tf.constant(1.0)
         z = x * y
         qx = Normal(mu=10.0, sigma=0.1)
         z_new = ed.copy(z, {x: qx.value()})
         self.assertGreater(z_new.eval(), 5.0)
Example No. 17
 def test_swap_tensor_rv(self):
   with self.test_session():
     ed.set_seed(95258)
     x = Normal(0.0, 0.1)
     y = tf.constant(1.0)
     z = x * y
     qx = Normal(10.0, 0.1)
     z_new = ed.copy(z, {x.value(): qx})
     self.assertGreater(z_new.eval(), 5.0)
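The swap tests above exercise ed.copy, which rebuilds the computation graph with chosen nodes replaced. A common use outside of tests is forming a posterior predictive by swapping a prior latent variable for its fitted approximation; a minimal sketch follows (the names z, qz, and x_post are illustrative, not from the original tests):

import edward as ed
import tensorflow as tf
from edward.models import Bernoulli, Normal

z = Normal(loc=tf.zeros(5), scale=tf.ones(5))       # prior latent variable
x = Bernoulli(logits=tf.reduce_sum(z))               # likelihood node
qz = Normal(loc=tf.Variable(tf.zeros(5)),            # fitted approximation
            scale=tf.nn.softplus(tf.Variable(tf.zeros(5))))
x_post = ed.copy(x, {z: qz})  # x with every use of z rewired to qz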
Example No. 18
 def test_dict_tensor_rv(self):
   with self.test_session():
     set_seed(95258)
     x = Normal(mu=0.0, sigma=0.1)
     y = tf.constant(1.0)
     z = x * y
     qx = Normal(mu=10.0, sigma=0.1)
     z_new = copy(z, {x.value(): qx})
     self.assertGreater(z_new.eval(), 5.0)
Example No. 19
 def test_list(self):
     with self.test_session() as sess:
         x = Normal(tf.constant(0.0), tf.constant(0.1))
         y = Normal(tf.constant(10.0), tf.constant(0.1))
         cat = Categorical(logits=tf.zeros(5))
         components = [Normal(x, tf.constant(0.1)) for _ in range(5)]
         z = Mixture(cat=cat, components=components)
         z_new = ed.copy(z, {x: y.value()})
         self.assertGreater(z_new.value().eval(), 5.0)
Example No. 23
def simple_generator(x):
    # Latent variables of shape [M, D, d], centered at x.
    z = Normal(loc=x, scale=tf.ones([M, D, d]))
    hidden = z.value()
    z1 = hidden
    z2 = tf.transpose(hidden, [0, 2, 1])
    alpha = 0.5  # unused in this snippet
    # Inner products between rows of hidden: (M, D, d) x (M, d, D) -> (M, D, D).
    a = tf.matmul(z1, z2)
    # Flatten to (M, D * D) and gather the columns indexed by tri_idx.
    a = tf.reshape(a, [-1, D * D])
    ua = tf.gather(a, tri_idx, axis=1)
    # Edge probabilities and Bernoulli observations.
    p = tf.sigmoid(ua)
    x = Bernoulli(probs=p)
    return x
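Here M, D, d, and tri_idx come from the surrounding script and are not shown. tri_idx is presumably a flat index into the D * D entries of each matrix, e.g. the strict upper triangle so every pair is kept once; a hedged sketch of how such an index could be built:

import numpy as np

D = 20  # assumed value; D is defined elsewhere in the original script
rows, cols = np.triu_indices(D, k=1)
tri_idx = np.ravel_multi_index((rows, cols), (D, D))  # shape (D * (D - 1) / 2,)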
Example No. 24
 def test_lambda(self):
   x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
   y = layers.Lambda(lambda x: x ** 2)(x.value())
Example No. 25
 def test_activity_regularization(self):
   x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
   y = layers.ActivityRegularization(l1=0.1)(x.value())
Example No. 26
 def test_permute(self):
   x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
   y = layers.Permute((2, 1))(x.value())
   with self.test_session():
     self.assertEqual(y.eval().shape, (100, 5, 10))
Example No. 27
 def test_repeat_vector(self):
   x = Normal(loc=tf.zeros([100, 10]), scale=tf.ones([100, 10]))
   y = layers.RepeatVector(2)(x.value())
   with self.test_session():
     self.assertEqual(y.eval().shape, (100, 2, 10))
Example No. 28
 def test_dropout(self):
   x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
   y = layers.Dropout(0.5)(x.value())
Example No. 29
 def test_flatten(self):
   x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
   y = layers.Flatten()(x.value())
   with self.test_session():
     self.assertEqual(y.eval().shape, (100, 50))
Example No. 30
 def test_dense(self):
   x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
   y = layers.Dense(32)(x.value())
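Beyond these smoke tests, the tensor a Keras layer returns when applied to an RV's value() can itself parameterize another random variable; that is the pattern the VAE examples below follow. A minimal sketch (names are illustrative):

import tensorflow as tf
from edward.models import Bernoulli, Normal
from keras.layers import Dense

z = Normal(loc=tf.zeros([100, 10]), scale=tf.ones([100, 10]))
logits = Dense(28 * 28)(z.value())  # Keras layer applied to the sampled latent code
x = Bernoulli(logits=logits)        # likelihood parameterized by the layer output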
Example No. 31
M = 128  # batch size during training
d = 10  # latent dimension
DATA_DIR = "data/mnist"
IMG_DIR = "img"

if not os.path.exists(DATA_DIR):
  os.makedirs(DATA_DIR)
if not os.path.exists(IMG_DIR):
  os.makedirs(IMG_DIR)

# DATA. MNIST batches are fed at training time.
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

# MODEL
z = Normal(mu=tf.zeros([M, d]), sigma=tf.ones([M, d]))
logits = generative_network(z.value())
x = Bernoulli(logits=logits)

# INFERENCE
x_ph = tf.placeholder(tf.float32, [M, 28 * 28])
mu, sigma = inference_network(x_ph)
qz = Normal(mu=mu, sigma=sigma)

# Bind p(x, z) and q(z | x) to the same placeholder for x.
data = {x: x_ph}
inference = ed.KLqp({z: qz}, data)
optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
inference.initialize(optimizer=optimizer)

init = tf.initialize_all_variables()
init.run()
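The snippet stops right after variable initialization. A hedged sketch of the training loop that would typically follow, reusing inference, x_ph, mnist, and M from above and binarizing each batch for the Bernoulli likelihood:

import numpy as np

n_epoch = 100
n_iter_per_epoch = mnist.train.num_examples // M
for epoch in range(n_epoch):
  avg_loss = 0.0
  for _ in range(n_iter_per_epoch):
    x_batch, _ = mnist.train.next_batch(M)
    x_batch = np.random.binomial(1, x_batch)  # binarize pixels for the Bernoulli model
    info_dict = inference.update(feed_dict={x_ph: x_batch})
    avg_loss += info_dict['loss']
  print("Epoch %d: avg loss %.3f" % (epoch, avg_loss / n_iter_per_epoch))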
Example No. 32
if not os.path.exists(IMG_DIR):
  os.makedirs(IMG_DIR)

ed.set_seed(42)

M = 100  # batch size during training
d = 2  # latent dimension

# DATA. MNIST batches are fed at training time.
mnist = input_data.read_data_sets(DATA_DIR)

# MODEL
# Define a subgraph of the full model, corresponding to a minibatch of
# size M.
z = Normal(loc=tf.zeros([M, d]), scale=tf.ones([M, d]))
hidden = Dense(256, activation='relu')(z.value())
x = Bernoulli(logits=Dense(28 * 28)(hidden))

# INFERENCE
# Define a subgraph of the variational model, corresponding to a
# minibatch of size M.
x_ph = tf.placeholder(tf.int32, [M, 28 * 28])
hidden = Dense(256, activation='relu')(tf.cast(x_ph, tf.float32))
qz = Normal(loc=Dense(d)(hidden),
            scale=Dense(d, activation='softplus')(hidden))

# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
inference = ed.KLqp({z: qz}, data={x: x_ph})
optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
inference.initialize(optimizer=optimizer)
Example No. 33
)  # shape=(M, 2)
# ====== ellipticity of the galaxies ====== #
# e ~ Normal(∑ 1 / distance * log(mass), sigma)
# The value of sigma is unknown, so give it a Uniform prior;
# each galaxy gets its own sigma.
sigma = Uniform(
    a=np.full(shape=(M, 2), fill_value=0.12, dtype='float32'),
    b=np.full(shape=(M, 2), fill_value=0.33, dtype='float32')
)
galaxies_elp = Normal(
    mu=mean,
    sigma=sigma,
)

# ====== happy sampling ====== #
galXY, halXY, halMAS, galE, sigma = get_value(
    [galaxies_pos.value(), halos_pos.value(),
     halos_mass.value(), galaxies_elp.value(), sigma.value()]
)
print("Galaxies position:", galXY.shape)
print("Galaxies ellipticity:", galE.shape)
print("Halos position:", halXY.shape)
print("Halos mass:", halMAS.shape)
print("Sigma:", sigma.shape)

# ====== visualize the generated sky ====== #
plt.figure(figsize=(8, 8), dpi=180)
draw_sky(galaxies=np.concatenate([galXY, galE], axis=-1),
         halos=[N] + halXY.ravel().tolist())
plt.show(block=True)
Example No. 34
from edward.models import Bernoulli, Normal
from keras import backend as K
from keras.layers import Dense
from progressbar import ETA, Bar, Percentage, ProgressBar
from scipy.misc import imsave
from tensorflow.examples.tutorials.mnist import input_data

ed.set_seed(42)

M = 100  # batch size during training
d = 2  # latent variable dimension

# Probability model (subgraph)
z = Normal(mu=tf.zeros([M, d]), sigma=tf.ones([M, d]))
hidden = Dense(256, activation=K.relu)(z.value())
x = Bernoulli(logits=Dense(28 * 28)(hidden))

# Variational model (subgraph)
x_ph = ed.placeholder(tf.float32, [M, 28 * 28])
hidden = Dense(256, activation=K.relu)(x_ph)
qz = Normal(mu=Dense(d)(hidden), sigma=Dense(d, activation=K.softplus)(hidden))

# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
mnist = input_data.read_data_sets("data/mnist", one_hot=True)
data = {x: x_ph}

sess = ed.get_session()
K.set_session(sess)
inference = ed.MFVI({z: qz}, data)
optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
Example No. 35
# We want to compute the posterior p(\theta | x) where \theta = [mu_1, ..., mu_3, sigma_1, ..., sigma_3, z_1, ..., z_3].

# Model
pi = Dirichlet(np.ones(K, np.float32))
mu = Normal(0.0, 9.0, sample_shape=[K])
sigma = InverseGamma(1.0, 1.0, sample_shape=[K])

c = Categorical(logits=tf.log(pi) - tf.log(1.0 - pi), sample_shape=N)
ed_x = Normal(loc=tf.gather(mu, c), scale=tf.gather(sigma, c))

# parameters
q_pi = Dirichlet(
    tf.nn.softplus(
        tf.get_variable("qpi", [K],
                        initializer=tf.constant_initializer(1.0 / K))))
q_mu = Normal(loc=tf.get_variable("qmu", [K]), scale=1.0)
q_sigma = Normal(loc=tf.nn.softplus(tf.get_variable("qsigma", [K])), scale=1.0)

inference = ed.KLqp(latent_vars={
    mu: q_mu,
    sigma: q_sigma
}, data={ed_x: x})  # this fails if we also include pi: q_pi in latent_vars

inference.run(n_iter=1000)

print(q_pi.value().eval())

print(q_mu.value().eval())
print(q_sigma.value().eval())
Example No. 36
from edward.models import Bernoulli, Normal
from keras import backend as K
from keras.layers import Dense
from progressbar import ETA, Bar, Percentage, ProgressBar
from scipy.misc import imsave
from tensorflow.examples.tutorials.mnist import input_data

ed.set_seed(42)

M = 100  # batch size during training
d = 2  # latent dimension

# Probability model (subgraph)
z = Normal(mu=tf.zeros([M, d]), sigma=tf.ones([M, d]))
hidden = Dense(256, activation='relu')(z.value())
x = Bernoulli(logits=Dense(28 * 28)(hidden))

# Variational model (subgraph)
x_ph = tf.placeholder(tf.float32, [M, 28 * 28])
hidden = Dense(256, activation='relu')(x_ph)
qz = Normal(mu=Dense(d)(hidden), sigma=Dense(d, activation='softplus')(hidden))

mnist = input_data.read_data_sets("data/mnist", one_hot=True)

sess = ed.get_session()
K.set_session(sess)

# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
data = {x: x_ph}
inference = ed.KLqp({z: qz}, data)
Example No. 37
 def test_masking(self):
   x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
   y = layers.Masking()(x.value())
Example No. 38
 def test_activation(self):
   x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
   y = layers.Activation('tanh')(x.value())
Example No. 39
                                                                        D]))))

inference = ed.KLqp({mu: qmu, sigma: qsigma}, data={x: x_train})
inference.initialize(n_samples=20, n_iter=4000)

sess = ed.get_session()
init = tf.initialize_all_variables()
init.run()

for _ in range(inference.n_iter):
    info_dict = inference.update()
    inference.print_progress(info_dict)
    t = info_dict['t']
    if t % inference.n_print == 0:
        print("Inferred cluster means:")
        print(sess.run(qmu.value()))

# Average per-cluster and per-data point likelihood over many posterior samples.
log_liks = []
for _ in range(100):
    mu_sample = qmu.sample()
    sigma_sample = qsigma.sample()
    # Take per-cluster and per-data point likelihood.
    log_lik = []
    for k in range(K):
        x_post = Normal(mu=tf.ones([N, 1]) * tf.gather(mu_sample, k),
                        sigma=tf.ones([N, 1]) * tf.gather(sigma_sample, k))
        log_lik.append(tf.reduce_sum(x_post.log_prob(x_train), 1))

    log_lik = tf.pack(log_lik)  # has shape (K, N)
    log_liks.append(log_lik)
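The loop above only collects the per-sample (K, N) matrices; the averaging described in the comment, plus a hard cluster assignment, would continue along these lines (a sketch consistent with the shapes above, not part of the original):

avg_log_lik = tf.reduce_mean(tf.pack(log_liks), 0)  # average over posterior samples -> (K, N)
clusters = sess.run(tf.argmax(avg_log_lik, 0))      # most likely cluster per data point -> (N,)
print("Cluster assignments:", clusters)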
Example No. 40
N_MINIBATCH = 128  # batch size during training
d = 10  # latent variable dimension
DATA_DIR = "data/mnist"
IMG_DIR = "img"

if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)
if not os.path.exists(IMG_DIR):
    os.makedirs(IMG_DIR)

# DATA. MNIST batches are fed at training time.
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

# MODEL
z = Normal(mu=tf.zeros([N_MINIBATCH, d]), sigma=tf.ones([N_MINIBATCH, d]))
logits = generative_network(z.value())
x = Bernoulli(logits=logits)

# INFERENCE
x_ph = ed.placeholder(tf.float32, [N_MINIBATCH, 28 * 28])
mu, sigma = inference_network(x_ph)
qz = Normal(mu=mu, sigma=sigma)

# Bind p(x, z) and q(z | x) to the same placeholder for x.
data = {x: x_ph}
inference = ed.ReparameterizationKLKLqp({z: qz}, data)
optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
inference.initialize(optimizer=optimizer, use_prettytensor=True)

init = tf.initialize_all_variables()
init.run()