Example #2
import edward as ed
import tensorflow as tf
from edward.models import Normal


# Enclosing test-case class assumed; the original snippet shows only the method.
class InferenceDataTest(tf.test.TestCase):
  def test_data(self):
    x = Normal(mu=0.0, sigma=1.0)
    qx = Normal(mu=0.0, sigma=1.0)
    qx_misshape = Normal(mu=tf.constant([0.0]), sigma=tf.constant([1.0]))
    x_ph = ed.placeholder(tf.float32)

    ed.Inference()
    ed.Inference(data={x: tf.constant(0.0)})
    ed.Inference(data={x_ph: tf.constant(0.0)})
    ed.Inference(data={x: x_ph})
    ed.Inference(data={x: qx})
    self.assertRaises(TypeError, ed.Inference, data={5: tf.constant(0.0)})
    self.assertRaises(TypeError, ed.Inference, data={x: 'a'})
    self.assertRaises(TypeError, ed.Inference, data={x_ph: x})
    self.assertRaises(TypeError, ed.Inference, data={x: qx_misshape})
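
The test enumerates the accepted forms for ed.Inference's data argument (constants, placeholders, and random variables of matching shape) and the forms that must raise TypeError: a non-variable key, a non-numeric value, a random variable bound to a placeholder key, and a shape mismatch.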
Example #4
                mat[i] += [multivariate_rbf(xi, xj)]

        mat[i] = tf.pack(mat[i])  # stack row i of pairwise kernel values

    return tf.pack(mat)  # full N x N covariance matrix


ed.set_seed(42)

# DATA
df = np.loadtxt('data/crabs_train.txt', dtype='float32', delimiter=',')
df[df[:, 0] == -1, 0] = 0  # replace -1 label with 0 label
N = 25  # number of data points
D = df.shape[1] - 1  # number of features
subset = np.random.choice(df.shape[0], N, replace=False)
X_train = df[subset, 1:]
y_train = df[subset, 0]

# MODEL
X = ed.placeholder(tf.float32, [N, D])
f = MultivariateNormalFull(mu=tf.zeros(N), sigma=kernel(X))
y = Bernoulli(logits=f)

# INFERENCE
qf = Normal(mu=tf.Variable(tf.random_normal([N])),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal([N]))))

data = {X: X_train, y: y_train}
inference = ed.KLqp({f: qf}, data)
inference.run(n_iter=500)
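
The kernel fragment at the top of this example relies on a multivariate_rbf helper that the snippet does not show. A minimal sketch of a squared-exponential kernel in the same old-TensorFlow style (the signature and default hyperparameters here are assumptions, not the example's own code):

def multivariate_rbf(x, y, sigma=1.0, l=1.0):
    # Assumed form: k(x, y) = sigma^2 * exp(-||x - y||^2 / (2 * l^2))
    return tf.pow(sigma, 2.0) * \
        tf.exp(-1.0 / (2.0 * tf.pow(l, 2.0)) * tf.reduce_sum(tf.pow(x - y, 2.0)))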
Example #5
        # Mixture density log-likelihood: log sum_k pi_k * N(y | mu_k, sigma_k),
        # summed over the batch.
        result = self.pi * tf.exp(norm.logpdf(y, self.mus, self.sigmas))
        result = tf.log(tf.reduce_sum(result, 1))
        return tf.reduce_sum(result)


ed.set_seed(42)

X_train, X_test, y_train, y_test = build_toy_dataset(N=40000)
print("Size of features in training data: {:s}".format(X_train.shape))
print("Size of output in training data: {:s}".format(y_train.shape))
print("Size of features in test data: {:s}".format(X_test.shape))
print("Size of output in test data: {:s}".format(y_test.shape))
sns.regplot(X_train, y_train, fit_reg=False)
plt.show()

X = ed.placeholder(tf.float32, shape=(None, 1))
y = ed.placeholder(tf.float32, shape=(None, 1))
data = {'X': X, 'y': y}

model = MixtureDensityNetwork(20)

inference = ed.MAP([], data, model)
sess = ed.get_session()  # Start TF session
K.set_session(sess)  # Pass session info to Keras
inference.initialize()

NEPOCH = 1000
train_loss = np.zeros(NEPOCH)
test_loss = np.zeros(NEPOCH)
for i in range(NEPOCH):
    info_dict = inference.update(feed_dict={X: X_train, y: y_train})
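    # The loop body is truncated in the snippet; a plausible completion that
    # fills the pre-allocated loss arrays (the 'loss' key is an assumption):
    train_loss[i] = info_dict['loss']
    test_loss[i] = sess.run(inference.loss, feed_dict={X: X_test, y: y_test})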
Example #6
    return train_test_split(x_data, y_data, random_state=42)


ed.set_seed(42)

N = 6000  # num data points
D = 1  # num features

# DATA
X_train, X_test, y_train, y_test = build_toy_dataset(N)
print("Size of features in training data: {:s}".format(X_train.shape))
print("Size of output in training data: {:s}".format(y_train.shape))
print("Size of features in test data: {:s}".format(X_test.shape))
print("Size of output in test data: {:s}".format(y_test.shape))

X = ed.placeholder(tf.float32, [None, D])
y = ed.placeholder(tf.float32, [None, D])
data = {'X': X, 'y': y}

# MODEL
model = MixtureDensityNetwork(10)

# INFERENCE
inference = ed.MAP([], data, model)
sess = ed.get_session()
inference.initialize()

init = tf.initialize_all_variables()
init.run()

NEPOCH = 20
Example #7
if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)
if not os.path.exists(IMG_DIR):
    os.makedirs(IMG_DIR)

# DATA. MNIST batches are fed at training time.
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

# MODEL
z = Normal(mu=tf.zeros([N_MINIBATCH, d]), sigma=tf.ones([N_MINIBATCH, d]))
logits = generative_network(z.value())
x = Bernoulli(logits=logits)

# INFERENCE
x_ph = ed.placeholder(tf.float32, [N_MINIBATCH, 28 * 28])
mu, sigma = inference_network(x_ph)
qz = Normal(mu=mu, sigma=sigma)

# Bind p(x, z) and q(z | x) to the same placeholder for x.
data = {x: x_ph}
inference = ed.ReparameterizationKLKLqp({z: qz}, data)
optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
inference.initialize(optimizer=optimizer, use_prettytensor=True)

init = tf.initialize_all_variables()
init.run()

n_epoch = 100
n_iter_per_epoch = 1000
for epoch in range(n_epoch):
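    # The epoch loop is truncated in the snippet; a minimal completion in the
    # style of the document's other MNIST examples (names below are assumptions):
    avg_loss = 0.0
    for t in range(n_iter_per_epoch):
        x_batch, _ = mnist.train.next_batch(N_MINIBATCH)
        info_dict = inference.update(feed_dict={x_ph: x_batch})
        avg_loss += info_dict['loss']
    print("Epoch {}: avg loss = {}".format(epoch, avg_loss / n_iter_per_epoch))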
Example #8
qi = Normal(mu=qi_mu, sigma=qi_sigma)  # qi_mu, qi_sigma come from the truncated top of this snippet

#qw_mu = tf.expand_dims(tf.convert_to_tensor(beta0[0].astype(np.float32)), 1)
qw_mu = tf.Variable(tf.random_normal([D, 1]))
qw_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([D, 1])))
qw = Normal(mu=qw_mu, sigma=qw_sigma)

qb_mu = tf.Variable(tf.random_normal([Db, 1]))
qb_mu = qb_mu - tf.reduce_mean(qb_mu)  # force the random coefficients to be zero-mean
qb_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([Db, 1])))
qb = Normal(mu=qb_mu, sigma=qb_sigma)

zs = {'beta': qw, 'b': qb, 'Intercept': qi}

Xnew = ed.placeholder(tf.float32, shape=(None, D))
Znew = ed.placeholder(tf.float32, shape=(None, Db))
ynew = ed.placeholder(tf.float32, shape=(None, ))

data = {'X': Xnew, 'y': ynew, 'Z': Znew}

edmodel = MixedModel(lik_std=10.0, prior_std=100.0)

sess = ed.get_session()
inference = ed.MFVI(zs, data, edmodel)

#optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
#optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
optimizer = tf.train.GradientDescentOptimizer(0.001)

inference.initialize(optimizer=optimizer, n_samples=10)
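
The snippet stops after initialize. A sketch of the remaining steps, mirroring the training loop of Example #9 below (X_data, Z_data, and y_data stand in for the user's actual arrays and are not part of the source):

init = tf.initialize_all_variables()
init.run()
for t in range(501):
    _, loss = sess.run([inference.train, inference.loss],
                       feed_dict={Xnew: X_data, Znew: Z_data, ynew: y_data})
    inference.print_progress(t, loss)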
Example #9
    X = np.concatenate(
        [np.linspace(0, 2, num=N // 2),
         np.linspace(6, 8, num=N // 2)])
    y = 5.0 * X + norm.rvs(0, noise_std, size=N)
    X = X.reshape((N, 1))
    return X.astype(np.float32), y.astype(np.float32)


N = 40  # num data points
D = 1  # num features

ed.set_seed(42)
X_train, y_train = build_toy_dataset(N)
X_test, y_test = build_toy_dataset(N)

X = ed.placeholder(tf.float32, [N, D], name='X')
beta = Normal(mu=tf.zeros(D), sigma=tf.ones(D), name='beta')
y = Normal(mu=ed.dot(X, beta), sigma=tf.ones(N), name='y')

qmu_mu = tf.Variable(tf.random_normal([D]))
qmu_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([D])))
qbeta = Normal(mu=qmu_mu, sigma=qmu_sigma, name='qbeta')

data = {X: X_train, y: y_train}
inference = ed.MFVI({beta: qbeta}, data)
inference.initialize(logdir='train')

sess = ed.get_session()
for t in range(501):
    _, loss = sess.run([inference.train, inference.loss], {X: data[X]})
    inference.print_progress(t, loss)
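
Because initialize was given logdir='train', Edward writes TensorFlow summaries during training; progress can then be inspected by pointing TensorBoard at that directory (tensorboard --logdir=train).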
Example #10
from progressbar import ETA, Bar, Percentage, ProgressBar
from scipy.misc import imsave
from tensorflow.examples.tutorials.mnist import input_data

ed.set_seed(42)

M = 100  # batch size during training
d = 2  # latent dimension

# Probability model (subgraph)
z = Normal(mu=tf.zeros([M, d]), sigma=tf.ones([M, d]))
hidden = Dense(256, activation='relu')(z.value())
x = Bernoulli(logits=Dense(28 * 28)(hidden))

# Variational model (subgraph)
x_ph = ed.placeholder(tf.float32, [M, 28 * 28])
hidden = Dense(256, activation='relu')(x_ph)
qz = Normal(mu=Dense(d)(hidden),
            sigma=Dense(d, activation='softplus')(hidden))

# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
mnist = input_data.read_data_sets("data/mnist", one_hot=True)
data = {x: x_ph}

sess = ed.get_session()
K.set_session(sess)
inference = ed.KLqp({z: qz}, data)
optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
inference.initialize(optimizer=optimizer)

init = tf.initialize_all_variables()
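init.run()
# The snippet ends here; training would proceed with minibatch updates as in
# Example #7, e.g. inference.update(feed_dict={x_ph: batch}) over MNIST batches.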