Example #1
def fit_model(model, observations, POI, fit_type='mle'):
    """
    Perform a fit of the model to data

    Args:
        model (ed.models class): An Edward model
        observations (np.ndarray): Data to fit the model to
        POI (dict): Parameters of interest to return fit results on
        fit_type (str): The minimization technique used

    Returns:
        fit_result (dict): A dict of the fitted model parameters of interest
    """
    # observations is an ndarray of (n_observations, d_features)
    # model and data (observations) need to have the same shape
    assert model.get_shape() == observations.shape,\
        "The model and observed data features must be of the same shape.\n\
    The model passed has shape {0} and the data passed have shape (n_observations, d_features) = {1}".format(
        model.get_shape(), observations.shape)

    fit_type = fit_type.lower()
    if fit_type == 'mle':
        # http://edwardlib.org/api/ed/MAP
        fit = ed.MAP({}, data={model: observations})
    else:
        fit = ed.MAP({}, data={model: observations})  # default to mle
    fit.run()

    sess = ed.get_session()

    fit_result = {}
    for poi in POI:
        fit_result[poi] = sess.run(POI[poi])
    return fit_result
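# A rough usage sketch for fit_model (hypothetical model and data), assuming an
# Edward 1.x / TensorFlow 1.x environment; `loc` is the free parameter that the
# internal ed.MAP call fits by maximum likelihood.
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Normal

observations = np.random.randn(1000, 1).astype(np.float32)
loc = tf.Variable(0.0)
model = Normal(loc=loc, scale=1.0, sample_shape=[1000, 1])
result = fit_model(model, observations, POI={'loc': loc})
print(result['loc'])  # should be close to the sample mean of `observations`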
Example #2
    def run(self, adj_mat, n_iter=1000):
        assert adj_mat.shape[0] == adj_mat.shape[1]
        n_node = adj_mat.shape[0]

        # model
        gamma = Dirichlet(concentration=tf.ones([self.n_cluster]))
        Pi = Beta(concentration0=tf.ones([self.n_cluster, self.n_cluster]),
                  concentration1=tf.ones([self.n_cluster, self.n_cluster]))
        Z = Multinomial(total_count=1., probs=gamma, sample_shape=n_node)
        X = Bernoulli(probs=tf.matmul(Z, tf.matmul(Pi, tf.transpose(Z))))

        # inference (point estimation)
        qgamma = PointMass(params=tf.nn.softmax(
            tf.Variable(tf.random_normal([self.n_cluster]))))
        qPi = PointMass(params=tf.nn.sigmoid(
            tf.Variable(tf.random_normal([self.n_cluster, self.n_cluster]))))
        qZ = PointMass(params=tf.nn.softmax(
            tf.Variable(tf.random_normal([n_node, self.n_cluster]))))

        # map estimation
        inference = ed.MAP({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: adj_mat})
        inference.initialize(n_iter=n_iter)

        tf.global_variables_initializer().run()

        for _ in range(inference.n_iter):
            info_dict = inference.update()
            inference.print_progress(info_dict)
        inference.finalize()
        return qZ.mean().eval().argmax(axis=1)
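# A rough driver sketch for the method above; `SBM` is a hypothetical wrapper class
# (not shown in this excerpt) whose constructor sets `n_cluster` and which exposes run().
import numpy as np

adj = np.zeros((20, 20), dtype=np.int32)
adj[:10, :10] = 1          # block 1: densely connected
adj[10:, 10:] = 1          # block 2: densely connected
np.fill_diagonal(adj, 0)

labels = SBM(n_cluster=2).run(adj, n_iter=500)  # hypothetical constructor
print(labels)              # one cluster index (0 or 1) per node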
Example #3
def latent_space_model_example():
	x_train = celegans('~/data')

	#--------------------
	N = x_train.shape[0]  # Number of data points.
	K = 3  # Latent dimensionality.

	z = Normal(loc=tf.zeros([N, K]), scale=tf.ones([N, K]))

	# Calculate N x N distance matrix.
	# 1. Create a vector, [||z_1||^2, ||z_2||^2, ..., ||z_N||^2], and tile it to create N identical rows.
	xp = tf.tile(tf.reduce_sum(tf.pow(z, 2), 1, keep_dims=True), [1, N])
	# 2. Create a N x N matrix where entry (i, j) is ||z_i||^2 + ||z_j||^2 - 2 z_i^T z_j.
	xp = xp + tf.transpose(xp) - 2 * tf.matmul(z, z, transpose_b=True)
	# 3. Invert the pairwise distances; a large constant added to the diagonal keeps the rate there close to zero.
	xp = 1.0 / tf.sqrt(xp + tf.diag(tf.zeros(N) + 1e3))

	x = Poisson(rate=xp)

	#--------------------
	if True:
		# Maximum a posteriori (MAP) estimation is simple in Edward.
		inference = ed.MAP([z], data={x: x_train})
	else:
		# One could run variational inference.
		qz = Normal(loc=tf.get_variable('qz/loc', [N * K]), scale=tf.nn.softplus(tf.get_variable('qz/scale', [N * K])))
		inference = ed.KLqp({z: qz}, data={x: x_train})
	inference.run(n_iter=2500)


def main():
	latent_space_model_example()
Example #4
  def test_ar_mle(self):
    # set up test data: a random walk
    T = 100
    z_true = np.zeros(T)
    r = 0.95
    sig = 0.01
    eta = 0.01
    for t in range(1, 100):
      z_true[t] = r * z_true[t - 1] + sig * np.random.randn()

    x_data = (z_true + eta * np.random.randn(T)).astype(np.float32)

    # use scipy to find max likelihood
    def cost(z):
      initial = z[0]**2 / sig**2
      ar = np.sum((z[1:] - r * z[:-1])**2) / sig**2
      data = np.sum((x_data - z)**2) / eta**2
      return initial + ar + data

    mle = minimize(cost, np.zeros(T)).x
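    # Note: up to an additive constant, cost(z) is proportional to the negative log
    # joint density of the AR(1) prior (innovation scale `sig`) and the Gaussian
    # likelihood (noise scale `eta`) defined below, so scipy's minimizer should agree
    # with the point estimate that ed.MAP recovers.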

    with self.test_session() as sess:
      z = AutoRegressive(T, r, sig)
      x = Normal(loc=z, scale=eta)

      qz = PointMass(params=tf.Variable(tf.zeros(T)))
      inference = ed.MAP({z: qz}, data={x: x_data})
      inference.run(n_iter=500)

      self.assertAllClose(qz.eval(), mle, rtol=1e-3, atol=1e-3)
Example #5
    def train(self, n_iter=1000):
        D = len(self.team_num_map.keys())
        N = self.xs.shape[0]
        with tf.name_scope('model'):
            self.X = tf.placeholder(tf.float32, [N, D])
            self.w1 = Normal(loc=tf.zeros(D), scale=tf.ones(D))
            # self.b1 = Normal(loc=tf.zeros(1), scale=tf.ones(1))
            self.y1 = Poisson(rate=tf.exp(ed.dot(self.X, self.w1)))

        with tf.name_scope('posterior'):
            if self.inf_type == 'Var':
                self.qw1 = Normal(loc=tf.get_variable("qw1_ll/loc", [D]),
                                  scale=tf.nn.softplus(
                                      tf.get_variable("qw1_ll/scale", [D])))
                # self.qb1 = Normal(loc=tf.get_variable("qb1/loc", [1]),
                #                  scale=tf.nn.softplus(tf.get_variable("qb1/scale",
                #                                                        [1])))
            elif self.inf_type == 'MAP':
                self.qw1 = PointMass(
                    Normal(loc=tf.get_variable("qw1_ll/loc", [D]),
                           scale=tf.nn.softplus(
                               tf.get_variable("qw1_ll/scale", [D]))))

        if self.inf_type == 'Var':
            inference = ed.ReparameterizationKLqp({self.w1: self.qw1},
                                                  data={
                                                      self.X: self.xs,
                                                      self.y1: self.ys
                                                  })
        elif self.inf_type == 'MAP':
            inference = ed.MAP({self.w1: self.qw1},
                               data={
                                   self.X: self.xs,
                                   self.y1: self.ys
                               })
        inference.initialize(optimizer=tf.train.AdamOptimizer(
            learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08),
                             n_iter=n_iter)
        tf.global_variables_initializer().run()

        self.loss = np.empty(n_iter, dtype=np.float32)
        for i in range(n_iter):
            info_dict = inference.update()
            self.loss[i] = info_dict["loss"]
            inference.print_progress(info_dict)

        self._trained = True

        graph = tf.get_default_graph()
        self.team_skill = graph.get_tensor_by_name("qw1_ll/loc:0").eval()
        self.perf_variance = graph.get_tensor_by_name("qw1_ll/scale:0").eval()
        # self.bias = (graph.get_tensor_by_name("qb1/loc:0").eval(),
        #              graph.get_tensor_by_name("qb2/loc:0").eval())

        self.y_post = ed.copy(self.y1, {self.w1: self.qw1})
        return
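# A rough prediction sketch (hypothetical usage): `m` stands for a trained instance of
# the class above; the posterior-predictive copy m.y_post can be sampled for a design
# matrix that matches the placeholder shape [N, D], e.g. the training matrix itself.
sess = ed.get_session()
rates = sess.run(m.y_post, feed_dict={m.X: m.xs})  # one Poisson draw per row of m.xs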
Example #6
    def test_normalnormal_run(self):
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float32)

            mu = Normal(mu=0.0, sigma=1.0)
            x = Normal(mu=tf.ones(50) * mu, sigma=1.0)

            qmu = PointMass(params=tf.Variable(1.0))

            # analytic solution: N(mu=0.0, sigma=\sqrt{1/51}=0.140)
            inference = ed.MAP({mu: qmu}, data={x: x_data})
            inference.run(n_iter=1000)

            self.assertAllClose(qmu.mean().eval(), 0)
    def test_export_meta_graph(self):
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float32)

            mu = Normal(loc=0.0, scale=1.0)
            x = Normal(loc=mu, scale=1.0, sample_shape=50)

            qmu = PointMass(params=tf.Variable(1.0))

            inference = ed.MAP({mu: qmu}, data={x: x_data})
            inference.run(n_iter=10)

            saver = tf.train.Saver()
            saver.export_meta_graph("/tmp/test_saver.meta")
Example #8
  def test_normalnormal_regularization(self):
    with self.test_session() as sess:
      x_data = np.array([5.0] * 50, dtype=np.float32)

      mu = Normal(loc=0.0, scale=1.0)
      x = Normal(loc=mu, scale=1.0, sample_shape=50)

      qmu = PointMass(params=tf.Variable(1.0))

      inference = ed.MAP({mu: qmu}, data={x: x_data})
      inference.run(n_iter=1000)
      mu_val = qmu.mean().eval()

      # regularized solution
      regularizer = tf.contrib.layers.l2_regularizer(scale=1.0)
      mu_reg = tf.get_variable("mu_reg", shape=[],
                               regularizer=regularizer)
      x_reg = Normal(loc=mu_reg, scale=1.0, sample_shape=50)

      inference_reg = ed.MAP(None, data={x_reg: x_data})
      inference_reg.run(n_iter=1000)

      mu_reg_val = mu_reg.eval()
      self.assertAllClose(mu_val, mu_reg_val)
    def test_map_default(self):
        with self.test_session() as sess:
            x = Gamma(2.0, 0.5)

            inference = ed.MAP([x])
            inference.initialize(auto_transform=True, n_iter=500)
            tf.global_variables_initializer().run()
            for _ in range(inference.n_iter):
                info_dict = inference.update()

            # Check approximation on constrained space has same mode as
            # target distribution.
            qx = inference.latent_vars[x]
            stats = sess.run([x.mode(), qx])
            self.assertAllClose(stats[0], stats[1], rtol=1e-5, atol=1e-5)
    def test_map_custom(self):
        with self.test_session() as sess:
            x = Gamma(2.0, 0.5)
            qx = PointMass(tf.nn.softplus(tf.Variable(0.5)))

            inference = ed.MAP({x: qx})
            inference.initialize(auto_transform=True, n_iter=500)
            tf.global_variables_initializer().run()
            for _ in range(inference.n_iter):
                info_dict = inference.update()

            # Check approximation on constrained space has same mode as
            # target distribution.
            stats = sess.run([x.mode(), qx])
            self.assertAllClose(stats[0], stats[1], rtol=1e-5, atol=1e-5)
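    # For reference: Gamma(2.0, 0.5) has concentration a = 2.0 and rate b = 0.5, so its
    # mode is (a - 1) / b = 2.0; both tests above should drive the point estimate to that value.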
Example #11
    def fit(self, X, y, n_epoch, **kwargs):
        """
        build and train model
        """
        # define the full model
        self._build_model(X, y)

        # setup inference procedure
        self.inference = ed.MAP(data={self.mixtures: self.y_ph})
        self.inference.initialize(var_list=tf.trainable_variables(), n_iter=n_epoch)
        tf.global_variables_initializer().run()

        # train the model
        self.partial_fit(X, y, n_epoch=n_epoch, **kwargs)
        self.fitted = True
    def test_save(self):
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float32)

            mu = Normal(loc=0.0, scale=1.0)
            x = Normal(loc=mu, scale=1.0, sample_shape=50)

            with tf.variable_scope("posterior"):
                qmu = PointMass(params=tf.Variable(1.0))

            inference = ed.MAP({mu: qmu}, data={x: x_data})
            inference.run(n_iter=10)

            saver = tf.train.Saver()
            saver.save(sess, "/tmp/test_saver")
Example #13
    def _fit(self, X, n_iter=1000, n_print=100, **kwargs):
        #         X  = np.asarray(X,np.float32)
        K = self.K
        N = len(X)

        self.cat = edm.Categorical(probs=self.prior.weight, sample_shape=N)
        self.emission = edm.Mixture(cat=self.cat,
                                    components=self.components,
                                    sample_shape=N)

        print('hiModel')
        self.dDict = {self.emission: X}
        self.inference = ed.MAP(self.paramDict, self.dDict)
        self.inference.run(n_iter=n_iter, n_print=n_print, **kwargs)
        return self
Example #14
def mmsb(N, K, data):
    # sparsity
    rho = 0.3
    # MODEL
    # probability of belonging to each of K blocks for each node
    gamma = Dirichlet(concentration=tf.ones([K]))
    # block connectivity
    Pi = Beta(concentration0=tf.ones([K, K]), concentration1=tf.ones([K, K]))
    # probability of belonging to each of K blocks for all nodes
    Z = Multinomial(total_count=1.0, probs=gamma, sample_shape=N)
    # adjacency
    X = Bernoulli(probs=(1 - rho) *
                  tf.matmul(Z, tf.matmul(Pi, tf.transpose(Z))))

    # INFERENCE (EM algorithm)
    qgamma = PointMass(
        params=tf.nn.softmax(tf.Variable(tf.random_normal([K]))))
    qPi = PointMass(
        params=tf.nn.sigmoid(tf.Variable(tf.random_normal([K, K]))))
    qZ = PointMass(params=tf.nn.softmax(tf.Variable(tf.random_normal([N, K]))))

    # qgamma = Normal(loc=tf.get_variable("qgamma/loc", [K]),
    #                scale=tf.nn.softplus(
    #                        tf.get_variable("qgamma/scale", [K])))
    # qPi = Normal(loc=tf.get_variable("qPi/loc", [K, K]),
    #                scale=tf.nn.softplus(
    #                        tf.get_variable("qPi/scale", [K, K])))
    # qZ = Normal(loc=tf.get_variable("qZ/loc", [N, K]),
    #                scale=tf.nn.softplus(
    #                        tf.get_variable("qZ/scale", [N, K])))

    # inference = ed.KLqp({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: data})
    inference = ed.MAP({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: data})

    # inference.run()
    n_iter = 6000
    inference.initialize(optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
                         n_iter=n_iter)

    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)

    inference.finalize()
    print('qgamma after: ', qgamma.mean().eval())
    return qZ.mean().eval(), qPi.eval()
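# A rough driver sketch for mmsb() on synthetic data, assuming the same imports as the
# function above (numpy, tensorflow, edward and the edward.models classes) are in scope.
N = 40
X_synth = np.random.binomial(1, 0.1, size=(N, N)).astype(np.int32)
Z_probs, Pi_hat = mmsb(N, K=4, data=X_synth)
print(Z_probs.argmax(axis=1))   # hard block assignment per node
print(Pi_hat)                   # estimated K x K block-connectivity matrix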
    def test_restore(self):
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float32)

            mu = Normal(loc=0.0, scale=1.0)
            x = Normal(loc=mu, scale=1.0, sample_shape=50)

            with tf.variable_scope("posterior"):
                qmu = PointMass(params=tf.Variable(1.0))

            inference = ed.MAP({mu: qmu}, data={x: x_data})

            saver = tf.train.Saver()
            saver.restore(sess, "tests/data/test_saver")
            qmu_variable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                             scope="posterior")[0]
            self.assertNotEqual(qmu_variable.eval(), 1.0)
Example #16
 def define_inference(self, X, n_iter, n_samples, optimizer=None):
     # Fall back to a default Adam optimizer only when none is supplied.
     if optimizer is None:
         optimizer = tf.train.AdamOptimizer(learning_rate=0.001, epsilon=0.01)
     scale = {
         self.model.X: n_samples,
         self.model.Beta: n_samples,
         self.model.L: n_samples
     }
     if self.model.dispersion:
         self.vi = ed.ReparameterizationKLqp(
             {
                 self.model.Beta: self.qBeta,
                 self.model.L: self.qL
             },
             data={self.model.X: self.model.X_data})
         self.map = ed.MAP(
             data={
                 self.model.X: self.model.X_data,
                 self.model.Beta: self.qBeta,
                 self.model.L: self.qL
             })
         self.map.initialize(n_iter=n_iter,
                             var_list=[self.model.Theta],
                             scale=scale)
     else:
         self.vi = ed.ReparameterizationKLqp(
             {
                 self.model.Beta: self.qBeta,
                 self.model.L: self.qL
             },
             data={self.model.X: self.model.X_data})
     train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
     vi_vars = [v for v in train_vars if v.name != "Theta:0"]
     self.vi.initialize(var_list=vi_vars,
                        n_iter=n_iter,
                        optimizer=optimizer,
                        scale=scale)
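     # A rough training-loop sketch (hypothetical usage): when dispersion is enabled,
     # the two inference objects set up above can be alternated, e.g.
     #   for _ in range(n_iter):
     #       info_dict = self.vi.update()
     #       if self.model.dispersion:
     #           self.map.update()
     #       self.vi.print_progress(info_dict)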
Example #17
def main(_):
    ed.set_seed(42)

    # DATA
    X_data, Z_true = karate("~/data")
    N = X_data.shape[0]  # number of vertices
    K = 2  # number of clusters

    # MODEL
    gamma = Dirichlet(concentration=tf.ones([K]))
    Pi = Beta(concentration0=tf.ones([K, K]), concentration1=tf.ones([K, K]))
    Z = Multinomial(total_count=1.0, probs=gamma, sample_shape=N)
    X = Bernoulli(probs=tf.matmul(Z, tf.matmul(Pi, tf.transpose(Z))))

    # INFERENCE (EM algorithm)
    qgamma = PointMass(tf.nn.softmax(tf.get_variable("qgamma/params", [K])))
    qPi = PointMass(tf.nn.sigmoid(tf.get_variable("qPi/params", [K, K])))
    qZ = PointMass(tf.nn.softmax(tf.get_variable("qZ/params", [N, K])))

    inference = ed.MAP({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: X_data})
    inference.initialize(n_iter=250)

    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        info_dict = inference.update()
        inference.print_progress(info_dict)

    # CRITICISM
    Z_pred = qZ.mean().eval().argmax(axis=1)
    print("Result (label flip can happen):")
    print("Predicted")
    print(Z_pred)
    print("True")
    print(Z_true)
    print("Adjusted Rand Index =", adjusted_rand_score(Z_pred, Z_true))
Example #18
X_train, X_test, y_train, y_test = build_toy_dataset(N=40000)
print("Size of features in training data: {:s}".format(X_train.shape))
print("Size of output in training data: {:s}".format(y_train.shape))
print("Size of features in test data: {:s}".format(X_test.shape))
print("Size of output in test data: {:s}".format(y_test.shape))
sns.regplot(X_train, y_train, fit_reg=False)
plt.show()

X = ed.placeholder(tf.float32, shape=(None, 1))
y = ed.placeholder(tf.float32, shape=(None, 1))
data = {'X': X, 'y': y}

model = MixtureDensityNetwork(20)

inference = ed.MAP([], data, model)
sess = ed.get_session()  # Start TF session
K.set_session(sess)  # Pass session info to Keras
inference.initialize()

NEPOCH = 1000
train_loss = np.zeros(NEPOCH)
test_loss = np.zeros(NEPOCH)
for i in range(NEPOCH):
    info_dict = inference.update(feed_dict={X: X_train, y: y_train})
    train_loss[i] = info_dict['loss']
    test_loss[i] = sess.run(inference.loss, feed_dict={X: X_test, y: y_test})

pred_weights, pred_means, pred_std = \
    sess.run([model.pi, model.mus, model.sigmas], feed_dict={X: X_test})
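# A rough post-processing sketch: one simple point prediction per test input is the
# mixture mean, assuming each row of pred_weights sums to one and pred_means has
# shape [n_test, n_components].
y_pred = np.sum(pred_weights * pred_means, axis=1)
print(y_pred[:5])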
Example #19
File: testEdward.py  Project: tsizemo2/APT
components = [
    Normal(loc=loc, scale=scale) for loc, scale in zip(
        tf.unstack(tf.transpose(locs)), tf.unstack(tf.transpose(scales)))
]
y = Mixture(cat=cat, components=components, value=tf.zeros_like(y_ph))
# Note: a bug in Mixture prevents its samples from having a shape of [None].
# Work around it with the value argument, since sampling is not needed for
# MAP estimation anyway.

#

# There are no latent variables to infer. Thus inference is concerned
# with only training model parameters, which are baked into how we
# specify the neural networks.
n_epoch = 1000

inference = ed.MAP(data={y: y_ph})
inference.initialize(var_list=tf.trainable_variables(), n_iter=n_epoch)

sess = ed.get_session()
tf.global_variables_initializer().run()

train_loss = np.zeros(n_epoch)
test_loss = np.zeros(n_epoch)
for i in range(n_epoch):
    info_dict = inference.update(feed_dict={X_ph: X_train, y_ph: y_train})
    train_loss[i] = info_dict['loss']
    test_loss[i] = sess.run(inference.loss,
                            feed_dict={
                                X_ph: X_test,
                                y_ph: y_test
                            })
Example #20
            if not (self.interaction == "additive"
                    or self.prior == "Lognormal"):
                raise NotImplementedError(
                    "Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior


ed.set_seed(42)
x_train = np.load('data/celegans_brain.npy')

K = 3
model = MatrixFactorization(K, n_rows=x_train.shape[0])

qz = PointMass(
    params=tf.nn.softplus(tf.Variable(tf.random_normal([model.n_vars]))))

data = {'x': x_train}
inference = ed.MAP({'z': qz}, data, model)
# Alternatively, run
# qz_mu = tf.Variable(tf.random_normal([model.n_vars]))
# qz_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([model.n_vars])))
# qz = Normal(mu=qz_mu, sigma=qz_sigma)
# inference = ed.MFVI({'z': qz}, data, model)

inference.run(n_iter=2500)
#!/usr/bin/env python
"""Generate `test_saver`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Normal, PointMass

x_data = np.array([0.0] * 50, dtype=np.float32)

mu = Normal(loc=0.0, scale=1.0)
x = Normal(loc=mu, scale=1.0, sample_shape=50)

with tf.variable_scope("posterior"):
  qmu = PointMass(params=tf.Variable(1.0))

inference = ed.MAP({mu: qmu}, data={x: x_data})
inference.run(n_iter=10)

sess = ed.get_session()
saver = tf.train.Saver()
saver.save(sess, "test_saver")
def bayes_mult_cmd(table_file, metadata_file, formula, output_file):

    #metadata = _type_cast_to_float(metadata.copy())
    metadata = pd.read_table(metadata_file, index_col=0)
    G_data = dmatrix(formula, metadata, return_type='dataframe')
    table = load_table(table_file)

    # basic filtering parameters
    soil_filter = lambda val, id_, md: id_ in metadata.index
    read_filter = lambda val, id_, md: np.sum(val) > 10
    #sparse_filter = lambda val, id_, md: np.mean(val > 0) > 0.1
    sample_filter = lambda val, id_, md: np.sum(val) > 1000

    table = table.filter(soil_filter, axis='sample')
    table = table.filter(sample_filter, axis='sample')
    table = table.filter(read_filter, axis='observation')
    #table = table.filter(sparse_filter, axis='observation')
    print(table.shape)
    y_data = pd.DataFrame(np.array(table.matrix_data.todense()).T,
                          index=table.ids(axis='sample'),
                          columns=table.ids(axis='observation'))

    y_data, G_data = y_data.align(G_data, axis=0, join='inner')

    psi = _gram_schmidt_basis(y_data.shape[1])
    G_data = G_data.values
    y_data = y_data.values
    N, D = y_data.shape
    p = G_data.shape[1] # number of covariates
    r = G_data.shape[1] # rank of covariance matrix

    psi = tf.convert_to_tensor(psi, dtype=tf.float32)
    n = tf.convert_to_tensor(y_data.sum(axis=1), dtype=tf.float32)

    # hack to get multinomial working
    def _sample_n(self, n=1, seed=None):
        # define Python function which returns samples as a Numpy array
        def np_sample(p, n):
            return multinomial.rvs(p=p, n=n, random_state=seed).astype(np.float32)

        # wrap python function as tensorflow op
        val = tf.py_func(np_sample, [self.probs, n], [tf.float32])[0]
        # set shape from unknown shape
        batch_event_shape = self.batch_shape.concatenate(self.event_shape)
        shape = tf.concat(
            [tf.expand_dims(n, 0), tf.convert_to_tensor(batch_event_shape)], 0)
        val = tf.reshape(val, shape)
        return val
    Multinomial._sample_n = _sample_n


    # dummy variable for gradient
    G = tf.placeholder(tf.float32, [N, p])

    b = Exponential(rate=1.0)
    B = Normal(loc=tf.zeros([p, D-1]), 
               scale=tf.ones([p, D-1]) )

    # Factorization of covariance matrix
    # http://edwardlib.org/tutorials/klqp
    l = Exponential(rate=1.0)
    L = Normal(loc=tf.zeros([p, D-1]), 
               scale=tf.ones([p, D-1]) )
    z = Normal(loc=tf.zeros([N, p]), 
               scale=tf.ones([N, p]))

    # Cholesky trick to get multivariate normal
    v = tf.matmul(G, B) + tf.matmul(z, L)

    # get clr transformed values
    eta = tf.matmul(v, psi)

    Y = Multinomial(total_count=n, logits=eta)


    T = 100000  # number of samples retained in the Empirical approximation used by SGLD

    qb = PointMass(params=tf.Variable(tf.random_normal([])))
    qB = PointMass(params=tf.Variable(tf.random_normal([p, D-1])))
    qz = Empirical(params=tf.Variable(tf.random_normal([T, N, p])))
    ql = PointMass(params=tf.Variable(tf.random_normal([])))
    qL = PointMass(params=tf.Variable(tf.random_normal([p, D-1])))

    # Imputation
    inference_z = ed.SGLD(
        {z: qz}, 
        data={G: G_data, Y: y_data, B: qB, L: qL}
    )

    # Maximization
    inference_BL = ed.MAP(
        {B: qB, L: qL, b: qb, l: ql}, 
        data={G: G_data, Y: y_data, z: qz}
    )

    inference_z.initialize(step_size=1e-10)
    inference_BL.initialize(n_iter=1000)


    sess = ed.get_session()
    saver = tf.train.Saver()

    tf.global_variables_initializer().run()
    for i in range(inference_BL.n_iter):
        inference_z.update()  # e-step
        # will need to compute the expectation of z

        info_dict = inference_BL.update() # m-step
        inference_BL.print_progress(info_dict)

    save_path = saver.save(sess, output_file)
    print("Model saved in file: %s" % save_path)
    pickle.dump({'qB': sess.run(qB.mean()),
                 'qL': sess.run(qL.mean()),
                 'qz': sess.run(qz.mean())},
                open(output_file + '.params.pickle', 'wb')
    )
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp))
        elif self.like == 'Poisson':
            if not (self.dist == 'euclidean' or self.prior == "Lognormal"):
                raise NotImplementedError(
                    "Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior


def load_celegans_brain():
    x = np.load('data/celegans_brain.npy')
    N = x.shape[0]
    return {'x': x}, N


ed.set_seed(42)
data, N = load_celegans_brain()
model = LatentSpaceModel(N, K=3, like='Poisson', prior='Gaussian')

inference = ed.MAP(model, data)
# Alternatively, run
# variational = Variational()
# variational.add(Normal(model.n_vars))
# inference = ed.MFVI(model, variational,data)

inference.run(n_iter=5000, n_print=500)
Example #24
            qgamma = PointMass(
                params=tf.nn.softplus(tf.gather(qgamma_variables[0], idx_ph)))


            qbeta0_variables = [tf.Variable(tf.random_uniform([1, 1])), \
                           tf.Variable(tf.nn.softplus(tf.random_uniform([1, 1])))]

            qbeta0 = PointMass(params=tf.nn.softplus(qbeta0_variables[0]))

            x_ph = tf.placeholder(tf.float32, [M, N])

            optimizer = tf.train.RMSPropOptimizer(5e-5)

            scale_factor = float(D) / M

            inference_U = ed.MAP({U: qU}, \
                data={x: x_ph, V: qV, gamma: qgamma, beta0: qbeta0})
            inference_V = ed.MAP({V: qV}, \
                data={x: x_ph, U: qU, gamma: qgamma, beta0: qbeta0})
            inference_gamma = ed.MAP({gamma: qgamma}, \
                data={x: x_ph, V: qV, U: qU, beta0: qbeta0})
            inference_beta0 = ed.MAP({beta0: qbeta0}, \
                data={x: x_ph, V: qV, U: qU, gamma: qgamma})

            inference_U.initialize(scale={
                x: scale_factor,
                U: scale_factor,
                gamma: scale_factor
            },
                                   var_list=qU_variables,
                                   optimizer=optimizer)
            inference_V.initialize(scale={
Example #25
            qU = PointMass(params=tf.gather(qU_variables[0], idx_ph))


            qV_variables = [tf.Variable(tf.random_uniform([N, K])), \
                           tf.Variable(tf.random_uniform([N, K]))]

            qV = PointMass(params=qV_variables[0])

            x_ph = tf.placeholder(tf.float32, [M, N])

            optimizer = tf.train.RMSPropOptimizer(5e-5)

            scale_factor = float(D) / M

            inference_U = ed.MAP({U: qU}, \
                data={x: x_ph, V: qV})
            inference_V = ed.MAP({V: qV}, \
                data={x: x_ph, U: qU})

            inference_U.initialize(scale={
                x: scale_factor,
                U: scale_factor
            },
                                   var_list=qU_variables,
                                   optimizer=optimizer)
            inference_V.initialize(scale={
                x: scale_factor,
                U: scale_factor
            },
                                   var_list=qV_variables,
                                   n_iter=n_iter,
Example #26
    return log_prior + log_lik


def build_toy_dataset(N):
  pi = np.array([0.4, 0.6])
  mus = [[1, 1], [-1, -1]]
  stds = [[0.1, 0.1], [0.1, 0.1]]
  x = np.zeros((N, 2), dtype=np.float32)
  for n in range(N):
    k = np.argmax(np.random.multinomial(1, pi))
    x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))

  return x


ed.set_seed(42)
x_train = build_toy_dataset(500)

K = 2
D = 2
model = MixtureGaussian(K, D)

qpi = PointMass(params=ed.to_simplex(tf.Variable(tf.random_normal([K - 1]))))
qmu = PointMass(params=tf.Variable(tf.random_normal([K * D])))
qsigma = PointMass(params=tf.exp(tf.Variable(tf.random_normal([K * D]))))

data = {'x': x_train}
inference = ed.MAP({'pi': qpi, 'mu': qmu, 'sigma': qsigma}, data, model)
inference.run(n_iter=500, n_minibatch=10)
Example #27
#!/usr/bin/env python
"""A simple coin flipping example. Inspired by Stan's toy example.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import numpy as np
import tensorflow as tf

from edward.models import Bernoulli, Beta, PointMass

ed.set_seed(42)

# DATA
x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# MODEL
p = Beta(a=1.0, b=1.0)
x = Bernoulli(p=tf.ones(10) * p)

# INFERENCE
qp_params = tf.nn.sigmoid(tf.Variable(tf.random_normal([])))
qp = PointMass(params=qp_params)

data = {x: x_data}
inference = ed.MAP({p: qp}, data)
inference.run(n_iter=50)
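# Follow-up check: with the flat Beta(1, 1) prior, the MAP estimate of p coincides
# with the maximum-likelihood estimate, i.e. the observed frequency of ones, 2/10 = 0.2.
print("Estimated p:", qp.eval())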
Mu = expand_scalar(0, (N, N, 1))

for n in range(N):
    for m in range(N):
        W[n, m] = npr.multivariate_normal(Mu[n, m], Sig[n, m])

# Initialize the test model
L_estimate = Normal(loc=tf.zeros([N, dim]), scale=tf.ones([N, dim]))
xp = tf.tile(tf.reduce_sum(tf.pow(L_estimate, 2), 1, keep_dims=True), [1, N])
xp = xp + tf.transpose(xp) - 2 * tf.matmul(
    L_estimate, L_estimate, transpose_b=True)
xp = tf.exp(-xp / 2)
x = Normal(loc=tf.zeros([N, N]), scale=xp)

# Inference using variational inference
# qL_estimate = Normal(loc=tf.Variable(tf.random_normal([N,dim])),
#                 scale=tf.nn.softplus(tf.Variable(tf.random_normal([N,dim]))))
#
# inference = ed.KLqp(latent_vars={L_estimate: qL_estimate}, data={x: W})
#
# inference.run(n_iter=100)
# L_estimate_samples = sess.run(qL_estimate)

# Inference using MAP
inference = ed.MAP([L_estimate], data={x: W})
inference.run(n_iter=1000)
mean = sess.run(inference.latent_vars[L_estimate])  # MAP point estimate, not a prior draw

plt.scatter(mean[:, 0], mean[:, 1], color='blue')
plt.scatter(L[:, 0], L[:, 1], color='red')
x = [0] * T
for n in range(p):
    x[n] = Normal(loc=mu, scale=10.0)  # fat prior on x
for n in range(p, T):
    mu_ = mu
    for j in range(p):
        mu_ += beta[j] * x[n - j - 1]
    x[n] = Normal(loc=mu_, scale=noise_proc)

print("setting up distributions")
qmu = PointMass(params=tf.Variable(0.))
qbeta = [PointMass(params=tf.Variable(0.)) for i in range(p)]
print("constructing inference object")
vdict = {mu: qmu}
vdict.update({b: qb for b, qb in zip(beta, qbeta)})
inference = ed.MAP(vdict, data={xt: xt_true for xt, xt_true in zip(x, x_true)})
print("running inference")
inference.run()

print("parameter estimates:")
print("beta: ", [qb.value().eval() for qb in qbeta])
print("mu: ", qmu.value().eval())

print("setting up variational distributions")
qmu = Normal(loc=tf.Variable(0.), scale=tf.nn.softplus(tf.Variable(0.)))
qbeta = [
    Normal(loc=tf.Variable(0.), scale=tf.nn.softplus(tf.Variable(0.)))
    for i in range(p)
]
print("constructing inference object")
vdict = {mu: qmu}
Example #30
            log_lik = tf.reduce_sum(norm.logpdf(xs['x'], xp, 1.0))
        elif self.like == 'Poisson':
            if not (self.dist == 'euclidean' or self.prior == "Lognormal"):
                raise NotImplementedError(
                    "Rate of Poisson has to be nonnegatve.")

            log_lik = tf.reduce_sum(poisson.logpmf(xs['x'], xp))
        else:
            raise NotImplementedError("likelihood not available.")

        return log_lik + log_prior


ed.set_seed(42)
x_train = np.load('data/celegans_brain.npy')

model = LatentSpaceModel(N=x_train.shape[0],
                         K=3,
                         like='Poisson',
                         prior='Gaussian')

data = {'x': x_train}
inference = ed.MAP(['z'], data, model)
# Alternatively, run
# qz_mu = tf.Variable(tf.random_normal([model.n_vars]))
# qz_sigma = tf.nn.softplus(tf.Variable(tf.random_normal([model.n_vars])))
# qz = Normal(mu=qz_mu, sigma=qz_sigma)
# inference = ed.KLqp({'z': qz}, data, model)

inference.run(n_iter=2500)