def build_update(self):
    """Draw sample from proposal conditional on last sample. Then
    accept or reject the sample based on the ratio,

    $\\text{ratio} =
          \log p(x, z^{\\text{new}}) - \log p(x, z^{\\text{old}}) +
          \log g(z^{\\text{new}} \mid z^{\\text{old}}) -
          \log g(z^{\\text{old}} \mid z^{\\text{new}})$

    #### Notes

    The updates assume each Empirical random variable is directly
    parameterized by `tf.Variable`s.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}
    old_sample = OrderedDict(old_sample)

    # Form dictionary in order to replace conditioning on prior or
    # observed variable with conditioning on a specific value.
    dict_swap = {}
    for x, qx in six.iteritems(self.data):
      if isinstance(x, RandomVariable):
        if isinstance(qx, RandomVariable):
          qx_copy = copy(qx, scope='conditional')
          dict_swap[x] = qx_copy.value()
        else:
          dict_swap[x] = qx

    dict_swap_old = dict_swap.copy()
    dict_swap_old.update(old_sample)
    base_scope = tf.get_default_graph().unique_name("inference") + '/'
    scope_old = base_scope + 'old'
    scope_new = base_scope + 'new'

    # Draw proposed sample and calculate acceptance ratio.
    new_sample = old_sample.copy()  # copy to ensure same order
    ratio = 0.0
    for z, proposal_z in six.iteritems(self.proposal_vars):
      # Build proposal g(znew | zold).
      proposal_znew = copy(proposal_z, dict_swap_old, scope=scope_old)
      # Sample znew ~ g(znew | zold).
      new_sample[z] = proposal_znew.value()
      # Increment ratio.
      ratio += tf.reduce_sum(proposal_znew.log_prob(new_sample[z]))

    dict_swap_new = dict_swap.copy()
    dict_swap_new.update(new_sample)

    for z, proposal_z in six.iteritems(self.proposal_vars):
      # Build proposal g(zold | znew).
      proposal_zold = copy(proposal_z, dict_swap_new, scope=scope_new)
      # Increment ratio.
      ratio -= tf.reduce_sum(proposal_zold.log_prob(dict_swap_old[z]))

    for z in six.iterkeys(self.latent_vars):
      # Build priors p(znew) and p(zold).
      znew = copy(z, dict_swap_new, scope=scope_new)
      zold = copy(z, dict_swap_old, scope=scope_old)
      # Increment ratio.
      ratio += tf.reduce_sum(znew.log_prob(dict_swap_new[z]))
      ratio -= tf.reduce_sum(zold.log_prob(dict_swap_old[z]))

    for x in six.iterkeys(self.data):
      if isinstance(x, RandomVariable):
        # Build likelihoods p(x | znew) and p(x | zold).
        x_znew = copy(x, dict_swap_new, scope=scope_new)
        x_zold = copy(x, dict_swap_old, scope=scope_old)
        # Increment ratio.
        ratio += tf.reduce_sum(x_znew.log_prob(dict_swap[x]))
        ratio -= tf.reduce_sum(x_zold.log_prob(dict_swap[x]))

    # Accept or reject sample.
    u = Uniform().sample()
    accept = tf.log(u) < ratio
    sample_values = tf.cond(accept, lambda: list(six.itervalues(new_sample)),
                            lambda: list(six.itervalues(old_sample)))
    if not isinstance(sample_values, list):
      # `tf.cond` returns tf.Tensor if output is a list of size 1.
      sample_values = [sample_values]

    sample = {z: sample_value for z, sample_value in
              zip(six.iterkeys(new_sample), sample_values)}

    # Update Empirical random variables.
    assign_ops = []
    for z, qz in six.iteritems(self.latent_vars):
      variable = qz.get_variables()[0]
      assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept (if accepted).
    assign_ops.append(self.n_accept.assign_add(tf.where(accept, 1, 0)))
    return tf.group(*assign_ops)
Example #2
# Linear regression without group differences
import numpy as np
import pandas as pd
import tensorflow as tf
import edward as ed
from edward.models import Normal, Uniform

df = pd.read_csv("data-salary-2.txt")
X_data = np.reshape(df.values[:, 0], (-1, 1))
Y_data = df.values[:, 1]

N = X_data.shape[0]  # number of data points

## Model description
# Y[n] = Y_base[n] + \epsilon[n]
# Y_base[n] = a + b * X[n]
# \epsilon[n] ~ Normal(0, \sigma_Y)
X = tf.placeholder(tf.float32, [N, 1])
a = Normal(mu=tf.zeros([1]), sigma=tf.ones([1]) * 500)
b = Normal(mu=tf.zeros([1]), sigma=tf.ones([1]) * 500)
sigma = Uniform(a=tf.ones([1]) * 1, b=tf.ones([1]) * 10)
Y = Normal(mu=ed.dot(X, b) + a, sigma=sigma)

# Data
data = {X: X_data, Y: Y_data}

## Inference (variational Bayes)
#qa = Normal(mu=tf.Variable(tf.random_normal([1])),\
#	sigma=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
#qb = Normal(mu=tf.Variable(tf.random_normal([1])),\
#	sigma=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
#qsigma = Normal(mu=tf.Variable(tf.random_normal([1])),\
#	sigma=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
#inference = ed.KLqp({a: qa, b: qb, sigma : qsigma}, data)
#inference.run(n_samples=1, n_iter=10000)
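To finish this example, a sketch of running the commented-out variational inference above and reading back posterior means; the iteration counts are illustrative, and the Normal approximation to `sigma` (which has bounded support) is kept only to mirror the original comments.

qa = Normal(mu=tf.Variable(tf.random_normal([1])),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
qb = Normal(mu=tf.Variable(tf.random_normal([1])),
            sigma=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
qsigma = Normal(mu=tf.Variable(tf.random_normal([1])),
                sigma=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))

inference = ed.KLqp({a: qa, b: qb, sigma: qsigma}, data)
inference.run(n_samples=5, n_iter=10000)

sess = ed.get_session()
print(sess.run([qa.mean(), qb.mean()]))  # posterior means of intercept and slope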
Example #3
    def build_update(self):
        """
    Draw sample from proposal conditional on last sample. Then accept
    or reject the sample based on the ratio,

    ratio = log p(x, znew) - log p(x, zold) +
            log g(znew | zold) - log g(zold | znew)
    """
        old_sample = {
            z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
            for z, qz in six.iteritems(self.latent_vars)
        }

        # Form dictionary in order to replace conditioning on prior or
        # observed variable with conditioning on a specific value.
        dict_swap = {}
        for x, qx in six.iteritems(self.data):
            if isinstance(x, RandomVariable):
                if isinstance(qx, RandomVariable):
                    qx_copy = copy(qx, scope='conditional')
                    dict_swap[x] = qx_copy.value()
                else:
                    dict_swap[x] = qx

        dict_swap_old = dict_swap.copy()
        dict_swap_old.update(old_sample)

        # Draw proposed sample and calculate acceptance ratio.
        new_sample = {}
        ratio = 0.0
        for z, proposal_z in six.iteritems(self.proposal_vars):
            # Build proposal g(znew | zold).
            proposal_znew = copy(proposal_z,
                                 dict_swap_old,
                                 scope='proposal_znew')
            # Sample znew ~ g(znew | zold).
            new_sample[z] = proposal_znew.value()
            # Increment ratio.
            ratio += tf.reduce_sum(proposal_znew.log_prob(new_sample[z]))

        dict_swap_new = dict_swap.copy()
        dict_swap_new.update(new_sample)

        for z, proposal_z in six.iteritems(self.proposal_vars):
            # Build proposal g(zold | znew).
            proposal_zold = copy(proposal_z,
                                 dict_swap_new,
                                 scope='proposal_zold')
            # Increment ratio.
            ratio -= tf.reduce_sum(proposal_zold.log_prob(dict_swap_old[z]))

        if self.model_wrapper is None:
            for z in six.iterkeys(self.latent_vars):
                # Build priors p(znew) and p(zold).
                znew = copy(z, dict_swap_new, scope='znew')
                zold = copy(z, dict_swap_old, scope='zold')
                # Increment ratio.
                ratio += tf.reduce_sum(znew.log_prob(dict_swap_new[z]))
                ratio -= tf.reduce_sum(zold.log_prob(dict_swap_old[z]))

            for x in six.iterkeys(self.data):
                if isinstance(x, RandomVariable):
                    # Build likelihoods p(x | znew) and p(x | zold).
                    x_znew = copy(x, dict_swap_new, scope='x_znew')
                    x_zold = copy(x, dict_swap_old, scope='x_zold')
                    # Increment ratio.
                    ratio += tf.reduce_sum(x_znew.log_prob(dict_swap[x]))
                    ratio -= tf.reduce_sum(x_zold.log_prob(dict_swap[x]))
        else:
            x = self.data
            ratio += self.model_wrapper.log_prob(x, new_sample)
            ratio -= self.model_wrapper.log_prob(x, old_sample)

        # Accept or reject sample.
        u = Uniform().sample()
        accept = tf.log(u) < ratio
        sample_values = tf.cond(accept,
                                lambda: list(six.itervalues(new_sample)),
                                lambda: list(six.itervalues(old_sample)))
        if not isinstance(sample_values, list):
            # ``tf.cond`` returns tf.Tensor if output is a list of size 1.
            sample_values = [sample_values]

        sample = {
            z: sample_value
            for z, sample_value in zip(six.iterkeys(new_sample), sample_values)
        }

        # Update Empirical random variables.
        assign_ops = []
        variables = {
            x.name: x
            for x in tf.get_default_graph().get_collection(
                tf.GraphKeys.VARIABLES)
        }
        for z, qz in six.iteritems(self.latent_vars):
            variable = variables[qz.params.op.inputs[0].op.inputs[0].name]
            assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

        # Increment n_accept (if accepted).
        assign_ops.append(self.n_accept.assign_add(tf.select(accept, 1, 0)))
        return tf.group(*assign_ops)
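Unlike the first listing, this older variant also supports Edward's deprecated model-wrapper interface (the `else` branch above), where the joint density comes from a user object rather than from composed random variables. A hypothetical minimal wrapper, assuming only the `log_prob(xs, zs)` hook called above is needed; additive constants are dropped since they cancel in the acceptance ratio.

import tensorflow as tf

class NormalNormalWrapper(object):
    """Hypothetical model wrapper: z ~ N(0, 1), x_n ~ N(z, 1)."""

    def log_prob(self, xs, zs):
        # xs: the `data` dict (e.g. {'x': np.ndarray}); zs: dict of latent samples.
        x = tf.cast(xs['x'], tf.float32)
        z = zs['z']
        log_prior = -0.5 * tf.reduce_sum(tf.square(z))    # log N(z; 0, 1) up to a constant
        log_lik = -0.5 * tf.reduce_sum(tf.square(x - z))  # sum_n log N(x_n; z, 1) up to a constant
        return log_prior + log_lik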
Example #4
data_dir = "/tmp/data"
out_dir = "/tmp/out"
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
M = 128  # batch size during training
d = 10  # latent dimension

# DATA. MNIST batches are fed at training time.
(x_train, _), (x_test, _) = mnist(data_dir)
x_train_generator = generator(x_train, M)
x_ph = tf.placeholder(tf.float32, [M, 784])

# MODEL
with tf.variable_scope("Gen"):
    eps = Uniform(low=tf.zeros([M, d]) - 1.0, high=tf.ones([M, d]))
    x = generative_network(eps)

# INFERENCE
optimizer = tf.train.RMSPropOptimizer(learning_rate=5e-5)
optimizer_d = tf.train.RMSPropOptimizer(learning_rate=5e-5)

inference = ed.GANInference(data={x: x_ph},
                            discriminator=discriminative_network)
inference.initialize(optimizer=optimizer,
                     optimizer_d=optimizer_d,
                     n_iter=15000,
                     n_print=1000)

sess = ed.get_session()
tf.global_variables_initializer().run()
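The snippet stops after initialization; a training-loop sketch in the style of Edward's GAN example follows, assuming `x_train_generator` yields MNIST batches of shape [M, 784].

for _ in range(inference.n_iter):
    x_batch = next(x_train_generator)
    info_dict = inference.update(feed_dict={x_ph: x_batch})
    inference.print_progress(info_dict)

# Generate images from the trained generator.
images = sess.run(x)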
Example #5
import matplotlib as mpl
mpl.use("Agg") # force Matplotlib backend to Agg

# import edward and TensorFlow
import edward as ed
import tensorflow as tf
from edward.models import Normal, Uniform, Empirical

# import model and data
from createdata import *

# set the priors
cmin = -10. # lower range of uniform distribution on c
cmax = 10.  # upper range of uniform distribution on c
cp = Uniform(low=cmin, high=cmax)

mmu = 0.     # mean of Gaussian distribution on m
msigma = 10. # standard deviation of Gaussian distribution on m
mp = Normal(loc=mmu, scale=msigma)

# set the likelihood containing the model
y = Normal(loc=mp*x + cp, scale=sigma*tf.ones(len(data)))

# set number of samples
Nsamples = 2000 # final number of samples
Ntune = 2000    # number of tuning samples

# set parameters to infer
qm = Empirical(params=tf.Variable(tf.zeros(Nsamples+Ntune)))
qc = Empirical(params=tf.Variable(tf.zeros(Nsamples+Ntune)))
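The snippet ends before inference is run; a sketch of the Metropolis-Hastings step that typically follows, with illustrative random-walk proposal widths, and assuming `x`, `sigma`, and `data` (the observed y-values) come from `createdata`.

# random-walk proposals centered on the current state (widths are guesses)
gmp = Normal(loc=mp, scale=0.5)
gcp = Normal(loc=cp, scale=0.5)

inference = ed.MetropolisHastings(latent_vars={mp: qm, cp: qc},
                                  proposal_vars={mp: gmp, cp: gcp},
                                  data={y: data})
inference.run()

# discard the tuning portion of the chains before summarizing
sess = ed.get_session()
msamples = sess.run(qm.params)[Ntune:]
csamples = sess.run(qc.params)[Ntune:]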
Example #6
DATA_DIR = "data/mnist"
IMG_DIR = "img"

if not os.path.exists(DATA_DIR):
    os.makedirs(DATA_DIR)
if not os.path.exists(IMG_DIR):
    os.makedirs(IMG_DIR)

# DATA. MNIST batches are fed at training time.
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)

M = 128  # batch size during training (assumed; not defined in this fragment)
d = 10   # latent dimension (assumed; not defined in this fragment)
x_ph = tf.placeholder(tf.float32, [M, 784])

# MODEL
with tf.variable_scope("Gen"):
    eps = Uniform(a=tf.zeros([M, d]) - 1.0, b=tf.ones([M, d]))
    x = generative_network(eps)

# INFERENCE
optimizer = tf.train.RMSPropOptimizer(learning_rate=5e-5)
optimizer_d = tf.train.RMSPropOptimizer(learning_rate=5e-5)

inference = ed.WGANInference(data={x: x_ph},
                             discriminator=discriminative_network)
inference.initialize(optimizer=optimizer,
                     optimizer_d=optimizer_d,
                     n_iter=15000,
                     n_print=1000)

sess = ed.get_session()
tf.global_variables_initializer().run()
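As with the GAN example, training happens in an explicit loop; a sketch assuming Edward's `GANInference.update` accepts a `variables` argument to alternate critic ("Disc") and generator ("Gen") updates, with five critic steps per generator step as in the usual WGAN recipe, and using the `mnist` loader and batch size `M` defined above.

for _ in range(inference.n_iter):
    x_batch, _ = mnist.train.next_batch(M)
    for _ in range(5):
        inference.update(feed_dict={x_ph: x_batch}, variables="Disc")
    info_dict = inference.update(feed_dict={x_ph: x_batch}, variables="Gen")
    inference.print_progress(info_dict)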
Example #7
    def build_update(self):
        """Simulate Hamiltonian dynamics using a numerical integrator.
    Correct for the integrator's discretization error using an
    acceptance ratio.

    #### Notes

    The updates assume each Empirical random variable is directly
    parameterized by `tf.Variable`s.
    """
        old_sample = {
            z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
            for z, qz in six.iteritems(self.latent_vars)
        }
        old_sample = OrderedDict(old_sample)

        # Sample momentum.
        old_r_sample = OrderedDict()
        for z, qz in six.iteritems(self.latent_vars):
            event_shape = qz.event_shape
            normal = Normal(loc=tf.zeros(event_shape),
                            scale=tf.ones(event_shape))
            old_r_sample[z] = normal.sample()

        # Simulate Hamiltonian dynamics.
        new_sample, new_r_sample = leapfrog(old_sample, old_r_sample,
                                            self.step_size, self._log_joint,
                                            self.n_steps)

        # Calculate acceptance ratio.
        ratio = tf.reduce_sum([
            0.5 * tf.reduce_sum(tf.square(r))
            for r in six.itervalues(old_r_sample)
        ])
        ratio -= tf.reduce_sum([
            0.5 * tf.reduce_sum(tf.square(r))
            for r in six.itervalues(new_r_sample)
        ])
        ratio += self._log_joint(new_sample)
        ratio -= self._log_joint(old_sample)

        # Accept or reject sample.
        u = Uniform().sample()
        accept = tf.log(u) < ratio
        sample_values = tf.cond(accept,
                                lambda: list(six.itervalues(new_sample)),
                                lambda: list(six.itervalues(old_sample)))
        if not isinstance(sample_values, list):
            # `tf.cond` returns tf.Tensor if output is a list of size 1.
            sample_values = [sample_values]

        sample = {
            z: sample_value
            for z, sample_value in zip(six.iterkeys(new_sample), sample_values)
        }

        # Update Empirical random variables.
        assign_ops = []
        for z, qz in six.iteritems(self.latent_vars):
            variable = qz.get_variables()[0]
            assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

        # Increment n_accept (if accepted).
        assign_ops.append(self.n_accept.assign_add(tf.where(accept, 1, 0)))
        return tf.group(*assign_ops)
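For reference, the numerical integrator referred to above is the standard leapfrog scheme; a standalone single-variable sketch (not Edward's `leapfrog` helper, which operates over dictionaries of latents):

def leapfrog_sketch(z, r, step_size, grad_log_joint, n_steps):
    """Illustrative leapfrog trajectory for one latent tensor.

    grad_log_joint(z) must return the gradient of log p(x, z) with
    respect to z; e.g. for a standard Normal target it is lambda z: -z.
    """
    for _ in range(n_steps):
        r = r + 0.5 * step_size * grad_log_joint(z)  # half step in momentum
        z = z + step_size * r                        # full step in position
        r = r + 0.5 * step_size * grad_log_joint(z)  # half step in momentum
    return z, r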
Example #8
import numpy as np
import tensorflow as tf
from edward.models import Normal, Poisson, PointMass, Exponential, Uniform, Empirical

count_data = np.loadtxt("data/txtdata.csv")
n_count_data = len(count_data)

sess = tf.Session()

alpha_f = 1.0 / count_data.mean()

with tf.name_scope('model'):
    alpha = tf.Variable(alpha_f, name="alpha", dtype=tf.float32)

    # priors: Poisson rates before and after the switchpoint, and the switchpoint itself
    lambda_1 = Exponential(alpha, name="lambda1")
    lambda_2 = Exponential(alpha, name="lambda2")
    tau = Uniform(low=0.0, high=float(n_count_data - 1), name="tau")
    idx = np.arange(n_count_data)
    # per-observation rate: lambda_1 before the switchpoint tau, lambda_2 after
    lambda_ = tf.where(tau >= idx,
                       tf.ones([n_count_data], dtype=tf.float32) * lambda_1,
                       tf.ones([n_count_data], dtype=tf.float32) * lambda_2)

    # observation model: Poisson counts with rate lambda_
    z = Poisson(lambda_, value=tf.Variable(tf.ones(n_count_data)), name="poi")

# INFERENCE
T = 5000  # number of posterior samples
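The script ends here; a sketch of the inference step that typically follows, using Metropolis-Hastings with illustrative random-walk proposals (proposed moves outside a variable's support produce a rejected step through the acceptance ratio):

import edward as ed

qlambda_1 = Empirical(params=tf.Variable(tf.zeros(T)))
qlambda_2 = Empirical(params=tf.Variable(tf.zeros(T)))
qtau = Empirical(params=tf.Variable(tf.zeros(T)))

# random-walk proposals centered on the current state (widths are guesses)
g_lambda_1 = Normal(loc=lambda_1, scale=1.0)
g_lambda_2 = Normal(loc=lambda_2, scale=1.0)
g_tau = Normal(loc=tau, scale=5.0)

inference = ed.MetropolisHastings(
    latent_vars={lambda_1: qlambda_1, lambda_2: qlambda_2, tau: qtau},
    proposal_vars={lambda_1: g_lambda_1, lambda_2: g_lambda_2, tau: g_tau},
    data={z: count_data.astype(np.float32)})
inference.run()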