Code example #1
def build_model():
    """
    Build the model.
    :return: Tensors for the inputs, outputs, growth and shift amounts in each phase
    """

    # Initialize
    inputs = [0] * PHASES   # Number of blueberries in each phase at time t
    shifts = [0] * PHASES   # Number of blueberries that will leave the phase by time t + 1
    grow = [0] * PHASES     # Number of blueberries that will arrive at the phase by time t + 1
    outputs = [0] * PHASES  # Number of blueberries in each phase at time t + 1
    for i in range(PHASES):
        inputs[i] = edm.Normal(20., 7., sample_shape=[N])
        shift_mean = get_shift_dist(inputs[i], PHASE_SHIFT_DAYS[i],
                                    PHASE_SHIFT_DAYS[i] / 2.)
        shifts[i] = edm.Normal(shift_mean, PHASE_SHIFT_DAYS[i] / 2.)
        grow[i] = shifts[i - 1] if i > 0 else edm.Normal(20., 7., sample_shape=[N])
        outputs[i] = edm.Normal(
            inputs[i] - shifts[i] + grow[i],
            inputs[i].scale + shifts[i].scale + grow[i].scale)
    return inputs, shifts, grow, outputs
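
This snippet assumes several module-level names (PHASES, N, PHASE_SHIFT_DAYS, get_shift_dist, edm) that are not shown. A minimal sketch of plausible definitions follows; every value and the get_shift_dist body are hypothetical placeholders, not taken from the original project.

import edward.models as edm

PHASES = 5                                  # hypothetical number of phases
N = 1000                                    # hypothetical sample size
PHASE_SHIFT_DAYS = [7., 10., 14., 10., 7.]  # hypothetical days per phase

def get_shift_dist(count, days, spread):
    # Hypothetical helper: mean number of berries leaving a phase per step.
    return count / days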
Code example #2
def rebuild_model(graph):
    """
    Rebuild the model from a TensorFlow graph.
    :param graph: TensorFlow graph containing the saved posterior tensors
    :return: Output tensors, a dictionary mapping model variables to their
             posterior approximations, and placeholders for the inputs
    """
    # Create model
    inputs, shifts, grow, outputs = build_model()

    # populate with reloaded data
    q_grow = edm.Normal(
        loc=graph.get_tensor_by_name('posterior/Normal/loc:0'),
        scale=graph.get_tensor_by_name('posterior/Normal/scale:0'),
        sample_shape=[N])
    latent_vars = [0] * PHASES
    input_ph = [0] * PHASES
    for i in range(PHASES):
        input_ph[i] = tf.placeholder(tf.float32, shape=[N])
        latent_vars[i] = edm.Normal(
            loc=graph.get_tensor_by_name('posterior/Normal_' + str(i + 1) +
                                         '/loc:0'),
            scale=graph.get_tensor_by_name('posterior/Normal_' + str(i + 1) +
                                           '/scale:0'),
            sample_shape=[N])

    latent_var_dict = {grow[0]: q_grow}
    latent_var_dict.update(dict(zip(shifts, latent_vars)))

    return outputs, latent_var_dict, input_ph
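
A hedged usage sketch: rebuild_model expects a graph that already contains the saved posterior tensors, so one plausible call site restores a checkpoint first. The checkpoint path below is hypothetical.

import tensorflow as tf

# import_meta_graph restores the graph definition; Saver.restore loads values.
saver = tf.train.import_meta_graph('checkpoints/model.ckpt.meta')
with tf.Session() as sess:
    saver.restore(sess, 'checkpoints/model.ckpt')
    outputs, latent_var_dict, input_ph = rebuild_model(sess.graph)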
Code example #3
File: VDQN.py Project: HarriBellThomas/VDQN
        def __model(self):
            with tf.variable_scope("prior", reuse=True):
                stateX = tf.placeholder(tf.float32, [None, self.__stateSpace],
                                        name="stateX")
                actionX = tf.placeholder(tf.int32, [None], name="actionX")
                selected_actionX = tf.one_hot(actionX,
                                              self.__actionSpace,
                                              dtype=tf.float32)

                activation = tf.nn.relu(
                    tf.matmul(stateX, self.__W[0]) + self.__b[0])
                layers = len(self.__W.keys())
                for i in range(1, layers - 1):
                    activation = tf.nn.relu(
                        tf.matmul(activation, self.__W[i]) + self.__b[i])
                activation = tf.matmul(
                    activation, self.__W[layers - 1]) + self.__b[layers - 1]

                chosenAction = tf.reduce_sum(
                    tf.multiply(activation, selected_actionX), axis=1)
                chosenActionDistribution = edm.Normal(loc=chosenAction,
                                                      scale=self.__sigma,
                                                      name="Y")

                self.__stateX = stateX
                self.__actionX = actionX
                self.__nextAction = chosenActionDistribution
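
The one-hot masking in __model is a standard way to select each sample's Q-value. Isolated, the pattern looks like this (toy values, not from the project):

import tensorflow as tf

q_values = tf.constant([[1., 2., 3.],
                        [4., 5., 6.]])           # per-action value estimates
actions = tf.constant([2, 0])                    # chosen action per sample
mask = tf.one_hot(actions, depth=3)              # [[0,0,1],[1,0,0]]
chosen = tf.reduce_sum(q_values * mask, axis=1)  # -> [3., 4.]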
Code example #4
File: VDQN.py Project: HarriBellThomas/VDQN
        def __posterior(self):
            with tf.variable_scope("posterior"):
                nnLayers = self.__nnLayers
                layerTransitionPairings = zip(nnLayers[:-1], nnLayers[1:])
                _index = 0
                sigmaRho = self.__sigmaRho or np.log(np.exp(0.017) - 1.0)

                # Posterior collections.
                self.__posterior_W, self.__posterior_b = {}, {}
                self.__posterior_W_mu, self.__posterior_b_mu = {}, {}
                self.__posterior_W_rho, self.__posterior_b_rho = {}, {}

                for _left, _right in layerTransitionPairings:
                    with tf.variable_scope("posterior_W{}".format(_index)):
                        _width = np.sqrt(3.0 / _left)
                        self.__posterior_W_mu[_index] = tf.Variable(
                            tf.random_uniform([_left, _right], -1 * _width,
                                              _width),
                            name="mean")
                        self.__posterior_W_rho[_index] = tf.Variable(
                            tf.random_uniform([_left, _right], sigmaRho,
                                              sigmaRho),
                            name="std",
                            trainable=True)
                        self.__posterior_W[_index] = edm.Normal(
                            loc=self.__posterior_W_mu[_index],
                            scale=tf.nn.softplus(
                                self.__posterior_W_rho[_index]))

                    with tf.variable_scope("posterior_b{}".format(_index)):
                        self.__posterior_b_mu[_index] = tf.Variable(
                            tf.random_uniform([_right], 0, 0), name="mean")
                        self.__posterior_b_rho[_index] = tf.Variable(
                            tf.random_uniform([_right], sigmaRho, sigmaRho),
                            name="std",
                            trainable=True)
                        self.__posterior_b[_index] = edm.Normal(
                            loc=self.__posterior_b_mu[_index],
                            scale=tf.nn.softplus(
                                self.__posterior_b_rho[_index]))

                    _index += 1
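
Two details worth noting. The tf.random_uniform calls with identical lower and upper bounds simply fill the tensor with the constant sigmaRho (or 0 for the bias means). And the default sigmaRho is the softplus inverse of 0.017, so each posterior weight starts with that small positive standard deviation; a quick NumPy check of the identity:

import numpy as np

rho = np.log(np.exp(0.017) - 1.0)  # the default sigmaRho above
sigma = np.log1p(np.exp(rho))      # softplus(rho)
assert np.isclose(sigma, 0.017)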
Code example #5
    def test_mog(self):
        x_val = np.array([1.1, 1.2, 2.1, 4.4, 5.5, 7.3, 6.8], np.float32)
        z_val = np.array([0, 0, 0, 1, 1, 2, 2], np.int32)
        pi_val = np.array([0.2, 0.3, 0.5], np.float32)
        mu_val = np.array([1.0, 5.0, 7.0], np.float32)

        N = x_val.shape[0]
        K = z_val.max() + 1

        pi_alpha = 1.3 + np.zeros(K, dtype=np.float32)
        mu_sigma = 4.0
        sigmasq = 2.0**2

        pi = rvs.Dirichlet(pi_alpha)
        mu = rvs.Normal(0.0, mu_sigma, sample_shape=[K])

        x = rvs.ParamMixture(pi,
                             {'loc': mu, 'scale': tf.sqrt(sigmasq)},
                             rvs.Normal,
                             sample_shape=N)
        z = x.cat

        mu_cond = ed.complete_conditional(mu)
        pi_cond = ed.complete_conditional(pi)
        z_cond = ed.complete_conditional(z)

        with self.test_session() as sess:
            pi_cond_alpha, mu_cond_mu, mu_cond_sigma, z_cond_p = sess.run(
                [pi_cond.concentration, mu_cond.loc, mu_cond.scale,
                 z_cond.probs],
                {z: z_val, x: x_val, pi: pi_val, mu: mu_val})

        true_pi = pi_alpha + np.unique(z_val, return_counts=True)[1]
        self.assertAllClose(pi_cond_alpha, true_pi)
        for k in range(K):
            sigmasq_true = (1.0 / mu_sigma**2 +
                            1.0 / sigmasq * (z_val == k).sum())**-1
            mu_true = sigmasq_true * (1.0 / sigmasq * x_val[z_val == k].sum())
            self.assertAllClose(np.sqrt(sigmasq_true), mu_cond_sigma[k])
            self.assertAllClose(mu_true, mu_cond_mu[k])
        true_log_p_z = np.log(pi_val) - 0.5 / sigmasq * (x_val[:, np.newaxis] -
                                                         mu_val)**2
        true_log_p_z -= true_log_p_z.max(1, keepdims=True)
        true_p_z = np.exp(true_log_p_z)
        true_p_z /= true_p_z.sum(1, keepdims=True)
        self.assertAllClose(z_cond_p, true_p_z)
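
The Dirichlet conditional asserted above reduces to the prior concentration plus per-component counts of z; the same true_pi computed with np.bincount, for reference:

import numpy as np

z_val = np.array([0, 0, 0, 1, 1, 2, 2])
true_pi = 1.3 + np.bincount(z_val, minlength=3)  # -> [4.3, 3.3, 3.3]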
Code example #6
File: test_conjugacy.py Project: powa64/edward
  def test_normal_normal(self):
    x_data = np.array([0.1, 0.5, 3.3, 2.7])

    mu0 = 0.3
    sigma0 = 2.1
    sigma_likelihood = 1.2

    mu = rvs.Normal(mu0, sigma0)
    x = rvs.Normal(mu, sigma_likelihood, sample_shape=len(x_data))

    mu_cond = ed.complete_conditional(mu, [mu, x])
    self.assertIsInstance(mu_cond, rvs.Normal)

    with self.test_session() as sess:
      mu_val, sigma_val = sess.run([mu_cond.mu, mu_cond.sigma], {x: x_data})

    self.assertAllClose(sigma_val, (1.0 / sigma0**2 +
                                    len(x_data) / sigma_likelihood**2) ** -0.5)
    self.assertAllClose(mu_val,
                        sigma_val**2 * (mu0 / sigma0**2 +
                                        (1.0 / sigma_likelihood**2 *
                                         x_data.sum())))
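
For reference, the closed-form normal-normal posterior the test asserts, written directly in NumPy with the same numbers: precisions add, and the posterior mean is the precision-weighted combination of prior mean and data.

import numpy as np

x_data = np.array([0.1, 0.5, 3.3, 2.7])
mu0, sigma0, sigma_lik = 0.3, 2.1, 1.2

post_var = 1.0 / (1.0 / sigma0**2 + len(x_data) / sigma_lik**2)
post_mu = post_var * (mu0 / sigma0**2 + x_data.sum() / sigma_lik**2)
post_sigma = post_var**0.5  # matches mu_cond.sigma in the test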
Code example #7
File: test_conjugacy.py Project: powa64/edward
  def test_blanket_changes(self):
    pi = rvs.Dirichlet(tf.ones(3))
    mu = rvs.Normal(0.0, 1.0)
    z = rvs.Categorical(p=pi)

    pi1_cond = ed.complete_conditional(pi, [z, pi])
    pi2_cond = ed.complete_conditional(pi, [z, mu, pi])

    self.assertIsInstance(pi1_cond, rvs.Dirichlet)
    self.assertIsInstance(pi2_cond, rvs.Dirichlet)

    with self.test_session() as sess:
      alpha1_val, alpha2_val = sess.run([pi1_cond.alpha, pi2_cond.alpha])

    self.assertAllClose(alpha1_val, alpha2_val)
Code example #8
    def __init__(self,
                 loc=0,
                 scale=1,
                 dim=None,
                 observed=False,
                 name="Normal"):
        """Construct Normal distributions

        The parameters `loc` and `scale` must be shaped in a way that supports
        broadcasting (e.g. `loc + scale` is a valid operation). If dim is specified,
        it should be consistent with the lengths of `loc` and `scale`


        Args:
            loc (float): scalar or vector indicating the mean of the distribution at each dimension.
            scale (float): scalar or vector indicating the stddev of the distribution at each dimension.
            dim (int): optional scalar indicating the number of dimensions

        Raises:
            ValueError: if the parameters are not consistent
            AttributeError: if any of the properties is changed once the object is constructed

        """

        self.__check_params(loc, scale, dim)

        param_dim = 1 if dim is None else dim

        # shape = (batches, dimension)
        self_shape = (replicate.get_total_size(),
                      np.max([
                          get_total_dimension(loc),
                          get_total_dimension(scale), param_dim
                      ]))

        loc_rep = self.__reshape_param(loc, self_shape)
        scale_rep = self.__reshape_param(scale, self_shape)

        # build the distribution

        super(Normal, self).__init__(base_models.Normal(loc=loc_rep,
                                                        scale=scale_rep,
                                                        name=name),
                                     observed=observed)
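
A hedged usage sketch, assuming this __init__ belongs to a wrapper class exposed at module level as Normal (InferPy-style); the broadcasting behaviour follows the docstring above.

x = Normal(loc=0., scale=1., dim=3)  # three-dimensional standard normal
y = Normal(loc=[0., 1.], scale=1.)   # dimension inferred from loc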
Code example #9
    def test_inverse_gamma_normal(self):
        x_data = np.array([0.1, 0.5, 3.3, 2.7])

        sigmasq_conc = 1.3
        sigmasq_rate = 2.1
        x_loc = 0.3

        sigmasq = rvs.InverseGamma(sigmasq_conc, sigmasq_rate)
        x = rvs.Normal(x_loc, tf.sqrt(sigmasq), sample_shape=len(x_data))

        sigmasq_cond = ed.complete_conditional(sigmasq, [sigmasq, x])
        self.assertIsInstance(sigmasq_cond, rvs.InverseGamma)

        with self.test_session() as sess:
            conc_val, rate_val = sess.run(
                [sigmasq_cond.concentration, sigmasq_cond.rate], {x: x_data})

        self.assertAllClose(conc_val, sigmasq_conc + 0.5 * len(x_data))
        self.assertAllClose(rate_val, sigmasq_rate + 0.5 * np.sum(
            (x_data - x_loc)**2))
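
The conjugate update being asserted, in plain NumPy: with a known mean, the InverseGamma concentration gains n/2 and the rate gains half the summed squared residuals.

import numpy as np

x_data = np.array([0.1, 0.5, 3.3, 2.7])
post_conc = 1.3 + 0.5 * len(x_data)                # prior conc + n/2
post_rate = 2.1 + 0.5 * np.sum((x_data - 0.3)**2)  # prior rate + SSE/2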
Code example #10
    p_Sprinkler = tf.stack(
        [probs_Sprinkler[Cloudy[j], :] for j in range(sam_size)])
    Sprinkler = edm.Categorical(probs=p_Sprinkler, name='Sprinkler')

    arr_WetGrass = np.array([[[0.99, 0.01], [0.01, 0.99]],
                             [[0.01, 0.99], [0.01, 0.99]]])
    ten_WetGrass = tf.convert_to_tensor(arr_WetGrass, dtype=tf.float32)
    p_WetGrass = tf.stack(
        [ten_WetGrass[Sprinkler[j], Rain[j], :] for j in range(sam_size)])
    WetGrass = edm.Categorical(probs=p_WetGrass, name='WetGrass')

with tf.name_scope('posterior'):
    # Cloudy = placeholder

    emp_Rain_q = edm.Empirical(tf.nn.softmax(
        tf.get_variable('var_Rain_q',
                        shape=(sam_size, 2, 2),
                        initializer=tf.constant_initializer(0.5))),
                               name='emp_Rain_q')
    propo_Rain_q = edm.Normal(loc=emp_Rain_q, scale=0.05)

    emp_Sprinkler_q = edm.Empirical(tf.nn.softmax(
        tf.get_variable('var_Sprinkler_q',
                        shape=(sam_size, 2, 2),
                        initializer=tf.constant_initializer(0.5))),
                                    name='emp_Sprinkler_q')
    propo_Sprinkler_q = edm.Normal(loc=emp_Sprinkler_q, scale=0.05)

    WetGrass_ph = tf.placeholder(tf.int32,
                                 shape=[sam_size],
                                 name="WetGrass_ph")
Code example #11
import tensorflow as tf
import edward.models as md
import edward.inferences as inf
import util.pnet_tokenize as tok

dists = {'NORMAL': md.Normal}

acts = {'RELU': tf.nn.relu, 'SOFTMAX': tf.nn.softmax}

infs = {'KLqp': inf.KLqp, 'KLpq': inf.KLpq}

outtypes = {
    'CATEGORICAL': md.Categorical,
    'NORMAL': lambda x: md.Normal(x, 0.1),
}


class BNN:
    def __init__(self, spec_file):
        self.graph = tf.Graph()
        self.spec = tok.tokenize_net(spec_file)
        self.ff = 0
        self.conv = 0
        self.rnn = 0
        self.__gen_net__()

    def inference(self, x_train, y_train, n_iter):
        self.inftype(self.weights, data={
            self.x: x_train,
            self.y: y_train
        }).run(n_iter=n_iter)
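
A hedged usage sketch; the spec-file grammar lives in util.pnet_tokenize and is not shown, so the filename and training arrays below are hypothetical, and the expected shapes depend on the parsed spec.

import numpy as np

net = BNN('examples/mlp.spec')                        # hypothetical spec file
x_train = np.random.randn(100, 4).astype(np.float32)  # hypothetical data
y_train = np.random.randint(0, 3, size=100)
net.inference(x_train, y_train, n_iter=500)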
Code example #12
beta1t = 1  # true value of beta1 for synthetic data
sigmat = 1  # true value of the measurement error for synthetic data

x = np.random.normal(0, 1, N)  # normally distributed independent variable
y = beta0t + beta1t * x + np.random.normal(0, sigmat, N)  # regression line plus noise
fig1 = plt.figure(1)
plt.scatter(x, y)  #plot synthetic data

#######################################################
#-- Make placeholders for inference and set up model
#######################################################
x_holder = tf.placeholder(tf.float32, [N, D])
#-- priors on slope and intercept
beta0 = edm.Normal(loc=tf.ones(1) * 10, scale=tf.ones(1))
beta1 = edm.Normal(loc=tf.zeros(D), scale=tf.ones(D))
#sigma = edm.HalfNormal(scale=tf.ones(1))
#sigma = edm.Uniform(low=tf.zeros(1),high=tf.ones(1)*2)
sigma = edm.InverseGamma(concentration=tf.ones(1), rate=tf.ones(1))
y_model = edm.Normal(loc=ed.dot(x_holder, beta1) + beta0,
                     scale=tf.ones(N) * sigma)

#######################################################
#-- Inference
#######################################################
qbeta1 = edm.Empirical(params=tf.get_variable("qbeta1/params", [T, D]))
qbeta0 = edm.Empirical(params=tf.get_variable("qbeta0/params", [T, 1]))
qsigma = edm.Empirical(params=tf.get_variable("qsigma/params", [T, 1]))
#-- Hamiltonian Monte Carlo
inference = ed.HMC({