Example #1
File: helpers.py — Project: znob/arviz
def tfp_noncentered_schools(data, draws, chains):
    """Sample the non-centered eight-schools model with tfp's HMC kernel.

    Args:
        data: dict with keys "J" (number of schools), "sigma" (treatment
            stddevs) and "y" (observed treatment effects).
        draws: number of posterior draws to keep after burn-in.
        chains: unused; accepted only so all samplers share one signature.

    Returns:
        Tuple of (tfp_schools_model, list of sampled state arrays).
    """
    del chains  # single-chain implementation

    joint_log_prob = ed.make_log_joint_fn(tfp_schools_model)

    def unnormalized_log_posterior(avg_effect, avg_stddev,
                                   school_effects_standard):
        """Log joint of the latent states with the data held fixed."""
        return joint_log_prob(
            num_schools=data["J"],
            treatment_stddevs=data["sigma"].astype(np.float32),
            avg_effect=avg_effect,
            avg_stddev=avg_stddev,
            school_effects_standard=school_effects_standard,
            treatment_effects=data["y"].astype(np.float32),
        )

    # Start every latent at zero except the standardized effects, which
    # start at one, mirroring the model's parameterization.
    initial_state = [
        tf.zeros([], name="init_avg_effect"),
        tf.zeros([], name="init_avg_stddev"),
        tf.ones([data["J"]], name="init_school_effects_standard"),
    ]
    hmc = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=unnormalized_log_posterior,
        step_size=0.4,
        num_leapfrog_steps=3,
    )
    states, kernel_results = tfp.mcmc.sample_chain(
        num_results=draws,
        num_burnin_steps=500,
        current_state=initial_state,
        kernel=hmc,
    )

    # TF1 graph mode: materialize the sampled tensors with a session run.
    with tf.Session() as sess:
        states_, _ = sess.run([states, kernel_results])

    return tfp_schools_model, states_
Example #2
File: helpers.py — Project: MFreidank/arviz
def tfp_noncentered_schools(data, draws, chains):
    """Non-centered eight schools implementation for tfp.

    Args:
        data: dict with keys "J" (number of schools), "sigma" (treatment
            stddevs) and "y" (observed treatment effects).
        draws: number of posterior draws to keep after burn-in.
        chains: unused; accepted only so all samplers share one signature.

    Returns:
        arviz.InferenceData built from the sampled states via ``from_tfp``.
    """
    del chains

    def schools_model(num_schools, treatment_stddevs):
        """Edward2 generative model for the non-centered parameterization."""
        avg_effect = ed.Normal(loc=0.0, scale=10.0, name="avg_effect")  # `mu`
        avg_stddev = ed.Normal(loc=5.0, scale=1.0,
                               name="avg_stddev")  # `log(tau)`
        school_effects_standard = ed.Normal(
            loc=tf.zeros(num_schools),
            scale=tf.ones(num_schools),
            name="school_effects_standard")  # `eta`
        school_effects = avg_effect + tf.exp(
            avg_stddev) * school_effects_standard  # `theta`
        treatment_effects = ed.Normal(loc=school_effects,
                                      scale=treatment_stddevs,
                                      name="treatment_effects")  # `y`
        return treatment_effects

    log_joint = ed.make_log_joint_fn(schools_model)

    def target_log_prob_fn(avg_effect, avg_stddev, school_effects_standard):
        """Unnormalized target density as a function of states."""
        return log_joint(
            num_schools=data["J"],
            treatment_stddevs=data["sigma"].astype(np.float32),
            avg_effect=avg_effect,
            avg_stddev=avg_stddev,
            school_effects_standard=school_effects_standard,
            treatment_effects=data["y"].astype(np.float32),
        )

    states, kernel_results = tfp.mcmc.sample_chain(
        num_results=draws,
        num_burnin_steps=500,
        current_state=[
            tf.zeros([], name="init_avg_effect"),
            tf.zeros([], name="init_avg_stddev"),
            tf.ones([data["J"]], name="init_school_effects_standard"),
        ],
        kernel=tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=target_log_prob_fn,
            step_size=0.4,
            num_leapfrog_steps=3),
    )

    # TF1 graph mode: materialize the sampled tensors with a session run.
    with tf.Session() as sess:
        [states_, _] = sess.run([states, kernel_results])

    # BUGFIX: bind the result to a fresh name instead of rebinding ``data``.
    # The ``model_fn`` lambda closes over ``data`` by reference, so rebinding
    # the parameter would hand the lambda the InferenceData object (not the
    # input dict) if it were invoked after this call returns.
    posterior = from_tfp(
        states_,
        var_names=["mu", "tau", "eta"],
        model_fn=lambda: schools_model(data["J"], data["sigma"].astype(
            np.float32)),
        observed=data["y"].astype(np.float32),
    )
    return posterior
Example #3
def make_log_prob_fn(model_fn, X, y):
    """Makes a log likelihood function for MCMC training.

    Args:
        model_fn: (function) A model function.
        X: (Tensor or ndarray) A Tensor of input variables
        y: (Tensor or ndarray) A Tensor of response variables

    Returns:
        (function): a log likelihood function for MCMC training
    """
    joint_fn = ed.make_log_joint_fn(model_fn)
    variable_names = list(get_variable_dict(model_fn, X))

    def log_prob_fn(*states):
        # Pair each positional state with its random-variable name, dropping
        # the response variable 'y', which is supplied as observed data below.
        named_states = {
            name: state
            for name, state in zip(variable_names, states)
            if name != 'y'
        }
        return joint_fn(X, y=y, **named_states)

    return log_prob_fn
Example #4
def probabilistic_pca(data_dim, latent_dim, num_datapoints,
                      stddv_datapoints):  # (unmodeled) data
    """Probabilistic PCA generative model as an Edward2 program."""
    # Principal-axes matrix (data_dim x latent_dim) with a wide normal prior.
    w = ed.Normal(
        loc=tf.zeros([data_dim, latent_dim]),
        scale=2.0 * tf.ones([data_dim, latent_dim]),
        name="w")  # parameter
    # Latent codes, one column per datapoint, standard-normal prior.
    z = ed.Normal(
        loc=tf.zeros([latent_dim, num_datapoints]),
        scale=tf.ones([latent_dim, num_datapoints]),
        name="z")  # parameter
    # Observations: linear projection of the latents plus isotropic noise.
    x = ed.Normal(
        loc=tf.matmul(w, z),
        scale=stddv_datapoints * tf.ones([data_dim, num_datapoints]),
        name="x")  # (modeled) data
    return x, (w, z)


# Log joint density over (w, z, x) derived from the generative model above.
log_joint = ed.make_log_joint_fn(probabilistic_pca)

# Synthetic-data configuration: 5000 two-dimensional observations generated
# from a one-dimensional latent space with observation noise 0.5.
num_datapoints = 5000
data_dim = 2
latent_dim = 1
stddv_datapoints = 0.5

model = probabilistic_pca(data_dim=data_dim,
                          latent_dim=latent_dim,
                          num_datapoints=num_datapoints,
                          stddv_datapoints=stddv_datapoints)

# Draw one sample from the prior to serve as the training set, keeping the
# true w and z that generated it (TF1 graph mode needs an explicit session).
with tf.Session() as sess:
    x_train, (actual_w, actual_z) = sess.run(model)

# Discard the generative graph before building the inference graph.
tf.reset_default_graph()
Example #5
def logistic_regression(features):
    """Bayesian logistic regression with standard-normal priors."""
    # One coefficient per feature column, prior N(0, 1).
    coeffs = ed.Normal(
        loc=tf.zeros(features.shape[1]), scale=1., name="coeffs")
    intercept = ed.Normal(loc=0., scale=1., name='intercept')
    # Linear predictor: features @ coeffs + intercept.
    logits = tf.tensordot(features, coeffs, [[1], [0]]) + intercept
    outcomes = ed.Bernoulli(logits=logits, name='outcomes')
    return outcomes


num_features = 55
# Simulated design matrix (100 rows) and binary outcomes in {0, 1}.
features = tf.random_normal([100, num_features])
outcomes = tf.random_uniform([100], minval=0, maxval=2, dtype=tf.int32)

# Log joint density over (coeffs, intercept, outcomes) given features.
log_joint = ed.make_log_joint_fn(logistic_regression)


def target_log_prob_fn(coeffs, intercept):
    """Unnormalized log posterior of (coeffs, intercept) given outcomes."""
    # Condition the joint on the simulated outcomes; features are fixed.
    conditioning = dict(coeffs=coeffs, intercept=intercept, outcomes=outcomes)
    return log_joint(features, **conditioning)


# HMC transition kernel targeting the posterior defined above: 5 leapfrog
# steps of size 0.1 per proposal.
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=target_log_prob_fn, step_size=0.1, num_leapfrog_steps=5)
states, kernel_results = tfp.mcmc.sample_chain(
    num_results=1000,
    current_state=[tf.random_normal([55]),
                   tf.random_normal([])],