Beispiel #1
0
# Time series of the youth unemployment rate (logit scale).
plt.plot(data["t"], data["youth"], "go-", label="Youth")

# Axes: half a quarter of padding in time, fixed logit range.
plt.xlim([data["t"].min() - 0.5, data["t"].max() + 0.5])
plt.ylim([-4.0, 0.0])
plt.xlabel("Time (quarters)")
plt.ylabel("Unemployment rate (logit)")
plt.legend()
plt.show()

# A model (of the prior information!)
model = bd.Model()

# AR(1) parameters for the adult unemployment rate. alpha1 and sigma1 are
# deterministic transforms: alpha1 is the lag-one coefficient implied by the
# correlation length L1, and sigma1 the implied stationary standard deviation.
ar1_nodes = [
    bd.Node("mu1", bd.Uniform(-10.0, 0.0)),
    bd.Node("L1", bd.LogUniform(1.0, 1E4)),
    bd.Node("beta1", bd.LogUniform(1E-3, 1E3)),
    bd.Node("alpha1", bd.Delta("exp(-1.0/L1)")),
    bd.Node("sigma1", bd.Delta("beta1/sqrt(1.0 - alpha1*alpha1)")),
]
for node in ar1_nodes:
    model.add_node(node)

# Sampling distribution for adult data: an AR(1) process.
# The first point uses the stationary sd sigma1 (defined above); each later
# point is Normal around the mean reverted towards mu1, with innovation sd
# beta1.  (Backslash continuation replaced by implicit continuation inside
# parentheses, per PEP 8.)
model.add_node(bd.Node("adult0",
                       bd.Normal("mu1", "sigma1"), observed=True))
for i in range(1, data["N"]):
    name = "adult{i}".format(i=i)
    dist = bd.Normal("mu1 + alpha1*(adult{k} - mu1)".format(k=(i - 1)),
                     "beta1")
    model.add_node(bd.Node(name, dist, observed=True))

# Parameters relating to youth data
for node in [bd.Node("offset", bd.Normal(0.0, 1.0)),
             bd.Node("policy_effect", bd.Cauchy(0.0, 0.1)),
             bd.Node("L2", bd.LogUniform(1E-2, 1E2)),
             bd.Node("beta2", bd.LogUniform(1E-3, 1E3))]:
    model.add_node(node)
Beispiel #2
0
# Dataset: five (x, y) points for the straight-line fit.
# N is derived from the array so it cannot drift out of sync with the data.
data = {
    "x": np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    "y": np.array([1.0, 2.0, 3.0, 3.9, 5.1]),
}
data["N"] = len(data["x"])

# Create the model
model = bd.Model()

# Slope and intercept: broad flat priors.
model.add_node(bd.Node("m", bd.Uniform(-100.0, 100.0)))
model.add_node(bd.Node("b", bd.Uniform(-100.0, 100.0)))

# Noise standard deviation, via a flat prior on its log (scale parameter).
model.add_node(bd.Node("log_sigma", bd.Uniform(-10.0, 10.0)))
model.add_node(bd.Node("sigma", bd.Delta("exp(log_sigma)")))

# p(data | parameters): each y_i is Normal around the line m*x_i + b.
for i in range(data["N"]):
    name = "y{index}".format(index=i)
    mean = "m*x{index} + b".format(index=i)
    model.add_node(bd.Node(name, bd.Normal(mean, "sigma"), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Compile the C++ code so it's ready to go.
# NOTE(review): intentionally left commented out — uncomment to build
# automatically, or run `make` by hand.
#import os
# os.system("make")
Beispiel #3
0
        # Wrap the current prior in a named Node and register it with the
        # model.  NOTE(review): the enclosing loop (which binds `name` and
        # `prior`) begins above this excerpt.
        node = bd.Node(name, prior)
        model.add_node(node)

# Sampling distribution
for i in range(data["N"]):

    # Linear predictor ("score") for each species category j, written as a
    # C++ expression over the four measurements plus one interaction term.
    for j in range(data["N_Species"]):
        name = "p_{j}_{i}".format(i=i, j=j)
        formula = ""
        formula += "beta_{j}_0 +"
        formula += "beta_{j}_1 * SL{i} + beta_{j}_2 * SW{i} + "
        formula += "beta_{j}_3 * PL{i} + beta_{j}_4 * PW{i} + "
        formula += "beta_{j}_5 * SW{i} * PL{i}"
        formula = formula.format(i=i, j=j)
        model.add_node(bd.Node(name, bd.Delta(formula)))

    # Normalising constant of the softmax.  Previously hard-coded for
    # exactly three species; now built from N_Species so it always matches
    # the loop above (identical string when N_Species == 3).
    name = "Z_{i}".format(i=i)
    formula = " + ".join("exp(p_{j}_{i})".format(i=i, j=j)
                         for j in range(data["N_Species"]))
    model.add_node(bd.Node(name, bd.Delta(formula)))

    # Softmax probability assigned to the observed species of point i.
    name = "prob_{i}".format(i=i)
    formula = "exp(p_{Species}_{i}) / Z_{i}".format(i=i,
                                                    Species=data["Species"][i])
    model.add_node(bd.Node(name, bd.Delta(formula)))

    # One Trick - this is like the "zeroes trick" in JAGS
Beispiel #4
0
                                                       .astype("float64")
# Observed counts, one per year (presumably the 1851-1962 range used by the
# change_year prior below — TODO confirm).  Stored as float64, presumably
# because the generated C++ expects doubles for observed nodes.
# The dtype is passed directly to np.array, avoiding the original's
# intermediate int array followed by an .astype copy.
data["y"] = np.array([4, 5, 4, 1, 0, 4, 3, 4, 0, 6, 3, 3, 4, 0,
                      2, 6, 3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5,
                      3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, 2, 2, 1,
                      1, 1, 1, 3, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1,
                      0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0,
                      0, 2, 1, 0, 0, 0, 1, 1, 0, 2, 3, 3, 1, 1,
                      2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 0, 1, 4, 0,
                      0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1],
                     dtype="float64")
data["N"] = len(data["t"])  # len() already returns int; int() was redundant

model = bd.Model()

# Log of the first rate, and the (log-scale) jump to the second rate.
model.add_node(bd.Node("log_mu1", bd.Cauchy(0.0, 5.0)))
model.add_node(bd.Node("diff", bd.Cauchy(0.0, 1.0)))
model.add_node(bd.Node("log_mu2", bd.Delta("log_mu1 + diff")))
model.add_node(bd.Node("mu1", bd.Delta("exp(log_mu1)")))
model.add_node(bd.Node("mu2", bd.Delta("exp(log_mu2)")))

# When the rate changes, and the width L of the logistic transition.
model.add_node(bd.Node("change_year", bd.Uniform(1851.0, 1962.0)))
model.add_node(bd.Node("L", bd.LogUniform(1E-2, 1E2)))

# Data nodes: Poisson counts whose mean ramps from mu1 to mu2 through a
# logistic curve centred on change_year.
for i in range(data["N"]):
    name = "y{i}".format(i=i)
    mean = ("mu1 + (mu2 - mu1)/(1.0 + exp(-(t{i} - change_year)/L))"
            .format(i=i))
    model.add_node(bd.Node(name, bd.Poisson(mean), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)
Beispiel #5
0
    "y": raw[:, 1],
    "r": raw[:, 2],
    "theta": raw[:, 3],
    "v": raw[:, 4],
    "sig_v": raw[:, 5],
    "N": raw.shape[0]
}

# Create the model
model = bd.Model()

# Constant velocity dispersion, with a heavy-tailed T prior on its log10.
model.add_node(bd.Node("log10_velocity_dispersion", bd.T(1.0, 0.5, 4.0)))
model.add_node(
    bd.Node("velocity_dispersion",
            bd.Delta("pow(10.0, log10_velocity_dispersion)")))

# Constant velocity offset, expressed as a fraction of the dispersion.
model.add_node(bd.Node("c_v_systematic", bd.T(0.0, 0.1, 1.0)))
model.add_node(
    bd.Node("v_systematic", bd.Delta("c_v_systematic*velocity_dispersion")))

# Rotation amplitude.
# BUG FIX(review): the original used a backslash line-continuation *inside*
# the string literal, which silently embedded the next line's leading
# whitespace (a long run of spaces) into the generated C++ expression.
# The expression is now a single clean literal with identical C++ meaning.
model.add_node(bd.Node("t_A", bd.T(0.0, 2.0, 2.0)))
model.add_node(
    bd.Node(
        "A",
        bd.Delta("pow(10.0, 1.0 - std::abs(t_A))*velocity_dispersion")))

# Rotation angle
Beispiel #6
0
    np.array([8.0, 15.0, 22.0, 29.0, 36.0])
}
data["x_bar"] = float(data["x"].mean())  # Convert from numpy.float64 to float

# A model
model = bd.Model()

# Priors on the hyperparameters, followed by deterministic transforms that
# turn the precisions (tau) into standard deviations, plus the centred
# intercept a0.  Order matters: transforms refer to the priors above them.
prior_nodes = [
    bd.Node("alpha_mu", bd.Normal(0.0, 100.0)),
    bd.Node("beta_mu", bd.Normal(0.0, 100.0)),
    bd.Node("tau_c", bd.Gamma(1E-3, 1E3)),
    bd.Node("alpha_tau", bd.Gamma(1E-3, 1E3)),
    bd.Node("beta_tau", bd.Gamma(1E-3, 1E3)),
    bd.Node("alpha_sigma", bd.Delta("1.0/sqrt(alpha_tau)")),
    bd.Node("beta_sigma", bd.Delta("1.0/sqrt(beta_tau)")),
    bd.Node("sigma_c", bd.Delta("1.0/sqrt(tau_c)")),
    bd.Node("a0", bd.Delta("alpha_mu - beta_mu*x_bar")),
]
for node in prior_nodes:
    model.add_node(node)

# Sampling distribution
# Non-centred parameterisation: per-unit intercepts and slopes are built
# from standard-Normal "n_" nodes scaled by the hyperparameter sd's.
for i in range(0, data["N"]):
    model.add_node(bd.Node("n_alpha{i}".format(i=i), bd.Normal(0.0, 1.0)))
    model.add_node(bd.Node("n_beta{i}".format(i=i), bd.Normal(0.0, 1.0)))

    # Per-unit intercept: alpha_i = alpha_mu + alpha_sigma * n_alpha_i.
    name = "alpha{i}".format(i=i)
    value = "alpha_mu + alpha_sigma*n_alpha{i}".format(i=i)
    model.add_node(bd.Node(name, bd.Delta(value)))

    # Per-unit slope, same construction.
    # NOTE(review): the add_node call for beta{i} falls outside this
    # excerpt; the loop body continues beyond the last visible line.
    name = "beta{i}".format(i=i)
    value = "beta_mu + beta_sigma*n_beta{i}".format(i=i)
Beispiel #7
0
                      80, 78, 83, 84, 77, 69, 75, 70, 74, 83, 80, 75, 76,\
                      75], dtype="float64")

# A model
model = bd.Model()

# Hyperparameters: overall mean and between-group spread of the group means.
model.add_node(bd.Node("grand_mean", bd.Normal(0.0, 1000.0)))
model.add_node(bd.Node("diversity", bd.LogUniform(1E-3, 1E3)))

# Group mean parameters (non-centred: mu_i = grand_mean + diversity * n_i).
# Backslash continuations replaced by implicit continuation in parentheses.
for i in range(data["N_groups"]):
    model.add_node(bd.Node("n{i}".format(i=i), bd.Normal(0.0, 1.0)))
    model.add_node(
        bd.Node("mu{i}".format(i=i),
                bd.Delta("grand_mean + diversity*n{i}".format(i=i))))

# Noise sd, common to all groups.
model.add_node(bd.Node("sigma", bd.LogUniform(1E-3, 1E3)))

# Data nodes: each observation is Normal around its group's mean.
for i in range(data["N"]):
    name = "y{i}".format(i=i)
    model.add_node(
        bd.Node(name,
                bd.Normal("mu{group}".format(group=data["group"][i]),
                          "sigma"),
                observed=True))

# Create the C++ code
# NOTE(review): unlike the other examples, generate_cpp(model, data) is not
# called here — confirm whether that was dropped accidentally.
bd.generate_h(model, data)
model = bd.Model()

# Coefficients of the logistic regression (intercept and age slope).
model.add_node(bd.Node("beta_0", bd.Normal(0, 10)))
model.add_node(bd.Node("beta_1", bd.Normal(0, 10)))

# Sampling Distribution: Bernoulli (Binomial with one trial), with logistic
# link p_i = exp(eta_i) / (1 + exp(eta_i)), eta_i = beta_0 + beta_1 * Age_i.
# (A stale commented-out duplicate of the probability string was removed.)
for i in range(data["N"]):
    name = "CHD{i}".format(i=i)
    prob = "exp(beta_0 + beta_1 * Age{i})"
    prob += "/(1.0 + exp(beta_0 + beta_1 * Age{i}))"
    prob = prob.format(i=i)
    node = bd.Node(name, bd.Binomial(1, prob), observed=True)
    model.add_node(node)

# Extra (unobserved) node: predicted probability of CHD at age 35.
name = "CHDnew"
prob = "exp(beta_0 + beta_1 * 35)/(1.0 + exp(beta_0 + beta_1 * 35))"
model.add_node(bd.Node(name, bd.Delta(prob)))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Compile the C++ code so it's ready to go
import os
os.system("make")