Example #1
# Plot the data
plt.plot(data["t"], data["adult"], "ko-", label="Adult")
plt.plot(data["t"], data["youth"], "go-", label="Youth")
plt.xlabel("Time (quarters)")
plt.ylabel("Unemployment rate (logit)")
plt.legend()
plt.xlim([data["t"].min() - 0.5, data["t"].max() + 0.5])
plt.ylim([-4.0, 0.0])
plt.show()

# A model (of the prior information!)
model = bd.Model()

# AR(1) parameters for adult unemployment rate
model.add_node(bd.Node("mu1", bd.Uniform(-10.0, 0.0)))
model.add_node(bd.Node("L1", bd.LogUniform(1.0, 1E4)))
model.add_node(bd.Node("beta1", bd.LogUniform(1E-3, 1E3)))
model.add_node(bd.Node("alpha1", bd.Delta("exp(-1.0/L1)")))
model.add_node(bd.Node("sigma1", bd.Delta("beta1/sqrt(1.0 - alpha1*alpha1)")))

# Sampling distribution for adult data
model.add_node(bd.Node("adult0",\
                    bd.Normal("mu1", "sigma1"), observed=True))
for i in range(1, data["N"]):
    name = "adult{i}".format(i=i)
    dist = bd.Normal("mu1 + alpha1*(adult{k} - mu1)".format(k=(i-1)), "beta1")
    model.add_node(bd.Node(name, dist, observed=True))

# Parameters relating to youth data
model.add_node(bd.Node("offset", bd.Normal(0.0, 1.0)))
model.add_node(bd.Node("policy_effect", bd.Cauchy(0.0, 0.1)))
Example #2
                      1, 1, 1, 3, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1,
                      0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0,
                      0, 2, 1, 0, 0, 0, 1, 1, 0, 2, 3, 3, 1, 1,
                      2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 0, 1, 4, 0,
                      0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])\
                                                       .astype("float64")
data["N"] = int(len(data["t"]))

model = bd.Model()
model.add_node(bd.Node("log_mu1", bd.Cauchy(0.0, 5.0)))
model.add_node(bd.Node("diff", bd.Cauchy(0.0, 1.0)))
model.add_node(bd.Node("log_mu2", bd.Delta("log_mu1 + diff")))
model.add_node(bd.Node("mu1", bd.Delta("exp(log_mu1)")))
model.add_node(bd.Node("mu2", bd.Delta("exp(log_mu2)")))
model.add_node(bd.Node("change_year", bd.Uniform(1851.0, 1962.0)))
model.add_node(bd.Node("L", bd.LogUniform(1E-2, 1E2)))

# Data nodes
for i in range(0, data["N"]):
    name = "y{i}".format(i=i)
    mean = "mu1 + (mu2 - mu1)/(1.0 + exp(-(t{i} - change_year)/L))"\
                                        .format(i=i)
    model.add_node(bd.Node(name, bd.Poisson(mean), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Compile the C++ code so it's ready to go
#import os
#os.system("make")
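For illustration only (plain NumPy, with hypothetical parameter values rather than anything inferred from the data), the mean passed to each Poisson node above is a sigmoid that moves the expected count from mu1 to mu2 around change_year, with L controlling how sharp the transition is:

import numpy as np

def expected_count(t, mu1, mu2, change_year, L):
    """mu1 + (mu2 - mu1)/(1.0 + exp(-(t - change_year)/L)), as in the y{i} nodes."""
    return mu1 + (mu2 - mu1)/(1.0 + np.exp(-(t - change_year)/L))

years = np.arange(1851.0, 1963.0)
rates = expected_count(years, mu1=3.0, mu2=1.0, change_year=1890.0, L=1.0)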
Example #3
data = {'log_age': np.log(np.array(Clocks['age']).astype('float64')),
        'log_num_bidders': np.log(np.array(Clocks['num_bidders']).astype('float64')),
        'log_y': np.log(np.array(Clocks['y']).astype('float64'))}
data["N"] = int(len(data["log_y"]))

# Model
model = bd.Model()

# Slopes and Intercept
model.add_node(bd.Node("beta0", bd.Normal(0, 100)))
model.add_node(bd.Node("beta1", bd.Normal(0, 100)))
model.add_node(bd.Node("beta2", bd.Normal(0, 100)))
model.add_node(bd.Node("beta3", bd.Normal(0, 100)))

# Noise standard deviation
model.add_node(bd.Node("sigma", bd.LogUniform(0.001, 100)))

# Sampling distribution
for i in range(0, data["N"]):
    name = "log_y{i}".format(i=i)
    mean = "beta0 + beta1 * log_age{i}  + beta2 * log_num_bidders{i} +"
    mean += "beta3 * log_age{i} * log_num_bidders{i}"
    mean = mean.format(i=i)
    model.add_node(bd.Node(name, bd.Normal(mean, "sigma"), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Compile the C++ code so it's ready to go
import os
os.system("make")
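For clarity, here is a small plain-NumPy sketch of the regression mean assigned to each log_y{i} node: an intercept, two main effects on the log scale, and a log_age by log_num_bidders interaction. The coefficient values below are made up purely for illustration.

import numpy as np

def predicted_log_price(log_age, log_num_bidders, beta0, beta1, beta2, beta3):
    """beta0 + beta1*log_age + beta2*log_num_bidders + beta3*log_age*log_num_bidders"""
    return (beta0 + beta1*log_age + beta2*log_num_bidders
            + beta3*log_age*log_num_bidders)

# Hypothetical coefficient values applied to a single clock
print(predicted_log_price(np.log(150.0), np.log(10.0),
                          beta0=1.0, beta1=0.8, beta2=0.1, beta3=0.05))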
Example #4
data["N_groups"] = 4
data["group"] = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
                          1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\
                          2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\
                          3, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype="int64")
data["y"] = np.array([78, 88, 87, 88, 83, 82, 81, 80, 80, 89, 78, 78, 83,\
                      81, 78, 81, 81, 82, 76, 76, 79, 73, 79, 75, 77, 78,\
                      80, 78, 83, 84, 77, 69, 75, 70, 74, 83, 80, 75, 76,\
                      75], dtype="float64")

# A model
model = bd.Model()

# Hyperparameters
model.add_node(bd.Node("grand_mean", bd.Normal(0.0, 1000.0)))
model.add_node(bd.Node("diversity", bd.LogUniform(1E-3, 1E3)))

# Group mean parameters
for i in range(0, data["N_groups"]):
    model.add_node(bd.Node("n{i}".format(i=i), bd.Normal(0.0, 1.0)))
    model.add_node(bd.Node("mu{i}".format(i=i),
                           bd.Delta("grand_mean + diversity*n{i}".format(i=i))))

# Noise sd
model.add_node(bd.Node("sigma", bd.LogUniform(1E-3, 1E3)))

# Data nodes
for i in range(0, data["N"]):