예제 #1
0
# (Tail of a plotting cell from earlier in the file: fix the y-range, show.)
plt.ylim([-4.0, 0.0])
plt.show()

# A model (of the prior information!)
model = bd.Model()

# AR(1) parameters for adult unemployment rate:
#   mu1    - process mean
#   L1     - correlation length; alpha1 = exp(-1/L1) is the AR coefficient
#   beta1  - innovation sd; sigma1 = beta1/sqrt(1 - alpha1^2) is the implied
#            marginal (stationary) sd used for the first observation
model.add_node(bd.Node("mu1", bd.Uniform(-10.0, 0.0)))
model.add_node(bd.Node("L1", bd.LogUniform(1.0, 1E4)))
model.add_node(bd.Node("beta1", bd.LogUniform(1E-3, 1E3)))
model.add_node(bd.Node("alpha1", bd.Delta("exp(-1.0/L1)")))
model.add_node(bd.Node("sigma1", bd.Delta("beta1/sqrt(1.0 - alpha1*alpha1)")))

# Sampling distribution for adult data.
# The first observation is drawn from the marginal distribution...
model.add_node(bd.Node("adult0",\
                    bd.Normal("mu1", "sigma1"), observed=True))
# ...and each subsequent one is conditioned on its predecessor (AR(1) step).
for i in range(1, data["N"]):
    name = "adult{i}".format(i=i)
    dist = bd.Normal("mu1 + alpha1*(adult{k} - mu1)".format(k=(i-1)), "beta1")
    model.add_node(bd.Node(name, dist, observed=True))

# Parameters relating to youth data (second AR(1) process, plus an offset
# from the adult series and a policy-effect term; usage is below the cut).
model.add_node(bd.Node("offset", bd.Normal(0.0, 1.0)))
model.add_node(bd.Node("policy_effect", bd.Cauchy(0.0, 0.1)))
model.add_node(bd.Node("L2", bd.LogUniform(1E-2, 1E2)))
model.add_node(bd.Node("beta2", bd.LogUniform(1E-3, 1E3)))
model.add_node(bd.Node("alpha2", bd.Delta("exp(-1.0/L2)")))
model.add_node(bd.Node("sigma2", bd.Delta("beta2/sqrt(1.0 - alpha2*alpha2)")))

# Youth sampling distribution (loop body continues beyond this excerpt).
for i in range(0, data["N"]):
    name = "youth{i}".format(i=i)
예제 #2
0
        "Species": np.array(Iris['Species']).astype("int64")}
# Derived sizes: number of classes, number of rows, number of regressors.
data["N_Species"] = len(np.unique(data["Species"]))
data["N"] = len(data["Species"])
data["N_Input"] = 6

# Add Some Ones to the Data for the One Trick
# (constant-1 column used by the BUGS-style "ones trick" likelihood below the cut)
data["ones"] = np.ones(data["N"], dtype="int64")

# Create the model
model = bd.Model()

# Coefficients: one beta_{j}_{k} per (species j, input k) pair.
for j in range(0, data["N_Species"]):
    for k in range(0, data["N_Input"]):
        name = "beta_{j}_{k}".format(j=j, k=k)
        prior = bd.Normal(0.0, 10.0)
        node = bd.Node(name, prior)
        model.add_node(node)

# Sampling distribution
for i in range(0, data["N"]):

    # Probability Distribution over Categories
    # Build the linear predictor for row i, class j, as a string expression
    # over the data columns (SL, SW, PL, PW) plus one interaction term.
    for j in range(0, data["N_Species"]):
        name = "p_{j}_{i}".format(i=i, j=j)
        formula = ""
        formula += "beta_{j}_0 +"
        formula += "beta_{j}_1 * SL{i} + beta_{j}_2 * SW{i} + "
        formula += "beta_{j}_3 * PL{i} + beta_{j}_4 * PW{i} + "
        formula += "beta_{j}_5 * SW{i} * PL{i}"
        formula = formula.format(i=i, j=j)
예제 #3
0
import math
import numpy as np
import dnest4.builder as bd

# Dummy dataset: the generator needs *some* data array, but its values are unused.
data = {"D": np.array([1.0])}

# Assemble the model graph.
model = bd.Model()

# Priors on the three spherical-coordinate parameters.
prior_nodes = [
    bd.Node("r", bd.Uniform(0.0, 20.0)),
    bd.Node("th", bd.Uniform(0.0, math.pi)),
    bd.Node("ph", bd.Uniform(0.0, 2.0 * math.pi)),
]
for node in prior_nodes:
    model.add_node(node)

# Placeholder likelihood node; the real likelihood has to be edited into the
# generated C++ by hand ("mu" is not defined in this graph).
obs_name = "D{index}".format(index=0)
model.add_node(bd.Node(obs_name, bd.Normal("mu", 1), observed=True))

# Emit the C++ implementation of this model.
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z) = about -6.0....
예제 #4
0
import numpy as np  # FIX: np.log/np.array are used below but numpy was never imported
import dnest4.builder as bd
import pandas as pd

# The data, as a dictionary of float64 arrays (all on the log scale).
# (Removed a dead `data = {}` that was immediately overwritten.)
Clocks = pd.read_csv('Clocks.csv')
data = {'log_age': np.log(np.array(Clocks['age']).astype('float64')),\
        'log_num_bidders': np.log(np.array(Clocks['num_bidders']).astype('float64')),\
        'log_y': np.log(np.array(Clocks['y']).astype('float64'))}
data["N"] = int(len(data["log_y"]))

# Model
model = bd.Model()

# Slopes and Intercept: beta0 (intercept), beta1 and beta2 (main effects),
# beta3 (age x num_bidders interaction), all with weak Normal priors.
model.add_node(bd.Node("beta0", bd.Normal(0, 100)))
model.add_node(bd.Node("beta1", bd.Normal(0, 100)))
model.add_node(bd.Node("beta2", bd.Normal(0, 100)))
model.add_node(bd.Node("beta3", bd.Normal(0, 100)))

# Noise standard deviation
model.add_node(bd.Node("sigma", bd.LogUniform(0.001, 100)))

# Sampling distribution:
# log_y_i ~ Normal(beta0 + beta1*log_age_i + beta2*log_num_bidders_i
#                        + beta3*log_age_i*log_num_bidders_i, sigma)
for i in range(0, data["N"]):
    name = "log_y{i}".format(i=i)
    mean = "beta0 + beta1 * log_age{i}  + beta2 * log_num_bidders{i} +"
    mean += "beta3 * log_age{i} * log_num_bidders{i}"
    mean = mean.format(i=i)
    model.add_node(bd.Node(name, bd.Normal(mean, "sigma"), observed=True))
예제 #5
0
# Toy straight-line dataset.
data = {
    "x": np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    "y": np.array([1.0, 2.0, 3.0, 3.9, 5.1]),
    "N": 5
}

# Assemble the model graph.
model = bd.Model()

# Straight-line parameters: slope m and intercept b.
model.add_node(bd.Node("m", bd.Uniform(-100.0, 100.0)))
model.add_node(bd.Node("b", bd.Uniform(-100.0, 100.0)))

# Noise sd, parameterised on the log scale (log-uniform prior on sigma).
model.add_node(bd.Node("log_sigma", bd.Uniform(-10.0, 10.0)))
model.add_node(bd.Node("sigma", bd.Delta("exp(log_sigma)")))

# Observed nodes: y_i ~ Normal(m*x_i + b, sigma).
for idx in range(data["N"]):
    obs_name = "y{index}".format(index=idx)
    mean_expr = "m*x{index} + b".format(index=idx)
    model.add_node(bd.Node(obs_name, bd.Normal(mean_expr, "sigma"), observed=True))

# Emit the C++ implementation.
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Compile the generated code so it's ready to run:
#import os
# os.system("make")
예제 #6
0
import math
import numpy as np
import dnest4.builder as bd

# Two observed means (Xb, Yb) and their fixed standard deviations (sCS, sSP);
# no external data file is needed.
data = {
    "Xb": 1.517116,
    "sCS": 0.0004726074,
    "Yb": 1.517456,
    "sSP": 0.001038594
}

# Assemble the model graph.
model = bd.Model()

# Prior on the single shared mean.
shared_mean = bd.Node("mucssp", bd.Normal(1.5, 0.5))
model.add_node(shared_mean)

# Both observations are tied to that shared mean.
obs_x = bd.Node("Xb", bd.Normal("mucssp", "sCS"), observed=True)
obs_y = bd.Node("Yb", bd.Normal("mucssp", "sSP"), observed=True)
model.add_node(obs_x)
model.add_node(obs_y)

# Emit the C++ implementation of this model.
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z) = XXXXX
예제 #7
0
# Constant velocity offset
model.add_node(bd.Node("v_systematic", bd.Uniform(-10.0, 10.0)))

# Rotation amplitude
model.add_node(bd.Node("A", bd.Uniform(0.0, 50.0)))

# Rotation angle
model.add_node(bd.Node("phi", bd.Uniform(0.0, 2.0 * np.pi)))

# p(data | parameters)
for i in range(0, data["N"]):

    # Mean: systematic offset plus a sinusoid in position angle theta_i.
    mean = "v_systematic + A*sin(theta{index} - phi)".format(index=i)

    # This will be a bit slow but it doesn't matter
    # Measurement error and velocity_dispersion (a node defined above this
    # excerpt) are added in quadrature.
    stdev = "sqrt(pow(sig_v{index}, 2) + pow(velocity_dispersion, 2))"\
                .format(index=i)

    name = "v{index}".format(index=i)
    model.add_node(bd.Node(name, bd.Normal(mean, stdev), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Compile the C++ code so it's ready to go
import os
os.system("make")
예제 #8
0
File: rats.py  Project: zachh12/DNest4
203, 205, 190, 191, 211, 185, 207, 216, 205, 180, 200, 246, 249, 
263, 237, 230, 252, 231, 248, 285, 220, 261, 220, 244, 270, 242, 
248, 234, 243, 259, 246, 253, 225, 229, 250, 237, 257, 261, 248, 
219, 244, 283, 293, 312, 272, 280, 298, 275, 297, 350, 260, 313, 
273, 289, 326, 281, 288, 280, 283, 307, 286, 298, 267, 272, 285, 
286, 303, 295, 289, 258, 286, 320, 354, 328, 297, 323, 331, 305, 
338, 376, 296, 352, 314, 325, 358, 312, 324, 316, 317, 336, 321, 
334, 302, 302, 323, 331, 345, 333, 316, 291, 324]).astype("float64"),
            "x": np.array([8.0, 15.0, 22.0, 29.0, 36.0])}
data["x_bar"] = float(data["x"].mean())# Convert from numpy.float64 to float

# A model (hierarchical growth-curve model; per the filename this is the
# classic "rats" example).
model = bd.Model()

# Priors on the population-level means and the precision hyperparameters.
model.add_node(bd.Node("alpha_mu", bd.Normal(0.0, 100.0)))
model.add_node(bd.Node("beta_mu", bd.Normal(0.0, 100.0)))
model.add_node(bd.Node("tau_c", bd.Gamma(1E-3, 1E3)))
model.add_node(bd.Node("alpha_tau", bd.Gamma(1E-3, 1E3)))
model.add_node(bd.Node("beta_tau", bd.Gamma(1E-3, 1E3)))

# Transformations: convert each precision tau to a standard deviation,
# and compute the intercept at x = 0 from the centred parameterisation.
model.add_node(bd.Node("alpha_sigma", bd.Delta("1.0/sqrt(alpha_tau)")))
model.add_node(bd.Node("beta_sigma", bd.Delta("1.0/sqrt(beta_tau)")))
model.add_node(bd.Node("sigma_c", bd.Delta("1.0/sqrt(tau_c)")))
model.add_node(bd.Node("a0", bd.Delta("alpha_mu - beta_mu*x_bar")))

# Sampling distribution: standard-normal latent variables for the
# non-centred per-rat effects (loop continues beyond this excerpt).
for i in range(0, data["N"]):
    model.add_node(bd.Node("n_alpha{i}".format(i=i),\
                        bd.Normal(0.0, 1.0)))
예제 #9
0
        237, 257, 261, 248, 219, 244, 283, 293, 312, 272, 280, 298, 275, 297,
        350, 260, 313, 273, 289, 326, 281, 288, 280, 283, 307, 286, 298, 267,
        272, 285, 286, 303, 295, 289, 258, 286, 320, 354, 328, 297, 323, 331,
        305, 338, 376, 296, 352, 314, 325, 358, 312, 324, 316, 317, 336, 321,
        334, 302, 302, 323, 331, 345, 333, 316, 291, 324
    ]).astype("float64"),
    "x":
    np.array([8.0, 15.0, 22.0, 29.0, 36.0])
}
data["x_bar"] = float(data["x"].mean())  # Convert from numpy.float64 to float

# A model (second, reformatted copy of the rats growth-curve script)
model = bd.Model()

# Priors on population-level means and precision hyperparameters.
model.add_node(bd.Node("alpha_mu", bd.Normal(0.0, 100.0)))
model.add_node(bd.Node("beta_mu", bd.Normal(0.0, 100.0)))
model.add_node(bd.Node("tau_c", bd.Gamma(1E-3, 1E3)))
model.add_node(bd.Node("alpha_tau", bd.Gamma(1E-3, 1E3)))
model.add_node(bd.Node("beta_tau", bd.Gamma(1E-3, 1E3)))

# Transformations: precisions -> standard deviations, plus the intercept
# at x = 0 recovered from the centred parameterisation.
model.add_node(bd.Node("alpha_sigma", bd.Delta("1.0/sqrt(alpha_tau)")))
model.add_node(bd.Node("beta_sigma", bd.Delta("1.0/sqrt(beta_tau)")))
model.add_node(bd.Node("sigma_c", bd.Delta("1.0/sqrt(tau_c)")))
model.add_node(bd.Node("a0", bd.Delta("alpha_mu - beta_mu*x_bar")))

# Sampling distribution: standard-normal latents for the non-centred
# per-rat slopes/intercepts (loop continues beyond this excerpt).
for i in range(0, data["N"]):
    model.add_node(bd.Node("n_alpha{i}".format(i=i), bd.Normal(0.0, 1.0)))
    model.add_node(bd.Node("n_beta{i}".format(i=i), bd.Normal(0.0, 1.0)))
예제 #10
0
import math
import numpy as np
import dnest4.builder as bd

# Daniel Azevedo GC-Ethanol calibration data.
data = {"x": np.array([0.1716393, 0.2905149, 0.5521852, 0.8684159, 1.046752, 1.279638]),  # AreaRatio
        "y": np.array([0.05, 0.1, 0.2, 0.3, 0.4, 0.5]),                                  # Concentration
        "N": 6}


# Assemble the model graph.
model = bd.Model()

# Priors on intercept, slope, and noise scale.
# NOTE(review): beta1 ought to be Half-Cauchy (requires a manual C++ edit),
# and "epsilon" is used below as a standard deviation even though its
# Normal(0, 1) prior permits negative values — confirm bd tolerates that.
model.add_node(bd.Node("beta0", bd.Cauchy(0, 1)))
model.add_node(bd.Node("beta1", bd.Cauchy(0, 5)))  # Should be Half-Cauchy
model.add_node(bd.Node("epsilon", bd.Normal(0, 1)))

# Observed nodes: y_i ~ Normal(beta1*x_i + beta0, epsilon).
for idx in range(data["N"]):
    obs_name = "y{index}".format(index=idx)
    mean_expr = "beta1*x{index} + beta0".format(index=idx)
    model.add_node(bd.Node(obs_name, bd.Normal(mean_expr, "epsilon"), observed=True))

# Emit the C++ implementation.
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z)                 = 2.24468713205
# Mathematica result:                = XXXX
예제 #11
0
# 40 observations in 4 groups of 10 (group labels, then responses).
data["N"] = 40
data["N_groups"] = 4
data["group"] = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
                          1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\
                          2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\
                          3, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype="int64")
data["y"] = np.array([78, 88, 87, 88, 83, 82, 81, 80, 80, 89, 78, 78, 83,\
                      81, 78, 81, 81, 82, 76, 76, 79, 73, 79, 75, 77, 78,\
                      80, 78, 83, 84, 77, 69, 75, 70, 74, 83, 80, 75, 76,\
                      75], dtype="float64")

# A hierarchical (one-way ANOVA style) model
model = bd.Model()

# Hyperparameters: overall mean and the spread of group means around it.
model.add_node(bd.Node("grand_mean", bd.Normal(0.0, 1000.0)))
model.add_node(bd.Node("diversity", bd.LogUniform(1E-3, 1E3)))

# Group mean parameters, non-centred: mu_i = grand_mean + diversity * n_i
# with n_i ~ Normal(0, 1).
for i in range(0, data["N_groups"]):
    model.add_node(bd.Node("n{i}".format(i=i), bd.Normal(0.0, 1.0)))
    model.add_node(\
                    bd.Node("mu{i}".format(i=i),\
                         bd.Delta("grand_mean + diversity*n{i}".format(i=i))\
                        )\
                  )

# Noise sd
model.add_node(bd.Node("sigma", bd.LogUniform(1E-3, 1E3)))

# Data nodes (defined beyond this excerpt)
예제 #12
0
        -0.32019000, 1.59480235, -0.23412293, -0.08350555, -1.46057870,
        -0.62142475, 0.84171547, 1.25053406, 1.22901729, -1.28844456,
        -0.79355889, 0.64806456, 2.43395630, -1.18086072, 0.36834657,
        0.51896395, 0.99233285, -0.19108939, -1.15934395, 0.64806456,
        0.82019870, -0.90114273, 0.75564840, -0.64294152, -1.80484699,
        0.56199749, 0.30379627
    ]),
    "N":
    42
}

# Build the model.
model = bd.Model()

# Priors: intercept a, slope b, and noise scale sig1.
# A Half-Cauchy on sig1 would require hand-editing the generated C++,
# so a wide Uniform stands in for it here.
model.add_node(bd.Node("a", bd.Normal(0, 1000)))
model.add_node(bd.Node("b", bd.Normal(0, 100)))
#model.add_node(bd.Node("sig1", bd.Cauchy(0, 5)))
model.add_node(bd.Node("sig1", bd.Uniform(0, 1e8)))

# Observed nodes: y_i ~ Normal(b*x_i + a, sig1).
for idx in range(data["N"]):
    obs_name = "y{index}".format(index=idx)
    mean_expr = "b*x{index} + a".format(index=idx)
    model.add_node(bd.Node(obs_name, bd.Normal(mean_expr, "sig1"), observed=True))

# Emit the C++ implementation.
bd.generate_h(model, data)
bd.generate_cpp(model, data)
import numpy as np
import dnest4.builder as bd
import pandas as pd

# Load the coronary-heart-disease dataset from the working directory.
CHD = pd.read_csv('CHD.csv')

# Outcome (CHD, binary coded as int), age in years, and age-group index.
data = {"CHD": np.array(CHD["CHD"]).astype("int64"),\
        "Age": np.array(CHD["Age"]).astype("int64"),\
        "AgeGrp": np.array(CHD["AgeGrp"]).astype("int64")}
data["N"] = len(data["CHD"])

# Create the model
model = bd.Model()

# Coefficients: intercept beta_0 and age slope beta_1 (logistic regression).
model.add_node(bd.Node("beta_0", bd.Normal(0, 10)))
model.add_node(bd.Node("beta_1", bd.Normal(0, 10)))

# Sampling Distribution: CHD_i ~ Bernoulli(logistic(beta_0 + beta_1*Age_i)),
# written as Binomial(1, p) with p the inverse-logit expression string.
for i in range(0, data["N"]):
    name = "CHD{i}".format(i=i)
    #prob = "exp(beta_0 + beta_1 * Age{i})/(1.0 + exp(beta_0 + beta_1 * Age{i}))"
    prob = "exp(beta_0 + beta_1 * Age{i})"
    prob += "/(1.0 + exp(beta_0 + beta_1 * Age{i}))"
    prob = prob.format(i=i)
    distribution = bd.Binomial(1, prob)
    node = bd.Node(name, distribution, observed=True)
    model.add_node(node)

# Extra node for prediction (its definition continues beyond this excerpt)
name = "CHDnew"
예제 #14
0
import numpy as np
import dnest4.builder as bd

# Two observed means (Xb, Yb) with their fixed standard deviations
# (sCS, sSP); no external data file is required.
data = {
    "Xb": 1.517116,
    "sCS": 0.0004726074,
    "Yb": 1.517456,
    "sSP": 0.001038594
}

# Assemble the model graph.
model = bd.Model()

# Prior under Hp: one common mean shared by both measurements.
# (The commented-out lines are the Hd variant: separate means.)
model.add_node(bd.Node("mucssp", bd.Normal(1.51, 0.01)))  # Hp
# model.add_node(bd.Node("mucs", bd.Normal(1.51, 0.01)))  # Hd
# model.add_node(bd.Node("musp", bd.Normal(1.51, 0.01)))  # Hd

# Observed nodes, both tied to the common mean under Hp.
hp_x = bd.Node("Xb", bd.Normal("mucssp", "sCS"), observed=True)
hp_y = bd.Node("Yb", bd.Normal("mucssp", "sSP"), observed=True)
model.add_node(hp_x)  # Hp
model.add_node(hp_y)  # Hp
# model.add_node(bd.Node("Xb", bd.Normal("mucs", "sCS"), observed=True))  # Hd
# model.add_node(bd.Node("Yb", bd.Normal("musp", "sSP"), observed=True))  # Hd

# Emit the C++ implementation of this model.
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z) = XXXXX
# Hp: log(Z) = -4.29510326831  # Super Wide prior, mu = 0, sigma = 10000