Example #1
# Plot the data
plt.plot(data["t"], data["adult"], "ko-", label="Adult")
plt.plot(data["t"], data["youth"], "go-", label="Youth")
plt.xlabel("Time (quarters)")
plt.ylabel("Unemployment rate (logit)")
plt.legend()
plt.xlim([data["t"].min() - 0.5, data["t"].max() + 0.5])
plt.ylim([-4.0, 0.0])
plt.show()

# A model (of the prior information!)
model = bd.Model()

# AR(1) parameters for adult unemployment rate
model.add_node(bd.Node("mu1", bd.Uniform(-10.0, 0.0)))
model.add_node(bd.Node("L1", bd.LogUniform(1.0, 1E4)))
model.add_node(bd.Node("beta1", bd.LogUniform(1E-3, 1E3)))
model.add_node(bd.Node("alpha1", bd.Delta("exp(-1.0/L1)")))
model.add_node(bd.Node("sigma1", bd.Delta("beta1/sqrt(1.0 - alpha1*alpha1)")))

# Sampling distribution for adult data
model.add_node(bd.Node("adult0",\
                    bd.Normal("mu1", "sigma1"), observed=True))
for i in range(1, data["N"]):
    name = "adult{i}".format(i=i)
    dist = bd.Normal("mu1 + alpha1*(adult{k} - mu1)".format(k=(i-1)), "beta1")
    model.add_node(bd.Node(name, dist, observed=True))

# Parameters relating to youth data
model.add_node(bd.Node("offset", bd.Normal(0.0, 1.0)))
Example #2
import numpy as np
import dnest4.builder as bd

data = {
    "x": np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    "y": np.array([1.0, 2.0, 3.0, 3.9, 5.1]),
    "N": 5
}

# Create the model
model = bd.Model()

# Slope and intercept
model.add_node(bd.Node("m", bd.Uniform(-100.0, 100.0)))
model.add_node(bd.Node("b", bd.Uniform(-100.0, 100.0)))

# Noise standard deviation
model.add_node(bd.Node("log_sigma", bd.Uniform(-10.0, 10.0)))
model.add_node(bd.Node("sigma", bd.Delta("exp(log_sigma)")))

# p(data | parameters)
for i in range(0, data["N"]):
    name = "y{index}".format(index=i)
    mean = "m*x{index} + b".format(index=i)
    model.add_node(bd.Node(name, bd.Normal(mean, "sigma"), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Compile the C++ code so it's ready to go
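The compilation step itself is cut off here; a minimal sketch, assuming the generated C++ is built with a Makefile in the working directory (the command is hypothetical, not from the snippet):

import os
os.system("make")  # hypothetical build command; the original ends before this step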
Example #3
import math
import numpy as np
import dnest4.builder as bd

# Placeholder; the data are not needed.
data = {"D": np.array([1.0])}

# Create the model
model = bd.Model()

# Prior p(m)
model.add_node(bd.Node("r", bd.Uniform(0.0, 20.0)))
model.add_node(bd.Node("th", bd.Uniform(0.0, math.pi)))
model.add_node(bd.Node("ph", bd.Uniform(0.0, 2.0 * math.pi)))

# Likelihood p(D|mu) placeholder. The actual likelihood will have to be entered by hand.
name = "D{index}".format(index=0)
model.add_node(bd.Node(name, bd.Normal("mu", 1), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z)                 = about -6.0....
Example #4
import numpy as np           # imports not shown in the original snippet
import dnest4.builder as bd

raw = np.loadtxt("gfl_new_data.txt")
data = {
    "x": raw[:, 0],
    "y": raw[:, 1],
    "r": raw[:, 2],
    "theta": raw[:, 3],
    "v": raw[:, 4],
    "sig_v": raw[:, 5],
    "N": raw.shape[0]
}

# Create the model
model = bd.Model()

# Constant velocity dispersion
model.add_node(bd.Node("velocity_dispersion", bd.Uniform(0.0, 50)))

# Constant velocity offset
model.add_node(bd.Node("v_systematic", bd.Uniform(-10.0, 10.0)))

# Rotation amplitude
model.add_node(bd.Node("A", bd.Uniform(0.0, 50.0)))

# Rotation angle
model.add_node(bd.Node("phi", bd.Uniform(0.0, 2.0 * np.pi)))

# p(data | parameters)
for i in range(0, data["N"]):

    # Mean velocity at this point: systemic velocity plus rotation term
    mean = "v_systematic + A*sin(theta{index} - phi)".format(index=i)
Example #5
                      3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, 2, 2, 1,
                      1, 1, 1, 3, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1,
                      0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0,
                      0, 2, 1, 0, 0, 0, 1, 1, 0, 2, 3, 3, 1, 1,
                      2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 0, 1, 4, 0,
                      0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])\
                                                       .astype("float64")
data["N"] = int(len(data["t"]))

model = bd.Model()
model.add_node(bd.Node("log_mu1", bd.Cauchy(0.0, 5.0)))
model.add_node(bd.Node("diff", bd.Cauchy(0.0, 1.0)))
model.add_node(bd.Node("log_mu2", bd.Delta("log_mu1 + diff")))
model.add_node(bd.Node("mu1", bd.Delta("exp(log_mu1)")))
model.add_node(bd.Node("mu2", bd.Delta("exp(log_mu2)")))
model.add_node(bd.Node("change_year", bd.Uniform(1851.0, 1962.0)))
model.add_node(bd.Node("L", bd.LogUniform(1E-2, 1E2)))

# Data nodes
for i in range(0, data["N"]):
    name = "y{i}".format(i=i)
    mean = "mu1 + (mu2 - mu1)/(1.0 + exp(-(t{i} - change_year)/L))"\
                                        .format(i=i)
    model.add_node(bd.Node(name, bd.Poisson(mean), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Compile the C++ code so it's ready to go
#import os
Example #6
# Constant velocity offset
model.add_node(bd.Node("c_v_systematic", bd.T(0.0, 0.1, 1.0)))
model.add_node(
    bd.Node("v_systematic", bd.Delta("c_v_systematic*velocity_dispersion")))

# Rotation amplitude
model.add_node(bd.Node("t_A", bd.T(0.0, 2.0, 2.0)))
model.add_node(
    bd.Node(
        "A",
        bd.Delta("pow(10.0, 1.0 - std::abs(t_A))*velocity_dispersion")))

# Rotation angle
model.add_node(bd.Node("phi", bd.Uniform(0.0, 2.0 * np.pi)))

# p(data | parameters)
for i in range(0, data["N"]):

    # Mean velocity at this point: systemic velocity plus rotation term
    mean = "v_systematic + A*sin(theta{index} - phi)".format(index=i)

    # This will be a bit slow but it doesn't matter
    stdev = "sqrt(pow(sig_v{index}, 2) + pow(velocity_dispersion, 2))"\
                .format(index=i)

    name = "v{index}".format(index=i)
    model.add_node(bd.Node(name, bd.Normal(mean, stdev), observed=True))

# Create the C++ code
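The snippet ends at the comment; presumably the generation step that follows is the same as in the other examples:

bd.generate_h(model, data)
bd.generate_cpp(model, data)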
Example #7
# Data
# Ntz original data:
# data = {"y": np.array([554, 701, 749, 868, 516, 573, 978, 399]),
#         "n": np.array([1183, 1510, 1597, 1924, 1178, 1324, 2173, 845]),
#         "N": 8}
# Kobe Bryant's whole field goal record from NBA records:
data = {"y": np.array([176, 391, 362, 554, 701, 749, 868, 516, 573, 978, 813, 775, 800, 716, 740]),
        "n": np.array([422, 913, 779, 1183, 1510, 1597, 1924, 1178, 1324, 2173, 1757, 1690, 1712, 1569, 1639]),
        "N": 15}

# Create the model
model = bd.Model()

# Prior p(m)
model.add_node(bd.Node("p", bd.Uniform(0, 1)))

# Likelihood p(D|n,p).
for i in range(0, data["N"]):
    suc = "y{index}".format(index=i)
    atp = "n{index}".format(index=i)
    model.add_node(bd.Node(suc, bd.Binomial(atp, "p"), observed=True))


# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# take1      log(Z) = -39.2245367341 with Ntz original data
# take2      log(Z) = -39.223614467  with Ntz original data
# Analytical log(Z) = -39.2308
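For reference, the quoted analytical value follows from integrating the shared p against its Uniform(0, 1) prior, giving Z = [prod_i C(n_i, y_i)] * B(sum(y) + 1, sum(n - y) + 1). A sketch of that computation for the Ntz data (not part of the original script):

import numpy as np
from scipy.special import betaln, gammaln

y = np.array([554, 701, 749, 868, 516, 573, 978, 399], dtype=float)
n = np.array([1183, 1510, 1597, 1924, 1178, 1324, 2173, 845], dtype=float)

log_choose = gammaln(n + 1) - gammaln(y + 1) - gammaln(n - y + 1)
log_Z = log_choose.sum() + betaln(y.sum() + 1.0, (n - y).sum() + 1.0)
print(log_Z)  # should be close to the quoted analytical value of about -39.23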
Example #8
    "n":
    np.array([
        422, 913, 779, 1183, 1510, 1597, 1924, 1178, 1324, 2173, 1757, 1690,
        1712, 1569, 1639
    ]),
    "N":
    15
}

# Create the model
model = bd.Model()

# Prior p(m)
for i in range(0, data["N"]):
    ppi = "ppi{index}".format(index=i)
    model.add_node(bd.Node(ppi, bd.Uniform(0, 1)))

# Likelihood p(D|n,p).
for i in range(0, data["N"]):
    suc = "y{index}".format(index=i)
    atp = "n{index}".format(index=i)
    ppi = "ppi{index}".format(index=i)
    model.add_node(bd.Node(suc, bd.Binomial(atp, ppi), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# take1      log(Z) = -58.0551362482 with Ntz original data
# take2      log(Z) = -57.9893885883  with Ntz original data
# take3      log(Z) = -57.9826848935  with Ntz original data
Example #9
        0.82019870, -0.90114273, 0.75564840, -0.64294152, -1.80484699,
        0.56199749, 0.30379627
    ]),
    "N":
    42
}

# Create the model
model = bd.Model()

# Prior p(m)
model.add_node(bd.Node("a", bd.Normal(0, 1000)))
model.add_node(bd.Node("b", bd.Normal(0, 100)))
# Need to modify C++ for Half-Cauchy on sigma_1
#model.add_node(bd.Node("sig1", bd.Cauchy(0, 5)))
model.add_node(bd.Node("sig1", bd.Uniform(0, 1e8)))

# Likelihood p(data | parameters)
for i in range(0, data["N"]):
    name = "y{index}".format(index=i)
    mean = "b*x{index} + a".format(index=i)
    model.add_node(bd.Node(name, bd.Normal(mean, "sig1"), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z)                 = -41.6702379243 for Model 1
# Run result: log(Z)                 = -33.7010828147 for Model 2

# Run result: log(Z)                 =  -44.6145721304 for Model 1 wide prior
Example #10
import math
import numpy as np
import dnest4.builder as bd

# Placeholder; the data are not needed.
data = {"D": np.array([1.0])}

# Create the model
model = bd.Model()

# Prior p(m)
model.add_node(bd.Node("a", bd.Uniform(0, 10 * math.pi)))
model.add_node(bd.Node("b", bd.Uniform(0, 10 * math.pi)))

# Likelihood p(D|mu) placeholder. The egg-box likelihood will have to be entered by hand.
name = "D{index}".format(index=0)
model.add_node(bd.Node(name, bd.Normal("mu", 1), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z)                 = 235.85129744
# Feroz numerical integration result = 235.856
# Feroz Multinest result             = 235.835 and 235.839
# Mathematica result:                = 231.944
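The hand-entered likelihood is not shown, but the Uniform(0, 10*pi) priors and the quoted log(Z) of about 235.85 match the standard Feroz et al. egg-box problem; for reference, a sketch of that log-likelihood (an assumption about what was hand-coded in the C++):

import math

def eggbox_log_likelihood(a, b):
    # Standard egg-box toy problem: log L = (2 + cos(a/2) * cos(b/2))^5,
    # with a and b uniform on [0, 10*pi] as in the priors above.
    return (2.0 + math.cos(0.5 * a) * math.cos(0.5 * b)) ** 5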
Example #11
import math
import numpy as np
import dnest4.builder as bd

# Data, shell centers:
data = {"cx": np.array([3.5,-3.5]),
        "cy": np.array([0.0,0.0])}

# Create the model
model = bd.Model()

# Prior p(m)
model.add_node(bd.Node("m1", bd.Uniform(-6.0, 6.0)))
model.add_node(bd.Node("m2", bd.Uniform(-6.0, 6.0)))

# Likelihood p(D|mu) placeholder. The shell likelihood will have to be entered by hand.
name = "D{index}".format(index=0)
model.add_node(bd.Node(name, bd.Normal("mu", 1), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z)                  = -1.72298793735, -1.74864668478, -1.75171455818
# Feroz analytical integration result = -1.75
# Feroz Multinest result              = -1.61, -1.71, -1.72, -1.69 depending on control params
# Mathematica result:                 = 
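Again the hand-entered likelihood is not shown; given the two shell centres in the data and the quoted log(Z) of about -1.75, it is presumably the standard twin Gaussian-shell toy problem. A sketch, where the radius 2.0 and width 0.1 are assumptions taken from that standard setup rather than from this snippet:

import math

def shell_log_likelihood(m1, m2, cx=(3.5, -3.5), cy=(0.0, 0.0), r=2.0, w=0.1):
    # Sum of two Gaussian shells centred at (cx[i], cy[i]).
    like = 0.0
    for i in range(2):
        d = math.sqrt((m1 - cx[i]) ** 2 + (m2 - cy[i]) ** 2)
        like += math.exp(-0.5 * ((d - r) / w) ** 2) / (math.sqrt(2.0 * math.pi) * w)
    return math.log(like)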
Example #12
import math
import numpy as np
import dnest4.builder as bd

# Placeholder; the data are not needed.
data = {"D": np.array([10.0]), "u": np.array([0.01]), "v": np.array([0.1])}

# Create the model
model = bd.Model()

# Prior p(m)
model.add_node(bd.Node("th", bd.Uniform(-0.5, 0.5)))

# Likelihood p(D|mu) placeholder. The likelihood will have to be entered by hand.
name = "D{index}".format(index=0)
model.add_node(bd.Node(name, bd.Normal("mu", 1), observed=True))

# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)

# Run result: log(Z)                 = 0.684022078176
# Mathematica result:                = log(2) = 0.693147