Code Example #1
import numpy as np
import pymc3 as pm


def mv_simple():
    # 3-D multivariate normal test model with a fixed precision matrix
    mu = np.array([-.1, .5, 1.1])
    p = np.array([
        [2., 0, 0],
        [.05, .1, 0],
        [1., -0.05, 5.5]])
    tau = np.dot(p, p.T)
    with pm.Model() as model:
        pm.MvNormal('x', pm.constant(mu), pm.constant(tau), shape=3,
                    testval=np.array([.1, 1., .8]))
    H = tau
    C = np.linalg.inv(H)  # covariance implied by the precision matrix
    return model.test_point, model, (mu, C)
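A minimal usage sketch (not part of the example itself): the helper returns a starting point, the model, and the analytic mean and covariance, so its output can seed sampling directly.

start, model, (mu, C) = mv_simple()
with model:
    trace = pm.sample(500, start=start)  # short run just to exercise the model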
Code Example #2
def mv_simple():
    mu = np.array([-.1, .5, 1.1])
    p = np.array([
        [2., 0, 0],
        [.05, .1, 0],
        [1., -0.05, 5.5]])

    tau = np.dot(p, p.T)

    with pm.Model() as model:
        x = pm.MvNormal('x', pm.constant(mu), pm.constant(tau),
                        shape=3, testval=np.array([.1, 1., .8]))

    H = tau
    C = np.linalg.inv(H)

    return model.test_point, model, (mu, C)
Code Example #3
from itertools import product

import numpy as np
import pymc3 as pm


def mv_simple_discrete():
    # 2-D multinomial test model with its analytic mean and covariance
    d = 2
    n = 5
    p = np.array([.15, .85])
    with pm.Model() as model:
        pm.Multinomial('x', n, pm.constant(p), shape=d, testval=np.array([1, 4]))
        mu = n * p
        # covariance matrix
        C = np.zeros((d, d))
        for (i, j) in product(range(d), range(d)):
            if i == j:
                C[i, i] = n * p[i] * (1 - p[i])
            else:
                C[i, j] = -n * p[i] * p[j]

    return model.test_point, model, (mu, C)
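As with mv_simple, a brief sketch of how the returned values might be used (the sampling call here is an illustration, not code from the excerpt): draw from the model and compare the empirical mean of x with the analytic mean n * p.

start, model, (mu, C) = mv_simple_discrete()
with model:
    trace = pm.sample(1000, start=start)
print(trace['x'].mean(axis=0), mu)  # sampled mean vs. analytic mean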
Code Example #4
def mv_simple_discrete():
    d = 2
    n = 5
    p = np.array([.15, .85])

    with pm.Model() as model:
        x = pm.Multinomial('x', n, pm.constant(p), shape=d, testval=np.array([1, 4]))
        mu = n * p

        # covariance matrix
        C = np.zeros((d, d))
        for (i, j) in product(range(d), range(d)):
            if i == j:
                C[i, i] = n * p[i] * (1 - p[i])
            else:
                C[i, j] = -n * p[i] * p[j]

    return model.test_point, model, (mu, C)
Code Example #5
ndata = 100
spread = 3
centers = np.array([-spread, 0, spread])
p = stick_breaking(alpha=10.0, k=30)  # NOTE: stick_breaking and k are not defined in this excerpt

# simulate data from mixture distribution
v = np.random.randint(0, k, ndata)
data = centers[v] + np.random.randn(ndata)

# plt.hist(data)
# plt.show()

model = pm.Model()
with model:
    # cluster sizes
    a = pm.constant(np.array([1., 1., 1.]))
    # p = pm.Dirichlet('p', a=a, shape=k)
    # ensure all clusters have some points
    # p_min_potential = pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))

    # cluster centers
    means = pm.Normal('means', mu=[0, 0, 0], sd=15, shape=k)
    # break symmetry
    # order_means_potential = pm.Potential('order_means_potential',
    #                                      tt.switch(means[1]-means[0] < 0, -np.inf, 0)
    #                                      + tt.switch(means[2]-means[1] < 0, -np.inf, 0))

    # measurement error
    sd = pm.Uniform('sd', lower=0, upper=20)

    # latent cluster of each observation
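The excerpt breaks off at the last comment. In the usual PyMC3 mixture-model pattern it would continue with a latent Categorical assignment per observation and a Normal likelihood; the sketch below shows that continuation under this assumption (the names category and obs are illustrative, not taken from the original file).

    category = pm.Categorical('category', p=p, shape=ndata)
    obs = pm.Normal('obs', mu=means[category], sd=sd, observed=data)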
Code Example #6
    # priors
    mu = pm.Normal('mu', mu=0, tau=1/100**2, shape=(2,1))
    lmbda = pm.Gamma('lambda', alpha=0.001, beta=0.001, shape=(2,1))
    r = pm.Uniform('r', lower=-1, upper=1)
    sigma = pm.Deterministic('sigma', tt.sqrt(1/lmbda))

    # Reparameterization
    #FIXME: How to create (and then inverse) a simple 2x2 matrix???
    T = tt.stacklists([[1/lmbda[0]         , r*sigma[0]*sigma[1]],
                       [r*sigma[0]*sigma[1],          1/lmbda[1]]])
#    T = tt.stack([1/lmbda[0]         , r*sigma[0]*sigma[1],
#                  r*sigma[0]*sigma[1],          1/lmbda[1]])
#    TI = tt.invert(T)
#    TI = tt.matrix(T)
    # TODO? Side-step inversion by doing it myself, i.e. 1/det(A)*reshuffle(A)?
    testtau = pm.constant(np.eye(2)) # works...
    pm.det(testtau) # works...

    x = pm.MvNormal('x', mu=0, tau=testtau)

#  # Reparameterization
#  sigma[1] <- 1/sqrt(lambda[1])
#  sigma[2] <- 1/sqrt(lambda[2])
#  T[1,1] <- 1/lambda[1]
#  T[1,2] <- r*sigma[1]*sigma[2]
#  T[2,1] <- r*sigma[1]*sigma[2]
#  T[2,2] <- 1/lambda[2]
#  TI[1:2,1:2] <- inverse(T[1:2,1:2])

    # data come from a Gaussian
#    x = pm.Normal('x', mu=mu, sd=sigma, observed=x)
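On the FIXME about creating and then inverting a simple 2x2 matrix: one possible approach is Theano's symbolic inverse. The sketch below is an assumption about how this could be done, not code from the original project; matrix_inverse comes from theano.tensor.nlinalg, and lam, sig, and TI are illustrative names (lmbda, r, sigma, and mu are the variables defined above).

    from theano.tensor.nlinalg import matrix_inverse
    lam = tt.flatten(lmbda)   # (2, 1) -> (2,), so the matrix entries below are scalars
    sig = tt.flatten(sigma)
    T = tt.stacklists([[1 / lam[0],          r * sig[0] * sig[1]],
                       [r * sig[0] * sig[1], 1 / lam[1]]])
    TI = matrix_inverse(T)    # symbolic 2x2 inverse, no manual det/reshuffle needed
    x = pm.MvNormal('x', mu=tt.flatten(mu), tau=TI, shape=2)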
Code Example #7
File: ch3-6_Survey.py  Project: JoKeyser/BCMinPyMC3
#model{
#  # Observed Returns
#  for (i in 1:m){
#     k[i] ~ dbin(theta,n)
#  }   
#  # Priors on Rate Theta and Number n
#  theta ~ dbeta(1,1)
#  n ~ dcat(p[])
#  for (i in 1:nmax){
#     p[i] <- 1/nmax
#  }
#}   
with model:
    # Priors on rate theta and number n
    theta = pm.Beta('theta', alpha=1, beta=1)
    p = pm.constant(np.ones(nmax)/nmax)
    n = pm.Categorical('n', p=p, shape=1) #FIXME: How to use this properly?
    # Observed Returns
#    k = pm.Binomial('k', p=theta, n=n, observed=k, shape=m)
    # instantiate samplers
    values_np = np.ones(nmax)/nmax #FIXME: How to use this properly?
    step1 = pm.Metropolis([theta])
    step2 = pm.ElemwiseCategoricalStep(var=n, values=values_np)
    stepFunc = [step1, step2]
    # draw posterior samples (in 4 parallel running chains), TODO: very slow!?
    Nsample = 100
    Nchains = 4
    traces = pm.sample(Nsample, step=stepFunc, njobs=Nchains)

plotVars = ['theta', 'n']
axs = pm.traceplot(traces, vars=plotVars, combined=False)
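After the run, the quantity of interest in this survey model is n; a small sketch (not part of the original script, and assuming numpy is available as np as in the other examples) for reading its most frequently sampled value off the trace:

n_samples = traces['n'].ravel()                      # posterior draws of n, all chains pooled
print('most frequent n:', np.bincount(n_samples).argmax())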
Code Example #8
spread = 3
centers = np.array([-spread, 0, spread])
p = stick_breaking(alpha=10.0, k=30)  # NOTE: stick_breaking, k, and ndata are not defined in this excerpt

# simulate data from mixture distribution
v = np.random.randint(0, k, ndata)
data = centers[v] + np.random.randn(ndata)

# plt.hist(data)
# plt.show()

model = pm.Model()
with model:
    # cluster sizes
    a = pm.constant(np.array([1., 1., 1.]))
    # p = pm.Dirichlet('p', a=a, shape=k)
    # ensure all clusters have some points
    # p_min_potential = pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))


    # cluster centers
    means = pm.Normal('means', mu=[0, 0, 0], sd=15, shape=k)
    # break symmetry
    # order_means_potential = pm.Potential('order_means_potential',
    #                                      tt.switch(means[1]-means[0] < 0, -np.inf, 0)
    #                                      + tt.switch(means[2]-means[1] < 0, -np.inf, 0))

    # measurement error
    sd = pm.Uniform('sd', lower=0, upper=20)
Code Example #9
File: ch3-6_Survey.py  Project: pgnepal/BCMinPyMC3
#model{
#  # Observed Returns
#  for (i in 1:m){
#     k[i] ~ dbin(theta,n)
#  }
#  # Priors on Rate Theta and Number n
#  theta ~ dbeta(1,1)
#  n ~ dcat(p[])
#  for (i in 1:nmax){
#     p[i] <- 1/nmax
#  }
#}
with model:
    # Priors on rate theta and number n
    theta = pm.Beta('theta', alpha=1, beta=1)
    p = pm.constant(np.ones(nmax) / nmax)
    n = pm.Categorical('n', p=p, shape=1)  #FIXME: How to use this properly?
    # Observed Returns
    #    k = pm.Binomial('k', p=theta, n=n, observed=k, shape=m)
    # instantiate samplers
    values_np = np.ones(nmax) / nmax  #FIXME: How to use this properly?
    step1 = pm.Metropolis([theta])
    step2 = pm.ElemwiseCategoricalStep(var=n, values=values_np)
    stepFunc = [step1, step2]
    # draw posterior samples (in 4 parallel running chains), TODO: very slow!?
    Nsample = 100
    Nchains = 4
    traces = pm.sample(Nsample, step=stepFunc, njobs=Nchains)

plotVars = ['theta', 'n']
axs = pm.traceplot(traces, vars=plotVars, combined=False)