# output must be a 1d list or array in order to create a PythonFunction
    return res.reshape(-1)


# Wrap the conditional posterior densities (defined earlier in the file) as
# OpenTURNS functions. Each takes the full (2 + N)-dimensional chain state;
# nor0post/nor1post return the 2 parameters of a Normal, zpost returns the
# N parameters of the Bernoulli block.
nor0posterior = ot.PythonFunction(2 + N, 2, nor0post)
nor1posterior = ot.PythonFunction(2 + N, 2, nor1post)
zposterior = ot.PythonFunction(2 + N, N, zpost)

# We can now construct the Gibbs algorithm: one Metropolis-Hastings sampler
# per conditional. Each sampler updates only the state indices it is given
# (marginal index lists [0], [1], and range(2, N + 2)).
initialState = [0.0] * (N + 2)
sampler0 = ot.RandomVectorMetropolisHastings(ot.RandomVector(ot.Normal()),
                                             initialState, [0], nor0posterior)
sampler1 = ot.RandomVectorMetropolisHastings(ot.RandomVector(ot.Normal()),
                                             initialState, [1], nor1posterior)
# The latent z block is sampled jointly as N independent Bernoulli components.
big_bernoulli = ot.ComposedDistribution([ot.Bernoulli()] * N)
sampler2 = ot.RandomVectorMetropolisHastings(ot.RandomVector(big_bernoulli),
                                             initialState, range(2, N + 2),
                                             zposterior)
gibbs = ot.Gibbs([sampler0, sampler1, sampler2])

# Run the Gibbs algorithm
s = gibbs.getSample(10000)

# Extract the relevant marginals: the first (:math:`\mu_0`) and the second (:math:`\mu_1`).
posterior_sample = s[:, 0:2]
mean = posterior_sample.computeMean()
stddev = posterior_sample.computeStandardDeviation()
print(mean, stddev)
# Regression values: depend on the fixed RNG seed and the exact order of
# random draws above — do not reorder the sampler construction.
ott.assert_almost_equal(mean, [-0.0788226, 2.80322])
ott.assert_almost_equal(stddev, [0.0306272, 0.0591087])
# Start from the mean x0 = (0., 0., 0.)
# (mu0, prior, instrumental, chainDim, y_obs, linkFunction, p are defined
# earlier in the file, outside this excerpt.)
print('x0=', mu0)

# Conditional distribution y ~ N(z, 1.0)
conditional = ot.Normal()
print('y~', conditional)

# Create a Gibbs sampler: one random-walk Metropolis-Hastings sampler per
# chain component, each updating only marginal index [i].
mh_coll = [
    ot.RandomWalkMetropolisHastings(prior, mu0, instrumental, [i])
    for i in range(chainDim)
]
# Attach the same observation likelihood to every component sampler.
for mh in mh_coll:
    mh.setLikelihood(conditional, y_obs, linkFunction, p)
sampler = ot.Gibbs(mh_coll)
# Keep every 4th state after discarding the first 2000 (burn-in).
sampler.setThinning(4)
sampler.setBurnIn(2000)

# get a realization
realization = sampler.getRealization()
print('y1=', realization)

# try to generate a sample
sampleSize = 1000
sample = sampler.getSample(sampleSize)

x_mu = sample.computeMean()
x_sigma = sample.computeStandardDeviation()

# compute covariance
# Example #3 (original source marked this "Beispiel #3"; the following code is
# the interior of a function whose header lies outside this excerpt)
    # NOTE(review): this is the interior of a function — mean_prior, sigma0,
    # size, mu0, mean_instrumental, std_instrumental and data are defined in
    # the unseen part of the function; verify against the full source.
    std_prior = ot.Dirac(2.0)  # standard dev is known
    prior = ot.ComposedDistribution([mean_prior, std_prior])
    # choose the initial state within the prior
    initialState = prior.getRealization()

    # conditional distribution
    conditional = ot.Normal()

    # Create a Gibbs sampler: one random-walk MH sampler per parameter
    # (marginal 0 = mean, marginal 1 = standard deviation), both fitted to
    # the same observed data via the Normal likelihood.
    mean_sampler = ot.RandomWalkMetropolisHastings(prior, initialState,
                                                   mean_instrumental, [0])
    mean_sampler.setLikelihood(conditional, data)
    std_sampler = ot.RandomWalkMetropolisHastings(prior, initialState,
                                                  std_instrumental, [1])
    std_sampler.setLikelihood(conditional, data)
    sampler = ot.Gibbs([mean_sampler, std_sampler])
    # Keep every 2nd state after discarding the first 500 (burn-in).
    sampler.setThinning(2)
    sampler.setBurnIn(500)
    realization = sampler.getRealization()

    # Marginal standard deviation of y under the prior-predictive
    # (compound) distribution.
    sigmay = ot.ConditionalDistribution(ot.Normal(),
                                        prior).getStandardDeviation()[0]
    # Shrinkage weight of the data mean vs. the prior mean in the
    # conjugate-Normal posterior.
    w = size * sigma0**2. / (size * sigma0**2. + sigmay**2.0)

    print("prior variance= %.12g" % (sigma0**2.))
    print("  realization=", realization)

    print("  w= %.12g" % w)

    # The posterior for mu is analytical: a weighted average of the sample
    # mean and the prior mean mu0.
    mu_exp = (w * data.computeMean()[0] + (1. - w) * mu0)