Example No. 1
def test_gibbs_chain_get_replace_last():
    start_location = array([2.0, -4.0])
    width_guesses = array([5.0, 0.05])

    chain = GibbsChain(posterior=rosenbrock,
                       start=start_location,
                       widths=width_guesses)

    replacements = [22.22, 44.44]
    chain.replace_last([22.22, 44.44])
    assert all(chain.get_last() == replacements)
Example No. 2
def test_gibbs_chain_take_step():
    start_location = array([2.0, -4.0])
    width_guesses = array([5.0, 0.05])

    chain = GibbsChain(posterior=rosenbrock,
                       start=start_location,
                       widths=width_guesses)
    first_n = chain.n

    chain.take_step()

    assert chain.n == first_n + 1
    assert len(chain.params[0].samples) == chain.n
    assert len(chain.probs) == chain.n
Example No. 3
def test_gibbs_chain_get_probabilities():
    start_location = array([2.0, -4.0])
    width_guesses = array([5.0, 0.05])

    chain = GibbsChain(posterior=rosenbrock,
                       start=start_location,
                       widths=width_guesses)
    steps = 10
    chain.advance(steps)

    probabilities = chain.get_probabilities()
    assert len(probabilities) == steps
    assert probabilities[-1] > probabilities[0]

    burn = 2
    probabilities = chain.get_probabilities(burn=burn)
    assert len(probabilities) == expected_len(steps, start=burn)
    assert probabilities[-1] > probabilities[0]

    thin = 2
    probabilities = chain.get_probabilities(thin=thin)
    assert len(probabilities) == expected_len(steps, step=thin)
    assert probabilities[-1] > probabilities[0]

    probabilities = chain.get_probabilities(burn=burn, thin=thin)
    assert len(probabilities) == expected_len(steps, start=burn, step=thin)
    assert probabilities[-1] > probabilities[0]
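
The `expected_len` helper used in these tests is not shown in this excerpt; a minimal sketch consistent with how it is called (the length of a sample array sliced as `[burn::thin]`) might look like the following - this is an assumption, not the test suite's actual definition:

def expected_len(length, start=0, step=1):
    # length of an array of `length` samples after slicing with [start::step]
    return len(range(start, length, step))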
Example No. 4
def test_gibbs_chain_run_for_minute(line_posterior):
    expected_delta = datetime.timedelta(minutes=1)

    auto_tick_start = datetime.datetime.now()
    auto_tick = (datetime.datetime.now() - auto_tick_start).seconds
    start_time = datetime.datetime.now()
    chain = GibbsChain(posterior=line_posterior, start=[0.5, 0.1])
    chain.run_for(**timedelta_to_days_hours_minutes(expected_delta))
    end_time = datetime.datetime.now()

    # Allow some extra time to account for the additional calls to `now()` in
    # this function, plus the initial call to `time()` in `run_for`
    extra_delta = datetime.timedelta(seconds=4 * auto_tick)
    assert end_time - start_time == expected_delta + extra_delta
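
The `timedelta_to_days_hours_minutes` helper used by these timing tests is also not shown; a plausible sketch, assuming `run_for` accepts `days`, `hours` and `minutes` keyword arguments:

def timedelta_to_days_hours_minutes(delta):
    # split a datetime.timedelta into the keyword arguments passed to run_for()
    return {
        "days": delta.days,
        "hours": delta.seconds // 3600,
        "minutes": (delta.seconds // 60) % 60,
    }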
Example No. 5
def test_gibbs_chain_advance():
    start_location = array([2.0, -4.0])
    width_guesses = array([5.0, 0.05])

    chain = GibbsChain(posterior=rosenbrock,
                       start=start_location,
                       widths=width_guesses)
    first_n = chain.n

    steps = 104
    chain.advance(steps)

    assert chain.n == first_n + steps
    assert len(chain.params[0].samples) == chain.n
    assert len(chain.probs) == chain.n
Example No. 6
def test_gibbs_chain_remove_boundary(line_posterior):
    chain = GibbsChain(posterior=line_posterior, start=[0.5, 0.1])

    left, right = (0.45, 0.4500000000001)
    chain.set_boundaries(0, [left, right])
    chain.set_boundaries(0, None, remove=True)
    chain.advance(100)

    gradient = array(chain.get_parameter(0))

    # Some values should be outside the original boundary
    assert not all(gradient >= left) or not all(gradient <= right)
Example No. 7
def test_gibbs_chain_run_for_day_hour_minute(line_posterior):
    expected_delta = datetime.timedelta(days=1, hours=2, minutes=3)

    auto_tick_start = datetime.datetime.now()
    auto_tick = (datetime.datetime.now() - auto_tick_start).seconds
    start_time = datetime.datetime.now()
    chain = GibbsChain(posterior=line_posterior, start=[0.5, 0.1])
    chain.run_for(**timedelta_to_days_hours_minutes(expected_delta))
    end_time = datetime.datetime.now()

    # Allow some extra time to account for the additional calls to `now()` in
    # this function, plus the initial call to `time()` in `run_for`
    extra_delta = datetime.timedelta(seconds=5 * auto_tick)
    assert end_time - start_time >= expected_delta
    assert end_time - start_time <= expected_delta + extra_delta
    # Probably get less due to multiple calls to `time()` per step
    assert chain.n <= expected_delta.total_seconds() // auto_tick
Example No. 8
def test_gibbs_chain_non_negative(line_posterior):
    chain = GibbsChain(posterior=line_posterior, start=[0.5, 0.1])

    chain.set_non_negative(1)
    chain.advance(100)

    offset = array(chain.get_parameter(1))

    assert all(offset >= 0)
Example No. 9
def test_gibbs_chain_set_boundary(line_posterior):
    chain = GibbsChain(posterior=line_posterior, start=[0.5, 0.1])

    left, right = (0.45, 0.55)
    chain.set_boundaries(0, [left, right])
    chain.advance(100)

    gradient = array(chain.get_parameter(0))

    assert all(gradient >= left)
    assert all(gradient <= right)
Example No. 10
def test_gibbs_chain_restore(tmp_path):
    start_location = array([2.0, -4.0])
    width_guesses = array([5.0, 0.05])

    chain = GibbsChain(posterior=rosenbrock,
                       start=start_location,
                       widths=width_guesses)
    steps = 50
    chain.advance(steps)

    filename = tmp_path / "restore_file.npz"
    chain.save(filename)

    new_chain = GibbsChain.load(filename)

    assert new_chain.n == chain.n
    assert new_chain.probs == chain.probs
    assert new_chain.get_sample() == chain.get_sample()
Example No. 11
def test_gibbs_chain_burn_in():
    start_location = array([2.0, -4.0])
    width_guesses = array([5.0, 0.05])

    chain = GibbsChain(posterior=rosenbrock,
                       start=start_location,
                       widths=width_guesses)
    steps = 50
    chain.advance(steps)

    burn = chain.estimate_burn_in()

    assert 0 < burn <= steps

    chain.autoselect_burn()
    assert chain.burn == burn
Example No. 12
def test_gibbs_chain_thin():
    start_location = array([2.0, -4.0])
    width_guesses = array([5.0, 0.05])

    chain = GibbsChain(posterior=rosenbrock,
                       start=start_location,
                       widths=width_guesses)
    steps = 50
    chain.advance(steps)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        chain.autoselect_thin()

    assert 0 < chain.thin <= steps
Example No. 13
def test_gibbs_chain_get_parameter():
    start_location = array([2.0, -4.0])
    width_guesses = array([5.0, 0.05])

    chain = GibbsChain(posterior=rosenbrock,
                       start=start_location,
                       widths=width_guesses)

    steps = 5
    chain.advance(steps)

    samples = chain.get_parameter(0)
    assert len(samples) == expected_len(steps)

    burn = 2
    samples = chain.get_parameter(0, burn=burn)
    assert len(samples) == expected_len(steps, burn)

    thin = 2
    samples = chain.get_parameter(1, thin=thin)
    assert len(samples) == expected_len(steps, step=thin)

    samples = chain.get_parameter(1, burn=burn, thin=thin)
    assert len(samples) == expected_len(steps, start=burn, step=thin)
Example No. 14
from numpy import array, linspace, ones
from numpy.random import normal
import matplotlib.pyplot as plt
from inference.mcmc import GibbsChain

# note: LinePosterior is a user-defined posterior class not shown in this excerpt

N = 25
x = linspace(-2, 5, N)
m = 0.5; c = 0.05; sigma = 0.3
y = m*x + c + normal(size=N)*sigma

# plot the synthetic data and underlying line
plt.plot(x, m*x + c)
plt.plot(x, y, '.')
plt.grid()
plt.show()

# create an instance of the posterior class
posterior = LinePosterior(x=x, y=y, err=ones(N) * sigma)

# pass the posterior to the MCMC sampler
chain = GibbsChain(posterior=posterior, start=[0.5, 0.1])

# Now suppose we know the offset parameter must be non-negative.
# This constraint can be imposed by passing the index of the
# parameter to the set_non_negative method as follows:
chain.set_non_negative(1)

# For the purposes of this demo, let's assume we also know that
# the gradient must exist in the range [0.45, 0.55].
# The gradient can be constrained to values between chosen boundaries
# by passing the parameter index and the boundary values to the
# set_boundaries method as follows:
chain.set_boundaries(0, [0.45, 0.55])

# Advance the chain
chain.advance(50000)
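
# As a quick check (mirroring the constraint tests elsewhere in this document),
# we can confirm that the samples produced respect the constraints set above:
gradient_samples = array(chain.get_parameter(0))
offset_samples = array(chain.get_parameter(1))
print(gradient_samples.min() >= 0.45, gradient_samples.max() <= 0.55)  # expect: True True
print(offset_samples.min() >= 0.0)  # expect: True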
Example No. 15
from numpy import array, exp
import matplotlib.pyplot as plt
from inference.mcmc import GibbsChain


def rosenbrock(t):
    X, Y = t
    X2 = X**2
    b = 15  # correlation strength parameter
    v = 3  # variance of the gaussian term
    return -X2 - b * (Y - X2)**2 - 0.5 * (X2 + Y**2) / v


# The maximum of the rosenbrock function is [0,0] - here we intentionally
# start the chain far from the mode.
start_location = array([2., -4.])

# Here we make our initial guess for the proposal widths intentionally
# poor, to demonstrate that Gibbs sampling allows each proposal width
# to be adjusted individually toward an optimal value.
width_guesses = array([5., 0.05])

# create the chain object
chain = GibbsChain(posterior=rosenbrock,
                   start=start_location,
                   widths=width_guesses)

# advance the chain 150k steps
chain.advance(150000)

# the samples for the n'th parameter can be accessed through the
# get_parameter(n) method. We could use this to plot the path of
# the chain through the 2D parameter space:

p = chain.get_probabilities()  # color the points by their probability value
plt.scatter(chain.get_parameter(0),
            chain.get_parameter(1),
            c=exp(p - max(p)),
            marker='.')
plt.xlabel('parameter 1')
plt.ylabel('parameter 2')
Example No. 16
For an N-parameter problem, PcaChain produces a new sample by making N
sequential 1D Metropolis-Hastings steps in the direction of each of the
N eigenvectors of the NxN covariance matrix.

As an initial guess the covariance matrix is taken to be diagonal, which
results in standard Gibbs sampling for the first samples in the chain.
Subsequently, the covariance matrix is periodically updated using an
estimate derived from the samples themselves, and the eigenvectors are
re-calculated.
"""

# create our posterior with two highly-correlated parameters
posterior = CorrelatedLinePosterior()

# create a PcaChain, and also a GibbsChain for comparison
pca = PcaChain(posterior=posterior, start=[-1, 1, -1])
gibbs = GibbsChain(posterior=posterior, start=[-1, 1, -1])

# advance both chains by the same number of samples
pca.advance(50000)
gibbs.advance(50000)

# get an estimate of the marginal distribution of one of the correlated parameters
pca_pdf = pca.get_marginal(2, burn=5000)
gibbs_pdf = gibbs.get_marginal(2, burn=5000)

# over-plot the marginal estimates to compare the performance
marginal_axis = linspace(-4, 2, 500)
plt.plot(marginal_axis,
         pca_pdf(marginal_axis),
         lw=2,
         label='PcaChain estimate')
Example No. 17
def test_gibbs_chain_remove_non_negative(line_posterior):
    chain = GibbsChain(posterior=line_posterior, start=[0.5, 0.1])

    chain.set_non_negative(1, True)
    chain.set_non_negative(1, False)
    assert chain.params[1].non_negative is False
Example No. 18
from numpy import array, exp
import matplotlib.pyplot as plt


def rosenbrock(t):
    x, y = t
    x2 = x**2
    b = 15.  # correlation strength parameter
    v = 3.  # variance of the gaussian term
    return -x2 - b * (y - x2)**2 - 0.5 * (x2 + y**2) / v


# create the chain object
from inference.mcmc import GibbsChain
gibbs = GibbsChain(posterior=rosenbrock, start=array([2., -4.]))
gibbs.advance(150000)
gibbs.burn = 10000
gibbs.thin = 70

p = gibbs.get_probabilities()  # color the points by their probability value
fig = plt.figure(figsize=(5, 4))
ax1 = fig.add_subplot(111)
ax1.scatter(gibbs.get_parameter(0),
            gibbs.get_parameter(1),
            c=exp(p - max(p)),
            marker='.')
ax1.set_ylim([None, 2.8])
ax1.set_xlim([-1.8, 1.8])
ax1.set_xticks([])
ax1.set_yticks([])
# ax1.set_title('Gibbs sampling')
Example No. 19
from numpy import sqrt, arctan2, log, sin


def multimodal_posterior(theta):
    # NOTE: only the return statement of this function survived in the original
    # excerpt; the body below is a reconstruction which assumes a ring-shaped,
    # multi-modal log-posterior where z is a normalised radius and phi is the
    # polar angle of the parameter vector.
    x, y = theta
    r = sqrt(x**2 + y**2)
    phi = arctan2(y, x)
    z = (r - 0.5) / 0.1
    return -0.5 * z**2 + 4 * log(sin(phi * 2.)**2)


# required for multi-process code when running on windows
if __name__ == "__main__":

    from inference.mcmc import GibbsChain, ParallelTempering

    # define a set of temperature levels
    N_levels = 6
    temps = [10**(2.5 * k / (N_levels - 1.)) for k in range(N_levels)]

    # create a set of chains - one with each temperature
    chains = [
        GibbsChain(posterior=multimodal_posterior,
                   start=[0.5, 0.5],
                   temperature=T) for T in temps
    ]

    # When an instance of ParallelTempering is created, a dedicated process for each chain is spawned.
    # These separate processes will automatically make use of the available cpu cores, such that the
    # computations to advance the separate chains are performed in parallel.
    PT = ParallelTempering(chains=chains)

    # These processes wait for instructions which can be sent using the methods of the
    # ParallelTempering object:
    PT.run_for(minutes=0.5)

    # To recover a copy of the chains held by the processes
    # we can use the return_chains method:
    chains = PT.return_chains()
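
    # The returned objects are ordinary GibbsChain instances, so (for example)
    # the samples of the lowest-temperature (T = 1) chain can be inspected in
    # the usual way:
    samples = chains[0].get_parameter(0)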
Example No. 20
In this example a modified version of the Rosenbrock function (shown
above) is used as the log-posterior.
"""

# The maximum of the rosenbrock function is [0,0] - here we intentionally
# start the chain far from the mode.
start_location = array([2., -4.])

# Here we make our initial guess for the proposal widths intentionally
# poor, to demonstrate that Gibbs sampling allows each proposal width
# to be adjusted individually toward an optimal value.
width_guesses = array([5., 0.05])
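
# A generic sketch of the width-adaptation idea described above (an assumption
# for illustration only, not inference-tools' internal code): each parameter
# keeps its own proposal width, which is nudged toward a target acceptance rate
# as the chain progresses.
def adapt_width(width, acceptance_rate, target=0.4, gain=0.1):
    # grow the width when accepting too often, shrink it when accepting too rarely
    return width * (1.0 + gain * (acceptance_rate - target))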

# create the chain object
chain = GibbsChain(posterior=rosenbrock,
                   start=start_location,
                   widths=width_guesses)

# advance the chain 150k steps
chain.advance(150000)

# the samples for the n'th parameter can be accessed through the
# get_parameter(n) method. We could use this to plot the path of
# the chain through the 2D parameter space:

p = chain.get_probabilities()  # color the points by their probability value
plt.scatter(chain.get_parameter(0),
            chain.get_parameter(1),
            c=exp(p - max(p)),
            marker='.')
Example No. 21
from time import time
from inference.mcmc import GibbsChain, ChainPool  # import path for ChainPool assumed


def rosenbrock(t):
    X, Y = t
    X2 = X**2
    b = 15  # correlation strength parameter
    v = 3  # variance of the gaussian term
    return -X2 - b * (Y - X2)**2 - 0.5 * (X2 + Y**2) / v


# required for multi-process code when running on windows
if __name__ == "__main__":
    """
    The ChainPool class provides a convenient means to store multiple
    chain objects, and simultaneously advance those chains using multiple
    python processes.
    """

    # for example, here we create a single chain object
    chain = GibbsChain(posterior=rosenbrock, start=[0., 0.])
    # then advance it for some number of samples, and note the run-time
    t1 = time()
    chain.advance(150000)
    t2 = time()
    print('time elapsed, single chain:', t2 - t1)

    # We may want to run a number of chains in parallel - for example multiple chains
    # over different posteriors, or on a single posterior with different starting locations.

    # Here we create two chains with different starting points:
    chain_1 = GibbsChain(posterior=rosenbrock, start=[0., 0.])
    chain_2 = GibbsChain(posterior=rosenbrock, start=[0., 0.])

    # now we pass those chains to ChainPool in a list
    cpool = ChainPool([chain_1, chain_2])