Example #1
0
def test_conditional():
    """Conditioning Metropolis on 'lam2' must leave it fixed after one step."""
    initial = {'lam1': 1., 'lam2': 2.}
    sampler = smp.Metropolis(poisson_logp, initial, condition=['lam2'])
    new_state = sampler._conditional_step()
    # The full state (both variables) is returned, with the conditioned
    # variable untouched.
    assert len(new_state) == 2
    assert new_state['lam2'] == 2.
Example #2
0
def test_sampler_no_args_logp():
    """A log-p callable that takes no arguments should be rejected."""
    def logp():
        return x

    # The sampler cannot map state variables onto a zero-argument function,
    # so construction must fail with ValueError.
    with pytest.raises(ValueError):
        smp.Metropolis(logp, {'x': None})
Example #3
0
def test_sample_chain():
    """A Chain of two conditional samplers yields a trace of n_samples rows."""
    init = {'lam1': 1., 'lam2': 1.}
    # Each step updates one variable while holding the other fixed.
    first = smp.Metropolis(poisson_logp, init, condition=['lam2'])
    second = smp.NUTS(poisson_logp, init, condition=['lam1'])

    samples = smp.Chain([first, second], init).sample(n_samples)
    assert samples.shape == (n_samples, )
Example #4
0
def test_parallel_2D():
    """Parallel sampling should return one chain per requested n_chains."""
    init = {'lam1': 1., 'lam2': 1.}
    metro_sampler = smp.Metropolis(poisson_logp, init)
    nuts_sampler = smp.NUTS(poisson_logp, init)

    metro_result = metro_sampler.sample(n_samples, n_chains=2)
    nuts_result = nuts_sampler.sample(n_samples, n_chains=2)

    # Two chains requested -> two chains returned, for each sampler type.
    assert len(metro_result) == 2
    assert len(nuts_result) == 2
Example #5
0
def test_conditional_chain():
    """Hand the state from a conditioned Metropolis step into a conditioned NUTS step."""
    init = {'lam1': 1., 'lam2': 2.}
    metro_sampler = smp.Metropolis(poisson_logp, init, condition=['lam2'])
    nuts_sampler = smp.NUTS(poisson_logp, init, condition=['lam1'])

    interim = metro_sampler._conditional_step()
    # Metropolis was conditioned on lam2, so lam2 must be unchanged.
    assert interim['lam2'] == 2.
    # Propagate the updated state to the second sampler before stepping it.
    nuts_sampler.state.update(interim)
    final = nuts_sampler._conditional_step()
    assert len(final) == 2
Example #6
0
def test_parallel_lin_model():
    """Parallel sampling of the linear model returns n_chains separate chains."""
    # Five regression coefficients plus a noise scale.
    init = {'b': np.zeros(5), 'sig': 1.}
    metro_sampler = smp.Metropolis(linear_model_logp, init)
    nuts_sampler = smp.NUTS(linear_model_logp, init)

    metro_result = metro_sampler.sample(n_samples, n_chains=2)
    nuts_result = nuts_sampler.sample(n_samples, n_chains=2)

    assert len(metro_result) == 2
    assert len(nuts_result) == 2
# correlated gaussian
def logp(x, y):
    """Unnormalized log-density of a zero-mean bivariate Gaussian with correlation 0.8."""
    cov = np.array([[1., .8], [.8, 1.]])
    point = np.array([x, y])
    # Quadratic form -(1/2) * point^T * Sigma^{-1} * point.
    return -.5 * point.dot(np.linalg.inv(cov)).dot(point)


# logp_xy = lambda th: logp(th[0], th[1])

# Shared starting state for both samplers.
start = {'x': 1., 'y': 1.}
# compare the performance of NUTS and Metropolis by effective sample size
nuts = smp.NUTS(logp, start)
nuts_trace = nuts.sample(1000)

met = smp.Metropolis(logp, start)
met_trace = met.sample(1000)

# compute effective sample size based on autocorrelation
# (a larger effective size means less autocorrelated, more informative samples)
nuts_eff = diagnostics.compute_n_eff_acf(nuts_trace.x)
met_eff = diagnostics.compute_n_eff_acf(met_trace.x)
print("NUTS effective sample size: {:0.2f}".format(nuts_eff))
print("MH   effective sample size: {:0.2f}".format(met_eff))

# graphically compare samples
# side-by-side scatter plots of the (x, y) draws from each sampler
fig, axarr = plt.subplots(1, 2)
axarr[0].scatter(nuts_trace.x, nuts_trace.y)
axarr[0].set_title("NUTS samples")
axarr[1].scatter(met_trace.x, met_trace.y)
axarr[1].set_title("MH samples")
plt.show()
Example #8
0
def test_metropolis_linear_model():
    """Metropolis on the linear model yields a trace with n_samples entries."""
    init = {'b': np.zeros(5), 'sig': 1.}
    sampler = smp.Metropolis(linear_model_logp, init)
    samples = sampler.sample(n_samples)
    assert samples.shape == (n_samples, )
Example #9
0
def test_metropolis_two_vars_start():
    """Metropolis started with two variables returns an n_samples-long trace."""
    init = {'lam1': 1., 'lam2': 1.}
    sampler = smp.Metropolis(poisson_logp, init)
    assert sampler.sample(n_samples).shape == (n_samples, )
Example #10
0
def test_sampler_num_logp():
    """Passing a non-callable log-p (a bare number) must raise TypeError."""
    with pytest.raises(TypeError):
        smp.Metropolis(1., {'x': None})
Example #11
0
def test_metropolis():
    """Basic 1-D Metropolis run produces a trace shaped (n_samples,)."""
    sampler = smp.Metropolis(normal_1D_logp, {'x': 1.})
    samples = sampler.sample(n_samples)
    assert samples.shape == (n_samples, )
Example #12
0
# correlated gaussian log likelihood
def logp(x, y):
    """Log-likelihood (up to an additive constant) of a bivariate Gaussian, rho = 0.8."""
    precision = np.linalg.inv(np.array([[1., .8], [.8, 1.]]))
    vec = np.array([x, y])
    # -(1/2) * vec^T * precision * vec
    return -0.5 * np.dot(vec, np.dot(precision, vec))


# Adapter so a sampler can treat (x, y) as one 2-vector parameter 'th'.
# FIX: the original used the Python 2 tuple-parameter form `lambda (th): ...`,
# which is a SyntaxError in Python 3 (removed by PEP 3113); a plain
# single-argument lambda is the correct, equivalent form.
logp_xy = lambda th: logp(th[0], th[1])

# compare slice samplers, metropolis hastings, and the two variable
# slice sampler
ssamp = smp.Slice(logp, start={'x': 4., 'y': 4.})
slice_trace = ssamp.sample(1000)

met = smp.Metropolis(logp, start={'x': 4., 'y': 4.})
met_trace = met.sample(1000)

# The two-variable slice sampler works on a single vector parameter 'th'.
bslice = smp.Slice(logp_xy, start={'th': np.array([4., 4.])})
btrace = bslice.sample(1000)

# compute effective sample size based on autocorrelation
slice_eff = diagnostics.compute_n_eff_acf(slice_trace.x)
met_eff = diagnostics.compute_n_eff_acf(met_trace.x)
b_eff = diagnostics.compute_n_eff_acf(btrace.th[:, 0])
# FIX: converted Python 2 `print` statements to Python 3 print() calls
# (message text and %-formatting preserved byte-for-byte).
print("Slice         effective sample size: %2.2f" % slice_eff)
print("MH            effective sample size: %2.2f" % met_eff)
print("two var slice effective sample size: %2.2f" % b_eff)

print(" ----- ")
print("Slice sampler evals per sample: ", ssamp.evals_per_sample)