Code example #1
def test_NUTS_autograd():
    logp = normal_1D_logp
    start = {'x': 1.}
    if np_source == 'autograd.numpy':
        nuts = smp.NUTS(logp, start)
        trace = nuts.sample(n_samples)
        assert (trace.shape == (n_samples, ))
    elif np_source == 'numpy':
        with pytest.raises(AutogradError):
            nuts = smp.NUTS(logp, start)
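For context, sampyl's NUTS obtains gradients of logp via autograd, which is exactly what this test exercises: with plain numpy the sampler is expected to raise AutogradError. A minimal sketch of what the normal_1D_logp fixture might look like (hypothetical):

import autograd.numpy as np  # plain `import numpy as np` would trip AutogradError

def normal_1D_logp(x):
    # standard-normal log-density, up to an additive constant
    return -0.5 * x ** 2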
Code example #2
def test_logp_with_grad():
    logp = poisson_with_grad
    start = {'lam1': 1., 'lam2': 1.}
    nuts = smp.NUTS(logp, start, grad_logp=True)
    chain = nuts.sample(n_samples)

    assert (len(chain) == n_samples)
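Here grad_logp=True tells sampyl that the logp callable returns both the log-probability and its gradient, so no automatic differentiation is needed. A minimal sketch of what poisson_with_grad could look like (the toy data and the gradient container are assumptions):

import numpy as np

def poisson_with_grad(lam1, lam2):
    # hypothetical Poisson counts for two groups
    data1 = np.array([3., 5., 4.])
    data2 = np.array([6., 7., 5.])
    # Poisson log-likelihood, up to additive constants
    logp = (np.sum(data1 * np.log(lam1) - lam1)
            + np.sum(data2 * np.log(lam2) - lam2))
    # exact gradient with respect to (lam1, lam2)
    grad = np.array([np.sum(data1 / lam1 - 1.),
                     np.sum(data2 / lam2 - 1.)])
    return logp, grad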
Code example #3
def test_sample_chain():
    start = {'lam1': 1., 'lam2': 1.}
    step1 = smp.Metropolis(poisson_logp, start, condition=['lam2'])
    step2 = smp.NUTS(poisson_logp, start, condition=['lam1'])

    chain = smp.Chain([step1, step2], start)
    trace = chain.sample(n_samples)
    assert (trace.shape == (n_samples, ))
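smp.Chain cycles through the steps in order on every iteration, so each sampler updates its own variable conditioned on the other's current value. The shared poisson_logp fixture is presumably the gradient-free version of the model above, written with autograd.numpy so NUTS can differentiate it (a sketch):

import autograd.numpy as np

def poisson_logp(lam1, lam2):
    data1 = np.array([3., 5., 4.])  # hypothetical counts, group 1
    data2 = np.array([6., 7., 5.])  # hypothetical counts, group 2
    return (np.sum(data1 * np.log(lam1) - lam1)
            + np.sum(data2 * np.log(lam2) - lam2))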
Code example #4
def test_parallel_2D():

    start = {'lam1': 1., 'lam2': 1.}
    metro = smp.Metropolis(poisson_logp, start)
    nuts = smp.NUTS(poisson_logp, start)

    metro_chains = metro.sample(n_samples, n_chains=2)
    nuts_chains = nuts.sample(n_samples, n_chains=2)

    assert (len(metro_chains) == 2)
    assert (len(nuts_chains) == 2)
Code example #5
def test_conditional_chain():

    logp = poisson_logp
    start = {'lam1': 1., 'lam2': 2.}
    metro = smp.Metropolis(logp, start, condition=['lam2'])
    nuts = smp.NUTS(logp, start, condition=['lam1'])

    state = metro._conditional_step()
    assert (state['lam2'] == 2.)
    nuts.state.update(state)
    state = nuts._conditional_step()
    assert (len(state) == 2)
Code example #6
def test_parallel_lin_model():

    logp = linear_model_logp
    start = {'b': np.zeros(5), 'sig': 1.}
    metro = smp.Metropolis(logp, start)
    nuts = smp.NUTS(logp, start)

    metro_chains = metro.sample(n_samples, n_chains=2)
    nuts_chains = nuts.sample(n_samples, n_chains=2)

    assert (len(metro_chains) == 2)
    assert (len(nuts_chains) == 2)
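A plausible shape for the linear_model_logp fixture used here and in Code example #9, matching start={'b': np.zeros(5), 'sig': 1.} (the data and the flat prior are assumptions):

import autograd.numpy as np

X = np.random.randn(100, 5)                       # hypothetical design matrix
y = np.dot(X, np.ones(5)) + np.random.randn(100)  # hypothetical observations

def linear_model_logp(b, sig):
    if sig <= 0:
        return -np.inf  # reject non-positive noise scales
    resid = y - np.dot(X, b)
    # Gaussian log-likelihood of the residuals, up to a constant
    return -0.5 * np.sum(resid ** 2) / sig ** 2 - len(y) * np.log(sig)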
Code example #7
File: mlm_main.py  Project: OlegArenz/vboost
    # save output here
    npvi_outfile = os.path.join(args.output, "npvi_%d-comp.npz" % args.ncomp)
    np.savez(npvi_outfile, theta, mu, s2)

#########################################
# MCMC code --- save posterior samples  #
#########################################

if args.mcmc:

    import sampyl
    if args.model == "baseball":
        nuts = sampyl.NUTS(baseball.lnp,
                           start={
                               'logit_phi': np.random.randn(1),
                               'log_kappa': np.random.randn(1),
                               'logit_theta': np.random.randn(D - 2)
                           })

        # keep track of number of LL calls
        cum_ll_evals = np.zeros(args.mcmc_nsamps, dtype=int)

        def callback(i):
            cum_ll_evals[i] = lnpdf.called
            if i % 500 == 0:
                print "total lnpdf calls", lnpdf.called

        lnpdf.called = 0
        chain = nuts.sample(args.mcmc_nsamps, burn=0, callback=callback)
        lnpdf.called = 0
        # compute log like of each sample
Code example #8
import numpy as np
import sampyl as smp
import matplotlib.pyplot as plt
import seaborn as sns
# `diagnostics` (providing compute_n_eff_acf) is assumed to come from the
# snippet's project; its import is not shown in this excerpt.


# correlated gaussian
def logp(x, y):
    icov = np.linalg.inv(np.array([[1., .8], [.8, 1.]]))
    d = np.array([x, y])
    return -.5 * np.dot(np.dot(d, icov), d)


# logp_xy = lambda th: logp(th[0], th[1])

start = {'x': 1., 'y': 1.}
# compare the performance of NUTS and Metropolis by effective sample size
nuts = smp.NUTS(logp, start)
nuts_trace = nuts.sample(1000)

met = smp.Metropolis(logp, start)
met_trace = met.sample(1000)

# compute effective sample size based on autocorrelation
nuts_eff = diagnostics.compute_n_eff_acf(nuts_trace.x)
met_eff = diagnostics.compute_n_eff_acf(met_trace.x)
print("NUTS effective sample size: {:0.2f}".format(nuts_eff))
print("MH   effective sample size: {:0.2f}".format(met_eff))

# graphically compare samples
fig, axarr = plt.subplots(1, 2)
axarr[0].scatter(nuts_trace.x, nuts_trace.y)
axarr[0].set_title("NUTS samples")
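The excerpt is cut off here; given the left panel, the remainder presumably draws the Metropolis samples in the right panel, e.g.:

axarr[1].scatter(met_trace.x, met_trace.y)
axarr[1].set_title("Metropolis samples")
plt.show()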
Code example #9
def test_nuts_linear_model():
    logp = linear_model_logp
    start = {'b': np.zeros(5), 'sig': 1.}
    nuts = smp.NUTS(logp, start)
    trace = nuts.sample(n_samples)
    assert (trace.shape == (n_samples, ))
Code example #10
def test_NUTS_pass_grad_logp():
    logp, grad_logp = normal_1D_logp, normal_1D_grad_logp
    start = {'x': 1.}
    nuts = smp.NUTS(logp, start, grad_logp=grad_logp)
    trace = nuts.sample(n_samples)
    assert (trace.shape == (n_samples, ))
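The explicit gradient fixture is presumably the derivative of the normal_1D_logp sketched under Code example #1:

def normal_1D_grad_logp(x):
    return -x  # d/dx of -0.5 * x**2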
Code example #11
    def compute_batch(self,
                      duplicate_manager=None,
                      context_manager=None,
                      batch_context_manager=None):
        """
        Computes the elements of the batch.
        """
        assert not batch_context_manager or len(
            batch_context_manager) == self.batch_size
        if batch_context_manager:
            self.acquisition.optimizer.context_manager = batch_context_manager[
                0]
            raise NotImplementedError("batch_context is not supported")

        if not context_manager or context_manager.A_reduce is None:
            # not reduce dimension
            _expand = lambda x: x
            _reduce_d = lambda x: x
            f = lambda x: -self.acquisition.acquisition_function(x)[0, 0]
            uniform_x = lambda: samples_multidimensional_uniform(
                self.acquisition.space.get_bounds(), 1)[0, :]
            dimension = self.acquisition.space.dimensionality
            #print("not reduce: {} D".format(dimension))
        else:
            # reduce dimension
            _expand = lambda x: context_manager._expand_vector(x)
            _reduce_d = lambda x: context_manager._reduce_derivative(x)
            f = lambda x: -self.acquisition.acquisition_function(
                context_manager._expand_vector(x))[0, 0]
            uniform_x = lambda: samples_multidimensional_uniform(
                context_manager.reduced_bounds, 1)[0, :]
            dimension = context_manager.space_reduced.dimensionality
            #print("do reduce: {} D".format(dimension))

        def is_valid(x):
            #print(x)
            #print(np.array(context_manager.noncontext_bounds))
            lower = np.all(
                x > np.array(context_manager.noncontext_bounds)[:, 0])
            upper = np.all(
                x < np.array(context_manager.noncontext_bounds)[:, 1])
            return lower and upper

        def _logp(x, fmin):
            x_ = _expand(x)
            p = -self.acquisition.acquisition_function(x_)[0, 0] - fmin
            if not is_valid(x_):
                p = 0
            #print("p(", x, x_, ") =", p)
            lower_barrier = np.sum(
                np.log(
                    np.clip(x_ -
                            np.array(context_manager.noncontext_bounds)[:, 0],
                            a_min=0,
                            a_max=None)))
            upper_barrier = np.sum(
                np.log(
                    np.clip(np.array(context_manager.noncontext_bounds)[:, 1] -
                            x_,
                            a_min=0,
                            a_max=None)))
            #print("lower_barrier:", lower_barrier)
            #print("upper_barrier:", upper_barrier)
            #logp = np.log(np.clip(p+lower_barrier+upper_barrier, a_min=0, a_max=None))
            logp = np.log(np.clip(p, a_min=0, a_max=None))
            #print("logp(", x, ") =", logp)
            return logp  #p+lower_barrier+upper_barrier

        def _dlogp(x, fmin):
            x_ = _expand(x)
            p, dp = self.acquisition.acquisition_function_withGradients(x_)
            p = -p
            dp = _reduce_d(-dp)[0]
            if not is_valid(x_):
                dp *= 0
            #print("dp", x, x_, ") =", dp)
            lower_barrier = np.sum(
                np.log(
                    np.clip(x_ -
                            np.array(context_manager.noncontext_bounds)[:, 0],
                            a_min=0,
                            a_max=None)))
            upper_barrier = np.sum(
                np.log(
                    np.clip(np.array(context_manager.noncontext_bounds)[:, 1] -
                            x_,
                            a_min=0,
                            a_max=None)))
            dlower_barrier = np.sum(
                1. /
                np.clip(x_ - np.array(context_manager.noncontext_bounds)[:, 0],
                        a_min=0,
                        a_max=None))
            dupper_barrier = np.sum(
                1. /
                np.clip(np.array(context_manager.noncontext_bounds)[:, 1] - x_,
                        a_min=0,
                        a_max=None))
            #print("lower_barrier:", lower_barrier)
            #print("upper_barrier:", upper_barrier)
            #logp = np.log(np.clip(p[0]-fmin+lower_barrier+upper_barrier, a_min=0, a_max=None))
            logp = np.log(np.clip(p[0] - fmin, a_min=0, a_max=None))
            #dlogp = (dp+dlower_barrier+dupper_barrier) / logp
            dlogp = (dp) / logp
            #print("dlogp(", x, ") =", dlogp)
            return dlogp  #dp+lower_barrier+upper_barrier#dlogp

        # first sample
        s0 = uniform_x()

        res = scipy.optimize.basinhopping(f, x0=s0, niter=100)
        acq_min = res.fun - self.epsilon
        #print("acq_min:",acq_min)

        # Now sample from x ~ p(x) = max(f(x) - acq_min, 0)
        # using No-U-Turn Sampler or Slice Sampler
        logp = lambda x: _logp(x, acq_min)
        dlogp = lambda x: _dlogp(x, acq_min)
        ok = False
        count = 0
        while not ok and count < self.max_resample:
            try:
                s0 = uniform_x()
                start = smp.find_MAP(logp, {'x': s0}, grad_logp=dlogp)
                #print("start:",start)
                if self.sampler == "slice":
                    s = smp.Slice(logp, start)  #, grad_logp=dlogp)
                elif self.sampler == "nuts":
                    s = smp.NUTS(logp, start, grad_logp=dlogp)
                chain = s.sample(self.n_sample,
                                 burn=self.warmup,
                                 n_chains=self.n_chains,
                                 progress_bar=self.verbose)
                ok = True
            except Exception as e:
                #print("Exception:", e.args)
                ok = False
                count += 1
        if count == self.max_resample:
            if self.verbose: print("Maximum number of resamples exceeded!")
            self.samples = np.array(
                [uniform_x() for i in range(self.n_sample)])
        else:
            self.samples = chain.x

        # K-Means
        if self.kmeans_after_expand:
            km = KMeans(n_clusters=self.batch_size)
            km.fit(_expand(self.samples))
            self.km = km
            return km.cluster_centers_

        else:
            km = KMeans(n_clusters=self.batch_size)
            km.fit(self.samples)
            self.km = km

            return _expand(km.cluster_centers_)
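The core trick in this method is to treat the acquisition surface as an unnormalized density p(x) = max(f(x) - acq_min, 0) and draw the batch by sampling it, then cluster the samples with k-means. A stripped-down, hypothetical 1-D illustration of that sampling step:

import numpy as np
import sampyl as smp

f = lambda x: np.exp(-0.5 * x ** 2)  # stand-in acquisition surface
f_min = 0.01                         # stand-in for res.fun - self.epsilon

def toy_logp(x):
    # log of max(f(x) - f_min, 0); the clip floor avoids log(0) warnings
    return np.log(np.clip(f(x) - f_min, a_min=1e-300, a_max=None))

chain = smp.Slice(toy_logp, {'x': 0.}).sample(500)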