Example #1
def test_2():
    # test that random state is used correctly
    dim = 2
    x0 = np.zeros(dim)
    cov = np.array([[1, 1.98], [1.98, 4]])
    icov = np.linalg.inv(cov)

    samples1 = hmc(lnprob_gaussian,
                   x0,
                   args=(icov, ),
                   n_samples=10,
                   n_burn=0,
                   n_steps=10,
                   epsilon=0.25,
                   return_diagnostics=False,
                   random_state=0)

    samples2 = hmc(lnprob_gaussian,
                   x0,
                   args=(icov, ),
                   n_samples=10,
                   n_burn=0,
                   n_steps=10,
                   epsilon=0.25,
                   return_diagnostics=False,
                   random_state=0)
    np.testing.assert_array_almost_equal(samples1, samples2)
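Examples #1, #6, #15, and #19 call an lnprob_gaussian helper that is not shown in these snippets. A minimal sketch of what such a function could look like, assuming a zero-mean Gaussian parameterized by its inverse covariance matrix icov (the exact form is an assumption, not part of the original snippets):

import numpy as np

def lnprob_gaussian(x, icov):
    # assumed helper: log-density (up to an additive constant) and gradient
    # of a zero-mean Gaussian with inverse covariance matrix icov
    logp = -0.5 * np.dot(x, np.dot(icov, x))
    grad = -np.dot(icov, x)
    return logp, grad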
Example #2
def sample_with_progress(repeats, n_samps, n_steps, epsilon, path=None):
    if path is not None:
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    last = np.random.randn(num_dimensions)
    timestamps = []
    all_samples = []
    nfevals = []
    start = time()
    for i in range(repeats):
        samples = hmc(lnpdf,
                      x0=last,
                      n_samples=int(n_samps),
                      n_steps=n_steps,
                      epsilon=epsilon)
        last = samples[-1]
        nfevals.append(lnpdf.counter)
        all_samples.append(samples)
        if path is not None:
            np.savez(path + '_iter' + str(i),
                     true_means=true_means,
                     true_covs=true_covs,
                     samples=last,
                     timestamps=timestamps,
                     nfevals=nfevals)
        timestamps.append(time() - start)
    if path is not None:
        np.savez(path,
                 samples=np.array(all_samples),
                 true_means=true_means,
                 true_covs=true_covs,
                 timestamps=np.array(timestamps),
                 fevals=np.array(nfevals))
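Examples #2, #3, and #7 sample from a module-level lnpdf that exposes a counter attribute recording how many times the log-density has been evaluated. A minimal sketch of such a wrapper (the class name CountingLnpdf and its interface are assumptions for illustration, not part of the original code):

class CountingLnpdf:
    # hypothetical wrapper: counts log-density evaluations so that
    # lnpdf.counter can be logged alongside the samples, as above
    def __init__(self, logprob_fn):
        self.logprob_fn = logprob_fn
        self.counter = 0

    def __call__(self, x):
        self.counter += 1
        return self.logprob_fn(x)

An instance created as lnpdf = CountingLnpdf(some_logprob) would then behave like the lnpdf used by sample_with_progress.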
Example #3
def sample_with_progress(repeats, n_samps, n_steps, epsilon, path=None):
    if path is not None:
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    last = np.random.randn(D)
    timestamps = []
    all_samples = []
    nfevals = []
    for i in range(repeats):
        timestamps.append(time())
        samples = hmc(lnpdf,
                      x0=last,
                      n_samples=int(n_samps),
                      n_steps=n_steps,
                      epsilon=epsilon)
        last = samples[-1]
        nfevals.append(lnpdf.counter)
        all_samples.append(samples)
        if path is not None:
            np.savez(path + '_iter' + str(i),
                     samples=last,
                     timestamps=timestamps,
                     nfevals=nfevals)

    timestamps.append(time())
    if path is not None:
        np.savez(path,
                 samples=all_samples,
                 timestamps=timestamps,
                 nfevals=nfevals)
Example #4
    def sample(self):
        assert pyhmc_imported, 'the ``pyhmc`` package is required.'
        alpha = self.prior_alpha
        beta = self.prior_beta
        if np.isscalar(self.prior_alpha):
            # symmetric dirichlet
            alpha = self.prior_alpha * np.ones(self.n_states_)
        if np.isscalar(self.prior_beta):
            beta = self.prior_beta * np.ones(
                len(self.theta0_) - self.n_states_)

        def func(theta):
            logp, grad = _log_posterior(theta,
                                        self.countsmat_,
                                        alpha=alpha,
                                        beta=beta,
                                        n=self.n_states_)
            return logp, grad

        epsilon = self.epsilon / len(self.theta0_)
        start = time.time()
        all_theta, diag = hmc(func,
                              x0=self.theta0_,
                              n_samples=self.n_samples,
                              epsilon=epsilon,
                              n_steps=self.n_steps,
                              display=self.verbose,
                              return_diagnostics=True)

        self._is_dirty = True
        return all_theta, diag, time.time() - start
Example #5
def test_1():
    try:
        from matplotlib import pyplot as pp
    except ImportError:
        raise SkipTest('Not making QQ plot')

    # check that ldirichlet_softmax_pdf is actually giving a dirichlet
    # distribution, by comparing a QQ plot with np.random.dirichlet
    alpha = np.array([1, 2, 3], dtype=float)

    def logprob(x, alpha):
        grad = np.zeros_like(x)
        logp = ldirichlet_softmax(x, alpha, grad=grad)
        return logp, grad

    samples, diag = hmc(logprob,
                        x0=np.random.normal(size=(3, )),
                        n_samples=1000,
                        args=(alpha, ),
                        n_steps=10,
                        return_diagnostics=True)

    expx = np.exp(samples)
    pi1 = expx / np.sum(expx, 1, keepdims=True)
    pi2 = np.random.dirichlet(alpha=alpha, size=1000)

    sm.qqplot_2samples(pi1[:, 0], pi2[:, 0], line='45')
    pp.savefig('bayes_ratematrix-test-1.png')
Example #6
def test_2():
    # test that random state is used correctly
    dim = 2
    x0 = np.zeros(dim)
    cov = np.array([[1, 1.98], [1.98, 4]])
    icov = np.linalg.inv(cov)

    samples1 = hmc(lnprob_gaussian, x0, args=(icov,),
                  n_samples=10, n_burn=0,
                  n_steps=10, epsilon=0.25, return_diagnostics=False,
                  random_state=0)

    samples2 = hmc(lnprob_gaussian, x0, args=(icov,),
                  n_samples=10, n_burn=0,
                  n_steps=10, epsilon=0.25, return_diagnostics=False,
                  random_state=0)
    np.testing.assert_array_almost_equal(samples1, samples2)
Example #7
def sample(n_samps, n_steps, epsilon, path):
    if path is not None:
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    start = time()
    samples = hmc(lnpdf, x0=np.random.randn(num_dimensions), n_samples=int(n_samps), n_steps=n_steps, epsilon=epsilon)
    end = time()
    np.savez(path, samples=samples, wallclocktime=end-start)
    #samples = np.vstack([c[0] for c in chain])
    print("done")
Example #8
def test_3():
    rv = scipy.stats.loggamma(c=1)
    eps = np.sqrt(np.finfo(float).resolution)
    def logprob(x):
        return rv.logpdf(x), approx_fprime(x, rv.logpdf, eps)

    samples = hmc(logprob, [0], epsilon=1, n_steps=10, window=3, persistence=True)

    # import matplotlib.pyplot as pp
    (osm, osr), (slope, intercept, r) = scipy.stats.probplot(
        samples[:,0], dist=rv, fit=True)
    assert r > 0.99
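test_3 relies on names that the snippet does not import. The following imports are one plausible way to make it self-contained (approx_fprime lives in scipy.optimize; the hmc import path follows the other examples on this page):

import numpy as np
import scipy.stats
from scipy.optimize import approx_fprime
from pyhmc import hmc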
Example #9
def func_thread(proposal, queue, **kwargs):
    """
    Sample from log probability density function proposal with pyhmc,
    see https://github.com/rmcgibbo/pyhmc for tutorials.
    """
    res = hmc(proposal,
              args=(*kwargs['pr_args'], ),
              x0=np.random.randn(kwargs['dim']) + 1.5 * np.ones(kwargs['dim']),
              n_samples=kwargs['n_samples'],
              display=False,
              n_steps=40,
              n_burn=kwargs['n_burn'],
              epsilon=kwargs['epsilon'],
              return_diagnostics=True)
    queue.put(res[0])
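func_thread pushes its chain onto a queue, which suggests it is meant to be launched in parallel workers. A minimal sketch of a driver, assuming Python's standard multiprocessing module and a proposal function of the (logp, grad) form used elsewhere in these examples; the driver itself and its kwargs values are hypothetical:

import multiprocessing as mp
import numpy as np

def run_parallel_chains(proposal, n_chains=4):
    # hypothetical driver: one HMC chain per worker process,
    # results collected from the shared queue
    queue = mp.Queue()
    kwargs = {'pr_args': (), 'dim': 2, 'n_samples': 1000,
              'n_burn': 100, 'epsilon': 0.1}
    workers = [mp.Process(target=func_thread, args=(proposal, queue), kwargs=kwargs)
               for _ in range(n_chains)]
    for w in workers:
        w.start()
    chains = [queue.get() for _ in workers]
    for w in workers:
        w.join()
    return np.array(chains)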
Example #10
def test_ldirchlet_softmax_pdf_qq():
    # check that ldirichlet_softmax_pdf is actually giving a dirichlet
    # distribution, by comparing a QQ plot with np.random.dirichlet
    alpha = np.array([1, 2, 3], dtype=float)

    def logprob(x, alpha):
        grad = np.zeros_like(x)
        logp = ldirichlet_softmax(x, alpha, grad=grad)
        return logp, grad

    samples, diag = hmc(logprob, x0=np.random.normal(size=(3,)), n_samples=1000,
                        args=(alpha,), n_steps=10, return_diagnostics=True)

    expx = np.exp(samples)
    pi1 = expx / np.sum(expx, 1, keepdims=True)
    pi2 = np.random.dirichlet(alpha=alpha, size=1000)
Example #11
def test_ldirchlet_softmax_pdf_qq():
    # check that ldirichlet_softmax_pdf is actually giving a dirichlet
    # distribution, by comparing a QQ plot with np.random.dirichlet
    alpha = np.array([1, 2, 3], dtype=float)

    def logprob(x, alpha):
        grad = np.zeros_like(x)
        logp = ldirichlet_softmax(x, alpha, grad=grad)
        return logp, grad

    samples, diag = hmc(logprob, x0=np.random.normal(size=(3,)), n_samples=1000,
                        args=(alpha,), n_steps=10, return_diagnostics=True)

    expx = np.exp(samples)
    pi1 = expx / np.sum(expx, 1, keepdims=True)
    pi2 = np.random.dirichlet(alpha=alpha, size=1000)
Example #12
    def generate(self, func):
        def logprob(pos, func):
            def f_logp(x):
                return np.log(func(x))

            logp = f_logp(pos)
            grad = egrad(f_logp)(pos)

            return logp, grad

        return hmc(logprob,
                   x0=np.random.randn(self.nelec * self.ndim),
                   args=(func, ),
                   n_samples=self.nwalkers,
                   epsilon=1,
                   n_burn=int(self.nstep / 10))
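Example #12 calls egrad without showing where it comes from; the name matches the conventional alias for autograd's elementwise gradient. The snippet presumably assumes imports along these lines (an assumption, not shown in the original):

import autograd.numpy as np
from autograd import elementwise_grad as egrad
from pyhmc import hmc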
Example #13
def Ex1_hmc():
    def U(theta):
        return (-2 * theta**2 + theta**4)

    # True gradient
    gradU = jacobian(U, argnum=0)

    # Noisy gradient, based on what they do in the paper for Fig 1
    def noisy_gradU(theta, x, n, batch_size):
        r'''Noisy gradient \Delta\tilde{U}(\theta)=\Delta U(\theta)+N(0,4)
        Extra args (x, n, batch_size) for compatibility with sghmc()'''
        return -4 * theta + 4 * theta**3 + np.random.normal(0, 2)

    # define your probability distribution
    def logprob(theta):
        logp = -2 * theta**2 + theta**4
        grad = -4 * theta + 4 * theta**3
        return logp, grad

    # run the HMC sampler (use same theta_0 and niter as SGHMC)
    np.random.seed(1234)
    # Don't actually need 'data' in this example, just use
    # it as a place-holder to fit into our function.
    n = 100
    x = np.array([np.random.normal(0, 1, (n, 1))]).reshape(-1, 1)
    # Set up start values and tuning params
    theta_0 = np.array([0.0])  # Initialize theta
    batch_size = n  # since we're not actually using the data, don't need to batch it
    niter = 500000  # Lots of iterations
    samps_hmc = hmc(logprob, x0=theta_0, n_samples=niter)
    # plot the samples from the HMC algorithm
    sns.kdeplot(samps_hmc.reshape(-1))

    # The hmc() function with that many iterations goes kind of insane...
    # How about fewer samples? Also redefine logprob(theta) to use the functions from above
    #def logprob(theta):
    #return U(theta, x, n, batch_size), gradU(theta, x, n, batch_size).reshape(1)
    # run the HMC sampler (use same theta_0 as above but fewer samples)
    #samps_hmc = hmc(logprob, x0=theta_0, n_samples=500) # NOPE! Still looks bad.
    # plot the samples from the HMC algorithm

    # plot the samples from the algorithm and save to a file
    kdeplt = sns.kdeplot(samps_hmc.reshape(-1))  # Plot the joint density
    fig = kdeplt.get_figure()
    fig.savefig('Example1_b.png')

    return (samps_hmc)
Example #14
def test_3():
    rv = scipy.stats.loggamma(c=1)
    eps = np.sqrt(np.finfo(float).resolution)

    def logprob(x):
        return rv.logpdf(x), approx_fprime(x, rv.logpdf, eps)

    samples = hmc(logprob, [0],
                  epsilon=1,
                  n_steps=10,
                  window=3,
                  persistence=True)

    # import matplotlib.pyplot as pp
    (osm, osr), (slope, intercept, r) = scipy.stats.probplot(samples[:, 0],
                                                             dist=rv,
                                                             fit=True)
    assert r > 0.99
Example #15
def test_1():
    # test sampling from a highly-correlated gaussian
    dim = 2
    x0 = np.zeros(dim)
    cov = np.array([[1, 1.98], [1.98, 4]])
    icov = np.linalg.inv(cov)

    samples, logp, diag = hmc(lnprob_gaussian, x0, args=(icov,),
                  n_samples=10**4, n_burn=10**3,
                  n_steps=10, epsilon=0.20, return_diagnostics=True,
                  return_logp=True, random_state=2)

    C = np.cov(samples, rowvar=0, bias=1)
    np.testing.assert_array_almost_equal(cov, C, 1)
    for i in range(100):
        np.testing.assert_almost_equal(
            lnprob_gaussian(samples[i], icov)[0],
            logp[i])
Example #16
    def run_pyHMC(self):
        samples = hmc(self.numeric_posterior,
                      x0=self.prm_init,
                      n_samples=self.nsamples,
                      epsilon=self.epsilon,
                      return_diagnostics=True,
                      return_logp=True,
                      n_burn=int(1e3))
        self.rejection_rate = samples[2]["rej"]
        if self.plotsamples:
            counts = np.linspace(1,
                                 len(self.prm_init),
                                 len(self.prm_init),
                                 dtype=int)
            figure = corner.corner(samples[0],
                                   show_titles=True,
                                   labels=counts,
                                   truths=self.actual_rhos)
            figure.savefig('{}.png'.format(self.corner_plot))
Example #17
def MoN_hmc():

    # define your probability distribution
    # note some fiddling to get dimensions to be compatible
    def logprob(theta):
        logp = np.sum(U(theta, x=x, n=n, batch_size=n))
        gradu = gradU(theta, x=x, n=n, batch_size=n).reshape((-1, ))
        return logp, gradu

    # run the HMC sampler
    # ideally would use same theta_0 and niter as SGHMC,
    # but computing the full gradient is prohibitively slow!!
    samps_hmc = hmc(logprob, x0=theta_0.reshape((-1)), n_samples=100)
    # plot the samples from the algorithm and save to a file
    kdeplt = sns.kdeplot(samps_hmc[:, 0], samps_hmc[:, 1])  # FIGURE 2b FOR PAPER
    fig = kdeplt.get_figure()
    fig.savefig('MixNorm_b.png')

    return (samps_hmc)
Example #18
    def sample(self):
        assert pyhmc_imported, 'the ``pyhmc`` package is required.'
        alpha = self.prior_alpha
        beta = self.prior_beta
        if np.isscalar(self.prior_alpha):
            # symmetric dirichlet
            alpha = self.prior_alpha * np.ones(self.n_states_)
        if np.isscalar(self.prior_beta):
            beta = self.prior_beta * np.ones(len(self.theta0_) - self.n_states_)

        def func(theta):
            logp, grad = _log_posterior(theta, self.countsmat_,
                alpha=alpha, beta=beta, n=self.n_states_)
            return logp, grad

        epsilon = self.epsilon / len(self.theta0_)
        start = time.time()
        all_theta, diag = hmc(func, x0=self.theta0_, n_samples=self.n_samples,
                            epsilon=epsilon, n_steps=self.n_steps,
                            display=self.verbose, return_diagnostics=True)

        self._is_dirty = True
        return all_theta, diag, time.time() - start
Example #19
def test_1():
    # test sampling from a highly-correlated gaussian
    dim = 2
    x0 = np.zeros(dim)
    cov = np.array([[1, 1.98], [1.98, 4]])
    icov = np.linalg.inv(cov)

    samples, logp, diag = hmc(lnprob_gaussian,
                              x0,
                              args=(icov, ),
                              n_samples=10**4,
                              n_burn=10**3,
                              n_steps=10,
                              epsilon=0.20,
                              return_diagnostics=True,
                              return_logp=True,
                              random_state=2)

    C = np.cov(samples, rowvar=0, bias=1)
    np.testing.assert_array_almost_equal(cov, C, 1)
    for i in range(100):
        np.testing.assert_almost_equal(
            lnprob_gaussian(samples[i], icov)[0], logp[i])
Example #20
def test_1():
    try:
        from matplotlib import pyplot as pp
    except ImportError:
        raise SkipTest('Not making QQ plot')

    # check that ldirichlet_softmax_pdf is actually giving a dirichlet
    # distribution, by comparing a QQ plot with np.random.dirichlet
    alpha = np.array([1, 2, 3], dtype=float)

    def logprob(x, alpha):
        grad = np.zeros_like(x)
        logp = ldirichlet_softmax(x, alpha, grad=grad)
        return logp, grad
    samples, diag = hmc(logprob, x0=np.random.normal(size=(3,)), n_samples=1000,
                        args=(alpha,), n_steps=10, return_diagnostics=True)

    expx = np.exp(samples)
    pi1 = expx / np.sum(expx, 1, keepdims=True)
    pi2 = np.random.dirichlet(alpha=alpha, size=1000)

    sm.qqplot_2samples(pi1[:, 0], pi2[:, 0], line='45')
    pp.savefig('bayes_ratematrix-test-1.png')
Example #21
    grad = -((27 * density + 27 - T_obs) * 27) / sigma_T**2

    c = np.linalg.det(cov)
    prefactor_likelihood = 1 / (np.sqrt(2 * np.pi) * c)**len(T_obs)
    prefactor = (prefactor_likelihood + prefactor_prior)
    logp = prefactor + (prior_rhos + trans_X_C_X)
    return logp, grad


def withCov_logprob(density):
    c = np.linalg.det(cov)
    prefactor_likelihood = np.log(1 / (np.sqrt(2 * np.pi) * c)**len(T_obs))
    trans_X = (-0.5) * (27 * density + 27 - T_obs).T
    trans_X_C = np.dot(trans_X, np.linalg.inv(cov))
    trans_X_C_X = np.dot(trans_X_C, (27 * density + 27 - T_obs))
    logp = trans_X_C_X * prefactor_likelihood
    grad = -((27 * density + 27 - T_obs) * 27) / sigma_T**2
    return logp, grad


og = logprob(one_rho)
og_new = withCov_logprob(one_rho)
for eps in [0.0001]:
    # for eps in [0.0001]:
    samples = hmc(logprob, x0=one_rho, n_samples=int(1e5), epsilon=eps)
    figure = corner.corner(samples,
                           show_titles=True,
                           labels=["d1", "d2", "d3", "d4"])
    figure.savefig('../STAT_IMAGES/all_pngs/testing_{}.png'.format(eps))
# samples *= sigma_D
Example #22

# The PyHMC package requires a function returning both the logprob and its gradient
def logposterior_and_grad(model_amplitudes):
    return pm.get_log_posterior(model_amplitudes), pm.get_grad_log_posterior(
        model_amplitudes)


# Draw samples from the log posterior using hamiltonian monte carlo
print("A sensible sample would be..." + str(nz))
initial_position = np.random.rand(10)
nb_samples = 10
print("The initial position is..." + str(initial_position))
samples = hmc(logposterior_and_grad,
              x0=initial_position,
              n_samples=100000,
              n_steps=10,
              epsilon=0.0001)
print("Samples drawn: \n" + str(samples))

# Plot the estimated amplitude distribution
log_prob_samples = []
for sample in samples:
    log_prob_samples.append(pm.get_log_posterior(sample))
print(log_prob_samples)
plt.hist(log_prob_samples[70000:], bins=30)
plt.xlabel("Log-Posterior value")
plt.ylabel("Number of samples")
plt.show()
print("A sensible log-posterior value would be..." +
      str(pm.get_log_posterior(nz)))
Example #23
import numpy as np
from pyhmc import hmc
import corner


# define your probability distribution
def logprob(x, ivar):
    logp = -0.5 * np.sum(ivar * x**2)
    grad = -ivar * x
    return logp, grad


# run the sampler
ivar = 1. / np.random.rand(5)
samples = hmc(logprob, x0=np.random.randn(5), args=(ivar, ), n_samples=int(1e4))

# Optionally, plot the results
figure = corner.corner(samples)
figure.savefig('triangle.pdf')
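The quickstart above returns only the samples. As the test and wrapper examples elsewhere on this page show, hmc can also return per-sample log-probabilities and diagnostics; a small variant of the same call, keeping the logprob and ivar defined above:

samples, logp, diag = hmc(logprob, x0=np.random.randn(5), args=(ivar, ),
                          n_samples=int(1e4), return_logp=True,
                          return_diagnostics=True)
print(diag['rej'])  # rejection rate, read the same way in run_pyHMC above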
Example #24
    def optimize_snp_new(self, snp_id, path_chain):
        """
        Optimisation of parameters for a SNP by Hamiltonian Monte Carlo
        :param snp_id: Id of SNP
        :param path_chain: path to save MCMC chain
        """

        # snp_id = 6

        # print(snp_id)

        k1 = self.q[:, snp_id]
        n1 = self.n[:, snp_id]

        # If the number of alternative alleles in all locations is zero - return
        if sum(k1) == 0:
            return

        if sum(k1) == sum(n1):
            return

        if (self.n_loc - sum(k1 == 0)) <= 2:
            # print('Skip zero', path_chain, snp_id)
            return

        if (self.n_loc - sum(k1 == n1)) <= 2:
            # print('Skip non-zero', path_chain, snp_id)
            return

        # niter = 50000
        # samples = hmc(self.logprob, x0=[0] + list(np.random.uniform(-1, 1, self.n_loc)) + [0.1] + [1] * 2,
        #               args=(snp_id,), n_samples=2000, epsilon=0.1)
        # print(samples[-1])
        # samples = hmc(self.logprob, x0=samples[-1],
        #               args=(snp_id,), n_samples=niter, epsilon=0.2)

        # print(list(zip(k1, n1)))

        # Burn-in
        niter = 5000
        samples = [[k1.sum() / n1.sum()] +
                   list(np.random.uniform(-1, 1, self.n_loc)) + [0.1] + [1] * 2
                   ]

        params = np.array(samples[-1])
        print(self.logprob(np.array(samples[-1]), snp_id))

        eps = 0.11
        while len(samples) < (niter / 4):
            eps = eps - 0.01
            samples = hmc(self.logprob,
                          x0=samples[-1],
                          args=(snp_id, ),
                          n_samples=niter,
                          epsilon=eps)
            _, idx = np.unique(samples, axis=0, return_index=True)
            samples = samples[np.sort(idx)]

        print(len(samples), eps)

        # Find epsilon after burn-in
        niter = 5000
        eps = 0.18
        x0 = samples[-1]
        samples = []

        while len(samples) < (niter / 4) and eps > 0:
            if eps > 0.019:
                eps -= 0.01
            else:
                eps = eps * 0.7
            samples = hmc(self.logprob,
                          x0=x0,
                          args=(snp_id, ),
                          n_samples=niter,
                          epsilon=eps)
            _, idx = np.unique(samples, axis=0, return_index=True)
            samples = samples[np.sort(idx)]

            print(len(samples), eps)

        niter = 100000
        if eps > 0:
            samples = hmc(self.logprob,
                          x0=samples[-1],
                          args=(snp_id, ),
                          n_samples=niter,
                          epsilon=eps)
            _, idx = np.unique(samples, axis=0, return_index=True)
            samples = samples[np.sort(idx)]
        else:
            print('Bad epsilon')

        print('Len', path_chain, snp_id, len(samples), eps)

        if len(samples) < niter / 4:
            print('Low number of samples', path_chain, snp_id)

        np.savetxt(fname=path_chain + 'snp' + str(snp_id) + '.txt',
                   X=samples,
                   fmt='%.10f')

        np.savetxt(fname=path_chain + 'eps' + str(snp_id) + '.txt',
                   X=[eps],
                   fmt='%.10f')

        n_thin = 10
        samples = samples[::n_thin]
        np.savetxt(fname=path_chain + 'mean' + str(snp_id) + '.txt',
                   X=samples.mean(0),
                   fmt='%.10f')

        if 0 in samples.var(0):
            print('Bad News', snp_id)
        np.savetxt(fname=path_chain + 'var' + str(snp_id) + '.txt',
                   X=samples.var(0),
                   fmt='%.10f')
Example #25
    c = np.linalg.det(cov)
    exp_1 = (-0.5) * (T_vec - (alpha * rho_vec**2)).T
    exp_1 = np.dot(exp_1, np.linalg.inv(cov))
    exp_1 = np.dot(exp_1, (T_vec - (alpha * rho_vec**2))).flatten()
    prefactor_prior = np.log(1 / (2 * np.pi *
                                  (sigma_a * sigma_d**(len(rho_vec)))))
    prefactor_likelihood = np.log(1 / (2 * np.pi * c))
    num_post = prefactor_likelihood * prefactor_prior * (exp_1 + prior_alpha +
                                                         prior_rhos)
    global i
    i = i + 1
    if i % 1000 == 0:
        print("{} samples".format(i))
    return num_post, gradients(prm, cov, Tarr)


cov = np.asarray([[0.01, 0, 0], [0, 0.01, 0], [0, 0, 0.01]])
Tarr = np.asarray([1, 1, 1])
PRIOR_sigma = [2, 2]
# print(numeric_posterior(np.asarray([1, 1, 1]), alpha, cov, Tarr)
samples = hmc(numeric_posterior,
              x0=[1, 1, 1, 1],
              args=(cov, Tarr, PRIOR_sigma),
              n_samples=int(1e6))

figure = corner.corner(samples,
                       labels=["alpha", "d_1", "d_2", "d_3"],
                       show_titles=True,
                       title_kwargs={"fontsize": 12})
figure.savefig('adrian_sigma_2_3.png')
Example #26
 def hmc(self,X,T,Z,n_samples=100,n_steps=10,epsilon=0.2,seed=None):
     ''' 
         Draws n_samples samples from the posterior distribution using hamiltonian monte
         carlo (implemented in pyhmc).
     
         Arguments:
             X (list): List of feature sequences (structure will vary with prior model)
             T (list): List of (L_i,) numpy arrays containing instance timestamps
             Z (list): List of observation sequence (structure will vary with prior model)
             n_samples: number of samples returned
             n_steps: number of hamiltonian steps taken between samples
             epsilon: step size
             seed: random seed for initialization
     
         Returns:
             self
             
     '''
     # get seed
     if seed is None:
         np.random.seed(int(time.time()))
     else:
         np.random.seed(seed)
     
     # Initialize the base classifier
     self.initialize_base_classifier(X, Z)
     
     # Initialize self
     self.initialize()
     
     # Preprocess the data
     # e.g. perform prefiltering
     X_aug = self.preprocess_data(X, Z, True)
     
     # Set gradient functions for regularizers
     self.set_grads()
     
     # Get the objective and gradient functions
     obj = self.get_obj(X_aug, Z)
     g_fun = self.get_grad(X_aug, Z)
     
     # get any parameter bounds
     # keyboard()
     bounds = self.get_bounds(X_aug, Z)
     
     # get start time
     start_time = time.time()
     
     def logp_and_grad(w):
         return -obj(w),-g_fun(w)
     
     # Samples
     if self.warm_start:
         w0 = self.init_params(X, Z)
         res = minimize(obj,w0,jac=g_fun,method='L-BFGS-B',tol=self.tol,options={"disp":self.verbose,"maxiter":25},bounds=bounds)
         w0 = res.x
     else:
         w0 = np.random.randn(self.n_params)
         
     self.param_samples,self.sampling_logps,self.sampling_diagnostics = hmc(logp_and_grad,x0=w0,n_samples=n_samples,n_steps=n_steps,epsilon=epsilon,display=True,return_logp=True,return_diagnostics=True)
         
     # Store total training time    
     self.train_time = time.time() - start_time
     if self.verbose > 0: print("Train time:", self.train_time)
         
     # Clear gradient functions
     # TODO: move this to pickle helper functions
     self.ig_grad = None
     self.n_grad = None
     self.beta_grad = None
     self.base_reg_grad = None
     
     return self
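A hypothetical call to this method, assuming an instance model of the surrounding classifier class and pre-built X, T, Z lists as described in the docstring (all names here are placeholders):

# hypothetical usage; X, T, Z prepared as described in the docstring above
model.hmc(X, T, Z, n_samples=200, n_steps=10, epsilon=0.2, seed=0)
draws = model.param_samples  # posterior parameter samples stored by hmc()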
     
Example #27
# def logprob(x, ivar):
#     logp = -0.5 * np.sum(ivar * x**2)
#     grad = -ivar * x
#     return logp, grad


def gradients(prm, alpha, cov, Tarr):
    # placeholder
    densities = prm  # what we're sampling
    sigma_2 = cov[0][0]  # sigma^2, one element of cov matrix
    grads = (1 / sigma_2**2) * (
        -alpha * densities**2 + Tarr
    )  # calculated gradients for each density value
    return grads


def numeric_posterior(m, x, b):

    return num_post, gradients(prm, alpha, cov, Tarr)


samples = hmc(numeric_posterior,
              x0=[.5, 1.5, 1],
              args=(alpha, cov, Tarr),
              n_samples=int(1e6))
# print(samples)
#   # pip install triangle_plot
figure = corner.corner(samples)
figure.savefig('testing_more_samples.png')