Example #1
    def joint_update(self, model):
        """
        Sample the conductances of the neuron given its latent state variables
        """
        # c = self.compartment
        c = self.get_compartment(model)
        # Get the sub-structured arrays for this comp
        chs = c.channels

        # Get a list of conductances for this compartment
        gs = [ch.g for ch in chs]
        gs_values = np.array([g.value for g in gs]).ravel()

        dVc_dt, Isc, dt = self.get_dV_and_currents(model)

        # Sample new gs with HMC
        prior = ProductDistribution([g.distribution for g in gs])
        nll = lambda log_gs: -1.0 * self._logp(log_gs, dVc_dt, Isc, dt, prior)
        grad_nll = lambda log_gs: -1.0 * self._grad_logp(log_gs, dVc_dt, Isc, dt, prior)

        stepsz = 0.005
        nsteps = 10
        log_gs = hmc(nll, grad_nll, stepsz, nsteps, np.log(gs_values))
        gs = np.exp(log_gs)

        # Update the channel conductance parameter
        for g, ch in zip(gs, chs):
            ch.g.value = g
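
For reference, the call above follows the hips.inference.hmc convention used throughout these examples: pass a negative log probability and its gradient, a leapfrog step size, a number of steps, and the current state; the return value is the next sample. A minimal self-contained sketch (the Gaussian target here is illustrative, not part of the original code):

import numpy as np
from hips.inference.hmc import hmc

# Standard Gaussian target: negative log density and its gradient
nll = lambda x: 0.5 * np.sum(x ** 2)
grad_nll = lambda x: x

x = np.random.randn(2)
for _ in range(100):
    # By default hmc treats its first argument as a negative log probability
    x = hmc(nll, grad_nll, 0.005, 10, x)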
Example #2
    def joint_update(self, model):
        """
        Sample the conductances of the neuron given its latent state variables
        """
        # c = self.compartment
        c = self.get_compartment(model)
        # Get the sub-structured arrays for this comp
        chs = c.channels

        # Get a list of conductances for this compartment
        gs = [ch.g for ch in chs]
        gs_values = np.array([g.value for g in gs]).ravel()

        dVc_dt, Isc, dt = self.get_dV_and_currents(model)

        # Sample new gs with HMC
        prior = ProductDistribution([g.distribution for g in gs])
        nll = lambda log_gs: -1.0 * self._logp(log_gs, dVc_dt, Isc, dt, prior)
        grad_nll = lambda log_gs: -1.0 * self._grad_logp(
            log_gs, dVc_dt, Isc, dt, prior)

        stepsz = 0.005
        nsteps = 10
        log_gs = hmc(nll, grad_nll, stepsz, nsteps, np.log(gs_values))
        gs = np.exp(log_gs)

        # Update the channel conductance parameter
        for g, ch in zip(gs, chs):
            ch.g.value = g
Example #3
    def resample(self,
                 stateseqs=None,
                 covseqs=None,
                 n_steps=10,
                 step_sz=0.01,
                 **kwargs):
        K, D = self.num_states, self.covariate_dim

        # Run HMC, using autograd to differentiate the objective
        from hips.inference.hmc import hmc
        from autograd import grad
        import autograd.numpy as anp

        def hmc_objective(params):
            # Unpack params
            assert params.size == K + K * D
            assert params.ndim == 1
            b = params[:K]
            logpi = anp.tile(b[None, :], (K, 1))
            W = params[K:].reshape((D, K))
            return self.joint_log_probability(logpi, W, stateseqs, covseqs)

        # hmc_objective = lambda params: self.joint_log_probability(params, stateseqs, covseqs)
        grad_hmc_objective = grad(hmc_objective)
        x0 = np.concatenate((self.b, np.ravel(self.W)))
        xf, self.step_sz, self.accept_rate = \
            hmc(hmc_objective, grad_hmc_objective,
                step_sz=self.step_sz, n_steps=n_steps, q_curr=x0,
                negative_log_prob=False,
                adaptive_step_sz=True,
                avg_accept_rate=self.accept_rate)

        self.b = xf[:K]
        self.W = xf[K:].reshape((D, K))
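
The adaptive calling convention used here (inferred from the call above) returns the new state together with an updated step size and a running average acceptance rate, both of which are threaded back into the next call. A minimal sketch with an illustrative Gaussian target:

import numpy as np
from hips.inference.hmc import hmc

lp = lambda x: -0.5 * np.sum(x ** 2)   # a log probability, hence negative_log_prob=False
dlp = lambda x: -x

x = np.random.randn(2)
step_sz, accept_rate = 0.01, 0.9
for _ in range(100):
    x, step_sz, accept_rate = hmc(lp, dlp, step_sz, 10, x,
                                  negative_log_prob=False,
                                  adaptive_step_sz=True,
                                  avg_accept_rate=accept_rate)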
Example #4
    def sample(self, acc, size=(1, )):
        """ Sample from the prior
                """
        N, D = size

        # TODO: Actually sample a DPP
        # TODO: For now we just run a Markov chain to sample
        L = T.dmatrix('L')
        lp = self.log_p(L)
        glp = T.grad(lp, L)

        f_lp = lambda x: -1.0 * lp.eval({L: x.reshape((N, D))})
        f_glp = lambda x: -1.0 * glp.eval({
            L: x.reshape((N, D))
        }).reshape(N * D)

        # x0 = L1.reshape(N*D)
        x = self.bound.get_value() * np.random.randn(N * D)
        N_smpls = 1000
        for s in np.arange(N_smpls):
            x = hmc(f_lp, f_glp, 0.25, 1, x)

        assert np.all(np.isfinite(x))

        return x.reshape((N, D))
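
Calling .eval() inside the sampling loop re-enters Theano's function cache on every iteration; compiling the graph once with theano.function is the usual way to avoid that overhead. A sketch using the same names as above:

import theano

# Compile the negated log probability and its gradient once, outside the loop
f_lp_fn = theano.function([L], -1.0 * lp)
f_glp_fn = theano.function([L], -1.0 * glp)

f_lp = lambda x: f_lp_fn(x.reshape((N, D)))
f_glp = lambda x: f_glp_fn(x.reshape((N, D))).reshape(N * D)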
Example #5
    def _resample_L(self, A):
        """
        Resample the locations given A
        :return:
        """
        from autograd import grad
        from hips.inference.hmc import hmc

        lp  = lambda L: self._hmc_log_probability(L, self.mu_0, self.mu_self, A)
        dlp = grad(lp)

        nsteps = 10
        self.L, self._L_step_sz, self._L_accept_rate = \
            hmc(lp, dlp, self._L_step_sz, nsteps, self.L.copy(),
                negative_log_prob=False, avg_accept_rate=self._L_accept_rate,
                adaptive_step_sz=True)
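
_hmc_log_probability is not shown in these snippets. Below is a hedged sketch of what such a function might compute for a latent distance model, assuming a Bernoulli adjacency likelihood whose logit is an offset minus squared latent distance (the model details and the sigma parameter are assumptions; autograd.numpy is used so grad can differentiate it):

import autograd.numpy as anp

def _hmc_log_probability_sketch(L, mu_0, mu_self, A, sigma=1.0):
    # Squared pairwise distances between latent locations
    D = anp.sum((L[:, None, :] - L[None, :, :]) ** 2, axis=2)
    logits = mu_0 + mu_self * anp.eye(A.shape[0]) - D
    # Bernoulli log likelihood: A * logit - log(1 + exp(logit))
    ll = anp.sum(A * logits - anp.log1p(anp.exp(logits)))
    # Gaussian prior on the locations
    lprior = -0.5 * anp.sum(L ** 2) / sigma ** 2
    return ll + lprior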
Example #6
    def _resample_mu_0(self, A):
        """
        Resample the locations given A
        :return:
        """
        from autograd import grad
        from hips.inference.hmc import hmc


        lp  = lambda mu_0: self._hmc_log_probability(self.L, mu_0, self.mu_self, A)
        dlp = grad(lp)

        stepsz = 0.005
        nsteps = 10
        mu_0 = hmc(lp, dlp, stepsz, nsteps, np.array(self.mu_0), negative_log_prob=False)
        self.mu_0 = float(mu_0)
Example #7
    def _resample_mu_self(self, A):
        """
        Resample the self connection offset
        :return:
        """
        from autograd import grad
        from hips.inference.hmc import hmc


        lp  = lambda mu_self: self._hmc_log_probability(self.L, self.mu_0, mu_self, A)
        dlp = grad(lp)

        stepsz = 0.005
        nsteps = 10
        mu_self = hmc(lp, dlp, stepsz, nsteps, np.array(self.mu_self), negative_log_prob=False)
        self.mu_self = float(mu_self)
Example #8
    def _resample_L(self, A, W):
        """
        Resample the locations given A
        :return:
        """
        from autograd import grad
        from hips.inference.hmc import hmc

        lp  = lambda L: self._hmc_log_probability(L, self.b, A, W)
        dlp = grad(lp)

        stepsz = 0.005
        nsteps = 10
        # lp0 = lp(self.L)
        self.L = hmc(lp, dlp, stepsz, nsteps, self.L.copy(),
                     negative_log_prob=False)
Example #9
    def _resample_L(self, A):
        """
        Resample the locations given A
        :return:
        """
        from autograd import grad
        from hips.inference.hmc import hmc

        lp = lambda L: self._hmc_log_probability(L, self.mu_0, self.mu_self, A)
        dlp = grad(lp)

        nsteps = 10
        self.L, self._L_step_sz, self._L_accept_rate = \
            hmc(lp, dlp, self._L_step_sz, nsteps, self.L.copy(),
                negative_log_prob=False, avg_accept_rate=self._L_accept_rate,
                adaptive_step_sz=True)
Example #10
    def serial_update(self, model):
        # Sample each conductance in turn given that
        # C*dVc_dt ~ N(I_in - np.dot(Isc, gsc), sig_V^2)

        c = self.get_compartment(model)
        chs = c.channels

        # Get a list of conductances for this compartment
        gs = [ch.g for ch in chs]
        gsc = np.array([g.value for g in gs]).ravel()
        # import pdb; pdb.set_trace()

        dVc_dt, Isc, dt = self.get_dV_and_currents(model)

        for (i, (g, ch)) in enumerate(zip(gs, chs)):
            i_rem = np.concatenate((np.arange(i), np.arange(i + 1, len(chs))))
            gsc_rem = gsc[i_rem]
            Isc_rem = Isc[:, i_rem]
            dV_resid = dVc_dt - np.dot(Isc_rem, gsc_rem).ravel()

            # Sample new gs with HMC
            prior = g.distribution
            nll = lambda log_gs: -1.0 * self._logp(log_gs, dV_resid, Isc[:, i],
                                                   dt, prior)
            grad_nll = lambda log_gs: -1.0 * self._grad_logp(
                log_gs, dV_resid, Isc[:, i], dt, prior)

            # DEBUG:
            # self.check_grads(nll, grad_nll, np.log(gsc[i]).reshape((1,)), step=1e-4)

            nsteps = 10
            curr_log_g = np.log(gsc[i]).reshape((1, ))
            new_log_g, new_step_sz, new_accept_rate = \
                hmc(nll, grad_nll, self.step_sz[i], nsteps, curr_log_g,
                    adaptive_step_sz=True,
                    avg_accept_rate=self.avg_accept_rate[i],
                    min_step_sz=1e-4)

            # Update step size and accept rate
            self.step_sz[i] = new_step_sz
            self.avg_accept_rate[i] = new_accept_rate

            # Update the channel conductance parameter
            gsc[i] = np.exp(new_log_g)
            ch.g.value = np.exp(new_log_g)
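
The leave-one-out residual above reduces each update to a one-dimensional conditional. In the notation of the comment at the top of the method (a sketch of the implied model):

    dV_resid = dVc_dt - \sum_{j != i} g[j] * Isc[:,j]
    dV_resid ~ N(g[i] * Isc[:,i], sig_V^2)

so each HMC step targets the posterior of log g[i] under the channel's prior, holding the other conductances fixed.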
Example #11
    def _resample_b_hmc(self, A, W):
        """
        Resample the distance dependence offset
        :return:
        """
        # TODO: We could sample from the exact Gaussian conditional
        from autograd import grad
        from hips.inference.hmc import hmc

        lp  = lambda b: self._hmc_log_probability(self.L, b, A, W)
        dlp = grad(lp)

        stepsz = 0.0001
        nsteps = 10
        b = hmc(lp, dlp, stepsz, nsteps,
                np.array(self.b),
                negative_log_prob=False)
        self.b = float(b)
        print("b:", self.b)
Example #12
    def serial_update(self, model):
        # Sample each conductance in turn given that
        # C*dVc_dt ~ N(I_in - np.dot(Isc, gsc), sig_V^2)

        c = self.get_compartment(model)
        chs = c.channels

        # Get a list of conductances for this compartment
        gs = [ch.g for ch in chs]
        gsc = np.array([g.value for g in gs]).ravel()
        # import pdb; pdb.set_trace()

        dVc_dt, Isc, dt = self.get_dV_and_currents(model)

        for (i, (g, ch)) in enumerate(zip(gs, chs)):
            i_rem = np.concatenate((np.arange(i), np.arange(i + 1, len(chs))))
            gsc_rem = gsc[i_rem]
            Isc_rem = Isc[:, i_rem]
            dV_resid = dVc_dt - np.dot(Isc_rem, gsc_rem).ravel()

            # Sample new gs with HMC
            prior = g.distribution
            nll = lambda log_gs: -1.0 * self._logp(log_gs, dV_resid, Isc[:, i], dt, prior)
            grad_nll = lambda log_gs: -1.0 * self._grad_logp(log_gs, dV_resid, Isc[:, i], dt, prior)

            # DEBUG:
            # self.check_grads(nll, grad_nll, np.log(gsc[i]).reshape((1,)), step=1e-4)

            nsteps = 10
            curr_log_g = np.log(gsc[i]).reshape((1,))
            new_log_g, new_step_sz, new_accept_rate = \
                hmc(nll, grad_nll, self.step_sz[i], nsteps, curr_log_g,
                    adaptive_step_sz=True,
                    avg_accept_rate=self.avg_accept_rate[i],
                    min_step_sz=1e-4)

            # Update step size and accept rate
            self.step_sz[i] = new_step_sz
            self.avg_accept_rate[i] = new_accept_rate

            # Update the channel conductance parameter
            gsc[i] = np.exp(new_log_g)
            ch.g.value = np.exp(new_log_g)
Example #13
    def _resample_L(self, A, W):
        """
        Resample the locations given A
        :return:
        """
        from autograd import grad
        from hips.inference.hmc import hmc

        lp = lambda L: self._hmc_log_probability(L, self.b, A, W)
        dlp = grad(lp)

        stepsz = 0.005
        nsteps = 10
        # lp0 = lp(self.L)
        self.L = hmc(lp,
                     dlp,
                     stepsz,
                     nsteps,
                     self.L.copy(),
                     negative_log_prob=False)
Example #14
    def _resample_mu_0(self, A):
        """
        Resample the locations given A
        :return:
        """
        from autograd import grad
        from hips.inference.hmc import hmc

        lp = lambda mu_0: self._hmc_log_probability(self.L, mu_0, self.mu_self,
                                                    A)
        dlp = grad(lp)

        stepsz = 0.005
        nsteps = 10
        mu_0 = hmc(lp,
                   dlp,
                   stepsz,
                   nsteps,
                   np.array(self.mu_0),
                   negative_log_prob=False)
        self.mu_0 = float(mu_0)
Example #15
    def _resample_mu_self(self, A):
        """
        Resample the self connection offset
        :return:
        """
        from autograd import grad
        from hips.inference.hmc import hmc

        lp = lambda mu_self: self._hmc_log_probability(self.L, self.mu_0,
                                                       mu_self, A)
        dlp = grad(lp)

        stepsz = 0.005
        nsteps = 10
        mu_self = hmc(lp,
                      dlp,
                      stepsz,
                      nsteps,
                      np.array(self.mu_self),
                      negative_log_prob=False)
        self.mu_self = float(mu_self)
Example #16
    def _resample_b_hmc(self, A, W):
        """
        Resample the distance dependence offset
        :return:
        """
        # TODO: We could sample from the exact Gaussian conditional
        from autograd import grad
        from hips.inference.hmc import hmc

        lp = lambda b: self._hmc_log_probability(self.L, b, A, W)
        dlp = grad(lp)

        stepsz = 0.0001
        nsteps = 10
        b = hmc(lp,
                dlp,
                stepsz,
                nsteps,
                np.array(self.b),
                negative_log_prob=False)
        self.b = float(b)
        print "b: ", self.b
Example #17
    def sample(self, acc, size=(1,)):
        """ Sample from the prior
                """
        N,D = size

        # TODO: Actually sample a DPP
        # TODO: For now we just run a Markov chain to sample
        L = T.dmatrix('L')
        lp = self.log_p(L)
        glp = T.grad(lp, L)

        f_lp = lambda x: -1.0*lp.eval({L : x.reshape((N,D))})
        f_glp = lambda x: -1.0*glp.eval({L : x.reshape((N,D))}).reshape(N*D)

        # x0 = L1.reshape(N*D)
        x = self.bound.get_value() * np.random.randn(N*D)
        N_smpls = 1000
        for s in np.arange(N_smpls):
            x = hmc(f_lp, f_glp, 0.25, 1, x)

        assert np.all(np.isfinite(x))

        return x.reshape((N,D))
Example #18
lp1 = np.zeros(N_samples)
#lp1[0] = lp(L_estimate)
a = np.zeros(N_samples)
W_all = np.zeros((N_samples, N, N))

for s in np.arange(1, N_samples):

    W1 = W + np.random.normal(0.1, 0.1)

    lp = lambda L1: _hmc_log_probability(N, dim, L1, W, sigma)
    dlp = grad(lp)
    stepsz = 0.005
    nsteps = 10
    accept_rate = 0.9
    smpls[s], stepsz, accept_rate = \
        hmc(lp, dlp, stepsz, nsteps, smpls[s-1], negative_log_prob=False,
            avg_accept_rate=accept_rate, adaptive_step_sz=True)

    lp1[s] = lp(smpls[s])
    sigma = _resample_sigma(smpls[s])
    a[s] = sigma
    W_all[s - 1] = W1
    print(sigma)

for s in range(N_samples):
    R = compute_optimal_rotation(smpls[s], L)
    smpls[s] = np.dot(smpls[s], R)

L_estimate = smpls[N_samples // 2:].mean(0)
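
compute_optimal_rotation is not shown here; since the latent locations are only identified up to rotation, it presumably solves an orthogonal Procrustes problem. A sketch of that standard solution (an assumption about what the helper does):

import numpy as np

def optimal_rotation(X, Y):
    # Orthogonal Procrustes: the R minimizing ||X R - Y||_F is U V^T,
    # where U S V^T is the SVD of X^T Y
    U, _, Vt = np.linalg.svd(X.T.dot(Y))
    return U.dot(Vt)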

# Debug here, because the two directed weights are plotted together
# with different strengths.
# (Truncated plotting helper: the surviving tail set symmetric y-limits,
# labeled the axes 'Latent Dimension 1' / 'Latent Dimension 2', called
# plt.show(), and returned ax.)

Example #19
for i in range(1000):

    L1 = L_estimate
    lp = lambda L1: _hmc_log_probability(N, dim, L1, W)
    dlp = grad(lp)

    stepsz = 0.001
    nsteps = 10
    L_estimate = hmc(lp,
                     dlp,
                     stepsz,
                     nsteps,
                     L1.copy(),
                     negative_log_prob=False)

D1 = ((L_estimate[:, None, :] - L_estimate[None, :, :])**2).sum(2)
W_estimate = -D1

plot_LatentDistanceModel(W_estimate, L_estimate, N)
plot_LatentDistanceModel(W, L, N)
Example #20
    def resample_obs_hypers_hmc(self):
        """
        Sample the parameters of a gamma prior given firing rates L

        log p(L[:,c] | a_0[c], b_0[c]) =
            \sum_k a_0[c] log b_0[c] - gammaln(a_0[c]) + (a_0[c]-1) log(L[k,c]) - b_0[c] L[k,c]

        We place an improper uniform prior over log a_0[c] and log b_0[c],
        which effectively introduces a prior of the form p(a_0) = const/a_0

        Since a_0 and b_0 are required to be positive, we work in log space

        :param a_0:
        :param b_0:
        :param L: a K x C matrix of firing rates for each cell
        :return:
        """
        a_0, b_0 = self.obs_distns[0].hypers
        L = np.array([o.lmbdas for o in self.obs_distns])

        # Use a gamma(aa,bb) prior over a_0 and b_0
        aa = 3.
        bb = 3.

        # Define helpers for log prob and its gradient
        def nlpc(x, c):
            lna = x[0]
            lnb = x[1]

            a = np.exp(lna)
            b = np.exp(lnb)
            ll =  (a * np.log(b) - gammaln(a) + (a-1) * np.log(L[:,c]) - b * L[:,c]).sum()

            # Prior is constant with respect to log a and log b (i.e. x)
            # lprior = 0
            lprior = (aa) * np.log(a) - bb*a
            lprior += (aa) * np.log(b) - bb*b

            lp = ll + lprior
            return -lp

        def gnlpc(x, c):

            # import pdb; pdb.set_trace()
            lna = x[0]
            lnb = x[1]

            a = np.exp(lna)
            b = np.exp(lnb)
            dll_da =  (np.log(b) - psi(a) + np.log(L[:,c])).sum()
            dll_db =  (a/b  - L[:,c]).sum()

            # Prior is constant with respect to log a and log b (i.e. x)
            # dlprior_da = 0
            # dlprior_db = 0

            dlprior_da = aa/a - bb
            dlprior_db = aa/b - bb

            dlp_da = dll_da + dlprior_da
            dlp_db = dll_db + dlprior_db

            da_dlna = a
            db_dlnb = b

            dlp_dlna = dlp_da * da_dlna
            dlp_dlnb = dlp_db * db_dlnb

            return np.array([-dlp_dlna, -dlp_dlnb])

        n_steps = 10
        step_sz = 0.001

        # Update the hypers for each cell
        a_f = a_0.copy()
        b_f = b_0.copy()
        for n in range(self.obs_distns[0].N):
            nlp = lambda x: nlpc(x, n)
            gnlp = lambda x: gnlpc(x, n)

            x0 = np.array([np.log(a_0[n]), np.log(b_0[n])])
            xc = hmc(nlp, gnlp, step_sz, n_steps, x0)

            # Set the hypers
            a_f[n], b_f[n] = np.exp(xc)

        # Truncate to make sure > 0
        a_f = np.clip(a_f, 1e-4, np.inf)
        b_f = np.clip(b_f, 1e-4, np.inf)

        for o in self.obs_distns:
            o.hypers = (a_f,b_f)
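
Because gnlpc hand-codes the chain rule through the log transform, a central-difference check is cheap insurance before trusting the HMC updates. A sketch (nlp and gnlp are the per-cell closures defined above):

import numpy as np

def check_grad_fd(f, g, x0, eps=1e-6):
    # Compare the analytic gradient g against central finite differences of f
    num = np.zeros_like(x0)
    for i in range(x0.size):
        e = np.zeros_like(x0)
        e[i] = eps
        num[i] = (f(x0 + e) - f(x0 - e)) / (2 * eps)
    return np.max(np.abs(num - g(x0)))

# e.g. assert check_grad_fd(nlp, gnlp, x0) < 1e-4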
Example #21
def test_determinant():
    sigma = 5.0
    bound = 5.0
    det_prior = DeterminenalPointProcess({'sigma' : sigma, 'bound' : bound})
    L = T.dmatrix('L')
    lp = det_prior.log_p(L)
    glp = T.grad(lp, L)

    L1 = np.arange(6, step=1).reshape((3, 2)).astype(float)
    print("theano lp: %f" % lp.eval({L: L1}))

    # D1 = (L1 - L1.T)**2
    # lp_L1_test = np.log(np.linalg.det(np.exp(-D1))) + -0.5/10**2 * np.sum(L1**2)
    # print "test lp: %f" % lp_L1_test
    #
    # L2 = np.random.rand(3).reshape((3,1))
    # print "theano lp: %f" % lp.eval({L : L2})
    #
    # D2 = (L2 - L2.T)**2
    # lp_L2_test = np.log(np.linalg.det(np.exp(-D2))) + -0.5/10**2 * np.sum(L2**2)
    # print "test lp: %f" % lp_L2_test
    #
    # # TODO: Test 2d L
    #
    # # Compute gradients
    # print "theano glp: ",  glp.eval({L : L1})
    # print "theano glp: ",  glp.eval({L : L2})
    from hips.inference.hmc import hmc
    N = 3
    D = 2
    f_lp = lambda x: -1.0*lp.eval({L : x.reshape((N,D))})
    f_glp = lambda x: -1.0*glp.eval({L : x.reshape((N,D))}).reshape(N*D)

    # x0 = L1.reshape(N*D)
    x0 = bound * np.random.randn(N*D)
    N_smpls = 1000
    smpls = [x0]
    for s in np.arange(N_smpls):
        x_next = hmc(f_lp, f_glp, 0.25, 1, smpls[-1])
        # print "Iteration %d:" % s
        # print x_next
        smpls.append(x_next)

    # Make a movie of the samples
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    import matplotlib.animation as manimation

    FFMpegWriter = manimation.writers['ffmpeg']
    metadata = dict(title='Movie Test', artist='Matplotlib',
        comment='Movie support!')
    writer = FFMpegWriter(fps=15, metadata=metadata)

    fig = plt.figure()
    x0 = smpls[0].reshape((N,D))
    l = plt.plot(x0[:,0], x0[:,1], 'ko')
    plt.xlim(-4*bound, 4*bound)
    plt.ylim(-4*bound, 4*bound)

    with writer.saving(fig, "dpp_hmc_smpl.mp4", 100):
        for i in range(N_smpls):
            xi = smpls[i].reshape((N,D))
            l[0].set_data(xi[:,0], xi[:,1])
            writer.grab_frame()
Example #22
    def resample_obs_hypers_hmc(self):
        """
        Sample the parameters of a gamma prior given firing rates L

        log p(L[:,c] | a_0[c], b_0[c]) =
            \sum_k a_0[c] log b_0[c] - gammaln(a_0[c]) + (a_0[c]-1) log(L[k,c]) - b_0[c] L[k,c]

        We place an improper uniform prior over log a_0[c] and log b_0[c],
        which effectively introduces a prior of the form p(a_0) = const/a_0

        Since a_0 and b_0 are required to be positive, we work in log space

        :param a_0:
        :param b_0:
        :param L: a K x C matrix of firing rates for each cell
        :return:
        """
        a_0, b_0 = self.obs_distns[0].hypers
        L = np.array([o.lmbdas for o in self.obs_distns])

        # Use a gamma(aa,bb) prior over a_0 and b_0
        aa = 3.
        bb = 3.

        # Define helpers for log prob and its gradient
        def nlpc(x, c):
            lna = x[0]
            lnb = x[1]

            a = np.exp(lna)
            b = np.exp(lnb)
            ll = (a * np.log(b) - gammaln(a) + (a - 1) * np.log(L[:, c]) -
                  b * L[:, c]).sum()

            # Prior is constant with respect to log a and log b (i.e. x)
            # lprior = 0
            lprior = (aa) * np.log(a) - bb * a
            lprior += (aa) * np.log(b) - bb * b

            lp = ll + lprior
            return -lp

        def gnlpc(x, c):

            # import pdb; pdb.set_trace()
            lna = x[0]
            lnb = x[1]

            a = np.exp(lna)
            b = np.exp(lnb)
            dll_da = (np.log(b) - psi(a) + np.log(L[:, c])).sum()
            dll_db = (a / b - L[:, c]).sum()

            # Prior is constant with respect to log a and log b (i.e. x)
            # dlprior_da = 0
            # dlprior_db = 0

            dlprior_da = aa / a - bb
            dlprior_db = aa / b - bb

            dlp_da = dll_da + dlprior_da
            dlp_db = dll_db + dlprior_db

            da_dlna = a
            db_dlnb = b

            dlp_dlna = dlp_da * da_dlna
            dlp_dlnb = dlp_db * db_dlnb

            return np.array([-dlp_dlna, -dlp_dlnb])

        n_steps = 10
        step_sz = 0.001

        # Update the hypers for each cell
        a_f = a_0.copy()
        b_f = b_0.copy()
        for n in range(self.obs_distns[0].N):
            nlp = lambda x: nlpc(x, n)
            gnlp = lambda x: gnlpc(x, n)

            x0 = np.array([np.log(a_0[n]), np.log(b_0[n])])
            xc = hmc(nlp, gnlp, step_sz, n_steps, x0)

            # Set the hypers
            a_f[n], b_f[n] = np.exp(xc)

        # Truncate to make sure > 0
        a_f = np.clip(a_f, 1e-4, np.inf)
        b_f = np.clip(b_f, 1e-4, np.inf)

        for o in self.obs_distns:
            o.hypers = (a_f, b_f)
Example #23
def test_determinant():
    sigma = 5.0
    bound = 5.0
    det_prior = DeterminenalPointProcess({'sigma': sigma, 'bound': bound})
    L = T.dmatrix('L')
    lp = det_prior.log_p(L)
    glp = T.grad(lp, L)

    L1 = np.arange(6, step=1).reshape((3, 2)).astype(float)
    print("theano lp: %f" % lp.eval({L: L1}))

    # D1 = (L1 - L1.T)**2
    # lp_L1_test = np.log(np.linalg.det(np.exp(-D1))) + -0.5/10**2 * np.sum(L1**2)
    # print "test lp: %f" % lp_L1_test
    #
    # L2 = np.random.rand(3).reshape((3,1))
    # print "theano lp: %f" % lp.eval({L : L2})
    #
    # D2 = (L2 - L2.T)**2
    # lp_L2_test = np.log(np.linalg.det(np.exp(-D2))) + -0.5/10**2 * np.sum(L2**2)
    # print "test lp: %f" % lp_L2_test
    #
    # # TODO: Test 2d L
    #
    # # Compute gradients
    # print "theano glp: ",  glp.eval({L : L1})
    # print "theano glp: ",  glp.eval({L : L2})
    from hips.inference.hmc import hmc
    N = 3
    D = 2
    f_lp = lambda x: -1.0 * lp.eval({L: x.reshape((N, D))})
    f_glp = lambda x: -1.0 * glp.eval({L: x.reshape((N, D))}).reshape(N * D)

    # x0 = L1.reshape(N*D)
    x0 = bound * np.random.randn(N * D)
    N_smpls = 1000
    smpls = [x0]
    for s in np.arange(N_smpls):
        x_next = hmc(f_lp, f_glp, 0.25, 1, smpls[-1])
        # print "Iteration %d:" % s
        # print x_next
        smpls.append(x_next)

    # Make a movie of the samples
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    import matplotlib.animation as manimation

    FFMpegWriter = manimation.writers['ffmpeg']
    metadata = dict(title='Movie Test',
                    artist='Matplotlib',
                    comment='Movie support!')
    writer = FFMpegWriter(fps=15, metadata=metadata)

    fig = plt.figure()
    x0 = smpls[0].reshape((N, D))
    l = plt.plot(x0[:, 0], x0[:, 1], 'ko')
    plt.xlim(-4 * bound, 4 * bound)
    plt.ylim(-4 * bound, 4 * bound)

    with writer.saving(fig, "dpp_hmc_smpl.mp4", 100):
        for i in range(N_smpls):
            xi = smpls[i].reshape((N, D))
            l[0].set_data(xi[:, 0], xi[:, 1])
            writer.grab_frame()