Example #1
    def fit(self, n1, n2):
        oo = self
        mnd    = oo.mnd
        x   = mnd.x[n1:n2]
        N   = n2-n1
        k   = mnd.k
        M   = oo.M

        covAll = _N.cov(x.T)
        dcovMag= _N.diagonal(covAll)*0.125

        #  temporary containers
        expTrm = _N.empty((M, N))
        expArg = _N.empty((M, N))
        crats = _N.zeros((M+1, N))
        rands = _N.random.rand(N, 1)
        dirArgs = _N.empty(M, dtype=_N.int)

        rsum = _N.empty((1, N))
        skpM   = _N.arange(0, N)*M
        
        for it in xrange(oo.ITERS-1):
            iscov = _N.linalg.inv(oo.scov[it])
            norms = 1/_N.sqrt(2*_N.pi*_N.linalg.det(oo.scov[it]))
            norms = norms.reshape(M, 1)

            for im in xrange(M):
                expArg[im] = -0.5*_N.sum(_N.multiply((x-oo.smu[it, im]), _N.dot(x-oo.smu[it, im], iscov[im])), axis=1)   #  expArg[im] is size N
            
            rexpArg = expArg.T.reshape(M*N)
            lrgInM = expArg.argmax(axis=0)
            lrgstArgs = rexpArg[skpM+lrgInM]
            expArg0 = expArg - lrgstArgs

            expTrm = _N.exp(expArg0)
            rats = oo.sm[it]*expTrm*norms  #  shape is M x oo.N
            _N.sum(rats, axis=0, out=rsum[0, :])

            rats /= rsum   #  each column of "rats" sums to 1

            for im in xrange(M):
                crats[im+1] = rats[im] + crats[im]

            rands = _N.random.rand(N)
            rrands = _N.tile(rands, M).reshape(M, N)
            ###  THIS once broke because we had an empty cluster
            irw, icl = _N.where((rrands >= crats[:-1]) & (rrands <= crats[1:]))

            oo.gz[it+1, icl, irw] = 1   #  we must clean out gz

            #  _N.sum(oo.gz...) has size M; it is the vector of counts of observations assigned to each state 'm'
            _N.add(oo.PR_m_alp, _N.sum(oo.gz[it+1], axis=0), out=dirArgs)
            oo.sm[it+1, :, 0] = _N.random.dirichlet(dirArgs)
            
            for im in xrange(M):
                minds = _N.where(oo.gz[it+1, :, im] == 1)[0]

                if len(minds) > 0:
                    clstx    = x[minds]
                    mc       = _N.mean(clstx, axis=0)
                    Nm       = clstx.shape[0]
                    ##  cov of posterior distribution of cluster means
                    po_mu_sg = _N.linalg.inv(oo.iPR_mu_sg[im] + Nm*iscov[im])
                    ##  mean of posterior distribution of cluster means
                    po_mu_mu  = _N.dot(po_mu_sg, _N.dot(oo.iPR_mu_sg[im], oo.PR_mu_mu[im]) + Nm*_N.dot(iscov[im], mc))
                    oo.smu[it+1, im] = mvn(po_mu_mu, po_mu_sg)

                    ##  dof of posterior distribution of cluster covariance
                    po_sg_dof = oo.PR_cov_nu[im] + Nm
                    ##  dof of posterior distribution of cluster covariance
                    po_sg_PSI = oo.PR_cov_PSI[im] + _N.dot((clstx - oo.smu[it+1, im]).T, (clstx-oo.smu[it+1, im]))

                    oo.scov[it+1, im] = s_u.sample_invwishart(po_sg_PSI, po_sg_dof)
                    dgl = _N.diagonal(oo.scov[it + 1, im])
                    rat = dgl / dcovMag
                    bgr = _N.where(rat > 1)[0]
                    if len(bgr) > 0:
                        #print "making smaller"
                        scl = rat[_N.argmax(rat)]
                        oo.scov[it+1, im] /= scl
                else:  #  no marks assigned to this cluster 
                    oo.scov[it+1, im] = oo.scov[it, im]
                    oo.smu[it+1, im]  = oo.smu[it, im]
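#  The block above is the per-cluster conjugate Normal / inverse-Wishart update.
#  Below is a minimal, self-contained sketch of that step, for illustration only:
#  it assumes scipy.stats.invwishart in place of s_u.sample_invwishart, and the
#  argument names (PR_mu_mu, PR_mu_sg, PR_cov_nu, PR_cov_PSI) are stand-ins for
#  the prior attributes used by the class above.
import numpy as _N
import scipy.stats as _ss

def conj_update_one_cluster(clstx, PR_mu_mu, PR_mu_sg, PR_cov_nu, PR_cov_PSI, cur_cov):
    """Draw (mu, Sigma) for one cluster from the marks clstx (Nm x k) assigned to it."""
    Nm    = clstx.shape[0]
    icov  = _N.linalg.inv(cur_cov)              #  current Gibbs value of the cluster covariance
    iPR   = _N.linalg.inv(PR_mu_sg)
    mc    = _N.mean(clstx, axis=0)
    #  posterior covariance / mean of the cluster mean (Normal-Normal conjugacy)
    po_mu_sg = _N.linalg.inv(iPR + Nm*icov)
    po_mu_mu = _N.dot(po_mu_sg, _N.dot(iPR, PR_mu_mu) + Nm*_N.dot(icov, mc))
    mu       = _N.random.multivariate_normal(po_mu_mu, po_mu_sg)
    #  posterior dof / scale matrix of the cluster covariance (inverse-Wishart conjugacy)
    po_nu  = PR_cov_nu + Nm
    dev    = clstx - mu
    po_PSI = PR_cov_PSI + _N.dot(dev.T, dev)
    Sg     = _ss.invwishart.rvs(df=po_nu, scale=po_PSI)
    return mu, Sg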
Example #2
    def gibbs(self, ITERS, K, ep1=0, ep2=None, savePosterior=True, gtdiffusion=False, doSepHash=True, use_spc=True, nz_pth=0., smth_pth_ker=100, ignoresilence=False, use_omp=False, nThrds=2):
        """
        gtdiffusion:  use the ground-truth center of the place field when calculating the variance of the center.  The meaning of diffusePerMin differs in this case.
        """
        print "gibbs   %.5f" % _N.random.rand()
        oo = self
        oo.nThrds = nThrds
        twpi     = 2*_N.pi
        pcklme   = {}

        ep2 = oo.epochs if (ep2 == None) else ep2
        oo.epochs = ep2-ep1

        ######################################  GRID for calculating
        ####  #  points in sum.  
        ####  #  points in uniform sampling of exp(x)p(x)   (non-spike intervals)
        ####  #  points in sampling of f  for conditional posterior distribution
        ####  #  points in sampling of q2 for conditional posterior distribution
        ####  NSexp, Nupx, fss, q2ss

        #  numerical grid
        ux = _N.linspace(oo.xLo, oo.xHi, oo.Nupx, endpoint=False)   # uniform grid over x position
        uxr = ux.reshape((1, oo.Nupx))
        uxrr= ux.reshape((1, 1, oo.Nupx))
        #q2x    = _N.exp(_N.linspace(_N.log(1e-7), _N.log(100), oo.q2ss))  #  5 orders of
        q2x    = _N.exp(_N.linspace(_N.log(oo.q2x_L), _N.log(oo.q2x_H), oo.q2ss))  #  5 orders of
        d_q2x  = _N.diff(q2x)
        q2x_m1 = _N.array(q2x[0:-1])
        lq2x    = _N.log(q2x)
        iq2x    = 1./q2x
        q2xr     = q2x.reshape((oo.q2ss, 1))
        iq2xr     = 1./q2xr
        q2xrr     = q2x.reshape((1, oo.q2ss, 1))
        iq2xrr     = 1./q2xrr
        d_q2xr  =  d_q2x.reshape((oo.q2ss - 1, 1))
        q2x_m1  = _N.array(q2x[0:-1])
        q2x_m1r = q2x_m1.reshape((oo.q2ss-1, 1))

        sqrt_2pi_q2x   = _N.sqrt(twpi*q2x)
        l_sqrt_2pi_q2x = _N.log(sqrt_2pi_q2x)

        freeClstr = None
        if smth_pth_ker > 0:
            gk     = gauKer(smth_pth_ker) # 0.1s  smoothing of motion
            gk     /= _N.sum(gk)
            xf     = _N.convolve(oo.dat[:, 0], gk, mode="same")
            oo.dat[:, 0] = xf + nz_pth*_N.random.randn(len(oo.dat[:, 0]))
        else:
            oo.dat[:, 0] += nz_pth*_N.random.randn(len(oo.dat[:, 0]))
        x      = oo.dat[:, 0]
        mks    = oo.dat[:, 2:]
        if nz_pth > 0:
            _N.savetxt(resFN("nzyx.txt", dir=oo.outdir), x, fmt="%.4f")

        f_q2_rate = (oo.diffusePerMin**2)/60000.  #  unit of minutes  
        
        ######################################  PRECOMPUTED

        tau_l0 = oo.t_hlf_l0/_N.log(2)
        tau_q2 = oo.t_hlf_q2/_N.log(2)

        for epc in xrange(ep1, ep2):
            print "^^^^^^^^^^^^^^^^^^^^^^^^    epoch %d" % epc

            t0 = oo.intvs[epc]
            t1 = oo.intvs[epc+1]
            if epc > 0:
                tm1= oo.intvs[epc-1]
                #  e.g. intvs = 0, 10, 30:  epoch midpoints are 5 and 20, so dt = 20 - 5 = 15 = 0.5*((10+30) - (0+10))
                dt = 0.5*((t1+t0) - (t0+tm1))

            dt = (t1-t0)*0.5   #  NOTE: this overrides the midpoint-spacing dt computed above
            xt0t1 = _N.array(x[t0:t1])
            posbins  = _N.linspace(oo.xLo, oo.xHi, oo.Nupx+1)
            #  _N.sum(px)*(xbns[1]-xbns[0]) = 1
            px, xbns = _N.histogram(xt0t1, bins=posbins, normed=True)   
            pxr      = px.reshape((1, oo.Nupx))
            pxrr     = px.reshape((1, 1, oo.Nupx))

            Asts    = _N.where(oo.dat[t0:t1, 1] == 1)[0]   #  based at 0

            if epc == ep1:   ###  initialize
                labS, labH, flatlabels, M, MF, hashthresh, nHSclusters = gAMxMu.initClusters(oo, K, x, mks, t0, t1, Asts, doSepHash=doSepHash, xLo=oo.xLo, xHi=oo.xHi, oneCluster=oo.oneCluster, nzclstr=oo.nzclstr)
 
                Mwowonz       = M+1 if oo.nzclstr else M
                #nHSclusters.append(M - nHSclusters[0]-nHSclusters[1])   #  last are free clusters that are not the noise cluster

                u_u_  = _N.empty((M, K))
                u_Sg_ = _N.empty((M, K, K))
                #######   containers for GIBBS samples iterations
                smp_sp_prms = _N.zeros((3, ITERS, M))  
                smp_mk_prms = [_N.zeros((K, ITERS, M)), 
                               _N.zeros((K, K, ITERS, M))]
                smp_sp_hyps = _N.zeros((6, ITERS, M))
                smp_mk_hyps = [_N.zeros((K, ITERS, M)), 
                               _N.zeros((K, K, ITERS, M)),
                               _N.zeros((1, ITERS, M)), 
                               _N.zeros((K, K, ITERS, M))]
                oo.smp_sp_prms = smp_sp_prms
                oo.smp_mk_prms = smp_mk_prms
                oo.smp_sp_hyps = smp_sp_hyps
                oo.smp_mk_hyps = smp_mk_hyps

                if oo.nzclstr:
                    smp_nz_l0     = _N.zeros(ITERS)
                    smp_nz_hyps = _N.zeros((2, ITERS))

                #  list of freeClstrs
                freeClstr = _N.empty(M, dtype=_N.bool)   #  Actual cluster
                freeClstr[:] = False

                l0, f, q2, u, Sg = gAMxMu.declare_params(M, K, nzclstr=oo.nzclstr)   #  nzclstr not inited  # sized to include noise cluster if needed
                _l0_a, _l0_B, _f_u, _f_q2, _q2_a, _q2_B, _u_u, _u_Sg, _Sg_nu, \
                    _Sg_PSI = gAMxMu.declare_prior_hyp_params(M, MF, K, x, mks, Asts, t0)
                fr = f[0:M].reshape((M, 1))
                gAMxMu.init_params_hyps(oo, M, MF, K, l0, f, q2, u, Sg, _l0_a, _l0_B, _f_u, _f_q2, _q2_a, _q2_B, _u_u, _u_Sg, _Sg_nu, \
                    _Sg_PSI, Asts, t0, x, mks, flatlabels, nHSclusters, nzclstr=oo.nzclstr)

                U   = _N.empty(M)
                FQ2 = _N.empty(M)
                _fxs0 = _N.tile(_N.linspace(0, 1, oo.fss), M).reshape(M, oo.fss)

                f_exp_px = _N.empty((M, oo.fss))
                q2_exp_px= _N.empty((M, oo.q2ss))

                if oo.nzclstr:
                    nz_l0_intgrd   = _N.exp(-0.5*ux*ux / q2[Mwowonz-1])
                    _nz_l0_a       = 0.001
                    _nz_l0_B       = 0.1

                ######  the hyperparameters for f, q2, u, Sg, l0 during Gibbs
                #  f_u_, f_q2_, q2_a_, q2_B_, u_u_, u_Sg_, Sg_nu, Sg_PSI_, l0_a_, l0_B_


            NSexp   = t1-t0    #  length of position data; number of no-spike positions to sum
            xt0t1 = _N.array(x[t0:t1])

            nSpks    = len(Asts)
            gz   = _N.zeros((ITERS, nSpks, Mwowonz), dtype=_N.bool)
            oo.gz=gz
            print "spikes %d" % nSpks

            dSilenceX1 = (NSexp/float(oo.Nupx))*(oo.xHi-oo.xLo)
            dSilenceX2 = NSexp*(xbns[1]-xbns[0])  # dx of histogram
            print "-------------------------- %(1).4f  %(2).4f" % {"1" : dSilenceX1, "2" : dSilenceX2}

            dSilenceX  = dSilenceX1

            xAS  = x[Asts + t0]   #  position @ spikes
            mAS  = mks[Asts + t0]   #  marks @ spikes
            xASr = xAS.reshape((1, nSpks))
            mASr = mAS.reshape((1, nSpks, K))
            econt = _N.empty((Mwowonz, nSpks))
            rat   = _N.zeros((Mwowonz+1, nSpks))

            qdrMKS = _N.empty((Mwowonz, nSpks))
            ################################  GIBBS ITERS ITERS ITERS

            clstsz = _N.zeros(M, dtype=_N.int)

            _iu_Sg = _N.array(_u_Sg)
            for m in xrange(M):
                _iu_Sg[m] = _N.linalg.inv(_u_Sg[m])

            ttA = _tm.time()

            for iter in xrange(ITERS):
                tt1 = _tm.time()
                iSg = _N.linalg.inv(Sg)

                if (iter % 100) == 0:    
                    #print "-------iter  %(i)d   %(r).5f" % {"i" : iter, "r" : _N.random.rand()}
                    print "-------iter  %(i)d" % {"i" : iter}

                gAMxMu.stochasticAssignment(oo, epc, iter, M, Mwowonz, K, l0, f, q2, u, Sg, _f_u, _u_u, _f_q2, _u_Sg, Asts, t0, mASr, xASr, rat, econt, gz, qdrMKS, freeClstr, hashthresh, ((epc > 0) and (iter == 0)), nthrds=oo.nThrds)
                #gAMxMu.stochasticAssignment(oo, iter, M, Mwowonz, K, l0, f, q2, u, Sg, _f_u, _u_u, Asts, t0, mASr, xASr, rat, econt, gz, qdrMKS, freeClstr, hashthresh, iter==0, nthrds=oo.nThrds)

                ###############  FOR EACH CLUSTER

                l_sts = []
                for m in xrange(M):   #  get the minds
                    minds = _N.where(gz[iter, :, m] == 1)[0]  
                    sts  = Asts[minds] + t0   #  sts is in absolute time
                    clstsz[m] = len(sts)
                    l_sts.append(sts)
                # for m in xrange(Mwowonz):   #  get the minds
                #     minds = _N.where(gz[iter, :, m] == 1)[0]  
                #     print "cluster %(m)d   len %(l)d    " % {"m" : m, "l" : len(minds)}
                #     print u[m]
                #     print f[m]

                #tt2 = _tm.time()
                ###############
                ###############  CONDITIONAL l0
                ###############

                #  _ss.gamma.rvs uses shape k and scale theta;  theta = 1/B  (B is our rate parameter)
                iiq2 = 1./q2[0:M]
                iiq2r= iiq2.reshape((M, 1))
                iiq2rr= iiq2.reshape((M, 1, 1))

                fr = f[0:M].reshape((M, 1))
                l0_intgrd   = _N.exp(-0.5*(fr - ux)*(fr-ux) * iiq2r)  

                sLLkPr      = _N.empty((M, oo.q2ss))
                l0_exp_px   = _N.sum(l0_intgrd*pxr, axis=1) * dSilenceX
                BL  = (oo.dt/_N.sqrt(twpi*q2[0:M]))*l0_exp_px    #  dim M

                if (epc > 0) and oo.adapt:
                    _md_nd= _l0_a / _l0_B
                    _Dl0_a = _l0_a * _N.exp(-dt/tau_l0)
                    _Dl0_B = _Dl0_a / _md_nd
                else:
                    _Dl0_a = _l0_a
                    _Dl0_B = _l0_B

                aL  = clstsz
                l0_a_ = aL + _Dl0_a
                l0_B_ = BL + _Dl0_B
                
                try:
                    #  mean is (l0_a_ / l0_B_)
                    l0[0:M] = _ss.gamma.rvs(l0_a_, scale=(1/l0_B_))  #  check
                except ValueError:
                    """
                    print l0_B_
                    print _Dl0_B
                    print BL
                    print l0_exp_px
                    print 1/_N.sqrt(twpi*q2[0:M])

                    print pxr
                    print l0_intgrd
                    """
                    _N.savetxt("fxux", (fr - ux)*(fr-ux))
                    _N.savetxt("fr", fr)
                    _N.savetxt("iiq2", iiq2)
                    _N.savetxt("l0_intgrd", l0_intgrd)
                    raise


                smp_sp_prms[oo.ky_p_l0, iter] = l0[0:M]
                smp_sp_hyps[oo.ky_h_l0_a, iter] = l0_a_
                smp_sp_hyps[oo.ky_h_l0_B, iter] = l0_B_
                mcs = _N.empty((M, K))   # cluster sample means

                #tt3 = _tm.time()

                ###############
                ###############     u
                ###############
                for m in xrange(M):
                    if clstsz[m] > 0:
                        u_Sg_[m] = _N.linalg.inv(_iu_Sg[m] + clstsz[m]*iSg[m])
                        clstx    = mks[l_sts[m]]

                        mcs[m]       = _N.mean(clstx, axis=0)
                        #u_u_[m] = _N.dot(u_Sg_[m], _N.dot(_iu_Sg[m], _u_u[m]) + clstsz[m]*_N.dot(iSg[m], mcs[m]))
                        u_u_[m] = _N.einsum("jk,k->j", u_Sg_[m], _N.dot(_iu_Sg[m], _u_u[m]) + clstsz[m]*_N.dot(iSg[m], mcs[m]))
                        # print "mean of cluster %d" % m
                        # print mcs[m]
                        # print u_u_[m]
                        # hyp
                        ########  POSITION
                        ##  mean of posterior distribution of cluster means
                        #  sigma^2 and mu are the current Gibbs-sampled values

                        ##  mean of posterior distribution of cluster means
                        # print "for cluster %(m)d with size %(sz)d" % {"m" : m, "sz" : clstsz[m]}
                        # print mcs[m]
                        # print u_u_[m]
                        # print _u_u[m]
                    else:
                        u_Sg_[m] = _N.array(_u_Sg[m])
                        u_u_[m] = _N.array(_u_u[m])

                ucmvnrms= _N.random.randn(M, K)
                C       = _N.linalg.cholesky(u_Sg_)
                u[0:M]       = _N.einsum("njk,nk->nj", C, ucmvnrms) + u_u_

                smp_mk_prms[oo.ky_p_u][:, iter] = u[0:M].T  # dim of u wrong
                smp_mk_hyps[oo.ky_h_u_u][:, iter] = u_u_.T
                smp_mk_hyps[oo.ky_h_u_Sg][:, :, iter] = u_Sg_.T

                #tt4 = _tm.time()
                ###############
                ###############  Conditional f
                ###############

                if (epc > 0) and oo.adapt:
                    q2pr = _f_q2 + f_q2_rate * dt
                else:
                    q2pr = _f_q2
                for m in xrange(M):
                    sts = l_sts[m]
                    if clstsz[m] > 0:
                        fs  = (1./clstsz[m])*_N.sum(xt0t1[sts-t0])
                        fq2 = q2[m]/clstsz[m]
                        U[m]   = (fs*q2pr[m] + _f_u[m]*fq2) / (q2pr[m] + fq2)
                        FQ2[m] = (q2pr[m]*fq2) / (q2pr[m] + fq2)
                    else:
                        U[m]   = _f_u[m]
                        FQ2[m] = q2pr[m]

                FQ    = _N.sqrt(FQ2)
                Ur    = U.reshape((M, 1))
                FQr   = FQ.reshape((M, 1))
                FQ2r  = FQ2.reshape((M, 1))

                if use_spc:
                    fxs  = _N.copy(_fxs0)
                    fxs *= (FQr*120)
                    fxs -= (FQr*60)
                    fxs += Ur

                    if use_omp:
                        M_times_N_f_intgrls_raw(fxs, ux, iiq2, dSilenceX, px, f_exp_px, M, oo.fss, oo.Nupx, oo.nThrds)
                    else:
                        fxsr     = fxs.reshape((M, oo.fss, 1))
                        fxrux = -0.5*(fxsr-uxrr)*(fxsr-uxrr)
                        #  f_intgrd    is M x fss x Nupx
                        f_intgrd  = _N.exp(fxrux*iiq2rr)   #  integrand
                        f_exp_px = _N.sum(f_intgrd*pxrr, axis=2) * dSilenceX
                        #  f_exp_px   is M x fss
                    l0r = l0[0:M].reshape((M, 1))
                    q2r = q2[0:M].reshape((M, 1))
                     #  s   is (M x fss)
                    s = -(l0r*oo.dt/_N.sqrt(twpi*q2r)) * f_exp_px  #  a function of x
                    #if (iter > ITERS - 40) and (iter % 5 == 0):
                    #    print f_exp_px
                    #    _plt.plot(fxs[0], _N.s)
                else:
                    s = _N.zeros(M)

                #  U, FQ2 is   dim(M)   
                #  fxs is M x fss
                funcf   = -0.5*((fxs-Ur)*(fxs-Ur))/FQ2r + s
                maxes   = _N.max(funcf, axis=1)   
                maxesr  = maxes.reshape((M, 1))
                funcf   -= maxesr
                condPosF= _N.exp(funcf)   #  condPosF is M x fss
                ttB = _tm.time()

                #  fxs   M x fss
                #  fxs            M x fss
                #  condPosF       M x fss
                norm    = 1./_N.sum(condPosF, axis=1)  #  sz M
                f_u_    = norm*_N.sum(fxs*condPosF, axis=1)  #  sz M
                f_u_r   = f_u_.reshape((M, 1))
                f_q2_   = norm*_N.sum(condPosF*(fxs-f_u_r)*(fxs-f_u_r), axis=1)
                f[0:M]       = _N.sqrt(f_q2_)*_N.random.randn(M) + f_u_   #  one independent draw per cluster
                smp_sp_prms[oo.ky_p_f, iter] = f[0:M]
                smp_sp_hyps[oo.ky_h_f_u, iter] = f_u_
                smp_sp_hyps[oo.ky_h_f_q2, iter] = f_q2_

                #tt5 = _tm.time()
                ##############
                ##############  VARIANCE, COVARIANCE
                ##############
                for m in xrange(M):
                    if clstsz[m] >= K:
                        ##  dof of posterior distribution of cluster covariance
                        Sg_nu_ = _Sg_nu[m, 0] + clstsz[m]
                        ##  dof of posterior distribution of cluster covariance
                        ur = u[m].reshape((1, K))
                        clstx    = mks[l_sts[m]]
                        Sg_PSI_ = _Sg_PSI[m] + _N.dot((clstx - ur).T, (clstx-ur))
                    else:
                        Sg_nu_ = _Sg_nu[m, 0] 
                        ##  dof of posterior distribution of cluster covariance
                        ur = u[m].reshape((1, K))
                        Sg_PSI_ = _Sg_PSI[m]
                    Sg[m] = s_u.sample_invwishart(Sg_PSI_, Sg_nu_)
                    smp_mk_hyps[oo.ky_h_Sg_nu][0, iter, m] = Sg_nu_
                    smp_mk_hyps[oo.ky_h_Sg_PSI][:, :, iter, m] = Sg_PSI_

                
                ##  dof of posterior distribution of cluster covariance

                smp_mk_prms[oo.ky_p_Sg][:, :, iter] = Sg[0:M].T

                #tt6 = _tm.time()
                ##############
                ##############  SAMPLE SPATIAL VARIANCE
                ##############
                if use_spc:
                    #  M x q2ss x Nupx  
                    #  f        M x 1    x 1
                    #  iq2xrr   1 x q2ss x 1
                    #  uxrr     1 x 1    x Nupx

                    if use_omp:  #ux variable held fixed
                        M_times_N_q2_intgrls_raw(f, ux, iq2x, dSilenceX, px, q2_exp_px, M, oo.q2ss, oo.Nupx, oo.nThrds)
                    else:
                        frr       = f.reshape((M, 1, 1))
                        q2_intgrd = _N.exp(-0.5*(frr - uxrr)*(frr-uxrr) * iq2xrr)
                        q2_exp_px = _N.sum(q2_intgrd*pxrr, axis=2) * dSilenceX

                    # function of q2

                    s = -((l0r*oo.dt)/sqrt_2pi_q2x)*q2_exp_px
                else:
                    s = _N.zeros((oo.q2ss, M))
                #  B' / (a' - 1) = MODE   #keep mode the same after discount
                #  B' = MODE * (a' - 1)
                if (epc > 0) and oo.adapt:
                    _md_nd= _q2_B / (_q2_a + 1)
                    _Dq2_a = _q2_a * _N.exp(-dt/tau_q2)
                    _Dq2_B = _Dq2_a / _md_nd
                else:
                    _Dq2_a = _q2_a
                    _Dq2_B = _q2_B

                SL_Bs = _N.empty(M)
                SL_as = _N.empty(M)

                for m in xrange(M):
                    if clstsz[m] > 0:
                        sts = l_sts[m]
                        xI = (xt0t1[sts-t0]-f[m])*(xt0t1[sts-t0]-f[m])*0.5
                        SL_a = 0.5*clstsz[m] - 1   #  spiking part of likelihood
                        SL_B = _N.sum(xI)  #  spiking part of likelihood
                        SL_Bs[m] = SL_B
                        SL_as[m] = SL_a
                        #  spiking prior x prior
                        #sLLkPr[m] = -(SL_a + 1)*lq2x - iq2x*SL_B
                        sLLkPr[m] = -(_q2_a[m] + SL_a + 2)*lq2x - iq2x*(_q2_B[m] + SL_B)
                    else:
                        sLLkPr[m] = -(_q2_a[m] + 1)*lq2x - iq2x*_q2_B[m]

                q2_a_, q2_B_ = mltpl_ig_prmsUV(q2xr, sLLkPr.T, s.T, d_q2xr, q2x_m1r, clstsz, iter, mks, t0, xt0t1, gz, l_sts, SL_as, SL_Bs, _q2_a, _q2_B, oo.q2_min, oo.q2_max)
                    
                q2[0:M] = _ss.invgamma.rvs(q2_a_ + 1, scale=q2_B_)  #  check

                tt7 = _tm.time()

                smp_sp_prms[oo.ky_p_q2, iter]   = q2[0:M]
                smp_sp_hyps[oo.ky_h_q2_a, iter] = q2_a_
                smp_sp_hyps[oo.ky_h_q2_B, iter] = q2_B_

                # print "timing start"
                # print (tt2-tt1)
                # print (tt3-tt2)
                # print (tt4-tt3)
                # print (tt5-tt4)
                # print (tt6-tt5)
                #print (tt7-tt1)
                # print "timing end"

                    
                #  nz clstr.  fixed width
                if oo.nzclstr:
                    nz_l0_exp_px   = _N.sum(nz_l0_intgrd*px) * dSilenceX
                    BL  = (oo.dt/_N.sqrt(twpi*q2[Mwowonz-1]))*nz_l0_exp_px

                    minds = len(_N.where(gz[iter, :, Mwowonz-1] == 1)[0])
                    l0_a_ = minds + _nz_l0_a
                    l0_B_ = BL    + _nz_l0_B

                    l0[Mwowonz-1]  = _ss.gamma.rvs(l0_a_, scale=(1/l0_B_)) 
                    smp_nz_l0[iter]       = l0[Mwowonz-1]
                    smp_nz_hyps[0, iter]  = l0_a_
                    smp_nz_hyps[1, iter]  = l0_B_

            ttB = _tm.time()
            print (ttB-ttA)

            gAMxMu.finish_epoch(oo, nSpks, epc, ITERS, gz, l0, f, q2, u, Sg, _f_u, _f_q2, _q2_a, _q2_B, _l0_a, _l0_B, _u_u, _u_Sg, _Sg_nu, _Sg_PSI, smp_sp_hyps, smp_sp_prms, smp_mk_hyps, smp_mk_prms, freeClstr, M, K)
            #  MAP of nzclstr
            if oo.nzclstr:
                frm = int(0.7*ITERS)
                _nz_l0_a              = _N.median(smp_nz_hyps[0, frm:])
                _nz_l0_B              = _N.median(smp_nz_hyps[1, frm:])
            pcklme["smp_sp_hyps"] = smp_sp_hyps
            pcklme["smp_mk_hyps"] = smp_mk_hyps
            pcklme["smp_sp_prms"] = smp_sp_prms
            pcklme["smp_mk_prms"] = smp_mk_prms
            pcklme["sp_prmPstMd"] = oo.sp_prmPstMd
            pcklme["mk_prmPstMd"] = oo.mk_prmPstMd
            pcklme["intvs"]       = oo.intvs
            pcklme["occ"]         = gz
            pcklme["nz_pth"]         = nz_pth
            pcklme["M"]           = M
            pcklme["Mwowonz"]           = Mwowonz
            if Mwowonz > M:  # or oo.nzclstr == True
                pcklme["nz_fs"]       = f[M]
                pcklme["nz_q2"]       = q2[M]
                pcklme["nz_Sg"]       = Sg[M]
                pcklme["nz_u"]        = u[M]
                pcklme["smp_nz_l0"]  = smp_nz_l0
                pcklme["smp_nz_hyps"]= smp_nz_hyps
                
            dmp = open(resFN("posteriors_%d.dmp" % epc, dir=oo.outdir), "wb")
            pickle.dump(pcklme, dmp, -1)
            dmp.close()
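#  A small illustrative check, not part of the original code, of the Gamma
#  parameterization used in the conditional for l0 above: the shape is
#  l0_a_ = (spike count) + prior a, the rate is l0_B_ = (integrated occupancy
#  term BL) + prior B, and scipy's gamma.rvs takes scale = 1/rate, so the
#  conditional mean is l0_a_ / l0_B_.  All numbers below are made up.
import numpy as _N
import scipy.stats as _ss

a_prior, B_prior = 2.0, 1.0      #  hypothetical prior shape / rate
n_spikes         = 150           #  hypothetical spike count assigned to the cluster
occ_integral     = 30.0          #  hypothetical BL term (dt/sqrt(2 pi q2) * sum(intgrd*px) * dSilenceX)

l0_a_ = n_spikes + a_prior       #  posterior shape
l0_B_ = occ_integral + B_prior   #  posterior rate

draws = _ss.gamma.rvs(l0_a_, scale=1./l0_B_, size=20000)
print("analytic mean a/B = %.3f   sample mean = %.3f" % (l0_a_/l0_B_, _N.mean(draws)))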
Example #3
    def fit(self, M, pos, mk, n1, n2, curr_encpos, init):
        """
        Fit, with the inverting done in blocks
        """
        oo = self
        k      = oo.k
        mnd    = oo.mnd

        if (n2 - n1) > 0:
            x   = _N.empty((n2-n1, k))
            x[:, 0]    = pos
            x[:, 1:]   = mk
            N   = n2-n1
            oo.pmdim = k
            oo.gz   = _N.zeros((oo.ITERS, N, M), dtype=_N.bool)
            oo.gz[:,:,:] = 0

            if init:
                oo.PR_m_alp[:] = 1. / M   #  initial

            covAll = _N.cov(x.T)
            dcovMag= _N.diagonal(covAll)*0.125

            #  temporary containers
            expTrm = _N.empty((M, N))
            expArg = _N.empty((M, N))
            crats = _N.zeros((M+1, N))
            rands = _N.random.rand(N, 1)
            dirArgs = _N.empty(M, dtype=_N.int)

            rsum = _N.empty((1, N))
            skpM   = _N.arange(0, N)*M

            Nms   = _N.empty((M, 1, 1), dtype=_N.int)
            mcs   = _N.empty((M, k))
            clstxs= []

            ####  stuff used repeatedly
            k_zeros    = _N.zeros(k)
            pr_iSg_Mu = _N.einsum("mjk,mk->mj", oo.iPR_mu_sg, oo.PR_mu_mu)

            # if not kde:
            #     occ = 
            #  
            for it in xrange(oo.ITERS-1):
                t1 = _tm.time()
                if it % 50 == 0:
                    print it
                iscov = _N.linalg.inv(oo.scov[it, 0:M])
                #print iscov

                norms = 1/_N.sqrt(2*_N.pi*_N.linalg.det(oo.scov[it, 0:M]))
                norms = norms.reshape(M, 1)
                t2 = _tm.time()

                ####  THIS IS THE BOTTLENECK
                for im in xrange(M):
                    expArg[im] = -0.5*_N.einsum("nj,nj->n", x - oo.smu[it, im], _N.dot(x - oo.smu[it, im], iscov[im]))
                    #expArg[im] = -0.5*_N.sum(_N.multiply((x-oo.smu[it, im]), _N.dot(x-oo.smu[it, im], iscov[im])), axis=1)   #  expArg[im] is size N

                t3 = _tm.time()
                rexpArg = expArg.T.reshape(M*N)
                lrgInM = expArg.argmax(axis=0)
                lrgstArgs = rexpArg[skpM+lrgInM]
                expArg0 = expArg - lrgstArgs

                expTrm = _N.exp(expArg0)

                rats = oo.sm[it, 0:M]*expTrm*norms  #  shape is M x oo.N

                _N.sum(rats, axis=0, out=rsum[0, :])   
                rats /= rsum   #  each column of "rats" sums to 1

                #  gz 
                for im in xrange(M):
                    crats[im+1] = rats[im] + crats[im]
                t4 = _tm.time()
                rands = _N.random.rand(N)
                rrands = _N.tile(rands, M).reshape(M, N)
                ###  THIS once broke because we had an empty cluster
                irw, icl = _N.where((rrands >= crats[:-1]) & (rrands <= crats[1:]))



                ##############  GENERATE cluster membership
                oo.gz[it+1, icl, irw] = 1   #  we must clean out gz
                #  For the 

                ##  For the j-th cluster, look at the std. dev in position space.
                #   from current it. value.
                #   Either put into it 

                #  _N.sum(oo.gz...) has size M; it is the vector of counts of observations assigned to each state 'm'
                occ = _N.sum(oo.gz[it+1], axis=0)
                _N.add(oo.PR_m_alp[0:M], occ, out=oo.po_alpha[it+1])

                ##############  SAMPLE WEIGHTS
                oo.sm[it+1, 0:M, 0] = _N.random.dirichlet(oo.po_alpha[it+1])
                if curr_encpos is not None:   #  weighted
                    for m in xrange(M):
                        if occ[m] == 0:
                            oo.sm[it+1, m, 0] = oo.ms[m, 0]
                    oo.sm[it+1, :, 0] /= _N.sum(oo.sm[it+1, :, 0])

                clstxs = []
                mindss = []
                mcs = _N.empty((M, k))   # cluster sample means

                t5 = _tm.time()            
                for im in xrange(M):  # 111111111111111
                    minds    = _N.where(oo.gz[it+1, :, im] == 1)[0]
                    Nms[im,0,0]      = minds.shape[0]
                    mindss.append(minds)
                oo.po_mu_sg[it+1] = _N.linalg.inv(oo.iPR_mu_sg + Nms*iscov)

                for im in xrange(M):  # 222222222222222
                    if Nms[im,0,0] > 0:
                        clstx    = x[mindss[im]]
                        mcs[im]       = _N.mean(clstx, axis=0)
                    else:
                        clstx    = k_zeros
                        mcs[im]       = clstx
                    clstxs.append(clstx)

                    # hyp
                    ########  POSITION
                    ##  mean of posterior distribution of cluster means
                    #  sigma^2 and mu are the current Gibbs-sampled values

                    ##  mean of posterior distribution of cluster means

                oo.po_mu_mu[it+1] = _N.einsum("mjk,mk->mj", oo.po_mu_sg[it+1], pr_iSg_Mu + Nms[:,:,0]*_N.einsum("mjk,mk->mj", iscov, mcs))
                # dot(MATRIX, vector)   

                    ##############  SAMPLE MEANS
                    #  this can be done without

                rn3    = _N.random.randn(M, k)
                C      = _N.linalg.cholesky(oo.po_mu_sg[it+1])
                oo.smu[it+1] = oo.po_mu_mu[it+1] + _N.einsum("njk,nk->nj", C, rn3)

                for im in xrange(M):  # 3333333333333333
                    Nm = Nms[im,0,0]

                    if Nm >= 1:
                        clstx = clstxs[im]
                        ##  dof of posterior distribution of cluster covariance
                        oo.po_cov_nu[it+1, im] = oo.PR_cov_nu[im] + Nm
                        ##  dof of posterior distribution of cluster covariance
                        oo.po_cov_PSI[it+1, im] = oo.PR_cov_PSI[im] + _N.dot((clstx - oo.smu[it+1, im]).T, (clstx-oo.smu[it+1, im]))

                        ##############  SAMPLE COVARIANCES
                        oo.scov[it+1, im] = s_u.sample_invwishart(oo.po_cov_PSI[it+1, im], oo.po_cov_nu[it+1, im])
                    else:  #  no marks assigned to this cluster 
                        oo.scov[it+1, im] = oo.scov[it, im]
                        oo.smu[it+1, im]  = oo.smu[it, im]
                        oo.po_mu_sg[it+1, im] = oo.PR_mu_sg[im]
                        oo.po_mu_mu[it+1, im] = oo.PR_mu_mu[im]
                        oo.po_cov_nu[it+1, im] = oo.PR_cov_nu[im]
                        ##  dof of posterior distribution of cluster covariance
                        oo.po_cov_PSI[it+1, im] = oo.PR_cov_PSI[im]

                t6 = _tm.time()
                # print "-----"
                # print (t2-t1)
                # print (t3-t2)
                # print (t4-t3)
                # print (t5-t4)
                # print (t6-t5)

            #  When I say prior for mu, I mean I have hyper parameters mu_mu and mu_sg.
            #  hyperparameters are not sampled

            #hITERS = int(oo.ITERS*0.75)
            hITERS = 0
            oo.us[:]  = _N.mean(oo.smu[hITERS:oo.ITERS], axis=0)
            oo.covs[:] = _N.mean(oo.scov[hITERS:oo.ITERS], axis=0)
            """
            oo.ms[:]  = _N.mean(oo.sm[hITERS:oo.ITERS], axis=0).reshape(oo.M, 1)
            """
            for mm in xrange(M):
                hstg, bns = _N.histogram(oo.sm[hITERS:oo.ITERS, mm], bins=_N.linspace(0, 1, 201))
                mxind = _N.where(hstg == _N.max(hstg))[0]
                oo.ms[mm, 0]  = bns[mxind[0]]

            oo.ms[:, 0] /= _N.sum(oo.ms[:, 0])
            oo.dat = x
        else:
            print "NO DATA for this encoding epoch.  Not doing anything."
Example #4
    def fit(self, M, pos, mk, n1, n2, curr_encpos, init=False):
        """
        Fit, with the inverting done in blocks
        """
        oo = self
        k      = oo.k
        mnd    = oo.mnd
        x   = _N.empty((n2-n1, k))
        x[:, 0]    = pos
        x[:, 1:]   = mk
        N   = n2-n1
        oo.pmdim = k
        oo.gz   = _N.zeros((oo.ITERS, N, M), dtype=_N.int)
        oo.gz[:,:,:] = 0

        if init:
            oo.PR_m_alp[:] = 1. / M   #  initial

        covAll = _N.cov(x.T)
        dcovMag= _N.diagonal(covAll)*0.125

        #  temporary containers
        expTrm = _N.empty((M, N))
        expArg = _N.empty((M, N))
        crats = _N.zeros((M+1, N))
        rands = _N.random.rand(N, 1)
        dirArgs = _N.empty(M, dtype=_N.int)

        rsum = _N.empty((1, N))
        skpM   = _N.arange(0, N)*M

        Nms   = _N.empty((M, 1, 1), dtype=_N.int)
        mcs   = _N.empty((M, k))
        clstxs= []

        ####  stuff used repeatedly
        k_zeros    = _N.zeros(k)
        pr_iSg_Mu = _N.einsum("mjk,mk->mj", oo.iPR_mu_sg, oo.PR_mu_mu)

        # if not kde:
        #     occ = 
        #  
        for it in xrange(oo.ITERS-1):
            t1 = _tm.time()
            if it % 50 == 0:
                print it
            iscov = _N.linalg.inv(oo.scov[it, 0:M])
            #print iscov

            norms = 1/_N.sqrt(2*_N.pi*_N.linalg.det(oo.scov[it, 0:M]))
            norms = norms.reshape(M, 1)
            t2 = _tm.time()

            ####  THIS IS THE BOTTLENECK
            for im in xrange(M):
                expArg[im] = -0.5*_N.einsum("nj,nj->n", x - oo.smu[it, im], _N.dot(x - oo.smu[it, im], iscov[im]))
                #expArg[im] = -0.5*_N.sum(_N.multiply((x-oo.smu[it, im]), _N.dot(x-oo.smu[it, im], iscov[im])), axis=1)   #  expArg[im] is size N

            t3 = _tm.time()
            rexpArg = expArg.T.reshape(M*N)
            lrgInM = expArg.argmax(axis=0)
            lrgstArgs = rexpArg[skpM+lrgInM]
            expArg0 = expArg - lrgstArgs

            expTrm = _N.exp(expArg0)

            rats = oo.sm[it, 0:M]*expTrm*norms  #  shape is M x oo.N

            _N.sum(rats, axis=0, out=rsum[0, :])   
            rats /= rsum   #  each column of "rats" sums to 1

            for im in xrange(M):
                crats[im+1] = rats[im] + crats[im]
            t4 = _tm.time()
            rands = _N.random.rand(N)
            rrands = _N.tile(rands, M).reshape(M, N)
            ###  THIS once broke because we had an empty cluster
            irw, icl = _N.where((rrands >= crats[:-1]) & (rrands <= crats[1:]))

            ##############  GENERATE cluster membership
            oo.gz[it+1, icl, irw] = 1   #  we must clean out gz
            #  For the 

            ##  For the j-th cluster, look at the std. dev in position space.
            #   from current it. value.
            #   Either put into it 

            #  _N.sum(oo.gz...) has size M; it is the vector of counts of observations assigned to each state 'm'

            #oo.smu[it, im]
            xcnp = _N.linspace(-6, 6, 241)   
            xt = _N.tile(xcnp, M)   
            xt = xt.reshape(M, 241)

            xb = _N.linspace(-6, 6, 242)

            dx = 0.05
            #print curr_encpos
            occ, bns = _N.histogram(curr_encpos, bins=xb)   # occ gives # of milliseconds in that bin
            focc = _N.asarray(occ, dtype=_N.double)
            focct = _N.tile(focc, M)   
            focct = focct.reshape(M, 241)

            sgs  = oo.scov[it, :, 0, 0].reshape(M, 1)
            isgs = 1/sgs

            us   = oo.smu[it, :, 0].reshape(M, 1)

            spfrPr= (1/_N.sqrt(2*_N.pi*sgs))*_N.exp(-0.5*isgs*(xt-us)**2)*dx   # spatial firing profile for each cluster

            osp = focct*spfrPr  #  this gives me a unit time

            sumGz = _N.sum(oo.gz[it+1], axis=0)#
            wocc = _N.sum(osp, axis=1)  #     weighted occupancy

            wGz = sumGz / wocc
            wGz  /= _N.sum(wGz)    #  wGz now sums to 1
            wGz *= N   #  N = # of marks, so wGz now sums to the # of marks

            #zrOcc = _N.where(occ == 0)[0]
            #zrOcc = _N.where(occ == 0)[0]
            #bFullySampled = not (len(zrOcc) == 0)

            maxwocc = _N.max(wocc)

            inds = _N.where((wocc < 0.01 * maxwocc) & (sumGz < 1))[0]
            for m in inds:
                wGz[m] = oo.po_alpha[it, m] if (it == 0) else \
                         oo.po_alpha[it, m] - oo.PR_m_alp[m]

            #  if wocc is large, we can trust the count as a measure of firing
            #  if wocc is small, the count is less reliable, so the previous iteration's value is reused (loop above)

            #_N.add(oo.PR_m_alp[0:M], _N.sum(oo.gz[it+1], axis=0), out=oo.po_alpha[it+1])
            #print "--------------"
            #print (wGz - _N.sum(oo.gz[it+1], axis=0))
            _N.add(oo.PR_m_alp[0:M], wGz, out=oo.po_alpha[it+1])

            ##############  SAMPLE WEIGHTS
            oo.sm[it+1, 0:M, 0] = _N.random.dirichlet(oo.po_alpha[it+1])

            clstxs = []
            mindss = []
            mcs = _N.empty((M, k))   # cluster sample means

            t5 = _tm.time()            
            for im in xrange(M):  # 111111111111111
                minds    = _N.where(oo.gz[it+1, :, im] == 1)[0]
                Nms[im,0,0]      = minds.shape[0]
                mindss.append(minds)
            oo.po_mu_sg[it+1] = _N.linalg.inv(oo.iPR_mu_sg + Nms*iscov)

            for im in xrange(M):  # 222222222222222
                if Nms[im,0,0] > 0:
                    clstx    = x[mindss[im]]
                    mcs[im]       = _N.mean(clstx, axis=0)
                else:
                    clstx    = k_zeros
                    mcs[im]       = clstx
                clstxs.append(clstx)

                # hyp
                ########  POSITION
                ##  mean of posterior distribution of cluster means
                #  sigma^2 and mu are the current Gibbs-sampled values

                ##  mean of posterior distribution of cluster means

            oo.po_mu_mu[it+1] = _N.einsum("mjk,mk->mj", oo.po_mu_sg[it+1], pr_iSg_Mu + Nms[:,:,0]*_N.einsum("mjk,mk->mj", iscov, mcs))
            # dot(MATRIX, vector)   
            
                ##############  SAMPLE MEANS
                #  this can be done without

            rn3    = _N.random.randn(M, k)
            C      = _N.linalg.cholesky(oo.po_mu_sg[it+1])
            oo.smu[it+1] = oo.po_mu_mu[it+1] + _N.einsum("njk,nk->nj", C, rn3)

            for im in xrange(M):  # 3333333333333333
                Nm = Nms[im,0,0]

                if Nm >= 2:
                    clstx = clstxs[im]
                    ##  dof of posterior distribution of cluster covariance
                    oo.po_cov_nu[it+1, im] = oo.PR_cov_nu[im] + Nm
                    ##  dof of posterior distribution of cluster covariance
                    oo.po_cov_PSI[it+1, im] = oo.PR_cov_PSI[im] + _N.dot((clstx - oo.smu[it+1, im]).T, (clstx-oo.smu[it+1, im]))

                    ##############  SAMPLE COVARIANCES
                    oo.scov[it+1, im] = s_u.sample_invwishart(oo.po_cov_PSI[it+1, im], oo.po_cov_nu[it+1, im])
                else:  #  no marks assigned to this cluster 
                    oo.scov[it+1, im] = oo.scov[it, im]
                    oo.smu[it+1, im]  = oo.smu[it, im]
                    oo.po_mu_sg[it+1, im] = oo.PR_mu_sg[im]
                    oo.po_mu_mu[it+1, im] = oo.PR_mu_mu[im]
                    oo.po_cov_nu[it+1, im] = oo.PR_cov_nu[im]
                    ##  dof of posterior distribution of cluster covariance
                    oo.po_cov_PSI[it+1, im] = oo.PR_cov_PSI[im]

            t6 = _tm.time()
            # print "-----"
            # print (t2-t1)
            # print (t3-t2)
            # print (t4-t3)
            # print (t5-t4)
            # print (t6-t5)

        #  When I say prior for mu, I mean I have hyper parameters mu_mu and mu_sg.
        #  hyperparameters are not sampled

        print oo.po_alpha[oo.ITERS-1]
        hITERS = int(oo.ITERS*0.75)
        oo.us[:]  = _N.mean(oo.smu[hITERS:oo.ITERS], axis=0)
        oo.covs[:] = _N.mean(oo.scov[hITERS:oo.ITERS], axis=0)
        oo.ms[:]  = _N.mean(oo.sm[hITERS:oo.ITERS], axis=0).reshape(oo.M, 1)

        oo.dat = x
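#  Minimal sketch, with toy numbers, of the occupancy weighting used above:
#  each cluster's spatial firing profile is integrated against the position
#  occupancy histogram (focc), and the raw membership counts are divided by
#  that weighted occupancy before being passed to the Dirichlet update.
import numpy as _N

M     = 2
xcnp  = _N.linspace(-6, 6, 241)                            #  position grid centers
dx    = xcnp[1] - xcnp[0]
focc  = _N.random.poisson(20, size=241).astype(_N.double)  #  toy occupancy (ms per bin)

us    = _N.array([[-2.0], [1.5]])                          #  cluster centers in position, (M, 1)
sgs   = _N.array([[0.25], [0.5]])                          #  cluster position variances, (M, 1)

spfrPr = (1/_N.sqrt(2*_N.pi*sgs))*_N.exp(-0.5*(xcnp - us)**2 / sgs)*dx   #  (M, 241) firing profiles
wocc   = _N.sum(focc*spfrPr, axis=1)                       #  occupancy as seen by each cluster

sumGz  = _N.array([120., 30.])                             #  toy raw membership counts
wGz    = sumGz / wocc
wGz   /= _N.sum(wGz)
wGz   *= _N.sum(sumGz)                                     #  rescale so weighted counts sum to the mark count
print(wGz)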
Example #5
    def gibbs(self, ITERS, K, ep1=0, ep2=None, savePosterior=True, gtdiffusion=False, Mdbg=None, doSepHash=True, use_spc=True, nz_pth=0., ignoresilence=False, use_omp=False):
        """
        gtdiffusion:  use the ground-truth center of the place field when calculating the variance of the center.  The meaning of diffusePerMin differs in this case.
        """
        print "gibbs"
        oo = self
        twpi     = 2*_N.pi
        pcklme   = {}

        ep2 = oo.epochs if (ep2 == None) else ep2
        oo.epochs = ep2-ep1

        ######################################  GRID for calculating
        ####  #  points in sum.  
        ####  #  points in uniform sampling of exp(x)p(x)   (non-spike intervals)
        ####  #  points in sampling of f  for conditional posterior distribution
        ####  #  points in sampling of q2 for conditional posterior distribution
        ####  NSexp, Nupx, fss, q2ss

        #  numerical grid
        ux = _N.linspace(oo.xLo, oo.xHi, oo.Nupx, endpoint=False)   # uniform x position
        q2x    = _N.exp(_N.linspace(_N.log(1e-7), _N.log(100), oo.q2ss))  #  5 orders of
        d_q2x  = _N.diff(q2x)
        q2x_m1 = _N.array(q2x[0:-1])
        lq2x    = _N.log(q2x)
        iq2x    = 1./q2x
        q2xr     = q2x.reshape((oo.q2ss, 1))
        iq2xr     = 1./q2xr
        sqrt_2pi_q2x   = _N.sqrt(twpi*q2x)
        l_sqrt_2pi_q2x = _N.log(sqrt_2pi_q2x)

        freeClstr = None
        gk     = gauKer(100) # 0.1s  smoothing of motion
        gk     /= _N.sum(gk)
        xf     = _N.convolve(oo.dat[:, 0], gk, mode="same")
        oo.dat[:, 0] = xf + nz_pth*_N.random.randn(len(oo.dat[:, 0]))
        x      = oo.dat[:, 0]
        mks    = oo.dat[:, 2:]

        f_q2_rate = (oo.diffusePerMin**2)/60000.  #  unit of minutes  

        ######################################  PRECOMPUTED

        tau_l0 = oo.t_hlf_l0/_N.log(2)
        tau_q2 = oo.t_hlf_q2/_N.log(2)

        for epc in xrange(ep1, ep2):
            t0 = oo.intvs[epc]
            t1 = oo.intvs[epc+1]
            if epc > 0:
                tm1= oo.intvs[epc-1]
                #  e.g. intvs = 0, 10, 30:  epoch midpoints are 5 and 20, so dt = 20 - 5 = 15 = 0.5*((10+30) - (0+10))
                dt = 0.5*((t1+t0) - (t0+tm1))

            dt = (t1-t0)*0.5   #  NOTE: this overrides the midpoint-spacing dt computed above
            xt0t1 = _N.array(x[t0:t1])
            posbins  = _N.linspace(oo.xLo, oo.xHi, oo.Nupx+1)
            #  _N.sum(px)*(xbns[1]-xbns[0]) = 1
            px, xbns = _N.histogram(xt0t1, bins=posbins, normed=True)   

            Asts    = _N.where(oo.dat[t0:t1, 1] == 1)[0]   #  based at 0
            Ants    = _N.where(oo.dat[t0:t1, 1] == 0)[0]

            if epc == ep1:   ###  initialize
                labS, labH, lab, flatlabels, M, MF, hashthresh, nHSclusters = gAMxMu.initClusters(oo, K, x, mks, t0, t1, Asts, doSepHash=doSepHash, xLo=oo.xLo, xHi=oo.xHi, oneCluster=oo.oneCluster)  # nHSclusters  is # of clusters in hash and signal 

                signalClusters = _N.where(flatlabels < nHSclusters[0])[0]
                Mwowonz = M if not oo.nzclstr else M + 1
                #######   containers for GIBBS samples iterations
                smp_sp_prms = _N.zeros((3, ITERS, M))  
                smp_mk_prms = [_N.zeros((K, ITERS, M)), 
                               _N.zeros((K, K, ITERS, M))]
                smp_sp_hyps = _N.zeros((6, ITERS, M))
                smp_mk_hyps = [_N.zeros((K, ITERS, M)), 
                               _N.zeros((K, K, ITERS, M)),
                               _N.zeros((1, ITERS, M)), 
                               _N.zeros((K, K, ITERS, M))]
                oo.smp_sp_prms = smp_sp_prms
                oo.smp_mk_prms = smp_mk_prms
                oo.smp_sp_hyps = smp_sp_hyps
                oo.smp_mk_hyps = smp_mk_hyps

                if oo.nzclstr:
                    smp_nz_l0     = _N.zeros(ITERS)
                    smp_nz_hyps = _N.zeros((2, ITERS))

                #  list of freeClstrs
                freeClstr = _N.empty(M, dtype=_N.bool)   #  Actual cluster
                freeClstr[:] = False

                l0, f, q2, u, Sg = gAMxMu.declare_params(M, K, nzclstr=oo.nzclstr)   #  nzclstr not INITED, sized to include noise cluster if needed
                _l0_a, _l0_B, _f_u, _f_q2, _q2_a, _q2_B, _u_u, _u_Sg, _Sg_nu, \
                    _Sg_PSI = gAMxMu.declare_prior_hyp_params(M, MF, K, x, mks, Asts, t0)    #  hyper params don't include noise cluster
                gAMxMu.init_params_hyps(oo, M, MF, K, l0, f, q2, u, Sg, Asts, t0, x, mks, flatlabels, nzclstr=oo.nzclstr, signalClusters=signalClusters)

                ######  the hyperparameters for f, q2, u, Sg, l0 during Gibbs
                #  f_u_, f_q2_, q2_a_, q2_B_, u_u_, u_Sg_, Sg_nu, Sg_PSI_, l0_a_, l0_B_

                if oo.nzclstr:
                    nz_l0_intgrd   = _N.exp(-0.5*ux*ux / q2[Mwowonz-1])
                    _nz_l0_a       = 0.001
                    _nz_l0_B       = 0.1

            NSexp   = t1-t0    #  length of position data; number of no-spike positions to sum
            xt0t1 = _N.array(x[t0:t1])

            nSpks    = len(Asts)
            gz   = _N.zeros((ITERS, nSpks, Mwowonz), dtype=_N.bool)
            oo.gz=gz
            print "spikes %d" % nSpks

            #dSilenceX = (NSexp/float(oo.Nupx))*(oo.xHi-oo.xLo)
            dSilenceX = NSexp*(xbns[1]-xbns[0])  # dx of histogram

            xAS  = x[Asts + t0]   #  position @ spikes
            mAS  = mks[Asts + t0]   #  marks @ spikes
            xASr = xAS.reshape((1, nSpks))
            #mASr = mAS.reshape((nSpks, 1, K))
            mASr = mAS.reshape((1, nSpks, K))
            econt = _N.empty((Mwowonz, nSpks))
            rat   = _N.zeros((Mwowonz+1, nSpks))

            qdrMKS = _N.empty((Mwowonz, nSpks))
            ################################  GIBBS ITERS ITERS ITERS

            #  linalgerror
            #_iSg_Mu = _N.einsum("mjk,mk->mj", _N.linalg.inv(_u_Sg), _u_u)

            clusSz = _N.zeros(M, dtype=_N.int)

            _iu_Sg = _N.array(_u_Sg)

            for m in xrange(M):
                _iu_Sg[m] = _N.linalg.inv(_u_Sg[m])

            ttA = _tm.time()
            for iter in xrange(ITERS):
                iSg = _N.linalg.inv(Sg)
                if (iter % 5) == 0:    
                    print "iter  %d" % iter

                gAMxMu.stochasticAssignment(oo, iter, M, Mwowonz, K, l0, f, q2, u, Sg, _f_u, _u_u, Asts, t0, mASr, xASr, rat, econt, gz, qdrMKS, freeClstr, hashthresh, ((epc > 0) and (iter == 0)))

        #         ###############  FOR EACH CLUSTER

                for m in xrange(M):
                    minds = _N.where(gz[iter, :, m] == 1)[0]
                    sts  = Asts[minds] + t0
                    nSpksM   = len(sts)
                    clusSz[m] = nSpksM

                    ###############  CONDITIONAL l0

                    #  _ss.gamma.rvs uses shape k and scale theta;  theta = 1/B  (B is our rate parameter)
                    iiq2 = 1./q2[m]
                    # xI = (xt0t1-f[m])*(xt0t1-f[m])*0.5*iiq2
                    # BL  = (oo.dt/_N.sqrt(twpi*q2[m]))*_N.sum(_N.exp(-xI))

                    #  l0_intgrd   (M x Nupx)
                    l0_intgrd   = _N.exp(-0.5*(f[m] - ux)*(f[m]-ux) * iiq2)  
                    l0_exp_px   = _N.sum(l0_intgrd*px) * dSilenceX

                    BL  = (oo.dt/_N.sqrt(twpi*q2[m]))*l0_exp_px


                    #  keep the mode the same after the discount
                    #  (a' - 1) / B' = MODE     (mode of a Gamma(a, B) is (a - 1) / B)
                    #  B' = (a' - 1) / MODE
                    #  discount a
                    #if (epc > 0) and oo.adapt and (_l0_a[m] > 1.1):
                    if (epc > 0) and oo.adapt:
                        _md_nd= _l0_a[m] / _l0_B[m]
                        _Dl0_a = _l0_a[m] * _N.exp(-dt/tau_l0)
                        _Dl0_B = _Dl0_a / _md_nd
                    else:
                        _Dl0_a = _l0_a[m]
                        _Dl0_B = _l0_B[m]

                    #  a'/B' = a/B
                    #  B' = (B/a)a'
                    aL  = nSpksM
                    l0_a_ = aL + _Dl0_a
                    l0_B_ = BL + _Dl0_B


                    # print "------------------"
                    # print "liklhd  BL   %(B).3f     f   %(f).3f   a %(a)d    B/a  %(ba).3f" % {"B" : BL, "f" : f[m], "ba" : (aL/ BL), "a" : aL}
                    # print "prior   BL   %(B).3f     f   %(f).3f   a %(a)d    B/a  %(ba).3f" % {"B" : l0_B_, "f" : f[m], "ba" : (l0_a_/ l0_B_), "a" : l0_a_}
                    # print (len(xt0t1)*oo.dt)
                    # print "******************"
                    
                    #print "%(1).5f   %(2).5f" % {"1" : l0_a_, "2" : l0_B_}

                    try:
                        l0[m] = _ss.gamma.rvs(l0_a_, scale=(1/l0_B_))  #  check
                    except ValueError:
                        print "fail"
                        print "M:        %d" % M
                        print "_l0_a[m]  %.3f" % _l0_a[m]
                        print "_l0_B[m]  %.3f" % _l0_B[m]
                        print "l0_a_     %.3f" % l0_a_
                        print "l0_B_     %.3f" % l0_B_
                        print "aL        %.3f" % aL
                        print "BL        %.3f" % BL
                        print "_Dl0_a    %.3f" % _Dl0_a
                        print "_Dl0_B    %.3f" % _Dl0_B
                        raise

                    ###  l0 / _N.sqrt(twpi*q2) is f*dt used in createData2
                    smp_sp_prms[oo.ky_p_l0, iter, m] = l0[m]
                    smp_sp_hyps[oo.ky_h_l0_a, iter, m] = l0_a_
                    smp_sp_hyps[oo.ky_h_l0_B, iter, m] = l0_B_
                    mcs = _N.empty((M, K))   # cluster sample means

                    if nSpksM >= K:
                        u_Sg_ = _N.linalg.inv(_iu_Sg[m] + nSpksM*iSg[m])
                        clstx    = mks[sts]

                        mcs[m]       = _N.mean(clstx, axis=0)
                        #u_u_ = _N.einsum("jk,k->j", u_Sg_, _N.dot(_N.linalg.inv(_u_Sg[m]), _u_u[m]) + nSpksM*_N.dot(iSg[m], mcs[m]))
                        u_u_ = _N.einsum("jk,k->j", u_Sg_, _N.dot(_iu_Sg[m], _u_u[m]) + nSpksM*_N.dot(iSg[m], mcs[m]))
                        # hyp
                        ########  POSITION
                        ##  mean of posterior distribution of cluster means
                        #  sigma^2 and mu are the current Gibbs-sampled values

                        ##  mean of posterior distribution of cluster means
                    else:
                        u_Sg_ = _N.array(_u_Sg[m])

                        u_u_ = _N.array(_u_u[m])
                    u[m] = _N.random.multivariate_normal(u_u_, u_Sg_)

                    smp_mk_prms[oo.ky_p_u][:, iter, m] = u[m]
                    smp_mk_hyps[oo.ky_h_u_u][:, iter, m] = u_u_
                    smp_mk_hyps[oo.ky_h_u_Sg][:, :, iter, m] = u_Sg_

                    """
                    ############################################
                    """
                    ###############  CONDITIONAL f
                    #q2pr = _f_q2[m] if (_f_q2[m] > q2rate) else q2rate
                    if (epc > 0) and oo.adapt:
                        q2pr = _f_q2[m] + f_q2_rate * dt
                    else:
                        q2pr = _f_q2[m]
                    if nSpksM > 0:  #  spiking portion likelihood x prior
                        fs  = (1./nSpksM)*_N.sum(xt0t1[sts-t0])
                        fq2 = q2[m]/nSpksM
                        U   = (fs*q2pr + _f_u[m]*fq2) / (q2pr + fq2)
                        FQ2 = (q2pr*fq2) / (q2pr + fq2)
                    else:
                        U   = _f_u[m]
                        FQ2 = q2pr

                    FQ    = _N.sqrt(FQ2)
                    fx    = _N.linspace(U - FQ*15, U + FQ*15, oo.fss)

                    if use_spc:
                        fxr     = fx.reshape((oo.fss, 1))
                        fxrux = -0.5*(fxr-ux)*(fxr-ux)  # 
                        f_intgrd  = _N.exp((fxrux*iiq2))   #  integrand
                        f_exp_px = _N.sum(f_intgrd*px, axis=1) * dSilenceX
                        s = -(l0[m]*oo.dt/_N.sqrt(twpi*q2[m])) * f_exp_px  #  a function of x
                    else:
                        s = 0
                    funcf   = -0.5*((fx-U)*(fx-U))/FQ2 + s
                    funcf   -= _N.max(funcf)
                    condPosF= _N.exp(funcf)

                    norm    = 1./_N.sum(condPosF)
                    f_u_    = norm*_N.sum(fx*condPosF)
                    f_q2_   = norm*_N.sum(condPosF*(fx-f_u_)*(fx-f_u_))
                    f[m]    = _N.sqrt(f_q2_)*_N.random.randn() + f_u_
                    smp_sp_prms[oo.ky_p_f, iter, m] = f[m]
                    smp_sp_hyps[oo.ky_h_f_u, iter, m] = f_u_
                    smp_sp_hyps[oo.ky_h_f_q2, iter, m] = f_q2_

                    #ttc1g = _tm.time()
                    #############  VARIANCE, COVARIANCE
                    if nSpksM >= K:
                        ##  dof of posterior distribution of cluster covariance
                        Sg_nu_ = _Sg_nu[m, 0] + nSpksM
                        ##  dof of posterior distribution of cluster covariance
                        ur = u[m].reshape((1, K))
                        Sg_PSI_ = _Sg_PSI[m] + _N.dot((clstx - ur).T, (clstx-ur))
                        Sg[m] = s_u.sample_invwishart(Sg_PSI_, Sg_nu_)
                    else:
                        Sg_nu_ = _Sg_nu[m, 0] 
                        ##  dof of posterior distribution of cluster covariance
                        ur = u[m].reshape((1, K))
                        Sg_PSI_ = _Sg_PSI[m]
                        Sg[m] = s_u.sample_invwishart(Sg_PSI_, Sg_nu_)

                    ##############  SAMPLE COVARIANCES

                    ##  dof of posterior distribution of cluster covariance

                    smp_mk_prms[oo.ky_p_Sg][:, :, iter, m] = Sg[m]
                    smp_mk_hyps[oo.ky_h_Sg_nu][0, iter, m] = Sg_nu_
                    smp_mk_hyps[oo.ky_h_Sg_PSI][:, :, iter, m] = Sg_PSI_

                    # ###############  CONDITIONAL q2
                    #xI = (xt0t1-f)*(xt0t1-f)*0.5*iq2xr

                    if use_spc:
                        q2_intgrd = _N.exp(-0.5*(f[m] - ux)*(f[m]-ux) * iq2xr)
                        q2_exp_px = _N.sum(q2_intgrd*px, axis=1) * dSilenceX

                        # function of q2
                        s = -((l0[m]*oo.dt)/sqrt_2pi_q2x)*q2_exp_px
                    else:
                        s = 0
                    #  B' / (a' - 1) = MODE   #keep mode the same after discount
                    #  B' = MODE * (a' - 1)
                    if (epc > 0) and oo.adapt:
                        _md_nd= _q2_B[m] / (_q2_a[m] + 1)
                        _Dq2_a = _q2_a[m] * _N.exp(-dt/tau_q2)
                        _Dq2_B = _Dq2_a / _md_nd
                    else:
                        _Dq2_a = _q2_a[m]
                        _Dq2_B = _q2_B[m]

                    if nSpksM > 0:
                        ##  (1/sqrt(sg2))^S
                        ##  (1/x)^(S/2)   = (1/x)-(a+1)
                        ##  -S/2 = -a - 1     -a = -S/2 + 1    a = S/2-1
                        xI = (xt0t1[sts-t0]-f[m])*(xt0t1[sts-t0]-f[m])*0.5
                        SL_a = 0.5*nSpksM - 1   #  spiking part of likelihood
                        SL_B = _N.sum(xI)  #  spiking part of likelihood
                        #  spiking prior x prior
                        sLLkPr = -(_q2_a[m] + SL_a + 2)*lq2x - iq2x*(_q2_B[m] + SL_B)
                    else:
                        sLLkPr = -(_q2_a[m] + 1)*lq2x - iq2x*_q2_B[m]


                    sat = sLLkPr + s
                    sat -= _N.max(sat)
                    condPos = _N.exp(sat)
                    q2_a_, q2_B_ = ig_prmsUV(q2x, sLLkPr, s, d_q2x, q2x_m1, ITER=1, nSpksM=nSpksM, clstr=m, l0=l0[m])

                    # sat = sLLkPr + s
                    # sat -= _N.max(sat)
                    # condPos = _N.exp(sat)
                    # q2_a_, q2_B_ = ig_prmsUV(q2x, condPos, d_q2x, q2x_m1, ITER=1)
                    q2[m] = _ss.invgamma.rvs(q2_a_ + 1, scale=q2_B_)  #  check


                    #q2[m] = 1.1**2

                    #print ((1./nSpks)*_N.sum((xt0t1[sts]-f)*(xt0t1[sts]-f)))

                    if q2[m] < 0:
                        print "********  q2[%(m)d] = %(q2).3f" % {"m" : m, "q2" : q2[m]}

                    smp_sp_prms[oo.ky_p_q2, iter, m]   = q2[m]
                    smp_sp_hyps[oo.ky_h_q2_a, iter, m] = q2_a_
                    smp_sp_hyps[oo.ky_h_q2_B, iter, m] = q2_B_
                    
                    if q2[m] < 0:
                        print "^^^^^^^^  q2[%(m)d] = %(q2).3f" % {"m" : m, "q2" : q2[m]}
                        print q2[m]
                        print smp_sp_prms[oo.ky_p_q2, 0:iter+1, m]
                    iiq2 = 1./q2[m]

                    #ttc1h = _tm.time()
                    

                #  nz clstr.  fixed width
                if oo.nzclstr:
                    nz_l0_exp_px   = _N.sum(nz_l0_intgrd*px) * dSilenceX
                    BL  = (oo.dt/_N.sqrt(twpi*q2[Mwowonz-1]))*nz_l0_exp_px

                    minds = len(_N.where(gz[iter, :, Mwowonz-1] == 1)[0])
                    l0_a_ = minds + _nz_l0_a
                    l0_B_ = BL    + _nz_l0_B

                    l0[Mwowonz-1]  = _ss.gamma.rvs(l0_a_, scale=(1/l0_B_)) 
                    smp_nz_l0[iter]       = l0[Mwowonz-1]
                    smp_nz_hyps[0, iter]  = l0_a_
                    smp_nz_hyps[1, iter]  = l0_B_
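                    #  Conjugate gamma update for the noise-cluster rate l0:
                    #  shape = (# spikes assigned to the noise cluster) + prior shape,
                    #  rate  = BL (expected spike count under the current path) + prior rate,
                    #  sampled with scipy's gamma using scale = 1/rate.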

            ttB = _tm.time()
            print (ttB-ttA)

            ###  THIS LEVEL:  Finished Gibbs iters for epoch
            gAMxMu.finish_epoch(oo, nSpks, epc, ITERS, gz, l0, f, q2, u, Sg, _f_u, _f_q2, _q2_a, _q2_B, _l0_a, _l0_B, _u_u, _u_Sg, _Sg_nu, _Sg_PSI, smp_sp_hyps, smp_sp_prms, smp_mk_hyps, smp_mk_prms, freeClstr, M, K)
            #  noise-cluster hyperparameters from the posterior samples (median over the last 30% of iterations)
            if oo.nzclstr:
                frm = int(0.7*ITERS)
                _nz_l0_a              = _N.median(smp_nz_hyps[0, frm:])
                _nz_l0_B              = _N.median(smp_nz_hyps[1, frm:])
            pcklme["smp_sp_hyps"] = smp_sp_hyps
            pcklme["smp_mk_hyps"] = smp_mk_hyps
            pcklme["smp_sp_prms"] = smp_sp_prms
            pcklme["smp_mk_prms"] = smp_mk_prms
            pcklme["sp_prmPstMd"] = oo.sp_prmPstMd
            pcklme["mk_prmPstMd"] = oo.mk_prmPstMd
            pcklme["intvs"]       = oo.intvs
            pcklme["occ"]         = gz
            pcklme["nz_pth"]         = nz_pth
            pcklme["M"]           = M
            pcklme["Mwowonz"]           = Mwowonz
            if Mwowonz > M:  # or oo.nzclstr == True
                pcklme["smp_nz_l0"]  = smp_nz_l0
                pcklme["smp_nz_hyps"]= smp_nz_hyps
                
            dmp = open(resFN("posteriors_%d.dmp" % epc, dir=oo.outdir), "wb")
            pickle.dump(pcklme, dmp, -1)
            dmp.close()
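
The per-epoch results pickled above can be reloaded for inspection. A minimal sketch, assuming the default output-directory layout and the key names written above; the file name below is only illustrative:

import pickle

# illustrative path; the method above builds it with resFN("posteriors_%d.dmp" % epc, dir=oo.outdir)
dmp = open("posteriors_0.dmp", "rb")
pcklme = pickle.load(dmp)
dmp.close()

smp_sp_prms = pcklme["smp_sp_prms"]   # spatial-parameter samples per Gibbs iteration
smp_mk_prms = pcklme["smp_mk_prms"]   # mark-space parameter samples per Gibbs iteration
print pcklme["M"], pcklme["Mwowonz"]  # cluster counts without / including the noise cluster
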
Example #6
0
    def fit(self, ITERS, M, x, _u_u=None, _u_Sg=None, _Sg_nu=None, _Sg_PSI=None, _ms_alp=None, u_0=None, Sg_0=None, ms_0=None):
        """
        Fit, with the inverting done in blocks
        """
        oo = self
        mdim   = x.shape[1]
        nSpks  = x.shape[0]

        u      = _N.empty((ITERS, M, mdim))
        Sg     = _N.empty((ITERS, M, mdim, mdim))
        ms     = _N.empty((ITERS, M, 1))
        xs     = _N.sort(x)

        if ms_0 is None:
            rats = _N.ones(M)/M
            rats += (1./M)*0.1*_N.random.randn(M)
            rats /= _N.sum(rats)
            ms[0, :, 0] = rats
        # else:
        #     ms[0, :, 0] = ms_0
        if u_0 is None:
            for m in xrange(M):
                u[0, m] = _N.mean(x, axis=0)
        # else:
        #     u[0] = u_0
        if Sg_0 is None:
            for m in xrange(M):
                Sg[0, m]  = _N.identity(mdim)
        # else:
        #     Sg[0] = Sg_0

        mAS  = x   #  position @ spikes
        mASr = mAS.reshape((nSpks, 1, mdim))

        gz   = _N.zeros((ITERS, nSpks, M), dtype=_N.bool)

        if _u_u is None:
            _u_u = _N.empty((M, mdim))
            for m in xrange(M):
                _u_u[m] = _N.array(u[0, m])
        if _u_Sg is None:
            _u_Sg = _N.empty((M, mdim, mdim))
            for m in xrange(M):
                _u_Sg[m] = _N.cov(x, rowvar=0)
        if _Sg_nu is None:
            _Sg_nu = _N.ones((M, 1))
        if _Sg_PSI is None:
            _Sg_PSI = _N.tile(_N.identity(mdim), M).T.reshape((M, mdim, mdim))*0.1
        if _ms_alp is None:
            _ms_alp = _N.ones(M)*(1. / M)
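        #  Defaults when no priors are supplied: component means centered at the
        #  overall data mean with the empirical covariance of x, an inverse-Wishart
        #  with nu = 1 and PSI = 0.1*I per component, and a symmetric Dirichlet(1/M)
        #  over the mixture weights.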

        #  temporary containers
        econt = _N.empty((M, nSpks))
        rat   = _N.zeros((M+1, nSpks))
        alp_  = _N.empty(M)
        qdrMKS = _N.empty((M, nSpks))

        # print ms[0, :, 0]
        # print f[0, :, 0]
        # print q2[0, :, 0]
        # print "^^^^^^^^"

        oo.u  = u
        oo.Sg = Sg
        oo.ms = ms
        oo.gz = gz

        #  initial values given for it == 0

        for it in xrange(1, ITERS):
            ur         = u[it-1].reshape((1, M, mdim))
            iSg        = _N.linalg.inv(Sg[it-1])

            zrs   = _N.where(ms[it-1] == 0)[0]
            ms[it-1, zrs, 0] = 1e-30
            lms   = _N.log(ms[it-1])

            mkNrms = _N.log(1/_N.sqrt(twpi*_N.linalg.det(Sg[it-1])))
            mkNrms = mkNrms.reshape((M, 1))

            rnds       = _N.random.rand(nSpks)
            dmu        = (mASr - ur)

            _N.einsum("nmj,mjk,nmk->mn", dmu, iSg, dmu, out=qdrMKS)
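            #  qdrMKS[m, n] = (x_n - u_m)^T inv(Sg_m) (x_n - u_m): the Mahalanobis
            #  quadratic form of every spike against every component in one einsum.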

            cont       = lms + mkNrms - 0.5*qdrMKS

            mcontr     = _N.max(cont, axis=0).reshape((1, nSpks))  
            cont       -= mcontr
            _N.exp(cont, out=econt)

            for m in xrange(M):
                rat[m+1] = rat[m] + econt[m]

            rat /= rat[M]

            M1 = rat[1:] >= rnds
            M2 = rat[0:-1] <= rnds

            gz[it] = (M1&M2).T

            #  prior for weights + likelihood used to sample
            #  _alp (prior) and alp_ posterior hyper

            _N.add(_ms_alp, _N.sum(gz[it], axis=0), out=alp_)

            ##############  SAMPLE WEIGHTS
            ms[it, :, 0] = _N.random.dirichlet(alp_)
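            #  Conjugate Dirichlet update: the posterior concentration alp_ is the
            #  prior _ms_alp plus the per-component occupation counts from gz[it].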

            for m in xrange(M):
                thisgr = _N.where(gz[it, :, m] == 1)[0]
                nSpksC  = len(thisgr)
                if nSpksC > mdim:
                    u_Sg_ = _N.linalg.inv(_N.linalg.inv(_u_Sg[m]) + nSpksC*iSg[m])
                    clstx    = mAS[thisgr]

                    mcs       = _N.mean(clstx, axis=0)
                    u_u_ = _N.einsum("jk,k->j", u_Sg_, _N.dot(_N.linalg.inv(_u_Sg[m]), _u_u[m]) + nSpksC*_N.dot(iSg[m], mcs))
                    u[it, m] = _N.random.multivariate_normal(u_u_, u_Sg_)

                    ####  sample component covariances
                    Sg_nu_ = _Sg_nu[m, 0] + nSpksC
                    ##  scale matrix (PSI) of posterior distribution of cluster covariance
                    ur = u[it, m].reshape((1, mdim))
                    Sg_PSI_ = _Sg_PSI[m] + _N.dot((clstx - ur).T, (clstx-ur))
                    Sg[it, m] = s_u.sample_invwishart(Sg_PSI_, Sg_nu_)
                else:
                    u[it, m]  = u[it-1, m]
                    Sg[it, m] = Sg[it-1, m]
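                #  Conjugate normal update for the component mean:
                #    u_Sg_ = inv(inv(_u_Sg[m]) + nSpksC*inv(Sg[m]))
                #    u_u_  = u_Sg_ . (inv(_u_Sg[m])._u_u[m] + nSpksC*inv(Sg[m]).xbar)
                #  followed by the inverse-Wishart draw for the covariance; components
                #  with nSpksC <= mdim keep their previous mean and covariance.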