def chooseTrials(setname, chosen, scopyNum, N):
    """Copy a subset of trials out of setname's xprbsdN.dat into a new set.

    Parameters
    ----------
    setname : source data-set directory name (under resFN root).
    chosen  : sequence of trial indices to keep.
    scopyNum: suffix appended to setname to form the copy's directory name.
    N       : number of time samples (rows) per trial.

    Side effects: writes xprbsdN.dat and chosen.dat into the new directory
    and saves two diagnostic figures (spikes per trial, log-scale ISI hist).
    Returns nothing.
    """
    dat = _N.loadtxt(resFN("xprbsdN.dat", dir=setname))
    # 4 columns per trial in the source file (x, filtered LFP, spikes, phase
    # — presumably; column 2 is treated as the spike indicator below).
    cdat = _N.empty((N, 4 * len(chosen)))
    phs = []  # phases at spike times; collected but not saved or returned
    for ct in xrange(len(chosen)):
        # copy the 4 columns of the chosen trial into slot ct of the copy
        cdat[:, 4 * ct] = dat[:, 4 * chosen[ct]]
        cdat[:, 4 * ct + 1] = dat[:, 4 * chosen[ct] + 1]
        cdat[:, 4 * ct + 2] = dat[:, 4 * chosen[ct] + 2]
        cdat[:, 4 * ct + 3] = dat[:, 4 * chosen[ct] + 3]
        # spike times of this trial (column 2 == 1) and their phases (col 3)
        ts = _N.where(cdat[:, 2 + 4 * ct] == 1)[0]
        phs.extend(cdat[ts, 3 + 4 * ct])
    csetname = "%(sn)s-%(cn)s" % {"sn": setname, "cn": scopyNum}
    _N.savetxt(resFN("xprbsdN.dat", dir=csetname, create=True), cdat, fmt=("% .1f % .1f %d %.3f " * len(chosen)))
    _N.savetxt(resFN("chosen.dat", dir=csetname, create=True), _N.array(chosen), fmt="%d")
    # spikes-per-trial diagnostic over the copied trials
    fig = _plt.figure(figsize=(13, 4))
    _plt.plot(_N.sum(cdat[:, 2::4], axis=0), marker=".", ms=10)
    _plt.xticks()
    _plt.grid()
    _plt.savefig(resFN("spksPtrl", dir=csetname, create=True))
    _plt.close()
    cols, TR4 = cdat.shape  # NOTE(review): names swapped — cols is N, TR4 is 4*len(chosen)
    TR = TR4 / 4  # Python 2 integer division: number of copied trials
    isis = []
    for tr in xrange(TR):
        # NOTE(review): reads `dat` (the ORIGINAL file's first TR trials),
        # not `cdat` (the chosen trials) — looks like it should be cdat; confirm.
        sts = _N.where(dat[:, 2 + tr * 4] == 1)
        isis.extend(_N.diff(sts[0]).tolist())
    fig = _plt.figure(figsize=(4, 4))
    _plt.hist(isis, bins=_N.linspace(0, 600, 301))
    _plt.grid()
    _plt.yscale("log")
    _plt.savefig(resFN("ISI-log-scale", dir=csetname))
    _plt.close()
def plotQ2(setdir, baseFN, burn, NMC, TR0, TR1, smp_q2, hilite=None):
    """Plot per-trial innovation-variance (q2) sample traces and their means.

    Parameters
    ----------
    setdir  : result directory name passed to resFN.
    baseFN  : filename prefix for the saved figures.
    burn, NMC : burn-in and MCMC iteration counts; the bar plot averages the
                last 100 samples (burn + NMC - 100 onward).
    TR0, TR1 : trial index range; smp_q2 row i corresponds to trial TR0 + i.
    smp_q2  : 2-D array (trials x iterations) of q2 samples.
    hilite  : optional list of row indices (relative to TR0) to draw bold;
              these are the trials expected to be weakly modulated.

    Saves "<baseFN>_q2_smps.png" and "<baseFN>_sqr_q2_smps.png"; returns nothing.
    """
    fig = _plt.figure(figsize=(8.5, 4.2))
    # Fixes vs. original: `hilite != None` -> `is not None`; the
    # try/except ValueError around hilite.index(...) (whose result was
    # unused) is replaced by a plain membership test; the duplicated
    # savefig/close in both branches is hoisted out.
    if hilite is not None:
        # NOTE(review): this branch plots sqrt(q2) while the no-hilite branch
        # plots raw q2 — preserved as-is from the original; confirm intent.
        for tr in range(TR0, TR1):
            if (tr - TR0) in hilite:
                _plt.plot(_N.sqrt(smp_q2[tr - TR0]), lw=2.5, color="black")
            else:
                _plt.plot(_N.sqrt(smp_q2[tr - TR0]), lw=1.5, color="grey")
    else:
        for tr in range(TR0, TR1):
            _plt.plot(smp_q2[tr - TR0], lw=1.5, color="black")
    _plt.savefig(resFN("%s_q2_smps.png" % baseFN, dir=setdir))
    _plt.close()

    # bar chart of per-trial posterior-mean sqrt(q2) over the last 100 samples
    fig = _plt.figure(figsize=(8.5, 4.2))
    _plt.bar(range(TR0, TR1), _N.sqrt(_N.mean(smp_q2[:, burn + NMC - 100:], axis=1)), align="center")
    _plt.xlim(TR0 - 0.5, TR1 - 0.5)
    _plt.savefig(resFN("%s_sqr_q2_smps.png" % baseFN, dir=setdir))
    _plt.close()
def loadDat(self, trials):  ################# loadDat
    """Load xprbsdN.dat for this set, window it to [t0, t1), and keep only
    the requested trials that contain at least one spike.

    Parameters
    ----------
    trials : list of trial indices to use, or None for all trials.

    Sets (on self): N, TR, useTrials, x, y, (fx, px for real data), kp, rn,
    smpx, ws, lrn, us, mean_isi_1st2spks, l2.  Returns nothing.
    """
    oo = self
    bGetFP = False
    x_st_cnts = _N.loadtxt(resFN("xprbsdN.dat", dir=oo.setname))
    y_ch = 2  # spike channel
    p = _re.compile("^\d{6}")  # starts like "exptDate-....."
    m = p.match(oo.setname)
    bRealDat = True
    dch = 4  # # of data columns per trial
    if m == None:  # not real data
        bRealDat, dch = False, 3
    else:
        flt_ch, ph_ch, bGetFP = 1, 3, True  # Filtered LFP, Hilb Trans
    TR = x_st_cnts.shape[1] / dch  # number of trials will get filtered
    # If I only want to use a small portion of the data
    oo.N = x_st_cnts.shape[0] - 1
    if oo.t1 == None:
        oo.t1 = oo.N + 1  # meaning of N changes here
    N = oo.t1 - 1 - oo.t0
    # de-interleave the per-trial columns into (trials x time) arrays
    x = x_st_cnts[oo.t0:oo.t1, ::dch].T
    y = x_st_cnts[oo.t0:oo.t1, y_ch::dch].T
    if bRealDat:
        fx = x_st_cnts[oo.t0:oo.t1, flt_ch::dch].T
        px = x_st_cnts[oo.t0:oo.t1, ph_ch::dch].T
    #### Now keep only trials that have spikes
    kpTrl = range(TR)
    if trials is None:
        # NOTE(review): reads oo.TR here, but oo.TR is only assigned further
        # below — relies on it being set elsewhere (e.g. __init__); confirm.
        trials = range(oo.TR)
    oo.useTrials = []
    for utrl in trials:
        try:
            ki = kpTrl.index(utrl)
            if _N.sum(y[utrl, :]) > 0:
                oo.useTrials.append(ki)
        except ValueError:
            print "a trial requested to use will be removed %d" % utrl
    ###### oo.y are for trials that have at least 1 spike
    oo.y = _N.array(y[oo.useTrials], dtype=_N.int)
    oo.x = _N.array(x[oo.useTrials])
    if bRealDat:
        oo.fx = _N.array(fx[oo.useTrials])
        oo.px = _N.array(px[oo.useTrials])
    # INITIAL samples
    if TR > 1:
        mnCt= _N.mean(oo.y, axis=1)  # NOTE(review): mnCt is unused below
    else:
        mnCt= _N.array([_N.mean(oo.y)])
    # remove trials where data has no information
    rmTrl = []  # NOTE(review): also unused
    oo.kp = oo.y - 0.5  # Bernoulli "kappa" for Polya-Gamma-style augmentation — presumably; verify
    oo.rn = 1
    oo.TR = len(oo.useTrials)
    oo.N = N
    oo.smpx = _N.zeros((oo.TR, oo.N + 1))  # start at 0 + u
    oo.ws = _N.empty((oo.TR, oo.N+1), dtype=_N.float)
    oo.lrn = _N.empty((oo.TR, oo.N+1))
    oo.us = _N.zeros(oo.TR)
    # mean gap between the first two spikes, over trials with > 2 spikes
    tot_isi = 0
    nisi = 0
    for tr in xrange(oo.TR):
        spkts = _N.where(oo.y[tr] == 1)
        if len(spkts[0]) > 2:
            nisi += 1
            tot_isi += spkts[0][1] - spkts[0][0]
    # NOTE(review): ZeroDivisionError if no trial has > 2 spikes
    oo.mean_isi_1st2spks = float(tot_isi) / nisi
    ##### LOAD spike history
    oo.l2 = loadL2(oo.setname, fn=oo.histFN)
    if oo.l2 is None:
        oo.lrn[:] = 1  # no history filter: flat spike-history term
    else:
        # assume ISIs near beginning of data are exponentially
        # distributed estimate
        for tr in xrange(oo.TR):
            oo.lrn[tr] = oo.build_lrnLambda2(tr)
def fitGLM(self):
    """Fit a Poisson GLM of spiking on spike history and plot the slow
    (oscillatory-lag) history filter with its confidence band.

    Uses self.dat (time x columns, self.COLS per trial), windows
    [t0, t1) x [startTR, endTR).  History covariates: the last LHbin bins
    individually ("short" history) plus nLHBins coarse sums of LHbin bins
    each ("long" history), newest first.

    Returns (est, X, y): the fitted statsmodels results, the design
    matrix (TR x Ldf x nparams), and the response (TR x Ldf).
    Saves one figure of exp(long-history coefficients) vs lag.
    """
    oo = self
    N, TR = oo.dat.shape
    if oo.t1 - oo.t0 > N:
        print "ERROR t1-t0 > N"
        return
    if oo.endTR - oo.startTR > TR:
        print "ERROR endTR-startTR > TR"
        return
    # spike columns (offset 2 within each COLS-wide trial block)
    st = oo.dat[oo.t0:oo.t1, oo.COLS * oo.startTR + 2:oo.COLS * oo.endTR + 2:oo.COLS]
    N = oo.t1 - oo.t0
    TR = oo.endTR - oo.startTR
    # The design matrix
    # # of params LHBin + nLHBins + 1
    # Ldf: usable time points after dropping the first LHbin*(nLHBins+1)
    # samples needed to form a full history window.
    Ldf = N - oo.LHbin * (oo.nLHBins + 1)
    X = _N.empty((TR, Ldf, oo.LHbin + oo.nLHBins + 1))
    X[:, :, 0] = 1  # offset
    y = _N.empty((TR, Ldf))
    for tr in xrange(TR):
        for t in xrange(oo.LHbin * (oo.nLHBins + 1), N):  # 0:9
            # history window ending just before t, reversed so index 0 is
            # the most recent bin
            hist = st[t - oo.LHbin * (oo.nLHBins + 1):t, tr][::-1]
            sthcts = hist[0:oo.LHbin]  # per-bin recent counts
            # coarse sums over the older part of the window
            lthcts = _N.sum(hist[oo.LHbin:oo.LHbin * (oo.nLHBins + 1)].reshape(
                oo.nLHBins, oo.LHbin), axis=1)
            X[tr, t - oo.LHbin * (oo.nLHBins + 1), 1:oo.LHbin + 1] = sthcts
            X[tr, t - oo.LHbin * (oo.nLHBins + 1), oo.LHbin + 1:] = lthcts
            y[tr, t - oo.LHbin * (oo.nLHBins + 1)] = st[t, tr]
    # flatten trials x time into one observation axis for the GLM
    yr = y.reshape(TR * Ldf)
    Xr = X.reshape(TR * Ldf, oo.LHbin + oo.nLHBins + 1)
    est = _sm.GLM(yr, Xr, family=_sm.families.Poisson()).fit()
    oo.offs = est.params[0]
    oo.shrtH = est.params[1:oo.LHbin + 1]   # short-history coefficients
    oo.oscH = est.params[oo.LHbin + 1:]     # long-history coefficients
    cfi = est.conf_int()
    oscCI = cfi[oo.LHbin + 1:]
    fig = _plt.figure(figsize=(12, 6))
    # lag axis (ms, assuming 1 ms bins — TODO confirm) for the coarse bins
    xlab = _N.arange(oo.LHbin, (oo.nLHBins + 1) * oo.LHbin, oo.LHbin)
    # exp() turns log-rate coefficients into multiplicative gain; 1 = no effect
    _plt.fill_between(xlab, _N.exp(oscCI[:, 0]), _N.exp(oscCI[:, 1]), color="blue", alpha=0.2)
    _plt.plot(xlab, _N.exp(oo.oscH), lw=2, color="black")
    _plt.xticks(xlab, fontsize=20)
    _plt.yticks(fontsize=20)
    _plt.xlabel("lags (ms)", fontsize=22)
    _plt.xlim(xlab[0], xlab[-1])
    _plt.axhline(y=1, ls="--", color="grey")
    fig.subplots_adjust(left=0.1, bottom=0.15, top=0.94)
    _plt.savefig(
        resFN(
            "glmfit_LHBins=%(LHBins)d_%(binsz)d_strt=%(trS)d_%(trE)d_t0=%(t0)d_t1=%(t1)d" % {
                "trS": oo.startTR,
                "trE": oo.endTR,
                "t0": oo.t0,
                "t1": oo.t1,
                "LHBins": oo.nLHBins,
                "binsz": oo.LHbin
            },
            dir=oo.setname))
    _plt.close()
    return est, X, y
def __init__(self, setname, COLS=3):
    """Bind the data set and load its xprbsdN.dat matrix.

    Parameters
    ----------
    setname : data-set directory name (resolved via resFN).
    COLS    : number of data columns per trial in the file (default 3).
    """
    self.setname = setname
    self.COLS = COLS
    self.dat = _N.loadtxt(resFN("xprbsdN.dat", dir=self.setname))
def create(setname):
    """Generate a Gaussian-observation data set: per trial, two fluctuating
    oscillations plus observation noise, written to gauobs.dat.

    Parameters
    ----------
    setname : output data-set directory name (also the name of the script
              copied into it for provenance).

    Relies on module-level globals (N, TR, Bf, Ba, Bf_1, Ba_1, f0, f0_1,
    f0VAR_1, amp*, dSA*, dSF*, obsNz, ...).  Returns the (N, 3*TR) data array.
    """
    copyfile(
        "%s.py" % setname,
        # NOTE(review): the "to" dict entry is never referenced by the format
        # string; the setFN call is kept only for its create-directory side
        # effect — presumably; verify.
        "%(s)s/%(s)s.py" % {
            "s": setname,
            "to": setFN("%s.py" % setname, dir=setname, create=True)
        })
    global dt, lambda2, rpsth, isis, us, TR, etme, f0VAR
    nColumns = 3  # per trial: oscillation 1, oscillation 2, noisy observation
    alldat = _N.empty((N, TR * nColumns))
    for tr in xrange(TR):
        if f0VAR is None:
            f0VAR = _N.zeros(TR)
        # calibrate the std of each AR smoothing filter's output so the
        # fluctuation amplitude is comparable across filters
        # (loop-invariant, but kept inside the loop as in the original to
        # preserve the random-number stream)
        sig = 0.1
        x, y = createDataAR(100000, Bf, sig, sig)
        stdf = _N.std(
            x)  # choice of 4 std devs to keep phase monotonically increasing
        x, y = createDataAR(100000, Ba, sig, sig)
        stda = _N.std(
            x)  # choice of 4 std devs to keep phase monotonically increasing
        x, y = createDataAR(100000, Bf_1, sig, sig)
        stdf_1 = _N.std(
            x)  # choice of 4 std devs to keep phase monotonically increasing
        x, y = createDataAR(100000, Ba_1, sig, sig)
        stda_1 = _N.std(
            x)  # choice of 4 std devs to keep phase monotonically increasing
        x = createFlucOsc(
            f0, _N.array([f0VAR[tr]]), N, dt, 1, Bf=Bf, Ba=Ba, amp=amp,
            amp_nz=amp_nz, stdf=stdf, stda=stda, sig=sig, smoothKer=5,
            dSA=dSA, dSF=dSF
        )  # sig is arbitrary, but we need to keep it same as when stdf, stda measured
        x_1 = createFlucOsc(
            f0_1, _N.array([f0VAR_1[tr]]), N, dt, 1, Bf=Bf_1, Ba=Ba_1,
            amp=amp_1, amp_nz=amp_nz_1, stdf=stdf_1, stda=stda_1, sig=sig,
            smoothKer=5, dSA=dSA_1, dSF=dSF_1
        )  # sig is arbitrary, but we need to keep it same as when stdf, stda measured
        # observation = sum of the two oscillations plus white noise
        y = x[0, :] + x_1[0, :] + obsNz * _N.random.randn(N)
        alldat[:, tr * nColumns] = x
        alldat[:, tr * nColumns + 1] = x_1
        alldat[:, tr * nColumns + 2] = y
    # FIX: the original used "%.3f %.3f %.3f" * TR, which fuses the last
    # column of one trial with the first of the next ("...%.3f%.3f...")
    # and produces a file loadtxt cannot parse for TR > 1.  A trailing
    # space per trial group (as in the sibling generators' fmt strings)
    # keeps every column whitespace-separated.
    fmtstr = "%.3f %.3f %.3f " * TR
    _N.savetxt(resFN("gauobs.dat", dir=setname), alldat, fmt=fmtstr)
    return alldat
def plotFigs(setname, N, k, burn, NMC, x, y, Bsmpx, smp_u, smp_q2, _t0, _t1,
             Cs, Cn, C, baseFN, TR, tr, bRealDat=False, ID_q2=False):
    """Diagnostic plots for one trial of an MCMC run: (1) sample traces of
    q2, u and (synthetic data only) the per-iteration correlation between
    the sampled latent path and the true latent x; (2) the posterior-mean
    latent path overlaid on the true path with spike rasters.

    Parameters (key ones)
    ---------------------
    x, y    : true latent (trials x time) and spike trains (trials x time).
    Bsmpx   : latent-path samples, indexed [trial, iteration, 2 + time]
              (the +2 offset matches the slicing below — TODO confirm why).
    smp_u, smp_q2 : per-trial sample traces of u and q2.
    tr      : which trial to plot.  bRealDat: skip truth-based panels.
    _t0, _t1, Cs, Cn, C : accepted but unused in this function.

    Saves "<baseFN>*_smps.png" and "<baseFN>*_infer.png"; returns nothing.
    """
    if not bRealDat:
        # correlation of each iteration's sampled path with the truth
        pcs = _N.zeros(burn + NMC)
        offs = 0
        for i in range(1, burn + NMC):
            pc, pv = _ss.pearsonr(Bsmpx[tr, i, 2 + offs:], x[tr, offs:])
            pcs[i] = pc
    fig = _plt.figure(figsize=(8.5, 3 * 4.2))
    fig.add_subplot(3, 1, 1)
    _plt.plot(smp_q2[tr], lw=1.5, color="black")
    _plt.ylabel("q2")
    fig.add_subplot(3, 1, 2)
    _plt.plot(smp_u[tr], lw=1.5, color="black")
    _plt.ylabel("u")
    if not bRealDat:
        fig.add_subplot(3, 1, 3)
        _plt.plot(range(1, burn + NMC), pcs[1:], color="black", lw=1.5)
        _plt.axhline(y=0, lw=1, ls="--", color="blue")
        _plt.ylabel("CC")
        _plt.xlabel("ITER")
    fig.subplots_adjust(left=0.15)
    if (ID_q2 == True) and (TR > 1):
        _plt.savefig(
            resFN("%(bf)s_tr=%(tr)d_smps.png" % {
                "bf": baseFN,
                "tr": tr
            }, dir=setname, create=True))
    else:
        _plt.savefig(resFN("%s_smps.png" % baseFN, dir=setname, create=True))
    _plt.close()
    # posterior mean of the latent path over post-burn-in samples
    msmpx = _N.mean(Bsmpx[tr, burn:, 2:], axis=0)
    # y-range from the truth, ignoring the first 50 samples (transient)
    MINx = min(x[tr, 50:])
    MAXx = max(x[tr, 50:])
    AMP = MAXx - MINx
    ht = 0.08 * AMP
    ys1 = MINx - 0.5 * ht  # spike-raster tick top
    ys2 = MINx - 3 * ht    # spike-raster tick bottom
    fig = _plt.figure(figsize=(14.5, 3.3))
    if not bRealDat:
        pc2, pv2 = _ss.pearsonr(msmpx[offs:], x[tr, offs:])
    else:
        pc2 = 0
    _plt.plot(x[tr], color="black", lw=2)
    _plt.plot(msmpx, color="red", lw=1.5)
    for n in range(N + 1):
        if y[tr, n] == 1:
            _plt.plot([n, n], [ys1, ys2], lw=1.5, color="blue")
    _plt.ylim(ys2 - 0.05 * AMP, MAXx + 0.05 * AMP)
    _plt.xticks(fontsize=20)
    _plt.yticks(fontsize=20)
    _plt.grid()
    _plt.title("avg. smpx AR%(k)d %(pc).2f # spks %(n)d" % {
        "k": k,
        "pc": pc2,
        "n": _N.sum(y[tr])
    })
    fig.subplots_adjust(left=0.05, right=0.95, bottom=0.15, top=0.85)
    if TR > 1:
        _plt.savefig(
            resFN("%(s)s_tr=%(tr)d_infer.png" % {
                "s": baseFN,
                "tr": tr
            }, dir=setname, create=True))
    else:
        _plt.savefig(resFN("%s_infer.png" % baseFN, dir=setname, create=True))
    _plt.close()
def plotARcomps(setname, N, k, burn, NMC, fs, amps, _t0, _t1, Cs, Cn, C,
                baseFN, TR, tr, bRealDat=False):
    """Plot AR-component diagnostics: per-iteration time series of component
    frequencies and amplitudes (signal components [0, Cs) and noise
    components [Cs, C) in separate figures), then post-burn-in histograms
    of every component's frequency and amplitude.

    Parameters (key ones)
    ---------------------
    fs, amps : (iterations x components) sampled frequencies / amplitudes.
    Cs, C    : number of signal components; total number of components.
    N, k, _t0, _t1, Cn, TR, tr, bRealDat : accepted but unused here.

    Saves "<baseFN>_compsTS_{SG,NZ}.png" and "<baseFN>_comps_SG.png";
    returns nothing.
    """
    for i in range(2):
        # pass 0: signal ("SG") components [0, Cs); pass 1: noise ("NZ") [Cs, C)
        c0 = 0
        c1 = Cs
        sSN = "SG"
        if i == 1:
            c0 = Cs
            c1 = C
            sSN = "NZ"
        rows = 2
        fig = _plt.figure(figsize=(9, rows * 3.5))
        ax1 = fig.add_subplot(2, 1, 1)
        if i == 0:
            _plt.ylim(0, 0.2)
            #_plt.ylim(0, 1.)
        if i == 1:
            _plt.ylim(0, 1.)
        _plt.ylabel("freqs")
        _plt.grid()
        ax2 = fig.add_subplot(2, 1, 2)
        _plt.ylim(0, 1)
        _plt.ylabel("amps")
        _plt.xlabel("MCMC iter")
        # NOTE: `cmp` shadows the Python 2 builtin; kept as-is
        clrs = [
            "black", "blue", "green", "red", "orange", "gray", "brown",
            "purple", "pink", "cyan"
        ]
        for cmp in range(c0, c1):
            lw = 1.5
            if cmp == 0:
                lw = 3.5  # emphasize the first component
            ax1.plot(fs[:, cmp], lw=lw, color=clrs[cmp % 10])
            ax2.plot(amps[:, cmp], lw=lw, color=clrs[cmp % 10])
        _plt.grid()
        fig.subplots_adjust(top=0.97, bottom=0.08, left=0.11, right=0.96,
                            hspace=0.18, wspace=0.18)
        _plt.savefig(
            resFN("%(bF)s_compsTS_%(sSN)s.png" % {
                "bF": baseFN,
                "sSN": sSN
            }, dir=setname, create=True))
        _plt.close()
        # posterior histograms over all C components (re-done each pass;
        # pass 1's file overwrites pass 0's — presumably harmless; verify)
        rows = C
        fig = _plt.figure(figsize=(9, rows * 3.5))
        for cmp in range(C):
            ax1 = _plt.subplot2grid((rows, 2), (cmp, 0), colspan=1)
            _plt.hist(fs[burn:, cmp], bins=40, color=clrs[cmp % 10])
            _plt.title("comp %d" % cmp)
            if cmp == C - 1:
                _plt.xlabel("freqs")
            ax2 = _plt.subplot2grid((rows, 2), (cmp, 1), colspan=1)
            _plt.hist(amps[burn:, cmp], bins=40, color=clrs[cmp % 10])
            if cmp == C - 1:
                _plt.xlabel("amps")
        fig.subplots_adjust(top=0.97, bottom=0.05, left=0.06, right=0.96,
                            hspace=0.21, wspace=0.21)
        _plt.savefig(
            resFN("%(bF)s_comps_%(sSN)s.png" % {
                "bF": baseFN,
                "sSN": sSN
            }, dir=setname, create=True))
        _plt.close()
# modulation histogram. phase @ spike #setname="080402-2-3-theta" setname="tim_n8" p = _re.compile("^\d{6}") # starts like "exptDate-....." m = p.match(setname) bRealDat = False COLS = 3 if m == None: bRealDat = False COLS = 3 dat = _N.loadtxt(resFN("xprbsdN.dat", dir=setname)) N, cols = dat.shape TR = cols / COLS M = 3 # mult by datM = _N.zeros((N/M, cols)) missingSpikes = 0 for tr in xrange(TR): for n in xrange(N/M): iFound = 0 for i in xrange(M): if (dat[M*n + i, COLS*tr + 2] == 1): iFound += 1 if iFound > 0:
def loadDat(self, trials):  ################# loadDat
    """Load the data set for this model (bernoulli: xprbsdN.dat; otherwise
    cnt_data.dat), window it to [t0, t1), keep requested trials with at
    least one spike, and compute the initial natural parameter u per model
    (binomial / negative binomial / bernoulli).

    Parameters
    ----------
    trials : list of trial indices to use, or None for all trials.

    Sets (on self): N, TR, useTrials, x, y, (fx, px for real data), kp,
    rn (bernoulli), dt, u, l2.  Returns nothing.
    """
    oo = self
    bGetFP = False
    if oo.model == "bernoulli":
        x_st_cnts = _N.loadtxt(resFN("xprbsdN.dat", dir=oo.setname))
        y_ch = 2  # spike channel
        p = _re.compile("^\d{6}")  # starts like "exptDate-....."
        m = p.match(oo.setname)
        bRealDat = True
        dch = 4  # # of data columns per trial
        if m == None:  # not real data
            bRealDat = False
            dch = 3
        else:
            flt_ch = 1  # Filtered LFP
            ph_ch = 3  # Hilbert Trans
            bGetFP = True
    else:
        # count-data models read a 2-column-per-trial file
        x_st_cnts = _N.loadtxt(resFN("cnt_data.dat", dir=oo.setname))
        y_ch = 1  # spks
        dch = 2
    TR = x_st_cnts.shape[1] / dch  # number of trials will get filtered
    # If I only want to use a small portion of the data
    n0 = oo.t0
    oo.N = x_st_cnts.shape[0] - 1
    if oo.t1 == None:
        oo.t1 = oo.N + 1
    # meaning of N changes here
    N = oo.t1 - 1 - oo.t0
    if TR == 1:
        x = x_st_cnts[oo.t0:oo.t1, 0]
        y = x_st_cnts[oo.t0:oo.t1, y_ch]
        fx = x_st_cnts[oo.t0:oo.t1, 0]
        px = x_st_cnts[oo.t0:oo.t1, y_ch]
        x = x.reshape(1, oo.t1 - oo.t0)
        y = y.reshape(1, oo.t1 - oo.t0)
        # NOTE(review): these overwrite fx/px with reshaped x/y — almost
        # certainly meant fx.reshape / px.reshape (and fx/px above look like
        # they should use flt_ch/ph_ch, not columns 0/y_ch) — verify.
        fx = x.reshape(1, oo.t1 - oo.t0)
        px = y.reshape(1, oo.t1 - oo.t0)
    else:
        x = x_st_cnts[oo.t0:oo.t1, ::dch].T
        y = x_st_cnts[oo.t0:oo.t1, y_ch::dch].T
        if bRealDat:
            fx = x_st_cnts[oo.t0:oo.t1, flt_ch::dch].T
            px = x_st_cnts[oo.t0:oo.t1, ph_ch::dch].T
    #### Now keep only trials that have spikes
    kpTrl = range(TR)
    if trials is None:
        # NOTE(review): oo.TR is read before it is assigned below — relies
        # on it being set elsewhere (e.g. __init__); confirm.
        trials = range(oo.TR)
    oo.useTrials = []
    for utrl in trials:
        try:
            ki = kpTrl.index(utrl)
            if _N.sum(y[utrl, :]) > 0:
                oo.useTrials.append(ki)
        except ValueError:
            print "a trial requested to use will be removed %d" % utrl
    ###### oo.y are for trials that have at least 1 spike
    oo.y = _N.array(y[oo.useTrials])
    oo.x = _N.array(x[oo.useTrials])
    if bRealDat:
        oo.fx = _N.array(fx[oo.useTrials])
        oo.px = _N.array(px[oo.useTrials])
    # INITIAL samples
    if TR > 1:
        mnCt = _N.mean(oo.y, axis=1)
    else:
        mnCt = _N.array([_N.mean(oo.y)])
    # remove trials where data has no information
    rmTrl = []  # NOTE(review): unused
    if oo.model == "binomial":
        oo.kp = oo.y - oo.rn * 0.5
        p0 = mnCt / float(oo.rn)  # matches 1 - p of genearted
        u = _N.log(p0 / (1 - p0))  # -1*u generated
    elif oo.model == "negative binomial":
        oo.kp = (oo.y - oo.rn) * 0.5
        p0 = mnCt / (mnCt + oo.rn)  # matches 1 - p of genearted
        u = _N.log(p0 / (1 - p0))  # -1*u generated
    else:
        # bernoulli: u initialized from the empirical spike rate
        # (grouping of the following statements under this else is inferred
        # from statement order in the collapsed source — TODO confirm)
        oo.kp = oo.y - 0.5
        oo.rn = 1
        oo.dt = 0.001
        logdt = _N.log(oo.dt)
        if TR > 1:
            ysm = _N.sum(oo.y, axis=1)
            u = _N.log(ysm / ((N + 1 - ysm) * oo.dt)) + logdt
        else:  # u is a vector here
            u = _N.array([
                _N.log(_N.sum(oo.y) / ((N + 1 - _N.sum(oo.y)) * oo.dt)) + logdt
            ])
    oo.u = _N.array(u[oo.useTrials])
    oo.TR = len(oo.useTrials)
    ####
    oo.l2 = loadL2(oo.setname, fn=oo.histFN)
    # NOTE(review): the triple-quote below opens a module-level string that
    # comments out the code that follows; it is closed further down the file.
    """
def create(setname): global lowQs, lowQpc copyfile( "%s.py" % setname, "%(s)s/%(s)s.py" % { "s": setname, "to": setFN("%s.py" % setname, dir=setname, create=True) }) ARcoeff = _N.empty((nRhythms, 2)) for n in xrange(nRhythms): ARcoeff[n] = (-1 * _Npp.polyfromroots(alfa[n])[::-1][1:]).real V = _N.empty(N) dV = _N.empty(N) dN = _N.zeros(N) xprbsdN = _N.empty((N, 3 * TR)) isis = [] lowQs = [] spksPT = _N.empty(TR) for tr in xrange(TR): V[0] = 0.2 * _N.random.randn() eps = bcksig * _N.random.randn(N) # time series err = nzs[0, 1] if _N.random.rand() < lowQpc: err = nzs[0, 0] lowQs.append(tr) sTs = [] x, y = createDataAR(N, ARcoeff[0], err, obsnz, trim=trim) dN[:] = 0 for n in xrange(N - 1): dV[n] = -V[n] / tau + eps[n] + Is[tr] + x[n] + psth[n] V[n + 1] = V[n] + dV[n] * dt if V[n + 1] > thr: V[n + 1] = rst dN[n] = 1 sTs.append(n) spksPT[tr] = len(sTs) xprbsdN[:, tr * 3] = x xprbsdN[:, tr * 3 + 1] = V xprbsdN[:, tr * 3 + 2] = dN isis.extend(_U.toISI([sTs])[0]) fmt = "% .2e %.3f %d " * TR _N.savetxt(resFN("xprbsdN.dat", dir=setname, create=True), xprbsdN, fmt=fmt) plotWFandSpks( N - 1, dN, [x], sTitle="AR2 freq %(f).1fHz num spks %(d).0f spk Hz %(spkf).1fHz" % { "f": (500 * ths[0] / _N.pi), "d": _N.sum(dN), "spkf": (_N.sum(dN) / (N * 0.001)) }, sFilename=resFN("generative", dir=setname)) #plotWFandSpks(N-1, dN, [x], sTitle="AR2 freq %(f).1fHz num spks %(d).0f spk Hz %(spkf).1fHz" % {"f" : (500*ths[0]), "d" : _N.sum(dN), "spkf" : (_N.sum(dN) / (N*0.001))}, sFilename=resFN("generative", dir=setname)) fig = _plt.figure(figsize=(8, 4)) _plt.hist(isis, bins=range(100), color="black") _plt.grid() _plt.savefig(resFN("ISIhist", dir=setname)) _plt.close() cv = _N.std(spksPT)**2 / _N.mean(spksPT) fig = _plt.figure(figsize=(13, 4)) _plt.plot(spksPT, marker=".", color="black", ms=8) _plt.ylim(0, max(spksPT) * 1.1) _plt.grid() _plt.suptitle("avg. Hz %(hz).1f cv=%(cv).2f" % { "hz": (_N.mean(spksPT) / (N * 0.001)), "cv": cv }) _plt.savefig(resFN("spksPT", dir=setname)) _plt.close() """
def concat_smps(exptDir, runDir, xticks=None, ylimAmp=None):
    """Concatenate the sequentially-numbered pickle dumps smpls-1.dump,
    smpls-2.dump, ... of one MCMC run along the iteration axis, write the
    result to Csmpls.dump, and plot frequencies/amplitudes.

    Parameters
    ----------
    exptDir, runDir : path components under the resFN root.
    xticks, ylimAmp : forwarded to mF.plotFsAmpDUMP.

    Returns nothing.
    """
    lms = []
    i = 0
    done = False
    totalIter = 0
    its = []  # iteration count contributed by each dump file
    while not done:
        i += 1
        fn = resFN("%(ed)s/%(rd)s/smpls-%(i)d.dump" % {
            "ed": exptDir,
            "rd": runDir,
            "i": i
        })
        if not os.access(fn, os.F_OK):
            # first missing index terminates the scan
            print "couldn't open %s" % fn
            done = True
        else:
            with open(fn, "rb") as f:
                lm = pickle.load(f)
                lms.append(lm)
                totalIter += lm["allalfas"].shape[0]
                its.append(lm["allalfas"].shape[0])
    # Allocate concatenated arrays, sized off the LAST loaded dump `lm`.
    # NOTE(review): if smpls-1.dump is missing, `lm` is never bound and this
    # raises NameError; also `ws`, `smpx`, `B`, `Hbf` are taken from the last
    # chunk only rather than concatenated — presumably intentional; verify.
    allalfas = _N.empty((totalIter, lm["allalfas"].shape[1]), dtype=_N.complex)
    q2 = _N.empty((lm["q2"].shape[0], totalIter))
    h_coeffs = _N.empty((lm["h_coeffs"].shape[0], totalIter))
    Hbf = lm["Hbf"]
    spkhist = _N.empty((lm["spkhist"].shape[0], totalIter))
    aS = _N.empty((totalIter, lm["aS"].shape[1]))
    fs = _N.empty((totalIter, lm["fs"].shape[1]))
    ws = lm["ws"]
    mnStds = _N.empty(totalIter)
    amps = _N.empty((totalIter, lm["fs"].shape[1]))
    smpx = lm["smpx"]
    us = _N.empty((lm["u"].shape[0], totalIter))
    B = lm["B"]
    it = 0
    for i in xrange(len(its)):
        # copy chunk i into its iteration slot [it0, it1)
        it0 = it
        it1 = it0 + its[i]
        it += it1 - it0
        allalfas[it0:it1] = lms[i]["allalfas"]
        q2[:, it0:it1] = lms[i]["q2"]
        h_coeffs[:, it0:it1] = lms[i]["h_coeffs"]
        spkhist[:, it0:it1] = lms[i]["spkhist"]
        aS[it0:it1] = lms[i]["aS"]
        fs[it0:it1] = lms[i]["fs"]
        mnStds[it0:it1] = lms[i]["mnStds"]
        amps[it0:it1] = lms[i]["amps"]
        us[:, it0:it1] = lms[i]["u"]
    concat_pkl = {}
    concat_pkl["allalfas"] = allalfas
    concat_pkl["q2"] = q2
    concat_pkl["h_coeffs"] = h_coeffs
    concat_pkl["Hbf"] = Hbf
    concat_pkl["spkhist"] = spkhist
    concat_pkl["aS"] = aS
    concat_pkl["fs"] = fs
    concat_pkl["ws"] = ws
    concat_pkl["mnStds"] = mnStds
    concat_pkl["amps"] = amps
    concat_pkl["smpx"] = smpx
    concat_pkl["u"] = us
    concat_pkl["B"] = B
    concat_pkl["t0_is_t_since_1st_spk"] = lms[0]["t0_is_t_since_1st_spk"]
    fn = resFN("%(ed)s/%(rd)s/Csmpls.dump" % {"ed": exptDir, "rd": runDir})
    dmp = open(fn, "wb")
    pickle.dump(concat_pkl, dmp, -1)  # -1: highest pickle protocol
    dmp.close()
    ofn = resFN("%(ed)s/%(rd)s/fs_amps" % {"ed": exptDir, "rd": runDir})
    mF.plotFsAmpDUMP(concat_pkl, totalIter, 0, xticks=xticks, yticksFrq=None,
                     yticksMod=None, yticksAmp=None, fMult=2, dir=None,
                     fn=ofn, ylimAmp=ylimAmp)
def create(setname):
    """Generate a synthetic spiking data set from a leaky-integrate-and-fire
    neuron driven by a PSTH current, a per-trial DC input, an oscillatory
    current (AR(2)-generated or sine-based, per bGenOscUsingAR /
    bGenOscUsingSines), and a slow up/down modulation of threshold.

    Writes currentsPSTH/Is/UpDn/Osc.dat, xprbsdN.dat, optional
    lowQs.dat / psthTyps.dat, and several diagnostic figures into setname.
    Relies almost entirely on module-level globals.  Returns nothing.
    """
    global lowQs, lowQpc, f0VAR, oscCS, etme, xmultLo, updn, buff, thsVAR, a, B, Is, psthThisTrial, nPSTHs, psthTypsPc
    # copy the generator script into the set directory for provenance;
    # the "to" dict entry is unused by the format string (setFN kept for its
    # directory-creation side effect — presumably; verify)
    copyfile("%s.py" % setname, "%(s)s/%(s)s.py" % {"s" : setname, "to" : setFN("%s.py" % setname, dir=setname, create=True)})
    if etme is None:
        etme = _N.ones((TR, N))  # per-trial amplitude envelope (default flat)
    if updn is None:
        updn = _N.zeros((TR, N+buff))  # default: no up/down threshold shift
    if bGenOscUsingAR:
        # complex-conjugate AR(2) roots per trial and rhythm
        alfa = _N.empty((TR, nRhythms, 2), dtype=_N.complex)
        if thsVAR is None:
            thsVAR = _N.zeros((TR, nRhythms))
        for tr in xrange(TR):
            for nr in xrange(nRhythms):
                th = ths[nr] + thsVAR[tr, nr]
                alfa[tr, nr, 0] = rs[nr]*(_N.cos(th) + 1j*_N.sin(th))
                alfa[tr, nr, 1] = rs[nr]*(_N.cos(th) - 1j*_N.sin(th))
        ARcoeff = _N.empty((TR, nRhythms, 2))
        for tr in xrange(TR):
            for n in xrange(nRhythms):
                # AR coefficients from the characteristic polynomial roots
                ARcoeff[tr, n] = (-1*_Npp.polyfromroots(alfa[tr, n])[::-1][1:]).real
    elif bGenOscUsingSines:
        if f0VAR is None:
            f0VAR = _N.zeros(TR)
        # calibrate smoothing-filter output stds for createFlucOsc
        sig = 0.1
        x, y = createDataAR(100000, Bf, sig, sig)
        stdf = _N.std(x)   # choice of 4 std devs to keep phase monotonically increasing
        x, y = createDataAR(100000, Ba, sig, sig)
        stda = _N.std(x)   # choice of 4 std devs to keep phase monotonically increasing
    if oscCS is None:   # coupling strength of osc. given
        oscCS = _N.ones(TR)
    V = _N.empty(N+buff)    # membrane potential (buff warm-up samples prepended)
    dV = _N.empty(N+buff)
    dN = _N.zeros(N+buff)   # spike indicator
    xprbsdN = _N.empty((N, 3*TR))   # per trial: osc, V, spikes
    isis = []
    bGivenLowQs = True
    bGivenPSTHbyTrial = True
    if lowQs is None:
        lowQs = []      # low-quality trials will be drawn randomly below
        bGivenLowQs = False
    if (psthTypsPc is None) or (nPSTHs == 1):
        # single PSTH shape for all trials
        bGivenPSTHbyTrial = False
        psthThisTrial = _N.zeros(TR, dtype=_N.int)
        nknts = a.shape[0]
    else:
        # assign each trial one of several PSTH types with probabilities
        # psthTypsPc (crat = cumulative probabilities)
        crat = _N.zeros(len(psthTypsPc)+1)
        for ityp in xrange(len(psthTypsPc)):
            crat[ityp+1] = crat[ityp] + psthTypsPc[ityp]
        nknts = a.shape[1]
        psthThisTrial = _N.empty(TR, dtype=_N.int)
        for tr in xrange(TR):
            rnd = _N.random.rand()
            typ = _N.where((rnd > crat[0:-1]) & (rnd < crat[1:]))[0][0]
            psthThisTrial[tr] = typ
    spksPT = _N.empty(TR)   # spike count per trial
    currentsPSTH = _N.empty((N+buff, TR))
    currentsOsc = _N.empty((N+buff, TR))
    currentsUpDn = _N.empty((N+buff, TR))   # slow up-downs
    for tr in xrange(TR):
        V[0] = 0.1*_N.random.randn()
        eps = bcksig*_N.random.randn(N+buff)   # time series
        sTs = []   # spike times (relative to end of warm-up buffer)
        if bGenOscUsingAR:
            err = nzs[0, 1]
            if not bGivenLowQs:   # randomly gen. trials which are lowQs
                if _N.random.rand() < lowQpc:
                    err = nzs[0, 0]
                    lowQs.append(tr)
            else:
                try:
                    lowQs.index(tr)
                    err = nzs[0, 0]
                except ValueError:
                    pass
            # draw `cand` candidate AR realizations, keep the most powerful
            maxPwr = 0
            for i in xrange(cand):
                x, y = createDataAR(N, ARcoeff[tr, 0], err, obsnz, trim=trim)
                x *= etme[tr]
                pwr = _N.sum(x**2)
                if pwr > maxPwr:
                    xBest = x
                    maxPwr = pwr
        elif bGenOscUsingSines:
            xmult = 1
            if not bGivenLowQs:   # randomly gen. trials which are lowQs
                if _N.random.rand() < lowQpc:
                    xmult = xmultLo
                    lowQs.append(tr)
            else:
                try:
                    lowQs.index(tr)
                    xmult = xmultLo
                except ValueError:
                    pass
            x = xmult*createFlucOsc(f0, _N.array([f0VAR[tr]]), N, dt, 1, Bf=Bf, Ba=Ba, amp=amp, amp_nz=amp_nz, stdf=stdf, stda=stda, sig=sig, smoothKer=5, dSA=dSA, dSF=dSF) * etme[tr]   # sig is arbitrary, but we need to keep it same as when stdf, stda measured
            _xBest = x[0]
        # NOTE(review): _xBest is only assigned in the sines branch, yet it is
        # consumed unconditionally below; in the AR branch the selected xBest
        # is overwritten — looks broken for bGenOscUsingAR; verify.
        dN[:] = 0
        _psth = _N.empty((nPSTHs, N))
        psth = _N.empty((nPSTHs, N+buff))
        for np in xrange(nPSTHs):
            if nPSTHs == 1:
                _psth[np] = _N.dot(B, a + aNz*_N.random.randn(nknts))
            else:
                # NOTE(review): this first dot product is discarded AND burns
                # an extra randn draw before the identical line below —
                # almost certainly an accidental duplicate; verify.
                _N.dot(B, a[np] + aNz*_N.random.randn(nknts))
                _psth[np] = _N.dot(B, a[np] + aNz*_N.random.randn(nknts))
            psth[np] = _N.empty(N+buff)   # NOTE(review): immediately overwritten below
            psth[np, 0:buff] = _psth[np, 0]   # pad warm-up with the initial value
            psth[np, buff:] = _psth[np]
        xBest = _N.empty(N+buff)
        xBest[0:buff] = 0#_xBest[0:buff][::-1]
        xBest[buff:] = _xBest
        #etmeBUF = _N.ones((TR, N+buff))
        #etmeBUF[:, buff:] = etme
        # Euler-integrate the LIF dynamics; spike + reset when V (plus the
        # up/down offset) crosses threshold
        for n in xrange(N-1+buff):
            currentsPSTH[n, tr] = psth[psthThisTrial[tr], n]
            currentsOsc[n, tr] = xBest[n]
            currentsUpDn[n, tr] = updn[tr, n]
            dV[n] = -V[n] / tau + eps[n] + currentsPSTH[n, tr] + Is[tr] + oscCS[tr]*currentsOsc[n, tr]
            V[n+1] = V[n] + dV[n]*dt
            if V[n+1] + updn[tr, n] > thr:
                V[n + 1] = rst
                dN[n] = 1
                if n > buff:
                    sTs.append(n-buff)   # record only post-warm-up spikes
        spksPT[tr] = len(sTs)
        xprbsdN[:, tr*3] = xBest[buff:]
        xprbsdN[:, tr*3 + 1] = V[buff:]
        xprbsdN[:, tr*3 + 2] = dN[buff:]
        isis.extend(_U.toISI([sTs])[0])
    fmt = "%.3f " * TR
    _N.savetxt(resFN("currentsPSTH.dat", dir=setname, create=True), currentsPSTH[buff:], fmt=fmt)
    fmt = "%.3f "
    _N.savetxt(resFN("currentsIs.dat", dir=setname, create=True), Is, fmt=fmt)
    fmt = "%.3f " * TR
    _N.savetxt(resFN("currentsUpDn.dat", dir=setname, create=True), currentsUpDn[buff:], fmt=fmt)
    fmt = "%.3f " * TR
    _N.savetxt(resFN("currentsOsc.dat", dir=setname, create=True), currentsOsc[buff:], fmt=fmt)
    if bGivenLowQs:
        fmt = "%d"
        _N.savetxt(resFN("lowQs.dat", dir=setname, create=True), lowQs, fmt=fmt)
    if bGivenPSTHbyTrial:
        fmt = "%d"
        _N.savetxt(resFN("psthTyps.dat", dir=setname, create=True), psthThisTrial, fmt=fmt)
    fmt = "% .2e %.3f %d " * TR
    _N.savetxt(resFN("xprbsdN.dat", dir=setname, create=True), xprbsdN, fmt=fmt)
    if bGenOscUsingAR:
        plotWFandSpks(N-1, dN[buff:], [xBest[buff:]], sTitle="AR2 freq %(f).1fHz num spks %(d).0f spk Hz %(spkf).1fHz" % {"f" : (500*ths[0]/_N.pi), "d" : _N.sum(dN[buff:]), "spkf" : (_N.sum(dN[buff:]) / (N*0.001))}, sFilename=resFN("generative", dir=setname))
    else:
        plotWFandSpks(N-1, dN[buff:], [xBest[buff:]], sTitle="", sFilename=resFN("generative", dir=setname))
    fig = _plt.figure(figsize=(8, 4))
    _plt.hist(isis, bins=range(100), color="black")
    _plt.grid()
    # NOTE(review): this ISIhist figure is overwritten by the two-panel
    # ISIhist saved at the end of this function
    _plt.savefig(resFN("ISIhist", dir=setname))
    _plt.close()
    # Fano-factor-like spread of spike counts across trials
    cv = _N.std(spksPT)**2/_N.mean(spksPT)
    fig = _plt.figure(figsize=(13, 4))
    _plt.plot(spksPT, marker=".", color="black", ms=8)
    _plt.ylim(0, max(spksPT)*1.1)
    _plt.grid()
    _plt.suptitle("avg. Hz %(hz).1f cv=%(cv).2f" % {"hz" : (_N.mean(spksPT) / (N*0.001)), "cv" : cv})
    _plt.savefig(resFN("spksPT", dir=setname))
    _plt.close()
    cv = _N.std(isis) / _N.mean(isis)   # coefficient of variation of ISIs
    #fig = _plt.figure(figsize=(7, 3.5))
    fig, ax = _plt.subplots(figsize=(7, 8))
    _plt.subplot(2, 1, 1)
    _plt.hist(isis, bins=range(0, 50), color="black")
    _plt.grid()
    _plt.subplot(2, 1, 2)
    _plt.hist(isis, bins=range(0, ISItmscl), color="black")
    _plt.yscale("log")
    _plt.grid()
    _plt.suptitle("ISI cv %.2f" % cv)
    _plt.savefig(resFN("ISIhist", dir=setname))
    _plt.close()