def calc(self):
    """Calculate spike time differences (in us) between self.n0 and self.n1 spike trains"""
    t0 = time.time()
    dts = util.xcorr(self.n0.spikes, self.n1.spikes, trange=self.trange) # in us
    print('xcorr calc took %.3f sec' % (time.time()-t0))
    self.dts = np.array(dts)
    if self.autocorr:
        self.dts = self.dts[self.dts != 0] # remove 0s for autocorr
    return self
the resulting mean CCH is a result of some kind of causal regularity as a function of cell
depth. To do this properly, when shuffling, should really generate lots of mean CCHs with
shuffled nids and average them. That would guarantee no asymmetry in the super-averaged CCH:
"""

np.random.shuffle(nids)
nn = len(nids)
binw = 2000 # us
trange = np.array([-100000, 100000]) # us
bins = np.arange(trange[0], trange[1]+binw, binw)
hists = []
for nii0 in range(nn):
    for nii1 in range(nii0+1, nn):
        spikes0 = rec.n[nids[nii0]].spikes
        spikes1 = rec.n[nids[nii1]].spikes
        dts = util.xcorr(spikes0, spikes1, trange) # spike time differences in us
        hist = np.histogram(dts, bins=bins)[0]
        # if we don't normalize, we treat our confidence in the CCHs of cell pairs as
        # proportional to the firing rates of cell pairs, which may be the optimal thing to do:
        #hist = hist / hist.sum() # pmf: normalize so that sum of each hist is 1
        hists.append(hist)
#hists = np.vstack(hists).sum(axis=0)
hists = np.vstack(hists).mean(axis=0)

figure()
bar(bins[:-1]/1000, hists, width=binw/1000)
xlim(trange/1000)
xlabel('time (ms)')
ylabel('mean cross-correlation histogram across all pairs')
title(rec.name)
tight_layout(pad=0.3)
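
# A possible way to implement the repeated-shuffle averaging suggested in the docstring
# above: wrap the pair loop in a helper and average its mean CCH over many independent
# shuffles of nids, so that each pair contributes in both directions about equally often.
# Sketch only: the helper name mean_cch and the value of nshuffles are assumptions, and
# rec, nids, bins, binw and trange are reused from the script above.
def mean_cch(rec, nids, bins, trange):
    """Mean CCH across all pairs of nids, in the current (shuffled) nid order"""
    nn = len(nids)
    hists = []
    for nii0 in range(nn):
        for nii1 in range(nii0+1, nn):
            spikes0 = rec.n[nids[nii0]].spikes
            spikes1 = rec.n[nids[nii1]].spikes
            dts = util.xcorr(spikes0, spikes1, trange) # spike time differences in us
            hists.append(np.histogram(dts, bins=bins)[0])
    return np.vstack(hists).mean(axis=0)

nshuffles = 20 # arbitrary; more shuffles -> more symmetric super-averaged CCH
superhists = []
for shufflei in range(nshuffles):
    np.random.shuffle(nids) # reshuffle in place before every pass
    superhists.append(mean_cch(rec, nids, bins, trange))
superhist = np.vstack(superhists).mean(axis=0) # should be ~symmetric about dt=0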
farrdtype = [('nid0', np.int64), ('nid1', np.int64),
             ('f0', np.float64), ('f1', np.float64), ('df', np.float64)]

# convert RP to half trange array, in us:
trange = np.array([0, RP]) # only need to work on one half of the autocorrelogram

for tracki, (track, c) in enumerate(zip(tracks, colours)):
    print(track.absname)
    neurons = track.alln # dict
    nids = sorted(neurons) # sorted keys
    nn = len(nids)
    # precalculate nrpvs and f values for reuse in pair loop:
    nrpvs = np.zeros(nn, dtype=np.int64) # number of refractory period violations, per nid
    fs = np.zeros(nn, dtype=np.float64) # f values, per nid
    for nidi, nid in enumerate(nids):
        s = neurons[nid].spikes # should be sorted
        dts = util.xcorr(s, s, trange)
        dts = dts[dts != 0] # remove 0s for autocorr
        nrpvs[nidi] = len(dts)
        nISI = len(s) - 1
        fs[nidi] = nrpvs[nidi] / nISI
    maxnpairs = nCr(nn, 2) # maximum number of pairs, actual will be less given REXCL
    farr = np.zeros(maxnpairs, dtype=farrdtype)
    pairi = 0
    for nidi0 in range(nn):
        for nidi1 in range(nidi0+1, nn): # for all neuron pairs
            nid0 = nids[nidi0]
            nid1 = nids[nidi1]
            n0, n1 = neurons[nid0], neurons[nid1]
            if core.dist(n0.pos, n1.pos) <= REXCL:
                continue # skip this pair, they're too close together
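
# For reference, a numpy-only sketch of the per-neuron refractory period violation (RPV)
# measure f computed above, assuming util.xcorr(s, s, trange=[0, RP]) returns all positive
# spike time differences <= RP. The function name rpv_fraction is illustrative, not part of
# the original code, and strictly increasing spike times are assumed (so dt == 0 never occurs).
import numpy as np

def rpv_fraction(s, RP):
    """Number of spike pairs separated by <= RP, divided by the number of ISIs,
    matching fs[nidi] above. RP is in the same time units as s."""
    s = np.sort(np.asarray(s))
    # for each spike i, index of the first spike later than s[i] + RP:
    hi = np.searchsorted(s, s + RP, side='right')
    nrpv = int((hi - np.arange(1, len(s) + 1)).sum()) # later spikes within (0, RP] of each spike
    nISI = len(s) - 1
    return nrpv / nISI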
def cch(self, nid0, nid1=None, trange=50, binw=None, shift=None, nshifts=10,
        rate=False, norm=False, c='k', title=True, figsize=(7.5, 6.5)):
    """Copied from Recording.cch(). Plot cross-correlation histogram given nid0 and nid1.
    If nid1 is None, calculate autocorrelogram. +/- trange and binw are in ms. If shift
    (in ms) is set, calculate the average of +/- nshifts CCHs shifted by shift, and then
    subtract that from the unshifted CCH to get the shift-corrected CCH"""
    if nid1 is None:
        nid1 = nid0
    autocorr = nid0 == nid1
    n0 = self.alln[nid0]
    n1 = self.alln[nid1]
    calctrange = trange * 1000 # calculation trange, in us
    if shift:
        assert nshifts > 0
        shift *= 1000 # convert to us
        maxshift = nshifts * shift
        calctrange += maxshift # expand calculated trange (in us) to encompass shifts
    calctrange = np.array([-calctrange, calctrange]) # convert to a +/- array, in us
    dts = util.xcorr(n0.spikes, n1.spikes, calctrange) # in us
    if autocorr:
        dts = dts[dts != 0] # remove 0s for autocorr
    if shift: # calculate dts for shift corrector
        shiftis = list(range(-nshifts, nshifts+1))
        shiftis.remove(0) # don't shift by 0, that's the original which we'll subtract from
        shifts = np.asarray(shiftis) * shift
        shiftdts = np.hstack([dts + s for s in shifts]) # in us
        print('shifts =', shifts / 1000)
    if not binw:
        nbins = intround(np.sqrt(len(dts))) # good heuristic
        nbins = max(20, nbins) # enforce min nbins
        nbins = min(200, nbins) # enforce max nbins
    else:
        nbins = intround(2 * trange / binw)
    dts = dts / 1000 # in ms, converts to float64 array
    t = np.linspace(start=-trange, stop=trange, num=nbins+1, endpoint=True) # ms
    binw = t[1] - t[0] # all should be equal width, ms
    n = np.histogram(dts, bins=t, density=False)[0]
    if shift: # subtract shift corrector
        shiftdts = shiftdts / 1000 # in ms, converts to float64 array
        shiftn = np.histogram(shiftdts, bins=t, density=False)[0] / (nshifts*2)
        f = pl.figure(figsize=figsize)
        a = f.add_subplot(111)
        a.bar(t[:-1], shiftn, width=binw, align='edge') # left edges; omit last right edge in t
        a.set_xlim(t[0], t[-1])
        a.set_xlabel('spike interval (ms)')
        n = n - shiftn # converts n to float64 array
    if norm: # normalize and convert to float:
        n = n / n.max()
    elif rate: # normalize by binw and convert to float:
        n = n / binw
    f = pl.figure(figsize=figsize)
    a = f.add_subplot(111)
    a.bar(t[:-1], n, width=binw, align='edge', color=c, ec=c) # omit last right edge in t
    a.set_xlim(t[0], t[-1])
    a.set_xlabel('spike interval (ms)')
    if norm:
        a.set_ylabel('coincidence rate (AU)')
        a.set_yticks([0, 1])
    elif rate:
        a.set_ylabel('coincidence rate (Hz)')
    else:
        a.set_ylabel('count')
    if title:
        a.set_title('spike times of n%d wrt n%d' % (n1.id, n0.id))
    wtitlestr = lastcmd() # + ', binw=%.1f ms' % binw
    gcfm().window.setWindowTitle(wtitlestr)
    f.tight_layout(pad=0.3) # crop figure to contents
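
# Standalone sketch of the shift correction described in the docstring above, on synthetic
# random (uniform) spike trains, using plain numpy instead of util.xcorr. Purely illustrative:
# the helper xcorr_dts, the parameter values, and shifting the spike train (rather than the
# precomputed dts) are assumptions, not the original implementation.
import numpy as np

def xcorr_dts(s0, s1, trange):
    """All spike time differences s1 - s0 that fall within trange (same units as spikes)"""
    dts = (s1[None, :] - s0[:, None]).ravel() # brute force, fine for short trains
    return dts[(dts >= trange[0]) & (dts <= trange[1])]

rng = np.random.default_rng(0)
s0 = np.sort(rng.uniform(0, 10e6, 2000)) # 2000 spike times over 10 s, in us
s1 = np.sort(rng.uniform(0, 10e6, 2000))
trange = np.array([-50000, 50000]) # +/- 50 ms, in us
bins = np.linspace(trange[0], trange[1], 101)
shift, nshifts = 100000, 5 # shift step in us, number of +/- shifts

raw = np.histogram(xcorr_dts(s0, s1, trange), bins=bins)[0]
shifted = [np.histogram(xcorr_dts(s0, s1 + k*shift, trange), bins=bins)[0]
           for k in range(-nshifts, nshifts+1) if k != 0]
corrected = raw - np.mean(shifted, axis=0) # shift-corrected CCH, ~flat for independent trains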
# convert RP to half trange array, in us:
trange = np.array([0, RP]) # only need to work on one half of the autocorrelogram

for track in tracks:
    print(track.absname)
    neurons = track.alln # dict
    nids = sorted(neurons) # sorted keys
    nn = len(nids)
    # precalculate nrpvs and f values for reuse in pair loop:
    nrpvs = np.zeros(nn, dtype=np.int64) # number of refractory period violations, per nid
    fs = np.zeros(nn, dtype=np.float64) # f values, per nid
    for nidi, nid in enumerate(nids):
        s = neurons[nid].spikes # should be sorted
        dts = util.xcorr(s, s, trange)
        dts = dts[dts != 0] # remove 0s for autocorr
        nrpvs[nidi] = len(dts)
        nISI = len(s) - 1
        fs[nidi] = nrpvs[nidi] / nISI
    fss.append(fs)
    maxnpairs = nCr(nn, 2) # maximum number of pairs, actual will be less given REXCL
    farr = np.zeros(maxnpairs, dtype=farrdtype)
    pairi = 0
    for nidi0 in range(nn):
        for nidi1 in range(nidi0+1, nn): # for all neuron pairs
            nid0 = nids[nidi0]
            nid1 = nids[nidi1]
            n0, n1 = neurons[nid0], neurons[nid1]