    # init signal arrays, one value per frame interval:
    mot[name] = np.zeros(nframeintervals)
    con[name] = np.zeros(nframeintervals)
    dcon[name] = np.zeros(nframeintervals)
    lum[name] = np.zeros(nframeintervals)
    dlum[name] = np.zeros(nframeintervals)
    frame0 = frames[0] # init
    for i, frame1 in enumerate(frames[1:]):
        flow = cv2.calcOpticalFlowFarneback(frame0, frame1, pyr_scale, levels, winsize,
                                            iterations, poly_n, poly_sigma, flags)
        mag, ang = cv2.cartToPolar(flow[:, :, 0], flow[:, :, 1]) # mag is in pix/frame
        # average over entire vector flow field in space, convert from pix/frame to deg/sec:
        mot[name][i] = mag.mean() * degpermoviepix / dt
        con[name][i] = frame1.std()
        dcon[name][i] = frame1.std() - frame0.std()
        lum[name][i] = frame1.mean()
        dlum[name][i] = frame1.mean() - frame0.mean()
        frame0 = frame1 # update for next iteration

    motspars[name] = core.sparseness(mot[name])
    # this doesn't measure the actual frame display times, but averaged over all trials
    # and recordings, there's no reason for them to differ from how long dimstim was
    # told to display each frame:
    tmovie[name] = np.arange(1, len(frames)) * dt
    tmoviefilm[name] = np.arange(1, len(frames)) / MOVIEFRAMERATE

    if not PLOTMOVIESIGNALS:
        continue

    # plot motion:
    figure(figsize=FIGSIZE)
    #plot(frameis[1:], mot[name], 'k-', lw=1.5)
    #xlabel('frame index')
    plot(tmovie[name], mot[name], 'k-', lw=1.5)
    xlabel('time (s)')
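
A minimal sketch of what core.sparseness above might compute, assuming the Vinje &
Gallant (2000) definition of sparseness (0 for a flat signal, 1 for a signal
concentrated in a single bin); neuropy's actual implementation may differ:

import numpy as np

def sparseness(x):
    """Sparseness of a nonnegative 1D signal x (Vinje & Gallant, 2000):
    0 for a constant signal, 1 when all weight sits in a single bin."""
    x = np.asarray(x, dtype=float)
    n = len(x)
    sumsq = (x ** 2).sum()
    if n <= 1 or sumsq == 0:
        return 0.0  # convention: constant/empty signals aren't sparse
    return (1 - x.sum() ** 2 / (n * sumsq)) / (1 - 1 / n)

With this definition, motspars[name] approaches 1 when the movie's motion signal is
dominated by a few brief events, and 0 when motion is uniform across frames.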
Example #2
        lfpt, lfps = rec.tlfps(trange=strange, blank=BLANK, plot=False)
        muat, muas = rec.tmuas(trange=strange, blank=BLANK, plot=False) # Hz/unit
        TLFPs[rec.absname].append((lfpt, lfps))
        TMUAs[rec.absname].append((muat, muas))
        MAXMUA[rec.absname] = max(MAXMUA[rec.absname], muas.max())
        ntrials = len(lfps)
        assert ntrials == len(muas)
        for triali in range(ntrials):
            # measure reliability as correlation of each trial with mean of all others.
            # To exclude last sec of blankscreen in each trial, set BLANK=False:
            lfptrial, muatrial = lfps[triali], muas[triali]
            otheris = np.ones(ntrials, dtype=bool)
            otheris[triali] = False # exclude current trial
            LFPCORRS[statei].append(corrcoef(lfptrial, lfps[otheris].mean(axis=0)))
            MUACORRS[statei].append(corrcoef(muatrial, muas[otheris].mean(axis=0)))
            LFPSPARS[statei].append(sparseness(abs(lfptrial)))
            MUASPARS[statei].append(sparseness(muatrial))

for statei in range(2): # desynched, then synched
    LFPCORRS[statei] = np.asarray(LFPCORRS[statei]) # convert from list to array
    MUACORRS[statei] = np.asarray(MUACORRS[statei])
    LFPSPARS[statei] = np.asarray(LFPSPARS[statei])
    MUASPARS[statei] = np.asarray(MUASPARS[statei])

# plot LFP and MUA time series, plus mean and stdevs, and SNR time series:
for rec in urecs:
    print(rec.absname)
    # subplotting trickery from
    # http://stackoverflow.com/questions/22511550/gridspec-with-shared-axes-in-python
    lfpf = plt.figure(figsize=FIGSIZE)
    muaf = plt.figure(figsize=FIGSIZE)
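
rec.tlfps and rec.tmuas are neuropy-specific, but the reliability measure in the
trial loop above is generic. A minimal self-contained sketch, assuming a 2D
trials-by-time array and using numpy's corrcoef where the code above uses
neuropy's scalar corrcoef:

import numpy as np

def loo_reliability(trials):
    """One Pearson r per trial: correlate each trial with the mean of all
    other trials. trials: 2D array, one row per trial."""
    trials = np.asarray(trials, dtype=float)
    ntrials = len(trials)
    rs = np.empty(ntrials)
    for triali in range(ntrials):
        otheris = np.ones(ntrials, dtype=bool)
        otheris[triali] = False  # exclude current trial
        rs[triali] = np.corrcoef(trials[triali],
                                 trials[otheris].mean(axis=0))[0, 1]
    return rs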
Example #3
    peaknspikes.append(nspikes / ntrials) # nspikes of each detected peak per trial
    depth = rec.alln[nid].pos[1] # y position on polytrode, microns from top
    psthsdepths.append(np.tile([depth], npeaks))
    # calculate reliability of responsive PSTHs:
    cs = n2count[nid] # 2D array of spike counts over trial time, one row per trial
    rhos, weights = core.pairwisecorr(cs, weight=WEIGHT, invalid='ignore')
    # set rho to 0 for trial pairs with undefined rho (one or both trials with 0 spikes):
    nanis = np.isnan(rhos)
    rhos[nanis] = 0.0
    # for log plotting convenience, replace any mean rho < NULLREL with NULLREL:
    n2rel[nid] = np.mean(rhos)
    if n2rel[nid] < NULLREL:
        n2rel[nid] = NULLREL
        nreplacedbynullrel += 1
    # calculate sparseness of responsive PSTHs:
    n2sparseness[nid] = sparseness(psth)
    n2depth[nid] = depth
psthparamsrecsec.append(psthparams)
widthsrecsec.append(np.hstack(psthswidths))
tsrecsec.append(np.hstack(psthsts))
heightsrecsec.append(np.hstack(psthsheights))
peaknspikesrecsec.append(np.hstack(peaknspikes))
depthsrecsec.append(np.hstack(psthsdepths))
relsrecsec.append(n2rel)
sparsrecsec.append(n2sparseness)
neurondepthsrecsec.append(n2depth)
if statei % 2 == 1: # end of a recording
    # nids responsive in state 0 (desynch) & nonresponsive in state 1 (synch):
    RNR = np.intersect1d(rnids[0], nrnids[1])
    # nids nonresponsive in state 0 (desynch) & responsive in state 1 (synch):
    NRR = np.intersect1d(nrnids[0], rnids[1])
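
core.pairwisecorr is neuropy-specific; a minimal sketch of the unweighted case
(the weight=WEIGHT option is dropped here), returning Pearson's r for every pair
of trial rows. Pairs involving a zero-variance (e.g. 0-spike) trial come out NaN,
which the code above then zeroes:

import itertools
import numpy as np

def pairwisecorr(cs):
    """Pearson r for every pair of rows in a 2D trials x bins spike count
    array. Zero-variance rows yield NaN for all of their pairs."""
    cs = np.asarray(cs, dtype=float)
    with np.errstate(invalid='ignore'):  # silence 0/0 -> NaN warnings
        rhos = [np.corrcoef(cs[i], cs[j])[0, 1]
                for i, j in itertools.combinations(range(len(cs)), 2)]
    return np.asarray(rhos)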
Example #4
            assert len(psth) == len(tmua)
            # calculate reliability of this PSTH:
            cs = n2count[nid] # 2D array of spike counts over trial time, one row per trial
            rhos, weights = core.pairwisecorr(cs, weight=WEIGHT, invalid='ignore')
            # set rho to 0 for trial pairs with undefined rho (one or both trials
            # with 0 spikes):
            nanis = np.isnan(rhos)
            rhos[nanis] = 0.0
            # for log plotting convenience, replace any mean rho < NULLREL with NULLREL:
            rel = np.mean(rhos)
            if rel < NULLREL:
                rel = NULLREL
                nreplacedbynullrel += 1
            rels[statei].append(rel)
            # calculate sparseness of this PSTH:
            spars[statei].append(sparseness(psth))
            # calculate coupling of this PSTH with tMUA:
            coup = core.corrcoef(psth, tmua)
            coups[statei].append(coup)
        print()

for statei in stateis:
    rels[statei] = np.asarray(rels[statei])
    spars[statei] = np.asarray(spars[statei])
    coups[statei] = np.asarray(coups[statei])

# plot MUA coupling histogram:
dmean = coups[0].mean()
smean = coups[1].mean()
u, p = mannwhitneyu(coups[0], coups[1]) # 1-sided
if p < ALPHA:
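
The snippet cuts off at the significance check. A minimal sketch of the same
comparison with scipy on stand-in data; the one-sided direction and the ALPHA
value are assumptions here, not taken from the original script:

import numpy as np
from scipy.stats import mannwhitneyu

ALPHA = 0.05  # assumed significance threshold
rng = np.random.default_rng(0)
desynched = rng.random(100)       # stand-in for coups[0]
synched = rng.random(100) + 0.1   # stand-in for coups[1]
u, p = mannwhitneyu(desynched, synched, alternative='less')  # 1-sided
print('U=%.1f, p=%.2g, significant: %s' % (u, p, p < ALPHA))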
Example #5
                cs = n2count[nid] # 2D array of spike counts over trial time, one row per trial
                rhos, weights = core.pairwisecorr(cs, weight=WEIGHT, invalid='ignore')
                # set rho to 0 for trial pairs with undefined rho (one or both trials with
                # 0 spikes):
                nanis = np.isnan(rhos)
                rhos[nanis] = 0.0
                # for log plotting convenience, replace any mean rho < NULLREL with NULLREL:
                n2rel[state][nid] = np.mean(rhos)
                if n2rel[state][nid] < NULLREL:
                    n2rel[state][nid] = NULLREL
                    nreplacedbynullrel += 1
                # calculate sparseness of responsive PSTHs:
                n2spars[state][nid] = sparseness(psth)
                n2depth[state][nid] = depth
            print()
            # save measurements from this recording and movie:
            allrnids[state].append(rnids[state])
            allnrnids[state].append(nrnids[state])
            allrpsths[state].append(np.hstack(rpsths[state]))
            allpeaktimes[state].append(np.hstack(peaktimes[state]))
            allpeakwidths[state].append(np.hstack(peakwidths[state]))
            allpeakheights[state].append(np.hstack(peakheights[state]))
            allpeaknspikes[state].append(np.hstack(peaknspikes[state]))
            allpsthsdepths[state].append(np.hstack(psthsdepths[state]))
        alln2rel.append(n2rel)
        alln2spars.append(n2spars)
        alln2depth.append(n2depth)
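
The NaN-zeroing and NULLREL floor recur throughout these examples; a minimal
helper capturing the pattern (the NULLREL value here is an assumption, in the
scripts it is a module-level constant):

import numpy as np

NULLREL = 0.05  # assumed floor; keeps zero reliabilities visible on a log axis

def mean_reliability(rhos, nullrel=NULLREL):
    """Mean pairwise rho, with undefined (NaN) pairs counted as 0 and the
    result floored at nullrel for log-scale plotting."""
    rhos = np.where(np.isnan(rhos), 0.0, rhos)
    return max(np.mean(rhos), nullrel)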
Example #6
        # get PSTH for all nids over this strange:
        pstht, psths, spikets = rec.psth(nids=nids, natexps=False, blank=BLANK,
                                         strange=strange, plot=False, binw=BINW, tres=TRES,
                                         gauss=GAUSS, norm='ntrials')
        # iterate over units:
        for nid, psth in zip(nids, psths):
            # calculate reliability:
            cs = n2count[nid] # 2D array of spike counts over trial time, one row per trial
            rhos, weights = core.pairwisecorr(cs, weight=WEIGHT, invalid='ignore')
            # set rho to 0 for trial pairs with undefined rho (one or both trials with 0
            # spikes):
            nanis = np.isnan(rhos)
            rhos[nanis] = 0.0
            rels.append(np.mean(rhos))
            # calculate sparseness:
            spars.append(sparseness(psth))

sis = np.concatenate(sis)
rels = np.asarray(rels)
spars = np.asarray(spars)

# plot reliability vs SI:
figure(figsize=figsize)
plot(sis, rels, 'k.', ms=0.5)
xlabel('trial range SI')
ylabel('trial range reliability')
titlestr = ('SI_reliability_trials_trialwinwidth=%d_trialwintres=%d'
            % (TRIALWINWIDTH, TRIALWINTRES))
gcfm().window.setWindowTitle(titlestr)
tight_layout(pad=0.3)
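
rec.psth is neuropy-specific; a minimal sketch of the general idea, binning
pooled spike times and Gaussian-smoothing the result, normalized per trial.
The function name, signature, and default values are assumptions and only
loosely mirror the BINW/TRES/GAUSS/norm='ntrials' arguments above:

import numpy as np
from scipy.ndimage import gaussian_filter1d

def psth(spiketimes, trialdur, ntrials, binw=0.02, sigma=0.02):
    """Trial-averaged PSTH in spikes/s: histogram spike times (s, pooled
    over all trials) at binw (s) resolution over [0, trialdur), then smooth
    with a Gaussian of width sigma (s)."""
    edges = np.arange(0.0, trialdur + binw, binw)
    counts, _ = np.histogram(spiketimes, bins=edges)
    rate = counts / (binw * ntrials)  # normalize by bin width and trial count
    return gaussian_filter1d(rate, sigma / binw)

Sparseness can then be computed on the returned PSTH just as in the loop above.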