# NOTE(review): scraped fragment — the enclosing function/loop begins before
# this excerpt, so `epoch`, `behavior_performance(_all)`, `ra`, `rp`, `rec`,
# `options`, `time_bins`, `site`, `batch`, `dfs`, `di`, `diall` come from
# upstream code not visible here. L1 has also lost its leading indentation
# in the scrape.
# NOTE(review): .strip('TAR_').strip('CAT_') removes any of the CHARACTERS
# 'T','A','R','_','C' from both ends of the string — it does NOT remove the
# literal prefix 'TAR_'/'CAT_'. If prefix removal is the intent,
# str.removeprefix (Python 3.9+) is the correct call — TODO confirm the
# epoch names make this accidental usage harmless.
diref = behavior_performance['DI'][epoch.strip('TAR_').strip('CAT_')]
                direfall = behavior_performance_all['DI'][epoch.strip('TAR_').strip('CAT_')]
            else:
                # no behavioral DI defined for this epoch type; use inf sentinel
                di = np.inf
                diall = np.inf
            # convert each time window (seconds) into sample indices at the
            # recording's raster sampling rate
            for tb in time_bins:
                sidx = int(tb[0] * options['rasterfs']) 
                eidx = int(tb[1] * options['rasterfs']) 
                # per-epoch responses sliced to this window, restricted to
                # the active (ra) vs. passive (rp) masks
                da = {k: r[:, :, sidx:eidx] for k, r in rec['resp'].extract_epochs([epoch], mask=ra['mask']).items()}
                dp = {k: r[:, :, sidx:eidx] for k, r in rec['resp'].extract_epochs([epoch], mask=rp['mask']).items()}

                # noise correlations (rsc) per state; rename so active/passive
                # results can sit side-by-side in one frame
                dfa = nc.compute_rsc(da, chans=rec['resp'].chans).rename(columns={'rsc': 'active', 'pval': 'pa'})
                dfp = nc.compute_rsc(dp, chans=rec['resp'].chans).rename(columns={'rsc': 'passive', 'pval': 'pp'})
                df = pd.concat([dfa, dfp], axis=1)

                # annotate rows with stimulus / behavior / recording metadata
                df['snr'] = thelp.get_snrs([epoch_str])[0]
                df['f'] = thelp.get_tar_freqs([epoch])[0]
                df['tbin'] = '_'.join([str(t) for t in tb])
                df['DI'] = di
                df['DIall'] = diall
                df['DIref'] = diref
                df['DIrefall'] = direfall
                df['site'] = site
                # presumably batches 324/302/307 are A1 recordings and the
                # rest are PEG — verify against the experiment's batch table
                if batch in [324, 302, 307]: area='A1'
                else: area='PEG'
                df['area'] = area
                df['batch'] = batch

                dfs.append(df)

# ---- Example #2 (scraped snippet boundary; original rating: 0) ----
                # NOTE(review): scraped fragment — `dat`, `cellid`, `fpath`,
                # `pairs`, `behavior_performance` are built by upstream code
                # not visible here; the excerpt is also cut mid-statement at
                # the end.
                # cast spike counts to float so kdeplot treats them as
                # continuous values (they arrive as object/str from np.stack)
                dat = dat.astype({'Spike Count': 'float32'})
            # KDE of spike-count distributions, one panel per behavioral
            # state, colored by epoch; save per-cell figure
            g = sns.FacetGrid(dat, col='state', hue='Epoch')
            g.map(sns.kdeplot, "Spike Count", fill=True)
            g.add_legend(frameon=False)
            g.fig.suptitle(cellid)
            g.fig.set_size_inches(8, 4)
            g.fig.set_tight_layout(True)
            g.fig.savefig(fpath + f'{cellid}_tarDist.pdf')
            plt.close('all')

            _df = pd.DataFrame()
            for pair in pairs:
                # SNR is undefined for pure STIM/REFERENCE epochs; use inf
                if ('STIM_' in pair[0]) | ('REFERENCE' in pair[0]):
                    snr1 = np.inf
                else:
                    snr1 = thelp.get_snrs([pair[0]])[0]
                if ('STIM_' in pair[1]) | ('REFERENCE' in pair[1]):
                    snr2 = np.inf
                else:
                    snr2 = thelp.get_snrs([pair[1]])[0]

                # NOTE(review): .strip('STIM_') strips the character set
                # {S,T,I,M,_} from both ends, not the literal prefix 'STIM_'
                # — str.removeprefix is likely the intended call; confirm.
                if ('REFERENCE' in pair[0]): f1 = 0
                else: f1 = thelp.get_tar_freqs([pair[0].strip('STIM_')])[0]
                if 'REFERENCE' in pair[1]: f2 = 0
                else: f2 = thelp.get_tar_freqs([pair[1].strip('STIM_')])[0]

                # get behavioral DI (discrimination index) for the
                # target/catch member of a reference-vs-target pair
                # NOTE(review): same strip-vs-removeprefix concern as above
                if ('REFERENCE' in pair[0]) & (('TAR_' in pair[1]) |
                                               ('CAT_' in pair[1])):
                    di = behavior_performance['DI'][pair[1].strip(
                        'TAR_').strip('CAT_')]
# ---- Example #3 (scraped snippet boundary; original rating: 0) ----
                # NOTE(review): scraped fragment — `regress_pupil`, `f`,
                # `fpath`, `site`, `fext`, `manager`, `options`, `pairs`,
                # `pairs_str` are defined upstream and this excerpt is cut
                # mid-loop at the end.
                # suffix '_pr' marks figures from pupil-regressed data
                if regress_pupil:
                    f.savefig(fpath + f'{site}{fext}_pr.pdf')
                else:
                    f.savefig(fpath + f'{site}{fext}.pdf')

            # get behavior performance for this site
            behavior_performance = manager.get_behavior_performance(**options)

            # for each pair, project into TDR (overall and pair-specific) and compute dprime
            for i, pair in enumerate(pairs):
                print(f"pair {i}/{len(pairs)}")
                # composite key identifying this epoch pair
                idx = pair[0] + '_' + pair[1]
                # SNR undefined for STIM/REFERENCE epochs; use inf sentinel.
                # note the lookup uses pairs_str[i], the string-form twin of
                # `pair` — presumably aligned by index; verify upstream.
                if ('STIM_' in pair[0]) | ('REFERENCE' in pair[0]):
                    snr1 = np.inf
                else:
                    snr1 = thelp.get_snrs([pairs_str[i][0]])[0]
                if ('STIM_' in pair[1]) | ('REFERENCE' in pair[1]):
                    snr2 = np.inf
                else:
                    snr2 = thelp.get_snrs([pairs_str[i][1]])[0]

                # NOTE(review): .strip('STIM_') removes the characters
                # {S,T,I,M,_} from both ends, not the prefix 'STIM_' —
                # str.removeprefix is likely intended; confirm.
                if ('REFERENCE' in pair[0]): f1 = 0
                else:
                    f1 = thelp.get_tar_freqs([pairs_str[i][0].strip('STIM_')
                                              ])[0]
                if 'REFERENCE' in pair[1]: f2 = 0
                else:
                    f2 = thelp.get_tar_freqs([pairs_str[i][1].strip('STIM_')
                                              ])[0]
                # True only when BOTH members of the pair are catch epochs
                cat_cat = ('CAT_' in pairs_str[i][0]) & ('CAT_'
                                                         in pairs_str[i][1])
# ---- Example #4 (scraped snippet boundary; original rating: 0) ----
                # NOTE(review): scraped fragment — `a`, `projections`,
                # `masks`, `k`, `nAx`, `site`, `batch`, `choice_decoder`
                # come from upstream code not visible here.
                # trials where the choice mask is False form group `b`
                # (idiomatic form would be ~np.array(masks[k]) rather than
                # ==False, but behavior is the same for a boolean array)
                b = np.array(projections[k])[np.array(masks[k])==False][np.newaxis, :]
                dp, _, _, _, _, _ = compute_dprime(a, b)
                # noise floor: chance-level dprime distribution + p-value
                chance_dp, pvalue = compute_dprime_noiseFloor(a, b)
                chance_dp = np.sqrt(chance_dp)
                # compute_dprime presumably returns dprime^2, hence the
                # square roots before subtracting the mean chance level —
                # TODO confirm against compute_dprime's contract
                dp = (dp ** 0.5) - chance_dp.mean()
                # NOTE(review): DataFrame.append was deprecated in pandas
                # 1.4 and removed in 2.0 — pd.concat is the replacement
                choice_decoder = choice_decoder.append(pd.DataFrame(index=['sound', 'dprime', 'pvalue', 'nDim', 'axes', 'soundCategory', 'site', 'batch'],
                                    data=['target', dp, pvalue, nAx, k, 'target', site, batch]).T)


        # ======================= STIMULUS DECODING ============================
        # perform stimulus decoding on each set of axes. 
        # NOTE(review): scraped fragment — `catches`, `targets`, `rcat`,
        # `rtar`, `tstart`, `tend`, `nPCs`, `axes` are defined upstream;
        # the excerpt is cut inside the `if nAx > 0:` branch at the end.
        stim_pairs = list(product(catches, targets))
        for pair in stim_pairs:
            # trial-averaged responses in the analysis window for the catch
            # (d1) and target (d2) member of the pair
            d1 = rcat['resp'].extract_epoch(pair[0], mask=rcat['mask'])[:, :, tstart:tend].mean(axis=-1)
            d2 = rtar['resp'].extract_epoch(pair[1], mask=rtar['mask'])[:, :, tstart:tend].mean(axis=-1)
            snr = thelp.get_snrs([pair[1]])[0]
            # stack trials; boolean mask marks which rows are target trials
            d = np.concatenate((d1, d2), axis=0)
            mask = np.concatenate((np.zeros(d1.shape[0]), np.ones(d2.shape[0])), axis=0).astype(bool)
            # sweep dimensionality: decode using the first 1..nPCs axes
            for nAx in range(nPCs): 
                projections = {k: [] for k in axes.keys()}
                masks = {k: [] for k in axes.keys()}
                # leave-one-out: hold out trial i, fit on the rest
                for i in range(d.shape[0]):
                    idx = np.array(list(set(range(d.shape[0])).difference(set([i]))))
                    for ax_str in axes.keys():
                        _d = d[idx]
                        _choice = mask[idx]
                        # project into space (first nAx+1 decoding axes)
                        _d = _d.dot(axes[ax_str][range(0, nAx+1), :].T)
                        # compute discrimination axis
                        if nAx > 0:
                            _, wopt, _, _, _, _= compute_dprime(_d[_choice].T, _d[~_choice].T)
# ---- Example #5 (scraped snippet boundary; original rating: 0) ----
                # NOTE(review): scraped fragment (near-duplicate of an
                # earlier example in this file) — `dat`, `r1`, `r2`, `ep`,
                # `cellid`, `fpath`, `pairs`, `behavior_performance` are
                # defined upstream; the elif chain is cut at the end.
                # stack active/passive spike counts with state and epoch
                # labels into long-form rows; np.stack makes everything
                # object/str, hence the float cast below
                dat = pd.concat([dat, pd.DataFrame(np.stack([np.concatenate([r1, r2]).squeeze(), 
                                r1.shape[0]*['Active'] + r2.shape[0]*['Passive'], [ep]*(r1.shape[0] + r2.shape[0])]).T, columns=['Spike Count', 'state', 'Epoch'])])
                dat = dat.astype({'Spike Count': 'float32'})
            # KDE of spike counts, one panel per state, colored by epoch
            g = sns.FacetGrid(dat, col='state', hue='Epoch')
            g.map(sns.kdeplot, "Spike Count", fill=True)
            g.add_legend(frameon=False)
            g.fig.suptitle(cellid)
            g.fig.set_size_inches(8,4)
            g.fig.set_tight_layout(True)
            g.fig.savefig(fpath + f'{cellid}_tarDist.pdf')
            plt.close('all')

            _df = pd.DataFrame()
            for pair in pairs:
                # SNR undefined for STIM/REFERENCE epochs; inf sentinel
                if ('STIM_' in pair[0]) | ('REFERENCE' in pair[0]): snr1 = np.inf
                else: snr1 = thelp.get_snrs([pair[0]])[0]
                if ('STIM_' in pair[1]) | ('REFERENCE' in pair[1]): snr2 = np.inf
                else: snr2 = thelp.get_snrs([pair[1]])[0]
                
                # NOTE(review): .strip('STIM_')/.strip('TAR_')/.strip('CAT_')
                # strip a CHARACTER SET from both ends, not the literal
                # prefix — str.removeprefix is likely intended; confirm the
                # epoch names make this harmless.
                if ('REFERENCE' in pair[0]): f1 = 0
                else: f1 = thelp.get_tar_freqs([pair[0].strip('STIM_')])[0]
                if 'REFERENCE' in pair[1]: f2 = 0
                else: f2 = thelp.get_tar_freqs([pair[1].strip('STIM_')])[0]

                # get behavioral DI
                if ('REFERENCE' in pair[0]) & (('TAR_' in pair[1]) | ('CAT_' in pair[1])):
                    di = behavior_performance['DI'][pair[1].strip('TAR_').strip('CAT_')]
                elif ('REFERENCE' in pair[1]) & (('TAR_' in pair[0]) | ('CAT_' in pair[0])):
                    di = behavior_performance['DI'][pair[0].strip('TAR_').strip('CAT_')]
                elif ('STIM_' in pair[0]) | ('STIM_' in pair[1]):
                    di = np.inf
        # need to do some "hacky" stuff for batch 302 / 307 to get names to align with the TBP data
        # NOTE(review): scraped fragment — `_ra`, `rec`, `ra`, `rp`, `batch`,
        # `start`, `end`, `thelp` come from upstream code; the excerpt is
        # cut mid-computation at the end (sd for the passive responses).
        if batch in [324, 325]:
            targets = thelp.sort_targets([f for f in _ra['resp'].epochs.name.unique() if 'TAR_' in f])
            # only keep target presented at least 5 times
            targets = [t for t in targets if (_ra['resp'].epochs.name==t).sum()>=5]
            # remove "off-center targets"
            # NOTE(review): .strip('REM_') strips the characters {R,E,M,_}
            # from both ends, not the prefix 'REM_' — str.removeprefix is
            # likely the intended call; confirm the epoch naming.
            on_center = thelp.get_tar_freqs([f.strip('REM_') for f in _ra['resp'].epochs.name.unique() if 'REM_' in f])[0]
            targets = [t for t in targets if str(on_center) in t]
            catch = [f for f in _ra['resp'].epochs.name.unique() if 'CAT_' in f]
            # remove off-center catches
            catch = [c for c in catch if str(on_center) in c]
            rem = [f for f in rec['resp'].epochs.name.unique() if 'REM_' in f]
            #targets += rem
            targets_str = targets
            catch_str = catch
            snrs = thelp.get_snrs(targets)

        # compute noise PCs using difference covariance matrix (so they're ordered by change in rsc)
        respa = []
        respp = []
        for t in targets:
            # z-score single-trial responses (active) across trials;
            # guard against zero std to avoid divide-by-zero
            _r = rec['resp'].extract_epoch(t, mask=ra['mask'])[:, :, start:end].mean(axis=-1, keepdims=True)
            m = _r.mean(axis=0)
            sd = _r.std(axis=0)
            sd[sd==0] = 1
            _r = (_r - m) / sd
            respa.append(_r)

            # same normalization for the passive-state responses
            _r = rec['resp'].extract_epoch(t, mask=rp['mask'])[:, :, start:end].mean(axis=-1, keepdims=True)
            m = _r.mean(axis=0)
            sd = _r.std(axis=0)