Example #1
def load_with_parms(site, **kwargs):
    # defaults

    options = {'batch': 316,
               'cellid': site,
               'stimfmt': 'envelope',
               'rasterfs': 100,
               'runclass': 'CPN',
               'stim': False,
               'resp':True}

    options.update(**kwargs)

    manager = BAPHYExperiment(siteid=site, batch=options['batch'])

    loaded_rec = manager.get_recording(recache=True, **options)
    parameters = manager.get_baphy_exptparams()

    # load_URI, _ = nb.baphy_load_recording_uri(**options)
    # loaded_rec = recording.load_recording(load_URI)

    CPN_rec = cpe.set_recording_subepochs(loaded_rec)
    recordings  = split_recording(CPN_rec)

    return recordings, parameters
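
A minimal usage sketch for the loader above (the site ID below is hypothetical); any keyword argument overrides the matching default through options.update(**kwargs):

recordings, parameters = load_with_parms('TNC017a', rasterfs=200)  # hypothetical site, overriding rasterfs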
Example #2
    def ready_recording(self, parmfiles):
        self.parmfiles = parmfiles
        self.animal = self.parmfiles[0].split('/')[3]

        manager = BAPHYExperiment(parmfile=self.parmfiles)
        self.rec = manager.get_recording(recache=self.recache, **self.options)
        self.rec['resp'] = self.rec['resp'].rasterize()
        self.prestim = self.rec['resp'].extract_epoch(
            'PreStimSilence').shape[-1] / self.rasterfs
        m = self.rec.copy().and_mask(['PreStimSilence', 'PostStimSilence'],
                                     invert=True)
        self.poststim = (self.rec['resp'].extract_epoch(
            'REFERENCE', mask=m['mask'], allow_incomplete=True).shape[-1] /
                         self.rasterfs) + self.prestim
        self.lim = (self.tstart, self.tend)

        # get light on / off
        opt_data = self.rec['resp'].epoch_to_signal('LIGHTON')
        self.opto_mask = opt_data.extract_epoch('REFERENCE').any(axis=(1, 2))

        self.opt_s_stop = (np.argwhere(
            np.diff(
                opt_data.extract_epoch('REFERENCE')[self.opto_mask, :, :]
                [0].squeeze())) + 1) / self.rasterfs

        self.sort_by_dprime()
Example #3
    def ready_recording(self, site):
        """
        Load a whole recording at a time. Much more efficient than loading individual neurons.
        Uses a site name to define a list of parameter files.
        :param site:
        :return:
        """
        print('loading selected site...')

        parmfile, rawid = self.DF.query('recording == @site').loc[:, (
            'parmfile', 'rawid')].iloc[0, :].tolist()

        self.animal = parmfile.split('/')[4]

        manager = BAPHYExperiment(parmfile=parmfile, rawid=rawid)

        rec = manager.get_recording(recache=self.recache, **self.options)
        rec['resp'] = rec['resp'].rasterize()
        self.prestim = rec['resp'].extract_epoch(
            'PreStimSilence').shape[-1] / self.rasterfs

        # get light on / off
        opt_data = rec['resp'].epoch_to_signal('LIGHTON')
        self.opto_mask = opt_data.extract_epoch('REFERENCE').any(axis=(1, 2))

        opt_start_stop_bins = np.argwhere(
            np.diff(
                opt_data.extract_epoch('REFERENCE')[self.opto_mask, :, :]
                [0].squeeze())).squeeze() + 1
        self.opt_duration = np.diff(opt_start_stop_bins) / self.rasterfs

        # due to some database discrepancies, a recording might load neurons that are no longer present;
        # this compares against sCellFile as the ground truth
        rec_cellids = np.asarray(rec['resp'].chans)
        true_cellids = self.DF.loc[self.DF.recording == site, 'cellid'].values
        good_cells_mask = np.isin(rec_cellids, true_cellids)
        self.cell_id = rec_cellids[good_cells_mask].tolist()

        raw_raster = rec['resp'].extract_epoch(
            'REFERENCE').squeeze()[:, good_cells_mask, :]
        start_time = self.prestim + self.tstart
        end_time = self.prestim + self.tend
        start_bin = np.floor(start_time * self.options['rasterfs']).astype(int)
        end_bin = np.floor(end_time * self.options['rasterfs']).astype(int)

        self.raster = raw_raster[:, :, start_bin:end_bin]
        self.t = np.linspace(
            start_time, end_time, self.raster.shape[-1],
            endpoint=False) - self.prestim

        print('done')
Example #4
def get_pairs(parm, rasterfs=100):
    expt = BAPHYExperiment(parm)
    rec = expt.get_recording(rasterfs=rasterfs, resp=True, stim=False)
    resp = rec['resp'].rasterize()
    expt_params = expt.get_baphy_exptparams()  # Using Charlie's manager
    ref_handle = expt_params[0]['TrialObject'][1]['ReferenceHandle'][1]
    soundies = list(ref_handle['SoundPairs'].values())
    pairs = [(soundies[s]['bg_sound_name'].split('.')[0],
              soundies[s]['fg_sound_name'].split('.')[0])
             for s in range(len(soundies))]
    for c, p in enumerate(pairs):
        print(f"{c} - {p}")
    print(f"There are {len(resp.chans) - 1} units and {len(pairs)} sound pairs.")
    print("Returning one value less than channel and pair count.")

    return (len(pairs)-1), (len(resp.chans)-1)
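
A usage sketch with a placeholder parmfile path; note the return order is (pair count - 1, channel count - 1), matching the prints above:

parm = '/path/to/OLP_parmfile.m'  # placeholder path
last_pair_idx, last_unit_idx = get_pairs(parm, rasterfs=100)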
Example #5
def load_site(site, fs=20, return_baphy_manager=False, recache=False):
    """
    Load data for all active/passive files at this site with the largest number of stable cellids,
        i.e. if the site has a prepassive file with only 3 cells but 4 other active/passive
        files with 20 cells, it will not load the prepassive file.
    """
    rawid = which_rawids(site)
    ops = {'batch': 307, 'pupil': 1, 'rasterfs': fs, 'cellid': site, 'stim': 0,
        'rawid': rawid, 'resp': True, 'recache': recache}
    #rec = nb.baphy_load_recording_file(**ops)
    manager = BAPHYExperiment(batch=307, siteid=site, rawid=rawid)
    rec = manager.get_recording(**ops)
    rec['resp'] = rec['resp'].rasterize()

    if return_baphy_manager:
        return rec, manager
    else:
        return rec
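
For reference, a call that also returns the manager; TAR010c is one of the batch-307 sites listed elsewhere in this file:

rec, manager = load_site('TAR010c', fs=20, return_baphy_manager=True)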
Example #6
def load_tbp_recording(siteid, batch, **options):
    options['resp'] = True
    options['pupil'] = options.get('pupil', True)
    options['rasterfs'] = options.get('rasterfs', 50)

    manager = BAPHYExperiment(siteid=siteid, batch=batch)
    rec = manager.get_recording(**options)
    rec['resp'] = rec['resp'].rasterize()

    onsetsec = 0.1
    offsetsec = 0.4

    onset = int(onsetsec * options['rasterfs'])
    offset = int(offsetsec * options['rasterfs'])

    # PCA on trial averaged responses
    ref_stims, sounds, all_sounds = get_sound_labels(rec)
    """
    try:
        targets = [f for f in rec['resp'].epochs.name.unique() if 'TAR_' in f]
        catch = [f for f in rec['resp'].epochs.name.unique() if 'CAT_' in f]

        sounds = targets + catch

        ref_stims = [x for x in rec['resp'].epochs.name.unique() if 'STIM_' in x]
        idx = np.argsort([int(s.split('_')[-1]) for s in ref_stims])
        ref_stims = np.array(ref_stims)[idx].tolist()

        sounds=sort_targets(sounds)
    except:
        ref_stims=['REFERENCE']
        sounds = ['TARGET']

    all_sounds = ref_stims + sounds
    """
    rall = rec.and_mask(['ACTIVE_EXPERIMENT', 'PASSIVE_EXPERIMENT'])

    # can't simply extract evoked responses for refs because they can be longer/shorter if they came
    # after a target and/or if it was the last stim. So masking prestim/poststim doesn't work. Do it manually.
    d = rall['resp'].extract_epochs(all_sounds, mask=rall['mask'])
    d = {
        k: v[~np.isnan(v[:, :, onset:offset].sum(axis=(1, 2))), :, :]
        for (k, v) in d.items()
    }
    d = {k: v[:, :, onset:offset] for (k, v) in d.items()}

    Rall_u = np.vstack([d[k].sum(axis=2).mean(axis=0) for k in d.keys()])

    pca = PCA(n_components=2)
    pca.fit(Rall_u)
    pc_axes = pca.components_

    # project onto first PC and plot trial psth
    rec['pc1'] = rec['resp']._modified_copy(rec['resp']._data.T.dot(
        pc_axes.T).T[[0], :])
    rec['pc2'] = rec['resp']._modified_copy(rec['resp']._data.T.dot(
        pc_axes.T).T[[1], :])

    #rec = make_state_signal(rec, state_signals=['pupil','active'], permute_signals=['pupil'])
    rec = make_state_signal(rec, state_signals=['pupil', 'active'])

    return rec
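
The _modified_copy projection above is dense, so here is a standalone sketch of the same shape logic with made-up dimensions:

import numpy as np

n_cells, n_times = 30, 5000                # made-up sizes
data = np.random.rand(n_cells, n_times)    # stands in for rec['resp']._data
pc_axes = np.random.rand(2, n_cells)       # stands in for pca.components_

# data.T.dot(pc_axes.T).T has shape (2, n_times): each row is the population
# response projected onto one PC. Indexing with [[0], :] keeps a (1, n_times)
# array, the shape a single-channel signal expects.
pc1 = data.T.dot(pc_axes.T).T[[0], :]
assert pc1.shape == (1, n_times)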
Example #7
import charlieTools.reward_learning.behavior_helpers as bh

batch = 302
site = 'DRX006b.e1:64'  # DRX005c, DRX006b, DRX007a, DRX008b
fs = 40
time_window = 0.2   # n secs of evoked response to collapse over for calculating dprime
bins = int(fs * time_window)

options = {'batch': batch,
           'cellid': site,
           'rasterfs': fs,
           'pupil': True,
           'resp': True,
           'stim':False}

manager = BAPHYExperiment(batch=batch, siteid=site[:7])
rec = manager.get_recording(**options)
rec['resp'] = rec['resp'].rasterize()

# extract appropriate shank
cells, _ = nb.parse_cellid(options)
rec['resp'] = rec['resp'].extract_channels(cells)
ncells = rec['resp'].shape[0]
rec = rec.and_mask(['FALSE_ALARM_TRIAL', 'EARLY_TRIAL'], invert=True)

# extract target responses
targets = [e for e in rec.epochs.name.unique() if 'TAR_' in e]
rew_tar = bh.get_rewarded_targets(rec, manager)
nr_tar = bh.get_nonrewarded_targets(rec, manager)
R_center = []
rt = rec.copy()
Example #8
# screen for dates
parmfiles['date'] = [
    dt.datetime.strptime('-'.join(x.split('_')[1:-2]), '%Y-%m-%d')
    for x in parmfiles.parmfile
]
ed = dt.datetime.strptime(earliest_date, '%Y_%m_%d')
parmfiles = parmfiles[parmfiles.date > ed]

# Perform analysis for each unique DAY, not file. So, group parmfiles coming from same date.
options = {'pupil': True, 'rasterfs': 100}
pupil = []
R_HR = []
NR_HR = []
LI = []
for date in parmfiles['date'].unique():
    files = [p for i, p in parmfiles.iterrows() if p.date == date]
    files = [f['resppath'] + f['parmfile'] for f in files]

    manager = BAPHYExperiment(files)
    rec = manager.get_recording(**options)

    # get R / NR hit rate and pupil size over valid trials

    # compute HR's over sliding window in baphy trial time
    # only on valid trials
    totalTrials = rec['pupil'].extract_epoch('TRIAL').shape[0]
    rec = rec.and_mask('INVALID_BAPHY_TRIAL', invert=True)
    validTrials = rec['pupil'].extract_epoch('TRIAL',
                                             mask=rec['mask']).shape[0]

    # find valid trial numbers using rec epochs times
    invalid_time = rec['pupil'].get_epoch_bounds('INVALID_BAPHY_TRIAL')
    trial_epochs = rec['pupil'].get_epoch_bounds('TRIAL')
    good_trials = [
Example #9
recache = False

# regress out first order pupil?
regress_pupil = False

# extract evoked periods
start = int(0.1 * options['rasterfs'])
end = int(0.4 * options['rasterfs'])

# siteids
df = pd.DataFrame()
for batch in batches:
    sites = np.unique([c[:7] for c in nd.get_batch_cells(batch).cellid])
    sites = [s for s in sites if s != 'CRD013b']
    for site in sites:
        manager = BAPHYExperiment(batch=batch, siteid=site)
        rec = manager.get_recording(recache=recache, **options)
        rec['resp'] = rec['resp'].rasterize()

        # mask appropriate trials
        rec = rec.and_mask(
            ['PASSIVE_EXPERIMENT', 'HIT_TRIAL', 'CORRECT_REJECT_TRIAL'])
        rec = rec.apply_mask(reset_epochs=True)

        if regress_pupil:
            rec = preproc.regress_state(rec, state_sigs=['pupil'])

        ra = rec.copy()
        ra = ra.create_mask(True)
        ra = ra.and_mask(['HIT_TRIAL', 'CORRECT_REJECT_TRIAL'])
Example #10
# screen for dates
parmfiles['date'] = [
    dt.datetime.strptime('-'.join(x.split('_')[1:-2]), '%Y-%m-%d')
    for x in parmfiles.parmfile
]
ed = dt.datetime.strptime(earliest_date, '%Y_%m_%d')
parmfiles = parmfiles[parmfiles.date > ed]

results = pd.DataFrame(index=parmfiles['date'].unique(),
                       columns=['Early', 'Late', 'Overall', 'nTrials'])
options = {}
for date in parmfiles['date'].unique():
    files = [p for i, p in parmfiles.iterrows() if p.date == date]
    files = [f['resppath'] + f['parmfile'] for f in files]

    manager = BAPHYExperiment(files)
    events = manager.get_behavior_events(correction_method='baphy', **options)
    events = manager._stack_events(events)

    good_trials = events[(~events.invalidTrial)].Trial.unique()
    # note: the number of target trials in the performance dict doesn't have to add up to
    # the number of window_length trials. There can be (and definitely are) FA trials
    # that count as "valid trials" but are excluded from the behavior analysis because
    # the target never played

    params = manager.get_baphy_exptparams()[0]
    targets = np.array(params['TrialObject'][1]['TargetHandle'][1]['Names'])
    pump_dur = np.array(params['BehaveObject'][1]['PumpDuration'])
    r_tar = targets[pump_dur > 0][0]
    nr_tar = targets[pump_dur == 0][0]
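
A toy illustration of the PumpDuration masking above (names and durations invented): a pump duration greater than zero marks the rewarded target.

import numpy as np

targets = np.array(['TAR_0dB', 'TAR_-5dB'])  # invented names
pump_dur = np.array([1.0, 0.0])              # invented durations
r_tar = targets[pump_dur > 0][0]             # -> 'TAR_0dB'
nr_tar = targets[pump_dur == 0][0]           # -> 'TAR_-5dB'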
Example #11
File: csd_test.py Project: LBHB/nems_db
    parmfile = dparm.resppath[0] + dparm.parmfile[0]
else:
    #hard-code path to parmfile
    #parmfile = "/auto/data/daq/Teonancatl/TNC018/TNC018a16_p_BNB.m"
    #parmfile = "/auto/data/daq/Teonancatl/TNC020/TNC020a11_p_BNB.m"
    parmfile = "/auto/data/daq/Teonancatl/TNC017/TNC017a03_p_BNB.m"
    parmfile = "/auto/data/daq/Teonancatl/TNC017/TNC017a10_p_BNB.m"
    parmfile = "/auto/data/daq/Teonancatl/TNC016/TNC016a03_p_BNB.m"
    parmfile = "/auto/data/daq/Teonancatl/TNC018/TNC018a03_p_BNB.m"
    parmfile = "/auto/data/daq/Tartufo/TAR010/TAR010a03_p_BNB.m"
    parmfile = "/auto/data/daq/Teonancatl/TNC006/TNC006a03_p_BNB.m"
    parmfile = "/auto/data/daq/Teonancatl/TNC006/TNC006a19_p_BNB.m"

## load the recording
parmfile = "/auto/data/daq/Tartufo/TAR010/TAR010a03_p_BNB.m"
ex = BAPHYExperiment(parmfile=parmfile)
print(ex.experiment, ex.openephys_folder, ex.openephys_tarfile,
      ex.openephys_tarfile_relpath)

rec = ex.get_recording(raw=True,
                       resp=False,
                       stim=False,
                       recache=False,
                       rawchans=None,
                       rasterfs=1500)

data = rec['raw']._data.copy()

print('HP-filtering MUA >100 Hz...')
sos = butter(4, 100, 'hp', fs=rec['raw'].fs, output='sos')
mua_ = sosfilt(sos, data, axis=1)
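
A self-contained check of the high-pass stage above on synthetic data, assuming the same 1500 Hz sampling rate:

import numpy as np
from scipy.signal import butter, sosfilt

fs = 1500
t = np.arange(fs) / fs
# 10 Hz LFP-like component plus a 300 Hz MUA-like component
x = np.sin(2 * np.pi * 10 * t) + 0.1 * np.sin(2 * np.pi * 300 * t)
sos = butter(4, 100, 'hp', fs=fs, output='sos')
y = sosfilt(sos, x)  # the 10 Hz component is strongly attenuated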
Example #12
for batch in batches:
    sites = np.unique([c[:7] for c in nd.get_batch_cells(batch).cellid])
    options = Aoptions[batch]
    time_bins = twin[batch]
    sites = [s for s in sites if (s != 'CRD013b') and ('gus' not in s)]
    if batch == 302:
        sites1 = [s+'.e1:64' for s in sites]
        sites2 = [s+'.e65:128' for s in sites]
        sites = sites1 + sites2
    for site in sites:
        if batch == 307:
            rawid = which_rawids(site)
        else:
            rawid = None
        manager = BAPHYExperiment(batch=batch, siteid=site[:7], rawid=rawid)
        rec = manager.get_recording(recache=recache, **options)
        rec['resp'] = rec['resp'].rasterize()
        if batch == 302:
            c, _ = parse_cellid({'cellid': site, 'batch': batch})
            rec['resp'] = rec['resp'].extract_channels(c)

        behavior_performance = manager.get_behavior_performance(**options)
        allop = copy.deepcopy(options)
        allop['keep_following_incorrect_trial'] = True
        allop['keep_cue_trials'] = True
        allop['keep_early_trials'] = True
        behavior_performance_all = manager.get_behavior_performance(**allop)

        # regress out first order pupil
        if regress_pupil & regress_task:
Example #13
import ptd_utils as pu
import pandas as pd

sites = [
    'TAR010c', 'BRT026c', 'BRT033b', 'BRT034f', 'BRT036b', 'BRT037b',
    'BRT039c', 'bbl102d', 'AMT018a', 'AMT020a', 'AMT022c', 'AMT026a'
]

fs = 20
batch = 307
results = pd.DataFrame(
    index=sites, columns=['DI', 'dprime', 'targets', 'all_DI', 'all_dprime'])
for site in sites:
    rawid = tuple(pu.which_rawids(site))

    manager = BAPHYExperiment(batch=batch, siteid=site, rawid=rawid)

    options = {
        'rasterfs': fs,
        'batch': batch,
        'siteid': site,
        'keep_following_incorrect_trial': False,
        'keep_early_trials': False
    }
    correct_trials = manager.get_behavior_performance(**options)

    options = {
        'rasterfs': fs,
        'batch': batch,
        'siteid': site,
        'keep_following_incorrect_trial': True,
Example #14
import matplotlib as mpl
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False

# load noise correlation results for comparison
df = ld.load_noise_correlation('rsc_ev')

win = 15  # total window size in sec (non-overlapping across data)
subwin = 0.25  # sub-window size in sec
# CV = SD of spike counts across all sub-windows divided by the mean across all sub-windows
# If all neurons are Poisson and statistically independent, then the CV of the
# population rate approaches zero as the number of neurons grows

site = 'TAR010c'
batch = 289

manager = BAPHYExperiment(cellid=site, batch=batch)
options = {'rasterfs': 4, 'resp': True, 'stim': False, 'pupil': True}
rec = manager.get_recording(**options)
rec['resp'] = rec['resp'].rasterize()
if batch == 331:
    rec = nems_preproc.fix_cpn_epochs(rec)
else:
    rec = nems_preproc.mask_high_repetion_stims(rec)
rec = generate_psth_from_resp(rec)

# extract continuous data (subtract psth?)
data = rec.apply_mask()['resp']._data  #- rec.apply_mask()['psth_sp']._data
pupil = rec.apply_mask()['pupil']._data

# divide into bins
win_bin = int(rec['resp'].fs * win)
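
The Poisson claim in the comments above can be checked directly: for independent Poisson neurons, the population count's variance equals its mean, so CV = 1/sqrt(total mean count), which shrinks as more neurons are summed. A quick simulation (all numbers invented):

import numpy as np

rng = np.random.default_rng(0)
n_neurons, n_subwins, rate = 100, 1000, 2.0  # invented values
counts = rng.poisson(rate, size=(n_neurons, n_subwins))
pop_count = counts.sum(axis=0)               # population count per sub-window
cv = pop_count.std() / pop_count.mean()      # ~ 1/sqrt(100 * 2) ≈ 0.07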
Example #15
    if not os.path.isdir(os.path.join(fig_path, site)):
        os.mkdir(os.path.join(fig_path, site))
    
    site_path = os.path.join(fig_path, site)
    # get parmfiles
    sql = "SELECT sCellFile.cellid, sCellFile.respfile, gDataRaw.resppath from sCellFile INNER JOIN" \
               " gCellMaster ON (gCellMaster.id=sCellFile.masterid) INNER JOIN" \
               " gDataRaw ON (sCellFile.rawid=gDataRaw.id)" \
               " WHERE gCellMaster.siteid=%s" \
               " and gDataRaw.runclass='TBP' and gDataRaw.bad=0"
    d = nd.pd_query(sql, (site,))
    d['parmfile'] = [f.replace('.spk.mat', '.m') for f in d['respfile']]
    parmfiles = np.unique(np.sort([os.path.join(d['resppath'].iloc[i], d['parmfile'].iloc[i]) for i in range(d.shape[0])])).tolist()
    manager = BAPHYExperiment(parmfiles)
    rec = manager.get_recording(**options)
    rec['resp'] = rec['resp'].rasterize()

    # find / sort epoch names
    files = [f for f in rec['resp'].epochs.name.unique() if 'FILE_' in f]
    targets = [f for f in rec['resp'].epochs.name.unique() if 'TAR_' in f]
    catch = [f for f in rec['resp'].epochs.name.unique() if 'CAT_' in f]

    sounds = targets + catch
    ref_stims = [x for x in rec['resp'].epochs.name.unique() if 'STIM_' in x]
    idx = np.argsort([int(s.split('_')[-1]) for s in ref_stims])
    ref_stims = np.array(ref_stims)[idx].tolist()
    all_stims = ref_stims + sounds

    # ================================================================================================
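
The argsort trick above orders reference epochs by their trailing numeric token; a toy example with invented names:

import numpy as np

ref_stims = ['STIM_1000', 'STIM_250', 'STIM_4000']  # invented names
idx = np.argsort([int(s.split('_')[-1]) for s in ref_stims])
print(np.array(ref_stims)[idx].tolist())  # ['STIM_250', 'STIM_1000', 'STIM_4000']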
Example #16
# assume levels are [-10, -5, 0, Inf]
all_snrs = np.array([-10, -5, 0, np.inf])
LI_EARLY = np.full((4, len(uDate)), np.nan)
LI_MID = np.full((4, len(uDate)), np.nan)
LI_LATE = np.full((4, len(uDate)), np.nan)
LI_ALL = np.full((4, len(uDate)), np.nan)
VALID_TRIALS = np.full(len(uDate), np.nan)

for idx, ud in enumerate(uDate):
    parmfiles = d[d.date == ud].parmfile_path.values.tolist()
    # sanity check to make sure the "siteid" is the same for all files
    sid = [p.split(os.path.sep)[-1][:7] for p in parmfiles]
    if np.any(np.array(sid) != sid[0]):
        bad_idx = (np.array(sid) != sid[0])
        parmfiles = np.array(parmfiles)[~bad_idx].tolist()
    manager = BAPHYExperiment(parmfiles)

    # make sure only active files are loaded
    pf_mask = [
        k['BehaveObjectClass'] == 'RewardTargetLBHB'
        for k in manager.get_baphy_exptparams()
    ]
    if sum(pf_mask) != len(manager.parmfile):
        parmfiles = np.array(manager.parmfile)[pf_mask].tolist()
        manager = BAPHYExperiment(parmfiles)

    rec = manager.get_recording(recache=True, **options)
    rec = rec.and_mask(['ACTIVE_EXPERIMENT'])
Example #17
import charlieTools.reward_learning.behavior_helpers as bhelp

import matplotlib.pyplot as plt

date = "2020_07_07"
runclass = 'TBP'
animal = 'Cordyceps'
rasterfs = 100
options = {'rasterfs': rasterfs, 'pupil': True}

# get list of all training parmfiles
parmfiles = bhelp.get_training_files(animal, runclass, date)

fns = [r + p for r, p in zip(parmfiles['resppath'], parmfiles['parmfile'])]

manager = BAPHYExperiment(fns)
rec = manager.get_recording(**options)

# plot pupil results

f = plt.figure(figsize=(12, 6))

ptax = plt.subplot2grid((2, 2), (0, 0), colspan=2)
rtax = plt.subplot2grid((2, 2), (1, 0))
ntax = plt.subplot2grid((2, 2), (1, 1))

# plot continuous trace
ptax.plot(rec['pupil']._data.T)

# plot raw trial averaged
behave_outcomes = [
Example #18
def calc_psth_metrics_Greg(
    batch,
    cellid,
):
    import numpy as np
    import SPO_helpers as sp
    import nems.preprocessing as preproc
    import nems.metrics.api as nmet
    import nems.metrics.corrcoef
    import copy
    import nems.epoch as ep
    import scipy.stats as sst
    from nems_lbhb.gcmodel.figures.snr import compute_snr
    from nems.preprocessing import generate_psth_from_resp
    import logging
    log = logging.getLogger(__name__)

    start_win_offset = 0  # Time (in sec) to offset the start of the window used to calculate threshold, excitatory percentage, and inhibitory percentage
    options = {}

    manager = BAPHYExperiment(cellid=cellid, batch=batch)
    options = {'rasterfs': 100, 'stim': False, 'resp': True}
    rec = manager.get_recording(**options)

    passive = rec['resp'].epochs[rec['resp'].epochs['name'] ==
                                 'PASSIVE_EXPERIMENT']
    rec['resp'] = rec['resp'].extract_channels([cellid])

    if passive.shape[0] >= 2:
        # if an OLP test was sorted in here as well, slice it out of the epochs and data
        print(f"Multiple ({passive.shape[0]}) OLPs found in {cellid}")
        runs = passive.shape[0] - 1
        max_run = (passive['end'] -
                   passive['start']).reset_index(drop=True).idxmax()
        if runs != max_run:
            print(
                f"There are {runs+1} OLPs, the longest is run {max_run+1}. Using last run but make sure that is what you want."
            )
        else:
            print(
                f"The last run ({runs+1}) is also the longest run ({max_run+1}); using the last run."
            )
        good_start = passive.iloc[-1, 1]
        rec['resp']._data = {
            key: val[val >= good_start] - good_start
            for key, val in rec['resp']._data.items()
        }
        rec['resp'].epochs = rec['resp'].epochs.loc[
            rec['resp'].epochs['start'] >= good_start, :].reset_index(
                drop=True)
        rec['resp'].epochs['start'] = rec['resp'].epochs['start'] - good_start
        rec['resp'].epochs['end'] = rec['resp'].epochs['end'] - good_start

    rec['resp'] = rec['resp'].extract_channels([cellid])
    resp = copy.copy(rec['resp'].rasterize())
    rec['resp'].fs = 100

    #Greg spont rate subtraction with std norm
    prestimsilence = resp.extract_epoch('PreStimSilence')
    # average over reps(0) and time(-1), preserve neurons
    spont_rate = np.expand_dims(np.nanmean(prestimsilence, axis=(0, -1)),
                                axis=1)
    ## STD OVER PRESTIM ONLY
    std_per_neuron = resp._data.std(axis=1, keepdims=True)
    std_per_neuron[std_per_neuron == 0] = 1
    norm_spont = resp._modified_copy(data=(resp._data - spont_rate) /
                                     std_per_neuron)

    file = os.path.splitext(rec.meta['files'][0])[0]
    experiment_name = file[-3:]
    #get dictionary of parameters for experiment
    if experiment_name == 'OLP':
        params = get_expt_params(resp, manager, cellid)
    else:
        params = {}

    epcs = rec['resp'].epochs[rec['resp'].epochs['name'] ==
                              'PreStimSilence'].copy()
    ep2 = rec['resp'].epochs[rec['resp'].epochs['name'] ==
                             'PostStimSilence'].iloc[0].copy()
    prestim = epcs.iloc[0]['end']
    poststim = ep2['end'] - ep2['start']
    lenstim = ep2['end']

    stim_epochs = ep.epoch_names_matching(resp.epochs, 'STIM_')
    if paths:
        bg_dir = f"/auto/users/hamersky/baphy/Config/lbhb/SoundObjects/@OverlappingPairs/{paths[0]}/"
        bg_nm = os.listdir(bg_dir)
        bg_names = [
            os.path.splitext(name)[0][2:].replace('_', '') for name in bg_nm
        ]
        fg_dir = f"/auto/users/hamersky/baphy/Config/lbhb/SoundObjects/@OverlappingPairs/{paths[1]}/"
        fg_nm = os.listdir(fg_dir)
        fg_names = [
            os.path.splitext(name)[0][2:].replace('_', '') for name in fg_nm
        ]
        bg_names.append('null')
        fg_names.append('null')

        bg_epochs = [b.split('_')[1].split('-')[0] for b in stim_epochs]
        fg_epochs = [f.split('_')[2].split('-')[0] for f in stim_epochs]
        bg_epochs = [b[2:] if b != 'null' else b for b in bg_epochs]
        fg_epochs = [f[2:] if f != 'null' else f for f in fg_epochs]

        bool_bg, bool_fg = [], []
        for (b, f) in zip(bg_epochs, fg_epochs):
            bool_bg.append(b in bg_names)
            bool_fg.append(f in fg_names)

        mask = np.logical_and(np.asarray(bool_bg), np.asarray(bool_fg))
        stim_epochs = [b for a, b in zip(mask, stim_epochs) if a]

        good_epochs = resp.epochs.loc[(
            resp.epochs['name'].isin(stim_epochs)), :]
        starts = good_epochs.start
        ends = good_epochs.end

        ff_start = resp.epochs.start.isin(starts)
        ff_ends = resp.epochs.end.isin(ends)
        ff_silence = resp.epochs.name.isin(
            stim_epochs + ['PreStimSilence', 'PostStimSilence', 'REFERENCE'])

        resp.epochs = resp.epochs.loc[ff_silence & (ff_start | ff_ends), :]
        rec['resp'].epochs = resp.epochs.loc[ff_silence &
                                             (ff_start | ff_ends), :]

    epoch_repetitions = [resp.count_epoch(cc) for cc in stim_epochs]
    full_resp = np.empty((max(epoch_repetitions), len(stim_epochs),
                          (int(lenstim) * rec['resp'].fs)))
    full_resp[:] = np.nan
    for cnt, epo in enumerate(stim_epochs):
        resps_list = resp.extract_epoch(epo)
        full_resp[:resps_list.shape[0], cnt, :] = resps_list[:, 0, :]

    ##base reliability
    # gets two subsamples across repetitions, and takes the mean across reps
    rep1 = np.nanmean(full_resp[0:-1:2, ...], axis=0)
    rep2 = np.nanmean(full_resp[1::2, ...], axis=0)

    resh1 = np.reshape(rep1, [-1])
    resh2 = np.reshape(rep2, [-1])

    corcoef = sst.pearsonr(resh1[:], resh2[:])[0]

    ##average response
    pre_bin = int(prestim * rec['resp'].fs)
    post_bin = int(full_resp.shape[-1] - (poststim * rec['resp'].fs))

    raster = np.squeeze(full_resp[..., pre_bin:post_bin])

    S = tuple(range(raster.ndim))
    avg_resp = np.nanmean(np.absolute(raster), axis=S)

    ##signal to noise
    snr = compute_snr(resp)

    #Calculate suppression for each sound pair.
    # epochs with two sounds in them
    if paths:
        epcs_twostim = resp.epochs.loc[
            (resp.epochs['name'].str.count('-0-1') == 2) &
            (resp.epochs['name'].isin(stim_epochs)), :].copy()
    else:
        epcs_twostim = resp.epochs[resp.epochs['name'].str.count('-0-1') ==
                                   2].copy()

    twostims = np.unique(epcs_twostim.name.values.tolist())
    supp_array = np.empty((len(twostims)))
    supp_array[:] = np.nan

    for cnt, stimmy in enumerate(twostims.tolist()):
        ABepo = resp.extract_epoch(stimmy)
        sep = get_sep_stim_names(stimmy)
        Aepo = resp.extract_epoch('STIM_' + sep[0] + '_null')
        Bepo = resp.extract_epoch('STIM_null_' + sep[1])
        lenA, lenB = Aepo.shape[0], Bepo.shape[0]
        min_rep = np.min((Aepo.shape[0], Bepo.shape[0]))
        lin_resp = (Aepo[:min_rep, :, :] + Bepo[:min_rep, :, :])

        mean_lin = np.nanmean(np.squeeze(lin_resp), axis=(0, 1))
        mean_combo = np.nanmean(np.squeeze(ABepo), axis=(0, 1))
        supp_array[cnt] = mean_lin - mean_combo

    spike_times = rec['resp']._data[cellid]
    count = 0
    for index, row in epcs.iterrows():
        count += np.sum((spike_times > row['start'])
                        & (spike_times < row['end']))
    SR = count / (epcs['end'] - epcs['start']).sum()

    resp = rec['resp'].rasterize()
    resp = add_stimtype_epochs(resp)
    ps = resp.select_epochs(['PreStimSilence']).as_continuous()
    ff = np.isfinite(ps)
    SR_rast = ps[ff].mean() * resp.fs
    SR_std = ps[ff].std() * resp.fs
Example #19
def calc_psth_metrics(batch, cellid, parmfile=None, paths=None):
    start_win_offset = 0  # Time (in sec) to offset the start of the window used to calculate threshold, excitatory percentage, and inhibitory percentage
    if parmfile:
        manager = BAPHYExperiment(parmfile)
    else:
        manager = BAPHYExperiment(cellid=cellid, batch=batch)

    options = ohel.get_load_options(
        batch)  #gets options that will include gtgram if batch=339
    rec = manager.get_recording(**options)

    area_df = db.pd_query(
        f"SELECT DISTINCT area FROM sCellFile where cellid like '{manager.siteid}%%'"
    )
    area = area_df.area.iloc[0]

    if rec['resp'].epochs[rec['resp'].epochs['name'] ==
                          'PASSIVE_EXPERIMENT'].shape[0] >= 2:
        rec = ohel.remove_olp_test(rec)

    rec['resp'] = rec['resp'].extract_channels([cellid])
    resp = copy.copy(rec['resp'].rasterize())
    rec['resp'].fs = 100

    norm_spont, SR, STD = ohel.remove_spont_rate_std(resp)
    params = ohel.get_expt_params(resp, manager, cellid)

    epcs = rec['resp'].epochs[rec['resp'].epochs['name'] ==
                              'PreStimSilence'].copy()
    ep2 = rec['resp'].epochs[rec['resp'].epochs['name'] ==
                             'PostStimSilence'].iloc[0].copy()
    params['prestim'], params['poststim'] = epcs.iloc[0][
        'end'], ep2['end'] - ep2['start']
    params['lenstim'] = ep2['end']

    stim_epochs = ep.epoch_names_matching(resp.epochs, 'STIM_')

    if paths and cellid[:3] == 'TBR':
        print(f"Deprecated, run on {cellid} though...")
        stim_epochs, rec, resp = ohel.path_tabor_get_epochs(
            stim_epochs, rec, resp, params)

    epoch_repetitions = [resp.count_epoch(cc) for cc in stim_epochs]
    full_resp = np.empty((max(epoch_repetitions), len(stim_epochs),
                          (int(params['lenstim']) * rec['resp'].fs)))
    full_resp[:] = np.nan
    for cnt, epo in enumerate(stim_epochs):
        resps_list = resp.extract_epoch(epo)
        full_resp[:resps_list.shape[0], cnt, :] = resps_list[:, 0, :]

    #Calculate a few metrics
    corcoef = ohel.calc_base_reliability(full_resp)
    avg_resp = ohel.calc_average_response(full_resp, params)
    snr = compute_snr(resp)

    #Grab and label epochs that have two sounds in them (no null)
    presil, postsil = int(params['prestim'] * rec['resp'].fs), int(
        params['poststim'] * rec['resp'].fs)
    twostims = resp.epochs[resp.epochs['name'].str.count('-0-1') == 2].copy()
    ep_twostim = twostims.name.unique().tolist()
    ep_twostim.sort()

    ep_names = resp.epochs[resp.epochs['name'].str.contains('STIM_')].copy()
    ep_names = ep_names.name.unique().tolist()
    ep_types = list(map(ohel.label_ep_type, ep_names))
    ep_df = pd.DataFrame({'name': ep_names, 'type': ep_types})

    cell_df = []
    for cnt, stimmy in enumerate(ep_twostim):
        kind = ohel.label_pair_type(stimmy)
        seps = (stimmy.split('_')[1], stimmy.split('_')[2])
        BG, FG = seps[0].split('-')[0][2:], seps[1].split('-')[0][2:]

        Aepo, Bepo = 'STIM_' + seps[0] + '_null', 'STIM_null_' + seps[1]

        rAB = resp.extract_epoch(stimmy)
        rA, rB = resp.extract_epoch(Aepo), resp.extract_epoch(Bepo)

        fn = lambda x: np.atleast_2d(sp.smooth(x.squeeze(), 3, 2) - SR)
        rAsm = np.squeeze(np.apply_along_axis(fn, 2, rA))
        rBsm = np.squeeze(np.apply_along_axis(fn, 2, rB))
        rABsm = np.squeeze(np.apply_along_axis(fn, 2, rAB))

        rA_st, rB_st = rAsm[:, presil:-postsil], rBsm[:, presil:-postsil]
        rAB_st = rABsm[:, presil:-postsil]

        rAm, rBm = np.nanmean(rAsm, axis=0), np.nanmean(rBsm, axis=0)
        rABm = np.nanmean(rABsm, axis=0)

        AcorAB = np.corrcoef(
            rAm, rABm)[0, 1]  # Corr between resp to A and resp to dual
        BcorAB = np.corrcoef(
            rBm, rABm)[0, 1]  # Corr between resp to B and resp to dual

        A_FR, B_FR, AB_FR = np.nanmean(rA_st), np.nanmean(rB_st), np.nanmean(
            rAB_st)

        min_rep = np.min(
            (rA.shape[0],
             rB.shape[0]))  #only will do something if SoundRepeats==Yes
        lin_resp = np.nanmean(rAsm[:min_rep, :] + rBsm[:min_rep, :], axis=0)
        supp = np.nanmean(lin_resp - AB_FR)

        AcorLin = np.corrcoef(
            rAm, lin_resp)[0, 1]  # Corr between resp to A and resp to lin
        BcorLin = np.corrcoef(
            rBm, lin_resp)[0, 1]  # Corr between resp to B and resp to lin

        Apref, Bpref = AcorAB - AcorLin, BcorAB - BcorLin
        pref = Apref - Bpref

        # if params['Binaural'] == 'Yes':
        #     dA, dB = ohel.get_binaural_adjacent_epochs(stimmy)
        #
        #     rdA, rdB = resp.extract_epoch(dA), resp.extract_epoch(dB)
        #     rdAm = np.nanmean(np.squeeze(np.apply_along_axis(fn, 2, rdA))[:, presil:-postsil], axis=0)
        #     rdBm = np.nanmean(np.squeeze(np.apply_along_axis(fn, 2, rdB))[:, presil:-postsil], axis=0)
        #
        #     ABcordA = np.corrcoef(rABm, rdAm)[0, 1]  # Corr between resp to AB and resp to BG swap
        #     ABcordB = np.corrcoef(rABm, rdBm)[0, 1]  # Corr between resp to AB and resp to FG swap

        cell_df.append({
            'epoch': stimmy,
            'kind': kind,
            'BG': BG,
            'FG': FG,
            'AcorAB': AcorAB,
            'BcorAB': BcorAB,
            'AcorLin': AcorLin,
            'BcorLin': BcorLin,
            'Apref': Apref,
            'Bpref': Bpref,
            'pref': pref,
            'combo_FR': AB_FR,
            'bg_FR': A_FR,
            'fg_FR': B_FR,
            'supp': supp
        })

    cell_df = pd.DataFrame(cell_df)
    cell_df['SR'], cell_df['STD'] = SR, STD
    # cell_df['corcoef'], cell_df['avg_resp'], cell_df['snr'] = corcoef, avg_resp, snr
    cell_df.insert(loc=0, column='area', value=area)

    return cell_df
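    # NOTE: everything below this return statement is unreachable; it appears
    # to have been retained from an earlier version of this analysis.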

    # COMPUTE ALL FOLLOWING metrics using smoothed driven rate
    # est, val = rec.split_using_epoch_occurrence_counts(rec,epoch_regex='^STIM_')
    val = rec.copy()
    val['resp'] = val['resp'].rasterize()
    val['stim'] = val['stim'].rasterize()
    val = preproc.average_away_epoch_occurrences(val, epoch_regex='^STIM_')

    # smooth and subtract SR
    fn = lambda x: np.atleast_2d(sp.smooth(x.squeeze(), 3, 2) - SR)
    val['resp'] = val['resp'].transform(fn)
    val['resp'] = ohel.add_stimtype_epochs(val['resp'])

    if val['resp'].count_epoch('REFERENCE'):
        epochname = 'REFERENCE'
    else:
        epochname = 'TRIAL'
    sts = val['resp'].epochs['start'].copy()
    nds = val['resp'].epochs['end'].copy()
    sts_rec = rec['resp'].epochs['start'].copy()
    val['resp'].epochs['end'] = val['resp'].epochs['start'] + params['prestim']
    ps = val['resp'].select_epochs([epochname]).as_continuous()
    ff = np.isfinite(ps)
    SR_av = ps[ff].mean() * resp.fs
    SR_av_std = ps[ff].std() * resp.fs

    # Compute max over single-voice trials
    val['resp'].epochs['end'] = nds
    val['resp'].epochs['start'] = sts
    val['resp'].epochs[
        'start'] = val['resp'].epochs['start'] + params['prestim']
    TotalMax = np.nanmax(val['resp'].as_continuous())
    ps = np.hstack((val['resp'].extract_epoch('10').flatten(),
                    val['resp'].extract_epoch('01').flatten()))
    SinglesMax = np.nanmax(ps)

    # Compute threshold, excitatory percentage, and inhibitory percentage
    prestim, poststim = params['prestim'], params['poststim']
    val['resp'].epochs['end'] = nds
    val['resp'].epochs['start'] = sts
    val['resp'].epochs[
        'start'] = val['resp'].epochs['start'] + prestim + start_win_offset
    val['resp'].epochs['end'] = val['resp'].epochs['end'] - poststim
    thresh = np.array(((SR + SR_av_std) / resp.fs, (SR - SR_av_std) / resp.fs))
    thresh = np.array((SR / resp.fs + 0.1 * (SinglesMax - SR / resp.fs),
                       (SR - SR_av_std) / resp.fs))
    # SR/resp.fs - 0.5 * (np.nanmax(val['resp'].as_continuous()) - SR/resp.fs)]

    types = ['10', '01', '20', '02', '11', '12', '21', '22']
    excitatory_percentage = {}
    inhibitory_percentage = {}
    Max = {}
    Mean = {}
    for _type in types:
        if _type in val['resp'].epochs.name.values:
            ps = val['resp'].extract_epoch(_type).flatten()
            ff = np.isfinite(ps)
            excitatory_percentage[_type] = (ps[ff] >
                                            thresh[0]).sum() / ff.sum()
            inhibitory_percentage[_type] = (ps[ff] <
                                            thresh[1]).sum() / ff.sum()
            Max[_type] = ps[ff].max() / SinglesMax
            Mean[_type] = ps[ff].mean()

    # Compute threshold, excitatory percentage, and inhibitory percentage just over onset time
    # restore times
    val['resp'].epochs['end'] = nds
    val['resp'].epochs['start'] = sts
    # Change epochs to stimulus onset times
    val['resp'].epochs['start'] = val['resp'].epochs['start'] + prestim
    val['resp'].epochs['end'] = val['resp'].epochs['start'] + prestim + .5
    excitatory_percentage_onset = {}
    inhibitory_percentage_onset = {}
    Max_onset = {}
    for _type in types:
        ps = val['resp'].extract_epoch(_type).flatten()
        ff = np.isfinite(ps)
        excitatory_percentage_onset[_type] = (ps[ff] >
                                              thresh[0]).sum() / ff.sum()
        inhibitory_percentage_onset[_type] = (ps[ff] <
                                              thresh[1]).sum() / ff.sum()
        Max_onset[_type] = ps[ff].max() / SinglesMax

    # find correlations between double- and single-voice responses
    val['resp'].epochs['end'] = nds
    val['resp'].epochs['start'] = sts
    val['resp'].epochs['start'] = val['resp'].epochs['start'] + prestim
    rec['resp'].epochs['start'] = rec['resp'].epochs['start'] + prestim
    # over stim on time to end + 0.5
    val['linmodel'] = val['resp'].copy()
    val['linmodel']._data = np.full(val['linmodel']._data.shape, np.nan)
    types = ['11', '12', '21', '22']
    epcs = val['resp'].epochs[val['resp'].epochs['name'].str.contains(
        'STIM')].copy()
    epcs['type'] = epcs['name'].apply(ohel.label_ep_type)
    names = [[n.split('_')[1], n.split('_')[2]] for n in epcs['name']]
    EA = np.array([n[0] for n in names])
    EB = np.array([n[1] for n in names])

    r_dual_B, r_dual_A, r_dual_B_nc, r_dual_A_nc = {}, {}, {}, {}
    r_dual_B_bal, r_dual_A_bal = {}, {}
    r_lin_B, r_lin_A, r_lin_B_nc, r_lin_A_nc = {}, {}, {}, {}
    r_lin_B_bal, r_lin_A_bal = {}, {}

    N_ac = 200
    full_resp = rec['resp'].rasterize()
    full_resp = full_resp.transform(fn)
    for _type in types:
        inds = np.nonzero(epcs['type'].values == _type)[0]
        rA_st, rB_st, r_st, rA_rB_st = [], [], [], []
        init = True
        for ind in inds:
            # for each dual-voice response
            r = val['resp'].extract_epoch(epcs.iloc[ind]['name'])
            if np.any(np.isfinite(r)):
                print(epcs.iloc[ind]['name'])
                # Find the indices of single-voice responses that match this dual-voice response
                indA = np.where((EA[ind] == EA) & (EB == 'null'))[0]
                indB = np.where((EB[ind] == EB) & (EA == 'null'))[0]
                if (len(indA) > 0) & (len(indB) > 0):
                    # from pdb import set_trace
                    # set_trace()
                    rA = val['resp'].extract_epoch(epcs.iloc[indA[0]]['name'])
                    rB = val['resp'].extract_epoch(epcs.iloc[indB[0]]['name'])
                    r_st.append(
                        full_resp.extract_epoch(epcs.iloc[ind]['name'])[:,
                                                                        0, :])
                    rA_st_ = full_resp.extract_epoch(
                        epcs.iloc[indA[0]]['name'])[:, 0, :]
                    rB_st_ = full_resp.extract_epoch(
                        epcs.iloc[indB[0]]['name'])[:, 0, :]
                    rA_st.append(rA_st_)
                    rB_st.append(rB_st_)
                    minreps = np.min((rA_st_.shape[0], rB_st_.shape[0]))
                    rA_rB_st.append(rA_st_[:minreps, :] + rB_st_[:minreps, :])
                    if init:
                        rA_ = rA.squeeze()
                        rB_ = rB.squeeze()
                        r_ = r.squeeze()
                        rA_rB_ = rA.squeeze() + rB.squeeze()
                        init = False
                    else:
                        rA_ = np.hstack((rA_, rA.squeeze()))
                        rB_ = np.hstack((rB_, rB.squeeze()))
                        r_ = np.hstack((r_, r.squeeze()))
                        rA_rB_ = np.hstack(
                            (rA_rB_, rA.squeeze() + rB.squeeze()))
                    val['linmodel'] = val['linmodel'].replace_epoch(
                        epcs.iloc[ind]['name'], rA + rB, preserve_nan=False)
        ff = np.isfinite(r_) & np.isfinite(rA_) & np.isfinite(
            rB_)  # find places with data
        r_dual_A[_type] = np.corrcoef(rA_[ff], r_[ff])[
            0, 1]  # Correlation between response to A and response to dual
        r_dual_B[_type] = np.corrcoef(rB_[ff], r_[ff])[
            0, 1]  # Correlation between response to B and response to dual
        r_lin_A[_type] = np.corrcoef(
            rA_[ff], rA_rB_[ff]
        )[0,
          1]  # Correlation between response to A and response to linear 'model'
        r_lin_B[_type] = np.corrcoef(
            rB_[ff], rA_rB_[ff]
        )[0,
          1]  # Correlation between response to B and response to linear 'model'

        # correlations over single-trial data
        minreps = np.min([x.shape[0] for x in r_st])
        r_st = [x[:minreps, :] for x in r_st]
        r_st = np.concatenate(r_st, axis=1)
        rA_st = [x[:minreps, :] for x in rA_st]
        rA_st = np.concatenate(rA_st, axis=1)
        rB_st = [x[:minreps, :] for x in rB_st]
        rB_st = np.concatenate(rB_st, axis=1)
        rA_rB_st = [x[:minreps, :] for x in rA_rB_st]
        rA_rB_st = np.concatenate(rA_rB_st, axis=1)

        r_lin_A_bal[_type] = np.corrcoef(rA_st[0::2, ff].mean(axis=0),
                                         rA_rB_st[1::2, ff].mean(axis=0))[0, 1]
        r_lin_B_bal[_type] = np.corrcoef(rB_st[0::2, ff].mean(axis=0),
                                         rA_rB_st[1::2, ff].mean(axis=0))[0, 1]
        r_dual_A_bal[_type] = np.corrcoef(rA_st[0::2, ff].mean(axis=0),
                                          r_st[:, ff].mean(axis=0))[0, 1]
        r_dual_B_bal[_type] = np.corrcoef(rB_st[0::2, ff].mean(axis=0),
                                          r_st[:, ff].mean(axis=0))[0, 1]

        r_dual_A_nc[_type] = ohel.r_noise_corrected(rA_st, r_st)
        r_dual_B_nc[_type] = ohel.r_noise_corrected(rB_st, r_st)
        r_lin_A_nc[_type] = ohel.r_noise_corrected(rA_st, rA_rB_st)
        r_lin_B_nc[_type] = ohel.r_noise_corrected(rB_st, rA_rB_st)

        if _type == '11':
            r11 = nems.metrics.corrcoef._r_single(r_st, 200, 0)
        elif _type == '12':
            r12 = nems.metrics.corrcoef._r_single(r_st, 200, 0)
        elif _type == '21':
            r21 = nems.metrics.corrcoef._r_single(r_st, 200, 0)
        elif _type == '22':
            r22 = nems.metrics.corrcoef._r_single(r_st, 200, 0)
        # rac = _r_single(X, N)
        # r_ceiling = [nmet.r_ceiling(p, rec, 'pred', 'resp') for p in val_copy]

    # Things that used to happen only when _type == 'C' but still seem valid
    r_A_B = np.corrcoef(rA_[ff], rB_[ff])[0, 1]
    r_A_B_nc = r_noise_corrected(rA_st, rB_st)
    rAA = nems.metrics.corrcoef._r_single(rA_st, 200, 0)
    rBB = nems.metrics.corrcoef._r_single(rB_st, 200, 0)
    Np = 0
    rAA_nc = np.zeros(Np)
    rBB_nc = np.zeros(Np)
    hv = int(minreps / 2)
    for i in range(Np):
        inds = np.random.permutation(minreps)
        rAA_nc[i] = sp.r_noise_corrected(rA_st[inds[:hv]], rA_st[inds[hv:]])
        rBB_nc[i] = sp.r_noise_corrected(rB_st[inds[:hv]], rB_st[inds[hv:]])
    ffA = np.isfinite(rAA_nc)
    ffB = np.isfinite(rBB_nc)
    rAAm = rAA_nc[ffA].mean()
    rBBm = rBB_nc[ffB].mean()
    mean_nsA = rA_st.sum(axis=1).mean()
    mean_nsB = rB_st.sum(axis=1).mean()
    min_nsA = rA_st.sum(axis=1).min()
    min_nsB = rB_st.sum(axis=1).min()

    # Calculate correlation between linear 'model' and dual-voice response, and mean amount of suppression/enhancement relative to linear 'model'
    r_fit_linmodel = {}
    r_fit_linmodel_NM = {}
    r_ceil_linmodel = {}
    mean_enh = {}
    mean_supp = {}
    EnhP = {}
    SuppP = {}
    DualAboveZeroP = {}
    resp_ = copy.deepcopy(rec['resp'].rasterize())
    resp_.epochs['start'] = sts_rec
    fn = lambda x: np.atleast_2d(
        sp.smooth(x.squeeze(), 3, 2) - SR / val['resp'].fs)
    resp_ = resp_.transform(fn)
    for _type in types:
        val_copy = copy.deepcopy(val)
        #        from pdb import set_trace
        #        set_trace()
        val_copy['resp'] = val_copy['resp'].select_epochs([_type])
        # Correlation between linear 'model' (response to A plus response to B) and dual-voice response
        r_fit_linmodel_NM[_type] = nmet.corrcoef(val_copy, 'linmodel', 'resp')
        # r_ceil_linmodel[_type] = nems.metrics.corrcoef.r_ceiling(val_copy,rec,'linmodel', 'resp',exclude_neg_pred=False)[0]
        # Noise-corrected correlation between linear 'model' (response to A plus response to B) and dual-voice response
        r_ceil_linmodel[_type] = nems.metrics.corrcoef.r_ceiling(
            val_copy, rec, 'linmodel', 'resp')[0]

        pred = val_copy['linmodel'].as_continuous()
        resp = val_copy['resp'].as_continuous()
        ff = np.isfinite(pred) & np.isfinite(resp)
        # cc = np.corrcoef(sp.smooth(pred[ff],3,2), sp.smooth(resp[ff],3,2))
        cc = np.corrcoef(pred[ff], resp[ff])
        r_fit_linmodel[_type] = cc[0, 1]

        prdiff = resp[ff] - pred[ff]
        mean_enh[_type] = prdiff[prdiff > 0].mean() * val['resp'].fs
        mean_supp[_type] = prdiff[prdiff < 0].mean() * val['resp'].fs

        # Find percent of time the response is suppressed vs enhanced relative to what would be expected by a linear sum of single-voice responses
        # First, jackknife to find...
    #        Njk=10
    #        if _type is 'C':
    #            stims=['STIM_T+si464+si464','STIM_T+si516+si516']
    #        else:
    #            stims=['STIM_T+si464+si516', 'STIM_T+si516+si464']
    #        T=int(700+prestim*val['resp'].fs)
    #        Tps=int(prestim*val['resp'].fs)
    #        jns=np.zeros((Njk,T,len(stims)))
    #        for ns in range(len(stims)):
    #            for njk in range(Njk):
    #                resp_jn=resp_.jackknife_by_epoch(Njk,njk,stims[ns])
    #                jns[njk,:,ns]=np.nanmean(resp_jn.extract_epoch(stims[ns]),axis=0)
    #        jns=np.reshape(jns[:,Tps:,:],(Njk,700*len(stims)),order='F')
    #
    #        lim_models=np.zeros((700,len(stims)))
    #        for ns in range(len(stims)):
    #            lim_models[:,ns]=val_copy['linmodel'].extract_epoch(stims[ns])
    #        lim_models=lim_models.reshape(700*len(stims),order='F')
    #
    #        ff=np.isfinite(lim_models)
    #        mean_diff=(jns[:,ff]-lim_models[ff]).mean(axis=0)
    #        std_diff=(jns[:,ff]-lim_models[ff]).std(axis=0)
    #        serr_diff=np.sqrt(Njk/(Njk-1))*std_diff
    #
    #        thresh=3
    #        dual_above_zero = (jns[:,ff].mean(axis=0) > std_diff)
    #        sig_enh = ((mean_diff/serr_diff) > thresh) & dual_above_zero
    #        sig_supp = ((mean_diff/serr_diff) < -thresh)
    #        DualAboveZeroP[_type] = (dual_above_zero).sum()/len(mean_diff)
    #        EnhP[_type] = (sig_enh).sum()/len(mean_diff)
    #        SuppP[_type] = (sig_supp).sum()/len(mean_diff)

    #        time = np.arange(0, lim_models.shape[0])/ val['resp'].fs
    #        plt.figure();
    #        plt.plot(time,jns.mean(axis=0),'.-k');
    #        plt.plot(time,lim_models,'.-g');
    #        plt.plot(time[sig_enh],lim_models[sig_enh],'.r')
    #        plt.plot(time[sig_supp],lim_models[sig_supp],'.b')
    #        plt.title('Type:{:s}, Enh:{:.2f}, Sup:{:.2f}, Resp_above_zero:{:.2f}'.format(_type,EnhP[_type],SuppP[_type],DualAboveZeroP[_type]))
    #        from pdb import set_trace
    #        set_trace()
    #        a=2
    # thrsh=5
    #        EnhP[_type] = ((prdiff*val['resp'].fs) > thresh).sum()/len(prdiff)
    #        SuppP[_type] = ((prdiff*val['resp'].fs) < -thresh).sum()/len(prdiff)
    #    return val
    #    return {'excitatory_percentage':excitatory_percentage,
    #            'inhibitory_percentage':inhibitory_percentage,
    #            'r_fit_linmodel':r_fit_linmodel,
    #            'SR':SR, 'SR_std':SR_std, 'SR_av_std':SR_av_std}
    #
    return {
        'thresh': thresh * val['resp'].fs,
        'EP_A': excitatory_percentage['A'],
        'EP_B': excitatory_percentage['B'],
        #            'EP_C':excitatory_percentage['C'],
        'EP_I': excitatory_percentage['I'],
        'IP_A': inhibitory_percentage['A'],
        'IP_B': inhibitory_percentage['B'],
        #            'IP_C':inhibitory_percentage['C'],
        'IP_I': inhibitory_percentage['I'],
        'OEP_A': excitatory_percentage_onset['A'],
        'OEP_B': excitatory_percentage_onset['B'],
        #            'OEP_C':excitatory_percentage_onset['C'],
        'OEP_I': excitatory_percentage_onset['I'],
        'OIP_A': inhibitory_percentage_onset['A'],
        'OIP_B': inhibitory_percentage_onset['B'],
        #            'OIP_C':inhibitory_percentage_onset['C'],
        'OIP_I': inhibitory_percentage_onset['I'],
        'Max_A': Max['A'],
        'Max_B': Max['B'],
        #            'Max_C':Max['C'],
        'Max_I': Max['I'],
        'Mean_A': Mean['A'],
        'Mean_B': Mean['B'],
        #            'Mean_C':Mean['C'],
        'Mean_I': Mean['I'],
        'OMax_A': Max_onset['A'],
        'OMax_B': Max_onset['B'],
        #            'OMax_C':Max_onset['C'],
        'OMax_I': Max_onset['I'],
        'TotalMax': TotalMax * val['resp'].fs,
        'SinglesMax': SinglesMax * val['resp'].fs,
        #            'r_lin_C':r_fit_linmodel['C'],
        'r_lin_I': r_fit_linmodel['I'],
        #            'r_lin_C_NM':r_fit_linmodel_NM['C'],
        'r_lin_I_NM': r_fit_linmodel_NM['I'],
        #            'r_ceil_C':r_ceil_linmodel['C'],
        'r_ceil_I': r_ceil_linmodel['I'],
        #            'MEnh_C':mean_enh['C'],
        'MEnh_I': mean_enh['I'],
        #            'MSupp_C':mean_supp['C'],
        'MSupp_I': mean_supp['I'],
        #            'EnhP_C':EnhP['C'],
        #        'EnhP_I':EnhP['I'],
        #            'SuppP_C':SuppP['C'],
        #        'SuppP_I':SuppP['I'],
        #            'DualAboveZeroP_C':DualAboveZeroP['C'],
        #        'DualAboveZeroP_I':DualAboveZeroP['I'],
        #            'r_dual_A_C':r_dual_A['C'],
        'r_dual_A_I': r_dual_A['I'],
        #            'r_dual_B_C':r_dual_B['C'],
        'r_dual_B_I': r_dual_B['I'],
        #            'r_dual_A_C_nc':r_dual_A_nc['C'],
        'r_dual_A_I_nc': r_dual_A_nc['I'],
        #            'r_dual_B_C_nc':r_dual_B_nc['C'],
        'r_dual_B_I_nc': r_dual_B_nc['I'],
        #            'r_dual_A_C_bal':r_dual_A_bal['C'],
        'r_dual_A_I_bal': r_dual_A_bal['I'],
        #            'r_dual_B_C_bal':r_dual_B_bal['C'],
        'r_dual_B_I_bal': r_dual_B_bal['I'],
        #            'r_lin_A_C':r_lin_A['C'],
        'r_lin_A_I': r_lin_A['I'],
        #            'r_lin_B_C':r_lin_B['C'],
        'r_lin_B_I': r_lin_B['I'],
        #            'r_lin_A_C_nc':r_lin_A_nc['C'],
        'r_lin_A_I_nc': r_lin_A_nc['I'],
        #            'r_lin_B_C_nc':r_lin_B_nc['C'],
        'r_lin_B_I_nc': r_lin_B_nc['I'],
        #            'r_lin_A_C_bal':r_lin_A_bal['C'],
        'r_lin_A_I_bal': r_lin_A_bal['I'],
        #            'r_lin_B_C_bal':r_lin_B_bal['C'],
        'r_lin_B_I_bal': r_lin_B_bal['I'],
        'r_A_B': r_A_B,
        'r_A_B_nc': r_A_B_nc,
        'rAAm': rAAm,
        'rBBm': rBBm,
        'rAA': rAA,
        'rBB': rBB,
        'rII': rII,
        #           'rCC':rCC
        'rAA_nc': rAA_nc,
        'rBB_nc': rBB_nc,
        'mean_nsA': mean_nsA,
        'mean_nsB': mean_nsB,
        'min_nsA': min_nsA,
        'min_nsB': min_nsB,
        'SR': SR,
        'SR_std': SR_std,
        'SR_av_std': SR_av_std,
        'norm_spont': norm_spont,
        'spont_rate': spont_rate,
        'params': params,
        'corcoef': corcoef,
        'avg_resp': avg_resp,
        'snr': snr,
        'pair_names': twostims,
        'suppression': supp_array,
        'FR': FR_array,
        'rec': rec,
        'animal': cellid[:3]
    }
Example #20
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as ss

from nems_lbhb.baphy_experiment import BAPHYExperiment

parmfile = '/auto/data/daq/Cordyceps/training2020/Cordyceps_2020_05_25_BVT_1.m'

options = {'pupil': True, 'rasterfs': 100}

manager = BAPHYExperiment(parmfile=parmfile)
rec = manager.get_recording(**options)
pupil = rec['pupil']._data.squeeze()

# load facemap analysis
fmap_fn = '/auto/data/daq/Cordyceps/training2020/Cordyceps_2020_05_25_facemap.npy'
fmap_results = np.load(fmap_fn, allow_pickle=True).item()

fmap_pupil = fmap_results['pupil'][0]['area']

# resample nems pupil to match length
pupil = ss.resample(pupil, fmap_pupil.shape[0])

# plot results
f = plt.figure(figsize=(12, 4))
p1 = plt.subplot2grid((2, 3), (0, 0), colspan=2)
p2 = plt.subplot2grid((2, 3), (1, 0), colspan=2)
scat = plt.subplot2grid((2, 3), (0, 2), rowspan=2)

p1.plot(pupil)
p1.set_title('lbhb results')
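
scipy.signal.resample, used above to match the facemap trace length, does FFT-based resampling to an exact sample count; a self-contained sketch:

import numpy as np
import scipy.signal as ss

x = np.sin(np.linspace(0, 10, 1000))
y = ss.resample(x, 750)  # resample to exactly 750 samples
assert y.shape[0] == 750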
Example #21
def load_TwoStim(batch,
                 cellid,
                 fit_epochs,
                 modelspec_name,
                 loader='env100',
                 modelspecs_dir='/auto/users/luke/Code/nems/modelspecs',
                 fs=100,
                 get_est=True,
                 get_stim=True,
                 paths=None):
    # load into a recording object
    if not get_stim:
        loadkey = 'ns.fs100'
    else:
        raise RuntimeError('Put stimuli in batch')
    manager = BAPHYExperiment(cellid=cellid, batch=batch)

    options = {'rasterfs': 100, 'stim': False, 'resp': True}
    rec = manager.get_recording(**options)

    rec['resp'].fs = fs
    rec['resp'] = rec['resp'].extract_channels([cellid])
    # ----------------------------------------------------------------------------
    # DATA PREPROCESSING
    #
    # GOAL: Split your data into estimation and validation sets so that you can
    #       know when your model exhibits overfitting.

    # Method #1: Find which stimuli have the most reps, use those for val
    #    if not get_stim:
    #        del rec.signals['stim']

    ##Added Greg 9/22/21 for

    stim_epochs = ep.epoch_names_matching(rec['resp'].epochs, 'STIM_')

    val = rec.copy()
    val['resp'] = val['resp'].rasterize()
    val = preproc.average_away_epoch_occurrences(val, epoch_regex='^STIM_')

    if get_est:
        raise RuntimeError('Fix me')
        df0 = est['resp'].epochs.copy()
        df2 = est['resp'].epochs.copy()
        df0['name'] = df0['name'].apply(parse_stim_type)
        df0 = df0.loc[df0['name'].notnull()]
        df3 = pd.concat([df0, df2])

        est['resp'].epochs = df3
        est_sub = copy.deepcopy(est)
        est_sub['resp'] = est_sub['resp'].select_epochs(fit_epochs)
    else:
        est_sub = None

    df0 = val['resp'].epochs.copy()
    df2 = val['resp'].epochs.copy()
    # df0['name'] = df0['name'].apply(ts.parse_stim_type)
    df0['name'] = df0['name'].apply(ohel.label_ep_type)
    df0 = df0.loc[df0['name'].notnull()]
    df3 = pd.concat([df0, df2])

    val['resp'].epochs = df3
    val_sub = copy.deepcopy(val)
    val_sub['resp'] = val_sub['resp'].select_epochs(fit_epochs)

    # ----------------------------------------------------------------------------
    # GENERATE SUMMARY STATISTICS

    if modelspec_name is None:
        return None, [est_sub], [val_sub]
    else:
        fit_epochs_str = "+".join([str(x) for x in fit_epochs])
        mn = loader + '_subset_' + fit_epochs_str + '.' + modelspec_name
        an_ = modelspecs_dir + '/' + cellid + '/' + mn
        an = glob.glob(an_ + '*')
        if len(an) > 1:
            warnings.warn('{} models found, loading an[0]:{}'.format(
                len(an), an[0]))
            an = [an[0]]
        if len(an) == 1:
            filepath = an[0]
            modelspecs = [ms.load_modelspec(filepath)]
            modelspecs[0][0]['meta']['modelname'] = mn
            modelspecs[0][0]['meta']['cellid'] = cellid
        else:
            raise RuntimeError('not fit')
        # generate predictions
        est_sub, val_sub = nems.analysis.api.generate_prediction(
            est_sub, val_sub, modelspecs)

        return modelspecs, est_sub, val_sub
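
# Hedged usage sketch -- the batch, cellid and fit_epochs values below are
# hypothetical placeholders, not taken from the source:
# modelspecs, est_sub, val_sub = load_TwoStim(
#     328, 'ARM017a-01-10', ['10', '01', '11'],
#     modelspec_name=None, get_est=False, get_stim=False)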
Example #22
def fit_bgfg_model(batch, site):
    cell_df = nd.get_batch_cells(batch)
    cellid = [cell for cell in cell_df['cellid'].tolist()
              if cell[:7] == site][0]
    fs = 100

    manager = BAPHYExperiment(cellid=cellid, batch=batch)
    options = {'rasterfs': 100, 'stim': False, 'resp': True}
    rec = manager.get_recording(**options)
    newrec = ts.generate_psth_from_resp_bgfg(rec, manager)

    rec = newrec.copy()
    rec['resp'] = rec['resp'].rasterize()

    bgfg_psth_signal = rec['psth'].concatenate_channels(
        (rec['psth_bg'], rec['psth_fg']))
    bgfg_psth_signal.name = 'psth_bgfg'
    rec.add_signal(bgfg_psth_signal)

    epoch_regex = '^STIM'
    rec = nems.preprocessing.average_away_epoch_occurrences(
        rec, epoch_regex=epoch_regex)
    # keep (mask in) only epochs whose names contain neither "null" nor "0.5"
    ep = nems.epoch.epoch_names_matching(rec['psth'].epochs, '^STIM')
    for e in ep:
        if ('null' not in e) and ('0.5' not in e):
            print(e)
            rec = rec.or_mask(e)

    est = rec.copy()
    val = rec.copy()

    outputcount = rec['psth'].shape[0]
    inputcount = outputcount * 2

    insignal = 'psth_bgfg'
    outsignal = 'psth_sp'

    modelspec_name = f'wc.{inputcount}x{outputcount}-lvl.{outputcount}'

    # record some meta data for display and saving
    meta = {
        'cellid': site,
        'batch': 1,
        'modelname': modelspec_name,
        'recording': est.name
    }
    modelspec = initializers.from_keywords(modelspec_name,
                                           meta=meta,
                                           input_name=insignal,
                                           output_name=outsignal)

    init_weights = np.eye(outputcount, outputcount)
    init_weights = np.concatenate((init_weights, init_weights), axis=1)
    modelspec[0]['phi']['coefficients'] = init_weights / 2

    # RUN AN ANALYSIS
    # GOAL: Fit your model to your data, producing the improved modelspecs.
    #       Note that: nems.analysis.* will return a list of modelspecs, sorted
    #       in descending order of how they performed on the fitter's metric.

    # then fit full nonlinear model
    fit_kwargs = {'tolerance': 1e-5, 'max_iter': 100000}
    modelspec = nems.analysis.api.fit_basic(est,
                                            modelspec,
                                            fitter=scipy_minimize,
                                            fit_kwargs=fit_kwargs)

    # GENERATE SUMMARY STATISTICS
    print('Generating summary statistics ...')

    # generate predictions
    est, val = nems.analysis.api.generate_prediction(est, val, modelspec)

    # evaluate prediction accuracy
    modelspec = nems.analysis.api.standard_correlation(est, val, modelspec)

    print("Performance: r_fit={0:.3f} r_test={1:.3f}".format(
        modelspec.meta['r_fit'][0][0], modelspec.meta['r_test'][0][0]))

    ctx = {'modelspec': modelspec, 'rec': rec, 'val': val, 'est': est}
    xfspec = []

    #import nems.gui.editors as gui
    #gui.browse_xform_fit(ctx, xfspec)

    f, ax = plt.subplots(4, 1, figsize=(12, 6))
    cellnumber = 6
    dur = 2000
    r = val.apply_mask()
    ax[0].plot(r['pred'].as_continuous()[cellnumber, :dur])
    ax[0].plot(r['psth_sp'].as_continuous()[cellnumber, :dur])
    ax[1].plot(r['psth_fg'].as_continuous()[cellnumber, :dur])
    ax[2].plot(r['psth_bg'].as_continuous()[cellnumber, :dur])
    ax[3].plot(r['mask'].as_continuous()[0, :dur])

    #plt.legend(('pred','actual','mask'))

    plt.figure()
    plt.imshow(modelspec.phi[0]['coefficients'])
    plt.colorbar()

    return modelspec, val, r
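
# Hedged usage sketch; batch 329 and site 'ARM026b' appear in the commented
# script below and may not exist in every database:
# modelspec, val, r = fit_bgfg_model(329, 'ARM026b')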


# aw = browse_recording(val, ['psth_sp','pred', 'psth_bg', 'psth_fg'], cellid='ARM017a-01-10')

#
# batch=329
# cell_df=nd.get_batch_cells(batch)
# cell_list=cell_df['cellid'].tolist()
# fs=100
#
# cell_list = [cell for cell in cell_list if cell[:3] != 'HOD']
# # cell_list = [cell for cell in cell_list if cell[:7] == 'ARM026b']
# cell_dict = {cell[0:7]:cell for cell in cell_list}
#
# rec_dict = dict()
# for site, cell in cell_dict.items():
#     manager = BAPHYExperiment(cellid=cell, batch=batch)
#     options = {'rasterfs': 100,
#                'stim': False,
#                'resp': True}
#     rec = manager.get_recording(**options)
#     rec_dict[site] = ts.generate_psth_from_resp_bgfg(rec, manager)
#
# cellid='ARM026b'
# rec=rec_dict[cellid].copy()
# rec['resp']=rec['resp'].rasterize()
# # (the remainder of this commented-out script duplicates the body of
# # fit_bgfg_model above)
Example #23
parmfiles['pumpdur'] = parmfiles['pumpdur'].str.replace(r'\.0', '', regex=True)
parmfiles['pumpdur'] = parmfiles['pumpdur'].str.replace(' ', '+', regex=False)

parmfiles['session'] = (parmfiles.date.dt.strftime("%Y-%m-%d") + "_"
                        + parmfiles['svalue'] + "_" + parmfiles['pumpdur'])

r_center = np.nan * np.ones((len(parmfiles.session.unique()), 30))
nr_center = np.nan * np.ones((len(parmfiles.session.unique()), 30))
raw = np.nan * np.ones((len(parmfiles.session.unique()), 15))

# loop over days
for i, session in enumerate(parmfiles.session.unique()):
    files = parmfiles[parmfiles.session==session]
    date = files.iloc[0].date
    files = [r + p for r, p in zip(files['resppath'], files['parmfile'])]

    manager = BAPHYExperiment(files)
    rec = manager.get_recording(**{'rasterfs': 100})

    # get rewarded target frequencies (and non-rewarded)
    params = manager.get_baphy_exptparams()[0]
    PumpDur = np.array(params['BehaveObject'][1]['PumpDuration'])
    TargetFreq = np.array(params['TrialObject'][1]['TargetHandle'][1]['Names'])
    RTarget = TargetFreq[PumpDur>0]
    RTargetStr = [str(r) for r in RTarget]
    NRTarget = TargetFreq[PumpDur==0]
    NRTargetStr = [str(r) for r in NRTarget]

    # get list of reference stimuli
    ref_stims = [e for e in rec.epochs.name.unique() if 'STIM_' in e]
    cfs = [int(e.replace('STIM_', '')) for e in ref_stims]
    idx = np.argsort(cfs)
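    # The scrape truncates here; a likely next step (hedged sketch) orders the
    # reference stimuli by center frequency using the sort index:
    ref_stims = np.array(ref_stims)[idx].tolist()
    cfs = np.sort(cfs).tolist()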
Example #24
File: OLP_helpers.py Project: LBHB/nems_db
def get_sound_statistics(weight_df, plot=True):
    '''5/12/22 Takes a weight_df (using its first cellid and batch), figures out all the
    sounds that were played in that experiment, and calculates some statistics that it
    plots side by side. Also returns those numbers in a cumbersome dataframe.'''
    lfreq, hfreq, bins = 100, 24000, 48
    cid, btch = weight_df.cellid.iloc[0], weight_df.batch.iloc[0]
    manager = BAPHYExperiment(cellid=cid, batch=btch)
    expt_params = manager.get_baphy_exptparams()  # Using Charlie's manager
    ref_handle = expt_params[-1]['TrialObject'][1]['ReferenceHandle'][1]
    BG_folder, FG_folder = ref_handle['BG_Folder'], ref_handle['FG_Folder']

    bbs = list(set([bb.split('_')[1][:2] for bb in weight_df.epoch]))
    ffs = list(set([ff.split('_')[2][:2] for ff in weight_df.epoch]))
    bbs.sort(key=int), ffs.sort(key=int)

    bg_paths = [glob.glob((f'/auto/users/hamersky/baphy/Config/lbhb/SoundObjects/@OverlappingPairs/'
                           f'{BG_folder}/{bb}*.wav'))[0] for bb in bbs]
    fg_paths = [glob.glob((f'/auto/users/hamersky/baphy/Config/lbhb/SoundObjects/@OverlappingPairs/'
                           f'{FG_folder}/{ff}*.wav'))[0] for ff in ffs]
    paths = bg_paths + fg_paths
    bgname = [bb.split('/')[-1].split('.')[0] for bb in bg_paths]
    fgname = [ff.split('/')[-1].split('.')[0] for ff in fg_paths]
    names = bgname + fgname

    Bs, Fs = ['BG'] * len(bgname), ['FG'] * len(fgname)
    labels = Bs + Fs

    sounds = []
    means = np.empty((bins, len(names)))
    means[:] = np.nan
    for cnt, sn, pth, ll in zip(range(len(labels)), names, paths, labels):
        sfs, W = wavfile.read(pth)
        spec = gtgram(W, sfs, 0.02, 0.01, bins, lfreq, hfreq)

        dev = np.std(spec, axis=1)

        freq_mean = np.nanmean(spec, axis=1)
        x_freq = np.logspace(np.log2(lfreq), np.log2(hfreq), num=bins, base=2)
        csm = np.cumsum(freq_mean)
        big = np.max(csm)

        freq75 = x_freq[np.abs(csm - (big * 0.75)).argmin()]
        freq25 = x_freq[np.abs(csm - (big * 0.25)).argmin()]
        freq50 = x_freq[np.abs(csm - (big * 0.5)).argmin()]
        bandw = np.log2(freq75 / freq25)

        means[:, cnt] = freq_mean

        sounds.append({'name': sn,
                       'type': ll,
                       'std': dev,
                       'bandwidth': bandw,
                       '75th': freq75,
                       '25th': freq25,
                       'center': freq50,
                       'spec': spec,
                       'mean_freq': freq_mean,
                       'freq_stationary': np.std(freq_mean)})

    sound_df = pd.DataFrame(sounds)

    # allmean = np.nanmean(means, axis=1, keepdims=True)
    # norm_mean = [aa / allmean for aa in sound_df.mean_freq]
    # freq_stationarity = [np.std(aa) for aa in allmean]
    # sound_df['norm_mean'],  = norm_mean
    # sound_df['freq_stationary'] = freq_stationarity

    ss = sound_df.explode('std')
    # frs = sound_df.explode('norm_mean')
    # frs = sound_df.explode('mean_freq')
    snames = [dd[2:] for dd in sound_df.name]

    if plot:
        fig, ax = plt.subplots(1, 3, figsize=(18, 8))

        sb.barplot(x='name', y='std', palette=["lightskyblue" if x == 'BG' else 'yellowgreen' for x in sound_df.type],
                   data=ss, ci=68, ax=ax[0], errwidth=1)
        ax[0].set_xticklabels(snames, rotation=90, fontweight='bold', fontsize=7)
        ax[0].set_ylabel('Non-stationariness', fontweight='bold', fontsize=12)
        ax[0].spines['top'].set_visible(True), ax[0].spines['right'].set_visible(True)
        ax[0].set(xlabel=None)

        sb.barplot(x='name', y='bandwidth',
                   palette=["lightskyblue" if x == 'BG' else 'yellowgreen' for x in sound_df.type],
                   data=sound_df, ax=ax[1])
        ax[1].set_xticklabels(snames, rotation=90, fontweight='bold', fontsize=7)
        ax[1].set_ylabel('Bandwidth (octaves)', fontweight='bold', fontsize=12)
        ax[1].spines['top'].set_visible(True), ax[1].spines['right'].set_visible(True)
        ax[1].set(xlabel=None)

        sb.barplot(x='name', y='freq_stationary',
                   palette=["lightskyblue" if x == 'BG' else 'yellowgreen' for x in sound_df.type],
                   data=sound_df, ax=ax[2])
        ax[2].set_xticklabels(snames, rotation=90, fontweight='bold', fontsize=7)
        ax[2].set_ylabel('Frequency Non-stationariness', fontweight='bold', fontsize=12)
        ax[2].spines['top'].set_visible(True), ax[2].spines['right'].set_visible(True)
        ax[2].set(xlabel=None)

        fig.tight_layout()

    return sound_df
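
# Hedged usage sketch (assumes weight_df already carries 'cellid', 'batch' and
# 'epoch' columns in the format the function expects):
# sound_df = get_sound_statistics(weight_df, plot=False)
# print(sound_df[['name', 'type', 'bandwidth', 'center']])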
Example #25
LI_EARLY = np.full((4, len(uDate)), np.nan)
LI_MID = np.full((4, len(uDate)), np.nan)
LI_LATE = np.full((4, len(uDate)), np.nan)
LI_ALL = np.full((4, len(uDate)), np.nan)
VALID_TRIALS = np.full(len(uDate), np.nan)
VALID_TRIALS_EAR = np.full(len(uDate), np.nan)
VALID_TRIALS_LAT = np.full(len(uDate), np.nan)
for idx, ud in enumerate(uDate):
    print(f"Loading data from {ud}")
    parmfiles = d[d.date == ud].parmfile_path.values.tolist()
    # add catch to make sure "siteid" the same for all files
    sid = [p.split(os.path.sep)[-1][:7] for p in parmfiles]
    if np.any(np.array(sid) != sid[0]):
        bad_idx = (np.array(sid) != sid[0])
        parmfiles = np.array(parmfiles)[~bad_idx].tolist()
    manager = BAPHYExperiment(parmfiles)

    # make sure we only load active behavior files
    pf_mask = [
        True if k['BehaveObjectClass'] == 'RewardTargetLBHB' else False
        for k in manager.get_baphy_exptparams()
    ]
    if sum(pf_mask) != len(manager.parmfile):
        parmfiles = np.array(manager.parmfile)[pf_mask].tolist()
        manager = BAPHYExperiment(parmfiles)

    # get reaction times of targets, only for "correct" trials
    bev = manager.get_behavior_events(**options)
    bev = manager._stack_events(bev)
Example #26
# store HR and DI near each cf, regardless of reward value
HR = np.nan * np.zeros((len(files), len(cfs)))
DI = np.nan * np.zeros((len(files), len(cfs)))
# store LI as function of both cfs (rew/n.r.)
LI = np.nan * np.zeros((len(files), len(cfs), len(cfs)))
# store experiment length
exptlen = np.nan * np.zeros(len(files))
# store total repetitions
ref_reps = np.nan * np.zeros(len(files))
tar_reps = np.nan * np.zeros((2, len(files)))
# store trial counts
totalBaphyTrials = np.nan * np.zeros(len(files))
totalValidTrials = np.nan * np.zeros(len(files))

for i, f in enumerate(files):
    manager = BAPHYExperiment(f)
    options = {'pupil': False, 'rasterfs': 100}
    rec = manager.get_recording(**options)

    try:
        out_valid = manager.get_behavior_performance(trials=None, **options)

        #options.update({'keep_following_incorrect_trial': True, 'keep_cue_trials': True, 'keep_early_trials': True})
        #out_all = manager.get_behavior_performance(trials=None, **options)

        # get target freqs
        tars = list(out_valid['DI'].keys())
        int_tar = [int(t.replace('TAR_', '')) for t in tars]
        tars = np.array(tars)[np.argsort(int_tar)]
        if len(tars) >= 2:
            exptparams = manager.get_baphy_exptparams()[0]
Example #27
parmfiles['svalue'] = parmfiles['svalue'].str.replace(' ', '+', regex=False)
parmfiles['pumpdur'] = parmfiles['pumpdur'].str.replace(r'\.0', '', regex=True)
parmfiles['pumpdur'] = parmfiles['pumpdur'].str.replace(' ', '+', regex=False)

parmfiles['session'] = (parmfiles.date.dt.strftime("%Y-%m-%d") + "_"
                        + parmfiles['svalue'] + "_" + parmfiles['pumpdur'])

df = pd.DataFrame()

# now, loop over each unique session and decide if it qualifies to keep based on 
# parameters set above
for session in parmfiles.session.unique():
    files = parmfiles[parmfiles.session == session]
    date = files.iloc[0].date
    files = [r + p for r, p in zip(files['resppath'], files['parmfile'])]

    manager = BAPHYExperiment(files)
    rec = manager.get_recording(**options)
    rec = rec.and_mask('INVALID_BAPHY_TRIAL', invert=True)

    # search for the number of good trials in which a target was actually presented (i.e. not a false alarm)
    PumpDur = np.array([float(i) for i in session.split('_')[-1].split('+')])
    TargetFreq = np.array([int(i) for i in session.split('_')[-2].split('+')])
    targets = [t for t in rec.epochs.name.unique() if 'TAR_' in t]
    tar_folded = rec['fileidx'].extract_epochs(targets, mask=rec['mask'])
    rew_tars = TargetFreq[PumpDur>0]
    nr_tars = TargetFreq[PumpDur==0]
    rew_tar_str = ['TAR_'+str(t) for t in rew_tars]
    rew_tar_str = [t for t in rew_tar_str if t in tar_folded.keys()]
    nr_tar_str = ['TAR_'+str(t) for t in nr_tars]
    nr_tar_str = [t for t in nr_tar_str if t in tar_folded.keys()]
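    # The scrape truncates here; a plausible continuation (hedged sketch)
    # counts valid presentations of rewarded vs. non-rewarded targets:
    n_rew_trials = sum(tar_folded[t].shape[0] for t in rew_tar_str)
    n_nr_trials = sum(tar_folded[t].shape[0] for t in nr_tar_str)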
    
Example #28
def plot_binaural_psths(df, cellid, bg, fg, batch, save=False, close=False):
    '''Takes a dataframe from ohel.calc_psth_metrics along with a cellid and a pair of
    sounds, and plots all the spatial psth combos with a spectrogram of each sound. Can save.'''
    manager = BAPHYExperiment(cellid=cellid, batch=batch)
    options = ohel.get_load_options(batch)
    rec = manager.get_recording(**options)

    rec['resp'] = rec['resp'].extract_channels([cellid])
    resp = copy.copy(rec['resp'].rasterize())

    expt_params = manager.get_baphy_exptparams()  # Using Charlie's manager
    if len(expt_params) == 1:
        ref_handle = expt_params[0]['TrialObject'][1]['ReferenceHandle'][1]
    else:
        ref_handle = expt_params[-1]['TrialObject'][1]['ReferenceHandle'][1]
    BG_folder, FG_folder = ref_handle['BG_Folder'], ref_handle['FG_Folder']

    ## This could be made independent of df by loading the epochs of the given cell and
    ## applying the type-label function; that is all df is used for here.

    df_filtered = df[(df.BG == bg) & (df.FG == fg) & (df.cellid == cellid)]
    if len(df_filtered) == 0:
        pairs = get_pair_names(cellid, df, show=False)
        raise ValueError(f"The inputted BG: {bg} and FG: {fg} are not in {cellid}.\n"
                         f"Maybe try one of these:\n{pairs}")

    epochs = []
    name = df_filtered.epoch.loc[df_filtered.kind == '11'].values[0]
    bb, ff = name.split('_')[1], name.split('_')[2]
    bb1, ff1 = name.replace(ff, 'null'), name.replace(bb, 'null')
    epochs.append(bb1), epochs.append(ff1)

    name = df_filtered.epoch.loc[df_filtered.kind == '22'].values[0]
    bb, ff = name.split('_')[1], name.split('_')[2]
    bb2, ff2 = name.replace(ff, 'null'), name.replace(bb, 'null')
    epochs.append(bb2), epochs.append(ff2)
    epochs.extend(df_filtered.epoch.values)

    r = resp.extract_epochs(epochs)
    SR = df_filtered['SR'].values[0]

    f = plt.figure(figsize=(18, 9))
    psth11 = plt.subplot2grid((9, 8), (0, 0), rowspan=3, colspan=3)
    psth12 = plt.subplot2grid((9, 8), (0, 3), rowspan=3, colspan=3, sharey=psth11)
    psth21 = plt.subplot2grid((9, 8), (3, 0), rowspan=3, colspan=3, sharey=psth11)
    psth22 = plt.subplot2grid((9, 8), (3, 3), rowspan=3, colspan=3, sharey=psth11)
    specA1 = plt.subplot2grid((9, 8), (7, 0), rowspan=1, colspan=3)
    specB1 = plt.subplot2grid((9, 8), (8, 0), rowspan=1, colspan=3)
    specA2 = plt.subplot2grid((9, 8), (7, 3), rowspan=1, colspan=3)
    specB2 = plt.subplot2grid((9, 8), (8, 3), rowspan=1, colspan=3)
    psthbb = plt.subplot2grid((9, 8), (0, 6), rowspan=3, colspan=2, sharey=psth11)
    psthff = plt.subplot2grid((9, 8), (3, 6), rowspan=3, colspan=2, sharey=psth11)
    ax = [psth11, psth12, psth21, psth22, specA1, specB1, specA2, specB2, psthbb, psthff]

    prestim = resp.epochs[resp.epochs['name'] == 'PreStimSilence'].copy().iloc[0]['end']
    time = (np.arange(0, r[epochs[0]].shape[-1]) / options['rasterfs']) - prestim

    # r_mean = {e: np.squeeze(np.nanmean(r[e], axis=0)) for e in epochs}
    r_mean = {e: np.squeeze(np.nanmean(r[e], axis=0)) - SR for e in epochs}

    epochs.extend(['lin11', 'lin12', 'lin21', 'lin22'])
    bg1, fg1, bg2, fg2 = epochs[0], epochs[1], epochs[2], epochs[3]
    r_mean['lin11'], r_mean['lin12'] = r_mean[bg1]+r_mean[fg1], r_mean[bg1]+r_mean[fg2]
    r_mean['lin21'], r_mean['lin22'] = r_mean[bg2]+r_mean[fg1], r_mean[bg2]+r_mean[fg2]

    colors = ['deepskyblue'] *3 + ['violet'] *3 + ['yellowgreen'] *3 + ['darksalmon'] *3 \
             + ['dimgray'] *4 + ['black'] *4
    styles = ['-'] *16 + [':'] *4
    ax_num = [0, 1, 8, 2, 3, 8, 0, 2, 9, 1, 3, 9, 0, 1, 2, 3, 0, 1, 2, 3]
    ep_num = [0, 0, 0, 2, 2, 2, 1, 1, 1, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    labels = ['BG1'] *3 + ['BG2'] *3 + ['FG1'] *3 + ['FG2'] *3 \
             + ['BG1+FG1'] + ['BG1+FG2'] + ['BG2+FG1'] + ['BG2+FG2'] + ['LS'] *4

    for e, a, c, s, l in zip(ep_num, ax_num, colors, styles, labels):
        ax[a].plot(time, sf.gaussian_filter1d(r_mean[epochs[e]], sigma=2)
                   * options['rasterfs'], color=c, linestyle=s, label=l)

    ymin, ymax = ax[0].get_ylim()
    AXS = [0, 1, 2, 3, 8, 9]
    for AX, tt, aab, bab, ali, bli, prf in zip(range(4), df_filtered.kind, df_filtered.AcorAB,
                                          df_filtered.BcorAB, df_filtered.AcorLin,
                                               df_filtered.BcorLin, df_filtered.pref):
        ax[AX].legend((f'BG{tt[0]}, corr={np.around(aab, 3)}',
                       f'FG{tt[1]}, corr={np.around(bab, 3)}',
                       f'BG{tt[0]}+FG{tt[1]}',
                       f'LS, Acorr={np.around(ali, 3)}\nBcorr={np.around(bli, 3)}\npref={np.around(prf, 3)}'))
    for AX in AXS:
        ax[AX].vlines([0, 1.0], ymin, ymax, color='black', lw=0.75, ls='--')
        ax[AX].vlines(0.5, ymax * 0.9, ymax, color='black', lw=0.75, ls=':')
        ax[AX].spines['right'].set_visible(True), ax[AX].spines['top'].set_visible(True)
        if AX !=8 and AX !=9:
            ax[AX].set_xlim((-prestim * 0.5), (1 + (prestim * 0.75)))
        else:
            ax[AX].set_xlim((-prestim * 0.15), (1 + (prestim * 0.25)))

        if AX == 0 or AX == 1 or AX == 8:
            plt.setp(ax[AX].get_xticklabels(), visible=False)
        if AX == 1 or AX == 3 or AX == 8 or AX == 9:
            plt.setp(ax[AX].get_yticklabels(), visible=False)
        if AX == 2 or AX == 3 or AX == 9:
            ax[AX].set_xlabel('Time(s)', fontweight='bold', fontsize=10)
        if AX == 0 or AX == 2:
            ax[AX].set_ylabel('Spikes', fontweight='bold', fontsize=10)

    ax[0].set_title(f"{cellid} - BG: {bg} - FG: {fg}", fontweight='bold', fontsize=12)

    bbn, ffn = bb[:2], ff[:2]
    bg_path = glob.glob((f'/auto/users/hamersky/baphy/Config/lbhb/SoundObjects/@OverlappingPairs/'
                        f'{BG_folder}/{bbn}*.wav'))[0]
    fg_path = glob.glob((f'/auto/users/hamersky/baphy/Config/lbhb/SoundObjects/@OverlappingPairs/'
                        f'{FG_folder}/{ffn}*.wav'))[0]

    xf = 100
    low, high = ax[0].get_xlim()
    low, high = low * xf, high * xf

    for AX in range(4,8):
        if AX == 4 or AX == 6:
            sfs, W = wavfile.read(bg_path)
        elif AX == 5 or AX == 7:
            sfs, W = wavfile.read(fg_path)
        spec = gtgram(W, sfs, 0.02, 0.01, 48, 100, 24000)
        ax[AX].imshow(spec, aspect='auto', origin='lower', extent=[0, spec.shape[1], 0, spec.shape[0]])
        ax[AX].set_xlim(low, high)
        ax[AX].set_xticks([]), ax[AX].set_yticks([])
        ax[AX].set_xticklabels([]), ax[AX].set_yticklabels([])
        ax[AX].spines['top'].set_visible(False), ax[AX].spines['bottom'].set_visible(False)
        ax[AX].spines['left'].set_visible(False), ax[AX].spines['right'].set_visible(False)
    ax[4].set_ylabel(f"{bb.split('-')[0]}", fontweight='bold')
    ax[5].set_ylabel(f"{ff.split('-')[0]}", fontweight='bold')

    if save:
        site, animal, unit = cellid.split('-')[0], cellid[:3], cellid[8:]
        area = df.area.loc[df.cellid == cellid].unique()[0]
        path = f"/home/hamersky/OLP Binaural/{animal}/{area}/{site}/{unit}/"
        Path(path).mkdir(parents=True, exist_ok=True)
        print(f"Saving to {path + f'{cellid}-{bg}-{fg}.png'}")
        plt.savefig(path + f"{cellid}-{bg}-{fg}.png")
        if close:
            plt.close()
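
# Hedged usage sketch (the BG/FG names here are hypothetical; valid pairs for
# a cell can be listed with get_pair_names):
# plot_binaural_psths(df, 'ARM017a-01-10', 'Waterfall', 'Gobble', 328)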
Example #29
File: csd_pup_nat.py Project: LBHB/nems_db
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import convolve2d, butter, sosfilt

from nems import db
from nems.utils import smooth
from nems_lbhb.xform_wrappers import generate_recording_uri
from nems_lbhb.baphy_experiment import BAPHYExperiment
from nems_lbhb.baphy_io import load_continuous_openephys
from nems_lbhb.plots import plot_waveforms_64D

batch = 322
siteid = "TAR010c"
siteid = "TAR017b"

ex = BAPHYExperiment(batch=batch, cellid=siteid)
print(ex.experiment, ex.openephys_folder, ex.openephys_tarfile,
      ex.openephys_tarfile_relpath)

rec = ex.get_recording(raw=True,
                       pupil=True,
                       resp=False,
                       stim=False,
                       recache=False,
                       rawchans=None,
                       rasterfs=300)

data = rec['raw']._data.copy()

print('HP-filtering MUA >100 Hz...')
sos = butter(4, 100, 'hp', fs=rec['raw'].fs, output='sos')
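
# The scrape cuts off here; the natural next step (hedged sketch) applies the
# filter along the time axis and rectifies to get an MUA estimate:
data_hp = sosfilt(sos, data, axis=1)
mua = np.abs(data_hp)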
Example #30
"""
test load TNL stimuli from physiology expt
"""
import matplotlib.pyplot as plt
from nems_lbhb.baphy_experiment import BAPHYExperiment

parmfile = '/auto/data/daq/Cordyceps/CRD002/CRD002a10_a_TNL.m'

manager = BAPHYExperiment(parmfile)

# get recording (w/o resp or pupil)
rec = manager.get_recording(**{'pupil': False, 'resp': False, 'rasterfs': 10})

# get all stim epochs
stims = [s for s in rec.epochs.name.unique() if 'STIM_' in s]

# count occurrences of each
counts = dict.fromkeys(stims)
for s in stims:
    counts[s] = (rec.epochs.name == s).sum()

plt.plot(list(counts.values()))  # dict_values must be wrapped in a list for matplotlib
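# optional axis labels (not in the scraped source)
plt.xlabel('stimulus index')
plt.ylabel('occurrence count')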

plt.show()