def populatemytables(paralel=True, cores=9):
    IDs = {k: v for k, v in zip(*lab.WaterRestriction().fetch(
        'water_restriction_number', 'subject_id'))}
    if paralel:
        schema = dj.schema(pipeline_tools.get_schema_name('behavior_foraging'), locals())
        schema.jobs.delete()
        ray.init(num_cpus=cores)
        for runround in [1]:
            arguments = {'display_progress': False, 'reserve_jobs': True, 'order': 'random'}
            print('round ' + str(runround) + ' of populate')
            result_ids = []
            for coreidx in range(cores):
                result_ids.append(populatemytables_core_paralel.remote(arguments, runround))
            ray.get(result_ids)
            arguments = {'display_progress': True, 'reserve_jobs': False}
            populatemytables_core(arguments, runround)
        ray.shutdown()
    else:
        for runround in [1]:
            arguments = {'display_progress': True, 'reserve_jobs': False, 'order': 'random'}
            populatemytables_core(arguments, runround)
def populatebehavior(paralel=True, drop_last_session_for_mice_in_training=True):
    print('adding behavior experiments')
    if paralel:
        # ray.init()
        result_ids = []
        #%%
        IDs = {k: v for k, v in zip(*lab.WaterRestriction().fetch(
            'water_restriction_number', 'subject_id'))}
        df_surgery = pd.read_csv(dj.config['locations.metadata_behavior'] + 'Surgery.csv')
        # iterating over subjects and removing the last session
        for subject_now, subject_id_now in zip(IDs.keys(), IDs.values()):
            # the last session is deleted only if the animal is still in training
            if (subject_now in df_surgery['ID'].values
                    and drop_last_session_for_mice_in_training
                    and df_surgery['status'][df_surgery['ID'] == subject_now].values[0] != 'sacrificed'):
                print(df_surgery['status'][df_surgery['ID'] == subject_now].values[0])
                if len((experiment.Session() & 'subject_id = "' + str(subject_id_now) + '"').fetch('session')) > 0:
                    sessiontodel = np.max((experiment.Session()
                                           & 'subject_id = "' + str(subject_id_now) + '"').fetch('session'))
                    session_todel = (experiment.Session()
                                     & 'subject_id = "' + str(subject_id_now) + '"'
                                     & 'session = ' + str(sessiontodel))
                    dj.config['safemode'] = False
                    print('deleting last session of ' + subject_now)
                    session_todel.delete()
                    dj.config['safemode'] = True
        #%%
        # iterating over subjects
        for subject_now, subject_id_now in zip(IDs.keys(), IDs.values()):
            dict_now = dict()
            dict_now[subject_now] = subject_id_now
            result_ids.append(populatebehavior_core(dict_now))
        # ray.get(result_ids)
        # ray.shutdown()
    else:
        arguments = {'display_progress': True}
        populatebehavior_core(arguments)
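A minimal driver sketch, assuming the module-level imports used above (dj, lab, experiment, pd, np) and the helpers referenced above (populatebehavior_core, populatemytables_core, populatemytables_core_paralel) are available in this module; passing paralel=False keeps everything in a single process and avoids any ray or multiprocessing setup:

if __name__ == '__main__':
    # ingest behavior sessions first, then populate the derived tables;
    # serial mode is convenient for debugging a single subject
    populatebehavior(paralel=False, drop_last_session_for_mice_in_training=False)
    populatemytables(paralel=False)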
def make(self, key):
    '''
    Ephys .make() function
    '''
    log.info('EphysIngest().make(): key: {k}'.format(k=key))

    #
    # Find corresponding BehaviorIngest
    #
    # ... we are keying times, sessions, etc from behavior ingest;
    # so lookup behavior ingest for session id, quit with warning otherwise
    #
    try:
        behavior = (behavior_ingest.BehaviorIngest() & key).fetch1()
    except dj.DataJointError:
        log.warning('EphysIngest().make(): skip - behavior ingest error')
        return

    log.info('behavior for ephys: {b}'.format(b=behavior))

    #
    # Find Ephys Recording
    #
    key = (experiment.Session & key).fetch1()
    sinfo = ((lab.WaterRestriction()
              * lab.Subject().proj()
              * experiment.Session()) & key).fetch1()

    rigpath = EphysDataPath().fetch1('data_path')
    h2o = sinfo['water_restriction_number']
    date = key['session_date'].strftime('%Y%m%d')

    dpath = pathlib.Path(rigpath, h2o, date)
    dglob = '[0-9]/{}'  # probe directory pattern

    v3spec = '{}_*_jrc.mat'.format(h2o)
    # old v3spec = '{}_g0_*.imec.ap_imec3_opt3_jrc.mat'.format(h2o)
    v3files = list(dpath.glob(dglob.format(v3spec)))

    v4spec = '{}_*.ap_res.mat'.format(h2o)
    # old v4spec = '{}_g0_*.imec?.ap_res.mat'.format(h2o)  # TODO v4ify
    v4files = list(dpath.glob(dglob.format(v4spec)))

    if (v3files and v4files) or not (v3files or v4files):
        log.warning(
            'Error - v3files ({}) + v4files ({}). Skipping.'.format(
                v3files, v4files))
        return

    if v3files:
        files = v3files
        loader = self._load_v3

    if v4files:
        files = v4files
        loader = self._load_v4

    for f in files:
        self._load(loader(sinfo, rigpath, dpath, f.relative_to(dpath)))
def populatemytables(paralel=True, cores=9):
    IDs = {k: v for k, v in zip(*lab.WaterRestriction().fetch(
        'water_restriction_number', 'subject_id'))}
    if paralel:
        schema = dj.schema(get_schema_name('foraging_analysis'), locals())
        # schema.jobs.delete()

        if use_ray:
            ray.init(num_cpus=cores)
            arguments = {'display_progress': False, 'reserve_jobs': True, 'order': 'random'}
            for runround in [1]:
                print('round ' + str(runround) + ' of populate')
                result_ids = []
                for coreidx in range(cores):
                    result_ids.append(populatemytables_core_paralel.remote(arguments, runround))
                ray.get(result_ids)
            ray.shutdown()
        else:
            # Use multiprocessing (`pool` is expected to be a pool created at module level)
            arguments = {'display_progress': False, 'reserve_jobs': False, 'order': 'random'}
            for runround in [1]:
                print('round ' + str(runround) + ' of populate')
                result_ids = [pool.apply_async(populatemytables_core_paralel,
                                               args=(arguments, runround))
                              for coreidx in range(cores)]
                for result_id in result_ids:
                    result_id.get()

        # Just in case there is anything missing, do a final serial pass
        arguments = {'display_progress': True, 'reserve_jobs': False}
        populatemytables_core(arguments, runround)
    else:
        for runround in [1]:
            arguments = {'display_progress': True, 'reserve_jobs': False, 'order': 'random'}
            populatemytables_core(arguments, runround)
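Both populate drivers above assume worker helpers that simply forward the arguments dict to DataJoint's populate() (display_progress, reserve_jobs and order are standard populate() keyword arguments). A hypothetical sketch of those helpers, under stated assumptions: foraging_analysis.SessionStats stands in for whatever computed tables the schema actually defines, and the conditional ray.remote wrapping is only one way the ray branch could obtain a remote-callable worker:

def populatemytables_core_paralel(arguments, runround):
    # each worker passes reserve_jobs=True through to populate(), so several
    # processes can work on the same tables without colliding
    if runround == 1:
        foraging_analysis.SessionStats().populate(**arguments)  # hypothetical table

def populatemytables_core(arguments, runround):
    # serial clean-up pass over the same tables
    if runround == 1:
        foraging_analysis.SessionStats().populate(**arguments)  # hypothetical table

if use_ray:
    # the ray branch calls .remote(...), so wrap the plain worker as a ray remote function
    populatemytables_core_paralel = ray.remote(populatemytables_core_paralel)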
def make(self, key):
    '''
    HistologyIngest .make() function
    '''
    # TODO: check the length of the `site.ont.name` variable,
    # and only ingest the sites with an ontology associated to it.

    log.info('HistologyIngest().make(): key: {}'.format(key))

    session = (experiment.Session & key).fetch1()

    egmap = {e['insertion_number']: e
             for e in (ephys.ProbeInsertion
                       * lab.ElectrodeConfig.ElectrodeGroup
                       & session).fetch('KEY')}

    if not len(egmap):
        log.info('... no probe information for {}. skipping.'.format(key))
        return

    rigpath = ephys_ingest.EphysDataPath().fetch1('data_path')
    subject_id = session['subject_id']
    session_date = session['session_date']
    water = (lab.WaterRestriction()
             & {'subject_id': subject_id}).fetch1('water_restriction_number')

    directory = pathlib.Path(
        rigpath, water, session_date.strftime('%Y-%m-%d'), 'histology')

    for probe in range(1, 3):  # probes 1 and 2

        probefile = 'landmarks_{}_{}_{}_siteInfo.mat'.format(
            water, session['session_date'].strftime('%Y%m%d'), probe)
        trackfile = 'landmarks_{}_{}_{}.csv'.format(
            water, session['session_date'].strftime('%Y%m%d'), probe)

        probepath = directory / probefile
        trackpath = directory / trackfile

        try:
            self._load_histology_probe(key, session, egmap, probe, probepath)
            self._load_histology_track(key, session, egmap, probe, trackpath)
        except StopIteration:
            pass

    log.info('HistologyIngest().make(): {} complete.'.format(key))
    self.insert1(key)
def plot_IV(wr_name='FOR04', cellnum=1, ivnum=0, IVsweepstoplot=[0, 14]):
    subject_id = (lab.WaterRestriction()
                  & 'water_restriction_number = "' + wr_name + '"').fetch('subject_id')[0]
    key = {'subject_id': subject_id, 'cell_number': cellnum}
    sweeps = pd.DataFrame(ephys_patch.Sweep() & key)
    protocolnames = sweeps['protocol_name'].unique()
    ivprotocolnames = [i for i in protocolnames if 'iv' in i.lower()]
    ivprotocolname = ivprotocolnames[ivnum]
    key['protocol_name'] = ivprotocolname
    #%%
    df_iv = pd.DataFrame()
    for sweepnum in IVsweepstoplot:
        key['protocol_sweep_number'] = sweepnum
        df_iv = pd.concat([df_iv,
                           pd.DataFrame((ephys_patch.Sweep() & key)
                                        * (ephys_patch.SweepResponse() & key)
                                        * (ephys_patch.SweepStimulus() & key)
                                        * (ephys_patch.SweepMetadata() & key))])
    df_IV = pd.DataFrame()
    for line in df_iv.iterrows():
        linenow = line[1]
        time = np.arange(0, len(linenow['response_trace'])) / linenow['sample_rate']
        linenow['time'] = time
        df_IV = pd.concat([df_IV, linenow.to_frame().transpose()])
    fig = plt.figure()
    ax_IV = fig.add_axes([0, 0, 2, .8])
    ax_stim = fig.add_axes([0, -.6, 2, .4])
    for line in df_IV.iterrows():
        ax_IV.plot(line[1]['time'], line[1]['response_trace'] * 1000, '-')
        ax_stim.plot(line[1]['time'], line[1]['stimulus_trace'] * 10**12, '-')
    ax_IV.set_xlabel('Time (s)')
    ax_IV.set_xlim([0, 1])
    ax_IV.set_ylabel('mV')
    ax_IV.set_title('Firing pattern')
    ax_stim.set_xlabel('Time (s)')
    ax_stim.set_xlim([0, 1])
    ax_stim.set_ylabel('pA')
    ax_stim.set_title('Stimulus')
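A usage sketch, assuming patch-clamp data for water-restricted subject "FOR04" has already been ingested into ephys_patch; the sweep numbers are just the ones to overlay:

# firing pattern of cell 1 from subject FOR04, sweeps 0 and 14 of its first IV protocol
plot_IV(wr_name='FOR04', cellnum=1, ivnum=0, IVsweepstoplot=[0, 14])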
def make(self, key):
    '''
    TrackingIngest .make() function
    '''
    log.info('TrackingIngest().make(): key: {k}'.format(k=key))

    h2o = (lab.WaterRestriction() & key).fetch1('water_restriction_number')
    session = (experiment.Session() & key).fetch1()
    trials = (experiment.SessionTrial() & session).fetch('trial')

    log.info('got session: {} ({} trials)'.format(session, len(trials)))

    sdate = session['session_date']
    sdate_iso = sdate.isoformat()  # YYYY-MM-DD
    sdate_sml = "{}{:02d}{:02d}".format(sdate.year, sdate.month, sdate.day)

    paths = TrackingDataPath.fetch(as_dict=True)
    devices = tracking.TrackingDevice().fetch(as_dict=True)

    # paths like: <root>/<h2o>/YYYY-MM-DD/tracking
    for p, d in ((p, d) for d in devices for p in paths):

        tdev = d['tracking_device']
        tpos = d['tracking_position']
        tdat = p['tracking_data_path']

        log.info('checking {} for tracking data'.format(tdat))

        tpath = pathlib.Path(tdat, h2o, sdate_iso, 'tracking')

        if not tpath.exists():
            log.warning('tracking path {} n/a - skipping'.format(tpath))
            continue

        camtrial = '{}_{}_{}.txt'.format(h2o, sdate_sml, tpos)
        campath = tpath / camtrial

        log.info('trying camera position trial map: {}'.format(campath))

        if not campath.exists():
            log.info('skipping {} - does not exist'.format(campath))
            continue

        tmap = self.load_campath(campath)
        n_tmap = len(tmap)
        log.info('loading tracking data for {} trials'.format(n_tmap))

        i = 0
        for t in tmap:  # load tracking for trial

            if tmap[t] not in trials:
                log.warning('nonexistent trial {} - skipping'.format(t))
                continue

            i += 1
            if i % 50 == 0:
                log.info('item {}/{}, trial #{} ({:.2f}%)'.format(
                    i, n_tmap, t, (i / n_tmap) * 100))
            else:
                log.debug('item {}/{}, trial #{} ({:.2f}%)'.format(
                    i, n_tmap, t, (i / n_tmap) * 100))

            # ex: dl59_side_1-0000.csv / h2o_position_tn-0000.csv
            tfile = '{}_{}_{}-*.csv'.format(h2o, tpos, t)
            tfull = list(tpath.glob(tfile))

            if not tfull or len(tfull) > 1:
                log.info('tracking file {} mismatch'.format(tfull))
                continue

            tfull = tfull[-1]
            trk = self.load_tracking(tfull)

            recs = {}
            rec_base = dict(key, trial=tmap[t], tracking_device=tdev)

            for k in trk:
                if k == 'samples':
                    recs['tracking'] = {
                        **rec_base,
                        'tracking_samples': len(trk['samples']['ts']),
                    }
                else:
                    rec = dict(rec_base)
                    for attr in trk[k]:
                        rec_key = '{}_{}'.format(k, attr)
                        rec[rec_key] = np.array(trk[k][attr])
                    recs[k] = rec

            tracking.Tracking.insert1(recs['tracking'], allow_direct_insert=True)
            tracking.Tracking.NoseTracking.insert1(recs['nose'], allow_direct_insert=True)
            tracking.Tracking.TongueTracking.insert1(recs['tongue'], allow_direct_insert=True)
            tracking.Tracking.JawTracking.insert1(recs['jaw'], allow_direct_insert=True)

        log.info('... completed {}/{} items.'.format(i, n_tmap))

    log.info('... saving load record')
    self.insert1(key)
    log.info('... done.')
def mockdata(): print('populating with mock data') reload(ccf) reload(lab) reload(experiment) reload(ephys) reload(publication) try: # TODO: these should be loaded in a more 'official' way lab.Person().insert1({ 'username': '******', 'fullname': 'Dave Liu' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'VGAT-Chr2-EYFP Jax', 'gene_modification_description': 'VGAT' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'PV-ires-Cre X Ai32', 'gene_modification_description': 'PV' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'Rosa26 Cag lsl reachR-citrine 1A4 X PV-ires-Cre', 'gene_modification_description': 'reachR PV' }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 399752, 'username': '******', 'cage_number': 145375, 'date_of_birth': '2017-08-03', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 399752, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 399752, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-03', 'end_time': '2017-11-03', 'surgery_description': 'Headbar anterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 399752, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -4, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 399752, 'water_restriction_number': 'dl7', 'cage_number': 148861, 'wr_start_date': '2017-11-07', 'wr_start_weight': 25 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 397853, 'username': '******', 'cage_number': 144545, 'date_of_birth': '2017-07-15', 'sex': 'M', 'animal_source': 'Allen Institute' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 397853, 'gene_modification': 'PV-ires-Cre X Ai32' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 397853, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-20', 'end_time': '2017-11-20', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 397853, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 397853, 'water_restriction_number': 'dl14', 'cage_number': 149595, 'wr_start_date': '2017-11-27', 'wr_start_weight': 24.1 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 400480, 'username': '******', 'cage_number': 145700, 'date_of_birth': '2017-08-09', 'sex': 'M', 'animal_source': 'Allen Institute' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 400480, 'gene_modification': 'PV-ires-Cre X Ai32' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 400480, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-21', 'end_time': '2017-11-21', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 400480, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 400480, 'water_restriction_number': 'dl15', 'cage_number': 149598, 
'wr_start_date': '2017-11-27', 'wr_start_weight': 27.6 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 406680, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-06', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 406680, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 406680, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-04', 'end_time': '2018-01-04', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 406680, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 406680, 'water_restriction_number': 'dl20', 'cage_number': 151282, 'wr_start_date': '2018-01-10', 'wr_start_weight': 22.7 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 408022, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-19', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 408022, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 408022, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-05', 'end_time': '2018-01-05', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 408022, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 408022, 'water_restriction_number': 'dl21', 'cage_number': 151283, 'wr_start_date': '2018-01-10', 'wr_start_weight': 21.1 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 408021, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-19', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 408021, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 408021, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-15', 'end_time': '2018-01-15', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 408021, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 408021, 'water_restriction_number': 'dl22', 'cage_number': 151704, 'wr_start_date': '2018-01-19', 'wr_start_weight': 21 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 407512, 'username': '******', 'cage_number': 151629, 'date_of_birth': '2017-10-13', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407512, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407512, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-16', 'end_time': '2018-01-16', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 
407512, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407512, 'water_restriction_number': 'dl24', 'cage_number': 151793, 'wr_start_date': '2018-01-22', 'wr_start_weight': 26 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 407513, 'username': '******', 'cage_number': 148636, 'date_of_birth': '2017-10-13', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407513, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407513, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-17', 'end_time': '2018-01-17', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 407513, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407513, 'water_restriction_number': 'dl25', 'cage_number': 151794, 'wr_start_date': '2018-01-22', 'wr_start_weight': 25.5 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 407986, 'username': '******', 'cage_number': 152268, 'date_of_birth': '2017-10-18', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407986, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407986, 'surgery_id': 1, 'username': '******', 'start_time': '2018-02-01', 'end_time': '2018-02-01', 'surgery_description': 'Headbar anterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 407986, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -4, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407986, 'water_restriction_number': 'dl28', 'cage_number': 152312, 'wr_start_date': '2018-02-05', 'wr_start_weight': 19.8 }, skip_duplicates=True) lab.Subject().insert1( { 'subject_id': 123457, 'username': '******', 'cage_number': 145375, 'date_of_birth': '2017-08-03', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 123457, 'water_restriction_number': 'tw5', 'cage_number': 148861, 'wr_start_date': '2017-11-07', 'wr_start_weight': 20.5 }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'TRig1', 'room': '2w.334', 'rig_description': 'Training rig 1' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'TRig2', 'room': '2w.334', 'rig_description': 'Training rig 2' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'TRig3', 'room': '2w.334', 'rig_description': 'Training rig 3' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'RRig', 'room': '2w.334', 'rig_description': 'Recording rig' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'Ephys1', 'room': '2w.334', 'rig_description': 'Recording computer' }, skip_duplicates=True) except Exception as e: print("error creating mock data: {e}".format(e=e), file=sys.stderr) raise
def make(self, key):
    log.info('BehaviorIngest.make(): key: {key}'.format(key=key))

    subject_id = key['subject_id']
    h2o = (lab.WaterRestriction() & {'subject_id': subject_id}).fetch1(
        'water_restriction_number')

    ymd = key['session_date']
    datestr = ymd.strftime('%Y%m%d')
    log.info('h2o: {h2o}, date: {d}'.format(h2o=h2o, d=datestr))

    # session record key
    skey = {}
    skey['subject_id'] = subject_id
    skey['session_date'] = ymd
    skey['username'] = self.get_session_user()
    skey['rig'] = key['rig']

    # File paths conform to the pattern:
    # dl7/TW_autoTrain/Session Data/dl7_TW_autoTrain_20180104_132813.mat
    # which is, more generally:
    # {h2o}/{training_protocol}/Session Data/{h2o}_{training protocol}_{YYYYMMDD}_{HHMMSS}.mat
    path = pathlib.Path(key['rig_data_path'], key['subpath'])

    if experiment.Session() & skey:
        log.info("note: session exists for {h2o} on {d}".format(h2o=h2o, d=ymd))

    trial = namedtuple(  # simple structure to track per-trial vars
        'trial', ('ttype', 'stim', 'free', 'settings', 'state_times',
                  'state_names', 'state_data', 'event_data', 'event_times',
                  'trial_start'))

    if os.stat(path).st_size / 1024 < 1000:
        log.info('skipping file {} - too small'.format(path))
        return

    log.debug('loading file {}'.format(path))

    mat = spio.loadmat(path, squeeze_me=True)
    SessionData = mat['SessionData'].flatten()

    # parse session datetime
    session_datetime_str = str('').join(
        (str(SessionData['Info'][0]['SessionDate']), ' ',
         str(SessionData['Info'][0]['SessionStartTime_UTC'])))
    session_datetime = datetime.strptime(session_datetime_str,
                                         '%d-%b-%Y %H:%M:%S')

    AllTrialTypes = SessionData['TrialTypes'][0]
    AllTrialSettings = SessionData['TrialSettings'][0]
    AllTrialStarts = SessionData['TrialStartTimestamp'][0]
    AllTrialStarts = AllTrialStarts - AllTrialStarts[0]  # real 1st trial

    RawData = SessionData['RawData'][0].flatten()
    AllStateNames = RawData['OriginalStateNamesByNumber'][0]
    AllStateData = RawData['OriginalStateData'][0]
    AllEventData = RawData['OriginalEventData'][0]
    AllStateTimestamps = RawData['OriginalStateTimestamps'][0]
    AllEventTimestamps = RawData['OriginalEventTimestamps'][0]

    # verify trial-related data arrays are all same length
    assert all(x.shape[0] == AllStateTimestamps.shape[0] for x in
               (AllTrialTypes, AllTrialSettings, AllStateNames,
                AllStateData, AllEventData, AllEventTimestamps,
                AllTrialStarts))

    # AllStimTrials optional special case
    if 'StimTrials' in SessionData.dtype.fields:
        log.debug('StimTrials detected in session - will include')
        AllStimTrials = SessionData['StimTrials'][0]
        assert AllStimTrials.shape[0] == AllStateTimestamps.shape[0]
    else:
        log.debug('StimTrials not detected in session - will skip')
        AllStimTrials = np.array(
            [None for _ in enumerate(range(AllStateTimestamps.shape[0]))])

    # AllFreeTrials optional special case
    if 'FreeTrials' in SessionData.dtype.fields:
        log.debug('FreeTrials detected in session - will include')
        AllFreeTrials = SessionData['FreeTrials'][0]
        assert AllFreeTrials.shape[0] == AllStateTimestamps.shape[0]
    else:
        log.debug('FreeTrials not detected in session - synthesizing')
        AllFreeTrials = np.zeros(AllStateTimestamps.shape[0], dtype=np.uint8)

    trials = list(zip(AllTrialTypes, AllStimTrials, AllFreeTrials,
                      AllTrialSettings, AllStateTimestamps, AllStateNames,
                      AllStateData, AllEventData, AllEventTimestamps,
                      AllTrialStarts))

    if not trials:
        log.warning('skipping date {d}, no valid files'.format(d=datestr))
        return

    #
    # Trial data seems valid; synthesize session id & add session record
    # XXX: note - later breaks can result in Sessions without valid trials
    #

    assert skey['session_date'] == session_datetime.date()

    skey['session_date'] = session_datetime.date()
    skey['session_time'] = session_datetime.time()

    log.debug('synthesizing session ID')
    session = (dj.U().aggr(experiment.Session() & {'subject_id': subject_id},
                           n='max(session)').fetch1('n') or 0) + 1
    log.info('generated session id: {session}'.format(session=session))
    skey['session'] = session
    key = dict(key, **skey)

    #
    # Actually load the per-trial data
    #
    log.info('BehaviorIngest.make(): trial parsing phase')

    # lists of various records for batch-insert
    rows = {k: list() for k in ('trial', 'behavior_trial', 'trial_note',
                                'trial_event', 'corrected_trial_event',
                                'action_event', 'photostim',
                                'photostim_location', 'photostim_trial',
                                'photostim_trial_event')}

    i = 0  # trial numbering starts at 1

    for t in trials:

        #
        # Misc
        #

        t = trial(*t)  # convert list of items to a 'trial' structure
        i += 1  # increment trial counter

        log.debug('BehaviorIngest.make(): parsing trial {i}'.format(i=i))

        # convert state data names into a lookup dictionary
        #
        # names (seem to be? are?):
        #
        # Trigtrialstart, PreSamplePeriod, SamplePeriod, DelayPeriod
        # EarlyLickDelay, EarlyLickSample, ResponseCue, GiveLeftDrop
        # GiveRightDrop, GiveLeftDropShort, GiveRightDropShort
        # AnswerPeriod, Reward, RewardConsumption, NoResponse
        # TimeOut, StopLicking, StopLickingReturn, TrialEnd
        #

        states = {k: (v + 1) for v, k in enumerate(t.state_names)}
        required_states = ('PreSamplePeriod', 'SamplePeriod', 'DelayPeriod',
                           'ResponseCue', 'StopLicking', 'TrialEnd')

        missing = list(k for k in required_states if k not in states)

        if len(missing):
            log.warning('skipping trial {i}; missing {m}'.format(i=i, m=missing))
            continue

        gui = t.settings['GUI'].flatten()

        # ProtocolType - only ingest protocol >= 3
        #
        # 1 Water-Valve-Calibration 2 Licking 3 Autoassist
        # 4 No autoassist 5 DelayEnforce 6 SampleEnforce 7 Fixed
        #

        if 'ProtocolType' not in gui.dtype.names:
            log.warning('skipping trial {i}; protocol undefined'.format(i=i))
            continue

        protocol_type = gui['ProtocolType'][0]
        if gui['ProtocolType'][0] < 3:
            log.warning('skipping trial {i}; protocol {n} < 3'.format(
                i=i, n=gui['ProtocolType'][0]))
            continue

        #
        # Top-level 'Trial' record
        #

        tkey = dict(skey)
        startindex = np.where(t.state_data == states['PreSamplePeriod'])[0]

        # should be only end of 1st StopLicking;
        # rest of data is irrelevant w/r/t separately ingested ephys
        endindex = np.where(t.state_data == states['StopLicking'])[0]

        log.debug('states\n' + str(states))
        log.debug('state_data\n' + str(t.state_data))
        log.debug('startindex\n' + str(startindex))
        log.debug('endindex\n' + str(endindex))

        if not (len(startindex) and len(endindex)):
            log.warning('skipping {}: start/end mismatch: {}/{}'.format(
                i, str(startindex), str(endindex)))
            continue

        try:
            tkey['trial'] = i
            tkey['trial_uid'] = i
            tkey['start_time'] = t.trial_start
            tkey['stop_time'] = t.trial_start + t.state_times[endindex][0]
        except IndexError:
            log.warning('skipping {}: IndexError: {}/{} -> {}'.format(
                i, str(startindex), str(endindex), str(t.state_times)))
            continue

        log.debug('tkey' + str(tkey))
        rows['trial'].append(tkey)

        #
        # Specific BehaviorTrial information for this trial
        #

        bkey = dict(tkey)
        bkey['task'] = 'audio delay'  # hard-coded here
        bkey['task_protocol'] = 1     # hard-coded here

        # determine trial instruction
        trial_instruction = 'left'    # hard-coded here

        if gui['Reversal'][0] == 1:
            if t.ttype == 1:
                trial_instruction = 'left'
            elif t.ttype == 0:
                trial_instruction = 'right'
        elif gui['Reversal'][0] == 2:
            if t.ttype == 1:
                trial_instruction = 'right'
            elif t.ttype == 0:
                trial_instruction = 'left'

        bkey['trial_instruction'] = trial_instruction

        # determine early lick
        early_lick = 'no early'

        if (protocol_type >= 5 and 'EarlyLickDelay' in states
                and np.any(t.state_data == states['EarlyLickDelay'])):
            early_lick = 'early'
        if (protocol_type >= 5
                and ('EarlyLickSample' in states
                     and np.any(t.state_data == states['EarlyLickSample']))):
            early_lick = 'early'

        bkey['early_lick'] = early_lick

        # determine outcome
        outcome = 'ignore'

        if ('Reward' in states
                and np.any(t.state_data == states['Reward'])):
            outcome = 'hit'
        elif ('TimeOut' in states
              and np.any(t.state_data == states['TimeOut'])):
            outcome = 'miss'
        elif ('NoResponse' in states
              and np.any(t.state_data == states['NoResponse'])):
            outcome = 'ignore'

        bkey['outcome'] = outcome

        # Determine free/autowater (Autowater 1 == enabled, 2 == disabled)
        bkey['auto_water'] = True if gui['Autowater'][0] == 1 else False
        bkey['free_water'] = t.free

        rows['behavior_trial'].append(bkey)

        #
        # Add 'protocol' note
        #
        nkey = dict(tkey)
        nkey['trial_note_type'] = 'protocol #'
        nkey['trial_note'] = str(protocol_type)
        rows['trial_note'].append(nkey)

        #
        # Add 'autolearn' note
        #
        nkey = dict(tkey)
        nkey['trial_note_type'] = 'autolearn'
        nkey['trial_note'] = str(gui['Autolearn'][0])
        rows['trial_note'].append(nkey)

        #
        # Add 'bitcode' note
        #
        if 'randomID' in gui.dtype.names:
            nkey = dict(tkey)
            nkey['trial_note_type'] = 'bitcode'
            nkey['trial_note'] = str(gui['randomID'][0])
            rows['trial_note'].append(nkey)

        #
        # Add presample event
        #
        log.debug('BehaviorIngest.make(): presample')

        ekey = dict(tkey)
        sampleindex = np.where(t.state_data == states['SamplePeriod'])[0]

        ekey['trial_event_id'] = len(rows['trial_event'])
        ekey['trial_event_type'] = 'presample'
        ekey['trial_event_time'] = t.state_times[startindex][0]
        ekey['duration'] = (t.state_times[sampleindex[0]]
                            - t.state_times[startindex])[0]

        if math.isnan(ekey['duration']):
            log.debug('BehaviorIngest.make(): fixing presample duration')
            ekey['duration'] = 0.0  # FIXDUR: lookup from previous trial

        rows['trial_event'].append(ekey)

        #
        # Add other 'sample' events
        #
        log.debug('BehaviorIngest.make(): sample events')

        last_dur = None

        for s in sampleindex:  # in protocol > 6 ~-> n>1
            # todo: batch events
            ekey = dict(tkey)
            ekey['trial_event_id'] = len(rows['trial_event'])
            ekey['trial_event_type'] = 'sample'
            ekey['trial_event_time'] = t.state_times[s]
            ekey['duration'] = gui['SamplePeriod'][0]

            if math.isnan(ekey['duration']) and last_dur is None:
                log.warning('... trial {} bad duration, no last_dur'.format(i))
                ekey['duration'] = 0.0  # FIXDUR: cross-trial check
                rows['corrected_trial_event'].append(ekey)
            elif math.isnan(ekey['duration']) and last_dur is not None:
                log.warning('... trial {} duration using last_dur {}'.format(
                    i, last_dur))
                ekey['duration'] = last_dur
                rows['corrected_trial_event'].append(ekey)
            else:
                last_dur = ekey['duration']  # only track 'good' values.

            rows['trial_event'].append(ekey)

        #
        # Add 'delay' events
        #
        log.debug('BehaviorIngest.make(): delay events')

        last_dur = None
        delayindex = np.where(t.state_data == states['DelayPeriod'])[0]

        for d in delayindex:  # protocol > 6 ~-> n>1
            ekey = dict(tkey)
            ekey['trial_event_id'] = len(rows['trial_event'])
            ekey['trial_event_type'] = 'delay'
            ekey['trial_event_time'] = t.state_times[d]
            ekey['duration'] = gui['DelayPeriod'][0]

            if math.isnan(ekey['duration']) and last_dur is None:
                log.warning('... {} bad duration, no last_dur'.format(i))
                ekey['duration'] = 0.0  # FIXDUR: cross-trial check
                rows['corrected_trial_event'].append(ekey)
            elif math.isnan(ekey['duration']) and last_dur is not None:
                log.warning('... {} duration using last_dur {}'.format(
                    i, last_dur))
                ekey['duration'] = last_dur
                rows['corrected_trial_event'].append(ekey)
            else:
                last_dur = ekey['duration']  # only track 'good' values.

            log.debug('delay event duration: {}'.format(ekey['duration']))
            rows['trial_event'].append(ekey)

        #
        # Add 'go' event
        #
        log.debug('BehaviorIngest.make(): go')

        ekey = dict(tkey)
        responseindex = np.where(t.state_data == states['ResponseCue'])[0]

        ekey['trial_event_id'] = len(rows['trial_event'])
        ekey['trial_event_type'] = 'go'
        ekey['trial_event_time'] = t.state_times[responseindex][0]
        ekey['duration'] = gui['AnswerPeriod'][0]

        if math.isnan(ekey['duration']):
            log.debug('BehaviorIngest.make(): fixing go duration')
            ekey['duration'] = 0.0  # FIXDUR: lookup from previous trials
            rows['corrected_trial_event'].append(ekey)

        rows['trial_event'].append(ekey)

        #
        # Add 'trialend' events
        #
        log.debug('BehaviorIngest.make(): trialend events')

        last_dur = None
        trialendindex = np.where(t.state_data == states['TrialEnd'])[0]

        ekey = dict(tkey)
        ekey['trial_event_id'] = len(rows['trial_event'])
        ekey['trial_event_type'] = 'trialend'
        ekey['trial_event_time'] = t.state_times[trialendindex][0]
        ekey['duration'] = 0.0

        rows['trial_event'].append(ekey)

        #
        # Add lick events
        #

        lickleft = np.where(t.event_data == 69)[0]
        log.debug('... lickleft: {r}'.format(r=str(lickleft)))

        action_event_count = len(rows['action_event'])
        if len(lickleft):
            [rows['action_event'].append(
                dict(tkey, action_event_id=action_event_count + idx,
                     action_event_type='left lick',
                     action_event_time=t.event_times[l]))
             for idx, l in enumerate(lickleft)]

        lickright = np.where(t.event_data == 71)[0]
        log.debug('... lickright: {r}'.format(r=str(lickright)))

        action_event_count = len(rows['action_event'])
        if len(lickright):
            [rows['action_event'].append(
                dict(tkey, action_event_id=action_event_count + idx,
                     action_event_type='right lick',
                     action_event_time=t.event_times[r]))
             for idx, r in enumerate(lickright)]

        #
        # Photostim Events
        #

        if t.stim:
            log.debug('BehaviorIngest.make(): t.stim == {}'.format(t.stim))
            rows['photostim_trial'].append(tkey)
            delay_period_idx = np.where(
                t.state_data == states['DelayPeriod'])[0][0]
            rows['photostim_trial_event'].append(
                dict(tkey, photo_stim=t.stim,
                     photostim_event_id=len(rows['photostim_trial_event']),
                     photostim_event_time=t.state_times[delay_period_idx],
                     power=5.5))

        # end of trial loop.

    # Session Insertion
    log.info('BehaviorIngest.make(): adding session record')
    experiment.Session().insert1(skey)

    # Behavior Insertion
    log.info('BehaviorIngest.make(): bulk insert phase')

    log.info('BehaviorIngest.make(): saving ingest {d}'.format(d=key))
    self.insert1(key, ignore_extra_fields=True, allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.Session.Trial')
    experiment.SessionTrial().insert(
        rows['trial'], ignore_extra_fields=True, allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.BehaviorTrial')
    experiment.BehaviorTrial().insert(
        rows['behavior_trial'], ignore_extra_fields=True,
        allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.TrialNote')
    experiment.TrialNote().insert(
        rows['trial_note'], ignore_extra_fields=True,
        allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.TrialEvent')
    experiment.TrialEvent().insert(
        rows['trial_event'], ignore_extra_fields=True,
        allow_direct_insert=True, skip_duplicates=True)

    log.info('BehaviorIngest.make(): ... CorrectedTrialEvents')
    BehaviorIngest().CorrectedTrialEvents().insert(
        rows['corrected_trial_event'], ignore_extra_fields=True,
        allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.ActionEvent')
    experiment.ActionEvent().insert(
        rows['action_event'], ignore_extra_fields=True,
        allow_direct_insert=True)

    # Photostim Insertion
    photostim_ids = np.unique(
        [r['photo_stim'] for r in rows['photostim_trial_event']])
    unknown_photostims = np.setdiff1d(photostim_ids, list(photostims.keys()))

    if unknown_photostims:
        raise ValueError(
            'Unknown photostim protocol: {}'.format(unknown_photostims))

    if photostim_ids.size > 0:
        log.info('BehaviorIngest.make(): ... experiment.Photostim')
        for stim in photostim_ids:
            experiment.Photostim.insert1(
                dict(skey, **photostims[stim]), ignore_extra_fields=True)
            experiment.Photostim.PhotostimLocation.insert(
                (dict(skey, **loc,
                      photo_stim=photostims[stim]['photo_stim'])
                 for loc in photostims[stim]['locations']),
                ignore_extra_fields=True)

    log.info('BehaviorIngest.make(): ... experiment.PhotostimTrial')
    experiment.PhotostimTrial.insert(
        rows['photostim_trial'], ignore_extra_fields=True,
        allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.PhotostimTrialEvent')
    experiment.PhotostimEvent.insert(
        rows['photostim_trial_event'], ignore_extra_fields=True,
        allow_direct_insert=True)

    # Behavior Ingest Insertion
    log.info('BehaviorIngest.make(): ... BehaviorIngest.BehaviorFile')
    BehaviorIngest.BehaviorFile().insert1(
        dict(key, behavior_file=os.path.basename(key['subpath'])),
        ignore_extra_fields=True, allow_direct_insert=True)
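The photostim insertion above relies on a module-level photostims dict keyed by the Bpod photo_stim id, where each value supplies the experiment.Photostim attributes plus a 'locations' list for experiment.Photostim.PhotostimLocation. A hypothetical sketch of that shape, with all attribute names and values here only assumptions about the schema, not confirmed by the code above:

photostims = {
    # photo_stim id 4 -> one stimulation protocol with a single target location
    4: {'photo_stim': 4,
        'duration': 0.5,                     # assumed attribute
        'locations': [{'skull_reference': 'Bregma',   # assumed attributes
                       'ap_location': 2500, 'ml_location': -1500,
                       'depth': 0, 'theta': 15, 'phi': 15}]},
}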
def mockdata(): print('populating with mock data') reload(lab) reload(ccf) reload(experiment) reload(ephys) reload(histology) reload(tracking) try: lab.Person().insert1({ 'username': '******', 'fullname': 'Unknown' }, skip_duplicates=True) lab.Person().insert1({ 'username': '******', 'fullname': 'Dave Liu' }, skip_duplicates=True) lab.Person().insert1({ 'username': '******', 'fullname': 'Susu Chen' }, skip_duplicates=True) lab.Person().insert1({ 'username': '******', 'fullname': 'Thinh Nguyen' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'VGAT-Chr2-EYFP Jax', 'gene_modification_description': 'VGAT' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'PV-ires-Cre X Ai32', 'gene_modification_description': 'PV' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'Rosa26 Cag lsl reachR-citrine 1A4 X PV-ires-Cre', 'gene_modification_description': 'reachR PV' }, skip_duplicates=True) # Subject 399752 / dl7 lab.Subject().insert1( { 'subject_id': 399752, 'username': '******', 'cage_number': 145375, 'date_of_birth': '2017-08-03', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 399752, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 399752, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-03', 'end_time': '2017-11-03', 'surgery_description': 'Headbar anterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 399752, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -4, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 399752, 'water_restriction_number': 'dl7', 'cage_number': 148861, 'wr_start_date': '2017-11-07', 'wr_start_weight': 25 }, skip_duplicates=True) # Subject 397853 / dl14 lab.Subject().insert1( { 'subject_id': 397853, 'username': '******', 'cage_number': 144545, 'date_of_birth': '2017-07-15', 'sex': 'M', 'animal_source': 'Allen Institute' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 397853, 'gene_modification': 'PV-ires-Cre X Ai32' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 397853, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-20', 'end_time': '2017-11-20', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 397853, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 397853, 'water_restriction_number': 'dl14', 'cage_number': 149595, 'wr_start_date': '2017-11-27', 'wr_start_weight': 24.1 }, skip_duplicates=True) # Subject 400480 / dl15 lab.Subject().insert1( { 'subject_id': 400480, 'username': '******', 'cage_number': 145700, 'date_of_birth': '2017-08-09', 'sex': 'M', 'animal_source': 'Allen Institute' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 400480, 'gene_modification': 'PV-ires-Cre X Ai32' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 400480, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-21', 'end_time': '2017-11-21', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 
400480, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 400480, 'water_restriction_number': 'dl15', 'cage_number': 149598, 'wr_start_date': '2017-11-27', 'wr_start_weight': 27.6 }, skip_duplicates=True) # Subject 406680 / dl20 lab.Subject().insert1( { 'subject_id': 406680, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-06', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 406680, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 406680, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-04', 'end_time': '2018-01-04', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 406680, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 406680, 'water_restriction_number': 'dl20', 'cage_number': 151282, 'wr_start_date': '2018-01-10', 'wr_start_weight': 22.7 }, skip_duplicates=True) # Subject 408022 / dl21 lab.Subject().insert1( { 'subject_id': 408022, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-19', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 408022, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 408022, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-05', 'end_time': '2018-01-05', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 408022, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 408022, 'water_restriction_number': 'dl21', 'cage_number': 151283, 'wr_start_date': '2018-01-10', 'wr_start_weight': 21.1 }, skip_duplicates=True) # Subject 408021 / dl22 lab.Subject().insert1( { 'subject_id': 408021, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-19', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 408021, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 408021, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-15', 'end_time': '2018-01-15', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 408021, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 408021, 'water_restriction_number': 'dl22', 'cage_number': 151704, 'wr_start_date': '2018-01-19', 'wr_start_weight': 21 }, skip_duplicates=True) # Subject 407512 / dl24 lab.Subject().insert1( { 'subject_id': 407512, 'username': '******', 'cage_number': 151629, 'date_of_birth': '2017-10-13', 'sex': 'M', 'animal_source': 'Jackson labs' }, 
skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407512, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407512, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-16', 'end_time': '2018-01-16', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 407512, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407512, 'water_restriction_number': 'dl24', 'cage_number': 151793, 'wr_start_date': '2018-01-22', 'wr_start_weight': 26 }, skip_duplicates=True) # 407513 / dl25 lab.Subject().insert1( { 'subject_id': 407513, 'username': '******', 'cage_number': 148636, 'date_of_birth': '2017-10-13', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407513, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407513, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-17', 'end_time': '2018-01-17', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 407513, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407513, 'water_restriction_number': 'dl25', 'cage_number': 151794, 'wr_start_date': '2018-01-22', 'wr_start_weight': 25.5 }, skip_duplicates=True) # Subject 407986 / dl28 lab.Subject().insert1( { 'subject_id': 407986, 'username': '******', 'cage_number': 152268, 'date_of_birth': '2017-10-18', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407986, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407986, 'surgery_id': 1, 'username': '******', 'start_time': '2018-02-01', 'end_time': '2018-02-01', 'surgery_description': 'Headbar anterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 407986, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -4, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407986, 'water_restriction_number': 'dl28', 'cage_number': 152312, 'wr_start_date': '2018-02-05', 'wr_start_weight': 19.8 }, skip_duplicates=True) # Subject 123457 / tw5 lab.Subject().insert1( { 'subject_id': 123457, 'username': '******', 'cage_number': 145375, 'date_of_birth': '2017-08-03', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 123457, 'water_restriction_number': 'tw5', 'cage_number': 148861, 'wr_start_date': '2017-11-07', 'wr_start_weight': 20.5 }, skip_duplicates=True) # Subject 412330 / tw34 lab.Subject().insert1( { 'subject_id': 412330, 'username': '******', 'cage_number': 154522, 'date_of_birth': '2017-12-05', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 412330, 'water_restriction_number': 'tw34', 'cage_number': 154522, 'wr_start_date': '2018-03-18', 'wr_start_weight': 21.0 }, 
skip_duplicates=True) # subject 432998 / dl55 lab.Subject().insert1( { 'subject_id': 432998, 'username': '******', 'cage_number': 160920, 'date_of_birth': '2018-07-02', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 432998, 'water_restriction_number': 'dl55', 'cage_number': 160920, 'wr_start_date': '2018-09-05', 'wr_start_weight': 21.0 }, skip_duplicates=True) # Subject 435884 / dl59 lab.Subject().insert1( { 'subject_id': 435884, 'username': '******', 'cage_number': 161908, 'date_of_birth': '2018-08-06', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 435884, 'water_restriction_number': 'dl59', 'cage_number': 154522, 'wr_start_date': '2018-09-30', 'wr_start_weight': 21.0 }, skip_duplicates=True) # Subject 432572 / dl56 lab.Subject().insert1( { 'subject_id': 432572, 'username': '******', 'cage_number': 161125, 'date_of_birth': '2018-06-28', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 432572, 'water_restriction_number': 'dl56', 'cage_number': 161125, 'wr_start_date': '2018-09-10', 'wr_start_weight': 21.0 }, skip_duplicates=True) # Subject 412753 / dl36 lab.Subject().insert1( { 'subject_id': 412753, 'username': '******', 'cage_number': 154570, 'date_of_birth': '2017-12-07', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 412753, 'water_restriction_number': 'dl36', 'cage_number': 154570, 'wr_start_date': '2017-03-30', 'wr_start_weight': 21.0 }, skip_duplicates=True) # Subject 440010 / dl62 lab.Subject().insert1( { 'subject_id': 440010, 'username': '******', 'cage_number': 163782, 'date_of_birth': '2018-09-24', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 440010, 'water_restriction_number': 'dl62', 'cage_number': 163782, 'wr_start_date': '2018-11-24', 'wr_start_weight': 23.0 }, skip_duplicates=True) # Subject 55004 / DL004 lab.Subject().insert1( { 'subject_id': 550004, 'username': '******', 'cage_number': 163782, 'date_of_birth': '2018-09-24', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 550004, 'water_restriction_number': 'DL004', 'cage_number': 163782, 'wr_start_date': '2018-11-24', 'wr_start_weight': 23.0 }, skip_duplicates=True) # Subject 550009 / DL009 lab.Subject().insert1( { 'subject_id': 550009, 'username': '******', 'cage_number': 163782, 'date_of_birth': '2018-09-24', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 550009, 'water_restriction_number': 'DL009', 'cage_number': 163782, 'wr_start_date': '2018-11-24', 'wr_start_weight': 23.0 }, skip_duplicates=True) # Subject 550010 / DL010 lab.Subject().insert1( { 'subject_id': 550010, 'username': '******', 'cage_number': 163782, 'date_of_birth': '2018-09-24', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 550010, 'water_restriction_number': 'DL010', 'cage_number': 163782, 'wr_start_date': '2018-11-24', 'wr_start_weight': 23.0 }, skip_duplicates=True) # Subject 550022 / DL022 lab.Subject().insert1( { 'subject_id': 550022, 'username': '******', 'cage_number': 163782, 'date_of_birth': '2018-09-24', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) 
lab.WaterRestriction().insert1( { 'subject_id': 550022, 'water_restriction_number': 'DL022', 'cage_number': 163782, 'wr_start_date': '2018-11-24', 'wr_start_weight': 23.0 }, skip_duplicates=True) # Subject 440959 / SC011 lab.Subject().insert1( { 'subject_id': 440959, 'username': '******', 'cage_number': 440959, 'date_of_birth': '2018-10-09', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 440959, 'water_restriction_number': 'SC011', 'cage_number': 440959, 'wr_start_date': '2018-12-21', 'wr_start_weight': 22.8 }, skip_duplicates=True) # Subject 442571 / SC022 lab.Subject().insert1( { 'subject_id': 442571, 'username': '******', 'cage_number': 442571, 'date_of_birth': '2018-10-29', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 442571, 'water_restriction_number': 'SC022', 'cage_number': 442571, 'wr_start_date': '2019-01-02', 'wr_start_weight': 26.5 }, skip_duplicates=True) # Subject 460432 / SC030 lab.Subject().insert1( { 'subject_id': 456773, 'username': '******', 'cage_number': 171857, 'date_of_birth': '2019-05-29', 'sex': 'M', 'animal_source': 'Jackson Labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 456773, 'water_restriction_number': 'SC030', 'cage_number': 171857, 'wr_start_date': '2019-08-08', 'wr_start_weight': 25.700 }, skip_duplicates=True) # Subject 460432 / SC032 lab.Subject().insert1( { 'subject_id': 460432, 'username': '******', 'cage_number': 173167, 'date_of_birth': '2019-07-15', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 460432, 'water_restriction_number': 'SC032', 'cage_number': 173167, 'wr_start_date': '2019-09-20', 'wr_start_weight': 22.8 }, skip_duplicates=True) for num in range(1, 20): lab.Subject().insert1( { 'subject_id': 777000 + num, 'username': '******', 'cage_number': 173167, 'date_of_birth': '2019-07-15', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 777000 + num, 'water_restriction_number': 'FOR' + f'{num:02}', 'cage_number': 173167, 'wr_start_date': '2019-09-20', 'wr_start_weight': 22.8 }, skip_duplicates=True) # Rig lab.Rig().insert1( { 'rig': 'TRig1', 'room': '2w.334', 'rig_description': 'Training rig 1' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'TRig2', 'room': '2w.334', 'rig_description': 'Training rig 2' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'TRig3', 'room': '2w.334', 'rig_description': 'Training rig 3' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'RRig', 'room': '2w.334', 'rig_description': 'Recording rig' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'RRig2', 'room': '2w.334', 'rig_description': 'Recording rig2' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'RRig3', 'room': '2w.334', 'rig_description': 'Recording rig3 for multi-target-licking' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'Ephys1', 'room': '2w.334', 'rig_description': 'Recording computer' }, skip_duplicates=True) lab.ProbeType.create_neuropixels_probe() except Exception as e: print("error creating mock data: {e}".format(e=e), file=sys.stderr) raise
def mockdata(): print('populating with mock data') reload(ccf) reload(lab) reload(experiment) reload(ephys) reload(publication) try: # TODO: these should be loaded in a more 'official' way lab.Person().insert1({ 'username': '******', 'fullname': 'Dave Liu' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'VGAT-Chr2-EYFP Jax', 'gene_modification_description': 'VGAT' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'PV-ires-Cre X Ai32', 'gene_modification_description': 'PV' }, skip_duplicates=True) lab.ModifiedGene().insert1( { 'gene_modification': 'Rosa26 Cag lsl reachR-citrine 1A4 X PV-ires-Cre', 'gene_modification_description': 'reachR PV' }, skip_duplicates=True) # Subject 399752 / dl7 lab.Subject().insert1( { 'subject_id': 399752, 'username': '******', 'cage_number': 145375, 'date_of_birth': '2017-08-03', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 399752, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 399752, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-03', 'end_time': '2017-11-03', 'surgery_description': 'Headbar anterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 399752, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -4, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 399752, 'water_restriction_number': 'dl7', 'cage_number': 148861, 'wr_start_date': '2017-11-07', 'wr_start_weight': 25 }, skip_duplicates=True) # Subject 397853 / dl14 lab.Subject().insert1( { 'subject_id': 397853, 'username': '******', 'cage_number': 144545, 'date_of_birth': '2017-07-15', 'sex': 'M', 'animal_source': 'Allen Institute' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 397853, 'gene_modification': 'PV-ires-Cre X Ai32' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 397853, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-20', 'end_time': '2017-11-20', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 397853, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 397853, 'water_restriction_number': 'dl14', 'cage_number': 149595, 'wr_start_date': '2017-11-27', 'wr_start_weight': 24.1 }, skip_duplicates=True) # Subject 400480 / dl15 lab.Subject().insert1( { 'subject_id': 400480, 'username': '******', 'cage_number': 145700, 'date_of_birth': '2017-08-09', 'sex': 'M', 'animal_source': 'Allen Institute' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 400480, 'gene_modification': 'PV-ires-Cre X Ai32' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 400480, 'surgery_id': 1, 'username': '******', 'start_time': '2017-11-21', 'end_time': '2017-11-21', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 400480, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 400480, 
'water_restriction_number': 'dl15', 'cage_number': 149598, 'wr_start_date': '2017-11-27', 'wr_start_weight': 27.6 }, skip_duplicates=True) # Subject 406680 / dl20 lab.Subject().insert1( { 'subject_id': 406680, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-06', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 406680, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 406680, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-04', 'end_time': '2018-01-04', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 406680, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 406680, 'water_restriction_number': 'dl20', 'cage_number': 151282, 'wr_start_date': '2018-01-10', 'wr_start_weight': 22.7 }, skip_duplicates=True) # Subject 408022 / dl21 lab.Subject().insert1( { 'subject_id': 408022, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-19', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 408022, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 408022, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-05', 'end_time': '2018-01-05', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 408022, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 408022, 'water_restriction_number': 'dl21', 'cage_number': 151283, 'wr_start_date': '2018-01-10', 'wr_start_weight': 21.1 }, skip_duplicates=True) # Subject 408021 / dl22 lab.Subject().insert1( { 'subject_id': 408021, 'username': '******', 'cage_number': 148859, 'date_of_birth': '2017-10-19', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 408021, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 408021, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-15', 'end_time': '2018-01-15', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 408021, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 408021, 'water_restriction_number': 'dl22', 'cage_number': 151704, 'wr_start_date': '2018-01-19', 'wr_start_weight': 21 }, skip_duplicates=True) # Subject 407512 / dl24 lab.Subject().insert1( { 'subject_id': 407512, 'username': '******', 'cage_number': 151629, 'date_of_birth': '2017-10-13', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407512, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407512, 'surgery_id': 1, 'username': '******', 'start_time': 
'2018-01-16', 'end_time': '2018-01-16', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 407512, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407512, 'water_restriction_number': 'dl24', 'cage_number': 151793, 'wr_start_date': '2018-01-22', 'wr_start_weight': 26 }, skip_duplicates=True) # 407513 / dl25 lab.Subject().insert1( { 'subject_id': 407513, 'username': '******', 'cage_number': 148636, 'date_of_birth': '2017-10-13', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407513, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407513, 'surgery_id': 1, 'username': '******', 'start_time': '2018-01-17', 'end_time': '2018-01-17', 'surgery_description': 'Headbar posterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 407513, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -1.75, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407513, 'water_restriction_number': 'dl25', 'cage_number': 151794, 'wr_start_date': '2018-01-22', 'wr_start_weight': 25.5 }, skip_duplicates=True) # Subject 407986 / dl28 lab.Subject().insert1( { 'subject_id': 407986, 'username': '******', 'cage_number': 152268, 'date_of_birth': '2017-10-18', 'sex': 'F', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.Subject.GeneModification().insert1( { 'subject_id': 407986, 'gene_modification': 'VGAT-Chr2-EYFP Jax' }, skip_duplicates=True) lab.Surgery().insert1( { 'subject_id': 407986, 'surgery_id': 1, 'username': '******', 'start_time': '2018-02-01', 'end_time': '2018-02-01', 'surgery_description': 'Headbar anterior' }, skip_duplicates=True) lab.Surgery.Procedure().insert1( { 'subject_id': 407986, 'surgery_id': 1, 'procedure_id': 1, 'skull_reference': 'Bregma', 'ml_location': 0, 'ap_location': -4, 'surgery_procedure_description': 'Fiducial marker' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 407986, 'water_restriction_number': 'dl28', 'cage_number': 152312, 'wr_start_date': '2018-02-05', 'wr_start_weight': 19.8 }, skip_duplicates=True) # Subject 123457 / tw5 lab.Subject().insert1( { 'subject_id': 123457, 'username': '******', 'cage_number': 145375, 'date_of_birth': '2017-08-03', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 123457, 'water_restriction_number': 'tw5', 'cage_number': 148861, 'wr_start_date': '2017-11-07', 'wr_start_weight': 20.5 }, skip_duplicates=True) # Subject 412330 / tw34 lab.Subject().insert1( { 'subject_id': 412330, 'username': '******', 'cage_number': 154522, 'date_of_birth': '2017-12-05', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 412330, 'water_restriction_number': 'tw34', 'cage_number': 154522, 'wr_start_date': '2018-03-18', 'wr_start_weight': 21.0 }, skip_duplicates=True) # subject 432998 / dl55 lab.Subject().insert1( { 'subject_id': 432998, 'username': '******', 'cage_number': 160920, 'date_of_birth': '2018-07-02', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) 
lab.WaterRestriction().insert1( { 'subject_id': 432998, 'water_restriction_number': 'dl55', 'cage_number': 160920, 'wr_start_date': '2018-09-05', 'wr_start_weight': 21.0 }, skip_duplicates=True) # Subject 435884 / dl59 lab.Subject().insert1( { 'subject_id': 435884, 'username': '******', 'cage_number': 161908, 'date_of_birth': '2018-08-06', 'sex': 'M', 'animal_source': 'Jackson labs' }, skip_duplicates=True) lab.WaterRestriction().insert1( { 'subject_id': 435884, 'water_restriction_number': 'dl59', 'cage_number': 154522, 'wr_start_date': '2018-09-30', 'wr_start_weight': 21.0 }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'TRig1', 'room': '2w.334', 'rig_description': 'Training rig 1' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'TRig2', 'room': '2w.334', 'rig_description': 'Training rig 2' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'TRig3', 'room': '2w.334', 'rig_description': 'Training rig 3' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'RRig', 'room': '2w.334', 'rig_description': 'Recording rig' }, skip_duplicates=True) lab.Rig().insert1( { 'rig': 'Ephys1', 'room': '2w.334', 'rig_description': 'Recording computer' }, skip_duplicates=True) # ---- Brain Location ---- experiment.BrainLocation.insert1( { 'brain_location_name': 'left_alm', 'brain_area': 'ALM', 'hemisphere': 'left', 'skull_reference': 'Bregma' }, skip_duplicates=True) experiment.BrainLocation.insert1( { 'brain_location_name': 'right_alm', 'brain_area': 'ALM', 'hemisphere': 'right', 'skull_reference': 'Bregma' }, skip_duplicates=True) experiment.BrainLocation.insert1( { 'brain_location_name': 'both_alm', 'brain_area': 'ALM', 'hemisphere': 'both', 'skull_reference': 'Bregma' }, skip_duplicates=True) experiment.BrainLocation.insert1( { 'brain_location_name': 'left_medulla', 'brain_area': 'Medulla', 'hemisphere': 'left', 'skull_reference': 'Bregma' }, skip_duplicates=True) experiment.BrainLocation.insert1( { 'brain_location_name': 'right_medulla', 'brain_area': 'Medulla', 'hemisphere': 'right', 'skull_reference': 'Bregma' }, skip_duplicates=True) experiment.BrainLocation.insert1( { 'brain_location_name': 'both_medulla', 'brain_area': 'Medulla', 'hemisphere': 'both', 'skull_reference': 'Bregma' }, skip_duplicates=True) # Probe (Neuropixel) npx_probe_model = '15131808323' # using Model No. - SN TBD? lab.Probe.insert1( { 'probe': npx_probe_model, 'probe_type': 'neuropixel' }, skip_duplicates=True, ) lab.Probe.Electrode.insert( ({ 'probe': npx_probe_model, 'electrode': x } for x in range(1, 961)), skip_duplicates=True, ) except Exception as e: print("error creating mock data: {e}".format(e=e), file=sys.stderr) raise
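
# Illustrative sketch (helper name is hypothetical): the six BrainLocation rows
# inserted above form a fixed brain-area x hemisphere grid, so they can also be
# generated from a cross product instead of being written out one dict at a time.
# This assumes only the ALM/Medulla x left/right/both combinations shown above.
def insert_mock_brain_locations():
    for area in ('ALM', 'Medulla'):
        for hemi in ('left', 'right', 'both'):
            experiment.BrainLocation.insert1(
                {'brain_location_name': '{}_{}'.format(hemi, area.lower()),
                 'brain_area': area,
                 'hemisphere': hemi,
                 'skull_reference': 'Bregma'},
                skip_duplicates=True)
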
def populatebehavior_core(IDs = None): if IDs: print('subject started:') print(IDs.keys()) print(IDs.values()) rigpath_1 = 'E:/Projects/Ablation/datajoint/Behavior' #df_surgery = pd.read_csv(dj.config['locations.metadata']+'Surgery.csv') if IDs == None: IDs = {k: v for k, v in zip(*lab.WaterRestriction().fetch('water_restriction_number', 'subject_id'))} for subject_now,subject_id_now in zip(IDs.keys(),IDs.values()): # iterating over subjects print('subject: ',subject_now) # ============================================================================= # if drop_last_session_for_mice_in_training: # delete_last_session_before_upload = True # else: # delete_last_session_before_upload = False # #df_wr = online_notebook.fetch_water_restriction_metadata(subject_now) # ============================================================================= try: df_wr = pd.read_csv(dj.config['locations.metadata_behavior']+subject_now+'.csv') except: print(subject_now + ' has no metadata available') df_wr = pd.DataFrame() for df_wr_row in df_wr.iterrows(): date_now = df_wr_row[1].Date.replace('-','') print('subject: ',subject_now,' date: ',date_now) session_date = datetime(int(date_now[0:4]),int(date_now[4:6]),int(date_now[6:8])) if len(experiment.Session() & 'subject_id = "'+str(subject_id_now)+'"' & 'session_date > "'+str(session_date)+'"') != 0: # if it is not the last print('session already imported, skipping: ' + str(session_date)) dotheupload = False elif len(experiment.Session() & 'subject_id = "'+str(subject_id_now)+'"' & 'session_date = "'+str(session_date)+'"') != 0: # if it is the last dotheupload = False else: # reuploading new session that is not present on the server dotheupload = True # if dotheupload is True, meaning that there are new mat file hasn't been uploaded # => needs to find which mat file hasn't been uploaded if dotheupload: found = set() rigpath_2 = subject_now rigpath_3 = rigpath_1 + '/' + rigpath_2 rigpath = pathlib.Path(rigpath_3) def buildrec(rigpath, root, f): try: fullpath = pathlib.Path(root, f) subpath = fullpath.relative_to(rigpath) fsplit = subpath.stem.split('_') h2o = fsplit[0] ymd = fsplit[-2:-1][0] animal = IDs[h2o] if ymd == date_now: return { 'subject_id': animal, 'session_date': date(int(ymd[0:4]), int(ymd[4:6]), int(ymd[6:8])), 'rig_data_path': rigpath.as_posix(), 'subpath': subpath.as_posix(), } except: pass for root, dirs, files in os.walk(rigpath): for f in files: r = buildrec(rigpath, root, f) if r: found.add(r['subpath']) file = r # now start insert data path = pathlib.Path(file['rig_data_path'], file['subpath']) mat = spio.loadmat(path, squeeze_me=True) SessionData = mat['SessionData'].flatten() # session record key skey = {} skey['subject_id'] = file['subject_id'] skey['session_date'] = file['session_date'] skey['username'] = '******' #skey['rig'] = key['rig'] trial = namedtuple( # simple structure to track per-trial vars 'trial', ('ttype', 'settings', 'state_times', 'state_names', 'state_data', 'event_data', 'event_times', 'trial_start')) # parse session datetime session_datetime_str = str('').join((str(SessionData['Info'][0]['SessionDate']),' ', str(SessionData['Info'][0]['SessionStartTime_UTC']))) session_datetime = datetime.strptime(session_datetime_str, '%d-%b-%Y %H:%M:%S') AllTrialTypes = SessionData['TrialTypes'][0] AllTrialSettings = SessionData['TrialSettings'][0] AllTrialStarts = SessionData['TrialStartTimestamp'][0] AllTrialStarts = AllTrialStarts - AllTrialStarts[0] RawData = SessionData['RawData'][0].flatten() AllStateNames = 
RawData['OriginalStateNamesByNumber'][0] AllStateData = RawData['OriginalStateData'][0] AllEventData = RawData['OriginalEventData'][0] AllStateTimestamps = RawData['OriginalStateTimestamps'][0] AllEventTimestamps = RawData['OriginalEventTimestamps'][0] trials = list(zip(AllTrialTypes, AllTrialSettings, AllStateTimestamps, AllStateNames, AllStateData, AllEventData, AllEventTimestamps, AllTrialStarts)) if not trials: log.warning('skipping date {d}, no valid files'.format(d=date)) return # # Trial data seems valid; synthesize session id & add session record # XXX: note - later breaks can result in Sessions without valid trials # assert skey['session_date'] == session_datetime.date() skey['session_date'] = session_datetime.date() #skey['session_time'] = session_datetime.time() if len(experiment.Session() & 'subject_id = "'+str(file['subject_id'])+'"' & 'session_date = "'+str(file['session_date'])+'"') == 0: if len(experiment.Session() & 'subject_id = "'+str(file['subject_id'])+'"') == 0: skey['session'] = 1 else: skey['session'] = len((experiment.Session() & 'subject_id = "'+str(file['subject_id'])+'"').fetch()['session']) + 1 # # Actually load the per-trial data # log.info('BehaviorIngest.make(): trial parsing phase') # lists of various records for batch-insert rows = {k: list() for k in ('trial', 'behavior_trial', 'trial_note', 'trial_event', 'corrected_trial_event', 'action_event')} #, 'photostim', #'photostim_location', 'photostim_trial', #'photostim_trial_event')} i = 0 # trial numbering starts at 1 for t in trials: t = trial(*t) # convert list of items to a 'trial' structure i += 1 # increment trial counter log.debug('BehaviorIngest.make(): parsing trial {i}'.format(i=i)) states = {k: (v+1) for v, k in enumerate(t.state_names)} required_states = ('PreSamplePeriod', 'SamplePeriod', 'DelayPeriod', 'ResponseCue', 'StopLicking', 'TrialEnd') missing = list(k for k in required_states if k not in states) if len(missing) and missing =='PreSamplePeriod': log.warning('skipping trial {i}; missing {m}'.format(i=i, m=missing)) continue gui = t.settings['GUI'].flatten() if len(experiment.Session() & 'subject_id = "'+str(file['subject_id'])+'"' & 'session_date = "'+str(file['session_date'])+'"') == 0: if len(experiment.Session() & 'subject_id = "'+str(file['subject_id'])+'"') == 0: skey['session'] = 1 else: skey['session'] = len((experiment.Session() & 'subject_id = "'+str(file['subject_id'])+'"').fetch()['session']) + 1 # # Top-level 'Trial' record # protocol_type = gui['ProtocolType'][0] tkey = dict(skey) has_presample = 1 try: startindex = np.where(t.state_data == states['PreSamplePeriod'])[0] has_presample = 1 except: startindex = np.where(t.state_data == states['SamplePeriod'])[0] has_presample = 0 # should be only end of 1st StopLicking; # rest of data is irrelevant w/r/t separately ingested ephys endindex = np.where(t.state_data == states['StopLicking'])[0] log.debug('states\n' + str(states)) log.debug('state_data\n' + str(t.state_data)) log.debug('startindex\n' + str(startindex)) log.debug('endindex\n' + str(endindex)) if not(len(startindex) and len(endindex)): log.warning('skipping {}: start/end mismatch: {}/{}'.format(i, str(startindex), str(endindex))) continue try: tkey['trial'] = i tkey['trial_uid'] = i tkey['trial_start_time'] = t.trial_start tkey['trial_stop_time'] = t.trial_start + t.state_times[endindex][0] except IndexError: log.warning('skipping {}: IndexError: {}/{} -> {}'.format(i, str(startindex), str(endindex), str(t.state_times))) continue log.debug('tkey' + str(tkey)) 
rows['trial'].append(tkey) # # Specific BehaviorTrial information for this trial # bkey = dict(tkey) bkey['task'] = 'audio delay' # hard-coded here bkey['task_protocol'] = 1 # hard-coded here # determine trial instruction trial_instruction = 'left' # hard-coded here if gui['Reversal'][0] == 1: if t.ttype == 1: trial_instruction = 'left' elif t.ttype == 0: trial_instruction = 'right' elif t.ttype == 2: trial_instruction = 'catch_right_autowater' elif t.ttype == 3: trial_instruction = 'catch_left_autowater' elif t.ttype == 4: trial_instruction = 'catch_right_noDelay' elif t.ttype == 5: trial_instruction = 'catch_left_noDelay' elif gui['Reversal'][0] == 2: if t.ttype == 1: trial_instruction = 'right' elif t.ttype == 0: trial_instruction = 'left' elif t.ttype == 2: trial_instruction = 'catch_left_autowater' elif t.ttype == 3: trial_instruction = 'catch_right_autowater' elif t.ttype == 4: trial_instruction = 'catch_left_noDelay' elif t.ttype == 5: trial_instruction = 'catch_right_noDelay' bkey['trial_instruction'] = trial_instruction # determine early lick early_lick = 'no early' if (protocol_type >= 5 and 'EarlyLickDelay' in states and np.any(t.state_data == states['EarlyLickDelay'])): early_lick = 'early' if (protocol_type >= 5 and ('EarlyLickSample' in states and np.any(t.state_data == states['EarlyLickSample']))): early_lick = 'early' bkey['early_lick'] = early_lick # determine outcome outcome = 'ignore' if ('Reward' in states and np.any(t.state_data == states['Reward'])): outcome = 'hit' elif ('TimeOut' in states and np.any(t.state_data == states['TimeOut'])): outcome = 'miss' elif ('NoResponse' in states and np.any(t.state_data == states['NoResponse'])): outcome = 'ignore' bkey['outcome'] = outcome rows['behavior_trial'].append(bkey) # # Add 'protocol' note # nkey = dict(tkey) nkey['trial_note_type'] = 'protocol #' nkey['trial_note'] = str(protocol_type) rows['trial_note'].append(nkey) # # Add 'autolearn' note # nkey = dict(tkey) nkey['trial_note_type'] = 'autolearn' nkey['trial_note'] = str(gui['Autolearn'][0]) rows['trial_note'].append(nkey) # # Add 'bitcode' note # if 'randomID' in gui.dtype.names: nkey = dict(tkey) nkey['trial_note_type'] = 'bitcode' nkey['trial_note'] = str(gui['randomID'][0]) rows['trial_note'].append(nkey) # # Add presample event # sampleindex = np.where(t.state_data == states['SamplePeriod'])[0] if has_presample == 1: log.debug('BehaviorIngest.make(): presample') ekey = dict(tkey) ekey['trial_event_id'] = len(rows['trial_event']) ekey['trial_event_type'] = 'presample' ekey['trial_event_time'] = t.state_times[startindex][0] ekey['duration'] = (t.state_times[sampleindex[0]]- t.state_times[startindex])[0] if math.isnan(ekey['duration']): log.debug('BehaviorIngest.make(): fixing presample duration') ekey['duration'] = 0.0 # FIXDUR: lookup from previous trial rows['trial_event'].append(ekey) # # Add other 'sample' events # log.debug('BehaviorIngest.make(): sample events') last_dur = None for s in sampleindex: # in protocol > 6 ~-> n>1 # todo: batch events ekey = dict(tkey) ekey['trial_event_id'] = len(rows['trial_event']) ekey['trial_event_type'] = 'sample' ekey['trial_event_time'] = t.state_times[s] ekey['duration'] = gui['SamplePeriod'][0] if math.isnan(ekey['duration']) and last_dur is None: log.warning('... trial {} bad duration, no last_edur'.format(i, last_dur)) ekey['duration'] = 0.0 # FIXDUR: cross-trial check rows['corrected_trial_event'].append(ekey) elif math.isnan(ekey['duration']) and last_dur is not None: log.warning('... 
trial {} duration using last_edur {}'.format(i, last_dur)) ekey['duration'] = last_dur rows['corrected_trial_event'].append(ekey) else: last_dur = ekey['duration'] # only track 'good' values. rows['trial_event'].append(ekey) # # Add 'delay' events # log.debug('BehaviorIngest.make(): delay events') last_dur = None delayindex = np.where(t.state_data == states['DelayPeriod'])[0] for d in delayindex: # protocol > 6 ~-> n>1 ekey = dict(tkey) ekey['trial_event_id'] = len(rows['trial_event']) ekey['trial_event_type'] = 'delay' ekey['trial_event_time'] = t.state_times[d] ekey['duration'] = gui['DelayPeriod'][0] if math.isnan(ekey['duration']) and last_dur is None: log.warning('... {} bad duration, no last_edur'.format(i, last_dur)) ekey['duration'] = 0.0 # FIXDUR: cross-trial check rows['corrected_trial_event'].append(ekey) elif math.isnan(ekey['duration']) and last_dur is not None: log.warning('... {} duration using last_edur {}'.format(i, last_dur)) ekey['duration'] = last_dur rows['corrected_trial_event'].append(ekey) else: last_dur = ekey['duration'] # only track 'good' values. log.debug('delay event duration: {}'.format(ekey['duration'])) rows['trial_event'].append(ekey) # # Add 'go' event # log.debug('BehaviorIngest.make(): go') ekey = dict(tkey) responseindex = np.where(t.state_data == states['ResponseCue'])[0] ekey['trial_event_id'] = len(rows['trial_event']) ekey['trial_event_type'] = 'go' ekey['trial_event_time'] = t.state_times[responseindex][0] ekey['duration'] = gui['AnswerPeriod'][0] if math.isnan(ekey['duration']): log.debug('BehaviorIngest.make(): fixing go duration') ekey['duration'] = 0.0 # FIXDUR: lookup from previous trials rows['corrected_trial_event'].append(ekey) rows['trial_event'].append(ekey) # # Add 'trialEnd' events # log.debug('BehaviorIngest.make(): trialend events') last_dur = None trialendindex = np.where(t.state_data == states['TrialEnd'])[0] ekey = dict(tkey) ekey['trial_event_id'] = len(rows['trial_event']) ekey['trial_event_type'] = 'trialend' ekey['trial_event_time'] = t.state_times[trialendindex][0] ekey['duration'] = 0.0 rows['trial_event'].append(ekey) # # Add lick events # lickleft = np.where(t.event_data == 69)[0] log.debug('... lickleft: {r}'.format(r=str(lickleft))) action_event_count = len(rows['action_event']) if len(lickleft): [rows['action_event'].append( dict(tkey, action_event_id=action_event_count+idx, action_event_type='left lick', action_event_time=t.event_times[l])) for idx, l in enumerate(lickleft)] lickright = np.where(t.event_data == 71)[0] log.debug('... lickright: {r}'.format(r=str(lickright))) action_event_count = len(rows['action_event']) if len(lickright): [rows['action_event'].append( dict(tkey, action_event_id=action_event_count+idx, action_event_type='right lick', action_event_time=t.event_times[r])) for idx, r in enumerate(lickright)] # end of trial loop.. # Session Insertion log.info('BehaviorIngest.make(): adding session record') skey['session_date'] = df_wr_row[1].Date skey['rig'] = 'Old Recording rig' skey['username'] = '******' experiment.Session().insert1(skey,skip_duplicates=True) # Behavior Insertion log.info('BehaviorIngest.make(): ... experiment.Session.Trial') experiment.SessionTrial().insert( rows['trial'], ignore_extra_fields=True, allow_direct_insert=True) log.info('BehaviorIngest.make(): ... experiment.BehaviorTrial') experiment.BehaviorTrial().insert( rows['behavior_trial'], ignore_extra_fields=True, allow_direct_insert=True) log.info('BehaviorIngest.make(): ... 
experiment.TrialNote') experiment.TrialNote().insert( rows['trial_note'], ignore_extra_fields=True, allow_direct_insert=True) log.info('BehaviorIngest.make(): ... experiment.TrialEvent') experiment.TrialEvent().insert( rows['trial_event'], ignore_extra_fields=True, allow_direct_insert=True, skip_duplicates=True) # log.info('BehaviorIngest.make(): ... CorrectedTrialEvents') # BehaviorIngest().CorrectedTrialEvents().insert( # rows['corrected_trial_event'], ignore_extra_fields=True, # allow_direct_insert=True) log.info('BehaviorIngest.make(): ... experiment.ActionEvent') experiment.ActionEvent().insert( rows['action_event'], ignore_extra_fields=True, allow_direct_insert=True) #%% for ingest tracking if IDs: print('subject started:') print(IDs.keys()) print(IDs.values()) rigpath_tracking_1 = 'E:/Projects/Ablation/datajoint/video/' rigpath_tracking_2 = subject_now VideoDate1 = str(df_wr_row[1].VideoDate) if len(VideoDate1)==5: VideoDate = '0'+ VideoDate1 elif len(VideoDate1)==7: VideoDate = '0'+ VideoDate1 rigpath_tracking_3 = rigpath_tracking_1 + rigpath_tracking_2 + '/' + rigpath_tracking_2 + '_'+ VideoDate + '_front' rigpath_tracking = pathlib.Path(rigpath_tracking_3) #df_surgery = pd.read_csv(dj.config['locations.metadata']+'Surgery.csv') if IDs == None: IDs = {k: v for k, v in zip(*lab.WaterRestriction().fetch('water_restriction_number', 'subject_id'))} h2o = subject_now session = df_wr_row[1].Date trials = (experiment.SessionTrial() & session).fetch('trial') log.info('got session: {} ({} trials)'.format(session, len(trials))) #sdate = session['session_date'] #sdate_sml = date_now #"{}{:02d}{:02d}".format(sdate.year, sdate.month, sdate.day) paths = rigpath_tracking devices = tracking.TrackingDevice().fetch(as_dict=True) # paths like: <root>/<h2o>/YYYY-MM-DD/tracking tracking_files = [] for d in (d for d in devices): tdev = d['tracking_device'] tpos = d['tracking_position'] tdat = paths log.info('checking {} for tracking data'.format(tdat)) # if not tpath.exists(): # log.warning('tracking path {} n/a - skipping'.format(tpath)) # continue # # camtrial = '{}_{}_{}.txt'.format(h2o, sdate_sml, tpos) # campath = tpath / camtrial # # log.info('trying camera position trial map: {}'.format(campath)) # # if not campath.exists(): # log.info('skipping {} - does not exist'.format(campath)) # continue # # tmap = load_campath(campath) # file:trial # n_tmap = len(tmap) # log.info('loading tracking data for {} trials'.format(n_tmap)) i = 0 VideoTrialNum = df_wr_row[1].VideoTrialNum #tpath = pathlib.Path(tdat, h2o, VideoDate, 'tracking') ppp = list(range(0,VideoTrialNum)) for tt in reversed(range(VideoTrialNum)): # load tracking for trial i += 1 # if i % 50 == 0: # log.info('item {}/{}, trial #{} ({:.2f}%)' # .format(i, n_tmap, t, (i/n_tmap)*100)) # else: # log.debug('item {}/{}, trial #{} ({:.2f}%)' # .format(i, n_tmap, t, (i/n_tmap)*100)) # ex: dl59_side_1-0000.csv / h2o_position_tn-0000.csv tfile = '{}_{}_{}_{}-*.csv'.format(h2o, VideoDate ,tpos, tt) tfull = list(tdat.glob(tfile)) if not tfull or len(tfull) > 1: log.info('file mismatch: file: {} trial: ({})'.format( tt, tfull)) continue tfull = tfull[-1] trk = load_tracking(tfull) recs = {} #key_source = experiment.Session - tracking.Tracking rec_base = dict(trial=ppp[tt], tracking_device=tdev) #print(rec_base) for k in trk: if k == 'samples': recs['tracking'] = { 'subject_id' : skey['subject_id'], 'session' : skey['session'], **rec_base, 'tracking_samples': len(trk['samples']['ts']), } else: rec = dict(rec_base) for attr in trk[k]: rec_key = 
'{}_{}'.format(k, attr) rec[rec_key] = np.array(trk[k][attr]) recs[k] = rec tracking.Tracking.insert1( recs['tracking'], allow_direct_insert=True) #if len(recs['nose']) > 3000: #continue recs['nose'] = { 'subject_id' : skey['subject_id'], 'session' : skey['session'], **recs['nose'], } #print(recs['nose']['nose_x']) if 'nose' in recs: tracking.Tracking.NoseTracking.insert1( recs['nose'], allow_direct_insert=True) recs['tongue_mid'] = { 'subject_id' : skey['subject_id'], 'session' : skey['session'], **recs['tongue_mid'], } if 'tongue_mid' in recs: tracking.Tracking.TongueTracking.insert1( recs['tongue_mid'], allow_direct_insert=True) recs['jaw'] = { 'subject_id' : skey['subject_id'], 'session' : skey['session'], **recs['jaw'], } if 'jaw' in recs: tracking.Tracking.JawTracking.insert1( recs['jaw'], allow_direct_insert=True) recs['tongue_left'] = { 'subject_id' : skey['subject_id'], 'session' : skey['session'], **recs['tongue_left'], } if 'tongue_left' in recs: tracking.Tracking.LeftTongueTracking.insert1( recs['tongue_left'], allow_direct_insert=True) recs['tongue_right'] = { 'subject_id' : skey['subject_id'], 'session' : skey['session'], **recs['tongue_right'], } if 'tongue_right' in recs: tracking.Tracking.RightTongueTracking.insert1( recs['tongue_right'], allow_direct_insert=True) # fmap = {'paw_left_x': 'left_paw_x', # remap field names # 'paw_left_y': 'left_paw_y', # 'paw_left_likelihood': 'left_paw_likelihood'} # tracking.Tracking.LeftPawTracking.insert1({ # **{k: v for k, v in recs['paw_left'].items() # if k not in fmap}, # **{fmap[k]: v for k, v in recs['paw_left'].items() # if k in fmap}}, allow_direct_insert=True) recs['right_lickport'] = { 'subject_id' : skey['subject_id'], 'session' : skey['session'], **recs['right_lickport'], } if 'right_lickport' in recs: tracking.Tracking.RightLickPortTracking.insert1( recs['right_lickport'], allow_direct_insert=True) # fmap = {'paw_right_x': 'right_paw_x', # remap field names # 'paw_right_y': 'right_paw_y', # 'paw_right_likelihood': 'right_paw_likelihood'} # # tracking.Tracking.RightPawTracking.insert1({ # **{k: v for k, v in recs['paw_right'].items() # if k not in fmap}, # **{fmap[k]: v for k, v in recs['paw_right'].items() # if k in fmap}}, allow_direct_insert=True) recs['left_lickport'] = { 'subject_id' : skey['subject_id'], 'session' : skey['session'], **recs['left_lickport'], } if 'left_lickport' in recs: tracking.Tracking.LeftLickPortTracking.insert1( recs['left_lickport'], allow_direct_insert=True) # tracking_files.append({**key, 'trial': tmap[t], 'tracking_device': tdev, # 'tracking_file': str(tfull.relative_to(tdat))}) # # log.info('... completed {}/{} items.'.format(i, n_tmap)) # # self.insert1(key) # self.TrackingFile.insert(tracking_files) # tracking.VideoFiducialsTrial.populate() bottom_tongue.Camera_pixels.populate() print('start!') bottom_tongue.VideoTongueTrial.populate() sessiontrialdata={ 'subject_id':skey['subject_id'], 'session':skey['session'], 'trial': tt } if len(bottom_tongue.VideoTongueTrial* experiment.Session & experiment.BehaviorTrial & 'session_date = "'+str(file['session_date'])+'"' &{'trial':tt})==0: print('trial couldn''t be exported, deleting trial') print(tt) dj.config['safemode'] = False (experiment.SessionTrial()&sessiontrialdata).delete() dj.config['safemode'] = True log.info('... done.')
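
# Illustrative sketch (helper name is hypothetical, not part of the pipeline):
# populatebehavior_core() above derives the next session number several times
# with len(...fetch()['session']) + 1. BehaviorIngest.make() further below uses
# a max() aggregation for the same purpose, which also stays correct when
# session numbers are not contiguous; factored out it looks like this.
def next_session_number(subject_id):
    last = dj.U().aggr(experiment.Session() & {'subject_id': subject_id},
                       n='max(session)').fetch1('n')
    return (last or 0) + 1
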
def make(self, key): water_res_num, sess_date = get_wr_sessdate(key) sess_dir = store_stage / water_res_num / sess_date sess_dir.mkdir(parents=True, exist_ok=True) # -- Plotting -- fig = plt.figure(figsize=(20, 12)) fig.suptitle(f'{water_res_num}, session {key["session"]}') gs = GridSpec(5, 1, wspace=0.4, hspace=0.4, bottom=0.07, top=0.95, left=0.1, right=0.9) ax1 = fig.add_subplot(gs[0:3, :]) ax2 = fig.add_subplot(gs[3, :]) ax3 = fig.add_subplot(gs[4, :]) ax1.get_shared_x_axes().join(ax1, ax2, ax3) # Plot settings plot_setting = {'left lick': 'red', 'right lick': 'blue'} # -- Get event times -- key_subject_id_session = ( experiment.Session() & (lab.WaterRestriction() & 'water_restriction_number="{}"'.format(water_res_num)) & 'session="{}"'.format(key['session'])).fetch1("KEY") go_cue_times = (experiment.TrialEvent() & key_subject_id_session & 'trial_event_type="go"').fetch( 'trial_event_time', order_by='trial').astype(float) lick_times = pd.DataFrame( (experiment.ActionEvent() & key_subject_id_session).fetch(order_by='trial')) trial_num = len(go_cue_times) all_trial_num = np.arange(1, trial_num + 1).tolist() all_trial_start = [[-x] for x in go_cue_times] all_lick = dict() for event_type in plot_setting: all_lick[event_type] = [] for i, trial_start in enumerate(all_trial_start): all_lick[event_type].append( (lick_times[(lick_times['trial'] == i + 1) & (lick_times['action_event_type'] == event_type )]['action_event_time'].values.astype(float) + trial_start).tolist()) # -- All licking events (Ordered by trials) -- ax1.plot([0, 0], [0, trial_num], 'k', lw=0.5) # Aligned by go cue ax1.set(ylabel='Trial number', xlim=(-3, 3), xticks=[]) # Batch plotting to speed up ax1.eventplot(lineoffsets=all_trial_num, positions=all_trial_start, color='k') # Aligned by go cue for event_type in plot_setting: ax1.eventplot(lineoffsets=all_trial_num, positions=all_lick[event_type], color=plot_setting[event_type], linewidth=2) # Trial start # -- Histogram of all licks -- for event_type in plot_setting: sns.histplot(np.hstack(all_lick[event_type]), binwidth=0.01, alpha=0.5, ax=ax2, color=plot_setting[event_type], label=event_type) # 10-ms window ymax_tmp = max(ax2.get_ylim()) sns.histplot(-go_cue_times, binwidth=0.01, color='k', ax=ax2, label='trial start') # 10-ms window ax2.axvline(x=0, color='k', lw=0.5) ax2.set(ylim=(0, ymax_tmp), xticks=[], title='All events') # Fix the ylim of left and right licks ax2.legend() # -- Histogram of reaction time (first lick after go cue) -- plot_setting = {'LEFT': 'red', 'RIGHT': 'blue'} for water_port in plot_setting: this_RT = (foraging_analysis.TrialStats() & key_subject_id_session & (experiment.WaterPortChoice() & 'water_port="{}"'.format(water_port)) ).fetch('reaction_time').astype(float) sns.histplot(this_RT, binwidth=0.01, alpha=0.5, ax=ax3, color=plot_setting[water_port], label=water_port) # 10-ms window ax3.axvline(x=0, color='k', lw=0.5) ax3.set(xlabel='Time to Go Cue (s)', title='First lick (reaction time)' ) # Fix the ylim of left and right licks ax3.legend() # ---- Save fig and insert ---- fn_prefix = f'{water_res_num}_{sess_date}_' fig_dict = save_figs((fig, ), ('session_foraging_licking_psth', ), sess_dir, fn_prefix) plt.close('all') self.insert1({**key, **fig_dict})
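
# Minimal standalone sketch of the go-cue-aligned lick raster drawn above, using
# synthetic numbers (the real data come from experiment.TrialEvent and
# experiment.ActionEvent). Each trial's lick times are shifted by that trial's
# go-cue time so x = 0 marks the go cue, then drawn with eventplot().
import numpy as np
import matplotlib.pyplot as plt

def demo_lick_raster():
    rng = np.random.default_rng(0)
    go_cue_times = rng.uniform(1.0, 2.0, size=20)                # one go cue per trial
    licks = [rng.uniform(0, 3.0, size=8) for _ in go_cue_times]  # absolute lick times
    aligned = [lk - go for lk, go in zip(licks, go_cue_times)]   # 0 = go cue
    fig, ax = plt.subplots()
    ax.eventplot(aligned, lineoffsets=np.arange(1, len(aligned) + 1),
                 colors='blue', linewidths=2)
    ax.axvline(0, color='k', lw=0.5)                             # go cue
    ax.set(xlabel='Time from go cue (s)', ylabel='Trial number')
    return fig
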
def populatemetadata(): #%% save metadata from google drive if necessairy lastmodify = online_notebook.fetch_lastmodify_time_animal_metadata() #print(lastmodify) with open(dj.config['locations.metadata_behavior'] + 'last_modify_time.json') as timedata: lastmodify_prev = json.loads(timedata.read()) if lastmodify != lastmodify_prev: print('updating surgery and WR metadata from google drive') dj.config['locations.metadata_behavior'] df_surgery = online_notebook.fetch_animal_metadata() df_surgery.to_csv(dj.config['locations.metadata_behavior'] + 'Surgery.csv') IDs = df_surgery['ID'].tolist() for ID in IDs: df_wr = online_notebook.fetch_water_restriction_metadata(ID) if type(df_wr) == pd.DataFrame: df_wr.to_csv(dj.config['locations.metadata_behavior'] + ID + '.csv') with open( dj.config['locations.metadata_behavior'] + 'last_modify_time.json', "w") as write_file: json.dump(lastmodify, write_file) print('surgery and WR metadata updated') lastmodify = online_notebook.fetch_lastmodify_time_lab_metadata() with open(dj.config['locations.metadata_lab'] + 'last_modify_time.json') as timedata: lastmodify_prev = json.loads(timedata.read()) if lastmodify != lastmodify_prev: print('updating Lab metadata from google drive') dj.config['locations.metadata_lab'] IDs = ['Experimenter', 'Rig'] for ID in IDs: df_wr = online_notebook.fetch_lab_metadata(ID) if type(df_wr) == pd.DataFrame: df_wr.to_csv(dj.config['locations.metadata_lab'] + ID + '.csv') with open( dj.config['locations.metadata_lab'] + 'last_modify_time.json', "w") as write_file: json.dump(lastmodify, write_file) print('Lab metadata updated') #%% add users df_experimenters = pd.read_csv(dj.config['locations.metadata_lab'] + 'Experimenter.csv') experimenterdata = list() for experimenter in df_experimenters.iterrows(): experimenter = experimenter[1] dictnow = { 'username': experimenter['username'], 'fullname': experimenter['fullname'] } experimenterdata.append(dictnow) print('adding experimenters') for experimenternow in experimenterdata: try: lab.Person().insert1(experimenternow) except: # dj.DuplicateError: print('duplicate. experimenter: ', experimenternow['username'], ' already exists') #%% add rigs df_rigs = pd.read_csv(dj.config['locations.metadata_lab'] + 'Rig.csv') rigdata = list() for rig in df_rigs.iterrows(): rig = rig[1] dictnow = { 'rig': rig['rig'], 'room': rig['room'], 'rig_description': rig['rig_description'] } rigdata.append(dictnow) print('adding rigs') for rignow in rigdata: try: lab.Rig().insert1(rignow) except dj.errors.DuplicateError: print('duplicate. rig: ', rignow['rig'], ' already exists') #%% populate subjects, surgeries and water restrictions print('adding surgeries and stuff') df_surgery = pd.read_csv(dj.config['locations.metadata_behavior'] + 'Surgery.csv') #%% for item in df_surgery.iterrows(): if item[1]['status'] == 'experiment': subjectdata = { 'subject_id': int(item[1]['animal#']), 'username': item[1]['experimenter'], 'cage_number': item[1]['cage#'], 'date_of_birth': item[1]['DOB'], 'sex': item[1]['sex'], 'animal_source': None, } #print(subjectdata) try: lab.Subject.insert1(subjectdata) except dj.errors.DuplicateError: print('duplicate. 
animal :', item[1]['animal#'], ' already exists') surgeryidx = 1 #while 'surgery date ('+str(surgeryidx)+')' in item[1].keys() and item[1]['surgery date ('+str(surgeryidx)+')'] and type(item[1]['surgery date ('+str(surgeryidx)+')']) == str: # start_time = datetime.strptime(item[1]['surgery date ('+str(surgeryidx)+')']+' '+item[1]['surgery time ('+str(surgeryidx)+')'],'%Y-%m-%d %H:%M') # end_time = start_time + timedelta(minutes = int(item[1]['surgery length (min) ('+str(surgeryidx)+')'])) surgerydata = { 'surgery_id': surgeryidx, 'subject_id': item[1]['animal#'], 'username': item[1]['experimenter'], 'surgery_description': 'lesion', } # 'subject_id':int(item[1]['animal#']), # 'username': item[1]['experimenter'], # 'brain_area': item[1]['BrainArea'], # 'hemisphere': item[1]['Hemisphere'], try: lab.Surgery.insert1(surgerydata) except dj.errors.DuplicateError: print('duplicate. surgery for animal ', item[1]['animal#'], ' already exists: ') surgerylesiondata = { 'surgery_id': surgeryidx, 'lesion_id': surgeryidx, 'subject_id': item[1]['animal#'], 'method': item[1]['LesionMethod'] } try: lab.Surgery.Lesion.insert1(surgerylesiondata) except dj.errors.DuplicateError: print('duplicate. surgery lesion animal ', item[1]['animal#'], ' already exists: ') brainareadata = { #'surgery_id': surgeryidx, 'brain_area': item[1]['BrainArea'], 'subject_id': item[1]['animal#'], } try: lab.BrainArea.insert1(brainareadata) except dj.errors.DuplicateError: print('duplicate. surgery brain area animal ', item[1]['animal#'], ' already exists: ') hemispheredata = { #'surgery_id': surgeryidx, 'hemisphere': item[1]['Hemisphere'], 'subject_id': item[1]['animal#'], } try: lab.Hemisphere.insert1(hemispheredata) except dj.errors.DuplicateError: print('duplicate. surgery hemisphere area animal ', item[1]['animal#'], ' already exists: ') trainingmethoddata = { 'training_method': item[1]['TrainingMethod'], 'subject_id': item[1]['animal#'], } try: lab.Training.insert1(trainingmethoddata) except dj.errors.DuplicateError: print('duplicate. 
training animal ', item[1]['animal#'], ' already exists: ') #print(surgerydata) #%% #checking craniotomies #% # cranioidx = 1 # while 'craniotomy diameter ('+str(cranioidx)+')' in item[1].keys() and item[1]['craniotomy diameter ('+str(cranioidx)+')'] and (type(item[1]['craniotomy surgery id ('+str(cranioidx)+')']) == int or type(item[1]['craniotomy surgery id ('+str(cranioidx)+')']) == float): # if item[1]['craniotomy surgery id ('+str(cranioidx)+')'] == surgeryidx: # proceduredata = { # 'surgery_id': surgeryidx, # 'subject_id':item[1]['animal#'], # 'procedure_id':cranioidx, # 'skull_reference':item[1]['craniotomy reference ('+str(cranioidx)+')'], # 'ml_location':item[1]['craniotomy lateral ('+str(cranioidx)+')'], # 'ap_location':item[1]['craniotomy anterior ('+str(cranioidx)+')'], # 'surgery_procedure_description': 'craniotomy: ' + item[1]['craniotomy comments ('+str(cranioidx)+')'], # } # try: # lab.Surgery.Procedure().insert1(proceduredata) # except dj.DuplicateError: # print('duplicate cranio for animal ',item[1]['animal#'], ' already exists: ', cranioidx) # cranioidx += 1 # #% # virusinjidx = 1 # while 'virus inj surgery id ('+str(virusinjidx)+')' in item[1].keys() and item[1]['virus inj virus id ('+str(virusinjidx)+')'] and item[1]['virus inj surgery id ('+str(virusinjidx)+')']: # if item[1]['virus inj surgery id ('+str(virusinjidx)+')'] == surgeryidx: # # ============================================================================= # # print('waiting') # # timer.sleep(1000) # # ============================================================================= # if '[' in item[1]['virus inj lateral ('+str(virusinjidx)+')']: # virus_ml_locations = eval(item[1]['virus inj lateral ('+str(virusinjidx)+')']) # virus_ap_locations = eval(item[1]['virus inj anterior ('+str(virusinjidx)+')']) # virus_dv_locations = eval(item[1]['virus inj ventral ('+str(virusinjidx)+')']) # virus_volumes = eval(item[1]['virus inj volume (nl) ('+str(virusinjidx)+')']) # else: # virus_ml_locations = [int(item[1]['virus inj lateral ('+str(virusinjidx)+')'])] # virus_ap_locations = [int(item[1]['virus inj anterior ('+str(virusinjidx)+')'])] # virus_dv_locations = [int(item[1]['virus inj ventral ('+str(virusinjidx)+')'])] # virus_volumes = [int(item[1]['virus inj volume (nl) ('+str(virusinjidx)+')'])] # # for virus_ml_location,virus_ap_location,virus_dv_location,virus_volume in zip(virus_ml_locations,virus_ap_locations,virus_dv_locations,virus_volumes): # injidx = len(lab.Surgery.VirusInjection() & surgerydata) +1 # virusinjdata = { # 'surgery_id': surgeryidx, # 'subject_id':item[1]['animal#'], # 'injection_id':injidx, # 'virus_id':item[1]['virus inj virus id ('+str(virusinjidx)+')'], # 'skull_reference':item[1]['virus inj reference ('+str(virusinjidx)+')'], # 'ml_location':virus_ml_location, # 'ap_location':virus_ap_location, # 'dv_location':virus_dv_location, # 'volume':virus_volume, # 'dilution':item[1]['virus inj dilution ('+str(virusinjidx)+')'], # 'description': 'virus injection: ' + item[1]['virus inj comments ('+str(virusinjidx)+')'], # } # try: # lab.Surgery.VirusInjection().insert1(virusinjdata) # except dj.DuplicateError: # print('duplicate virus injection for animal ',item[1]['animal#'], ' already exists: ', injidx) # virusinjidx += 1 # #% # # surgeryidx += 1 #%% if item[1]['ID']: #df_wr = online_notebook.fetch_water_restriction_metadata(item[1]['ID']) try: df_wr = pd.read_csv( dj.config['locations.metadata_behavior'] + item[1]['ID'] + '.csv') except: df_wr = None if type(df_wr) == pd.DataFrame: wrdata = { 
'water_restriction_number': item[1]['ID'], 'subject_id': int(item[1]['animal#']), 'cage_number': item[1]['cage#'], 'wr_start_date': '0001-01-01', 'wr_start_weight': 0, } try: lab.WaterRestriction().insert1(wrdata) except dj.errors.DuplicateError: print('duplicate. water restriction:', item[1]['ID'], ' already exists')
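
# Illustrative sketch (not part of populatemetadata above): the row-by-row
# experimenter ingestion with its try/except dj.errors.DuplicateError can also
# be written as one batch insert, since pandas can emit the row dicts directly
# and skip_duplicates suppresses re-inserts. Assumes Experimenter.csv has the
# 'username' and 'fullname' columns read above.
def insert_experimenters_batch(csv_path):
    df = pd.read_csv(csv_path)
    rows = df[['username', 'fullname']].to_dict('records')
    lab.Person.insert(rows, skip_duplicates=True)
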
def make(self, key): ''' TrackingIngest .make() function ''' log.info('TrackingIngest().make(): key: {k}'.format(k=key)) h2o = (lab.WaterRestriction() & key).fetch1('water_restriction_number') session = (experiment.Session() & key).fetch1() trials = (experiment.SessionTrial() & session).fetch('trial') log.info('got session: {} ({} trials)'.format(session, len(trials))) sdate = session['session_date'] sdate_sml = "{}{:02d}{:02d}".format(sdate.year, sdate.month, sdate.day) paths = get_tracking_paths() devices = tracking.TrackingDevice().fetch(as_dict=True) # paths like: <root>/<h2o>/YYYY-MM-DD/tracking tracking_files = [] for p, d in ((p, d) for d in devices for p in paths): tdev = d['tracking_device'] tpos = d['tracking_position'] tdat = p[-1] log.info('checking {} for tracking data'.format(tdat)) tpath = pathlib.Path(tdat, h2o, sdate.strftime('%Y%m%d'), 'tracking') if not tpath.exists(): log.warning('tracking path {} n/a - skipping'.format(tpath)) continue camtrial = '{}_{}_{}.txt'.format(h2o, sdate_sml, tpos) campath = tpath / camtrial log.info('trying camera position trial map: {}'.format(campath)) if not campath.exists(): log.info('skipping {} - does not exist'.format(campath)) continue tmap = self.load_campath(campath) # file:trial n_tmap = len(tmap) log.info('loading tracking data for {} trials'.format(n_tmap)) i = 0 for t in tmap: # load tracking for trial if tmap[t] not in trials: log.warning('nonexistant trial {}.. skipping'.format(t)) continue i += 1 if i % 50 == 0: log.info('item {}/{}, trial #{} ({:.2f}%)'.format( i, n_tmap, t, (i / n_tmap) * 100)) else: log.debug('item {}/{}, trial #{} ({:.2f}%)'.format( i, n_tmap, t, (i / n_tmap) * 100)) # ex: dl59_side_1-0000.csv / h2o_position_tn-0000.csv tfile = '{}_{}_{}-*.csv'.format(h2o, tpos, t) tfull = list(tpath.glob(tfile)) if not tfull or len(tfull) > 1: log.info('file mismatch: file: {} trial: {} ({})'.format( t, tmap[t], tfull)) continue tfull = tfull[-1] trk = self.load_tracking(tfull) recs = {} rec_base = dict(key, trial=tmap[t], tracking_device=tdev) for k in trk: if k == 'samples': recs['tracking'] = { **rec_base, 'tracking_samples': len(trk['samples']['ts']), } else: rec = dict(rec_base) for attr in trk[k]: rec_key = '{}_{}'.format(k, attr) rec[rec_key] = np.array(trk[k][attr]) recs[k] = rec tracking.Tracking.insert1(recs['tracking'], allow_direct_insert=True) if 'nose' in recs: tracking.Tracking.NoseTracking.insert1( recs['nose'], allow_direct_insert=True) if 'tongue' in recs: tracking.Tracking.TongueTracking.insert1( recs['tongue'], allow_direct_insert=True) if 'jaw' in recs: tracking.Tracking.JawTracking.insert1( recs['jaw'], allow_direct_insert=True) if 'paw_left' in recs: fmap = { 'paw_left_x': 'left_paw_x', # remap field names 'paw_left_y': 'left_paw_y', 'paw_left_likelihood': 'left_paw_likelihood' } tracking.Tracking.LeftPawTracking.insert1( { **{ k: v for k, v in recs['paw_left'].items() if k not in fmap }, **{ fmap[k]: v for k, v in recs['paw_left'].items() if k in fmap } }, allow_direct_insert=True) if 'paw_right' in recs: fmap = { 'paw_right_x': 'right_paw_x', # remap field names 'paw_right_y': 'right_paw_y', 'paw_right_likelihood': 'right_paw_likelihood' } tracking.Tracking.RightPawTracking.insert1( { **{ k: v for k, v in recs['paw_right'].items() if k not in fmap }, **{ fmap[k]: v for k, v in recs['paw_right'].items() if k in fmap } }, allow_direct_insert=True) tracking_files.append({ **key, 'trial': tmap[t], 'tracking_device': tdev, 'tracking_file': str(tfull.relative_to(tdat)) }) log.info('... 
completed {}/{} items.'.format(i, n_tmap)) self.insert1(key) self.TrackingFile.insert(tracking_files) log.info('... done.')
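
# Illustrative sketch (helper name is hypothetical): the paw-tracking inserts in
# TrackingIngest.make() above rename DLC field names with a pair of dict
# comprehensions. The same remapping can be factored into one helper so the
# paw_left and paw_right branches reuse it.
def remap_fields(rec, fmap):
    """Return rec with the keys listed in fmap renamed; other keys unchanged."""
    return {fmap.get(k, k): v for k, v in rec.items()}

# e.g. remap_fields(recs['paw_left'],
#                   {'paw_left_x': 'left_paw_x',
#                    'paw_left_y': 'left_paw_y',
#                    'paw_left_likelihood': 'left_paw_likelihood'})
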
df_sessiontrialdata.to_records(index=False), allow_direct_insert=True) experiment.BehaviorTrial().insert( df_behaviortrialdata.to_records(index=False), allow_direct_insert=True) sessionnumber = 30 for subject_now in [ 'leaky3t.05c30h', 'leaky3t.05c15h', 'leaky3t.05c5h', 'W-St-L-Rnd', 'W-St-L-Sw', 'leaky3t5it30h', 'leaky3t3it30h', 'cheater' ]: #subject_now = 'leaky3ms.05c30t'#'leaky3ms.05c15t'#'leaky3ms.05c30t'#'W-St-L-Rnd'#'W-St-L-Sw' #'leakyint_9ms+c'#'leakyint_3ms+c'#' setupname = 'virtual_setup' experimenter = 'rozsam' print('subject: ', subject_now) if len(lab.WaterRestriction() & 'water_restriction_number = "' + subject_now + '"') > 0: subject_id_to_del = (lab.WaterRestriction() & 'water_restriction_number = "' + subject_now + '"').fetch('subject_id')[0] dj.config['safemode'] = False (lab.Subject() & 'subject_id = ' + str(subject_id_to_del)).delete() dj.config['safemode'] = True for subject_id in range(100): if len(lab.Subject & 'subject_id = ' + str(subject_id)) == 0: break #% subjectdata = { 'subject_id': subject_id, 'cage_number': 0, 'date_of_birth': datetime.now().strftime('%Y-%m-%d'),
def key_source(self): # 2 letters, anything, _, anything, 8 digits, _, 6 digits, .mat # where: # (2 letters, anything): water restriction # (anything): task name # (8 digits): date YYYYMMDD # (6 digits): time HHMMSS rexp = '^[a-zA-Z]{2}.*_.*_[0-9]{8}_[0-9]{6}.mat$' # water_restriction_number -> subject h2os = {k: v for k, v in zip(*lab.WaterRestriction().fetch( 'water_restriction_number', 'subject_id'))} def buildrec(rig, rigpath, root, f): if not re.match(rexp, f): log.debug("{f} skipped - didn't match rexp".format(f=f)) return log.debug('found file {f}'.format(f=f)) fullpath = os.path.join(root, f) subpath = fullpath.split(rigpath)[1].lstrip(os.path.sep) fsplit = f.split('.')[0].split('_') h2o = fsplit[0] date = fsplit[-2:-1][0] if h2o not in h2os: log.warning('{f} skipped - no animal for {h2o}'.format( f=f, h2o=h2o)) return animal = h2os[h2o] log.debug('animal is {animal}'.format(animal=animal)) return { 'subject_id': animal, 'session_date': datetime.date( int(date[0:4]), int(date[4:6]), int(date[6:8])), 'rig': rig, 'rig_data_path': rigpath, 'subpath': subpath } recs = [] found = set() rigs = RigDataPath().fetch(as_dict=True, order_by='rig_search_order') for r in rigs: rig = r['rig'] rigpath = r['rig_data_path'] log.info('RigDataFile.make(): traversing {p}'.format(p=rigpath)) for root, dirs, files in os.walk(rigpath): log.debug('RigDataFile.make(): entering {r}'.format(r=root)) for f in files: log.debug('RigDataFile.make(): visiting {f}'.format(f=f)) r = buildrec(rig, rigpath, root, f) if r and r['subpath'] not in found: found.add(r['subpath']) # block duplicate path conf recs.append(r) return recs
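
# Illustrative sketch of the filename convention matched by rexp above. A
# behavior file looks like <h2o>_<task>_<YYYYMMDD>_<HHMMSS>.mat (e.g. the
# 'dl7_anything_20180104_132813.mat' example in BehaviorIngest.make below); the
# water restriction number is the first '_' field and the date the
# second-to-last, exactly what fsplit[0] and fsplit[-2:-1][0] extract.
def parse_behavior_filename(fname):
    stem = fname.rsplit('.', 1)[0]  # drop '.mat'
    fields = stem.split('_')
    h2o = fields[0]                 # e.g. 'dl7'
    ymd = fields[-2]                # e.g. '20180104'
    hms = fields[-1]                # e.g. '132813'
    return h2o, ymd, hms
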
def make(self, key): log.info('BehaviorIngest.make(): key: {key}'.format(key=key)) subject_id = key['subject_id'] h2o = (lab.WaterRestriction() & {'subject_id': subject_id}).fetch1( 'water_restriction_number') date = key['session_date'] datestr = date.strftime('%Y%m%d') log.debug('h2o: {h2o}, date: {d}'.format(h2o=h2o, d=datestr)) # session record key skey = {} skey['subject_id'] = subject_id skey['session_date'] = date skey['username'] = '******' # username has to be changed # e.g: dl7/anything/Session Data/dl7_anything_20180104_132813.ma root = os.path.join(key['rig_data_path'], os.path.dirname(key['subpath'])) path = os.path.join(root, '{h2o}_*_{d}*.mat'.format( h2o=h2o, d=datestr)) log.debug('rigpath {p}'.format(p=path)) matches = glob.glob(path) if len(matches): log.debug('found files, this is the rig') skey['rig'] = key['rig'] else: log.info('no file matches found in {p}'.format(p=path)) if not len(matches): log.warning('no file matches found for {h2o} / {d}'.format( h2o=h2o, d=datestr)) return # # Find files & Check for split files # XXX: not checking rig.. 2+ sessions on 2+ rigs possible for date? # if len(matches) > 1: log.warning('split session case detected for {h2o} on {date}' .format(h2o=h2o, date=date)) # session:date relationship is 1:1; skip if we have a session if experiment.Session() & skey: log.warning("Warning! session exists for {h2o} on {d}".format( h2o=h2o, d=date)) return # # Extract trial data from file(s) & prepare trial loop # trials = zip() trial = namedtuple( # simple structure to track per-trial vars 'trial', ('ttype', 'settings', 'state_times', 'state_names', 'state_data', 'event_data', 'event_times')) for f in matches: if os.stat(f).st_size/1024 < 100: log.info('skipping file {f} - too small'.format(f=f)) continue mat = spio.loadmat(f, squeeze_me=True) SessionData = mat['SessionData'].flatten() AllTrialTypes = SessionData['TrialTypes'][0] AllTrialSettings = SessionData['TrialSettings'][0] RawData = SessionData['RawData'][0].flatten() AllStateNames = RawData['OriginalStateNamesByNumber'][0] AllStateData = RawData['OriginalStateData'][0] AllEventData = RawData['OriginalEventData'][0] AllStateTimestamps = RawData['OriginalStateTimestamps'][0] AllEventTimestamps = RawData['OriginalEventTimestamps'][0] # verify trial-related data arrays are all same length assert(all((x.shape[0] == AllStateTimestamps.shape[0] for x in (AllTrialTypes, AllTrialSettings, AllStateNames, AllStateData, AllEventData, AllEventTimestamps)))) z = zip(AllTrialTypes, AllTrialSettings, AllStateTimestamps, AllStateNames, AllStateData, AllEventData, AllEventTimestamps) trials = chain(trials, z) # concatenate the files trials = list(trials) # all files were internally invalid or size < 100k if not trials: log.warning('skipping date {d}, no valid files'.format(d=date)) # # Trial data seems valid; synthesize session id & add session record # XXX: note - later breaks can result in Sessions without valid trials # log.debug('synthesizing session ID') session = (dj.U().aggr(experiment.Session() & {'subject_id': subject_id}, n='max(session)').fetch1('n') or 0) + 1 log.info('generated session id: {session}'.format(session=session)) skey['session'] = session key = dict(key, **skey) log.debug('BehaviorIngest.make(): adding session record') experiment.Session().insert1(skey) # # Actually load the per-trial data # log.info('BehaviorIngest.make(): trial parsing phase') # lists of various records for batch-insert rows = {k: list() for k in ('trial', 'behavior_trial', 'trial_note', 'trial_event', 
'corrected_trial_event', 'action_event')} i = -1 for t in trials: # # Misc # t = trial(*t) # convert list of items to a 'trial' structure i += 1 # increment trial counter log.info('BehaviorIngest.make(): parsing trial {i}'.format(i=i)) # covert state data names into a lookup dictionary # # names (seem to be? are?): # # Trigtrialstart # PreSamplePeriod # SamplePeriod # DelayPeriod # EarlyLickDelay # EarlyLickSample # ResponseCue # GiveLeftDrop # GiveRightDrop # GiveLeftDropShort # GiveRightDropShort # AnswerPeriod # Reward # RewardConsumption # NoResponse # TimeOut # StopLicking # StopLickingReturn # TrialEnd states = {k: (v+1) for v, k in enumerate(t.state_names)} required_states = ('PreSamplePeriod', 'SamplePeriod', 'DelayPeriod', 'ResponseCue', 'StopLicking', 'TrialEnd') missing = list(k for k in required_states if k not in states) if len(missing): log.info('skipping trial {i}; missing {m}' .format(i=i, m=missing)) continue gui = t.settings['GUI'].flatten() # ProtocolType - only ingest protocol >= 3 # # 1 Water-Valve-Calibration 2 Licking 3 Autoassist # 4 No autoassist 5 DelayEnforce 6 SampleEnforce 7 Fixed # if 'ProtocolType' not in gui.dtype.names: log.info('skipping trial {i}; protocol undefined' .format(i=i)) continue protocol_type = gui['ProtocolType'][0] if gui['ProtocolType'][0] < 3: log.info('skipping trial {i}; protocol {n} < 3' .format(i=i, n=gui['ProtocolType'][0])) continue # # Top-level 'Trial' record # tkey = dict(skey) startindex = np.where(t.state_data == states['PreSamplePeriod'])[0] # should be only end of 1st StopLicking; # rest of data is irrelevant w/r/t separately ingested ephys endindex = np.where(t.state_data == states['StopLicking'])[0] log.debug('states\n' + str(states)) log.debug('state_data\n' + str(t.state_data)) log.debug('startindex\n' + str(startindex)) log.debug('endendex\n' + str(endindex)) if not(len(startindex) and len(endindex)): log.info('skipping trial {i}: start/end index error: {s}/{e}'.format(i=i,s=str(startindex), e=str(endindex))) continue try: tkey['trial'] = i tkey['trial_uid'] = i # Arseny has unique id to identify some trials tkey['start_time'] = t.state_times[startindex][0] except IndexError: log.info('skipping trial {i}: error indexing {s}/{e} into {t}'.format(i=i,s=str(startindex), e=str(endindex), t=str(t.state_times))) continue log.debug('BehaviorIngest.make(): Trial().insert1') # TODO msg log.debug('tkey' + str(tkey)) rows['trial'].append(tkey) # # Specific BehaviorTrial information for this trial # bkey = dict(tkey) bkey['task'] = 'audio delay' bkey['task_protocol'] = 1 # determine trial instruction trial_instruction = 'left' if gui['Reversal'][0] == 1: if t.ttype == 1: trial_instruction = 'left' elif t.ttype == 0: trial_instruction = 'right' elif gui['Reversal'][0] == 2: if t.ttype == 1: trial_instruction = 'right' elif t.ttype == 0: trial_instruction = 'left' bkey['trial_instruction'] = trial_instruction # determine early lick early_lick = 'no early' if (protocol_type >= 5 and 'EarlyLickDelay' in states and np.any(t.state_data == states['EarlyLickDelay'])): early_lick = 'early' if (protocol_type > 5 and ('EarlyLickSample' in states and np.any(t.state_data == states['EarlyLickSample']))): early_lick = 'early' bkey['early_lick'] = early_lick # determine outcome outcome = 'ignore' if ('Reward' in states and np.any(t.state_data == states['Reward'])): outcome = 'hit' elif ('TimeOut' in states and np.any(t.state_data == states['TimeOut'])): outcome = 'miss' elif ('NoResponse' in states and np.any(t.state_data == states['NoResponse'])): 
outcome = 'ignore' bkey['outcome'] = outcome # add behavior record log.debug('BehaviorIngest.make(): BehaviorTrial()') rows['behavior_trial'].append(bkey) # # Add 'protocol' note # nkey = dict(tkey) nkey['trial_note_type'] = 'protocol #' nkey['trial_note'] = str(protocol_type) log.debug('BehaviorIngest.make(): TrialNote().insert1') rows['trial_note'].append(nkey) # # Add 'autolearn' note # nkey = dict(tkey) nkey['trial_note_type'] = 'autolearn' nkey['trial_note'] = str(gui['Autolearn'][0]) rows['trial_note'].append(nkey) #pdb.set_trace() # # Add 'bitcode' note # if 'randomID' in gui.dtype.names: nkey = dict(tkey) nkey['trial_note_type'] = 'bitcode' nkey['trial_note'] = str(gui['randomID'][0]) rows['trial_note'].append(nkey) # # Add presample event # log.debug('BehaviorIngest.make(): presample') ekey = dict(tkey) sampleindex = np.where(t.state_data == states['SamplePeriod'])[0] ekey['trial_event_type'] = 'presample' ekey['trial_event_time'] = t.state_times[startindex][0] ekey['duration'] = (t.state_times[sampleindex[0]] - t.state_times[startindex])[0] if math.isnan(ekey['duration']): log.debug('BehaviorIngest.make(): fixing presample duration') ekey['duration'] = 0.0 # FIXDUR: lookup from previous trial rows['trial_event'].append(ekey) # # Add 'go' event # log.debug('BehaviorIngest.make(): go') ekey = dict(tkey) responseindex = np.where(t.state_data == states['ResponseCue'])[0] ekey['trial_event_type'] = 'go' ekey['trial_event_time'] = t.state_times[responseindex][0] ekey['duration'] = gui['AnswerPeriod'][0] if math.isnan(ekey['duration']): log.debug('BehaviorIngest.make(): fixing go duration') ekey['duration'] = 0.0 # FIXDUR: lookup from previous trials rows['corrected_trial_event'].append(ekey) rows['trial_event'].append(ekey) # # Add other 'sample' events # log.debug('BehaviorIngest.make(): sample events') last_dur = None for s in sampleindex: # in protocol > 6 ~-> n>1 # todo: batch events ekey = dict(tkey) ekey['trial_event_type'] = 'sample' ekey['trial_event_time'] = t.state_times[s] ekey['duration'] = gui['SamplePeriod'][0] if math.isnan(ekey['duration']) and last_dur is None: log.warning('... bad duration, no last_edur' .format(last_dur)) ekey['duration'] = 0.0 # FIXDUR: cross-trial check rows['corrected_trial_event'].append(ekey) elif math.isnan(ekey['duration']) and last_dur is not None: log.debug('... duration using last_edur {}' .format(last_dur)) ekey['duration'] = last_dur rows['corrected_trial_event'].append(ekey) else: last_dur = ekey['duration'] # only track 'good' values. rows['trial_event'].append(ekey) # # Add 'delay' events # log.debug('BehaviorIngest.make(): delay events') last_dur = None delayindex = np.where(t.state_data == states['DelayPeriod'])[0] for d in delayindex: # protocol > 6 ~-> n>1 ekey = dict(tkey) ekey['trial_event_type'] = 'delay' ekey['trial_event_time'] = t.state_times[d] ekey['duration'] = gui['DelayPeriod'][0] if math.isnan(ekey['duration']) and last_dur is None: log.warning('... bad duration, no last_edur' .format(last_dur)) ekey['duration'] = 0.0 # FIXDUR: cross-trial check rows['corrected_trial_event'].append(ekey) elif math.isnan(ekey['duration']) and last_dur is not None: log.debug('... duration using last_edur {}' .format(last_dur)) ekey['duration'] = last_dur rows['corrected_trial_event'].append(ekey) else: last_dur = ekey['duration'] # only track 'good' values. log.debug('delay event duration: {}'.format(ekey['duration'])) rows['trial_event'].append(ekey) # # Add lick events # lickleft = np.where(t.event_data == 69)[0] log.debug('... 
lickleft: {r}'.format(r=str(lickleft))) if len(lickleft): [rows['action_event'].append( dict(**tkey, action_event_type='left lick', action_event_time=t.event_times[l])) for l in lickleft] lickright = np.where(t.event_data == 70)[0] log.debug('... lickright: {r}'.format(r=str(lickright))) if len(lickright): [rows['action_event'].append( dict(**tkey, action_event_type='right lick', action_event_time=t.event_times[r])) for r in lickright] # end of trial loop. log.info('BehaviorIngest.make(): bulk insert phase') log.info('BehaviorIngest.make(): saving ingest {d}'.format(d=key)) self.insert1(key, ignore_extra_fields=True) log.info('BehaviorIngest.make(): ... experiment.Session.Trial') experiment.SessionTrial().insert( rows['trial'], ignore_extra_fields=True) log.info('BehaviorIngest.make(): ... experiment.BehaviorTrial') experiment.BehaviorTrial().insert( rows['behavior_trial'], ignore_extra_fields=True) log.info('BehaviorIngest.make(): ... experiment.TrialNote') experiment.TrialNote().insert( rows['trial_note'], ignore_extra_fields=True) log.info('BehaviorIngest.make(): ... experiment.TrialEvent') experiment.TrialEvent().insert( rows['trial_event'], ignore_extra_fields=True) log.info('BehaviorIngest.make(): ... CorrectedTrialEvents') BehaviorIngest().CorrectedTrialEvents().insert( rows['corrected_trial_event'], ignore_extra_fields=True) log.info('BehaviorIngest.make(): ... experiment.ActionEvent') experiment.ActionEvent().insert( rows['action_event'], ignore_extra_fields=True) BehaviorIngest.BehaviorFile().insert( (dict(key, behavior_file=f.split(root)[1]) for f in matches), ignore_extra_fields=True)
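# =============================================================================
# Hedged usage note: the make() above is normally driven by DataJoint's
# populate machinery rather than called directly.  Assuming BehaviorIngest is
# declared as a dj.Imported table and the behavior rig paths are configured,
# a minimal driver would look like this (illustrative sketch only):
#
#     BehaviorIngest.populate(display_progress=True, reserve_jobs=False)
#
# Each parsed trial contributes one row per target table (SessionTrial,
# BehaviorTrial, TrialNote, TrialEvent, ActionEvent); the rows are collected
# in `rows` during the trial loop and bulk-inserted at the end of make().
# =============================================================================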
def populateelphys(): #%% df_subject_wr_sessions = pd.DataFrame(lab.WaterRestriction() * experiment.Session() * experiment.SessionDetails) df_subject_ids = pd.DataFrame(lab.Subject()) if len(df_subject_wr_sessions) > 0: subject_names = df_subject_wr_sessions[ 'water_restriction_number'].unique() subject_names.sort() else: subject_names = list() subject_ids = df_subject_ids['subject_id'].unique() #% sumdata = list() basedir = Path(dj.config['locations.elphysdata_acq4']) for setup_dir in basedir.iterdir(): setup_name = setup_dir.name sessions = np.sort( os.listdir(setup_dir) ) #configfile.readConfigFile(setup_dir.joinpath('.index')) for session_acq in sessions[::-1]: #.keys(): if session_acq != '.' and session_acq != 'log.txt': session_dir = setup_dir.joinpath(session_acq) try: cells = configfile.readConfigFile( session_dir.joinpath('.index')) except: # if there is no file cells = None if cells and 'WR_name/ID' in cells['.'].keys( ): # it needs to have WRname wrname_ephys = cells['.']['WR_name/ID'] wrname = None for wrname_potential in subject_names: # look for water restriction number if wrname_potential.lower() in wrname_ephys.lower(): wrname = wrname_potential subject_id = (df_subject_wr_sessions.loc[ df_subject_wr_sessions[ 'water_restriction_number'] == wrname, 'subject_id']).unique()[0] if wrname == None: # look for animal identifier: for wrname_potential in subject_ids: # look for water restriction number if str(wrname_potential) in wrname_ephys.lower(): subject_id = wrname_potential if len(df_subject_wr_sessions) > 0 and len( (df_subject_wr_sessions.loc[ df_subject_wr_sessions['subject_id'] == subject_id, 'water_restriction_number'] ).unique()) > 0: wrname = (df_subject_wr_sessions.loc[ df_subject_wr_sessions['subject_id'] == subject_id, 'water_restriction_number'] ).unique()[0] else: wrname = 'no water restriction number for this mouse' if wrname: session_date = ( session_acq[0:session_acq.find('_')]).replace( '.', '-') print('animal: ' + str(subject_id) + ' - ' + wrname) ## if setup_name == 'Voltage_rig_1P': setupname = 'Voltage-Imaging-1p' else: print('unkwnown setup, please add') timer.wait(1000) if 'experimenter' in cells['.'].keys(): username = cells['.']['experimenter'] else: username = '******' print( 'username not specified in acq4 file, assuming rozsam' ) ### check if session already exists sessiondata = { 'subject_id': subject_id, #(lab.WaterRestriction() & 'water_restriction_number = "'+df_behavior_session['subject'][0]+'"').fetch()[0]['subject_id'], 'session': np.nan, 'session_date': session_date, 'session_time': np.nan, #session_time.strftime('%H:%M:%S'), 'username': username, 'rig': setupname } for cell in cells.keys(): if cell != '.' and cell != 'log.txt': ephisdata_cell = list() sweepstarttimes = list() cell_dir = session_dir.joinpath(cell) serieses = configfile.readConfigFile( cell_dir.joinpath('.index')) cellstarttime = datetime.datetime.fromtimestamp( serieses['.']['__timestamp__']) for series in serieses.keys(): if series != '.' and series != 'log.txt': series_dir = cell_dir.joinpath(series) sweeps = configfile.readConfigFile( series_dir.joinpath('.index')) if 'Clamp1.ma' in sweeps.keys(): protocoltype = 'single sweep' sweepkeys = [''] else: protocoltype = 'multiple sweeps' sweepkeys = sweeps.keys() for sweep in sweepkeys: if sweep != '.' 
and '.txt' not in sweep and '.ma' not in sweep: sweep_dir = series_dir.joinpath( sweep) sweepinfo = configfile.readConfigFile( sweep_dir.joinpath( '.index')) if sweep == '': sweep = '0' for file in sweepinfo.keys(): if '.ma' in file: try: # old file version #print('new file version') #% ephysfile = h5.File( sweep_dir. joinpath(file), "r") data = ephysfile[ 'data'][()] metadata_h5 = ephysfile[ 'info'] metadata = read_h5f_metadata( metadata_h5) daqchannels = list( metadata[2] ['DAQ'].keys()) sweepstarttime = datetime.datetime.fromtimestamp( metadata[2] ['DAQ'] [daqchannels[ 0]] ['startTime']) relativetime = ( sweepstarttime - cellstarttime ).total_seconds() if len( ephisdata_cell ) > 0 and ephisdata_cell[ -1]['sweepstarttime'] == sweepstarttime: ephisdata = ephisdata_cell.pop( ) else: ephisdata = dict( ) if 'primary' in daqchannels: # ephys data ephisdata[ 'V'] = data[ 1] ephisdata[ 'stim'] = data[ 0] ephisdata[ 'data'] = data ephisdata[ 'metadata'] = metadata ephisdata[ 'time'] = metadata[ 1]['values'] ephisdata[ 'relativetime'] = relativetime ephisdata[ 'sweepstarttime'] = sweepstarttime ephisdata[ 'series'] = series ephisdata[ 'sweep'] = sweep sweepstarttimes.append( sweepstarttime ) else: # other daq stuff #% for idx, channel in enumerate( metadata[ 0] ['cols']): channelname = channel[ 'name'].decode( ) if channelname[ 0] == 'u': channelname = channelname[ 2: -1] if channelname in [ 'OrcaFlashExposure', 'Temperature', 'LED525', 'FrameCommand', 'NextFileTrigger' ]: ephisdata[ channelname] = data[ idx] #print('{} added'.format(channelname)) else: print( 'waiting in the other daq' ) timer.sleep( 1000 ) ephisdata_cell.append( ephisdata) #% except: # new file version print( 'old version') ephysfile = MetaArray( ) ephysfile.readFile( sweep_dir. joinpath(file)) data = ephysfile.asarray( ) metadata = ephysfile.infoCopy( ) sweepstarttime = datetime.datetime.fromtimestamp( metadata[2] ['startTime']) relativetime = ( sweepstarttime - cellstarttime ).total_seconds() ephisdata = dict() ephisdata[ 'V'] = data[1] ephisdata[ 'stim'] = data[ 0] ephisdata[ 'data'] = data ephisdata[ 'metadata'] = metadata ephisdata[ 'time'] = metadata[ 1]['values'] ephisdata[ 'relativetime'] = relativetime ephisdata[ 'sweepstarttime'] = sweepstarttime ephisdata[ 'series'] = series ephisdata[ 'sweep'] = sweep sweepstarttimes.append( sweepstarttime) ephisdata_cell.append( ephisdata) # ============================================================================ # if wrname == 'FOR04': # ============================================================================= # add session to DJ if not present if len(ephisdata_cell) > 0: # ============================================================================= # print('waiting') # timer.sleep(1000) # ============================================================================= #% if len(experiment.Session() & 'subject_id = "' + str(sessiondata['subject_id']) + '"' & 'session_date = "' + str(sessiondata['session_date']) + '"') == 0: if len(experiment.Session() & 'subject_id = "' + str(sessiondata['subject_id']) + '"') == 0: sessiondata['session'] = 1 else: sessiondata['session'] = len( (experiment.Session() & 'subject_id = "' + str(sessiondata['subject_id']) + '"').fetch()['session']) + 1 sessiondata['session_time'] = ( sweepstarttimes[0] ).strftime( '%H:%M:%S' ) # the time of the first sweep will be the session time experiment.Session().insert1( sessiondata) #% session = ( experiment.Session() & 'subject_id = "' + str(sessiondata['subject_id']) + '"' & 'session_date = "' + 
str(sessiondata['session_date']) + '"').fetch('session')[0] cell_number = int(cell[cell.find('_') + 1:]) #add cell if not added already celldata = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, } #% if len(ephys_patch.Cell() & celldata ) == 0 or len(ephys_patch.Cell() * ephys_patch.Sweep() & celldata) < len( ephisdata_cell): if len(ephys_patch.Cell() * ephys_patch.Sweep() & celldata ) < len(ephisdata_cell): print('finishing a recording:') else: print('adding new recording:') print(celldata) if 'type' in serieses['.'].keys(): if serieses['.'][ 'type'] == 'interneuron': celldata['cell_type'] = 'int' elif serieses['.'][ 'type'] == 'unknown' or serieses[ '.']['type'] == 'fail': celldata[ 'cell_type'] = 'unidentified' else: print('unhandled cell type!!') timer.sleep(1000) else: celldata[ 'cell_type'] = 'unidentified' celldata['cell_recording_start'] = ( sweepstarttimes[0] ).strftime('%H:%M:%S') if 'depth' in serieses['.'].keys( ) and len(serieses['.']['depth']) > 0: celldata['depth'] = int( serieses['.']['depth']) else: celldata['depth'] = -1 try: ephys_patch.Cell().insert1( celldata, allow_direct_insert=True) except dj.errors.DuplicateError: pass #already uploaded if 'notes' in serieses['.'].keys(): cellnotes = serieses['.']['notes'] else: cellnotes = '' cellnotesdata = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, 'notes': cellnotes } try: ephys_patch.CellNotes().insert1( cellnotesdata, allow_direct_insert=True) except dj.errors.DuplicateError: pass #already uploaded #% for i, ephisdata in enumerate( ephisdata_cell): #% sweep_number = i print('sweep {}'.format( sweep_number)) sweep_data = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, 'sweep_number': sweep_number, 'sweep_start_time': (ephisdata['sweepstarttime'] - sweepstarttimes[0] ).total_seconds(), 'sweep_end_time': (ephisdata['sweepstarttime'] - sweepstarttimes[0] ).total_seconds() + ephisdata['time'][-1], 'protocol_name': ephisdata[ 'series'], #[:ephisdata['series'].find('_')], 'protocol_sweep_number': int(ephisdata['sweep']) } if 'mode' in ephisdata['metadata'][ 2]['ClampState']: # old file version recmode = ephisdata[ 'metadata'][2][ 'ClampState']['mode'] else: recmode = ephisdata[ 'metadata'][2]['Protocol'][ 'mode'] if 'IC' in str(recmode): recording_mode = 'current clamp' else: print( 'unhandled recording mode, please act..' ) timer.sleep(10000) channelnames = list() channelunits = list() for line_now in ephisdata[ 'metadata'][0]['cols']: if type(line_now['name'] ) == bytes: channelnames.append( line_now['name']. decode().strip("'")) channelunits.append( line_now['units']. decode().strip("'")) else: channelnames.append( line_now['name']) channelunits.append( line_now['units']) commandidx = np.where( np.array(channelnames) == 'command')[0][0] dataidx = np.where( np.array(channelnames) == 'primary')[0][0] #% clampparams_data = ephisdata[ 'metadata'][2]['ClampState'][ 'ClampParams'].copy() clampparams_data_new = dict() for clampparamkey in clampparams_data.keys( ): #6004 is true for some reason.. 
changing it back to 1 if type(clampparams_data[ clampparamkey] ) == np.int32: if clampparams_data[ clampparamkey] > 0: clampparams_data[ clampparamkey] = int( 1) else: clampparams_data[ clampparamkey] = int( 0) else: clampparams_data[ clampparamkey] = float( clampparams_data[ clampparamkey]) clampparams_data_new[ clampparamkey.lower( )] = clampparams_data[ clampparamkey] #% sweepmetadata_data = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, 'sweep_number': sweep_number, 'recording_mode': recording_mode, 'sample_rate': np.round(1 / np.median( np.diff( ephisdata['metadata'] [1]['values']))) } sweepmetadata_data.update( clampparams_data_new) sweepdata_data = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, 'sweep_number': sweep_number, 'response_trace': ephisdata['data'][dataidx, :], 'response_units': ephisdata['metadata'][0] ['cols'][dataidx]['units'] } sweepstimulus_data = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, 'sweep_number': sweep_number, 'stimulus_trace': ephisdata['data'][ commandidx, :], 'stimulus_units': ephisdata['metadata'][0] ['cols'][commandidx]['units'] } #print('waiting') #timer.sleep(10000) try: ephys_patch.Sweep().insert1( sweep_data, allow_direct_insert=True) except dj.errors.DuplicateError: pass #already uploaded try: # maybe it's a duplicate.. ephys_patch.ClampParams( ).insert1( clampparams_data_new, allow_direct_insert=True) except dj.errors.DuplicateError: pass #already uploaded try: ephys_patch.SweepMetadata( ).insert1( sweepmetadata_data, allow_direct_insert=True) except dj.errors.DuplicateError: pass #already uploaded try: ephys_patch.SweepResponse( ).insert1( sweepdata_data, allow_direct_insert=True) except dj.errors.DuplicateError: pass #already uploaded try: ephys_patch.SweepStimulus( ).insert1( sweepstimulus_data, allow_direct_insert=True) except dj.errors.DuplicateError: pass #already uploaded #% if 'OrcaFlashExposure' in ephisdata.keys( ): sweepimagingexposuredata = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, 'sweep_number': sweep_number, 'imaging_exposure_trace': ephisdata[ 'OrcaFlashExposure'] } try: ephys_patch.SweepImagingExposure( ).insert1( sweepimagingexposuredata, allow_direct_insert=True ) except dj.errors.DuplicateError: pass #already uploaded if 'Temperature' in ephisdata.keys( ): sweeptemperaturedata = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, 'sweep_number': sweep_number, 'temperature_trace': ephisdata['Temperature'] * 10, 'temperature_units': 'degC' } try: ephys_patch.SweepTemperature( ).insert1( sweeptemperaturedata, allow_direct_insert=True ) except dj.errors.DuplicateError: pass #already uploaded if 'LED525' in ephisdata.keys(): sweepLEDdata = { 'subject_id': subject_id, 'session': session, 'cell_number': cell_number, 'sweep_number': sweep_number, 'imaging_led_trace': ephisdata['LED525'] } try: ephys_patch.SweepLED( ).insert1( sweepLEDdata, allow_direct_insert=True ) except dj.errors.DuplicateError: pass #already uploaded
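# =============================================================================
# Hedged usage sketch for populateelphys(): it walks the acq4 data tree
# configured in dj.config['locations.elphysdata_acq4'], matches each session
# folder to a subject via lab.WaterRestriction() / lab.Subject(), and inserts
# sessions, cells, sweeps, stimuli, metadata and auxiliary traces into
# ephys_patch.  A minimal driver (the path below is a placeholder):
#
#     dj.config['locations.elphysdata_acq4'] = '/data/acq4/'
#     populateelphys()
# =============================================================================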
def extract_trials(plottype='2lickport', wr_name='FOR01', sessions=(5, 11), show_bias_check_trials=True, kernel=np.ones(10) / 10, filters=None, local_matching={'calculate_local_matching': False}): #%% # ============================================================================= # plottype = '2lickport' # wr_name = 'FOR11' # sessions = (25,46) # show_bias_check_trials = False # kernel = np.ones(20)/20 # filters = {'ignore_rate_max':40} # local_matching = {'calculate_local_matching': True, # 'sliding_window':50, # 'matching_window':500, # 'matching_step':100} # ============================================================================= movingwindow = local_matching['sliding_window'] fit_window = local_matching['matching_window'] fit_step = local_matching['matching_step'] subject_id = (lab.WaterRestriction() & 'water_restriction_number = "{}"'.format(wr_name) ).fetch1('subject_id') df_behaviortrial = pd.DataFrame(np.asarray( (experiment.BehaviorTrial() * experiment.SessionTrial() * experiment.TrialEvent() * experiment.SessionBlock() * behavior_foraging.TrialReactionTime & 'subject_id = {}'.format(subject_id) & 'session >= {}'.format(sessions[0]) & 'session <= {}'.format(sessions[1]) & 'trial_event_type = "go"').fetch( 'session', 'trial', 'early_lick', 'trial_start_time', 'reaction_time', 'p_reward_left', 'p_reward_right', 'p_reward_middle', 'trial_event_time', 'trial_choice', 'outcome')).T, columns=[ 'session', 'trial', 'early_lick', 'trial_start_time', 'reaction_time', 'p_reward_left', 'p_reward_right', 'p_reward_middle', 'trial_event_time', 'trial_choice', 'outcome' ]) unique_sessions = df_behaviortrial['session'].unique() df_behaviortrial['iti'] = np.nan df_behaviortrial['delay'] = np.nan df_behaviortrial['early_count'] = 0 df_behaviortrial.loc[df_behaviortrial['early_lick'] == 'early', 'early_count'] = 1 df_behaviortrial['ignore_rate'] = np.nan df_behaviortrial['reaction_time_smoothed'] = np.nan if type(filters) == dict: df_behaviortrial['keep_trial'] = 1 for session in unique_sessions: total_trials_so_far = ( behavior_foraging.SessionStats() & 'subject_id = {}'.format(subject_id) & 'session < {}'.format(session)).fetch('session_total_trial_num') bias_check_trials_now = (behavior_foraging.SessionStats() & 'subject_id = {}'.format(subject_id) & 'session = {}'.format(session) ).fetch1('session_bias_check_trial_num') total_trials_so_far = sum(total_trials_so_far) gotime = df_behaviortrial.loc[df_behaviortrial['session'] == session, 'trial_event_time'] trialtime = df_behaviortrial.loc[df_behaviortrial['session'] == session, 'trial_start_time'] itis = np.concatenate([[np.nan], np.diff(np.asarray(trialtime + gotime, float))]) df_behaviortrial.loc[df_behaviortrial['session'] == session, 'iti'] = itis df_behaviortrial.loc[df_behaviortrial['session'] == session, 'delay'] = np.asarray(gotime, float) df_behaviortrial.loc[df_behaviortrial['session'] == session, 'ignore_rate'] = np.convolve( df_behaviortrial.loc[ df_behaviortrial['session'] == session, 'outcome'] == 'ignore', kernel, 'same') reaction_time_interpolated = np.asarray( pd.DataFrame( np.asarray( df_behaviortrial.loc[df_behaviortrial['session'] == session, 'reaction_time'].values, float)).interpolate().values.ravel().tolist()) * 1000 df_behaviortrial.loc[df_behaviortrial['session'] == session, 'reaction_time_smoothed'] = np.convolve( reaction_time_interpolated, kernel, 'same') df_behaviortrial.loc[df_behaviortrial['session'] == session, 'trial'] += total_trials_so_far if type(filters) == dict: max_idx = ( 
df_behaviortrial.loc[df_behaviortrial['session'] == session, 'ignore_rate'] > filters['ignore_rate_max'] / 100).idxmax() session_first_trial_idx = ( df_behaviortrial['session'] == session).idxmax() #print(max_idx) if max_idx > session_first_trial_idx or df_behaviortrial[ 'ignore_rate'][session_first_trial_idx] > filters[ 'ignore_rate_max'] / 100: df_behaviortrial.loc[df_behaviortrial.index.isin( np.arange(max_idx, len(df_behaviortrial))) & (df_behaviortrial['session'] == session), 'keep_trial'] = 0 #% if type(filters) == dict: trialstokeep = df_behaviortrial['keep_trial'] == 1 df_behaviortrial = df_behaviortrial[trialstokeep] df_behaviortrial = df_behaviortrial.reset_index(drop=True) if not show_bias_check_trials: realtraining = (df_behaviortrial['p_reward_left'] < 1) & (df_behaviortrial['p_reward_right'] < 1) & ( (df_behaviortrial['p_reward_middle'] < 1) | df_behaviortrial['p_reward_middle'].isnull()) df_behaviortrial = df_behaviortrial[realtraining] df_behaviortrial = df_behaviortrial.reset_index(drop=True) #% calculating local matching, bias, reward rate kernel = np.ones(movingwindow) / movingwindow p1 = np.asarray( np.max([ df_behaviortrial['p_reward_right'], df_behaviortrial['p_reward_left'] ], 0), float) p0 = np.asarray( np.min([ df_behaviortrial['p_reward_right'], df_behaviortrial['p_reward_left'] ], 0), float) m_star_greedy = np.floor(np.log(1 - p1) / np.log(1 - p0)) p_star_greedy = p1 + (1 - (1 - p0)** (m_star_greedy + 1) - p1**2) / (m_star_greedy + 1) local_reward_rate = np.convolve(df_behaviortrial['outcome'] == 'hit', kernel, 'same') max_reward_rate = np.convolve(p_star_greedy, kernel, 'same') local_efficiency = local_reward_rate / max_reward_rate choice_right = np.asarray(df_behaviortrial['trial_choice'] == 'right') choice_left = np.asarray(df_behaviortrial['trial_choice'] == 'left') choice_middle = np.asarray(df_behaviortrial['trial_choice'] == 'middle') reward_rate_right = np.asarray( (df_behaviortrial['trial_choice'] == 'right') & (df_behaviortrial['outcome'] == 'hit')) reward_rate_left = np.asarray((df_behaviortrial['trial_choice'] == 'left') & (df_behaviortrial['outcome'] == 'hit')) reward_rate_middle = np.asarray( (df_behaviortrial['trial_choice'] == 'middle') & (df_behaviortrial['outcome'] == 'hit')) # ============================================================================= # choice_fraction_right = np.convolve(choice_right,kernel,'same')/np.convolve(choice_right+choice_left+choice_middle,kernel,'same') # reward_fraction_right = np.convolve(reward_rate_right,kernel,'same')/local_reward_rate # ============================================================================= choice_rate_right = np.convolve( choice_right, kernel, 'same') / np.convolve( choice_left + choice_middle, kernel, 'same') reward_rate_right = np.convolve( reward_rate_right, kernel, 'same') / np.convolve( reward_rate_left + reward_rate_middle, kernel, 'same') slopes = list() intercepts = list() trial_number = list() for center_trial in np.arange(np.round(fit_window / 2), len(df_behaviortrial), fit_step): #% reward_rates_now = reward_rate_right[ int(np.round(center_trial - fit_window / 2)):int(np.round(center_trial + fit_window / 2))] choice_rates_now = choice_rate_right[ int(np.round(center_trial - fit_window / 2)):int(np.round(center_trial + fit_window / 2))] todel = (reward_rates_now == 0) | (choice_rates_now == 0) reward_rates_now = reward_rates_now[~todel] choice_rates_now = choice_rates_now[~todel] try: slope_now, intercept_now = np.polyfit(np.log2(reward_rates_now), np.log2(choice_rates_now), 
1) slopes.append(slope_now) intercepts.append(intercept_now) trial_number.append(center_trial) except: # the fit can fail, e.g. when the window is empty after dropping zero-rate trials pass df_behaviortrial['local_efficiency'] = local_efficiency df_behaviortrial['local_matching_slope'] = np.nan df_behaviortrial.loc[trial_number, 'local_matching_slope'] = slopes df_behaviortrial['local_matching_bias'] = np.nan df_behaviortrial.loc[trial_number, 'local_matching_bias'] = intercepts #%% return df_behaviortrial
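# =============================================================================
# Note on the local matching fit above: within each sliding window the code
# fits log2(choice_rate_right) against log2(reward_rate_right) with
# np.polyfit(..., 1), i.e. the generalized matching law
#
#     log2(C_right / C_other) = s * log2(R_right / R_other) + b
#
# so 'local_matching_slope' is the sensitivity s and 'local_matching_bias' is
# the intercept b (s = 1, b = 0 corresponds to perfect, unbiased matching).
# Windows in which either ratio is zero are dropped before the fit, and a
# failed fit is skipped, leaving NaN for that window.
# =============================================================================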
def plot_efficiency_matching_bias(ax3, plottype='2lickport', wr_name='FOR01', sessions=(5, 11), show_bias_check_trials=True, plot_efficiency_type='sum_prob'): #%% # ============================================================================= # fig=plt.figure() # ax3=fig.add_axes([0,0,2,.8]) # plottype = '2lickport' # wr_name = 'FOR11' # sessions = (1,46) # show_bias_check_trials = True, # plot_efficiency_type='sum_prob' # ============================================================================= subject_id = (lab.WaterRestriction() & 'water_restriction_number = "{}"'.format(wr_name) ).fetch1('subject_id') if show_bias_check_trials: maxrealforagingvalue = -1 else: maxrealforagingvalue = 0 df_blockefficiency = pd.DataFrame( behavior_foraging.BlockEfficiency() * behavior_foraging.BlockStats() * behavior_foraging.SessionTaskProtocol() & 'subject_id = {}'.format(subject_id) & 'session >= {}'.format(sessions[0]) & 'session <= {}'.format(sessions[1]) & 'session_real_foraging > {}'.format(maxrealforagingvalue)) df_blockefficiency = df_blockefficiency.sort_values(["session", "block"], ascending=(True, True)) unique_sessions = df_blockefficiency['session'].unique() df_blockefficiency['session_start_trialnum'] = 0 session_start_trial_nums = list() session_end_trial_nums = list() for session in unique_sessions: total_trials_so_far = ( behavior_foraging.SessionStats() & 'subject_id = {}'.format(subject_id) & 'session < {}'.format(session)).fetch('session_total_trial_num') #bias_check_trials_now = (behavior_foraging.SessionStats()&'subject_id = {}'.format(subject_id) &'session = {}'.format(session)).fetch1('session_bias_check_trial_num') total_trials_so_far = sum(total_trials_so_far) session_start_trial_nums.append(total_trials_so_far) total_trials_now = ( behavior_foraging.SessionStats() & 'subject_id = {}'.format(subject_id) & 'session = {}'.format(session)).fetch1('session_total_trial_num') session_end_trial_nums.append(total_trials_so_far + total_trials_now) #bias_check_trials_now = (behavior_foraging.SessionStats()&'subject_id = {}'.format(subject_id) &'session = {}'.format(session)).fetch1('session_bias_check_trial_num') df_blockefficiency.loc[df_blockefficiency['session'] == session, 'session_start_trialnum'] += total_trials_so_far blocks = df_blockefficiency.loc[df_blockefficiency['session'] == session, 'block'].values trial_num_so_far = 0 for block in blocks: block_idx_now = (df_blockefficiency['session'] == session) & ( df_blockefficiency['block'] == block) blocktrialnum = df_blockefficiency.loc[block_idx_now, 'block_trial_num'].values[0] df_blockefficiency.loc[ block_idx_now, 'trialnum_block_middle'] = total_trials_so_far + trial_num_so_far + blocktrialnum / 2 trial_num_so_far += blocktrialnum if plot_efficiency_type == 'max_prob': eff_text = 'block_effi_one_p_reward' elif plot_efficiency_type == 'sum_prob': eff_text = 'block_effi_sum_p_reward' elif plot_efficiency_type == 'max_available': eff_text = 'block_effi_one_a_reward' elif plot_efficiency_type == 'sum_available': eff_text = 'block_effi_sum_a_reward' elif plot_efficiency_type == 'ideal': eff_text = 'block_ideal_phat_greedy' elif plot_efficiency_type == 'ideal_regret': eff_text = 'regret_ideal_phat_greedy' ax3.plot(df_blockefficiency['trialnum_block_middle'], df_blockefficiency[eff_text], 'ko-') session_switch_trial_nums = session_start_trial_nums.copy() session_switch_trial_nums.append(session_end_trial_nums[-1]) for session_switch_trial_num in session_switch_trial_nums: ax3.plot([session_switch_trial_num, 
session_switch_trial_num], [-.15, 1.15], 'b--') ax3.set_xlim([ np.min(session_switch_trial_nums) - 10, np.max(session_switch_trial_nums) + 10 ]) match_idx_r, bias_r, sessions = np.asarray( (behavior_foraging.SessionMatchBias() * behavior_foraging.SessionStats() & 'subject_id = {}'.format(subject_id) & 'session >= {}'.format(sessions[0]) & 'session <= {}'.format(sessions[1])).fetch('match_idx_r', 'bias_r', 'session')) bias_r = (np.asarray(bias_r, float)) #bias_r = (np.asarray(bias_r,float)+np.log2(10))/(np.log2(10)*2) # converting it between 0 and 1 session_middle_trial_nums = list() for session_now in sessions: sessionidx = np.where(session_now == unique_sessions)[0] if len(sessionidx) > 0: session_middle_trial_nums.extend( (np.asarray(session_start_trial_nums)[sessionidx] + np.asarray(session_end_trial_nums)[sessionidx]) / 2) else: session_middle_trial_nums.append(np.nan) ax3.plot(session_middle_trial_nums, match_idx_r, 'ro-') ax3.set_ylim([-.1, 1.1]) ax33 = ax3.twinx() ax33.plot(session_middle_trial_nums, bias_r, 'yo-') ax33.set_ylim( np.asarray([-1.1, 1.1]) * np.nanmin([np.nanmax(np.abs(bias_r)), 4])) ax33.set_ylabel('Bias', color='y') ax33.spines["right"].set_color("yellow") #ax33.tick_params(axis='y', colors='yellow') multicolor_ylabel(ax3, ('Efficiency', ' Matching '), ('r', 'k'), axis='y', size=12) #%% return ax3
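# =============================================================================
# Reading the axes produced above: block efficiency is plotted in black
# ('ko-') against cumulative trial number, the per-session matching index in
# red ('ro-'), and the per-session bias on the twin y-axis in yellow ('yo-');
# dashed blue vertical lines ('b--') mark session boundaries.
# =============================================================================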
def make(self, key): log.info('EphysIngest().make(): key: {k}'.format(k=key)) # # Find Ephys Recording # key = (experiment.Session & key).fetch1() rigpath = EphysDataPath().fetch1('data_path') date = key['session_date'].strftime('%Y-%m-%d') subject_id = key['subject_id'] water = (lab.WaterRestriction() & { 'subject_id': subject_id }).fetch1('water_restriction_number') file = '{h2o}ap_imec3_opt3_jrc.mat'.format( h2o=water) # current file naming format # file = '{h2o}_g0_t0.imec.ap_imec3_opt3_jrc.mat'.format(h2o=water) # some older files # subpath = os.path.join('Spike', date, file) fullpath = os.path.join(rigpath, date, file) if not os.path.exists(fullpath): log.info('EphysIngest().make(): skipping - no file in %s' % fullpath) return log.info('EphysIngest().make(): found ephys recording in %s' % fullpath) # # Find corresponding BehaviorIngest # # ... we are keying times, sessions, etc from behavior ingest; # so lookup behavior ingest for session id, quit with warning otherwise try: behavior = (ingestBehavior.BehaviorIngest() & key).fetch1() except dj.DataJointError: log.warning('EphysIngest().make(): skip - behavior ingest error') return log.info('behavior for ephys: {b}'.format(b=behavior)) # # Prepare ElectrodeGroup configuration # # HACK / TODO: assuming single specific ElectrodeGroup for all tests; # better would be to have this encoded in filename or similar. ekey = { 'subject_id': behavior['subject_id'], 'session': behavior['session'], 'electrode_group': 1, } log.debug('inserting electrode group') ephys.ElectrodeGroup().insert1(dict(ekey, probe_part_no=15131808323)) ephys.ElectrodeGroup().make(ekey) # note: no locks; is dj.Manual log.debug('extracting spike data') f = h5py.File(fullpath, 'r') ind = np.argsort(f['S_clu']['viClu'][0]) # index sorted by cluster cluster_ids = f['S_clu']['viClu'][0][ind] # cluster (unit) number ind = ind[np.where( cluster_ids > 0)[0]] # get rid of the -ve noise clusters cluster_ids = cluster_ids[np.where( cluster_ids > 0)[0]] # get rid of the -ve noise clusters trWav_raw_clu = f['S_clu']['trWav_raw_clu'] # spike waveform # trWav_raw_clu1 = np.concatenate((trWav_raw_clu[0:1][:][:],trWav_raw_clu),axis=0) # add a spike waveform to cluster 0, not necessary anymore after the previous step csNote_clu = f['S_clu']['csNote_clu'][0] # manual sorting note strs = ["all" for x in range(len(csNote_clu)) ] # all units are "all" by definition for iU in range( 0, len(csNote_clu)): # read the manual curation of each unit log.debug('extracting spike indicators {s}:{u}'.format( s=behavior['session'], u=iU)) unitQ = f[csNote_clu[iU]] str1 = ''.join(chr(i) for i in unitQ[:]) if str1 == 'single': # definitions in unit quality strs[iU] = 'good' elif str1 == 'multi': strs[iU] = 'multi' spike_times = f['viTime_spk'][0][ind] # spike times viSite_spk = f['viSite_spk'][0][ind] # electrode site for the spike viT_offset_file = f[ 'viT_offset_file'][:] # start of each trial, subtract this number for each trial sRateHz = f['P']['sRateHz'][0] # sampling rate spike_trials = np.ones(len(spike_times)) * ( len(viT_offset_file) - 1) # every spike is in the last trial spike_times2 = np.copy(spike_times) for i in range(len(viT_offset_file) - 1, 0, -1): #find the trials each unit has a spike in log.debug('locating trials with spikes {s}:{t}'.format( s=behavior['session'], t=i)) spike_trials[spike_times < viT_offset_file[ i]] = i - 1 # Get the trial number of each spike spike_times2[(spike_times >= viT_offset_file[i - 1]) & ( spike_times < viT_offset_file[i])] = spike_times[ (spike_times >= 
viT_offset_file[i - 1]) & (spike_times < viT_offset_file[i])] - viT_offset_file[ i - 1] # subtract the viT_offset_file from each trial spike_times2[np.where( spike_times2 >= viT_offset_file[-1])] = spike_times[np.where( spike_times2 >= viT_offset_file[-1])] - viT_offset_file[ -1] # subtract the viT_offset_file from each trial spike_times2 = spike_times2 / sRateHz # divide the sampling rate, sRateHz clu_ids_diff = np.diff(cluster_ids) # where the units seperate clu_ids_diff = np.where( clu_ids_diff != 0)[0] + 1 # separate the spike_times units = np.split( spike_times, clu_ids_diff ) / sRateHz # sub arrays of spike_times for each unit (for ephys.Unit()) trialunits = np.split( spike_trials, clu_ids_diff) # sub arrays of spike_trials for each unit unit_ids = np.arange(len(clu_ids_diff) + 1) # unit number trialunits1 = [] # array of unit number (for ephys.Unit.UnitTrial()) trialunits2 = [] # array of trial number for i in range(0, len(trialunits)): # loop through each unit log.debug('aggregating trials with units {s}:{t}'.format( s=behavior['session'], t=i)) trialunits2 = np.append(trialunits2, np.unique( trialunits[i])) # add the trials that a unit is in trialunits1 = np.append(trialunits1, np.zeros(len(np.unique(trialunits[i]))) + i) # add the unit numbers log.debug( 'inserting units for session {s}'.format(s=behavior['session'])) ephys.Unit().insert( list( dict(ekey, unit=x, unit_uid=x, unit_quality=strs[x], spike_times=units[x], waveform=trWav_raw_clu[x][0]) for x in unit_ids)) # batch insert the units file = '{h2o}_bitcode.mat'.format( h2o=water) # fetch the bitcode and realign # subpath = os.path.join('Spike', date, file) fullpath = os.path.join(rigpath, date, file) log.debug('opening bitcode for {s} ({f})'.format(s=behavior['session'], f=fullpath)) #pdb.set_trace() mat = spio.loadmat(fullpath, squeeze_me=True) # load the bitcode file bitCodeE = mat['bitCodeS'].flatten() # bitCodeS is the char variable trialNote = experiment.TrialNote() bitCodeB = (trialNote & { 'subject_id': ekey['subject_id'] } & { 'session': ekey['session'] } & { 'trial_note_type': 'bitcode' }).fetch( 'trial_note', order_by='trial') # fetch the bitcode from the behavior trialNote if len(bitCodeB) < len( bitCodeE ): # behavior file is shorter; e.g. seperate protocols were used; Bpod trials missing due to crash; session restarted startB = np.where(bitCodeE == bitCodeB[0])[0] elif len(bitCodeB) > len( bitCodeE ): # behavior file is longer; e.g. 
only some trials are sorted, the bitcode.mat should reflect this; Sometimes SpikeGLX can skip a trial, I need to check the last trial startE = np.where(bitCodeB == bitCodeE[0])[0] startB = -startE else: startB = 0 startE = 0 log.debug('extracting trial unit information {s} ({f})'.format( s=behavior['session'], f=fullpath)) trialunits2 = trialunits2 - startB # behavior has less trials if startB is +ve, behavior has more trials if startB is -ve indT = np.where(trialunits2 > -1)[0] # get rid of the -ve trials trialunits1 = trialunits1[indT] trialunits2 = trialunits2[indT] spike_trials = spike_trials - startB # behavior has less trials if startB is +ve, behavior has more trials if startB is -ve indT = np.where(spike_trials > -1)[0] # get rid of the -ve trials cluster_ids = cluster_ids[indT] spike_times2 = spike_times2[indT] viSite_spk = viSite_spk[indT] spike_trials = spike_trials[indT] trialunits = np.asarray(trialunits) # convert the list to an array trialunits = trialunits - startB # split units based on which trial they are in (for ephys.TrialSpikes()) trialPerUnit = np.copy(units) # list of trial index for each unit for i in unit_ids: # loop through each unit, maybe this can be avoid? log.debug('.. unit information {u}'.format(u=i)) indT = np.where(trialunits[i] > -1)[0] # get rid of the -ve trials trialunits[i] = trialunits[i][indT] units[i] = units[i][indT] trialidx = np.argsort(trialunits[i]) # index of the sorted trials trialunits[i] = np.sort( trialunits[i]) # sort the trials for a given unit trial_ids_diff = np.diff( trialunits[i]) # where the trial index seperate trial_ids_diff = np.where(trial_ids_diff != 0)[0] + 1 units[i] = units[i][ trialidx] # sort the spike times based on the trial mapping units[i] = np.split( units[i], trial_ids_diff) # separate the spike_times based on trials trialPerUnit[i] = np.arange(0, len(trial_ids_diff) + 1, dtype=int) # list of trial index log.debug('inserting UnitTrial information') ephys.Unit.UnitTrial().insert( list( dict(ekey, unit=trialunits1[x], trial=trialunits2[x]) for x in range(0, len(trialunits2))) ) # batch insert the TrialUnit (key, unit, trial) log.debug('inserting UnitSpike information') ephys.Unit.UnitSpike().insert( list( dict(ekey, unit=cluster_ids[x] - 1, spike_time=spike_times2[x], electrode=viSite_spk[x], trial=spike_trials[x]) for x in range(0, len(spike_times2))), skip_duplicates=True ) # batch insert the Spikes (key, unit, spike_time, electrode, trial) # TODO: 2D batch insert # pdb.set_trace() l = [] # list of trialSpikes to be inserted for x in zip(unit_ids, trialPerUnit): # loop through the units for i in x[1]: # loop through the trials for each unit l.append( dict(ekey, unit=x[0], trial=int(trialunits2[x[1]][i]), spike_times=units[x[0]][x[1][i]])) # create the list ephys.TrialSpikes().insert( l, skip_duplicates=True) # batch insert TrialSpikes log.debug('inserting file load information') self.insert1(key, ignore_extra_fields=True) EphysIngest.EphysFile().insert1(dict(key, ephys_file=fullpath), ignore_extra_fields=True)
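# =============================================================================
# Hedged illustration of the bitcode realignment used above: the first shared
# bitcode locates the offset between behavioral and ephys trial numbering, and
# that offset (startB) is subtracted from every spike / unit trial index.
# Toy values only, mirroring the len(bitCodeB) > len(bitCodeE) branch:
#
#     import numpy as np
#     bitCodeB = np.array([3, 7, 1, 4, 9])   # behavior bitcodes (toy values)
#     bitCodeE = np.array([1, 4, 9])         # ephys recording started later
#     startE = np.where(bitCodeB == bitCodeE[0])[0]   # -> array([2])
#     startB = -startE
#     # ephys trial tr then maps to behavior trial tr - startB (tr + 2 here)
# =============================================================================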
def plot_AP(wr_name='FOR04', cellnum=1, timeedges=[380, 400], timeback=.0015, timeforward=.003, moving_n_diff=0): subject_id = (lab.WaterRestriction() & 'water_restriction_number = "' + wr_name + '"').fetch('subject_id')[0] key = {'subject_id': subject_id, 'cell_number': cellnum} APstoplot = pd.DataFrame(ephysanal.ActionPotential() & key & 'ap_max_time >' + str(np.min(timeedges)) & 'ap_max_time <' + str(np.max(timeedges))) prevsweepnum = np.nan Y = list() dY = list() T = list() for ap in APstoplot.iterrows(): sweepnum = ap[1]['sweep_number'] if sweepnum != prevsweepnum: key['sweep_number'] = sweepnum sweepdata = pd.DataFrame((ephys_patch.Sweep() & key) * (ephys_patch.SweepResponse() & key) * (ephys_patch.SweepMetadata() & key)) sr = sweepdata['sample_rate'].values[0] trace = sweepdata['response_trace'].values[0] prevsweepnum = sweepnum stepback = int(np.round(timeback * sr)) stepforward = int(np.round(timeforward * sr)) apmaxidx = ap[1]['ap_max_index'] if apmaxidx > stepback and apmaxidx < len(trace) - stepforward: y = trace[apmaxidx - stepback:apmaxidx + stepforward] if moving_n_diff > 1: dy = np.diff(movingaverage(y, moving_n_diff)) * sr else: dy = np.diff(y) * sr dy = np.squeeze( np.asarray( np.nanmean( np.asmatrix([ np.concatenate([[np.nan], dy]), np.concatenate([dy, [np.nan]]) ]), 0).transpose())) t_y = np.arange(-stepback, stepforward) / sr Y.append(y) dY.append(dy) T.append(t_y) #%b Y = np.asmatrix(Y).transpose() * 1000 T = np.asmatrix(T).transpose() * 1000 dY = np.asmatrix(dY).transpose() #% fig = plt.figure() ax_v = fig.add_axes([0, 0, .8, .8]) ax_v.plot(T, Y) ax_v.set_xlabel('ms') ax_v.set_ylabel('mV') ax_v.set_xlim([-1 * timeback * 1000, timeforward * 1000]) ax_dv = fig.add_axes([1, 0, .8, .8]) ax_dv.plot(Y, dY) ax_dv.set_xlabel('mV') ax_dv.set_ylabel('mV/ms')
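# =============================================================================
# Hedged usage sketch for plot_AP(); argument values are placeholders:
#
#     plot_AP(wr_name='FOR04', cellnum=1, timeedges=[380, 400],
#             timeback=.0015, timeforward=.003, moving_n_diff=0)
#
# For every action potential found by ephysanal.ActionPotential() in the
# requested window it plots the spike waveform (mV vs ms) and the phase plane
# (dV/dt in mV/ms vs mV).
# =============================================================================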
def load_meta_foraging(): ''' Load metadata for the foraging task Adapted from Marton's code: https://github.com/rozmar/DataPipeline/blob/master/ingest/datapipeline_metadata.py ''' import pathlib meta_dir = dj.config.get('custom', {}).get('behavior_bpod', []).get('meta_dir') meta_lab_dir = dj.config.get('custom', {}).get('behavior_bpod', []).get('meta_lab_dir') # --- Add experimenters --- print('Adding experimenters...') df_experimenters = pd.read_csv( pathlib.Path(meta_lab_dir) / 'Experimenter.csv') duplicate_num = 0 for experimenter in df_experimenters.iterrows(): experimenter = experimenter[1] experimenternow = { 'username': experimenter['username'], 'fullname': experimenter['fullname'] } try: lab.Person().insert1(experimenternow) print(' added experimenter: ', experimenternow['username']) except dj.errors.DuplicateError: duplicate_num += 1 # print(' duplicate. experimenter: ',experimenternow['username'], ' already exists') print(f' {duplicate_num} experimenters already exist') # --- Add rigs --- print('Adding rigs... ') df_rigs = pd.read_csv(pathlib.Path(meta_lab_dir) / 'Rig.csv') duplicate_num = 0 for rig in df_rigs.iterrows(): rig = rig[1] rignow = { 'rig': rig['rig'], 'room': rig['room'], 'rig_description': rig['rig_description'] } try: lab.Rig().insert1(rignow) print(' added rig: ', rignow['rig']) except dj.errors.DuplicateError: duplicate_num += 1 # print(' duplicate. rig: ',rignow['rig'], ' already exists') print(f' {duplicate_num} rigs already exist') # --- Add viruses --- # Not implemented for now. Han # --- Add subjects and water restrictions --- print('Adding subjects and water restrictions...') df_surgery = pd.read_csv(pathlib.Path(meta_dir) / 'Surgery.csv') # For each entry duplicate_subject_num = 0 duplicate_WR_num = 0 for item in df_surgery.iterrows(): item = item[1] if item['project'] == 'foraging' and (item['status'] == 'training' or item['status'] == 'sacrificed'): # -- Add lab.Subject() -- subjectdata = { 'subject_id': item['animal#'], 'cage_number': item['cage#'], 'date_of_birth': item['DOB'], 'sex': item['sex'], 'username': item['experimenter'], } try: lab.Subject().insert1(subjectdata) print(' added subject: ', item['animal#']) except dj.errors.DuplicateError: duplicate_subject_num += 1 # print(' duplicate. animal :',item['animal#'], ' already exists') # -- Add lab.Surgery() -- # Not implemented. Han # -- Virus injection -- # Not implemented. Han # -- Add lab.WaterRestriction() -- if item['ID']: # Get water restriction start date and weight subject_csv = pathlib.Path(meta_dir) / '{}.csv'.format( item['ID']) if subject_csv.exists(): df_wr = pd.read_csv(subject_csv) else: print(' No metadata csv found for {}'.format(item['ID'])) continue wrdata = { 'subject_id': item['animal#'], 'water_restriction_number': item['ID'], 'cage_number': item['cage#'], 'wr_start_date': df_wr['Date'][0], 'wr_start_weight': df_wr['Weight'][0], } try: lab.WaterRestriction().insert1(wrdata) print(' added WR: ', item['ID']) except dj.errors.DuplicateError: duplicate_WR_num += 1 # print(' duplicate. water restriction:', item['ID'], ' already exists') print( f' {duplicate_subject_num} subjects and {duplicate_WR_num} WRs already exist' )
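# =============================================================================
# Hedged usage sketch for load_meta_foraging(): it expects the metadata csv
# locations under dj.config['custom']['behavior_bpod'] (paths below are
# placeholders):
#
#     dj.config['custom'] = {'behavior_bpod': {
#         'meta_dir': '/data/metadata/behavior/',   # Surgery.csv, <WR_name>.csv
#         'meta_lab_dir': '/data/metadata/lab/'}}   # Experimenter.csv, Rig.csv
#     load_meta_foraging()
# =============================================================================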
def make(self, key): ''' Ephys .make() function ''' log.info('EphysIngest().make(): key: {k}'.format(k=key)) # # Find corresponding BehaviorIngest # # ... we are keying times, sessions, etc from behavior ingest; # so lookup behavior ingest for session id, quit with warning otherwise try: behavior = (behavior_ingest.BehaviorIngest() & key).fetch1() except dj.DataJointError: log.warning('EphysIngest().make(): skip - behavior ingest error') return log.info('behavior for ephys: {b}'.format(b=behavior)) # # Find Ephys Recording # key = (experiment.Session & key).fetch1() rigpath = EphysDataPath().fetch1('data_path') date = key['session_date'].strftime('%Y-%m-%d') subject_id = key['subject_id'] water = (lab.WaterRestriction() & { 'subject_id': subject_id }).fetch1('water_restriction_number') for probe in range(1, 3): # TODO: should code include actual logic to pick these up still? # file = '{h2o}_g0_t0.imec.ap_imec3_opt3_jrc.mat'.format(h2o=water) # some older files # subpath = os.path.join('{}-{}'.format(date, probe), file) # file = '{h2o}ap_imec3_opt3_jrc.mat'.format(h2o=water) # current file naming format epfile = '{h2o}_g0_*.imec.ap_imec3_opt3_jrc.mat'.format( h2o=water) # current file naming format epfullpath = pathlib.Path(rigpath, water, date, str(probe)) ephys_files = list(epfullpath.glob(epfile)) if len(ephys_files) != 1: log.info( 'EphysIngest().make(): skipping probe {} - incorrect files found: {}/{}' .format(probe, epfullpath, ephys_files)) continue epfullpath = ephys_files[0] epsubpath = epfullpath.relative_to(rigpath) log.info( 'EphysIngest().make(): found probe {} ephys recording in {}'. format(probe, epfullpath)) # # Prepare ProbeInsertion configuration # # HACK / TODO: assuming single specific ProbeInsertion for all tests; # better would be to have this encoded in filename or similar. 
probe_part_no = '15131808323' # hard-coded here ekey = { 'subject_id': behavior['subject_id'], 'session': behavior['session'], 'insertion_number': probe } # ElectrodeConfig - add electrode group and group member (hard-coded to be the first 384 electrode) electrode_group = {'probe': probe_part_no, 'electrode_group': 0} electrode_group_member = [{ **electrode_group, 'electrode': chn } for chn in range(1, 385)] electrode_config_name = 'npx_first384' # user-friendly name - npx probe config with the first 384 channels electrode_config_hash = dict_to_hash({ **electrode_group, **{ str(idx): k for idx, k in enumerate(electrode_group_member) } }) # extract ElectrodeConfig, check DB to reference if exists, else create if ({ 'probe': probe_part_no, 'electrode_config_name': electrode_config_name } not in lab.ElectrodeConfig()): log.info( 'create Neuropixels electrode configuration (lab.ElectrodeConfig)' ) lab.ElectrodeConfig.insert1({ 'probe': probe_part_no, 'electrode_config_hash': electrode_config_hash, 'electrode_config_name': electrode_config_name }) lab.ElectrodeConfig.ElectrodeGroup.insert1({ 'electrode_config_name': electrode_config_name, **electrode_group }) lab.ElectrodeConfig.Electrode.insert({ 'electrode_config_name': electrode_config_name, **member } for member in electrode_group_member) log.info('inserting probe insertion') ephys.ProbeInsertion.insert1( dict(ekey, probe=probe_part_no, electrode_config_name=electrode_config_name)) # # Extract spike data # log.info('extracting spike data') f = h5py.File(epfullpath, 'r') cluster_ids = f['S_clu']['viClu'][0] # cluster (unit) number trWav_raw_clu = f['S_clu']['trWav_raw_clu'] # spike waveform # trWav_raw_clu1 = np.concatenate((trWav_raw_clu[0:1][:][:],trWav_raw_clu),axis=0) # add a spike waveform to cluster 0, not necessary anymore after the previous step csNote_clu = f['S_clu']['csNote_clu'][0] # manual sorting note viSite_clu = f['S_clu'][ 'viSite_clu'][:] # site of the unit with the largest amplitude vrPosX_clu = f['S_clu']['vrPosX_clu'][0] # x position of the unit vrPosY_clu = f['S_clu']['vrPosY_clu'][0] # y position of the unit vrVpp_uv_clu = f['S_clu']['vrVpp_uv_clu'][ 0] # amplitude of the unit vrSnr_clu = f['S_clu']['vrSnr_clu'][0] # snr of the unit strs = ["all" for x in range(len(csNote_clu)) ] # all units are "all" by definition for iU in range( 0, len(csNote_clu)): # read the manual curation of each unit log.debug('extracting spike indicators {s}:{u}'.format( s=behavior['session'], u=iU)) unitQ = f[csNote_clu[iU]] str1 = ''.join(chr(i) for i in unitQ[:]) if str1 == 'single': # definitions in unit quality strs[iU] = 'good' elif str1 == 'ok': strs[iU] = 'ok' elif str1 == 'multi': strs[iU] = 'multi' spike_times = f['viTime_spk'][0] # spike times viSite_spk = f['viSite_spk'][0] # electrode site for the spike sRateHz = f['P']['sRateHz'][0] # sampling rate # get rid of the -ve noise clusters non_neg_cluster_idx = cluster_ids > 0 cluster_ids = cluster_ids[non_neg_cluster_idx] spike_times = spike_times[non_neg_cluster_idx] viSite_spk = viSite_spk[non_neg_cluster_idx] file = '{h2o}_bitcode.mat'.format( h2o=water) # fetch the bitcode and realign # subpath = os.path.join('{}-{}'.format(date, probe), file) bcsubpath = pathlib.Path(water, date, str(probe), file) bcfullpath = rigpath / bcsubpath log.info('opening bitcode for session {s} probe {p} ({f})'.format( s=behavior['session'], p=probe, f=bcfullpath)) mat = spio.loadmat(bcfullpath, squeeze_me=True) # load the bitcode file log.info('extracting spike information {s} probe {p} ({f})'.format( 
s=behavior['session'], p=probe, f=bcfullpath)) bitCodeE = mat['bitCodeS'].flatten( ) # bitCodeS is the char variable goCue = mat['goCue'].flatten() # bitCodeS is the char variable viT_offset_file = mat['sTrig'].flatten( ) - 7500 # start of each trial, subtract this number for each trial trialNote = experiment.TrialNote() bitCodeB = (trialNote & { 'subject_id': ekey['subject_id'] } & { 'session': ekey['session'] } & { 'trial_note_type': 'bitcode' }).fetch('trial_note', order_by='trial' ) # fetch the bitcode from the behavior trialNote # check ephys/bitcode match to determine trial numbering method bitCodeB_0 = np.where(bitCodeB == bitCodeE[0])[0][0] bitCodeB_ext = bitCodeB[bitCodeB_0:][:len(bitCodeE)] spike_trials_fix = None if not np.all(np.equal(bitCodeE, bitCodeB_ext)): log.info('ephys/bitcode trial mismatch - attempting fix') if 'trialNum' in mat: spike_trials_fix = mat['trialNum'] else: raise Exception('Bitcode Mismatch') spike_trials = np.full_like( spike_times, (len(viT_offset_file) - 1)) # every spike is in the last trial spike_times2 = np.copy(spike_times) for i in range(len(viT_offset_file) - 1, 0, -1): #find the trials each unit has a spike in log.debug('locating trials with spikes {s}:{t}'.format( s=behavior['session'], t=i)) spike_trials[(spike_times >= viT_offset_file[i - 1]) & (spike_times < viT_offset_file[i] )] = i - 1 # Get the trial number of each spike spike_times2[(spike_times >= viT_offset_file[i - 1]) & ( spike_times < viT_offset_file[i])] = spike_times[ (spike_times >= viT_offset_file[i - 1]) & (spike_times < viT_offset_file[i])] - goCue[ i - 1] # subtract the goCue from each trial spike_trials[np.where(spike_times2 >= viT_offset_file[-1] )] = len(viT_offset_file) - 1 spike_times2[np.where( spike_times2 >= viT_offset_file[-1])] = spike_times[np.where( spike_times2 >= viT_offset_file[-1])] - goCue[ -1] # subtract the goCue from the last trial spike_times2 = spike_times2 / sRateHz # divide the sampling rate, sRateHz # at this point, spike-times are aligned to go-cue for that respective trial unit_trial_spks = { u: (spike_trials[cluster_ids == u], spike_times2[cluster_ids == u]) for u in set(cluster_ids) } trial_start_time = viT_offset_file / sRateHz log.info('inserting units for session {s}'.format( s=behavior['session'])) #pdb.set_trace() # Unit - with JRclust clustering method ekey['clustering_method'] = 'jrclust' def build_unit_insert(): for u_id, (u, (u_spk_trials, u_spk_times)) in enumerate( unit_trial_spks.items()): # unit spike times - realign back to trial-start, relative to 1st trial spk_times = sorted(u_spk_times + (goCue / sRateHz)[u_spk_trials] + trial_start_time[u_spk_trials]) yield (dict(ekey, unit=u, unit_uid=u, unit_quality=strs[u_id], electrode_config_name=electrode_config_name, probe=probe_part_no, electrode_group=0, electrode=int(viSite_clu[u_id]), unit_posx=vrPosX_clu[u_id], unit_posy=vrPosY_clu[u_id], unit_amp=vrVpp_uv_clu[u_id], unit_snr=vrSnr_clu[u_id], spike_times=spk_times, waveform=trWav_raw_clu[u_id][0])) ephys.Unit.insert(build_unit_insert(), allow_direct_insert=True) # UnitTrial log.info('inserting UnitTrial information') if spike_trials_fix is None: if len(bitCodeB) < len( bitCodeE ): # behavior file is shorter; e.g. seperate protocols were used; Bpod trials missing due to crash; session restarted startB = np.where(bitCodeE == bitCodeB[0])[0].squeeze() elif len(bitCodeB) > len( bitCodeE ): # behavior file is longer; e.g. 
only some trials are sorted, the bitcode.mat should reflect this; Sometimes SpikeGLX can skip a trial, I need to check the last trial startE = np.where(bitCodeB == bitCodeE[0])[0].squeeze() startB = -startE else: startB = 0 startE = 0 spike_trials_fix = np.arange(spike_trials.max() + 1) else: # XXX: under test startB = 0 startE = 0 spike_trials_fix -= 1 with InsertBuffer(ephys.Unit.UnitTrial, 10000, skip_duplicates=True, allow_direct_insert=True) as ib: for x, (u_spk_trials, u_spk_times) in unit_trial_spks.items(): ib.insert( dict(ekey, unit=x, trial=spike_trials_fix[tr] - startB) for tr in set(spike_trials)) if ib.flush(): log.debug('... UnitTrial spike') # TrialSpike with InsertBuffer(ephys.TrialSpikes, 10000, skip_duplicates=True, allow_direct_insert=True) as ib: for x, (u_spk_trials, u_spk_times) in unit_trial_spks.items(): ib.insert( dict(ekey, unit=x, spike_times=u_spk_times[u_spk_trials == tr], trial=spike_trials_fix[tr] - startB) for tr in set(spike_trials)) if ib.flush(): log.debug('... TrialSpike spike') log.info('inserting file load information') self.insert1(key, ignore_extra_fields=True, skip_duplicates=True, allow_direct_insert=True) EphysIngest.EphysFile().insert1(dict( key, probe_insertion_number=probe, ephys_file=epsubpath.as_posix()), ignore_extra_fields=True, allow_direct_insert=True) log.info('ephys ingest for {} complete'.format(key))
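# =============================================================================
# Hedged usage note: as with the behavioral ingest, this make() is intended to
# run under DataJoint's populate machinery, e.g. (sketch only):
#
#     EphysIngest.populate(display_progress=True)
#
# Compared with the single-probe variant earlier in this file, this version
# loops over two probe insertions, registers the 'npx_first384' electrode
# configuration, aligns spike times to the go cue of each trial, and falls
# back to mat['trialNum'] when the ephys and behavioral bitcode sequences do
# not match.
# =============================================================================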
def populatemetadata(): #%% save metadata from google drive if necessairy lastmodify = online_notebook.fetch_lastmodify_time_animal_metadata() with open(dj.config['locations.metadata_surgery_experiment'] + 'last_modify_time.json') as timedata: lastmodify_prev = json.loads(timedata.read()) if lastmodify != lastmodify_prev: print('updating surgery and WR metadata from google drive') dj.config['locations.metadata_surgery_experiment'] df_surgery = online_notebook.fetch_animal_metadata() df_surgery.to_csv(dj.config['locations.metadata_surgery_experiment'] + 'Surgery.csv') IDs = df_surgery['ID'].tolist() for ID in IDs: df_wr = online_notebook.fetch_water_restriction_metadata(ID) if type(df_wr) == pd.DataFrame: df_wr.to_csv( dj.config['locations.metadata_surgery_experiment'] + ID + '.csv') with open( dj.config['locations.metadata_surgery_experiment'] + 'last_modify_time.json', "w") as write_file: json.dump(lastmodify, write_file) print('surgery and WR metadata updated') lastmodify = online_notebook.fetch_lastmodify_time_lab_metadata() with open(dj.config['locations.metadata_lab'] + 'last_modify_time.json') as timedata: lastmodify_prev = json.loads(timedata.read()) if lastmodify != lastmodify_prev: print('updating Lab metadata from google drive') dj.config['locations.metadata_lab'] IDs = ['Experimenter', 'Rig', 'Virus'] for ID in IDs: df_wr = online_notebook.fetch_lab_metadata(ID) if type(df_wr) == pd.DataFrame: df_wr.to_csv(dj.config['locations.metadata_lab'] + ID + '.csv') with open( dj.config['locations.metadata_lab'] + 'last_modify_time.json', "w") as write_file: json.dump(lastmodify, write_file) print('Lab metadata updated') #%% add users df_experimenters = pd.read_csv(dj.config['locations.metadata_lab'] + 'Experimenter.csv') experimenterdata = list() for experimenter in df_experimenters.iterrows(): experimenter = experimenter[1] dictnow = { 'username': experimenter['username'], 'fullname': experimenter['fullname'] } experimenterdata.append(dictnow) print('adding experimenters') for experimenternow in experimenterdata: try: lab.Person().insert1(experimenternow) except dj.errors.DuplicateError: print('duplicate. experimenter: ', experimenternow['username'], ' already exists') #%% add rigs df_rigs = pd.read_csv(dj.config['locations.metadata_lab'] + 'Rig.csv') rigdata = list() for rig in df_rigs.iterrows(): rig = rig[1] dictnow = { 'rig': rig['rig'], 'room': rig['room'], 'rig_description': rig['rig_description'] } rigdata.append(dictnow) print('adding rigs') for rignow in rigdata: try: lab.Rig().insert1(rignow) except dj.errors.DuplicateError: print('duplicate. 
rig: ', rignow['rig'], ' already exists')

    #%% add viruses
    df_viruses = pd.read_csv(dj.config['locations.metadata_lab'] + 'Virus.csv')
    virusdata = list()
    serotypedata = list()
    for virus in df_viruses.iterrows():
        virus = virus[1]
        if type(virus['remarks']) != str:
            virus['remarks'] = ''
        dictnow = {'virus_id': virus['virus_id'],
                   'virus_source': virus['virus_source'],
                   'serotype': virus['serotype'],
                   'username': virus['username'],
                   'virus_name': virus['virus_name'],
                   'titer': virus['titer'],
                   'order_date': virus['order_date'],
                   'remarks': virus['remarks']}
        virusdata.append(dictnow)
        dictnow = {'serotype': virus['serotype']}
        serotypedata.append(dictnow)
    print('adding viruses')
    for virusnow, serotypenow in zip(virusdata, serotypedata):
        try:
            lab.Serotype().insert1(serotypenow)
        except dj.errors.DuplicateError:
            print('duplicate serotype: ', serotypenow['serotype'], ' already exists')
        try:
            lab.Virus().insert1(virusnow)
        except dj.errors.DuplicateError:
            print('duplicate virus: ', virusnow['virus_name'], ' already exists')

    #%% populate subjects, surgeries and water restrictions
    print('adding surgeries and stuff')
    df_surgery = pd.read_csv(dj.config['locations.metadata_surgery_experiment'] + 'Surgery.csv')
    #%%
    for item in df_surgery.iterrows():
        if item[1]['project'] == dj.config['project'] and (item[1]['status'] == 'training' or item[1]['status'] == 'sacrificed'):
            subjectdata = {'subject_id': item[1]['animal#'],
                           'cage_number': item[1]['cage#'],
                           'date_of_birth': item[1]['DOB'],
                           'sex': item[1]['sex'],
                           'username': item[1]['experimenter']}
            try:
                lab.Subject().insert1(subjectdata)
            except dj.errors.DuplicateError:
                print('duplicate. animal :', item[1]['animal#'], ' already exists')

            # surgeries are stored in numbered column groups: "surgery date (1)", "surgery date (2)", ...
            surgeryidx = 1
            while ('surgery date (' + str(surgeryidx) + ')' in item[1].keys()
                   and item[1]['surgery date (' + str(surgeryidx) + ')']
                   and type(item[1]['surgery date (' + str(surgeryidx) + ')']) == str):
                start_time = datetime.strptime(
                    item[1]['surgery date (' + str(surgeryidx) + ')'] + ' ' +
                    item[1]['surgery time (' + str(surgeryidx) + ')'], '%Y-%m-%d %H:%M')
                end_time = start_time + timedelta(
                    minutes=int(item[1]['surgery length (min) (' + str(surgeryidx) + ')']))
                surgerydata = {'surgery_id': surgeryidx,
                               'subject_id': item[1]['animal#'],
                               'username': item[1]['experimenter'],
                               'start_time': start_time,
                               'end_time': end_time,
                               'surgery_description': (item[1]['surgery type (' + str(surgeryidx) + ')'] +
                                                       ':-: comments: ' +
                                                       str(item[1]['surgery comments (' + str(surgeryidx) + ')']))}
                try:
                    lab.Surgery().insert1(surgerydata)
                except dj.errors.DuplicateError:
                    print('duplicate. surgery for animal ', item[1]['animal#'], ' already exists: ', start_time)

                # checking craniotomies
                #%
                cranioidx = 1
                while ('craniotomy diameter (' + str(cranioidx) + ')' in item[1].keys()
                       and item[1]['craniotomy diameter (' + str(cranioidx) + ')']
                       and (type(item[1]['craniotomy surgery id (' + str(cranioidx) + ')']) == int
                            or type(item[1]['craniotomy surgery id (' + str(cranioidx) + ')']) == float)):
                    if item[1]['craniotomy surgery id (' + str(cranioidx) + ')'] == surgeryidx:
                        proceduredata = {'surgery_id': surgeryidx,
                                         'subject_id': item[1]['animal#'],
                                         'procedure_id': cranioidx,
                                         'skull_reference': item[1]['craniotomy reference (' + str(cranioidx) + ')'],
                                         'ml_location': item[1]['craniotomy lateral (' + str(cranioidx) + ')'],
                                         'ap_location': item[1]['craniotomy anterior (' + str(cranioidx) + ')'],
                                         'surgery_procedure_description': 'craniotomy: ' + item[1]['craniotomy comments (' + str(cranioidx) + ')']}
                        try:
                            lab.Surgery.Procedure().insert1(proceduredata)
                        except dj.errors.DuplicateError:
                            print('duplicate cranio for animal ', item[1]['animal#'], ' already exists: ', cranioidx)
                    cranioidx += 1
                #%
                virusinjidx = 1
                while ('virus inj surgery id (' + str(virusinjidx) + ')' in item[1].keys()
                       and item[1]['virus inj virus id (' + str(virusinjidx) + ')']
                       and item[1]['virus inj surgery id (' + str(virusinjidx) + ')']):
                    if item[1]['virus inj surgery id (' + str(virusinjidx) + ')'] == surgeryidx:
                        # =============================================================================
                        #                         print('waiting')
                        #                         timer.sleep(1000)
                        # =============================================================================
                        # injection coordinates may be scalars or python-style lists stored as strings
                        if '[' in item[1]['virus inj lateral (' + str(virusinjidx) + ')']:
                            virus_ml_locations = eval(item[1]['virus inj lateral (' + str(virusinjidx) + ')'])
                            virus_ap_locations = eval(item[1]['virus inj anterior (' + str(virusinjidx) + ')'])
                            virus_dv_locations = eval(item[1]['virus inj ventral (' + str(virusinjidx) + ')'])
                            virus_volumes = eval(item[1]['virus inj volume (nl) (' + str(virusinjidx) + ')'])
                        else:
                            virus_ml_locations = [int(item[1]['virus inj lateral (' + str(virusinjidx) + ')'])]
                            virus_ap_locations = [int(item[1]['virus inj anterior (' + str(virusinjidx) + ')'])]
                            virus_dv_locations = [int(item[1]['virus inj ventral (' + str(virusinjidx) + ')'])]
                            virus_volumes = [int(item[1]['virus inj volume (nl) (' + str(virusinjidx) + ')'])]

                        for virus_ml_location, virus_ap_location, virus_dv_location, virus_volume in zip(
                                virus_ml_locations, virus_ap_locations, virus_dv_locations, virus_volumes):
                            injidx = len(lab.Surgery.VirusInjection() & surgerydata) + 1
                            virusinjdata = {'surgery_id': surgeryidx,
                                            'subject_id': item[1]['animal#'],
                                            'injection_id': injidx,
                                            'virus_id': item[1]['virus inj virus id (' + str(virusinjidx) + ')'],
                                            'skull_reference': item[1]['virus inj reference (' + str(virusinjidx) + ')'],
                                            'ml_location': virus_ml_location,
                                            'ap_location': virus_ap_location,
                                            'dv_location': virus_dv_location,
                                            'volume': virus_volume,
                                            'dilution': item[1]['virus inj dilution (' + str(virusinjidx) + ')'],
                                            'description': 'virus injection: ' + item[1]['virus inj comments (' + str(virusinjidx) + ')']}
                            try:
                                lab.Surgery.VirusInjection().insert1(virusinjdata)
                            except dj.errors.DuplicateError:
                                print('duplicate virus injection for animal ', item[1]['animal#'], ' already exists: ', injidx)
                    virusinjidx += 1
                #%
                surgeryidx += 1
            #%
            if item[1]['ID']:
                #df_wr = online_notebook.fetch_water_restriction_metadata(item[1]['ID'])
                try:
                    df_wr = pd.read_csv(dj.config['locations.metadata_surgery_experiment'] + item[1]['ID'] + '.csv')
                except Exception:
                    df_wr = None
                if type(df_wr) == pd.DataFrame:
                    wrdata = {'subject_id': item[1]['animal#'],
                              'water_restriction_number': item[1]['ID'],
                              'cage_number': item[1]['cage#'],
                              'wr_start_date': df_wr['Date'][0],
                              'wr_start_weight': df_wr['Weight'][0]}
                    try:
                        lab.WaterRestriction().insert1(wrdata)
                    except dj.errors.DuplicateError:
                        print('duplicate. water restriction :', item[1]['animal#'], ' already exists')
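
# A minimal sketch (illustration only, not part of the loader above): DataJoint's
# insert1()/insert() accept skip_duplicates=True, which is a more compact way to get
# the "insert unless the row already exists" behavior implemented above with
# try/except dj.errors.DuplicateError. The helper name below is hypothetical.
def insert_subject_if_new(subjectdata):
    """Insert a subject record, silently skipping it if its primary key already exists."""
    lab.Subject().insert1(subjectdata, skip_duplicates=True)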
def key_source(self):
    # 2 letters, anything, _, anything, 8 digits, _, 6 digits, .mat
    # where:
    #   (2 letters, anything): water restriction
    #   (anything): task name
    #   (8 digits): date YYYYMMDD
    #   (6 digits): time HHMMSS
    rexp = r'^[a-zA-Z]{2}.*_.*_[0-9]{8}_[0-9]{6}\.mat$'

    # water_restriction_number -> subject
    h2os = {k: v for k, v in zip(*lab.WaterRestriction().fetch(
        'water_restriction_number', 'subject_id'))}

    def buildrec(rig, rigpath, root, f):
        if not re.match(rexp, f):
            log.debug("{f} skipped - didn't match rexp".format(f=f))
            return

        log.debug('found file {f}'.format(f=f))

        fullpath = pathlib.Path(root, f)
        subpath = fullpath.relative_to(rigpath)

        fsplit = subpath.stem.split('_')
        h2o = fsplit[0]
        ymd = fsplit[-2]  # date component, YYYYMMDD

        if h2o not in h2os:
            log.warning('{f} skipped - no animal for {h2o}'.format(f=f, h2o=h2o))
            return

        animal = h2os[h2o]
        log.debug('animal is {animal}'.format(animal=animal))

        return {'subject_id': animal,
                'session_date': date(int(ymd[0:4]), int(ymd[4:6]), int(ymd[6:8])),
                'rig': rig,
                'rig_data_path': rigpath.as_posix(),
                'subpath': subpath.as_posix()}

    recs = []
    found = set()
    known = set(BehaviorIngest.BehaviorFile().fetch('behavior_file'))
    rigs = get_behavior_paths()

    for (rig, rigpath, _) in rigs:
        rigpath = pathlib.Path(rigpath)

        log.info('RigDataFile.make(): traversing {}'.format(rigpath))
        for root, dirs, files in os.walk(rigpath):
            log.debug('RigDataFile.make(): entering {}'.format(root))
            for f in files:
                log.debug('RigDataFile.make(): visiting {}'.format(f))
                r = buildrec(rig, rigpath, root, f)
                if not r:
                    continue
                if f in set.union(known, found):
                    log.info('skipping already ingested file {}'.format(r['subpath']))
                else:
                    found.add(f)  # block duplicate path conflicts
                    recs.append(r)

    return recs
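
# Illustration only: how a behavior file name decomposes under the regex and the
# split('_') logic in buildrec() above. The example follows the pattern documented in
# BehaviorIngest.make() below ({h2o}_{training protocol}_{YYYYMMDD}_{HHMMSS}.mat).
import re
import pathlib

_example = 'dl7_TW_autoTrain_20180104_132813.mat'
assert re.match(r'^[a-zA-Z]{2}.*_.*_[0-9]{8}_[0-9]{6}\.mat$', _example)
_parts = pathlib.Path(_example).stem.split('_')
# _parts[0]  -> 'dl7'       water restriction number (h2o)
# _parts[-2] -> '20180104'  session date, YYYYMMDD
# _parts[-1] -> '132813'    session time, HHMMSS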
def plot_time_interval(wr_name='FOR04', cellnum=1, timeedges=[0, 10], ylimits_response=None, plotRS=False):
    subject_id = (lab.WaterRestriction() & 'water_restriction_number = "' + wr_name + '"').fetch('subject_id')[0]
    key = {'subject_id': subject_id, 'cell_number': cellnum}

    allsweeps = pd.DataFrame((ephys_patch.Sweep() & key))
    sweepstoplot = np.where(np.logical_and(allsweeps['sweep_end_time'] > float(np.min(timeedges)),
                                           allsweeps['sweep_start_time'] < float(np.max(timeedges))))[0]
    df_iv = pd.DataFrame()
    for sweepnum in sweepstoplot:
        key['sweep_number'] = sweepnum
        df_iv = pd.concat([df_iv,
                           pd.DataFrame((ephys_patch.Sweep() & key) *
                                        (ephys_patch.SweepResponse() & key) *
                                        (ephys_patch.SweepStimulus() & key) *
                                        (ephys_patch.SweepMetadata() & key))])
    df_IV = pd.DataFrame()
    for line in df_iv.iterrows():
        linenow = line[1]
        time = np.arange(0, len(linenow['response_trace'])) / linenow['sample_rate']
        linenow['time'] = time + float(linenow['sweep_start_time'])
        df_IV = pd.concat([df_IV, linenow.to_frame().transpose()])

    fig = plt.figure()
    ax_IV = fig.add_axes([0, 0, 2, .8])
    ax_stim = fig.add_axes([0, -.6, 2, .4])
    for line in df_IV.iterrows():
        ax_IV.plot(line[1]['time'], line[1]['response_trace'] * 1000, 'k-')
        ax_stim.plot(line[1]['time'], line[1]['stimulus_trace'] * 10**12, 'k-')
    ax_IV.set_xlabel('Time (s)')
    ax_IV.set_xlim([np.min(timeedges), np.max(timeedges)])
    if ylimits_response:
        ax_IV.set_ylim([np.min(ylimits_response), np.max(ylimits_response)])
    ax_IV.set_ylabel('mV')
    ax_IV.set_title('Response')
    ax_stim.set_xlabel('Time (s)')
    ax_stim.set_xlim([np.min(timeedges), np.max(timeedges)])
    ax_stim.set_ylabel('pA')
    ax_stim.set_title('Stimulus')

    if plotRS:
        del key['sweep_number']
        df_RS = pd.DataFrame((ephysanal.SeriesResistance() * ephysanal.SquarePulse()) & key)
        needed = ((df_RS['square_pulse_start_time'].values > np.min(timeedges)) &
                  (df_RS['square_pulse_start_time'].values < np.max(timeedges)))
        ax_RS = fig.add_axes([0, -1.2, 2, .4])
        ax_RS.plot(df_RS[needed]['square_pulse_start_time'].values,
                   df_RS[needed]['series_resistance'].values, 'ko')
        ax_RS.set_xlabel('Time (s)')
        ax_RS.set_ylabel('RS (MOhm)')
        ax_RS.set_xlim([np.min(timeedges), np.max(timeedges)])
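
# Example call (illustrative only; the water restriction number, cell number and
# y-limits below are hypothetical and must exist in lab.WaterRestriction / ephys_patch
# for the queries above to return sweeps):
# plot_time_interval(wr_name='FOR04', cellnum=1, timeedges=[0, 10],
#                    ylimits_response=[-80, 20], plotRS=True)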
def make(self, key):
    log.info('BehaviorIngest.make(): key: {key}'.format(key=key))

    subject_id = key['subject_id']
    h2o = (lab.WaterRestriction() & {'subject_id': subject_id}).fetch1('water_restriction_number')

    date = key['session_date']
    datestr = date.strftime('%Y%m%d')
    log.info('h2o: {h2o}, date: {d}'.format(h2o=h2o, d=datestr))

    # session record key
    skey = {}
    skey['subject_id'] = subject_id
    skey['session_date'] = date
    skey['username'] = self.get_session_user()

    # File paths conform to the pattern:
    # dl7/TW_autoTrain/Session Data/dl7_TW_autoTrain_20180104_132813.mat
    # which is, more generally:
    # {h2o}/{training_protocol}/Session Data/{h2o}_{training protocol}_{YYYYMMDD}_{HHMMSS}.mat
    root = pathlib.Path(key['rig_data_path'], os.path.dirname(key['subpath']))
    path = root / '{h2o}_*_{d}*.mat'.format(h2o=h2o, d=datestr)

    log.info('rigpath {p}'.format(p=path))

    matches = sorted(root.glob('{h2o}_*_{d}*.mat'.format(h2o=h2o, d=datestr)))
    if matches:
        log.info('found files: {}, this is the rig'.format(matches))
        skey['rig'] = key['rig']
    else:
        log.info('no file matches found in {p}'.format(p=path))

    if not len(matches):
        log.warning('no file matches found for {h2o} / {d}'.format(h2o=h2o, d=datestr))
        return

    #
    # Find files & Check for split files
    # XXX: not checking rig.. 2+ sessions on 2+ rigs possible for date?
    #

    if len(matches) > 1:
        log.warning('split session case detected for {h2o} on {date}'.format(h2o=h2o, date=date))

    # session:date relationship is 1:1; skip if we have a session
    if experiment.Session() & skey:
        log.warning("Warning! session exists for {h2o} on {d}".format(h2o=h2o, d=date))
        return

    #
    # Prepare PhotoStim
    #
    photostim_duration = 0.5  # (s) Hard-coded here
    photostims = {
        4: {'photo_stim': 4, 'photostim_device': 'OBIS470',
            'brain_location_name': 'left_alm', 'duration': photostim_duration},
        5: {'photo_stim': 5, 'photostim_device': 'OBIS470',
            'brain_location_name': 'right_alm', 'duration': photostim_duration},
        6: {'photo_stim': 6, 'photostim_device': 'OBIS470',
            'brain_location_name': 'both_alm', 'duration': photostim_duration}}

    #
    # Extract trial data from file(s) & prepare trial loop
    #

    trials = zip()

    trial = namedtuple(  # simple structure to track per-trial vars
        'trial', ('ttype', 'stim', 'settings', 'state_times', 'state_names',
                  'state_data', 'event_data', 'event_times'))

    for f in matches:

        if os.stat(f).st_size / 1024 < 1000:
            log.info('skipping file {f} - too small'.format(f=f))
            continue

        log.debug('loading file {}'.format(f))

        mat = spio.loadmat(f, squeeze_me=True)
        SessionData = mat['SessionData'].flatten()

        AllTrialTypes = SessionData['TrialTypes'][0]
        AllTrialSettings = SessionData['TrialSettings'][0]

        RawData = SessionData['RawData'][0].flatten()
        AllStateNames = RawData['OriginalStateNamesByNumber'][0]
        AllStateData = RawData['OriginalStateData'][0]
        AllEventData = RawData['OriginalEventData'][0]
        AllStateTimestamps = RawData['OriginalStateTimestamps'][0]
        AllEventTimestamps = RawData['OriginalEventTimestamps'][0]

        # verify trial-related data arrays are all same length
        assert (all((x.shape[0] == AllStateTimestamps.shape[0] for x in
                     (AllTrialTypes, AllTrialSettings, AllStateNames,
                      AllStateData, AllEventData, AllEventTimestamps))))

        if 'StimTrials' in SessionData.dtype.fields:
            log.debug('StimTrials detected in session - will include')
            AllStimTrials = SessionData['StimTrials'][0]
            assert (AllStimTrials.shape[0] == AllStateTimestamps.shape[0])
        else:
            log.debug('StimTrials not detected in session - will skip')
            AllStimTrials = np.array([None for i in enumerate(range(AllStateTimestamps.shape[0]))])
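        # Note (added for clarity): the arrays above are parallel, one entry per trial.
        # zip() below turns them into one tuple per trial, and chain() accumulates the
        # tuples across files so that split sessions (2+ .mat files for one day) are
        # parsed as a single trial sequence.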
        z = zip(AllTrialTypes, AllStimTrials, AllTrialSettings,
                AllStateTimestamps, AllStateNames, AllStateData,
                AllEventData, AllEventTimestamps)

        trials = chain(trials, z)  # concatenate the files

    trials = list(trials)

    # all files were internally invalid or too small (< 1000 KB)
    if not trials:
        log.warning('skipping date {d}, no valid files'.format(d=date))
        return

    #
    # Trial data seems valid; synthesize session id & add session record
    # XXX: note - later breaks can result in Sessions without valid trials
    #

    log.debug('synthesizing session ID')
    session = (dj.U().aggr(experiment.Session() & {'subject_id': subject_id},
                           n='max(session)').fetch1('n') or 0) + 1
    log.info('generated session id: {session}'.format(session=session))
    skey['session'] = session
    key = dict(key, **skey)

    #
    # Actually load the per-trial data
    #
    log.info('BehaviorIngest.make(): trial parsing phase')

    # lists of various records for batch-insert
    rows = {k: list() for k in ('trial', 'behavior_trial', 'trial_note',
                                'trial_event', 'corrected_trial_event',
                                'action_event', 'photostim',
                                'photostim_location', 'photostim_trial',
                                'photostim_trial_event')}

    i = -1
    for t in trials:

        #
        # Misc
        #

        t = trial(*t)  # convert list of items to a 'trial' structure
        i += 1  # increment trial counter

        log.debug('BehaviorIngest.make(): parsing trial {i}'.format(i=i))

        # convert state data names into a lookup dictionary
        #
        # names (seem to be? are?):
        #
        # Trigtrialstart, PreSamplePeriod, SamplePeriod, DelayPeriod,
        # EarlyLickDelay, EarlyLickSample, ResponseCue, GiveLeftDrop,
        # GiveRightDrop, GiveLeftDropShort, GiveRightDropShort,
        # AnswerPeriod, Reward, RewardConsumption, NoResponse,
        # TimeOut, StopLicking, StopLickingReturn, TrialEnd

        states = {k: (v + 1) for v, k in enumerate(t.state_names)}
        required_states = ('PreSamplePeriod', 'SamplePeriod', 'DelayPeriod',
                           'ResponseCue', 'StopLicking', 'TrialEnd')

        missing = list(k for k in required_states if k not in states)

        if len(missing):
            log.warning('skipping trial {i}; missing {m}'.format(i=i, m=missing))
            continue

        gui = t.settings['GUI'].flatten()

        # ProtocolType - only ingest protocol >= 3
        #
        # 1 Water-Valve-Calibration 2 Licking 3 Autoassist
        # 4 No autoassist 5 DelayEnforce 6 SampleEnforce 7 Fixed
        #

        if 'ProtocolType' not in gui.dtype.names:
            log.warning('skipping trial {i}; protocol undefined'.format(i=i))
            continue

        protocol_type = gui['ProtocolType'][0]
        if gui['ProtocolType'][0] < 3:
            log.warning('skipping trial {i}; protocol {n} < 3'.format(
                i=i, n=gui['ProtocolType'][0]))
            continue

        #
        # Top-level 'Trial' record
        #

        tkey = dict(skey)
        startindex = np.where(t.state_data == states['PreSamplePeriod'])[0]

        # should be only end of 1st StopLicking;
        # rest of data is irrelevant w/r/t separately ingested ephys
        endindex = np.where(t.state_data == states['StopLicking'])[0]

        log.debug('states\n' + str(states))
        log.debug('state_data\n' + str(t.state_data))
        log.debug('startindex\n' + str(startindex))
        log.debug('endindex\n' + str(endindex))

        if not (len(startindex) and len(endindex)):
            log.warning('skipping trial {i}: start/end index error: {s}/{e}'.format(
                i=i, s=str(startindex), e=str(endindex)))
            continue

        try:
            tkey['trial'] = i
            tkey['trial_uid'] = i  # Arseny has unique id to identify some trials
            tkey['start_time'] = t.state_times[startindex][0]
            tkey['stop_time'] = t.state_times[endindex][0]
        except IndexError:
            log.warning('skipping trial {i}: error indexing {s}/{e} into {t}'.format(
                i=i, s=str(startindex), e=str(endindex), t=str(t.state_times)))
            continue
        log.debug('BehaviorIngest.make(): Trial().insert1')  # TODO msg
        log.debug('tkey' + str(tkey))
        rows['trial'].append(tkey)

        #
        # Specific BehaviorTrial information for this trial
        #

        bkey = dict(tkey)
        bkey['task'] = 'audio delay'  # hard-coded here
        bkey['task_protocol'] = 1  # hard-coded here

        # determine trial instruction
        trial_instruction = 'left'  # hard-coded here

        if gui['Reversal'][0] == 1:
            if t.ttype == 1:
                trial_instruction = 'left'
            elif t.ttype == 0:
                trial_instruction = 'right'
        elif gui['Reversal'][0] == 2:
            if t.ttype == 1:
                trial_instruction = 'right'
            elif t.ttype == 0:
                trial_instruction = 'left'

        bkey['trial_instruction'] = trial_instruction

        # determine early lick
        early_lick = 'no early'
        if (protocol_type >= 5 and 'EarlyLickDelay' in states
                and np.any(t.state_data == states['EarlyLickDelay'])):
            early_lick = 'early'
        if (protocol_type > 5 and ('EarlyLickSample' in states
                                   and np.any(t.state_data == states['EarlyLickSample']))):
            early_lick = 'early'

        bkey['early_lick'] = early_lick

        # determine outcome
        outcome = 'ignore'
        if ('Reward' in states and np.any(t.state_data == states['Reward'])):
            outcome = 'hit'
        elif ('TimeOut' in states and np.any(t.state_data == states['TimeOut'])):
            outcome = 'miss'
        elif ('NoResponse' in states and np.any(t.state_data == states['NoResponse'])):
            outcome = 'ignore'

        bkey['outcome'] = outcome
        rows['behavior_trial'].append(bkey)

        #
        # Add 'protocol' note
        #
        nkey = dict(tkey)
        nkey['trial_note_type'] = 'protocol #'
        nkey['trial_note'] = str(protocol_type)
        rows['trial_note'].append(nkey)

        #
        # Add 'autolearn' note
        #
        nkey = dict(tkey)
        nkey['trial_note_type'] = 'autolearn'
        nkey['trial_note'] = str(gui['Autolearn'][0])
        rows['trial_note'].append(nkey)

        #
        # Add 'bitcode' note
        #
        if 'randomID' in gui.dtype.names:
            nkey = dict(tkey)
            nkey['trial_note_type'] = 'bitcode'
            nkey['trial_note'] = str(gui['randomID'][0])
            rows['trial_note'].append(nkey)

        #
        # Add presample event
        #
        log.debug('BehaviorIngest.make(): presample')

        ekey = dict(tkey)
        sampleindex = np.where(t.state_data == states['SamplePeriod'])[0]

        ekey['trial_event_id'] = len(rows['trial_event'])
        ekey['trial_event_type'] = 'presample'
        ekey['trial_event_time'] = t.state_times[startindex][0]
        ekey['duration'] = (t.state_times[sampleindex[0]] - t.state_times[startindex])[0]

        if math.isnan(ekey['duration']):
            log.debug('BehaviorIngest.make(): fixing presample duration')
            ekey['duration'] = 0.0  # FIXDUR: lookup from previous trial

        rows['trial_event'].append(ekey)

        #
        # Add other 'sample' events
        #
        log.debug('BehaviorIngest.make(): sample events')

        last_dur = None

        for s in sampleindex:  # in protocol > 6 ~-> n>1
            # todo: batch events
            ekey = dict(tkey)
            ekey['trial_event_id'] = len(rows['trial_event'])
            ekey['trial_event_type'] = 'sample'
            ekey['trial_event_time'] = t.state_times[s]
            ekey['duration'] = gui['SamplePeriod'][0]

            if math.isnan(ekey['duration']) and last_dur is None:
                log.warning('... trial {} bad duration, no last_dur'.format(i))
                ekey['duration'] = 0.0  # FIXDUR: cross-trial check
                rows['corrected_trial_event'].append(ekey)
            elif math.isnan(ekey['duration']) and last_dur is not None:
                log.warning('... trial {} duration using last_dur {}'.format(i, last_dur))
                ekey['duration'] = last_dur
                rows['corrected_trial_event'].append(ekey)
            else:
                last_dur = ekey['duration']  # only track 'good' values.
            rows['trial_event'].append(ekey)

        #
        # Add 'delay' events
        #
        log.debug('BehaviorIngest.make(): delay events')

        last_dur = None
        delayindex = np.where(t.state_data == states['DelayPeriod'])[0]

        for d in delayindex:  # protocol > 6 ~-> n>1
            ekey = dict(tkey)
            ekey['trial_event_id'] = len(rows['trial_event'])
            ekey['trial_event_type'] = 'delay'
            ekey['trial_event_time'] = t.state_times[d]
            ekey['duration'] = gui['DelayPeriod'][0]

            if math.isnan(ekey['duration']) and last_dur is None:
                log.warning('... trial {} bad duration, no last_dur'.format(i))
                ekey['duration'] = 0.0  # FIXDUR: cross-trial check
                rows['corrected_trial_event'].append(ekey)
            elif math.isnan(ekey['duration']) and last_dur is not None:
                log.warning('... trial {} duration using last_dur {}'.format(i, last_dur))
                ekey['duration'] = last_dur
                rows['corrected_trial_event'].append(ekey)
            else:
                last_dur = ekey['duration']  # only track 'good' values.

            log.debug('delay event duration: {}'.format(ekey['duration']))
            rows['trial_event'].append(ekey)

        #
        # Add 'go' event
        #
        log.debug('BehaviorIngest.make(): go')

        ekey = dict(tkey)
        responseindex = np.where(t.state_data == states['ResponseCue'])[0]

        ekey['trial_event_id'] = len(rows['trial_event'])
        ekey['trial_event_type'] = 'go'
        ekey['trial_event_time'] = t.state_times[responseindex][0]
        ekey['duration'] = gui['AnswerPeriod'][0]

        if math.isnan(ekey['duration']):
            log.debug('BehaviorIngest.make(): fixing go duration')
            ekey['duration'] = 0.0  # FIXDUR: lookup from previous trials
            rows['corrected_trial_event'].append(ekey)

        rows['trial_event'].append(ekey)

        #
        # Add 'trialEnd' events
        #
        log.debug('BehaviorIngest.make(): trialend events')

        last_dur = None
        trialendindex = np.where(t.state_data == states['TrialEnd'])[0]

        ekey = dict(tkey)
        ekey['trial_event_id'] = len(rows['trial_event'])
        ekey['trial_event_type'] = 'trialend'
        ekey['trial_event_time'] = t.state_times[trialendindex][0]
        ekey['duration'] = 0.0

        rows['trial_event'].append(ekey)

        #
        # Add lick events
        #

        lickleft = np.where(t.event_data == 69)[0]
        log.debug('... lickleft: {r}'.format(r=str(lickleft)))

        action_event_count = len(rows['action_event'])
        if len(lickleft):
            [rows['action_event'].append(dict(tkey,
                                              action_event_id=action_event_count + idx,
                                              action_event_type='left lick',
                                              action_event_time=t.event_times[l]))
             for idx, l in enumerate(lickleft)]

        lickright = np.where(t.event_data == 71)[0]
        log.debug('... lickright: {r}'.format(r=str(lickright)))

        action_event_count = len(rows['action_event'])
        if len(lickright):
            [rows['action_event'].append(dict(tkey,
                                              action_event_id=action_event_count + idx,
                                              action_event_type='right lick',
                                              action_event_time=t.event_times[r]))
             for idx, r in enumerate(lickright)]

        # Photostim Events
        #
        # TODO:
        #
        # - base stimulation parameters:
        #   - should be loaded elsewhere - where
        #   - actual ccf locations - cannot be known apriori apparently?
        #   - Photostim.Profile: what is? fix/add
        #
        # - stim data
        #   - how retrieve power from file (didn't see) or should
        #     be statically coded here?
        #   - how encode stim type 6?
        #     - we have hemisphere as boolean or
        #     - but adding an event 4 and event 5 means querying
        #       is less straightforward (e.g. sessions with 5 & 6)

        if t.stim:
            log.info('BehaviorIngest.make(): t.stim == {}'.format(t.stim))
            rows['photostim_trial'].append(tkey)
            delay_period_idx = np.where(t.state_data == states['DelayPeriod'])[0][0]
            rows['photostim_trial_event'].append(
                dict(tkey, **photostims[t.stim],
                     photostim_event_id=len(rows['photostim_trial_event']),
                     photostim_event_time=t.state_times[delay_period_idx],
                     power=5.5))

        # end of trial loop.
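    # Note (added for clarity): no database writes happen inside the trial loop above;
    # every record is accumulated in `rows` so that the session record and all per-trial
    # records can be inserted together in the batch phase below.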
    # Session Insertion
    log.info('BehaviorIngest.make(): adding session record')
    experiment.Session().insert1(skey)

    # Behavior Insertion

    log.info('BehaviorIngest.make(): bulk insert phase')

    log.info('BehaviorIngest.make(): saving ingest {d}'.format(d=key))
    self.insert1(key, ignore_extra_fields=True, allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.Session.Trial')
    experiment.SessionTrial().insert(rows['trial'], ignore_extra_fields=True,
                                     allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.BehaviorTrial')
    experiment.BehaviorTrial().insert(rows['behavior_trial'], ignore_extra_fields=True,
                                      allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.TrialNote')
    experiment.TrialNote().insert(rows['trial_note'], ignore_extra_fields=True,
                                  allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.TrialEvent')
    experiment.TrialEvent().insert(rows['trial_event'], ignore_extra_fields=True,
                                   allow_direct_insert=True, skip_duplicates=True)

    log.info('BehaviorIngest.make(): ... CorrectedTrialEvents')
    BehaviorIngest().CorrectedTrialEvents().insert(rows['corrected_trial_event'],
                                                   ignore_extra_fields=True,
                                                   allow_direct_insert=True)

    log.info('BehaviorIngest.make(): ... experiment.ActionEvent')
    experiment.ActionEvent().insert(rows['action_event'], ignore_extra_fields=True,
                                    allow_direct_insert=True)

    BehaviorIngest.BehaviorFile().insert((dict(key, behavior_file=f.name) for f in matches),
                                         ignore_extra_fields=True, allow_direct_insert=True)

    # Photostim Insertion
    photostim_ids = set([r['photo_stim'] for r in rows['photostim_trial_event']])
    if photostim_ids:
        log.info('BehaviorIngest.make(): ... experiment.Photostim')
        experiment.Photostim.insert((dict(skey, **photostims[stim]) for stim in photostim_ids),
                                    ignore_extra_fields=True)

        log.info('BehaviorIngest.make(): ... experiment.PhotostimTrial')
        experiment.PhotostimTrial.insert(rows['photostim_trial'], ignore_extra_fields=True,
                                         allow_direct_insert=True)

        log.info('BehaviorIngest.make(): ... experiment.PhotostimTrialEvent')
        experiment.PhotostimEvent.insert(rows['photostim_trial_event'],
                                         ignore_extra_fields=True, allow_direct_insert=True)
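
# A minimal sketch (assumption, not part of the original ingest): the bulk-insert phase
# above issues many dependent inserts one table at a time; grouping the same inserts
# inside a single DataJoint transaction would let a failure roll back the whole session
# instead of leaving it partially ingested. The helper name is hypothetical and only a
# subset of the inserts is shown.
def insert_session_atomically(skey, rows):
    with dj.conn().transaction:  # everything below commits or rolls back together
        experiment.Session().insert1(skey)
        experiment.SessionTrial().insert(rows['trial'], ignore_extra_fields=True,
                                         allow_direct_insert=True)
        experiment.BehaviorTrial().insert(rows['behavior_trial'], ignore_extra_fields=True,
                                          allow_direct_insert=True)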