def _make_tuples(self, key):
    """Insert one session entry per subdirectory of this mouse's data folder."""
    for session_dir in get_mouse_session_subdirs(key['mouse']):
        session_key = dict(key, sess_name=get_last_dir_in_path(session_dir))
        manual_insert_skip_duplicate(self, session_key)
def _make_tuples(self, key):
    """Register a recording's manually-curated tags: one parent entry plus
    one row in the Tags part table per tagged event."""
    all_tags = pd.read_hdf('vgatPAG/database/manual_tags.h5')

    # Restrict to the tags belonging to this mouse/session
    selected = all_tags.loc[(all_tags.mouse == key['mouse'])
                            & (all_tags.sess_name == key['sess_name'])]

    # Parent entry first
    manual_insert_skip_duplicate(self, key)

    # One Tags row per (event type, tag type, tag)
    for event in selected.event_type.unique():
        of_event = selected.loc[selected.event_type == event]
        for tag in of_event.tag_type.unique():
            subset = of_event.loc[of_event.tag_type == tag]
            for _, row in subset.iterrows():
                entry = key.copy()
                entry['event_type'] = row.event_type
                entry['tag_type'] = row.tag_type
                entry['frame'] = row.frame
                entry['session_frame'] = row.session_frame
                entry['stim_frame'] = row.stim_frame
                entry['session_stim_frame'] = row.session_stim_frame
                entry['rec_n'] = row.rec_number
                manual_insert_skip_duplicate(self.Tags, entry)
def _make_tuples(self, key):
    """Compute and store the DFF trace of every ROI of a manual trial,
    using each ROI's previously-computed DFF threshold."""
    sess_name, mouse = key['sess_name'], key['mouse']
    trial_class = key['trial_class']
    session = (ManualTrials & key).fetch1("manual_sess_name")

    f, keys, subkeys, allkeys = open_hdf(summary_file)
    trial = dict(f['all'][trial_class][session][key['trial_name']])

    # ROI traces stored with the trial vs ROI ids registered in RoiDFF.
    # NOTE(review): the zip below assumes the two sequences are aligned
    # one-to-one and in the same order — confirm against the data.
    trial_rois = [name for name in trial.keys() if 'C_ROI' in name]
    dff_rois = (RoiDFF & f"sess_name='{sess_name}'"
                & f"mouse='{mouse}'").fetch('roi_id')

    for trial_roi, dff_roi in zip(trial_rois, dff_rois):
        trace = trial[trial_roi][()]
        try:
            threshold = (RoiDFF & f"sess_name='{sess_name}'"
                         & f"mouse='{mouse}'"
                         & f"roi_id='{dff_roi}'").fetch1('dff_th')
        except Exception as e:
            raise ValueError(f'AAAA Skipping: {e}')
        entry = key.copy()
        entry['roi_id'] = trial_roi
        entry['signal'] = trace
        entry['dff'] = (trace - threshold) / threshold
        manual_insert_skip_duplicate(self, entry)
def _make_tuples(self, key):
    """Work out, for one recording, when the microscope (calcium) acquisition
    was on, expressed in behaviour-camera frames, and store the results.

    Populates: is_ca_recording (0/1 per frame), starts/ends (frame number of
    each recording chunk), the raw camera/microscope trigger traces and the
    total number of frames.
    """
    session_fld = get_session_folder(**key)
    fps = (Recording & key).fetch1('fps_behav')

    # Load recordings AI file
    aifile = (Recording & key).fetch1("aifile")
    f, keys, subkeys, allkeys = open_hdf(os.path.join(session_fld, aifile))

    # Trigger traces: AI channel 0 = behaviour camera, channel 1 = microscope
    camera_triggers = f['AI']['0'][()]
    microscope_triggers = f['AI']['1'][()]
    starts, ends = get_times_signal_high_and_low(microscope_triggers)

    # Get the start/end of each individual recording, in sample numbers.
    # A new recording begins wherever consecutive trigger onsets are more
    # than self.sampling_frequency samples (i.e. one second) apart.
    print('\nExactring start end of individual recordings')
    rec_starts = [starts[0]] + [
        starts[s] for s in list(
            np.where(derivative(starts) > self.sampling_frequency)[0])
    ]
    rec_ends = []
    for rs in rec_starts:
        _ends = np.array([e for e in ends if e > rs])
        try:
            nxt = np.where(
                derivative(_ends) > self.sampling_frequency)[0][0]
            rec_ends.append(_ends[nxt - 1])
        except IndexError:
            # No later gap: this recording runs to the very last offset.
            # (Narrowed from a bare `except`; only the [0][0] indexing can
            # fail here when np.where finds no matching gap.)
            rec_ends.append(ends[-1])
    if len(rec_ends) != len(rec_starts):
        raise ValueError('Something went wrong')
    startends = [(s, e) for s, e in zip(rec_starts, rec_ends)]

    # Go from sample number to frame numbers
    def smpl2frm(val):
        return np.int32(round((val / self.sampling_frequency) * fps))

    startends = [(smpl2frm(s), smpl2frm(e)) for s, e in startends]

    # Binary "microscope is recording" trace, one entry per frame
    roi_data = f['Fiji_ROI_1'][()]
    is_recording = np.zeros_like(roi_data)
    for start, end in startends:
        # stop 20 frames short of each chunk's end to stay clear of the edge
        is_recording[start:end - 20] = 1

    key['is_ca_recording'] = is_recording
    key['starts'] = np.array([s for s, e in startends])
    key['ends'] = np.array([e for s, e in startends])
    key['camera_frames'] = camera_triggers
    # BUGFIX: previously stored camera_triggers here too, so the microscope
    # trigger trace was never saved.
    key['microscope_frames'] = microscope_triggers
    key['n_frames'] = len(is_recording)
    manual_insert_skip_duplicate(self, key)
def _make_tuples(self, key):
    """Discover this mouse's experiments (paired Fiji-tag hdf5 + mp4 files)
    and register each one with its calcium-recording on/off trace."""
    mouse_fld = [d for d in fld.glob('*')
                 if d.is_dir() and key['mouse'] in str(d)][0]
    tag_files = sorted(f for f in mouse_fld.glob('*.hdf5')
                       if 'Fiji-tag' in f.name)

    # Keep only experiments with exactly one hdf5 and exactly one video
    experiments = {}
    for exp_name in (f.name.split('_Fiji')[0] for f in tag_files):
        matching_hdf = [h for h in tag_files if exp_name in str(h)]
        matching_vid = [v for v in mouse_fld.glob('*.mp4')
                        if exp_name in str(v)]
        if len(matching_hdf) == 1 and len(matching_vid) == 1:
            experiments[exp_name] = dict(hdf=matching_hdf[0],
                                         video=matching_vid[0])

    for name, files in experiments.items():
        # Experiment names embed the recording number after '_t' or '_v'
        splitter = '_t' if '_t' in name else '_v'

        _, _, _, fps, _ = get_video_params(
            get_cap_from_file(str(files['video'])))

        try:
            f, keys, subkeys, allkeys = open_hdf(str(files['hdf']))
        except Exception as e:
            print(f'Failed to open AI file: {files["hdf"].name}:\n{e}')
            # NOTE(review): this aborts the remaining experiments as well,
            # not just the failing one — confirm that is intended.
            return

        # Samples where the Fiji ROI trace is positive are treated as
        # "microscope acquiring"
        roi = [k for k in keys if 'Fiji_ROI' in k][0]
        sig = f[roi][()]
        is_rec = np.zeros_like(sig)
        is_rec[sig > 0] = 1

        entry = key.copy()
        entry['date'] = name.split('_')[0]
        entry['rec'] = int(name.split(splitter)[1][0])
        entry['name'] = name
        entry['hdf_path'] = str(files['hdf'])
        entry['video_path'] = str(files['video'])
        entry['video_fps'] = fps
        entry['is_ca_rec'] = is_rec
        entry['ca_rec_starts'] = np.where(derivative(is_rec) > 0)[0]
        entry['ca_rec_ends'] = np.where(derivative(is_rec) < 0)[0]
        manual_insert_skip_duplicate(self, entry)
def _make_tuples(self, key):
    """Denoise, DFF-normalise, z-score and low-pass every ROI trace of a
    session, storing all the derived traces."""
    rois = pd.DataFrame(
        (Sessions * Sessions.RoiTrace & key).fetch(as_dict=True))

    for _, roi in rois.iterrows():
        raw = roi.sig
        is_rec = roi.is_ca_rec
        starts, ends = roi.ca_rec_starts, roi.ca_rec_ends
        fps = roi.video_fps

        # Denoise with a short rolling mean, one recording chunk at a time
        raw = self.chunk_wise(raw, starts, ends, rolling_mean, 3)

        # Whole-session DFF (th is the baseline fluorescence estimate)
        dff, th = self.merge_apply_split(raw, is_rec, starts, ends,
                                         self.dff, self.dff_percentile)

        # Z-scored version of the DFF trace
        zscored, _ = self.merge_apply_split(dff, is_rec, starts, ends,
                                            zscore)

        # Slow fluctuations: a wide rolling mean of the DFF trace
        slow = self.chunk_wise(dff, starts, ends, rolling_mean,
                               self.slow_filter_window * fps)

        entry = key.copy()
        entry['id'] = roi.roi_name
        entry['raw'] = raw
        entry['dff'] = dff
        entry['slow_dff'] = slow
        entry['zscore'] = zscored
        entry['dff_percentile'] = self.dff_percentile
        entry['slow_filter_window'] = self.slow_filter_window
        manual_insert_skip_duplicate(self, entry)
def _make_tuples(self, key):
    """Register every recording of a session: its video file, AI file,
    frame count, behaviour fps and number of analog samples."""
    session_fld = get_session_folder(**key)
    recs = load_yaml(metadatafile)['sessions'][key['mouse']][
        key['sess_name']]

    for n, rec in enumerate(sorted(recs)):
        # Locate the recording's video file
        rec_files = [f for f in os.listdir(session_fld) if rec in f]
        videos = [f for f in rec_files
                  if f.endswith(".mp4") or f.endswith(".avi")]
        if len(videos) == 1:
            video = videos[0]
        elif len(set(get_file_name(f) for f in videos)) == 1:
            # Same clip present in several formats: prefer the mp4
            video = get_file_name(videos[0]) + ".mp4"
        else:
            raise ValueError

        # Locate the recording's Fiji-tagged AI file in the temp folder
        temp_fld = os.path.join(self.temp_files_fld, key['mouse'])
        ais = [f for f in os.listdir(temp_fld)
               if f == f"{rec}_Fiji-tag.hdf5"]
        if not ais:
            continue
        if len(ais) != 1:
            raise ValueError(f'Found ais: {ais}')
        ai = ais[0]

        # Open video and get number of frames
        nframes, width, height, fps, _ = get_video_params(
            get_cap_from_file(os.path.join(session_fld, video)))

        # Open AI file and get number of samples.
        # NOTE(review): the AI file was found in temp_fld but is opened
        # from session_fld — confirm it exists in both locations.
        f, keys, subkeys, allkeys = open_hdf(os.path.join(session_fld, ai))
        n_samples = len(f['AI']['0'][()])

        entry = key.copy()
        entry['rec_name'] = rec
        entry['rec_n'] = n
        entry['videofile'] = video
        entry['aifile'] = ai
        entry['n_frames'] = nframes
        entry['fps_behav'] = fps
        entry['n_samples'] = n_samples
        manual_insert_skip_duplicate(self, entry)
def populate(self):
    """Walk the summary hdf file and insert one entry per manual trial.

    Session names encode the mouse and session (underscore-separated);
    trial names encode the stimulus frame.
    """
    f, keys, subkeys, allkeys = open_hdf(summary_file)
    for trial_class in subkeys['all']:
        for session in list(dict(f['all'][trial_class]).keys()):
            session_parts = session.split("_")
            for trial in list(dict(f['all'][trial_class][session]).keys()):
                entry = dict(
                    trial_class=trial_class,
                    mouse=session_parts[1],
                    sess_name=session_parts[2],
                    frame=int(trial.split("_")[1]),
                    trial_name=trial,
                    manual_sess_name=session,
                )
                manual_insert_skip_duplicate(self, entry)
def _append(self, key, event_type, tag, onset_frame, stim, stim_session): tkey = key.copy() tkey['event_type'] = event_type tkey['tag_type'] = tag tkey['frame'] = onset_frame + stim tkey['session_frame'] = onset_frame + stim_session tkey['stim_frame'] = stim tkey['session_stim_frame'] = stim_session # skip trials where cable got caught in arena if key['mouse'] == 'BF164p1': if key['date'] == '19JUN03' and stim == 19415: return elif key['mouse'] == 'BF164p1': if key['date'] == '19JUN05' and stim == 95806: return if key['date'] == '19JUN018' and stim == 32290: return manual_insert_skip_duplicate(self.Tags, tkey)
def _make_tuples(self, key):
    """Extract audio-stimulation events (start frame + protocol name) from
    each recording's AI file and insert one entry per stimulus."""
    session_fld = get_session_folder(**key)
    recs = load_yaml(metadatafile)['sessions'][key['mouse']][
        key['sess_name']]

    # Get each session's recordin AI file
    for n, rec in enumerate(sorted(recs)):
        matches = [f for f in os.listdir(session_fld)
                   if f == f"{rec}.hdf5"]
        if len(matches) != 1:
            raise ValueError
        ai = matches[0]

        f, keys, subkeys, allkeys = open_hdf(os.path.join(session_fld, ai))

        # Stimuli names; a recording without the 'Audio Stimulation' group
        # has no audio stimuli.
        # NOTE(review): this returns (aborting the remaining recordings)
        # rather than continuing — confirm that is intended.
        try:
            names_raw = dict(
                f['Audio Stimulation'])['Audio Stimuli Names'][(
                )].decode("utf-8")
        except KeyError as err:
            return

        # The names are stored as a stringified python-2 list: strip the
        # brackets/quotes, split on the ", u" separators and keep only the
        # file name after the escaped path separators.
        for ch in ("[", "]", "'"):
            names_raw = names_raw.replace(ch, "")
        protocol_names = [part.split("\\\\")[-1]
                          for part in names_raw.split(", u")]

        start_frames = np.where(
            dict(f['Audio Stimulation'])['Audio Stimuli Start Indices'][
                ()] > 0)[0]
        if len(start_frames) != len(protocol_names):
            raise ValueError

        for frame, protocol in zip(start_frames, protocol_names):
            entry = key.copy()
            entry['frame'] = frame
            entry['protocol_name'] = protocol
            entry['rec_name'] = rec
            manual_insert_skip_duplicate(self, entry)
def _make_tuples(self, key):
    """Compute whole-session DFF traces (raw and clean) for every ROI of a
    session, concatenating each ROI's signal across the recordings.

    The DFF threshold is a percentile of the concatenated *clean* signal,
    and is applied to both the raw and the clean concatenated traces.
    """
    ids, sigs, _ = Roi().get_sessions_rois(key['mouse'], key['sess_name'])
    recs = list(ids.keys())
    # NOTE(review): assumes every recording has the same ROI ids in the
    # same order — only the first recording's ids are used.
    ids = ids[recs[0]]

    for n, rid in enumerate(ids):
        rsig, clean_rsig = [], []
        for rec in recs:
            try:
                clean_rsig.append(Roi().get_roi_signal_clean(rec, rid))
            except Exception as e:
                # Removed two leftover debug fetches (TiffTimes /
                # Roi signal) whose results were unused before the raise.
                raise ValueError(f'AAAA skipping: {e}')
            rsig.append(sigs[rec][n])

        clean_rsig = np.concatenate(clean_rsig)
        rsig = np.concatenate(rsig)

        # Baseline fluorescence from the clean signal's percentile
        dff_threshold = np.percentile(clean_rsig, self.DFF_PERCENTILE)

        entry = key.copy()
        entry['roi_id'] = rid
        entry['dff_perc'] = self.DFF_PERCENTILE
        entry['dff_th'] = dff_threshold
        entry['dff_sig'] = (rsig - dff_threshold) / dff_threshold
        entry['signal'] = rsig
        entry['clean_signal'] = clean_rsig
        entry['clean_dff_sig'] = (clean_rsig - dff_threshold) / dff_threshold
        manual_insert_skip_duplicate(self, entry)
def _make_tuples(self, key):
    """Insert the session entry, then extract event tags from each
    recording's '*_forFede.hdf5' file, dispatching stimulus-evoked event
    types and all other event types to the matching processors."""
    # Insert into main table
    manual_insert_skip_duplicate(self, key)

    # Frame offset of each recording within the session
    frames_shift = (Sessions & key).fetch1('frames_shift')

    # Iterate over recordings
    hdf = (Experiment & key).fetch('hdf_path')
    for rec_n, h in enumerate(hdf):
        h = h.replace('.hdf5', '_forFede.hdf5')
        try:
            f, keys, subkeys, allkeys = open_hdf(h)
        except OSError:
            # BUGFIX: previously caught FileExistsError (which cannot fire
            # when *reading* a missing file) and then fell through to use
            # the undefined `f`, raising NameError. Catch the OSError
            # family and skip this recording instead.
            print(f'File: {Path(h).name} does not exist!')
            continue

        # Get tag entries for each type of event
        for k in keys:
            if k in ('audio', 'visual', 'auditory', 'visual_audio'):
                self.process_stim_evoked(k, f, frames_shift, key, rec_n)
            else:
                self.process_other(k, f, frames_shift, key, rec_n)
def _make_tuples(self, key):
    """Store the raw trace of every Fiji ROI found in a recording's AI
    file, after checking it is aligned with the recording-on flag."""
    session_fld = get_session_folder(**key)
    aifile = (Recording & key).fetch1("aifile")
    f, keys, subkeys, allkeys = open_hdf(os.path.join(session_fld, aifile))

    roi_names = [k for k in keys if 'Fiji_ROI' in k]
    print(f"{key['rec_name']} -> {len(roi_names)} rois")

    rec_name = key['rec_name']
    is_recording = (TiffTimes
                    & f"rec_name='{rec_name}'").fetch1("is_ca_recording")

    for roi_name in roi_names:
        trace = f[roi_name][()]
        # Each trace must be sample-for-sample aligned with the flag
        if len(trace) != len(is_recording):
            raise ValueError('oops')
        entry = key.copy()
        entry['roi_id'] = roi_name
        entry['signal'] = trace
        manual_insert_skip_duplicate(self, entry)
def _make_tuples(self, key):
    """Ask the user for this session's calcium imaging framerate and
    store it with the key."""
    question = f'\nWhats the framerate for: {key["mouse"]} {key["date"]}?'
    key['ca_fps'] = IntPrompt.ask(question)
    manual_insert_skip_duplicate(self, key)
def populate(self):
    """Insert one entry per mouse folder found under `fld`."""
    for entry in fld.glob('*'):
        if entry.is_dir():
            manual_insert_skip_duplicate(self, {'mouse': entry.name})
def _make_tuples(self, key):
    """Detect behavioural events in one recording and store them.

    Stimulus-evoked events (stim onset, escape onset, shelter arrival,
    escape peak speed) go into the Evoked part table; spontaneous homings
    and out-runs (plus homing peak speed) go into the Spontaneous part
    table. All durations configured on the class are in seconds and are
    converted to frames with the recording's behaviour fps.
    """
    # Convert the class's second-based thresholds into frames
    fps = Recording().get_recording_fps(**key)
    max_homing_duration = self.max_homing_duration * fps
    max_run_duration = self.max_run_duration * fps
    min_time_after_stim = self.min_time_after_stim * fps
    max_event_duration = self.max_event_duration * fps
    # Get stimuli (visual and audio stimulus frames)
    vstims, astims = Recording().get_recording_stimuli(**key)
    # Get tracking
    body_tracking, ang_vel, speed, shelter_distance = Trackings(
    ).get_recording_tracking_clean(**key)
    # Get spontaneous homings and runs from tracking
    homings = get_spont_homings(shelter_distance, speed, astims, vstims,
                                max_homing_duration, min_time_after_stim,
                                self.spont_initiation_speed)
    runs = get_spont_out_runs(shelter_distance, speed, astims, vstims,
                              max_run_duration, min_time_after_stim,
                              self.spont_initiation_speed)
    # Insert into the main table first
    manual_insert_skip_duplicate(self, key)  # insert into main tabel
    # Insert into evoked subtable, once per stimulus of each modality
    for stims, stims_type in zip([astims, vstims], ["audio", "visual"]):
        for stim in stims:
            # Escape onset: first frame (from 5 frames post-stim) where
            # speed crosses the escape-initiation threshold.
            # NOTE(review): the bare excepts below deliberately treat a
            # failed [0][0] lookup (no matching frame) as "no escape" /
            # "never reached shelter" and skip the stimulus.
            try:
                estart = np.where(
                    speed[stim + 5:stim + max_event_duration] >=
                    self.escape_initiation_speed)[0][0] + stim + 5
            except:
                continue  # if there's no escape ignore
            # Shelter arrival: first frame where shelter distance <= 0;
            # ignore the stimulus if the mouse never got there
            try:
                at_shelter = np.where(
                    shelter_distance[stim:stim + max_event_duration] <=
                    0)[0][0] + stim
            except:
                continue
            # Frame of peak escape speed (NaNs treated as 0)
            speed_peak = np.argmax(
                np.nan_to_num(
                    speed[stim:stim + max_event_duration])) + stim
            # Now add all of these events to the tables
            events = [stim, estart, at_shelter, speed_peak]
            classes = [
                "stim_onset", "escape_onset", "shelter_arrival",
                "escape_peak_speed"
            ]
            for ev, cl in zip(events, classes):
                ekey = key.copy()
                ekey['frame'] = ev
                ekey['type'] = cl
                ekey['stim_type'] = stims_type
                manual_insert_skip_duplicate(self.Evoked, ekey)
    # And now populate spontaneous subtables: each homing contributes its
    # onset and its peak-speed frame
    for spont in homings:
        peak_speed = np.argmax(
            np.nan_to_num(speed[spont:spont + max_event_duration])) + spont
        for ev, cl in zip([spont, peak_speed],
                          ["homing", "homing_peak_speed"]):
            skey = key.copy()
            skey['frame'] = ev
            skey['type'] = cl
            manual_insert_skip_duplicate(self.Spontaneous, skey)
    # Out-runs contribute only their onset frame
    for spont in runs:
        skey = key.copy()
        skey['frame'] = spont
        skey['type'] = "outrun"
        manual_insert_skip_duplicate(self.Spontaneous, skey)
def populate(self):
    """Insert one entry per mouse listed in the metadata file."""
    for mouse in load_yaml(metadatafile)['mice']:
        manual_insert_skip_duplicate(self, {'mouse': mouse})
def _make_tuples(self, key):
    """Stack a mouse's per-experiment data into one entry per date.

    For each date: concatenates the calcium-recording on/off traces and
    chunk start/end frames (shifted into session-wide frame numbers), the
    ROI traces and the body tracking across that date's experiments, then
    inserts the session entry plus its Tracking and RoiTrace part rows.
    """
    # Get all experiments for this mouse, by date
    exps = pd.DataFrame((Experiment & key).fetch(as_dict=True))
    if len(exps) == 0:
        return
    # Loop over dates
    for date in exps.date.unique():
        # Experiments of this date, in name order (assumed chronological)
        es = exps.loc[exps.date == date].sort_values('name', axis=0)
        # Frame count of each experiment and the cumulative offset that
        # shifts experiment-local frames into session-wide frames
        n_frames = [len(r.is_ca_rec) for i, r in es.iterrows()]
        frames_shift = [0] + list(np.cumsum(n_frames[:-1]))
        # All experiments of a date must share the same video fps
        if len(es.video_fps.unique()) > 1:
            raise ValueError('More than one video fps')
        is_ca_rec = np.concatenate(es.is_ca_rec.values)
        ca_rec_starts = np.concatenate([
            np.array(ca) + frames_shift[n]
            for n, ca in enumerate(es.ca_rec_starts.values)
        ])
        ca_rec_ends = np.concatenate([
            np.array(ca) + frames_shift[n]
            for n, ca in enumerate(es.ca_rec_ends.values)
        ])
        # Check that every experiment has the same set of Fiji ROIs as the
        # first one (open_hdf returns (file, keys, subkeys, allkeys))
        roi_data = [open_hdf(f) for f in es.hdf_path]
        rois = sorted([k for k in roi_data[0][1] if 'Fiji_ROI' in k])
        if len(roi_data) > 1:
            for (_, keys, _, _) in roi_data[1:]:
                for k in keys:
                    if 'Fiji_ROI' in k and k not in rois:
                        raise ValueError('Unrecognized ROI')
        # Stack each ROI's trace across the date's experiments
        rois_stacks = {}
        for roi in rois:
            rois_stacks[roi] = np.concatenate(
                [f[roi][()] for f, _, _, _ in roi_data])
        # Stack body tracking (x, y, speed, direction of movement)
        trackings = []
        for i, s in es.iterrows():
            name = s['name']
            trackings.append((Trackings * Trackings.BodyPartTracking
                              & key & 'bp="body"' & f'date="{s.date}"'
                              & f'name="{name}"').fetch(
                                  'x', 'y', 'speed', 'dir_of_mvmt',
                                  as_dict=True)[0])
        keys = list(trackings[0].keys())
        tracking = {
            k: np.concatenate([t[k] for t in trackings])
            for k in keys
        }
        # Insert the main session entry...
        mkey = key.copy()
        mkey['date'] = date
        mkey['video_fps'] = es.video_fps.values[0]
        mkey['is_ca_rec'] = is_ca_rec
        mkey['ca_rec_starts'] = ca_rec_starts
        mkey['ca_rec_ends'] = ca_rec_ends
        mkey['exps_nframes'] = n_frames
        mkey['frames_shift'] = frames_shift
        manual_insert_skip_duplicate(self, mkey)
        # ...then the stacked tracking...
        tkey = key.copy()
        tkey['date'] = date
        tkey['x'] = tracking['x']
        tkey['y'] = tracking['y']
        tkey['s'] = tracking['speed']
        tkey['dir_of_mvmt'] = tracking['dir_of_mvmt']
        manual_insert_skip_duplicate(self.Tracking, tkey)
        # ...and one RoiTrace row per stacked ROI
        for roi, data in rois_stacks.items():
            rkey = key.copy()
            rkey['date'] = date
            rkey['roi_name'] = roi
            rkey['sig'] = data
            manual_insert_skip_duplicate(self.RoiTrace, rkey)