def get_micro_manipulator_data(subject, one=None, force_extract=False):
    """
    Looks for all ephys sessions for a given subject and gets the probe
    micro-manipulator trajectories. If the probes ALF object is not on FlatIron,
    attempts to perform the extraction from meta-data and task settings file.
    """
    if not one:
        one = ONE()

    eids, sessions = one.search(subject=subject, task_protocol='ephys', details=True)
    probes = alf.io.AlfBunch({})
    for ses in sessions:
        sess_path = Path(ses['local_path'])
        probe = None
        if not force_extract:
            probe = one.load_object(ses['url'], 'probes')
        if not probe:
            _logger.warning(f"Re-extracting probe info for {sess_path}")
            dtypes = ['_iblrig_taskSettings.raw', 'ephysData.raw.meta']
            raw_files = one.load(ses['url'], dataset_types=dtypes, download_only=True)
            if all([rf is None for rf in raw_files]):
                _logger.warning(f"no raw settings files nor ephys data found for"
                                f" {ses['local_path']}. Skipping this session.")
                continue
            extract_probes(sess_path, bin_exists=False)
            probe = alf.io.load_object(sess_path.joinpath('alf'), 'probes')
        one.load(ses['url'], dataset_types='channels.localCoordinates', download_only=True)
        # get the sites local mapping for each insertion:
        # if not found, assume a checkerboard pattern
        probe['sites_coordinates'] = []
        for prb in probe.description:
            chfile = Path(ses['local_path']).joinpath('alf', prb['label'],
                                                      'channels.localCoordinates.npy')
            if chfile.exists():
                probe['sites_coordinates'].append(np.load(chfile))
            else:
                _logger.warning(f"no channels.localCoordinates found for {ses['local_path']}. "
                                f"Assuming checkerboard pattern")
                probe['sites_coordinates'].append(SITES_COORDINATES)
        # put the session information in there
        probe['session'] = [ses] * len(probe.description)
        probes = probes.append(probe)
    return probes
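if __name__ == '__main__':
    # Usage sketch (added for illustration, not part of the original module).
    # The subject name below is a placeholder; any subject with ephys sessions
    # and micro-manipulator metadata on Alyx should work.
    probes = get_micro_manipulator_data('ZM_1150')
    for desc, sites in zip(probes['description'], probes['sites_coordinates']):
        print(desc['label'], sites.shape)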
def get_session_flatiron():
    from oneibl.one import ONE
    one = ONE()
    ses = one.search(subjects='CSHL_003', date_range=['2019-04-17'])  # session ok
    ses = one.search(subjects='CSHL_003', date_range=['2019-04-18'])  # session has wrong reaction times
    one.load(ses[0], dataset_types=['_iblrig_taskData.raw', '_iblrig_taskSettings.raw'],
             download_only=True)
def get_trials_from_times(eid):
    '''
    Return a Counter with, for each trial, the number of saturation
    events that fall within that trial's interval.
    '''
    one = ONE()
    sat_times_path = '/home/mic/saturation_scan2/%s.npy' % eid
    sat_times_info = np.load(sat_times_path, allow_pickle=True)[0]
    sat_times = sat_times_info[1]
    D = one.load(eid, dataset_types=['trials.intervals'], dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent / 'alf'
    trials = alf.io.load_object(alf_path, '_ibl_trials')
    trials_with_sat = []
    for t in range(len(trials['intervals'])):
        ter = trials['intervals'][t]
        for tt in sat_times:
            if ter[0] < tt < ter[1]:
                trials_with_sat.append(t)
    C = Counter(trials_with_sat)
    print(sat_times_info[0])
    print(len(C), 'of', len(trials['intervals']),
          'trials have at least one saturation event')
    return C
def get_DLC(eid, video_type):
    '''
    Load DLC traces for a given session and video type.

    :param eid: a session eid
    :param video_type: one of 'left', 'right', 'body'
    :return: array of times and dict with dlc points as keys and x,y coordinates
             as values, for each frame id
    '''
    one = ONE()
    one.load(eid, dataset_types=['camera.dlc', 'camera.times'])
    alf_path = one.path_from_eid(eid) / 'alf'
    cam0 = alf.io.load_object(alf_path, '%sCamera' % video_type, namespace='ibl')
    Times = cam0['times']
    cam = cam0['dlc']
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])
    XYs = {}
    for point in points:
        # mask out frames where the tracking likelihood is too low
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9, cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9, cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x, y])
    return Times, XYs
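if __name__ == '__main__':
    # Usage sketch (added for illustration; the eid is reused from the Viewer
    # example elsewhere in this file and may not have DLC data for all cameras).
    import matplotlib.pyplot as plt
    times, xys = get_DLC('3663d82b-f197-4e8b-b299-7b803a155b84', 'left')
    point = list(xys.keys())[0]
    x, y = xys[point]
    n = min(len(times), x.size)  # frame counts and stamp counts can differ
    plt.plot(times[:n], x[:n])
    plt.xlabel('time [sec]')
    plt.ylabel('%s x [px]' % point)
    plt.show()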
def download_all_dlc():
    eids = get_repeated_sites()
    one = ONE()
    dataset_types = ['camera.dlc', 'camera.times']
    for eid in eids:
        try:
            a = one.list(eid, 'dataset-types')
            # for newer ibllib versions do [x['dataset_type'] for x in a]
            if not all([u in a for u in dataset_types]):
                print('not all data available')
                continue
            one.load(eid, dataset_types=dataset_types)
        except Exception:
            continue
def load_spike_sorting(eid, one=None, dataset_types=None):
    """
    From an eid, hits the Alyx database and downloads a standard default set of
    dataset types to perform analysis:
    'clusters.channels', 'clusters.depths', 'clusters.metrics',
    'spikes.clusters', 'spikes.times', 'probes.description'
    From a local session path (pathlib.Path), loads the same default set from disk.

    :param eid: experiment UUID or pathlib.Path of the local session
    :param one:
    :param dataset_types: additional spikes/clusters objects to add to the standard default list
    :return: spikes, clusters (dict of bunch, 1 bunch per probe)
    """
    if isinstance(eid, Path):
        return _load_spike_sorting_local(eid)
    if not one:
        one = ONE()
    # This is a first draft; no safeguards, no error handling, and a draft dataset list.
    session_path = one.path_from_eid(eid)
    if not session_path:
        print("no session path")
        return (None, None), 'no session path'
    dtypes_default = [
        'clusters.channels',
        'clusters.depths',
        'clusters.metrics',
        'spikes.clusters',
        'spikes.times',
        'probes.description'
    ]
    if dataset_types is None:
        dtypes = dtypes_default
    else:
        # append the extra optional dataset types
        dtypes = list(set(dataset_types + dtypes_default))
    one.load(eid, dataset_types=dtypes, download_only=True)
    return _load_spike_sorting_local(session_path)
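if __name__ == '__main__':
    # Usage sketch (added for illustration; the eid is reused from the drift
    # example elsewhere in this file and is assumed to have spike sorting).
    spikes, clusters = load_spike_sorting('89f0d6ff-69f4-45bc-b89e-72868abb042a')
    for label in spikes:
        print(label, spikes[label].times.size, 'spikes')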
def check_for_saturation(eid, probes):
    '''
    Reads in spikes for a given session, bins them into time bins and computes
    for how many of them there is so little activity across all channels that
    this must be an artefact (saturation).
    '''
    T_BIN = 0.2  # time bin in sec
    ACT_THR = 0.05  # maximal activity for a saturated segment
    print('Bin size: %s [sec]' % T_BIN)
    print('Activity threshold: %s [fraction]' % ACT_THR)
    # probes = ['probe00', 'probe01']
    probeDict = {'probe00': 'probe_left', 'probe01': 'probe_right'}
    one = ONE()
    dataset_types = ['spikes.times', 'spikes.clusters']
    D = one.load(eid, dataset_types=dataset_types, dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent
    print(alf_path)
    results = []
    for probe in probes:
        probe_path = alf_path / probe
        if not probe_path.exists():
            probe_path = alf_path / probeDict[probe]
        if not probe_path.exists():
            print("%s doesn't exist..." % probe)
            continue
        try:
            spikes = alf.io.load_object(probe_path, 'spikes')
        except Exception:
            continue
        # bin spikes
        R, times, Clusters = bincount2D(spikes['times'], spikes['clusters'], T_BIN)
        saturated_bins = np.where(np.mean(R, axis=0) < ACT_THR)[0]
        if len(saturated_bins) > 1:
            print('WARNING: Saturation present!')
            print(probe)
            print('Number of saturated bins: %s of %s' % (len(saturated_bins), len(times)))
        results.append(['%s_%s' % (eid, probe), times[saturated_bins]])
    np.save('/home/mic/saturation_scan2/%s.npy' % eid, results)
    return results
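if __name__ == '__main__':
    # Usage sketch (added for illustration; the eid is the example mentioned in
    # check_wheel_angle below, and the hard-coded output directory above must exist).
    segments = check_for_saturation('e1023140-50c1-462a-b80e-5e05626d7f0e',
                                    ['probe00', 'probe01'])
    print('%i probe(s) scanned' % len(segments))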
def get_ME(eid, video_type):
    # video_type = 'left'
    one = ONE()
    dataset_types = ['camera.ROIMotionEnergy', 'camera.times']
    a = one.list(eid, 'dataset-types')
    # for newer ibllib versions do [x['dataset_type'] for x in a]
    # if not all([(u in [x['dataset_type'] for x in a]) for u in dataset_types]):
    #     print('not all data available')
    #     return
    one.load(eid, dataset_types=dataset_types)
    local_path = one.path_from_eid(eid)
    alf_path = local_path / 'alf'
    cam0 = alf.io.load_object(alf_path, '%sCamera' % video_type, namespace='ibl')
    ME = np.load(alf_path / f'{video_type}Camera.ROIMotionEnergy.npy')
    Times = cam0['times']
    return Times, ME
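if __name__ == '__main__':
    # Usage sketch (added for illustration; the eid is a placeholder for any
    # session with ROI motion energy computed for the chosen camera).
    import matplotlib.pyplot as plt
    times, me = get_ME('3663d82b-f197-4e8b-b299-7b803a155b84', 'left')
    n = min(len(times), len(me))  # frame counts and stamp counts can differ
    plt.plot(times[:n], me[:n])
    plt.xlabel('time [sec]')
    plt.ylabel('ROI motion energy')
    plt.show()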
def get_dlc_XYs(eid, video_type):
    # video_type = 'left'
    one = ONE()
    dataset_types = ['camera.dlc', 'camera.times']
    a = one.list(eid, 'dataset-types')
    # for newer ibllib versions do [x['dataset_type'] for x in a]
    # if not all([(u in [x['dataset_type'] for x in a]) for u in dataset_types]):
    #     print('not all data available')
    #     return
    one.load(eid, dataset_types=dataset_types)  # clobber=True to force download
    local_path = one.path_from_eid(eid)
    alf_path = local_path / 'alf'
    cam0 = alf.io.load_object(alf_path, '%sCamera' % video_type, namespace='ibl')
    Times = cam0['times']
    cam = cam0['dlc']
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])
    # Set values to nan if the likelihood is too low
    # for pqt: .to_numpy()
    XYs = {}
    for point in points:
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9, cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9, cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x, y])
    return Times, XYs
def get_info(seg_info, sess_info):
    info = []
    one = ONE()
    for i in range(len(sess_info)):
        sess = sess_info[i]
        eid, probe = sess.split('_')
        D = one.load(eid, dataset_types=['trials.intervals'], dclass_output=True)
        info.append([eid, probe,
                     str(Path(D.local_path[0]).parent.parent).split('/')[5:],
                     seg_info[i]])
    return info
def download_raw_video(eid, cameras=None):
    """
    Downloads the raw video from FlatIron or cache dir. This allows you to
    download just one of the three videos.

    :param cameras: the specific camera to load (i.e. 'left', 'right', or 'body')
                    If None, all three videos are downloaded.
    :return: the file path(s) of the raw videos
    """
    one = ONE()
    if cameras:
        cameras = [cameras] if isinstance(cameras, str) else cameras
        cam_files = ['_iblrig_{}Camera.raw.mp4'.format(cam) for cam in cameras]
        datasets = one._alyxClient.get('sessions/' + eid)['data_dataset_session_related']
        urls = [ds['data_url'] for ds in datasets if ds['name'] in cam_files]
        cache_dir = one.path_from_eid(eid).joinpath('raw_video_data')
        if not os.path.exists(str(cache_dir)):
            os.mkdir(str(cache_dir))
        else:  # Check if the files were already downloaded
            # cam_files = [fi[:-4] for fi in cam_files]  # Remove ext
            filenames = [f for f in os.listdir(str(cache_dir))
                         if any([cam in f for cam in cam_files])]
            if filenames:
                return [cache_dir.joinpath(file) for file in filenames]
        return http_download_file_list(urls,
                                       username=one._par.HTTP_DATA_SERVER_LOGIN,
                                       password=one._par.HTTP_DATA_SERVER_PWD,
                                       cache_dir=str(cache_dir))
    else:
        return one.load(eid, ['_iblrig_Camera.raw'], download_only=True)
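if __name__ == '__main__':
    # Usage sketch (added for illustration; the eid is reused from the Viewer
    # example further down, which calls this function to fetch a single video).
    paths = download_raw_video('3663d82b-f197-4e8b-b299-7b803a155b84', cameras='left')
    print(paths)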
def load_spike_sorting(eid, one=None, dataset_types=None):
    """
    From an eid, hits the Alyx database and downloads a standard set of dataset
    types to perform analysis.

    :param eid:
    :param dataset_types: additional spikes/clusters objects to add to the standard list
    :return:
    """
    if not one:
        one = ONE()
    # This is a first draft; no safeguards, no error handling, and a draft dataset list.
    session_path = one.path_from_eid(eid)
    dtypes = [
        'clusters.channels',
        'clusters.depths',
        'clusters.metrics',
        'spikes.clusters',
        'spikes.times',
        'probes.description',
    ]
    if dataset_types:
        dtypes = list(set(dataset_types + dtypes))
    _ = one.load(eid, dataset_types=dtypes, download_only=True)
    probes = alf.io.load_object(session_path.joinpath('alf'), 'probes')
    spikes = {}
    clusters = {}
    for i, _ in enumerate(probes['description']):
        label = probes['description'][i]['label']
        probe_path = session_path.joinpath('alf', label)
        clusters[label] = alf.io.load_object(probe_path, object='clusters')
        spikes[label] = alf.io.load_object(probe_path, object='spikes')
    return spikes, clusters
    idx_rf = idx_rfs[0]
    stim_ts[idx_rf], stim_datas[idx_rf] = interpolate_rf_mapping_stimulus(
        frame_ttl_signal, stim_ts[idx_rf], frames, t_bin)

    if save and incorrect_pulses == 0:
        print('exporting stimulus information to %s' % os.path.join(session_path, 'alf/'))
        export_to_alf(session_path, stim_ts, stim_datas, stim_names)
    elif save and incorrect_pulses != 0:
        print('did not find expected TTL pulses; not saving extracted signals')


if __name__ == '__main__':
    # example usage
    from oneibl.one import ONE
    one = ONE()
    eid = one.search(subject='ZM_2104', date='2019-09-19', number=1)
    dtypes = [
        'ephysData.raw.meta',
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        '_iblrig_RFMapStim.raw',
        '_iblrig_codeFiles.raw',
        '_iblrig_taskSettings.raw'
    ]
    files_paths = one.load(eid[0], dataset_types=dtypes, clobber=False, download_only=True)
    session_path = get_session_path(files_paths[0])
    extract_stimulus_info_to_alf(session_path, save=True)
["NYU-11", "2020-02-21", "probe01", "2020-09-13T11:19:59_petrina.lau"], ["NYU-12", "2020-01-22", "probe00", "2020-09-13T19:53:43_petrina.lau"], ["SWC_014", "2019-12-12", "probe00", "2020-07-27T11:27:16_noam.roth"], ["SWC_038", "2020-08-01", "probe01", "2020-08-31T12:32:05_nate"], ["ibl_witten_14", "2019-12-11", "probe00", "2020-06-14T15:33:45_noam.roth"]] for sess in small_scaling: eid = one.search(subject=sess[0], date=sess[1])[0] probe_label = sess[2] # for eid, probe_label in zip(eid_several, probe_several): trajectory = one.alyx.rest('trajectories', 'list', provenance='Ephys aligned histology track', session=eid, probe=probe_label) subject = trajectory[0]['session']['subject'] date = trajectory[0]['session']['start_time'][0:10] chn_coords = one.load(eid, dataset_types=['channels.localCoordinates'])[0] depths = chn_coords[:, 1] insertion = one.alyx.rest('insertions', 'list', session=eid, name=probe_label) xyz_picks = np.array(insertion[0]['json']['xyz_picks']) / 1e6 alignments = trajectory[0]['json'] def plot_regions(region, label, colour, ax): for reg, col in zip(region, colour): height = np.abs(reg[1] - reg[0]) color = col / 255 ax.bar(x=0.5, height=height, width=1, color=color, bottom=reg[0], edgecolor='w') ax.set_yticks(label[:, 0].astype(int)) ax.set_yticklabels(label[:, 1])
import matplotlib.pyplot as plt
import numpy as np

import alf.io
from brainbox.singlecell import calculate_peths
from oneibl.one import ONE

one = ONE()
eid = one.search(subject='KS004', date=['2019-09-25'], task_protocol='ephysChoiceWorld')[0]
datasets = one.load(eid, download_only=True)
ses_path = datasets[0].local_path.parent
spikes = alf.io.load_object(ses_path, 'spikes')
trials = alf.io.load_object(ses_path, '_ibl_trials')
peth, bs = calculate_peths(spikes.times, spikes.clusters, [225, 52], trials.goCue_times)

plt.plot(peth.tscale, peth.means.T)
for m in np.arange(peth.means.shape[0]):
    plt.fill_between(peth.tscale,
                     peth.means[m, :].T - peth.stds[m, :].T / 20,
                     peth.means[m, :].T + peth.stds[m, :].T / 20,
                     alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
                     linewidth=4, linestyle='dashdot', antialiased=True)
    elif ses_type == 'datajoint':
        eid = sessions.loc[i, 'session_eid']
    elif ses_type == 'sessions':
        eid = sessions[i]['url'][-36:]

    # Load in data
    try:
        spikes, clusters, channels = bbone.load_spike_sorting_with_channel(
            eid, aligned=True, one=one)
        ses_path = one.path_from_eid(eid)
        if DOWNLOAD_TRIALS:
            _ = one.load(eid,
                         dataset_types=['trials.stimOn_times', 'trials.probabilityLeft',
                                        'trials.contrastLeft', 'trials.contrastRight',
                                        'trials.feedbackType', 'trials.choice',
                                        'trials.feedback_times'],
                         download_only=True, clobber=True)
        trials = alf.io.load_object(join(ses_path, 'alf'), 'trials')
    except Exception as error_message:
        print(error_message)
        continue

    # Check data integrity
    if check_trials(trials) is False:
        continue

    # Extract session data depending on whether input is a list of sessions or insertions
    if ses_type == 'insertions':
ss = []
one = ONE()
assert len(eids) == len(probes) == len(session_rank)
metric_list = []
lab_list = []
for eid, probe, s in zip(eids, probes, session_rank):
    print(eid)
    if eid in bad_eids:
        continue
    session_path = one.path_from_eid(eid)
    if not session_path:
        print(session_path)
        print("no session path")
        continue
    _ = one.load(eid, dataset_types='clusters.metrics', download_only=True)
    try:
        _ = alf.io.load_object(session_path.joinpath('alf'), 'probes')
    except FileNotFoundError:
        print(session_path.joinpath('alf'))
        print("no probes")
        continue
    probe_path = session_path.joinpath('alf', probe)
    try:
        metrics = alf.io.load_object(probe_path, object='clusters.metrics')
    except FileNotFoundError:
        print(probe_path)
        print("one probe missing")
        continue
import os
import sys

# Alternatively, add the 'ibllib - brainbox', 'iblscripts - certification', and
# 'analysis - cert_master_fn' repositories (on those branches) to your python path:
sys.path.append(os.path.expanduser('~/Documents/code/ibllib'))
sys.path.append(os.path.expanduser('~/Documents/code/iblscripts'))

from oneibl.one import ONE
from plot import gen_figures

one = ONE()
eid = one.search(subject='ZM_2104', date='2019-09-19', number=1)[0]
one.load(eid, dataset_types=one.list(), clobber=False, download_only=True)
gen_figures(eid, probe='probe_right', cluster_ids_summary=1)
keys, eIDs = (acquisition.Session.proj(
    'session_uuid', session_date="date(session_start_time)") &
    behavior.TrialSet & sessions_with_dates).fetch('KEY', 'session_uuid')

# update response time of behavior.TrialSet.Trial
sessions_with_small_rts = []
unupdated_keys = []
updated_keys = []
for key, eID in tqdm(zip(keys, eIDs), position=0):
    dtypes = ['trials.stimOn_times', 'trials.response_times']
    try:
        files = one.load(str(eID), dataset_types=dtypes, download_only=True)
        ses_path = alf.io.get_session_path(files[0])
        trials = alf.io.load_object(ses_path.joinpath('alf'), '_ibl_trials')
    except Exception as e:
        print(str(eID) + ': ' + str(e))
        continue
    if np.median(trials.response_times - trials.stimOn_times) < 0.01:
        print('\n Still having small rt: ' + str(eID))
        unupdated_keys.append(key)
        sessions_with_small_rts.append(eID)
    else:
        for itrial, response_time in enumerate(trials.response_times):
            if len(behavior.TrialSet.Trial & key
def check_wheel_angle(eid):
    Plot = True
    one = ONE()
    # eid = 'e1023140-50c1-462a-b80e-5e05626d7f0e'  # at least 9 bad cases
    # eid = one.search(subject='ZM_2104', date='2019-09-19', number=1)
    dataset_types = ['wheel.position', 'wheel.timestamps',
                     'trials.feedback_times', 'trials.feedbackType']
    D = one.load(eid, dataset_types=dataset_types, clobber=False, download_only=True)
    session_path = Path(D[0]).parent

    wheel = alf.io.load_object(session_path, 'wheel')
    trials = alf.io.load_object(session_path, 'trials')
    reward_success = trials['feedback_times'][trials['feedbackType'] == 1]
    reward_failure = trials['feedback_times'][trials['feedbackType'] == -1]

    if Plot:
        plt.plot(wheel['times'], wheel['position'], linestyle='', marker='o')
        # iblplt.vertical_lines(trials['stimOn_times'], ymin=-100, ymax=100,
        #                       color='r', linewidth=0.5, label='stimOn_times')
        # iblplt.vertical_lines(reward_failure, ymin=-100, ymax=100,
        #                       color='b', linewidth=0.5, label='reward_failure')
        iblplt.vertical_lines(reward_success, ymin=-100, ymax=100,
                              color='k', linewidth=0.5, label='reward_success')
        plt.legend()
        plt.xlabel('time [sec]')
        plt.ylabel('wheel linear displacement [cm]')
        plt.show()

    # get the fraction of reward deliveries where the wheel was silent during
    # the time_delay before the reward
    time_delay = 0.5
    bad_cases1 = []
    for rew in reward_success:
        left = wheel['times'][find_nearest(wheel['times'], rew - time_delay)]
        right = wheel['times'][find_nearest(wheel['times'], rew)]
        if left == right and left < rew - time_delay:
            bad_cases1.append(rew)
    if len(bad_cases1) == 0:
        print('Good news, no impossible case found.')
    else:
        print('Bad news, at least one impossible case found.')
    return len(bad_cases1)
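if __name__ == '__main__':
    # Usage sketch (added for illustration; the eid is the example mentioned in
    # the comments above, which has at least 9 bad cases).
    n_bad = check_wheel_angle('e1023140-50c1-462a-b80e-5e05626d7f0e')
    print('%i impossible case(s)' % n_bad)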
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np

from oneibl.one import ONE
import alf.io as ioalf
import ibllib.plots as iblplt
from brainbox.processing import bincount2D

T_BIN = 0.01

# get the data from flatiron and the current folder
one = ONE()
eid = one.search(subject='ZM_1150', date='2019-05-07', number=1)
D = one.load(eid[0], clobber=False, download_only=True)
session_path = Path(D.local_path[0]).parent

# load objects
spikes = ioalf.load_object(session_path, 'spikes')
clusters = ioalf.load_object(session_path, 'clusters')
channels = ioalf.load_object(session_path, 'channels')
trials = ioalf.load_object(session_path, '_ibl_trials')

# compute the raster map as a function of cluster number
R, times, cluster_ids = bincount2D(spikes['times'], spikes['clusters'], T_BIN)

# plot the raster map
plt.imshow(R, aspect='auto', cmap='binary',
def Viewer(eid, video_type, trial_range, save_video=True, eye_zoom=False):
    '''
    eid: session id, e.g. '3663d82b-f197-4e8b-b299-7b803a155b84'
    video_type: one of 'left', 'right', 'body'
    trial_range: first and last trial number of the range to be shown, e.g. [5, 7]
    save_video: video is displayed and saved in a local folder

    Example usage to view and save a labeled video with wheel angle:
    Viewer('3663d82b-f197-4e8b-b299-7b803a155b84', 'left', [5, 7])
    3D example: 'cb2ad999-a6cb-42ff-bf71-1774c57e5308', [5, 7]
    '''
    save_vids_here = '/home/mic/'
    if save_vids_here[-1] != '/':
        return 'Last character of save_vids_here must be slash'

    one = ONE()
    dataset_types = ['camera.times', 'wheel.position', 'wheel.timestamps',
                     'trials.intervals', 'camera.dlc']
    a = one.list(eid, 'dataset-types')
    assert all([i in a for i in dataset_types]), 'For this eid, not all data available'

    D = one.load(eid, dataset_types=dataset_types, dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent / 'alf'

    # Download a single video
    video_data = alf_path.parent / 'raw_video_data'
    download_raw_video(eid, cameras=[video_type])
    video_path = list(video_data.rglob('_iblrig_%sCamera.raw.*' % video_type))[0]
    print(video_path)

    # that gives cam time stamps and DLC output (change to alf_path eventually)
    cam = alf.io.load_object(alf_path, '%sCamera' % video_type, namespace='ibl')
    # just to read in times for newer data (which has DLC results in pqt format):
    # cam = alf.io.load_object(alf_path, '_ibl_%sCamera' % video_type)

    # set where to read and save the video and get video info
    cap = cv2.VideoCapture(video_path.as_uri())
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    size = (int(cap.get(3)), int(cap.get(4)))

    assert length < len(cam['times']), '#frames > #stamps'
    print(eid, ', ', video_type, ', fps:', fps,
          ', #frames:', length, ', #stamps:', len(cam['times']),
          ', #frames - #stamps = ', length - len(cam['times']))

    # pick the trial range for which to display stuff
    trials = alf.io.load_object(alf_path, 'trials', namespace='ibl')
    num_trials = len(trials['intervals'])
    if trial_range[-1] > num_trials - 1:
        print('There are only %s trials' % num_trials)

    frame_start = find_nearest(cam['times'], [trials['intervals'][trial_range[0]][0]])
    frame_stop = find_nearest(cam['times'], [trials['intervals'][trial_range[-1]][1]])

    '''
    wheel related stuff
    '''
    wheel = alf.io.load_object(alf_path, 'wheel', namespace='ibl')
    import brainbox.behavior.wheel as wh
    try:
        pos, t = wh.interpolate_position(wheel['timestamps'], wheel['position'], freq=1000)
    except BaseException:
        pos, t = wh.interpolate_position(wheel['times'], wheel['position'], freq=1000)

    w_start = find_nearest(t, trials['intervals'][trial_range[0]][0])
    w_stop = find_nearest(t, trials['intervals'][trial_range[-1]][1])

    # confine to interval
    pos_int = pos[w_start:w_stop]
    t_int = t[w_start:w_stop]

    # align cam stamps and interpolated wheel stamps
    wheel_pos = []
    kk = 0
    for wt in cam['times'][frame_start:frame_stop]:
        wheel_pos.append(pos_int[find_nearest(t_int, wt)])
        kk += 1
        if kk % 3000 == 0:
            print('iteration', kk)

    '''
    DLC related stuff
    '''
    Times = cam['times'][frame_start:frame_stop]
    del cam['times']

    # some exceptions for inconsistent data formats
    try:
        dlc_name = '_ibl_%sCamera.dlc.pqt' % video_type
        dlc_path = alf_path / dlc_name
        cam = pd.read_parquet(dlc_path, engine="fastparquet")
        print('it is pqt')
    except BaseException:
        raw_vid_path = alf_path.parent / 'raw_video_data'
        cam = alf.io.load_object(raw_vid_path, '%sCamera' % video_type, namespace='ibl')

    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])
    if len(points) == 1:
        cam = cam['dlc']
        points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])

    if video_type != 'body':
        d = list(points)
        d.remove('tube_top')
        d.remove('tube_bottom')
        points = np.array(d)

    # Set values to nan if the likelihood is too low
    # for pqt: .to_numpy()
    XYs = {}
    for point in points:
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9, cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9, cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x[frame_start:frame_stop], y[frame_start:frame_stop]])

    # Just for 3D testing
    # return XYs

    # Zoom at eye
    if eye_zoom:
        pivot = np.nanmean(XYs['pupil_top_r'], axis=1)
        x0 = int(pivot[0]) - 33
        x1 = int(pivot[0]) + 33
        y0 = int(pivot[1]) - 28
        y1 = int(pivot[1]) + 38
        size = (66, 66)
        dot_s = 1  # [px] for painting DLC dots
    else:
        x0 = 0
        x1 = size[0]
        y0 = 0
        y1 = size[1]
        if video_type == 'left':
            dot_s = 10  # [px] for painting DLC dots
        else:
            dot_s = 5

    if save_video:
        loc = save_vids_here + '%s_trials_%s_%s_%s.mp4' % (
            eid, trial_range[0], trial_range[-1], video_type)
        out = cv2.VideoWriter(loc, cv2.VideoWriter_fourcc(*'mp4v'),
                              fps, size)  # put , 0 if grey scale

    # writing stuff on frames
    font = cv2.FONT_HERSHEY_SIMPLEX
    if video_type == 'left':
        bottomLeftCornerOfText = (20, 1000)
        fontScale = 4
    else:
        bottomLeftCornerOfText = (10, 500)
        fontScale = 2
    lineType = 2

    # assign a color to each DLC point (now: all points red)
    cmap = matplotlib.cm.get_cmap('Spectral')
    CR = np.arange(len(points)) / len(points)
    block = np.ones((2 * dot_s, 2 * dot_s, 3))

    # set the start frame
    cap.set(1, frame_start)

    k = 0
    while cap.isOpened():
        ret, frame = cap.read()
        gray = frame

        # print the wheel angle
        fontColor = (255, 255, 255)
        Angle = round(wheel_pos[k], 2)
        Time = round(Times[k], 3)
        cv2.putText(gray, 'Wheel angle: ' + str(Angle),
                    bottomLeftCornerOfText, font, fontScale / 2, fontColor, lineType)
        a, b = bottomLeftCornerOfText
        bottomLeftCornerOfText0 = (int(a * 10 + b / 2), b)
        cv2.putText(gray, ' time: ' + str(Time),
                    bottomLeftCornerOfText0, font, fontScale / 2, fontColor, lineType)

        # print the DLC dots
        ll = 0
        for point in points:
            # put the point color legend
            fontColor = (np.array([cmap(CR[ll])]) * 255)[0][:3]
            a, b = bottomLeftCornerOfText
            if video_type == 'right':
                bottomLeftCornerOfText2 = (a, a * 2 * (1 + ll))
            else:
                bottomLeftCornerOfText2 = (b, a * 2 * (1 + ll))
            fontScale2 = fontScale / 4
            cv2.putText(gray, point, bottomLeftCornerOfText2,
                        font, fontScale2, fontColor, lineType)

            X0 = XYs[point][0][k]
            Y0 = XYs[point][1][k]
            # transform for opencv: swap x and y
            X = Y0
            Y = X0
            if not np.isnan(X) and not np.isnan(Y):
                col = (np.array([cmap(CR[ll])]) * 255)[0][:3]
                # col = np.array([0, 0, 255])  # all points red
                X = X.astype(int)
                Y = Y.astype(int)
                gray[X - dot_s:X + dot_s, Y - dot_s:Y + dot_s] = block * col
            ll += 1

        gray = gray[y0:y1, x0:x1]
        if save_video:
            out.write(gray)
        cv2.imshow('frame', gray)
        cv2.waitKey(1)
        k += 1
        if k == (frame_stop - frame_start) - 1:
            break

    if save_video:
        out.release()
    cap.release()
    cv2.destroyAllWindows()
# START BIG OVERVIEW PLOT
# =============================================

for lidx, lab in enumerate(users):
    print(lab)
    # LOAD ALL AMBIENT SENSOR DATA FOR THIS LAB
    # see https://github.com/int-brain-lab/ibllib/issues/51#event-2148508648
    eids, details = one.search(dataset_types='_iblrig_ambientSensorData.raw',
                               lab=lab, details=True)
    for ix, eid in enumerate(eids):
        asd = one.load(eid, dataset_types=['_iblrig_ambientSensorData.raw'])
        ambient_tmp = pd.DataFrame(asd[0])
        if ambient_tmp.empty:
            continue
        # HACK: wait for Nicco to return values, rather than wrapping in dict
        ambient_tmp['Temperature_C'] = ambient_tmp['Temperature_C'].apply(pd.Series)
        ambient_tmp['RelativeHumidity'] = ambient_tmp['RelativeHumidity'].apply(pd.Series)
        ambient_tmp['AirPressure_mb'] = ambient_tmp['AirPressure_mb'].apply(pd.Series)
        # take values at the beginning and end of the session
        ambient_summ = ambient_tmp.iloc[[2, -2]].reset_index()
"probe00", "probe01", "probe00", "probe01", "probe00", "probe01", "probe00", "probe00", "probe00", "probe00", "probe00", "probe00", "probe00" ] one = ONE() assert len(eids) == len(probes) metrics = {} for _, metric_name in metric_funcs: metrics[metric_name] = [] for i, (eid, probe) in enumerate(zip(eids, probes)): print(eid) if eid in bad_eids: continue print("{} from {}".format(i, len(eids))) print(one.list(eid, 'subjects')) coords = one.load(eid, dataset_types=['probes.trajectory']) for c in coords[0]: if c['label'] == probe: print("{}, x: {}, y: {}, z: {}".format(c['label'], c['x'], c['y'], c['z'])) continue spikes, _ = load_spike_sorting(eid, one=one) spikes = spikes[0] if spikes[probe]['times'] is None: print('empty times skip') continue fr = calc_fr(spikes[probe]['times'], spikes[probe]['clusters']) labs.append(one.list(eid, 'labs'))
del spikes, clusters, clusters_brain, channels  # delete for the purpose of the example

# ---------------------------------------------
# 3. I don't want to connect to ONE and I already know my session path
session_path = one.path_from_eid(eid)  # replace by your local path
spikes, clusters = bbone.load_spike_sorting(session_path, one=one)
# TODO offline loading of channel locations? Probably by caching the queries.

# ---------------- WIP ---------------------
# TODO one.load_object(): return dict of bunch

# --- Download spikes data
# 1. either a specific subset of dataset types via the one command
# 2. or the whole spikes object via the one
'''
# Option 1 -- Download only a subset of the datasets in the spikes object
dataset_types = ['spikes.times', 'spikes.clusters']
one.load(eid, dataset_types=dataset_types)

# Option 2 -- Download and load into memory the whole spikes object
spks_b1 = one.load_object(eid, 'spikes')
# TODO OUTPUT DOES NOT WORK for multiple probes; which probe is returned is unknown
# TODO return dict of bunch

# --- Get the single probe directory filename either by
# 1. getting the probe description in alf
# 2. using the alyx rest end point
BIN_SIZE = 0.025  # seconds
SMOOTH_SIZE = 0.025  # seconds; standard deviation of gaussian kernel
PCA_DIMS = 20
CCA_DIMS = PCA_DIMS
N_SPLITS = 5
RNG_SEED = 0

# get the data from flatiron
subject = 'KS005'
date = '2019-08-30'
number = 1

one = ONE()
eid = one.search(subject=subject, date=date, number=number)
D = one.load(eid[0], download_only=True)
session_path = Path(D.local_path[0]).parent

spikes = ioalf.load_object(session_path, 'spikes')
clusters = ioalf.load_object(session_path, 'clusters')
# channels = ioalf.load_object(session_path, 'channels')
trials = ioalf.load_object(session_path, 'trials')

# bin spikes and get the trial IDs associated with them
binned_spikes, binned_trialIDs, _ = bin_spikes_trials(spikes, trials, bin_size=BIN_SIZE)

# define areas
brain_areas = np.unique(clusters.brainAcronyms)
brain_areas = brain_areas[1:4]  # [take subset for testing]
one = ONE()

# Find sessions
dataset_types = ['spikes.times', 'spikes.amps', 'spikes.depths']
# eids = one.search(dataset_types=dataset_types,
#                   project='ibl_neuropixel_brainwide_01',
#                   task_protocol='_iblrig_tasks_ephysChoiceWorld')
# eid = eids[0]

# Test with little drift: '7cdb71fb-928d-4eea-988f-0b655081f21c'
eid = '89f0d6ff-69f4-45bc-b89e-72868abb042a'  # Test with huge drift

# Get dataset
spike_times, spike_amps, spike_depths = \
    one.load(eid, dataset_types=dataset_types)

drift = estimate_drift(spike_times, spike_amps, spike_depths, display=False)

# PLOT
# Tight layout
fig3 = plt.figure(constrained_layout=True)
gs = fig3.add_gridspec(3, 3)
f3_ax0 = fig3.add_subplot(gs[0, :])
f3_ax0.plot(drift)
f3_ax1 = fig3.add_subplot(gs[1:, :])
bbplot.driftmap(spike_times, spike_depths, ax=f3_ax1, plot_style='bincount')
f3_ax0.set_xlim(f3_ax1.get_xlim())
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 12:18:30 2019

@author: guido
"""

from pathlib import Path
import alf.io
from oneibl.one import ONE

# Query sessions with available grating stimulus data using ONE
one = ONE()
dtypes = ['odsgratings.times.00']
eids = one.search(dataset_types=dtypes)

# Loop over sessions
for i, eid in enumerate(eids):
    d = one.load(eid, dataset_types=dtypes, download_only=True, dclass_output=True)
    """
    ses_path = Path(d.local_path[0]).parent
    segments = alf.io.load_object(ses_path, '_ibl_leftCamera', short_keys=True)
    dlc, timestamps = one.load(eid, dataset_types=dtypes)
    """
def load_trials_df(eid, one=None, maxlen=None, t_before=0., t_after=0.,
                   ret_wheel=False, ret_abswheel=False, wheel_binsize=0.02):
    """
    Generate a pandas dataframe of per-trial timing information about a given session.
    Each row in the frame will correspond to a single trial, with timing values indicating
    timing session-wide (i.e. time in seconds since session start). Can optionally return a
    resampled wheel velocity trace of either the signed or absolute wheel velocity.

    The resulting dataframe will have a new set of columns, trial_start and trial_end, which
    define via t_before and t_after the span of time assigned to a given trial.
    (useful for bb.modeling.glm)

    Parameters
    ----------
    eid : str
        Session UUID string to pass to ONE
    one : oneibl.one.OneAlyx, optional
        one object to use for loading. Will generate internal one if not used, by default None
    maxlen : float, optional
        Maximum trial length for inclusion in df. Trials where feedback - response is longer
        than this value will not be included in the dataframe, by default None
    t_before : float, optional
        Time before stimulus onset to include for a given trial, as defined by the trial_start
        column of the dataframe. If zero, trial_start will be identical to stimOn, by default 0.
    t_after : float, optional
        Time after feedback to include in the trial, as defined by the trial_end column of the
        dataframe. If zero, trial_end will be identical to feedback, by default 0.
    ret_wheel : bool, optional
        Whether to return the time-resampled wheel velocity trace, by default False
    ret_abswheel : bool, optional
        Whether to return the time-resampled absolute wheel velocity trace, by default False
    wheel_binsize : float, optional
        Time bins to resample wheel velocity to, by default 0.02

    Returns
    -------
    pandas.DataFrame
        Dataframe with trial-wise information. Indices are the actual trial order in the
        original data, preserved even if some trials do not meet the maxlen criterion. As a
        result the dataframe will not have a monotonic index.
        Has special columns trial_start and trial_end which define the start and end times
        via t_before and t_after.
    """
    if not one:
        one = ONE()
    if ret_wheel and ret_abswheel:
        raise ValueError('ret_wheel and ret_abswheel cannot both be true.')

    # Define which datatypes we want to pull out
    trialstypes = [
        'trials.choice',
        'trials.probabilityLeft',
        'trials.feedbackType',
        'trials.feedback_times',
        'trials.contrastLeft',
        'trials.contrastRight',
        'trials.goCue_times',
        'trials.stimOn_times',
    ]

    # A quick function to remap probabilities in those sessions where it was not computed
    # correctly
    def remap_trialp(probs):
        # Block probabilities in trial data aren't accurate and need to be remapped
        validvals = np.array([0.2, 0.5, 0.8])
        diffs = np.abs(np.array([x - validvals for x in probs]))
        maps = diffs.argmin(axis=1)
        return validvals[maps]

    starttimes = one.load(eid, dataset_types=['trials.stimOn_times'])[0]
    endtimes = one.load(eid, dataset_types=['trials.feedback_times'])[0]
    tmp = one.load(eid, dataset_types=trialstypes)

    if maxlen is not None:
        with np.errstate(invalid='ignore'):
            keeptrials = (endtimes - starttimes) <= maxlen
    else:
        keeptrials = range(len(starttimes))
    trialdata = {x.split('.')[1]: tmp[i][keeptrials] for i, x in enumerate(trialstypes)}
    trialdata['probabilityLeft'] = remap_trialp(trialdata['probabilityLeft'])
    trialsdf = pd.DataFrame(trialdata)
    if maxlen is not None:
        trialsdf.set_index(np.nonzero(keeptrials)[0], inplace=True)
    trialsdf['trial_start'] = trialsdf['stimOn_times'] - t_before
    trialsdf['trial_end'] = trialsdf['feedback_times'] + t_after
    if not ret_wheel and not ret_abswheel:
        return trialsdf

    wheel = one.load_object(eid, 'wheel')
    whlpos, whlt = wheel.position, wheel.timestamps
    starttimes = trialsdf['trial_start']
    endtimes = trialsdf['trial_end']
    wh_endlast = 0
    trials = []
    for (start, end) in np.vstack((starttimes, endtimes)).T:
        wh_startind = np.searchsorted(whlt[wh_endlast:], start) + wh_endlast
        wh_endind = np.searchsorted(whlt[wh_endlast:], end, side='right') + wh_endlast + 4
        wh_endlast = wh_endind
        tr_whlpos = whlpos[wh_startind - 1:wh_endind + 1]
        tr_whlt = whlt[wh_startind - 1:wh_endind + 1] - start
        tr_whlt[0] = 0.  # Manual previous-value interpolation
        whlseries = TimeSeries(tr_whlt, tr_whlpos, columns=['whlpos'])
        whlsync = sync(wheel_binsize, timeseries=whlseries, interp='previous')
        trialstartind = np.searchsorted(whlsync.times, 0)
        trialendind = np.ceil((end - start) / wheel_binsize).astype(int)
        trpos = whlsync.values[trialstartind:trialendind + trialstartind]
        whlvel = trpos[1:] - trpos[:-1]
        whlvel = np.insert(whlvel, 0, 0)
        if np.abs((trialendind - len(whlvel))) > 0:
            raise IndexError('Mismatch between expected length of wheel data and actual.')
        if ret_wheel:
            trials.append(whlvel)
        elif ret_abswheel:
            trials.append(np.abs(whlvel))
    trialsdf['wheel_velocity'] = trials
    return trialsdf
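if __name__ == '__main__':
    # Usage sketch (added for illustration; the eid is a placeholder and must
    # point to a session with trials and wheel data).
    df = load_trials_df('e1023140-50c1-462a-b80e-5e05626d7f0e',
                        maxlen=2., t_before=0.4, t_after=0.6, ret_abswheel=True)
    print(df[['trial_start', 'trial_end', 'choice']].head())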
# Specify the subject, date and probe we are interested in
subject = 'CSHL049'
date = '2020-01-08'
sess_no = 1
probe_label = 'probe00'
eid = one.search(subject=subject, date=date, number=sess_no)[0]

# Specify the dataset types of interest
dtypes = ['_iblqc_ephysSpectralDensity.freqs',
          '_iblqc_ephysSpectralDensity.power',
          'channels.rawInd',
          'channels.localCoordinates']

# Download the data and get paths to the downloaded data
_ = one.load(eid, dataset_types=dtypes, download_only=True)
ephys_path = one.path_from_eid(eid).joinpath('raw_ephys_data', probe_label)
alf_path = one.path_from_eid(eid).joinpath('alf', probe_label)

# Index of good recording channels along the probe
chn_inds = np.load(alf_path.joinpath('channels.rawInd.npy'))
# Position of each recording channel along the probe
chn_pos = np.load(alf_path.joinpath('channels.localCoordinates.npy'))
# Get the range for the y-axis
depth_range = [np.min(chn_pos[:, 1]), np.max(chn_pos[:, 1])]

# Load in the power spectrum data
lfp_spectrum = alf.io.load_object(ephys_path, 'ephysSpectralDensityLF', namespace='iblqc')
lfp_freq = lfp_spectrum['freqs']
lfp_power = lfp_spectrum['power'][:, chn_inds]
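# Plotting sketch (added for illustration): display the dB-scaled LFP power
# across frequency and depth; the 300 Hz cutoff and dB scaling are assumptions,
# not taken from the original script.
import matplotlib.pyplot as plt
freq_mask = lfp_freq < 300
plt.imshow(10 * np.log10(lfp_power[freq_mask].T), aspect='auto', origin='lower',
           extent=[lfp_freq[freq_mask][0], lfp_freq[freq_mask][-1],
                   depth_range[0], depth_range[1]])
plt.xlabel('Frequency [Hz]')
plt.ylabel('Depth along probe [um]')
plt.colorbar(label='LFP power [dB]')
plt.show()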