def extract_rfmapping(
    session_path: str, sync: dict = None, sync_map: dict = None, trfm: np.array = None
) -> np.array:
    """
    Extract receptive-field mapping stimulus times for a passive session.

    :param session_path: absolute path of the session
    :param sync: dict of sync pulses; fetched from the main probe when None
    :param sync_map: channel map matching the sync dict; fetched when None
    :param trfm: (start, stop) times of the RF-mapping period; derived from the
                 passive periods table when None
    :return: array of RF-mapping stimulus times (_ibl_passiveRFM.times.npy)

    NOTE: the original return annotation declared ``Tuple[np.array, np.array]``
    but a single array has always been returned; the annotation is corrected here.
    """
    meta = _load_passive_stim_meta()
    # Resolve the metadata key of the RF-mapping stimulus (e.g. "VISUAL_STIM_1")
    # by inverting the name -> id mapping stored in the metadata
    mkey = (
        "VISUAL_STIM_"
        + {v: k for k, v in meta["VISUAL_STIMULI"].items()}["receptive_field_mapping"]
    )
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)
    if trfm is None:
        passivePeriods_df = extract_passive_periods(session_path, sync=sync, sync_map=sync_map)
        trfm = passivePeriods_df.RFM.values

    # Frame2ttl fronts restricted to the RF-mapping window, then cleaned
    fttl = ephys_fpga.get_sync_fronts(sync, sync_map["frame2ttl"], tmin=trfm[0], tmax=trfm[1])
    fttl = ephys_fpga._clean_frame2ttl(fttl)

    RF_file = Path(session_path).joinpath("raw_passive_data", "_iblrig_RFMapStim.raw.bin")
    passiveRFM_frames, RF_ttl_trace = _reshape_RF(RF_file=RF_file, meta_stim=meta[mkey])
    rf_id_up, rf_id_dw, RF_n_ttl_expected = _get_id_raisefall_from_analogttl(RF_ttl_trace)
    meta[mkey]["ttl_num"] = RF_n_ttl_expected

    # Pair up TTL fronts separated by less than 1 s into on/off events
    rf_times_on_idx = np.where(np.diff(fttl["times"]) < 1)[0]
    rf_times_off_idx = rf_times_on_idx + 1
    RF_times = fttl["times"][np.sort(np.concatenate([rf_times_on_idx, rf_times_off_idx]))]
    RF_times_1 = RF_times[0::2]
    # Interpolate times for RF before outputting dataset
    passiveRFM_times = _interpolate_rf_mapping_stimulus(
        idxs_up=rf_id_up,
        idxs_dn=rf_id_dw,
        times=RF_times_1,
        Xq=np.arange(passiveRFM_frames.shape[0]),
        t_bin=1 / FRAME_FS,
    )
    return passiveRFM_times  # _ibl_passiveRFM.times.npy
def extract_task_replay(
    session_path: str, sync: dict = None, sync_map: dict = None, treplay: np.array = None
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Extract the task-replay stimuli (Gabor patches, valve, tone and noise events).

    :param session_path: absolute path of the session
    :param sync: dict of sync pulses; fetched from the main probe when None
    :param sync_map: channel map matching the sync dict; fetched when None
    :param treplay: (start, stop) times of the task-replay period; derived from
                    the passive periods table when None
    :return: (passiveGabor_df, passiveStims_df)
    """
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)
    if treplay is None:
        # Derive the replay window from the passive periods table
        passivePeriods_df = extract_passive_periods(session_path, sync=sync, sync_map=sync_map)
        treplay = passivePeriods_df.taskReplay.values

    replay_start = treplay[0]
    # Visual stimuli from the frame2ttl channel
    frame_fronts = ephys_fpga.get_sync_fronts(sync, sync_map["frame2ttl"], tmin=replay_start)
    frame_fronts = ephys_fpga._clean_frame2ttl(frame_fronts)
    passiveGabor_df = _extract_passiveGabor_df(frame_fronts, session_path)
    # Reward valve openings from the bpod channel
    bpod_fronts = ephys_fpga.get_sync_fronts(sync, sync_map["bpod"], tmin=replay_start)
    valve_intervals = _extract_passiveValve_intervals(bpod_fronts)
    # Tone / noise bursts from the audio channel; parsing depends on task version
    task_version = _load_task_protocol(session_path)
    audio_fronts = ephys_fpga.get_sync_fronts(sync, sync_map["audio"], tmin=replay_start)
    tone_intervals, noise_intervals = _extract_passiveAudio_intervals(audio_fronts, task_version)

    passiveStims_df = pd.DataFrame(
        np.concatenate([valve_intervals, tone_intervals, noise_intervals], axis=1),
        columns=["valveOn", "valveOff", "toneOn", "toneOff", "noiseOn", "noiseOff"],
    )
    return (
        passiveGabor_df,
        passiveStims_df,
    )  # _ibl_passiveGabor.table.csv, _ibl_passiveStims.times_table.csv
def compare_wheel_fpga_behaviour(session_path, display=DISPLAY):
    """
    Compare the wheel traces extracted from the FPGA sync and from the Bpod data.

    :param session_path: path of the session to compare
    :param display: if True, plot both raw traces on a common timebase
    :return: (raw_wheel, wheel) where raw_wheel holds the raw traces and wheel
             holds both traces resampled at 10 ms on a common time scale
    """
    # Start from a clean alf folder so extraction runs from scratch
    shutil.rmtree(session_path.joinpath('alf'), ignore_errors=True)
    sync, chmap = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)
    fpga_t, fpga_pos = ephys_fpga.extract_wheel_sync(sync, chmap=chmap)
    bpod_t, bpod_pos = training_wheel.get_wheel_position(session_path, display=display)
    data, _ = ephys_fpga.extract_all(session_path)
    # Map Bpod trial starts onto FPGA trial starts to convert timebases
    bpod2fpga = scipy.interpolate.interp1d(
        data['intervals_bpod'][:, 0], data['intervals'][:, 0], fill_value="extrapolate")
    bpod_t = bpod2fpga(bpod_t)
    # resample both traces to the same rate and compute correlation coeff
    tmin = max(np.min(fpga_t), np.min(bpod_t))
    tmax = min(np.max(fpga_t), np.max(bpod_t))
    tscale = np.arange(tmin, tmax, 0.01)
    wheel = {
        'tscale': tscale,
        'fpga': scipy.interpolate.interp1d(fpga_t, fpga_pos)(tscale),
        'bpod': scipy.interpolate.interp1d(bpod_t, bpod_pos)(tscale),
    }
    if display:
        plt.figure()
        plt.plot(fpga_t - bpod2fpga(0), fpga_pos, '*')
        plt.plot(bpod_t - bpod2fpga(0), bpod_pos, '.')
    raw_wheel = {'fpga_t': fpga_t, 'fpga_pos': fpga_pos,
                 'bpod_t': bpod_t, 'bpod_pos': bpod_pos}
    return raw_wheel, wheel
def test_basic_extraction(self, mock_vc, mock_aux):
    """
    Tests extraction of a session without pin state and GPIO files, etc.
    :param mock_vc: A mock OpenCV VideoCapture object for stubbing the video length
    :param mock_aux: A mock object for stubbing the load_embedded_frame_data function
    :return:
    """
    side = 'left'
    # Stub the video frame count and report the capture as open
    mock_vc().get.return_value = self.n_frames[side]
    mock_vc().isOpened.return_value = True
    # Act as though the embedded frame data files don't exist
    mock_aux.return_value = (None, [None] * 4)
    ext = camio.CameraTimestampsFPGA(side, self.session_path)
    sync, chmap = get_main_probe_sync(self.session_path)
    ts, _ = ext.extract(save=False, sync=sync, chmap=chmap)
    # Verify returns unaltered FPGA times. This behaviour will change in the future
    self.assertEqual(ts.size, 255505)
    expected = np.array([0.01363197, 0.03036363, 0.04709529, 0.06382695, 0.08055861])
    np.testing.assert_array_almost_equal(ts[:5], expected)
    # Now test fallback when GPIO or audio data are unusable (i.e. raise an assertion)
    n = 888  # Number of GPIOs (number not important)
    # Random but sorted GPIO frame indices with alternating-ish polarities,
    # starting with a falling edge
    gpio = {'indices': np.sort(np.random.choice(np.arange(self.n_frames[side]), n)),
            'polarities': np.insert(np.random.choice([-1, 1], n - 1), 0, -1)}
    mock_aux.return_value = (np.arange(self.n_frames[side]), [None, None, None, gpio])
    # A CRITICAL log record is expected when the GPIO/audio alignment fails
    with self.assertLogs(logging.getLogger('ibllib'), logging.CRITICAL):
        ts, _ = ext.extract(save=False, sync=sync, chmap=chmap)
    # Should fallback to basic extraction
    np.testing.assert_array_almost_equal(ts[:5], expected)
def _get_passive_spacers(session_path, sync=None, sync_map=None):
    """
    load and get spacer information, do corr to find spacer timestamps
    returns t_passive_starts, t_starts, t_ends
    """
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)
    meta = _load_passive_stim_meta()
    fttl = ephys_fpga._get_sync_fronts(sync, sync_map["frame2ttl"], tmin=None)
    # Template of the spacer TTL pattern, expressed in seconds
    spacer_template = (
        np.array(meta["VISUAL_STIM_0"]["ttl_frame_nums"], dtype=np.float32) / FRAME_FS)
    jitter = 3 / FRAME_FS  # allow for 3 screen refresh as jitter
    t_quiet = meta["VISUAL_STIM_0"]["delay_around"]
    spacer_times, _ = _get_spacer_times(spacer_template=spacer_template, jitter=jitter,
                                        ttl_signal=fttl["times"], t_quiet=t_quiet)

    # Sanity check: the stimulus order encodes spacers as zeros (hardcoded)
    n_exp_spacer = np.sum(np.array(meta["STIM_ORDER"]) == 0)
    if n_exp_spacer != np.size(spacer_times) / 2:
        raise ValueError(f"The number of expected spacer ({n_exp_spacer}) "
                         f"is different than the one found on the raw "
                         f"trace ({np.size(spacer_times)/2})")

    # Append the final sync time so every period has a closing edge
    edges = np.r_[spacer_times.flatten(), sync["times"][-1]]
    return edges[0], edges[1::2], edges[2::2]
def load_raw_data(self):
    """
    Loads the TTLs, raw task data and task settings
    :return:
    """
    self.log.info(f"Loading raw data from {self.session_path}")
    self.type = self.type or get_session_extractor_type(self.session_path)
    self.settings, self.raw_data = raw.load_bpod(self.session_path)
    # Fetch the TTLs for the photodiode and audio
    if self.type != 'ephys' or self.bpod_only is True:
        # Extract from Bpod
        self.frame_ttls, self.audio_ttls = raw.load_bpod_fronts(
            self.session_path, data=self.raw_data)
        return
    # Extract from FPGA
    sync, chmap = ephys_fpga.get_main_probe_sync(self.session_path)

    def channel_events(name):
        """Fetches the polarities and times for a given channel"""
        mask = sync['channels'] == chmap[name]
        return {k: sync[k][mask] for k in ('polarities', 'times')}

    self.frame_ttls, self.audio_ttls, self.bpod_ttls = (
        channel_events(ch) for ch in ('frame2ttl', 'audio', 'bpod'))
def _qc_from_path(sess_path, display=True):
    """
    Run the FPGA task QC for a single session folder.

    Extracts trials from both the FPGA sync and the Bpod data into a temporary
    alf folder, runs the FPGA task QC, and returns everything merged into one
    dataframe.

    :param sess_path: path to the session folder
    :param display: if True, show the behaviour-sync (and optionally wheel) plots
    :return: alf dataframe merging fpga trials, alf trials and the QC results
    """
    WHEEL = False  # wheel comparison disabled by default
    sess_path = Path(sess_path)
    temp_alf_folder = sess_path.joinpath('fpga_test', 'alf')
    temp_alf_folder.mkdir(parents=True, exist_ok=True)
    raw_trials = raw_data_loaders.load_data(sess_path)
    # end of the last trial's exit state plus one minute of padding
    tmax = raw_trials[-1]['behavior_data']['States timestamps']['exit_state'][0][-1] + 60
    sync, chmap = ephys_fpga.get_main_probe_sync(sess_path, bin_exists=False)
    _ = ephys_fpga.extract_all(sess_path, output_path=temp_alf_folder, save=True)
    # check that the output is complete
    fpga_trials = ephys_fpga.extract_behaviour_sync(
        sync, output_path=temp_alf_folder, tmax=tmax, chmap=chmap, save=True,
        display=display)
    # align with the bpod
    bpod2fpga = ephys_fpga.align_with_bpod(temp_alf_folder.parent)
    alf_trials = alf.io.load_object(temp_alf_folder, 'trials')
    # the temporary extraction folder is no longer needed once trials are loaded
    shutil.rmtree(temp_alf_folder)
    # do the QC
    qcs, qct = qc_fpga_task(fpga_trials, alf_trials)
    # do the wheel part
    if WHEEL:
        bpod_wheel = training_wheel.get_wheel_data(sess_path, save=False)
        fpga_wheel = ephys_fpga.extract_wheel_sync(sync, chmap=chmap, save=False)
        if display:
            import matplotlib.pyplot as plt
            t0 = max(np.min(bpod2fpga(bpod_wheel['re_ts'])),
                     np.min(fpga_wheel['re_ts']))
            # vertical offset between the two traces at t0 so they overlay
            dy = np.interp(t0, fpga_wheel['re_ts'], fpga_wheel['re_pos']) - np.interp(
                t0, bpod2fpga(bpod_wheel['re_ts']), bpod_wheel['re_pos'])
            fix, axes = plt.subplots(nrows=2, sharex='all', sharey='all')
            # axes[0].plot(t, pos), axes[0].title.set_text('Extracted')
            axes[0].plot(bpod2fpga(bpod_wheel['re_ts']), bpod_wheel['re_pos'] + dy)
            axes[0].plot(fpga_wheel['re_ts'], fpga_wheel['re_pos'])
            axes[0].title.set_text('FPGA')
            axes[1].plot(bpod2fpga(bpod_wheel['re_ts']), bpod_wheel['re_pos'] + dy)
            axes[1].title.set_text('Bpod')
    return alf.io.dataframe({**fpga_trials, **alf_trials, **qct})
def extract_replay_debug(
    session_path: str,
    sync: dict = None,
    sync_map: dict = None,
    treplay: np.array = None,
    ax: plt.axes = None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Extract the task-replay stimuli while plotting each extraction step for debugging.

    :param session_path: absolute path of the session
    :param sync: dict of sync pulses; fetched from the main probe when None
    :param sync_map: channel map matching the sync dict; fetched when None
    :param treplay: (start, stop) times of the task-replay period; derived from
                    the passive periods table when None
    :param ax: matplotlib axes to plot into; a new figure is created when None
    :return: (passiveGabor_df, passiveStims_df)
    """
    # Load sessions sync channels, map
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)
    # The passive periods table is always needed for plotting; only derive the
    # replay window from it when the caller did not supply one.
    # BUGFIX: the previous version unconditionally recomputed treplay after the
    # None-check, clobbering a caller-supplied value and doing the work twice.
    passivePeriods_df = extract_passive_periods(session_path, sync=sync, sync_map=sync_map)
    if treplay is None:
        treplay = passivePeriods_df.taskReplay.values
    if ax is None:
        f, ax = plt.subplots(1, 1)
    f = ax.figure
    f.suptitle("/".join(str(session_path).split("/")[-5:]))
    plot_sync_channels(sync=sync, sync_map=sync_map, ax=ax)
    plot_passive_periods(passivePeriods_df, ax=ax)

    fttl = ephys_fpga.get_sync_fronts(sync, sync_map["frame2ttl"], tmin=treplay[0])
    passiveGabor_df = _extract_passiveGabor_df(fttl, session_path)
    plot_gabor_times(passiveGabor_df, ax=ax)

    bpod = ephys_fpga.get_sync_fronts(sync, sync_map["bpod"], tmin=treplay[0])
    passiveValve_intervals = _extract_passiveValve_intervals(bpod)
    plot_valve_times(passiveValve_intervals, ax=ax)

    task_version = _load_task_protocol(session_path)
    audio = ephys_fpga.get_sync_fronts(sync, sync_map["audio"], tmin=treplay[0])
    passiveTone_intervals, passiveNoise_intervals = _extract_passiveAudio_intervals(
        audio, task_version)
    plot_audio_times(passiveTone_intervals, passiveNoise_intervals, ax=ax)

    passiveStims_df = np.concatenate(
        [passiveValve_intervals, passiveTone_intervals, passiveNoise_intervals], axis=1
    )
    columns = ["valveOn", "valveOff", "toneOn", "toneOff", "noiseOn", "noiseOff"]
    passiveStims_df = pd.DataFrame(passiveStims_df, columns=columns)
    return (
        passiveGabor_df,
        passiveStims_df,
    )  # _ibl_passiveGabor.table.csv, _ibl_passiveStims.table.csv
def _extract(
    self, sync: dict = None, sync_map: dict = None, plot: bool = False, **kwargs
) -> tuple:
    """
    Extract all passive-protocol datasets for this session.

    :param sync: dict of sync pulses; fetched from the main probe when None
    :param sync_map: channel map matching the sync dict; fetched when None
    :param plot: if True, plot the sync channels and every extracted event type
    :return: tuple (passivePeriods_df, passiveRFM_times, passiveGabor_df,
             passiveStims_df); elements are None when their extraction failed
    """
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(self.session_path, bin_exists=False)
    # Passive periods
    passivePeriods_df = extract_passive_periods(
        self.session_path, sync=sync, sync_map=sync_map
    )
    trfm = passivePeriods_df.RFM.values
    treplay = passivePeriods_df.taskReplay.values
    try:  # RFMapping
        passiveRFM_times = extract_rfmapping(
            self.session_path, sync=sync, sync_map=sync_map, trfm=trfm
        )
    except Exception as e:
        # extraction is best-effort: log and carry on with a None placeholder
        log.error(f"Failed to extract RFMapping datasets: {e}")
        passiveRFM_times = None
    try:
        (passiveGabor_df, passiveStims_df,) = extract_task_replay(
            self.session_path, sync=sync, sync_map=sync_map, treplay=treplay
        )
    except Exception as e:
        log.error(f"Failed to extract task replay stimuli: {e}")
        passiveGabor_df, passiveStims_df = (None, None)
    if plot:
        f, ax = plt.subplots(1, 1)
        f.suptitle("/".join(str(self.session_path).split("/")[-5:]))
        plot_sync_channels(sync=sync, sync_map=sync_map, ax=ax)
        plot_passive_periods(passivePeriods_df, ax=ax)
        plot_rfmapping(passiveRFM_times, ax=ax)
        plot_gabor_times(passiveGabor_df, ax=ax)
        plot_stims_times(passiveStims_df, ax=ax)
        plt.show()
    data = (
        passivePeriods_df,  # _ibl_passivePeriods.intervalsTable.csv
        passiveRFM_times,  # _ibl_passiveRFM.times.npy
        passiveGabor_df,  # _ibl_passiveGabor.table.csv,
        passiveStims_df  # _ibl_passiveStims.table.csv
    )
    # Set save names to None if data not extracted - these will not be saved or registered
    self.save_names = tuple(None if y is None else x for x, y in zip(self.save_names, data))
    return data
def test_extraction(self, mock_vc):
    """
    Mock the VideoCapture class of cv2 so that we can control the number of frames
    :param mock_vc: mocked cv2.VideoCapture class
    :return:
    """
    side = 'left'
    n_frames = self.n_frames[side]  # Number of frames in video
    # Stub the frame count and report the capture as open
    mock_vc().get.return_value = n_frames
    mock_vc().isOpened.return_value = True
    # out = camio.extract_all(session_path, save=False)
    ext = camio.CameraTimestampsFPGA(side, self.session_path)
    sync, chmap = get_main_probe_sync(self.session_path)
    ts, _ = ext.extract(save=False, sync=sync, chmap=chmap)
    # Basic sanity on the extracted timestamps
    self.assertEqual(ts.size, n_frames, 'unexpected size')
    self.assertTrue(not np.isnan(ts).any(), 'nans in timestamps')
    self.assertTrue(np.all(np.diff(ts) > 0), 'timestamps not strictly increasing')
    # Fixture-specific expected values for the first ten timestamps
    expected = np.array([197.76558813, 197.79905145, 197.81578311, 197.83251477,
                         197.84924643, 197.86597809, 197.88270975, 197.89944141,
                         197.91617307, 197.93290473])
    np.testing.assert_array_almost_equal(ts[:10], expected)
    # Test extraction parameters
    ts, _ = ext.extract(save=False, sync=sync, chmap=chmap, display=True,
                        extrapolate_missing=False)
    self.assertEqual(ts.size, n_frames, 'unexpected size')
    self.assertEqual(np.isnan(ts).sum(), 499, 'unexpected number of nans')
    # Verify plots: check each line's label and vertex count on both figures
    figs = [plt.figure(i) for i in plt.get_fignums()]
    lines = figs[0].axes[0].lines
    actual = {ln._label: len(ln._xy) for ln in lines}
    expected = {
        'audio TTLs': 3400,
        'GPIO': 3394,
        'cam times': 255617,
        'assigned audio TTL': 3392
    }
    self.assertEqual(actual, expected, 'unexpected plot')
    lines = figs[1].axes[0].lines
    actual = {ln._label: len(ln._xy) for ln in lines}
    expected = {'GPIO': 255617, 'FPGA timestamps': 255617, 'audio TTL': 5088}
    self.assertEqual(actual, expected, 'unexpected plot')
def load_ttl_pulses(session_path):
    """
    Extract TTL pulses from the frame2ttl sync channel.

    :param session_path: absolute path of a session, i.e. /mnt/data/Subjects/ZM_1887/2019-07-10/001
    :type session_path: str
    :return: TTL pulse polarities and TTL pulse times
    :rtype: tuple of np.ndarray

    NOTE: the docstring previously claimed a single ``np.ndarray`` of times was
    returned; the function has always returned a (polarities, times) tuple.
    """
    from ibllib.io.extractors.ephys_fpga import get_main_probe_sync
    sync, sync_chmap = get_main_probe_sync(session_path, bin_exists=False)
    fr2ttl_ch = sync_chmap['frame2ttl']
    # find times of when ttl polarity changes on fr2ttl channel
    mask = sync['channels'] == fr2ttl_ch  # compute the channel mask once
    sync_pol_ = sync['polarities'][mask]
    sync_times_ = sync['times'][mask]
    return sync_pol_, sync_times_
def extract_passive_periods(
    session_path: str, sync: dict = None, sync_map: dict = None
) -> pd.DataFrame:
    """
    Extract the start/stop times of the passive protocol periods.

    :param session_path: absolute path of the session
    :param sync: dict of sync pulses; fetched from the main probe when None
    :param sync_map: channel map matching the sync dict; fetched when None
    :return: dataframe with rows ["start", "stop"] and one column per period
    """
    if sync is None or sync_map is None:
        sync, sync_map = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)

    t_start_passive, t_starts, t_ends = _get_passive_spacers(
        session_path, sync=sync, sync_map=sync_map
    )
    # Prepend the whole-protocol interval: it spans from the passive start to
    # the end of the last sub-period
    starts = np.insert(t_starts, 0, t_start_passive)
    stops = np.insert(t_ends, 0, t_ends[-1])
    return pd.DataFrame(
        [starts, stops],
        index=["start", "stop"],
        columns=["passiveProtocol", "spontaneousActivity", "RFM", "taskReplay"],
    )  # _ibl_passivePeriods.intervalsTable.csv
def extract_all(session_path, session_type=None, save=True, **kwargs):
    """
    For the IBL ephys task, reads ephys binary file and extract:
        - video time stamps
    :param session_path: '/path/to/subject/yyyy-mm-dd/001'
    :param session_type: the session type to extract, i.e. 'ephys', 'training' or 'biased'.
        If None the session type is inferred from the settings file.
    :param save: Bool, defaults to False
    :param kwargs: parameters to pass to the extractor
    :return: outputs, files
    """
    if session_type is None:
        session_type = get_session_extractor_type(session_path)
    if not session_type or session_type not in _get_task_types_json_config().values():
        raise ValueError(f"Session type {session_type} has no matching extractor")
    elif 'ephys' in session_type:  # assume ephys == FPGA
        labels = assert_valid_label(kwargs.pop('labels', ('left', 'right', 'body')))
        labels = (labels,) if isinstance(labels, str) else labels  # Ensure list/tuple
        # one FPGA timestamps extractor per camera label
        extractor = [partial(CameraTimestampsFPGA, label) for label in labels]
        # fetch the sync pulses unless the caller supplied them; bin_exists is
        # popped here so it is not forwarded to the extractor classes
        if 'sync' not in kwargs:
            kwargs['sync'], kwargs['chmap'] = \
                get_main_probe_sync(session_path, bin_exists=kwargs.pop('bin_exists', False))
    else:  # assume Bpod otherwise
        # labels is popped (and its truthiness asserted) so it is not forwarded
        assert kwargs.pop('labels', 'left'), 'only left camera is currently supported'
        extractor = CameraTimestampsBpod
    outputs, files = run_extractor_classes(
        extractor, session_path=session_path, save=save, **kwargs)
    return outputs, files
def validate_ttl_test(ses_path, display=False):
    """
    For a mock session on the Ephys Choice world task, check the sync channels for all
    device properly connected and perform a synchronization if dual probes to check that
    all channels are recorded properly
    :param ses_path: session path
    :param display: show the probe synchronization plot if several probes
    :return: True if tests pass, errors otherwise
    """

    def _single_test(assertion, str_ok, str_ko):
        # log and return the outcome of a single check
        if assertion:
            _logger.info(str_ok)
            return True
        else:
            _logger.error(str_ko)
            return False

    EXPECTED_RATES_HZ = {'left_camera': 60, 'right_camera': 150, 'body_camera': 30}
    SYNC_RATE_HZ = 1
    MIN_TRIALS_NB = 6

    ok = True
    ses_path = Path(ses_path)
    if not ses_path.exists():
        return False

    # get the synchronization fronts (from the raw binary if necessary)
    ephys_fpga.extract_sync(session_path=ses_path, overwrite=False)
    rawsync, sync_map = ephys_fpga.get_main_probe_sync(ses_path)
    last_time = rawsync['times'][-1]

    # get upgoing fronts for each
    sync = Bunch({})
    for k in sync_map:
        fronts = ephys_fpga._get_sync_fronts(rawsync, sync_map[k])
        sync[k] = fronts['times'][fronts['polarities'] == 1]
    wheel = ephys_fpga.extract_wheel_sync(rawsync, chmap=sync_map)

    frame_rates = {'right_camera': np.round(1 / np.median(np.diff(sync.right_camera))),
                   'left_camera': np.round(1 / np.median(np.diff(sync.left_camera))),
                   'body_camera': np.round(1 / np.median(np.diff(sync.body_camera)))}

    # check the camera frame rates
    for lab in frame_rates:
        expect = EXPECTED_RATES_HZ[lab]
        ok &= _single_test(assertion=abs((1 - frame_rates[lab] / expect)) < 0.1,
                           str_ok=f'PASS: {lab} frame rate: {frame_rates[lab]} = {expect} Hz',
                           str_ko=f'FAILED: {lab} frame rate: {frame_rates[lab]} != {expect} Hz')

    # check that the wheel has a minimum rate of activity on both channels
    re_test = abs(1 - sync.rotary_encoder_1.size / sync.rotary_encoder_0.size) < 0.1
    re_test &= len(wheel[1]) / last_time > 5
    ok &= _single_test(assertion=re_test,
                       str_ok="PASS: Rotary encoder", str_ko="FAILED: Rotary encoder")
    # check that the frame 2 ttls has a minimum rate of activity
    ok &= _single_test(assertion=len(sync.frame2ttl) / last_time > 0.2,
                       str_ok="PASS: Frame2TTL", str_ko="FAILED: Frame2TTL")
    # the audio has to have at least one event per trial
    ok &= _single_test(assertion=len(sync.bpod) > len(sync.audio) > MIN_TRIALS_NB,
                       str_ok="PASS: audio", str_ko="FAILED: audio")
    # the bpod has to have at least twice the amount of min trial pulses
    ok &= _single_test(assertion=len(sync.bpod) > MIN_TRIALS_NB * 2,
                       str_ok="PASS: Bpod", str_ko="FAILED: Bpod")

    try:
        # note: tried to depend as little as possible on the extraction code but for the valve...
        behaviour = ephys_fpga.extract_behaviour_sync(rawsync, chmap=sync_map)
        res = behaviour.valveOpen_times.size > 1
    except AssertionError:
        res = False
    # check that the reward valve is actionned at least once
    ok &= _single_test(assertion=res,
                       str_ok="PASS: Valve open", str_ko="FAILED: Valve open not detected")

    # the imec sync is for 3B Probes only
    if sync.get('imec_sync') is not None:
        ok &= _single_test(assertion=np.all(1 - SYNC_RATE_HZ * np.diff(sync.imec_sync) < 0.1),
                           str_ok="PASS: imec sync", str_ko="FAILED: imec sync")

    # second step is to test that we can make the sync. Assertions are whithin the synch code
    if sync.get('imec_sync') is not None:
        sync_result, _ = sync_probes.version3B(ses_path, display=display)
    else:
        sync_result, _ = sync_probes.version3A(ses_path, display=display)
    ok &= _single_test(assertion=sync_result, str_ok="PASS: synchronisation",
                       str_ko="FAILED: probe synchronizations threshold exceeded")

    if not ok:
        raise ValueError('FAILED TTL test')
    # BUGFIX: this success message used to be logged unconditionally, BEFORE the
    # imec-sync checks, probe synchronization, and the failure check above ran;
    # it is now only emitted once every check has actually passed.
    _logger.info('ALL CHECKS PASSED !')
    return ok
def _task_extraction_assertions(self, session_path):
    """
    Run FPGA trial extraction for a session and assert the outputs are complete
    and internally consistent, then compare Bpod- and FPGA-derived task QC.
    """
    alf_path = session_path.joinpath('alf')
    shutil.rmtree(alf_path, ignore_errors=True)
    # try once without the sync pulses
    trials, out_files = ephys_fpga.FpgaTrials(session_path).extract(save=False)
    # then extract for real
    sync, chmap = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)
    trials, out_files = ephys_fpga.FpgaTrials(session_path).extract(
        save=True, sync=sync, chmap=chmap)
    # check that the output is complete
    for f in BPOD_FILES:
        self.assertTrue(alf_path.joinpath(f).exists())
    # check that the output is complete
    for f in FPGA_FILES:
        self.assertTrue(alf_path.joinpath(f).exists())
    # check dimensions after alf load
    alf_trials = alf.io.load_object(alf_path, 'trials')
    self.assertTrue(alf.io.check_dimensions(alf_trials) == 0)
    # go deeper and check the internal fpga trials structure consistency
    fpga_trials = ephys_fpga.extract_behaviour_sync(sync, chmap)
    # check dimensions
    self.assertEqual(alf.io.check_dimensions(fpga_trials), 0)
    # check that the stimOn < stimFreeze < stimOff
    self.assertTrue(
        np.all(fpga_trials['stimOn_times'][:-1] < fpga_trials['stimOff_times'][:-1]))
    self.assertTrue(
        np.all(fpga_trials['stimFreeze_times'][:-1] < fpga_trials['stimOff_times'][:-1]))
    # a trial is either an error-nogo or a reward: at least one of the two event
    # times is NaN (product is NaN) and exactly one is NaN (xor)
    self.assertTrue(
        np.all(
            np.isnan(fpga_trials['valveOpen_times'][:-1] *
                     fpga_trials['errorCue_times'][:-1])))
    self.assertTrue(
        np.all(
            np.logical_xor(np.isnan(fpga_trials['valveOpen_times'][:-1]),
                           np.isnan(fpga_trials['errorCue_times'][:-1]))))
    # do the task qc
    # tqc_ephys.extractor.settings['PYBPOD_PROTOCOL']
    from ibllib.qc.task_extractors import TaskQCExtractor
    ex = TaskQCExtractor(session_path, lazy=True, one=None, bpod_only=False)
    ex.data = fpga_trials
    ex.extract_data()
    from ibllib.qc.task_metrics import TaskQC
    # '/mnt/s0/Data/IntegrationTests/ephys/ephys_choice_world_task/CSP004/2019-11-27/001'
    tqc_ephys = TaskQC(session_path)
    tqc_ephys.extractor = ex
    _, res_ephys = tqc_ephys.run(bpod_only=False, download_data=False)
    tqc_bpod = TaskQC(session_path)
    _, res_bpod = tqc_bpod.run(bpod_only=True, download_data=False)
    # Bpod and ephys QC metrics should agree within 0.2 for every metric,
    # except the feedback-delay metric which is known to differ
    for k in res_ephys:
        if k == "_task_response_feedback_delays":
            continue
        assert (np.abs(res_bpod[k] - res_ephys[k]) < .2)
    shutil.rmtree(alf_path, ignore_errors=True)
session_path = alf.io.get_session_path(local_paths[0]) # load session fixtures settings = rawio.load_settings(session_path) ses_nb = settings['SESSION_ORDER'][settings['SESSION_IDX']] path_fixtures = Path(ephys_fpga.__file__).parent.joinpath('ephys_sessions') fixture = {'pcs': np.load(path_fixtures.joinpath(f'session_{ses_nb}_passive_pcs.npy')), 'delays': np.load(path_fixtures.joinpath(f'session_{ses_nb}_passive_stimDelays.npy')), 'ids': np.load(path_fixtures.joinpath(f'session_{ses_nb}_passive_stimIDs.npy'))} # load general metadata with open(path_fixtures.joinpath('passive_stim_meta.json'), 'r') as f: meta = json.load(f) t_end_ephys = passive.ephysCW_end(session_path=session_path) # load stimulus sequence sync, sync_map = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False) fpga_sync = ephys_fpga._get_sync_fronts(sync, sync_map['frame2ttl']) fttl = ephys_fpga._get_sync_fronts(sync, sync_map['frame2ttl'], tmin=t_end_ephys) def get_spacers(): """ load and get spacer information, do corr to find spacer timestamps returns t_passive_starts, t_starts, t_ends """ spacer_template = np.array(meta['VISUAL_STIM_0']['ttl_frame_nums'], dtype=np.float32) / FRAME_FS jitter = 3 / FRAME_FS # allow for 3 screen refresh as jitter t_quiet = meta['VISUAL_STIM_0']['delay_around'] spacer_times, _ = passive.get_spacer_times(spacer_template=spacer_template, jitter=jitter, ttl_signal=fttl['times'], t_quiet=t_quiet)
def load_data(self, download_data: bool = None,
              extract_times: bool = False, load_video: bool = True) -> None:
    """Extract the data from raw data files
    Extracts all the required task data from the raw data files.

    Data keys:
        - count (int array): the sequential frame number (n, n+1, n+2...)
        - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
        - audio (float array): timestamps of audio TTL fronts
        - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
        - timestamps (float array): extracted video timestamps (the camera.times ALF)
        - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
        - camera_times (float array): camera frame timestamps extracted from frame headers
        - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
        - video (Bunch): video meta data, including dimensions and FPS
        - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

    :param download_data: if True, any missing raw data is downloaded via ONE.
     Missing data will raise an AssertionError
    :param extract_times: if True, the camera.times are re-extracted from the raw data
    :param load_video: if True, calls the load_video_data method
    """
    assert self.session_path, 'no session path set'
    if download_data is not None:
        self.download_data = download_data
    if self.download_data and self.eid and self.one and not self.one.offline:
        self.ensure_required_data()
    _log.info('Gathering data for QC')

    # Get frame count and pin state
    self.data['count'], self.data['pin_state'] = \
        raw.load_embedded_frame_data(self.session_path, self.label, raw=True)

    # Load the audio and raw FPGA times
    # NOTE: sync/chmap are only defined in the ephys branch; the re-extraction
    # branch below only forwards them for ephys sessions, matching this.
    if self.type == 'ephys':
        sync, chmap = ephys_fpga.get_main_probe_sync(self.session_path)
        audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
        self.data['audio'] = audio_ttls['times']  # Get rises
        # Load raw FPGA times
        cam_ts = extract_camera_sync(sync, chmap)
        self.data['fpga_times'] = cam_ts[self.label]
    else:
        bpod_data = raw.load_data(self.session_path)
        _, audio_ttls = raw.load_bpod_fronts(self.session_path, bpod_data)
        self.data['audio'] = audio_ttls['times']

    # Load extracted frame times; the assert-not-extract_times trick forces the
    # re-extraction path via AssertionError when extract_times is requested
    alf_path = self.session_path / 'alf'
    try:
        assert not extract_times
        self.data['timestamps'] = alfio.load_object(
            alf_path, f'{self.label}Camera', short_keys=True)['times']
    except AssertionError:  # Re-extract
        kwargs = dict(video_path=self.video_path, labels=self.label)
        if self.type == 'ephys':
            kwargs = {**kwargs, 'sync': sync, 'chmap': chmap}  # noqa
        outputs, _ = extract_all(self.session_path, self.type, save=False, **kwargs)
        self.data['timestamps'] = outputs[f'{self.label}_camera_timestamps']
    except ALFObjectNotFound:
        _log.warning('no camera.times ALF found for session')

    # Get audio and wheel data
    wheel_keys = ('timestamps', 'position')
    try:
        self.data['wheel'] = alfio.load_object(alf_path, 'wheel', short_keys=True)
    except ALFObjectNotFound:
        # Extract from raw data
        if self.type == 'ephys':
            wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
        else:
            wheel_data = training_wheel.get_wheel_position(self.session_path)
        self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

    # Find short period of wheel motion for motion correlation.
    if data_for_keys(wheel_keys, self.data['wheel']) \
            and self.data['timestamps'] is not None:
        self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

    # Load Bonsai frame timestamps
    try:
        ssv_times = raw.load_camera_ssv_times(self.session_path, self.label)
        self.data['bonsai_times'], self.data['camera_times'] = ssv_times
    except AssertionError:
        _log.warning('No Bonsai video timestamps file found')

    # Gather information from video file
    if load_video:
        _log.info('Inspecting video file...')
        self.load_video_data()
def _task_extraction_assertions(self, session_path):
    """
    Run the full FPGA extraction pipeline for a session and assert the outputs
    are complete and internally consistent, then compare Bpod- vs FPGA-derived
    task QC metrics.
    """
    alf_path = session_path.joinpath('alf')
    shutil.rmtree(alf_path, ignore_errors=True)
    # this gets the full output
    ephys_fpga.extract_all(session_path, save=True, bin_exists=False)
    # check that the output is complete
    for f in BPOD_FILES:
        self.assertTrue(alf_path.joinpath(f).exists())
    # check that the output is complete
    for f in FPGA_FILES:
        self.assertTrue(alf_path.joinpath(f).exists())
    # check dimensions after alf load
    alf_trials = alf.io.load_object(alf_path, 'trials')
    self.assertTrue(alf.io.check_dimensions(alf_trials) == 0)
    # go deeper and check the internal fpga trials structure consistency
    sync, chmap = ephys_fpga.get_main_probe_sync(session_path, bin_exists=False)
    fpga_trials = ephys_fpga.extract_behaviour_sync(sync, chmap)
    # check dimensions
    self.assertEqual(alf.io.check_dimensions(fpga_trials), 0)
    # check that the stimOn < stimFreeze < stimOff
    self.assertTrue(
        np.all(fpga_trials['stimOn_times'][:-1] < fpga_trials['stimOff_times'][:-1]))
    self.assertTrue(
        np.all(fpga_trials['stimFreeze_times'][:-1] < fpga_trials['stimOff_times'][:-1]))
    # a trial is either an error-nogo or a reward: at least one of the two event
    # times is NaN (product is NaN) and exactly one is NaN (xor)
    self.assertTrue(
        np.all(
            np.isnan(fpga_trials['valveOpen_times'][:-1] *
                     fpga_trials['errorCue_times'][:-1])))
    self.assertTrue(
        np.all(
            np.logical_xor(np.isnan(fpga_trials['valveOpen_times'][:-1]),
                           np.isnan(fpga_trials['errorCue_times'][:-1]))))
    # do the task qc
    # tqc_ephys.extractor.settings['PYBPOD_PROTOCOL']
    from ibllib.qc.task_extractors import TaskQCExtractor
    ex = TaskQCExtractor(session_path, lazy=True, one=None, bpod_only=False)
    ex.data = fpga_trials
    ex.extract_data()
    from ibllib.qc.task_metrics import TaskQC
    # '/mnt/s0/Data/IntegrationTests/ephys/ephys_choice_world_task/CSP004/2019-11-27/001'
    tqc_ephys = TaskQC(session_path)
    tqc_ephys.extractor = ex
    _, res_ephys = tqc_ephys.run(bpod_only=False, download_data=False)
    tqc_bpod = TaskQC(session_path)
    _, res_bpod = tqc_bpod.run(bpod_only=True, download_data=False)
    # for a swift comparison using variable explorer
    # import pandas as pd
    # df = pd.DataFrame([[res_bpod[k], res_ephys[k]] for k in res_ephys], index=res_ephys.keys())
    # All metrics (except the known-divergent feedback-delay one) must agree
    # within 0.2 between the Bpod and ephys extractions; mismatches are printed
    # before the final assert so every offender is reported
    ok = True
    for k in res_ephys:
        if k == "_task_response_feedback_delays":
            continue
        if (np.abs(res_bpod[k] - res_ephys[k]) > .2):
            ok = False
            print(f"{k} bpod: {res_bpod[k]}, ephys: {res_ephys[k]}")
    assert ok
    shutil.rmtree(alf_path, ignore_errors=True)