def _clean_frame2ttl(frame2ttl, display=False):
    """
    Remove spurious frame2ttl pulses.

    Frame2ttl calibration can be unstable and the fronts may be flickering at an unrealistic
    pace.  This removes the consecutive frame2ttl pulses happening too fast, below a threshold
    of F2TTL_THRESH seconds.

    :param frame2ttl: dict with 'times' and 'polarities' numpy arrays of detected fronts
    :param display: if True, plots the raw and cleaned traces and a histogram of the intervals
    :return: dict with cleaned 'times' and 'polarities' arrays
    """
    dt = np.diff(frame2ttl['times'])
    # a falling front followed too quickly by the next front is considered flicker
    iko = np.where(np.logical_and(dt < F2TTL_THRESH, frame2ttl['polarities'][:-1] == -1))[0]
    # remove both edges of each spurious pulse
    iko = np.unique(np.r_[iko, iko + 1])
    frame2ttl_ = {'times': np.delete(frame2ttl['times'], iko),
                  'polarities': np.delete(frame2ttl['polarities'], iko)}
    if iko.size > (0.1 * frame2ttl['times'].size):
        # more than 10% of fronts removed: warn.  Bug fix: the percentage was previously
        # logged with full float precision (e.g. 12.345678901234 %); format to 2 decimals.
        _logger.warning(f'{iko.size} ({iko.size / frame2ttl["times"].size * 100:.2f} %) '
                        f'frame to TTL polarity switches below {F2TTL_THRESH} secs')
    if display:  # pragma: no cover
        from ibllib.plots import squares
        plt.figure()
        squares(frame2ttl['times'] * 1000, frame2ttl['polarities'], yrange=[0.1, 0.9])
        squares(frame2ttl_['times'] * 1000, frame2ttl_['polarities'], yrange=[1.1, 1.9])
        import seaborn as sns
        sns.displot(dt[dt < 0.05], binwidth=0.0005)
    return frame2ttl_
def plot_sync_channels(sync, sync_map, ax=None):
    """
    Overlay the frame2ttl, audio and bpod sync pulse trains on a single axes.

    :param sync: dict of sync fronts ('times', 'channels', 'polarities')
    :param sync_map: dict mapping device names to sync channel indices
    :param ax: optional matplotlib axes; a new figure is created when None
    """
    if ax is None:
        _, ax = plt.subplots(1, 1)
    # one horizontal band per device, stacked vertically
    for offset, name in enumerate(["frame2ttl", "audio", "bpod"]):
        fronts = ephys_fpga.get_sync_fronts(sync, sync_map[name])  # , tmin=t_start_passive)
        squares(fronts["times"], fronts["polarities"],
                yrange=[0.1 + offset, 0.9 + offset], color="k", ax=ax)
def _assign_events_audio(audio_t, audio_polarities, return_indices=False, display=False): """ From detected fronts on the audio sync traces, outputs the synchronisation events related to tone in :param audio_t: numpy vector containing times of fronts :param audio_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall) :param return_indices (False): returns indices of tones :param display (False): for debug mode, displays the raw fronts overlaid with detections :return: numpy arrays t_ready_tone_in, t_error_tone_in :return: numpy arrays ind_ready_tone_in, ind_error_tone_in if return_indices=True """ # make sure that there are no 2 consecutive fall or consecutive rise events assert (np.all(np.abs(np.diff(audio_polarities)) == 2)) # take only even time differences: ie. from rising to falling fronts dt = np.diff(audio_t) # detect ready tone by length below 110 ms i_ready_tone_in = np.where( np.logical_and(dt <= 0.11, audio_polarities[:-1] == 1))[0] t_ready_tone_in = audio_t[i_ready_tone_in] # error tones are events lasting from 400ms to 1200ms i_error_tone_in = np.where( np.logical_and(np.logical_and(0.4 < dt, dt < 1.2), audio_polarities[:-1] == 1))[0] t_error_tone_in = audio_t[i_error_tone_in] if display: # pragma: no cover from ibllib.plots import squares, vertical_lines squares( audio_t, audio_polarities, yrange=[-1, 1], ) vertical_lines(t_ready_tone_in, ymin=-.8, ymax=.8) vertical_lines(t_error_tone_in, ymin=-.8, ymax=.8) if return_indices: return t_ready_tone_in, t_error_tone_in, i_ready_tone_in, i_error_tone_in else: return t_ready_tone_in, t_error_tone_in
def _clean_audio(audio, display=False): """ one guy wired the 150 Hz camera output onto the soundcard. The effect is to get 150 Hz periodic square pulses, 2ms up and 4.666 ms down. When this happens we remove all of the intermediate pulses to repair the audio trace Here is some helper code dd = np.diff(audio['times']) 1 / np.median(dd[::2]) # 2ms up 1 / np.median(dd[1::2]) # 4.666 ms down 1 / (np.median(dd[::2]) + np.median(dd[1::2])) # both sum to 150 Hx This only runs on sessions when the bug is detected and leaves others untouched """ DISCARD_THRESHOLD = 0.01 average_150_hz = np.mean( 1 / np.diff(audio['times'][audio['polarities'] == 1]) > 140) naudio = audio['times'].size if average_150_hz > 0.7 and naudio > 100: _logger.warning( "Soundcard signal on FPGA seems to have been mixed with 150Hz camera" ) keep_ind = np.r_[np.diff(audio['times']) > DISCARD_THRESHOLD, False] keep_ind = np.logical_and(keep_ind, audio['polarities'] == -1) keep_ind = np.where(keep_ind)[0] keep_ind = np.sort(np.r_[0, keep_ind, keep_ind + 1, naudio - 1]) if display: # pragma: no cover from ibllib.plots import squares squares(audio['times'], audio['polarities'], ax=None, yrange=[-1, 1]) squares(audio['times'][keep_ind], audio['polarities'][keep_ind], yrange=[-1, 1]) audio = { 'times': audio['times'][keep_ind], 'polarities': audio['polarities'][keep_ind] } return audio
def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first', min_diff=0.):
    """
    Align the GPIO pin state to the FPGA audio TTLs.  Any audio TTLs not reflected in the
    pin state are removed from the dict and the times of the detected fronts are converted
    to FPGA time.  At the end of this the number of GPIO fronts should equal the number of
    audio fronts.

    Note:
      - This function is ultra safe: we probably don't need assign all the ups and down
        fronts separately and could potentially even align the timestamps without removing
        the missed fronts
      - The input gpio and audio dicts may be modified by this function
      - For training sessions the frame rate is only 30Hz and the TTLs tend to be broken up
        by small gaps.  Setting the min_diff to 5ms helps the timestamp assignment accuracy.

    :param gpio: dict of GPIO pin state fronts with 'indices' and 'polarities' arrays
    :param audio: dict of FPGA audio TTLs (see ibllib.io.extractors.ephys_fpga._get_sync_fronts)
    :param ts: camera frame times
    :param tolerance: two pulses need to be within this many seconds to be considered related
    :param take: If 'first' the first value within tolerance is assigned; if 'nearest' the
        closest value is assigned
    :param display: If true, the resulting timestamps are plotted against the raw audio signal
    :param min_diff: Audio TTL fronts less than min_diff seconds apart will be removed
    :returns: dict of GPIO FPGA front indices, polarities and FPGA aligned times
    :returns: audio times and polarities sans the TTLs not detected in the frame data
    :returns: frame times in FPGA time
    """
    # Check that the dimensions match: drop GPIO events past the end of the timestamps
    if np.any(gpio['indices'] >= ts.size):
        _logger.warning('GPIO events occurring beyond timestamps array length')
        keep = gpio['indices'] < ts.size
        gpio = {k: gpio[k][keep] for k, v in gpio.items()}
    # sanity checks on the inputs before attempting any assignment
    assert audio and audio['times'].size > 0, 'no audio TTLs for session'
    assert audio['times'].size == audio['polarities'].size, 'audio data dimension mismatch'
    # make sure that there are no 2 consecutive fall or consecutive rise events
    assert np.all(np.abs(np.diff(audio['polarities'])) == 2), 'consecutive high/low audio events'
    # make sure first TTL is high
    assert audio['polarities'][0] == 1
    # make sure audio times in order
    assert np.all(np.diff(audio['times']) > 0)
    # make sure raw timestamps increase
    assert np.all(np.diff(ts) > 0), 'timestamps must strictly increase'
    # make sure there are state changes
    assert gpio['indices'].any(), 'no TTLs detected in GPIO'
    # # make sure first GPIO state is high
    assert gpio['polarities'][0] == 1
    """
    Some audio TTLs appear to be so short that they are not recorded by the camera.  These can
    be as short as a few microseconds.  Applying a cutoff based on framerate was unsuccessful.
    Assigning each audio TTL to each pin state change is not easy because some onsets occur very
    close together (sometimes < 70ms), on the order of the delay between TTL and frame time.
    Also, the two clocks have some degree of drift, so the delay between audio TTL and pin state
    change may be zero or even negative.

    Here we split the events into audio onsets (lo->hi) and audio offsets (hi->lo).  For each
    uptick in the GPIO pin state, we take the first audio onset time that was within 100ms of it.
    We ensure that each audio TTL is assigned only once, so a TTL that is closer to frame 3 than
    frame 1 may still be assigned to frame 1.
    """
    ifronts = gpio['indices']  # The pin state flips
    audio_times = audio['times']
    if ifronts.size != audio['times'].size:
        _logger.warning('more audio TTLs than GPIO state changes, assigning timestamps')
        to_remove = np.zeros(ifronts.size, dtype=bool)  # unassigned GPIO fronts to remove
        low2high = ifronts[gpio['polarities'] == 1]
        high2low = ifronts[gpio['polarities'] == -1]
        assert low2high.size >= high2low.size

        # Remove and/or fuse short TTLs (both edges of each too-short pulse are deleted)
        if min_diff > 0:
            short, = np.where(np.diff(audio['times']) < min_diff)
            audio_times = np.delete(audio['times'], np.r_[short, short + 1])
            _logger.debug(f'Removed {short.size * 2} fronts TLLs less than '
                          f'{min_diff * 1e3:.0f}ms apart')
            assert audio_times.size > 0, f'all audio TTLs less than {min_diff}s'

        # Onsets
        ups = ts[low2high] - ts[low2high][0]  # times relative to first GPIO high
        onsets = audio_times[::2] - audio_times[0]  # audio times relative to first onset
        # assign GPIO fronts to audio onset
        # NOTE(review): attribute_times appears to return, for each element of `ups`, the index
        # of the matched onset or -1 when none is within tolerance (inferred from the `== -1`
        # checks below) — confirm against its docstring
        assigned = attribute_times(onsets, ups, tol=tolerance, take=take)
        unassigned = np.setdiff1d(np.arange(onsets.size), assigned[assigned > -1])
        if unassigned.size > 0:
            _logger.debug(f'{unassigned.size} audio TTL rises were not detected by the camera')
        # Check that all pin state upticks could be attributed to an onset TTL
        missed = assigned == -1
        if np.any(missed):  # if np.any(missed := assigned == -1):  # py3.8
            _logger.warning(f'{sum(missed)} pin state rises could '
                            f'not be attributed to an audio TTL')
        if display:
            ax = plt.subplot()
            vertical_lines(ups[assigned > -1],
                           linestyle='-', color='g', ax=ax,
                           label='assigned GPIO up state')
            vertical_lines(ups[missed],
                           linestyle='-', color='r', ax=ax,
                           label='unassigned GPIO up state')
            vertical_lines(onsets[unassigned],
                           linestyle=':', color='k', ax=ax,
                           alpha=0.3, label='audio onset')
            vertical_lines(onsets[assigned],
                           linestyle=':', color='b', ax=ax,
                           label='assigned audio onset')
            plt.legend()
            plt.show()
        # Remove the missed fronts
        to_remove = np.in1d(gpio['indices'], low2high[missed])
        assigned = assigned[~missed]
        onsets_ = audio_times[::2][assigned]

        # Offsets (same procedure as onsets, on the hi->lo fronts)
        downs = ts[high2low] - ts[high2low][0]
        offsets = audio_times[1::2] - audio_times[1]
        assigned = attribute_times(offsets, downs, tol=tolerance, take=take)
        unassigned = np.setdiff1d(np.arange(offsets.size), assigned[assigned > -1])
        if unassigned.size > 0:
            _logger.debug(f'{unassigned.size} audio TTL falls were not detected by the camera')
        # Check that all pin state downticks could be attributed to an offset TTL
        missed = assigned == -1
        if np.any(missed):  # if np.any(missed := assigned == -1):  # py3.8
            _logger.warning(f'{sum(missed)} pin state falls could '
                            f'not be attributed to an audio TTL')
        # Remove the missed fronts
        to_remove |= np.in1d(gpio['indices'], high2low[missed])
        assigned = assigned[~missed]
        offsets_ = audio_times[1::2][assigned]

        # Audio groomed
        if np.any(to_remove):
            # Check for any orphaned fronts (only one pin state edge was assigned)
            to_remove = np.pad(to_remove, (0, to_remove.size % 2), 'edge')  # Ensure even size
            # Perform xor to find GPIOs where only onset or offset is marked for removal
            orphaned = to_remove.reshape(-1, 2).sum(axis=1) == 1
            if orphaned.any():
                """If there are orphaned GPIO fronts (i.e. only one edge was assigned to an
                audio front), remove the orphaned front its assigned audio TTL. In other words
                if both edges cannot be assigned to an audio TTL, we ignore the TTL entirely.
                This is a sign that the assignment was bad and extraction may fail."""
                _logger.warning('Some onsets but not offsets (or vice versa) were not assigned; '
                                'this may be a sign of faulty wiring or clock drift')
                # Find indices of GPIO upticks where only the downtick was marked for removal
                orphaned_onsets, = np.where(~to_remove.reshape(-1, 2)[:, 0] & orphaned)
                # The onsets_ array already has the other TTLs removed (same size as to_remove ==
                # False) so subtract the number of removed elements from index.
                for i, v in enumerate(orphaned_onsets):
                    orphaned_onsets[i] -= to_remove.reshape(-1, 2)[:v, 0].sum()
                # Same for offsets...
                orphaned_offsets, = np.where(~to_remove.reshape(-1, 2)[:, 1] & orphaned)
                for i, v in enumerate(orphaned_offsets):
                    orphaned_offsets[i] -= to_remove.reshape(-1, 2)[:v, 1].sum()
                # Remove orphaned audio onsets and offsets
                onsets_ = np.delete(onsets_, orphaned_onsets[orphaned_onsets < onsets_.size])
                offsets_ = np.delete(offsets_, orphaned_offsets[orphaned_offsets < offsets_.size])
                _logger.debug(f'{orphaned.sum()} orphaned TTLs removed')
                to_remove.reshape(-1, 2)[orphaned] = True

            # Remove those unassigned GPIOs
            gpio = {k: v[~to_remove[:v.size]] for k, v in gpio.items()}
            ifronts = gpio['indices']

            # Assert that we've removed discrete TTLs
            # A failure means e.g. an up-going front of one TTL was missed
            # but not the down-going one.
            assert np.all(np.abs(np.diff(gpio['polarities'])) == 2)
            assert gpio['polarities'][0] == 1

        # rebuild the audio dict from the (possibly pruned) assigned on/offsets
        audio_ = {'times': np.empty(ifronts.size), 'polarities': gpio['polarities']}
        audio_['times'][::2] = onsets_
        audio_['times'][1::2] = offsets_
    else:
        audio_ = audio

    # Align the frame times to FPGA
    fcn_a2b, drift_ppm = dsp.sync_timestamps(ts[ifronts], audio_['times'])
    _logger.debug(f'frame audio alignment drift = {drift_ppm:.2f}ppm')
    # Add times to GPIO dict
    gpio['times'] = fcn_a2b(ts[ifronts])

    if display:
        # Plot all the onsets and offsets
        ax = plt.subplot()
        # All Audio TTLS
        squares(audio['times'], audio['polarities'],
                ax=ax, label='audio TTLs', linestyle=':', color='k', yrange=[0, 1], alpha=0.3)
        # GPIO
        x = np.insert(gpio['times'], 0, 0)
        y = np.arange(x.size) % 2
        squares(x, y, ax=ax, label='GPIO')
        y = within_ranges(np.arange(ts.size), ifronts.reshape(-1, 2))  # 0 or 1 for each frame
        ax.plot(fcn_a2b(ts), y, 'kx', label='cam times')
        # Assigned audio
        squares(audio_['times'], audio_['polarities'],
                ax=ax, label='assigned audio TTL', linestyle=':', color='g', yrange=[0, 1])
        ax.legend()
        plt.xlabel('FPGA time (s)')
        ax.set_yticks([0, 1])
        ax.set_title('GPIO - audio TTL alignment')
        plt.show()

    return gpio, audio_, fcn_a2b(ts)
def extract_behaviour_sync(sync, chmap=None, display=False, bpod_trials=None, tmax=np.inf):
    """
    Extract trial events from the sync fronts dictionary.

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for
        all 16 chans
    :param chmap: dictionary containing channel indices. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :param display: bool or matplotlib axes: show the full session sync pulses display
        defaults to False
    :param bpod_trials: trials dict from the Bpod extraction, used to recover trial starts
        missed on the FPGA trace; calling without it is deprecated
    :param tmax: only consider sync fronts up to this time
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException(
            'No Bpod event found in FPGA. No behaviour extraction. '
            'Check channel maps.')
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
    frame2ttl = _clean_frame2ttl(frame2ttl)
    audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(
        bpod['times'], bpod['polarities'])
    # one issue is that sometimes bpod pulses may not have been detected, in this case
    # perform the sync bpod/FPGA, and add the start that have not been detected
    if bpod_trials:
        bpod_start = bpod_trials['intervals_bpod'][:, 0]
        fcn, drift, ibpod, ifpga = dsp.utils.sync_timestamps(
            bpod_start, t_trial_start, return_indices=True)
        # if it's drifting too much
        if drift > 200 and bpod_start.size != t_trial_start.size:
            raise err.SyncBpodFpgaException("sync cluster f*ck")
        # convert the bpod starts with no FPGA counterpart to FPGA time and merge them in
        missing_bpod = fcn(bpod_start[np.setxor1d(ibpod, np.arange(len(bpod_start)))])
        t_trial_start = np.sort(np.r_[t_trial_start, missing_bpod])
    else:
        _logger.warning("Deprecation Warning: calling FPGA trials extraction without a bpod"
                        " trials dictionary will result in an error.")
    t_ready_tone_in, t_error_tone_in = _assign_events_audio(
        audio['times'], audio['polarities'])
    # assign each detected event to its trial, keyed by the trial start times
    trials = Bunch({
        'goCue_times': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'errorCue_times': _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valveOpen_times': _assign_events_to_trial(t_trial_start, t_valve_open),
        'stimFreeze_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
        'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
        'stimOff_times': _assign_events_to_trial(t_trial_start, frame2ttl['times']),
        'itiIn_times': _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valveOpen_times'])
    ind_err = np.isnan(trials['valveOpen_times'])
    trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]
    if display:
        width = 0.5
        ymax = 5
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
        plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
        plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
        plots.squares(audio['times'], audio['polarities'] * 0.4 + 3, ax=ax, color='k')
        plots.squares(r0['times'], r0['polarities'] * 0.4 + 4, ax=ax, color='k')
        plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=ymax,
                             ax=ax, label='goCue_times', color='b', linewidth=width)
        plots.vertical_lines(t_trial_start, ymin=0, ymax=ymax,
                             ax=ax, label='start_trial', color='m', linewidth=width)
        plots.vertical_lines(t_error_tone_in, ymin=0, ymax=ymax,
                             ax=ax, label='error tone', color='r', linewidth=width)
        plots.vertical_lines(t_valve_open, ymin=0, ymax=ymax,
                             ax=ax, label='valveOpen_times', color='g', linewidth=width)
        plots.vertical_lines(trials['stimFreeze_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stimFreeze_times', color='y', linewidth=width)
        plots.vertical_lines(trials['stimOff_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stim off', color='c', linewidth=width)
        plots.vertical_lines(trials['stimOn_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stimOn_times', color='tab:orange', linewidth=width)
        c = _get_sync_fronts(sync, chmap['left_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['right_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['body_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_yticks([0, 1, 2, 3, 4, 5])
        ax.set_ylim([0, 5])
    return trials
def extract_behaviour_sync(sync, output_path=None, save=False, chmap=None, display=False,
                           tmax=np.inf):
    """
    Extract trial events from the sync fronts dictionary, optionally saving them to disk.

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for
        all 16 chans
    :param output_path: where to save the data
    :param save: True/False
    :param chmap: dictionary containing channel indices. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :param display: bool or matplotlib axes: show the full session sync pulses display
        defaults to False
    :param tmax: only consider sync fronts up to this time
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException(
            'No Bpod event found in FPGA. No behaviour extraction. '
            'Check channel maps.')
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
    audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _bpod_events_extraction(
        bpod['times'], bpod['polarities'])
    t_ready_tone_in, t_error_tone_in = _audio_events_extraction(
        audio['times'], audio['polarities'])
    # stim off time is the first frame2ttl rise/fall after the trial start
    # does not apply for 1st trial
    ind = np.searchsorted(frame2ttl['times'], t_iti_in, side='left')
    # clip indices so the last/first trial cannot index out of bounds
    t_stim_off = frame2ttl['times'][np.minimum(ind, frame2ttl.times.size - 1)]
    t_stim_freeze = frame2ttl['times'][np.maximum(ind - 1, 0)]
    # stimOn_times: first fram2ttl change after trial start
    trials = Bunch({
        'ready_tone_in': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'error_tone_in': _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valve_open': _assign_events_to_trial(t_trial_start, t_valve_open),
        'stim_freeze': _assign_events_to_trial(t_trial_start, t_stim_freeze),
        'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
        'stimOff_times': _assign_events_to_trial(t_trial_start, t_stim_off),
        'iti_in': _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # goCue_times corresponds to the tone_in event
    trials['goCue_times'] = np.copy(trials['ready_tone_in'])
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valve_open'])
    ind_err = np.isnan(trials['valve_open'])
    trials['feedback_times'][ind_err] = trials['error_tone_in'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['iti_in']]
    if display:
        width = 0.5
        ymax = 5
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
        plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
        plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
        plots.squares(audio['times'], audio['polarities'] * 0.4 + 3, ax=ax, color='k')
        plots.squares(r0['times'], r0['polarities'] * 0.4 + 4, ax=ax, color='k')
        plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=ymax,
                             ax=ax, label='ready tone in', color='b', linewidth=width)
        plots.vertical_lines(t_trial_start, ymin=0, ymax=ymax,
                             ax=ax, label='start_trial', color='m', linewidth=width)
        plots.vertical_lines(t_error_tone_in, ymin=0, ymax=ymax,
                             ax=ax, label='error tone', color='r', linewidth=width)
        plots.vertical_lines(t_valve_open, ymin=0, ymax=ymax,
                             ax=ax, label='valve open', color='g', linewidth=width)
        plots.vertical_lines(t_stim_freeze, ymin=0, ymax=ymax,
                             ax=ax, label='stim freeze', color='y', linewidth=width)
        plots.vertical_lines(t_stim_off, ymin=0, ymax=ymax,
                             ax=ax, label='stim off', color='c', linewidth=width)
        plots.vertical_lines(trials['stimOn_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stim on', color='tab:orange', linewidth=width)
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_ylim([0, 5])
    if save and output_path:
        output_path = Path(output_path)
        np.save(output_path / '_ibl_trials.goCue_times.npy', trials['goCue_times'])
        np.save(output_path / '_ibl_trials.stimOn_times.npy', trials['stimOn_times'])
        np.save(output_path / '_ibl_trials.intervals.npy', trials['intervals'])
        np.save(output_path / '_ibl_trials.feedback_times.npy', trials['feedback_times'])
    return trials
# Compare the passive-stimulus fixture delays against the measured Gabor pulse times.
# Fixture times are the cumulative saved delays, subset to Gabor ('G') and valve ('V') events.
gabor_fixtures = np.cumsum(tdelays)[igabor]
valve_fixtures = np.cumsum(tdelays)[np.where(fixture['ids'] == 'V')[0]]
# measured inter-Gabor intervals, from every other front (pulse onsets)
gb_diff_ts = np.diff(t_gabor[0::2])
# the saved delays and the measured intervals should be strongly correlated
pearson_r = np.corrcoef(np.diff(gabor_fixtures), gb_diff_ts)[1, 0]
assert pearson_r > 0.95
DEBUG_PLOTS = True
if DEBUG_PLOTS:
    # plots for debug
    # offset aligning fixture times onto the FPGA clock, estimated from the valve events
    t0 = np.median(t_valve_open - valve_fixtures)
    from ibllib.plots import squares, vertical_lines, color_cycle
    import matplotlib.pyplot as plt
    pl, ax = plt.subplots(2, 1)
    for i, lab in enumerate(['frame2ttl', 'audio', 'bpod']):
        sy = ephys_fpga._get_sync_fronts(sync, sync_map[lab], tmin=t_start_passive)
        squares(sy['times'], sy['polarities'], yrange=[0.1 + i, 0.9 + i], color='k', ax=ax[0])
    vertical_lines(np.r_[t_start_passive, t_starts, t_ends],
                   ymin=-1, ymax=4, color=color_cycle(0), ax=ax[0], label='spacers')
    vertical_lines(gabor_fixtures + t0,
                   ymin=-1, ymax=4, color=color_cycle(1), ax=ax[0], label='fixtures gabor')
    vertical_lines(t_valve_open,
                   ymin=-1, ymax=4, color=color_cycle(2), ax=ax[0], label='valve')
    vertical_lines(valve_fixtures + t0,
                   ymin=-1, ymax=4, color=color_cycle(2), ax=ax[0], linestyle='--',
                   label='fixtures valve')
    ax[0].legend()
    ax[1].plot([0, 3], [0, 3], linewidth=2.0)
    # plt.plot(diff_delays, gb_diff_ts, '.')
    plt.xlabel('saved delays diff [s]')
    plt.ylabel('measured times diff [s]')
def extract_behaviour_sync(sync, output_path=None, save=False, chmap=None):
    """
    Extract trial events from the sync fronts dictionary (task protocol version 4).

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for
        all 16 chans
    :param output_path: where to save the data
    :param save: True/False
    :param chmap: dictionary containing channel indices. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'])
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'])
    audio = _get_sync_fronts(sync, chmap['audio'])
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _bpod_events_extraction(
        bpod['times'], bpod['polarities'])
    t_ready_tone_in, t_error_tone_in = _audio_events_extraction(
        audio['times'], audio['polarities'])
    # stim off time is the first frame2ttl rise/fall after the trial start
    # does not apply for 1st trial
    ind = np.searchsorted(frame2ttl['times'], t_iti_in, side='left')
    t_stim_off = frame2ttl['times'][ind]
    t_stim_freeze = frame2ttl['times'][ind - 1]
    if DEBUG_PLOTS:
        plt.figure()
        ax = plt.gca()
        plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1,
                      ax=ax, label='bpod=1', color='k')
        plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2,
                      ax=ax, label='frame2ttl=2', color='k')
        plots.squares(audio['times'], audio['polarities'] * 0.4 + 3,
                      ax=ax, label='audio=3', color='k')
        plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=4,
                             ax=ax, label='ready tone in', color='b', linewidth=0.5)
        plots.vertical_lines(t_trial_start, ymin=0, ymax=4,
                             ax=ax, label='start_trial', color='m', linewidth=0.5)
        plots.vertical_lines(t_error_tone_in, ymin=0, ymax=4,
                             ax=ax, label='error tone', color='r', linewidth=0.5)
        plots.vertical_lines(t_valve_open, ymin=0, ymax=4,
                             ax=ax, label='valve open', color='g', linewidth=0.5)
        plots.vertical_lines(t_stim_freeze, ymin=0, ymax=4,
                             ax=ax, label='stim freeze', color='y', linewidth=0.5)
        plots.vertical_lines(t_stim_off, ymin=0, ymax=4,
                             ax=ax, label='stim off', color='c', linewidth=0.5)
        ax.legend()
    # stimOn_times: first fram2ttl change after trial start
    trials = {
        'ready_tone_in': _assign_events_to_trial(t_trial_start, t_ready_tone_in),
        'error_tone_in': _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valve_open': _assign_events_to_trial(t_trial_start, t_valve_open),
        'stim_freeze': _assign_events_to_trial(t_trial_start, t_stim_freeze),
        'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
        'iti_in': _assign_events_to_trial(t_trial_start, t_iti_in)
    }
    # goCue_times corresponds to the tone_in event
    trials['goCue_times'] = trials['ready_tone_in']
    # response_times is TONE_IN to STIM freeze
    trials['response_times'] = trials['stim_freeze'] - trials['ready_tone_in']
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = trials['valve_open']
    ind_err = np.isnan(trials['valve_open'])
    trials['feedback_times'][ind_err] = trials['error_tone_in'][ind_err]
    # # # # this is specific to version 4
    # iti_in is reconstructed from the feedback: 1 s after reward, 2 s after an error
    trials['iti_in'] = trials['valve_open'] + 1.
    trials['iti_in'][ind_err] = trials['error_tone_in'][ind_err] + 2.
    trials['intervals'] = np.c_[t_trial_start, trials['iti_in']]
    # # # # end of specific to version 4
    if save and output_path:
        output_path = Path(output_path)
        np.save(output_path / '_ibl_trials.goCue_times.npy', trials['goCue_times'])
        np.save(output_path / '_ibl_trials.response_times.npy', trials['response_times'])
        np.save(output_path / '_ibl_trials.stimOn_times.npy', trials['stimOn_times'])
        np.save(output_path / '_ibl_trials.intervals.npy', trials['intervals'])
        np.save(output_path / '_ibl_trials.feedback_times.npy', trials['feedback_times'])
    return trials
def extract_behaviour_sync(sync, chmap=None, display=False, tmax=np.inf):
    """
    Extract trial events from the sync fronts dictionary.

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for
        all 16 chans
    :param chmap: dictionary containing channel indices. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :param display: bool or matplotlib axes: show the full session sync pulses display
        defaults to False
    :param tmax: only consider sync fronts up to this time
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException(
            'No Bpod event found in FPGA. No behaviour extraction. '
            'Check channel maps.')
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
    audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(
        bpod['times'], bpod['polarities'])
    t_ready_tone_in, t_error_tone_in = _assign_events_audio(
        audio['times'], audio['polarities'])
    # assign each detected event to its trial, keyed by the trial start times
    trials = Bunch({
        'goCue_times': _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'errorCue_times': _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valveOpen_times': _assign_events_to_trial(t_trial_start, t_valve_open),
        'stimFreeze_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
        'stimOn_times': _assign_events_to_trial(t_trial_start, frame2ttl['times'], take='first'),
        'stimOff_times': _assign_events_to_trial(t_trial_start, frame2ttl['times']),
        'itiIn_times': _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valveOpen_times'])
    ind_err = np.isnan(trials['valveOpen_times'])
    trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]
    if display:
        width = 0.5
        ymax = 5
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
        plots.squares(bpod['times'], bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
        plots.squares(frame2ttl['times'], frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
        plots.squares(audio['times'], audio['polarities'] * 0.4 + 3, ax=ax, color='k')
        plots.squares(r0['times'], r0['polarities'] * 0.4 + 4, ax=ax, color='k')
        plots.vertical_lines(t_ready_tone_in, ymin=0, ymax=ymax,
                             ax=ax, label='goCue_times', color='b', linewidth=width)
        plots.vertical_lines(t_trial_start, ymin=0, ymax=ymax,
                             ax=ax, label='start_trial', color='m', linewidth=width)
        plots.vertical_lines(t_error_tone_in, ymin=0, ymax=ymax,
                             ax=ax, label='error tone', color='r', linewidth=width)
        plots.vertical_lines(t_valve_open, ymin=0, ymax=ymax,
                             ax=ax, label='valveOpen_times', color='g', linewidth=width)
        plots.vertical_lines(trials['stimFreeze_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stimFreeze_times', color='y', linewidth=width)
        plots.vertical_lines(trials['stimOff_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stim off', color='c', linewidth=width)
        plots.vertical_lines(trials['stimOn_times'], ymin=0, ymax=ymax,
                             ax=ax, label='stimOn_times', color='tab:orange', linewidth=width)
        c = _get_sync_fronts(sync, chmap['left_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['right_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['body_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_yticks([0, 1, 2, 3, 4, 5])
        ax.set_ylim([0, 5])
    return trials
def create_plots(self, axes, wheel_axes=None, trial_events=None, color_map=None,
                 linestyle=None):
    """
    Plots the data for bnc1 (sound) and bnc2 (frame2ttl)

    :param axes: An axes handle on which to plot the TTL events
    :param wheel_axes: An axes handle on which to plot the wheel trace
    :param trial_events: A list of Bpod trial events to plot, e.g. ['stimFreeze_times'],
        if None, valve, sound and stimulus events are plotted
    :param color_map: A color map to use for the events, default is the tableau color map
    :param linestyle: A line style map to use for the events, default is random.
    :return: None
    """
    color_map = color_map or TABLEAU_COLORS.keys()
    if trial_events is None:
        # Default trial events to plot as vertical lines
        trial_events = [
            'goCue_times', 'goCueTrigger_times', 'feedback_times',
            'stimFreeze_times', 'stimOff_times', 'stimOn_times'
        ]
    plot_args = {
        'ymin': 0,
        'ymax': 4,
        'linewidth': 2,
        'ax': axes
    }
    # raw TTL traces from the extractor: frame2ttl and audio
    bnc1 = self.extractor.frame_ttls
    bnc2 = self.extractor.audio_ttls
    trial_data = self.extractor.data
    # square pulse trains stacked in horizontal bands (frame2ttl at 1, sound at 2)
    plots.squares(bnc1['times'], bnc1['polarities'] * 0.4 + 1, ax=axes, color='k')
    plots.squares(bnc2['times'], bnc2['polarities'] * 0.4 + 2, ax=axes, color='k')
    linestyle = linestyle or random.choices(('-', '--', '-.', ':'), k=len(trial_events))
    if self.extractor.bpod_ttls is not None:
        # a third band for the bpod TTLs when available
        bpttls = self.extractor.bpod_ttls
        plots.squares(bpttls['times'], bpttls['polarities'] * 0.4 + 3, ax=axes, color='k')
        plot_args['ymax'] = 4
        ylabels = ['', 'frame2ttl', 'sound', 'bpod', '']
    else:
        plot_args['ymax'] = 3
        ylabels = ['', 'frame2ttl', 'sound', '']
    # one vertical line per event type, cycling through the colours
    for event, c, l in zip(trial_events, cycle(color_map), linestyle):
        plots.vertical_lines(trial_data[event], label=event, color=c, linestyle=l, **plot_args)
    axes.legend(loc='upper left', fontsize='xx-small', bbox_to_anchor=(1, 0.5))
    axes.set_yticklabels(ylabels)
    axes.set_yticks(list(range(plot_args['ymax'] + 1)))
    axes.set_ylim([0, plot_args['ymax']])
    if wheel_axes:
        # replot the event lines on the wheel axes, spanning the wheel position range
        wheel_plot_args = {
            'ax': wheel_axes,
            'ymin': self.wheel_data['re_pos'].min(),
            'ymax': self.wheel_data['re_pos'].max()}
        plot_args = {**plot_args, **wheel_plot_args}
        wheel_axes.plot(self.wheel_data['re_ts'], self.wheel_data['re_pos'], 'k-x')
        for event, c, ln in zip(trial_events, cycle(color_map), linestyle):
            plots.vertical_lines(trial_data[event],
                                 label=event, color=c, linestyle=ln, **plot_args)