# Assumed imports for this standalone example (not shown in the original excerpt):
import matplotlib.pyplot as plt
from one.api import ONE
import ibllib.plots as iblplt


def check_wheel_angle(eid):

    Plot = True

    one = ONE()
    #eid = 'e1023140-50c1-462a-b80e-5e05626d7f0e' # at least 9 bad cases

    #eid = one.search(subject='ZM_2104', date='2019-09-19', number=1)
    dsets = ['_ibl_trials.feedback_times.npy', '_ibl_trials.feedbackType.npy']
    feedback_times, feedback_type = one.load_datasets(eid,
                                                      dsets,
                                                      collections='alf')

    wheel = one.load_object(eid, 'wheel', collection='alf')
    reward_success = feedback_times[feedback_type == 1]
    reward_failure = feedback_times[feedback_type == -1]

    if Plot:
        plt.plot(wheel['times'], wheel['position'], linestyle='', marker='o')

        #iblplt.vertical_lines(trials['stimOn_times'], ymin=-100, ymax=100,
        #                      color='r', linewidth=0.5, label='stimOn_times')

        #iblplt.vertical_lines(reward_failure, ymin=-100, ymax=100,
        #                      color='b', linewidth=0.5, label='reward_failure')

        iblplt.vertical_lines(reward_success,
                              ymin=-100,
                              ymax=100,
                              color='k',
                              linewidth=0.5,
                              label='reward_success')

        plt.legend()
        plt.xlabel('time [sec]')
        plt.ylabel('wheel linear displacement [cm]')
        plt.show()

    # count reward deliveries where the wheel was silent for time_delay seconds before the reward
    time_delay = 0.5

    bad_cases1 = []
    for rew in reward_success:

        left = wheel['times'][find_nearest(wheel['times'], rew - time_delay)]
        right = wheel['times'][find_nearest(wheel['times'], rew)]

        if left == right:
            if left < rew - time_delay:
                bad_cases1.append(rew)

    if len(bad_cases1) == 0:
        print('Good news, no impossible case found.')
    else:
        print('Bad news, at least one impossible case found.')
        return len(bad_cases1)
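
The example above relies on a find_nearest helper that is not shown in this excerpt. A minimal sketch of such a helper (an assumption about the original, not its actual definition) returns the index of the array element closest to a given value:

import numpy as np

def find_nearest(array, value):
    # Hypothetical helper assumed by check_wheel_angle above:
    # index of the element of `array` closest to `value`.
    array = np.asarray(array)
    return int(np.abs(array - value).argmin())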
Example #2
def plot_rfmapping(times_interp_RF, ax=None):
    if ax is None:
        f, ax = plt.subplots(1, 1)

    vertical_lines(times_interp_RF,
                   ymin=0,
                   ymax=1,
                   color=color_cycle(9),
                   ax=ax,
                   label="RFframe_times")

    ax.legend()
Example #3
def plot_passive_periods(passivePeriods_df, ax=None):
    if ax is None:
        f, ax = plt.subplots(1, 1)
    # Update plot
    vertical_lines(
        np.r_[passivePeriods_df.loc['start'], passivePeriods_df.loc['stop']],
        ymin=-1,
        ymax=4,
        color=color_cycle(0),
        ax=ax,
        label="spacers",
    )
    ax.legend()
Example #4
    def test_attribute_times(self, display=False):
        # Create two timestamp arrays at two different frequencies
        tsa = np.linspace(0, 60, 60 * 4)[:60]  # 240bpm
        tsb = np.linspace(0, 60, 60 * 3)[:45]  # 180bpm
        tsa = np.sort(np.append(tsa, .4))  # Add ambiguous front
        tsb = np.sort(np.append(tsb, .41))
        if display:
            from ibllib.plots import vertical_lines
            import matplotlib.pyplot as plt
            vertical_lines(tsb, linestyle=':', color='r', label='tsb')
            vertical_lines(tsa, linestyle=':', color='b', label='tsa')
            plt.legend()

        # Check with default args
        matches = camera.attribute_times(tsa, tsb)
        expected = np.array([
            0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24,
            25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45, 46,
            48, 49, -1, 52, 53, -1, 56, 57, -1, 60
        ])
        np.testing.assert_array_equal(matches, expected)
        self.assertEqual(matches.size, tsb.size)

        # Taking closest instead of first should change index of ambiguous front
        matches = camera.attribute_times(tsa, tsb, take='nearest')
        expected[np.r_[1:3]] = expected[1:3] + 1
        np.testing.assert_array_equal(matches, expected)

        # Lower tolerance
        matches = camera.attribute_times(tsa, tsb, tol=0.05)
        expected = np.array(
            [0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])
        np.testing.assert_array_equal(matches[matches > -1], expected)

        # Remove injective assert
        matches = camera.attribute_times(tsa,
                                         tsb,
                                         injective=False,
                                         take='nearest')
        expected = np.array([
            0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24,
            25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45, 46,
            48, 49, -1, 52, 53, -1, 56, 57, -1, 60
        ])
        np.testing.assert_array_equal(matches, expected)

        # Check input validation
        with self.assertRaises(ValueError):
            camera.attribute_times(tsa, tsb, injective=False, take='closest')
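
The tests above pin down the behaviour of camera.attribute_times: each timestamp in tsb is matched to an index into tsa within a tolerance (-1 when nothing qualifies), taking either the first or the nearest candidate, and enforcing a one-to-one (injective) assignment by default. Below is a rough sketch of that matching logic under those assumptions only; it is not the library's implementation and may differ on edge cases:

import numpy as np

def attribute_times_sketch(arr, events, tol=0.1, take='first'):
    # For each event time, the index of an unused value in the sorted
    # array `arr` within `tol` seconds; -1 when no candidate qualifies.
    available = np.ones(arr.size, dtype=bool)
    out = np.full(events.size, -1, dtype=int)
    for i, t in enumerate(events):
        j = np.searchsorted(arr, t)
        candidates = [k for k in (j - 1, j, j + 1)
                      if 0 <= k < arr.size and abs(arr[k] - t) <= tol and available[k]]
        if not candidates:
            continue
        if take == 'nearest':
            best = min(candidates, key=lambda k: abs(arr[k] - t))
        else:  # 'first': earliest qualifying timestamp
            best = candidates[0]
        out[i] = best
        available[best] = False  # injective: each timestamp assigned once
    return out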
Example #5
def _assign_events_audio(audio_t,
                         audio_polarities,
                         return_indices=False,
                         display=False):
    """
    From detected fronts on the audio sync traces, outputs the synchronisation events
    related to tone in

    :param audio_t: numpy vector containing times of fronts
    :param audio_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
    :param return_indices (False): returns indices of tones
    :param display (False): for debug mode, displays the raw fronts overlaid with detections
    :return: numpy arrays t_ready_tone_in, t_error_tone_in
    :return: numpy arrays ind_ready_tone_in, ind_error_tone_in if return_indices=True
    """
    # make sure that there are no 2 consecutive fall or consecutive rise events
    assert (np.all(np.abs(np.diff(audio_polarities)) == 2))
    # durations between consecutive fronts; rise-to-fall intervals are selected below via the polarity mask
    dt = np.diff(audio_t)
    # detect ready tone by length below 110 ms
    i_ready_tone_in = np.where(
        np.logical_and(dt <= 0.11, audio_polarities[:-1] == 1))[0]
    t_ready_tone_in = audio_t[i_ready_tone_in]
    # error tones are events lasting from 400ms to 1200ms
    i_error_tone_in = np.where(
        np.logical_and(np.logical_and(0.4 < dt, dt < 1.2),
                       audio_polarities[:-1] == 1))[0]
    t_error_tone_in = audio_t[i_error_tone_in]
    if display:  # pragma: no cover
        from ibllib.plots import squares, vertical_lines
        squares(
            audio_t,
            audio_polarities,
            yrange=[-1, 1],
        )
        vertical_lines(t_ready_tone_in, ymin=-.8, ymax=.8)
        vertical_lines(t_error_tone_in, ymin=-.8, ymax=.8)

    if return_indices:
        return t_ready_tone_in, t_error_tone_in, i_ready_tone_in, i_error_tone_in
    else:
        return t_ready_tone_in, t_error_tone_in
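
Given the duration thresholds above (ready tone at most 110 ms, error tone between 400 ms and 1200 ms from rise to fall), a quick synthetic check of the function might look like this (hypothetical data, calling the definition above):

import numpy as np

# one 100 ms ready tone at t=1.0 s and one 500 ms error tone at t=3.0 s
audio_t = np.array([1.0, 1.1, 3.0, 3.5])
audio_polarities = np.array([1, -1, 1, -1])
t_ready, t_error = _assign_events_audio(audio_t, audio_polarities)
# expected: t_ready == [1.0] and t_error == [3.0]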
Example #6
def plot_gabor_times(passiveGabor_df, ax=None):
    if ax is None:
        f, ax = plt.subplots(1, 1)
    # Update plot
    vertical_lines(
        passiveGabor_df["start"].values,
        ymin=0,
        ymax=1,
        color=color_cycle(1),
        ax=ax,
        label="GaborOn_times",
    )
    vertical_lines(
        passiveGabor_df["stop"].values,
        ymin=0,
        ymax=1,
        color=color_cycle(2),
        ax=ax,
        label="GaborOff_times",
    )
    ax.legend()
Example #7
def plot_valve_times(passiveValve_intervals, ax=None):
    if ax is None:
        f, ax = plt.subplots(1, 1)
    # Update the plot
    vertical_lines(
        passiveValve_intervals[:, 0],
        ymin=2,
        ymax=3,
        color=color_cycle(3),
        ax=ax,
        label="ValveOn_times",
    )
    vertical_lines(
        passiveValve_intervals[:, 1],
        ymin=2,
        ymax=3,
        color=color_cycle(4),
        ax=ax,
        label="ValveOff_times",
    )
    ax.legend()
Example #8
def plot_trial(trial_number, R, times):
    '''
    Plot a rasterplot for a given trial,
    ordered by insertion depth, with
    'stimOn_times', 'feedback_times' and 'stimOff_times'
    '''

    a = list(trial_numbers)
    first = a.index(trial_number)
    last = len(a) - 1 - a[::-1].index(trial_number)

    plt.imshow(R[:, first:last],
               aspect='auto',
               cmap='binary',
               vmax=T_BIN / 0.001 / 4,
               extent=np.r_[times[[first, last]], Clusters[[0, -1]]],
               origin='lower')

    def restrict_timestamplist(q):

        li = []
        for i in q:
            if i > times[first] and i < times[last]:
                li.append(i)
        return li

    iblplt.vertical_lines(restrict_timestamplist(trials['stimOn_times']),
                          ymin=0,
                          ymax=Clusters[-1],
                          color='m',
                          linewidth=0.5,
                          label='stimOn_times')

    iblplt.vertical_lines(restrict_timestamplist(trials['feedback_times']),
                          ymin=0,
                          ymax=Clusters[-1],
                          color='b',
                          linewidth=0.5,
                          label='feedback_times')

    iblplt.vertical_lines(restrict_timestamplist(trials['stimOff_times']),
                          ymin=0,
                          ymax=Clusters[-1],
                          color='g',
                          linewidth=0.5,
                          label='stimOff_times')

    plt.xlabel('Time (s)')
    plt.ylabel('Cluster #; ordered by depth')
    plt.legend()
    plt.tight_layout()
Example #9
def plot_audio_times(passiveTone_intervals, passiveNoise_intervals, ax=None):
    if ax is None:
        f, ax = plt.subplots(1, 1)
    # Look at it
    vertical_lines(
        passiveTone_intervals[:, 0],
        ymin=1,
        ymax=2,
        color=color_cycle(5),
        ax=ax,
        label="toneOn_times",
    )
    vertical_lines(
        passiveTone_intervals[:, 1],
        ymin=1,
        ymax=2,
        color=color_cycle(6),
        ax=ax,
        label="toneOff_times",
    )
    vertical_lines(
        passiveNoise_intervals[:, 0],
        ymin=1,
        ymax=2,
        color=color_cycle(7),
        ax=ax,
        label="noiseOn_times",
    )
    vertical_lines(
        passiveNoise_intervals[:, 1],
        ymin=1,
        ymax=2,
        color=color_cycle(8),
        ax=ax,
        label="noiseOff_times",
    )

    ax.legend()
Example #10
def plot_pupil_diameter_single_trial(trial_numbers, trial_number, diameter,
                                     times, trials):

    a = list(trial_numbers)
    first = a.index(trial_number)
    last = len(a) - 1 - a[::-1].index(trial_number)

    plt.plot(times[first:last], diameter[first:last])

    def restrict_timestamplist(q):
        li = []
        for i in q:
            if i > times[first] and i < times[last]:
                li.append(i)
        return li

    iblplt.vertical_lines(restrict_timestamplist(trials['stimOn_times']),
                          ymin=10,
                          ymax=20,
                          color='m',
                          linewidth=0.5,
                          label='stimOn_times')

    iblplt.vertical_lines(restrict_timestamplist(trials['feedback_times']),
                          ymin=10,
                          ymax=20,
                          color='b',
                          linewidth=0.5,
                          label='feedback_times')

    iblplt.vertical_lines(restrict_timestamplist(trials['stimOff_times']),
                          ymin=10,
                          ymax=20,
                          color='g',
                          linewidth=0.5,
                          label='stimOff_times')

    plt.xlabel('Time (s)')
    plt.ylabel('pupil diameter [px]')
    plt.title('Trial number %s' % trial_number)
    plt.legend()
    plt.tight_layout()
Example #11
def groom_pin_state(gpio,
                    audio,
                    ts,
                    tolerance=2.,
                    display=False,
                    take='first',
                    min_diff=0.):
    """
    Align the GPIO pin state to the FPGA audio TTLs.  Any audio TTLs not reflected in the pin
    state are removed from the dict and the times of the detected fronts are converted to FPGA
    time.  At the end of this the number of GPIO fronts should equal the number of audio fronts.

    Note:
      - This function is ultra safe: we probably don't need to assign all the up and down fronts
      separately and could potentially even align the timestamps without removing the missed fronts
      - The input gpio and audio dicts may be modified by this function
      - For training sessions the frame rate is only 30Hz and the TTLs tend to be broken up by
      small gaps.  Setting the min_diff to 5ms helps the timestamp assignment accuracy.
    :param gpio: array of GPIO pin state values
    :param audio: dict of FPGA audio TTLs (see ibllib.io.extractors.ephys_fpga._get_sync_fronts)
    :param ts: camera frame times
    :param tolerance: two pulses need to be within this many seconds to be considered related
    :param take:  If 'first' the first value within tolerance is assigned; if 'nearest' the
    closest value is assigned
    :param display: If true, the resulting timestamps are plotted against the raw audio signal
    :param min_diff: Audio TTL fronts less than min_diff seconds apart will be removed
    :returns: dict of GPIO FPGA front indices, polarities and FPGA aligned times
    :returns: audio times and polarities sans the TTLs not detected in the frame data
    :returns: frame times in FPGA time
    """
    # Check that the dimensions match
    if np.any(gpio['indices'] >= ts.size):
        _logger.warning('GPIO events occurring beyond timestamps array length')
        keep = gpio['indices'] < ts.size
        gpio = {k: gpio[k][keep] for k, v in gpio.items()}
    assert audio and audio['times'].size > 0, 'no audio TTLs for session'
    assert audio['times'].size == audio[
        'polarities'].size, 'audio data dimension mismatch'
    # make sure that there are no 2 consecutive fall or consecutive rise events
    assert np.all(np.abs(np.diff(audio['polarities'])) ==
                  2), 'consecutive high/low audio events'
    # make sure first TTL is high
    assert audio['polarities'][0] == 1
    # make sure audio times in order
    assert np.all(np.diff(audio['times']) > 0)
    # make sure raw timestamps increase
    assert np.all(np.diff(ts) > 0), 'timestamps must strictly increase'
    # make sure there are state changes
    assert gpio['indices'].any(), 'no TTLs detected in GPIO'
    # make sure first GPIO state is high
    assert gpio['polarities'][0] == 1
    """
    Some audio TTLs appear to be so short that they are not recorded by the camera.  These can
    be as short as a few microseconds.  Applying a cutoff based on framerate was unsuccessful.
    Assigning each audio TTL to each pin state change is not easy because some onsets occur very
    close together (sometimes < 70ms), on the order of the delay between TTL and frame time.
    Also, the two clocks have some degree of drift, so the delay between audio TTL and pin state
    change may be zero or even negative.

    Here we split the events into audio onsets (lo->hi) and audio offsets (hi->lo).  For each
    uptick in the GPIO pin state, we take the first audio onset time that was within 100ms of it.
    We ensure that each audio TTL is assigned only once, so a TTL that is closer to frame 3 than
    frame 1 may still be assigned to frame 1.
    """
    ifronts = gpio['indices']  # The pin state flips
    audio_times = audio['times']
    if ifronts.size != audio['times'].size:
        _logger.warning(
            'more audio TTLs than GPIO state changes, assigning timestamps')
        to_remove = np.zeros(ifronts.size,
                             dtype=bool)  # unassigned GPIO fronts to remove
        low2high = ifronts[gpio['polarities'] == 1]
        high2low = ifronts[gpio['polarities'] == -1]
        assert low2high.size >= high2low.size

        # Remove and/or fuse short TTLs
        if min_diff > 0:
            short, = np.where(np.diff(audio['times']) < min_diff)
            audio_times = np.delete(audio['times'], np.r_[short, short + 1])
            _logger.debug(f'Removed {short.size * 2} TTL fronts less than '
                          f'{min_diff * 1e3:.0f}ms apart')
            assert audio_times.size > 0, f'all audio TTLs less than {min_diff}s'

        # Onsets
        # times relative to first GPIO high
        ups = ts[low2high] - ts[low2high][0]
        # audio times relative to first onset
        onsets = audio_times[::2] - audio_times[0]
        # assign GPIO fronts to audio onset
        assigned = attribute_times(onsets, ups, tol=tolerance, take=take)
        unassigned = np.setdiff1d(np.arange(onsets.size),
                                  assigned[assigned > -1])
        if unassigned.size > 0:
            _logger.debug(
                f'{unassigned.size} audio TTL rises were not detected by the camera'
            )
        # Check that all pin state upticks could be attributed to an onset TTL
        missed = assigned == -1
        if np.any(missed):
            # if np.any(missed := assigned == -1):  # py3.8
            _logger.warning(f'{sum(missed)} pin state rises could '
                            f'not be attributed to an audio TTL')
            if display:
                ax = plt.subplot()
                vertical_lines(ups[assigned > -1],
                               linestyle='-',
                               color='g',
                               ax=ax,
                               label='assigned GPIO up state')
                vertical_lines(ups[missed],
                               linestyle='-',
                               color='r',
                               ax=ax,
                               label='unassigned GPIO up state')
                vertical_lines(onsets[unassigned],
                               linestyle=':',
                               color='k',
                               ax=ax,
                               alpha=0.3,
                               label='audio onset')
                vertical_lines(onsets[assigned],
                               linestyle=':',
                               color='b',
                               ax=ax,
                               label='assigned audio onset')
                plt.legend()
                plt.show()
            # Remove the missed fronts
            to_remove = np.in1d(gpio['indices'], low2high[missed])
            assigned = assigned[~missed]
        onsets_ = audio_times[::2][assigned]

        # Offsets
        downs = ts[high2low] - ts[high2low][0]
        offsets = audio_times[1::2] - audio_times[1]
        assigned = attribute_times(offsets, downs, tol=tolerance, take=take)
        unassigned = np.setdiff1d(np.arange(offsets.size),
                                  assigned[assigned > -1])
        if unassigned.size > 0:
            _logger.debug(
                f'{unassigned.size} audio TTL falls were not detected by the camera'
            )
        # Check that all pin state downticks could be attributed to an offset TTL
        missed = assigned == -1
        if np.any(missed):
            # if np.any(missed := assigned == -1):  # py3.8
            _logger.warning(f'{sum(missed)} pin state falls could '
                            f'not be attributed to an audio TTL')
            # Remove the missed fronts
            to_remove |= np.in1d(gpio['indices'], high2low[missed])
            assigned = assigned[~missed]
        offsets_ = audio_times[1::2][assigned]

        # Audio groomed
        if np.any(to_remove):
            # Check for any orphaned fronts (only one pin state edge was assigned)
            to_remove = np.pad(to_remove, (0, to_remove.size % 2),
                               'edge')  # Ensure even size
            # Perform xor to find GPIOs where only onset or offset is marked for removal
            orphaned = to_remove.reshape(-1, 2).sum(axis=1) == 1
            if orphaned.any():
                """If there are orphaned GPIO fronts (i.e. only one edge was assigned to an
                audio front), remove the orphaned front its assigned audio TTL. In other words
                if both edges cannot be assigned to an audio TTL, we ignore the TTL entirely.
                This is a sign that the assignment was bad and extraction may fail."""
                _logger.warning(
                    'Some onsets but not offsets (or vice versa) were not assigned; '
                    'this may be a sign of faulty wiring or clock drift')
                # Find indices of GPIO upticks where only the downtick was marked for removal
                orphaned_onsets, = np.where(~to_remove.reshape(-1, 2)[:, 0]
                                            & orphaned)
                # The onsets_ array already has the other TTLs removed (same size as to_remove ==
                # False) so subtract the number of removed elements from index.
                for i, v in enumerate(orphaned_onsets):
                    orphaned_onsets[i] -= to_remove.reshape(-1, 2)[:v, 0].sum()
                # Same for offsets...
                orphaned_offsets, = np.where(~to_remove.reshape(-1, 2)[:, 1]
                                             & orphaned)
                for i, v in enumerate(orphaned_offsets):
                    orphaned_offsets[i] -= to_remove.reshape(-1, 2)[:v,
                                                                    1].sum()
                # Remove orphaned audio onsets and offsets
                onsets_ = np.delete(
                    onsets_, orphaned_onsets[orphaned_onsets < onsets_.size])
                offsets_ = np.delete(
                    offsets_,
                    orphaned_offsets[orphaned_offsets < offsets_.size])
                _logger.debug(f'{orphaned.sum()} orphaned TTLs removed')
                to_remove.reshape(-1, 2)[orphaned] = True

            # Remove those unassigned GPIOs
            gpio = {k: v[~to_remove[:v.size]] for k, v in gpio.items()}
            ifronts = gpio['indices']

            # Assert that we've removed discrete TTLs
            # A failure means e.g. an up-going front of one TTL was missed
            # but not the down-going one.
            assert np.all(np.abs(np.diff(gpio['polarities'])) == 2)
            assert gpio['polarities'][0] == 1

        audio_ = {
            'times': np.empty(ifronts.size),
            'polarities': gpio['polarities']
        }
        audio_['times'][::2] = onsets_
        audio_['times'][1::2] = offsets_
    else:
        audio_ = audio

    # Align the frame times to FPGA
    fcn_a2b, drift_ppm = dsp.sync_timestamps(ts[ifronts], audio_['times'])
    _logger.debug(f'frame audio alignment drift = {drift_ppm:.2f}ppm')
    # Add times to GPIO dict
    gpio['times'] = fcn_a2b(ts[ifronts])

    if display:
        # Plot all the onsets and offsets
        ax = plt.subplot()
        # All Audio TTLS
        squares(audio['times'],
                audio['polarities'],
                ax=ax,
                label='audio TTLs',
                linestyle=':',
                color='k',
                yrange=[0, 1],
                alpha=0.3)
        # GPIO
        x = np.insert(gpio['times'], 0, 0)
        y = np.arange(x.size) % 2
        squares(x, y, ax=ax, label='GPIO')
        y = within_ranges(np.arange(ts.size),
                          ifronts.reshape(-1, 2))  # 0 or 1 for each frame
        ax.plot(fcn_a2b(ts), y, 'kx', label='cam times')
        # Assigned audio
        squares(audio_['times'],
                audio_['polarities'],
                ax=ax,
                label='assigned audio TTL',
                linestyle=':',
                color='g',
                yrange=[0, 1])
        ax.legend()
        plt.xlabel('FPGA time (s)')
        ax.set_yticks([0, 1])
        ax.set_title('GPIO - audio TTL alignment')
        plt.show()

    return gpio, audio_, fcn_a2b(ts)
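
As the docstring notes, training sessions run at roughly 30 Hz and the TTLs tend to be broken up by small gaps, so min_diff=5 ms helps the assignment. A hypothetical invocation (gpio and audio as produced by the IBL pin-state and sync-front extractors, ts the raw camera frame times; the inputs are not constructed here):

gpio, audio, frame_times = groom_pin_state(gpio, audio, ts, take='nearest', min_diff=0.005)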
Example #12
def align_with_audio(timestamps,
                     audio,
                     pin_state,
                     count,
                     extrapolate_missing=True,
                     display=False):
    """
    Groom the raw FPGA or Bpod camera timestamps using the frame embedded audio TTLs and frame
    counter.
    :param timestamps: An array of raw FPGA or Bpod camera timestamps
    :param audio: An array of FPGA or Bpod audio TTL times
    :param pin_state: An array of camera pin states
    :param count: An array of frame numbers
    :param extrapolate_missing: If true and the number of timestamps is fewer than the number of
    frame counts, the remaining timestamps are extrapolated based on the frame rate, otherwise
    they are NaNs
    :param display: Plot the resulting timestamps
    :return: The corrected frame timestamps
    """
    # Some assertions made on the raw data
    # assert count.size == pin_state.size, 'frame count and pin state size mismatch'
    assert all(np.diff(count) > 0), 'frame count not strictly increasing'
    assert all(np.diff(timestamps) > 0
               ), 'FPGA/Bpod camera times not strictly increasing'
    same_n_ttl = pin_state['times'].size == audio['times'].size
    assert same_n_ttl, 'more audio TTLs detected on camera than TTLs sent'
    """Here we will ensure that the FPGA camera times match the number of video frames in
    length.  We will make the following assumptions:

    1. The number of FPGA camera times is equal to or greater than the number of video frames.
    2. No TTLs were missed between the camera and FPGA.
    3. No pin states were missed by Bonsai.
    4. No pixel count data was missed by Bonsai.

    In other words the count and pin state arrays accurately reflect the number of frames
    sent by the camera and should therefore be the same length, and the length of the frame
    counter should match the number of saved video frames.

    The missing frame timestamps are removed in three stages:

    1. Remove any timestamps that occurred before video frame acquisition in Bonsai.
    2. Remove any timestamps where the frame counter reported missing frames, i.e. remove the
    dropped frames which occurred throughout the session.
    3. Remove the trailing timestamps at the end of the session if the camera was turned off
    in the wrong order.
    """
    # Align on first pin state change
    first_uptick = pin_state['indices'][0]
    first_ttl = np.searchsorted(timestamps, audio['times'][0])
    """Here we find up to which index in the FPGA times we discard by taking the difference
    between the index of the first pin state change (when the audio TTL was reported by the
    camera) and the index of the first audio TTL in FPGA time.  We subtract the difference
    between the frame count at the first pin state change and the index to account for any
    video frames that were not saved during this period (we will remove those from the
    camera FPGA times later).
    """
    # Minus any frames that were dropped between the start of frame acquisition and the
    # first TTL
    start = first_ttl - first_uptick - (count[first_uptick] - first_uptick)
    # Get approximate frame rate for extrapolating timestamps (if required)
    frate = round(1 / np.nanmedian(np.diff(timestamps)))

    if start < 0:
        n_missing = abs(start)
        _logger.warning(
            f'{n_missing} missing FPGA/Bpod timestamp(s) at start; '
            f'{"extrapolating" if extrapolate_missing else "prepending nans"}')
        to_app = (timestamps[0] - (np.arange(n_missing, 0, -1) + 1) / frate
                  if extrapolate_missing else np.full(n_missing, np.nan))
        timestamps = np.r_[to_app, timestamps]  # Prepend the missing times
        start = 0

    # Remove the extraneous timestamps from the beginning and end
    end = count[-1] + 1 + start
    ts = timestamps[start:end]
    n_missing = count[-1] - ts.size + 1
    if n_missing > 0:
        # if (n_missing := count[-1] - ts.size + 1) > 0:  # py3.8
        """
        For ephys sessions there may be fewer FPGA times than frame counts if SpikeGLX is turned
        off before the video acquisition workflow.  For Bpod this always occurs because Bpod
        finishes before the camera workflow.  For Bpod the times are already extrapolated for
        these late frames."""
        _logger.warning(
            f'{n_missing} fewer FPGA/Bpod timestamps than frame counts; '
            f'{"extrapolating" if extrapolate_missing else "appending nans"}')
        to_app = ((np.arange(n_missing) + 1) / frate + ts[-1]
                  if extrapolate_missing else np.full(n_missing, np.nan))
        ts = np.r_[ts, to_app]  # Append the missing times
    assert ts.size >= count.size, 'fewer timestamps than frame counts'
    assert ts.size == count[-1] + 1, 'more frames recorded in frame count than timestamps'

    # Remove the rest of the dropped frames
    ts = ts[count]
    assert np.searchsorted(ts, audio['times'][0]) == first_uptick,\
        'time of first audio TTL doesn\'t match after alignment'
    if ts.size != count.size:
        _logger.error(
            'number of timestamps and frames don\'t match after alignment')

    if display:
        # Plot to check
        fig, axes = plt.subplots(1, 1)
        y = within_ranges(np.arange(ts.size),
                          pin_state['indices'].reshape(-1, 2)).astype(float)
        y *= 1e-5  # For scale when zoomed in
        axes.plot(ts,
                  y,
                  marker='d',
                  color='blue',
                  drawstyle='steps-pre',
                  label='GPIO')
        axes.plot(ts, np.zeros_like(ts), 'kx', label='FPGA timestamps')
        vertical_lines(audio['times'],
                       ymin=0,
                       ymax=1e-5,
                       color='r',
                       linestyle=':',
                       ax=axes,
                       label='audio TTL')
        plt.legend()

    return ts
Example #13
def extract_behaviour_sync(sync, chmap=None, display=False, tmax=np.inf):
    """
    Extract trial event times from the sync fronts dictionary

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for all 16 chans
    :param chmap: dictionary containing channel index. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :param display: bool or matplotlib axes: show the full session sync pulses display;
        defaults to False
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException(
            'No Bpod event found in FPGA. No behaviour extraction. '
            'Check channel maps.')
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
    audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(
        bpod['times'], bpod['polarities'])
    t_ready_tone_in, t_error_tone_in = _assign_events_audio(
        audio['times'], audio['polarities'])
    trials = Bunch({
        'goCue_times':
        _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'errorCue_times':
        _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valveOpen_times':
        _assign_events_to_trial(t_trial_start, t_valve_open),
        'stimFreeze_times':
        _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
        'stimOn_times':
        _assign_events_to_trial(t_trial_start,
                                frame2ttl['times'],
                                take='first'),
        'stimOff_times':
        _assign_events_to_trial(t_trial_start, frame2ttl['times']),
        'itiIn_times':
        _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valveOpen_times'])
    ind_err = np.isnan(trials['valveOpen_times'])
    trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]

    if display:
        width = 0.5
        ymax = 5
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
        plots.squares(bpod['times'],
                      bpod['polarities'] * 0.4 + 1,
                      ax=ax,
                      color='k')
        plots.squares(frame2ttl['times'],
                      frame2ttl['polarities'] * 0.4 + 2,
                      ax=ax,
                      color='k')
        plots.squares(audio['times'],
                      audio['polarities'] * 0.4 + 3,
                      ax=ax,
                      color='k')
        plots.squares(r0['times'],
                      r0['polarities'] * 0.4 + 4,
                      ax=ax,
                      color='k')
        plots.vertical_lines(t_ready_tone_in,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='goCue_times',
                             color='b',
                             linewidth=width)
        plots.vertical_lines(t_trial_start,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='start_trial',
                             color='m',
                             linewidth=width)
        plots.vertical_lines(t_error_tone_in,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='error tone',
                             color='r',
                             linewidth=width)
        plots.vertical_lines(t_valve_open,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='valveOpen_times',
                             color='g',
                             linewidth=width)
        plots.vertical_lines(trials['stimFreeze_times'],
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stimFreeze_times',
                             color='y',
                             linewidth=width)
        plots.vertical_lines(trials['stimOff_times'],
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stim off',
                             color='c',
                             linewidth=width)
        plots.vertical_lines(trials['stimOn_times'],
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stimOn_times',
                             color='tab:orange',
                             linewidth=width)
        c = _get_sync_fronts(sync, chmap['left_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['right_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['body_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_yticks([0, 1, 2, 3, 4, 5])
        ax.set_ylim([0, 5])

    return trials
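
A hypothetical call, with the three channels taken from the chmap default quoted in the docstring; the extra channels required by the display branch are illustrative guesses, and sync stands in for the fronts dictionary extracted from the FPGA binary elsewhere:

chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15,
         # channels below are only needed for display=True; indices are illustrative
         'rotary_encoder_0': 13, 'left_camera': 2, 'right_camera': 1, 'body_camera': 0}
trials = extract_behaviour_sync(sync, chmap=chmap, display=True)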
Example #14
def extract_behaviour_sync(sync,
                           chmap=None,
                           display=False,
                           bpod_trials=None,
                           tmax=np.inf):
    """
    Extract trial event times from the sync fronts dictionary

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for all 16 chans
    :param chmap: dictionary containing channel index. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :param display: bool or matplotlib axes: show the full session sync pulses display;
        defaults to False
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException(
            'No Bpod event found in FPGA. No behaviour extraction. '
            'Check channel maps.')
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
    frame2ttl = _clean_frame2ttl(frame2ttl)
    audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _assign_events_bpod(
        bpod['times'], bpod['polarities'])
    # one issue is that sometimes bpod pulses may not have been detected; in this case,
    # sync bpod/FPGA and add the trial starts that were not detected
    if bpod_trials:
        bpod_start = bpod_trials['intervals_bpod'][:, 0]
        fcn, drift, ibpod, ifpga = dsp.utils.sync_timestamps(
            bpod_start, t_trial_start, return_indices=True)
        # if it's drifting too much
        if drift > 200 and bpod_start.size != t_trial_start.size:
            raise err.SyncBpodFpgaException("sync cluster f*ck")
        missing_bpod = fcn(bpod_start[np.setxor1d(ibpod,
                                                  np.arange(len(bpod_start)))])
        t_trial_start = np.sort(np.r_[t_trial_start, missing_bpod])
    else:
        _logger.warning(
            "Deprecation Warning: calling FPGA trials extraction without a bpod trials"
            " dictionary will result in an error.")
    t_ready_tone_in, t_error_tone_in = _assign_events_audio(
        audio['times'], audio['polarities'])
    trials = Bunch({
        'goCue_times':
        _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'errorCue_times':
        _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valveOpen_times':
        _assign_events_to_trial(t_trial_start, t_valve_open),
        'stimFreeze_times':
        _assign_events_to_trial(t_trial_start, frame2ttl['times'], take=-2),
        'stimOn_times':
        _assign_events_to_trial(t_trial_start,
                                frame2ttl['times'],
                                take='first'),
        'stimOff_times':
        _assign_events_to_trial(t_trial_start, frame2ttl['times']),
        'itiIn_times':
        _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valveOpen_times'])
    ind_err = np.isnan(trials['valveOpen_times'])
    trials['feedback_times'][ind_err] = trials['errorCue_times'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['itiIn_times']]

    if display:
        width = 0.5
        ymax = 5
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
        plots.squares(bpod['times'],
                      bpod['polarities'] * 0.4 + 1,
                      ax=ax,
                      color='k')
        plots.squares(frame2ttl['times'],
                      frame2ttl['polarities'] * 0.4 + 2,
                      ax=ax,
                      color='k')
        plots.squares(audio['times'],
                      audio['polarities'] * 0.4 + 3,
                      ax=ax,
                      color='k')
        plots.squares(r0['times'],
                      r0['polarities'] * 0.4 + 4,
                      ax=ax,
                      color='k')
        plots.vertical_lines(t_ready_tone_in,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='goCue_times',
                             color='b',
                             linewidth=width)
        plots.vertical_lines(t_trial_start,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='start_trial',
                             color='m',
                             linewidth=width)
        plots.vertical_lines(t_error_tone_in,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='error tone',
                             color='r',
                             linewidth=width)
        plots.vertical_lines(t_valve_open,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='valveOpen_times',
                             color='g',
                             linewidth=width)
        plots.vertical_lines(trials['stimFreeze_times'],
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stimFreeze_times',
                             color='y',
                             linewidth=width)
        plots.vertical_lines(trials['stimOff_times'],
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stim off',
                             color='c',
                             linewidth=width)
        plots.vertical_lines(trials['stimOn_times'],
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stimOn_times',
                             color='tab:orange',
                             linewidth=width)
        c = _get_sync_fronts(sync, chmap['left_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 5, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['right_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 6, ax=ax, color='k')
        c = _get_sync_fronts(sync, chmap['body_camera'])
        plots.squares(c['times'], c['polarities'] * 0.4 + 7, ax=ax, color='k')
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_yticks([0, 1, 2, 3, 4, 5])
        ax.set_ylim([0, 5])

    return trials
Example #15
def extract_behaviour_sync(sync,
                           output_path=None,
                           save=False,
                           chmap=None,
                           display=False,
                           tmax=np.inf):
    """
    Extract trial event times from the sync fronts dictionary

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for all 16 chans
    :param output_path: where to save the data
    :param save: True/False
    :param chmap: dictionary containing channel index. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :param display: bool or matplotlib axes: show the full session sync pulses display;
        defaults to False
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'], tmax=tmax)
    if bpod.times.size == 0:
        raise err.SyncBpodFpgaException(
            'No Bpod event found in FPGA. No behaviour extraction. '
            'Check channel maps.')
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'], tmax=tmax)
    audio = _get_sync_fronts(sync, chmap['audio'], tmax=tmax)
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _bpod_events_extraction(
        bpod['times'], bpod['polarities'])
    t_ready_tone_in, t_error_tone_in = _audio_events_extraction(
        audio['times'], audio['polarities'])
    # stim off time is the first frame2ttl rise/fall after the trial start
    # does not apply for 1st trial
    ind = np.searchsorted(frame2ttl['times'], t_iti_in, side='left')
    t_stim_off = frame2ttl['times'][np.minimum(ind, frame2ttl.times.size - 1)]
    t_stim_freeze = frame2ttl['times'][np.maximum(ind - 1, 0)]
    # stimOn_times: first frame2ttl change after trial start
    trials = Bunch({
        'ready_tone_in':
        _assign_events_to_trial(t_trial_start, t_ready_tone_in, take='first'),
        'error_tone_in':
        _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valve_open':
        _assign_events_to_trial(t_trial_start, t_valve_open),
        'stim_freeze':
        _assign_events_to_trial(t_trial_start, t_stim_freeze),
        'stimOn_times':
        _assign_events_to_trial(t_trial_start,
                                frame2ttl['times'],
                                take='first'),
        'stimOff_times':
        _assign_events_to_trial(t_trial_start, t_stim_off),
        'iti_in':
        _assign_events_to_trial(t_trial_start, t_iti_in)
    })
    # goCue_times corresponds to the tone_in event
    trials['goCue_times'] = np.copy(trials['ready_tone_in'])
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = np.copy(trials['valve_open'])
    ind_err = np.isnan(trials['valve_open'])
    trials['feedback_times'][ind_err] = trials['error_tone_in'][ind_err]
    trials['intervals'] = np.c_[t_trial_start, trials['iti_in']]

    if display:
        width = 0.5
        ymax = 5
        if isinstance(display, bool):
            plt.figure("Ephys FPGA Sync")
            ax = plt.gca()
        else:
            ax = display
        r0 = _get_sync_fronts(sync, chmap['rotary_encoder_0'])
        plots.squares(bpod['times'],
                      bpod['polarities'] * 0.4 + 1,
                      ax=ax,
                      color='k')
        plots.squares(frame2ttl['times'],
                      frame2ttl['polarities'] * 0.4 + 2,
                      ax=ax,
                      color='k')
        plots.squares(audio['times'],
                      audio['polarities'] * 0.4 + 3,
                      ax=ax,
                      color='k')
        plots.squares(r0['times'],
                      r0['polarities'] * 0.4 + 4,
                      ax=ax,
                      color='k')
        plots.vertical_lines(t_ready_tone_in,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='ready tone in',
                             color='b',
                             linewidth=width)
        plots.vertical_lines(t_trial_start,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='start_trial',
                             color='m',
                             linewidth=width)
        plots.vertical_lines(t_error_tone_in,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='error tone',
                             color='r',
                             linewidth=width)
        plots.vertical_lines(t_valve_open,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='valve open',
                             color='g',
                             linewidth=width)
        plots.vertical_lines(t_stim_freeze,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stim freeze',
                             color='y',
                             linewidth=width)
        plots.vertical_lines(t_stim_off,
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stim off',
                             color='c',
                             linewidth=width)
        plots.vertical_lines(trials['stimOn_times'],
                             ymin=0,
                             ymax=ymax,
                             ax=ax,
                             label='stim on',
                             color='tab:orange',
                             linewidth=width)
        ax.legend()
        ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio', 're_0', ''])
        ax.set_ylim([0, 5])

    if save and output_path:
        output_path = Path(output_path)
        np.save(output_path / '_ibl_trials.goCue_times.npy',
                trials['goCue_times'])
        np.save(output_path / '_ibl_trials.stimOn_times.npy',
                trials['stimOn_times'])
        np.save(output_path / '_ibl_trials.intervals.npy', trials['intervals'])
        np.save(output_path / '_ibl_trials.feedback_times.npy',
                trials['feedback_times'])
    return trials
Example #16
gb_diff_ts = np.diff(t_gabor[0::2])
pearson_r = np.corrcoef(np.diff(gabor_fixtures), gb_diff_ts)[1, 0]
assert pearson_r > 0.95

DEBUG_PLOTS = True
if DEBUG_PLOTS:
    # plots for debug
    t0 = np.median(t_valve_open - valve_fixtures)
    from ibllib.plots import squares, vertical_lines, color_cycle
    import matplotlib.pyplot as plt
    pl, ax = plt.subplots(2, 1)
    for i, lab in enumerate(['frame2ttl', 'audio', 'bpod']):
        sy = ephys_fpga._get_sync_fronts(sync, sync_map[lab], tmin=t_start_passive)
        squares(sy['times'], sy['polarities'], yrange=[0.1 + i, 0.9 + i], color='k', ax=ax[0])

    vertical_lines(np.r_[t_start_passive, t_starts, t_ends], ymin=-1, ymax=4, color=color_cycle(0),
                   ax=ax[0], label='spacers')
    vertical_lines(gabor_fixtures + t0, ymin=-1, ymax=4, color=color_cycle(1),
                   ax=ax[0], label='fixtures gabor')
    vertical_lines(t_valve_open, ymin=-1, ymax=4, color=color_cycle(2), ax=ax[0], label='valve')
    vertical_lines(valve_fixtures + t0, ymin=-1, ymax=4, color=color_cycle(2), ax=ax[0],
                   linestyle='--', label='fixtures valve')

    ax[0].legend()

    ax[1].plot([0, 3], [0, 3], linewidth=2.0)
    # plt.plot(diff_delays, gb_diff_ts, '.')
    plt.xlabel('saved delays diff [s]')
    plt.ylabel('measured times diff [s]')
    # scatter plot
    plt.scatter(np.diff(gabor_fixtures), gb_diff_ts,
                c=(fixture['ids'][igabor - 1] == 'G')[:-1], s=10)
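
The pearson_r assertion above reads the off-diagonal element of np.corrcoef, i.e. the Pearson correlation between the saved fixture delays and the measured inter-pulse intervals. For reference, a self-contained illustration of that idiom:

import numpy as np

rng = np.random.default_rng(0)
a = np.arange(1.0, 9.0)                          # reference delays
b = 2 * a + 0.01 * rng.standard_normal(a.size)   # noisy linear readout
r = np.corrcoef(a, b)[1, 0]                      # Pearson r, close to 1 here
assert r > 0.95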
Example #17
def extract_behaviour_sync(sync, output_path=None, save=False, chmap=None):
    """
    Extract trial event times from the sync fronts dictionary

    :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace for all 16 chans
    :param output_path: where to save the data
    :param save: True/False
    :param chmap: dictionary containing channel index. Default to constant.
        chmap = {'bpod': 7, 'frame2ttl': 12, 'audio': 15}
    :return: trials dictionary
    """
    bpod = _get_sync_fronts(sync, chmap['bpod'])
    frame2ttl = _get_sync_fronts(sync, chmap['frame2ttl'])
    audio = _get_sync_fronts(sync, chmap['audio'])
    # extract events from the fronts for each trace
    t_trial_start, t_valve_open, t_iti_in = _bpod_events_extraction(
        bpod['times'], bpod['polarities'])
    t_ready_tone_in, t_error_tone_in = _audio_events_extraction(
        audio['times'], audio['polarities'])
    # stim off time is the first frame2ttl rise/fall after the trial start
    # does not apply for 1st trial
    ind = np.searchsorted(frame2ttl['times'], t_iti_in, side='left')
    t_stim_off = frame2ttl['times'][ind]
    t_stim_freeze = frame2ttl['times'][ind - 1]

    if DEBUG_PLOTS:
        plt.figure()
        ax = plt.gca()
        plots.squares(bpod['times'],
                      bpod['polarities'] * 0.4 + 1,
                      ax=ax,
                      label='bpod=1',
                      color='k')
        plots.squares(frame2ttl['times'],
                      frame2ttl['polarities'] * 0.4 + 2,
                      ax=ax,
                      label='frame2ttl=2',
                      color='k')
        plots.squares(audio['times'],
                      audio['polarities'] * 0.4 + 3,
                      ax=ax,
                      label='audio=3',
                      color='k')
        plots.vertical_lines(t_ready_tone_in,
                             ymin=0,
                             ymax=4,
                             ax=ax,
                             label='ready tone in',
                             color='b',
                             linewidth=0.5)
        plots.vertical_lines(t_trial_start,
                             ymin=0,
                             ymax=4,
                             ax=ax,
                             label='start_trial',
                             color='m',
                             linewidth=0.5)
        plots.vertical_lines(t_error_tone_in,
                             ymin=0,
                             ymax=4,
                             ax=ax,
                             label='error tone',
                             color='r',
                             linewidth=0.5)
        plots.vertical_lines(t_valve_open,
                             ymin=0,
                             ymax=4,
                             ax=ax,
                             label='valve open',
                             color='g',
                             linewidth=0.5)
        plots.vertical_lines(t_stim_freeze,
                             ymin=0,
                             ymax=4,
                             ax=ax,
                             label='stim freeze',
                             color='y',
                             linewidth=0.5)
        plots.vertical_lines(t_stim_off,
                             ymin=0,
                             ymax=4,
                             ax=ax,
                             label='stim off',
                             color='c',
                             linewidth=0.5)
        ax.legend()

    # stimOn_times: first frame2ttl change after trial start
    trials = {
        'ready_tone_in':
        _assign_events_to_trial(t_trial_start, t_ready_tone_in),
        'error_tone_in':
        _assign_events_to_trial(t_trial_start, t_error_tone_in),
        'valve_open':
        _assign_events_to_trial(t_trial_start, t_valve_open),
        'stim_freeze':
        _assign_events_to_trial(t_trial_start, t_stim_freeze),
        'stimOn_times':
        _assign_events_to_trial(t_trial_start,
                                frame2ttl['times'],
                                take='first'),
        'iti_in':
        _assign_events_to_trial(t_trial_start, t_iti_in)
    }
    # goCue_times corresponds to the tone_in event
    trials['goCue_times'] = trials['ready_tone_in']
    # response_times is TONE_IN to STIM freeze
    trials['response_times'] = trials['stim_freeze'] - trials['ready_tone_in']
    # feedback times are valve open on good trials and error tone in on error trials
    trials['feedback_times'] = trials['valve_open']
    ind_err = np.isnan(trials['valve_open'])
    trials['feedback_times'][ind_err] = trials['error_tone_in'][ind_err]
    # # # # this is specific to version 4
    trials['iti_in'] = trials['valve_open'] + 1.
    trials['iti_in'][ind_err] = trials['error_tone_in'][ind_err] + 2.
    trials['intervals'] = np.c_[t_trial_start, trials['iti_in']]
    # # # # end of specific to version 4
    if save and output_path:
        output_path = Path(output_path)
        np.save(output_path / '_ibl_trials.goCue_times.npy',
                trials['goCue_times'])
        np.save(output_path / '_ibl_trials.response_times.npy',
                trials['response_times'])
        np.save(output_path / '_ibl_trials.stimOn_times.npy',
                trials['stimOn_times'])
        np.save(output_path / '_ibl_trials.intervals.npy', trials['intervals'])
        np.save(output_path / '_ibl_trials.feedback_times.npy',
                trials['feedback_times'])
    return trials
Example #18
def plot_raster_single_trial(one, eid, trial_number, probe):
    '''
    Plot a rasterplot for a given trial,
    ordered by insertion depth, with
    'stimOn_times', 'feedback_times' and 'stimOff_times'
    '''

    dataset_types = [
        'clusters.depth', 'spikes.times', 'spikes.depths', 'spikes.clusters',
        'trials.intervals'
    ]

    D = one.load(eid, dataset_types=dataset_types, dclass_output=True)

    alf_path = Path(D.local_path[0]).parent.parent / 'alf'
    # if the path already ends in an alf folder, avoid nesting a second one
    if str(alf_path.parent)[-3:] == 'alf':
        alf_path = alf_path.parent
    probe_path = alf_path / probe

    spikes = alf.io.load_object(probe_path, 'spikes')
    trials = alf.io.load_object(alf_path, 'trials')

    T_BIN = 0.01  # time bin in sec

    # bin spikes
    R, times, Clusters = bincount2D(spikes['times'], spikes['clusters'], T_BIN)

    # Order activity by cortical depth of neurons
    d = dict(zip(spikes['clusters'], spikes['depths']))
    y = sorted([[i, d[i]] for i in d])
    isort = np.argsort([x[1] for x in y])
    R = R[isort, :]

    # get trial number for each time bin
    trial_numbers = np.digitize(times, trials['intervals'][:, 0])
    print('Range of trials: ', [trial_numbers[0], trial_numbers[-1]])

    plt.figure('2')
    plt.title('%s_%s_trial: %s' % (eid, probe, trial_number))
    trial_number = trial_number + 1  # np.digitize bin indices are 1-based
    a = list(trial_numbers)
    first = a.index(trial_number)
    last = len(a) - 1 - a[::-1].index(trial_number)

    plt.imshow(R[:, first:last],
               aspect='auto',
               cmap='binary',
               vmax=T_BIN / 0.001 / 4,
               extent=np.r_[times[[first, last]], Clusters[[0, -1]]],
               origin='lower')

    def restrict_timestamplist(q):
        # keep only timestamps that fall within the displayed trial window
        return [i for i in q if times[first] < i < times[last]]

    iblplt.vertical_lines(restrict_timestamplist(trials['stimOn_times']),
                          ymin=0,
                          ymax=Clusters[-1],
                          color='m',
                          linewidth=0.5,
                          label='stimOn_times')

    iblplt.vertical_lines(restrict_timestamplist(trials['feedback_times']),
                          ymin=0,
                          ymax=Clusters[-1],
                          color='b',
                          linewidth=0.5,
                          label='feedback_times')

    #    iblplt.vertical_lines(restrict_timestamplist(
    #        trials['stimOff_times']), ymin=0, ymax=Clusters[-1],
    #        color='g', linewidth=0.5, label='stimOff_times')

    plt.xlabel('Time (s)')
    plt.ylabel('Cluster #; ordered by depth')
    plt.legend()
    plt.tight_layout()
    plt.show()
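A hedged usage sketch for the function above; the session UUID and probe label below are placeholders, not real identifiers:

one = ONE()
eid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'  # placeholder session UUID
plot_raster_single_trial(one, eid, trial_number=5, probe='probe00')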
Example #19
channels = ioalf.load_object(session_path, 'channels')
spikes = ioalf.load_object(session_path, 'spikes')  # needed for the raster below
trials = ioalf.load_object(session_path, '_ibl_trials')
T_BIN = 0.01  # time bin in sec

# compute raster map as a function of cluster number
R, times, clusters = bincount2D(spikes['times'], spikes['clusters'], T_BIN)

# plot raster map
plt.imshow(R,
           aspect='auto',
           cmap='binary',
           vmax=T_BIN / 0.001 / 4,
           extent=np.r_[times[[0, -1]], clusters[[0, -1]]],
           origin='lower')
# plot trial start and reward time
reward = trials['feedback_times'][trials['feedbackType'] == 1]
iblplt.vertical_lines(trials['intervals'][:, 0],
                      ymin=0,
                      ymax=clusters[-1],
                      color='k',
                      linewidth=0.5,
                      label='trial starts')
iblplt.vertical_lines(reward,
                      ymin=0,
                      ymax=clusters[-1],
                      color='m',
                      linewidth=0.5,
                      label='valve openings')
plt.xlabel('Time (s)')
plt.ylabel('Cluster #')
plt.legend()
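bincount2D builds the clusters-by-time spike-count image used above; as a rough, numpy-only approximation of that binning (the real ibllib helper takes more options), one might write:

import numpy as np

def bincount2d_sketch(times, clusters, t_bin):
    # Rows: one per unique cluster id; columns: time bins of width t_bin.
    t_edges = np.arange(times.min(), times.max() + t_bin, t_bin)
    c_vals = np.unique(clusters)
    c_edges = np.append(c_vals, c_vals[-1] + 1)
    R, _, _ = np.histogram2d(clusters, times, bins=(c_edges, t_edges))
    return R, t_edges[:-1], c_vals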
Example #20
# Order clusters by similarity of activity using Rastermap (requires R to contain floats)
import rastermap.mapping
model = rastermap.mapping.Rastermap().fit(R.astype(float))
isort = np.argsort(model.embedding[:, 0])
R = R[isort, :]

# Alternatively, order activity by cortical depth of neurons
# d=dict(zip(spikes['clusters'],spikes['depths']))
# y=sorted([[i,d[i]] for i in d])
# isort=argsort([x[1] for x in y])
# R=R[isort,:]

# plot raster map
plt.imshow(R,
           aspect='auto',
           cmap='binary',
           vmax=T_BIN / 0.001 / 4,
           extent=np.r_[times[[0, -1]], clusters[[0, -1]]],
           origin='lower')
# plot go cue times
go_cues = trials['goCue_times']

iblplt.vertical_lines(go_cues,
                      ymin=0,
                      ymax=clusters[-1],
                      color='m',
                      linewidth=0.5,
                      label='goCue_times')
plt.xlabel('Time (s)')
plt.ylabel('Cluster #')
plt.legend()
Example #21
def plot_stims_times(passiveStims_df, ax=None):
    if ax is None:
        f, ax = plt.subplots(1, 1)
    # Look at it
    vertical_lines(
        passiveStims_df["valveOn"].values,
        ymin=2,
        ymax=3,
        color=color_cycle(3),
        ax=ax,
        label="ValveOn_times",
    )
    vertical_lines(
        passiveStims_df["valveOff"].values,
        ymin=2,
        ymax=3,
        color=color_cycle(4),
        ax=ax,
        label="ValveOff_times",
    )
    vertical_lines(
        passiveStims_df["toneOn"].values,
        ymin=1,
        ymax=2,
        color=color_cycle(5),
        ax=ax,
        label="toneOn_times",
    )
    vertical_lines(
        passiveStims_df["toneOff"].values,
        ymin=1,
        ymax=2,
        color=color_cycle(6),
        ax=ax,
        label="toneOff_times",
    )
    vertical_lines(
        passiveStims_df["noiseOn"].values,
        ymin=1,
        ymax=2,
        color=color_cycle(7),
        ax=ax,
        label="noiseOn_times",
    )
    vertical_lines(
        passiveStims_df["noiseOff"].values,
        ymin=1,
        ymax=2,
        color=color_cycle(8),
        ax=ax,
        label="noiseOff_times",
    )

    ax.legend()
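The color_cycle helper used throughout these snippets is not shown on this page; below is a plausible stand-in, assuming it simply indexes into a fixed palette (the actual ibllib palette may differ):

import matplotlib.pyplot as plt

def color_cycle_sketch(i):
    # i-th colour of matplotlib's default prop cycle, wrapping around.
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    return colors[i % len(colors)]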
Example #22
# eqc_butt, eqc_dest and eqc_ks2 are viewer instances created earlier in the original script (not shown here)
_, tspi, xspi = overlay_spikes(eqc_butt, spikes, clusters, channels)
overlay_spikes(eqc_dest, spikes, clusters, channels)
overlay_spikes(eqc_ks2, spikes, clusters, channels)

# Do the driftmap
driftmap(spikes['times'], spikes['depths'], t_bin=0.1, d_bin=5, ax=axes[1])

##
import alf.io

eid = dsets[0]['session'][-36:]  # the last 36 characters of the session URL are the session UUID
tdsets = one.alyx.rest('datasets',
                       'list',
                       session=eid,
                       django='name__icontains,trials.')
one.download_datasets(tdsets)
trials = alf.io.load_object(one.path_from_eid(eid).joinpath('alf'), 'trials')

rewards = trials['feedback_times'][trials['feedbackType'] == 1]

## do drift map
fig, ax = plt.subplots()
driftmap(spikes['times'], spikes['depths'], t_bin=0.1, d_bin=5, ax=ax)
from ibllib.plots import vertical_lines

vertical_lines(rewards, ymin=0, ymax=3800, ax=ax)
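driftmap renders spike counts binned over time and depth; a minimal sketch of equivalent binning and display, assuming that behaviour (not the ibllib implementation):

import numpy as np
import matplotlib.pyplot as plt

def driftmap_sketch(times, depths, t_bin=0.1, d_bin=5, ax=None):
    # 2D histogram of spikes: rows are depth bins, columns are time bins.
    if ax is None:
        _, ax = plt.subplots()
    t_edges = np.arange(times.min(), times.max() + t_bin, t_bin)
    d_edges = np.arange(depths.min(), depths.max() + d_bin, d_bin)
    H, _, _ = np.histogram2d(depths, times, bins=(d_edges, t_edges))
    ax.imshow(H, aspect='auto', cmap='binary', origin='lower',
              extent=[t_edges[0], t_edges[-1], d_edges[0], d_edges[-1]])
    return ax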
Example #23
def check_wheel_angle(eid):

    Plot = True

    one = ONE()
    #eid = 'e1023140-50c1-462a-b80e-5e05626d7f0e' # at least 9 bad cases

    #eid = one.search(subject='ZM_2104', date='2019-09-19', number=1)
    Dataset_types = [
        'wheel.position', 'wheel.timestamps', 'trials.feedback_times',
        'trials.feedbackType'
    ]

    D = one.load(eid,
                 dataset_types=Dataset_types,
                 clobber=False,
                 download_only=True)
    session_path = Path(D[0]).parent

    wheel = alf.io.load_object(session_path, 'wheel')
    trials = alf.io.load_object(session_path, 'trials')
    reward_success = trials['feedback_times'][trials['feedbackType'] == 1]
    reward_failure = trials['feedback_times'][trials['feedbackType'] == -1]

    if Plot:
        plt.plot(wheel['times'], wheel['position'], linestyle='', marker='o')

        #iblplt.vertical_lines(trials['stimOn_times'], ymin=-100, ymax=100,
        #                      color='r', linewidth=0.5, label='stimOn_times')

        #iblplt.vertical_lines(reward_failure, ymin=-100, ymax=100,
        #                      color='b', linewidth=0.5, label='reward_failure')

        iblplt.vertical_lines(reward_success,
                              ymin=-100,
                              ymax=100,
                              color='k',
                              linewidth=0.5,
                              label='reward_success')

        plt.legend()
        plt.xlabel('time [sec]')
        plt.ylabel('wheel linear displacement [cm]')
        plt.show()

    # count reward deliveries with no wheel samples in the time_delay window before the reward
    time_delay = 0.5

    bad_cases1 = []
    for rew in reward_success:

        left = wheel['times'][find_nearest(wheel['times'], rew - time_delay)]
        right = wheel['times'][find_nearest(wheel['times'], rew)]

        if left == right:
            if left < rew - time_delay:
                bad_cases1.append(rew)

    if len(bad_cases1) == 0:
        print('Good news, no impossible case found.')
    else:
        print('Bad news, at least one impossible case found.')
        return len(bad_cases1)
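check_wheel_angle relies on a find_nearest helper that is not defined on this page; a common minimal implementation, offered here as an assumption rather than the original:

import numpy as np

def find_nearest(array, value):
    # Index of the array element closest to `value`.
    # Assumed implementation; the original helper may differ.
    return int(np.argmin(np.abs(np.asarray(array) - value)))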
Example #24
    def create_plots(self, axes,
                     wheel_axes=None, trial_events=None, color_map=None, linestyle=None):
        """
        Plots the data for bnc1 (sound) and bnc2 (frame2ttl)
        :param axes: An axes handle on which to plot the TTL events
        :param wheel_axes: An axes handle on which to plot the wheel trace
        :param trial_events: A list of Bpod trial events to plot, e.g. ['stimFreeze_times'],
        if None, valve, sound and stimulus events are plotted
        :param color_map: A color map to use for the events, default is the tableau color map
        :param linestyle: A line style map to use for the events, default is random.
        :return: None
        """
        color_map = color_map or TABLEAU_COLORS.keys()
        if trial_events is None:
            # Default trial events to plot as vertical lines
            trial_events = [
                'goCue_times',
                'goCueTrigger_times',
                'feedback_times',
                'stimFreeze_times',
                'stimOff_times',
                'stimOn_times'
            ]

        plot_args = {
            'ymin': 0,
            'ymax': 4,
            'linewidth': 2,
            'ax': axes
        }

        bnc1 = self.extractor.frame_ttls
        bnc2 = self.extractor.audio_ttls
        trial_data = self.extractor.data

        plots.squares(bnc1['times'], bnc1['polarities'] * 0.4 + 1, ax=axes, color='k')
        plots.squares(bnc2['times'], bnc2['polarities'] * 0.4 + 2, ax=axes, color='k')
        linestyle = linestyle or random.choices(('-', '--', '-.', ':'), k=len(trial_events))

        if self.extractor.bpod_ttls is not None:
            bpttls = self.extractor.bpod_ttls
            plots.squares(bpttls['times'], bpttls['polarities'] * 0.4 + 3, ax=axes, color='k')
            plot_args['ymax'] = 4
            ylabels = ['', 'frame2ttl', 'sound', 'bpod', '']
        else:
            plot_args['ymax'] = 3
            ylabels = ['', 'frame2ttl', 'sound', '']

        for event, c, l in zip(trial_events, cycle(color_map), linestyle):
            plots.vertical_lines(trial_data[event], label=event, color=c, linestyle=l, **plot_args)

        axes.legend(loc='upper left', fontsize='xx-small', bbox_to_anchor=(1, 0.5))
        axes.set_yticks(list(range(plot_args['ymax'] + 1)))
        axes.set_yticklabels(ylabels)
        axes.set_ylim([0, plot_args['ymax']])

        if wheel_axes:
            wheel_plot_args = {
                'ax': wheel_axes,
                'ymin': self.wheel_data['re_pos'].min(),
                'ymax': self.wheel_data['re_pos'].max()}
            plot_args = {**plot_args, **wheel_plot_args}

            wheel_axes.plot(self.wheel_data['re_ts'], self.wheel_data['re_pos'], 'k-x')
            for event, c, ln in zip(trial_events, cycle(color_map), linestyle):
                plots.vertical_lines(trial_data[event],
                                     label=event, color=c, linestyle=ln, **plot_args)
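The overlay loops above pair each trial event with a colour and a line style via zip and itertools.cycle; a self-contained sketch of just that pairing pattern (event names and colours below are illustrative):

import random
from itertools import cycle

trial_events = ['goCue_times', 'feedback_times', 'stimOn_times']
color_map = ['tab:blue', 'tab:orange', 'tab:green']
linestyles = random.choices(('-', '--', '-.', ':'), k=len(trial_events))

for event, colour, ls in zip(trial_events, cycle(color_map), linestyles):
    print(event, colour, ls)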