Example #1
    def check_wheel_alignment(self, tolerance=(1, 2), display=False):
        """Check wheel motion in video correlates with the rotary encoder signal

        Check is skipped for body camera videos as the wheel is often obstructed

        :param tolerance: maximum absolute offset in frames.  If two values are given, the
        larger is taken as the warning threshold
        :param display: if true, the wheel motion energy is plotted against the rotary encoder
        :returns: outcome string, frame offset
        """
        wheel_present = data_for_keys(('position', 'timestamps', 'period'),
                                      self.data['wheel'])
        if not wheel_present or self.side == 'body':
            return 'NOT_SET'

        # Check the selected wheel movement period occurred within camera timestamp time
        camera_times = self.data['timestamps']
        in_range = within_ranges(camera_times,
                                 self.data['wheel']['period'].reshape(-1, 2))
        if not in_range.any():
            # Check if any camera timestamps overlap with the wheel times
            if np.any(
                    np.logical_and(
                        camera_times > self.data['wheel']['timestamps'][0],
                        camera_times < self.data['wheel']['timestamps'][-1])):
                _log.warning('Unable to check wheel alignment: '
                             'chosen movement is not during video')
                return 'NOT_SET'
            else:
                # No overlap, return fail
                return 'FAIL'
        aln = MotionAlignment(self.eid,
                              self.one,
                              self.log,
                              session_path=self.session_path)
        aln.data = self.data.copy()
        aln.data['camera_times'] = {self.side: camera_times}
        aln.video_paths = {self.side: self.video_path}
        offset, *_ = aln.align_motion(period=self.data['wheel'].period,
                                      display=display,
                                      side=self.side)
        if offset is None:
            return 'NOT_SET'
        if display:
            aln.plot_alignment()

        # Determine the outcome.  If there are two values for the tolerance, one is taken to be
        # a warning threshold, the other a failure threshold.
        out_map = {0: 'FAIL', 1: 'WARNING', 2: 'PASS'}
        passed = np.abs(offset) <= np.sort(np.array(tolerance))
        return out_map[sum(passed)], int(offset)
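
The outcome mapping above is compact: sorting the tolerance pair and comparing the absolute offset against both thresholds yields a count of thresholds satisfied, which indexes straight into the outcome map. Below is a minimal, self-contained sketch of that logic with illustrative numbers; offset_outcome is a hypothetical helper for demonstration, not part of ibllib.

import numpy as np

def offset_outcome(offset, tolerance=(1, 2)):
    """Map an absolute frame offset onto FAIL/WARNING/PASS.

    With two tolerance values, the smaller is the pass threshold and the
    larger the warning threshold; summing the boolean comparisons counts
    how many thresholds the offset satisfies (0 -> FAIL, 1 -> WARNING,
    2 -> PASS).
    """
    out_map = {0: 'FAIL', 1: 'WARNING', 2: 'PASS'}
    passed = np.abs(offset) <= np.sort(np.array(tolerance))
    return out_map[sum(passed)]

assert offset_outcome(0) == 'PASS'     # within both thresholds
assert offset_outcome(2) == 'WARNING'  # within warning threshold only
assert offset_outcome(5) == 'FAIL'     # beyond both thresholds
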
Example #2
def groom_pin_state(gpio,
                    audio,
                    ts,
                    tolerance=2.,
                    display=False,
                    take='first',
                    min_diff=0.):
    """
    Align the GPIO pin state to the FPGA audio TTLs.  Any audio TTLs not reflected in the pin
    state are removed from the dict and the times of the detected fronts are converted to FPGA
    time.

    Note:
      - This function is ultra safe: we probably don't need to assign all the up and down fronts
      separately and could potentially even align the timestamps without removing the missed fronts
      - The input gpio and audio dicts may be modified by this function
      - For training sessions the frame rate is only 30 Hz and the TTLs tend to be broken up by
      small gaps.  Setting min_diff to 5 ms improves timestamp assignment accuracy.
    :param gpio: array of GPIO pin state values
    :param audio: dict of FPGA audio TTLs (see ibllib.io.extractors.ephys_fpga._get_sync_fronts)
    :param ts: camera frame times
    :param tolerance: two pulses need to be within this many seconds to be considered related
    :param take:  If 'first' the first value within tolerance is assigned; if 'nearest' the
    closest value is assigned
    :param display: If true, the resulting timestamps are plotted against the raw audio signal
    :param min_diff: Audio TTL fronts less than min_diff seconds apart will be removed
    :returns: dict of GPIO FPGA front indices, polarities and FPGA aligned times
    :returns: audio times and polarities sans the TTLs not detected in the frame data
    :returns: frame times in FPGA time
    """
    # Check that the dimensions match
    if np.any(gpio['indices'] >= ts.size):
        _logger.warning('GPIO events occurring beyond timestamps array length')
        keep = gpio['indices'] < ts.size
        gpio = {k: v[keep] for k, v in gpio.items()}
    assert audio['times'].size == audio['polarities'].size, 'audio data dimension mismatch'
    # make sure there are no two consecutive fall or two consecutive rise events
    assert np.all(np.abs(np.diff(audio['polarities'])) == 2), \
        'consecutive high/low audio events'
    # make sure first TTL is high
    assert audio['polarities'][0] == 1
    # make sure audio times in order
    assert np.all(np.diff(audio['times']) > 0)
    # make sure raw timestamps increase
    assert np.all(np.diff(ts) > 0), 'timestamps must strictly increase'
    # make sure there are state changes
    assert gpio['indices'].any(), 'no TTLs detected in GPIO'
    # make sure first GPIO state is high
    assert gpio['polarities'][0] == 1
    """
    Some audio TTLs appear to be so short that they are not recorded by the camera.  These can
    be as short as a few microseconds.  Applying a cutoff based on framerate was unsuccessful.
    Assigning each audio TTL to each pin state change is not easy because some onsets occur very
    close together (sometimes < 70ms), on the order of the delay between TTL and frame time.
    Also, the two clocks have some degree of drift, so the delay between audio TTL and pin state
    change may be zero or even negative.

    Here we split the events into audio onsets (lo->hi) and audio offsets (hi->lo).  For each
    uptick in the GPIO pin state, we take the first audio onset time that was within 100ms of it.
    We ensure that each audio TTL is assigned only once, so a TTL that is closer to frame 3 than
    frame 1 may still be assigned to frame 1.
    """
    ifronts = gpio['indices']  # The pin state flips
    audio_times = audio['times']
    if ifronts.size != audio['times'].size:
        _logger.warning(
            'audio TTL and GPIO state change counts differ; assigning timestamps')
        to_remove = np.zeros(ifronts.size, dtype=bool)  # unassigned GPIO fronts to remove
        low2high = ifronts[gpio['polarities'] == 1]
        high2low = ifronts[gpio['polarities'] == -1]
        assert low2high.size >= high2low.size

        # Remove and/or fuse short TTLs
        if min_diff > 0:
            short, = np.where(np.diff(audio['times']) < min_diff)
            audio_times = np.delete(audio['times'], np.r_[short, short + 1])
            _logger.debug(f'Removed {short.size * 2} TTL fronts less than '
                          f'{min_diff * 1e3:.0f}ms apart')
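            # Worked example (illustrative numbers): with min_diff = 0.005 and
            # fronts at [0.000, 0.002, 0.500, 1.000], np.diff flags index 0, so
            # fronts 0 and 1 (a 2 ms TTL) are deleted outright.  Had the short
            # gap fallen between a fall and the next rise, deleting both fronts
            # would instead fuse the neighbouring TTLs into one.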

        # Onsets
        # Times relative to the first GPIO high / first audio onset
        ups = ts[low2high] - ts[low2high][0]
        onsets = audio_times[::2] - audio_times[0]
        # assign GPIO fronts to audio onset
        assigned = attribute_times(onsets, ups, tol=tolerance, take=take)
        unassigned = np.setdiff1d(np.arange(onsets.size),
                                  assigned[assigned > -1])
        if unassigned.size > 0:
            _logger.debug(
                f'{unassigned.size} audio TTL rises were not detected by the camera'
            )
        # Check that all pin state upticks could be attributed to an onset TTL
        missed = assigned == -1
        if np.any(missed):
            # if np.any(missed := assigned == -1):  # py3.8
            _logger.warning(f'{sum(missed)} pin state rises could '
                            f'not be attributed to an audio TTL')
            if display:
                ax = plt.subplot()
                vertical_lines(ups[assigned > -1],
                               linestyle='-',
                               color='g',
                               ax=ax,
                               label='assigned GPIO up state')
                vertical_lines(ups[missed],
                               linestyle='-',
                               color='r',
                               ax=ax,
                               label='unassigned GPIO up state')
                vertical_lines(onsets[unassigned],
                               linestyle=':',
                               color='k',
                               ax=ax,
                               alpha=0.3,
                               label='audio onset')
                vertical_lines(onsets[assigned],
                               linestyle=':',
                               color='b',
                               ax=ax,
                               label='assigned audio onset')
                plt.legend()
                plt.show()
            # Remove the missed fronts
            to_remove = np.in1d(gpio['indices'], low2high[missed])
            assigned = assigned[~missed]
        onsets_ = audio_times[::2][assigned]

        # Offsets
        downs = ts[high2low] - ts[high2low][0]
        offsets = audio_times[1::2] - audio_times[1]
        assigned = attribute_times(offsets, downs, tol=tolerance, take=take)
        unassigned = np.setdiff1d(np.arange(offsets.size),
                                  assigned[assigned > -1])
        if unassigned.size > 0:
            _logger.debug(
                f'{unassigned.size} audio TTL falls were not detected by the camera'
            )
        # Check that all pin state downticks could be attributed to an offset TTL
        missed = assigned == -1
        if np.any(missed):
            # if np.any(missed := assigned == -1):  # py3.8
            _logger.warning(f'{sum(missed)} pin state falls could '
                            f'not be attributed to an audio TTL')
            # Remove the missed fronts
            to_remove = np.logical_or(
                to_remove, np.in1d(gpio['indices'], high2low[missed]))
            assigned = assigned[~missed]
        offsets_ = audio_times[1::2][assigned]

        # Audio groomed
        if np.any(to_remove):
            # Check for any orphaned fronts (only one pin state edge was assigned)
            to_remove = np.pad(to_remove, (0, to_remove.size % 2),
                               'edge')  # Ensure even size
            # Perform xor to find GPIOs where only onset or offset is marked for removal
            orphaned = to_remove.reshape(-1, 2).sum(axis=1) == 1
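            # Worked example: to_remove = [False, True, False, False] reshapes to
            # [[False, True], [False, False]]; the row sums [1, 0] flag the first
            # onset/offset pair as orphaned (only its offset was unassigned).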
            if orphaned.any():
                """If there are orphaned GPIO fronts (i.e. only one edge was assigned to an
                audio front), remove the orphaned front its assigned audio TTL. In other words
                if both edges cannot be assigned to an audio TTL, we ignore the TTL entirely.
                This is a sign that the assignment was bad and extraction may fail."""
                _logger.warning(
                    'Some onsets but not offsets (or vice versa) were not assigned; '
                    'this may be a sign of faulty wiring or clock drift')
                # Remove orphaned onsets and offsets
                orphaned_onsets, = np.where(~to_remove.reshape(-1, 2)[:, 0]
                                            & orphaned)
                orphaned_offsets, = np.where(~to_remove.reshape(-1, 2)[:, 1]
                                             & orphaned)
                onsets_ = np.delete(onsets_, orphaned_onsets)
                offsets_ = np.delete(offsets_, orphaned_offsets)
                to_remove.reshape(-1, 2)[orphaned] = True

            # Remove those unassigned GPIOs
            gpio = {k: v[~to_remove[:v.size]] for k, v in gpio.items()}
            ifronts = gpio['indices']

            # Assert that we've removed discrete TTLs
            # A failure means e.g. an up-going front of one TTL was missed
            # but not the down-going one.
            assert np.all(np.abs(np.diff(gpio['polarities'])) == 2)
            assert gpio['polarities'][0] == 1

        audio_ = {
            'times': np.empty(ifronts.size),
            'polarities': gpio['polarities']
        }
        audio_['times'][::2] = onsets_
        audio_['times'][1::2] = offsets_
    else:
        audio_ = audio

    # Align the frame times to FPGA
    fcn_a2b, drift_ppm = dsp.sync_timestamps(ts[ifronts], audio_['times'])
    _logger.debug(f'frame audio alignment drift = {drift_ppm:.2f}ppm')
    # Add times to GPIO dict
    gpio['times'] = fcn_a2b(ts[ifronts])

    if display:
        # Plot all the onsets and offsets
        ax = plt.subplot()
        # All Audio TTLS
        squares(audio['times'],
                audio['polarities'],
                ax=ax,
                label='audio TTLs',
                linestyle=':',
                color='k',
                yrange=[0, 1],
                alpha=0.3)
        # GPIO
        x = np.insert(gpio['times'], 0, 0)
        y = np.arange(x.size) % 2
        squares(x, y, ax=ax, label='GPIO')
        y = within_ranges(np.arange(ts.size),
                          ifronts.reshape(-1, 2))  # 0 or 1 for each frame
        ax.plot(fcn_a2b(ts), y, 'kx', label='cam times')
        # Assigned audio
        squares(audio_['times'],
                audio_['polarities'],
                ax=ax,
                label='assigned audio TTL',
                linestyle=':',
                color='g',
                yrange=[0, 1])
        ax.legend()
        plt.xlabel('FPGA time (s)')
        ax.set_yticks([0, 1])
        ax.set_title('GPIO - audio TTL alignment')
        plt.show()

    return gpio, audio_, fcn_a2b(ts)
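
The docstring above describes the assignment strategy: each GPIO uptick is given the first audio onset within tolerance, and each TTL is used at most once. In ibllib this is done by attribute_times; the stand-in below is a hypothetical sketch of the take='first' behaviour under those assumptions, not the library's implementation.

import numpy as np

def assign_first_within_tol(events, targets, tol=2.):
    """Hypothetical stand-in for ibllib's attribute_times (take='first').

    For each target time, take the index of the first event within tol
    seconds that has not already been assigned; -1 marks a miss, so each
    event is assigned at most once.
    """
    assigned = np.full(targets.size, -1, dtype=int)
    taken = np.zeros(events.size, dtype=bool)
    for i, t in enumerate(targets):
        candidates, = np.where(~taken & (np.abs(events - t) <= tol))
        if candidates.size:
            assigned[i] = candidates[0]  # first within tolerance, not nearest
            taken[candidates[0]] = True  # each audio TTL used only once
    return assigned

# Two GPIO upticks near the first two audio onsets; the third onset was too
# brief for the camera to register, so it stays unassigned:
onsets = np.array([0.0, 5.0, 5.05])
ups = np.array([0.01, 5.02])
print(assign_first_within_tol(onsets, ups, tol=0.1))  # -> [0 1]
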
Example #3
def align_with_audio(timestamps,
                     audio,
                     pin_state,
                     count,
                     extrapolate_missing=True,
                     display=False):
    """
    Groom the raw FPGA or Bpod camera timestamps using the frame embedded audio TTLs and frame
    counter.
    :param timestamps: An array of raw FPGA or Bpod camera timestamps
    :param audio: An array of FPGA or Bpod audio TTL times
    :param pin_state: An array of camera pin states
    :param count: An array of frame numbers
    :param extrapolate_missing: If true and there are fewer timestamps than frame counts, the
    remaining timestamps are extrapolated based on the frame rate; otherwise they are set to
    NaN
    :param display: Plot the resulting timestamps
    :return: The corrected frame timestamps
    """
    # Some assertions made on the raw data
    # assert count.size == pin_state.size, 'frame count and pin state size mismatch'
    assert all(np.diff(count) > 0), 'frame count not strictly increasing'
    assert all(np.diff(timestamps) > 0), 'FPGA/Bpod camera times not strictly increasing'
    same_n_ttl = pin_state['times'].size == audio['times'].size
    assert same_n_ttl, 'more audio TTLs detected on camera than TTLs sent'
    """Here we will ensure that the FPGA camera times match the number of video frames in
    length.  We will make the following assumptions:

    1. The number of FPGA camera times is equal to or greater than the number of video frames.
    2. No TTLs were missed between the camera and FPGA.
    3. No pin states were missed by Bonsai.
    4. No pixel count data was missed by Bonsai.

    In other words the count and pin state arrays accurately reflect the number of frames
    sent by the camera and should therefore be the same length, and the length of the frame
    counter should match the number of saved video frames.

    The missing frame timestamps are removed in three stages:

    1. Remove any timestamps that occurred before video frame acquisition in Bonsai.
    2. Remove any timestamps where the frame counter reported missing frames, i.e. remove the
    dropped frames which occurred throughout the session.
    3. Remove the trailing timestamps at the end of the session if the camera was turned off
    in the wrong order.
    """
    # Align on first pin state change
    first_uptick = pin_state['indices'][0]
    first_ttl = np.searchsorted(timestamps, audio['times'][0])
    """Here we find up to which index in the FPGA times we discard by taking the difference
    between the index of the first pin state change (when the audio TTL was reported by the
    camera) and the index of the first audio TTL in FPGA time.  We subtract the difference
    between the frame count at the first pin state change and the index to account for any
    video frames that were not saved during this period (we will remove those from the
    camera FPGA times later).
    """
    # Minus any frames that were dropped between the start of frame acquisition and the
    # first TTL
    start = first_ttl - first_uptick - (count[first_uptick] - first_uptick)
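    # Worked example (illustrative numbers): if the first pin state change is at
    # frame index 5, the first audio TTL falls at FPGA timestamp index 105, and
    # count[5] == 7 (two frames dropped before the first TTL), then
    # start = 105 - 5 - (7 - 5) = 98 leading timestamps are discarded.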
    # Get approximate frame rate for extrapolating timestamps (if required)
    frate = round(1 / np.nanmedian(np.diff(timestamps)))

    if start < 0:
        n_missing = abs(start)
        _logger.warning(
            f'{n_missing} missing FPGA/Bpod timestamp(s) at start; '
            f'{"extrapolating" if extrapolate_missing else "prepending nans"}')
        to_app = (timestamps[0] - np.arange(n_missing, 0, -1) / frate
                  if extrapolate_missing else np.full(n_missing, np.nan))
        timestamps = np.r_[to_app, timestamps]  # Prepend the missing times
        start = 0

    # Remove the extraneous timestamps from the beginning and end
    end = count[-1] + 1 + start
    ts = timestamps[start:end]
    n_missing = count[-1] - ts.size + 1
    if n_missing > 0:
        # if (n_missing := count[-1] - ts.size + 1) > 0:  # py3.8
        """
        For ephys sessions there may be fewer FPGA times than frame counts if SpikeGLX is turned
        off before the video acquisition workflow.  For Bpod this always occurs because Bpod
        finishes before the camera workflow.  For Bpod the times are already extrapolated for
        these late frames."""
        _logger.warning(
            f'{n_missing} fewer FPGA/Bpod timestamps than frame counts; '
            f'{"extrapolating" if extrapolate_missing else "appending nans"}')
        to_app = ((np.arange(n_missing) + 1) / frate + ts[-1]
                  if extrapolate_missing else np.full(n_missing, np.nan))
        ts = np.r_[ts, to_app]  # Append the missing times
    assert ts.size >= count.size
    assert ts.size == count[-1] + 1

    # Remove the rest of the dropped frames
    ts = ts[count]
    assert np.searchsorted(ts, audio['times'][0]) == first_uptick
    if ts.size != count.size:
        _logger.error(
            'number of timestamps and frames don\'t match after alignment')

    if display:
        # Plot to check
        fig, axes = plt.subplots(1, 1)
        y = within_ranges(np.arange(ts.size),
                          pin_state['indices'].reshape(-1, 2)).astype(float)
        y *= 1e-5  # For scale when zoomed in
        axes.plot(ts,
                  y,
                  marker='d',
                  color='blue',
                  drawstyle='steps-pre',
                  label='GPIO')
        axes.plot(ts, np.zeros_like(ts), 'kx', label='FPGA timestamps')
        vertical_lines(audio['times'],
                       ymin=0,
                       ymax=1e-5,
                       color='r',
                       linestyle=':',
                       ax=axes,
                       label='audio TTL')
        plt.legend()

    return ts
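
A hypothetical end-to-end call with synthetic numbers (not real session data), assuming align_with_audio and numpy are importable. Here three extraneous FPGA pulses precede frame acquisition and no frames are dropped:

import numpy as np

frate = 10                               # nominal camera frame rate, Hz
timestamps = np.arange(30) / frate       # 30 FPGA camera pulse times
count = np.arange(20)                    # 20 saved frames, none dropped
# Acquisition began at FPGA pulse index 3, so frame i maps to pulse 3 + i;
# the audio TTL onset/offset land on frames 2 and 5 (pulses 5 and 8)
audio = {'times': timestamps[[5, 8]]}
pin_state = {'indices': np.array([2, 5]), 'times': timestamps[[5, 8]]}

ts = align_with_audio(timestamps, audio, pin_state, count)
assert ts.size == count.size             # one timestamp per saved frame
assert ts[0] == timestamps[3]            # leading extraneous pulses removed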