Example #1
    def test_load_camera_ssv_times(self):
        session = Path(__file__).parent.joinpath('extractors', 'data', 'session_ephys')
        with self.assertRaises(ValueError):
            raw.load_camera_ssv_times(session, 'tail')
        bonsai, camera = raw.load_camera_ssv_times(session, 'body')
        self.assertTrue(bonsai.size == camera.size == 6001)
        self.assertEqual(bonsai.dtype.str, '<M8[ns]')
        self.assertEqual(str(bonsai[0]), '2020-08-19T16:42:57.790361600')
        expected = np.array([69.466875, 69.5, 69.533, 69.566125, 69.59925])
        np.testing.assert_array_equal(expected, camera[:5])
        # Many sessions have the columns in the wrong order.  Here we write 5 lines from the
        # fixture file to another file in a temporary folder, with the columns swapped.
        from_file = session.joinpath('raw_video_data', '_iblrig_bodyCamera.timestamps.ssv')
        with tempfile.TemporaryDirectory() as tempdir:
            # New file with columns swapped
            to_file = Path(tempdir).joinpath('raw_video_data', '_iblrig_leftCamera.timestamps.ssv')
            to_file.parent.mkdir(exist_ok=True)
            with open(from_file, 'r') as a, open(to_file, 'w') as b:
                for i in range(5):
                    # Read a line from the fixture file and write it out with the columns swapped
                    b.write('{1} {0} {2}'.format(*a.readline().split(' ')))
            assert to_file.exists(), 'failed to write test file'
            bonsai, camera = raw.load_camera_ssv_times(to_file.parents[1], 'left')
            # Verify that values are returned in the same order as before
            self.assertEqual(bonsai.dtype.str, '<M8[ns]')
            self.assertEqual(camera.dtype.str, '<f8')
            self.assertAlmostEqual(69.466875, camera[0])
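
A hedged usage sketch for context (the import path and session directory here are assumptions, not from the test): in ibllib the `raw` module above is typically `ibllib.io.raw_data_loaders`, and the loader returns a datetime64 array of Bonsai system times plus a float array of camera timestamps in seconds, as the assertions show.

    from pathlib import Path
    from ibllib.io import raw_data_loaders as raw  # assumed import for `raw`

    session_path = Path('/data/Subjects/ZM_1150/2020-08-19/001')  # hypothetical session
    bonsai, camera = raw.load_camera_ssv_times(session_path, 'body')
    print(bonsai.dtype)  # datetime64[ns]: Bonsai system times, one per frame
    print(camera[:5])    # float seconds from the camera's own clock
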
Example #2
    def test_load_camera_ssv_times(self):
        session = Path(__file__).parent.joinpath('extractors', 'data', 'session_ephys')
        with self.assertRaises(ValueError):
            raw.load_camera_ssv_times(session, 'tail')
        bonsai, camera = raw.load_camera_ssv_times(session, 'body')
        self.assertTrue(bonsai.size == camera.size == 6001)
        self.assertEqual(bonsai.dtype.str, '<M8[ns]')
        self.assertEqual(str(bonsai[0]), '2020-08-19T16:42:57.790361600')
        expected = np.array([69.466875, 69.5, 69.533, 69.566125, 69.59925])
        np.testing.assert_array_equal(expected, camera[:5])
Example #3
    def test_groom_pin_state(self):
        # ibl_witten_27\2021-01-14\001  # Can't assign a pin state
        # CSK-im-002\2021-01-16\001  # Another example
        root = self.data_path
        session_path = root.joinpath('ephys', 'ephys_choice_world_task',
                                     'ibl_witten_27', '2021-01-21', '001')
        _, ts = raw.load_camera_ssv_times(session_path, 'left')
        _, (*_, gpio) = raw.load_embedded_frame_data(session_path, 'left')
        bpod_trials = raw.load_data(session_path)
        _, audio = raw.load_bpod_fronts(session_path, bpod_trials)
        # NB: syncing the timestamps to the audio doesn't work very well, but that doesn't
        # matter for the extraction so long as the audio and GPIO fronts match.
        gpio, audio, _ = camio.groom_pin_state(gpio, audio, ts)
        # Do some checks
        self.assertEqual(gpio['indices'].size, audio['times'].size)
        expected = np.array([164179, 164391, 164397, 164900, 164906], dtype=int)
        np.testing.assert_array_equal(gpio['indices'][-5:], expected)
        expected = np.array([2734.4496, 2737.9659, 2738.0659, 2746.4488, 2746.5488])
        np.testing.assert_array_almost_equal(audio['times'][-5:], expected)

        # Verify behaviour when audio and GPIO match in size
        _, audio_, _ = camio.groom_pin_state(gpio, audio, ts, take='nearest', tolerance=.5)
        self.assertEqual(audio, audio_)

        # Verify behaviour when there are GPIO fronts beyond number of video frames
        ts_short = ts[:gpio['indices'].max() - 10]
        gpio_, *_ = camio.groom_pin_state(gpio, audio, ts_short)
        self.assertFalse(np.any(gpio_['indices'] >= ts.size))
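
The `take='nearest'` matching exercised above can be pictured with a small sketch: for each GPIO front, find the closest audio TTL and keep the pair only when it falls within the tolerance. This illustrates the matching idea only; it is not ibllib's implementation.

    import numpy as np

    def nearest_within(gpio_times, audio_times, tolerance=0.5):
        # Index of the nearest audio TTL for each GPIO time (audio_times sorted)
        idx = np.clip(np.searchsorted(audio_times, gpio_times), 1, audio_times.size - 1)
        nearest = np.where(gpio_times - audio_times[idx - 1] <
                           audio_times[idx] - gpio_times, idx - 1, idx)
        # Keep only assignments within the tolerance (seconds)
        return nearest[np.abs(audio_times[nearest] - gpio_times) < tolerance]
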
Example #4
    def test_groom_pin_state(self):
        """
        e7098000-62a0-46a4-99df-981ee2b56988 (ZFM-01867/2/2021-03-23)
            In this session there were occasions where the GPIO would change twice after
            an audio TTL, perhaps because the audio TTLs are often split up into two short
            TTLs on Bpod, some of which are caught by the camera, others not.

            The function removes the short audio TTLs then assigns the rest to the GPIO fronts.
            The unassigned audio TTL fronts and GPIO changes are removed.  Usually if the TTL
            low-to-high is not assigned to a GPIO front, neither is the high-to-low, so both are
            removed.  However sometimes because of mis-assigning (due to clock drift, short TTLs,
            faulty wiring, etc.) there are some 'orphaned' TTLs/GPIO fronts leaving us with two
            low-to-high fronts (or high-to-low) in a row.  These so-called orphaned fronts
            should be removed too.  The goal is to end up with an array of audio TTL times and
            GPIO times that are the same length.

            Debugging output states:
            - 2316 TTL fronts less than 5 ms apart
            - 11 audio TTL rises were not detected by the camera
            - 346 pin state rises could not be attributed to an audio TTL
            - 10 audio TTL falls were not detected by the camera
            - 345 pin state falls could not be attributed to an audio TTL
            - 3 orphaned TTLs removed

            The output arrays are not aligned per se, but should at least have *most* GPIO fronts
            correctly assigned to the corresponding audio TTLs.
        :return:
        """
        root = self.data_path
        session_path = root.joinpath('camera', 'ZFM-01867', '2021-03-23', '002')
        _, ts = raw.load_camera_ssv_times(session_path, 'left')
        _, (*_, gpio) = raw.load_embedded_frame_data(session_path, 'left')
        bpod_trials = raw.load_data(session_path)
        _, audio = raw.load_bpod_fronts(session_path, bpod_trials)

        # NB: syncing the timestamps to the audio doesn't work very well, but that doesn't
        # matter for the extraction so long as the audio and GPIO fronts match.
        gpio, audio, _ = camio.groom_pin_state(gpio, audio, ts,
                                               take='nearest', tolerance=.5, min_diff=5e-3)
        # Do some checks
        self.assertEqual(gpio['indices'].size, audio['times'].size)
        expected = np.array([446328, 446812, 446814, 447251, 447253], dtype=int)
        np.testing.assert_array_equal(gpio['indices'][-5:], expected)
        expected = np.array([4448.100798, 4452.912398, 4452.934398, 4457.313998, 4457.335998])
        np.testing.assert_array_almost_equal(audio['times'][-5:], expected)
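
The orphaned-front clean-up described in the docstring can be sketched as follows, assuming fronts are encoded as alternating +1/-1 polarities (an illustration, not ibllib's code):

    import numpy as np

    def drop_orphaned_fronts(times, polarities):
        # Two rises (or two falls) in a row mean one front has no partner;
        # drop the first of each same-polarity pair so fronts strictly alternate.
        orphaned = np.where(np.diff(polarities) == 0)[0]
        keep = np.ones(times.size, dtype=bool)
        keep[orphaned] = False
        return times[keep], polarities[keep]
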
Example #5
    def test_check_camera_times(self):
        outcome = self.qc.check_camera_times()
        self.assertEqual('NOT_SET', outcome)

        # Verify passes
        self.qc.side = 'body'
        ts_path = Path(__file__).parents[1].joinpath('extractors', 'data',
                                                     'session_ephys')
        ssv_times = load_camera_ssv_times(ts_path, self.qc.side)
        self.qc.data.bonsai_times, self.qc.data.camera_times = ssv_times
        self.qc.data.video = Bunch({'length': self.qc.data.bonsai_times.size})

        outcome, _ = self.qc.check_camera_times()
        self.assertEqual('PASS', outcome)

        # Verify warning
        n_over = 14
        self.qc.data.video['length'] -= n_over
        outcome, actual = self.qc.check_camera_times()

        self.assertEqual('WARNING', outcome)
        self.assertEqual(n_over, actual)
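
The semantics this test implies can be condensed into a sketch (the return convention is inferred from the assertions above and is an assumption):

    def check_camera_times(camera_times, video_length):
        # PASS when there is exactly one timestamp per reported video frame;
        # otherwise WARNING, together with the number of excess timestamps.
        n_over = camera_times.size - video_length
        return ('PASS', n_over) if n_over == 0 else ('WARNING', n_over)
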
Example #6
    def _extract(self,
                 sync=None,
                 chmap=None,
                 video_path=None,
                 display=False,
                 extrapolate_missing=True):
        """
        The raw timestamps are taken from the FPGA. These are the times of the camera's frame TTLs.
        If the pin state file exists, these timestamps are aligned to the video frames using the
        audio TTLs.  Frames missing from the embedded frame count are removed from the timestamps
        array.
        If the pin state file does not exist, the left and right camera timestamps may be aligned
        using the wheel data.
        :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace.
        :param chmap: dictionary containing channel indices. Defaults to the constant channel map.
        :param video_path: an optional path for fetching the number of frames.  If None,
        the video is loaded from the session path.  If an int is provided this is taken to be
        the total number of frames.
        :param display: if True, the audio and GPIO fronts are plotted.
        :param extrapolate_missing: if True, any missing timestamps at the beginning and end of
        the session are extrapolated based on the median frame rate, otherwise they will be NaNs.
        :return: a numpy array of camera timestamps
        """
        fpga_times = extract_camera_sync(sync=sync, chmap=chmap)
        count, (*_, gpio) = raw.load_embedded_frame_data(self.session_path, self.label)
        raw_ts = fpga_times[self.label]

        if video_path is None:
            filename = f'_iblrig_{self.label}Camera.raw.mp4'
            video_path = self.session_path.joinpath('raw_video_data', filename)
        # Permit the video path to be the length for development and debugging purposes
        length = video_path if isinstance(video_path, int) else get_video_length(video_path)
        _logger.debug(f'Number of video frames = {length}')

        if gpio is not None and gpio['indices'].size > 1:
            _logger.info('Aligning to audio TTLs')
            # Extract audio TTLs
            audio = get_sync_fronts(sync, chmap['audio'])
            _, ts = raw.load_camera_ssv_times(self.session_path, self.label)
            try:
                """
                NB: Some of the audio TTLs occur very close together, and are therefore not
                reflected in the pin state.  This function removes those.  Also converts frame
                times to FPGA time.
                """
                gpio, audio, ts = groom_pin_state(gpio,
                                                  audio,
                                                  ts,
                                                  display=display)
                """
                The length of the count and pin state are regularly longer than the length of
                the video file.  Here we assert that the video is either shorter or the same
                length as the arrays, and  we make an assumption that the missing frames are
                right at the end of the video.  We therefore simply shorten the arrays to match
                the length of the video.
                """
                if count.size > length:
                    count = count[:length]
                else:
                    assert length == count.size, 'fewer counts than frames'
                raw_ts = fpga_times[self.label]
                assert raw_ts.shape[0] > 0, 'no timestamps found in channel indicated for ' \
                                            f'{self.label} camera'
                return align_with_audio(
                    raw_ts,
                    audio,
                    gpio,
                    count,
                    display=display,
                    extrapolate_missing=extrapolate_missing)
            except AssertionError as ex:
                _logger.critical('Failed to extract using audio: %s', ex)

        # If we get here, extraction using audio TTLs was not possible
        _logger.warning('Alignment by wheel data not yet implemented')
        if length < raw_ts.size:
            df = raw_ts.size - length
            _logger.info(f'Discarding first {df} pulses')
            raw_ts = raw_ts[df:]
        return raw_ts
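
Toy numbers for the two length fix-ups in this method (a sketch, not part of ibllib): the count array is truncated when it outruns the video, while surplus FPGA pulses are assumed to precede the first recorded frame and are discarded from the front.

    import numpy as np

    length = 8                              # frames actually in the video file
    count = np.arange(10)[:length]          # embedded counter trimmed to the video
    raw_ts = np.linspace(0.0, 0.3, 10)      # ten FPGA pulses for eight frames
    raw_ts = raw_ts[raw_ts.size - length:]  # discard the first two pulses
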
Example #7
    def _extract(self,
                 video_path=None,
                 display=False,
                 extrapolate_missing=True):
        """
        The raw timestamps are taken from the Bpod. These are the times of the camera's frame TTLs.
        If the pin state file exists, these timestamps are aligned to the video frames using the
        audio TTLs.  Frames missing from the embedded frame count are removed from the timestamps
        array.
        If the pin state file does not exist, the left camera timestamps may be aligned using the
        wheel data.
        :param video_path: an optional path for fetching the number of frames.  If None,
        the video is loaded from the session path.  If an int is provided this is taken to be
        the total number of frames.
        :param display: if True, the audio and GPIO fronts are plotted.
        :param extrapolate_missing: if True, any missing timestamps at the beginning and end of
        the session are extrapolated based on the median frame rate, otherwise they will be NaNs.
        :return: a numpy array of camera timestamps
        """
        raw_ts = self._times_from_bpod()
        count, (*_, gpio) = raw.load_embedded_frame_data(self.session_path, 'left')
        if video_path is None:
            filename = '_iblrig_leftCamera.raw.mp4'
            video_path = self.session_path.joinpath('raw_video_data', filename)
        # Permit the video path to be the length for development and debugging purposes
        length = video_path if isinstance(video_path, int) else get_video_length(video_path)
        _logger.debug(f'Number of video frames = {length}')

        # Check if the GPIO is usable for extraction.  GPIO is None if the file does not exist,
        # is empty, or contains only one value (i.e. doesn't change)
        if gpio is not None and gpio['indices'].size > 1:
            _logger.info('Aligning to audio TTLs')
            # Extract audio TTLs
            _, audio = raw.load_bpod_fronts(self.session_path,
                                            self.bpod_trials)
            _, ts = raw.load_camera_ssv_times(self.session_path, 'left')
            """
            There are many audio TTLs that are for some reason missed by the GPIO.  Conversely
            the last GPIO doesn't often correspond to any audio TTL.  These will be removed.
            The drift appears to be less severe than the FPGA, so when assigning TTLs we'll take
            the nearest TTL within 500ms.  The go cue TTLs comprise two short pulses ~3ms apart.
            We will fuse any TTLs less than 5ms apart to make assignment more accurate.
            """
            try:
                gpio, audio, ts = groom_pin_state(gpio,
                                                  audio,
                                                  ts,
                                                  take='nearest',
                                                  tolerance=.5,
                                                  min_diff=5e-3,
                                                  display=display)
                if count.size > length:
                    count = count[:length]
                else:
                    assert length == count.size, 'fewer counts than frames'

                return align_with_audio(raw_ts,
                                        audio,
                                        gpio,
                                        count,
                                        extrapolate_missing,
                                        display=display)
            except AssertionError as ex:
                _logger.critical('Failed to extract using audio: %s', ex)

        # If we get here, extraction using audio TTLs was not possible
        _logger.warning('Alignment by wheel data not yet implemented')
        # Extrapolate at median frame rate
        n_missing = length - raw_ts.size
        if n_missing > 0:
            _logger.warning(
                f'{n_missing} fewer Bpod timestamps than frames; '
                f'{"extrapolating" if extrapolate_missing else "appending nans"}'
            )
            frate = round(1 / np.median(np.diff(raw_ts)))  # median frame rate in Hz
            to_app = ((np.arange(n_missing) + 1) / frate + raw_ts[-1]
                      if extrapolate_missing else np.full(n_missing, np.nan))
            raw_ts = np.r_[raw_ts, to_app]  # Append the missing times
        elif n_missing < 0:
            _logger.warning(
                f'{abs(n_missing)} fewer frames than Bpod timestamps')
            _logger.info(f'Discarding first {abs(n_missing)} pulses')
            raw_ts = raw_ts[abs(n_missing):]

        return raw_ts
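
A worked toy example of the extrapolation branch above: with timestamps at 30 Hz and two missing frames, the appended times continue past the last timestamp at the median frame rate.

    import numpy as np

    raw_ts = np.array([0., 1., 2., 3.]) / 30          # 30 Hz frame times
    frate = round(1 / np.median(np.diff(raw_ts)))     # 30 frames per second
    to_app = (np.arange(2) + 1) / frate + raw_ts[-1]  # [4/30, 5/30]
    print(np.r_[raw_ts, to_app])                      # six evenly spaced times
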
Example #8
    def load_data(self,
                  download_data: bool = None,
                  extract_times: bool = False,
                  load_video: bool = True) -> None:
        """Extract the data from raw data files
        Extracts all the required task data from the raw data files.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param download_data: if True, any missing raw data is downloaded via ONE.
        Missing data will raise an AssertionError
        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        if download_data is not None:
            self.download_data = download_data
        if self.download_data and self.eid and self.one and not self.one.offline:
            self.ensure_required_data()
        _log.info('Gathering data for QC')

        # Get frame count and pin state
        self.data['count'], self.data['pin_state'] = \
            raw.load_embedded_frame_data(self.session_path, self.label, raw=True)

        # Load the audio and raw FPGA times
        if self.type == 'ephys':
            sync, chmap = ephys_fpga.get_main_probe_sync(self.session_path)
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            bpod_data = raw.load_data(self.session_path)
            _, audio_ttls = raw.load_bpod_fronts(self.session_path, bpod_data)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            self.data['timestamps'] = alfio.load_object(
                alf_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, labels=self.label)
            if self.type == 'ephys':
                kwargs = {**kwargs, 'sync': sync, 'chmap': chmap}  # noqa
            outputs, _ = extract_all(self.session_path,
                                     self.type,
                                     save=False,
                                     **kwargs)
            self.data['timestamps'] = outputs[
                f'{self.label}_camera_timestamps']
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get wheel data (the audio was loaded above)
        wheel_keys = ('timestamps', 'position')
        try:
            self.data['wheel'] = alfio.load_object(alf_path,
                                                   'wheel',
                                                   short_keys=True)
        except ALFObjectNotFound:
            # Extract from raw data
            if self.type == 'ephys':
                wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
            else:
                wheel_data = training_wheel.get_wheel_position(
                    self.session_path)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find short period of wheel motion for motion correlation.
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(
                self.data['wheel'])

        # Load Bonsai frame timestamps
        try:
            ssv_times = raw.load_camera_ssv_times(self.session_path,
                                                  self.label)
            self.data['bonsai_times'], self.data['camera_times'] = ssv_times
        except AssertionError:
            _log.warning('No Bonsai video timestamps file found')

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()
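
A hypothetical call sequence, assuming a configured QC object `qc` with a local session (the names follow the method above):

    qc.load_data(download_data=False, extract_times=True, load_video=False)
    print(qc.data['timestamps'][:5])        # re-extracted camera.times
    print(sorted(qc.data['wheel'].keys()))  # position, timestamps (+ period when set)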