Example #1
def compare_wheel_fpga_behaviour(session_path, display=DISPLAY):
    """Extract wheel position from both the FPGA and Bpod, resample both traces onto a
    common time base and return the raw and resampled traces."""
    alf_path = session_path.joinpath('alf')
    shutil.rmtree(alf_path, ignore_errors=True)
    sync, chmap = ephys_fpga.get_main_probe_sync(session_path,
                                                 bin_exists=False)
    fpga_t, fpga_pos = ephys_fpga.extract_wheel_sync(sync, chmap=chmap)
    bpod_t, bpod_pos = training_wheel.get_wheel_position(session_path,
                                                         display=display)
    data, _ = ephys_fpga.extract_all(session_path)
    bpod2fpga = scipy.interpolate.interp1d(data['intervals_bpod'][:, 0],
                                           data['intervals'][:, 0],
                                           fill_value="extrapolate")
    # resample both traces onto a common 100 Hz time base so a correlation coefficient can be computed
    bpod_t = bpod2fpga(bpod_t)
    tmin = max([np.min(fpga_t), np.min(bpod_t)])
    tmax = min([np.max(fpga_t), np.max(bpod_t)])
    wheel = {'tscale': np.arange(tmin, tmax, 0.01)}
    wheel['fpga'] = scipy.interpolate.interp1d(fpga_t,
                                               fpga_pos)(wheel['tscale'])
    wheel['bpod'] = scipy.interpolate.interp1d(bpod_t,
                                               bpod_pos)(wheel['tscale'])
    if display:
        plt.figure()
        plt.plot(fpga_t - bpod2fpga(0), fpga_pos, '*')
        plt.plot(bpod_t - bpod2fpga(0), bpod_pos, '.')
    raw_wheel = {
        'fpga_t': fpga_t,
        'fpga_pos': fpga_pos,
        'bpod_t': bpod_t,
        'bpod_pos': bpod_pos
    }
    return raw_wheel, wheel
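
A minimal usage sketch for the function above (the session path below is hypothetical): once both traces are resampled onto the common time base, the correlation coefficient hinted at in the comment can be computed directly by the caller.

from pathlib import Path
import numpy as np

session_path = Path('/data/subject/2020-01-01/001')  # hypothetical local ephys session
raw_wheel, wheel = compare_wheel_fpga_behaviour(session_path, display=False)
# correlation between the FPGA and Bpod wheel positions on the shared 100 Hz time base
r = np.corrcoef(wheel['fpga'], wheel['bpod'])[0, 1]
print(f'FPGA/Bpod wheel position correlation: {r:.3f}')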
Example #2
    def extract_data(self):
        """Extracts and loads behaviour data for QC
        NB: partial extraction when the bpod_only attribute is False requires intervals and
        intervals_bpod to be assigned to the data attribute before calling this function.
        :return:
        """
        self.log.info(f"Extracting session: {self.session_path}")
        self.type = self.type or get_session_extractor_type(self.session_path)
        self.wheel_encoding = 'X4' if (self.type == 'ephys' and not self.bpod_only) else 'X1'

        if not self.raw_data:
            self.load_raw_data()
        # Run extractors
        if self.type == 'ephys' and not self.bpod_only:
            data, _ = ephys_fpga.extract_all(self.session_path)
            bpod2fpga = interp1d(data['intervals_bpod'][:, 0], data['table']['intervals_0'],
                                 fill_value='extrapolate')
            # Add Bpod wheel data
            re_ts, pos = get_wheel_position(self.session_path, self.raw_data)
            data['wheel_timestamps_bpod'] = bpod2fpga(re_ts)
            data['wheel_position_bpod'] = pos
        else:
            kwargs = dict(save=False, bpod_trials=self.raw_data, settings=self.settings)
            trials, wheel, _ = bpod_trials.extract_all(self.session_path, **kwargs)
            n_trials = np.unique(list(map(lambda k: trials[k].shape[0], trials)))[0]
            if self.type == 'habituation':
                data = trials
                data['position'] = np.array([t['position'] for t in self.raw_data])
                data['phase'] = np.array([t['stim_phase'] for t in self.raw_data])
                # Nasty hack to trim last trial due to stim off events happening at trial num + 1
                data = {k: v[:n_trials] for k, v in data.items()}
            else:
                data = {**trials, **wheel}
        # Update the data attribute with extracted data
        self.data = self.rename_data(data)
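
A self-contained sketch of the Bpod-to-FPGA synchronisation used above, with made-up trial-start times: interp1d is fitted on the trial starts seen by each clock and then maps any Bpod timestamp (here some wheel encoder times) into FPGA time, extrapolating beyond the fitted range.

import numpy as np
from scipy.interpolate import interp1d

# made-up example: the FPGA clock is offset from the Bpod clock by 12.3 s
intervals_bpod = np.array([1.0, 10.0, 20.0, 30.0])   # trial starts in Bpod time
intervals_fpga = intervals_bpod + 12.3                # the same trial starts in FPGA time

bpod2fpga = interp1d(intervals_bpod, intervals_fpga, fill_value='extrapolate')

re_ts = np.array([2.5, 15.5, 35.0])                   # wheel encoder timestamps (Bpod time)
print(bpod2fpga(re_ts))                               # -> [14.8 27.8 47.3] in FPGA time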
Example #3
    def load_raw_data(self):
        _logger.info(f"Loading raw data from {self.session_path}")
        self.raw_data = raw.load_data(self.session_path)
        self.details = raw.load_settings(self.session_path)
        self.BNC1, self.BNC2 = raw.load_bpod_fronts(self.session_path, data=self.raw_data)
        # NOTE: get_wheel_position is actually an extractor; it needs _iblrig_encoderPositions.raw
        # to be on disk even though the file is not passed in as an input...
        # FIXME: the extractor should use the data without assuming the file is there
        ts, pos = get_wheel_position(self.session_path, bp_data=self.raw_data)
        self.wheel_data = {'re_ts': ts, 're_pos': pos}
        assert np.all(np.diff(self.wheel_data["re_ts"]) > 0)
Example #4
    def test_wheel_extraction_training(self):
        for rbf in self.root_path.rglob('raw_behavior_data'):
            session_path = alf.io.get_session_path(rbf)
            _logger.info(f"TRAINING: {session_path}")
            bpod_t, _ = training_wheel.get_wheel_position(session_path)
            self.assertTrue(bpod_t.size)
Example #5
    def load_data(self,
                  download_data: bool = None,
                  extract_times: bool = False,
                  load_video: bool = True) -> None:
        """Extract the data from raw data files
        Extracts all the required task data from the raw data files.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps from the video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param download_data: if True, any missing raw data is downloaded via ONE.
        Missing data will raise an AssertionError
        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        if download_data is not None:
            self.download_data = download_data
        if self.download_data and self.eid and self.one and not self.one.offline:
            self.ensure_required_data()
        _log.info('Gathering data for QC')

        # Get frame count and pin state
        self.data['count'], self.data['pin_state'] = \
            raw.load_embedded_frame_data(self.session_path, self.label, raw=True)

        # Load the audio and raw FPGA times
        if self.type == 'ephys':
            sync, chmap = ephys_fpga.get_main_probe_sync(self.session_path)
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            bpod_data = raw.load_data(self.session_path)
            _, audio_ttls = raw.load_bpod_fronts(self.session_path, bpod_data)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            self.data['timestamps'] = alfio.load_object(
                alf_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, labels=self.label)
            if self.type == 'ephys':
                kwargs = {**kwargs, 'sync': sync, 'chmap': chmap}  # noqa
            outputs, _ = extract_all(self.session_path,
                                     self.type,
                                     save=False,
                                     **kwargs)
            self.data['timestamps'] = outputs[
                f'{self.label}_camera_timestamps']
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            self.data['wheel'] = alfio.load_object(alf_path,
                                                   'wheel',
                                                   short_keys=True)
        except ALFObjectNotFound:
            # Extract from raw data
            if self.type == 'ephys':
                wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
            else:
                wheel_data = training_wheel.get_wheel_position(
                    self.session_path)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find short period of wheel motion for motion correlation.
        if data_for_keys(
                wheel_keys,
                self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(
                self.data['wheel'])

        # Load Bonsai frame timestamps
        try:
            ssv_times = raw.load_camera_ssv_times(self.session_path,
                                                  self.label)
            self.data['bonsai_times'], self.data['camera_times'] = ssv_times
        except AssertionError:
            _log.warning('No Bonsai video timestamps file found')

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()
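
A hypothetical usage of the method above, assuming qc is an instance of the camera QC class this method belongs to and that the raw data are available locally; after the call, the data attribute holds the keys listed in the docstring.

qc.load_data(download_data=False, extract_times=True, load_video=False)

print(qc.data['count'].size)        # number of frames in the embedded frame-counter data
print(qc.data['timestamps'][:5])    # first few (re-)extracted camera.times values
print(qc.data['wheel'].keys())      # rotary encoder timestamps and position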
Example #6
    def extract_data(self, partial=False):
        """Extracts and loads behaviour data for QC
        NB: partial extraction when the bpod_only attribute is False requires intervals and
        intervals_bpod to be assigned to the data attribute before calling this function.
        :param partial: If True, extracts only the required data that aren't usually saved to ALFs
        :return:
        """
        self.log.info(f"Extracting session: {self.session_path}")
        self.type = self.type or raw.get_session_extractor_type(
            self.session_path)
        self.wheel_encoding = 'X4' if (self.type == 'ephys'
                                       and not self.bpod_only) else 'X1'

        # Partial extraction for FPGA sessions is only worthwhile if the intervals have already
        # been extracted and assigned to the data attribute
        data_assigned = self.data and {'intervals', 'intervals_bpod'}.issubset(
            self.data)
        if partial and self.type == 'ephys' and not self.bpod_only and not data_assigned:
            partial = False  # Requires intervals for converting to FPGA time

        if not self.raw_data:
            self.load_raw_data()

        # Signals and parameters not usually saved to file
        if self.type == 'habituation':
            extractors = [
                habit.StimCenterTimes, habit.StimCenterTriggerTimes,
                habit.ItiInTimes, habit.StimOffTriggerTimes
            ]
        else:
            extractors = [
                StimOnTriggerTimes, StimOffTriggerTimes, StimOnOffFreezeTimes,
                StimFreezeTriggerTimes, ErrorCueTriggerTimes, ItiInTimes
            ]

        # Extract the data that are usually saved to file;
        # these extractors must come after the Bpod ones in the list
        if not partial:
            if self.type == 'ephys' and not self.bpod_only:
                extractors.append(FpgaTrials)
            elif self.type == 'habituation':
                extractors.append(habit.HabituationTrials)
            else:
                extractors.extend([
                    Choice, FeedbackType, Intervals, StimOnTimes,
                    GoCueTriggerTimes, Wheel, GoCueTimes, RewardVolume,
                    ResponseTimes, FeedbackTimes, ProbabilityLeft
                ])
                # if type == 'biased':
                #     # FIXME ContrastLR fails on old sessions (contrast is a float, not a dict)
                #     extractors.append(ContrastLR)

        # Run behaviour extractors
        kwargs = dict(save=False,
                      bpod_trials=self.raw_data,
                      settings=self.settings)
        data, _ = run_extractor_classes(extractors,
                                        session_path=self.session_path,
                                        **kwargs)

        n_trials = np.unique(list(map(lambda k: data[k].shape[0], data)))[0]

        # Extract some parameters
        if self.type == 'ephys':
            # For ephys sessions extract quiescence and phase from pre-generated file
            data.update(_get_pregenerated_events(self.raw_data, self.settings))

            if not self.bpod_only:
                # Get the extracted intervals for sync.  For partial ephys extraction attempt to
                # get intervals from data attribute.
                intervals, intervals_bpod = [
                    data[key] if key in data else self.data[key]
                    for key in ('intervals', 'intervals_bpod')
                ]
                # We need to sync the extra extracted data to FPGA time
                # 0.5s iti already removed during extraction so we set duration to 0 here
                ibpod, _, bpod2fpga = bpod_fpga_sync(intervals_bpod,
                                                     intervals,
                                                     iti_duration=0)
                # These fields have to be re-synced
                sync_fields = [
                    'stimOnTrigger_times', 'stimOffTrigger_times',
                    'stimFreeze_times', 'stimFreezeTrigger_times',
                    'errorCueTrigger_times', 'itiIn_times'
                ]
                bpod_fields = [
                    'probabilityLeft', 'contrastLeft', 'contrastRight',
                    'position', 'contrast', 'quiescence', 'phase'
                ]
                if partial:
                    # Remove any extraneous fields, i.e. bpod stimOn, stimOff
                    data = {
                        k: v
                        for k, v in data.items()
                        if k in sync_fields + bpod_fields
                    }
                # Build trials output
                data.update(
                    {k: bpod2fpga(data[k][ibpod])
                     for k in sync_fields})
                data.update({k: data[k][ibpod] for k in bpod_fields})
                # Add Bpod wheel data
                re_ts, pos = get_wheel_position(self.session_path,
                                                self.raw_data)
                data['wheel_timestamps_bpod'] = bpod2fpga(re_ts)
                data['wheel_position_bpod'] = pos

        elif self.type == 'habituation':
            data['position'] = np.array([t['position'] for t in self.raw_data])
            data['phase'] = np.array([t['stim_phase'] for t in self.raw_data])
            # Nasty hack to trim last trial due to stim off events happening at trial num + 1
            data = {k: v[:n_trials] for k, v in data.items()}
        else:
            data['quiescence'] = \
                np.array([t['quiescent_period'] for t in self.raw_data[:n_trials]])
            data['position'] = np.array(
                [t['position'] for t in self.raw_data[:n_trials]])
            # FIXME Check this is valid for biased choiceWorld
            data['phase'] = np.array(
                [t['stim_phase'] for t in self.raw_data[:n_trials]])

        # Update the data attribute with extracted data
        if self.data:
            self.data.update(data)
            self.rename_data(self.data)
        else:
            self.data = data if partial else self.rename_data(data)
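
A hypothetical sketch of the partial-extraction flow described in the docstring above: for an ephys session that is not bpod_only, the FPGA and Bpod trial intervals must already be on the data attribute so that the extra Bpod events can be re-synced to FPGA time. Here extractor, intervals_fpga and intervals_bpod are placeholders for an instance of the class this method belongs to and for previously extracted interval arrays.

extractor.data = {
    'intervals': intervals_fpga,        # previously extracted FPGA trial intervals
    'intervals_bpod': intervals_bpod,   # the matching Bpod trial intervals
}
extractor.extract_data(partial=True)
# extractor.data now also contains the re-synced trigger times and the Bpod wheel trace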