Code example #1
def get_wheel_positions(sync, chmap):
    """
    Gets the wheel position and detected movements from the FPGA synchronisation pulses.
    :param sync: dictionary of synchronisation pulses extracted from the FPGA binary file
    :param chmap: dictionary mapping sync channel names to their channel index
    :return: wheel: dictionary with keys ('timestamps', 'position')
             moves: dictionary with keys ('intervals', 'peakAmplitude', 'peakVelocity_times')
    """
    ts, pos = extract_wheel_sync(sync=sync, chmap=chmap)
    moves = extract_wheel_moves(ts, pos)
    wheel = {'timestamps': ts, 'position': pos}
    return wheel, moves
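
A minimal usage sketch for the function above, assuming `sync` and `chmap` have already been loaded (for example with `get_main_probe_sync`, as in code example #3 below):

# Hypothetical usage; `sync` and `chmap` are assumed to be available already,
# e.g. from get_main_probe_sync(session_path) as in code example #3.
wheel, moves = get_wheel_positions(sync, chmap)
print(wheel['timestamps'].shape, wheel['position'].shape)  # matching 1D arrays
print(moves['intervals'].shape)                            # (n_moves, 2) onset/offset times
print(moves['peakAmplitude'].shape)                        # (n_moves,)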
Code example #2
    def test_extract_wheel_moves(self):
        test_data = self.test_data[1]
        # Wrangle data into expected form
        re_ts = test_data[0][0]
        re_pos = test_data[0][1]

        logger = logging.getLogger('ibllib')
        with self.assertLogs(logger, level='INFO') as cm:
            wheel_moves = extract_wheel_moves(re_ts, re_pos)
            self.assertEqual(
                ['INFO:ibllib:Wheel in cm units using X2 encoding'], cm.output)

        n = 56  # expected number of movements
        self.assertTupleEqual(
            wheel_moves['intervals'].shape, (n, 2),
            'failed to return the correct number of intervals')
        self.assertEqual(wheel_moves['peakAmplitude'].size, n)
        self.assertEqual(wheel_moves['peakVelocity_times'].size, n)

        # Check the first 3 intervals
        ints = np.array([[24.78462599, 25.22562599],
                         [29.58762599, 31.15062599],
                         [31.64262599, 31.81662599]])
        actual = wheel_moves['intervals'][:3, ]
        self.assertIsNone(np.testing.assert_allclose(actual, ints),
                          'unexpected intervals')

        # Check amplitudes
        actual = wheel_moves['peakAmplitude'][-3:]
        expected = [0.50255486, -1.70103154, 1.00740789]
        self.assertIsNone(np.testing.assert_allclose(actual, expected),
                          'unexpected amplitudes')

        # Check peak velocities
        actual = wheel_moves['peakVelocity_times'][-3:]
        expected = [175.13662599, 176.65762599, 178.57262599]
        self.assertIsNone(np.testing.assert_allclose(actual, expected),
                          'unexpected peak velocity times')

        # Test extraction in rad
        re_pos = wh.cm_to_rad(re_pos)
        with self.assertLogs(logger, level='INFO') as cm:
            wheel_moves = ephys_fpga.extract_wheel_moves(re_ts, re_pos)
            self.assertEqual(
                ['INFO:ibllib:Wheel in rad units using X2 encoding'],
                cm.output)

        # Check the first 3 intervals.  As position thresholds are adjusted by units and
        # encoding, we should expect the intervals to be identical to above
        actual = wheel_moves['intervals'][:3, ]
        self.assertIsNone(np.testing.assert_allclose(actual, ints),
                          'unexpected intervals')
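
The second half of the test relies on the movement-detection thresholds being scaled with the position units, so the intervals come out identical for cm and radian input. The conversion itself is just arc length divided by wheel radius; a toy sketch of that relationship (the radius below is a placeholder value, use `wh.cm_to_rad` in practice):

import numpy as np

WHEEL_RADIUS_CM = 3.1  # placeholder radius; wh.cm_to_rad encodes the real rig value

def cm_to_rad_sketch(pos_cm):
    """Convert linear wheel displacement (cm) to angular displacement (rad)."""
    # arc length = radius * angle  =>  angle = arc length / radius
    return np.asarray(pos_cm, dtype=float) / WHEEL_RADIUS_CM

print(cm_to_rad_sketch([0.0, 3.1, 6.2]))  # -> [0. 1. 2.] radians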
Code example #3
    def _extract(self, sync=None, chmap=None, **kwargs):
        """Extracts ephys trials by combining Bpod and FPGA sync pulses"""
        # extract the behaviour data from bpod
        if sync is None or chmap is None:
            _sync, _chmap = get_main_probe_sync(self.session_path, bin_exists=False)
            sync = sync or _sync
            chmap = chmap or _chmap
        # load the Bpod data and perform a biased choice world trials extraction
        bpod_raw = raw_data_loaders.load_data(self.session_path)
        assert bpod_raw is not None, "No task trials data in raw_behavior_data - Exit"
        bpod_trials, _ = biased_trials.extract_all(
            session_path=self.session_path, save=False, bpod_trials=bpod_raw)
        bpod_trials['intervals_bpod'] = np.copy(bpod_trials['intervals'])
        fpga_trials = extract_behaviour_sync(sync=sync, chmap=chmap, bpod_trials=bpod_trials,
                                             tmax=bpod_trials['intervals'][-1, -1] + 60)
        # check consistency and compute the Bpod-to-FPGA clock conversion function
        self.bpod2fpga, drift_ppm, ibpod, ifpga = dsp.utils.sync_timestamps(
            bpod_trials['intervals_bpod'][:, 0], fpga_trials.pop('intervals')[:, 0],
            return_indices=True)
        nbpod = bpod_trials['intervals_bpod'].shape[0]
        nfpga = fpga_trials['feedback_times'].shape[0]
        nsync = len(ibpod)
        _logger.info(f"N trials: {nbpod} bpod, {nfpga} FPGA, {nsync} merged, sync {drift_ppm} ppm")
        if drift_ppm > BPOD_FPGA_DRIFT_THRESHOLD_PPM:
            _logger.warning('BPOD/FPGA synchronization shows values greater than %i ppm',
                            BPOD_FPGA_DRIFT_THRESHOLD_PPM)
        # these fields go directly into the output
        bpod_fields = ['feedbackType', 'choice', 'rewardVolume', 'intervals_bpod']
        # these fields have to be re-synced to the FPGA clock
        bpod_rsync_fields = ['intervals', 'response_times', 'goCueTrigger_times',
                             'stimOnTrigger_times', 'stimOffTrigger_times',
                             'stimFreezeTrigger_times', 'errorCueTrigger_times']

        # build trials output
        out = OrderedDict()
        out.update({k: bpod_trials[k][ibpod] for k in bpod_fields})
        out.update({k: self.bpod2fpga(bpod_trials[k][ibpod]) for k in bpod_rsync_fields})
        out.update({k: fpga_trials[k][ifpga] for k in sorted(fpga_trials.keys())})

        # extract the wheel data
        from ibllib.io.extractors.training_wheel import extract_first_movement_times
        ts, pos = extract_wheel_sync(sync=sync, chmap=chmap)
        moves = extract_wheel_moves(ts, pos)
        settings = raw_data_loaders.load_settings(session_path=self.session_path)
        min_qt = settings.get('QUIESCENT_PERIOD', None)
        first_move_onsets, *_ = extract_first_movement_times(moves, out, min_qt=min_qt)
        out.update({'firstMovement_times': first_move_onsets})

        assert tuple(filter(lambda x: 'wheel' not in x, self.var_names)) == tuple(out.keys())
        return [out[k] for k in out] + [ts, pos, moves['intervals'], moves['peakAmplitude']]
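
The `sync_timestamps` call above aligns the Bpod trial-start times with their FPGA counterparts and reports the residual clock drift in parts per million. Conceptually this is a linear clock model; the sketch below illustrates the idea with an ordinary least-squares fit (it is not the `dsp.utils.sync_timestamps` implementation, and the drift formula is an assumption for illustration only):

import numpy as np

def sketch_sync_timestamps(t_bpod, t_fpga):
    """Fit t_fpga ~ a * t_bpod + b and report the clock drift in ppm."""
    a, b = np.polyfit(t_bpod, t_fpga, 1)    # linear clock model

    def bpod2fpga(t):
        """Map Bpod times onto the FPGA clock."""
        return a * np.asarray(t) + b

    drift_ppm = (a - 1) * 1e6               # deviation from unit slope, in ppm
    return bpod2fpga, drift_ppm

# Toy example: the FPGA clock runs 100 ppm fast relative to Bpod, with a 2 s offset
t_bpod = np.arange(0., 600., 10.)
bpod2fpga, drift = sketch_sync_timestamps(t_bpod, t_bpod * (1 + 100e-6) + 2)
print(round(drift, 1))         # ~100.0
print(bpod2fpga([0., 10.]))    # resynced times on the FPGA clock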
Code example #4
File: one.py  Project: LiuDaveLiu/ibllib
def load_wheel_reaction_times(eid, one=None):
    """
    Return the calculated reaction times for a session.  Reaction times are defined as the time
    between the go cue (onset tone) and the onset of the first substantial wheel movement.  A
    movement is considered sufficiently large if its peak amplitude is at least 1/3rd of the
    distance to threshold (~0.1 radians).

    Negative times mean the onset of the movement occurred before the go cue.  NaNs may occur if
    there was no detected movement within the period, or when the goCue_times or feedback_times
    are themselves NaN.

    Parameters
    ----------
    eid : str
        Session UUID
    one : oneibl.ONE
        An instance of ONE for loading data.  If None a new one is instantiated using the defaults.

    Returns
    -------
    array-like
        reaction times
    """
    if one is None:
        one = ONE()

    trials = one.load_object(eid, 'trials')
    # If already extracted, load and return
    if trials and 'firstMovement_times' in trials:
        return trials['firstMovement_times'] - trials['goCue_times']
    # Otherwise load wheelMoves object and calculate
    moves = one.load_object(eid, 'wheelMoves')
    # Re-extract wheel moves if necessary
    if not moves or 'peakAmplitude' not in moves:
        wheel = one.load_object(eid, 'wheel')
        moves = extract_wheel_moves(wheel['timestamps'], wheel['position'])
    assert trials and moves, 'unable to load trials and wheelMoves data'
    firstMove_times, is_final_movement, ids = extract_first_movement_times(
        moves, trials)
    return firstMove_times - trials['goCue_times']
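
A possible calling pattern for the function above (the session UUID is a placeholder and `load_wheel_reaction_times` is assumed to be in scope, e.g. imported from this one.py module):

import numpy as np
from oneibl.one import ONE   # ONE instance as described in the docstring

one = ONE()
eid = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'  # placeholder session UUID
rt = load_wheel_reaction_times(eid, one=one)
print(np.nanmedian(rt))  # median reaction time in seconds, ignoring NaN trials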
Code example #5
File: ephys_fpga.py  Project: EXYNOS-999/ibllib
    def _extract(self, sync=None, chmap=None):
        # extract trials by combining the Bpod behaviour data with the FPGA sync pulses
        if sync is None or chmap is None:
            _sync, _chmap = _get_main_probe_sync(self.session_path,
                                                 bin_exists=False)
            sync = sync or _sync
            chmap = chmap or _chmap
        bpod_raw = raw_data_loaders.load_data(self.session_path)
        tmax = bpod_raw[-1]['behavior_data']['States timestamps'][
            'exit_state'][0][-1] + 60
        bpod_trials, _ = biased_trials.extract_all(
            session_path=self.session_path, save=False, bpod_trials=bpod_raw)
        bpod_trials['intervals_bpod'] = np.copy(bpod_trials['intervals'])
        fpga_trials = extract_behaviour_sync(sync=sync, chmap=chmap, tmax=tmax)
        # check consistency and compute the Bpod-to-FPGA clock conversion function
        ibpod, ifpga, fcn_bpod2fpga = bpod_fpga_sync(
            bpod_trials['intervals_bpod'], fpga_trials['intervals'])
        # these fields go directly into the output
        bpod_fields = [
            'feedbackType', 'choice', 'rewardVolume', 'intervals_bpod'
        ]
        # these fields have to be re-synced to the FPGA clock
        bpod_rsync_fields = [
            'intervals', 'response_times', 'goCueTrigger_times'
        ]
        # ephys fields to save in the output
        fpga_fields = [
            'stimOn_times', 'stimOff_times', 'goCue_times', 'feedback_times'
        ]
        # get ('probabilityLeft', 'contrastLeft', 'contrastRight') from the custom ephys extractors
        pclcr, _ = ProbaContrasts(self.session_path).extract(
            bpod_trials=bpod_raw, save=False)
        # build trials output
        out = OrderedDict()
        out.update({
            k: pclcr[i][ifpga]
            for i, k in enumerate(ProbaContrasts.var_names)
        })
        out.update({k: bpod_trials[k][ibpod] for k in bpod_fields})
        out.update({
            k: fcn_bpod2fpga(bpod_trials[k][ibpod])
            for k in bpod_rsync_fields
        })
        out.update({k: fpga_trials[k][ifpga] for k in fpga_fields})

        # extract the wheel data
        from ibllib.io.extractors.training_wheel import extract_first_movement_times
        ts, pos = extract_wheel_sync(sync=sync, chmap=chmap)
        moves = extract_wheel_moves(ts, pos)
        settings = raw_data_loaders.load_settings(
            session_path=self.session_path)
        min_qt = settings.get('QUIESCENT_PERIOD', None)
        first_move_onsets, *_ = extract_first_movement_times(moves,
                                                             out,
                                                             min_qt=min_qt)
        out.update({'firstMovement_times': first_move_onsets})

        assert tuple(filter(lambda x: 'wheel' not in x,
                            self.var_names)) == tuple(out.keys())
        return ([out[k] for k in out] +
                [ts, pos, moves['intervals'], moves['peakAmplitude']])
Code example #6
File: wheel.py  Project: vathes/IBL-pipeline
    def make(self, key, one=None):
        # Load the wheel for this session
        move_key = key.copy()
        change_key = move_key.copy()
        one = one or ONE()
        eid, ver = (acquisition.Session & key).fetch1('session_uuid', 'task_protocol')
        logger.info('WheelMoves for session %s, %s', str(eid), ver)

        try:  # Should be able to remove this
            wheel = one.load_object(str(eid), 'wheel')
            all_loaded = (
                all(isinstance(wheel[lab], np.ndarray) for lab in wheel) and
                all(k in wheel for k in ('timestamps', 'position')))
            assert all_loaded, 'wheel data missing'

            # If times and timestamps present, drop times
            if {'times', 'timestamps'}.issubset(wheel):
                wheel.pop('times')
            wheel_moves = extract_wheel_moves(wheel.timestamps, wheel.position)
        except ValueError:
            logger.exception('Failed to find movements')
            raise
        except Exception as ex:
            logger.exception(str(ex))
            raise

        # Build list of table entries
        keys = ('move_id', 'movement_onset', 'movement_offset', 'max_velocity', 'movement_amplitude')
        on_off, amp, vel_t = wheel_moves.values()  # Unpack into short vars
        moves = [dict(zip(keys, (i, on, off, vel_t[i], amp[i])), **move_key)
                 for i, (on, off) in enumerate(on_off)]

        # Calculate direction changes
        Fs = 1000  # sampling frequency (Hz) for the interpolated wheel trace
        re_ts, re_pos = wheel.timestamps, wheel.position
        if len(re_ts.shape) != 1:
            logger.info('2D wheel timestamps')
            if len(re_pos.shape) > 1:  # Ensure 1D array of positions
                re_pos = re_pos.flatten()
            # Linearly interpolate the times
            x = np.arange(re_pos.size)
            re_ts = np.interp(x, re_ts[:, 0], re_ts[:, 1])

        pos, ts = wh.interpolate_position(re_pos, re_ts, freq=Fs)
        vel, _ = wh.velocity_smoothed(pos, Fs)
        change_mask = np.insert(np.diff(np.sign(vel)) != 0, 0, 0)

        changes = []
        for i, (on, off) in enumerate(on_off.reshape(-1, 2)):
            mask = np.logical_and(ts > on, ts < off)
            ind = np.logical_and(mask, change_mask)
            changes.extend(
                dict(change_key, move_id=i, change_id=j, change_time=t) for j, t in enumerate(ts[ind])
            )

        # Get the units of the position data
        units, *_ = infer_wheel_units(wheel.position)
        key['n_movements'] = wheel_moves['intervals'].shape[0]  # total number of movements within the session
        key['total_displacement'] = float(np.diff(wheel.position[[0, -1]]))  # total displacement of the wheel during session
        key['total_distance'] = float(np.abs(np.diff(wheel.position)).sum())  # total movement of the wheel
        key['n_direction_changes'] = sum(change_mask)  # total number of direction changes
        if units == 'cm':  # convert to radians
            key['total_displacement'] = wh.cm_to_rad(key['total_displacement'])
            key['total_distance'] = wh.cm_to_rad(key['total_distance'])
            wheel_moves['peakAmplitude'] = wh.cm_to_rad(wheel_moves['peakAmplitude'])

        # Insert the keys in order
        self.insert1(key)
        self.Move.insert(moves)
        self.DirectionChange.insert(changes)
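
The direction-change step above reduces to flagging sign flips of the smoothed velocity between each movement's onset and offset. A self-contained toy version of that flagging, using a synthetic position trace in place of `wh.interpolate_position` / `wh.velocity_smoothed`:

import numpy as np

Fs = 1000                                    # sampling rate of the interpolated trace, Hz
t = np.arange(0, 2, 1 / Fs)
pos = np.sin(2 * np.pi * t + 0.3)            # synthetic wheel position trace
vel = np.gradient(pos, 1 / Fs)               # crude velocity estimate
# flag samples where the velocity changes sign, as in `make` above
change_mask = np.insert(np.diff(np.sign(vel)) != 0, 0, False)
print(int(change_mask.sum()))                # 4 direction changes for this trace
print(np.round(t[change_mask], 3))           # times of the sign flips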