Example #1
import numpy as np
from oneibl.one import ONE


def constant_reaction_time(eid, rt, st, stype='stim'):
    '''
    Build a dict mapping trial number to
    [alignment time, window length, contrast, side, choice, feedback type]
    for complete trials shorter than 10 s
    '''

    one = ONE()
    if stype == 'motion':
        wheelMoves = one.load_object(eid, 'wheelMoves')
    trials = one.load_object(eid, 'trials')
    d = {}  # dictionary: trial number -> segment info
    evts = [
        'goCue_times', 'feedback_times', 'probabilityLeft', 'choice',
        'feedbackType'
    ]

    for tr in range(len(trials['intervals'])):
        if stype == 'motion':
            a = wheelMoves['intervals'][:, 0]
        b = trials['goCue_times'][tr]
        c = trials['feedback_times'][tr]
        ch = trials['choice'][tr]
        pl = trials['probabilityLeft'][tr]
        ft = trials['feedbackType'][tr]

        if any(np.isnan([trials[k][tr] for k in evts])):
            continue

        if c - b > 10:  # discard trials longer than 10 s
            continue

        if stype == 'motion':
            # keep only movement onsets within the go cue -> feedback interval
            ind = (a > b) & (a < c)
            if not ind.any():
                # no movement onset inside the interval; skip this trial
                continue

            a = a[ind][0]
            react = np.round(a - b, 3)  # reaction time (not used further here)

        if np.isnan(trials['contrastLeft'][tr]):
            cont = trials['contrastRight'][tr]
            side = 0
        else:
            cont = trials['contrastLeft'][tr]
            side = 1

        if stype == 'feedback':
            d[tr] = [c + st, rt, cont, side, ch, ft]
        if stype == 'stim':
            d[tr] = [b + st, rt, cont, side, ch, ft]
        if stype == 'motion':
            d[tr] = [a + st, rt, cont, side, ch, ft]

    print(f"cut {len(d)} of {len(trials['intervals'])} full trials segments")
    return d
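A minimal usage sketch (the eid below is a placeholder; assumes a configured ONE instance):

eid = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'  # hypothetical session UUID
d = constant_reaction_time(eid, rt=2, st=-0.5, stype='stim')
# each value is [alignment time, window length, contrast, side, choice, feedback type]
for tr in list(d)[:5]:
    print(tr, d[tr])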
Example #2
def get_micro_manipulator_data(subject, one=None, force_extract=False):
    """
    Looks for all ephys sessions for a given subject and get the probe micro-manipulator
    trajectories.
    If probes ALF object not on flat-iron, attempts to perform the extraction from meta-data
    and task settings file.
    """
    if not one:
        one = ONE()

    eids, sessions = one.search(subject=subject,
                                task_protocol='ephys',
                                details=True)
    dtypes = [
        'probes.description',
        'probes.trajectory',
    ]
    probes = alf.io.AlfBunch({})
    for ses in sessions:
        sess_path = Path(ses['local_path'])
        probe = None
        if not force_extract:
            probe = one.load_object(ses['url'], 'probes')
        if not probe:
            _logger.warning(f"Re-extraction probe info for {sess_path}")
            dtypes = ['_iblrig_taskSettings.raw', 'ephysData.raw.meta']
            raw_files = one.load(ses['url'],
                                 dataset_types=dtypes,
                                 download_only=True)
            if all([rf is None for rf in raw_files]):
                _logger.warning(
                    f"no raw settings files nor ephys data found for"
                    f" {ses['local_path']}. Skip this session.")
                continue
            extract_probes(sess_path, bin_exists=False)
            probe = alf.io.load_object(sess_path.joinpath('alf'), 'probes')
        one.load(ses['url'],
                 dataset_types='channels.localCoordinates',
                 download_only=True)
        # get each insertion's local site mapping; if not found, assume a checkerboard pattern
        probe['sites_coordinates'] = []
        for prb in probe.description:
            chfile = Path(ses['local_path']).joinpath(
                'alf', prb['label'], 'channels.localCoordinates.npy')
            if chfile.exists():
                probe['sites_coordinates'].append(np.load(chfile))
            else:
                _logger.warning(
                    f"no channels.localCoordinates found for {ses['local_path']}; "
                    f"assuming checkerboard pattern")
                probe['sites_coordinates'].append(SITES_COORDINATES)
        # put the session information in there
        probe['session'] = [ses] * len(probe.description)
        probes = probes.append(probe)
    return probes
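A usage sketch (assumes Alyx credentials are configured; the subject nickname is a placeholder):

probes = get_micro_manipulator_data('ZM_0000')  # hypothetical subject nickname
print('%i probe insertions found' % len(probes['session']))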
Example #3
def load_wheel_reaction_times(eid, one=None):
    """
    Return the calculated reaction times for session.  Reaction times are defined as the time
    between the go cue (onset tone) and the onset of the first substantial wheel movement.   A
    movement is considered sufficiently large if its peak amplitude is at least 1/3rd of the
    distance to threshold (~0.1 radians).

    Negative times mean the onset of the movement occurred before the go cue.  NaNs may occur if
    there was no detected movement within the period, or when the goCue_times or feedback_times
    are nan.

    Parameters
    ----------
    eid : str
        Session UUID
    one : oneibl.ONE
        An instance of ONE for loading data.  If None a new one is instantiated using the defaults.

    Returns
    -------
    array-like
        reaction times
    """
    if one is None:
        one = ONE()

    trials = one.load_object(eid, 'trials')
    # If already extracted, load and return
    if trials and 'firstMovement_times' in trials:
        return trials['firstMovement_times'] - trials['goCue_times']
    # Otherwise load wheelMoves object and calculate
    moves = one.load_object(eid, 'wheelMoves')
    # Re-extract wheel moves if necessary
    if not moves or 'peakAmplitude' not in moves:
        wheel = one.load_object(eid, 'wheel')
        moves = extract_wheel_moves(wheel['timestamps'], wheel['position'])
    assert trials and moves, 'unable to load trials and wheelMoves data'
    firstMove_times, is_final_movement, ids = extract_first_movement_times(
        moves, trials)
    return firstMove_times - trials['goCue_times']
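A quick sanity-check sketch (placeholder eid; assumes ONE is configured):

import numpy as np
eid = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'  # hypothetical session UUID
rt = load_wheel_reaction_times(eid)
print('median RT: %.3f s; %i trials without a movement' %
      (np.nanmedian(rt), np.isnan(rt).sum()))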
Example #4
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

import brainbox.behavior.wheel as wh
from ibllib.io.extractors.ephys_fpga import extract_wheel_moves
from ibllib.io.extractors.training_wheel import extract_first_movement_times
# from ibllib.misc.exp_ref import eid2ref
from oneibl.one import ONE

one = ONE()

sns.set_style('whitegrid')
device_info = (
    'The wheel diameter is {} cm and the number of ticks is {} per revolution'.
    format(wh.WHEEL_DIAMETER, wh.ENC_RES))
print(device_info)

eid = one.search(subject='dop_12', date_range='2020-12-11')[0]
wheel = one.load_object(eid, 'wheel')
wheel_moves = one.load_object(eid, 'wheelMoves')
rt = load_wheel_reaction_times(eid)
trial_data = one.load_object(eid, 'trials')

firstMove_times, is_final_movement, ids = extract_first_movement_times(
    wheel_moves, trial_data)
print('Trials where mouse stuck to one movement: {:.1f}%'.format(
    100 * np.sum(is_final_movement) / len(is_final_movement)))

# Plot some random trials
n_trials = 3  # Number of trials to plot
# Randomly select the trials to plot
trial_ids = np.random.randint(trial_data['choice'].size, size=n_trials)
fig, axs = plt.subplots(1, n_trials, figsize=(8.5, 2.5))
plt.tight_layout()
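The snippet stops before the plotting loop; a sketch of one plausible continuation, using the objects loaded above (the 0.5 s pre-cue window is an arbitrary choice):

for ax, tr in zip(axs, trial_ids):
    t0 = trial_data['goCue_times'][tr]
    t1 = trial_data['feedback_times'][tr]
    mask = (wheel['timestamps'] >= t0 - 0.5) & (wheel['timestamps'] <= t1)
    ax.plot(wheel['timestamps'][mask] - t0, wheel['position'][mask])
    if not np.isnan(firstMove_times[tr]):
        ax.axvline(firstMove_times[tr] - t0, color='r', label='first move')
    ax.set(title='trial %i' % tr, xlabel='time from go cue (s)')
plt.show()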
Example #5
# NOTE: the first line of this snippet was truncated in the source; a plausible
# reconstruction is a session query of the form:
sess = one.alyx.rest('sessions',
                     'list',
                     task_protocol='ephys',
                     django='project__name__'
                     'icontains,ibl_neuropixel_brainwide_01')

if download_data:
    perfs = []
    n_trialses = []
    ps20s = []
    ps80s = []
    rts = []
    not_found = 0
    for i, s in enumerate(sess):
        print(i)
        try:
            trials_all = one.load_object(s['url'][-36:], 'trials')
            trials = dict()
            trials['temp_key'] = trials_all
            perf_easy, n_trials, ps20, ps80, rt = training.compute_bias_info(
                trials, trials_all)
            perfs.append(perf_easy[0])
            n_trialses.append(n_trials[0])
            ps20s.append(ps20)
            ps80s.append(ps80)
            rts.append(rt)
        except Exception as e:
            print(e)
            not_found += 1
    total_n = i + 1  # number of sessions attempted

    with open('perfs', 'wb') as f:
        pickle.dump(perfs, f)
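A matching read-back sketch (assumes the 'perfs' file written above exists):

import pickle
with open('perfs', 'rb') as f:
    perfs = pickle.load(f)
print('%i sessions loaded' % len(perfs))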
Example #6
    def make(self, key):
        # Load the wheel for this session
        move_key = key.copy()
        one = ONE()
        eid, ver = (acquisition.Session & key).fetch1('session_uuid',
                                                      'task_protocol')
        logger.info('WheelMoves for session %s, %s', str(eid), ver)

        try:  # Should be able to remove this
            wheel = one.load_object(str(eid), 'wheel')
            all_loaded = \
                all([isinstance(wheel[lab], np.ndarray) for lab in wheel]) and \
                all(k in wheel for k in ('timestamps', 'position'))
            assert all_loaded, 'wheel data missing'
            alf.io.check_dimensions(wheel)
            if len(wheel['timestamps'].shape) == 1:
                assert wheel['timestamps'].size == wheel[
                    'position'].size, 'wheel data dimension mismatch'
                assert np.all(
                    np.diff(wheel['timestamps']) > 0
                ), 'wheel timestamps not monotonically increasing'
            else:
                logger.debug('2D timestamps')
            # Check the values and units of wheel position
            res = np.array([wh.ENC_RES, wh.ENC_RES / 2, wh.ENC_RES / 4])
            min_change_rad = 2 * np.pi / res
            min_change_cm = wh.WHEEL_DIAMETER * np.pi / res
            pos_diff = np.abs(np.ediff1d(wheel['position']))
            if pos_diff.min() < min_change_cm.min():
                # Assume values are in radians
                units = 'rad'
                encoding = np.argmin(np.abs(min_change_rad - pos_diff.min()))
                min_change = min_change_rad[encoding]
            else:
                units = 'cm'
                encoding = np.argmin(np.abs(min_change_cm - pos_diff.min()))
                min_change = min_change_cm[encoding]
            enc_names = {0: '4X', 1: '2X', 2: '1X'}
            logger.info('Wheel in %s units using %s encoding', units,
                        enc_names[int(encoding)])
            if '_iblrig_tasks_ephys' in ver:
                assert np.allclose(pos_diff, min_change,
                                   rtol=1e-05), 'wheel position skips'
        except ValueError:
            logger.exception('Inconsistent wheel data')
            raise
        except AssertionError as ex:
            logger.exception(str(ex))
            raise
        except Exception as ex:
            logger.exception(str(ex))
            raise

        try:
            # Convert the pos threshold defaults from samples to correct unit
            thresholds = wh.samples_to_cm(np.array([8, 1.5]),
                                          resolution=res[encoding])
            if units == 'rad':
                thresholds = wh.cm_to_rad(thresholds)
            kwargs = {
                'pos_thresh': thresholds[0],
                'pos_thresh_onset': thresholds[1]
            }
            #  kwargs = {'make_plots': True, **kwargs}
            # Interpolate and get onsets
            pos, t = wh.interpolate_position(wheel['timestamps'],
                                             wheel['position'],
                                             freq=1000)
            on, off, amp, peak_vel = wh.movements(t, pos, freq=1000, **kwargs)
            assert on.size == off.size, 'onset/offset number mismatch'
            assert np.all(np.diff(on) > 0) and np.all(np.diff(
                off) > 0), 'onsets/offsets not monotonically increasing'
            assert np.all((off - on) > 0), 'not all offsets occur after onset'
        except ValueError:
            logger.exception('Failed to find movements')
            raise
        except AssertionError as ex:
            logger.exception('Wheel integrity check failed: ' + str(ex))
            raise

        key['n_movements'] = on.size  # total number of movements within the session
        key['total_displacement'] = float(np.diff(
            pos[[0, -1]]))  # total displacement of the wheel during session
        key['total_distance'] = float(np.abs(
            np.diff(pos)).sum())  # total movement of the wheel
        if units == 'cm':  # convert to radians
            key['total_displacement'] = wh.cm_to_rad(key['total_displacement'])
            key['total_distance'] = wh.cm_to_rad(key['total_distance'])
            amp = wh.cm_to_rad(amp)

        self.insert1(key)

        keys = ('move_id', 'movement_onset', 'movement_offset', 'max_velocity',
                'movement_amplitude')
        moves = [
            dict(zip(keys, (i, on[i], off[i], amp[i], peak_vel[i])))
            for i in np.arange(on.size)
        ]
        for x in moves:
            x.update(move_key)

        self.Move.insert(moves)
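The detection core of this make() method can be exercised on its own; a sketch (assumes a wheel ALF object with timestamps and position, and default movement thresholds):

wheel = one.load_object(str(eid), 'wheel')
pos, t = wh.interpolate_position(wheel['timestamps'], wheel['position'], freq=1000)
on, off, amp, peak_vel = wh.movements(t, pos, freq=1000)
print('%i movements detected' % on.size)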
Example #7
    def make(self, key):
        THRESH = .1  # peak amp should be at least .1 rad; ~1/3rd of the threshold
        eid, ver = (acquisition.Session & key).fetch1(
            'session_uuid', 'task_protocol')  # For logging purposes
        logger.info('MovementTimes for session %s, %s', str(eid), ver)
        query = (WheelMoveSet.Move & key).proj('move_id', 'movement_onset',
                                               'movement_offset',
                                               'movement_amplitude')
        wheel_move_data = query.fetch(order_by='move_id')

        query = (behavior.TrialSet.Trial
                 & key).proj('trial_response_choice', 'trial_response_time',
                             'trial_stim_on_time', 'trial_go_cue_time',
                             'trial_feedback_time', 'trial_start_time')
        trial_data = query.fetch(order_by='trial_id')

        if trial_data.size == 0 or wheel_move_data.size == 0:
            logger.warning('Missing DJ trial or move data')
            return

        all_move_onsets = wheel_move_data['movement_onset']
        peak_amp = wheel_move_data['movement_amplitude']
        flinch = abs(peak_amp) < THRESH
        go_trial = trial_data['trial_response_choice'] != 'No Go'
        feedback_times = trial_data['trial_feedback_time']
        cue_times = trial_data['trial_go_cue_time']

        # Check integrity of feedback and start times
        try:
            # Log presence of nans in feedback times (common)
            nan_trial = np.isnan(feedback_times)
            if nan_trial.any():
                n_feedback_nans = np.count_nonzero(nan_trial)
                logger.warning('%i feedback_times nan',
                               np.count_nonzero(nan_trial))
                response_times = trial_data['trial_response_time']
                if n_feedback_nans > np.count_nonzero(
                        np.isnan(response_times)):
                    logger.warning(
                        'using response times instead of feedback times')
                    feedback_times = response_times
                    nan_trial = np.isnan(feedback_times)

            # Assert all feedback times are monotonically increasing
            assert np.all(np.diff(feedback_times[~nan_trial]) > 0
                          ), 'feedback times not monotonically increasing'
            # Log presence of nans in the go cue times (common)
            if np.isnan(cue_times).any():
                # If all nan, use stim on
                if np.isnan(cue_times).all():
                    logger.warning(
                        'trial_go_cue_time is all nan, using trial_stim_on_time'
                    )
                    cue_times = trial_data['trial_stim_on_time']
                    if np.isnan(cue_times).any():
                        n_nan = 'all' if np.isnan(cue_times).all() else str(
                            np.count_nonzero(np.isnan(cue_times)))
                        logger.warning('trial_stim_on_time nan for %s trials',
                                       n_nan)
                else:
                    logger.warning('trial_go_cue_time is nan for %i trials',
                                   np.count_nonzero(np.isnan(cue_times)))
            # Assert all cue times are monotonically increasing
            assert np.all(np.diff(cue_times[~np.isnan(cue_times)]) > 0
                          ), 'cue times not monotonically increasing'
            # Assert all start times occur before feedback times
            # assert np.all((feedback_times[~nan_trial] - start_times) > 0), 'feedback occurs before start time'
        except AssertionError as ex:
            logger.exception('Movement integrity check failed: ' + str(ex))
            raise

        # Get minimum quiescent period for session
        try:
            one = ONE()
            task_params = one.load_object(str(eid), '_iblrig_taskSettings.raw')
            min_qt = task_params['raw']['QUIESCENT_PERIOD']
            if len(min_qt) > len(cue_times):
                min_qt = np.array(min_qt[0:cue_times.size])
        except BaseException:
            logger.warning('failed to load min quiescent time')
            min_qt = 0.2

        # Find first significant movement for each trial.  To be counted, the movement must
        # occur between go cue / stim on and before feedback / response time.  The movement
        # onset is sometimes just before the cue (occurring in the gap between quiescence end and
        # cue start, or during the quiescence period but sub-threshold).  The movement is
        # sufficiently large if it is greater than or equal to THRESH

        # Initialize as nans
        onsets = np.full(trial_data['trial_id'].shape, np.nan)
        ids = np.full(trial_data['trial_id'].shape, int(-1))
        final_movement = np.zeros(trial_data['trial_id'].shape, bool)
        # Iterate over trials, extracting onsets approx. within closed-loop period
        for i, (t1, t2) in enumerate(zip(cue_times - min_qt, feedback_times)):
            if ~np.isnan(t2 - t1):  # If both timestamps defined
                mask = (all_move_onsets > t1) & (all_move_onsets < t2)
                if np.any(mask):  # If any onsets for this trial
                    trial_onset_ids, = np.where(mask)
                    if np.any(~flinch[mask]):  # If any trial moves were sufficiently large
                        ids[i] = trial_onset_ids[~flinch[mask]][0]  # first large move id
                        onsets[i] = all_move_onsets[ids[i]]  # first large move onset
                        final_movement[i] = ids[i] == trial_onset_ids[-1]  # final move of trial
                else:  # Check if trial was no-go
                    if ~go_trial[i]:  # Report if not no-go
                        logger.warning(
                            'failed to find any onsets for trial id %i', i + 1)
            else:  # Log missing timestamps
                logger.warning('no reliable times for trial id %i', i + 1)

        # Create matrix of values for insertion into table
        movement_data = np.c_[trial_data['trial_id'],  # trial_id
                              ids,  # wheel_move_id
                              onsets - cue_times,  # reaction_time
                              final_movement,  # final_movement
                              feedback_times - onsets,  # movement_time
                              feedback_times - cue_times,  # response_time
                              onsets  # movement_onset
                              ]
        data = []
        for row in movement_data:
            if np.isnan(row).any():  # don't insert; leave as null
                logger.warning('nan found for trial %i', row[0])
            else:  # insert row
                data.append(tuple([*key.values(), *list(row)]))
        self.insert(data)
Example #8
import matplotlib.pyplot as plt
from oneibl.one import ONE

import brainbox.plot as bbp

one = ONE()
eid = one.search(lab='wittenlab', date='2019-08-04')[0]
probe_label = 'probe00'

spikes = one.load_object(eid, 'spikes', collection=f'alf/{probe_label}')
trials = one.load_object(eid, 'trials', collection='alf')

# For a simple peth plot without a raster, all we need to input is spike times, clusters, event
# times, and the identity of the cluster we want to plot, e.g. in this case cluster 121

ax = bbp.peri_event_time_histogram(spikes.times, spikes.clusters,
                                   trials.goCue_times, 121)

# Or we can include a raster plot below the PETH:

fig = plt.figure()
ax = plt.gca()
bbp.peri_event_time_histogram(
    spikes.times,  # Spike times first
    spikes.clusters,  # Then cluster ids
    trials.goCue_times,  # Event markers we want to plot against
    121,  # Identity of the cluster we plot
    t_before=0.4,
    t_after=0.4,  # Time before and after the event
    error_bars='sem',  # Whether we want Stdev, SEM, or no error
    include_raster=True,  # adds a raster plot beneath the PETH
)
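To display or save the figure (the filename is a placeholder):

plt.show()  # or fig.savefig('peth_cluster121.png', dpi=150)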
Example #9
eids = one.search(dataset_types=['spikes.times', 'probes.trajectory'],
                  task_protocol='_iblrig_tasks_ephysChoiceWorld')

# Select only the ephysChoiceWorld sessions and sort by eid
recordings = recordings[recordings['eid'].isin(eids)]
recordings = recordings.sort_values('eid').reset_index()

timeconstant = pd.DataFrame()
for i, eid in enumerate(recordings['eid'].values):

    # Load in data (only when not already loaded from other probe)
    print('Processing recording %d of %d' % (i+1, len(recordings)))
    # Load data only when the eid differs from the previous row (probes share sessions)
    if i == 0 or recordings.loc[i - 1, 'eid'] != recordings.loc[i, 'eid']:
        try:
            spikes, clusters, channels = bbone.load_spike_sorting_with_channel(eid, one=one)
            trials = one.load_object(eid, 'trials')
        except Exception:
            continue

    # Get probe
    probe = recordings.loc[i, 'probe']
    if probe not in spikes.keys():
        continue

    # Only use single units
Example #10
"""
Scatter raster of spike depths over time, with points scaled by spike amplitude
"""

import numpy as np
from brainbox.ephys_plots import scatter_raster_plot
from brainbox.plot_base import plot_scatter
from oneibl.one import ONE
import matplotlib.pyplot as plt
import matplotlib

one = ONE()

eid = '671c7ea7-6726-4fbe-adeb-f89c2c8e489b'
probe = 'probe00'

spikes = one.load_object(eid, obj='spikes', collection=f'alf/{probe}')
metrics = one.load_dataset(eid, dataset='clusters.metrics', collection=f'alf/{probe}')

# Find the clusters that have been labelled as good and their corresponding spike indices
good_clusters = np.where(metrics.label == 1)[0]
spike_idx = np.where(np.isin(spikes['clusters'], good_clusters))[0]

# Also filter for nans in amplitude and depth
kp_idx = spike_idx[np.where(~np.isnan(spikes['depths'][spike_idx])
                            & ~np.isnan(spikes['amps'][spike_idx]))[0]]

# Get ScatterPlot object
data = scatter_raster_plot(spikes['amps'][kp_idx], spikes['depths'][kp_idx],
                           spikes['times'][kp_idx])

# Add vertical lines 10 s after the start and 10 s before the end of the recording
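The snippet ends here in the source; a sketch of that last step with plain matplotlib (the amplitude scaling factor is an arbitrary choice):

t0 = spikes['times'][kp_idx][0] + 10
t1 = spikes['times'][kp_idx][-1] - 10
fig, ax = plt.subplots()
ax.scatter(spikes['times'][kp_idx], spikes['depths'][kp_idx],
           s=spikes['amps'][kp_idx] * 1e3, c='k', alpha=0.3)
ax.axvline(t0, color='r')
ax.axvline(t1, color='r')
ax.set(xlabel='time (s)', ylabel='depth (um)')
plt.show()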
Example #11
def plot_wheel_position(eid):
    '''
    Plot wheel-position traces aligned to stimulus onset, colored by choice
    '''
    T_BIN = 0.02
    rt = 2
    st = -0.5

    d = constant_reaction_time(eid, rt, st)

    one = ONE()
    wheel = one.load_object(eid, 'wheel')
    pos, t = wh.interpolate_position(wheel.timestamps,
                                     wheel.position,
                                     freq=1 / T_BIN)
    whe_left = []
    whe_right = []

    for i in d:

        start_idx = find_nearest(t, d[i][0])
        end_idx = start_idx + int(d[i][1] / T_BIN)

        wheel_pos = pos[start_idx:end_idx]
        if len(wheel_pos) == 1:
            # degenerate segment; report trial and indices for debugging
            print(i, [start_idx, end_idx])

        wheel_pos = wheel_pos - wheel_pos[0]

        if d[i][4] == -1:
            whe_left.append(wheel_pos)
        elif d[i][4] == 1:
            whe_right.append(wheel_pos)

    xs = np.arange(len(whe_left[0])) * T_BIN
    times = np.concatenate([
        -1 * np.array(list(reversed(xs[:int(len(xs) * abs(st / rt))]))),
        np.array(xs[:int(len(xs) * (1 - abs(st / rt)))])
    ])

    for i in range(len(whe_left)):
        plt.plot(times, whe_left[i], c='#1f77b4', alpha=0.5, linewidth=0.05)
    for i in range(len(whe_right)):
        plt.plot(times, whe_right[i], c='darkred', alpha=0.5, linewidth=0.05)

    plt.plot(times,
             np.mean(whe_left, axis=0),
             c='#1f77b4',
             linewidth=2,
             label='left')
    plt.plot(times,
             np.mean(whe_right, axis=0),
             c='darkred',
             linewidth=2,
             label='right')

    plt.axhline(y=0.26, linestyle='--', c='k')
    plt.axhline(y=-0.26, linestyle='--', c='k', label='reward boundary')
    plt.axvline(x=0, linestyle='--', c='g', label='stimOn')
    axes = plt.gca()
    #axes.set_xlim([0,rt])
    axes.set_ylim([-0.27, 0.27])
    plt.xlabel('time [sec]')
    plt.ylabel('wheel position [rad]')
    plt.legend(loc='lower right')
    plt.title('wheel positions colored by choice')
    plt.tight_layout()
Example #12
def motion_energy_PSTH(eid):
    '''
    Motion-energy PSTH aligned to stimulus onset.
    Canonical example session:
    eid = '15f742e1-1043-45c9-9504-f1e8a53c1744'
    '''

    rt = 2  # duration of window [sec]
    st = -0.5  # lag of window start w.r.t. the alignment event
    stype = 'stimOn_times'

    ME = {}
    one = ONE()
    trials = one.load_object(eid, 'trials')
    ts = trials.intervals[0][0]
    te = trials.intervals[-1][1]

    try:
        for video_type in ['left', 'right', 'body']:
            t, m = get_ME(eid, video_type)
            m = zscore(m, nan_policy='omit')

            sta, end = find_nearest(t, ts), find_nearest(t, te)
            t = t[sta:end]
            m = m[sta:end]

            ME[video_type] = [t, m]

        # align to body cam
        for video_type in ['left', 'right']:

            # align time series camera/neural
            interpolater = interp1d(ME[video_type][0],
                                    np.arange(len(ME[video_type][0])),
                                    kind="cubic",
                                    fill_value="extrapolate")

            idx_aligned = np.round(interpolater(ME['body'][0])).astype(int)
            ME[video_type] = [ME['body'][0], ME[video_type][1][idx_aligned]]

        D = {}

        fs = 30
        xs = np.arange(rt * fs)  # number of frames
        xs = np.concatenate([
            -1 * np.array(list(reversed(xs[:int(abs(st) * fs)]))),
            np.arange(rt * fs)[1:1 + len(xs[int(abs(st) * fs):])]
        ])
        xs = xs / float(fs)

        cols = {'left': 'r', 'right': 'b', 'body': 'g'}

        for video_type in ME:
            # that's centered at feedback time

            D[video_type] = []

            times, s = ME[video_type]

            trs = trials[stype][20:-20]
            for i in trs:

                start_idx = int(find_nearest(times, i) + st * fs)
                end_idx = int(start_idx + rt * fs)

                D[video_type].append(s[start_idx:end_idx])

            MEAN = np.mean(D[video_type], axis=0)
            STD = np.std(D[video_type], axis=0) / np.sqrt(len(trs))

            plt.plot(xs,
                     MEAN,
                     label=video_type,
                     color=cols[video_type],
                     linewidth=2)
            plt.fill_between(xs,
                             MEAN + STD,
                             MEAN - STD,
                             color=cols[video_type],
                             alpha=0.2)

        ax = plt.gca()
        ax.axvline(x=0, label='stimOn', linestyle='--', c='k')
        plt.title('Motion Energy PSTH')
        plt.xlabel('time [sec]')
        plt.ylabel('z-scored motion energy [a.u.]')
        plt.legend(loc='lower right')

    except Exception:
        plt.title('No motion energy available!')
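The docstring above names a canonical session; a direct usage sketch:

motion_energy_PSTH('15f742e1-1043-45c9-9504-f1e8a53c1744')
plt.show()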
Example #13
# Get list of recordings
eids, ses_info = one.search(dataset_types='spikes.times',
                            task_protocol='_iblrig_tasks_ephysChoiceWorld',
                            details=True)

# Set path to save plots
DATA_PATH, FIG_PATH, SAVE_PATH = paths()
FIG_PATH = join(FIG_PATH, 'WholeBrain')

resp = pd.DataFrame()
for i, eid in enumerate(eids):

    # Load in data
    print('Processing session %d of %d' % (i + 1, len(eids)))
    session_path = one_session_path(eid)
    trials = one.load_object(eid, 'trials')
    probes = one.load_object(eid, 'probes', download_only=False)
    if ((not hasattr(trials, 'stimOn_times'))
            or (len(trials.feedback_times) != len(trials.feedbackType))
            or (len(trials.stimOn_times) != len(trials.probabilityLeft))
            or (not hasattr(probes, 'trajectory'))):
        continue
    for p in range(len(probes['trajectory'])):
        probe_path = session_path.joinpath('alf',
                                           probes['description'][p]['label'])
        try:
            spikes = alf.io.load_object(probe_path, object='spikes')
            clusters = alf.io.load_object(probe_path, object='clusters')
        except Exception:
            continue
        if not hasattr(spikes, 'times'):
Example #14
import numpy as np
from brainbox.population import decode
from sklearn.utils import shuffle
from oneibl.one import ONE
import brainbox.io.one as bbone

# %% Load in data
one = ONE()
eid = one.search(subject='ZM_2240', date_range=['2020-01-23', '2020-01-23'])
spikes, clusters = bbone.load_spike_sorting(eid[0], one=one)
trials = one.load_object(eid[0], 'trials')

# %% Only use units with KS2 label 'good' from probe00

spikes = spikes['probe00']
clusters = clusters['probe00']

clusters_to_use = clusters.metrics.ks2_label == 'good'
spikes.times = spikes.times[np.isin(
    spikes.clusters, clusters.metrics.cluster_id[clusters_to_use])]
spikes.clusters = spikes.clusters[np.isin(
    spikes.clusters, clusters.metrics.cluster_id[clusters_to_use])]
cluster_ids = clusters.metrics.cluster_id[clusters_to_use]

# %% Do decoding
print('Decoding whether the stimulus was on the left or the right...')

stim_times = trials.goCue_times
stim_sides = np.isnan(trials.contrastLeft).astype(int)

# Decode left vs right stimulus from a 1 second window after stimulus onset using default settings:
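The snippet is cut off before the decoding call; one plausible invocation (the decode signature changed across brainbox versions, so the argument names here are assumptions):

# assumed argument names; check your brainbox version's documentation
decoding_result = decode(spikes.times, spikes.clusters,
                         stim_times, stim_sides,
                         pre_time=0, post_time=1)
print(decoding_result)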
Example #15
        xlabel='Frequency (Hz)',
        title='Channel %d' % random_ch[0])

ax3 = fig.add_subplot(gs[1, 1])
ax3.plot(coh_freqs, coh)
ax3.set(xlim=[1, 140],
        ylabel='Coherence',
        xlabel='Frequency (Hz)',
        title='Channel %d and %d' % (random_ch[0], random_ch[1]))

plt.tight_layout(pad=5)

# %% Calculate spike triggered average

# Read in spike data
spikes = one.load_object(eid[0], 'spikes')
clusters = one.load_object(eid[0], 'clusters')

# Pick two random neurons
random_neurons = np.random.choice(
    clusters.metrics.cluster_id[clusters.metrics.ks2_label == 'good'], 2)
spiketrain = spikes.times[spikes.clusters == random_neurons[0]]
sta, time = bb.lfp.spike_triggered_average(signal[random_ch[0], :], spiketrain)

# %% Plot spike triggered LFP

f, ax1 = plt.subplots(1, 1)

ax1.plot(time, sta)
ax1.set(ylabel='Spike triggered LFP average (uV)', xlabel='Time (ms)')
Example #16
# imports assumed from the 2020-era brainbox / oneibl layout
import numpy as np
import pandas as pd
from brainbox.core import TimeSeries
from brainbox.processing import sync
from oneibl.one import ONE


def load_trials_df(eid,
                   one=None,
                   maxlen=None,
                   t_before=0.,
                   t_after=0.,
                   ret_wheel=False,
                   ret_abswheel=False,
                   wheel_binsize=0.02):
    """
    Generate a pandas dataframe of per-trial timing information about a given session.
    Each row in the frame will correspond to a single trial, with timing values indicating timing
    session-wide (i.e. time in seconds since session start). Can optionally return a resampled
    wheel velocity trace of either the signed or absolute wheel velocity.

    The resulting dataframe will have a new set of columns, trial_start and trial_end, which define
    via t_before and t_after the span of time assigned to a given trial.
    (useful for bb.modeling.glm)

    Parameters
    ----------
    eid : str
        Session UUID string to pass to ONE
    one : oneibl.one.OneAlyx, optional
        one object to use for loading. An internal instance is generated if not passed,
        by default None
    maxlen : float, optional
        Maximum trial length for inclusion in df. Trials where feedback - response is longer
        than this value will not be included in the dataframe, by default None
    t_before : float, optional
        Time before stimulus onset to include for a given trial, as defined by the trial_start
        column of the dataframe. If zero, trial_start will be identical to stimOn, by default 0.
    t_after : float, optional
        Time after feedback to include in the trial, as defined by the trial_end
        column of the dataframe. If zero, trial_end will be identical to feedback, by default 0.
    ret_wheel : bool, optional
        Whether to return the time-resampled wheel velocity trace, by default False
    ret_abswheel : bool, optional
        Whether to return the time-resampled absolute wheel velocity trace, by default False
    wheel_binsize : float, optional
        Time bins to resample wheel velocity to, by default 0.02

    Returns
    -------
    pandas.DataFrame
        Dataframe with trial-wise information. Indices are the trial numbers from the original
        data, preserved even if some trials do not meet the maxlen criterion, so the index may
        not be contiguous. Has special columns trial_start and trial_end which define start
        and end times via t_before and t_after
    """
    if not one:
        one = ONE()

    if ret_wheel and ret_abswheel:
        raise ValueError('ret_wheel and ret_abswheel cannot both be true.')

    # Define which datatypes we want to pull out
    trialstypes = [
        'trials.choice',
        'trials.probabilityLeft',
        'trials.feedbackType',
        'trials.feedback_times',
        'trials.contrastLeft',
        'trials.contrastRight',
        'trials.goCue_times',
        'trials.stimOn_times',
    ]

    # A quick function to remap probabilities in those sessions where it was not computed correctly
    def remap_trialp(probs):
        # Block probabilities in trial data aren't accurate and need to be remapped
        validvals = np.array([0.2, 0.5, 0.8])
        diffs = np.abs(np.array([x - validvals for x in probs]))
        maps = diffs.argmin(axis=1)
        return validvals[maps]

    starttimes = one.load(eid, dataset_types=['trials.stimOn_times'])[0]
    endtimes = one.load(eid, dataset_types=['trials.feedback_times'])[0]
    tmp = one.load(eid, dataset_types=trialstypes)

    if maxlen is not None:
        with np.errstate(invalid='ignore'):
            keeptrials = (endtimes - starttimes) <= maxlen
    else:
        keeptrials = range(len(starttimes))
    trialdata = {
        x.split('.')[1]: tmp[i][keeptrials]
        for i, x in enumerate(trialstypes)
    }
    trialdata['probabilityLeft'] = remap_trialp(trialdata['probabilityLeft'])
    trialsdf = pd.DataFrame(trialdata)
    if maxlen is not None:
        trialsdf.set_index(np.nonzero(keeptrials)[0], inplace=True)
    trialsdf['trial_start'] = trialsdf['stimOn_times'] - t_before
    trialsdf['trial_end'] = trialsdf['feedback_times'] + t_after
    if not ret_wheel and not ret_abswheel:
        return trialsdf

    wheel = one.load_object(eid, 'wheel')
    whlpos, whlt = wheel.position, wheel.timestamps
    starttimes = trialsdf['trial_start']
    endtimes = trialsdf['trial_end']
    wh_endlast = 0
    trials = []
    for (start, end) in np.vstack((starttimes, endtimes)).T:
        wh_startind = np.searchsorted(whlt[wh_endlast:], start) + wh_endlast
        wh_endind = np.searchsorted(whlt[wh_endlast:], end,
                                    side='right') + wh_endlast + 4
        wh_endlast = wh_endind
        tr_whlpos = whlpos[wh_startind - 1:wh_endind + 1]
        tr_whlt = whlt[wh_startind - 1:wh_endind + 1] - start
        tr_whlt[0] = 0.  # Manual previous-value interpolation
        whlseries = TimeSeries(tr_whlt, tr_whlpos, columns=['whlpos'])
        whlsync = sync(wheel_binsize, timeseries=whlseries, interp='previous')
        trialstartind = np.searchsorted(whlsync.times, 0)
        trialendind = np.ceil((end - start) / wheel_binsize).astype(int)
        trpos = whlsync.values[trialstartind:trialendind + trialstartind]
        whlvel = trpos[1:] - trpos[:-1]
        whlvel = np.insert(whlvel, 0, 0)
        if np.abs((trialendind - len(whlvel))) > 0:
            raise IndexError(
                'Mismatch between expected length of wheel data and actual.')
        if ret_wheel:
            trials.append(whlvel)
        elif ret_abswheel:
            trials.append(np.abs(whlvel))
    trialsdf['wheel_velocity'] = trials
    return trialsdf
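A usage sketch for the function above (placeholder eid; assumes ONE is configured):

eid = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'  # hypothetical session UUID
df = load_trials_df(eid, maxlen=2., t_before=0.4, t_after=0.6, ret_abswheel=True)
print(df[['trial_start', 'trial_end', 'choice']].head())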
Example #17
'''
Check the status of a specific trajectory: whether the session failed the
behavior criterion, histology tracing, or session QC, and compute
behavioral parameters
'''
import brainbox.behavior.training as training
from oneibl.one import ONE
one = ONE()

traj_id = 'f06d6cd9-a6b8-49a4-90d1-7905d04c2f8b'

traj = one.alyx.rest('trajectories', 'list', id=traj_id)
eid = traj[0]['session']['id']
trials_all = one.load_object(eid, 'trials')
trials = dict()
trials['temp_key'] = trials_all
perf_easy, n_trials, _, _, _ = training.compute_bias_info(trials, trials_all)
print('Performance: %.1f%%, Number of trials: %d' %
      (perf_easy[0] * 100, n_trials[0]))
fail_behav = one.alyx.rest(
    'trajectories',
    'list',
    provenance='Planned',
    id=traj_id,
    django='probe_insertion__session__extended_qc__behavior,0')
if len(fail_behav) > 0:
    print("Behavior criterion FAILED")
else:
    print('Behavior criterion PASSED')
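The docstring also lists histology tracing and session QC checks; a sketch following the same query pattern (the extended_qc key below is an assumption, not a verified API field):

fail_tracing = one.alyx.rest(
    'trajectories',
    'list',
    provenance='Planned',
    id=traj_id,
    django='probe_insertion__session__extended_qc__tracing_exists,False')  # hypothetical key
print('Histology tracing FAILED' if len(fail_tracing) > 0 else 'Histology tracing PASSED')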