Code Example #1
    # re-populate 2nd column, converting tone/wobble 0/1 codes to 1/2
    events[:, 1] = [int(d['trial_id'][0][0][5]) + 1 for d in exp_data]
    keep = np.ones(300, bool)
    # only keep trials that have sufficient spacing before them
    dt = np.diff(events[:, 0]) / raw.info['sfreq']
    keep[1:] = np.logical_and(keep[1:], dt >= t_max - t_min)
    # ... and after them
    keep[:-1] = np.logical_and(keep[:-1], dt >= t_max - t_min)
    # only keep correct trials
    keep = np.logical_and(keep, corrects)
    # also must not have a recent button press
    keep[post_press_idx] = False
    events[~keep, 1] = 999  # effectively remove them
    # restore original shape, to match raws (list of arrays, 1 per exp. block)
    events = [events[l1:l2] for l1, l2 in zip(elims[:-1], elims[1:])]
    epochs = pyeparse.Epochs(raws, events, dict(std=1, dev=2), t_min, t_max)
    zs = epochs.pupil_zscores()
    print('  %s events' % [(epochs.events[:, 1] == jj).sum() for jj in [1, 2]])
    all_data.append([zs[epochs.events[:, 1] == jj] for jj in [1, 2]])
    data.append(
        [np.median(zs[epochs.events[:, 1] == jj], axis=0) for jj in [1, 2]])
    t = epochs.times.copy()
    # bookkeeping for why trials got omitted
    corr.append(corrects)
    ppi.append(post_press_idx)
    dts.append(dt <= t_max - t_min)
data = np.array(data)
np.savez_compressed(avg_data_file,
                    data=data,
                    t=t,
                    hmfx=hmfx,
Code Example #2
import numpy as np
import matplotlib.pyplot as plt

import pyeparse as pp

fname = '../pyeparse/tests/data/test_raw.edf'

raw = pp.read_raw(fname)

# visualize initial calibration
raw.plot_calibration(title='5-Point Calibration')

# create heatmap
raw.plot_heatmap(start=3., stop=60.)

# find events and epoch data
events = raw.find_events('SYNCTIME', event_id=1)
tmin, tmax, event_id = -0.5, 1.5, 1
epochs = pp.Epochs(raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax)

# access pandas data frame and plot single epoch
fig, ax = plt.subplots()
ax.plot(epochs[3].get_data('xpos')[0], epochs[3].get_data('ypos')[0])

# iterate over epochs and access the underlying numpy arrays;
# count epochs without loss of tracking / blinks
print(len([e for e in epochs if not np.isnan(e).any()]))

fig, ax = plt.subplots()
ax.set_title('Superimposed saccade responses')
n_trials = 12  # first 12 trials
for epoch in epochs[:n_trials]:
    ax.plot(epochs.times * 1e3, epoch[0].T)
plt.show()
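
As a small follow-up sketch, the average pupil trace across epochs can be plotted from the same ``epochs`` object; this assumes 'ps' is the pupil-size channel name, as used in the other examples on this page.

# average pupil size across epochs, ignoring samples lost to blinks (NaN)
ps = epochs.get_data('ps')        # assumed shape: (n_epochs, n_times)
mean_ps = np.nanmean(ps, axis=0)

fig, ax = plt.subplots()
ax.plot(epochs.times, mean_ps)
ax.set(xlabel='Time (s)', ylabel='Pupil size (a.u.)', title='Mean pupil response')
plt.show()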
Code Example #3
File: _pupillometry.py  Project: nordme/genz
def find_pupil_dynamic_range(ec, el, prompt=True, verbose=None):
    """Find pupil dynamic range

    Parameters
    ----------
    ec : instance of ExperimentController
        The experiment controller.
    el : instance of EyelinkController
        The Eyelink controller.
    prompt : bool
        If True, a standard prompt message will be displayed.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see expyfun.verbose).

    Returns
    -------
    bgcolor : array
        The background color that maximizes dynamic range.
    fcolor : array
        The corresponding fixation dot color.
    levels : array
        The levels shown.
    responses : array
        The average responses to each level.

    Notes
    -----
    If ``el.dummy_mode`` is on, the test will run at around 10x the speed.
    """
    _check_pyeparse()
    import pyeparse
    if el.recording:
        el.stop()
    el.calibrate()
    if prompt:
        ec.screen_prompt('We will now determine the dynamic '
                         'range of your pupil.\n\n'
                         'Press a button to continue.')
    levels = np.concatenate(([0.], 2**np.arange(8) / 255.))
    fixs = levels + 0.2
    n_rep = 2
    # inter-rep interval (allow system to reset)
    iri = 10.0 if not el.dummy_mode else 1.0
    # amount of time between levels
    settle_time = 3.0 if not el.dummy_mode else 0.3
    fix = FixationDot(ec)
    fix.set_colors([fixs[0] * np.ones(3), 'k'])
    ec.set_background_color('k')
    fix.draw()
    ec.flip()
    for ri in range(n_rep):
        ec.wait_secs(iri)
        for ii, (lev, fc) in enumerate(zip(levels, fixs)):
            ec.identify_trial(ec_id='FPDR_%02i' % (ii + 1),
                              el_id=[ii + 1],
                              ttl_id=())
            bgcolor = np.ones(3) * lev
            fcolor = np.ones(3) * fc
            ec.set_background_color(bgcolor)
            fix.set_colors([fcolor, bgcolor])
            fix.draw()
            ec.start_stimulus()
            ec.wait_secs(settle_time)
            ec.check_force_quit()
            ec.stop()
            ec.trial_ok()
        ec.set_background_color('k')
        fix.set_colors([fixs[0] * np.ones(3), 'k'])
        fix.draw()
        ec.flip()
    el.stop()  # stop the recording
    ec.screen_prompt('Processing data, please wait...',
                     max_wait=0,
                     clear_after=False)

    # now we need to parse the data
    if el.dummy_mode:
        resp = sigmoid(np.tile(levels, n_rep), 1000, 3000, 0.01, -100)
        resp += np.random.rand(*resp.shape) * 500 - 250
    else:
        # Pull data locally
        assert len(el.file_list) >= 1
        raw, events = _load_raw(el, el.file_list[-1])
        assert len(events) == len(levels) * n_rep
        epochs = pyeparse.Epochs(raw, events, 1, -0.5, settle_time)
        assert len(epochs) == len(levels) * n_rep
        idx = epochs.n_times // 2
        resp = np.median(epochs.get_data('ps')[:, idx:], 1)
    bgcolor = np.mean(resp.reshape((n_rep, len(levels))), 0)
    idx = np.argmin(np.diff(bgcolor)) + 1
    bgcolor = levels[idx] * np.ones(3)
    fcolor = fixs[idx] * np.ones(3)
    logger.info('Pupillometry: optimal background color {0}'.format(bgcolor))
    return bgcolor, fcolor, np.tile(levels, n_rep), resp
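
A possible call sequence, sketched only from the two signatures shown on this page: the background and fixation colors returned here are the ``bgcolor`` and ``fcolor`` arguments expected by the tone routine in Code Example #5.

# sketch: chain the two pupillometry routines (ec / el as in the examples)
bgcolor, fcolor, levels, responses = find_pupil_dynamic_range(ec, el)
srf, t, std_err = find_pupil_tone_impulse_response(ec, el, bgcolor, fcolor)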
Code Example #4
def process_fixations(sub, n, epoch_epoch_start, epoch_epoch_end,
                      eyetracking_dir, behavioural_dir, simulated_dir,
                      left_aoi, right_aoi, phase):
    """
    Produces a dataframe of fixation bias indices for each trial and each subject

    Args:
        epoch_epoch_start: Start of the epoch (post trial phase onset)
        epoch_epoch_end: End of the epoch
        eyetracking_dir: Directory containing eyetracking data
        behavioural_data: Directory containing behavioural data for all subjects
        simulated_data: Directory containing simulated data for all subjects
        left_aoi: matplotlib path containing left aoi
        right_aoi: matplotlib path containing right aoi

    Returns:
        A dataframe

    """

    # Load eyetracking data
    et_path = os.path.join(eyetracking_dir, [
        i for i in os.listdir(eyetracking_dir)
        if str(sub) in i and 'eyetracker' in i
    ][0])

    raw = pp.read_raw(et_path)

    # Get trials
    preoutcome_events = raw.find_events('_preoutcome', 1)
    outcome_events = raw.find_events('_outcome', 1)

    for n, i in enumerate(raw.discrete['messages']):
        m = re.search('Trial_[0-9]+$', i[1])
        if m:
            raw.discrete['messages'][n] = (i[0], i[1] + '_start')
    trial_start_events = raw.find_events('_start', 1)

    # Trial numbers
    trial_numbers = [
        int(re.search(r'\d+', i[1]).group()) for i in raw.discrete['messages']
        if phase in i[1]
    ]
    trial_start_events = trial_start_events[
        trial_numbers]  # Remove non-existent trials and shift in time

    tmin, tmax, event_id = epoch_epoch_start, epoch_epoch_end, 1
    preoutcome_epochs = pp.Epochs(raw,
                                  events=preoutcome_events,
                                  event_id=event_id,
                                  tmin=tmin,
                                  tmax=tmax)

    if len(trial_numbers) > len(preoutcome_epochs):
        trial_numbers = trial_numbers[:len(preoutcome_epochs)]

    preoutcome_fixations = {}

    preoutcome_duration_total = 0
    blink_duration_total = 0

    # The preoutcome phase doesn't have a standard duration so we need to customise this for each trial
    for n in range(len(preoutcome_epochs)):
        if 'preoutcome' in phase:
            preoutcome_duration = raw.times[outcome_events[n][0]] - raw.times[
                preoutcome_events[n][0]]
        else:
            preoutcome_duration = raw.times[
                trial_start_events[n][0]] - raw.times[outcome_events[n][0]]
            if preoutcome_duration > 7:
                preoutcome_duration = 4  # durations before breaks can be longer than they should be, so set to the minimum expected
        preoutcome_fixations[trial_numbers[n]] = preoutcome_epochs[
            n].fixations[0][preoutcome_epochs[n].fixations[0]['stime'] <
                            preoutcome_duration]
        preoutcome_fixations[trial_numbers[n]]['etime'][
            preoutcome_fixations[trial_numbers[n]]['etime'] >
            preoutcome_duration] = preoutcome_duration

        preoutcome_blinks = preoutcome_epochs[n].blinks[0][
            preoutcome_epochs[n].blinks[0]['stime'] < preoutcome_duration]
        preoutcome_blinks['etime'][preoutcome_blinks['etime'] >
                                   preoutcome_duration] = preoutcome_duration
        blink_duration_total += np.sum(
            np.array([blink[1] - blink[0] for blink in preoutcome_blinks]))
        preoutcome_duration_total += preoutcome_duration

        if np.any(preoutcome_fixations[trial_numbers[n]]['etime'] -
                  preoutcome_fixations[trial_numbers[n]]['stime'] < 0):
            print(preoutcome_fixations[trial_numbers[n]])
            raise ValueError(
                "Fixations of less than 0 seconds on trial {0}".format(n))

    # Load behavioural
    behavioural_path = [
        i for i in os.listdir(behavioural_dir)
        if str(sub) in i and 'behaviour' in i
    ][0]
    behavioural = pd.read_csv(os.path.join(behavioural_dir, behavioural_path))
    behavioural = behavioural[(~behavioural.A_shock_prob.isnull()) &
                              (behavioural.trial_number != 999)].reset_index()

    # Load behavioural & simulated data
    simulated = pd.read_csv(
        os.path.join(simulated_dir, '{0}_simulated_data.txt'.format(sub)))
    simulated.loc[:, 'A_pe'] = simulated.loc[:, 'A_Outcome'] - simulated.loc[:, 'A_True_response']
    simulated.loc[:, 'B_pe'] = simulated.loc[:, 'B_Outcome'] - simulated.loc[:, 'B_True_response']
    simulated.loc[:, 'abs_pe_RL_diff'] = np.abs(simulated.A_pe) - np.abs(simulated.B_pe)
    simulated.loc[:, 'abs_pe_RL_diff'] = np.roll(simulated.loc[:, 'abs_pe_RL_diff'], 1)
    simulated.loc[:, 'prob_estimate_RL_diff'] = simulated.A_True_response - simulated.B_True_response
    simulated.loc[:, 'model_prob_estimate_RL_diff'] = simulated.A_Response - simulated.B_Response
    simulated.loc[:, 'var_RL_diff'] = simulated.A_var - simulated.B_var
    simulated.loc[:, 'objective_prob_RL_diff'] = behavioural.A_shock_prob - behavioural.B_shock_prob

    # trial number correction
    simulated.trial_number += 1

    # Get bias
    bias = []
    # Fixation durations (total for each trial)
    l_durations = []
    r_durations = []
    # First fixation durations for each stimulus
    l_first_durations = []
    r_first_durations = []
    outside_durations = []
    # First fixation location
    first_fix_locations = []

    l_duration_total = 0
    r_duration_total = 0
    outside_duration_total = 0

    for e in range(1, 161):
        if e not in trial_numbers:
            bias.append(np.nan)
            l_durations.append(np.nan)
            r_durations.append(np.nan)
            l_first_durations.append(np.nan)
            r_first_durations.append(np.nan)
            outside_durations.append(np.nan)
            first_fix_locations.append(np.nan)
        else:
            fix_locs = np.array([
                get_aoi(fix[2], fix[3], left_aoi, right_aoi)
                for fix in preoutcome_fixations[e]
            ])
            fix_duration = np.array(
                [fix[1] - fix[0] for fix in preoutcome_fixations[e]])

            l_duration = (fix_duration[fix_locs < 0]).sum()
            l_duration_total += l_duration
            r_duration = (fix_duration[fix_locs > 0]).sum()
            r_duration_total += r_duration

            stim_fix_locs = [i for i in fix_locs if not np.isnan(i)]
            if not len(stim_fix_locs):
                first_fix_locations.append(np.nan)
            else:
                first_fix_locations.append(stim_fix_locs[0])

            # First fixations
            if len(fix_duration[fix_locs < 0]):
                l_first_duration = fix_duration[fix_locs < 0][0]
            else:
                l_first_duration = 0

            if len(fix_duration[fix_locs > 0]):
                r_first_duration = fix_duration[fix_locs > 0][0]
            else:
                r_first_duration = 0

            outside_duration = (fix_duration[np.isnan(fix_locs)]).sum()
            outside_duration_total += outside_duration
            if l_duration > 0 or r_duration > 0:
                b = l_duration / (l_duration + r_duration)
            else:
                b = np.nan
            bias.append(b)
            l_durations.append(l_duration)
            r_durations.append(r_duration)
            l_first_durations.append(l_first_duration)
            r_first_durations.append(r_first_duration)
            outside_durations.append(outside_duration)

    bias = np.array(bias)
    bias[np.isnan(bias)] = np.nanmean(bias)  # Impute nans

    bias_df = behavioural[['Subject', 'trial_number',
                           'Outcome_image_L']].copy()
    bias_df.loc[:, 'bias'] = bias
    bias_df.loc[:, 'l_duration'] = l_durations
    bias_df.loc[:, 'r_duration'] = r_durations
    bias_df.loc[:, 'l_first_duration'] = l_first_durations
    bias_df.loc[:, 'r_first_duration'] = r_first_durations
    bias_df.loc[:, 'first_fixation_location'] = first_fix_locations
    bias_df.loc[:, 'outside_duration'] = outside_durations
    bias_df.loc[:, 'l_prop'] = bias_df.loc[:, 'l_duration'] / (
        bias_df.loc[:, 'l_duration'] + bias_df.loc[:, 'r_duration'] +
        bias_df.loc[:, 'outside_duration'])
    bias_df.loc[:, 'r_prop'] = bias_df.loc[:, 'r_duration'] / (
        bias_df.loc[:, 'l_duration'] + bias_df.loc[:, 'r_duration'] +
        bias_df.loc[:, 'outside_duration'])
    bias_df.loc[:, 'l_prop'] = np.nanmean(bias_df.loc[:, 'l_prop'])
    bias_df.loc[:, 'r_prop'] = np.nanmean(bias_df.loc[:, 'r_prop'])

    bias_df.loc[:, 'preoutcome_duration_total'] = preoutcome_duration_total
    bias_df.loc[:, 'blink_duration_total'] = blink_duration_total
    bias_df.loc[:, 'blink_proportion'] = blink_duration_total / preoutcome_duration_total
    bias_df.loc[:, 'l_duration_total'] = l_duration_total
    bias_df.loc[:, 'r_duration_total'] = r_duration_total
    bias_df.loc[:, 'outside_duration_total'] = outside_duration_total
    bias_df.loc[:, 'left_proportion'] = l_duration_total / (r_duration_total +
                                                            l_duration_total)
    bias_df.loc[:, 'right_proportion'] = r_duration_total / (r_duration_total +
                                                             l_duration_total)
    bias_df.loc[:, 'outside_proportion'] = outside_duration_total / (
        r_duration_total + l_duration_total + outside_duration_total)

    bias_df = pd.merge(bias_df,
                       simulated[[
                           'trial_number', 'prob_estimate_RL_diff',
                           'var_RL_diff', 'objective_prob_RL_diff', 'A_var',
                           'A_True_response', 'B_var', 'B_True_response',
                           'A_Outcome', 'B_Outcome', 'abs_pe_RL_diff', 'A_pe',
                           'B_pe', 'model_prob_estimate_RL_diff'
                       ]],
                       on='trial_number')

    bias_df2 = bias_df[[c for c in bias_df.columns if 'B_' not in c]].copy()
    bias_df3 = bias_df[[c for c in bias_df.columns if 'A_' not in c]].copy()
    bias_df2.loc[bias_df2.Outcome_image_L == 'B',
                 'l_prop'] = bias_df2['r_prop']
    bias_df3.loc[bias_df2.Outcome_image_L == 'A',
                 'l_prop'] = bias_df3['r_prop']

    bias_df2['stimulus'] = 0
    bias_df3['stimulus'] = 1

    bias_df2.columns = [c.replace('A_', '') for c in bias_df2.columns]
    bias_df3.columns = [c.replace('B_', '') for c in bias_df3.columns]

    duration_df = pd.concat([bias_df2, bias_df3])

    return bias_df, duration_df
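
The helper ``get_aoi`` is not shown here. A hypothetical sketch that is consistent with how it is used above (the AOIs are matplotlib ``Path`` objects, left fixations get a negative code, right fixations a positive code, and fixations outside both AOIs get NaN):

import numpy as np
from matplotlib.path import Path  # left_aoi / right_aoi are assumed to be Path instances

def get_aoi(x, y, left_aoi, right_aoi):
    """Hypothetical AOI lookup: -1.0 = left AOI, 1.0 = right AOI, NaN = neither."""
    if left_aoi.contains_point((x, y)):
        return -1.0
    if right_aoi.contains_point((x, y)):
        return 1.0
    return np.nan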
Code Example #5
File: _pupillometry.py  Project: nordme/genz
def find_pupil_tone_impulse_response(ec,
                                     el,
                                     bgcolor,
                                     fcolor,
                                     prompt=True,
                                     verbose=None,
                                     targ_is_fm=True):
    """Find pupil impulse response using responses to tones

    Parameters
    ----------
    ec : instance of ExperimentController
        The experiment controller.
    el : instance of EyelinkController
        The Eyelink controller.
    bgcolor : color
        Background color to use.
    fcolor : color
        Fixation dot color to use.
    prompt : bool
        If True, a standard prompt message will be displayed.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see expyfun.verbose).
    targ_is_fm : bool
        If ``True`` then use frequency modulated tones as the target and
        constant frequency tones as the non-target stimuli. Otherwise use
        constant frequency tones as targets and FM tones as non-targets.

    Returns
    -------
    srf : array
        The pupil response function to sound.
    t : array
        The time points for the response function.
    std_err : array
        The standard error as a function of time.

    Notes
    -----
    If ``el.dummy_mode`` is on, the test will run at around 10x the speed.
    """
    _check_pyeparse()
    import pyeparse
    if el.recording:
        el.stop()

    #
    # Determine parameters / randomization
    #
    n_stimuli = 300 if not el.dummy_mode else 10
    cal_stim = [0, 75, 150, 225]  # when to offer the subject a break

    delay_range = (3.0, 5.0) if not el.dummy_mode else (0.3, 0.5)
    delay_range = np.array(delay_range)
    targ_prop = 0.25
    stim_dur = 100e-3
    f0 = 1000.  # Hz

    rng = np.random.RandomState(0)
    isis = np.linspace(*delay_range, num=n_stimuli)
    n_targs = int(targ_prop * n_stimuli)
    targs = np.zeros(n_stimuli, bool)
    targs[np.linspace(0, n_stimuli - 1, n_targs + 2)[1:-1].astype(int)] = True
    while True:  # ensure we randomize but don't start with a target
        idx = rng.permutation(np.arange(n_stimuli))
        isis = isis[idx]
        targs = targs[idx]
        if not targs[0]:
            break

    #
    # Generate stimuli
    #
    fs = ec.stim_fs
    n_samp = int(fs * stim_dur)
    t = np.arange(n_samp).astype(float) / fs
    steady = np.sin(2 * np.pi * f0 * t)
    wobble = np.sin(
        np.cumsum(f0 + 100 * np.sin(2 * np.pi * (1 / stim_dur) * t)) / fs * 2 *
        np.pi)
    std_stim, dev_stim = (steady, wobble) if targ_is_fm else (wobble, steady)
    std_stim = window_edges(std_stim * ec._stim_rms * np.sqrt(2), fs)
    dev_stim = window_edges(dev_stim * ec._stim_rms * np.sqrt(2), fs)

    #
    # Subject "Training"
    #
    ec.stop()
    ec.set_background_color(bgcolor)
    targstr, tonestr = ('wobble', 'beep') if targ_is_fm else ('beep', 'wobble')
    instr = ('Remember to press the button as quickly as possible following '
             'each "{}" sound.\n\nPress the response button to '
             'continue.'.format(targstr))
    if prompt:
        notes = [('We will now determine the response of your pupil to sound '
                  'changes.\n\nYour job is to press the response button '
                  'as quickly as possible when you hear a "{1}" instead '
                  'of a "{0}".\n\nPress a button to hear the "{0}".'
                  ''.format(tonestr, targstr)),
                 ('Now press a button to hear the "{}".'.format(targstr))]
        for text, stim in zip(notes, (std_stim, dev_stim)):
            ec.screen_prompt(text)
            ec.load_buffer(stim)
            ec.wait_secs(0.5)
            ec.play()
            ec.wait_secs(0.5)
            ec.stop()
        ec.screen_prompt(instr)

    fix = FixationDot(ec, colors=[fcolor, bgcolor])
    flip_times = list()
    presses = list()
    assert 0 in cal_stim
    for ii, (isi, targ) in enumerate(zip(isis, targs)):
        if ii in cal_stim:
            if ii != 0:
                el.stop()
                perc = round((100. * ii) / n_stimuli)
                ec.screen_prompt('Great work! You are {0}% done.\n\nFeel '
                                 'free to take a break, then press the '
                                 'button to continue.'.format(perc))
            el.calibrate()
            ec.screen_prompt(instr)
            # let's put the initial color up to allow the system to settle
            fix.draw()
            ec.flip()
            ec.wait_secs(10.0)  # let the pupil settle
        fix.draw()
        ec.load_buffer(dev_stim if targ else std_stim)
        ec.identify_trial(ec_id='TONE_{0}'.format(int(targ)),
                          el_id=[int(targ)],
                          ttl_id=[int(targ)])
        flip_times.append(ec.start_stimulus())
        presses.append(ec.wait_for_presses(isi))
        ec.stop()
        ec.trial_ok()
    el.stop()  # stop the recording
    ec.screen_prompt('Processing data, please wait...',
                     max_wait=0,
                     clear_after=False)

    flip_times = np.array(flip_times)
    tmin = -0.5
    if el.dummy_mode:
        pk = pyeparse.utils.pupil_kernel(el.fs, 3.0 - tmin)
        response = np.zeros(len(pk))
        offset = int(el.fs * 0.5)
        response[offset:] = pk[:-offset]
        std_err = np.ones_like(response) * 0.1 * response.max()
        std_err += np.random.rand(std_err.size) * 0.1 * response.max()
    else:
        raws = list()
        events = list()
        assert len(el.file_list) >= 4
        for fname in el.file_list[-4:]:
            raw, event = _load_raw(el, fname)
            raws.append(raw)
            events.append(event)
        assert sum(len(event) for event in events) == n_stimuli
        epochs = pyeparse.Epochs(raws,
                                 events,
                                 1,
                                 tmin=tmin,
                                 tmax=delay_range[0])
        response = epochs.pupil_zscores()
        assert response.shape[0] == n_stimuli
        std_err = np.std(response[~targs], axis=0)
        std_err /= np.sqrt(np.sum(~targs))
        response = np.mean(response[~targs], axis=0)
    t = np.arange(len(response)).astype(float) / el.fs + tmin
    return response, t, std_err
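
A short usage sketch (assumed plotting code, using only the values returned above) for inspecting the pupil response to sound with a ±1 SEM band:

import matplotlib.pyplot as plt

srf, t, std_err = find_pupil_tone_impulse_response(ec, el, bgcolor, fcolor)
fig, ax = plt.subplots()
ax.plot(t, srf)
ax.fill_between(t, srf - std_err, srf + std_err, alpha=0.3)
ax.set(xlabel='Time (s)', ylabel='Pupil response (z-score)')
plt.show()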