Code Example #1
def screen_to_space(coords):
    # map 1920x1080 pixel coordinates into the plane of the physical screen in world space
    origin = screen_corners['upper left']

    right_vector = fh.to_unit(screen_corners['upper right'] - origin)
    down_vector = fh.to_unit(screen_corners['lower left'] - origin)

    width = np.linalg.norm(screen_corners['upper right'] - origin)
    height = np.linalg.norm(screen_corners['lower left'] - origin)

    return (coords / np.array([1920, 1080]) *
            np.array([width, height]) @ np.vstack(
                (right_vector, down_vector))) + origin
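A minimal usage sketch, not from the source: `screen_to_space` relies on the module-level `screen_corners` and `fh`, so the stand-ins below assume that `fh.to_unit` normalizes vectors and that `screen_corners` holds the 3D world positions of the physical screen corners; the corner coordinates are made up for illustration.

import numpy as np

class fh:  # hypothetical stand-in for the project's helper module
    @staticmethod
    def to_unit(v):
        return v / np.linalg.norm(v, axis=-1, keepdims=True)

# made-up corner positions of a screen standing upright in world space
screen_corners = {
    'upper left': np.array([0.0, 100.0, 50.0]),
    'upper right': np.array([53.0, 100.0, 50.0]),
    'lower left': np.array([0.0, 100.0, 20.0]),
}

print(screen_to_space(np.array([0, 0])))        # upper-left corner: [0. 100. 50.]
print(screen_to_space(np.array([1920, 1080])))  # implied lower-right corner: [53. 100. 20.]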
Code Example #2
def markers_to_ortho(*markers):

    if len(markers) == 1:
        marker1 = markers[0][..., 0:3]
        marker2 = markers[0][..., 3:6]
        marker3 = markers[0][..., 6:9]

    elif len(markers) == 3:
        marker1 = markers[0]
        marker2 = markers[1]
        marker3 = markers[2]

    else:
        raise ValueError(
            'Input must be either three marker arrays with 3 coordinates or one array with 9.'
        )

    side1 = freehead.to_unit(marker2 - marker1)
    side2 = freehead.to_unit(marker3 - marker1)
    normal1 = freehead.to_unit(np.cross(
        side1, side2))  # should be unit length but let's be sure
    normal2 = freehead.to_unit(np.cross(side1, normal1))

    # these are all Nx3
    v1 = side1
    v2 = normal1
    v3 = normal2

    # single rotation matrix if there is only one rigidbody
    if v1.ndim == 1:
        V = np.empty((3, 3), float)  # np.float was removed from recent NumPy

        V[:, 0] = v1
        V[:, 1] = v2
        V[:, 2] = v3

        return V

    else:
        length = v1.shape[0]

        # add the orthonormal basis vectors to the array of rotation matrices as column vectors
        V = np.empty((length, 3, 3), float)  # np.float was removed from recent NumPy
        # V[marker, row, column]
        V[:, :, 0] = v1
        V[:, :, 1] = v2
        V[:, :, 2] = v3

        return V
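A quick self-check sketch, with made-up marker positions and a hypothetical stand-in for `freehead.to_unit` (assumed to normalize along the last axis): the returned matrix should be a proper rotation, i.e. orthonormal columns and determinant +1, since the third column is the cross product of the first two.

import numpy as np

class freehead:  # hypothetical stand-in for the library helper
    @staticmethod
    def to_unit(v):
        return v / np.linalg.norm(v, axis=-1, keepdims=True)

V = markers_to_ortho(np.array([0.0, 0.0, 0.0]),
                     np.array([1.0, 1.0, 0.0]),
                     np.array([0.0, 2.0, 1.0]))
assert np.allclose(V.T @ V, np.eye(3))    # orthonormal columns
assert np.isclose(np.linalg.det(V), 1.0)  # right-handed basis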
Code Example #3
def normals_nonlinear_angular_transform(normals, *polynom_params):

    if normals.ndim == 1:
        normals = normals[None, :]

    if len(polynom_params) == 1:
        aa = polynom_params[0][0:2]
        bb = polynom_params[0][2:4]
        cc = polynom_params[0][4:6]

    elif len(polynom_params) == 3:
        aa = polynom_params[0]
        bb = polynom_params[1]
        cc = polynom_params[2]

    else:
        raise ValueError(
            'Polynomial parameters must either be three arrays (aa, bb, cc) or one array (aabbcc).')

    # uses x right, y forward, z up assumption
    elev_azim = np.arctan2(normals[:, [0, 2]], normals[:, 1, None])
    elev_azim_transformed = aa * (elev_azim ** 2) + bb * elev_azim + cc

    xz_transformed = np.tan(elev_azim_transformed)
    xyz = np.ones(normals.shape, dtype=np.float64)
    xyz[:, [0, 2]] = xz_transformed
    xyz_normalized = fh.to_unit(xyz).squeeze()

    return xyz_normalized
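A sanity-check sketch, with made-up inputs and the same hypothetical `fh.to_unit` stand-in as above: with aa = 0, bb = 1, cc = 0 the polynomial is the identity, so the transform should hand back the input directions unchanged (given the positive-y assumption noted in the code).

import numpy as np

class fh:  # hypothetical stand-in for the project's helper module
    @staticmethod
    def to_unit(v):
        return v / np.linalg.norm(v, axis=-1, keepdims=True)

normals = fh.to_unit(np.array([[0.1, 1.0, -0.2],
                               [0.3, 0.9, 0.1]]))
out = normals_nonlinear_angular_transform(
    normals, np.zeros(2), np.ones(2), np.zeros(2))  # aa=0, bb=1, cc=0
assert np.allclose(out, normals)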
Code Example #4
def is_eye_within_led_threshold(led, threshold):
    # closure: self, T_eye_world, R_head_world and gaze_normals are
    # captured from the enclosing scope
    eye_to_led = fh.to_unit(self.rig_leds[led, :] - T_eye_world)
    gaze_normals_world = R_head_world @ fh.normals_nonlinear_angular_transform(
        self.R_eye_head @ gaze_normals, self.nonlinear_parameters)
    eye_to_led_azim_elev = np.rad2deg(
        np.abs(
            fh.to_azim_elev(gaze_normals_world) -
            fh.to_azim_elev(eye_to_led)))
    # threshold is only horizontal right now because of increased vertical angle noise and spikes
    is_within_threshold = eye_to_led_azim_elev[0] <= threshold
    return is_within_threshold
Code Example #5
 def precalculate_data(self):
     fh.array_apply(
         self.df,
         OrderedDict([
             ('target_led', lambda r: r['fixation_led'] + r['amplitude']),
             ('shifted_target_led', lambda r: r['target_led'] + r['shift']),
             # optotrak data interpolated onto pupil labs timestamps
             ('o_data_interp',
              lambda r: fh.interpolate_a_onto_b_time(r['o_data'][:, 3:15],
                                                     r['o_data'][:, 30],
                                                     r['p_data'][:, 2],
                                                     kind='linear')),
             # gaze data
             ('gaze_normals', lambda r: r['p_data'][:, 3:6]),
             # rotation of head rigidbody
             ('R_head_world',
              lambda r: r['helmet'].solve(r['o_data_interp'].reshape(
                  (-1, 4, 3)))[0]),
             # yaw pitch roll head rigidbody
             ('ypr_head_world',
              lambda r: fh.to_yawpitchroll(r['R_head_world'])),
             # reference positions of head rigidbody
             ('Ts_head_world',
              lambda r: r['helmet'].solve(r['o_data_interp'].reshape(
                  (-1, 4, 3)))[1]),
             # position of target led
             ('target_pos', lambda r: self.rig_leds[r['target_led'], :]),
             # vector from eye to target position
             ('eye_to_target', lambda r: fh.to_unit(
                 r['target_pos'] - r['Ts_head_world'][:, 5, :])),
             # gaze vector in head without distortion correction
             ('gaze_head_distorted', lambda r:
              (r['R_eye_head'] @ r['gaze_normals'].T).T),
             # gaze vector in head with distortion correction
             ('gaze_head', lambda r: fh.normals_nonlinear_angular_transform(
                 r['gaze_head_distorted'], r['nonlinear_parameters'])),
             # gaze vector in world
             ('gaze_world', lambda r: np.einsum(
                 'tij,tj->ti', r['R_head_world'], r['gaze_head'])),
             # gaze angles in world
             ('gaze_ang_world',
              lambda r: np.rad2deg(fh.to_azim_elev(r['gaze_world']))),
             # angles from eye to target in world
             ('eye_ang_target',
              lambda r: np.rad2deg(fh.to_azim_elev(r['eye_to_target']))),
             # difference of eye to target angles and gaze in world
             ('d_ang_gaze_eye_target',
              lambda r: r['gaze_ang_world'] - r['eye_ang_target']),
         ]),
         add_inplace=True)
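The pipeline above leans on one einsum pattern: 'tij,tj->ti' rotates each timestep's vector by that timestep's own matrix. A minimal standalone sketch of just that pattern, on dummy data:

import numpy as np

rng = np.random.default_rng(0)
R = rng.normal(size=(5, 3, 3))  # one 3x3 matrix per timestep t
v = rng.normal(size=(5, 3))     # one vector per timestep t

out = np.einsum('tij,tj->ti', R, v)
# equivalent to a per-timestep matrix-vector loop
assert np.allclose(out, np.stack([R[t] @ v[t] for t in range(5)]))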
Code Example #6
    def err_func(T_eye_head):

        T_eye_world = np.einsum('tij,j->ti', R_head_world,
                                T_eye_head) + T_head_world
        eye_to_target = fh.to_unit(T_target_world - T_eye_world)

        gaze_normals_world = np.einsum(
            'tij,tj->ti', R_head_world,
            np.einsum('ij,tj->ti', R_eye_head, gaze_normals))

        # clip to guard against arccos of values just outside [-1, 1]
        angles = np.rad2deg(
            np.arccos(
                np.clip(
                    np.einsum('ti,ti->t', gaze_normals_world, eye_to_target),
                    -1.0, 1.0)))
        return np.sum(angles)
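`err_func` closes over `R_head_world`, `T_head_world`, `T_target_world`, `gaze_normals` and `R_eye_head` from its enclosing scope and scores a candidate eye-in-head translation by the summed angular error. A sketch of how it would plausibly be minimized; the initial guess is a placeholder, not from the source, and the pattern merely mirrors the `minimize` calls in the later examples:

import numpy as np
from scipy.optimize import minimize

result = minimize(err_func, x0=np.zeros(3))  # optimize the 3-vector T_eye_head
T_eye_head = result.x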
Code Example #7
def err_func_nonlinear_direct(ypr_aa_bb_cc):
    ypr = ypr_aa_bb_cc[0:3]
    aa = ypr_aa_bb_cc[None, 3:5]
    bb = ypr_aa_bb_cc[None, 5:7]
    cc = ypr_aa_bb_cc[None, 7:9]
    R = fh.from_yawpitchroll(ypr)
    gaze_rotated = (R @ gaze_normals.T).T
    gaze_transformed = gaze_rotated.copy()
    xz = gaze_transformed[:, [0, 2]]  # fancy indexing returns a copy
    gaze_transformed[:, [0, 2]] = aa * xz**2 + bb * xz + cc
    gaze_transformed = fh.to_unit(gaze_transformed)
    azim_elev_nonlinear = np.rad2deg(
        np.arctan2(gaze_transformed[:, [0, 2]], gaze_transformed[:, 1, None]))
    return ((angles_from_eye - azim_elev_nonlinear)**2).sum()
Code Example #8
    def err_func(parameters):
        R_eye_head = fh.from_yawpitchroll(parameters[0:3])
        T_eye_head = parameters[3:6]

        T_eye_world = np.einsum('tij,j->ti', R_head_world,
                                T_eye_head) + T_head_world
        eye_to_target = fh.to_unit(T_target_world - T_eye_world)

        gaze_normals_world = np.einsum(
            'tij,tj->ti', R_head_world,
            np.einsum('ij,tj->ti', R_eye_head, gaze_normals))

        # clip to guard against arccos of values just outside [-1, 1]
        angles = np.rad2deg(
            np.arccos(
                np.clip(
                    np.einsum('ti,ti->t', gaze_normals_world, eye_to_target),
                    -1.0, 1.0)))
        return np.mean(angles)
Code Example #9
        def err_func(params):
            T_eye_head = ini_T_eye_head
            R_eye_head = fh.from_yawpitchroll(params[0:3])
            aa = params[3:5]
            bb = params[5:7]
            cc = params[7:9]

            T_eye_world = np.einsum('tij,j->ti', R_head_world,
                                    T_eye_head) + T_head_world
            eye_to_target = fh.to_unit(T_target_world - T_eye_world)

            gaze_normals_head = np.einsum('ij,tj->ti', R_eye_head,
                                          gaze_normals)
            gaze_normals_head_transformed = fh.normals_nonlinear_angular_transform(
                gaze_normals_head, aa, bb, cc)

            gaze_normals_world = np.einsum('tij,tj->ti', R_head_world,
                                           gaze_normals_head_transformed)

            # clip to guard against arccos of values just outside [-1, 1]
            angles = np.rad2deg(
                np.arccos(
                    np.clip(
                        np.einsum('ti,ti->t', gaze_normals_world, eye_to_target),
                        -1.0, 1.0)))
            return angles.mean()
Code Example #10
def normals_nonlinear_transform(normals, *polynom_params):

    if normals.ndim == 1:
        normals = normals[None, :]

    if len(polynom_params) == 1:
        aa = polynom_params[0][0:2]
        bb = polynom_params[0][2:4]
        cc = polynom_params[0][4:6]

    elif len(polynom_params) == 3:
        aa = polynom_params[0]
        bb = polynom_params[1]
        cc = polynom_params[2]

    else:
        raise ValueError('Polynomial parameters must either be three arrays (aa, bb, cc) or one array (aabbcc).')

    xyz = normals.copy()
    xyz[:, [0, 2]] = aa * (xyz[:, [0, 2]] ** 2) + bb * xyz[:, [0, 2]] + cc

    xyz_normalized = fh.to_unit(xyz).squeeze()

    return xyz_normalized
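The two variants differ in where the polynomial is applied: `normals_nonlinear_angular_transform` bends the azimuth/elevation angles, while this `normals_nonlinear_transform` bends the x and z components directly. A contrast sketch, reusing the hypothetical `fh.to_unit` stand-in from above; the parameter values are illustrative, and with a nonzero quadratic term the two outputs disagree:

import numpy as np

aa, bb, cc = np.array([0.1, 0.1]), np.ones(2), np.zeros(2)
n = fh.to_unit(np.array([[0.4, 0.9, -0.2]]))
print(normals_nonlinear_angular_transform(n, aa, bb, cc))  # polynomial on angles
print(normals_nonlinear_transform(n, aa, bb, cc))          # polynomial on components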
Code Example #11
    def create_helmet(self):
        head_measurement_points = [
            'head straight', 'nasion', 'inion', 'right eye'
        ]

        for i, measurement_point in enumerate(head_measurement_points):
            print('Press space to measure: ' + measurement_point)
            # light up an led to signal which measurement is going on
            signal_led = 255
            signal_length = 1
            while True:
                # bright light to start and see something
                self.athread.write_uint8(signal_led, 255, 255, 255)
                fh.wait_for_keypress(pygame.K_SPACE)
                current_sample = self.othread.current_sample.copy()
                helmet_leds = current_sample[HELMET].reshape((4, 3))

                if np.any(np.isnan(helmet_leds)):
                    print('Helmet LEDs not all visible. Try again.')
                    # red light for failure
                    self.athread.write_uint8(signal_led, 255, 0, 0)
                    time.sleep(signal_length)
                    continue

                if i == I_BARY:
                    helmet = fh.Rigidbody(helmet_leds)
                    # green light for success
                    self.athread.write_uint8(signal_led, 0, 255, 0)
                    time.sleep(signal_length)
                    break
                else:
                    _, probe_tip = fh.FourMarkerProbe().solve(
                        current_sample[PROBE].reshape((4, 3)))
                    if np.any(np.isnan(probe_tip)):
                        print('Probe not visible. Try again.')
                        # red light for failure
                        self.athread.write_uint8(signal_led, 255, 0, 0)
                        time.sleep(signal_length)
                        continue
                    helmet.add_reference_points(helmet_leds, probe_tip)

                    # replace eye measurement
                    if i == I_EYE:
                        nasion_to_inion = fh.to_unit(
                            helmet.ref_points[I_INION, :] -
                            helmet.ref_points[I_NASION, :])
                        # estimate the eye position 15 mm inward from the probed point along the nasion->inion direction
                        estimated_eye_position = helmet.ref_points[
                            I_EYE, :] + 15 * nasion_to_inion
                        # replace measured value with estimation
                        helmet.ref_points[I_EYE, :] = estimated_eye_position

                    # green light for success
                    self.athread.write_uint8(signal_led, 0, 255, 0)
                    time.sleep(signal_length)
                    break

        self.helmet = helmet
        print('Helmet creation done.')
        self.athread.write_uint8(255, 0, 0, 0)  # turn off leds
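The eye-position estimate inside `create_helmet`, pulled out as a standalone numeric sketch with made-up coordinates: the probed eye point is pushed 15 mm inward along the nasion-to-inion direction.

import numpy as np

nasion = np.array([0.0, 80.0, 0.0])
inion = np.array([0.0, -100.0, 0.0])
probed_eye = np.array([30.0, 75.0, -5.0])

nasion_to_inion = (inion - nasion) / np.linalg.norm(inion - nasion)
estimated_eye = probed_eye + 15 * nasion_to_inion
print(estimated_eye)  # [30. 60. -5.]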
Code Example #12
            print('NaN values in pupil data, repeat')
            continue
        break
    R_head_world[i, :, :] = rotation
    T_head_world[i, :] = ref_points[0, :]
    gaze_normals[i, :] = gaze_normal

athread.write_uint8(255, 0, 0, 0)
#%%
calibration_result = fh.calibrate_pupil(
    T_head_world,
    R_head_world,
    gaze_normals,
    T_target_world,
    # initial guess: measured eye position plus 15 mm along the nasion->inion direction
    ini_T_eye_head=(helmet.ref_points[5, :] - helmet.ref_points[0, :]) +
    15 * fh.to_unit(helmet.ref_points[2, :] - helmet.ref_points[1, :]),
    bounds_mm=50)

R_eye_head = fh.from_yawpitchroll(calibration_result.x[0:3])
T_eye_head = calibration_result.x[3:6]

#%%
helmet.ref_points[5, :] = T_eye_head + helmet.ref_points[0, :]

# %%

settings = {
    'trials_per_shift': 2,
    'shift_magnitudes': np.arange(-5, 6) * 4,
    'default_fixation_led_index': 50,
    'default_target_led_index': 200,
Code Example #13
            break

start_time = time.monotonic()
duration = 60
last_i = None
while time.monotonic() - start_time < duration:
    current_i = othread.i_current_sample
    if current_i == last_i:
        time.sleep(0)  # no new sample yet; yield and poll again
        continue
    last_i = current_i
    current_sample = othread.current_sample.copy()
    rotation, ref_points = helmet.solve(extract_helmet(current_sample))
    if np.any(np.isnan(rotation)):
        continue
    inion_to_nasion = fh.to_unit(ref_points[1, :] - ref_points[2, :]).squeeze()
    nasion_to_leds = fh.to_unit(rig_led_positions - ref_points[1, :])
    angles = np.rad2deg(
        np.arccos(np.einsum('li,i->l', nasion_to_leds, inion_to_nasion)))
    closest_led = int(np.argmin(angles))
    if angles.min() > 5:
        closest_led = 255  # sentinel: no LED within 5 degrees
    arduino.write(struct.pack('>B', closest_led))

othread.should_stop.set()
othread.join()

pygame.quit()

arduino.write(struct.pack('>B', 255))
arduino.close()
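The pointing logic above compares the head's inion-to-nasion direction against unit vectors toward every rig LED and picks the smallest angle. The same math on dummy data ('li,i->l' dots each LED direction with the single head direction):

import numpy as np

inion_to_nasion = np.array([0.0, 1.0, 0.0])         # head facing +y
nasion_to_leds = np.array([[0.0, 1.0, 0.0],         # LED dead ahead
                           [0.7071, 0.7071, 0.0]])  # LED 45 degrees to the side
angles = np.rad2deg(
    np.arccos(np.einsum('li,i->l', nasion_to_leds, inion_to_nasion)))
print(int(np.argmin(angles)))  # 0: the LED straight ahead wins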
Code Example #14
def apply_analysis_pipeline_for_all_trials(df: pd.DataFrame):

    warnings.filterwarnings('ignore', category=np.RankWarning)
    
    df.rename(columns={'shift_percent_approx': 'shift_percent'}, inplace=True)

    fh.array_apply(
        df,
        OrderedDict([
            # sign chosen so that motion toward the target is positive (right-to-left is the positive angle in math convention)
            ('direction_sign', lambda r: -1 if r['left_to_right'] else +1),
            ('fixation_led', lambda r: r['fixation_led'] if r['left_to_right'] else 254 - r['fixation_led']),
            (('df', 'target_led'), lambda df: df['fixation_led'] - df['direction_sign'] * df['amplitude']),
            (('df', 'starget_led'), lambda df: df['target_led'] - df['direction_sign'] * df['shift']),
            ('is_outward_response', lambda r: r['response'] == ('right' if r['left_to_right'] else 'left')),
            ('response_ward', lambda r: 'outward' if r['is_outward_response'] else 'inward'),
            ('correct_response', lambda r: None if r['shift'] == 0 else 'right' if (r['shift'] > 0) == r['left_to_right'] else 'left'),
            ('is_correct', lambda r: None if r['correct_response'] is None else r['correct_response'] == r['response']),
            ('shift_percent_uni', lambda r: r['shift_percent'] if r['left_to_right'] else -r['shift_percent']),
            # new time index for upsampling
            ('t_sacc', lambda r: np.arange(-400, 801, 5)),
            # pupil data in around saccade interval upsampled
            ('p_data_upsampled', lambda r: fh.interpolate_a_onto_b_time(r['p_data'][:, 2:5],
                                                                1000 * (r['p_data'][:, 0] - r['t_saccade_started']),
                                                                r['t_sacc'], kind='linear')),
            # optotrak data upsampled
            ('o_data_upsampled', lambda r: fh.interpolate_a_onto_b_time(r['o_data'][:, 3:15],
                                                                  1000 * (r['o_data'][:, 30] - r['t_saccade_started']),
                                                                  r['t_sacc'], kind='linear')),
            # latency of pupil signal
            ('pupil_latency', lambda r: fh.interpolate_a_onto_b_time(r['p_data'][:, 1] - r['p_data'][:, 0], 1000 * (
                        r['p_data'][:, 0] - r['t_saccade_started']), r['t_sacc'], kind='linear')),
            # rotation of head rigidbody
            ('R_head_world', lambda r: r['helmet'].solve(r['o_data_upsampled'].reshape((-1, 4, 3)))[0]),
            # yaw pitch roll head rigidbody
            ('ypr_head_world', lambda r: fh.to_yawpitchroll(r['R_head_world'])),
            # reference positions of head rigidbody
            ('Ts_head_world', lambda r: r['helmet'].solve(r['o_data_upsampled'].reshape((-1, 4, 3)))[1]),
            # position of fixation led
            ('fixation_pos', lambda r: r['rig'][r['fixation_led'], :]),
            # position of target led
            ('target_pos', lambda r: r['rig'][r['target_led'], :]),
            # position of shifted target led
            ('starget_pos', lambda r: r['rig'][r['starget_led'], :]),
            # vector from eye to fixation position
            ('eye_to_fixation', lambda r: fh.to_unit(r['fixation_pos'] - r['Ts_head_world'][:, 3, :])),
            # vector from eye to target position
            ('eye_to_target', lambda r: fh.to_unit(r['target_pos'] - r['Ts_head_world'][:, 3, :])),
            # vector from eye to shifted target position
            ('eye_to_starget', lambda r: fh.to_unit(r['starget_pos'] - r['Ts_head_world'][:, 3, :])),
            # gaze vector in head without distortion correction
            ('gaze_in_head_distorted', lambda r: (r['R_eye_head'] @ r['p_data_upsampled'].T).T),
            # gaze vector in head with distortion correction
            ('gaze_in_head',
             lambda r: fh.normals_nonlinear_angular_transform(r['gaze_in_head_distorted'], r['nonlinear_parameters'])),
            # gaze angles in head
            ('gaze_in_head_ang', lambda r: np.rad2deg(fh.to_azim_elev(r['gaze_in_head']))),
            # gaze vector in world
            ('gaze_in_world', lambda r: np.einsum('tij,tj->ti', r['R_head_world'], r['gaze_in_head'])),
            # gaze angles in world
            ('gaze_in_world_ang', lambda r: np.rad2deg(fh.to_azim_elev(r['gaze_in_world']))),
            # angles from eye to target in world
            ('eye_to_target_ang', lambda r: np.rad2deg(fh.to_azim_elev(r['eye_to_target']))),
            # difference of eye to target angles and gaze in world
            ('gaze_angle_vs_target', lambda r: r['gaze_in_world_ang'] - r['eye_to_target_ang']),
            # angles from eye to shifted target in world
            ('eye_to_starget_ang', lambda r: np.rad2deg(fh.to_azim_elev(r['eye_to_starget']))),
            # difference of eye to shifted target angles and gaze in world
            ('gaze_angle_vs_starget', lambda r: r['gaze_in_world_ang'] - r['eye_to_starget_ang']),
            # angles from eye to fixation in world
            ('eye_to_fixation_ang', lambda r: np.rad2deg(fh.to_azim_elev(r['eye_to_fixation']))),
            # difference of eye to fixation angles and gaze in world
            ('gaze_angle_vs_fixation', lambda r: r['gaze_in_world_ang'] - r['eye_to_fixation_ang']),
            # time steps
            ('dt', lambda r: fh.padded_diff(r['t_sacc'])),
            # velocity of difference of eye to target angles and gaze in world
            ('gaze_angvel_vs_target', lambda r: fh.padded_diff(r['gaze_angle_vs_target']) / r['dt'][:, None]),
            ('gaze_angvel_vs_target_savgol',
                lambda r: prepend_nan(
                    savgol_filter(r['gaze_angvel_vs_target'][1:, ...], 3, 1, axis=0),
                    axis=0)),
            # saccade detection engbert & mergenthaler
            ('eng_merg', lambda r: fh.sacc_dec_engb_merg_horizontal(r['gaze_angle_vs_target'][:, 0],
                                                                    r['gaze_angvel_vs_target_savgol'][:, 0], 6, 5)),
        ]),
        add_inplace=True,
        print_log=True
    )

    df.drop(
        columns=[
            'p_data',
            'o_data',
            # 'helmet',
            'o_data_upsampled',
            'p_data_upsampled',
            'gaze_in_head_distorted',
        ],
        inplace=True
    )
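The velocity trace above is smoothed with `scipy.signal.savgol_filter` using window length 3 and polynomial order 1 along the time axis (`prepend_nan` and `fh.padded_diff` are project helpers not shown here). A standalone sketch of just that smoothing step on synthetic data:

import numpy as np
from scipy.signal import savgol_filter

t = np.arange(0.0, 1.0, 0.01)
rng = np.random.default_rng(0)
noisy_velocity = np.sin(2 * np.pi * t) + 0.1 * rng.normal(size=t.size)
smoothed = savgol_filter(noisy_velocity, 3, 1, axis=0)  # same parameters as above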
Code Example #15
            dt_fixation = 0
            continue
        R_head_world, ref_points = helmet.solve(
            extract_helmet(othread.current_sample.copy()))
        if np.any(np.isnan(R_head_world)):
            if phase != fix_phase:
                athread.write_uint8(led_fixation, *should_fix_color)
                phase = fix_phase
            was_fixating_eye = False
            t_started_fixating = False
            dt_fixation = 0
            continue
        T_eye_world = ref_points[5, :]

        if phase == fix_phase:
            eye_to_fixpoint = fh.to_unit(rig_led_positions[led_fixation, :] -
                                         T_eye_world)
            gaze_normals_world = R_head_world @ R_eye_head @ gaze_normals
            # clip to guard against arccos of values just outside [-1, 1]
            eye_to_fixpoint_angle = np.rad2deg(
                np.arccos(np.clip(eye_to_fixpoint @ gaze_normals_world, -1.0, 1.0)))
            is_fixating_eye = eye_to_fixpoint_angle <= fix_threshold_eye

            if not is_fixating_eye and was_fixating_eye:
                athread.write_uint8(led_fixation, *should_fix_color)
                was_fixating_eye = False

            if is_fixating_eye and not was_fixating_eye:
                athread.write_uint8(led_fixation, *is_fix_color)
                was_fixating_eye = True
                t_started_fixating = time.monotonic()
            if is_fixating_eye and was_fixating_eye:
                if dt_fixation < fixation_duration:
Code Example #16
def apply_analysis_pipeline_for_valid_trials(df: pd.DataFrame):

    df.drop(
        df[df.apply(lambda r: r['eng_merg'] is None, axis=1)].index,
        inplace=True
    )

    def i_shift_done_rel(r):
        difference = r['t_led_shift_done'] - r['t_saccade_started']
        in_ms = 1000 * difference
        distances_to_available_timestamps = in_ms - r['t_sacc']
        i_closest_timestamp = np.argmin(np.abs(distances_to_available_timestamps))
        return i_closest_timestamp

    fh.array_apply(
        df,
        OrderedDict([
            # index of the largest-amplitude saccade
            ('i_max_amp_sacc', lambda r: np.argmax(np.abs(r['eng_merg'][3]))),
            ('max_sacc_amp', lambda r: r['eng_merg'][3][r['i_max_amp_sacc']]),
            (('df', 'max_sacc_amp_uni'), lambda df: df['direction_sign'] * df['max_sacc_amp']),
            ('i_start_max_amp_sacc', lambda r: r['eng_merg'][0][r['i_max_amp_sacc']][0]),
            ('i_end_max_amp_sacc', lambda r: r['eng_merg'][0][r['i_max_amp_sacc']][1]),
            ('start_max_amp_sacc', lambda r: r['t_sacc'][r['i_start_max_amp_sacc']]),
            ('end_max_amp_sacc', lambda r: r['t_sacc'][r['i_end_max_amp_sacc']]),
            # real amplitude dependent on eye position
            ('real_amplitude',
                lambda r: r['eye_to_target_ang'][r['i_start_max_amp_sacc'] - 5, 0]  # 5 samples before saccade onset
                          - r['eye_to_fixation_ang'][r['i_start_max_amp_sacc'] - 5, 0]),
            (('df', 'real_amplitude_uni'), lambda df: df['real_amplitude'] * df['direction_sign']),
            # detected saccade related timestamps
            (('df', 't_det_sacc'), lambda df: df['t_sacc'] - df['start_max_amp_sacc']),

            # relative led shift
            ('i_shift_done_rel', i_shift_done_rel),

            # peak velocity of the largest-amplitude saccade
            ('vpeak_max_amp_sacc', lambda r: r['eng_merg'][1][r['i_max_amp_sacc']]),
            # duration of the largest-amplitude saccade
            (('df', 'dur_max_amp_sacc'), lambda df: df['end_max_amp_sacc'] - df['start_max_amp_sacc']),
            # landing points of main saccades
            ('land_pos_sacc_hor', lambda r: r['gaze_angle_vs_target'][r['i_end_max_amp_sacc'], 0]),
            ('land_pos_sacc_ver', lambda r: r['gaze_angle_vs_target'][r['i_end_max_amp_sacc'], 1]),
            # ratio max amplitude real amplitude
            (('df', 'ratio_max_real_amp'), lambda df: df['max_sacc_amp'] / df['real_amplitude']),
            # landing error
            (('df', 'landing_error'), lambda df: df['ratio_max_real_amp'] - 1),
            # abs landing error
            (('df', 'landing_err_abs'), lambda df: df['landing_error'].abs()),
            # head contribution
            ('head_contrib_sacc',
                lambda r: r['ypr_head_world'][r['i_end_max_amp_sacc'], 0] - r['ypr_head_world'][r['i_start_max_amp_sacc'], 0]),
            # head contribution unidirectional
            (('df', 'head_contrib_sacc_uni'), lambda df: df['head_contrib_sacc'] * df['direction_sign']),
            # relative head contribution
            (('df', 'rel_head_contrib_sacc'), lambda df: df['head_contrib_sacc'] / df['max_sacc_amp']),
            # head contribution until the LED shift was done
            ('head_contrib_shift_done',
                lambda r: r['ypr_head_world'][r['i_shift_done_rel'], 0] - r['ypr_head_world'][r['i_start_max_amp_sacc'], 0]),
            # relative head contribution
            ('rel_head_contrib_shift_done', lambda r: r['head_contrib_shift_done'] / r['max_sacc_amp']),
            # max head velocity
            ('max_head_velocity',
                lambda r: np.nanmax(np.abs(np.diff(r['ypr_head_world'][:, 0]) / np.diff(r['t_det_sacc'])))),
            # was the max amp saccade detected after the recorded trigger? then it should be dismissed
            ('saccade_after_threshold', lambda r: r['start_max_amp_sacc'] > 0),
            # did the led change happen before the saccade was over? if not, it should be dismissed
            ('led_change_before_saccade_end',
                lambda r: r['end_max_amp_sacc'] >= (
                    (r['t_target_turned_off'] if r['blanking_duration'] > 0 else r['t_led_shift_done'])
                        - r['t_saccade_started'])),
            # max amp saccade ratio criterion
            ('max_amp_saccade_length_valid', lambda r: 0.75 < r['ratio_max_real_amp'] < 1.25),
            # do all criteria apply?
            ('valid_trials', lambda r: (not r['saccade_after_threshold'])
                and r['led_change_before_saccade_end']
                and r['max_amp_saccade_length_valid']),
            # direction vectors from inion to nasion in world
            ('inion_nasion_world', lambda r: fh.to_unit(r['Ts_head_world'][:, 1, :] - r['Ts_head_world'][:, 2, :])),
            # angles for inion to nasion
            ('inion_nasion_ang', lambda r: np.rad2deg(fh.to_azim_elev(r['inion_nasion_world']))),
            # difference of inion->nasion and eye->target vector angles
            ('d_ininas_eye_target_ang', lambda r: r['inion_nasion_ang'] - r['eye_to_target_ang']),
            # difference of inion->nasion and eye->fixation vector angles
            ('d_ininas_eye_fix_ang', lambda r: r['inion_nasion_ang'] - r['eye_to_fixation_ang']),
            # difference of inion->nasion and gaze vector angles
            ('d_gaze_ininas_ang', lambda r: r['inion_nasion_ang'] - r['gaze_in_world_ang']),
            # inion nasion angle relative to head for centering the in head angles
            ('ininas_ref_ang',
             lambda r: np.rad2deg(fh.to_azim_elev(r['helmet'].ref_points[1, :] - r['helmet'].ref_points[2, :]))),
            # centered gaze ang head
            ('gaze_ang_head_centered', lambda r: r['gaze_in_head_ang'] - r['ininas_ref_ang'])
        ]),
        add_inplace=True,
        print_log=True
    )
Code Example #17
    return ((angles_from_eye - azim_elev_nonlinear)**2).sum()


optim_result_nonlinear_2 = minimize(err_func_nonlinear_direct, np.zeros(9))

R_eye_nonlinear_2 = fh.from_yawpitchroll(optim_result_nonlinear_2.x[0:3])
aa_2 = optim_result_nonlinear_2.x[3:5]
bb_2 = optim_result_nonlinear_2.x[5:7]
cc_2 = optim_result_nonlinear_2.x[7:9]

gaze_corrected_nonlinear_2 = (R_eye_nonlinear_2 @ gaze_normals.T).T
gaze_transformed_2 = gaze_corrected_nonlinear_2.copy()
xz_2 = gaze_transformed_2[:, [0, 2]]  # fancy indexing returns a copy
gaze_transformed_2[:, [0, 2]] = aa_2 * xz_2**2 + bb_2 * xz_2 + cc_2
gaze_transformed_2 = fh.to_unit(gaze_transformed_2)
azim_elev_nonlinear_2 = np.rad2deg(
    np.arctan2(gaze_transformed_2[:, [0, 2]], gaze_transformed_2[:, 1, None]))

#%%

plt.figure()
plt.scatter(angles_from_eye[:, 0],
            angles_from_eye[:, 1],
            c=np.arange(angles_from_eye.shape[0]),
            cmap='winter',
            label='real')
plt.scatter(azim_elev[:, 0],
            azim_elev[:, 1],
            marker='x',
            c=np.arange(azim_elev.shape[0]),