Example #1
 def err_func(parameters):
     ypr = parameters[0:3]
     T_leds = parameters[3:6]
     R_leds = freehead.from_yawpitchroll(ypr, in_degrees=in_degrees)
     # apply the inverse rigid transform (pinv of a rotation matrix is its transpose)
     calculated_positions = np.einsum('ij,tj->ti', np.linalg.pinv(R_leds),
                                      probe_tips - T_leds)
     position_error = np.sum((calculated_positions - probed_leds)**2)
     return position_error
Example #2
def err_func_nonlinear(ypr_aa_bb_cc):
    ypr = ypr_aa_bb_cc[0:3]
    R = fh.from_yawpitchroll(ypr)
    gaze_rotated = (R @ gaze_normals.T).T
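    # rotated gaze as azimuth/elevation in degrees (y is the forward axis)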
    azim_elev = np.rad2deg(
        np.arctan2(gaze_rotated[:, [0, 2]], gaze_rotated[:, 1, None]))
    aa = ypr_aa_bb_cc[None, 3:5]
    bb = ypr_aa_bb_cc[None, 5:7]
    cc = ypr_aa_bb_cc[None, 7:9]
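    # per-axis quadratic (nonlinear) correction of the azimuth/elevation angles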
    azim_elev_nonlinear = aa * (azim_elev**2) + bb * azim_elev + cc
    return ((angles_from_eye - azim_elev_nonlinear)**2).sum()
Example #3
    def err_func(ypr):
        R_eye_head = fh.from_yawpitchroll(ypr)

        T_eye_world = np.einsum('tij,j->ti', R_head_world,
                                T_eye_head) + T_head_world
        eye_to_target = fh.to_unit(T_target_world - T_eye_world)

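        # rotate eye-frame gaze normals into the head frame, then into the world frame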
        gaze_normals_world = np.einsum(
            'tij,tj->ti', R_head_world,
            np.einsum('ij,tj->ti', R_eye_head, gaze_normals))

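        # angular error (deg) between each gaze direction and the eye-to-target direction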
        angles = np.rad2deg(
            np.arccos(np.einsum('ti,ti->t', gaze_normals_world,
                                eye_to_target)))
        return np.sum(angles)
Example #4
def err_func_nonlinear_direct(ypr_aa_bb_cc):
    ypr = ypr_aa_bb_cc[0:3]
    aa = ypr_aa_bb_cc[None, 3:5]
    bb = ypr_aa_bb_cc[None, 5:7]
    cc = ypr_aa_bb_cc[None, 7:9]
    R = fh.from_yawpitchroll(ypr)
    gaze_rotated = (R @ gaze_normals.T).T
    gaze_transformed = gaze_rotated.copy()
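    # quadratic correction applied directly to the x and z gaze components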
    xz = gaze_transformed[:, [0, 2]]
    gaze_transformed[:, [0, 2]] = aa * xz**2 + bb * xz + cc
    gaze_transformed = fh.to_unit(gaze_transformed)
    azim_elev_nonlinear = np.rad2deg(
        np.arctan2(gaze_transformed[:, [0, 2]], gaze_transformed[:, 1, None]))
    return ((angles_from_eye - azim_elev_nonlinear)**2).sum()
Example #5
        def err_func(params):
            T_eye_head = ini_T_eye_head
            R_eye_head = fh.from_yawpitchroll(params[0:3])
            aa = params[3:5]
            bb = params[5:7]
            cc = params[7:9]

            T_eye_world = np.einsum('tij,j->ti', R_head_world,
                                    T_eye_head) + T_head_world
            eye_to_target = fh.to_unit(T_target_world - T_eye_world)

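            # rotate gaze into the head frame, apply the nonlinear correction, then into the world frame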
            gaze_normals_head = np.einsum('ij,tj->ti', R_eye_head,
                                          gaze_normals)
            gaze_normals_head_transformed = fh.normals_nonlinear_angular_transform(
                gaze_normals_head, aa, bb, cc)

            gaze_normals_world = np.einsum('tij,tj->ti', R_head_world,
                                           gaze_normals_head_transformed)

            angles = np.rad2deg(
                np.arccos(
                    np.einsum('ti,ti->t', gaze_normals_world, eye_to_target)))
            return angles.mean()
Example #6
    def calibrate(self):

        calibration_point = 127

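        # rig LED 127 is the calibration target; write_uint8 presumably takes LED index and RGB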
        self.athread.write_uint8(calibration_point, 128, 0, 0)
        print('Press space to reset eye calibration.')
        fh.wait_for_keypress(pygame.K_SPACE)
        self.pthread.reset_3d_eye_model()
        self.athread.write_uint8(calibration_point, 0, 0, 128)

        time.sleep(self.after_reset_wait)

        while True:

            self.athread.write_uint8(calibration_point, 128, 0, 0)

            print('Calibration pending. Press space to start.')
            fh.wait_for_keypress(pygame.K_SPACE)

            self.athread.write_uint8(calibration_point, 255, 0, 0)

            self.othread.reset_data_buffer()
            self.pthread.reset_data_buffer()

            print('\nCalibration starting.\n')

            start_time = time.monotonic()
            while time.monotonic() - start_time < self.calib_duration:
                time.sleep(0.1)

            self.athread.write_uint8(255, 0, 0, 0)

            print('\nCalibration over. Optimizing parameters...\n')

            odata = self.othread.get_shortened_data().copy()
            pdata = self.pthread.get_shortened_data().copy()
            gaze_normals = pdata[:, NORMALS]

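            # resample the helmet marker data onto the pupil timestamps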
            f_interpolate = interp1d(odata[:, OTIME],
                                     odata[:, HELMET],
                                     axis=0,
                                     bounds_error=False,
                                     fill_value=np.nan)
            odata_interpolated = f_interpolate(pdata[:, PTIME]).reshape(
                (-1, 4, 3))

            R_head_world, ref_points = self.helmet.solve(odata_interpolated)
            T_head_world = ref_points[:, I_BARY, :]

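            # keep samples with sufficient pupil confidence and a valid head rotation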
            confidence_enough = pdata[:, CONFIDENCE] > 0.3
            rotations_valid = ~np.any(np.isnan(R_head_world).reshape((-1, 9)),
                                      axis=1)
            chosen_mask = confidence_enough & rotations_valid

            T_target_world = np.tile(self.rig_leds[calibration_point, :],
                                     (chosen_mask.sum(), 1))

            ini_T_eye_head = self.helmet.ref_points[
                I_EYE, :] - self.helmet.ref_points[I_BARY, :]

            calibration_result = fh.calibrate_pupil_nonlinear(
                T_head_world[chosen_mask, ...],
                R_head_world[chosen_mask, ...],
                gaze_normals[chosen_mask, ...],
                T_target_world,
                ini_T_eye_head=ini_T_eye_head,
                leave_T_eye_head=True)

            print('Optimization done.\n')
            print('Error: ', calibration_result.fun, '\n')
            print('Parameters: ', calibration_result.x, '\n')

            # signal the calibration quality via the LED color
            if calibration_result.fun <= 0.7:
                self.athread.write_uint8(127, 0, 255, 0)  # green
            elif 0.7 < calibration_result.fun <= 1:
                self.athread.write_uint8(127, 255, 128, 0)  # yellow
            else:
                self.athread.write_uint8(127, 255, 0, 0)  # red

            print('Accept calibration? Yes: Space, No: Escape')
            key = fh.wait_for_keypress(pygame.K_SPACE, pygame.K_ESCAPE)

            if key == pygame.K_SPACE:
                self.R_eye_head = fh.from_yawpitchroll(
                    calibration_result.x[0:3])
                self.nonlinear_parameters = calibration_result.x[3:9]
                # self.helmet.ref_points[5, :] = self.helmet.ref_points[0, :] + calibration_result.x[9:12]
                break

            elif key == pygame.K_ESCAPE:
                continue
        self.athread.write_uint8(255, 0, 0, 0)  # leds off
Example #7
        print('Press space to record led with index', led_rig_index)
        while True:
            fh.wait_for_keypress(pygame.K_SPACE)
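            # read the latest tracker sample and solve for the probe pose and tip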
            current_sample = othread.current_sample.copy()
            rotation, tip = probe.solve(extract_probe(current_sample))
            if np.any(np.isnan(tip)):
                print('Probe not visible, try again.')
                continue
            probe_tips[i, :] = tip
            break
    arduino.write(struct.pack('>B', 255))

    led_rig_transform_result = fh.get_rig_transform(probe_tips,
                                                    led_rig_indices)

    R_rig = fh.from_yawpitchroll(led_rig_transform_result.x[0:3])
    T_rig = led_rig_transform_result.x[3:6]

    rig_led_positions = np.einsum('ij,tj->ti', R_rig, fh.LED_POSITIONS) + T_rig
else:
    print(
        'rig_led_positions calibration already present. Delete if you want to recalibrate.'
    )

head_measurement_points = [
    'head straight', 'nasion', 'inion', 'right ear', 'left ear'
]

for i, measurement_point in enumerate(head_measurement_points):
    print('Press space to measure: ' + measurement_point)
    while True:
Example #8
    def __init__(self, exp_df_path, trial_df_path, rig_leds_path):

        exp_df = pd.read_pickle(exp_df_path)
        trial_df = pd.read_pickle(trial_df_path)
        self.df = exp_df.join(trial_df.drop('block', axis=1),
                              on='trial_number')
        self.rig_leds = np.load(rig_leds_path)

        self.precalculate_data()

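        # load the head mesh, center it, and apply a fixed yaw/pitch/roll rotation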
        verts, faces, normals, nothin = vispy.io.read_mesh(
            os.path.join(fh.PACKAGE_DIR, '../datafiles', 'head.obj'))
        verts = np.einsum('ni,ji->nj', (verts - verts.mean(axis=0)),
                          fh.from_yawpitchroll(180, 90, 0))
        #verts = verts - verts.mean(axis=0)

        # add SceneCanvas first to create QApplication object before widget
        self.vispy_canvas = vispy.scene.SceneCanvas(create_native=True,
                                                    vsync=True,
                                                    show=True,
                                                    bgcolor=(0.2, 0.2, 0.2, 0))

        super(ExperimentVisualization, self).__init__()

        #self.setAttribute(Qt.WA_TranslucentBackground)

        self.timer = vispy.app.Timer(1 / 30,
                                     start=False,
                                     connect=self.advance_frame)

        self.n_trials = len(self.df)
        self.current_trial = 0
        self.i_frame = 0
        self.current_row = self.df.iloc[self.current_trial]
        self.current_R_helmet = None
        self.current_gaze_normals = None
        self.current_ref_points = None

        self.vispy_view = self.vispy_canvas.central_widget.add_view()
        self.vispy_view.camera = 'turntable'
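        # camera center: the corner completing the parallelogram of rig LEDs 0, 127, 254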
        self.vispy_view.camera.center = self.rig_leds[127, :] + (
            self.rig_leds[0, :] - self.rig_leds[127, :]) + (
                self.rig_leds[254, :] - self.rig_leds[127, :])
        self.vispy_view.camera.fov = 40
        self.vispy_view.camera.distance = 1500

        self.main_layout = QtWidgets.QVBoxLayout()
        self.setLayout(self.main_layout)

        self.frame_slider = QtWidgets.QSlider(Qt.Horizontal)
        self.frame_slider.setMinimum(0)
        self.frame_slider.valueChanged.connect(self.on_slider_change)

        self.trial_picker = QtWidgets.QSpinBox()
        self.trial_picker.setMaximum(self.n_trials)
        self.trial_picker.valueChanged.connect(self.on_picker_change)

        self.trial_changed.connect(self.load_trial)
        self.frame_changed.connect(self.load_frame)

        self.picker_slider_layout = QtWidgets.QHBoxLayout()

        self.main_layout.addWidget(self.vispy_canvas.native)
        self.main_layout.addLayout(self.picker_slider_layout)

        self.animation_button = QtWidgets.QPushButton('Start Animation')
        self.picker_slider_layout.addWidget(self.animation_button)
        self.animation_button.clicked.connect(self.toggle_animation)

        self.frame_label = QtWidgets.QLabel('Frame')
        self.picker_slider_layout.addWidget(self.frame_label)
        self.picker_slider_layout.addWidget(self.frame_slider)
        self.picker_slider_layout.addWidget(QtWidgets.QLabel('Trial'))
        self.picker_slider_layout.addWidget(self.trial_picker)

        self.rig_vis = visuals.Markers()
        self.rig_vis.set_gl_state(depth_test=False)
        self.rig_vis.antialias = 0
        self.vispy_view.add(self.rig_vis)

        self.helmet_vis = visuals.Markers()
        self.vispy_view.add(self.helmet_vis)

        self.gaze_vis = visuals.Line()
        self.vispy_view.add(self.gaze_vis)

        self.head_mesh = visuals.Mesh(vertices=verts,
                                      shading='smooth',
                                      faces=faces,
                                      mode='triangles',
                                      color=(0.5, 0.55, 0.7))
        self.head_mesh.shininess = 0
        self.head_mesh.light_dir = [0, 1, 1]
        # self.head_mesh.light_color = np.array((1, 1, 0.95)) * 0.8
        self.head_mesh.ambient_light_color = np.array((0.98, 0.98, 1)) * 0.2
        self.head_mesh.attach(Alpha(0.3))
        self.head_mesh.set_gl_state(depth_test=True, cull_face=True)
        self.head_mesh_transform = MatrixTransform()
        self.head_mesh.transform = self.head_mesh_transform
        self.vispy_view.add(self.head_mesh)

        self.trial_changed.emit(0)
        self.frame_changed.emit(0)

        self.show()
        vispy.app.run()
Example #9
        print('Press space to record led with index', led_rig_index)
        while True:
            fh.wait_for_keypress(pygame.K_SPACE)
            current_sample = othread.current_sample.copy()
            rotation, tip = probe.solve(extract_probe(current_sample))
            if np.any(np.isnan(tip)):
                print('Probe not visible, try again.')
                continue
            probe_tips[i, :] = tip
            break
    athread.write_uint8(255, 0, 0, 0)  # leds off

    led_rig_transform_result = fh.get_rig_transform(probe_tips,
                                                    led_rig_indices)

    R_rig = fh.from_yawpitchroll(led_rig_transform_result.x[0:3])
    T_rig = led_rig_transform_result.x[3:6]

    rig_led_positions = np.einsum('ij,tj->ti', R_rig, fh.LED_POSITIONS) + T_rig
else:
    print(
        'rig_led_positions calibration already present. Delete if you want to recalibrate.'
    )

head_measurement_points = [
    'head straight', 'nasion', 'inion', 'right ear', 'left ear', 'right eye'
]

for i, measurement_point in enumerate(head_measurement_points):
    print('Press space to measure: ' + measurement_point)
    while True:
Example #10
    T_head_world[i, :] = ref_points[0, :]
    gaze_normals[i, :] = gaze_normal

athread.write_uint8(255, 0, 0, 0)
#%%
calibration_result = fh.calibrate_pupil(
    T_head_world,
    R_head_world,
    gaze_normals,
    T_target_world,
    # shift the measured eye position 15 mm along the nasion-to-inion direction
    ini_T_eye_head=(helmet.ref_points[5, :] - helmet.ref_points[0, :]) +
    15 * fh.to_unit(helmet.ref_points[2, :] - helmet.ref_points[1, :]),
    bounds_mm=50)

R_eye_head = fh.from_yawpitchroll(calibration_result.x[0:3])
T_eye_head = calibration_result.x[3:6]

#%%
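# store the calibrated eye position back into the helmet reference points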
helmet.ref_points[5, :] = T_eye_head + helmet.ref_points[0, :]

# %%

settings = {
    'trials_per_shift': 2,
    'shift_magnitudes': np.arange(-5, 6) * 4,
    'default_fixation_led_index': 50,
    'default_target_led_index': 200,
    'max_random_led_offset': 10,
    'before_fixation_color': (1, 0, 0),
    'during_fixation_color': (0, 1, 0),
Example #11
def err_func(ypr):
    R = fh.from_yawpitchroll(ypr)
    gaze_rotated = (R @ gaze_normals.T).T
    azim_elev = np.rad2deg(
        np.arctan2(gaze_rotated[:, [0, 2]], gaze_rotated[:, 1, None]))
    return ((angles_from_eye - azim_elev)**2).sum()
Example #12
#%%
gaze_normals = np.vstack([p[3:6] for p in pupil_samples])


# find rotation matrix
def err_func(ypr):
    R = fh.from_yawpitchroll(ypr)
    gaze_rotated = (R @ gaze_normals.T).T
    azim_elev = np.rad2deg(
        np.arctan2(gaze_rotated[:, [0, 2]], gaze_rotated[:, 1, None]))
    return ((angles_from_eye - azim_elev)**2).sum()


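# fit the eye-in-head rotation by minimizing the squared angular error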
optim_result = minimize(err_func, np.zeros(3))
R_eye = fh.from_yawpitchroll(optim_result.x)

gaze_corrected = (R_eye @ gaze_normals.T).T
#%%

azim_elev = np.rad2deg(
    np.arctan2(gaze_corrected[:, [0, 2]], gaze_corrected[:, 1, None]))

#%%


def err_func_nonlinear(ypr_aa_bb_cc):
    ypr = ypr_aa_bb_cc[0:3]
    R = fh.from_yawpitchroll(ypr)
    gaze_rotated = (R @ gaze_normals.T).T
    azim_elev = np.rad2deg(