Example #1
 def display_gl(self, modelview, projection):
     self._check_devices()
     GL.glEnable(GL.GL_DEPTH_TEST)
     GL.glUseProgram(self.shader)
     GL.glUniformMatrix4fv(0, 1, False, projection)
     for i in range(1, len(self.poses)):  # index 0 is the HMD; skip it
         pose = self.poses[i]
         if not pose.bPoseIsValid:
             continue
         model_name = openvr.VRSystem().getStringTrackedDeviceProperty(
             i, openvr.Prop_RenderModelName_String)
         if model_name not in self.meshes:
             continue  # Come on, we already tried to load it a moment ago. Maybe next time.
         mesh = self.meshes[model_name]
         mesh.display_gl(modelview, projection, pose)
Example #2
 def b_init(self):
     glfw.init()
     glfw.window_hint(glfw.SAMPLES, 4)
     self.window = glfw.create_window(self.companion_width,
                                      self.companion_height, 'hello_vr',
                                      None, None)
     glfw.set_key_callback(self.window, self.key_callback)
     glfw.make_context_current(self.window)
     #
     self.hmd = openvr.init(openvr.VRApplication_Scene)
     #
     vr_sys = openvr.VRSystem()
     driver = vr_sys.getStringTrackedDeviceProperty(
         openvr.k_unTrackedDeviceIndex_Hmd,
         openvr.Prop_TrackingSystemName_String,
     )
     display = vr_sys.getStringTrackedDeviceProperty(
         openvr.k_unTrackedDeviceIndex_Hmd,
         openvr.Prop_SerialNumber_String,
     )
     glfw.set_window_title(self.window, f'hello_vr -- {driver} {display}')
     self.b_init_gl()
     assert openvr.VRCompositor()
     action_path = sys.path[0] + '/res/hellovr_actions.json'
     openvr.VRInput().setActionManifestPath(action_path)
     self.action_hide_cubes = openvr.VRInput().getActionHandle(
         '/actions/demo/in/HideCubes')
     self.action_hide_this_controller = openvr.VRInput().getActionHandle(
         '/actions/demo/in/HideThisController')
     self.action_trigger_haptic = openvr.VRInput().getActionHandle(
         '/actions/demo/in/TriggerHaptic')
     self.action_analog_input = openvr.VRInput().getActionHandle(
         '/actions/demo/in/AnalogInput')
     self.action_set_demo = openvr.VRInput().getActionSetHandle(
         '/actions/demo')
     self.hand[Left].action_haptic = openvr.VRInput().getActionHandle(
         '/actions/demo/out/Haptic_Left')
     self.hand[Left].source = openvr.VRInput().getInputSourceHandle(
         '/user/hand/left')
     self.hand[Left].action_pose = openvr.VRInput().getActionHandle(
         '/actions/demo/in/Hand_Left')
     self.hand[Right].action_haptic = openvr.VRInput().getActionHandle(
         '/actions/demo/out/Haptic_Right')
     self.hand[Right].source = openvr.VRInput().getInputSourceHandle(
         '/user/hand/right')
     self.hand[Right].action_pose = openvr.VRInput().getActionHandle(
         '/actions/demo/in/Hand_Right')
     return True
Example #3
def get_controller_ids(vrsys=None):
    if vrsys is None:
        vrsys = openvr.VRSystem()
    left = None
    right = None
    for i in range(openvr.k_unMaxTrackedDeviceCount):
        device_class = vrsys.getTrackedDeviceClass(i)
        if device_class == openvr.TrackedDeviceClass_Controller:
            role = vrsys.getControllerRoleForTrackedDeviceIndex(i)
            if role == openvr.TrackedControllerRole_RightHand:
                right = i
            if role == openvr.TrackedControllerRole_LeftHand:
                left = i
    return left, right
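
A minimal usage sketch for the helper above (an illustration, not part of the original source; it assumes a running SteamVR session and attaches as a background application, the same way Example #20 does):

openvr.init(openvr.VRApplication_Background)
try:
    left, right = get_controller_ids()
    print('left controller index:', left)
    print('right controller index:', right)
finally:
    openvr.shutdown()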
Example #4
    def __init__(self, x=0, y=-0.4, z=-0.35, size=0.55):
        self.vrsys = openvr.VRSystem()
        self.vroverlay = openvr.IVROverlay()
        result, self.wheel = self.vroverlay.createOverlay(
            'keyiiii'.encode(), 'keyiiii'.encode())
        check_result(result)

        check_result(self.vroverlay.setOverlayColor(self.wheel, 1, 1, 1))
        check_result(self.vroverlay.setOverlayAlpha(self.wheel, 1))
        check_result(self.vroverlay.setOverlayWidthInMeters(self.wheel, size))

        this_dir = os.path.abspath(os.path.dirname(__file__))
        wheel_img = os.path.join(this_dir, 'media', 'steering_wheel.png')

        check_result(
            self.vroverlay.setOverlayFromFile(self.wheel, wheel_img.encode()))

        # The binding returns a transform as an out-parameter; its entries are
        # set below and the matrix is re-applied through the raw function table.
        result, transform = self.vroverlay.setOverlayTransformAbsolute(
            self.wheel, openvr.TrackingUniverseSeated)

        transform[0][0] = 1.0
        transform[0][1] = 0.0
        transform[0][2] = 0.0
        transform[0][3] = x

        transform[1][0] = 0.0
        transform[1][1] = 1.0
        transform[1][2] = 0.0
        transform[1][3] = y

        transform[2][0] = 0.0
        transform[2][1] = 0.0
        transform[2][2] = 1.0
        transform[2][3] = z

        self.transform = transform
        self.size = size

        fn = self.vroverlay.function_table.setOverlayTransformAbsolute
        pmatTrackingOriginToOverlayTransform = transform
        result = fn(self.wheel, openvr.TrackingUniverseSeated,
                    openvr.byref(pmatTrackingOriginToOverlayTransform))

        check_result(result)
        check_result(self.vroverlay.showOverlay(self.wheel))
Example #5
    def __init__(self, configfile_path=None):
        # Initialize OpenVR as a background ('Other') application
        self.vr = openvr.init(openvr.VRApplication_Other)
        self.vrsystem = openvr.VRSystem()

        # Initializing object to hold indexes for various tracked objects
        self.object_names = {
            "Tracking Reference": [],
            "HMD": [],
            "Controller": [],
            "Tracker": []
        }
        self.devices = {}
        self.trackers = {}
        self.device_index_map = {}
        poses = self.vr.getDeviceToAbsoluteTrackingPose(
            openvr.TrackingUniverseStanding, 0,
            openvr.k_unMaxTrackedDeviceCount)

        # Loading config file
        if configfile_path:
            try:
                with open(configfile_path, 'r') as json_data:
                    config = json.load(json_data)
            except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
                print('config.json not found.')
                exit(1)

            # Iterate through the pose list to find the active devices and determine their type
            for i in range(openvr.k_unMaxTrackedDeviceCount):
                if poses[i].bDeviceIsConnected:
                    device_serial = self.vr.getStringTrackedDeviceProperty(
                        i, openvr.Prop_SerialNumber_String).decode('utf-8')
                    for device in config['devices']:
                        if device_serial == device['serial']:
                            device_name = device['name']
                            self.object_names[device['type']].append(
                                device_name)
                            self.devices[device_name] = vr_tracked_device(
                                self.vr, i, device['type'])
        else:
            # Iterate through the pose list to find the active devices and determine their type
            for i in range(openvr.k_unMaxTrackedDeviceCount):
                if poses[i].bDeviceIsConnected:
                    self.add_tracked_device(i)
Example #6
    def __init__(self):
        self.logger = utils.get_logger('PositionTracker')

        try:
            openvr.init(openvr.VRApplication_Other)
        except openvr.error_code.InitError:
            self.logger.error(
                'Could not init OpenVR. Please make sure that the Link box and HMD are connected and the '
                'Link box is powered on.')
            raise
        self.logger.info(
            'Successfully initialized OpenVR for the position tracking.')

        # get the VR System, that is an object to access all connected VR devices
        self.vr_system = openvr.VRSystem()

        self.tracker_idx_rcv = None
        self.tracker_idx_src = None
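
The two tracker index attributes are presumably filled in later by scanning the tracked devices. A hedged sketch of such a scan (the helper name is hypothetical), using the same device-class test as Examples #3 and #23:

def find_tracker_indices(vr_system):
    # Collect the indices of all currently tracked Vive trackers.
    indices = []
    for i in range(openvr.k_unMaxTrackedDeviceCount):
        device_class = vr_system.getTrackedDeviceClass(i)
        if device_class == openvr.TrackedDeviceClass_GenericTracker:
            indices.append(i)
    return indices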
Example #7
    def update_page(self):

        openvr.VRCompositor().waitGetPoses(self.poses, len(self.poses), None,
                                           0)
        vrsys = openvr.VRSystem()

        poses = {}
        hmd_index = openvr.k_unTrackedDeviceIndex_Hmd
        beacon_indices = []
        controller_indices = []

        for i in range(len(self.poses)):

            device_class = vrsys.getTrackedDeviceClass(i)
            if device_class == openvr.TrackedDeviceClass_Invalid:
                continue
            elif device_class == openvr.TrackedDeviceClass_Controller:
                controller_indices.append(i)
            elif device_class == openvr.TrackedDeviceClass_TrackingReference:
                beacon_indices.append(i)

            model_name = vrsys.getStringTrackedDeviceProperty(
                i, openvr.Prop_RenderModelName_String)
            pose = self.poses[i]

            poses[i] = dict(
                model_name=model_name,
                device_is_connected=pose.bDeviceIsConnected,
                valid=pose.bPoseIsValid,
                tracking_result=pose.eTrackingResult,
                d2a=pose.mDeviceToAbsoluteTracking,
                velocity=pose.vVelocity,  # m/s
                angular_velocity=pose.vAngularVelocity  # radians/s?
            )

        template = jinja_env.get_template('status.html')
        html = template.render(poses=poses,
                               hmd_index=hmd_index,
                               controller_indices=controller_indices,
                               beacon_indices=beacon_indices)

        self.webview.setHtml(html)
        self.update()
Example #8
 def handle_controller_buttons(self, poses):
     vr_system = openvr.VRSystem()
     for d in range(openvr.k_unMaxTrackedDeviceCount):
         dc = vr_system.getTrackedDeviceClass(d)
         if dc != openvr.TrackedDeviceClass_Controller:
             continue
         pose = poses[d]
         if not pose.bPoseIsValid:
             continue
         result, state = vr_system.getControllerState(d)
         if state.ulButtonPressed & (1 << openvr.k_EButton_SteamVR_Trigger):
             if not self._was_pressed:
                 print('pressed')
                 self.start_pulse(d)
                 self._was_pressed = True
             self.check_pulse(d)
         else:
             self._was_pressed = False
Example #9
    def update_controller_states(self):
        new_event = openvr.VREvent_t()
        while openvr.VRSystem().pollNextEvent(new_event):
            self._check_controller_drag(new_event)
        now_is_dragging = self.left_controller.is_dragging or self.right_controller.is_dragging

        xform = self._compute_controllers_transform()
        if xform is not None:
            obj.model_matrix *= xform

        # Check for drag begin/end
        if self.is_dragging and not now_is_dragging:
            # print ("drag released!")
            # maybe record velocity
            self._begin_inertial_coast()
        elif now_is_dragging and not self.is_dragging:
            # print ("drag started!")
            self.translation_history.clear()
            self.speed = 0.0
        elif now_is_dragging:  # continued drag
            pass
        else:  # not dragging, so maybe slow inertial coasting
            if self.speed > 0:
                dt = time.time() - self.previous_update_time
                dv = dt * self.velocity_damping
                self.speed -= dv
                if self.speed < 0:  # stay positive
                    self.speed = 0.0
                elif self.speed < self.min_velocity:  # static friction takes over at the very end
                    self.speed = 0.0
                else:
                    # print ("speed = %.3f meters per second" % self.speed)
                    dx = self.speed * dt * self.direction
                    obj.model_matrix *= MyTransform.translation(dx)
        self.previous_update_time = time.time()

        # Remember drag state
        self.is_dragging = now_is_dragging
Example #10
    def _draw(self, headset_id):
        poses = openvr.VRSystem().getDeviceToAbsoluteTrackingPose(
            openvr.TrackingUniverseRawAndUncalibrated, 0, 16)
        pose = poses[headset_id]

        headset_pose = pose.mDeviceToAbsoluteTracking
        for name, i in self._devices_to_show_remote.items():
            if not DEBUG:
                try:
                    if name not in self._device_visualisers:
                        self._device_visualisers[name] = DeviceVisualiser(name)
                    self._device_visualisers[name].update(
                        headset_pose, i.x, i.y, i.z)
                except openvr.OpenVRError:
                    print(
                        "Got an partner device, but can't show it due to Overlay Error"
                    )
                    print(name, i)
            else:
                print(
                    "Got an partner device, but can't show it due to HEADLESS DEBUG MODE"
                )
                print(name, i)
Example #11
    def __init__(self, global_offset, global_rotation):
        dll = os.path.join(os.path.dirname(__file__), 'pyopenvr_emu_c_wrapper',
                           'x64', 'Release', 'pyopenvr_emu_c_wrapper.dll')

        self.global_offset = global_offset
        self.global_rotation = global_rotation

        vrinputemu = ctypes.cdll.LoadLibrary(dll)
        self.vrinputemu = vrinputemu
        cr = vrinputemu.create_connection
        cr.restype = ctypes.c_void_p
        emu = vrinputemu.create_connection()
        emu_p = ctypes.c_void_p(emu)
        self.emu_p = emu_p

        openvr.init(openvr.VRApplication_Background)
        self.vrsys = openvr.VRSystem()

        hmd_id = get_hmd_id()
        self.hmd_id = hmd_id

        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()

        set_pose = vrinputemu.set_virtual_device_pose
        set_pose.argtypes = [
            ctypes.c_void_p,
            ctypes.c_uint32,
            ctypes.c_double,
            ctypes.c_double,
            ctypes.c_double,
            ctypes.c_double,
            ctypes.c_double,
            ctypes.c_double,
            ctypes.c_double,
        ]
        self.set_tracker_pose = set_pose
Example #12
def take_steamvr_images(save_dir, num_images, delay_between_images):
    plt.show()

    openvr.init(openvr.VRApplication_Scene)

    convert_coordinate_system = np.identity(4)
    convert_coordinate_system[:3, :3] = Rotation.from_euler(
        'XYZ', (180, 0, 0), degrees=True).as_matrix()

    device = openvr.k_unTrackedDeviceIndex_Hmd

    num_cameras = openvr.VRSystem().getInt32TrackedDeviceProperty(
        device, openvr.Prop_NumCameras_Int32)

    camera_to_head_mat = (openvr.HmdMatrix34_t * num_cameras)()

    openvr.VRSystem().getArrayTrackedDeviceProperty(
        device, openvr.Prop_CameraToHeadTransforms_Matrix34_Array,
        openvr.k_unHmdMatrix34PropertyTag, camera_to_head_mat,
        48 * num_cameras)

    cam = openvr.VRTrackedCamera()

    cam_handle = cam.acquireVideoStreamingService(device)

    width, height, buffer_size = cam.getCameraFrameSize(
        device, openvr.VRTrackedCameraFrameType_MaximumUndistorted)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel('x axis - metres')
    ax.set_ylabel('z axis - metres')
    ax.set_zlabel('y axis - metres')
    ax.set_xlim(-0.5, 0.5)
    ax.set_ylim(-0.5, 0.5)
    ax.set_zlim(0, 1)

    save_dir = ColmapFolder(save_dir)

    db = COLMAPDatabase.connect(save_dir.database_path)
    db.create_tables()

    init_params = np.array(
        (420.000000, (width / num_cameras) / 2, height / 2, 0.000000))

    camera_model = CAMERA_MODEL_NAMES['SIMPLE_RADIAL']

    cameras = {}
    camera_to_head_transforms = {}

    for i in range(num_cameras):
        cam_id = db.add_camera(camera_model.model_id, width / 2, height,
                               init_params)
        camera_to_head_transforms[cam_id] = hmd_matrix_to_numpy(
            camera_to_head_mat[i])
        cameras[cam_id] = Camera(id=cam_id,
                                 model=camera_model.model_name,
                                 width=width / num_cameras,
                                 height=height,
                                 params=init_params)

    poses = []  # Let waitGetPoses populate the poses structure the first time
    cam_positions = []

    images = []

    for i in range(num_images):

        poses, game_poses = openvr.VRCompositor().waitGetPoses(poses, None)
        hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]

        if not hmd_pose.bPoseIsValid:
            print("Pose not valid")
            continue

        world_to_head = hmd_matrix_to_numpy(hmd_pose.mDeviceToAbsoluteTracking)

        world_to_cams = {
            id_: world_to_head @ head_to_cam @ convert_coordinate_system
            for (id_, head_to_cam) in camera_to_head_transforms.items()
        }

        image_buffer = (ctypes.c_ubyte * buffer_size)()
        try:
            cam.getVideoStreamFrameBuffer(
                cam_handle, openvr.VRTrackedCameraFrameType_MaximumUndistorted,
                image_buffer, buffer_size)
        except openvr.OpenVRError:
            print("Error getting video stream buffer")
            continue

        image_array = np.array(image_buffer)

        image_array = image_array.reshape((height, width, 4))

        image_array = image_array[:, :, 0:3]

        image_array = np.clip(image_array, 0, 255)

        for j, (cam_id, world_to_cam) in enumerate(world_to_cams.items()):
            image = Image.fromarray(
                image_array[:,
                            int(width / num_cameras) *
                            j:int(width / num_cameras) * (j + 1), :])

            name = f"{i:03d}_cam{j}.jpg"

            image.save(save_dir.images_path / name)

            image_obj = read_write_model.Image(
                camera_id=cam_id,
                name=name,
                transformation_matrix=world_to_cam)

            images.append(image_obj)

            draw_axes(ax, transform_mat=world_to_cam)

        fig.show()

        fig.canvas.draw()
        fig.canvas.flush_events()
        time.sleep(delay_between_images)
        print(f"Picture taken :{i}")

    image_dict = {}

    print("All pictures taken")

    with open(save_dir.geo_reg_path, 'w') as geo_reg_file:

        for image in images:
            image_id = db.add_image(image=image)
            image.id = image_id
            image_dict[image_id] = image
            geo_reg_file.write(
                f"{image.name} {' '.join(map(str, image.transformation_matrix[0:3, 3]))}\n"
            )

    read_write_model.write_model(cameras, image_dict, {}, save_dir.sparse_path,
                                 '.txt')

    db.commit()
    db.close()

    print("Metadata saved")

    openvr.shutdown()
    plt.show()
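
hmd_matrix_to_numpy is not shown in this excerpt; a plausible implementation, inferred from the call sites above (an assumption, not the original helper), expands the row-major 3x4 OpenVR matrix into a 4x4 homogeneous numpy matrix:

import numpy as np

def hmd_matrix_to_numpy(mat):
    # HmdMatrix34_t rows hold the rotation; column 3 is the translation.
    m = np.identity(4)
    for row in range(3):
        for col in range(4):
            m[row][col] = mat[row][col]
    return m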
Example #13
 def edit_mode(self, left_ctr, right_ctr):
     result, state_r = openvr.VRSystem().getControllerState(right_ctr.id)
     if state_r.ulButtonPressed:
         # reversed(bin(...)).index('1') finds the index of the lowest set button bit
         if list(reversed(bin(state_r.ulButtonPressed)[2:])).index('1') == openvr.k_EButton_SteamVR_Trigger:
             self.move_wheel(right_ctr, left_ctr)
     super().edit_mode(left_ctr, right_ctr)
Example #14
 def check_pulse(self, device_id):
     if self._pulse_remaining > 0:
         openvr.VRSystem().triggerHapticPulse(device_id, 0, 2000)
         self._pulse_remaining -= 1
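
triggerHapticPulse fires one short pulse (the last argument is its duration in microseconds), so a sustained rumble has to be produced by re-triggering it every frame; that is what the _pulse_remaining counter above counts down. A self-contained sketch of the same idea (the device lookup and the 90 Hz frame time are assumptions):

import time
import openvr

openvr.init(openvr.VRApplication_Background)
vr_system = openvr.VRSystem()
device_id = vr_system.getTrackedDeviceIndexForControllerRole(
    openvr.TrackedControllerRole_RightHand)
if device_id != openvr.k_unTrackedDeviceIndexInvalid:
    for _ in range(90):  # roughly one second of rumble at 90 Hz
        vr_system.triggerHapticPulse(device_id, 0, 2000)  # axis 0, 2000 us
        time.sleep(1 / 90)
openvr.shutdown()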
Example #15
import sys
import time
import openvr

openvr.init(openvr.VRApplication_Overlay)
ids = []

leftId = openvr.VRSystem().getTrackedDeviceIndexForControllerRole(
    openvr.TrackedControllerRole_LeftHand)
rightId = openvr.VRSystem().getTrackedDeviceIndexForControllerRole(
    openvr.TrackedControllerRole_RightHand)


def to_percent(value):
    return str(value * 100)[0:2]


def writefile(left, right):
    datafile = open("../storage/ovr.txt", "w")
    left = to_percent(left)
    right = to_percent(right)
    datafile.write("{:2s}.{:2s}".format(str(left), str(right)))
    datafile.close()


while True:
    overlay = openvr.IVROverlay()
    notification = openvr.IVRNotifications()
    left_battery = openvr.VRSystem().getFloatTrackedDeviceProperty(
        leftId, openvr.Prop_DeviceBatteryPercentage_Float)
    right_battery = openvr.VRSystem().getFloatTrackedDeviceProperty(
        rightId, openvr.Prop_DeviceBatteryPercentage_Float)
    writefile(left_battery, right_battery)
    time.sleep(1)
Example #16
    def __init__(self, left_ctr, right_ctr):
        self._handl_closed = False
        self._handr_closed = False
        self.left_ctr = left_ctr
        self.right_ctr = right_ctr
        hand_size = 0.15

        self.vrsys = openvr.VRSystem()
        self.vroverlay = openvr.IVROverlay()

        result, self.l_ovr = self.vroverlay.createOverlay(
            'left_hand'.encode(), 'left_hand'.encode())
        result, self.r_ovr = self.vroverlay.createOverlay(
            'right_hand'.encode(), 'right_hand'.encode())

        check_result(self.vroverlay.setOverlayColor(self.l_ovr, 1, 1, 1))
        check_result(self.vroverlay.setOverlayColor(self.r_ovr, 1, 1, 1))
        check_result(self.vroverlay.setOverlayAlpha(self.l_ovr, 1))
        check_result(self.vroverlay.setOverlayAlpha(self.r_ovr, 1))
        check_result(
            self.vroverlay.setOverlayWidthInMeters(self.l_ovr, hand_size))
        check_result(
            self.vroverlay.setOverlayWidthInMeters(self.r_ovr, hand_size))

        this_dir = os.path.abspath(os.path.dirname(__file__))

        self.l_open_png = os.path.join(this_dir, 'media', 'hand_open_l.png')
        self.r_open_png = os.path.join(this_dir, 'media', 'hand_open_r.png')
        self.l_close_png = os.path.join(this_dir, 'media', 'hand_closed_l.png')
        self.r_close_png = os.path.join(this_dir, 'media', 'hand_closed_r.png')

        check_result(
            self.vroverlay.setOverlayFromFile(self.l_ovr,
                                              self.l_open_png.encode()))
        check_result(
            self.vroverlay.setOverlayFromFile(self.r_ovr,
                                              self.r_open_png.encode()))

        result, transform = self.vroverlay.setOverlayTransformTrackedDeviceRelative(
            self.l_ovr, self.left_ctr.id)
        result, transform = self.vroverlay.setOverlayTransformTrackedDeviceRelative(
            self.r_ovr, self.right_ctr.id)

        transform[0][0] = 1.0
        transform[0][1] = 0.0
        transform[0][2] = 0.0
        transform[0][3] = 0

        transform[1][0] = 0.0
        transform[1][1] = 1.0
        transform[1][2] = 0.0
        transform[1][3] = 0

        transform[2][0] = 0.0
        transform[2][1] = 0.0
        transform[2][2] = 1.0
        transform[2][3] = 0

        self.transform = transform

        rotate = initRotationMatrix(0, -pi / 2)
        self.transform = matMul33(rotate, self.transform)

        fn = self.vroverlay.function_table.setOverlayTransformTrackedDeviceRelative
        result = fn(self.l_ovr, self.left_ctr.id, openvr.byref(self.transform))
        result = fn(self.r_ovr, self.right_ctr.id,
                    openvr.byref(self.transform))

        check_result(result)
        check_result(self.vroverlay.showOverlay(self.l_ovr))
        check_result(self.vroverlay.showOverlay(self.r_ovr))
Example #17
 def limiter(self, left_ctr, right_ctr):
     if abs(self._wheel_angles[-1]) / (2 * pi) > (
             self.config.wheel_degrees / 360) / 2:
         self._wheel_angles[-1] = self._wheel_angles[-2]
         openvr.VRSystem().triggerHapticPulse(left_ctr.id, 0, 3000)
         openvr.VRSystem().triggerHapticPulse(right_ctr.id, 0, 3000)
Example #18
 def handle_input(self):
     # Note: Key events are handled by glfw in key_callback
     # Process SteamVR events
     event = openvr.VREvent_t()
     has_events = True
     while has_events:
         has_events = self.hmd.pollNextEvent(event)
         self.process_vr_event(event)
     # Process SteamVR action state
     # UpdateActionState is called each frame to update the state of the actions themselves. The application
     # controls which action sets are active with the provided array of VRActiveActionSet_t structs.
     action_sets = (openvr.VRActiveActionSet_t * 1)()
     action_set = action_sets[0]
     action_set.ulActionSet = self.action_set_demo
     openvr.VRInput().updateActionState(action_sets)
     #
     self.show_cubes = not get_digital_action_state(
         self.action_hide_cubes)[0]
     #
     bH, haptic_device = get_digital_action_rising_edge(
         self.action_trigger_haptic, True)
     if bH:
         for hand in self.hand:
             if haptic_device == hand.source:
                 openvr.VRInput().triggerHapticVibrationAction(
                     hand.action_haptic, 0, 1, 4, 1,
                     openvr.k_ulInvalidInputValueHandle)
     analog_data = openvr.VRInput().getAnalogActionData(
         self.action_analog_input, openvr.k_ulInvalidInputValueHandle)
     self.analog_value[0] = analog_data.x
     self.analog_value[1] = analog_data.y  # TODO: these seem to be unused...
     self.hand[Left].show_controller = True
     self.hand[Right].show_controller = True
     do_hide, hide_device = get_digital_action_state(
         self.action_hide_this_controller, True)
     if do_hide:
         for hand in self.hand:
             if hide_device == hand.source:
                 hand.show_controller = False
     for hand in self.hand:
         pose_data = openvr.VRInput().getPoseActionDataForNextFrame(
             hand.action_pose,
             openvr.TrackingUniverseStanding,
             openvr.k_ulInvalidInputValueHandle,
         )
         if not pose_data.bActive:
             hand.show_controller = False
             continue
         if not pose_data.pose.bPoseIsValid:
             hand.show_controller = False
             continue
         hand.pose = convert_steam_vr_matrix(
             pose_data.pose.mDeviceToAbsoluteTracking)
         origin_info = openvr.VRInput().getOriginTrackedDeviceInfo(
             pose_data.activeOrigin)
         if origin_info.trackedDeviceIndex != openvr.k_unTrackedDeviceIndexInvalid:
             render_model_name = openvr.VRSystem().getStringTrackedDeviceProperty(
                 origin_info.trackedDeviceIndex,
                 openvr.Prop_RenderModelName_String)
             hand.render_model = self.find_or_load_render_model(
                 render_model_name)
             hand.render_model_name = render_model_name
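
get_digital_action_state and get_digital_action_rising_edge come from the same hello_vr sample and are not shown here. A sketch consistent with the calls above (the exact sample helpers may differ):

def get_digital_action_state(action, get_device=False):
    # Returns (is_pressed, device_handle) for a digital action this frame.
    action_data = openvr.VRInput().getDigitalActionData(
        action, openvr.k_ulInvalidInputValueHandle)
    device = openvr.k_ulInvalidInputValueHandle
    if get_device and action_data.bActive:
        origin_info = openvr.VRInput().getOriginTrackedDeviceInfo(
            action_data.activeOrigin)
        device = origin_info.devicePath
    return action_data.bActive and action_data.bState, device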
Example #19
    interval = 1 / 250

    # initialize OSC client
    client = udp_client.SimpleUDPClient(args.ip, args.port)

    # print some stuff
    print(Fore.GREEN + "\rSending OSC tracking data on " + args.ip + ":" +
          str(args.port),
          end="\n\n")
    print(Fore.YELLOW + '{0: <13}'.format("OSC address") +
          '{0: <9}'.format("X") + '{0: <9}'.format("Y") +
          '{0: <9}'.format("Z") + '{0: <9}'.format("Yaw") +
          '{0: <9}'.format("Pitch") + '{0: <9}'.format("Roll"))

    # Get the controller IDs
    vrsystem = openvr.VRSystem()

    left_id, right_id = None, None
    print("===========================")
    print("Waiting for controllers...")
    try:
        while left_id is None or right_id is None:
            left_id, right_id = get_controller_ids(vrsystem)
            if left_id and right_id:
                break
            print("Waiting for controllers...")
            time.sleep(1.0)
    except KeyboardInterrupt:
        print("Control+C pressed, shutting down...")
        openvr.shutdown()
Example #20
import openvr

openvr.init(openvr.VRApplication_Background)

while True:
    print()
    for c in range(16):
        result, pControllerState = openvr.VRSystem().getControllerState(c)
        for i in pControllerState.rAxis:
            if i.x != 0 or i.y != 0:
                print(i.x, i.y)
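
The loop above polls every possible device index with no delay and recreates the VRSystem wrapper on each pass. A gentler variant (a sketch, not part of the original) caches the handle, skips non-controllers, and sleeps between polls:

import time
import openvr

openvr.init(openvr.VRApplication_Background)
vr_system = openvr.VRSystem()
while True:
    for c in range(openvr.k_unMaxTrackedDeviceCount):
        if vr_system.getTrackedDeviceClass(c) != openvr.TrackedDeviceClass_Controller:
            continue
        result, state = vr_system.getControllerState(c)
        for axis in state.rAxis:
            if axis.x != 0 or axis.y != 0:
                print(axis.x, axis.y)
    time.sleep(0.01)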

Example #21
def data():
    # print("Receiving data...")

    try:
        JSpose = json.loads(request.body.read())
    except Exception as error:
        print(error)
        return

    # Go over the extra data in the JSpose
    try:
        for data in JSpose["extraData"]:
            if data["name"] == "xOffset":
                global x_offset
                x_offset = float(data["value"])
            elif data["name"] == "yOffset":
                global y_offset
                y_offset = float(data["value"])
            elif data["name"] == "xMultiplier":
                global x_multiplier
                x_multiplier = float(data["value"])
            elif data["name"] == "yMultiplier":
                global y_multiplier
                y_multiplier = float(data["value"])
    except KeyError:
        pass

    # Count how many pose updates arrive each second
    global last_poses_per_second
    global poses_this_second
    global time_of_last_pose

    if math.floor(time.time()) == time_of_last_pose:
        poses_this_second += 1
    else:
        time_of_last_pose = math.floor(time.time())
        last_poses_per_second = poses_this_second
        poses_this_second = 0

    # print("Poses per second:",last_poses_per_second)

    #  ====================
    #  OpenVR
    #  ====================

    # Update the positions of the controllers and the HMD
    global poses
    poses = openvr.VRSystem().getDeviceToAbsoluteTrackingPose(
        openvr.TrackingUniverseStanding, 0, poses)

    # HMD
    hmdPos = getPosOfDevice(openvr.k_unTrackedDeviceIndex_Hmd)
    # print(hmdPos)

    # Right controller
    rightControllerPos = getPosOfDevice(rightController)

    # Left controller
    leftControllerPos = getPosOfDevice(leftController)

    #  ====================
    #  Calculate difference
    #  and apply positions
    #  ====================

    # print(rightControllerPos[0], "\t", rightControllerPos[0])
    # print(leftControllerPos[0], "\t", leftControllerPos[1])

    if JSpose["hip"]:
        setTrackerLocation(hip_virtual_tracker,
                           calculate_x_part(JSpose["hip"]["x"]),
                           calculate_y_part(JSpose["hip"]["y"]), hmdPos[2])
    if JSpose["left_foot"]:
        setTrackerLocation(left_foot_virtual_tracker,
                           calculate_x_part(JSpose["left_foot"]["x"]),
                           calculate_y_part(JSpose["left_foot"]["y"]),
                           hmdPos[2])
    if JSpose["right_foot"]:
        setTrackerLocation(right_foot_virtual_tracker,
                           calculate_x_part(JSpose["right_foot"]["x"]),
                           calculate_y_part(JSpose["right_foot"]["y"]),
                           hmdPos[2])

    print_string = ""
    print_string += "RightController x: " + str(
        rightControllerPos[0]) + "\ty: " + str(rightControllerPos[1]) + "\n"
    print_string += "LeftController  x: " + str(
        leftControllerPos[0]) + "\ty: " + str(leftControllerPos[1]) + "\n"
    print_string += "\n"
    if JSpose["hip"]:
        print_string += "HipTracker       x: " + str(
            calculate_x_part(JSpose["hip"]["x"])) + "\ty: " + str(
                calculate_y_part(JSpose["hip"]["y"])) + "\n"
    if JSpose["right_foot"]:
        print_string += "RightFootTracker x: " + str(
            calculate_x_part(JSpose["right_foot"]["x"])) + "\ty: " + str(
                calculate_y_part(JSpose["right_foot"]["y"])) + "\n"
    if JSpose["left_foot"]:
        print_string += "LeftFootTracker  x: " + str(
            calculate_x_part(JSpose["left_foot"]["x"])) + "\ty: " + str(
                calculate_y_part(JSpose["left_foot"]["y"])) + "\n"
    print_string += "---------------------------------------------------------------"
    print(print_string)

    #  ====================
    #  Finish
    #  ====================

    stdout.flush()
Example #22
 def limiter(self, left_ctr, right_ctr):
     if abs(self._wheel_angles[-1]) / (2 * pi) > FULLTURN / 2:
         self._wheel_angles[-1] = self._wheel_angles[-2]
         openvr.VRSystem().triggerHapticPulse(left_ctr.id, 0, 3000)
         openvr.VRSystem().triggerHapticPulse(right_ctr.id, 0, 3000)
Example #23
def main():

    # arguments parsing
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--listeners",
        default="127.0.0.1:9001",
        help=
        "The ip:port of the OSC programs listening, different listeners must be separated with ';', if multiple ports are used on one ip, it possible to use this form ip1:port1-1;port1-2;port1-3;ip2:port2-1;port2-2..."
    )
    parser.add_argument(
        "--origin_serial",
        default=None,
        help="The serial number of the tracker used for origin calibration")
    parser.add_argument(
        "--real_world_points",
        default=None,
        help=
        "The JSON file containing list of points to use for origin calibration"
    )
    parser.add_argument(
        "--framerate",
        type=int,
        default=60,
        help=
        "Expected framerate - used to slow down OSC messages if OSC listener can't handle the framerate"
    )
    parser.add_argument(
        "--openvr_coordinates",
        default=False,
        action="store_true",
        help=
        "Use openVR native coordinate system if set, or Unity coordinate system if not set"
    )
    parser.add_argument("--steam",
                        default=False,
                        action="store_true",
                        help="Open SteamVR when the script is launched")
    parser.add_argument(
        "--oscpattern",
        default="/iss/tracker",
        help="The OSC pattern used to send messages, default is '/iss/tracker'"
    )
    parser.add_argument("--bundles",
                        default=False,
                        action="store_true",
                        help="Send single frame messages as a OSC bundle")
    args = parser.parse_args()

    # global program variables
    listeners = []  # array of OSC clients
    origin_serial = args.origin_serial
    real_world_points = []
    real_world_points_array = []
    different_h = False  # to check if all points are at the same height
    current_point_index = -1
    calib_points = []
    calib_mat = np.identity(4)
    # booleans for keystroke interactions
    to_unity_world = True  # can be modified by --openvr_coordinates option or 'u' key
    set_origin = False  # change state with 'o' key
    calib_next_point = False
    escape = False  # change state with 'escape' key
    # filter used to smooth framerate computation for display
    t0_list = []
    t0_list_max_size = args.framerate / 2
    # array of ids of currently tracked trackers (used to identify newly tracked trackers)
    trackers_list = []

    print("======================================")
    print("")
    print("---------------------------------------")
    print("   Coordinate system and calibration   ")
    print("---------------------------------------")
    # if --openvr_coordinates option is used, compute coordinates for openvr coordinate system
    # if not, compute for Unity coordinate system
    to_unity_world = not args.openvr_coordinates
    coordsys = "Unity"
    if not to_unity_world:
        coordsys = "openVR"
    print("Coordinate system is set for {0}".format(coordsys))

    # load last origin matrix from origin_mat.rtm file,
    # the file must be located in the same folder as this script
    origin_file_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "origin_mat.rtm")
    #print("try to open origin file at {0}".format(origin_file_path))
    origin_file = Path(origin_file_path)
    if origin_file.exists():
        with open(origin_file_path, 'rb') as loaded_file:
            depickler = pickle.Unpickler(loaded_file)
            calib_mat = depickler.load()
            print("origin loaded from file " + origin_file_path)
            #print(calib_mat)
    else:
        print(
            "origin file not found at " + origin_file_path +
            ", please place the reference and press 'o' key to set a new origin"
        )

    if origin_serial:
        print("Calibration can be done with tracker serial " + origin_serial)
        if args.real_world_points:
            fp = open(args.real_world_points, 'r')
            real_world_points = json.load(fp)
            if len(real_world_points) < 4:
                real_world_points = None
                print("Calibration file must contain at least 4 points")
            else:
                print("Load real world points from JSON file for calibration")
                real_world_points_array = np.zeros((len(real_world_points), 3),
                                                   np.float32)
                different_h = False
                last_h = None
                for i, pt in enumerate(real_world_points):
                    if to_unity_world:
                        real_world_points_array[i][0] = -pt['x']
                        real_world_points_array[i][1] = pt['z']
                        real_world_points_array[i][2] = -pt['y']
                    else:
                        real_world_points_array[i][0] = pt['x']
                        real_world_points_array[i][1] = pt['y']
                        real_world_points_array[i][2] = pt['z']
                    # check if there is at least one point outside of floor plan
                    h = real_world_points_array[i][2]
                    if last_h is None:
                        last_h = h
                    if last_h != h:
                        different_h = True
                    last_h = h

                    print("[{0}] : {1:3.2f}, {2:3.2f}, {3:3.2f}".format(
                        pt['id'], real_world_points_array[i][0],
                        real_world_points_array[i][1],
                        real_world_points_array[i][2]))
                # if all points are at the same height,
                # create a virtual point and append it to the array
                if not different_h:
                    print(
                        "All points are at the same height, creating a virtual point for calibration"
                    )
                    A = real_world_points_array[0]
                    B = real_world_points_array[1]
                    C = real_world_points_array[2]
                    if collinear(A, B, C):
                        print(
                            "ERROR: the first 3 points in the real_world_points JSON file must not be aligned"
                        )
                    virtual_point = A + np.cross(np.subtract(B, A),
                                                 np.subtract(C, A))
                    real_world_points_array = np.vstack(
                        [real_world_points_array, virtual_point])
                    print("[{0}] : {1:3.2f}, {2:3.2f}, {3:3.2f}".format(
                        'virtual point', virtual_point[0], virtual_point[1],
                        virtual_point[2]))

    print("")
    print("---------------------------------------")
    print("             OSC parameters            ")
    print("---------------------------------------")
    # parse the ip and ports from listeners and create OSC clients
    listeners_str = args.listeners.split(";")
    overall_ip = "127.0.0.1"
    for l in listeners_str:
        listener = l.split(":")
        port = -1
        if len(listener) == 2:
            overall_ip = listener[0]
            port = int(listener[1])
        elif len(listener) == 1:
            port = int(listener[0])
        ip = overall_ip
        print("Add OSC listener at {0}:{1}".format(ip, port))
        listeners.append(udp_client.SimpleUDPClient(ip, port))

    print("OSC pattern : " + args.oscpattern)

    send_bundles = "Send separate OSC messages for each tracker"
    if args.bundles:
        send_bundles = "Send simultaneous tracker OSC messages as bundles"
    print(send_bundles)

    print("")
    print("---------------------------------------")
    print("                 OpenVR                ")
    print("---------------------------------------")
    # init OpenVR
    # VRApplication_Other will not open SteamVR
    # VRApplication_Scene will open SteamVR
    vrapplication_type = openvr.VRApplication_Other
    if args.steam:
        vrapplication_type = openvr.VRApplication_Scene
        print("open SteamVR")
    print("Initialize OpenVR ... ", end='')
    try:
        openvr.init(vrapplication_type)
    except openvr.OpenVRError as e:
        print("Impossible to intialize OpenVR")
        print(e)
        exit(0)
    vrsystem = openvr.VRSystem()
    print("OK")
    print("======================================")
    print("")

    program_t0 = time.perf_counter()
    while (not escape):

        # keep track of loop time
        loop_t0 = time.perf_counter()
        t0_list.append(loop_t0)
        if len(t0_list) >= t0_list_max_size:
            del t0_list[0]

        # handle keyboard inputs
        key = kbfunc()
        if key != 0:
            if key == ord('o'):
                if origin_serial:
                    # set up origin
                    set_origin = True
                    print("\nset new origin : ")
                    if real_world_points:
                        current_point_index = -1
                        print("Multi-points calibration")
                        calib_next_point = True
                    else:
                        print("Single point calibration")
            elif key == ord('n'):
                calib_next_point = True

            elif key == ord('r'):
                print("\nreset origin")
                calib_mat = np.identity(4)
            elif key == ord('u'):
                to_unity_world = not to_unity_world
                print("\nto unity world = {0}".format(to_unity_world))
            elif key == 27:  # escape
                escape = True
                openvr.shutdown()

        # get all devices currently tracked, it include HMD, controllers, lighthouses and trackers
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        tracked_devices = poses_t()
        tracked_devices = vrsystem.getDeviceToAbsoluteTrackingPose(
            openvr.TrackingUniverseStanding, 0, len(tracked_devices))
        # array to store content that must be sent over OSC
        osc_content = []

        current_loop_trackers_list = []
        for _i, device in enumerate(tracked_devices):
            # browse the array and keep only correctly tracked trackers
            if device.bPoseIsValid and vrsystem.getTrackedDeviceClass(
                    _i) == openvr.TrackedDeviceClass_GenericTracker:
                tracker_id = vrsystem.getStringTrackedDeviceProperty(
                    _i, openvr.Prop_SerialNumber_String)
                current_loop_trackers_list.append(tracker_id)
                # add tracker_id to list if not already in it
                if tracker_id not in trackers_list:
                    trackers_list.append(tracker_id)
                    print("\nNew tracker found : {}".format(trackers_list[-1]))
                # compute relative position (vector3) and rotation(quaternion) from 3x4 openvr matrix
                m_4x4 = vive_pose_to_numpy_matrix_4x4(
                    device.mDeviceToAbsoluteTracking)
                m_corrected = np.matmul(calib_mat, m_4x4)

                m_rot = np.identity(3)
                for x in range(0, 3):
                    for y in range(0, 3):
                        m_rot[x][y] = m_corrected[x][y]
                quat = quaternion.from_rotation_matrix(m_rot)

                # append computed pos/rot to the list
                content = [
                    tracker_id, m_corrected[0][3], m_corrected[1][3],
                    m_corrected[2][3], quat.x, quat.y, quat.z, quat.w
                ]
                if to_unity_world:
                    # switch and invert coordinates if Unity coordinate system is required
                    content = [
                        tracker_id, -m_corrected[0][3], -m_corrected[2][3],
                        m_corrected[1][3], quat.x, quat.z, -quat.y, quat.w
                    ]
                osc_content.append(content)

                # set new origin if requested
                if vrsystem.getStringTrackedDeviceProperty(
                        _i, openvr.Prop_SerialNumber_String
                ) == origin_serial and set_origin:
                    # perform multi-points calibration
                    if len(real_world_points) >= 4:
                        if calib_next_point:
                            if current_point_index < len(
                                    real_world_points) - 1:
                                print("Place your origin tracker on point")
                                print(real_world_points[current_point_index +
                                                        1])
                                print("and press 'n'")
                            if current_point_index >= 0:
                                openvr_pt = m_4x4[0:3, 3]
                                calib_points.append(openvr_pt)
                            calib_next_point = False
                            current_point_index = current_point_index + 1
                            if current_point_index >= len(real_world_points):
                                print("Computing calibration with {} points".
                                      format(len(real_world_points)))
                                calib_points = np.stack(calib_points)
                                if not different_h:
                                    print(
                                        "All real world points are at same height, creating virtual point"
                                    )
                                    A = calib_points[0]
                                    B = calib_points[1]
                                    C = calib_points[2]
                                    virtual_calib_point = A + np.cross(
                                        np.subtract(B, A), np.subtract(C, A))
                                    calib_points = np.vstack(
                                        [calib_points, virtual_calib_point])

                                retval, M, inliers = cv2.estimateAffine3D(
                                    calib_points, real_world_points_array)
                                calib_mat = np.vstack([M, [0, 0, 0, 1]])
                                with open(origin_file_path,
                                          'wb') as saved_file:
                                    pickler = pickle.Pickler(saved_file)
                                    pickler.dump(calib_mat)
                                print(calib_mat.round(3))
                                set_origin = False

                    # perform single point calibration (e.g., tracker position is origin, rotation matters)
                    else:
                        set_origin = False
                        calib_mat = np.linalg.inv(m_4x4)
                        with open(origin_file_path, 'wb') as saved_file:
                            pickler = pickle.Pickler(saved_file)
                            pickler.dump(calib_mat)
                        print(calib_mat.round(3))
        # remove trackers that are not tracked any more
        for t in trackers_list:
            if t not in current_loop_trackers_list:
                print("\n\tTracker lost : {}".format(t))
                trackers_list.remove(t)

        # send osc content to all listeners if needed
        for l in listeners:
            if args.bundles:
                bundle_builder = osc_bundle_builder.OscBundleBuilder(
                    osc_bundle_builder.IMMEDIATELY)
                for c in osc_content:
                    msg = osc_message_builder.OscMessageBuilder(
                        address=args.oscpattern)
                    for oscarg in c:
                        msg.add_arg(oscarg)
                    bundle_builder.add_content(msg.build())
                l.send(bundle_builder.build())
            else:
                for c in osc_content:
                    l.send_message(args.oscpattern, c)

        # calculate fps
        fps = 0
        if len(t0_list) > 1:
            fps = len(t0_list) / (t0_list[-1] - t0_list[0])
        # update state display
        if (not set_origin):
            print(
                "\rtime : {0:8.1f}, fps : {1:4.1f}, nb trackers : {2}        ".
                format(loop_t0 - program_t0, fps, len(osc_content)),
                end="")

        # ensure fps is respected to avoid OSC messages queue overflow
        while time.perf_counter() - loop_t0 <= 1.0 / args.framerate:
            pass
Example #24
import openvr
from sys import stdout
from time import sleep
import pprint

poses = []
pp = pprint.PrettyPrinter(indent=4)
openvr.init(openvr.VRApplication_Background)

while True:
    poses = openvr.VRSystem().getDeviceToAbsoluteTrackingPose(
        openvr.TrackingUniverseStanding, 0, poses)
    hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
    absolute_hmd_pose = hmd_pose.mDeviceToAbsoluteTracking

    hmd_pos = [
        absolute_hmd_pose[0][3], absolute_hmd_pose[1][3],
        absolute_hmd_pose[2][3]
    ]

    pp.pprint(hmd_pos)

    stdout.flush()
    sleep(.5)
Example #25
def get_hmd_id():
    vrsys = openvr.VRSystem()
    for i in range(openvr.k_unMaxTrackedDeviceCount):
        device_class = vrsys.getTrackedDeviceClass(i)
        if device_class == openvr.TrackedDeviceClass_HMD:
            return i
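
A hypothetical usage of get_hmd_id(), mirroring the pose-fetch pattern of Example #24 (initialization follows the background-app pattern used elsewhere in this collection):

openvr.init(openvr.VRApplication_Background)
hmd_id = get_hmd_id()
if hmd_id is None:
    print('no HMD connected')
else:
    poses = []
    poses = openvr.VRSystem().getDeviceToAbsoluteTrackingPose(
        openvr.TrackingUniverseStanding, 0, poses)
    print(poses[hmd_id].mDeviceToAbsoluteTracking)
openvr.shutdown()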
Example #26
 )
 # obj = ObjMesh(open("AIv6b_699.obj", 'r'))
 renderer = OpenVrGlRenderer(multisample=2)
 renderer.append(SkyActor())
 # renderer.append(ColorCubeActor())
 controllers = TrackedDevicesActor(renderer.poses)
 controllers.show_controllers_only = True
 renderer.append(controllers)
 renderer.append(obj)
 renderer.append(FloorActor())
 new_event = openvr.VREvent_t()
 with GlfwApp(renderer, "mouse brain") as glfwApp:
     while not glfw.window_should_close(glfwApp.window):
         glfwApp.render_scene()
         # Update controller drag state when buttons are pushed
         while openvr.VRSystem().pollNextEvent(new_event):
             check_controller_drag(new_event)
         tx1 = right_controller.check_drag(renderer.poses)
         tx2 = left_controller.check_drag(renderer.poses)
         if tx1 is None and tx2 is None:
             pass  # No dragging this time
         elif tx1 is not None and tx2 is not None:
             # TODO - combined transform
             # Translate to average of two translations
             tx = 0.5 * (tx1 + tx2)
             # obj.model_matrix *= tx
             # TODO - scale
             mat_left = left_controller.current_pose.mDeviceToAbsoluteTracking.m
             mat_right = right_controller.current_pose.mDeviceToAbsoluteTracking.m
             pos_left = numpy.array([mat_left[i][3] for i in range(3)])
             pos_right = numpy.array([mat_right[i][3] for i in range(3)])