    def get_data(self):
        # Activate the demo action set so its action data can be read this frame.
        action_sets = (openvr.VRActiveActionSet_t * 1)()
        action_set = action_sets[0]
        action_set.ulActionSet = openvr.VRInput().getActionSetHandle(
            '/actions/demo')
        openvr.VRInput().updateActionState(action_sets)

        # Read the right-hand pose action.
        pose_data = openvr.VRInput().getPoseActionDataForNextFrame(
            openvr.VRInput().getActionHandle('/actions/demo/in/Hand_Right'),
            openvr.TrackingUniverseStanding,
            openvr.k_ulInvalidInputValueHandle,
        )
        if not pose_data.bActive:
            print("pose action is not active")
            return
        if not pose_data.pose.bPoseIsValid:
            print("pose is not valid")
            return
        #print(get_zeros(pose_data.pose.mDeviceToAbsoluteTracking))
        print(self.get_alden_angle(pose_data.pose.mDeviceToAbsoluteTracking))

        active = self.get_digital_action_state(
            openvr.VRInput().getActionHandle('/actions/demo/in/hidecubes'))[0]
        print(active)
        print()
Example #2
def get_digital_action_rising_edge(action, device_path=None):
    """
    Returns true if the action is active and had a rising edge.
    Pass any non-None value as device_path to also receive the device path
    of the origin that triggered the action.
    """
    action_data = openvr.VRInput().getDigitalActionData(
        action, openvr.k_ulInvalidInputValueHandle)
    if device_path is not None:
        if action_data.bActive:
            origin_info = openvr.VRInput().getOriginTrackedDeviceInfo(
                action_data.activeOrigin)
            device_path = origin_info.devicePath
    return action_data.bActive and action_data.bChanged and action_data.bState, device_path
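
Several examples in this collection also call a get_digital_action_state helper that is not shown here. A minimal sketch mirroring the rising-edge helper above (identical except that it drops the bChanged test) might look like this:

def get_digital_action_state(action, device_path=None):
    """
    Returns true if the action is active and its state is true.
    """
    action_data = openvr.VRInput().getDigitalActionData(
        action, openvr.k_ulInvalidInputValueHandle)
    if device_path is not None:
        if action_data.bActive:
            origin_info = openvr.VRInput().getOriginTrackedDeviceInfo(
                action_data.activeOrigin)
            device_path = origin_info.devicePath
    return action_data.bActive and action_data.bState, device_path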
Example #3
    def setup(self):
        max_init_retries = 4
        retries = 0
        print("===========================")
        print("Initializing OpenVR...")
        while retries < max_init_retries:
            try:
                openvr.init(openvr.VRApplication_Scene)
                break
            except openvr.OpenVRError as e:
                print("Error when initializing OpenVR (try {} / {})".format(
                    retries + 1, max_init_retries))
                print(e)
                retries += 1
                time.sleep(2.0)
        else:
            print("Could not initialize OpenVR, aborting.")
            print(
                "Make sure the system is correctly plugged, you can also try")
            print("to do:")
            print("killall -9 vrcompositor vrmonitor vrdashboard")
            print("Before running this program again.")
            exit(1)  # exit with a non-zero status to signal failure

        print("Success!")
        print("===========================")
        self.vrsystem = openvr.VRSystem()
        openvr.VRInput().setActionManifestPath(
            "C:\\Users\\alexweiss\\Desktop\\actions.json")
Example #4
    def init(self, near=0.2, far=500.0, root=None):
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        width, height = self.vr_system.getRecommendedRenderTargetSize()
        self.compositor = openvr.VRCompositor()
        self.vr_input = openvr.VRInput()
        if self.compositor is None:
            raise Exception("Unable to create compositor")

        if root is None:
            root = render
        self.tracking_space = root.attach_new_node('tracking-space')
        self.hmd_anchor = self.tracking_space.attach_new_node('hmd-anchor')
        self.left_eye_anchor = self.hmd_anchor.attach_new_node('left-eye')
        self.right_eye_anchor = self.hmd_anchor.attach_new_node('right-eye')

        self.projection_left = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Left, near, far))
        self.projection_right = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Right, near, far))

        left_cam_node = self.create_camera('left-cam', self.projection_left)
        right_cam_node = self.create_camera('right-cam', self.projection_right)

        self.left_cam = self.left_eye_anchor.attach_new_node(left_cam_node)
        self.right_cam = self.right_eye_anchor.attach_new_node(right_cam_node)

        self.left_texture = self.create_renderer('left-buffer', self.left_cam,
                                                 width, height, self.left_cb)
        self.right_texture = self.create_renderer('right-buffer',
                                                  self.right_cam, width,
                                                  height, self.right_cb)

        self.disable_main_cam()
        self.replicate(self.left_texture)

        self.init_action()

        taskMgr.add(self.update_poses_task, "openvr-update-poses", sort=-1000)
Example #5
    def init(self,
             near=0.2,
             far=500.0,
             root=None,
             submit_together=True,
             msaa=0,
             replicate=1,
             srgb=None,
             hidden_area_mesh=True):
        """
        Initialize OpenVR. This method will create the rendering buffers, the cameras associated with each eyes
        and the various anchors in the tracked space. It will also start the tasks responsible for the correct
        scheduling. This method should be called only once.

        * near and far parameters can be used to specify the near and far planes of the cameras.

        * root parameter, when not None, is used as the root node to which the tracked space will attached.
          Otherwise the tracked space is attached to render.

        * submit_together specifies if each eye buffer is submitted just after being rendered or both at once.

        * msaa specifies the multisampling anti-aliasing level to use, set to 0 to disable.

        * replicate specifies which eye is replicated on the application window.
          0: No replication, 1: left eye, 2: right eye.

        * srgb : If None, OpenVR will detect the color space from the texture format. If set to True, sRGB color
          space will be enforced; if set to False, linear color space will be enforced.

        * hidden_area_mesh : If True, a mask will be applied on each camera to cover the area not seen from the HMD
          This will trigger the early-z optimization on the GPU and avoid rendering unseen pixels.
        """

        self.submit_together = submit_together
        if srgb is None:
            self.color_space = openvr.ColorSpace_Auto
        elif srgb:
            self.color_space = openvr.ColorSpace_Gamma
        else:
            self.color_space = openvr.ColorSpace_Linear

        # Create an OpenVR array that will store the poses of all the tracked devices.
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()

        # Initialize OpenVR and retrieve the main components
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        self.vr_applications = openvr.VRApplications()
        width, height = self.vr_system.getRecommendedRenderTargetSize()
        self.compositor = openvr.VRCompositor()
        self.vr_input = openvr.VRInput()
        if self.compositor is None:
            raise Exception("Unable to create compositor")

        # Create the tracking space anchors
        if root is None:
            root = self.base.render
        self.tracking_space = root.attach_new_node('tracking-space')
        self.hmd_anchor = self.tracking_space.attach_new_node('hmd-anchor')
        self.left_eye_anchor = self.hmd_anchor.attach_new_node('left-eye')
        self.right_eye_anchor = self.hmd_anchor.attach_new_node('right-eye')

        # Create the projection matrices for the left and right camera.
        # TODO: This should be updated in the update task in case the user updates the IOD
        self.projection_left = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Left, near, far))
        self.projection_right = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Right, near, far))

        # Create the cameras and attach them in the tracking space
        left_cam_node = self.create_camera('left-cam', self.projection_left)
        right_cam_node = self.create_camera('right-cam', self.projection_right)

        self.left_cam = self.left_eye_anchor.attach_new_node(left_cam_node)
        self.right_cam = self.right_eye_anchor.attach_new_node(right_cam_node)

        # Create the renderer linked to each camera
        self.left_texture = self.create_renderer('left-buffer', self.left_cam,
                                                 width, height, msaa,
                                                 self.left_cb)
        self.right_texture = self.create_renderer('right-buffer',
                                                  self.right_cam, width,
                                                  height, msaa, self.right_cb)

        # The main camera is useless, so we disable it
        self.disable_main_cam()

        if hidden_area_mesh:
            # If the hidden area mesh is used, assign a mask to each camera so it hides the opposite eye's mesh
            left_cam_node.set_camera_mask(BitMask32.bit(0))
            right_cam_node.set_camera_mask(BitMask32.bit(1))
            self.attach_hidden_area_mesh(openvr.Eye_Left, self.left_eye_anchor,
                                         1)
            self.attach_hidden_area_mesh(openvr.Eye_Right,
                                         self.right_eye_anchor, 0)

        if replicate == 1:
            if self.verbose:
                print("Replicating left eye")
            self.replicate(self.left_texture)
        elif replicate == 2:
            if self.verbose:
                print("Replicating right eye")
            self.replicate(self.right_texture)
        else:
            if self.verbose:
                print("Eye replication disabled")

        if hasattr(self, 'init_action'):
            print(
                "WARNING: init_action() is deprecated and will be removed in a future release"
            )
            self.init_action()

        # Launch the main task that will synchronize Panda3D with OpenVR
        # TODO: The sort number should be configurable and by default be placed after the gc tasks.
        self.task = taskMgr.add(self.update_poses_task,
                                "openvr-update-poses",
                                sort=-1000)
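
A hypothetical call illustrating the parameters documented in the docstring (MyVRApp is a placeholder for whatever class carries this init() method and its helpers):

app = MyVRApp()  # hypothetical host class
app.init(near=0.1, far=1000.0, msaa=4, replicate=1, srgb=True,
         hidden_area_mesh=True)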
Example #6
    def handle_input(self):
        # Note: Key events are handled by glfw in key_callback
        # Process SteamVR events
        event = openvr.VREvent_t()
        has_events = True
        while has_events:
            has_events = self.hmd.pollNextEvent(event)
            self.process_vr_event(event)
        # Process SteamVR action state
        # UpdateActionState is called each frame to update the state of the actions themselves.
        # The application controls which action sets are active with the provided array of
        # VRActiveActionSet_t structs.
        action_sets = (openvr.VRActiveActionSet_t * 1)()
        action_set = action_sets[0]
        action_set.ulActionSet = self.action_set_demo
        openvr.VRInput().updateActionState(action_sets)
        #
        self.show_cubes = not get_digital_action_state(
            self.action_hide_cubes)[0]
        #
        # Trigger a haptic pulse on the device that produced a rising edge on the haptic action.
        haptic_edge, haptic_device = get_digital_action_rising_edge(
            self.action_trigger_haptic, True)
        if haptic_edge:
            for hand in self.hand:
                if haptic_device == hand.source:
                    openvr.VRInput().triggerHapticVibrationAction(
                        hand.action_haptic, 0, 1, 4, 1,
                        openvr.k_ulInvalidInputValueHandle)
        analog_data = openvr.VRInput().getAnalogActionData(
            self.action_analog_input, openvr.k_ulInvalidInputValueHandle)
        self.analog_value[0] = analog_data.x
        self.analog_value[1] = analog_data.y  # TODO: these seem to be unused...
        self.hand[Left].show_controller = True
        self.hand[Right].show_controller = True
        do_hide, hide_device = get_digital_action_state(
            self.action_hide_this_controller, True)
        if do_hide:
            for hand in self.hand:
                if hide_device == hand.source:
                    hand.show_controller = False
        # Fetch the pose of each hand and resolve the render model of the device driving it.
        for hand in self.hand:
            pose_data = openvr.VRInput().getPoseActionDataForNextFrame(
                hand.action_pose,
                openvr.TrackingUniverseStanding,
                openvr.k_ulInvalidInputValueHandle,
            )
            if not pose_data.bActive:
                hand.show_controller = False
                continue
            if not pose_data.pose.bPoseIsValid:
                hand.show_controller = False
                continue
            hand.pose = convert_steam_vr_matrix(
                pose_data.pose.mDeviceToAbsoluteTracking)
            origin_info = openvr.VRInput().getOriginTrackedDeviceInfo(
                pose_data.activeOrigin)
            if origin_info.trackedDeviceIndex != openvr.k_unTrackedDeviceIndexInvalid:
                render_model_name = openvr.VRSystem().getStringTrackedDeviceProperty(
                    origin_info.trackedDeviceIndex,
                    openvr.Prop_RenderModelName_String)
                hand.render_model = self.find_or_load_render_model(
                    render_model_name)
                hand.render_model_name = render_model_name
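
handle_input relies on a convert_steam_vr_matrix helper that is not part of this collection. A plausible sketch, assuming numpy is available, transposes OpenVR's row-major 3x4 HmdMatrix34_t into the 4x4 column-major layout OpenGL expects:

import numpy

def convert_steam_vr_matrix(pose):
    """Transpose an HmdMatrix34_t into a 4x4 column-major numpy matrix."""
    return numpy.array((
        (pose[0][0], pose[1][0], pose[2][0], 0.0),
        (pose[0][1], pose[1][1], pose[2][1], 0.0),
        (pose[0][2], pose[1][2], pose[2][2], 0.0),
        (pose[0][3], pose[1][3], pose[2][3], 1.0),
    ), dtype=numpy.float32)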
Example #7
    def b_init(self):
        glfw.init()
        glfw.window_hint(glfw.SAMPLES, 4)
        self.window = glfw.create_window(self.companion_width,
                                         self.companion_height, 'hello_vr',
                                         None, None)
        glfw.set_key_callback(self.window, self.key_callback)
        glfw.make_context_current(self.window)
        #
        self.hmd = openvr.init(openvr.VRApplication_Scene)
        #
        vr_sys = openvr.VRSystem()
        driver = vr_sys.getStringTrackedDeviceProperty(
            openvr.k_unTrackedDeviceIndex_Hmd,
            openvr.Prop_TrackingSystemName_String,
        )
        display = vr_sys.getStringTrackedDeviceProperty(
            openvr.k_unTrackedDeviceIndex_Hmd,
            openvr.Prop_SerialNumber_String,
        )
        glfw.set_window_title(self.window, f'hello_vr -- {driver} {display}')
        self.b_init_gl()
        assert openvr.VRCompositor()
        # Register the action manifest and resolve the handles it declares.
        action_path = sys.path[0] + '/res/hellovr_actions.json'
        openvr.VRInput().setActionManifestPath(action_path)
        self.action_hide_cubes = openvr.VRInput().getActionHandle(
            '/actions/demo/in/HideCubes')
        self.action_hide_this_controller = openvr.VRInput().getActionHandle(
            '/actions/demo/in/HideThisController')
        self.action_trigger_haptic = openvr.VRInput().getActionHandle(
            '/actions/demo/in/TriggerHaptic')
        self.action_analog_input = openvr.VRInput().getActionHandle(
            '/actions/demo/in/AnalogInput')
        self.action_set_demo = openvr.VRInput().getActionSetHandle(
            '/actions/demo')
        self.hand[Left].action_haptic = openvr.VRInput().getActionHandle(
            '/actions/demo/out/Haptic_Left')
        self.hand[Left].source = openvr.VRInput().getInputSourceHandle(
            '/user/hand/left')
        self.hand[Left].action_pose = openvr.VRInput().getActionHandle(
            '/actions/demo/in/Hand_Left')
        self.hand[Right].action_haptic = openvr.VRInput().getActionHandle(
            '/actions/demo/out/Haptic_Right')
        self.hand[Right].source = openvr.VRInput().getInputSourceHandle(
            '/user/hand/right')
        self.hand[Right].action_pose = openvr.VRInput().getActionHandle(
            '/actions/demo/in/Hand_Right')
        return True
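
For context, a hypothetical companion-window loop tying b_init and handle_input together; render_frame is a placeholder for the application's eye-rendering and compositor-submission step:

    def main_loop(self):
        # Hypothetical driver loop; b_init() is assumed to have run already.
        while not glfw.window_should_close(self.window):
            glfw.poll_events()      # dispatches key_callback
            self.handle_input()     # SteamVR events and action state (see above)
            self.render_frame()     # placeholder: render eyes, submit to compositor
            glfw.swap_buffers(self.window)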
Example #8
    def init(self,
             near=0.2,
             far=500.0,
             root=None,
             submit_together=True,
             msaa=0,
             replicate=1):
        """
        Initialize OpenVR. This method will create the rendering buffers, the cameras associated with each eyes
        and the various anchors in the tracked space. It will also start the tasks responsible for the correct
        scheduling. This method should be called only once.

        * near and far parameters can be used to specify the near and far planes of the cameras.

        * root parameter, when not None, is used as the root node to which the tracked space will attached.
          Otherwise the tracked space is attached to render.

        * submit_together specifies if each eye buffer is submitted just after being rendered or both at once.

        * msaa specifies the multisampling anti-aliasing level to use, set to 0 to disable.

        * replicate specifies which eye is replicated on the application window.
          0: No replication, 1: left eye, 2: right eye.
        """

        self.submit_together = submit_together
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        width, height = self.vr_system.getRecommendedRenderTargetSize()
        self.compositor = openvr.VRCompositor()
        self.vr_input = openvr.VRInput()
        if self.compositor is None:
            raise Exception("Unable to create compositor")

        if root is None:
            root = render
        self.tracking_space = root.attach_new_node('tracking-space')
        self.hmd_anchor = self.tracking_space.attach_new_node('hmd-anchor')
        self.left_eye_anchor = self.hmd_anchor.attach_new_node('left-eye')
        self.right_eye_anchor = self.hmd_anchor.attach_new_node('right-eye')

        self.projection_left = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Left, near, far))
        self.projection_right = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Right, near, far))

        left_cam_node = self.create_camera('left-cam', self.projection_left)
        right_cam_node = self.create_camera('right-cam', self.projection_right)

        self.left_cam = self.left_eye_anchor.attach_new_node(left_cam_node)
        self.right_cam = self.right_eye_anchor.attach_new_node(right_cam_node)

        self.left_texture = self.create_renderer('left-buffer', self.left_cam,
                                                 width, height, msaa,
                                                 self.left_cb)
        self.right_texture = self.create_renderer('right-buffer',
                                                  self.right_cam, width,
                                                  height, msaa, self.right_cb)

        self.disable_main_cam()

        if replicate == 1:
            if self.verbose:
                print("Replicating left eye")
            self.replicate(self.left_texture)
        elif replicate == 2:
            if self.verbose:
                print("Replicating right eye")
            self.replicate(self.right_texture)
        else:
            if self.verbose:
                print("Eye replication disabled")

        self.init_action()

        taskMgr.add(self.update_poses_task, "openvr-update-poses", sort=-1000)
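
The Panda3D snippets above all schedule an update_poses_task that is not shown in this collection. A minimal sketch of such a task, assuming the Panda3D task protocol and the convert_mat helper used above (a real implementation would also update the eye anchors and apply the full coordinate-system conversion):

    def update_poses_task(self, task):
        # Block until the compositor hands back fresh device poses,
        # then drive the HMD anchor from the head pose.
        self.compositor.waitGetPoses(self.poses, None)
        hmd_pose = self.poses[openvr.k_unTrackedDeviceIndex_Hmd]
        if hmd_pose.bPoseIsValid:
            self.hmd_anchor.set_mat(
                self.convert_mat(hmd_pose.mDeviceToAbsoluteTracking))
        return task.cont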