Example #1
    def render_frame(self):
        if self.hmd:
            self.render_controller_axes()
            self.render_stereo_targets()
            self.render_companion_window()
            left_eye_texture = openvr.Texture_t(
                handle=self.left_eye_desc.resolve_texture_id,
                eType=openvr.TextureType_OpenGL,
                eColorSpace=openvr.ColorSpace_Gamma,
            )
            right_eye_texture = openvr.Texture_t(
                handle=self.right_eye_desc.resolve_texture_id,
                eType=openvr.TextureType_OpenGL,
                eColorSpace=openvr.ColorSpace_Gamma,
            )
            try:
                openvr.VRCompositor().submit(openvr.Eye_Left, left_eye_texture)
                openvr.VRCompositor().submit(openvr.Eye_Right,
                                             right_eye_texture)
            except openvr.error_code.CompositorError_DoNotHaveFocus:
                pass  # First frame fails because waitGetPoses has not been called yet

        if (self.tracked_controller_count !=
                self.tracked_controller_count_previous
                or self.valid_pose_count != self.valid_pose_count_previous):
            self.valid_pose_count_previous = self.valid_pose_count
            self.tracked_controller_count_previous = self.tracked_controller_count
            print(
                f'PoseCount:{self.valid_pose_count}({self.pose_classes}) Controllers:{self.tracked_controller_count}'
            )
        self.update_hmd_pose()
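The DoNotHaveFocus handler above exists because the compositor refuses submissions until waitGetPoses has been called at least once. A minimal standalone sketch of the required ordering (texture_id is a hypothetical, already-rendered OpenGL texture name):

import openvr

openvr.init(openvr.VRApplication_Scene)
poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
poses = poses_t()
eye_texture = openvr.Texture_t(
    handle=texture_id,  # assumption: a valid, already-rendered GL texture
    eType=openvr.TextureType_OpenGL,
    eColorSpace=openvr.ColorSpace_Gamma,
)
while True:
    # Pose acquisition must precede the first submit of each frame.
    openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
    # ... render both eye views into the texture here ...
    openvr.VRCompositor().submit(openvr.Eye_Left, eye_texture)
    openvr.VRCompositor().submit(openvr.Eye_Right, eye_texture)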
Example #2
def v_render():
    ''' render to vr and window '''
    global hmd, ctx, window
    # resolve multi-sample offscreen buffer
    gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, ctx.con.offFBO)
    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
    gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, ctx.con.offFBO_r)
    gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0)
    gl.glBlitFramebuffer(0, 0, 2 * hmd.width, hmd.height, 0, 0, 2 * hmd.width,
                         hmd.height, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
    # blit to window, left only, window is half-size
    gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, ctx.con.offFBO_r)
    gl.glReadBuffer(gl.GL_COLOR_ATTACHMENT0)
    gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, 0)
    gl.glDrawBuffer(gl.GL_BACK if ctx.con.windowDoublebuffer else gl.GL_FRONT)
    gl.glBlitFramebuffer(0, 0, hmd.width, hmd.height, 0, 0, hmd.width // 2,
                         hmd.height // 2, gl.GL_COLOR_BUFFER_BIT,
                         gl.GL_NEAREST)
    # blit to vr texture
    gl.glActiveTexture(gl.GL_TEXTURE2)
    gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, ctx.con.offFBO_r)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1,
                              gl.GL_TEXTURE_2D, hmd.idtex, 0)
    gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT1)
    gl.glBlitFramebuffer(0, 0, 2 * hmd.width, hmd.height, 0, 0, 2 * hmd.width,
                         hmd.height, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
    gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1,
                              gl.GL_TEXTURE_2D, 0, 0)
    gl.glDrawBuffer(gl.GL_COLOR_ATTACHMENT0)
    openvr.VRCompositor().submit(openvr.Eye_Left, hmd.vTex, hmd.boundLeft)
    openvr.VRCompositor().submit(openvr.Eye_Right, hmd.vTex, hmd.boundRight)
    # swap if window is double-buffered, flush just in case
    if ctx.con.windowDoublebuffer:
        glfw.swap_buffers(window)
    gl.glFlush()
Example #3
 def __init__(self, multisample=0, znear=0.1, zfar=1000, poll_tracked_device_frequency=None):
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     w, h = self.vr_system.getRecommendedRenderTargetSize()
     self.vr_framebuffers = (OpenVRFramebuffer(w, h, multisample=multisample),
                             OpenVRFramebuffer(w, h, multisample=multisample))
     self._multisample = multisample
     self.vr_compositor = openvr.VRCompositor()
     if self.vr_compositor is None:
         raise Exception('unable to create compositor')
     self.vr_framebuffers[0].init_gl()
     self.vr_framebuffers[1].init_gl()
     self._poses = (openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount)()
     self.projection_matrices = (
         np.asarray(matrixForOpenVRMatrix(
             self.vr_system.getProjectionMatrix(openvr.Eye_Left, znear, zfar))),
         np.asarray(matrixForOpenVRMatrix(
             self.vr_system.getProjectionMatrix(openvr.Eye_Right, znear, zfar))))
     self.eye_transforms = (
         np.asarray(matrixForOpenVRMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
         np.asarray(matrixForOpenVRMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Right)).I))
     self.view = np.eye(4, dtype=np.float32)
     self.view_matrices = (np.empty((4, 4), dtype=np.float32),
                           np.empty((4, 4), dtype=np.float32))
     self.controllers = TrackedDevicesActor(self._poses)
     #self.controllers.show_controllers_only = False
     self.controllers.init_gl()
     self.vr_event = openvr.VREvent_t()
     self._poll_tracked_device_count()
     self._poll_tracked_device_frequency = poll_tracked_device_frequency
     self._frames_rendered = 0
     self._pulse_t0 = 0.0
Example #4
 def init_gl(self, clear_color=(0.0, 0.0, 0.0, 0.0)):
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     w, h = self.vr_system.getRecommendedRenderTargetSize()
     self.render_target_size = np.array((w, h), dtype=np.float32)
     self.vr_framebuffers = (OpenVRFramebuffer(
         w, h, multisample=self.multisample),
                             OpenVRFramebuffer(
                                 w, h, multisample=self.multisample))
     self.vr_compositor = openvr.VRCompositor()
     if self.vr_compositor is None:
         raise Exception('unable to create compositor')
     self.vr_framebuffers[0].init_gl()
     self.vr_framebuffers[1].init_gl()
     self.update_projection_matrix()
     self.eye_to_head_transforms = (
         asarray(
             matrixForOpenVRMatrix(
                 self.vr_system.getEyeToHeadTransform(openvr.Eye_Left))),
         asarray(
             matrixForOpenVRMatrix(
                 self.vr_system.getEyeToHeadTransform(openvr.Eye_Right))))
     self.eye_transforms = (asarray(
         matrixForOpenVRMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
                            asarray(
                                matrixForOpenVRMatrix(
                                    self.vr_system.getEyeToHeadTransform(
                                        openvr.Eye_Right)).I))
     gl.glClearColor(*clear_color)
     gl.glEnable(gl.GL_DEPTH_TEST)
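matrixForOpenVRMatrix is used throughout these examples but never defined. A plausible sketch, assuming the transposed (row-vector) layout implied by Example #13's indexing and returning numpy.matrix so that the .I inverses above work:

import numpy as np

def matrixForOpenVRMatrix(mat):
    # Transpose an HmdMatrix44_t or HmdMatrix34_t into a 4x4 numpy.matrix;
    # np.matrix rather than ndarray because callers take the .I inverse.
    if len(mat.m) == 4:  # HmdMatrix44_t (projection matrices)
        return np.matrix([[mat.m[i][j] for i in range(4)]
                          for j in range(4)], dtype=np.float32)
    # HmdMatrix34_t (pose/eye transforms): transpose the rotation block
    # and move the translation column into the bottom row.
    rows = [[mat.m[i][j] for i in range(3)] + [0.0] for j in range(3)]
    rows.append([mat.m[i][3] for i in range(3)] + [1.0])
    return np.matrix(rows, dtype=np.float32)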
Example #5
 def init_gl(self):
     "allocate OpenGL resources"
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     w, h = self.vr_system.getRecommendedRenderTargetSize()
     self.left_fb = OpenVrFramebuffer(w, h, multisample=self.multisample)
     self.right_fb = OpenVrFramebuffer(w, h, multisample=self.multisample)
     self.compositor = openvr.VRCompositor()
     if self.compositor is None:
         raise Exception("Unable to create compositor")
     self.left_fb.init_gl()
     self.right_fb.init_gl()
     # Compute projection matrix
     zNear = 0.2
     zFar = 500.0
     self.projection_left = numpy.asarray(matrixForOpenVrMatrix(self.vr_system.getProjectionMatrix(
             openvr.Eye_Left,
             zNear, zFar)))
     self.projection_right = numpy.asarray(matrixForOpenVrMatrix(self.vr_system.getProjectionMatrix(
             openvr.Eye_Right,
             zNear, zFar)))
     self.view_left = matrixForOpenVrMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I # head_X_eye in Kane notation
     self.view_right = matrixForOpenVrMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Right)).I # head_X_eye in Kane notation
     for actor in self:
         actor.init_gl()
Example #6
def main():
    openvr.init(openvr.VRApplication_Scene)

    poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
    poses = poses_t()
    port = "5556"
    if len(sys.argv) > 1:
        port = sys.argv[1]
        int(port)  # raises ValueError early if the argument is not numeric
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:%s" % port)
   
    while True:
        openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
        hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
        #print(str(hmd_pose.mDeviceToAbsoluteTracking))
        rotation = getQuaternion(hmd_pose.mDeviceToAbsoluteTracking)
        location = getLocation(hmd_pose.mDeviceToAbsoluteTracking)
        #print(rotation.x, rotation.y, rotation.z, rotation.w)
        #print("one"+hmd_pose.mDeviceToAbsoluteTracking[1])
        #print("two"+hmd_pose.mDeviceToAbsoluteTracking[2])
        #sys.stdout.flush()
        #  Wait for next request from client
        #i = time.time()
        message = socket.recv()
        print("Received a request: ", message)
        socket.send_string(send_json(rotation=rotation,location=location))
        time.sleep(.00166) 
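getQuaternion and getLocation are not shown in this example. A plausible reconstruction (function names and return types are assumptions), using the same sign-recovery math as the matrixToXYZ fragment in Example #27:

from math import sqrt, copysign
from collections import namedtuple

Quat = namedtuple('Quat', 'x y z w')
Vec3 = namedtuple('Vec3', 'x y z')

def getQuaternion(m):
    # Rotation-matrix-to-quaternion conversion for an HmdMatrix34_t.
    w = sqrt(max(0, 1 + m[0][0] + m[1][1] + m[2][2])) / 2.0
    x = sqrt(max(0, 1 + m[0][0] - m[1][1] - m[2][2])) / 2.0
    y = sqrt(max(0, 1 - m[0][0] + m[1][1] - m[2][2])) / 2.0
    z = sqrt(max(0, 1 - m[0][0] - m[1][1] + m[2][2])) / 2.0
    x = copysign(x, m[2][1] - m[1][2])
    y = copysign(y, m[0][2] - m[2][0])
    z = copysign(z, m[1][0] - m[0][1])
    return Quat(x, y, z, w)

def getLocation(m):
    # Translation is the last column of the 3x4 pose matrix.
    return Vec3(m[0][3], m[1][3], m[2][3])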
Example #7
 def update_hmd_pose(self):
     if not self.hmd:
         return
     self.poses, _ = openvr.VRCompositor().waitGetPoses(self.poses, None)
     self.valid_pose_count = 0
     self.pose_classes = ''
     for nDevice, pose in enumerate(self.poses):
         if pose.bPoseIsValid:
             self.valid_pose_count += 1
             if nDevice not in self.dev_class_char:
                 c = self.hmd.getTrackedDeviceClass(nDevice)
                 if c == openvr.TrackedDeviceClass_Controller:
                     self.dev_class_char[nDevice] = 'C'
                 elif c == openvr.TrackedDeviceClass_HMD:
                     self.dev_class_char[nDevice] = 'H'
                 elif c == openvr.TrackedDeviceClass_Invalid:
                     self.dev_class_char[nDevice] = 'I'
                 elif c == openvr.TrackedDeviceClass_GenericTracker:
                     self.dev_class_char[nDevice] = 'G'
                 elif c == openvr.TrackedDeviceClass_TrackingReference:
                     self.dev_class_char[nDevice] = 'T'
                 else:
                     self.dev_class_char[nDevice] = '?'
             self.pose_classes += self.dev_class_char[nDevice]
     hp = self.poses[openvr.k_unTrackedDeviceIndex_Hmd]
     if hp.bPoseIsValid:
         p = convert_steam_vr_matrix(hp.mDeviceToAbsoluteTracking)
         self.hmd_pose = inv(p)
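convert_steam_vr_matrix is not defined here; since its result is passed to numpy's inv, it must expand the 3x4 HmdMatrix34_t into a square matrix. A sketch, assuming the transposed OpenGL column-vector layout used elsewhere in these examples:

import numpy as np

def convert_steam_vr_matrix(pose):
    # Expand an HmdMatrix34_t to 4x4; the transposition is an assumption
    # matching OpenGL-style conventions, not a confirmed detail.
    return np.array((
        (pose[0][0], pose[1][0], pose[2][0], 0.0),
        (pose[0][1], pose[1][1], pose[2][1], 0.0),
        (pose[0][2], pose[1][2], pose[2][2], 0.0),
        (pose[0][3], pose[1][3], pose[2][3], 1.0),
    ), dtype=np.float32)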
Example #8
    def run(self):
        while self.vive_run_flag:
            openvr.VRCompositor().waitGetPoses(self.poses, len(self.poses),
                                               None, 0)
            #if left_id:
            #    left_pose = poses[left_id]
            #    if self.show:
            #        print(left_pose.mDeviceToAbsoluteTracking)
            #        result, pControllerState = vrsystem.getControllerState(left_id)
            #        d = self.from_controller_state_to_dict(pControllerState)
            #        #if show_only_new_events and last_unPacketNum_left != d['unPacketNum']:
            #        last_unPacketNum_left = d['unPacketNum']
            #        print("Left controller:")
            #        pp.pprint(d)
            #if left_id and right_id and self.show:
            #    print()
            #    print()

            if self.right_id:
                right_pose = self.poses[self.right_id]
                result, pControllerState = self.vrsystem.getControllerState(
                    self.right_id)
                d = self.from_controller_state_to_dict(pControllerState)
                #if show_only_new_events and last_unPacketNum_right != d['unPacketNum']:
                last_unPacketNum_right = d['unPacketNum']
                self.state = d
                temp = right_pose.mDeviceToAbsoluteTracking
                self.pose[0, 0:4] = temp[0][0:4]
                self.pose[1, 0:4] = temp[1][0:4]
                self.pose[2, 0:4] = temp[2][0:4]
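from_controller_state_to_dict is not shown. A hedged sketch of what it plausibly extracts (field names follow openvr.VRControllerState_t; which rAxis slot is trigger versus trackpad varies by controller, with the Vive wand layout assumed here):

def from_controller_state_to_dict(pControllerState):
    # Pack the raw controller state into a plain dict for logging.
    return {
        'unPacketNum': pControllerState.unPacketNum,
        'ulButtonPressed': pControllerState.ulButtonPressed,
        'ulButtonTouched': pControllerState.ulButtonTouched,
        'trackpad_x': pControllerState.rAxis[0].x,
        'trackpad_y': pControllerState.rAxis[0].y,
        'trigger': pControllerState.rAxis[1].x,
    }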
Example #9
 def wait_update_poses(self):
     if self.is_shutdown:
         return
     poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
     poses = poses_t()
     openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
     for device in self.devices.values():
         device.update_pose(poses)
Example #10
 def poll(self):
     openvr.init(openvr.VRApplication_Scene)
     poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
     poses = poses_t()
     while self._keep_polling:
         openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
         self.handle_controller_buttons(poses)
     openvr.shutdown()
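handle_controller_buttons is not shown either. A sketch of one way to implement it (the trigger-bit test is an assumption; the getControllerState tuple return matches Example #8):

import openvr

def handle_controller_buttons(poses):
    # Scan connected controllers and test the trigger bit in the
    # ulButtonPressed mask.
    vr_system = openvr.VRSystem()
    for i in range(openvr.k_unMaxTrackedDeviceCount):
        if not poses[i].bDeviceIsConnected:
            continue
        if vr_system.getTrackedDeviceClass(i) != openvr.TrackedDeviceClass_Controller:
            continue
        result, state = vr_system.getControllerState(i)
        if result and state.ulButtonPressed & (1 << openvr.k_EButton_SteamVR_Trigger):
            print('trigger pressed on device', i)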
Example #11
 def submit(self, eye):
     if self.multisample > 0:
         glBindFramebuffer(GL_READ_FRAMEBUFFER, self.fb)
         glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.resolve_fb)
         glBlitFramebuffer(0, 0, self.width, self.height, 0, 0, self.width,
                           self.height, GL_COLOR_BUFFER_BIT, GL_LINEAR)
         glBindFramebuffer(GL_READ_FRAMEBUFFER, 0)
         glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0)
     openvr.VRCompositor().submit(eye, self.texture)
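The self.texture submitted above is presumably an openvr.Texture_t wrapping the resolve framebuffer's color texture, set up once along the lines of Examples #1 and #14 (resolve_texture_id is a hypothetical attribute name):

# One-time setup sketch assumed by submit() above.
self.texture = openvr.Texture_t()
self.texture.handle = int(self.resolve_texture_id)  # assumption: GL name of the resolve color texture
self.texture.eType = openvr.TextureType_OpenGL
self.texture.eColorSpace = openvr.ColorSpace_Gamma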
Example #12
def v_update():
    ''' update vr poses and controller states '''
    global ctx, hmd
    openvr.VRCompositor().waitGetPoses(hmd.poses, openvr.k_unMaxTrackedDeviceCount, None, 0)
    m = np.array(hmd.poses[openvr.k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking.m)
    hmd.roompos, hmd.roommat = m[0:3, 3], m[0:3, 0:3]
    for n in range(2):
        ctx.scn.camera[n].pos[:] = hmd.roompos + np.matmul(hmd.roommat, hmd.eyeoffset[n])
        ctx.scn.camera[n].forward[0:3] = -hmd.roommat[:, 2]
        ctx.scn.camera[n].up[0:3] = hmd.roommat[:, 1]
Example #13
    def run(self):
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        self.vr_compositor = openvr.VRCompositor()
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()
        self.w, self.h = self.vr_system.getRecommendedRenderTargetSize()
        SDL_Init(SDL_INIT_VIDEO)
        self.window = SDL_CreateWindow(b"test", SDL_WINDOWPOS_CENTERED,
                                       SDL_WINDOWPOS_CENTERED, 100, 100,
                                       SDL_WINDOW_SHOWN | SDL_WINDOW_OPENGL)
        self.context = SDL_GL_CreateContext(self.window)
        SDL_GL_MakeCurrent(self.window, self.context)
        self.depth_buffer = glGenRenderbuffers(1)
        self.frame_buffers = glGenFramebuffers(2)
        self.texture_ids = glGenTextures(2)
        self.textures = [None] * 2
        self.eyes = [openvr.Eye_Left, openvr.Eye_Right]
        self.camToHead = [None] * 2
        self.proj_raw = [None] * 2
        self.nearZ = 0.01
        self.farZ = 500

        for eye in range(2):
            glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffers[eye])
            glBindRenderbuffer(GL_RENDERBUFFER, self.depth_buffer)
            glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, self.w,
                                  self.h)
            glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                                      GL_DEPTH_STENCIL_ATTACHMENT,
                                      GL_RENDERBUFFER, self.depth_buffer)
            glBindTexture(GL_TEXTURE_2D, self.texture_ids[eye])
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self.w, self.h, 0,
                         GL_RGBA, GL_UNSIGNED_BYTE, None)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                   GL_TEXTURE_2D, self.texture_ids[eye], 0)
            texture = openvr.Texture_t()
            texture.handle = int(self.texture_ids[eye])
            texture.eType = openvr.TextureType_OpenGL
            texture.eColorSpace = openvr.ColorSpace_Gamma
            self.textures[eye] = texture
            self.proj_raw[eye] = self.vr_system.getProjectionRaw(
                self.eyes[eye]
            )  #void GetProjectionRaw( Hmd_Eye eEye, float *pfLeft, float *pfRight, float *pfTop, float *pfBottom )
            eyehead = self.vr_system.getEyeToHeadTransform(
                self.eyes[eye])  #[0][3] is eye-center distance
            self.camToHead[eye] = numpy.array(
                [[eyehead.m[j][i] for i in range(4)] for j in range(3)])

        self.setupcameras()
        self.setupscene()
        while self._running:
            self.draw()
Example #14
 def __init__(self):
     "One time initialization"
     # Glut
     glutInit()
     glutInitDisplayMode(GLUT_RGBA)
     # Create a regular desktop window, just so we can have an OpenGL context to play with
     glutInitWindowSize(400, 400)
     glutInitWindowPosition(50, 50)
     self.win = glutCreateWindow(b"Pink world")
     # Set up callback methods for use during the GLUT main loop
     glutDisplayFunc(self.display)
     glutIdleFunc(self.display)
     glutReshapeFunc(self.resize_gl)
     glutKeyboardFunc(self.key_press)
     # OpenVR
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     self.vr_width, self.vr_height = self.vr_system.getRecommendedRenderTargetSize()
     self.compositor = openvr.VRCompositor()
     if self.compositor is None:
         raise Exception("Unable to create compositor")
     poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
     self.poses = poses_t()
     #
     # Set up framebuffer and render textures
     self.fb = glGenFramebuffers(1)
     glBindFramebuffer(GL_FRAMEBUFFER, self.fb)
     self.depth_buffer = glGenRenderbuffers(1)
     glBindRenderbuffer(GL_RENDERBUFFER, self.depth_buffer)
     glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8,
                           self.vr_width, self.vr_height)
     glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
                               GL_RENDERBUFFER, self.depth_buffer)
     self.texture_id = glGenTextures(1)
     glBindTexture(GL_TEXTURE_2D, self.texture_id)
     glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self.vr_width, self.vr_height,
                  0, GL_RGBA, GL_UNSIGNED_BYTE, None)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0)
     glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                            GL_TEXTURE_2D, self.texture_id, 0)
     status = glCheckFramebufferStatus(GL_FRAMEBUFFER)
     if status != GL_FRAMEBUFFER_COMPLETE:
         glBindFramebuffer(GL_FRAMEBUFFER, 0)
         raise Exception("Incomplete framebuffer")
     glBindFramebuffer(GL_FRAMEBUFFER, 0)
     # OpenVR texture data
     self.texture = openvr.Texture_t()
     self.texture.handle = self.texture_id
     self.texture.eType = openvr.TextureType_OpenGL
     self.texture.eColorSpace = openvr.ColorSpace_Gamma
Example #15
def main():
    # Setup elided in the original snippet; assumed to mirror Example #6.
    openvr.init(openvr.VRApplication_Scene)
    poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
    poses = poses_t()
    for i in range(10000):
        openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
        hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
        #print(str(hmd_pose.mDeviceToAbsoluteTracking))
        rotation = getQuaternion(hmd_pose.mDeviceToAbsoluteTracking)
        print(rotation.x, rotation.y, rotation.z, rotation.w)
        #print("one"+hmd_pose.mDeviceToAbsoluteTracking[1])
        #print("two"+hmd_pose.mDeviceToAbsoluteTracking[2])
        sys.stdout.flush()
        time.sleep(0.01)
Example #16
def v_initPre():
    ''' init vr before MuJoCo init '''
    global hmd
    hmd.system = openvr.init(openvr.VRApplication_Scene)
    hmd.roompos = np.zeros(3)
    hmd.roommat = np.eye(3)
    hmd.eyeoffset = np.zeros((2, 3))
    openvr.VRCompositor().setTrackingSpace(openvr.TrackingUniverseStanding)
    hmd.width, hmd.height = hmd.system.getRecommendedRenderTargetSize()
    for n in range(2):
        hmd.eyeoffset[n] = np.array(hmd.system.getEyeToHeadTransform(n).m)[0:3, 3]
Example #17
    def __init__(self):
        super().__init__()

        vr_sys = openvr.init(openvr.VRApplication_Scene)
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        poses = poses_t()

        print("Recommended W H:", vr_sys.getRecommendedRenderTargetSize())

        HMD_id = openvr.k_unTrackedDeviceIndex_Hmd  # fallback in case the scan below finds no HMD
        ctrls = []

        for i in range(5):
            c = vr_sys.getTrackedDeviceClass(i)
            if c == openvr.TrackedDeviceClass_HMD:
                HMD_id = i
            elif c == openvr.TrackedDeviceClass_Controller:
                print("Controller:", i)
                ctrls.append(i)

        self.ov = openvr.VROverlay()
        self.cmp = openvr.VRCompositor()
        self.vrsys = openvr.VRSystem()

        self.CONTROLLERS = ctrls

        self.VRposes = poses

        k = random.randint(0, 1000)
        self.vrBuf1 = self.ov.createOverlay("Buffer 1 " + str(k), "A")
        self.vrBuf2 = self.ov.createOverlay("Buffer 2 " + str(k), "B")

        # FOV 80 size 0.2
        # FOV 120 size 0.6
        self.ov.setOverlayWidthInMeters(self.vrBuf1, 0.6)
        self.ov.setOverlayWidthInMeters(self.vrBuf2, 0.6)

        self.ov.setOverlayFromFile(self.vrBuf1, PATH + "lib/Combat.png")
        self.ov.setOverlayFromFile(self.vrBuf2, PATH + "lib/Combat.png")
        self.ov.showOverlay(self.vrBuf1)
        self.ov.showOverlay(self.vrBuf2)

        mt = [(ctypes.c_float * 4)(1, 0, 0, 0.02),
              (ctypes.c_float * 4)(0, 1, 0, 0),
              (ctypes.c_float * 4)(0, 0, 1, -0.2)]

        y = openvr.HmdMatrix34_t(*mt)

        self.ov.setOverlayTransformTrackedDeviceRelative(
            self.vrBuf1, HMD_id, y)
        self.ov.setOverlayTransformTrackedDeviceRelative(
            self.vrBuf2, HMD_id, y)

        self.VRMode = True
Example #18
    def __init__(self, emulateOculus=True, emulationData=None):

        if emulateOculus is False:
            openvr.init(openvr.VRApplication_Scene)
            self.VRSystem = openvr.VRSystem()
            self.VRSystem.resetSeatedZeroPose()
            self.VRCompositor = openvr.VRCompositor()
        #else:
        self.emulateOculus = emulateOculus
        self.emulationData = emulationData
        self.emulationDataIndex = 0
        self.emulationGrip = True
Example #19
 def init_vr_device(self):
     try:
         self.hmd = openvr.init(openvr.VRApplication_Scene)
         #self.vr_render_models = openvr.getGenericInterface(openvr.IVRRenderModels_Version)
         poses = []  # will be populated with proper type after first call
         for i in range(100):
             poses, _ = openvr.VRCompositor().waitGetPoses(poses, None)
             hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
             print(hmd_pose.mDeviceToAbsoluteTracking)
             sys.stdout.flush()
             time.sleep(0.2)
         openvr.shutdown()
     except openvr.error_code.InitError_Init_HmdNotFound:
         print("VR Device not found")
Example #20
 def b_init(self):
     glfw.init()
     glfw.window_hint(glfw.SAMPLES, 4)
     self.window = glfw.create_window(self.companion_width,
                                      self.companion_height, 'hello_vr',
                                      None, None)
     glfw.set_key_callback(self.window, self.key_callback)
     glfw.make_context_current(self.window)
     #
     self.hmd = openvr.init(openvr.VRApplication_Scene)
     #
     vr_sys = openvr.VRSystem()
     driver = vr_sys.getStringTrackedDeviceProperty(
         openvr.k_unTrackedDeviceIndex_Hmd,
         openvr.Prop_TrackingSystemName_String,
     )
     display = vr_sys.getStringTrackedDeviceProperty(
         openvr.k_unTrackedDeviceIndex_Hmd,
         openvr.Prop_SerialNumber_String,
     )
     glfw.set_window_title(self.window, f'hello_vr -- {driver} {display}')
     self.b_init_gl()
     assert openvr.VRCompositor()
     action_path = pkg_resources.resource_filename('samples',
                                                   'hellovr_actions.json')
     openvr.VRInput().setActionManifestPath(action_path)
     self.action_hide_cubes = openvr.VRInput().getActionHandle(
         '/actions/demo/in/HideCubes')
     self.action_hide_this_controller = openvr.VRInput().getActionHandle(
         '/actions/demo/in/HideThisController')
     self.action_trigger_haptic = openvr.VRInput().getActionHandle(
         '/actions/demo/in/TriggerHaptic')
     self.action_analog_input = openvr.VRInput().getActionHandle(
         '/actions/demo/in/AnalogInput')
     self.action_set_demo = openvr.VRInput().getActionSetHandle(
         '/actions/demo')
     self.hand[Left].action_haptic = openvr.VRInput().getActionHandle(
         '/actions/demo/out/Haptic_Left')
     self.hand[Left].source = openvr.VRInput().getInputSourceHandle(
         '/user/hand/left')
     self.hand[Left].action_pose = openvr.VRInput().getActionHandle(
         '/actions/demo/in/Hand_Left')
     self.hand[Right].action_haptic = openvr.VRInput().getActionHandle(
         '/actions/demo/out/Haptic_Right')
     self.hand[Right].source = openvr.VRInput().getInputSourceHandle(
         '/user/hand/right')
     self.hand[Right].action_pose = openvr.VRInput().getActionHandle(
         '/actions/demo/in/Hand_Right')
     return True
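The action handles registered above are then polled once per frame. A sketch of that update, patterned after the hellovr sample this code follows (the helper name poll_input is hypothetical):

 def poll_input(self):
     # Activate our one action set, then read one digital action's state.
     action_sets = (openvr.VRActiveActionSet_t * 1)()
     action_sets[0].ulActionSet = self.action_set_demo
     openvr.VRInput().updateActionState(action_sets)
     data = openvr.VRInput().getDigitalActionData(
         self.action_hide_cubes, openvr.k_ulInvalidInputValueHandle)
     return data.bActive and data.bState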
Example #21
    def setController(self):
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        poses = poses_t()
        openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)

        matrix = poses[self.ctrl_index_r].mDeviceToAbsoluteTracking
        matrix2 = poses[self.ctrl_index_l].mDeviceToAbsoluteTracking

        try:
            camera = bpy.data.objects["Camera"]
            ctrl = bpy.data.objects["Controller.R"]
            ctrl_l = bpy.data.objects["Controller.L"]


            self.trans_matrix = camera.matrix_world * bpy.data.objects['Origin'].matrix_world
            RTS_matrix = Matrix(((matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3]),
                                 (matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3]),
                                 (matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3]),
                                 (0, 0, 0, 1)))

            RTS_matrix2 = Matrix(((matrix2[0][0], matrix2[0][1], matrix2[0][2], matrix2[0][3]),
                                 (matrix2[1][0], matrix2[1][1], matrix2[1][2], matrix2[1][3]),
                                 (matrix2[2][0], matrix2[2][1], matrix2[2][2], matrix2[2][3]),
                                 (0, 0, 0, 1)))

            # Interaction state active
            if(self.rotFlag):
                ctrl.matrix_world = self.trans_matrix * RTS_matrix
                bpy.data.objects["Text.R"].location = ctrl.location
                bpy.data.objects["Text.R"].rotation_quaternion = ctrl.rotation_quaternion * Quaternion((0.707, -0.707, 0, 0))

                ctrl_l.matrix_world = self.trans_matrix * RTS_matrix2
                bpy.data.objects["Text.L"].location = ctrl_l.location
                bpy.data.objects["Text.L"].rotation_quaternion = ctrl_l.rotation_quaternion * Quaternion((0.707, -0.707, 0, 0))

            # Navigation state active
            else:
                diff_rot_matr = self.diff_rot.to_matrix()
                inverted_matrix = RTS_matrix * diff_rot_matr.to_4x4()
                inverted_matrix = inverted_matrix.inverted()
                stMatrix = self.diff_trans_matrix * inverted_matrix
                quat = stMatrix.to_quaternion()
                camera.rotation_quaternion = quat

        except Exception as e:
            print("ERROR:", e)
Example #22
    def get_devices_poses_and_extract_matrix_from_them(self):
        """
        Getting poses from VR system. Poses as [list] contain all 
        geo-data of devices which is registred in VRSystem. 
        To extract data for exact device we need to get it from poses[] 
        with device's index obtained from get_controllers_id(). 
        """

        poses = []
        poses, _ = openvr.VRCompositor().waitGetPoses(poses, None)
        hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
        right_controller_pose = poses[self.right_controller_id_in_VRsystem]
        left_controller_pose = poses[self.left_controller_id_in_VRsystem]
        self.hmd_matrix = hmd_pose.mDeviceToAbsoluteTracking
        self.right_cont_matrix = right_controller_pose.mDeviceToAbsoluteTracking
        self.left_cont_matrix = left_controller_pose.mDeviceToAbsoluteTracking
Example #23
 def __init__(self,
              multisample=0,
              znear=0.1,
              zfar=1000,
              window_size=(960, 1080)):
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     w, h = self.vr_system.getRecommendedRenderTargetSize()
     self.window_size = window_size
     self.multisample = multisample
     self.vr_framebuffers = (OpenVRFramebuffer(w,
                                               h,
                                               multisample=multisample),
                             OpenVRFramebuffer(w,
                                               h,
                                               multisample=multisample))
     self.vr_compositor = openvr.VRCompositor()
     if self.vr_compositor is None:
         raise Exception('unable to create compositor')
     self.vr_framebuffers[0].init_gl()
     self.vr_framebuffers[1].init_gl()
     poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
     self.poses = poses_t()
     self.projection_matrices = (np.asarray(
         matrixForOpenVRMatrix(
             self.vr_system.getProjectionMatrix(openvr.Eye_Left, znear,
                                                zfar))),
                                 np.asarray(
                                     matrixForOpenVRMatrix(
                                         self.vr_system.getProjectionMatrix(
                                             openvr.Eye_Right, znear,
                                             zfar))))
     self.eye_transforms = (np.asarray(
         matrixForOpenVRMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
                            np.asarray(
                                matrixForOpenVRMatrix(
                                    self.vr_system.getEyeToHeadTransform(
                                        openvr.Eye_Right)).I))
     self.view_matrices = (np.empty(
         (4, 4), dtype=np.float32), np.empty((4, 4), dtype=np.float32))
     self.hmd_matrix = np.eye(4, dtype=np.float32)
     self.vr_event = openvr.VREvent_t()
     self._controller_indices = []
     for i in range(openvr.k_unMaxTrackedDeviceCount):
         if self.vr_system.getTrackedDeviceClass(
                 i) == openvr.TrackedDeviceClass_Controller:
             self._controller_indices.append(i)
Example #24
    def init_gl(self):
        "allocate OpenGL resources"
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        w, h = self.vr_system.getRecommendedRenderTargetSize()
        self.left_fb = OpenVrFramebuffer(w, h, multisample=self.multisample)
        self.right_fb = OpenVrFramebuffer(w, h, multisample=self.multisample)
        self.compositor = openvr.VRCompositor()
        if self.compositor is None:
            raise Exception("Unable to create compositor")
        self.left_fb.init_gl()
        self.right_fb.init_gl()
        # Compute projection matrix
        zNear = 0.2
        zFar = 500.0
        self.projection_left = numpy.asarray(
            matrixForOpenVrMatrix(
                self.vr_system.getProjectionMatrix(openvr.Eye_Left, zNear,
                                                   zFar)))
        self.projection_right = numpy.asarray(
            matrixForOpenVrMatrix(
                self.vr_system.getProjectionMatrix(openvr.Eye_Right, zNear,
                                                   zFar)))
        self.view_left = matrixForOpenVrMatrix(
            self.vr_system.getEyeToHeadTransform(
                openvr.Eye_Left)).I  # head_X_eye in Kane notation
        self.view_right = matrixForOpenVrMatrix(
            self.vr_system.getEyeToHeadTransform(
                openvr.Eye_Right)).I  # head_X_eye in Kane notation

        self.texture_t_left = openvr.Texture_t()
        self.texture_t_left.eType = openvr.TextureType_OpenGL
        self.texture_t_left.eColorSpace = openvr.ColorSpace_Gamma

        self.texture_t_right = openvr.Texture_t()
        self.texture_t_right.eType = openvr.TextureType_OpenGL
        self.texture_t_right.eColorSpace = openvr.ColorSpace_Gamma

        # Create left ZED image texture
        self.zedTextureID_L = glGenTextures(1)

        # Create right ZED image texture
        self.zedTextureID_R = glGenTextures(1)
Example #25
    def update_page(self):

        openvr.VRCompositor().waitGetPoses(self.poses, len(self.poses), None,
                                           0)
        vrsys = openvr.VRSystem()

        poses = {}
        hmd_index = openvr.k_unTrackedDeviceIndex_Hmd
        beacon_indices = []
        controller_indices = []

        for i in range(len(self.poses)):

            device_class = vrsys.getTrackedDeviceClass(i)
            if device_class == openvr.TrackedDeviceClass_Invalid:
                continue
            elif device_class == openvr.TrackedDeviceClass_Controller:
                controller_indices.append(i)
            elif device_class == openvr.TrackedDeviceClass_TrackingReference:
                beacon_indices.append(i)

            model_name = vrsys.getStringTrackedDeviceProperty(
                i, openvr.Prop_RenderModelName_String)
            pose = self.poses[i]

            poses[i] = dict(
                model_name=model_name,
                device_is_connected=pose.bDeviceIsConnected,
                valid=pose.bPoseIsValid,
                tracking_result=pose.eTrackingResult,
                d2a=pose.mDeviceToAbsoluteTracking,
                velocity=pose.vVelocity,  # m/s
                angular_velocity=pose.vAngularVelocity  # radians/s?
            )

        template = jinja_env.get_template('status.html')
        html = template.render(poses=poses,
                               hmd_index=hmd_index,
                               controller_indices=controller_indices,
                               beacon_indices=beacon_indices)

        self.webview.setHtml(html)
        self.update()
Example #26
    def init(self, near=0.2, far=500.0, root=None):
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        width, height = self.vr_system.getRecommendedRenderTargetSize()
        self.compositor = openvr.VRCompositor()
        self.vr_input = openvr.VRInput()
        if self.compositor is None:
            raise Exception("Unable to create compositor")

        if root is None:
            root = render
        self.tracking_space = root.attach_new_node('tracking-space')
        self.hmd_anchor = self.tracking_space.attach_new_node('hmd-anchor')
        self.left_eye_anchor = self.hmd_anchor.attach_new_node('left-eye')
        self.right_eye_anchor = self.hmd_anchor.attach_new_node('right-eye')

        self.projection_left = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Left, near, far))
        self.projection_right = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Right, near, far))

        left_cam_node = self.create_camera('left-cam', self.projection_left)
        right_cam_node = self.create_camera('right-cam', self.projection_right)

        self.left_cam = self.left_eye_anchor.attach_new_node(left_cam_node)
        self.right_cam = self.right_eye_anchor.attach_new_node(right_cam_node)

        self.left_texture = self.create_renderer('left-buffer', self.left_cam,
                                                 width, height, self.left_cb)
        self.right_texture = self.create_renderer('right-buffer',
                                                  self.right_cam, width,
                                                  height, self.right_cb)

        self.disable_main_cam()
        self.replicate(self.left_texture)

        self.init_action()

        taskMgr.add(self.update_poses_task, "openvr-update-poses", sort=-1000)
Example #27
    q['x'] = sqrt(max(0, 1 + matrix[0][0] - matrix[1][1] - matrix[2][2])) / 2.0
    q['y'] = sqrt(max(0, 1 - matrix[0][0] + matrix[1][1] - matrix[2][2])) / 2.0
    q['z'] = sqrt(max(0, 1 - matrix[0][0] - matrix[1][1] + matrix[2][2])) / 2.0

    # Recover the quaternion component signs from the off-diagonal terms
    q['x'] = copysign(q['x'], matrix[2][1] - matrix[1][2])
    q['y'] = copysign(q['y'], matrix[0][2] - matrix[2][0])
    q['z'] = copysign(q['z'], matrix[1][0] - matrix[0][1])

    #Save to dictionary
    pose['position'] = position
    pose['orientation'] = q

    return pose


openvr.init(openvr.VRApplication_Scene)

poses = []  # Let waitGetPoses populate the poses structure the first time

# Print converted XYZ and Quaternion rotation 100 times
for i in range(100):
    poses, game_poses = openvr.VRCompositor().waitGetPoses(poses, None)
    hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
    xyz = matrixToXYZ(hmd_pose.mDeviceToAbsoluteTracking)
    print(xyz)
    sys.stdout.flush()
    time.sleep(0.2)

openvr.shutdown()
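The fragment above starts mid-function. A reconstruction of the elided head of matrixToXYZ, consistent with its tail (the position extraction and the w component are assumptions based on the other examples):

def matrixToXYZ(matrix):
    pose = {}
    # Translation from the last column of the 3x4 pose matrix.
    position = {
        'x': matrix[0][3],
        'y': matrix[1][3],
        'z': matrix[2][3],
    }
    q = {}
    q['w'] = sqrt(max(0, 1 + matrix[0][0] + matrix[1][1] + matrix[2][2])) / 2.0
    # ... continues with the x/y/z magnitude and sign lines shown above ...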
Example #28
def take_steamvr_images(save_dir, num_images, delay_between_images):
    plt.show()

    openvr.init(openvr.VRApplication_Scene)

    convert_coordinate_system = np.identity(4)
    convert_coordinate_system[:3, :3] = Rotation.from_euler(
        'XYZ', (180, 0, 0), degrees=True).as_matrix()

    device = openvr.k_unTrackedDeviceIndex_Hmd

    num_cameras = openvr.VRSystem().getInt32TrackedDeviceProperty(
        device, openvr.Prop_NumCameras_Int32)

    camera_to_head_mat = (openvr.HmdMatrix34_t * num_cameras)()

    openvr.VRSystem().getArrayTrackedDeviceProperty(
        device, openvr.Prop_CameraToHeadTransforms_Matrix34_Array,
        openvr.k_unHmdMatrix34PropertyTag, camera_to_head_mat,
        48 * num_cameras)

    cam = openvr.VRTrackedCamera()

    cam_handle = cam.acquireVideoStreamingService(device)

    width, height, buffer_size = cam.getCameraFrameSize(
        device, openvr.VRTrackedCameraFrameType_MaximumUndistorted)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel('x axis - metres')
    ax.set_ylabel('z axis - metres')
    ax.set_zlabel('y axis - metres')
    ax.set_xlim(-0.5, 0.5)
    ax.set_ylim(-0.5, 0.5)
    ax.set_zlim(0, 1)

    save_dir = ColmapFolder(save_dir)

    db = COLMAPDatabase.connect(save_dir.database_path)
    db.create_tables()

    init_params = np.array(
        (420.000000, (width / num_cameras) / 2, height / 2, 0.000000))

    camera_model = CAMERA_MODEL_NAMES['SIMPLE_RADIAL']

    cameras = {}
    camera_to_head_transforms = {}

    for i in range(num_cameras):
        cam_id = db.add_camera(camera_model.model_id, width / 2, height,
                               init_params)
        camera_to_head_transforms[cam_id] = hmd_matrix_to_numpy(
            camera_to_head_mat[i])
        cameras[cam_id] = Camera(id=cam_id,
                                 model=camera_model.model_name,
                                 width=width / num_cameras,
                                 height=height,
                                 params=init_params)

    poses = []  # Let waitGetPoses populate the poses structure the first time
    cam_positions = []

    images = []

    for i in range(num_images):

        poses, game_poses = openvr.VRCompositor().waitGetPoses(poses, None)
        hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]

        if not hmd_pose.bPoseIsValid:
            print("Pose not valid")
            continue

        world_to_head = hmd_matrix_to_numpy(hmd_pose.mDeviceToAbsoluteTracking)

        world_to_cams = {
            id_: world_to_head @ head_to_cam @ convert_coordinate_system
            for (id_, head_to_cam) in camera_to_head_transforms.items()
        }

        image_buffer = (ctypes.c_ubyte * buffer_size)()
        try:
            cam.getVideoStreamFrameBuffer(
                cam_handle, openvr.VRTrackedCameraFrameType_MaximumUndistorted,
                image_buffer, buffer_size)
        except Exception:
            print("Error getting video stream buffer")
            continue

        image_array = np.array(image_buffer)

        image_array = image_array.reshape((height, width, 4))

        image_array = image_array[:, :, 0:3]

        image_array = np.clip(image_array, 0, 255)

        for j, (cam_id, world_to_cam) in enumerate(world_to_cams.items()):
            image = Image.fromarray(
                image_array[:,
                            int(width / num_cameras) *
                            j:int(width / num_cameras) * (j + 1), :])

            name = f"{i:03d}_cam{j}.jpg"

            image.save(save_dir.images_path / name)

            image_obj = read_write_model.Image(
                camera_id=cam_id,
                name=name,
                transformation_matrix=world_to_cam)

            images.append(image_obj)

            draw_axes(ax, transform_mat=world_to_cam)

        fig.show()

        fig.canvas.draw()
        fig.canvas.flush_events()
        time.sleep(delay_between_images)
        print(f"Picture taken :{i}")

    image_dict = {}

    print("All pictures taken")

    with open(save_dir.geo_reg_path, 'w') as geo_reg_file:

        for image in images:
            image_id = db.add_image(image=image)
            image.id = image_id
            image_dict[image_id] = image
            geo_reg_file.write(
                f"{image.name} {' '.join(map(str, image.transformation_matrix[0:3, 3]))}\n"
            )

    read_write_model.write_model(cameras, image_dict, {}, save_dir.sparse_path,
                                 '.txt')

    db.commit()
    db.close()

    print("Metadata saved")

    openvr.shutdown()
    plt.show()
Example #29
def start_tracker():

    # Default orientation values
    rollOffset = 0
    pitchOffset = 0
    yawOffset = 0
    roll = 0
    pitch = 0
    yaw = 0
    Filterset = 0

    oscIdentifier = '/pyBinSim'
    ip = '127.0.0.1'
    port = 10000
    nSources = 0
    minAngleDifference = 4

    run = True

    if len(sys.argv) > 1:
        for i in range(len(sys.argv)):

            if (sys.argv[i] == '-port'):
                port = int(sys.argv[i + 1])

            if (sys.argv[i] == '-ip'):
                ip = sys.argv[i + 1]

            if (sys.argv[i] == '-nSources'):
                nSources = int(sys.argv[i + 1])

            if (sys.argv[i] == '-angleStep'):
                minAngleDifference = int(sys.argv[i + 1])

    # Internal settings:
    positionVector = np.arange(360)
    positionVectorSubSampled = range(0, 360, minAngleDifference)

    # Create OSC client
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default=ip, help="The ip of the OSC server")
    parser.add_argument("--port",
                        type=int,
                        default=port,
                        help="The port the OSC server is listening on")
    args = parser.parse_args()

    client = udp_client.SimpleUDPClient(args.ip, args.port)

    # init openvr for HTC Vive
    help(openvr.VRSystem)
    openvr.init(openvr.VRApplication_Scene)

    poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
    poses = poses_t()

    try:
        while 1:
            openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
            hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
            v = hmd_pose.mDeviceToAbsoluteTracking

            ## extraction of angles from rotation matrix
            ## to get yaw from 0 to 360 degree, axis 0 and 1 have been switched

            yawRad = np.arctan2(v[0][2], v[2][2])
            yaw = int(round(np.degrees(yawRad)))

            pitchRad = np.arctan2(
                -v[1][2], np.sqrt(np.square(v[0][2]) + np.square(v[2][2])))
            pitch = int(round(np.degrees(pitchRad)))

            rollRad = np.arctan2(v[1][0], v[1][1])
            roll = int(round(np.degrees(rollRad)))

            posX = v[0][3]
            posY = v[1][3]
            posZ = v[2][3]

            #print(['YAW: ',yaw,' PITCH: ',pitch,'ROLL: ',roll])
            #print(['X: ',round(posX,2),' Y: ',round(posY,2),'Z: ',round(posZ,2)])

            # adjustment to desired global origin
            posZ = posZ + 0.8
            if yaw < 0:
                yaw = 360 + yaw

            yaw = 360 - yaw

            # select filterset according to Z-value of listener
            Filterset = int(round(posZ / 0.25))  # 25cm resolution

            if Filterset > 8:
                Filterset = 8
            elif Filterset < 0:
                Filterset = 0

            # Build and send OSC message
            #channel valueOne valueTwo ValueThree valueFour valueFive ValueSix
            yaw = min(positionVectorSubSampled, key=lambda x: abs(x - yaw))
            binSimParameters = [0, yaw, Filterset, 0, 0, 0, 0]
            print(' Yaw: ', yaw, ' Filterset: ', Filterset, ' PosZ ', posZ)
            client.send_message(oscIdentifier, binSimParameters)

            sys.stdout.flush()

            time.sleep(0.02)

    except KeyboardInterrupt:  # Break if ctrl+c is pressed

        # Console output
        print("Done")
Example #30
 def __init__(self,
              multisample=0,
              znear=0.1,
              zfar=1000,
              window_size=(960, 1080)):
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     w, h = self.vr_system.getRecommendedRenderTargetSize()
     self.render_target_size = np.array((w, h), dtype=np.float32)
     self.window_size = np.array(window_size, dtype=np.int64)
     self.multisample = multisample
     self.vr_framebuffers = (OpenVRFramebuffer(w,
                                               h,
                                               multisample=multisample),
                             OpenVRFramebuffer(w,
                                               h,
                                               multisample=multisample))
     self.vr_compositor = openvr.VRCompositor()
     if self.vr_compositor is None:
         raise Exception('unable to create compositor')
     self.vr_framebuffers[0].init_gl()
     self.vr_framebuffers[1].init_gl()
     poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
     self.poses = poses_t()
     self.znear, self.zfar = znear, zfar
     self.projection_matrices = (np.asarray(
         matrixForOpenVRMatrix(
             self.vr_system.getProjectionMatrix(openvr.Eye_Left, znear,
                                                zfar))),
                                 np.asarray(
                                     matrixForOpenVRMatrix(
                                         self.vr_system.getProjectionMatrix(
                                             openvr.Eye_Right, znear,
                                             zfar))))
     self.projection_lrbts = (np.array(
         self.vr_system.getProjectionRaw(openvr.Eye_Left)),
                              np.array(
                                  self.vr_system.getProjectionRaw(
                                      openvr.Eye_Right)))
     self.eye_to_head_transforms = (
         np.asarray(
             matrixForOpenVRMatrix(
                 self.vr_system.getEyeToHeadTransform(openvr.Eye_Left))),
         np.asarray(
             matrixForOpenVRMatrix(
                 self.vr_system.getEyeToHeadTransform(openvr.Eye_Right))))
     self.eye_transforms = (np.asarray(
         matrixForOpenVRMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
                            np.asarray(
                                matrixForOpenVRMatrix(
                                    self.vr_system.getEyeToHeadTransform(
                                        openvr.Eye_Right)).I))
     self.eye_matrices = (np.eye(4, dtype=np.float32),
                          np.eye(4, dtype=np.float32))
     self.camera_matrices = (np.eye(4, dtype=np.float32),
                             np.eye(4, dtype=np.float32))
     self.hmd_matrix = np.eye(4, dtype=np.float32)
     self.hmd_matrix_inv = np.eye(4, dtype=np.float32)
     self.vr_event = openvr.VREvent_t()
     self._nframes = 0
     self._poll_for_controllers()