Example #1
 def __init__(self, multisample=0, znear=0.1, zfar=1000, poll_tracked_device_frequency=None):
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     w, h = self.vr_system.getRecommendedRenderTargetSize()
     self.vr_framebuffers = (OpenVRFramebuffer(w, h, multisample=multisample),
                             OpenVRFramebuffer(w, h, multisample=multisample))
     self._multisample = multisample
     self.vr_compositor = openvr.VRCompositor()
     if self.vr_compositor is None:
         raise Exception('unable to create compositor')
     self.vr_framebuffers[0].init_gl()
     self.vr_framebuffers[1].init_gl()
     self._poses = (openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount)()
     self.projection_matrices = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Left,
                                                                                                     znear, zfar))),
                                 np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Right,
                                                                                                     znear, zfar))))
     self.eye_transforms = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
                            np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Right)).I))
     self.view = np.eye(4, dtype=np.float32)
     self.view_matrices  = (np.empty((4,4), dtype=np.float32),
                            np.empty((4,4), dtype=np.float32))
     self.controllers = TrackedDevicesActor(self._poses)
     #self.controllers.show_controllers_only = False
     self.controllers.init_gl()
     self.vr_event = openvr.VREvent_t()
     self._poll_tracked_device_count()
     self._poll_tracked_device_frequency = poll_tracked_device_frequency
     self._frames_rendered = 0
     self._pulse_t0 = 0.0
Example #2
    def __init__(self):
        """
        A representation of the OpenVR SDK
        """

        self.openvr = openvr.init(openvr.VRApplication_Overlay)
        self.hmd = HMD(vr=self.openvr)
Example #3
 def init_gl(self):
     "allocate OpenGL resources"
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     w, h = self.vr_system.getRecommendedRenderTargetSize()
     self.left_fb = OpenVrFramebuffer(w, h, multisample=self.multisample)
     self.right_fb = OpenVrFramebuffer(w, h, multisample=self.multisample)
     self.compositor = openvr.VRCompositor()
     if self.compositor is None:
         raise Exception("Unable to create compositor")
     self.left_fb.init_gl()
     self.right_fb.init_gl()
     # Compute projection matrix
     zNear = 0.2
     zFar = 500.0
     self.projection_left = numpy.asarray(matrixForOpenVrMatrix(self.vr_system.getProjectionMatrix(
             openvr.Eye_Left,
             zNear, zFar)))
     self.projection_right = numpy.asarray(matrixForOpenVrMatrix(self.vr_system.getProjectionMatrix(
             openvr.Eye_Right,
             zNear, zFar)))
     self.view_left = matrixForOpenVrMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I # head_X_eye in Kane notation
     self.view_right = matrixForOpenVrMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Right)).I # head_X_eye in Kane notation
     for actor in self:
         actor.init_gl()
Example #4
def v_initPre():
    ''' init vr before MuJoCo init '''
    global hmd
    hmd.system = openvr.init(openvr.VRApplication_Scene)
    hmd.roompos = np.zeros(3)
    hmd.roommat = np.eye(3)
    hmd.eyeoffset = np.zeros((2, 3))
    openvr.VRCompositor().setTrackingSpace(openvr.TrackingUniverseStanding)
    hmd.width, hmd.height = hmd.system.getRecommendedRenderTargetSize()
    for n in range(2):
        hmd.eyeoffset[n] = np.array(hmd.system.getEyeToHeadTransform(n).m)[0:3, 3]
Example #5
    def __init__(self, vr=None):
        """
        A base object for all tracked openvr elements

        :param vr: An already initialised openvr instance; if None, a new one is created
        :type vr: openvr.IVRSystem
        """

        if vr is None:
            self.openvr = openvr.init(openvr.VRApplication_Utility)
        else:
            self.openvr = vr
Example #6
 def __init__(self):
     "One time initialization"
     # Glut
     glutInit()
     glutInitDisplayMode(GLUT_RGBA)
     # Create a regular desktop window, just so we can have an OpenGL context to play with
     glutInitWindowSize(400, 400)
     glutInitWindowPosition(50, 50)
     self.win = glutCreateWindow(b"Pink world")
     # Set up callback methods for use during the GLUT main loop
     glutDisplayFunc(self.display)
     glutIdleFunc(self.display)
     glutReshapeFunc(self.resize_gl)
     glutKeyboardFunc(self.key_press)
     # OpenVR
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     self.vr_width, self.vr_height = self.vr_system.getRecommendedRenderTargetSize()
     self.compositor = openvr.VRCompositor()
     if self.compositor is None:
         raise Exception("Unable to create compositor") 
     poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
     self.poses = poses_t()
     #
     # Set up framebuffer and render textures
     self.fb = glGenFramebuffers(1)
     glBindFramebuffer(GL_FRAMEBUFFER, self.fb)
     self.depth_buffer = glGenRenderbuffers(1)
     glBindRenderbuffer(GL_RENDERBUFFER, self.depth_buffer)
     glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, self.vr_width, self.vr_height)
     glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, self.depth_buffer)
     self.texture_id = glGenTextures(1)
     glBindTexture(GL_TEXTURE_2D, self.texture_id)
     glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self.vr_width, self.vr_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0)  
     glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.texture_id, 0)
     status = glCheckFramebufferStatus(GL_FRAMEBUFFER)
     if status != GL_FRAMEBUFFER_COMPLETE:
         glBindFramebuffer(GL_FRAMEBUFFER, 0)
         raise Exception("Incomplete framebuffer")
     glBindFramebuffer(GL_FRAMEBUFFER, 0)
     # OpenVR texture data
     self.texture = openvr.Texture_t()
     self.texture.handle = self.texture_id
     self.texture.eType = openvr.TextureType_OpenGL
     self.texture.eColorSpace = openvr.ColorSpace_Gamma 
Example #7
    def __init__(self):
        QWidget.__init__(self)
        
        self.resize(1600, 940)
        self.setWindowTitle('OpenVR tracking data')
        
        self.webview = QWebView()            

        self.button = QPushButton('Quit', self)
        self.button.clicked.connect(self.close)
        layout = QVBoxLayout(self)
        layout.setSpacing(0)
        layout.setMargin(0)
        layout.addWidget(self.button)
        layout.addWidget(self.webview)
        
        # XXX check result
        openvr.init(openvr.VRApplication_Scene)        
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()
        
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_page)
        self.timer.start(50)   # ms
Example #8
import openvr
from sys import stdout

# Init code
poses = []
openvr.init(openvr.VRApplication_Background)

# Find the controller
# for index in range(openvr.length):
# print(openvr.VRSystem().getTrackedDeviceClass(3))
print(openvr.IVRSystem().getStringTrackedDeviceProperty(1, 1000))

IVRSystem = openvr.IVRSystem()

print("Searching for controllers...")
# Loop through all the devices connected to OpenVR

rightController = None
leftController = None

for index in range(0, openvr.k_unMaxTrackedDeviceCount):

    # The device is a controller
    if IVRSystem.getTrackedDeviceClass(
            index) == openvr.TrackedDeviceClass_Controller:
        print("ID:", index, " Controller role:",
              IVRSystem.getControllerRoleForTrackedDeviceIndex(index))

        # If the controller currently being iterated over is the right-hand one
        if IVRSystem.getControllerRoleForTrackedDeviceIndex(
                index) == openvr.TrackedControllerRole_RightHand:
            rightController = index
        # Otherwise check for the left-hand controller
        elif IVRSystem.getControllerRoleForTrackedDeviceIndex(
                index) == openvr.TrackedControllerRole_LeftHand:
            leftController = index
Example #9
def start_tracker():

    # Default orientation values
    rollOffset = 0
    pitchOffset = 0
    yawOffset = 0
    roll = 0
    pitch = 0
    yaw = 0
    Filterset = 0

    oscIdentifier = '/pyBinSim'
    ip = '127.0.0.1'
    port = 10000
    nSources = 0
    minAngleDifference = 4

    run = True

    if len(sys.argv) > 1:
        for i in range(len(sys.argv)):

            if (sys.argv[i] == '-port'):
                port = int(sys.argv[i + 1])

            if (sys.argv[i] == '-ip'):
                ip = sys.argv[i + 1]

            if (sys.argv[i] == '-nSources'):
                nSources = int(sys.argv[i + 1])

            if (sys.argv[i] == '-angleStep'):
                minAngleDifference = int(sys.argv[i + 1])

    # Internal settings:
    positionVector = np.arange(360)
    positionVectorSubSampled = range(0, 360, minAngleDifference)

    # Create OSC client
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default=ip, help="The ip of the OSC server")
    parser.add_argument("--port",
                        type=int,
                        default=port,
                        help="The port the OSC server is listening on")
    args = parser.parse_args()

    client = udp_client.SimpleUDPClient(args.ip, args.port)

    # init openvr for HTC Vive
    openvr.init(openvr.VRApplication_Scene)

    poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
    poses = poses_t()

    try:
        while 1:
            openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
            hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
            v = hmd_pose.mDeviceToAbsoluteTracking

            ## extraction of angles from rotation matrix
            ## to get yaw from 0 to 360 degree, axis 0 and 1 have been switched

            yawRad = np.arctan2(v[0][2], v[2][2])
            yaw = int(round(np.degrees(yawRad)))

            pitchRad = np.arctan2(
                -v[1][2], np.sqrt(np.square(v[0][2]) + np.square(v[2][2])))
            pitch = int(round(np.degrees(pitchRad)))

            rollRad = np.arctan2(v[1][0], v[1][1])
            roll = int(round(np.degrees(rollRad)))

            posX = v[0][3]
            posY = v[1][3]
            posZ = v[2][3]

            #print(['YAW: ',yaw,' PITCH: ',pitch,'ROLL: ',roll])
            #print(['X: ',round(posX,2),' Y: ',round(posY,2),'Z: ',round(posZ,2)])

            # adjustment to desired global origin
            posZ = posZ + 0.8
            if yaw < 0:
                yaw = 360 + yaw

            yaw = 360 - yaw

            # select filterset according to Z-value of listener
            Filterset = int(round(posZ / 0.25))  # 25cm resolution

            if Filterset > 8:
                Filterset = 8
            elif Filterset < 0:
                Filterset = 0

            # Build and send OSC message
            #channel valueOne valueTwo ValueThree valueFour valueFive ValueSix
            yaw = min(positionVectorSubSampled, key=lambda x: abs(x - yaw))
            binSimParameters = [0, yaw, Filterset, 0, 0, 0, 0]
            print(' Yaw: ', yaw, ' Filterset: ', Filterset, ' PosZ: ', posZ)
            client.send_message(oscIdentifier, binSimParameters)

            sys.stdout.flush()

            time.sleep(0.02)

    except KeyboardInterrupt:  # Break if ctrl+c is pressed

        # Console output
        print("Done")
Example #10
 def __init__(self,
              multisample=0,
              znear=0.1,
              zfar=1000,
              window_size=(960, 1080)):
     self.vr_system = openvr.init(openvr.VRApplication_Scene)
     w, h = self.vr_system.getRecommendedRenderTargetSize()
     self.render_target_size = np.array((w, h), dtype=np.float32)
     self.window_size = np.array(window_size, dtype=np.int64)
     self.multisample = multisample
     self.vr_framebuffers = (OpenVRFramebuffer(w,
                                               h,
                                               multisample=multisample),
                             OpenVRFramebuffer(w,
                                               h,
                                               multisample=multisample))
     self.vr_compositor = openvr.VRCompositor()
     if self.vr_compositor is None:
         raise Exception('unable to create compositor')
     self.vr_framebuffers[0].init_gl()
     self.vr_framebuffers[1].init_gl()
     poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
     self.poses = poses_t()
     self.znear, self.zfar = znear, zfar
     self.projection_matrices = (np.asarray(
         matrixForOpenVRMatrix(
             self.vr_system.getProjectionMatrix(openvr.Eye_Left, znear,
                                                zfar))),
                                 np.asarray(
                                     matrixForOpenVRMatrix(
                                         self.vr_system.getProjectionMatrix(
                                             openvr.Eye_Right, znear,
                                             zfar))))
     self.projection_lrbts = (np.array(
         self.vr_system.getProjectionRaw(openvr.Eye_Left)),
                              np.array(
                                  self.vr_system.getProjectionRaw(
                                      openvr.Eye_Right)))
     self.eye_to_head_transforms = (
         np.asarray(
             matrixForOpenVRMatrix(
                 self.vr_system.getEyeToHeadTransform(openvr.Eye_Left))),
         np.asarray(
             matrixForOpenVRMatrix(
                 self.vr_system.getEyeToHeadTransform(openvr.Eye_Right))))
     self.eye_transforms = (np.asarray(
         matrixForOpenVRMatrix(
             self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
                            np.asarray(
                                matrixForOpenVRMatrix(
                                    self.vr_system.getEyeToHeadTransform(
                                        openvr.Eye_Right)).I))
     self.eye_matrices = (np.eye(4, dtype=np.float32),
                          np.eye(4, dtype=np.float32))
     self.camera_matrices = (np.eye(4, dtype=np.float32),
                             np.eye(4, dtype=np.float32))
     self.hmd_matrix = np.eye(4, dtype=np.float32)
     self.hmd_matrix_inv = np.eye(4, dtype=np.float32)
     self.vr_event = openvr.VREvent_t()
     self._nframes = 0
     self._poll_for_controllers()
Example #11
#!/bin/env python

import sys
import time
import openvr

openvr.init(openvr.VRApplication_Scene)

# Create data structure to hold locations of tracked devices such as headsets, controllers, and lighthouses
poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
poses = poses_t()

# Print out headset transform five times a second for 20 seconds
for i in range(100):
    openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
    hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
    print(hmd_pose.mDeviceToAbsoluteTracking)
    sys.stdout.flush()
    time.sleep(0.2)

openvr.shutdown()
Example #12
#!/bin/env python

import sys
import time
import openvr

print ("OpenVR test program")

if openvr.isHmdPresent():
    print ("VR head set found")

if openvr.isRuntimeInstalled():
    print ("Runtime is installed")

vr_system = openvr.init(openvr.VRApplication_Scene)

print (openvr.runtimePath())

print (vr_system.getRecommendedRenderTargetSize())

print (vr_system.isDisplayOnDesktop())

for i in range(10):
    xform = vr_system.getEyeToHeadTransform(openvr.Eye_Left)
    print (xform)
    sys.stdout.flush()
    time.sleep(0.2)

openvr.shutdown()
Example #13
import sys
import time
import openvr
import math
import numpy
openvr.init(openvr.VRApplication_Scene)

poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
poses = poses_t()


def main():
    for i in range(10000):
        openvr.VRCompositor().waitGetPoses(poses, len(poses), None, 0)
        hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]
        #print(str(hmd_pose.mDeviceToAbsoluteTracking))
        rotation = getQuaternion(hmd_pose.mDeviceToAbsoluteTracking)
        print(rotation.x, rotation.y, rotation.z, rotation.w)
        #print("one"+hmd_pose.mDeviceToAbsoluteTracking[1])
        #print("two"+hmd_pose.mDeviceToAbsoluteTracking[2])
        sys.stdout.flush()
        time.sleep(0.01)


def getQuaternion(matrix):
    q = openvr.HmdQuaternion_t()
    q.w = math.sqrt(
        numpy.fmax(0,
                   1 + matrix.m[0][0] + matrix.m[1][1] + matrix.m[2][2])) / 2
    q.x = math.sqrt(
        numpy.fmax(0,
                   1 + matrix.m[0][0] - matrix.m[1][1] - matrix.m[2][2])) / 2
    q.y = math.sqrt(
        numpy.fmax(0,
                   1 - matrix.m[0][0] + matrix.m[1][1] - matrix.m[2][2])) / 2
    q.z = math.sqrt(
        numpy.fmax(0,
                   1 - matrix.m[0][0] - matrix.m[1][1] + matrix.m[2][2])) / 2
    # Recover the signs of x, y, z from the off-diagonal elements
    q.x = math.copysign(q.x, matrix.m[2][1] - matrix.m[1][2])
    q.y = math.copysign(q.y, matrix.m[0][2] - matrix.m[2][0])
    q.z = math.copysign(q.z, matrix.m[1][0] - matrix.m[0][1])
    return q
Example #14
def getBsPos():
    """
    Get the position of the base stations.
 
    2019-10-02 Code for getting the base station positions taken from:
    https://github.com/bitcraze/crazyflie-firmware/blob/master/tools/lighthouse/get_bs_position.py
    """

    print("Opening OpenVR")
    vr = openvr.init(openvr.VRApplication_Other)

    print("OpenVR Opened")
    devices = {}
    poses = vr.getDeviceToAbsoluteTrackingPose(
        openvr.TrackingUniverseStanding, 0, openvr.k_unMaxTrackedDeviceCount)

    if CENTER_AROUND_CONTROLLER:
        offset = None
        # Acquire offset
        for i in range(openvr.k_unMaxTrackedDeviceCount):
            if poses[i].bPoseIsValid:
                device_class = vr.getTrackedDeviceClass(i)
                if device_class == openvr.TrackedDeviceClass_Controller or \
                        device_class == openvr.TrackedDeviceClass_GenericTracker:
                    pose = poses[i].mDeviceToAbsoluteTracking
                    offset = [pose[0][3], pose[1][3], pose[2][3]]
                    break

        if offset is None:
            print(
                "Controller not found, place controller at the origin of the space"
            )
            openvr.shutdown()
            sys.exit(1)
    else:
        offset = [0, 0, 0]

    print("Origin: {}", offset)

    print("-------------------------------")

    bs_poses = [None, None]

    for i in range(openvr.k_unMaxTrackedDeviceCount):
        if poses[i].bPoseIsValid:
            device_class = vr.getTrackedDeviceClass(i)
            if (device_class == openvr.TrackedDeviceClass_TrackingReference):

                mode = vr.getStringTrackedDeviceProperty(
                    i, openvr.Prop_ModeLabel_String)
                try:
                    mode = mode.decode("utf-8")
                except AttributeError:
                    # likely already decoded
                    pass

                pose = poses[i].mDeviceToAbsoluteTracking

                # Mode 'B' is master
                if mode == 'B':
                    bs_poses[0] = pose
                elif mode == 'A' or mode == 'C':
                    bs_poses[1] = pose
                else:
                    print("Base station with mode {} detected.".format(mode))
                    print(
                        "This script can only work with base station V1 (mode A, B or C). Exiting."
                    )
                    sys.exit(1)

    for pose in bs_poses:
        if pose is None:
            continue

        position = [
            pose[0][3] - offset[0], pose[1][3] - offset[1],
            pose[2][3] - offset[2]
        ]
        rotation = [pose[0][:3], pose[1][:3], pose[2][:3]]

        print("{.origin = {", end='')
        for i in position:
            print("{:0.6f}, ".format(i), end='')

        print("}, .mat = {", end='')

        for i in rotation:
            print("{", end='')
            for j in i:
                print("{:0.6f}, ".format(j), end='')
            print("}, ", end='')

        print("}},")

    openvr.shutdown()
    return bs_poses, offset
Example #15
from openvr.color_cube_actor import ColorCubeActor
from openvr.gl_renderer import OpenVrGlRenderer
from sdl_app import SdlApp
import sys
import time
import openvr
"""
Minimal sdl programming example which colored OpenGL cube scene that can be closed by pressing ESCAPE.
"""

print("OpenVR test program")

if openvr.isHmdPresent():
    print("VR head set found")

if openvr.isRuntimeInstalled():
    print("Runtime is installed")

vr_system = openvr.init(openvr.VRApplication_Scene)

print(openvr.runtimePath())

print(vr_system.getRecommendedRenderTargetSize())

print(vr_system.isDisplayOnDesktop())

if __name__ == "__main__":
    renderer = OpenVrGlRenderer(multisample=2)
    renderer.append(ColorCubeActor())
    with SdlApp(renderer, "sdl2 OpenVR color cube") as app:
        app.run_loop()
Example #16
    def __init__(self):
        # Initialize OpenVR as a non-rendering ('Other') application
        self.vr = openvr.init(openvr.VRApplication_Other)

        # Initializing object to hold indexes for various tracked objects
        self.object_names = {
            "Tracking Reference": [],
            "HMD": [],
            "Controller": [],
            "Tracker": []
        }
        self.devices = {}
        poses = self.vr.getDeviceToAbsoluteTrackingPose(
            openvr.TrackingUniverseStanding, 0,
            openvr.k_unMaxTrackedDeviceCount)

        # Loading config file
        self.config = None
        try:
            with open('config.json') as json_data:
                self.config = json.load(json_data)
        except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
            print('config.json not found, arbitrary id will be chosen.')

        if self.config is not None:
            # Iterate through the pose list to find the active devices and determine their type
            for i in range(openvr.k_unMaxTrackedDeviceCount):
                if poses[i].bPoseIsValid:
                    device_serial = self.vr.getStringTrackedDeviceProperty(
                        i, openvr.Prop_SerialNumber_String).decode('utf-8')
                    for device in self.config['devices']:
                        if device_serial == device['serial']:
                            device_name = device['name']
                            self.object_names[device['type']].append(
                                device_name)
                            self.devices[device_name] = vr_tracked_device(
                                self.vr, i, device['type'])
        else:
            # Iterate through the pose list to find the active devices and determine their type
            for i in range(openvr.k_unMaxTrackedDeviceCount):
                if poses[i].bPoseIsValid:
                    device_class = self.vr.getTrackedDeviceClass(i)
                    if (device_class == openvr.TrackedDeviceClass_Controller):
                        device_name = "controller_" + str(
                            len(self.object_names["Controller"]) + 1)
                        self.object_names["Controller"].append(device_name)
                        self.devices[device_name] = vr_tracked_device(
                            self.vr, i, "Controller")
                    elif (device_class == openvr.TrackedDeviceClass_HMD):
                        device_name = "hmd_" + str(
                            len(self.object_names["HMD"]) + 1)
                        self.object_names["HMD"].append(device_name)
                        self.devices[device_name] = vr_tracked_device(
                            self.vr, i, "HMD")
                    elif (device_class ==
                          openvr.TrackedDeviceClass_GenericTracker):
                        device_name = "tracker_" + str(
                            len(self.object_names["Tracker"]) + 1)
                        self.object_names["Tracker"].append(device_name)
                        self.devices[device_name] = vr_tracked_device(
                            self.vr, i, "Tracker")
                    elif (device_class ==
                          openvr.TrackedDeviceClass_TrackingReference):
                        device_name = "tracking_reference_" + str(
                            len(self.object_names["Tracking Reference"]) + 1)
                        self.object_names["Tracking Reference"].append(
                            device_name)
                        self.devices[device_name] = vr_tracking_reference(
                            self.vr, i, "Tracking Reference")
Example #17
 def setUp(self):
     self.vr_sys = openvr.init(openvr.VRApplication_Other)
Example #18
    def __init__(s):
        s.vr_system = openvr.init(openvr.VRApplication_Scene)
        s.vr_compositor = openvr.VRCompositor()
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        s.poses = poses_t()
        s.w, s.h = s.vr_system.getRecommendedRenderTargetSize()
        SDL_Init(SDL_INIT_VIDEO)
        s.window = SDL_CreateWindow(b"test", SDL_WINDOWPOS_CENTERED,
                                    SDL_WINDOWPOS_CENTERED, 100, 100,
                                    SDL_WINDOW_SHOWN | SDL_WINDOW_OPENGL)
        s.context = SDL_GL_CreateContext(s.window)
        SDL_GL_MakeCurrent(s.window, s.context)
        s.depth_buffer = glGenRenderbuffers(1)
        s.frame_buffers = glGenFramebuffers(2)
        s.texture_ids = glGenTextures(2)
        s.textures = [None] * 2
        s.eyes = [openvr.Eye_Left, openvr.Eye_Right]
        s.cameraToProjection = [None] * 2
        s.headToCamera = [None] * 2
        s.col3 = [0, 0, 0, 1]

        vertexShader = compileShader(
            shader_string("""
          layout (location=0) uniform mat4 cameraToProjection;
          layout (location=1) uniform mat4 modelToCamera;
          void main() {
            float angle = gl_VertexID * (3.14159*2/3);
            vec4 modelPos = vec4(cos(angle), sin(angle), -2, 1);
            gl_Position = cameraToProjection * (modelToCamera * modelPos);
          }
        """), GL_VERTEX_SHADER)
        fragmentShader = compileShader(
            shader_string("""
          out vec4 colour;
          void main() {
            colour = vec4(1, 0, 0, 1);
          }
        """), GL_FRAGMENT_SHADER)
        s.program = compileProgram(vertexShader, fragmentShader)
        s.vertexBuffer = glGenVertexArrays(1)

        for eye in range(2):
            glBindFramebuffer(GL_FRAMEBUFFER, s.frame_buffers[eye])
            glBindRenderbuffer(GL_RENDERBUFFER, s.depth_buffer)
            glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, s.w,
                                  s.h)
            glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                                      GL_DEPTH_STENCIL_ATTACHMENT,
                                      GL_RENDERBUFFER, s.depth_buffer)
            glBindTexture(GL_TEXTURE_2D, s.texture_ids[eye])
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, s.w, s.h, 0, GL_RGBA,
                         GL_UNSIGNED_BYTE, None)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                   GL_TEXTURE_2D, s.texture_ids[eye], 0)
            texture = openvr.Texture_t()
            texture.handle = int(s.texture_ids[eye])
            texture.eType = openvr.TextureType_OpenGL
            texture.eColorSpace = openvr.ColorSpace_Gamma
            s.textures[eye] = texture
            proj = s.vr_system.getProjectionMatrix(s.eyes[eye], 0.2, 500.0)
            s.cameraToProjection[eye] = numpy.matrix(
                [[proj.m[i][j] for i in range(4)] for j in range(4)],
                numpy.float32)
            camToHead = s.vr_system.getEyeToHeadTransform(s.eyes[eye])
            s.headToCamera[eye] = numpy.matrix(
                [[camToHead.m[i][j] for i in range(3)] + [s.col3[j]]
                 for j in range(4)], numpy.float32).I
Example #19
    def init(self,
             near=0.2,
             far=500.0,
             root=None,
             submit_together=True,
             msaa=0,
             replicate=1):
        """
        Initialize OpenVR. This method will create the rendering buffers, the cameras associated with each eye
        and the various anchors in the tracked space. It will also start the tasks responsible for the correct
        scheduling. This method should be called only once.

        * near and far parameters can be used to specify the near and far planes of the cameras.

        * root parameter, when not None, is used as the root node to which the tracked space will be attached.
          Otherwise the tracked space is attached to render.

        * submit_together specifies whether each eye buffer is submitted just after being rendered or both are submitted at once.

        * msaa specifies the multisampling anti-aliasing level to use, set to 0 to disable.

        * replicate specifies which eye is replicated on the application window.
          0: No replication, 1: left eye, 2: right eye.
        """

        self.submit_together = submit_together
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        width, height = self.vr_system.getRecommendedRenderTargetSize()
        self.compositor = openvr.VRCompositor()
        self.vr_input = openvr.VRInput()
        if self.compositor is None:
            raise Exception("Unable to create compositor")

        if root is None:
            root = render
        self.tracking_space = root.attach_new_node('tracking-space')
        self.hmd_anchor = self.tracking_space.attach_new_node('hmd-anchor')
        self.left_eye_anchor = self.hmd_anchor.attach_new_node('left-eye')
        self.right_eye_anchor = self.hmd_anchor.attach_new_node('right-eye')

        self.projection_left = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Left, near, far))
        self.projection_right = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Right, near, far))

        left_cam_node = self.create_camera('left-cam', self.projection_left)
        right_cam_node = self.create_camera('right-cam', self.projection_right)

        self.left_cam = self.left_eye_anchor.attach_new_node(left_cam_node)
        self.right_cam = self.right_eye_anchor.attach_new_node(right_cam_node)

        self.left_texture = self.create_renderer('left-buffer', self.left_cam,
                                                 width, height, msaa,
                                                 self.left_cb)
        self.right_texture = self.create_renderer('right-buffer',
                                                  self.right_cam, width,
                                                  height, msaa, self.right_cb)

        self.disable_main_cam()

        if replicate == 1:
            if self.verbose:
                print("Replicating left eye")
            self.replicate(self.left_texture)
        elif replicate == 2:
            if self.verbose:
                print("Replicating right eye")
            self.replicate(self.right_texture)
        else:
            if self.verbose:
                print("Eye replication disabled")

        self.init_action()

        taskMgr.add(self.update_poses_task, "openvr-update-poses", sort=-1000)
Example #20
import time

import openvr

import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.crazyflie.syncLogger import SyncLogger

# URI to the Crazyflie to connect to
uri0 = 'radio://0/80/2M'
uri1 = 'radio://0/80/2M/E7E7E7E701'

print('Opening')
vr = openvr.init(openvr.VRApplication_Other)
print('Opened')

# Find first controller or tracker
controllerId = None
poses = vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
                                           openvr.k_unMaxTrackedDeviceCount)
for i in range(openvr.k_unMaxTrackedDeviceCount):
    if poses[i].bPoseIsValid:
        device_class = vr.getTrackedDeviceClass(i)
        if device_class == openvr.TrackedDeviceClass_Controller or \
           device_class == openvr.TrackedDeviceClass_GenericTracker:
            controllerId = i
            break

if controllerId is None:
    raise RuntimeError('Cannot find any controller or tracker, aborting')
Example #21
import sys
import time
import openvr

openvr.init(openvr.VRApplication_Overlay)
ids = []

leftId = openvr.VRSystem().getTrackedDeviceIndexForControllerRole(
    openvr.TrackedControllerRole_LeftHand)
rightId = openvr.VRSystem().getTrackedDeviceIndexForControllerRole(
    openvr.TrackedControllerRole_RightHand)


def to_percent(value):
    return str(value * 100)[0:2]


def writefile(left, right):
    datafile = open("../storage/ovr.txt", "w")
    left = to_percent(left)
    right = to_percent(right)
    datafile.write("{:2s}.{:2s}".format(str(left), str(right)))
    datafile.close()


while True:
    overlay = openvr.IVROverlay()
    notification = openvr.IVRNotifications()
    left_battery = openvr.VRSystem().getFloatTrackedDeviceProperty(
        leftId, openvr.Prop_DeviceBatteryPercentage_Float)
    right_battery = openvr.VRSystem().getFloatTrackedDeviceProperty(
        rightId, openvr.Prop_DeviceBatteryPercentage_Float)
    writefile(left_battery, right_battery)
    time.sleep(1)
Example #22
 def __init__(self, topic_prefix):
     self.vr_system = openvr.init(openvr.VRApplication_Utility)
     self.topic_prefix = topic_prefix
     self.publishers = {}
Example #23
    def init(self,
             near=0.2,
             far=500.0,
             root=None,
             submit_together=True,
             msaa=0,
             replicate=1,
             srgb=None,
             hidden_area_mesh=True):
        """
        Initialize OpenVR. This method will create the rendering buffers, the cameras associated with each eye
        and the various anchors in the tracked space. It will also start the tasks responsible for the correct
        scheduling. This method should be called only once.

        * near and far parameters can be used to specify the near and far planes of the cameras.

        * root parameter, when not None, is used as the root node to which the tracked space will be attached.
          Otherwise the tracked space is attached to render.

        * submit_together specifies whether each eye buffer is submitted just after being rendered or both are submitted at once.

        * msaa specifies the multisampling anti-aliasing level to use, set to 0 to disable.

        * replicate specifies which eye is replicated on the application window.
          0: No replication, 1: left eye, 2: right eye.

        * srgb : If None, OpenVR will detect the color space from the texture format. If set to True, sRGB color
          space will be enforced; if set to False, linear color space will be enforced.

        * hidden_area_mesh : If True, a mask will be applied on each camera to cover the area not seen from the HMD.
          This will trigger the early-z optimization on the GPU and avoid rendering unseen pixels.
        """

        self.submit_together = submit_together
        if srgb is None:
            self.color_space = openvr.ColorSpace_Auto
        else:
            if srgb:
                self.color_space = openvr.ColorSpace_Gamma
            else:
                self.color_space = openvr.ColorSpace_Linear

        # Create an OpenVR array that will store the poses of all the tracked devices.
        poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
        self.poses = poses_t()

        # Initialise OpenVR and retrieve the main components
        self.vr_system = openvr.init(openvr.VRApplication_Scene)
        self.vr_applications = openvr.VRApplications()
        width, height = self.vr_system.getRecommendedRenderTargetSize()
        self.compositor = openvr.VRCompositor()
        self.vr_input = openvr.VRInput()
        if self.compositor is None:
            raise Exception("Unable to create compositor")

        # Create the tracking space anchors
        if root is None:
            root = self.base.render
        self.tracking_space = root.attach_new_node('tracking-space')
        self.hmd_anchor = self.tracking_space.attach_new_node('hmd-anchor')
        self.left_eye_anchor = self.hmd_anchor.attach_new_node('left-eye')
        self.right_eye_anchor = self.hmd_anchor.attach_new_node('right-eye')

        # Create the projection matrices for the left and right camera.
        # TODO: This should be updated in the update task in the case the user update the IOD
        self.projection_left = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Left, near, far))
        self.projection_right = self.coord_mat_inv * self.convert_mat(
            self.vr_system.getProjectionMatrix(openvr.Eye_Right, near, far))

        # Create the cameras and attach them in the tracking space
        left_cam_node = self.create_camera('left-cam', self.projection_left)
        right_cam_node = self.create_camera('right-cam', self.projection_right)

        self.left_cam = self.left_eye_anchor.attach_new_node(left_cam_node)
        self.right_cam = self.right_eye_anchor.attach_new_node(right_cam_node)

        # Create the renderer linked to each camera
        self.left_texture = self.create_renderer('left-buffer', self.left_cam,
                                                 width, height, msaa,
                                                 self.left_cb)
        self.right_texture = self.create_renderer('right-buffer',
                                                  self.right_cam, width,
                                                  height, msaa, self.right_cb)

        # The main camera is useless, so we disable it
        self.disable_main_cam()

        if hidden_area_mesh:
            # If the hidden area mesh is used, assign a mask on each camera to hide the opposite mesh
            left_cam_node.set_camera_mask(BitMask32.bit(0))
            right_cam_node.set_camera_mask(BitMask32.bit(1))
            self.attach_hidden_area_mesh(openvr.Eye_Left, self.left_eye_anchor,
                                         1)
            self.attach_hidden_area_mesh(openvr.Eye_Right,
                                         self.right_eye_anchor, 0)

        if replicate == 1:
            if self.verbose:
                print("Replicating left eye")
            self.replicate(self.left_texture)
        elif replicate == 2:
            if self.verbose:
                print("Replicating right eye")
            self.replicate(self.right_texture)
        else:
            if self.verbose:
                print("Eye replication disabled")

        if hasattr(self, 'init_action'):
            print(
                "WARNING: init_action() is deprecated and will be removed in a future release"
            )
            self.init_action()

        # Launch the main task that will synchronize Panda3D with OpenVR
        # TODO: The sort number should be configurable and by default be placed after the gc tasks.
        self.task = taskMgr.add(self.update_poses_task,
                                "openvr-update-poses",
                                sort=-1000)
Example #24
def init():
    """ Initialises openvr, returns a handle for use in all openvr calls """
    return openvr.init(openvr.VRApplication_Other)
Example #25
    j.buttons = [
        d['trigger'] == 1.0, d['trackpad_touched'], d['trackpad_pressed'],
        d['menu_button'], d['grip_button']
    ]
    new_msg = prev_unPacketNum != d['unPacketNum']
    return new_msg, j


if __name__ == '__main__':
    print("===========================")
    print("Initializing OpenVR...")
    retries = 0
    max_init_retries = 4
    while retries < max_init_retries:
        try:
            openvr.init(openvr.VRApplication_Utility)
            break
        except openvr.OpenVRError as e:
            print("Error when initializing OpenVR (try {} / {})".format(
                retries + 1, max_init_retries))
            print(e)
            retries += 1
            time.sleep(2.0)
    else:
        print("Could not initialize OpenVR, aborting.")
        print("Make sure the system is correctly plugged, you can also try")
        print("to do:")
        print("killall -9 vrcompositor vrmonitor vrdashboard")
        print("Before running this program again.")
        exit(0)
Example #26
#!/usr/bin/env python3
# Script acquiring the base-station positions from a running SteamVR system.
# At least one object (HMD, controller or tracker) must be tracked for the
# script to work.
#
# The output can be directly copy-pasted into the Crazyflie firmware's
# lighthouse.c lighthouse deck driver.

import sys
import openvr

CENTER_AROUND_CONTROLLER = False

print("Openning OpenVR")
vr = openvr.init(openvr.VRApplication_Other)

print("OpenVR Oppened")
devices = {}
poses = vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
                                           openvr.k_unMaxTrackedDeviceCount)

if CENTER_AROUND_CONTROLLER:
    offset = None
    # Acquire offset
    for i in range(openvr.k_unMaxTrackedDeviceCount):
        if poses[i].bPoseIsValid:
            device_class = vr.getTrackedDeviceClass(i)
            if device_class == openvr.TrackedDeviceClass_Controller or \
               device_class == openvr.TrackedDeviceClass_GenericTracker:
                pose = poses[i].mDeviceToAbsoluteTracking
                offset = [pose[0][3], pose[1][3], pose[2][3]]
Example #27
 def initializePositioning(self):
     self.vrSystem = openvr.init(openvr.VRApplication_Other)
     self.baseStationController = BaseStationController(self)
Example #28
def take_steamvr_images(save_dir, num_images, delay_between_images):
    plt.show()

    openvr.init(openvr.VRApplication_Scene)

    convert_coordinate_system = np.identity(4)
    convert_coordinate_system[:3, :3] = Rotation.from_euler(
        'XYZ', (180, 0, 0), degrees=True).as_matrix()

    device = openvr.k_unTrackedDeviceIndex_Hmd

    num_cameras = openvr.VRSystem().getInt32TrackedDeviceProperty(
        device, openvr.Prop_NumCameras_Int32)

    camera_to_head_mat = (openvr.HmdMatrix34_t * num_cameras)()

    openvr.VRSystem().getArrayTrackedDeviceProperty(
        device, openvr.Prop_CameraToHeadTransforms_Matrix34_Array,
        openvr.k_unHmdMatrix34PropertyTag, camera_to_head_mat,
        48 * num_cameras)

    cam = openvr.VRTrackedCamera()

    cam_handle = cam.acquireVideoStreamingService(device)

    width, height, buffer_size = cam.getCameraFrameSize(
        device, openvr.VRTrackedCameraFrameType_MaximumUndistorted)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel('x axis - metres')
    ax.set_ylabel('z axis - metres')
    ax.set_zlabel('y axis - metres')
    ax.set_xlim(-0.5, 0.5)
    ax.set_ylim(-0.5, 0.5)
    ax.set_zlim(0, 1)

    save_dir = ColmapFolder(save_dir)

    db = COLMAPDatabase.connect(save_dir.database_path)
    db.create_tables()

    init_params = np.array(
        (420.000000, (width / num_cameras) / 2, height / 2, 0.000000))

    camera_model = CAMERA_MODEL_NAMES['SIMPLE_RADIAL']

    cameras = {}
    camera_to_head_transforms = {}

    for i in range(num_cameras):
        cam_id = db.add_camera(camera_model.model_id, width / 2, height,
                               init_params)
        camera_to_head_transforms[cam_id] = hmd_matrix_to_numpy(
            camera_to_head_mat[i])
        cameras[cam_id] = Camera(id=cam_id,
                                 model=camera_model.model_name,
                                 width=width / num_cameras,
                                 height=height,
                                 params=init_params)

    poses = []  # Let waitGetPoses populate the poses structure the first time
    cam_positions = []

    images = []

    for i in range(num_images):

        poses, game_poses = openvr.VRCompositor().waitGetPoses(poses, None)
        hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]

        if not hmd_pose.bPoseIsValid:
            print("Pose not valid")
            continue

        world_to_head = hmd_matrix_to_numpy(hmd_pose.mDeviceToAbsoluteTracking)

        world_to_cams = {
            id_: world_to_head @ head_to_cam @ convert_coordinate_system
            for (id_, head_to_cam) in camera_to_head_transforms.items()
        }

        image_buffer = (ctypes.c_ubyte * buffer_size)()
        try:
            cam.getVideoStreamFrameBuffer(
                cam_handle, openvr.VRTrackedCameraFrameType_MaximumUndistorted,
                image_buffer, buffer_size)
        except Exception:
            print("Error getting video stream buffer")
            continue

        image_array = np.array(image_buffer)

        image_array = image_array.reshape((height, width, 4))

        image_array = image_array[:, :, 0:3]

        image_array = np.clip(image_array, 0, 255)

        for j, (cam_id, world_to_cam) in enumerate(world_to_cams.items()):
            image = Image.fromarray(
                image_array[:,
                            int(width / num_cameras) *
                            j:int(width / num_cameras) * (j + 1), :])

            name = f"{i:03d}_cam{j}.jpg"

            image.save(save_dir.images_path / name)

            image_obj = read_write_model.Image(
                camera_id=cam_id,
                name=name,
                transformation_matrix=world_to_cam)

            images.append(image_obj)

            draw_axes(ax, transform_mat=world_to_cam)

        fig.show()

        fig.canvas.draw()
        fig.canvas.flush_events()
        time.sleep(delay_between_images)
        print(f"Picture taken :{i}")

    image_dict = {}

    print("All pictures taken")

    with open(save_dir.geo_reg_path, 'w') as geo_reg_file:

        for image in images:
            image_id = db.add_image(image=image)
            image.id = image_id
            image_dict[image_id] = image
            geo_reg_file.write(
                f"{name} {' '.join(map(str, image.transformation_matrix[0:3, 3]))}\n"
            )

    read_write_model.write_model(cameras, image_dict, {}, save_dir.sparse_path,
                                 '.txt')

    db.commit()
    db.close()

    print("Metadata saved")

    openvr.shutdown()
    plt.show()
Example #29
def vive(label, filename, video_length_in_s, process_name, autoplay_enabled, overwrite_enabled, acr):
    """
    Obtains and saves the data captured by the HTC Vive in a JSON file.

    :param label: The label of the subject.
    :param filename: The filename of the video.
    :param video_length_in_s: The length of the video in s.
    :param process_name: The name of the process to monitor.
    :param autoplay_enabled: Whether automatic playback of the video is enabled or not.
    :param overwrite_enabled: Whether overwriting the current dataset is enabled or not.
    """
    video_playing_started = False  # Necessary for getting information about playback state of video
    data = {'filename': filename, 'video_length_in_s': float(video_length_in_s),
            'hmd': 'vive', 'pitch_yaw_roll_data_hmd': []}
    if acr:
        data["acr"] = 0

    if autoplay_enabled:
        # Sleep a few seconds until the video is completely loaded by Whirligig
        if process_name == "Whirligig64bit.exe":
            helpers.autoplay_sleep(float(4))  # Adjust value based on your computing power (between 4 and 6 is normal)
    else:
        print("If the player has loaded the video, press the SPACE key to start the measurement.")

    # Initialize OpenVR as background application, so that the player won't crash.
    openvr.init(openvr.VRApplication_Background)
    while not video_playing_started:
        if autoplay_enabled:
            pyautogui.press("space")
            start_time = timer()
            print("Recording of Pitch/Yaw/Roll/Time data will start now...")
            video_playing_started = True
        else:
            if keyboard.is_pressed("space"):
                start_time = timer()
                print("Recording of Pitch/Yaw/Roll/Time data will start now...")
                video_playing_started = True
    try:
        pyautogui.press("r")  # Press R key to assure the user starts watching the video at initial position
        current_playback_state = timer() - start_time
        pitch, yaw, roll = getPYRDataFromVive()
        # Define the current position of the user (very first yaw value) as initial position
        initial_yaw = yaw
        # Get Head orientation data from Vive in loop until the playback is finished
        while video_length_in_s > current_playback_state:
            pitch, yaw, roll = getPYRDataFromVive()
            current_playback_state = timer() - start_time
            # Adjust the yaw value according to the very first recorded yaw value
            yaw = helpers.shift_yaw(yaw, initial_yaw)
            if current_playback_state is not None:
                data["pitch_yaw_roll_data_hmd"].append({'pitch': pitch, 'yaw': yaw, 'roll': roll,
                                                        'sec': current_playback_state})
                # By adjusting this value, you can set the recording frequency (currently 200Hz).
                time.sleep(float(0.005))
        if acr:
            data["acr"] = helpers.get_acr_by_cmd_input()
        helpers.write_to_json(label, data, overwrite_enabled)
        openvr.shutdown()
        print("Measurement done. Opening the next video if available.")
    except KeyboardInterrupt:  # Break if ctrl+c is pressed
        openvr.shutdown()
        print("Current Measurement is interrupted and will not be saved. The program will exit now.")