def __init__(self, trackers, obj=None):
    """
    Initialize the object which handles the grabs and zooming.

    Args:
        trackers: sequence of tracker linkables (at most 2) driving the hands.
        obj: optional object to manipulate; stored for later use.

    Raises:
        ValueError: if more than 2 trackers are passed.
    """
    viz.EventClass.__init__(self)
    self._obj = obj
    if len(trackers) > 2:
        raise ValueError(
            "Currently only up to 2 trackers are supported. Please use less trackers."
        )
    # variables to save the current state
    self._grabs = {0: None, 1: None}  # per-tracker grab state (None = not grabbing)
    self._zooming_object = None
    # Grabber objects which follow the controllers
    self._grabbers = []
    for tracker in trackers:
        tmp = viz.addGroup()
        viz.link(tracker, tmp)
        self._grabbers.append(tmp)
    # Zooming objects which follow the controllers and are needed for the zooming calculation.
    # Different objects than grabbers are needed to use different linking modes.
    self._zoomers = []
    for tracker in trackers:
        tmp = viz.addGroup()
        # position-only link (viz.LINK_POS): orientation is not propagated
        viz.link(tracker, tmp, mask=viz.LINK_POS)
        self._zoomers.append(tmp)
def _setup_vizard_configuration(self):
    """
    Setup some stuff for Vizard: multisampling, head light, mouse
    handling for the gamepad scheme, and visible models for the two
    hand trackers.
    """
    viz.setMultiSample(cfg.multi_sample_level)
    # Disable the head light for better lighting
    viz.MainView.getHeadLight().disable()
    # Disable the mouse with controller controls.
    # Without deactivation, the controller would move the mouse.
    if cfg.control_scheme == "controller":
        # Trap the mouse in the graphics window.
        viz.mouse.setTrap(viz.ON)
        # Make the mouse invisible.
        viz.mouse.setVisible(viz.OFF)
    # Display models for the trackers
    tracker_names = ['r_hand_tracker', 'l_hand_tracker']
    for tracker_name in tracker_names:
        controller = vizconnect.getRawTracker(tracker_name)
        if cfg.control_scheme == "steamvr":
            # a bare VizGroup means vizconnect has no real device connected
            if not isinstance(controller, viz.VizGroup):
                # Display HTC Vive controller model if the controller is connected
                model = controller.addModel()
                viz.link(controller, model)
        elif cfg.control_scheme == "controller":
            # Display a sphere, when controlled with a gamepad
            model = vizshape.addSphere(.05, color=viz.GRAY)
            viz.link(controller, model)
def calibrateHand():
    """
    Task: calibrate the Optotrak hand marker against the physical start
    position.

    Collects 100 distinct position samples, averages them, and re-links
    the hand through an optofilter with the measured offset applied.
    """
    if exp.optotrak:
        optoLink = viz.link(hand, m_hand)
    text_line1 = create_text2d(
        'Please put finger on physical start position\n press -space- to start calibration'
    )
    yield viztask.waitKeyDown(' ')
    text_line1.message("Calibration in progress")
    #collect samples -- only keep values that changed, so each sample
    #corresponds to a fresh tracker update
    samples = []
    old_sample = m_hand.getPosition()
    samples.append(old_sample)
    while len(samples) < 100:
        new_sample = m_hand.getPosition()
        if not (new_sample == old_sample):
            samples.append(new_sample)
            old_sample = new_sample
        # NOTE(review): viztask.waitTime is the usual yield condition in a
        # task function -- confirm viz.waitTime is intended here
        yield viz.waitTime(0.001)
    check_position = np.mean(samples, axis=0)
    position_offset = np.array(exp.start_pos) - np.array(check_position)
    print position_offset
    # +0.006 m vertical fudge added on top of the measured offset
    hand0 = optofilter.position(hand, offset=(position_offset[0], position_offset[1] + 0.006, position_offset[2]))
    if exp.optotrak:
        optoLink = viz.link(hand0, m_hand)
    text_line1.message("Calibration done")
    yield viz.waitTime(0.75)
    text_line1.visible(viz.OFF)
def main():
    """Entry point: run setup, start the master timer and wire up navigation/input."""
    global masterTimerG, recordingTimer
    setUp()
    masterTimerG = vizact.ontimer(0, masterTimer)  # fires every frame
    #recordingTimer = vizact.ontimer(0, recordCurrentSubjectStatus)
    if not KEYBOARD_NAVIGATE:
        addKeyPresses()
    else:
        # Setup keyboard/mouse tracker
        tracker = vizcam.addWalkNavigate(moveScale=10.0)
        tracker.setPosition([0,1.8,0])
        viz.link(tracker,viz.MainView)
        viz.mouse.setVisible(False)
    # debug helper: print the current viewpoint pose on 'p'
    def printLocation():
        print viz.MainView.getPosition(), viz.MainView.getEuler()
    vizact.onkeydown('p', printLocation)
    #Add Wiimote support (kept disabled)
    # wii = viz.add('wiimote.dle')
    # #Connect to first available wiimote
    # wiimote = wii.addWiimote()
    # wiimote.led = wii.LED_4
    # #Add button functions
    # vizact.onsensordown(wiimote,wii.BUTTON_A,TurnFlyingOn)
    # vizact.onsensordown(wiimote,wii.BUTTON_B,TurnFlyingOff)
    # vizact.onsensordown(wiimote,wii.BUTTON_1,endChildHappy)
    #
    #Hacky bug fix below, I apologize -Mark
    if not FLYING_F and YOKE_F:
        viztask.schedule(fixStartHeliPos)
def CheckForMarkers():
    """
    Query SteamVR for the currently connected trackers and controllers
    and store them in the module-level ``Tracking`` and ``Controllers``
    lists.

    When ``DebugFlag`` is set, each device gets its own visible model
    linked to it for debugging.
    """
    global Tracking, Controllers
    Trackers = steamvr.getTrackerList()
    ControllerLocal = steamvr.getControllerList()
    Tracking = []
    Controllers = []
    # Lists are empty here; printed for run-log symmetry with the
    # populated print-out below.
    print("Trackers: ")
    print(Tracking)
    print("Controllers: ")
    print(Controllers)
    # Idiom fix: iterate the devices directly instead of keeping a
    # manually incremented index, and test DebugFlag by truthiness.
    for tracker in Trackers:
        Tracking.append(tracker)
        if DebugFlag:
            # attach the device's own model so it is visible in the scene
            viz.link(tracker, tracker.addModel())
    for controller in ControllerLocal:
        Controllers.append(controller)
        if DebugFlag:
            viz.link(controller, controller.addModel())
    print("Trackers: ")
    print(Tracking)
    print("Controllers: ")
    print(Controllers)
def reward_feedback(self, head_hits, duration, show_duration):
    """
    Task: show up to three head-locked reward coins for ``show_duration``
    seconds, then hide and unlink them.

    One coin is always shown (completion); a second if ``duration`` <= 300
    (speed bonus); a third if ``head_hits`` <= 10 (precision bonus).
    Each coin is worth 0.33.
    """
    self.coin.visible(viz.ON)
    # completion coin, head-locked slightly to the left
    completed = viz.link(viz.MainView, self.coin)
    completed.preTrans([-.3, 0, 2])
    reward = .33
    if duration <= 300:
        # speed bonus coin
        dur_coin = self.coin.copy()
        fast = viz.link(viz.MainView, dur_coin)
        fast.preTrans([.2, 0, 2])
        reward += .33
    if head_hits <= 10:
        # precision bonus coin
        head_coin = self.coin.copy()
        precise = viz.link(viz.MainView, head_coin)
        precise.preTrans([.7, 0, 2])
        reward += .33
    print 'Reward:' + str(reward)
    yield viztask.waitTime(show_duration)
    # hide and unlink everything that was shown
    self.coin.visible(viz.OFF)
    completed.remove()
    if duration <= 300:
        dur_coin.visible(viz.OFF)
        fast.remove()
    if head_hits <= 10:
        head_coin.visible(viz.OFF)
        precise.remove()
def start_vr(self):
    """
    Task: connect the SteamVR HMD, first controller and body trackers,
    and create one LSL stream per rigid body (head, hand, arm, torso).

    ``assign_trackers`` runs every frame during the initial wait to decide
    which tracker index belongs to hand, arm and torso.
    """
    hmd = steamvr.HMD()
    if not hmd.getSensor():
        sys.exit('SteamVR HMD not detected')
    viz.link(hmd.getSensor(), viz.MainView)
    # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
    hmd_stream = self.subject.create_non_phasespace_rigid_body_stream('headRigid', 0)
    # stream 6dof of HMD as pos (x,y,z) and ori(x,y,z,w) --> quaternion
    vizact.onupdate(0, self.subject.update_and_push_rigid_body, viz.MainView, self.subject.head_sphere, hmd_stream)
    # connecting present controllers
    trackers = steamvr.getTrackerList()
    self.controller = steamvr.getControllerList()[0]
    print self.controller
    tracker_names = ['handRigid', 'armRigid', 'torsoRigid']
    find_out_tracker = vizact.onupdate(0, self.assign_trackers, trackers)
    # NOTE(review): original comment said "wait two seconds" but the code
    # waits 5 -- confirm which is intended. The wait lets assign_trackers
    # figure out which tracker is more to the front in z (= hand tracker).
    yield viztask.waitTime(5)
    find_out_tracker.remove()
    # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
    # stream 6dof as pos (x,y,z) and ori(x,y,z,w) --> quaternion
    hand_stream = self.subject.create_non_phasespace_rigid_body_stream(tracker_names[self.hand_tracker_id], 0)
    vizact.onupdate(0, self.subject.update_and_push_rigid_body, trackers[self.hand_tracker_id], self.subject.right_hand_sphere, hand_stream)
    # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
    arm_stream = self.subject.create_non_phasespace_rigid_body_stream(tracker_names[self.arm_tracker_id], 0)
    vizact.onupdate(0, self.subject.update_and_push_rigid_body, trackers[self.arm_tracker_id], None, arm_stream)
    # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
    torso_stream = self.subject.create_non_phasespace_rigid_body_stream(tracker_names[self.torso_tracker_id], 0)
    vizact.onupdate(0, self.subject.update_and_push_rigid_body, trackers[self.torso_tracker_id], None, torso_stream)
def setUpSound():
    """
    Set up spatialized (vizsonic) audio for the HMD condition: a listener
    group at head height, room reverb/shaker settings, ambient wind, and
    wind/trumpet sources attached to a group above the listener.
    """
    global windAudio, endingAudio, aboveLocation, endingAudioBasic, trumpetAudio
    if USE_HMD_F:
        #Adjusted head location to spatalize towards center of room
        #Next, create 4 subviews to attach sounds to around the room for whooshing
        userView = viz.addView()
        userView.setPosition(0,1.6,0)
        headLocation = viz.addGroup()
        viz.link(userView, headLocation)
        #Good adjustment for reverb and room, might not work as well for the city
        #vizsonic.setReverb (30.0, 0.2, 0.5, 0.9, 0.1)
        vizsonic.setSimulatedRoomRadius(30,30)
        vizsonic.setShaker(1.0)
        viz.setOption('sound3d.useViewRotation', 0)
        #Set auralizer to play towards center of room
        viz.setListenerSound3D(headLocation)
        #Turn on sound debugging?
        viz.setDebugSound3D(False)
        #Turn on windy city sounds
        vizsonic.setAmbient('windy_edit.wav', 0.75, 0)
        #Configure wind playing based on speed, play at higher location
        aboveView = viz.addView()
        aboveView.setPosition(0,3,0)
        aboveLocation = viz.addGroup()
        viz.link(aboveView, aboveLocation)
        # sounds start stopped; volume driven elsewhere based on speed
        windAudio = aboveLocation.playsound('windMono.wav', viz.STOP, volume=WIND_MIN_VOLUME)
        trumpetAudio = aboveLocation.playsound('audio/hero_trumpet.wav', viz.STOP, volume = 0.75)
    chooseExpStateNLoadSound()
def recordViewAnimation():
    """
    Demo: record the MainView's transform with an AnimationPathRecorder,
    toggled with the 'r' key and saved to 'test_animation.txt'.
    """
    ### replace with your own application setup
    import viz
    import vizcam
    import vizact
    viz.setMultiSample(4)
    viz.go()
    vizcam.WalkNavigate()
    piazza = viz.addChild("piazza.osgb")
    ###
    ### Add this at the bottom
    '''
    Create an AnimationPathRecorder and link it to any node, which needs to have
    it's transformation documented. If 'start' is set to True the recording will
    start automatically, otherwise you need to start manually.
    you can specify the file name under which the animation will be saved.
    '.txt' is automatically added.
    '''
    rec = AnimationPathRecorder(start=False)
    viz.link(viz.MainView, rec)
    # toggle path recording and saving finished recording to a file named 'test_animation.txt'
    def toggleRecord(rec):
        if rec.isRunning():
            rec.stop()
            rec.writeToFile("test_animation")
            print "Animation path saved to test_animation.txt"
        else:
            rec.start()
            print "Animation path recording started."
    vizact.onkeydown('r', toggleRecord, rec)
def attachViewToGlasses(self, visNode):
    """
    Attach the cave's head tracking to ``visNode`` (the glasses) and set
    up a keyboard-driven cave origin for flying through the scene.
    """
    """
    Create tracker object that represents the users head position, specifically
    the center of the eyes. The position provided by the head tracker must be
    in the same reference frame as the cave wall coordinates. This will normally
    be a tracking sensor, but for this example we will simulate a head tracker
    using the keyboard (WASD keys).
    """
    self.head_tracker = viz.link(visNode, viz.NullLinkable, srcFlag=viz.ABS_PARENT)
    """
    Create CaveView object for manipulating the virtual viewpoint.
    cave_origin is a node that controls the position of the cave within the
    virtual world. For example, if you wanted to simulate the cave user flying
    through an environment, you would apply the transformation to the
    cave_origin node.
    """
    cave_origin = vizcave.CaveView(self.head_tracker)
    """
    The cave_origin node is a standard Vizard node that you can apply any
    position/rotation to. In this example we will create a keyboard/mouse
    tracker (using arrow keys) and link it to the cave_origin node, allowing
    us to fly the cave user through the virtual environment.
    """
    origin_tracker = viztracker.KeyboardMouse6DOF()
    origin_link = viz.link(origin_tracker, cave_origin)
    origin_link.setMask(viz.LINK_POS)  # origin follows position only
    #head_tracker.setMask(viz.LINK_POS)
    """
    Pass the head tracker to the cave object so it can automatically update the
    view frustums every frame based on the current head position relative to
    each wall.
    """
    self.cave.setTracker(self.head_tracker)
def steamvr_setup_vm2(self, right_hand_object, head_object):
    """
    Connect the SteamVR HMD and trackers and create one LSL stream per
    rigid body (head plus 'handRigid'/'torsoRigid' trackers).

    Args:
        right_hand_object: node passed along with the first tracker's updates.
        head_object: node passed along with the HMD's updates.
    """
    hmd = steamvr.HMD()
    if not hmd.getSensor():
        sys.exit('SteamVR HMD not detected')
    viz.link(hmd.getSensor(), viz.MainView)
    # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
    hmd_stream = self.create_non_phasespace_rigid_body_stream(
        'headRigid', 0)
    # stream 6dof of HMD as pos (x,y,z) and ori(x,y,z,w) --> quaternion
    vizact.onupdate(18, self.update_and_push_rigid_body, viz.MainView, head_object, hmd_stream)
    # connecting present controllers
    trackers = steamvr.getTrackerList()
    self.controller = steamvr.getControllerList()[0]
    tracker_names = ['handRigid', 'torsoRigid']
    for i in range(len(trackers)):
        # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
        tracker_stream = self.create_non_phasespace_rigid_body_stream(
            tracker_names[i], 0)
        # stream 6dof as pos (x,y,z) and ori(x,y,z,w) --> quaternion
        print(trackers[i].getData())
        print(trackers[i].getPosition())
        if i == 0:
            # tracker 0 is treated as the hand -- presumably list order is
            # stable; confirm against the SteamVR device enumeration
            vizact.onupdate(19, self.update_and_push_rigid_body, trackers[i], right_hand_object, tracker_stream)
        else:
            vizact.onupdate(19, self.update_and_push_rigid_body, trackers[i], None, tracker_stream)
def ZombieGame():
    """
    Task: top-level game loop -- intro, character creation, gameplay and
    win/lose screen.  Repeats while the post-game screen reports a
    truthy restart status.
    """
    Status = True
    while (Status):
        print viz.MainScene
        viz.scene(1)
        ActiveProgram = pregame.Intro()  #Instructions info
        yield ActiveProgram.done.wait()  #Wait for intro screen to be done
        sceneManager.switchtoScene('Charactor')
        ActiveProgram = pregame.CharacterCreation()
        playerName = yield ActiveProgram.done.wait()  #Wait for character creation to be done
        playerName = playerName.data
        sceneManager.switchtoScene('GameWorld')
        ActiveProgram = GameWrap.GameWrap(playerName)
        # camera follows the player model
        viz.link(viz.MainView, ActiveProgram.player.model)
        winStatus = yield ActiveProgram.done.wait()
        print 'You won?'
        print winStatus.data
        viz.scene(4)
        ActiveProgram = postgame.WinLose(winStatus.data)
        Status = yield ActiveProgram.done.wait()
        Status = Status.data  # restart flag from the win/lose screen
        print 'SHOULD I RESTART THE GAME'
        print Status
def _phone_ring(phone_node):
    """
    Ring the phone: play the ringtone on ``phone_node`` and toggle the
    spotlight marking the ringing phone, creating it on the first call.
    """
    global phone_light
    global enabled
    print 'phone will be ringing now'
    phone_node.playsound(RINGTONE_FILE)
    # phone_light
    if phone_light:
        # light already exists: flip it on/off with each ring
        if not enabled:
            phone_light.enable()
            enabled = True
        else:
            phone_light.disable()
            enabled = False
    else:
        # first ring: create the spotlight at the phone's pose
        phone_light = vizfx.addSpotLight(scene=SCENE)
        pos = phone_node.getPosition()
        euler = phone_node.getEuler()
        phone_light.position(pos[0],pos[1],pos[2])
        phone_light.setEuler(-euler[0],euler[1],euler[2])
        phone_light.direction(1,1,0)
        phone_light.spread(10)
        phone_light.intensity(2)
        phone_light.spotexponent(1)
        # NOTE(review): viz.link(source, target) makes the *target* follow the
        # source; as written the phone follows the light. If the intent is the
        # light following the phone, the arguments are reversed -- confirm.
        viz.link(phone_light, phone_node)
        phone_light.enable()
        enabled = True
def __init__(self):
    """
    Set up a single-wall (front projection) vizcave CAVE driven by a
    keyboard 6DOF tracker linked to the main view.
    """
    viz.EventClass.__init__(self)
    self.EH = 1.2  # eye height (m)
    Proj_Dist = 1.0  #front projection distance to Eye
    Proj_V_F = 1.115  #vertical extent of projection (m)
    Proj_H_F = 1.985  #1.96#horizontal extent of projection (m)
    Proj_HfG = .665  #Front projection height from ground.
    FB = Proj_HfG  #FrontBottom
    FT = Proj_HfG + Proj_V_F  #FrontTop
    FL = -Proj_H_F / 2  #Front Left
    FR = Proj_H_F / 2  #Front Right
    FC0 = FL, FB, Proj_Dist  # Front Wall: FC0,FC1,FC2,FC3
    FC1 = FR, FB, Proj_Dist
    FC2 = FL, FT, Proj_Dist
    FC3 = FR, FT, Proj_Dist
    self.FrontWall = vizcave.Wall(upperLeft=FC2, upperRight=FC3, lowerLeft=FC0, lowerRight=FC1, name='Front Wall')  #Create front wall
    viz.setMultiSample(8)  #set anti-aliasing
    #Initialize graphics window
    viz.go()
    # viz.eyeheight(1.2)
    #for monitor in viz.window.getMonitorList():
    #    print monitor.name
    #    print ' ',monitor
    viz.window.setFullscreenMonitor(2)
    viz.window.setFullscreen(viz.ON)
    self.cave = vizcave.Cave(stereo=0)  # mono rendering
    self.cave.addWall(self.FrontWall)  #,window=self.frontWindow)
    # print ("1_N: ", self.cave.getNearPlane())  #default is [.1, -1.]
    # print ("1_F: ", self.cave.getFarPlane())
    #set near and far plane.
    self.cave.setNearPlane(1.0)
    self.cave.setFarPlane(100.0)
    view = viz.MainView
    self.track = viztracker.Keyboard6DOF()  #tracker object
    self.track.setPosition(0, self.EH, 0)
    viz.link(self.track, view)  #linked to mainview
    self.cave.setTracker(pos=self.track)
    ##Create CaveView object for manipulating the entire cave environment
    ##The caveorigin is a node that can be adjusted to move the entire cave around the virtual environment, it needs a tracker object to initialise it.
    self.caveview = vizcave.CaveView(self.track)
def MainTask(): """Top level task that controls the game""" # Display instructions and wait for key press to continue yield DisplayInstructionsTask() tracker = vizcam.addWalkNavigate(moveScale=2.0) tracker.setPosition([0, 2.5, 0]) viz.link(tracker, viz.MainView) viz.mouse.setVisible(False) # Create panel to display trial results resultPanel = vizinfo.InfoPanel('', align=viz.ALIGN_CENTER, fontSize=25, icon=False, key=None) resultPanel.visible(False) # Reset score score = 0 UpdateScore(score) sensors() found = True number = 0 while found and number < 7: # Perform a trial found = False found = yield TrialTask() # Update score and display status text if found: score += 1 UpdateScore(score) resultPanel.setText(TRIAL_SUCCESS) else: viztask.schedule(FadeToGrayTask()) resultPanel.setText(TRIAL_FAIL) viz.mouse.setVisible(True) Run_function() #Display success/failure message resultPanel.visible(True) # Add delay before starting next trial yield viztask.waitTime(TRIAL_DELAY) resultPanel.visible(False) number += 1 # Disable gray effect gray_effect.setEnabled(False) #Display results and ask to quit or play again resultPanel.setText(RESULTS.format(score, TRIAL_COUNT)) resultPanel.visible(True) yield viztask.waitTime(5) resultPanel.visible(False) UpdateScore(10)
def _setupRift(PPT1, OH):
    """
    Create the Oculus Rift HMD and store it in the module-level ``hmd``.

    With PPT1 tracking, head trackers are wired up separately; otherwise
    only the Rift's orientation drives the main view.
    """
    import oculus
    global hmd
    hmd = oculus.Rift()
    if PPT1:
        _setupHeadTrackers(hmd, OH)
    else:
        # NOTE(review): bare ``MainView`` -- presumably imported from viz at
        # module level (elsewhere viz.MainView is used); confirm it resolves.
        viz.link(hmd.getSensor(), MainView, mask=viz.LINK_ORI)
def addUser(): global mainUser # ---- Trackers ---- # Initialize an empty composite object to store all the trackers # The composite.storeTracker() method is used to combine the individual trackers for the user's body within the composite composite = VU.VUCompositeTrackers() vrpn = viz.add('vrpn7.dle') headPos = vrpn.addTracker( 'PPT0@'+PPT_MACHINE,PPT_HEAD_ID) if not OPTICAL_HEADING: iLabs = viz.addExtension( 'inertiallabs.dle' ) headOri = iLabs.addSensorBus(port=INERTIALLABS_HEAD_PORT)[0] VU.onkeydownspecial('r', resetHead, headOri, 90 ) # ---- Display ---- import sensics sensics.zSight_60() if not OPTICAL_HEADING: headTracker = viz.mergeLinkable( headPos, headOri ) else: headTracker = headPos composite.storeTracker (composite.HEAD, headTracker ) viz.setOption('viz.fullscreen', 1 ) # Go fullscreen on monitor 1 viz.setOption('viz.fov', [37.5, 1.25]) # Set fov to match sensics specs viz.setOption('viz.setDisplayMode', [1280,1024]) # Change resolution of displays # ---- Input ---- wandpos = vrpn.addTracker('PPT0@' + PPT_MACHINE, PPT_WAND_ID) wandori = iLabs.addSensorBus(port=INERTIALLABS_HAND_PORT)[0] wandtracker = viz.mergeLinkable( wandpos, wandori ) # wandjoy = VU.VUJoystickPPTWandVRPN(hostname=PPT_MACHINE, markerid=PPT_WAND_ID+1) # wandflyer = VU.VUTrackerWandFlyerSmooth(wandjoy, wandtracker,accelerationSteps=Config.WAND_ACCELERATION_STEPS, decelerationSteps=Config.WAND_DECELERATION_STEPS, speed=Config.WAND_SPEED_SCALE, keystrokes=[Config.WAND_BUTTON1,Config.WAND_BUTTON2,Config.WAND_BUTTON3,Config.WAND_BUTTON4,Config.WAND_BUTTON5,Config.WAND_BUTTON6],buttonReset=None, buttonForward=None, buttonFist=None, oriSteer=False ) # wandflyer.getHandSensor().joystick = wandjoy # composite.addDriverNode(wandflyer) # composite.storeTracker( composite.RHAND, wandtracker ) #the following is beta: global main_sphere composite.storeTracker(composite.RHAND,wandtracker) viz.link(wandtracker, main_sphere) #end beta. 
#not sure if u need this in beta: composite.createRightHand(wandori) VU.onkeydownspecial('r', resetHand, wandori, 90 ) # composite.storeTracker( composite.RHAND, wandtracker ) #composite.createRightHand(wandori) # viz.link(wandtracker,ball) # ---- Avatar ---- composite.createAvatarNone() # ---- Finalize Composite ---- composite.finishTrackers() # Build up internal links for all the tracking devices composite.defineViewpoint() # Attach viewpoint to default location on the user mainUser = composite manager.addComposite(mainUser, 'Main-User')
def _setupVive(PPT1, OH, **kwargs):
    """
    Create the SteamVR (Vive) HMD, store it in the module-level ``hmd``
    and link its sensor to the main view.

    NOTE(review): both branches of ``if PPT1`` are identical -- unlike
    _setupRift, no PPT-specific head tracking is set up here. Confirm
    whether the PPT1 branch is intentionally a no-op distinction.
    """
    import steamvr
    global hmd
    hmd = steamvr.HMD(**kwargs)
    if PPT1:
        viveViewLink = viz.link(hmd.getSensor(), viz.MainView)
    else:
        viveViewLink = viz.link(hmd.getSensor(), viz.MainView)
def SetShoes():
    """
    Link the first two SteamVR trackers to the right and left foot models.

    Reads the module-level ``Tracking`` list (populated by the tracker
    discovery code) and stores the resulting links in ``RFootLink`` and
    ``LFootLink``.  The offset lowers each shoe model by
    ``Tracker_Offset`` in the tracker's (parent) reference frame so the
    model sits below the physical tracker.
    """
    global Tracking, RFootLink, LFootLink, rFoot, lFoot
    #global tFootExtra, TLink
    # Cleanup: the original kept unused rFootTracker/lFootTracker locals;
    # the trackers are referenced directly from Tracking instead.
    RFootLink = viz.link(Tracking[0], rFoot, offset=(0, -Tracker_Offset, 0, viz.REL_PARENT))
    LFootLink = viz.link(Tracking[1], lFoot, offset=(0, -Tracker_Offset, 0, viz.REL_PARENT))
def addBusLight(scene=viz.MainScene):
    """
    Load the bus light model and attach a white point light to it.

    Args:
        scene: Vizard scene to add the model and light to.

    Returns:
        The bus light model node (the light follows it via a link).
    """
    busLightModel = viz.add("Bus Light.vzf", scene=scene)
    light = viz.addLight(scene=scene, group=1)
    viz.link(busLightModel, light)  # light follows the model
    # 4-component position; presumably OpenGL-style w=1 (positional light) -- confirm
    light.position(0, 0, 0, 1)
    busLightModel.emissive(viz.WHITE)
    busLightModel.setEuler([0, 90.0, 0])
    light.color(viz.WHITE)
    intensity = 3
    light.intensity(intensity)
    light.quadraticattenuation(1)
    return busLightModel
def addAimer(imageFile='crosshair.png', size=.3, scene=viz.MainScene, incomingColor = viz.WHITE):
    """
    Create a crosshair: a text "O" on the HMD HUD when an HMD is present,
    otherwise a textured quad linked in front of the main view.

    Args:
        imageFile: texture used for the non-HMD quad crosshair.
        size: quad size for the non-HMD crosshair.
        scene: scene for the non-HMD crosshair.
        incomingColor: crosshair color; viz.GREEN also enlarges and shifts it.

    Returns:
        (crosshair node, crosshair link) -- also stored in module globals.
    """
    # print 'ADD AIMER CALLED'
    global crosshair
    global crosshairLink
    # scene=viz.MainScene
    if vhil_devkit.hmd is not None:
        hud = vhil_devkit.hmd.getHUD()
        crosshairPos = [0.0, 0.0, 1.0]
        crosshairScale = 0.05
        crosshair = viz.addText3D("O", pos=crosshairPos, parent=hud)
        crosshairScale = 0.025  # overrides the 0.05 above
        crosshair.setScale(crosshairScale, crosshairScale, crosshairScale)
        crosshair.drawOrder(1000000)  # draw on top of everything
        # NOTE(review): identity comparison with viz.GREEN -- works only if
        # callers pass the same constant object; confirm `==` is not needed.
        if incomingColor is viz.GREEN:
            crosshairScale = 0.05
            crosshair.setScale(crosshairScale, crosshairScale, crosshairScale)
            shift = -0.01
            crosshairPos = [shift, shift, 1.0]
            crosshair.setPosition(crosshairPos)
        crosshair.color(incomingColor)
        crosshair.disable(viz.INTERSECTION)
        crosshair.disable(viz.LIGHTING)
        crosshair.disable(viz.DEPTH_TEST)
        # crosshair = viz.addTexQuad(texture=viz.add(imageFile), size=size, scene=scene, parent=hud)
        crosshairLink = viz.link(viz.MainView, crosshair)
        crosshairLink.disable()  # HUD parenting positions it; link kept disabled
        # crosshairLink.preTrans([0,0,4])
        # crosshairLink.preTrans([0,0,1.0])
        # crosshair.disable(viz.INTERSECTION)
        # crosshair.disable(viz.LIGHTING)
        # # # Appear above other nodes
        # crosshair.drawOrder(100)
        # crosshair.disable(viz.DEPTH_TEST)
        print "HERE"
    else:
        crosshair = viz.addTexQuad(texture=viz.add(imageFile), size=size, scene=scene)
        crosshairLink = viz.link(viz.MainView, crosshair)
        crosshairLink.preTrans([0,0,4])  # hold the quad 4 m in front of the view
        crosshair.disable(viz.INTERSECTION)
        crosshair.disable(viz.LIGHTING)
        # Appear above other nodes
        crosshair.drawOrder(100)
        crosshair.disable(viz.DEPTH_TEST)
    return crosshair, crosshairLink
def _setupRift(PPT1, OH, **kwargs):
    """
    Create the Oculus Rift HMD (forwarding ``kwargs``) and store it in
    the module-level ``hmd``.

    With PPT1, head tracking is delegated to _setupHeadTrackers.
    Otherwise the Rift drives the main view directly: position+orientation
    when RIFT_CAM (positional camera) is available, orientation only if not.
    """
    import oculus
    global hmd
    hmd = oculus.Rift(**kwargs)
    if PPT1:
        oculusViewLink = _setupHeadTrackers(hmd, OH)
    else:
        if RIFT_CAM:
            oculusViewLink = viz.link(hmd.getSensor(), MainView, mask=viz.LINK_POS|viz.LINK_ORI)
            oculusViewLink.postTrans(HOME_POS)  # shift tracked origin to home position
        else:
            oculusViewLink = viz.link(hmd.getSensor(), MainView, mask=viz.LINK_ORI)
            oculusViewLink.postEuler(HOME_ORI)  # rotate to home orientation
def configureSound():
    """
    Configure vizsonic spatial audio: a listener sub-view at head height,
    room reverb, simulated room radius and shaker output.
    """
    global subview
    subview = viz.addView()
    subview.setPosition(0,1.6,-1)
    headLocation = viz.addGroup()
    viz.link(subview, headLocation)  # group follows the listener view
    vizsonic.setReverb (6.0, 0.2, 0.5, 0.9, 0.1)
    vizsonic.setSimulatedRoomRadius(3,1.5)
    vizsonic.setShaker(1.0)
    viz.setOption('sound3d.useViewRotation', 0)
    #Set auralizer to play towards center of room
    viz.setListenerSound3D(subview)
    #Turn on sound debugging?
    viz.setDebugSound3D(False)
def addPoliceLights(scene=viz.MainScene):
    """
    Build a white police-beacon spotlight with a glowing sphere shell.

    The sphere is created with flipped faces (visible from inside) and
    linked to the light so it follows it.

    Args:
        scene: Vizard scene the light and sphere are added to.

    Returns:
        The spotlight node.
    """
    beacon = vizfx.addSpotLight(scene=scene)
    beacon.setEuler(-105, 0, 0)
    beacon.color(viz.WHITE)
    beacon.emissive(viz.WHITE)
    beacon.spotexponent(1)
    beacon.quadraticattenuation(1)
    beacon.intensity(10)
    shell = vizshape.addSphere(radius=0.4, flipFaces=True, scene=scene)
    shell.color(viz.WHITE)
    shell.emissive(viz.WHITE)
    shell.alpha(1)
    viz.link(beacon, shell)
    return beacon
def setUpNav():
    """
    Set up navigation: Rift HMD pre-multiplied onto the navigation node
    when RIFT is set, plain walk navigation otherwise.
    """
    global RIFT
    global useKeyboard
    global viewLink
    global home
    global navigationNode
    if RIFT:
        #add oculus settings
        hmd = oculus.Rift()
        # NOTE(review): this direct sensor->MainView link is immediately
        # superseded by viewLink below (which pre-multiplies the sensor) --
        # confirm the first link is intentional.
        viz.link(hmd.getSensor(), viz.MainView)
        viewLink = viz.link(navigationNode, viz.MainView)
        viewLink.setOffset(home)  # start at the home position
        viewLink.preMultLinkable(hmd.getSensor())  # apply HMD pose on top of navigation
    else:
        vizcam.addWalkNavigate()
def addAppendages(avatar, numberOfArms): global chestHorn, leftShoulderHorn, rightShoulderHorn, headHorn #adding virtual models of arms if some_global_variables.isLeftHanded == 0: #right-handed some_global_variables.appendage3 = addArm(PATH_TO_STUFF + "skin_tones/thirdArm.png", resources.RIGHT_ARM) some_global_variables.appendage4 = addArm(PATH_TO_STUFF + "skin_tones/fourthArm.png", resources.LEFT_ARM) some_global_variables.appendage5 = addArm(PATH_TO_STUFF + "skin_tones/fifthArm.png", resources.RIGHT_ARM) else: #left-handed some_global_variables.appendage3 = addArm(PATH_TO_STUFF + "skin_tones/thirdArm.png", resources.LEFT_ARM) some_global_variables.appendage4 = addArm(PATH_TO_STUFF + "skin_tones/fourthArm.png", resources.RIGHT_ARM) some_global_variables.appendage5 = addArm(PATH_TO_STUFF + "skin_tones/fifthArm.png", resources.LEFT_ARM) TouchCube.thirdAppendage = some_global_variables.appendage3 #adding horns for attaching the arms to chestHorn = viz.addGroup() chestHornLink = viz.link(avatar.getBone('Bip01 Spine2'), chestHorn) chestHornLink.preTrans([0.0, 0.0, 0.0]) chestHornLink.preEuler([-90.0, 0.0, 0.0]) leftShoulderHorn = viz.addGroup() leftShoulderHornLink = viz.link(avatar.getBone('Bip01 L Clavicle'), leftShoulderHorn) leftShoulderHornLink.preTrans([-0.2, 0.0, 0.1]) leftShoulderHornLink.preEuler([-90.0, 0.0, 0.0]) rightShoulderHorn = viz.addGroup() rightShoulderHornLink = viz.link(avatar.getBone('Bip01 R Clavicle'), rightShoulderHorn) rightShoulderHornLink.preTrans([0.2, 0.0, 0.1]) rightShoulderHornLink.preEuler([-90.0, 0.0, 0.0]) headHorn = viz.addGroup() headHornLink = viz.link(avatar.getBone('Bip01 Head'), headHorn) headHornLink.preTrans([0.0, 0.2, 0.1]) headHornLink.preEuler([-90.0, 0.0, 0.0]) #setting the horns for fourth and fifth arms if some_global_variables.isLeftHanded == 0: #right-handed some_global_variables.appendage4.setParent(leftShoulderHorn) else: #left-handed some_global_variables.appendage4.setParent(rightShoulderHorn) 
some_global_variables.appendage5.setParent(headHorn) #switching horn attachment to the appendages based on the number of arms to be used switchControlSchema(numberOfArms)
def link_pencil(pencil_3d_object, tracker, tool):
    """
    Link a pencil model to a tracker and register the resulting link with
    a drawing tool.

    Args:
        pencil_3d_object: node of the pencil model that should follow the tracker.
        tracker: linkable driving the pencil (e.g. a controller sensor).
        tool: object linked to the draw link -- presumably a vizard tools
            pencil; confirm against the caller.

    Returns:
        None
    """
    draw_link = viz.link(tracker, pencil_3d_object)
    # post-multiply by the main view so the pencil pose is relative to the viewpoint
    draw_link.postMultLinkable(viz.MainView)
    viz.link(draw_link, tool)
def loadStuffAtStart(): global city #loading fadeSphere# globals_oa.fadingSphere = vizshape.addSphere(0.5, flipFaces=True) globals_oa.fadingSphere.alpha(0) globals_oa.fadingSphere.color(viz.BLACK) sphereLink = viz.link(viz.MainView, globals_oa.fadingSphere) #loading the Crystal Ball Scene globals_oa.globeScene = vizfx.addChild(globals_oa.FILE_PATH_TO_GLOBE) globals_oa.globeScene.setScale([0.2,0.2,0.2]) globals_oa.globeScene.setPosition([-1,0,0]) globals_oa.globeScene.disable(viz.RENDERING) #load city scene assets city = vizfx.addChild(globals_oa.FILE_PATH_TO_CITY) city.setPosition([-8,0,11]) city.disable(viz.RENDERING) if not globals_oa.transportSetUpCalled: transport_vhil.setUp() if not globals_oa.loadAudioAssetsCalled: audioCache.loadAudioAssets() # if not globals_oa.initBoatCalled: # boat_scene.loadStuffAtStartAndHideThemALL() if not globals_oa.cg_scene_init_called: CG_scenes.loadStuffAtStartAndHideThemALL()
def __init__(self, fig):
    """
    Render a figure in a background thread and blit it onto a
    screen-aligned textured quad.

    Args:
        fig: figure whose canvas is rendered by computeFigureThread --
            presumably a matplotlib figure; confirm against that method.
    """
    # shared state between the render thread and the draw timer
    self.canvasData = viz.Data(lock=threading.Lock(), havenewData=False)
    self.fig = fig
    self.t = threading.Thread(target=self.computeFigureThread)
    self.t.start()
    #IMPORTANT: Wait for thread to finish before exiting
    vizact.onexit(self.t.join)
    #Create a blank texture to display canvas data
    self.tex = viz.addBlankTexture([1, 1])
    #Create onscreen quad to display texture
    self.quad = viz.addTexQuad(parent=viz.ORTHO, texture=self.tex)
    self.quad.alpha(0.5)
    # keep the quad centered in the main window
    self.link = viz.link(viz.MainWindow.CenterCenter, self.quad)
    # self.link.setOffset([400,200,0])
    self.drawer = vizact.ontimer(0, self.drawPlot)  # blit every frame
    self.rate_ctr = time.time()
    # on-screen draw-rate readout (hidden by default)
    self.drawrate_txt = viz.addText('', viz.SCREEN)
    self.drawrate_txt.setPosition(0, 0.01)
    self.drawrate_txt.scale(.7, .7)
    self.drawrate_txt.color(viz.WHITE)
    self.drawrate_txt.visible(0)
    self.rate = 0
    #scale variables to change size of plot image
    self.scale_x = 1
    self.scale_y = 1
def __init__(self, fig):
    """
    Render a figure in a background thread and blit it onto a
    screen-aligned textured quad.

    NOTE(review): this is a near-verbatim duplicate of the sibling
    plot-canvas __init__ in this file -- consider consolidating.

    Args:
        fig: figure whose canvas is rendered by computeFigureThread --
            presumably a matplotlib figure; confirm against that method.
    """
    # shared state between the render thread and the draw timer
    self.canvasData = viz.Data(lock = threading.Lock(), havenewData=False )
    self.fig = fig
    self.t = threading.Thread(target=self.computeFigureThread)
    self.t.start()
    #IMPORTANT: Wait for thread to finish before exiting
    vizact.onexit(self.t.join)
    #Create a blank texture to display canvas data
    self.tex = viz.addBlankTexture([1,1])
    #Create onscreen quad to display texture
    self.quad = viz.addTexQuad(parent=viz.ORTHO,texture=self.tex)
    self.quad.alpha(0.5)
    # keep the quad centered in the main window
    self.link = viz.link(viz.MainWindow.CenterCenter,self.quad)
    # self.link.setOffset([400,200,0])
    self.drawer = vizact.ontimer(0, self.drawPlot)  # blit every frame
    self.rate_ctr = time.time()
    # on-screen draw-rate readout (hidden by default)
    self.drawrate_txt = viz.addText('', viz.SCREEN)
    self.drawrate_txt.setPosition(0,0.01)
    self.drawrate_txt.scale(.7,.7)
    self.drawrate_txt.color(viz.WHITE)
    self.drawrate_txt.visible(0)
    self.rate = 0
    #scale variables to change size of plot image
    self.scale_x = 1
    self.scale_y = 1
def addAppendage(avatar, skinChoices, skin = 0, appendage = 0):
    """
    Create the third appendage (arm or cylinder) and attach it to the
    avatar's spine via a horn group node.

    Args:
        avatar: avatar whose 'Bip01 Spine' bone anchors the appendage.
        skinChoices: lookup of skin-tone file-name fragments.
        skin (int): index into skinChoices; 9 selects the '010' texture.
        appendage (int): 0 = third arm model, 1 = cylinder model.
    """
    global thirdAppendage, usingInvisibleRod
    thirdAppendageHorn = viz.addGroup()
    if(appendage == 0):
        #Third Arm
        usingInvisibleRod = False
        thirdAppendage = viz.addChild(resources.ARM)
        if skin == 9:
            # skin index 9 maps to the '010' texture file
            texture = viz.addTexture("skin_tones/010.png")
            texture.wrap(viz.WRAP_S,viz.REPEAT)
            texture.wrap(viz.WRAP_T,viz.REPEAT)
        else:
            texture = viz.addTexture("skin_tones/00" + skinChoices[skin] + ".png")
            texture.wrap(viz.WRAP_S,viz.REPEAT)
            texture.wrap(viz.WRAP_T,viz.REPEAT)
        thirdAppendage.texture(texture)
        thirdAppendage.emissive([.75, .75, .75])
        thirdAppendage.setScale(resources.ARM_SCALE)
    elif(appendage == 1):
        #Cylinder
        usingInvisibleRod = False
        thirdAppendage = viz.addChild(resources.CYLINDER)
        thirdAppendage.setScale(resources.ARM_SCALE)
    thirdAppendage.setParent(thirdAppendageHorn)
    global tahLink
    # horn follows the spine bone, rotated to point forward
    tahLink = viz.link(avatar.getBone('Bip01 Spine'), thirdAppendageHorn)
    global offsetOp
    offsetOp = tahLink.preTrans([0, 0.3, 0.1])
    tahLink.preEuler([-90.0, 0.0, 0.0])
    thirdAppendage.collideBox()
    thirdAppendage.disable(viz.DYNAMICS)  # collision shape only, no physics simulation
    TouchCube.thirdAppendage = thirdAppendage
def __init__(self): super(self.__class__,self).__init__() # --Override Key commands self.KEYS = { 'forward' : 'w' ,'back' : 's' ,'left' : 'a' ,'right' : 'd' ,'camera' : 'c' ,'restart' : viz.KEY_END ,'home' : viz.KEY_HOME ,'utility' : ' ' ,'reset' : 0 ,'showMenu' : 1 ,'down' : 2 ,'orient' : 3 ,'up' : 4 ,'mode' : 5 ,'builder' : 6 ,'walk' : 7 ,'angles' : 8 ,'road' : 10 ,'esc' : 999 ,'slideFar' : 0 ,'slideNear': 180 ,'env' : '-' ,'grid' : '-' ,'snapMenu' : viz.KEY_CONTROL_L ,'interact' : viz.MOUSEBUTTON_LEFT ,'rotate' : viz.MOUSEBUTTON_RIGHT ,'proxi' : 'p' ,'viewer' : 'o' ,'collide' : 'c' ,'stereo' : 'm' ,'hand' : 'h' ,'capslock' : viz.KEY_CAPS_LOCK } # Get device from extension if not specified self.device = None if self.device is None: allDevices = getDevices() if allDevices: self.device = allDevices[0] else: viz.logError('** ERROR: Failed to detect Joystick') # Connect to selected device self.joy = getExtension().addJoystick(self.device) if not self.joy: viz.logError('** ERROR: Failed to connect to Joystick') return None # Set dead zone threshold so small movements of joystick are ignored self.joy.setDeadZone(0.2) #Override parameters self.ORIGIN_POS = [0,self.EYE_HEIGHT,0] self.VIEW_LINK.remove() self.VIEW_LINK = viz.link(self.VIEW,self.NODE)
def grabActionOnThirdAppendage(grabTrigger):
    """
    Toggle grabbing of the third appendage with the right-hand PPT marker.

    On grab: remember the appendage's pose, disable the bone link and link
    the appendage's position to the hand marker.  Otherwise: release -- remove
    the grab link, re-enable the bone link and restore the saved position.
    """
    global thirdAppendage, thirdAppendageReadyForGrab, thirdAppendageGrabbed, ghostAvatar, matrixOfThirdAppendage, grabLink, posOfThirdAppendage, tahLink
    if (thirdAppendageReadyForGrab is True) and (grabTrigger is True):
        #Grabbing the third appendage now
        ExitProximity(True)
        thirdAppendageGrabbed = True
        thirdAppendageReadyForGrab = False
        # save the current pose so it can be restored on release
        matrixOfThirdAppendage = thirdAppendage.getMatrix(mode=viz.ABS_GLOBAL)
        posOfThirdAppendage = thirdAppendage.getPosition()
        tahLink.disable()
        #grabLink = viz.grab(pptextensionDK2.rhPPT, thirdAppendage)
        grabLink = viz.link(pptextensionDK2.rhPPT, thirdAppendage)
        grabLink.setMask(viz.LINK_POS_OP)  # position-only link
        grabLink.reset(viz.RESET_POS)
        grabLink.setSrcFlag(viz.ABS_LOCAL)
        grabLink.setDstFlag(viz.ABS_PARENT)
        #grabLink.swapPos(-3, 2, -1)
        grabLink.postEuler([-90, 0, 0], target = viz.LINK_POS_OP)
        #grabLink.postTrans([0.3, 0, 0.1], target = viz.LINK_POS_OP)
        #just the following two statements will work alone, but not very accurate
        #grabLink.postEuler([-90, 0, 0], target = viz.LINK_POS_OP)
        #grabLink.setOffset([1, 0, 1.5])
        # ghostAvatar.getBone('Bip01 R Forearm').getPosition(viz.ABS_GLOBAL))
    else:
        thirdAppendageGrabbed = False
        if grabLink:
            grabLink.remove()
            grabLink = None
        #thirdAppendage.setMatrix(matrixOfThirdAppendage)
        tahLink.enable()
        thirdAppendage.setPosition(posOfThirdAppendage)
def __init__(self): viz.mouse.setVisible(viz.OFF) #Activate NVIS HMD nvis.nvisorSX111() #nvis.nvisorSX60() viz.cursor(viz.OFF) #isense = viz.add('intersense.dls') vrpn = viz.add('vrpn7.dle') view = viz.MainView self.markers = [] headMarker = vrpn.addTracker('PPT0@' + PPT_HOSTNAME, 0) self.markers.append(headMarker) self.markers.append( vrpn.addTracker('PPT0@' + PPT_HOSTNAME, 1) ) self.markers.append( vrpn.addTracker('PPT0@' + PPT_HOSTNAME, 2) ) self.markers.append( vrpn.addTracker('PPT0@' + PPT_HOSTNAME, 3) ) self.markers.append( vrpn.addTracker('PPT0@' + PPT_HOSTNAME, 4) ) filter = viz.add("filter.dle") headMarker_filter = filter.average(headMarker, samples = 7) headPos = viz.link(headMarker_filter, view, priority = 0) headPos.setOffset(DEFAULT_OFFSET) self.posLink = headPos #self.posLink.postScale(DEFAULT_SCALE) self.headMarker = headMarker
def createCustomComposite(id=0):
    """Build a VUCompositeTrackers head rig driven by a MotionNode VRPN tracker."""
    viz.logNotice('MotionNode Head Tracking')
    # Use the generic VRPN tracker plugin; MotionNode streams on localhost.
    vrpn_plugin = viz.add('vrpn7.dle')
    tracker = vrpn_plugin.addTracker('Tracker0@localhost')
    # Remap rotation axes from MotionNode conventions to Vizard's.
    tracker.swapQuat([-3, -2, -1, 4])
    # Manually-updated link so the tracker sample is applied after plugins run.
    head_node = viz.addGroup()
    head_link = viz.link(tracker, head_node, enabled=False)
    import vizact
    vizact.onupdate(viz.PRIORITY_PLUGINS + 1, head_link.update)
    head_link.postTrans([0, 0.1, 0])  # raise the head 10 cm on the Y axis
    import vizuniverse as VU
    composite = VU.VUCompositeTrackers()
    composite.storeTracker(composite.HEAD, head_node)
    composite.createAvatarNone()
    composite.defineViewpoint()
    composite.finishTrackers()
    return composite
def snitchPath(self, pathNum):
    """Create, link and play the snitch's looping flight path.

    Control points are only added for pathNum < 4; other values still
    create, link and play an empty path (original behavior preserved).
    """
    self.path = viz.addAnimationPath()
    if pathNum < 4:
        # (time, position, euler) keyframes for the looping flight.
        keyframes = [
            (0,    (22, 31, 20),  (90, 0, 0)),
            (3,    (-22, 21, 26), (0, 90, 0)),
            (5,    (-22, 41, 46), (0, 0, 90)),
            (6.5,  (-10, 10, 60), (90, 0, 0)),
            (8,    (12, 21, 26),  (0, 90, 0)),
            (10,   (42, 45, 50),  (0, 0, 90)),
            (11.5, (17, 35, 25),  (0, 90, 0)),
            (12,   (22, 31, 20),  (90, 0, 0)),
        ]
        for t, pos, euler in keyframes:
            self.path.addControlPoint(t, pos=pos, euler=euler)
    # Replay from the beginning once the end is reached.
    self.path.setLoopMode(viz.LOOP)
    self.path.computeTangents()
    self.path.setTranslateMode(viz.CUBIC_BEZIER)
    # Attach the snitch model to the path and start playback.
    self.link = viz.link(self.path, self.snitch)
    self.path.play()
def showVRText(msg, color=None, distance=2.0, scale=0.05, duration=2.0):
    """ Display head-locked message in VR, e.g. for instructions.

    Fix: the color default was a shared mutable list ([1.0, 1.0, 1.0]);
    it now defaults to None and is materialized per call.

    Args:
        msg (str): Message text
        color: RGB 3-tuple of color values (white when omitted)
        distance (float): Z rendering distance from MainView
        scale (float): Text node scaling factor
        duration (float): Message display duration (seconds)
    """
    if color is None:
        color = [1.0, 1.0, 1.0]
    # Create 3D text object
    text = viz.addText3D(msg, scale=[scale, scale, scale], color=color)
    text.resolution(1.0)
    text.setThickness(0.1)
    text.alignment(viz.ALIGN_CENTER)
    # Lock text to user viewpoint at fixed distance
    text_link = viz.link(viz.MainView, text, enabled=True)
    text_link.preTrans([0.0, 0.0, distance])
    # Fade text away after <duration> seconds
    fadeout = vizact.fadeTo(0, time=0.7)
    yield viztask.waitTime(duration)
    text.addAction(fadeout)
    yield viztask.waitActionEnd(text, fadeout)
    text.remove()
def displaySnail():
    """Spawn the rotating snail model and attach it to the right Vive tracker."""
    global rotatingSnail, snailLink
    rotatingSnail = vizfx.addChild(globals_oa.FAUNA_DIRECTORY + globals_oa.ROTATING_SNAIL)
    rotatingSnail.setScale([0.1, 0.1, 0.1])
    snailLink = viz.link(globals_oa.rhViveTracker, rotatingSnail)
    # Nudge and flip the model so it sits correctly relative to the tracker.
    snailLink.preTrans([0.01, -0.03, -0.13])
    snailLink.preEuler([0, 0, 180])
def finalDemo():
    """Swap in the gallery environment and show the end-of-experiment panel."""
    sky.remove()
    Panel.remove()
    gallery = viz.add('gallery.osgb')
    done_panel = vizinfo.InfoPanel(
        "You may now take off the headset, Thank you for your participation",
        parent=canvas,
        align=viz.ALIGN_CENTER,
        fontSize=22,
        icon=False,
        title="Finished")
    done_panel.alpha(.8)
    # Fresh navigation node: HMD sensor drives the view from eye height.
    navigationNode = viz.addGroup()
    view_link = viz.link(navigationNode, viz.MainView)
    view_link.preMultLinkable(hmd.getSensor())
    view_link.setOffset([0, 1.8, 0])
    # Pin the panel near the bottom center of the screen.
    viz.link(viz.CenterBottom, done_panel, offset=(400, 230, 0))
def attachToHand(self, handTracker):
    """Link this node's position to *handTracker*, replacing any prior link.

    Fix: None comparison now uses identity (`is not None`) per PEP 8
    instead of equality (`!= None`).

    Args:
        handTracker: tracker/linkable whose position drives this node.
    """
    if self._handLink is not None:
        self._handLink.remove()
    self.unstick()
    # Position-only link: the hand drives where we are, not our orientation.
    self._handLink = viz.link(handTracker, self, mask=viz.LINK_POS)
    self.initEuler()
    self.initVelocity()
    self._isLinked = True
def SetRotation(rot):
    """Set the navigation node's heading to *rot* degrees and relink the view."""
    global navigationNode
    global viewLink
    # Yaw only; pitch and roll stay zero.
    navigationNode.setEuler(rot, 0, 0)
    # NOTE(review): a fresh view link is created on every call without removing
    # the previous one -- confirm links are replaced rather than stacked.
    viewLink = viz.link(navigationNode, viz.MainView)
    viewLink.preMultLinkable(hmd.getSensor())  # HMD sensor supplies orientation
def useDefaultView(self):
    """
    Drive cave_origin with a keyboard/mouse 6DOF tracker (arrow keys) so the
    operator can fly the cave user through the virtual environment.
    cave_origin is a standard Vizard node, so any position/rotation applies.
    """
    fly_tracker = viztracker.KeyboardMouse6DOF()
    fly_link = viz.link(fly_tracker, cave_origin)
def __init__(self): super(self.__class__,self).__init__() #Override parameters self.ORIGIN_POS = [0,self.EYE_HEIGHT,0] #Override view link self.VIEW_LINK.remove() self.VIEW_LINK = viz.link(self.VIEW, self.NODE)
def setup(self):
    """Replace the default head light with a view-linked spotlight torch."""
    # The scene should only be lit by the torch, so kill the head light.
    viz.MainView.getHeadLight().disable()
    self.mylight = viz.addLight()
    # Narrow, sharply-focused beam pointing down +Z from just above the view.
    self.mylight.position(0,0,0)
    self.mylight.direction(0,0,1)
    self.mylight.spread(15)
    self.mylight.intensity(self.start)
    self.mylight.spotexponent(100)
    self.mylight.setPosition([0,1,0])
    # Torch follows the viewpoint.
    viz.link(viz.MainView, self.mylight)
    return None
def setup(self):
    """Disable the head light and attach a tight spotlight to the main view."""
    viz.MainView.getHeadLight().disable()
    # Spotlight acting as a torch.
    self.mylight = viz.addLight()
    self.mylight.position(0, 0, 0)
    self.mylight.direction(0, 0, 1)       # shine forward along +Z
    self.mylight.spread(15)               # tight cone
    self.mylight.intensity(self.start)
    self.mylight.spotexponent(100)        # sharp falloff
    self.mylight.setPosition([0, 1, 0])   # a meter above the link point
    # Keep the torch locked to the viewpoint.
    viz.link(viz.MainView, self.mylight)
    return None
def linkMainViewToHead(self): print 'Helmet-mainview linked' helmetRigid = self.allRigids_ridx[self.rigidHMDIdx] helmetRigid.vizNodeLink = viz.link(helmetRigid.vizNode, viz.MainView) helmetRigid.isVisible = 0 helmetRigid.vizNode.visible(viz.OFF) #Make the object visible. self.mainViewLinkedToHead = 1
def linkMainViewToHead(self): print 'Helmet-mainview linked' helmetRigid = self.allRigids_ridx[self.rigidHMDIdx] helmetRigid.vizNodeLink = viz.link(helmetRigid.vizNode,viz.MainView) helmetRigid.isVisible = 0 helmetRigid.vizNode.visible( viz.OFF ) #Make the object visible. self.mainViewLinkedToHead = 1
def flyOut():
    """Animate the special mole flying up and away along a keyframed path."""
    global specialMole, skipKey
    yield viztask.waitTime(1)
    path = viz.addAnimationPath()
    # (time, position, pitch) keyframes; scale stays 4x throughout.
    frames = [
        (0, [-0.4, 1.3, 0.2], 90),
        (1, [-0.4, 1.5, 0.6], 55),
        (2, [-0.4, 3.3, 0.1], 100),
        (3, [-0.4, 4.8, 0.8], 75),
        (50, [-0.4, 70, 0.4], 90),
    ]
    for t, pos, pitch in frames:
        path.addControlPoint(t, pos=pos, euler=(pitch, 0, 0), scale=([4, 4, 4]))
    pathLink = viz.link(path, specialMole)
    path.play()
def SetPos(pos):
    """Move the navigation node to *pos* (Y scaled by yOffset) and relink the view."""
    global navigationNode
    global viewLink
    global yOffset
    import vizact
    # NOTE(review): mutates the caller's list in place.
    pos[1] *= yOffset
    navigationNode.setPosition(pos)
    # NOTE(review): a new view link is created per call; confirm the old one
    # is replaced rather than accumulated.
    viewLink = viz.link(navigationNode, viz.MainView)
    viewLink.preMultLinkable(hmd.getSensor())  # HMD sensor supplies orientation
def MovePlayer():
    """Slide the player -4 m/s along X for 60 s (matching the train's speed).

    A timed move action avoids the jitter of repositioning the player
    relative to the train every frame.
    """
    import vizact
    if hmdconnected:
        global navigationNode
        global viewLink
        navigationNode.addAction(vizact.move(-4, 0, 0, 60))
        viewLink = viz.link(navigationNode, viz.MainView)
        viewLink.preMultLinkable(hmd.getSensor())
    else:
        # No HMD: move the main view directly.
        viz.MainView.addAction(vizact.move(-4, 0, 0, 60))
def flyingCO2():
    """Fly the big CO2 molecule down toward the viewer along a rocking path."""
    global bigCO2, co2Path
    yield viztask.waitTime(1)
    co2Path = viz.addAnimationPath()
    # (time, position, pitch) keyframes; model scale fixed at 8x.
    for t, pos, pitch in [
        (0,  (0, 21, 20),      0),
        (2,  (0.5, 15, 17),   20),
        (4,  (0, 10, 14),    -30),
        (6,  (0.7, 7, 10),    15),
        (8,  (0.3, 4, 6),    -20),
        (10, (-0.1, 1.7, 2),   0),
    ]:
        co2Path.addControlPoint(t, pos=pos, euler=(pitch, 0, 0), scale=(8, 8, 8))
    co2Link = viz.link(co2Path, bigCO2)
    co2Path.play()
def CreateScorePanel(self):
    """Build the score/oil HUD grid panel and pin it to the window's top-right."""
    self._newScore = None  # temporary score held between updates
    self._scorePanel = vizdlg.GridPanel(window=self._window, skin=vizdlg.ModernSkin,
                                        spacing=-10, align=vizdlg.ALIGN_RIGHT_TOP, margin=0)
    # Points row: star icon, label, right-aligned counter.
    points_label = viz.addText(self.tooltips['points'])
    points_label.font("Segoe UI")
    self._scoreIcon = viz.addTexQuad(size=25, texture=viz.add('textures/star_yellow_256.png'))
    self._score = viz.addText('000')
    self._score.font("Segoe UI")
    self._score.alignment(viz.ALIGN_RIGHT_BASE)
    self._scorePanel.addRow([self._scoreIcon, points_label, self._score])
    # Oil row: oil icon, label, right-aligned counter.
    oil_label = viz.addText(self.tooltips['oil'])
    oil_label.font("Segoe UI")
    self._oilIcon = viz.addTexQuad(size=25, texture=viz.add('textures/oil_icon.png'))
    self._oil = viz.addText('000')
    self._oil.font("Segoe UI")
    self._oil.alignment(viz.ALIGN_RIGHT_BASE)
    self._scorePanel.addRow([self._oilIcon, oil_label, self._oil])
    self._scorePanel.setCellAlignment(vizdlg.ALIGN_RIGHT_TOP)
    # Anchor the scoreboard at the top-right corner of the window.
    viz.link(self._window.RightTop, self._scorePanel, offset=(-10, -45, 0))
def setZepPath(ZEP, positions = getRandomPath()): #Generate random values for position print "blimp path: " + str(positions) #Create an animation path path = viz.addAnimationPath() for x,pos in enumerate(positions): #Add a ball at each control point and make it #semi-transparent, so the user can see where the #control points are #b = viz.addChild('beachball.osgb',cache=viz.CACHE_CLONE) #b.setScale(10,10,10) #b.setPosition(pos) #b.alpha(0.2) #Add the control point to the animation path #at the new time path.addControlPoint(x+1,pos=pos) #Set the initial loop mode to circular path.setLoopMode(viz.CIRCULAR) #make it go in a bezier between points circle path.setTranslateMode(viz.CUBIC_BEZIER) #Automatically compute tangent vectors for cubic bezier translations path.computeTangents() #Automatically rotate the path path.setAutoRotate(viz.ON) #Link the ZEPPELIN to the path viz.link(path, ZEP) #Play the animation path path.play() #Set the animation path speed path.speed(.006)
def draw_maze_task(self):
    """Run the maze sketching task: show the drawing frame in front of the
    subject, record the sketch, log timing markers and save a screenshot.

    Generator task -- must be scheduled via viztask. Waits for a trigger
    press to start drawing and a left mouse click to save.
    """
    # remove collide events
    viz.phys.disable()
    # position frame in front of subject after reorienting
    pos = self.subject.head_sphere.getPosition()
    self.scene.drawing_frame.setPosition(
        [pos[0] - .2, pos[1] - .5, pos[2] + .6])
    self.scene.drawing_frame.visible(viz.ON)
    # Instruction is German for: "Please draw the room into the frame. Start with a click."
    self.scene.change_instruction(
        "Bitte zeichnen Sie den Raum in den Rahmen ein.\nStart mit Klick.")
    print '!!! DRAWING TASK, TRIGGER TO START !!!'
    yield self.hide_inst_continue_trigger()
    print '!!! DRAWING STARTED, MOUSECLICK TO SAVE !!!'
    # enable drawing functionality: the right-hand sphere becomes a visible white cursor
    self.subject.right_hand_sphere.alpha(1)
    self.subject.right_hand_sphere.setScale(2, 2, 2)
    self.subject.right_hand_sphere.color(viz.WHITE)
    # draw tool follows the right hand
    draw_link = viz.link(self.subject.right_hand_sphere,
                         self.scene.draw_tool)
    # drawing update function called every frame and handling states of input device
    self.scene.draw_tool.setUpdateFunction(self.draw)
    # send drawing task start marker
    self.log_exp_progress('type:drawing_start;')
    start = viz.tick()
    # wait until drawing is saved and continue with the experiment
    yield self.hide_inst_continue_left_mouse()
    print '!!! DRAWING SAVED !!!'
    # send drawing task end marker (duration rounded to 2 decimals)
    duration_drawing = viz.tick() - start
    self.log_exp_progress('type:drawing_end;duration_drawing:' +
                          str(round(duration_drawing, 2)) + ';')
    # save screenshot of drawing
    filename = 'subject_' + str(self.subject_id) + '_sketchmap_' + str(
        self.current_maze)
    viz.window.screenCapture(filename + '.bmp')
    yield viztask.waitTime(0.5)
    # remove drawing and draw_tool, restore the hand sphere to its hidden state
    self.scene.drawing_frame.visible(viz.OFF)
    draw_link.remove()
    self.scene.draw_tool.clear()
    self.subject.right_hand_sphere.alpha(0)
    self.subject.right_hand_sphere.setScale(1, 1, 1)
def addCameraBounds(self):
    """Create a wireframe node visualizing the HMD camera's tracking frustum.

    Returns:
        The green line node, or None when no sensor exists or the sensor
        does not report position.
    """
    if not self._sensor:
        return None
    if (self._sensor.getSrcMask() & viz.LINK_POS) == 0:
        return None
    left, right, bottom, top, near, far = self._sensor.getCameraFrustum()
    # Scale the near-plane rectangle out to the far plane.
    s = far / near
    fleft, fright, fbottom, ftop = left * s, right * s, bottom * s, top * s
    # Corner orders chosen so consecutive pairs trace each rectangle's edges.
    near_quad = [(left, top), (right, top), (right, bottom), (left, bottom)]
    far_quad = [(fleft, ftop), (fright, ftop), (fright, fbottom), (fleft, fbottom)]
    viz.startLayer(viz.LINES)
    # Boundary lines from the origin out to each far-plane corner.
    for x, y in [(fleft, ftop), (fright, ftop), (fleft, fbottom), (fright, fbottom)]:
        viz.vertex([0, 0, 0])
        viz.vertex([x, y, far])
    # Near plane rectangle.
    for i in range(4):
        ax, ay = near_quad[i]
        bx, by = near_quad[(i + 1) % 4]
        viz.vertex([ax, ay, near])
        viz.vertex([bx, by, near])
    # Far plane rectangle.
    for i in range(4):
        ax, ay = far_quad[i]
        bx, by = far_quad[(i + 1) % 4]
        viz.vertex([ax, ay, far])
        viz.vertex([bx, by, far])
    lines = viz.endLayer(color=viz.GREEN)
    lines.drawOrder(100)
    # Overlay-style rendering: ignore lighting/depth and all interaction tests.
    lines.disable([viz.LIGHTING, viz.DEPTH_TEST, viz.SHADOW_CASTING, viz.INTERSECTION, viz.PICKING])
    lines.setReferenceFrame(viz.RF_VIEW)
    link = viz.link(self._sensor, lines, srcFlag=ORI_CAMERA)
    link.postMultInverseLinkable(self._sensor)
    return lines
def __init__(self, window=None, clientMask=viz.MASTER, color=viz.WHITE):
    """Create a view-linked flash sphere used to blank the screen after a fall."""
    if window is None:
        window = viz.MainWindow
    view = window.getView()
    self._color = color
    self._clientMask = clientMask
    # Large inverted sphere around the viewpoint, pushed slightly past its
    # radius so the viewer sits just inside it when shown.
    self._flashSphere = vizshape.addSphere(10.0, flipFaces=True)
    flash_link = viz.link(view, self._flashSphere)
    flash_link.preTrans([0, 0, 10.1])
    self._flashSphere.color(self._color)
    self._flashSphere.visible(False)  # hidden until the flash is triggered
    self._flashSphere.renderOnlyToWindows([window])
def _setupHeadTrackers(hmd, OH): global vrpn if vrpn is None: vrpn = viz.add('vrpn7.dle') POS_TRACKER = vrpn.addTracker(PPT_ADDRESS, 0) ORI_TRACKER = hmd.getSensor() if OH: print 'vhil_devkit: Optical heading setup not yet implemented - set OH to False.' else: tracker = viz.mergeLinkable(POS_TRACKER, ORI_TRACKER) oculusViewLink = viz.link(tracker, MainView) oculusViewLink.preTrans(OCULUSVIEWLINK_PRETRANS)
def setup(conf): """ Set up all the hardware used in the ball catching experiment :param conf: config['hardware'] entry of full config dict :return: None """ global config global hmd global head_tracker global eye_tracker global link config = conf print '\nSetting up hardware modules...' # configure Oculus Rift if config['use_hmd']: hmd = oculus.Rift() hmd.setMonoMirror(True) hmd.getSensor().reset() # setup position tracking (WorldViz PPTX) if config['use_ppt']: head_tracker = ppt.add_tracker(0) link = ppt.link(tracker=head_tracker, ori=hmd.getSensor(), target=viz.MainView, pre_trans=[-0.05, -0.05, 0.02]) head_tracker = head_tracker # no ppt else: link = viz.link(hmd.getSensor(), viz.MainView, mask=viz.LINK_ORI) link.setOffset([0, 1.8, 0]) viz.MainView.setPosition([0, 1.8, -3]) # setup eye tracker if config['eye_tracking']: eye_tracker = EyeTracker() # configure screen setup else: viz.MainView.setPosition(0, 0, -4, viz.REL_LOCAL) viz.setOption('viz.fullscreen.monitor', 2) keyboard_cam = vizcam.KeyboardCamera() link = link # keys for hardware control viz.callback(viz.KEYDOWN_EVENT, on_key_down)
def __init__(self, frame_weight=0.5, aperture_scale=0.5, node=None, **kwargs):
    """
    Render-to-texture camera whose output is shaded by a ViewProjector
    that blends in the previous frame.

    Fix: None comparison now uses identity (`is None`) per PEP 8.

    Args:
        frame_weight: blend weight of the previous frame in the projector.
        aperture_scale: projector aperture scaling factor.
        node: optional existing scenegraph node to wrap; a fresh group is
            created when omitted.
    """
    # node to reference this instance in the scenegraph
    self._node = node
    if self._node is None:
        self._node = viz.addGroup()
    viz.VizNode.__init__(self, id=self._node.id, **kwargs)

    # Two render textures, swapped between output and previous-frame input.
    self._output_texture = viz.addRenderTexture()
    self._last_frame_texture = viz.addRenderTexture()

    ### INIT CAMERA
    # Create render node for camera
    self._cam = viz.addRenderNode(size=(1024, 1024))
    self._cam.renderOnlyToWindows([viz.MainWindow])
    self._cam.setInheritView(False)
    self._cam.drawOrder(1000)
    self._cam.setFov(90.0, 1.0, 0.1, 1000.0)
    # Only render once per frame, in case stereo is enabled
    self._cam.setRenderLimit(viz.RENDER_LIMIT_FRAME)
    # set the starting render textures for output and input
    self._cam.setRenderTexture(self._output_texture)
    self._cam.texture(self._last_frame_texture, unit=4)
    # link camera to capture
    viz.link(self._node, self._cam)

    # affect camera so its render texture will be computed using the
    # defined shading pipeline
    self._projector = ViewProjector()
    self._projector.setFrameWeight(frame_weight)
    self._projector.setApertureScale(aperture_scale)
    self._projector.affect(self._cam)

    self._update_event = vizact.onupdate(100, self.update)
def _setup_stressindicator(self):
    """Create the stress-measurement sphere and its text readout, attach them
    to the right-hand controller, and keep them hidden until first use."""
    # Green 3D text readout, later parented to the indicator sphere.
    self._stressindicator_text = viz.addText3D('3D Text', pos=[0, -1, 0])
    self._stressindicator_text.alignment(viz.ALIGN_LEFT_BOTTOM)
    self._stressindicator_text.color(viz.GREEN)
    self._stressindicator_text.setScale([.05, .05, .05])
    # Small green sphere marking the measurement point.
    self._stressindicator = vizshape.addSphere(radius=0.01, color=viz.GREEN)
    self._stressindicator_text.setParent(self._stressindicator)
    self._stressindicator_text.setPosition([0, .1, 0], viz.ABS_PARENT)
    # Offset group so the sphere sits just in front of the controller.
    offset_group = viz.addGroup()
    self._stressindicator.setParent(offset_group)
    self._stressindicator.setPosition([0, -.01, .05], viz.ABS_PARENT)
    self._stressindicator.setEuler([0, 90, 0], viz.ABS_PARENT)
    # Follow the right-hand controller.
    viz.link(vizconnect.getRawTracker('r_hand_tracker'), offset_group)
    # Hidden until the stressindicator actually gets used.
    self._stressindicator.visible(False)