コード例 #1
0
ファイル: controlAppendages.py プロジェクト: vhilab/thirdArm
def createCollisionBody(avatar):
    """Attach an invisible collision box to every bone in COLLISION_LIMBS.

    Forearm boxes are registered as hand IDs; every other limb box is
    excluded from touch detection. Boxes are synced to the avatar each frame.
    """
    global collisionLimbBoxes
    for boneName in COLLISION_LIMBS:
        avatar.getBone(boneName)  # bone lookup kept from original (result unused)
        # Invisible proxy box standing in for the limb during collision tests
        box = vizshape.addBox(color=viz.WHITE)
        box.disable(viz.RENDERING)
        box.setScale(findBoneSize(boneName))
        # BL:start
        box.collideBox(bounce=0.0)
        if boneName == "Bip01 L Forearm":
            leftHandIDs.append(box.id)
        elif boneName == "Bip01 R Forearm":
            rightHandIDS.append(box.id)
        else:
            # Non-forearm limbs must not trigger touch events
            TouchCube.disabledTouchers.append(box)
        # BL:end
        # only enable collide notify when animating bone
        # box.enable(viz.COLLIDE_NOTIFY)
        box.disable(viz.DYNAMICS)
        collisionLimbBoxes.append(box)
    pptExtension.limbBoxes = collisionLimbBoxes
    vizact.onupdate(0, updateCollisionBoxes, avatar)
コード例 #2
0
ファイル: appendagemanager.py プロジェクト: vhilab/fiveArms
def setControl(selectedControlSchema, isLeftHanded = 0):
	"""Install the per-frame appendage control schema and create the six
	colored debug spheres used to visualize tracked points.

	Spheres are hidden unless some_global_variables.SHOW_SPHERES is set.
	"""
	vizact.onupdate(0, controlSchemaForAppendages)
	global rightForearmSphere, rightWristSphere, rightFingerSphere, leftForearmSphere, leftWristSphere, leftFingerSphere

	def _debugSphere(sphereColor):
		# Small marker sphere used to visualize one tracked point
		marker = vizshape.addSphere(radius=0.02)
		marker.color(sphereColor)
		return marker

	rightForearmSphere = _debugSphere(viz.RED)
	rightWristSphere = _debugSphere(viz.GREEN)
	rightFingerSphere = _debugSphere(viz.BLUE)
	leftForearmSphere = _debugSphere(viz.CYAN)
	leftWristSphere = _debugSphere(viz.MAGENTA)
	leftFingerSphere = _debugSphere(viz.YELLOW)
	if not some_global_variables.SHOW_SPHERES:
		# Debug visualization disabled: hide every marker
		for marker in (rightWristSphere, rightFingerSphere, leftWristSphere,
				leftFingerSphere, rightForearmSphere, leftForearmSphere):
			marker.visible(False)
コード例 #3
0
def createCustomComposite(id=0):
    """Build a VUCompositeTrackers rig driven by a VRPN MotionNode head tracker.

    :param id: kept for interface compatibility (unused in this body)
    :return: configured VUCompositeTrackers instance
    """
    viz.logNotice('MotionNode Head Tracking')

    # General VRPN tracker (alternative: the built-in MotionNode.dle sensor,
    # see the commented lines below).
    vrpn = viz.add('vrpn7.dle')
    head = vrpn.addTracker('Tracker0@localhost')
    # MotionNode and Vizard disagree on rotation axes; remap the quaternion.
    head.swapQuat([-3, -2, -1, 4])

    # Or, use the built in MotionNode tracker support.
    #MotionNode = viz.add('MotionNode.dle')
    #head = MotionNode.addSensor()

    headfinal = viz.addGroup()
    # Link created disabled: we drive the update manually after the plugin pass.
    headlink = viz.link(head, headfinal, enabled=False)
    import vizact
    vizact.onupdate(viz.PRIORITY_PLUGINS + 1, headlink.update)
    headlink.postTrans([0, 0.1, 0])  # lift the tracked point 10 cm in Y

    import vizuniverse as VU
    comp = VU.VUCompositeTrackers()
    comp.storeTracker(comp.HEAD, headfinal)
    comp.createAvatarNone()
    comp.defineViewpoint()
    comp.finishTrackers()
    return comp
コード例 #4
0
ファイル: physEnv.py プロジェクト: YanjieGao/ExperimentBase
	def __init__(self):
		"""Set up an ODE physics world, collision space and per-frame stepping.

		Frame-rate is hardcoded at 1/60 and gravity at -9.8 m/s^2 (see the
		FIX notes printed below). Registers self.stepPhysics to run every
		frame at physics priority.
		"""
		viz.EventClass.__init__(self)

		# print() call form: identical output under Python 2 for a single
		# argument, and consistent with the modernized physEnv variant.
		print('physEnv.init(): Frame-rate hardcoded at 1/60!')

		self.frameRate = 1.0 / 60

		# Defensive check kept from the original; frameRate is always a float here.
		if type(self.frameRate) is not float:
			print('physEnv.init(): frame-rate must be a float!')
			return

		# Keep track of physnodes in here
		self.physNodes_phys = []

		# This will be turned to TRUE when a collision has been detected
		self.collisionDetected = False

		# ODE initialization steps
		self.world = ode.World()

		print('physEnv.init(): FIX:  Grav hardcoded at 9.8. Should accept gravity as a parameter, or include a function to change gravity')
		self.world.setGravity([0, -9.8, 0])

		# Solver tuning: constraint-force mixing and error-reduction parameter
		self.world.setCFM(0.00001)
		self.world.setERP(0.1)

		#self.world.setContactSurfaceLayer(0.001)

		# Collision space where geoms live and collisions are simulated
		# 0 for a 'simple' space (faster and less accurate), 1 for a hash space
		self.space = ode.Space(1)
		self.minBounceVel = .2  # min incoming velocity needed to cause a bounce

		# Spaces contain the geoms that are actually tested for collision; a
		# geom is to collision detection what a body is to the dynamics
		# simulation (dynamics needs only mass properties, collision needs shape).

		# A joint group for the contact joints that are generated whenever two bodies collide
		self.jointGroup = ode.JointGroup()
		self.collisionList_idx = []
		self.contactJoints_idx = []
		self.contactObjects_idx = []
		# A list of non-collision joints, such as fixed joints, etc
		self.joints_jIdx = []

		# Step the simulation once per render frame
		vizact.onupdate(viz.PRIORITY_PHYSICS, self.stepPhysics)
コード例 #5
0
    def __init__(self, subject, start_from_trial=0):
        """
        Setup data path, room, ball, baseball glove and trial list. Initialize events and actions.
        :param subject: subject nr
        :param start_from_trial: number of first trial (used when experiment crashed)
        """
        # call superclass constructor
        viz.EventClass.__init__(self)

        # create data path (per-subject directory)
        self.data_dir = 'data/{}/'.format(subject)

        # read experiment info (config dict) previously written as JSON
        with open(self.data_dir + 'exp_info.txt') as info_file:
            config = json.load(info_file)

        # setup hardware from the config's 'hardware' section
        hardware.setup(config['hardware'])

        # initialize visual environment
        print '\nSetting up visual environment...'
        self.room = Room(config)
        self.config = config['experiment']

        # add ball; initial y equals its size so it rests on the floor,
        # placed 36.22 m down the z axis
        ball_size = self.config['ball_size']
        ball_initial_pos = [0, ball_size, 36.22]
        self.ball = Ball(room=self.room, size=ball_size, position=ball_initial_pos, color=[1, 1, 1])
        self.ball.setAlpha(0)  # fully transparent until needed

        self.glove = None
        self.catch_sound = None
        # add glove (only for conditions that use one) plus catch feedback sound
        if self.config['glove']:
            self.glove = BaseballGlove(room=self.room, size=self.config['glove_radius'], marker_num=1,
                                       hand=config['experiment']['hand'])
            self.catch_sound = viz.addAudio('res/sounds/applause.wav')

        # initialize trial list from CSV as a list of per-trial dicts
        # NOTE(review): trial_num = start_from_trial - 1 suggests it is
        # incremented before each trial is run -- confirm against the trial loop.
        self.trials = pd.read_csv(self.data_dir + 'trials.csv').to_dict('records')
        self.trial_num = start_from_trial - 1
        self.current_trial = None
        self.current_block = None

        self.message_panel = None
        if hardware.hmd:
            # HMD message panel for showing messages between blocks and at the end of the experiment
            self.message_panel = hardware.hmd.addMessagePanel('You did it!')
            self.message_panel.visible(False)

        # key-press events (for starting the experiment)
        self.callback(viz.KEYDOWN_EVENT, self.on_key_down)

        # handle collisions (register when glove touches ball)
        vizact.onupdate(0, self.handle_collisions)

        # play sound if participant leaves allowed area
        self.allowed_area_action = vizact.ontimer(0.7, self.check_allowed_area)
コード例 #6
0
    def start_vr(self):
        """Generator task: bring up SteamVR, identify tracker roles, and start
        LSL streams for head, hand, arm and torso rigid bodies.

        Yields once (viztask.waitTime) while tracker roles are being assigned.
        """
        hmd = steamvr.HMD()
        if not hmd.getSensor():
            sys.exit('SteamVR HMD not detected')
        viz.link(hmd.getSensor(), viz.MainView)

        # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
        hmd_stream = self.subject.create_non_phasespace_rigid_body_stream('headRigid', 0)
        # stream 6dof of HMD as pos (x,y,z) and ori(x,y,z,w) --> quaternion
        vizact.onupdate(0, self.subject.update_and_push_rigid_body, viz.MainView, self.subject.head_sphere, hmd_stream)

        #  connecting present controllers
        trackers = steamvr.getTrackerList()
        self.controller = steamvr.getControllerList()[0]
        print self.controller
        tracker_names = ['handRigid', 'armRigid', 'torsoRigid']

        # Observe the trackers briefly to decide which one is hand/arm/torso
        find_out_tracker = vizact.onupdate(0, self.assign_trackers, trackers)
        yield viztask.waitTime(5) # wait five seconds to figure out which tracker is more to the front in z direction = hand tracker
        find_out_tracker.remove()
        
        # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
        # stream 6dof as pos (x,y,z) and ori(x,y,z,w) --> quaternion
        hand_stream = self.subject.create_non_phasespace_rigid_body_stream(tracker_names[self.hand_tracker_id], 0)
        vizact.onupdate(0, self.subject.update_and_push_rigid_body, trackers[self.hand_tracker_id], self.subject.right_hand_sphere, hand_stream)
        
        # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
        arm_stream = self.subject.create_non_phasespace_rigid_body_stream(tracker_names[self.arm_tracker_id], 0)
        vizact.onupdate(0, self.subject.update_and_push_rigid_body, trackers[self.arm_tracker_id], None, arm_stream)

        # create LSL stream for MoBIlab pos and ori analysis --> ori should be in quaternions
        torso_stream = self.subject.create_non_phasespace_rigid_body_stream(tracker_names[self.torso_tracker_id], 0)
        vizact.onupdate(0, self.subject.update_and_push_rigid_body, trackers[self.torso_tracker_id], None, torso_stream)
コード例 #7
0
ファイル: gazeTools.py プロジェクト: bmj8778/CatchB
	def dynamicCalibrationMethod(self):
		"""Toggle the dynamic (moving-target) gaze calibration procedure.

		First call: create a purple calibration sphere that sweeps two
		rectangular circuits (far at z=12, then near at z=6) while angular
		error is computed every frame. Second call: tear everything down.
		"""
		if ( self.calibrationInProgress == False ):
			self.calibrationInProgress = True
			self.toggleRoomWallsVisibility()
			self.calibrationCounter = 27
			self.calibrationSphereRadius = self.initialValue
			self.calibrationSphere = vizshape.addSphere(self.calibrationSphereRadius, color = viz.PURPLE)
			self.calibrationSphere.emissive(viz.PURPLE)
			self.calibrationSphere.setParent(self.parentNode)
			newPos = [-3,-.5,12]
			self.setSphereRadius(self.parentNode.getPosition(viz.ABS_GLOBAL), newPos, 0)
			self.calibrationSphere.setPosition(newPos[0], newPos[1], newPos[2],viz.ABS_PARENT)
			self.targetMovingAction = vizact.onupdate(viz.PRIORITY_INPUT+1, self.checkActionDone)

			# On-sphere text readout, rendered only to the operator's window
			self.text_object = viz.addText('')
			self.text_object.setParent(self.calibrationSphere)
			self.text_object.renderOnlyToWindows([self.renderToWindows])
			self.localAction = vizact.onupdate(viz.PRIORITY_INPUT+1,self.calculateAngularError, self.cyclopEyeSphere.node3D, 0.0, self.text_object)#self.currentTrial.ballObj.node3D

			print 'Dynamic Calibration Procedure Started'
			# Queue moveTo actions (2 m/s): far rectangle at z=12,
			# left-bottom -> right-bottom -> right-top -> left-top -> left-bottom
			self.moveAction = vizact.moveTo([-3,-0.5,12],speed=2)
			self.calibrationSphere.addAction(self.moveAction)
			self.moveAction = vizact.moveTo([3,-0.5,12],speed=2)
			self.calibrationSphere.addAction(self.moveAction)
			self.moveAction = vizact.moveTo([3,3,12],speed=2)
			self.calibrationSphere.addAction(self.moveAction)
			self.moveAction = vizact.moveTo([-3,3,12],speed=2)
			self.calibrationSphere.addAction(self.moveAction)
			self.moveAction = vizact.moveTo([-3,-0.5,12],speed=2)
			self.calibrationSphere.addAction(self.moveAction)

			# Near rectangle at z=6, same circuit, ending at [-3,0,6]
			self.moveAction = vizact.moveTo([-3,-0.5,6],speed=2)
			self.calibrationSphere.addAction(self.moveAction)
			self.moveAction = vizact.moveTo([3,-0.5,6],speed=2)
			self.calibrationSphere.addAction(self.moveAction)
			self.moveAction = vizact.moveTo([3,3,6],speed=2)
			self.calibrationSphere.addAction(self.moveAction)
			self.moveAction = vizact.moveTo([-3,3,6],speed=2)
			self.calibrationSphere.addAction(self.moveAction)
			self.moveAction = vizact.moveTo([-3,0,6],speed=2)
			self.calibrationSphere.addAction(self.moveAction)



		else:
			# Calibration already running: remove actions/objects and reset state
			self.localAction.remove()
			self.text_object.remove()
			self.calibrationInProgress = False
			self.calibrationCounter = 0
			self.calibrationSphere.remove()
			self.toggleRoomWallsVisibility()
			print 'Quit Dynamic Calibration!!'
コード例 #8
0
	def __init__(self):		
		"""Attach an Oculus Rift HMD to this navigator's view link.

		Logs an error when no Rift sensor is detected; otherwise resets the
		sensor, applies the profile eye height (or the class default), and —
		when position tracking is supported — adds camera-bounds feedback
		(green = tracked, red = lost) with a visibility toggle key.
		"""
		# NOTE(review): super(self.__class__, ...) recurses infinitely if this
		# class is ever subclassed -- confirm the class is used as a leaf.
		super(self.__class__,self).__init__()
		
		# Link navigation node to main view
#		self.NODE = viz.addGroup()
#		self.VIEW_LINK = viz.link(self.NODE, self.VIEW)
		
		# --add oculus as HMD
		self.hmd = oculus.Rift()
		
		if not self.hmd.getSensor():
			viz.logError('** ERROR: Failed to detect Oculus Rift')
		else:
			# Reset HMD orientation
			self.hmd.getSensor().reset()

			# Setup navigation node and link to main view
#			self.NODE = viz.addGroup()
#			self.VIEW_LINK = viz.link(self.NODE, viz.VIEW)
			self.VIEW_LINK.preMultLinkable(self.hmd.getSensor())

			# --Apply user profile eye height to view
			profile = self.hmd.getProfile()
			if profile:
				self.VIEW_LINK.setOffset([0,profile.eyeHeight,0])
				viz.logNotice('Oculus profile name:', profile.name)
				viz.logNotice('Oculus IPD:', profile.ipd)
				viz.logNotice('Oculus player height:', profile.playerHeight)
				viz.logNotice('Oculus eye height:', profile.eyeHeight)
			else: 
				# No profile available: fall back to the class default eye height
				self.VIEW_LINK.setOffset([0,self.EYE_HEIGHT,0])
				
			# Check if HMD supports position tracking
			supportPositionTracking = self.hmd.getSensor().getSrcMask() & viz.LINK_POS
			if supportPositionTracking:
				
				# Add camera bounds model (hidden until toggled)
				self.camera_bounds = self.hmd.addCameraBounds()
				self.camera_bounds.visible(False)

				# Change color of bounds to reflect whether position was tracked
				def checkPositionTracked():
					if self.hmd.getSensor().getStatus() & oculus.STATUS_POSITION_TRACKED:
						self.camera_bounds.color(viz.GREEN)
					else:
						self.camera_bounds.color(viz.RED)
				vizact.onupdate(0, checkPositionTracked)

				# Setup camera bounds toggle key
				def toggleBounds():
					self.camera_bounds.visible(viz.TOGGLE)
				vizact.onkeydown(self.KEYS['camera'], toggleBounds)
コード例 #9
0
ファイル: physEnv.py プロジェクト: bmj8778/CatchB
    def __init__(self):
        """Create the ODE world, collision space, and per-frame stepping hook."""
        viz.EventClass.__init__(self)

        # Registry of physnodes managed by this environment
        self.physNodes_phys = []

        # Flipped to True once any collision has been registered
        self.collisionDetected = False

        # --- ODE world setup ---
        self.world = ode.World()

        print(
            'physEnv.init(): FIX:  Grav hardcoded at 9.8. Should accept gravity as a parameter, or include a function to change gravity'
        )
        self.world.setGravity([0, -9.8, 0])

        # Solver tuning: constraint-force mixing and error-reduction parameter
        self.world.setCFM(0.00001)
        self.world.setERP(0.1)

        #self.world.setContactSurfaceLayer(0.001)

        # Collision space holding the geoms that get tested against each other.
        # Argument: 0 = simple space (faster, less accurate), 1 = hash space.
        self.space = ode.Space(1)
        # Minimum incoming velocity required to produce a bounce
        self.minBounceVel = .2

        # Geoms are to collision detection what bodies are to dynamics: the
        # dynamics simulation only needs mass properties, while collision
        # detection needs the actual shape of the object.

        # Contact joints created on each collision live in this group
        self.jointGroup = ode.JointGroup()
        self.collisionList_idx = []
        self.contactJoints_idx = []
        self.contactObjects_idx = []
        # Non-collision joints (fixed joints, etc.)
        self.joints_jIdx = []

        # Step the simulation once per frame at physics priority
        vizact.onupdate(viz.PRIORITY_PHYSICS, self.stepPhysics)
コード例 #10
0
ファイル: path.py プロジェクト: rajeshnarasimha/arimmersion
 def runExperimentPathSet(self, pathNum, myTimeline=None):
     """Generator task: run path set *pathNum* and record tracking error.

     Spawns one walking person per path, plus the tracked "Abe" (top-hat)
     target, then yields until runPathSet completes.

     :param pathNum: index into self.pathSets / self.timelines
     :param myTimeline: optional timeline overriding self.timelines[pathNum]
     """
     global speedMultiplier
     # print() call form: same output under Python 2 for a single argument
     print(pathNum)
     print(len(self.pathSets))
     ps = self.pathSets[pathNum]
     # Identity comparison with None instead of '!= None'
     if myTimeline is not None:
         timeline = myTimeline
     else:
         timeline = self.timelines[pathNum]
     self.peopleset = []
     newPs = PathSet()
     # One walking person per path in the set
     for personPath in ps.peoplePaths:
         p = people.a_person(speedMultiplier)
         p.custom_walk(personPath.getFullPath())
         self.peopleset.append(p)
     # The top-hat person ("Abe") is the target whose error is tracked
     tophat = people.a_person(speedMultiplier, 1)
     self.abe = tophat
     tophat.custom_walk(ps.abePath.getFullPath())
     self.peopleset.append(tophat)
     self.starttime = viz.tick()
     self.errlist = []
     self.timelist = []
     # Check error against the target every frame (priority -10 runs early)
     error_timer = vizact.onupdate(-10, self.checkError, tophat)
     yield self.runPathSet(self.peopleset, newPs, tophat, 1, timeline)
     vizact.removeEvent(error_timer)
コード例 #11
0
ファイル: Interaction.py プロジェクト: tokola/GitHub_OLiVE
 def __init__ (self, win, PlayViewObj=None, device='RUMBLEPAD'):
     """Joystick interaction handler for one player window.

     :param win: window whose view this joystick controls
     :param PlayViewObj: owning player/view object (may be None)
     :param device: default device label; overwritten by the detection below
     """
     self.joystick = vizjoy.add()
     self.view = win.getView()
     self.window = win
     self.toolActive = 0
     self.moveCursor = False 
     # NOTE(review): filter constants assumed to be (min cutoff, max cutoff,
     # velocity range) per LowPassDynamicFilter's definition -- confirm.
     self.filter = LowPassDynamicFilter(0.5, 5, 10.0, 200.0)
     self.player = PlayViewObj
     #Call super class constructor to create different callbacks for every joystick
     viz.EventClass.__init__(self)
     self.callback(vizjoy.BUTTONDOWN_EVENT, self.joydown)
     self.callback(vizjoy.BUTTONUP_EVENT, self.joyup)
     self.callback(vizjoy.HAT_EVENT, self.joyhat)
     #decide the button actions based on the joystick
     self._updateFunc = vizact.onupdate(0, self.UpdateJoystick)
     if 'Rumblepad' in self.joystick.getName():
         self.device = 'RUMBLEPAD'
         self.actions = {'prev':[1,5], 'next':[3,6], 'pick':[2, 7, 8, 11, 12], 'drop':4, 'hud':9}
     elif 'XBOX' in self.joystick.getName().upper():
         self.device = 'XBOX'
         self.actions = {'prev':[3], 'next':[2], 'pick':[1, 9, 10, 5, 6], 'drop':4, 'hud':7}
         self.triggerActive = True   #False after trigger buttons are pressed
         #Create a callback to handle the expiring trigger (de)activation events
         self.callback(viz.TIMER_EVENT, self.timerExpire)
     else:
         # NOTE(review): self.actions is never assigned on this fallback
         # path, so later lookups will fail -- confirm intended behavior.
         self.device = 'XBOX'
         vizinput.message('Joystick not detected! Xbox will be used instead with limited functionality.')
コード例 #12
0
ファイル: gazeTools.py プロジェクト: bmj8778/CatchB
	def staticCalibrationMethod(self):
		"""Toggle the static (fixed-target) gaze calibration procedure.

		First call: place the purple calibration sphere at the first entry of
		self.calibrationPositions and start per-frame angular-error readout.
		Second call: remove everything and reset the counters.
		"""
		if ( self.calibrationInProgress == False ):
			self.calibrationBlockCounter += 100
			self.toggleRoomWallsVisibility()
			self.calibrationInProgress = True
			self.calibrationCounter = 0
			self.calibrationSphereRadius = self.initialValue
			self.calibrationSphere = vizshape.addSphere(self.calibrationSphereRadius, color = viz.PURPLE)
			self.calibrationSphere.emissive(viz.PURPLE)
			self.calibrationSphere.setParent(self.parentNode)
			# calibrationPositions is indexed [point, xyz] (2D array)
			self.setSphereRadius(self.parentNode.getPosition(viz.ABS_GLOBAL), self.calibrationPositions[self.calibrationCounter,:], 0)
			print 'FirstPos', self.calibrationPositions[self.calibrationCounter,:]
			newPos = [self.calibrationPositions[self.calibrationCounter,0], self.calibrationPositions[self.calibrationCounter,1], self.calibrationPositions[self.calibrationCounter,2]]
			self.calibrationSphere.setPosition(newPos[0], newPos[1], newPos[2],viz.ABS_PARENT)

			# On-sphere text readout, rendered only to the operator's window
			self.text_object = viz.addText('')
			self.text_object.setParent(self.calibrationSphere)
			self.text_object.renderOnlyToWindows([self.renderToWindows])
			self.localAction = vizact.onupdate(viz.PRIORITY_INPUT+1,self.calculateAngularError, self.cyclopEyeSphere.node3D, 0.0, self.text_object)#self.currentTrial.ballObj.node3D

			print 'Static Calibration Started'
		else:
			# Calibration already running: remove objects/actions and reset state
			self.calibrationInProgress = False
			self.calibrationCounter = 0
			self.calibrationSphere.remove()
			self.localAction.remove()
			self.text_object.remove()
			self.toggleRoomWallsVisibility()
			print 'Quit Static Calibration!!'
コード例 #13
0
ファイル: MirrorTouchCubes2.py プロジェクト: vhilab/thirdArm
def InitHMD():
	"""Start the display (real HMD or PPT simulator) and hook up avatar updates."""
	global collisionLimbBoxes
	if USE_HMD:
		pptExtension.setup()
		viz.go(viz.FULLSCREEN)
		viz.clip(0.1)
		vizact.onupdate(0, pptExtension.pptUpdate, avatar, ghostAvatar, collisionLimbBoxes)
		# Refresh the ghost avatar once per second; boxes only passed when present
		boxes = collisionLimbBoxes if collisionLimbBoxes else None
		vizact.ontimer(1, pptExtension.refreshAvatar, ghostAvatar, avatar, boxes)
	else:
		pptSimulation.setup(avatar)
		viz.clip(0.1)
		viz.go(viz.FULLSCREEN)
		vizact.onupdate(0, pptSimulation.pptUpdate, avatar, ghostAvatar, collisionLimbBoxes)
コード例 #14
0
	def toggleUpdate(self):
		"""Enable rendering of the gaze sphere and start its per-frame update."""
	
		def moveGazeSphere():
			# Per-frame: read the latest eye-tracker sample and reposition node3D
			
			gazeSamp = []
			
			#if( self.eye == viz.BOTH_EYE):
			gazeSamp = self.eyeTracker.getLastSample()
			
			# No sample available yet -> nothing to do this frame
			if( gazeSamp is None ):
				return
				
			#timestamp = gazeSamp.timestamp
			
			# Select the per-eye sub-sample when a specific eye is requested
			if( self.eye == viz.LEFT_EYE):
				gazeSamp = gazeSamp.leftEye;
			elif( self.eye == viz.RIGHT_EYE ):
				gazeSamp = gazeSamp.rightEye;
			
			
			#3D gaze is provided as a normalized gaze direction vector (gazeDirection) and a gaze base point (gazeBasePoint).
			#Gaze base point is given in mm with respect to the origin of the eyetracker coordinate system.
			# Note: you must flip X
			gazeDirXYZ = [ -gazeSamp.gazeDir_x, gazeSamp.gazeDir_y, gazeSamp.gazeDir_z]
			# NOTE(review): multiplying a plain Python list by a scalar is
			# sequence repetition (int) or TypeError (float); this only scales
			# per-component if sphereDistance is vector-like -- confirm types.
			gazePointXYZ = self.sphereDistance * gazeDirXYZ
			
			#with viz.cluster.MaskedContext(viz.CLIENT1):# show
			self.node3D.setPosition( gazePointXYZ,viz.ABS_PARENT)
			
			
		self.node3D.enable(viz.RENDERING)
		
		# Re-run moveGazeSphere every frame, just after input processing
		self.updateAct = vizact.onupdate(viz.PRIORITY_INPUT+1,moveGazeSphere)
コード例 #15
0
ファイル: gazeTools.py プロジェクト: bmj8778/CatchB
	def toggleUpdate(self):
		"""Enable rendering of the gaze sphere and start its per-frame update.

		Variant of toggleUpdate that reads gazeDirection.x/.y/.z attributes
		instead of flat gazeDir_* fields.
		"""
	
		def moveGazeSphere():
			# Per-frame: read the latest eye-tracker sample and reposition node3D
			
			gazeSamp = []
			
			#if( self.eye == viz.BOTH_EYE):
			gazeSamp = self.eyeTracker.getLastSample()
			
			# No sample available yet -> nothing to do this frame
			if( gazeSamp is None ):
				return
				
			#timestamp = gazeSamp.timestamp
			
			# Select the per-eye sub-sample when a specific eye is requested
			if( self.eye == viz.LEFT_EYE):
				gazeSamp = gazeSamp.leftEye;
			elif( self.eye == viz.RIGHT_EYE ):
				gazeSamp = gazeSamp.rightEye;
			
			
			#3D gaze is provided as a normalized gaze direction vector (gazeDirection) and a gaze base point (gazeBasePoint).
			#Gaze base point is given in mm with respect to the origin of the eyetracker coordinate system.
			# Note: you must flip X
			gazeDirXYZ = [ -gazeSamp.gazeDirection.x, gazeSamp.gazeDirection.y, gazeSamp.gazeDirection.z]
			# NOTE(review): multiplying a plain Python list by a scalar is
			# sequence repetition (int) or TypeError (float); this only scales
			# per-component if sphereDistance is vector-like -- confirm types.
			gazePointXYZ = self.sphereDistance * gazeDirXYZ
			
			#with viz.cluster.MaskedContext(viz.CLIENT1):# show
			self.node3D.setPosition( gazePointXYZ,viz.ABS_PARENT)
			
			
		self.node3D.enable(viz.RENDERING)
		
		# Re-run moveGazeSphere every frame, just after input processing
		self.updateAct = vizact.onupdate(viz.PRIORITY_INPUT+1,moveGazeSphere)
コード例 #16
0
ファイル: Robot.py プロジェクト: tokola/GitHub_OLiVE
	def setTracker(self,tracker,bodyTrackMode=BODY_TRACK_YAW,bodyDrag=True,bodyYawIncrement=0.0,bodyIncrementTime=1.0,priority=viz.PRIORITY_LINKS+1,flag=0):
		"""Set tracker for model.

		Passing tracker=None detaches tracking and resets head/body transforms.

		:param tracker: tracker node driving the model, or None to detach
		:param bodyTrackMode: BODY_TRACK_NONE or BODY_TRACK_YAW
		:param bodyDrag: use the dragging position-update strategy
		:param bodyYawIncrement: >0 enables incremental yaw following
		:param bodyIncrementTime: time constant for incremental yaw
		:param priority: update priority for the per-frame callback
		:param flag: flag passed to tracker.getEuler
		:raises ValueError: if bodyTrackMode is not recognized
		"""

		# Clear existing update func
		if self._updateFunc is not None:
			self._updateFunc.remove()
			self._updateFunc = None
		self._incrementFunc = None

		# Save internal state variables
		self._tracker = tracker
		self._trackerFlag = flag
		self._bodyTrackMode = bodyTrackMode
		self._bodyYawIncrement = bodyYawIncrement
		self._bodyIncrementTime = bodyIncrementTime
		# BUGFIX: only query the tracker when one was supplied; the original
		# called tracker.getPosition() before the None check below, which
		# raised AttributeError for the documented tracker=None case.
		self._bodyDragPos = tracker.getPosition() if tracker is not None else None

		# Reset root body orientation
		self.body_root.setQuat([0,0,0,1])

		# If no tracker, then reset body parts and return
		if tracker is None:
			m = viz.Matrix()
			self.head.setMatrix(m)
			self.body_root.setMatrix(m)
			self.body.setMatrix(m)
			return

		# Set position update function
		if bodyDrag:
			self._updatePosition = self._updatePositionDrag
		else:
			self._updatePosition = self._updatePositionNoDrag

		# Create update function callback
		if bodyTrackMode == BODY_TRACK_NONE:
			self._updateFunc = vizact.onupdate(priority,self._updateBodyTrackNone)
		elif bodyTrackMode == BODY_TRACK_YAW:
			if bodyYawIncrement > 0.0:
				# Seed incremental tracking from the tracker's current yaw
				yaw,pitch,roll = self._tracker.getEuler(self._trackerFlag)
				self._lastBodyYaw = yaw
				self.body.setEuler([yaw,0,0])
				self._updateFunc = vizact.onupdate(priority,self._updateBodyTrackYawIncrement)
			else:
				self._updateFunc = vizact.onupdate(priority,self._updateBodyTrackYaw)
		else:
			# Call-style raise works under both Python 2 and 3
			raise ValueError('bodyTrackMode value is not recognized: ' + str(bodyTrackMode))
コード例 #17
0
    def linkPose(self, node3d):
        """Capture this body's current pose and push it onto *node3d* each frame.

        Note: the matrix is built once here, not re-read every frame; the
        update action re-applies the same captured transform.

        :param node3d: node to receive the pose
        :return: the vizact update action (caller may .remove() it)
        """
        pose = viz.Matrix()
        pose.setQuat(self.getQuaternion())
        pose.setPosition(self.body.getPosition())

        # Re-apply the captured matrix on every update tick at link priority
        return vizact.onupdate(viz.PRIORITY_LINKS, node3d.setMatrix, pose)
コード例 #18
0
ファイル: fiveArms.py プロジェクト: vhilab/fiveArms
def InitHMD():
	"""Start the Vizard display and wire up the PPT avatar update callbacks."""
#BL:start
	global collisionLimbBoxes
	pptextension.setup()
	# Head tracking decides how the window is launched
	if some_global_variables.headTrackingActive:
		viz.go(viz.HMD | viz.TRACKER)
	else:
		viz.go()
	viz.window.setSize(1260, 950)
	#viz.go(viz.FULLSCREEN)
#BL:end
	viz.clip(0.1)
	vizact.onupdate(0, pptextension.pptUpdate, some_global_variables.avatar, some_global_variables.ghostAvatar, collisionLimbBoxes)
	# Refresh the ghost avatar once per second; boxes only passed when present
	boxes = collisionLimbBoxes if collisionLimbBoxes else None
	vizact.ontimer(1, pptextension.refreshAvatar, some_global_variables.ghostAvatar, some_global_variables.avatar, boxes)
コード例 #19
0
ファイル: Machinery.py プロジェクト: tokola/GitHub_OLiVE
	def SetMotion (self):
		"""Start the oil pump: play its audio and spin the wheel and crankshaft."""
		self.PlayAudio('pump_oil')
		self.wheelP.addAction(vizact.spin(1, 0, 0, 90, viz.FOREVER))
		self.crankshaft.addAction(vizact.spin(1, 0, 0, 152, viz.FOREVER))
		try:
			# Re-enable the per-frame update when it already exists...
			self.capPrevYPos = self.cap.getPosition(viz.ABS_GLOBAL)[1]
			self._updateFunc.setEnabled(viz.ON)
		except AttributeError:
			# ...otherwise create it on first activation
			self._updateFunc = vizact.onupdate(0, self.update)
コード例 #20
0
ファイル: Machinery.py プロジェクト: tokola/GitHub_OLiVE
	def Start (self):
		"""Start the steam engine: play its audio and spin engine, governor and spool."""
		self.PlayAudio('steam_engine')
		self.engine_system.addAction(vizact.spin(0, 0, 1, 90, viz.FOREVER))
		self.watt.addAction(vizact.spin(0, 0, 1, 90, viz.FOREVER))
		self.spool.addAction(vizact.spin(0, 1, 0, 90, viz.FOREVER))
		try:
			# Re-enable the per-frame update when it already exists...
			self.capPrevZPos = self.cap.getPosition(viz.ABS_GLOBAL)[2]
			self._updateFunc.setEnabled(viz.ON)
		except AttributeError:
			# ...otherwise create it on first activation
			self._updateFunc = vizact.onupdate(0, self.update)
コード例 #21
0
ファイル: Window.py プロジェクト: tokola/GitHub_OLiVE
	def __init__(self, view=None, win=None, winPos=[], player=None, fact=None, data=None, sm=None, fmap=None, lang=None):
		"""Create either a player window (player in 1..3) or the map window.

		:param view: existing view, or None to create one
		:param win: existing window, or None to create one
		:param winPos: window position (NOTE(review): mutable default [] is
			shared across calls -- confirm it is never mutated)
		:param player: player number 1-3, or anything else for the map view
		:param fact: factory object (player windows only)
		:param data: per-player dict with at least 'name' and 'pos'
		:param sm: FSM with all machine states
		:param fmap: map window storing alerts and messages
		:param lang: language key for tooltips/panel text
		"""
		if view == None:
			view = viz.addView()
		self._view = view
		if win == None:
			win = viz.addWindow()
		self._window = win
		self._window.fov(60)
		self._window.setSize(.5,.5)
		self._window.setPosition(winPos)
		self._window.setView(self._view)
		self._size = self._window.getSize(viz.WINDOW_ORTHO)
		self._player = player
		self.LoadToolTips(lang)
		#check if this is a player window
		if player in [1,2,3]:
			self.PLAYERS[player] = self	#list of all player instances (used by FSM_Actions)
			self._name = data['name']
			self._view.setPosition(data['pos'])
			self._view.stepSize(0.1)
			self._view.collisionBuffer(0.25)
			self._view.getHeadLight().disable()
			self._window.clearcolor(viz.SKYBLUE)
			self.AddPanel(lang)
			#reset other variables
			self._toolbox = OrderedDict()
			self._selected = None	#object name being currently held/selected
			self._holding = None	#object being currently held/selected
			self._picking = None	#object name being intersected by cursor
			self._iMach = None		#machine interacting with (one of its components)
			self._nearMachine = None#machine being near to (based on proximity)
			self._updateAlerts = []	#a list of tuples (machine, error) for checking the alert update
			self._factory = fact	#factory object
			self.AddToToolbox('hand')
			self._fsm = sm			#FSM with all machine states
			self._mapWin = fmap		#the map (storing all alerts and messages)
			self._pressed = False	#True is player presses a button
			self._pickcolor = viz.GREEN
			self._feedback = None	#feedback message as result of interaction (not in FSM)
			self._iLog = []			#for logging picked and dropped tools from inventory
			self._proxLog = []		#for logging proximity to machines (enter, exit)
			self._pLog = []			#for logging position data
			self._collabAction = ''	#stores the collab action in 1P mode
			#set an update function to take care of window resizing (priority=0)
			self._updateFunc = vizact.onupdate(0, self.Update)
			#FSM_Actions.FSM_Actions.__init__(self)
		else:	#this is the map view
			self._window.clearcolor([.3,.3,.3])
			self._window.ortho(-25,25,-15,20,-1,1)
			self._view.setPosition(-3.8,0,0)
			self._view.setEuler(0,90,0)
			self._alerts = {}
			self._messages = OrderedDict()
			self.AddMap()
コード例 #22
0
ファイル: controlAppendages.py プロジェクト: vhilab/thirdArm
def InitHMD():
    """Launch the display (real HMD or PPT simulator) and register avatar callbacks."""
    # BL:start
    global collisionLimbBoxes, isLeftHanded
    if USE_HMD:
        viz.go(viz.HMD | viz.TRACKER)
        viz.window.setSize(1260, 950)
        pptExtension.setup(isLeftHanded)
        # viz.go(viz.FULLSCREEN)
        # BL:end
        viz.clip(0.1)
        vizact.onupdate(0, pptExtension.pptUpdate, avatar, ghostAvatar, collisionLimbBoxes)
        # Refresh the ghost avatar once per second; boxes only passed when present
        boxes = collisionLimbBoxes if collisionLimbBoxes else None
        vizact.ontimer(1, pptExtension.refreshAvatar, ghostAvatar, avatar, boxes)
    else:
        pptSimulation.setup(avatar)
        viz.clip(0.1)
        viz.go(viz.FULLSCREEN)
        vizact.onupdate(0, pptSimulation.pptUpdate, avatar, ghostAvatar, collisionLimbBoxes)
コード例 #23
0
ファイル: visEnv.py プロジェクト: YanjieGao/ExperimentBase
 def toggleUpdatePhysWithVis(self):
     """Toggle per-frame copying of the visual pose onto the physics body."""
     
     #self.removeUpdateAction()
     
     if( self.updatingPhysWithVis == False ):
         self.updatingPhysWithVis = True
         # Apply vis -> phys at the earliest update priority each frame
         self.applyVisToPhysAction = vizact.onupdate(viz.PRIORITY_FIRST_UPDATE, self.applyVisToPhys)
         
     else:
         self.updatingPhysWithVis = False
         # NOTE(review): this removes self.updateAction, not the
         # applyVisToPhysAction created above -- confirm which action is
         # meant to be cancelled here.
         self.updateAction.remove()
コード例 #24
0
    def toggleUpdatePhysWithVis(self):
        """Toggle per-frame copying of the visual pose onto the physics body."""

        # self.removeUpdateAction()

        if not self.updating_phys_with_vis:
            self.updating_phys_with_vis = True
            # Apply vis -> phys at the earliest update priority each frame
            self.apply_vis_to_phys_action = vizact.onupdate(viz.PRIORITY_FIRST_UPDATE, self.applyVisToPhys)

        else:
            self.updating_phys_with_vis = False
            # NOTE(review): this removes self.update_action, not the
            # apply_vis_to_phys_action created above -- confirm which action
            # is meant to be cancelled here.
            self.update_action.remove()
コード例 #25
0
ファイル: MirrorTouchCubes3.py プロジェクト: vhilab/thirdArm
def InitHMD():
	"""Start the display: HMD + PPT tracking when USE_HMD is set, otherwise a
	fullscreen desktop simulation.

	Relies on module globals: USE_HMD, avatar, ghostAvatar, collisionLimbBoxes.
	"""
	global collisionLimbBoxes
	if(USE_HMD):
#BL:start		
		viz.go(viz.HMD | viz.TRACKER)
		viz.window.setSize(1260, 950)		
		pptExtension.setup();
		#viz.go(viz.FULLSCREEN)
#BL:end
		# Near clip plane at 10 cm.
		viz.clip(0.1)
		# Per-frame PPT update with the avatars and collision limb boxes.
		vizact.onupdate(0, pptExtension.pptUpdate, avatar, ghostAvatar, collisionLimbBoxes);
		# Once-per-second avatar refresh; pass None when no boxes were built.
		if(len(collisionLimbBoxes) > 0):
			vizact.ontimer(1, pptExtension.refreshAvatar, ghostAvatar, avatar, collisionLimbBoxes);
		else:
			vizact.ontimer(1, pptExtension.refreshAvatar, ghostAvatar, avatar, None);
	else:
		# No HMD available: simulated tracking in a fullscreen window.
		pptSimulation.setup(avatar);
		viz.clip(0.1)
		viz.go(viz.FULLSCREEN);
		vizact.onupdate(0, pptSimulation.pptUpdate, avatar, ghostAvatar, collisionLimbBoxes);
コード例 #26
0
    def __init__(self, tracker):
        """Group node that follows *tracker*, re-synced once per frame."""
        root = viz.addGroup()
        viz.VizNode.__init__(self, root.id)

        self._tracker = tracker
        self._offset = viz.Vector()
        self._velocity = 0.0

        # Keep this node in sync with the tracker every frame.
        self._updater = vizact.onupdate(0, self.update)
コード例 #27
0
	def __init__(self,tracker):
		"""Tracker-following group node, refreshed on every frame."""
		node = viz.addGroup()
		viz.VizNode.__init__(self,node.id)

		self._tracker = tracker
		self._offset = viz.Vector()
		self._velocity = 0.0

		# Per-frame re-sync with the tracker.
		self._updater = vizact.onupdate(0,self.update)
コード例 #28
0
ファイル: appendagemanager.py プロジェクト: vhilab/thirdArm
def setControl(selectedControlSchema, isLeftHanded = 0):
	"""Assign SteamVR controllers to the two hand trackers and register the
	control schemes.

	:param selectedControlSchema: index of the control scheme to activate
	:param isLeftHanded: 0 = right-handed user (default), 1 = left-handed
	"""
	global isCube1, isCube2
	# Load the Intersense plugin. The trackers themselves now come from
	# SteamVR; this is kept for the plugin's load side effect.
	isense = viz.add('intersense.dle')

	# isCube1 drives the dominant hand: controller 1 for right-handed users,
	# controller 0 for left-handed users (and vice versa for isCube2).
	controllers = steamvr.getControllerList()
	if isLeftHanded == 0:
		isCube1, isCube2 = controllers[1], controllers[0]
	else:
		isCube1, isCube2 = controllers[0], controllers[1]

	# Expose the trackers to the PPT extension module.
	pptextensionDK2.isCube1 = isCube1
	pptextensionDK2.isCube2 = isCube2

	global functionPointerForBimanualControl, functionPointerForUnimanualControl, functionPointerForHeadControl
	# Register all three control schemes every frame; setControlSchema()
	# selects which one is active via controlSchemaCounter.
	functionPointerForBimanualControl = vizact.onupdate(0, controlWithBimanualWristRotation)
	functionPointerForUnimanualControl = vizact.onupdate(0, controlWithUnimanualWristRotation)
	functionPointerForHeadControl = vizact.onupdate(0, controlWithHeadRotation)
	global controlSchemaCounter
	controlSchemaCounter = selectedControlSchema
	setControlSchema()
コード例 #29
0
    def steamvr_setup_vm2(self, right_hand_object, head_object):
        """Set up the SteamVR HMD and trackers, streaming their 6-DOF over LSL.

        :param right_hand_object: object updated from the first (hand) tracker
        :param head_object: object updated from the HMD pose
        :raises SystemExit: if no SteamVR HMD is detected
        """
        hmd = steamvr.HMD()
        if not hmd.getSensor():
            sys.exit('SteamVR HMD not detected')
        viz.link(hmd.getSensor(), viz.MainView)

        # LSL stream for MoBIlab pos/ori analysis: pos (x,y,z) + quaternion (x,y,z,w)
        hmd_stream = self.create_non_phasespace_rigid_body_stream(
            'headRigid', 0)
        vizact.onupdate(18, self.update_and_push_rigid_body, viz.MainView,
                        head_object, hmd_stream)

        #  connecting present controllers
        trackers = steamvr.getTrackerList()
        self.controller = steamvr.getControllerList()[0]
        # NOTE(review): assumes at most two trackers (hand, torso); a third
        # tracker would raise IndexError on tracker_names -- confirm.
        tracker_names = ['handRigid', 'torsoRigid']

        for idx, tracker in enumerate(trackers):
            # one LSL stream per tracker, same pos + quaternion layout
            tracker_stream = self.create_non_phasespace_rigid_body_stream(
                tracker_names[idx], 0)

            print(tracker.getData())
            print(tracker.getPosition())

            # The first tracker is the hand and drives right_hand_object;
            # any other tracker is streamed without a linked object.
            target = right_hand_object if idx == 0 else None
            vizact.onupdate(19, self.update_and_push_rigid_body,
                            tracker, target, tracker_stream)
コード例 #30
0
ファイル: main.py プロジェクト: YanjieGao/ExperimentBase
	def __init__(self):
		"""Event-flag bookkeeping for the experiment loop.

		Event flag codes (written elsewhere, reset to 0 every frame):
		  1 ball launched
		  2 * not used *
		  3 ball has hit floor
		  4 ball has hit paddle
		  5 ball has hit back wall
		  6 trial end
		  7 block end
		"""
		viz.EventClass.__init__(self)
		
		self.status = 0
		# Frame number of the most recent flag update.
		self.lastFrameUpdated = viz.getFrameNumber()
		self.currentValue = 0
		
		# On every frame, self.eventFlag should be set to 0.
		# This must run first, before any timer object has the chance to overwrite it!
		vizact.onupdate(viz.PRIORITY_FIRST_UPDATE,self._resetEventFlag)
コード例 #31
0
	def startStopWritingToFile(self):
		
		#If not already recording, setup a record event on every frame
		if not self.recording:
			self.flag_rightFootCollision = False
			self.flag_leftFootCollision = False
			self.recording = vizact.onupdate(0, self.writeToFile)
			print "Record start"
			
		#Else disable record event and set the record flag to none	
		else:
			self.recording.remove()
			self.recording = None
コード例 #32
0
	def __init__(self,**kw):
		"""Wire-frame frustum visualization node."""
		root = viz.addGroup(**kw)
		viz.VizNode.__init__(self,root.id)

		# Wireframe quad marking one face of the frustum.
		self._quad = vizshape.addQuad([1,1],parent=self)
		self._quad.color(viz.RED)
		self._quad.polyMode(viz.POLY_WIRE)

		# Eight dynamic line vertices forming the frustum edges.
		viz.startLayer(viz.LINES)
		viz.vertexColor(viz.RED)
		for _ in range(8):
			viz.vertex([0,0,0])
		self._lines = viz.endLayer(parent=self)
		self._lines.dynamic()

		# Render tweaks: pull lines forward, thicken them, no lighting.
		self.zoffset(-2)
		self.lineWidth(2)
		self.disable(viz.LIGHTING)

		self._wall = None
		self._tracker = None

		# Recompute the frustum after everything else has updated.
		vizact.onupdate(viz.PRIORITY_LAST_UPDATE,self.updateFrustum)
コード例 #33
0
    def setContrastMode(self, mode, min=0.02):
        """Install a per-frame action updating contrast in the given mode.

        :param mode: one of lin_dist, inv_dist, exp_dist, cos2angle,
                     cos2gaze, bearing_vel, elevation_acc
        :param min: minimum contrast floor
        :return: None
        """
        self.updateInitialDist()
        self.contrast_mode = mode

        # Replace any previously installed contrast action.
        previous = self.contrast_action
        if previous:
            previous.remove()
        self.contrast_action = vizact.onupdate(viz.PRIORITY_DEFAULT, self.updateContrast, mode, min)
コード例 #34
0
	def linkObjectsUsingMocap(self):
			
			self.headTracker = vizconnect.getRawTracker('head_tracker')
			self.mocap.start_thread()
			
			trackerDict = vizconnect.getTrackerDict()
			
			if( 'rift_tracker' in trackerDict.keys() ):
				
				self.UpdateViewAct = vizact.onupdate(viz.PRIORITY_LINKS, self.updateHeadTracker)
				
			else:
				print '*** Experiment:linkObjectsUsingMocap: Rift not enabled as a tracker'
				return
コード例 #35
0
ファイル: visEnv.py プロジェクト: YanjieGao/ExperimentBase
 def toggleUpdateWithMarker(self):
     
     self.removeUpdateAction()
         
     if( self.markerNumber > -1  ):
         
         if( self.updatingWithMocap == False ):
             #print 'Now updating with mocap'
             self.updatingWithMocap = True
             self.updateAction = vizact.onupdate(viz.PRIORITY_FIRST_UPDATE, self.applyMarkerToVis)
         else:
             self.updatingWithMocap = False
             self.updateAction.remove()
     else:
         self.updateAction = 0
         print 'No marker defined'
コード例 #36
0
    def toggleUpdateWithMarker(self):

        self.removeUpdateAction()

        if self.marker_number > -1:

            if not self.updating_with_mocap:
                # print 'Now updating with mocap'
                self.updating_with_mocap = True
                self.update_action = vizact.onupdate(viz.PRIORITY_FIRST_UPDATE, self.applyMarkerToVis)
            else:
                self.updating_with_mocap = False
                self.update_action.remove()
        else:
            self.update_action = 0
            print 'No marker defined'
コード例 #37
0
ファイル: visEnv.py プロジェクト: YanjieGao/ExperimentBase
 def toggleUpdateWithPhys(self):
     
     self.removeUpdateAction()
     
     # Create a physNode
     if( self.physNode == 0 ):
         self.enablePhysNode();
     
     # Update with physics    
     if( self.updatingWithPhys == False ):
         print 'Now updating with physics'
         self.updatingWithPhys = True
         self.updateAction = vizact.onupdate(viz.PRIORITY_FIRST_UPDATE, self.applyPhysToVis)
         #self.physNode.enableMovement()
     else:
         print 'No longer updating with physics'
         self.updatingWithPhys = False
         self.physNode.disableMovement() #If you don't disble the physics component, it will keep on moving in the physworld
コード例 #38
0
    def toggleUpdateWithPhys(self):

        self.removeUpdateAction()

        # Create a physNode
        if self.phys_node == 0:
            self.enablePhysNode()

        # Update with physics    
        if not self.updating_with_phys:
            print 'Now updating with physics'
            self.updating_with_phys = True
            self.update_action = vizact.onupdate(viz.PRIORITY_FIRST_UPDATE, self.applyPhysToVis)
            self.phys_node.enableMovement()
        else:
            print 'No longer updating with physics'
            self.updating_with_phys = False
            self.phys_node.disableMovement()
コード例 #39
0
	def __init__(self, path = None, playback_speed = 1.0, start = True):
		"""Animation-path playback helper.

		:param path: animation path to play
		:param playback_speed: speed factor (1.0 = real time)
		:param start: begin playing immediately when True
		"""
		self._path = path
		self._playback_direction = 1			# 1 = forward, -1 = backward
		self._playback_speed = playback_speed
		self._stop = not start				# True while playback is paused
		self._last_frame_time = viz.getFrameTime()	# frame time of last update
		# Per-frame driver for the playback.
		self._on_update = vizact.onupdate(0, self._onUpdate)
コード例 #40
0
ファイル: visEnv.py プロジェクト: YanjieGao/ExperimentBase
    def toggleUpdateWithRigid(self):
        
        self.removeUpdateAction()
                
        if( self.rigidBodyFile ):
            if( self.updatingWithMocap == 0 ):
                print 'Now updating with mocap'
                
                self.updatingWithMocap = True
                self.updateAction = vizact.onupdate(viz.PRIORITY_FIRST_UPDATE, self.applyRigidToVis)
                
#                if( self.physNode ):
#                    self.applyVisToPhysAction = vizact.onupdate(viz.PRIORITY_FIRST_UPDATE, self.applyVisToPhys)
                
            else:
                print 'Not updating with mocap'
                self.updatingWithMocap = False
                self.updateAction.remove()
        else:
            self.updateAction = 0
            print 'No phyNode defined'  
コード例 #41
0
ファイル: gazeTools.py プロジェクト: bmj8778/CatchB
	def toggleUpdate(self):
		"""Install a per-frame action that aims self.gazeLine along the most
		recent 3D gaze direction from the eye tracker.

		NOTE(review): despite the name this does not toggle -- every call
		installs an additional update action; confirm callers invoke it once.
		"""
		def moveGazeVector():
			
			gazeSamp = []
			
			gazeSamp = self.eyeTracker.getLastSample()
			
			# No sample available yet: keep the previous gaze line.
			if( gazeSamp is None ):
				return
				
			# Select the per-eye record for the configured eye.
			if( self.eye == viz.LEFT_EYE):
				gazeSamp = gazeSamp.leftEye;
			elif( self.eye == viz.RIGHT_EYE ):
				gazeSamp = gazeSamp.rightEye;
			
			
			#3D gaze is provided as a normalized gaze direction vector (gazeDirection) and a gaze base point (gazeBasePoint).
			#Gaze base point is given in mm with respect to the origin of the eyetracker coordinate system.
			# Note: you must flip X
			# NOTE(review): viewPos_XYZ and pupilPos_XYZ are computed but never
			# used below -- TODO confirm before removing.
			viewPos_XYZ = np.array(viz.MainView.getPosition(), dtype = float)
			gazeDir_XYZ = np.array([ -gazeSamp.gazeDirection.x, gazeSamp.gazeDirection.y, gazeSamp.gazeDirection.z], dtype = float)
			pupilPos_XYZ = [-gazeSamp.pupilPosition.x, gazeSamp.pupilPosition.y, gazeSamp.pupilPosition.z]
			pupilPos_XYZ = np.divide(pupilPos_XYZ, 1000)

			# Gaze line from the parent origin along the gaze direction vector.
			gazePoint_XYZ = [gazeDir_XYZ[0], gazeDir_XYZ[1], gazeDir_XYZ[2]]
			
			self.gazeLine.setVertex(0, 0, 0, viz.ABS_PARENT)
			self.gazeLine.setVertex(1, gazePoint_XYZ[0], gazePoint_XYZ[1], gazePoint_XYZ[2], viz.ABS_PARENT)
			
		# Run just after input devices have updated.
		self.updateAct = vizact.onupdate(viz.PRIORITY_INPUT+1,moveGazeVector)
コード例 #42
0
	def toggleUpdate(self):
		"""Install a per-frame action that aims self.gazeLine along the most
		recent 3D gaze direction from the eye tracker (flat-field sample API).

		NOTE(review): despite the name this does not toggle -- every call
		installs an additional update action; confirm callers invoke it once.
		"""
		def moveGazeVector():
			
			gazeSamp = []
			
			gazeSamp = self.eyeTracker.getLastSample()
			
			# No sample available yet: keep the previous gaze line.
			if( gazeSamp is None ):
				return
				
			# Select the per-eye record for the configured eye.
			if( self.eye == viz.LEFT_EYE):
				gazeSamp = gazeSamp.leftEye;
			elif( self.eye == viz.RIGHT_EYE ):
				gazeSamp = gazeSamp.rightEye;
			
			
			#3D gaze is provided as a normalized gaze direction vector (gazeDirection) and a gaze base point (gazeBasePoint).
			#Gaze base point is given in mm with respect to the origin of the eyetracker coordinate system.
			# Note: you must flip X
			# NOTE(review): viewPos_XYZ and pupilPos_XYZ are computed but never
			# used below -- TODO confirm before removing.
			viewPos_XYZ = np.array(viz.MainView.getPosition(), dtype = float)
			gazeDir_XYZ = np.array([ -gazeSamp.gazeDir_x, gazeSamp.gazeDir_y, gazeSamp.gazeDir_z], dtype = float)
			pupilPos_XYZ = [-gazeSamp.pupilPos_x, gazeSamp.pupilPos_y, gazeSamp.pupilPos_z]
			pupilPos_XYZ = np.divide(pupilPos_XYZ, 1000)

			# Gaze line from the parent origin along the gaze direction vector.
			gazePoint_XYZ = [gazeDir_XYZ[0], gazeDir_XYZ[1], gazeDir_XYZ[2]]
			
			self.gazeLine.setVertex(0, 0, 0, viz.ABS_PARENT)
			self.gazeLine.setVertex(1, gazePoint_XYZ[0], gazePoint_XYZ[1], gazePoint_XYZ[2], viz.ABS_PARENT)
			
		# Run just after input devices have updated.
		self.updateAct = vizact.onupdate(viz.PRIORITY_INPUT+1,moveGazeVector)
コード例 #43
0
ファイル: Avatar.py プロジェクト: tokola/GitHub_OLiVE
	def __init__ (self, view, color, eyeH):
		"""Steve avatar bound to *view*.

		color is a pair of 0-255 RGB triples (body, shade); eyeH is the
		viewpoint eye height in meters.
		"""
		self._view = view
		self._avatar = Robot.Steve()
		self._avatar.disable([viz.INTERSECTION, viz.PICKING], op=viz.OP_ROOT)
		self._avatar.head.setScale([1.5,1.5,1.5])
		self._avatar.body_root.setScale([1.5,1.5,1.5])
		# Convert the 0-255 RGB triples to 0-1 floats.
		body_rgb = [float(c)/255 for c in color[0]]
		shade_rgb = [float(c)/255 for c in color[1]]
		self._avatar.setBodyColor(body_rgb)
		self._avatar.setShadeColor(shade_rgb)
		# Follow the view, offset downward so the body sits below the eyes.
		self._avatar.setTracker(viz.link(self._view, viz.NullLinkable,offset=[0,-0.25,0]))
		self._view.collision(viz.ON)
		self._view.eyeheight(eyeH)
		# 2D icon representing this player on the map view.
		self._mapAva = viz.addTexQuad(size=.75)
		self._mapAva.texture(viz.addTexture('textures/mapAva_icon.png'),'',0)
		self._mapAva.color(body_rgb)
		self._updateFunc = vizact.onupdate(0, self.UpdatePlayer)
コード例 #44
0
    def __init__(self,
                 frame_weight=0.5,
                 aperture_scale=0.5,
                 node=None,
                 **kwargs):
        """Render-to-texture view accumulator camera.

        :param frame_weight: blend weight passed to the view projector
        :param aperture_scale: aperture scale passed to the view projector
        :param node: scenegraph node to attach to (a new group if None)
        """
        # node to reference this instance in the scenegraph
        self._node = node
        if self._node == None:
            self._node = viz.addGroup()

        viz.VizNode.__init__(self, id=self._node.id, **kwargs)

        # Create two render textures which we will swap between when updating
        self._output_texture = viz.addRenderTexture()
        self._last_frame_texture = viz.addRenderTexture()

        ### INIT CAMERA
        # Create render node for camera
        self._cam = viz.addRenderNode(size=(1024, 1024))
        self._cam.renderOnlyToWindows([viz.MainWindow])
        self._cam.setInheritView(False)
        self._cam.drawOrder(1000)
        self._cam.setFov(90.0, 1.0, 0.1, 1000.0)
        # Only render once per frame, in case stereo is enabled
        self._cam.setRenderLimit(viz.RENDER_LIMIT_FRAME)
        # set the starting render textures for output and input
        self._cam.setRenderTexture(self._output_texture)
        self._cam.texture(self._last_frame_texture, unit=4)
        # link camera to capture
        viz.link(self._node, self._cam)

        # affect camera so its render texture will be computed using the defined shading pipeline
        self._projector = ViewProjector()
        self._projector.setFrameWeight(frame_weight)
        self._projector.setApertureScale(aperture_scale)
        self._projector.affect(self._cam)

        # Accumulate on every frame (runs at priority 100).
        self._update_event = vizact.onupdate(100, self.update)
コード例 #45
0
    def __init__(self, start=True, node=None, **kwargs):
        """Per-frame animation data recorder.

        :param start: begin capturing immediately when True
        :param node: scenegraph node to attach to (a new group if None)
        """
        # node to reference this instance in the scenegraph
        self._node = node
        if self._node is None:
            self._node = viz.addGroup()

        viz.VizNode.__init__(self, id=self._node.id, **kwargs)

        # container to save recorded data
        self._data = []

        # flag that states if animation path capture is currently stopped
        self._stop = not start

        # start time
        self._start_time = -1

        # average fps of the recording
        self._avg_fps = -1.0

        # BUG FIX: the event handle was previously stored as self._onUpdate,
        # clobbering the _onUpdate callback method on this instance. Store it
        # under a separate attribute name instead.
        self._update_event = vizact.onupdate(0, self._onUpdate)
コード例 #46
0
	def __setupTracking__(self):
		"""Wire up mocap tracking: CAVE head tracking plus both feet."""
		#get mocap interface
		self.mocap = Connector.getMocapInterface()

		#Get the shutter-glass rigid body
		self.shutterGlass = self.mocap.returnPointerToRigid(Connector.SHUTTERGLASSES)
		self.head_tracker = viz.link(self.shutterGlass.vizNode,viz.NullLinkable,srcFlag=viz.ABS_PARENT)
		self.cave.setTracker(self.head_tracker)

		# Create cave view
		cave_origin = vizcave.CaveView(self.head_tracker)
		
		#In case you want to override & translate the physical location of cave, uncomment this
		
		'''
		origin_tracker = viztracker.KeyboardMouse6DOF()
		origin_link = viz.link(origin_tracker, cave_origin)
		origin_link.setMask(viz.LINK_POS)
		'''
		
		#Foot rigidBodies
		leftFootRigid = self.mocap.returnPointerToRigid(Connector.L_FOOT)
		rightFootRigid = self.mocap.returnPointerToRigid(Connector.R_FOOT)
		
		#Foot visuals, make them 100% transparent when not in debug mode
		# NOTE(review): neither branch is fully invisible (alpha 0.025 vs
		# 0.0075) despite the comment above -- confirm intent.
		self.leftFootViz = vizshape.addBox(size=(.2,.2,.2))
		self.rightFootViz = vizshape.addBox(size=(.2,.2,.2))
		
		if(self.debug):
			self.leftFootViz.alpha(0.025)
			self.rightFootViz.alpha(0.025)
		else:
			self.leftFootViz.alpha(0.0075)
			self.rightFootViz.alpha(0.0075)
		
		#Foot class objects
		self.leftFoot = Foot(mocapFootObj = leftFootRigid, name="Left")
		self.rightFoot = Foot(mocapFootObj = rightFootRigid, name="Right")
		
		#setup buffer updates (per-frame) and collision callback
		vizact.onupdate(0, self.leftFoot.populateNextBufferElement)
		vizact.onupdate(0, self.rightFoot.populateNextBufferElement)
		vizact.onupdate(0, self.updateBooties)
		viz.callback(viz.COLLIDE_BEGIN_EVENT, self.collideDetected)
コード例 #47
0
def validation():
	'''
	Show the same calibration points and compare the calculated gaze point to
	ground truth. (Displays both + angular error.)
	'''
	
	# ask for the sub port
	req.send_string('SUB_PORT')
	sub_port = req.recv_string()

	# open a sub port to listen to pupil
	sub = ctx.socket(zmq.SUB)
	sub.connect("tcp://{}:{}".format(addr, sub_port))
	sub.setsockopt_string(zmq.SUBSCRIBE, u'gaze')

	# add gaze marker
	m_gaze = vizshape.addSphere(radius=sphere_size, color=viz.GREEN)
	m_gaze.disable(viz.INTERSECTION)
	if not gaze_marker_visible:
		m_gaze.disable(viz.RENDER) # invisible but physically present

	def get_gaze(eye_number):
		'''
		checks gaze stream for confident measures of the eye (eye_number) until it finds them
		Args:    eye_number (int): 1=left, 0=right
		Returns: [x, y] (float): normalized x and y values range [0,1]
		'''
		while True:
			topic = sub.recv_string() # unused
			msg = loads(sub.recv(), encoding='utf-8')
			# accept only confident samples for the requested eye
			if msg['id'] == eye_number and msg['confidence'] > confidence_level:
				return [msg['norm_pos'][0], msg['norm_pos'][1]]

	def updateGaze():
		'''
		calls get_gaze and averages the two eyes' normal values; the result
		projects a sphere onto where the subject looks
		'''
		# get gaze data
		norm_pos_x = np.mean([get_gaze(1)[0], get_gaze(0)[0]])
		norm_pos_y = np.mean([get_gaze(1)[1], get_gaze(0)[1]])

		# find the intersection and project sphere
		line = viz.MainWindow.screenToWorld([norm_pos_x, norm_pos_y])
		intersection = viz.intersect(line.begin, line.end)
		m_gaze.setPosition(intersection.point)

	# update cursor location on every sample
	vizact.onupdate(viz.PRIORITY_LINKS+1, updateGaze)

	dot_norm_pos=[]
	gaze_norm_pos=[]
	ang_error = []

	yield showMessage('Zum Starten der Validierung die Leertaste drücken')

	# BUG FIX: first_run used to be (re)set to True inside the loop below, so
	# a new projection plane was created for EVERY calibration point, stacking
	# planes in the scene. Initialize it once, before the loop.
	first_run = True

	for p in norm_positions:
		
		print('calibration point: ', p)
	
		norm_x = p[0]
		norm_y = p[1]

		if first_run:
			first_run = False

			# set up a plane 'right in front of user' to project the dots onto:
			# target the current center of view...
			p_line = viz.MainWindow.screenToWorld(.5, .5)
			# ...and end the line at the configured depth
			p_line_2 = viz.Line(begin=p_line.begin, end=p_line.end, dir=p_line.dir, length=depth)

			# add the plane, orient it like the viewpoint, move it to depth
			plane = vizshape.addBox(size=[3.6, 2, .1])
			plane.setMatrix(viz.MainView.getMatrix())
			plane.setPosition(p_line_2.end)
			plane.color([.25,.25,.25])
			plane.alpha(.95)

			# Lock it to user
			plane_link = viz.grab(viz.MainView, plane)

		# interpolate a line from norm values to 3d coordinates
		line = viz.MainWindow.screenToWorld([norm_x, norm_y])
		intersection = viz.intersect(line.begin, line.end)

		# place a dot (at depth level of line) and lock it to the user
		dot = vizshape.addSphere(radius=sphere_size)
		dot.setPosition(intersection.point)
		view_link = viz.grab(viz.MainView, dot)

		print('ready')
		viz.playSound('beep500_200.wav')
		yield viztask.waitKeyDown(' ')

		# sample ~1 second of gaze data at 60 Hz for this point
		for s in range(60):
			# current pupil time (pupil uses CLOCK_MONOTONIC with adjustable timebase)
			t = get_pupil_timestamp()

			dot_norm_pos.append(p)
			gaze_norm_pos.append(viz.MainWindow.worldToScreen(m_gaze.getPosition()))

			yield viztask.waitTime(1/60.)
			print(t)

		dot.color(viz.RED)

		print('waiting for next position...')

		yield viztask.waitKeyDown(' ')

		yield dot.remove()

	time.sleep(2)

	showMessage('validation done!')

	for p in norm_positions:

		# mean measured gaze position over this point's 60-sample chunk
		i = norm_positions.index(p)
		chunk = range(i*60, (i+1)*60)
		print(i)
		dmx = np.mean([gaze_norm_pos[x][0] for x in chunk])
		dmy = np.mean([gaze_norm_pos[x][1] for x in chunk])

		# ground-truth line vs. measured-gaze line
		line = viz.MainWindow.screenToWorld([p[0], p[1]])
		line_v = viz.MainWindow.screenToWorld([dmx, dmy])

		intersection = viz.intersect(line.begin, line.end)
		intersection_v = viz.intersect(line_v.begin, line_v.end)

		# blue dot = ground truth, orange dot = measured gaze (both at depth)
		dot = vizshape.addSphere(radius=sphere_size)
		dot.setPosition(intersection.point)
		dot.color(viz.BLUE)

		dot_v = vizshape.addSphere(radius=sphere_size*0.75)
		dot_v.setPosition(intersection_v.point)
		dot_v.color(viz.YELLOW_ORANGE)

		# lock dots to user
		view_link = viz.grab(viz.MainView, dot)
		view_link2 = viz.grab(viz.MainView, dot_v)

		# angular error between ground-truth and measured gaze directions
		error = vizmat.AngleBetweenVector(line.dir, line_v.dir)
		ang_error.append(error)

		print('angle is: ', error, 'for ', p)

	showMessage('mean angular error is: {}'.format(np.mean(ang_error)))

	print('mean angular error is: ', np.mean(ang_error), ' deg/ visual angle')
コード例 #48
0
ファイル: customEvent.py プロジェクト: vhilab/VRITS-2015
import viz 
import vizact 

# Custom events fired when the mouse crosses into/out of a small region in
# the lower-left corner of the window.
MOUSE_ENTER_SPECIAL_EVENT = viz.getEventID('MOUSE_ENTER_SPECIAL_EVENT') 
MOUSE_LEAVE_SPECIAL_EVENT = viz.getEventID('MOUSE_LEAVE_SPECIAL_EVENT') 

_mouseState = None 

def CheckMouse(): 
    """Poll the mouse each frame and fire enter/leave events on change."""
    global _mouseState 
    pos = viz.mouse.getPosition(viz.WINDOW_NORMALIZED,True) 
    inside = (0 <= pos[0] <= .1) and (0 <= pos[1] <= .1) 
    if inside == _mouseState: 
        return 
    _mouseState = inside 
    event = MOUSE_ENTER_SPECIAL_EVENT if inside else MOUSE_LEAVE_SPECIAL_EVENT 
    viz.sendEvent(event, pos) 

vizact.onupdate(0,CheckMouse)
コード例 #49
0
def captureViewIntensity():
    """Play a recorded view animation while accumulating per-face view
    intensity, then project the result as a heatmap onto the scene."""
    ### replace with your own application setup
    import viz
    import vizact
    import vizcam

    viz.setMultiSample(4)

    viz.go()

    piazza = viz.addChild("piazza.osgb")

    ### Add this at the bottom
    '''
	 - Load an animation file, which encapsulates the view rotation for a given time span.
	 - Samples per second determines how many samples are taken along the animation path over the duration of loaded animation.
	 - While the loaded animation path is playing the accumulator will accumulate view intensity values.
	 - After the animation player finished playing these intensities will be saved to a cubemap
	 - The intensity images is then used for final heatmap computation
	'''

    # load animation path
    loader = AnimationPathLoader("test_animation.txt")
    player = AnimationPathPlayer(path=loader.getAnimationPath())
    path_link = viz.link(loader.getAnimationPath(), viz.MainView)

    global accumulator
    accumulator = ViewAccumulatorCube(frame_weight=0.5, aperture_scale=0.5)
    accumulator.setPosition(viz.MainView.getPosition())

    global _capture_done
    _capture_done = False

    # one accumulator output texture per cube-map face
    cube_textures = {
        viz.POSITIVE_X: accumulator.getOutputTexture(viz.POSITIVE_X),
        viz.NEGATIVE_X: accumulator.getOutputTexture(viz.NEGATIVE_X),
        viz.POSITIVE_Y: accumulator.getOutputTexture(viz.POSITIVE_Y),
        viz.NEGATIVE_Y: accumulator.getOutputTexture(viz.NEGATIVE_Y),
        viz.POSITIVE_Z: accumulator.getOutputTexture(viz.POSITIVE_Z),
        viz.NEGATIVE_Z: accumulator.getOutputTexture(viz.NEGATIVE_Z)
    }

    heat_projector = HeatmapVisualizer(cube_textures, auto_update=True)
    heat_projector.setPosition(viz.MainView.getPosition())
    heat_projector.affect(piazza)

    # update function capturing accumulating view intensity
    def updateFct(player, accumulator):
        global _capture_done

        if not player.isPlaying():
            if not _capture_done:
                accumulator.saveAll()
                _capture_done = True
                print "Intensity capture done."
            elif accumulator != None:
                # NOTE(review): 'accumulator' here is the function parameter,
                # so this rebinding does NOT clear the module-level global
                # declared above -- confirm whether the global was intended.
                accumulator.remove()
                accumulator = None
                path_link.remove()

    update = vizact.onupdate(1, updateFct, player, accumulator)
コード例 #50
0
ファイル: scene2.py プロジェクト: XingxinHE/DivineComedy_VR
mylight = viz.addLight()
mylight.position(0, 0, 0)
mylight.direction(0, 0, 1)
mylight.spread(30)
mylight.intensity(1.5)


def set_vie_with_position():
    """Glue the spotlight to the camera by copying the main view's pose."""
    # (Function name kept as-is — "vie" looks like a typo for "view", but the
    # name is referenced below and possibly elsewhere.)
    px, py, pz = viz.MainView.getPosition()
    yaw, pitch, roll = viz.MainView.getEuler()

    mylight.setPosition(px, py, pz)
    mylight.setEuler(yaw, pitch, roll)


# Re-sync the light with the view once per frame.
updateLight = vizact.onupdate(0, set_vie_with_position)

#------------------------------------------------add the direction light------------------------------------

#----------------------------------------add the sphere behaviour----------------------------------------
##----------------------------------------------------------sphere one---------------------------------

# Hard-coded world-space [x, y, z] centers for the ten spheres placed later
# in this scene (formatting left to the auto-formatter's line wrapping).
center_pos = ([2.93414, 1.54399, 0.42421], [3.55752, 0.85784, -3.36570],
              [5.35982, 0.66285,
               -1.46127], [8.87976, 0.83787,
                           -3.06492], [6.42717, 1.45201,
                                       -6.80490], [10.99663, 1.60214, 2.41580],
              [8.13910, 0.88974,
               4.26806], [10, -1.4,
                          -4.23], [16.99663, 1.50214,
                                   -1.3241580], [23.42136, 2.83873, -2.73446])
コード例 #51
0
    def rvd_task(self):

        # make 3D surface to put object on / like a table in front of participant
        self.rvd_table.setPosition(
            [self.maze.start_pos[0], .5,
             self.maze.start_pos[2] + .5])  # move a bit away
        self.rvd_table.visible(viz.ON)

        sc = .03  # manually set scale factor of 3D objects
        self.maze.global_landmark.setScale(.03 * sc, .03 * sc, .03 * sc)
        self.maze.maze_end_ground.setScale(sc, sc, sc)  # make a bit bigger
        self.maze.maze_start_ground.setScale(sc, sc, sc)  # make a bit bigger
        self.maze.maze_start_ground.color(viz.YELLOW)

        # set start position as anchor
        y_offset = 1.0
        z_offset = .4
        scale_factor = 20

        s_scaled = [
            x / scale_factor
            for x in self.maze.maze_start_ground.getPosition()
        ]
        l_scaled = [
            x / scale_factor for x in self.maze.maze_end_ground.getPosition()
        ]
        g_scaled = [
            x / scale_factor for x in self.maze.global_landmark.getPosition()
        ]

        # get difference to correct the offset of to the start position ground
        abs_x_diff = abs(self.maze.maze_start_ground.getPosition()[0]) - abs(
            s_scaled[0])
        # add/subtract offset in x direction
        if self.maze.maze_start_ground.getPosition()[0] < 0:
            s_scaled[0] = s_scaled[0] - abs_x_diff
            l_scaled[0] = l_scaled[0] - abs_x_diff
            g_scaled[0] = g_scaled[0] - abs_x_diff
        else:
            s_scaled[0] = s_scaled[0] + abs_x_diff
            l_scaled[0] = l_scaled[0] + abs_x_diff
            g_scaled[0] = g_scaled[0] + abs_x_diff

        # subtract z to table location and add offset on z direction
        combined_offset = self.maze.maze_start_ground.getPosition(
        )[2] + z_offset
        s_scaled[2] = s_scaled[2] + combined_offset
        l_scaled[2] = l_scaled[2] + combined_offset
        g_scaled[2] = g_scaled[2] + combined_offset

        # add height of table in y direction
        s_scaled[1] = s_scaled[1] + y_offset
        l_scaled[1] = l_scaled[1] + y_offset
        g_scaled[1] = g_scaled[1] + y_offset

        # set position of objects
        self.maze.maze_start_ground.setPosition(s_scaled)
        self.maze.maze_end_ground.setPosition(l_scaled)
        self.maze.global_landmark.setPosition(g_scaled)

        # # compute scaled, z_offseted (moved a bit forward) triangle of objects once and then make objects visible or not
        # scale_factor = 30
        #
        # # set start position as anchor
        # z_offset = .2
        #
        # self.maze.maze_start_ground.setPosition(self.maze.start_pos[0], 1.0, self.maze.start_pos[2] + z_offset)
        #
        # # set position of global landmark: only differs in z dimension from start position
        # z_dist_landmark_start = abs(self.maze.global_landmark.getPosition()[2] - self.maze.maze_start_ground.getPosition()[2]) / scale_factor
        # self.maze.global_landmark.setPosition([self.maze.maze_start_ground.getPosition()[0],
        #                                        self.maze.maze_start_ground.getPosition()[1],
        #                                        self.maze.maze_start_ground.getPosition()[2] + z_dist_landmark_start])
        #
        # # set position of local landmark: differs in x and z dimensions from start position
        # x_dist_local_start = abs(self.maze.maze_end_ground.getPosition()[0] - self.maze.maze_start_ground.getPosition()[0]) / scale_factor
        # z_dist_local_start = (self.maze.maze_end_ground.getPosition()[2] - self.maze.maze_start_ground.getPosition()[2]) / scale_factor
        #
        # if self.maze.maze_end_ground.getPosition()[0] < 0:
        #     self.maze.maze_end_ground.setPosition([self.maze.maze_start_ground.getPosition()[0] - x_dist_local_start,
        #                                            self.maze.maze_start_ground.getPosition()[1],
        #                                            self.maze.maze_start_ground.getPosition()[2] + z_dist_local_start])
        # else:
        #     self.maze.maze_end_ground.setPosition([self.maze.maze_start_ground.getPosition()[0] + x_dist_local_start,
        #                                            self.maze.maze_start_ground.getPosition()[1],
        #                                            self.maze.maze_start_ground.getPosition()[2] + z_dist_local_start])

        # send positions of all target objects
        if self.current_trial_run is 1:
            event = 'type:rvd_triangle;S:' + str(self.maze.maze_start_ground.getPosition()) + \
                ';L:' + str(self.maze.maze_end_ground.getPosition()) + \
                ';G:' + str(self.maze.global_landmark.getPosition()) + ';'
            self.log_exp_progress(event)

        # make different objects visible and guess third object
        object_to_guess = self.rvd_list_all[int(self.subject_id) - 1]
        object_to_guess = object_to_guess[int(self.current_trial_run) - 1]
        marker_object_to_guess = object_to_guess

        if object_to_guess is 'G':
            object_to_guess = self.maze.global_landmark
        elif object_to_guess is 'L':
            object_to_guess = self.maze.maze_end_ground
        elif object_to_guess is 'S':
            object_to_guess = self.maze.maze_start_ground

        # move somewhere out of sight before making visible so not to indicate correct solution for 1 frame
        object_to_guess.setPosition([0, 0, -10])
        self.maze.maze_end_ground.visible(viz.ON)
        self.maze.maze_start_ground.visible(viz.ON)
        self.maze.global_landmark.visible(viz.ON)

        # start tracking with mouseclick
        self.scene.change_instruction("Platzieren Sie das fehlende Objekt.")
        print '!!! EXPERIMENTER CLICK MOUSE TO START RVD TASK THEN PARTICIPANTS HAS TO CONFIRM PLACEMENT WITH TRIGGER!!!'
        yield self.hide_inst_continue_left_mouse()
        self.log_exp_progress("event:rvd_start;")

        # track object
        self.rvd_feedback = vizact.onupdate(0, self.track_rvd, object_to_guess)

        # place target with button
        yield self.hide_inst_continue_trigger()
        print '!!! OBJECT PLACED !!!'
        self.log_exp_progress("event:rvd_target_placed;object_location:"+\
                              str(object_to_guess.getPosition())+';'+\
                              'object:'+marker_object_to_guess +';')
        self.rvd_feedback.remove()
        self.scene.change_instruction("Danke! Einen Moment...")

        # keep in view for 2 seconds
        yield viztask.waitTime(3)
        self.log_exp_progress("event:rvd_end;")
        self.rvd_task_on = False

        # hide objects
        self.rvd_table.visible(viz.OFF)
        self.maze.maze_start_ground.color(viz.WHITE)
        self.maze.maze_start_ground.visible(viz.OFF)
        self.maze.global_landmark.visible(viz.OFF)
        self.maze.maze_end_ground.visible(viz.OFF)
コード例 #52
0
    def trial(self):

        # trial start marker with landmark positions
        start_pos = 'start_pos:' + str(self.maze.start_pos) + ';'
        end_pos = 'end_pos:' + str(self.maze.end_pos) + ';'
        global_pos = 'global_pos:' + str(
            self.maze.global_landmark.getPosition()) + ';'
        self.log_exp_progress('type:trial_start;' + start_pos + end_pos +
                              global_pos)

        # baseline
        print '!!! BASELINE START !!!'
        yield self.baseline()
        print '!!! BASELINE END -> FIND START !!!'

        # show start of maze and let participant go there
        self.maze.maze_start_sphere.visible(viz.ON)
        self.maze.start_arrow.visible(viz.ON)
        self.maze.maze_start_ground.visible(viz.ON)

        # add to proximity manager
        self.scene.poi_manager.addSensor(self.maze.maze_start_sphere_sensor)
        self.scene.poi_manager.addTarget(self.subject.right_hand_sphere_target)

        # yield wait enter hand sphere
        yield vizproximity.waitEnter(self.maze.maze_start_sphere_sensor)
        self.log_exp_progress('type:arrived_at_start_position;')
        self.maze.maze_start_sphere.visible(viz.OFF)
        self.maze.start_arrow.visible(viz.OFF)
        self.maze.maze_start_ground.visible(viz.OFF)

        # remove from proximity manager
        self.scene.poi_manager.removeSensor(self.maze.maze_start_sphere_sensor)
        self.scene.poi_manager.removeTarget(
            self.subject.right_hand_sphere_target)

        # wait for experimenter to click window once via remote to start trial
        print '!!! CLICK MOUSE TO START EXPLORATION !!!'
        yield self.hide_inst_continue_left_mouse()
        yield self.scene.countdown(5)  # countdown to start

        ### Trial Procedure Start ###
        self.log_exp_progress('type:navigation_start;')
        start = viz.tick()

        # enable collision
        viz.phys.enable()

        # start intersection check for both head and hand against the maze walls
        self.hand_t1_pos = self.subject.right_hand_sphere.getPosition()
        self.head_t1_pos = self.subject.head_sphere.getPosition()
        hand_feedback = vizact.onupdate(0, self.check_node_intersection,
                                        self.subject.right_hand_sphere,
                                        self.maze.walls)
        head_feedback = vizact.onupdate(0, self.check_node_intersection,
                                        self.subject.head_sphere,
                                        self.maze.walls)

        # add head sphere to proximity manager and register callbacks for end and start ground
        self.scene.poi_manager.addTarget(self.subject.head_sphere_target)
        self.scene.poi_manager.addSensor(self.maze.maze_start_position.ground)
        self.scene.poi_manager.addSensor(self.maze.maze_end_position.ground)
        head_landmark_enter = self.scene.poi_manager.onEnter(
            None, self.enter_local_landmark)
        head_landmark_exit = self.scene.poi_manager.onExit(
            None, self.exit_local_landmark)

        # [BPA 2019-04-29] make global landmark visible
        self.maze.global_landmark.visible(viz.ON)

        # now wait until subjects found the end of the maze
        yield vizproximity.waitEnter(self.maze.maze_end_position.ground)
        print '!!! END REACHED: POINTING TASK, PARTICIPANT NEEDS TO POINT AND CLICK WITH CONTROLLER !!!'

        # temporarily remove proximity target head while doing the pointing task when sensor is first entered
        self.scene.poi_manager.clearTargets()
        yield self.pointing(start)
        print '!!! POINTED BACK TO START NOW EXPLORE BACK TO START!!!'
        self.scene.poi_manager.addTarget(self.subject.head_sphere_target)

        # then wait till subjects returned to the start position
        yield vizproximity.waitEnter(self.maze.maze_start_position.ground)
        print '!!! START REACHED: RVD & REWALKING TASK COMING UP... PARTICIPANTS NEEDS TO CLICK TRIGGER TO START!!!'

        # performance measures for reward and analysis
        end = viz.tick()
        self.duration = end - start
        duration_return = end - self.start_return
        performance = 'type:navigation_end;duration:' + str(round(self.duration, 2)) + \
                      ';total_touches:' + str(self.hand_hits) + \
                      ';duration_return:' + str(round(duration_return, 2)) + ';'
        self.log_exp_progress(performance)

        # remove all the proximity sensors and the target and unregister the callbacks
        self.scene.poi_manager.clearTargets()
        self.scene.poi_manager.clearSensors()
        head_landmark_enter.remove()
        head_landmark_exit.remove()

        # remove wall collisions
        hand_feedback.remove()
        head_feedback.remove()

        # reset wall touches
        if self.feedback_hand is not None:
            self.reset_touching()

        # [BPA 2019-04-29] remove landmark
        self.scene.toggle_global_landmark(self.maze, "off")

        # show reward for 5 seconds
        self.log_exp_progress('type:reward_start;')
        yield self.scene.reward_feedback(self.head_hits, self.duration, 3)
        self.log_exp_progress('type:reward_end;')

        # start testaufgaben
        self.scene.change_instruction("Es folgen die Testaufgaben.\n"
                                      "weiter durch Klick!")
        yield self.hide_inst_continue_trigger()

        # start spatial tasks with turning around
        print '!!! REORIENTING... !!!'
        yield self.reorient()

        # RVD Task
        yield self.rvd_task()

        # walk maze task
        yield self.walk_maze_task()

        # trial end marker
        self.log_exp_progress('type:trial_end;')
        print '!!! TRIAL END !!!'
        ### Trial Procedure End ###

        if not self.current_maze is 'I' and self.current_trial_run < 3:
            # Beginn next trial mit Baseline
            print '!!! START NEXT RUN WITH N KEY !!!'
            self.scene.change_instruction("Bitte beim Versuchsleiter melden!")
            yield viztask.waitKeyDown('n')
            self.scene.hide_instruction()
コード例 #53
0
    def on_enter_wall(self, tracker, pos, dir):
        """Collision callback toggling enter/exit state for wall contacts.

        tracker: the colliding sphere node (right hand or head).
        pos:     collision position in world coordinates.
        dir:     wall normal used to orient the feedback visuals.

        Each call flips the in-wall flag for the given tracker, so the same
        callback handles both "enter" and the following "exit".
        NOTE(review): `dir` shadows the builtin of the same name; renaming
        would change the registered callback signature, so it is kept.
        NOTE(review): the head branch below mixes round(dir[0]) (0 digits)
        with round(dir[0], 1) / round(dir[2], 1) — confirm this asymmetry
        is intentional and not a typo.
        """
        # hand wall enter
        # allow only one collision and reactivate after the next collision (enter, then exit first, then enter again)
        if tracker is self.subject.right_hand_sphere and not self.hand_in_wall:
            self.hand_in_wall = True
            # only do wall touch when no collision of the head is present
            if not self.head_in_wall and not self.pointing_task_on:
                # only allow if previous touch was completed
                if self.new_touch_allowed is True:
                    # start time of wall touch
                    self.feedback_start_time = viz.tick()
                    # display feedback along the wall, flips axis based on normal vector
                    if round(dir[0]) == 1.0 or round(dir[0]) == -1.0:
                        self.scene.feedback_sphere_right.setEuler([90, 0, 0])
                    else:
                        self.scene.feedback_sphere_right.setEuler([0, 0, 0])
                    # set feedback sphere color, alpha, scale and position:
                    self.scene.feedback_sphere_right.alpha(1)
                    self.scene.feedback_sphere_right.setPosition(pos)

                    # add action to display and fade out
                    self.scene.feedback_sphere_right.add(self.feedback)
                    self.new_touch_allowed = False
                    self.log_wall_touches(True, pos)

                    # check how long hand left in wall
                    self.feedback_hand = vizact.onupdate(
                        0, self.check_hand_in_wall)

        # hand wall exit
        elif tracker is self.subject.right_hand_sphere and self.hand_in_wall:
            self.hand_in_wall = False
            self.scene.hide_instruction()

        # head wall collision
        elif tracker is self.subject.head_sphere and not self.head_in_wall:
            self.head_in_wall = True

            # place arrow orthogonal to crossed wall
            self.scene.change_instruction('Bitte zurueck treten!')

            # choose arrow/sphere pose from the wall normal's dominant axis
            if round(dir[0]) == 1.0:
                self.scene.arrow.setEuler([-90, 0, 0])
                self.scene.arrow.setPosition(pos[0] - 1, pos[1] - .5, pos[2])
                self.help_sphere.setPosition(pos[0] + .5, pos[1] - .5, pos[2])
            elif round(dir[0], 1) == -1.0:
                self.scene.arrow.setEuler([90, 0, 0])
                self.scene.arrow.setPosition(pos[0] + 1, pos[1] - .5, pos[2])
                self.help_sphere.setPosition(pos[0] - .5, pos[1] - .5, pos[2])
            elif round(dir[2], 1) == 1.0:
                self.scene.arrow.setEuler([180, 0, 0])
                self.scene.arrow.setPosition(pos[0], pos[1] - .5, pos[2] - 1)
                self.help_sphere.setPosition(pos[0], pos[1] - .5, pos[2] + .5)
            elif round(dir[2], 1) == -1.0:
                self.scene.arrow.setEuler([0, 0, 0])
                self.scene.arrow.setPosition(pos[0], pos[1] - .5, pos[2] + 1)
                self.help_sphere.setPosition(pos[0], pos[1] - .5, pos[2] - .5)
            self.scene.arrow.visible(viz.ON)
            self.help_sphere.visible(viz.ON)

            # log the head collision
            self.log_wall_touches(False, pos)

        # head wall exit: clear the warning visuals
        elif tracker is self.subject.head_sphere and self.head_in_wall:
            self.head_in_wall = False
            self.scene.arrow.visible(viz.OFF)
            self.help_sphere.visible(viz.OFF)
            self.scene.hide_instruction()
コード例 #54
0
ファイル: third_appendage.py プロジェクト: vhilab/thirdArm
def add(avatar, appendage, skinChoices, skin, controlSchema):
	"""Attach a third appendage to *avatar* and wire up its control scheme.

	avatar:        the avatar whose 'Bip01 Spine' bone anchors the appendage.
	appendage:     0 = textured arm, 1 = plain cylinder (2/3 are disabled).
	skinChoices:   sequence of skin-tone codes indexed by *skin*.
	skin:          skin-tone index; 9 selects the '010.png' texture.
	controlSchema: 0 = wrist-rotation control, 1 = bimanual asymmetric.

	Fix vs. original: the skin-texture set-up was duplicated across both
	branches of the ``skin == 9`` check (only the filename differed); the
	filename is now computed once and the texture created in one place.
	"""
	global isCube1, isCube2, thirdAppendage, usingInvisibleRod

	thirdAppendageHorn = viz.addGroup()

	if(appendage == 0):
		#Third Arm
		usingInvisibleRod = False
		thirdAppendage = viz.addChild(resources.ARM)
		# skin 9 maps to '010.png', everything else to '00<choice>.png'
		if skin == 9:
			textureFile = "skin_tones/010.png"
		else:
			textureFile = "skin_tones/00" + skinChoices[skin] + ".png"
		texture = viz.addTexture(textureFile)
		texture.wrap(viz.WRAP_S,viz.REPEAT)
		texture.wrap(viz.WRAP_T,viz.REPEAT)
		thirdAppendage.texture(texture)
		thirdAppendage.emissive([.75, .75, .75])
		thirdAppendage.setScale(resources.ARM_SCALE)
	elif(appendage == 1):
		#Cylinder
		usingInvisibleRod = False
		thirdAppendage = viz.addChild(resources.CYLINDER)
		thirdAppendage.setScale(resources.ARM_SCALE)
#	elif(appendage == 2):
#		#Floating hand
#		usingInvisibleRod = True
#		thirdAppendage = viz.addChild(resources.FLOATING_HAND)
#		thirdAppendage.setScale(resources.ARM_SCALE)
#		if skin == 9:
#			texture = viz.addTexture("skin_tones/010.png")
#			texture.wrap(viz.WRAP_S,viz.REPEAT)
#			texture.wrap(viz.WRAP_T,viz.REPEAT)
#		else:
#			texture = viz.addTexture("skin_tones/00" + skinChoices[skin] + ".png")
#			texture.wrap(viz.WRAP_S,viz.REPEAT)
#			texture.wrap(viz.WRAP_T,viz.REPEAT)
#		thirdAppendage.texture(texture)
#		thirdAppendage.emissive([.75, .75, .75])
#	elif(appendage ==3):
#		#Floating Chunk
#		usingInvisibleRod = True
#		thirdAppendage = viz.addChild(resources.FLOATING_CHUNK)
#		thirdAppendage.setScale(resources.ARM_SCALE)

	# Hang the appendage off a group linked to the avatar's spine, with a
	# fixed translation/rotation offset so it sits behind the shoulders.
	thirdAppendage.setParent(thirdAppendageHorn)
	global tahLink
	tahLink = viz.link(avatar.getBone('Bip01 Spine'), thirdAppendageHorn)
	global offsetOp
	offsetOp = tahLink.preTrans([0, 0.3, 0.1])
	tahLink.preEuler([-90.0, 0.0, 0.0])

	# Collide like a box but do not let physics move it.
	thirdAppendage.collideBox()
	thirdAppendage.disable(viz.DYNAMICS)
	TouchCube.thirdAppendage = thirdAppendage

	# EXTENSION: Add third appendage movement via intersense cube
	isense = viz.add('intersense.dle')
	isCube1 = isense.addTracker(port=6)
	isCube2 = isense.addTracker(port=7)

	# Link cubes
	pptextensionDK2.isCube1 = isCube1
	pptextensionDK2.isCube2 = isCube2

	# Setup thirdarm updating.
	#vizact.onupdate(0, updateThirdArm)

	#Control schema switch
	if (controlSchema == 0):
		vizact.onupdate(0, controlWithWristRotation)
	elif (controlSchema == 1):
		setupBimanualAsymmetricControl()
コード例 #55
0
# Start the Vizard render loop.
viz.go(
    #viz.FULLSCREEN #run world in full screen
)

# Secondary window with its own scene, mirroring the post-process color
# buffer onto a full-window textured quad (mono preview of the HMD view).
monoWindow = viz.addWindow(size=(1, 1), pos=(0, 1), scene=viz.addScene())
monoQuad = viz.addTexQuad(parent=viz.ORTHO, scene=monoWindow)
monoQuad.setBoxTransform(viz.BOX_ENABLED)
monoQuad.setTexQuadDisplayMode(viz.TEXQUAD_FILL)
texture = vizfx.postprocess.getEffectManager().getColorTexture()


def UpdateTexture():
    # Re-apply the post-process color texture to the quad every frame.
    monoQuad.texture(texture)


vizact.onupdate(0, UpdateTexture)

global hmd
# NOTE(review): `viz.addView` is assigned without call parentheses, so
# `view` holds the function object rather than a new view — probably meant
# viz.addView(). `view` is unused in this fragment, so the line is left
# untouched to avoid introducing a new side effect.
view = viz.addView
hmd = oculus.Rift()
hmd.getSensor()
#profile = hmd.getProfile()
#hmd.setIPD(profile.ipd)

# Three tiled optic-flow boxes; OF2 is offset one box-length along +Z.
OF1 = viz.addChild('OpticFlowBox5.osgb', scale=[3, 3, 3])
OF1.disable(viz.LIGHTING)
OF2 = viz.addChild('OpticFlowBox5.osgb', scale=[3, 3, 3])
OF2.disable(viz.LIGHTING)
OF2.setPosition(0, 0, 1.27 * 3)
OF3 = viz.addChild('OpticFlowBox5.osgb', scale=[3, 3, 3])
OF3.disable(viz.LIGHTING)
コード例 #56
0
    # a - Turn L		s - Back		d - Turn R
    # y - Face Up		r - Fly Up
    # h - Face Down		f - Fly Down
    if controlType == MONITOR:
        HZ = 60
        headTrack = viztracker.Keyboard6DOF()
        link = viz.link(headTrack, viz.MainView)
        headTrack.eyeheight(1.6)
        link.setEnabled(True)
        viz.go()
    elif controlType == HMD:
        HZ = 90
        vizconnect.go('vizconnect_config.py')
        # Overwrite headset ipd
        IPD = viz.MainWindow.getIPD()
        vizact.onupdate(viz.UPDATE_LINKS + 1, overwriteIPD)
        # add Odyssey tracker
        ODTracker = vizconnect.getTracker('head_tracker')

    # Use a large size of the viewing frustum
    viz.clip(.001, 1000)

    # loads experimental conditions
    inputFile = os.path.abspath(
        os.path.join(INPUT_DIR, 'exp_a_subj' + subject + '.csv'))

    with open(inputFile, 'r') as file:
        lines = file.read().split('\n')[1:-1]
        conditions = [[float(x) for x in line.split(',')] for line in lines]

    # Define the trial, with which the experiment starts
コード例 #57
0
# Check if HMD supports position tracking
supportPositionTracking = hmd.getSensor().getSrcMask() & viz.LINK_POS
if supportPositionTracking:
    # NOTE(review): `global` at module level is a no-op and `man` is never
    # used in this fragment — likely leftover.
    global man
    # Add camera bounds model
    camera_bounds = hmd.addCameraBounds()
    camera_bounds.visible(True)

    # Change color of bounds to reflect whether position was tracked
    def CheckPositionTracked():
        # Green while the Rift reports a tracked position, red otherwise.
        if hmd.getSensor().getStatus() & oculus.STATUS_POSITION_TRACKED:
            camera_bounds.color(viz.GREEN)
        else:
            camera_bounds.color(viz.RED)

    vizact.onupdate(0, CheckPositionTracked)

# Setup navigation node and link to main view; the HMD sensor pose is
# pre-multiplied so head tracking applies on top of the navigation node.
navigationNode = viz.addGroup()
viewLink = viz.link(navigationNode, viz.MainView)
viewLink.preMultLinkable(hmd.getSensor())

#apply user profile data to view
profile = hmd.getProfile()
if profile:
    #	print(profile)
    # subjectheight is defined elsewhere in this file — raises the eye point.
    viewLink.setOffset([0, subjectheight, 0])
    hmd.setIPD(profile.ipd)

global beltspeed
beltspeed = 1  #units must be meters/second
コード例 #58
0
    def linkOrientation(self, node3d):
        """Sync *node3d*'s orientation to this object's quaternion each frame.

        Returns the vizact update action so the caller can remove() it.

        Fix vs. original: ``node3d`` was referenced in the body but never
        declared as a parameter, so every call raised NameError; it is now
        a parameter, matching the sibling ``linkPosition(self, node3d)``.
        """
        linkAction = vizact.onupdate(viz.PRIORITY_LINKS, node3d.setQuat,
                                     self.getQuaternion)
        return linkAction
コード例 #59
0
    def linkPosition(self, node3d):
        """Sync *node3d*'s position to this physics body every frame.

        Returns the vizact update action so the caller can remove() it.
        """
        # Re-apply the ODE body position to the scene node at link priority.
        return vizact.onupdate(viz.PRIORITY_LINKS, node3d.setPosition,
                               self.body.getPosition)
コード例 #60
0
    def __init__(self,
                 world,
                 space,
                 shape,
                 pos,
                 size=[],
                 bounciness=1,
                 friction=0,
                 vertices=None,
                 indices=None):

        self.node3D = viz.addGroup()

        # If it is NOT linked it updates seld.node3D pose with pos/quat pose on each frame
        # If it is NOT linked it updates pos/quat pose with node3D pose on each frame
        # This allows a linked phsynode to be moved around by an external source via the node3D

        self.isLinked = 0
        self.geom = 0
        self.body = 0

        self.parentWorld = []
        self.parentSpace = []

        self.bounciness = bounciness
        self.friction = friction

        # A list of bodies that it will stick to upon collision
        self.stickTo_gIdx = []
        self.collisionPosLocal_XYZ = []

        if shape == 'plane':

            # print 'phsEnv.createGeom(): type=plane expects pos=ABCD,and NO size. SIze is auto infinite.'
            self.geom = ode.GeomPlane(space, [pos[0], pos[1], pos[2]], pos[3])
            self.parentSpace = space
            # No more work needed

        elif shape == 'sphere':

            #print 'Making sphere physNode'
            # print 'phsEnv.createGeom(): type=sphere expects pos=XYZ, and size=RADIUS'

            ################################################################################################
            ################################################################################################
            # Define the Body: something that moves as if under the
            # influence of environmental physical forces

            self.geomMass = ode.Mass()

            # set sphere properties automatically assuming a mass of 1 and self.radius
            mass = 1.0
            self.geomMass.setSphereTotal(mass, size)

            self.body = ode.Body(world)
            self.parentWorld = world
            self.body.setMass(self.geomMass)  # geomMass or 1 ?
            self.body.setPosition(pos)

            # Define the Geom: a geometric shape used to calculate collisions
            #size = radius!
            self.geom = ode.GeomSphere(space, size)
            self.geom.setBody(self.body)
            self.parentSpace = space

            ################################################################################################
            ################################################################################################
        #elif shape == 'cylinder':
        elif ('cylinder' in shape):
            #print 'Making cylinder physNode'

            # print 'phsEnv.createGeom(): type=sphere expects pos=XYZ, and size=RADIUS'

            ################################################################################################
            ################################################################################################
            # Define the Body: something that moves as if under the
            # influence of environmental physical forces
            radius = size[1]
            length = size[0]

            self.geomMass = ode.Mass()

            # set sphere properties automatically assuming a mass of 1 and self.radius
            mass = 1.0

            if (shape[-2:] == '_X'):
                direction = 1
            elif (shape[-2:] == '_Y'):
                direction = 2
            else:
                direction = 3  # direction - The direction of the cylinder (1=x axis, 2=y axis, 3=z axis)

            self.geomMass.setCylinderTotal(mass, direction, radius, length)

            self.body = ode.Body(world)
            self.parentWorld = world
            self.body.setMass(self.geomMass)  # geomMass or 1 ?
            self.body.setPosition(pos)

            # Define the Geom: a geometric shape used to calculate collisions
            #size = radius!
            self.geom = ode.GeomCylinder(space, radius, length)
            self.geom.setPosition(pos)

            self.geom.setBody(self.body)

            # This bit compensates for a problem with ODE
            # Note how the body is created in line with any axis
            # When I wrote this note, it was in-line with Y (direction=2)
            # The geom, however, can only be made in-line with the Z axis
            # This creates an offset to bring the two in-line
            vizOffsetTrans = viz.Transform()

            if (shape[-2:] == '_X'):
                vizOffsetTrans.setAxisAngle(1, 0, 0, 90)
            elif (shape[-2:] == '_Y'):
                vizOffsetTrans.setAxisAngle(0, 0, 1, 90)

            vizOffsetQuat = vizOffsetTrans.getQuat()

            odeRotMat = self.vizQuatToRotationMat(vizOffsetQuat)

            #print self.geom.getRotation()

            self.geom.setOffsetRotation(odeRotMat)

            self.parentSpace = space

        elif shape == 'box':

            ################################################################################################
            ################################################################################################
            # Define the Body: something that moves as if under the
            # influence of environmental physical forces

            length = size[1]
            width = size[2]
            height = size[0]

            self.geomMass = ode.Mass()

            # set sphere properties automatically assuming a mass of 1 and self.radius
            mass = 1.0

            self.geomMass.setBoxTotal(mass, length, width, height)

            self.body = ode.Body(world)
            self.parentWorld = world
            self.body.setMass(self.geomMass)  # geomMass or 1 ?
            self.body.setPosition(pos)

            # Define the Geom: a geometric shape used to calculate collisions
            #size = radius!
            self.geom = ode.GeomBox(space, [length, width, height])
            self.geom.setPosition(pos)

            self.geom.setBody(self.body)

            self.parentSpace = space

        elif shape == 'trimesh':

            if (vertices == None or indices == None):
                print 'physNode.init(): For trimesh, must pass in vertices and indices'

            self.body = ode.Body(world)
            self.parentWorld = world
            self.body.setMass(self.geomMass)  # geomMass or 1 ?
            self.body.setPosition(pos)

            triMeshData = ode.TrisMeshData()
            triMeshData.build(vertices, indices)
            self.geom = ode.GeomTriMesh(td, space)
            self.geom.setBody(self.body)

            ## Set parameters for drawing the trimesh
            body.shape = "trimesh"
            body.geom = self.geom

            self.parentSpace = space

        else:
            print 'physEnv.physNode.init(): ' + str(
                type) + ' not implemented yet!'
            return
            pass

        #print '**************UPDATING THE NODE *****************'
        self.updateNodeAct = vizact.onupdate(viz.PRIORITY_PHYSICS,
                                             self.updateNode3D)