def grab(self): """Grab in-range objects with the pointer""" if not self._lastBoxGrabbed: grabList = self._proximityList # Needed for disabling grab of grounded bones self._lastBoxGrabbed = self._boundingBoxes.values()[0] else: # if there is an active region, you cannot grab meshes on inactive regions meshGrabList = set.intersection(set(self._proximityList), set(self._lastBoxGrabbed._members)) boxGrabList = set.intersection(set(self._proximityList), set(self._boundingBoxes.values())) grabList = list(set.union(meshGrabList, boxGrabList)) if len(grabList) == 0 or self._grabFlag: return target = self.getClosestObject(model.pointer,grabList) if isinstance(target, bp3d.Mesh): if self._boundingBoxes.has_key(target.region): self.keyTarget = self._boundingBoxes[target.region]._keystones[0] else: # band-aid for test mode self.keyTarget = self._boundingBoxes['full']._keystones[0] if target.group.grounded: self.highlightGrabbedMesh(target, True) else: target.setGroupParent() self._gloveLink = viz.grab(model.pointer, target, viz.ABS_GLOBAL) self.score.event(event = 'grab', description = 'Grabbed bone', source = target.name) # self.transparency(target, 0.7) target.highlight(grabbed = True) if self._lastMeshGrabbed and self._lastMeshGrabbed is not target: if self._lastMeshGrabbed in grabList: self._lastMeshGrabbed.highlight(prox = True) else: self._lastMeshGrabbed.highlight(prox = False) self._lastMeshGrabbed = target elif isinstance(target, BoundingBox): target.grab() self._gloveLink = viz.grab(model.pointer, target, viz.ABS_GLOBAL) self.score.event(event = 'grab', description = 'Grabbed bounding box', source = target.name) target.highlight(True) self.hideNonActive(target) if self._lastBoxGrabbed and self._lastBoxGrabbed is not target: if self._lastBoxGrabbed in grabList: self._lastBoxGrabbed.highlight(True) else: self._lastBoxGrabbed.highlight(False) self._lastBoxGrabbed = target self._lastGrabbed = target self._grabFlag = True
def grab(self): """ Grab in-range objects with the pointer """ grabList = self._proximityList # Needed for disabling grab of grounded bones if len(grabList) > 0 and not self._grabFlag: target = self.getClosestBone(model.pointer,grabList) if target.group.grounded: self._meshesById[target.id].mesh.color(0,1,0.5) if menuMode != 'Quiz Mode': #quick fix for tool tip not being displayed in quiz mode(global menuMode is declared in def start) self._meshesById[target.id].tooltip.visible(viz.ON) else: target.setGroupParent() self._gloveLink = viz.grab(model.pointer, target, viz.ABS_GLOBAL) self.score.event(event = 'grab', description = 'Grabbed bone', source = target.name) self.transparency(target, 0.7) self._meshesById[target.id].mesh.color(0,1,0.5) if menuMode != 'Quiz Mode': self._meshesById[target.id].tooltip.visible(viz.ON) if target != self._lastGrabbed and self._lastGrabbed: self._meshesById[self._lastGrabbed.id].mesh.color([1.0,1.0,1.0]) for m in self._proximityList: if m == self._lastGrabbed: self._meshesById[self._lastGrabbed.id].mesh.color([1.0,1.0,0.5]) if menuMode != 'Quiz Mode': self._meshesById[self._lastGrabbed.id].tooltip.visible(viz.OFF) self._lastGrabbed = target self._grabFlag = True
def toggleMenu(node=viz.addGroup(), view=viz.MainView, menu=viz.addGUICanvas(), val=viz.TOGGLE):
    # Note: the default node/menu arguments are created once, when the function is defined,
    # not on every call.
    menu.visible(val)
    menuLink = None
    if menu.getVisible() is True:
        pos = view.getPosition()
        menu.setPosition(pos[0], pos[1] - 1, pos[2] + 5)
        # menuLink.remove()
    else:
        menuLink = viz.grab(node, menu)
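# A hedged usage sketch for toggleMenu: in Vizard such a toggle is typically bound to a key
# with vizact.onkeydown. The node, canvas, and key choice below are illustrative, not taken
# from the original source:
menuNode = viz.addGroup()
menuCanvas = viz.addGUICanvas()
vizact.onkeydown('m', toggleMenu, menuNode, viz.MainView, menuCanvas, viz.TOGGLE)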
def grabBall():
    object = viz.pick()  # Command detects which object the mouse is currently over and returns it
    if object == basketball:  # Check to see if object is basketball
        print "You've clicked on the basketball"
        ser.writelines(b'T')  # Turn led/thumb electrode on
        global link
        link = viz.grab(hand, basketball)  # Use hand to grab basketball
    else:
        print "This is not a basketball"
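# grabBall above uses viz.pick(), so it is normally driven by a mouse event. A minimal
# wiring sketch under that assumption (the button choice and module-level link holder are
# illustrative, not from the original source):
link = None  # holder for the viz.grab link created by grabBall
vizact.onmousedown(viz.MOUSEBUTTON_LEFT, grabBall)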
def __init__(self, frame_weight=0.5, aperture_scale=0.5, node=None, **kwargs):
    # node to reference this instance in the scenegraph
    self._node = node
    if self._node == None:
        self._node = viz.addGroup()
    viz.VizNode.__init__(self, id=self._node.id, **kwargs)

    # init intensity capture for each face, forwarding the constructor parameters
    self._capture_faces = {
        viz.POSITIVE_X: ViewAccumulator(frame_weight=frame_weight, aperture_scale=aperture_scale),
        viz.NEGATIVE_X: ViewAccumulator(frame_weight=frame_weight, aperture_scale=aperture_scale),
        viz.POSITIVE_Y: ViewAccumulator(frame_weight=frame_weight, aperture_scale=aperture_scale),
        viz.NEGATIVE_Y: ViewAccumulator(frame_weight=frame_weight, aperture_scale=aperture_scale),
        viz.POSITIVE_Z: ViewAccumulator(frame_weight=frame_weight, aperture_scale=aperture_scale),
        viz.NEGATIVE_Z: ViewAccumulator(frame_weight=frame_weight, aperture_scale=aperture_scale)
    }

    cube_euler = {
        viz.POSITIVE_X: [90, 0, 0],
        viz.NEGATIVE_X: [-90, 0, 0],
        viz.POSITIVE_Y: [0, -90, 0],
        viz.NEGATIVE_Y: [0, 90, 0],
        viz.POSITIVE_Z: [0, 0, 0],
        viz.NEGATIVE_Z: [-180, 0, 0]
    }

    # transform face captures according to face orientation
    for face in self._capture_faces:
        self._capture_faces[face].setEuler(cube_euler[face])
        viz.grab(self, self._capture_faces[face])
def grab(self): """ If the glove is not already linked to something, and the glove is within proximity of an object, link the object to the glove """ if self.outlineCenter.getAction() or self.dogCenter.getAction(): self.recordData.event(event = 'grab', result = 'Did Not Pick Up') return if not self.gloveLink and len(self.proxList)>0: target = self.proxList[0] self.gloveLink = viz.grab(model.pointer, target, viz.ABS_GLOBAL) self.recordData.event(event = 'grab', result = 'Picked Up') else: self.recordData.event(event = 'grab', result = 'Did Not Pick Up')
def grab(outline, dog): """ If the glove is not already linked to something, and the glove is within proximity of an object, link the object to the glove """ global gloveLink global grabFlag global glove if outline.getAction() or dog.getAction(): recordData.event(event="grab", result="Did Not Pick Up") return if not gloveLink and len(proxList) > 0: target = proxList[0] gloveLink = viz.grab(model.pointer, target, viz.ABS_GLOBAL) recordData.event(event="grab", result="Picked Up") else: recordData.event(event="grab", result="Did Not Pick Up")
def release(self): """ Unlink the glove and the object, and if the object is close enough to its target, and is within angular range, then the object is snapped to its target. """ eulerThres = 45 eulerDiff = vizmat.QuatDiff(self.outlineCenter.getQuat(), self.dogCenter.getQuat()) if snapFlag == True and eulerDiff <= eulerThres and gloveLink: recordData.event(event="release", result="Snapped!") snap(self.dogCenter, self.outlineCenter) else: recordData.event() if gloveLink: try: gloveLink.remove() except NameError: gloveLink.removeItems(viz.grab(model.pointer, target, viz.ABS_GLOBAL))
def release(self): """ Unlink the glove and the object, and if the object is close enough to its target, and is within angular range, then the object is snapped to its target. """ eulerThres = 45 eulerDiff = vizmat.QuatDiff(self.outlineCenter.getQuat(), self.dogCenter.getQuat()) if self.snapFlag == True and eulerDiff <= eulerThres and self.gloveLink: self.recordData.event(event = 'release', result = 'Snapped!') self.snap() else: self.recordData.event() if self.gloveLink: try: self.gloveLink.remove() except NameError: self.gloveLink.removeItems(viz.grab(model.pointer, target, viz.ABS_GLOBAL))
def grab(outline, dog): """ If the glove is not already linked to something, and the glove is within proximity of an object, link the object to the glove """ global gloveLink global grabFlag global glove if outline.getAction() or dog.getAction(): recordData.event(event='grab', result='Did Not Pick Up') return if not gloveLink and len(proxList) > 0: target = proxList[0] gloveLink = viz.grab(model.pointer, target, viz.ABS_GLOBAL) recordData.event(event='grab', result='Picked Up') else: recordData.event(event='grab', result='Did Not Pick Up')
def grabActionOnThirdAppendage(grabTrigger):
    global thirdAppendage, thirdAppendageReadyForGrab, thirdAppendageGrabbed, ghostAvatar, matrixOfThirdAppendage, grabLink, posOfThirdAppendage, tahLink
    if (thirdAppendageReadyForGrab is True) and (grabTrigger is True):
        thirdAppendageGrabbed = True
        thirdAppendage.alpha(1.0)
        matrixOfThirdAppendage = thirdAppendage.getMatrix(mode=viz.ABS_GLOBAL)
        posOfThirdAppendage = thirdAppendage.getPosition()
        tahLink.disable()
        grabLink = viz.grab(pptextensionDK2.rhPPT, thirdAppendage)
        grabLink.setMask(viz.LINK_POS)
        #thirdAppendage.setPosition(
        #    ghostAvatar.getBone('Bip01 R Forearm').getPosition(viz.ABS_GLOBAL))
    else:
        thirdAppendageGrabbed = False
        if grabLink:
            grabLink.remove()
            grabLink = None
        #thirdAppendage.setMatrix(matrixOfThirdAppendage)
        tahLink.enable()
        thirdAppendage.setPosition(posOfThirdAppendage)
def grabActionOnThirdAppendage(grabTrigger):
    global thirdAppendage, thirdAppendageReadyForGrab, thirdAppendageGrabbed, ghostAvatar, matrixOfThirdAppendage, grabLink, posOfThirdAppendage, tahLink
    if (thirdAppendageReadyForGrab is True) and (grabTrigger is True):
        # Grabbing the third appendage now
        ExitProximity(True)
        thirdAppendageGrabbed = True
        thirdAppendageReadyForGrab = False
        matrixOfThirdAppendage = thirdAppendage.getMatrix(mode=viz.ABS_GLOBAL)
        posOfThirdAppendage = thirdAppendage.getPosition()
        tahLink.disable()
        grabLink = viz.grab(pptextensionDK2.rhPPT, thirdAppendage)
        grabLink.setMask(viz.LINK_POS_OP)
        #grabLink.postEuler([90, 0, 0], target=viz.LINK_ORI_OP)
        #thirdAppendage.setPosition(
        #    ghostAvatar.getBone('Bip01 R Forearm').getPosition(viz.ABS_GLOBAL))
    else:
        thirdAppendageGrabbed = False
        if grabLink:
            grabLink.remove()
            grabLink = None
        #thirdAppendage.setMatrix(matrixOfThirdAppendage)
        tahLink.enable()
        thirdAppendage.setPosition(posOfThirdAppendage)
def grabBall():
    gesture = int(sensor.get()[-1])
    gestureText.message(gestureName[gesture])
    object = viz.pick()  # Command detects which object the mouse is currently over and returns it
    if (gesture == 0) and (object == basketball):  # Check to see if object is basketball
        print "closed fist - gripped basketball"
        # ser.writelines(b'T')
        global link
        link = viz.grab(hand, basketball)  # Use hand to grab basketball
        glove.getSensorRawAll()
    else:
        print "This is not a basketball"
        # ser.writelines(b'L')
    # repeating timers at 1-16 second intervals, each calling releaseBall
    for period in range(1, 17):
        vizact.ontimer(period, releaseBall)
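# The timers above call releaseBall, which is not included in this snippet. A minimal
# sketch of what it presumably does, assuming the module-level 'link' and the gesture
# sensor used in grabBall (the open-hand check is an assumption):
def releaseBall():
    global link
    gesture = int(sensor.get()[-1])
    if link and gesture != 0:  # hand is no longer a closed fist
        link.remove()  # detach the basketball from the hand
        link = None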
def update():
    global grab
    #print 'analog getData', analog.getData()
    # add the analog data to a list so that interactions can be made
    leap = analog.getData()
    # if a hand is present (stops movement when hand is taken away)
    if leap[98] > 0.3:
        # if gesture is a fist (grab)
        if leap[96] >= 0.5:
            if grab is None:
                # grab the object
                grab = viz.grab(view, h)
        elif leap[96] < 0.5:
            if grab is not None:
                grab.remove()
                grab = None
        if leap[2] < -40:
            view.move([0, 0, MOVE_SPEED * viz.elapsed()], viz.BODY_ORI)
        if leap[2] > 40:
            view.move([0, 0, -MOVE_SPEED * viz.elapsed()], viz.BODY_ORI)
        if leap[0] < -40:
            view.setEuler([-TURN_SPEED * viz.elapsed(), 0, 0], viz.BODY_ORI, viz.REL_PARENT)
        if leap[0] > 40:
            view.setEuler([TURN_SPEED * viz.elapsed(), 0, 0], viz.BODY_ORI, viz.REL_PARENT)
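# update() above is a polling handler; in Vizard it is normally registered as a per-frame
# callback. A minimal wiring sketch, assuming the Leap analog sensor is set up elsewhere
# (the speed constants below are illustrative values, not from the original source):
grab = None        # no active viz.grab link yet
MOVE_SPEED = 2.0   # assumed meters per second
TURN_SPEED = 60.0  # assumed degrees per second
vizact.onupdate(0, update)  # call update() every frame at default priority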
def validation():
    '''
    Show same calibration points and compare calculated gaze point to ground truth.
    (Displays both + angular error)
    '''
    # ask for the sub port
    req.send_string('SUB_PORT')
    sub_port = req.recv_string()

    # open a sub port to listen to pupil
    sub = ctx.socket(zmq.SUB)
    sub.connect("tcp://{}:{}".format(addr, sub_port))
    sub.setsockopt_string(zmq.SUBSCRIBE, u'gaze')

    # add gaze marker
    m_gaze = vizshape.addSphere(radius=sphere_size, color=viz.GREEN)
    m_gaze.disable(viz.INTERSECTION)
    if not gaze_marker_visible:
        m_gaze.disable(viz.RENDER)  # invisible but physically present

    def get_gaze(eye_number):
        '''
        Checks the gaze stream for confident measures of the eye (eye_number) until it finds them.

        Args:
            eye_number (int): 1=left, 0=right

        Returns:
            [x, y] (float): normalized x and y values, range [0, 1]
        '''
        found_confident_val = False
        while found_confident_val == False:
            topic = sub.recv_string()  # unused
            msg = sub.recv()
            msg = loads(msg, encoding='utf-8')
            confidence = msg['confidence']
            if msg['id'] == eye_number:
                if confidence > confidence_level:
                    found_confident_val = True
                    t = msg['timestamp']  # unused
                    npx = msg['norm_pos'][0]
                    npy = msg['norm_pos'][1]
                    return [npx, npy]

    def updateGaze():
        '''
        Calls the get_gaze function and takes the average of the two eyes for the normalized
        values - they will be used to project a sphere on where subjects look at.
        '''
        # get gaze data
        norm_pos_x = np.mean([get_gaze(1)[0], get_gaze(0)[0]])
        norm_pos_y = np.mean([get_gaze(1)[1], get_gaze(0)[1]])

        # find the intersection and project sphere
        line = viz.MainWindow.screenToWorld([norm_pos_x, norm_pos_y])
        intersection = viz.intersect(line.begin, line.end)
        m_gaze.setPosition(intersection.point)

    # update cursor location on every sample
    vizact.onupdate(viz.PRIORITY_LINKS + 1, updateGaze)

    dot_norm_pos = []
    gaze_norm_pos = []
    ang_error = []

    yield showMessage('Zum Starten der Validierung die Leertaste drücken')  # 'Press space to start the validation'

    first_run = True
    for p in norm_positions:
        print('calibration point: ', p)
        norm_x = p[0]
        norm_y = p[1]

        if first_run:
            first_run = False
            '''set up a plane 'right in front of user' on which we want to project the dots'''
            # target the current center of view
            p_line = viz.MainWindow.screenToWorld(.5, .5)
            # let's modify the line and call it line 2. Here we let the line end at the "depth" value
            p_line_2 = viz.Line(begin=p_line.begin, end=p_line.end, dir=p_line.dir, length=depth)
            # Add the plane and apply the matrix of our viewpoint to the plane
            plane = vizshape.addBox(size=[3.6, 2, .1])
            mymat = viz.MainView.getMatrix()
            plane.setMatrix(mymat)
            # Reposition the plane to the end of line 2
            plane.setPosition(p_line_2.end)
            plane.color([.25, .25, .25])
            plane.alpha(.95)
            # Lock it to user
            plane_link = viz.grab(viz.MainView, plane)

        # interpolate a line from norm values to 3d coordinates
        line = viz.MainWindow.screenToWorld([norm_x, norm_y])
        # find the intersection
        intersection = viz.intersect(line.begin, line.end)

        # place a dot (at depth level of line)
        dot = vizshape.addSphere(radius=sphere_size)
        dot.setPosition(intersection.point)
        # lock dot to user
        view_link = viz.grab(viz.MainView, dot)

        print('ready')
        viz.playSound('beep500_200.wav')
        yield viztask.waitKeyDown(' ')

        for s in range(60):
            # get the current pupil time (pupil uses CLOCK_MONOTONIC with adjustable timebase).
            # You can set the pupil timebase to another clock and use that.
            t = get_pupil_timestamp()
            dot_norm_pos.append(p)
            gaze_norm_pos.append(viz.MainWindow.worldToScreen(m_gaze.getPosition()))
            yield viztask.waitTime(1/60.)

        print(t)
        dot.color(viz.RED)
        print('waiting for next position...')
        yield viztask.waitKeyDown(' ')
        yield dot.remove()

    time.sleep(2)
    showMessage('validation done!')

    for p in norm_positions:
        i = norm_positions.index(p)
        chunk = range(i * 60, (i + 1) * 60)
        print(i)
        dmx = np.mean([gaze_norm_pos[x][0] for x in chunk])
        dmy = np.mean([gaze_norm_pos[y][1] for y in chunk])

        # interpolate a line from norm values to 3d coordinates
        line = viz.MainWindow.screenToWorld([p[0], p[1]])
        line_v = viz.MainWindow.screenToWorld([dmx, dmy])

        # find the intersection
        intersection = viz.intersect(line.begin, line.end)
        intersection_v = viz.intersect(line_v.begin, line_v.end)

        # place dots (at depth level of line) for both ground truth and gaze point
        dot = vizshape.addSphere(radius=sphere_size)
        dot.setPosition(intersection.point)
        dot.color(viz.BLUE)
        dot_v = vizshape.addSphere(radius=sphere_size * 0.75)
        dot_v.setPosition(intersection_v.point)
        dot_v.color(viz.YELLOW_ORANGE)

        # lock dots to user
        view_link = viz.grab(viz.MainView, dot)
        view_link2 = viz.grab(viz.MainView, dot_v)

        # calculate angular error
        error = vizmat.AngleBetweenVector(line.dir, line_v.dir)
        # cosangle = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
        # angle = np.arccos(cosangle)
        # error = np.degrees(angle)
        ang_error.append(error)
        print('angle is: ', error, 'for ', p)

    showMessage('mean angular error is: {}'.format(np.mean(ang_error)))
    print('mean angular error is: ', np.mean(ang_error), ' deg/ visual angle')
def calibration():
    '''
    The heart of the calibration routine. Presents points and collects data.
    '''
    # start calibration routine and sample data
    n = {'subject': 'calibration.should_start', 'hmd_video_frame_size': (2160, 1200), 'outlier_threshold': 35}
    print(send_recv_notification(n))

    ref_data = []

    yield showMessage('Zum Starten die Leertaste drücken')  # 'Press space to start'

    first_run = True
    for p in norm_positions:
        print('calibration point: ', p)
        norm_x = p[0]
        norm_y = p[1]

        if first_run:
            first_run = False
            '''set up a plane 'right in front of user' on which we want to project the dots'''
            # target the current center of view
            p_line = viz.MainWindow.screenToWorld(.5, .5)
            # let's modify the line and call it line 2. Here we let the line end at the "depth" value
            p_line_2 = viz.Line(begin=p_line.begin, end=p_line.end, dir=p_line.dir, length=depth)
            # Add the plane and apply the matrix of our viewpoint to the plane
            plane = vizshape.addBox(size=[3.6, 2, .1])
            mymat = viz.MainView.getMatrix()
            plane.setMatrix(mymat)
            # Reposition the plane to the end of line 2
            plane.setPosition(p_line_2.end)
            plane.color([.25, .25, .25])
            plane.alpha(.95)
            # Lock it to user
            plane_link = viz.grab(viz.MainView, plane)

        # interpolate a line from norm values to 3d coordinates
        line = viz.MainWindow.screenToWorld([norm_x, norm_y])
        # find the intersection
        intersection = viz.intersect(line.begin, line.end)

        # place a dot (at depth level of line)
        dot = vizshape.addSphere(radius=sphere_size)
        dot.setPosition(intersection.point)
        # lock dot to user
        view_link = viz.grab(viz.MainView, dot)

        print('ready')
        viz.playSound('beep500_200.wav')
        yield viztask.waitKeyDown(' ')

        for s in range(60):
            # get the current pupil time (pupil uses CLOCK_MONOTONIC with adjustable timebase).
            # You can set the pupil timebase to another clock and use that.
            t = get_pupil_timestamp()
            # here the left and right screen marker positions are identical.
            datum0 = {'norm_pos': p, 'timestamp': t, 'id': 0}
            datum1 = {'norm_pos': p, 'timestamp': t, 'id': 1}
            ref_data.append(datum0)
            ref_data.append(datum1)
            yield viztask.waitTime(1/60.)

        print(t)
        dot.color(viz.RED)
        print('waiting for next position...')
        yield viztask.waitKeyDown(' ')
        yield dot.remove()

    # send ref data to Pupil Capture/Service:
    # this notification can be sent once at the end or multiple times.
    # during one calibration all new data will be appended.
    n = {'subject': 'calibration.add_ref_data', 'ref_data': ref_data}
    print(send_recv_notification(n))

    # stop calibration
    # pupil will correlate pupil and ref data based on timestamps,
    # compute the gaze mapping params, and start a new gaze mapper.
    n = {'subject': 'calibration.should_stop'}
    print(send_recv_notification(n))

    time.sleep(2)
    showMessage('calibration done! - now validate!')
    plane.remove()
    yield viztask.waitTime(2)
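# calibration() and validation() above are generator functions; in Vizard they are typically
# run with viztask. A minimal scheduling sketch, assuming the two routines are run back to
# back as their on-screen messages suggest (the wrapper name is illustrative):
def runGazeRoutines():
    yield calibration()   # run calibration as a sub-task
    yield validation()    # then validate against the same points

viztask.schedule(runGazeRoutines())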
def __init__(self, canvas):
    sf = 0.5
    model.pointer.setEuler(0, 0, 0)
    model.pointer.setPosition(0, 0, 0)
    self.gloveStart = model.pointer.getPosition()
    self.iterations = 0
    self.canvas = canvas
    self.origPosVec = config.positionVector
    self.origOrienVec = config.orientationVector

    # creating directions panel
    # viz.mouse.setVisible(False)
    # directions = vizinfo.InfoPanel('', fontSize = 10, parent = canvas, align = viz.ALIGN_LEFT_TOP, title = 'Tutorial', icon = False)
    # if config.pointerMode == 0:
    #     directions.addItem(viz.addText('Keyboard Controls:'))
    #     directions.addLabelItem(viz.addText('W'))
    #
    # if config.pointerMode == 1:
    #     directions.addItem(viz.addText('Spacemouse Controls:'))
    #     directions.addItem(viz.addTexQuad(size = 300, parent = canvas, texture = viz.addTexture('.\\mouse key.png')))

    # creating tutorial objects
    self.dog = viz.addChild(".\\dataset\\dog\\dog.obj")
    self.dogOutline = viz.addChild(".\\dataset\\dog\\dog.obj")
    self.dogStart = self.dog.getPosition()
    self.dog.setScale([sf, sf, sf])
    self.dogOutline.setScale([sf, sf, sf])
    self.startColor = model.pointer.getColor()

    # creating dog outline
    self.dogOutline.alpha(0.8)
    self.dogOutline.color(0, 5, 0)
    self.dogOutline.texture(None)

    # creating proximity manager
    self.manager = vizproximity.Manager()

    """creating dog grab and snap sensors around a sphere placed in the center of the dog"""
    self.dogCenter = vizshape.addSphere(0.1, pos=(self.dogStart))
    self.dogSnapSensor = vizproximity.Sensor(vizproximity.Sphere(0.35, center=[0, 1, 0]), source=self.dogCenter)
    self.outlineCenter = vizshape.addSphere(0.1, pos=(self.dogStart))
    self.dogCenter.setPosition([0, -0.35, 0])
    self.outlineCenter.setPosition([0, -0.35, 0])
    self.centerStart = self.dogCenter.getPosition()
    self.dogGrab = viz.grab(self.dogCenter, self.dog)
    self.outlineGrab = viz.grab(self.outlineCenter, self.dogOutline)
    self.dogCenter.color(5, 0, 0)
    self.outlineCenter.color(0, 5, 0)
    self.dogCenter.visible(viz.OFF)
    self.outlineCenter.visible(viz.OFF)
    self.dogSnapSensor = vizproximity.Sensor(vizproximity.Sphere(0.35, center=[0, 1, 0]), source=self.dogCenter)
    self.manager.addSensor(self.dogSnapSensor)
    self.dogGrabSensor = vizproximity.Sensor(vizproximity.Sphere(0.85, center=[0, 1, 0]), source=self.dogCenter)
    self.manager.addSensor(self.dogGrabSensor)

    """creating glove target and a dog target. the dog target is a sphere placed at the center of the dog outline"""
    self.gloveTarget = vizproximity.Target(model.pointer)
    self.manager.addTarget(self.gloveTarget)
    self.dogTargetMold = vizshape.addSphere(0.2, parent=self.dogOutline, pos=(self.dogStart))
    self.dogTargetMold.setPosition([0, 1.2, 0])
    self.dogTargetMold.visible(viz.OFF)
    self.dogTarget = vizproximity.Target(self.dogTargetMold)
    self.manager.addTarget(self.dogTarget)

    # manager proximity events
    self.manager.onEnter(self.dogGrabSensor, EnterProximity, self.gloveTarget, model.pointer)
    self.manager.onExit(self.dogGrabSensor, ExitProximity, model.pointer, self.startColor)
    self.manager.onEnter(self.dogSnapSensor, snapCheckEnter, self.dogTarget)
    self.manager.onExit(self.dogSnapSensor, snapCheckExit, self.dogTargetMold)

    # reset command
    self.keybindings = []
    self.keybindings.append(
        vizact.onkeydown("l", resetGlove, self.manager, self.gloveStart, self.dogCenter, self.outlineCenter)
    )
    self.keybindings.append(vizact.onkeydown("p", self.debugger))

    # task schedule
    self.interface = viztask.schedule(self.interfaceTasks())
    self.gameMechanics = viztask.schedule(self.mechanics())
def __init__(self, textures, auto_update=True, node=None, **kwargs):
    # node to reference this instance in the scenegraph
    self._node = node
    if self._node == None:
        self._node = viz.addGroup()
    viz.VizNode.__init__(self, id=self._node.id, **kwargs)

    # list of models affected by the shader effect
    self._affected_models = []

    # save projective textures
    self._textures = textures

    ## Initialize depth cameras.
    # They are used to capture a depth buffer for each face.
    # Inverse shadow mapping is performed based on these buffers,
    # such that the projection only hits the first surface along the projection direction.
    self._depth_cams = {
        viz.POSITIVE_X: viz.addRenderNode(inheritView=False),
        viz.NEGATIVE_X: viz.addRenderNode(inheritView=False),
        viz.POSITIVE_Y: viz.addRenderNode(inheritView=False),
        viz.NEGATIVE_Y: viz.addRenderNode(inheritView=False),
        viz.POSITIVE_Z: viz.addRenderNode(inheritView=False),
        viz.NEGATIVE_Z: viz.addRenderNode(inheritView=False)
    }

    # save projective depth textures
    self._depth_textures = {
        viz.POSITIVE_X: None,
        viz.NEGATIVE_X: None,
        viz.POSITIVE_Y: None,
        viz.NEGATIVE_Y: None,
        viz.POSITIVE_Z: None,
        viz.NEGATIVE_Z: None
    }

    cube_euler = {
        viz.POSITIVE_X: [90, 0, 0],
        viz.NEGATIVE_X: [-90, 0, 0],
        viz.POSITIVE_Y: [0, -90, 0],
        viz.NEGATIVE_Y: [0, 90, 0],
        viz.POSITIVE_Z: [0, 0, 0],
        viz.NEGATIVE_Z: [-180, 0, 0]
    }

    proj_mat = viz.Matrix.perspective(90.0, 1.0, 0.1, 100.0)

    for cam in self._depth_cams:
        self._depth_cams[cam].drawOrder(1000)
        self._depth_cams[cam].setRenderTexture(
            viz.addRenderTexture(format=viz.TEX_DEPTH),
            buffer=viz.RENDER_DEPTH
        )
        self._depth_cams[cam].setPosition(self.getPosition())
        self._depth_cams[cam].setEuler(cube_euler[cam])
        self._depth_cams[cam].setProjectionMatrix(proj_mat)
        self._depth_cams[cam].setAutoClip(viz.OFF)
        viz.grab(self, self._depth_cams[cam])
        self._depth_textures[cam] = self._depth_cams[cam].getRenderTexture(viz.RENDER_DEPTH)

    # load effect
    code = self._getShaderCode()
    self._effect = viz.addEffect(code)

    # set initial property values
    self._effect.setProperty("Tex_ProjMat", proj_mat)
    self._effect.setProperty("Inv_ViewMat", toGL(viz.MainView.getMatrix().inverse()))
    self._effect.setProperty("Tex_ViewMat_px", toGL(eulerMat(90, 0, 0) * self.getMatrix()))
    self._effect.setProperty("Tex_ViewMat_nx", toGL(eulerMat(-90, 0, 0) * self.getMatrix()))
    self._effect.setProperty("Tex_ViewMat_py", toGL(eulerMat(0, -90, 0) * self.getMatrix()))
    self._effect.setProperty("Tex_ViewMat_ny", toGL(eulerMat(0, 90, 0) * self.getMatrix()))
    self._effect.setProperty("Tex_ViewMat_pz", toGL(eulerMat(0, 0, 0) * self.getMatrix()))
    self._effect.setProperty("Tex_ViewMat_nz", toGL(eulerMat(180, 0, 0) * self.getMatrix()))
    self._effect.setProperty("Tex_px", self._textures[viz.POSITIVE_X])
    self._effect.setProperty("Tex_nx", self._textures[viz.NEGATIVE_X])
    self._effect.setProperty("Tex_py", self._textures[viz.POSITIVE_Y])
    self._effect.setProperty("Tex_ny", self._textures[viz.NEGATIVE_Y])
    self._effect.setProperty("Tex_pz", self._textures[viz.POSITIVE_Z])
    self._effect.setProperty("Tex_nz", self._textures[viz.NEGATIVE_Z])
    self._effect.setProperty("TexDepth_px", self._depth_textures[viz.POSITIVE_X])
    self._effect.setProperty("TexDepth_nx", self._depth_textures[viz.NEGATIVE_X])
    self._effect.setProperty("TexDepth_py", self._depth_textures[viz.POSITIVE_Y])
    self._effect.setProperty("TexDepth_ny", self._depth_textures[viz.NEGATIVE_Y])
    self._effect.setProperty("TexDepth_pz", self._depth_textures[viz.POSITIVE_Z])
    self._effect.setProperty("TexDepth_nz", self._depth_textures[viz.NEGATIVE_Z])

    # init auto update
    self._auto_update = vizact.onupdate(0, self.update)
    self._auto_update.setEnabled(auto_update)
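# The constructor above populates self._affected_models, but the method that fills it is
# not part of this excerpt. A hedged sketch of such a helper, assuming Vizard's
# node.apply() is used to attach the custom effect (the method name 'affect' is hypothetical):
def affect(self, model):
    model.apply(self._effect)            # attach the cube-projection effect to the model
    self._affected_models.append(model)  # track it so update() can keep its uniforms current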