Example #1
    def updateImage(self):
        # Update to next frame
        self.context.wait_and_update_all()

        # Extract information for each tracked user
        self.data = skeleton.track(self.user, self.depth, self.data)

        # Get the whole depth map
        self.data.depth_map = np.asarray(
            self.depth.get_tuple_depth_map()).reshape(480, 640)

        # Create the frame from the raw depth map bytes and convert it to RGB
        # (np.frombuffer replaces the deprecated np.fromstring)
        frame = np.frombuffer(self.depth.get_raw_depth_map_8(),
                              np.uint8).reshape(480, 640)
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

        # Get the RGB image of the scene
        self.data.image = np.frombuffer(self.image.get_raw_image_map_bgr(),
                                        dtype=np.uint8).reshape(480, 640, 3)

        # Depth of the currently targeted hand and the string displayed for it
        currentDepth, showCurrentDepth = 0, ""

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Highlight the head
            ui.drawPoint(frame, self.data.skeleton["head"][0],
                         self.data.skeleton["head"][1], 5)

            # Display lines from elbows to the respective hands
            ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"],
                             self.data.skeleton["hand"]["left"])
            ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"],
                             self.data.skeleton["hand"]["right"])

            # Get the depth value at each hand's coordinates
            leftPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["left"])
            rightPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["right"])

            if self.data.hand == self.settings.LEFT_HAND:
                currentDepth = leftPixel
            elif self.data.hand == self.settings.RIGHT_HAND:
                currentDepth = rightPixel

            # Get the shift of the boundaries around both hands
            leftShift = self.utils.getHandBoundShift(leftPixel)
            rightShift = self.utils.getHandBoundShift(rightPixel)

            # Display a rectangle around both hands
            ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"],
                                  leftShift, (50, 100, 255))
            ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"],
                                  rightShift, (200, 70, 30))

        # Record the current data if the user is ready
        if self.recordIfReady:
            cv2.putText(frame, str(self.data.getWishedDistance()), (470, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (252, 63, 253), 5)

            wished = self.data.getWishedDistance()
            if abs(int(currentDepth) - wished) <= 10:
                self.record([])
                self.recordIfReady = False
            elif int(currentDepth) < wished:
                showCurrentDepth = str(currentDepth) + " +"
            else:
                showCurrentDepth = str(currentDepth) + " -"
        else:
            showCurrentDepth = str(currentDepth)

        cv2.putText(frame, showCurrentDepth, (5, 60), cv2.FONT_HERSHEY_SIMPLEX,
                    2, (50, 100, 255), 5)

        # Update the frame
        self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))

        self.timerScreen.start()
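Example #1 (like the variants below) ends with self.timerScreen.start(), which implies updateImage is the timeout slot of a single-shot Qt timer that re-arms itself after every frame. A minimal sketch of that wiring, assuming PyQt5; the window class and the 30 ms interval are illustrative, only depthImage, timerScreen and updateImage come from the examples:

from PyQt5 import QtCore, QtWidgets

class TrackerWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.depthImage = QtWidgets.QLabel(self)  # target of setPixmap() above
        self.timerScreen = QtCore.QTimer(self)
        self.timerScreen.setInterval(30)          # ~33 fps; illustrative value
        self.timerScreen.setSingleShot(True)      # re-armed at the end of each frame
        self.timerScreen.timeout.connect(self.updateImage)
        self.timerScreen.start()

    def updateImage(self):
        # Body taken from any of the examples; it must end with
        # self.timerScreen.start() to schedule the next frame.
        ...

if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    window = TrackerWindow()
    window.show()
    app.exec_()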
Example #2
	def updateImage(self):

		# Update to next frame
		self.context.wait_and_update_all()
		
		# Extract information for each tracked user
		self.data = skeleton.track(self.user, self.depth, self.data)

		# Get the whole depth map
		self.data.depth_map = np.asarray(self.depth.get_tuple_depth_map()).reshape(480, 640)

		# Create the frame from the raw depth map bytes and convert it to RGB
		# (np.frombuffer replaces the deprecated np.fromstring)
		frame = np.frombuffer(self.depth.get_raw_depth_map_8(), np.uint8).reshape(480, 640)
		frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
		
		# Depth of the currently targeted hand and the string displayed for it
		currentDepth, showCurrentDepth = 0, ""
		
		
		if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
			# Highlight the head
			#ui.drawPoint(frame, self.data.skeleton["head"][0], self.data.skeleton["head"][1], 5)

			# Display lines from elbows to the respective hands
			#ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"], self.data.skeleton["hand"]["left"])
			#ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"], self.data.skeleton["hand"]["right"])
			
			# Get the depth value at each hand's coordinates
			leftPixel = self.utils.getDepthFromMap(self.data.depth_map, self.data.skeleton["hand"]["left"])
			rightPixel = self.utils.getDepthFromMap(self.data.depth_map, self.data.skeleton["hand"]["right"])
			
			# Get the shift of the boundaries around both hands
			leftShift = self.utils.getHandBoundShift(leftPixel)
			rightShift = self.utils.getHandBoundShift(rightPixel)
			
			if self.data.hand == self.settings.LEFT_HAND:
				currentDepth = leftPixel
				# Display a rectangle around the current hand
				#ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (200, 70, 30))
			elif self.data.hand == self.settings.RIGHT_HAND:
				currentDepth = rightPixel
				# Display a rectangle around the current hand
				#ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))
			#else:
				# Display a rectangle around both hands
				#ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (200, 70, 30))
				#ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))
			
			
			# Test the data against the neural network if possible
			if self.data.hand != self.settings.NO_HAND:
				result = self.bpn.check(self.featureExtractor.getFeatures(self.data))
				self.resultLabel.setText(str(result[0]))
				
				# Draw the pointing indicators when the network recognised a gesture
				if result[0] is not False:
					# Highlight the finger tip
					ui.drawPoint(frame, self.featureExtractor.fingerTip[result[1]][0], self.featureExtractor.fingerTip[result[1]][1], 5)
				
					# Highlight the eye
					ui.drawPoint(frame, self.featureExtractor.eyePosition[result[1]][0], self.featureExtractor.eyePosition[result[1]][1], 5)
				
					# Line of sight
					ui.drawElbowLine(frame, self.featureExtractor.eyePosition[result[1]], self.featureExtractor.fingerTip[result[1]])
			
					# Indicate orientation
					cv2.putText(frame, self.featureExtractor.orientation[result[1]], (5, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 100, 255), 5)
		
		# Update the frame
		self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))
		
		self.timerScreen.start()
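Both examples so far read the depth under a joint with self.utils.getDepthFromMap(depth_map, joint), a helper the page does not show. A plausible minimal sketch, assuming the 480x640 map built above and joints given as (x, y) pixel coordinates (the implementation is hypothetical, the real helper may differ):

import numpy as np

def getDepthFromMap(depth_map, position):
    # An empty position means the joint was not tracked this frame
    if len(position) < 2:
        return 0
    # Clamp to the bounds of the 480x640 map; rows are y, columns are x
    x = int(min(max(position[0], 0), depth_map.shape[1] - 1))
    y = int(min(max(position[1], 0), depth_map.shape[0] - 1))
    return int(depth_map[y, x])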
Example #3
	def updateImage(self):
		# Update to next frame
		self.context.wait_and_update_all()
		
		# Extract information for each tracked user
		self.data = skeleton.track(self.user, self.depth, self.data)
		
		# Get the whole depth map
		self.data.depth_map = np.asarray(self.depth.get_tuple_depth_map()).reshape(480, 640)
		
		# Create the frame from the raw depth map bytes and convert it to RGB
		# (np.frombuffer replaces the deprecated np.fromstring)
		frame = np.frombuffer(self.depth.get_raw_depth_map_8(), np.uint8).reshape(480, 640)
		frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
	
		# Get the RGB image of the scene
		self.data.image = np.frombuffer(self.image.get_raw_image_map_bgr(), dtype=np.uint8).reshape(480, 640, 3)
		
		# Depth of the currently targeted hand and the string displayed for it
		currentDepth, showCurrentDepth = 0, ""
		
		
		if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
			# Highlight the head
			ui.drawPoint(frame, self.data.skeleton["head"][0], self.data.skeleton["head"][1], 5)

			# Display lines from elbows to the respective hands
			ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"], self.data.skeleton["hand"]["left"])
			ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"], self.data.skeleton["hand"]["right"])
			
			# Get the depth value at each hand's coordinates
			leftPixel = self.utils.getDepthFromMap(self.data.depth_map, self.data.skeleton["hand"]["left"])
			rightPixel = self.utils.getDepthFromMap(self.data.depth_map, self.data.skeleton["hand"]["right"])
			
			if self.data.hand == self.settings.LEFT_HAND:
				currentDepth = leftPixel
			elif self.data.hand == self.settings.RIGHT_HAND:
				currentDepth = rightPixel
			
			# Get the shift of the boundaries around both hands
			leftShift = self.utils.getHandBoundShift(leftPixel)
			rightShift = self.utils.getHandBoundShift(rightPixel)

			# Display a rectangle around both hands
			ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (50, 100, 255))
			ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))
		
		
		# Record the current data if the user is ready
		if self.recordIfReady:
			cv2.putText(frame, str(self.data.getWishedDistance()), (470, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (252, 63, 253), 5)
			
			wished = self.data.getWishedDistance()
			if abs(int(currentDepth) - wished) <= 10:
				self.record([])
				self.recordIfReady = False
			elif int(currentDepth) < wished:
				showCurrentDepth = str(currentDepth) + " +"
			else:
				showCurrentDepth = str(currentDepth) + " -"
		else:
			showCurrentDepth = str(currentDepth)
			
		cv2.putText(frame, showCurrentDepth, (5, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 100, 255), 5)
		
		# Update the frame
		self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))
		
		self.timerScreen.start()
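Examples #1 and #3 size the hand rectangles with self.utils.getHandBoundShift(depth), so the box shrinks as the hand moves away from the sensor. That helper is not shown either; a hypothetical sketch of such a depth-to-pixels model (the constants are purely illustrative):

def getHandBoundShift(depth):
    # No usable depth reading: collapse the box
    if depth <= 0:
        return 0
    # Half-width in pixels shrinks with distance (depth in millimetres);
    # e.g. ~90 px at 1 m, never smaller than 10 px
    return max(int(90000 / depth), 10)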
Example #4
	def updateImage(self, frame):
		self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))
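Example #4 merely forwards a ready-made frame to the QLabel. The ui.convertOpenCVFrameToQPixmap helper that every example relies on is not shown; a minimal sketch of what such a conversion could look like, assuming PyQt5 (the BGR-to-RGB swap and the row stride are the essential details):

import cv2
from PyQt5 import QtGui

def convertOpenCVFrameToQPixmap(frame):
    # OpenCV stores pixels as BGR; Qt expects RGB
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    height, width, channels = rgb.shape
    # tobytes() copies the buffer, so the QImage owns stable data
    image = QtGui.QImage(rgb.tobytes(), width, height,
                         channels * width, QtGui.QImage.Format_RGB888)
    return QtGui.QPixmap.fromImage(image)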
Example #5
    def updateImage(self):

        # Update to next frame
        self.context.wait_and_update_all()

        # Extract information for each tracked user
        self.data = skeleton.track(self.user, self.depth, self.data)

        # Get the whole depth map
        self.data.depth_map = np.asarray(
            self.depth.get_tuple_depth_map()).reshape(480, 640)

        # Create the frame from the raw depth map bytes and convert it to RGB
        # (np.frombuffer replaces the deprecated np.fromstring)
        frame = np.frombuffer(self.depth.get_raw_depth_map_8(),
                              np.uint8).reshape(480, 640)
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

        # Depth of the currently targeted hand and the string displayed for it
        currentDepth, showCurrentDepth = 0, ""

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Highlight the head
            #ui.drawPoint(frame, self.data.skeleton["head"][0], self.data.skeleton["head"][1], 5)

            # Display lines from elbows to the respective hands
            #ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"], self.data.skeleton["hand"]["left"])
            #ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"], self.data.skeleton["hand"]["right"])

            # Get the depth value at each hand's coordinates
            leftPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["left"])
            rightPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["right"])

            # Get the shift of the boundaries around both hands
            leftShift = self.utils.getHandBoundShift(leftPixel)
            rightShift = self.utils.getHandBoundShift(rightPixel)

            if self.data.hand == self.settings.LEFT_HAND:
                currentDepth = leftPixel
                # Display a rectangle around the current hand
                #ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (200, 70, 30))
            elif self.data.hand == self.settings.RIGHT_HAND:
                currentDepth = rightPixel
                # Display a rectangle around the current hand
                #ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))
            #else:
                # Display a rectangle around both hands
                #ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (200, 70, 30))
                #ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))

            # Test the data against the neural network if possible
            if self.data.hand != self.settings.NO_HAND:
                result = self.bpn.check(
                    self.featureExtractor.getFeatures(self.data))
                self.resultLabel.setText(str(result[0]))

                # Draw the pointing indicators when the network recognised a gesture
                if result[0] is not False:
                    # Highlight the finger tip
                    ui.drawPoint(frame,
                                 self.featureExtractor.fingerTip[result[1]][0],
                                 self.featureExtractor.fingerTip[result[1]][1],
                                 5)

                    # Highlight the eye
                    ui.drawPoint(
                        frame, self.featureExtractor.eyePosition[result[1]][0],
                        self.featureExtractor.eyePosition[result[1]][1], 5)

                    # Line of sight
                    ui.drawElbowLine(
                        frame, self.featureExtractor.eyePosition[result[1]],
                        self.featureExtractor.fingerTip[result[1]])

                    # Indicate orientation
                    cv2.putText(frame,
                                self.featureExtractor.orientation[result[1]],
                                (5, 60), cv2.FONT_HERSHEY_SIMPLEX, 2,
                                (50, 100, 255), 5)

        # Update the frame
        self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))

        self.timerScreen.start()
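Finally, the ui.drawPoint, ui.drawElbowLine and ui.drawHandBoundaries calls used throughout map directly onto plain OpenCV primitives. A hypothetical sketch of such helpers (colours and thicknesses are illustrative, not the project's values):

import cv2

def drawPoint(frame, x, y, radius):
    # Filled circle marking a joint such as the head or a finger tip
    cv2.circle(frame, (int(x), int(y)), radius, (0, 0, 255), -1)

def drawElbowLine(frame, start, end):
    # Straight segment between two tracked joints, skipped if either is missing
    if len(start) >= 2 and len(end) >= 2:
        cv2.line(frame, (int(start[0]), int(start[1])),
                 (int(end[0]), int(end[1])), (255, 255, 255), 2)

def drawHandBoundaries(frame, center, shift, color):
    # Square of half-width `shift` centred on the hand
    if len(center) >= 2:
        x, y = int(center[0]), int(center[1])
        cv2.rectangle(frame, (x - shift, y - shift),
                      (x + shift, y + shift), color, 2)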