# Example #1
class DatasetGui(QtWidgets.QWidget):
    """Main window of the dataset-recording tool.

    Shows the live depth stream of the sensor, tracks the user's skeleton
    and lets the operator save dataset items (positive, negative, accuracy
    or heat-map captures) through a small acquisition form.
    """

    # Shared helper instances used across the application
    utils = Utils()
    featureExtractor = FeatureExtractor()
    bpn = BPNHandler(True)
    accuracy = accuracy.Accuracy()

    # Constructor of the DatasetGui class: initialises the sensor pipeline
    # (depth/image/user generators), the timers and the whole UI.
    #
    # @param	None
    # @return	None
    def __init__(self):
        super(DatasetGui, self).__init__()
        self.setWindowTitle("Pointing Gesture Recognition - Dataset recording")

        # Retrieve all settings
        self.settings = Settings()

        # Load sounds
        self.countdownSound = QtMultimedia.QSound(
            self.settings.getResourceFolder() + "countdown.wav")
        self.countdownEndedSound = QtMultimedia.QSound(
            self.settings.getResourceFolder() + "countdown-ended.wav")

        # Get the context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the image generator to get an RGB image of the scene
        self.image = ImageGenerator()
        self.image.create(self.context)
        self.image.set_resolution_preset(RES_VGA)
        self.image.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()
        print("Starting to detect users..")

        # Create a new dataset item
        self.data = Dataset()

        # Create a timer for an eventual countdown before recording the data
        self.countdownTimer = QtCore.QTimer()
        self.countdownRemaining = 10
        self.countdownTimer.setInterval(1000)
        self.countdownTimer.setSingleShot(True)
        self.countdownTimer.timeout.connect(self.recordCountdown)

        # Create a timer to eventually record data for a heat map
        self.heatmapRunning = False
        self.heatmapTimer = QtCore.QTimer()
        self.heatmapTimer.setInterval(10)
        self.heatmapTimer.setSingleShot(True)
        self.heatmapTimer.timeout.connect(self.recordHeatmap)

        # Create the global layout
        self.layout = QtWidgets.QVBoxLayout(self)

        # Create custom widgets to hold sensor's images
        self.depthImage = SensorWidget()
        self.depthImage.setGeometry(10, 10, 640, 480)

        # Add these custom widgets to the global layout
        self.layout.addWidget(self.depthImage)

        # Hold the label indicating the number of dataset taken
        self.numberLabel = QtWidgets.QLabel()
        self.updateDatasetNumberLabel()

        # Create the acquisition form elements
        self.createAcquisitionForm()

        # Register a dialog window to prompt the target position
        self.dialogWindow = DatasetDialog(self)

        # Allow to save the data when the right distance is reached
        self.recordIfReady = False

        # Create and launch a timer to update the images
        self.timerScreen = QtCore.QTimer()
        self.timerScreen.setInterval(30)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.updateImage)
        self.timerScreen.start()

    # Update the depth image displayed within the main window: grabs a new
    # frame, overlays the tracked skeleton and, if armed, triggers a record
    # once the pointing hand reaches the wished distance.
    #
    # @param	None
    # @return	None
    def updateImage(self):
        # Update to next frame
        self.context.wait_and_update_all()

        # Extract informations of each tracked user
        self.data = skeleton.track(self.user, self.depth, self.data)

        # Get the whole depth map
        self.data.depth_map = np.asarray(
            self.depth.get_tuple_depth_map()).reshape(480, 640)

        # Create the frame from the raw depth map string and convert it to RGB
        # (np.frombuffer replaces the deprecated np.fromstring)
        frame = np.frombuffer(self.depth.get_raw_depth_map_8(),
                              np.uint8).reshape(480, 640)
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

        # Get the RGB image of the scene
        self.data.image = np.frombuffer(self.image.get_raw_image_map_bgr(),
                                        dtype=np.uint8).reshape(480, 640, 3)

        # Will be used to specify the depth of the current hand wished
        currentDepth, showCurrentDepth = 0, ""

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Highlight the head
            ui.drawPoint(frame, self.data.skeleton["head"][0],
                         self.data.skeleton["head"][1], 5)

            # Display lines from elbows to the respective hands
            ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"],
                             self.data.skeleton["hand"]["left"])
            ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"],
                             self.data.skeleton["hand"]["right"])

            # Get the pixel's depth from the coordinates of the hands
            leftPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["left"])
            rightPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["right"])

            # Keep the depth of the currently selected pointing hand
            if self.data.hand == self.settings.LEFT_HAND:
                currentDepth = leftPixel
            elif self.data.hand == self.settings.RIGHT_HAND:
                currentDepth = rightPixel

            # Get the shift of the boundaries around both hands
            leftShift = self.utils.getHandBoundShift(leftPixel)
            rightShift = self.utils.getHandBoundShift(rightPixel)

            # Display a rectangle around both hands
            ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"],
                                  leftShift, (50, 100, 255))
            ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"],
                                  rightShift, (200, 70, 30))

        # Record the current data if the user is ready
        if self.recordIfReady:
            cv2.putText(frame, str(self.data.getWishedDistance()), (470, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (252, 63, 253), 5)

            # Save as soon as the hand is within +/- 10 mm of the wished distance
            if abs(int(currentDepth) - self.data.getWishedDistance()) <= 10:
                self.record([])
                self.recordIfReady = False
            else:
                # Hint the operator about the direction to move the hand
                if int(currentDepth) < self.data.getWishedDistance():
                    showCurrentDepth = str(currentDepth) + " +"
                else:
                    showCurrentDepth = str(currentDepth) + " -"
        else:
            showCurrentDepth = str(currentDepth)

        cv2.putText(frame, showCurrentDepth, (5, 60), cv2.FONT_HERSHEY_SIMPLEX,
                    2, (50, 100, 255), 5)

        # Update the frame
        self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))

        # Re-arm the single-shot screen timer for the next frame
        self.timerScreen.start()

    # Update the label indicating the number of dataset elements saved so far
    # for the current type
    #
    # @param	None
    # @return	None
    def updateDatasetNumberLabel(self):
        # Map each dataset type to the folder holding its files; anything
        # else (e.g. heat map) falls back to the generic dataset folder.
        folders = {
            Dataset.TYPE_POSITIVE: self.settings.getPositiveFolder,
            Dataset.TYPE_NEGATIVE: self.settings.getNegativeFolder,
            Dataset.TYPE_ACCURACY: self.settings.getAccuracyFolder,
        }
        getFolder = folders.get(self.data.type, self.settings.getDatasetFolder)
        self.numberLabel.setText(
            "Dataset #%d" % (self.utils.getFileNumberInFolder(getFolder())))

    # Record the actual informations
    #
    # @param	obj					Initiator of the event (unused)
    # @return	None
    def record(self, obj):
        # If the user collects data to check accuracy, prompts additional informations
        if self.data.type == Dataset.TYPE_ACCURACY:
            self.saveForTarget()
        # If the user collects data for a heat map, let's do it
        elif self.data.type == Dataset.TYPE_HEATMAP:
            # The same button will be used to stop recording
            if not self.heatmapRunning:
                self.startRecordHeatmap()
            else:
                self.stopRecordHeatmap()
        else:
            # Directly save the dataset and update the label number
            self.data.save()
            self.countdownEndedSound.play()
            self.updateDatasetNumberLabel()

    # Handle a countdown as a mean to record the informations with a delay
    #
    # @param	None
    # @return	None
    def recordCountdown(self):
        # Decrease the countdown and check if it needs to continue
        self.countdownRemaining -= 1

        if self.countdownRemaining <= 0:
            # Re-initialise the timer and record the data
            self.countdownTimer.stop()
            self.countdownButton.setText("Saving..")
            self.countdownRemaining = 10
            self.record([])
        else:
            self.countdownTimer.start()
            self.countdownSound.play()

        # Display the actual remaining time
        self.countdownButton.setText("Save in %ds" % (self.countdownRemaining))

    # Record a heatmap representation of the informations by successive captures
    #
    # @param	None
    # @return	False when no hand is selected, None otherwise
    def recordHeatmap(self):
        # A pointing hand is mandatory to locate the finger tip
        if self.data.hand == self.settings.NO_HAND:
            print("Unable to record as no hand is selected")
            return False

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Input the data into the feature extractor
            result = self.bpn.check(
                self.featureExtractor.getFeatures(self.data))

            # Add the depth of the finger tip
            point = self.featureExtractor.fingerTip[result[1]]
            point.append(self.utils.getDepthFromMap(self.data.depth_map,
                                                    point))

            # Verify that informations are correct
            if point[0] != 0 and point[1] != 0 and point[2] != 0:
                # Add the result of the neural network
                point.append(result[0])

                self.heatmap.append(point)
                self.countdownSound.play()

        # Loop timer
        self.heatmapTimer.start()

    # Start the recording of the heatmap
    #
    # @param	None
    # @return	None
    def startRecordHeatmap(self):
        self.saveButton.setText("Stop recording")
        self.heatmapRunning = True
        self.heatmapTimer.start()

    # Stop the recording of the heatmap, display it and reset the collected points
    #
    # @param	None
    # @return	None
    def stopRecordHeatmap(self):
        self.heatmapTimer.stop()
        self.heatmapRunning = False
        self.countdownEndedSound.play()

        self.saveButton.setText("Record")

        self.accuracy.showHeatmap(self.heatmap, "front")
        self.heatmap = []

    # Raise a flag to record the informations when the chosen distance will be met
    #
    # @param	None
    # @return	None
    def startRecordWhenReady(self):
        self.recordIfReady = True

    # Hold the current informations to indicate the position of the target
    # thanks to the dialog window
    #
    # @param	None
    # @return	None
    def saveForTarget(self):
        # Freeze the data
        self.timerScreen.stop()
        self.countdownEndedSound.play()

        # Translate the depth values to a frame and set it in the dialog window
        frame = np.frombuffer(self.depth.get_raw_depth_map_8(),
                              np.uint8).reshape(480, 640)
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        self.dialogWindow.setFrame(frame)

        # Prompt the position of the target
        self.dialogWindow.exec_()

    # Toggle the type of dataset chosen
    #
    # @param	value				Identifier of the new type of dataset
    # @return	None
    def toggleType(self, value):
        self.data.toggleType(value)

        if value == self.data.TYPE_HEATMAP:
            # Guard against the signal firing before the buttons exist,
            # exactly like the non-heatmap branch below
            if hasattr(self, 'saveButton'):
                self.saveButton.setText("Record")
                self.countdownButton.setText("Record in %ds" %
                                             (self.countdownRemaining))
                self.readyButton.setEnabled(False)

            # Create an array to hold all points
            self.heatmap = []
        else:
            self.updateDatasetNumberLabel()
            if hasattr(self, 'saveButton'):
                self.saveButton.setText("Save")
                self.countdownButton.setText("Save in %ds" %
                                             (self.countdownRemaining))
                self.readyButton.setEnabled(True)

    # Create the acquisition form of the main window
    #
    # @param	None
    # @return	None
    def createAcquisitionForm(self):
        globalLayout = QtWidgets.QHBoxLayout()
        vlayout = QtWidgets.QVBoxLayout()

        # Drop down menu of the distance to record the informations when the
        # pointing hand meet the corresponding value
        hlayout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Distance")
        label.setFixedWidth(100)
        comboBox = QtWidgets.QComboBox()
        comboBox.currentIndexChanged.connect(self.data.toggleDistance)
        comboBox.setFixedWidth(200)
        comboBox.addItem("550")
        comboBox.addItem("750")
        comboBox.addItem("1000")
        comboBox.addItem("1250")
        comboBox.addItem("1500")
        comboBox.addItem("1750")
        comboBox.addItem("2000")
        comboBox.setCurrentIndex(0)
        hlayout.addWidget(label)
        hlayout.addWidget(comboBox)
        vlayout.addLayout(hlayout)

        # Drop down menu to select the type of hand of the dataset
        hlayout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Pointing hand")
        label.setFixedWidth(100)
        comboBox = QtWidgets.QComboBox()
        comboBox.currentIndexChanged.connect(self.data.toggleHand)
        comboBox.setFixedWidth(200)
        comboBox.addItem("Left")
        comboBox.addItem("Right")
        comboBox.addItem("None")
        comboBox.setCurrentIndex(0)
        hlayout.addWidget(label)
        hlayout.addWidget(comboBox)
        vlayout.addLayout(hlayout)

        # Drop down menu of the dataset type
        hlayout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Type")
        label.setFixedWidth(100)
        comboBox = QtWidgets.QComboBox()
        comboBox.currentIndexChanged.connect(self.toggleType)
        comboBox.setFixedWidth(200)
        comboBox.addItem("Positive")
        comboBox.addItem("Negative")
        comboBox.addItem("Accuracy")
        comboBox.addItem("Heat map")
        comboBox.setCurrentIndex(0)
        hlayout.addWidget(label)
        hlayout.addWidget(comboBox)
        vlayout.addLayout(hlayout)

        globalLayout.addLayout(vlayout)
        vlayout = QtWidgets.QVBoxLayout()

        self.numberLabel.setAlignment(QtCore.Qt.AlignCenter)
        vlayout.addWidget(self.numberLabel)

        # Action buttons to record the way that suits the most
        hLayout = QtWidgets.QHBoxLayout()
        self.readyButton = QtWidgets.QPushButton(
            'Save when ready', clicked=self.startRecordWhenReady)
        hLayout.addWidget(self.readyButton)
        vlayout.addLayout(hLayout)

        item_layout = QtWidgets.QHBoxLayout()
        # Wrap the timer start in a lambda: connecting clicked(bool) straight
        # to QTimer.start would hit the start(int msec) overload with
        # False (=0) and override the 1 second countdown interval.
        self.countdownButton = QtWidgets.QPushButton(
            "Save in %ds" % (self.countdownRemaining),
            clicked=lambda: self.countdownTimer.start())
        self.saveButton = QtWidgets.QPushButton('Save', clicked=self.record)
        item_layout.addWidget(self.countdownButton)
        item_layout.addWidget(self.saveButton)
        vlayout.addLayout(item_layout)

        globalLayout.addLayout(vlayout)
        self.layout.addLayout(globalLayout)
# Example #2
class UnitTesting:
	"""Minimal unit-test harness printing per-category and overall results."""
	
	# Shared instances of the project classes under test
	featureExtractor = FeatureExtractor()
	utils = Utils()
	bpn = BPNHandler(True)
	
	# Counters for the current test category (reset by getResults)
	currentPassed = 0
	currentFailed = 0
	currentTotal = 0
	# Cumulative counters across all categories (reset by getFinalResults)
	passed = 0
	failed = 0
	total = 0
	
	
	
	# Check that the result parameter is conform to another expectation parameter
	# and update the pass/fail counters accordingly.
	#
	# Handles NaN-aware comparison of 1D/2D numpy arrays (np.array_equiv,
	# np.array_equal and np.testing.assert_allclose all fail on np.NaN),
	# element-wise list comparison, scalar NaN and plain equality.
	#
	# @param	expectation			Targeted result
	# @param	result				Actual result to test
	# @param	method				Name of the method currently tested
	# @param	test				Name of the current assertion
	# @return	None
	def check(self, expectation, result, method, test):
		self.currentTotal += 1
		self.total += 1
		
		if type(expectation).__module__ == np.__name__:
			tmp = True
			# Compare shapes (not just sizes) so a (2,2) vs (4,) mismatch
			# fails cleanly instead of raising while indexing
			if type(result).__module__ == np.__name__ and expectation.shape == result.shape:
				shape = expectation.shape
				
				# Only considers 1D and 2D arrays
				for i in range(shape[0]):
					if len(shape) > 1:
						for j in range(shape[1]):
							if expectation[i][j] is not None and np.isnan(expectation[i][j]):
								# Bug fix: test the *result* value for NaN,
								# not the expectation a second time
								if result[i][j] is None or not np.isnan(result[i][j]):
									tmp = False
							elif type(expectation[i][j]) == float:
								# Handle float approximation; bug fix: the old
								# "or expectation!=result" clause bypassed it
								if ('%0.5f' % expectation[i][j]) != ('%0.5f' % result[i][j]):
									tmp = False
							elif expectation[i][j] != result[i][j]:
								tmp = False
					elif expectation[i] is not None and np.isnan(expectation[i]):
						# Same bug fix as above for the 1D case
						if result[i] is None or not np.isnan(result[i]):
							tmp = False
					elif expectation[i] != result[i]:
						tmp = False
			else:
				tmp = False
		elif type(expectation) == list:
			# Element-wise comparison; lengths must match
			if type(result) == list and len(expectation) == len(result):
				tmp = True
				for i in range(len(expectation)):
					if expectation[i] != result[i]:
						tmp = False
			else:
				tmp = False
		elif isinstance(expectation, float) and np.isnan(expectation):
			# Scalar NaN expectation: only a NaN result matches
			# (isinstance guard keeps np.isnan from raising on non-floats)
			tmp = bool(np.isnan(result))
		elif expectation == result:
			tmp = True
		else:
			tmp = False
		
		if tmp:
			self.currentPassed += 1
			self.passed += 1
			print("--- Success \t{0}: \t{1}".format(method, test))
		else:
			self.currentFailed += 1
			self.failed += 1
			print("--- Failure \t{0}: \t{1} \t{2} while expecting {3}".format(method, test, result, expectation))
	
	
	# Display the results of the unit-tests of a category and reset the
	# per-category counters.
	#
	# @param	None
	# @return	None
	def getResults(self):
		print("------------------------\n--- Unit Testing results:")
		if self.currentTotal > 0:
			# Multiply before dividing: the former (passed/total)*100 was
			# integer division in Python 2 and always printed 0% or 100%
			print("--- {0} passed ({1}%)".format(self.currentPassed, int(self.currentPassed * 100 / self.currentTotal)))
			print("--- {0} failed ({1}%)".format(self.currentFailed, int(self.currentFailed * 100 / self.currentTotal)))
			print("--- Total asserted: {0}".format(self.currentTotal))
		else:
			print("--- None yet...")
		print("------------------------")
		
		# Reset the per-category counters for the next category
		self.currentPassed = 0
		self.currentFailed = 0
		self.currentTotal = 0
	
	
	# Display the final results of the unit-tests and reset the global counters.
	#
	# @param	None
	# @return	None
	def getFinalResults(self):
		print("\n------------------------\n--- Final Unit Testing results:")
		if self.total > 0:
			# Multiply before dividing: the former (passed/total)*100 was
			# integer division in Python 2 and always printed 0% or 100%
			print("--- {0} passed ({1}%)".format(self.passed, int(self.passed * 100 / self.total)))
			print("--- {0} failed ({1}%)".format(self.failed, int(self.failed * 100 / self.total)))
			print("--- Total asserted: {0}".format(self.total))
		else:
			print("--- None yet...")
		print("------------------------")
		
		# Reset the cumulative counters
		self.passed = 0
		self.failed = 0
		self.total = 0
		
	
	# Assert the FeaturesExtractor class
	# 
	# @param	None
	# @return	None
	def assertFeatureExtractor(self):
		"""Run all unit-test assertions for the FeatureExtractor helper.

		Exercises thresholdBinary, thresholdExtracted, findNearestValue,
		tarExtracted, removeEmptyColumnsRows, rotate and keepRange through
		self.check (which prints and tallies each assertion).
		NOTE(review): this method may continue beyond the visible chunk.
		"""
		print "\n--- FeaturesExtractor ---"
		
		
		
		# Assert the thresholdBinary method
		# Expected outputs:	0|1
		self.check(0, self.featureExtractor.thresholdBinary(1, 2, 1), "thresholdBinary", "x<start")
		self.check(0, self.featureExtractor.thresholdBinary(2, 2, 1), "thresholdBinary", "x>end")
		self.check(0, self.featureExtractor.thresholdBinary(0, -1, 0), "thresholdBinary", "x==0")
		self.check(1, self.featureExtractor.thresholdBinary(1, 1, 1), "thresholdBinary", "x==start and x==end")
		self.check(1, self.featureExtractor.thresholdBinary(2, 1, 2), "thresholdBinary", "x>start and x==end")
		self.check(1, self.featureExtractor.thresholdBinary(2, 2, 3), "thresholdBinary", "x==start and x<end")
		self.check(1, self.featureExtractor.thresholdBinary(2, 1, 3), "thresholdBinary", "x>start and x<end")
		
		
		
		# Assert the thresholdExtracted method
		# Expected outputs:	np.NaN|(x>=start and x<=end and x!=0)
		self.check(np.NaN, self.featureExtractor.thresholdExtracted(1, 2, 1), "thresholdExtracted", "x<start")
		self.check(np.NaN, self.featureExtractor.thresholdExtracted(2, 2, 1), "thresholdExtracted", "x>end")
		self.check(np.NaN, self.featureExtractor.thresholdExtracted(0, -1, 0), "thresholdExtracted", "x==0")
		self.check(1, self.featureExtractor.thresholdExtracted(1, 1, 1), "thresholdExtracted", "x==start and x==end")
		self.check(2, self.featureExtractor.thresholdExtracted(2, 1, 2), "thresholdExtracted", "x>start and x==end")
		self.check(2, self.featureExtractor.thresholdExtracted(2, 2, 3), "thresholdExtracted", "x==start and x<end")
		self.check(2, self.featureExtractor.thresholdExtracted(2, 1, 3), "thresholdExtracted", "x>start and x<end")
		
		
		
		# Assert the findNearestValue method
		# Expected outputs:	None|(index of nearest value 1)
		self.check(None, self.featureExtractor.findNearestValue([], 0), "findNearestValue", "empty array")
		self.check(None, self.featureExtractor.findNearestValue([0], 1), "findNearestValue", "index out of bound")
		self.check(None, self.featureExtractor.findNearestValue([0], -1), "findNearestValue", "index out of bound")
		self.check(None, self.featureExtractor.findNearestValue([0,0], 1), "findNearestValue", "no 1 in small even array")
		self.check(None, self.featureExtractor.findNearestValue([0,0,0], 1), "findNearestValue", "no 1 in small odd array")
		self.check(None, self.featureExtractor.findNearestValue([0,0,0,0,0], 1), "findNearestValue", "no 1 in big even array")
		self.check(None, self.featureExtractor.findNearestValue([0,0,0,0,0,0], 1), "findNearestValue", "no 1 in big odd array")
		self.check(None, self.featureExtractor.findNearestValue([0,0,0,0,1], 1), "findNearestValue", "far initial index not in middle of odd array")
		self.check(0, self.featureExtractor.findNearestValue([1,0,0,0,0], 1), "findNearestValue", "close initial index not in middle of odd array")
		self.check(3, self.featureExtractor.findNearestValue([0,0,0,1], 1), "findNearestValue", "far initial index not in middle of even array")
		self.check(0, self.featureExtractor.findNearestValue([1,0,0,0], 1), "findNearestValue", "close initial index not in middle of even array")
		self.check(0, self.featureExtractor.findNearestValue([1,0,0], 1), "findNearestValue", "1 as first index in small array")
		self.check(1, self.featureExtractor.findNearestValue([0,1,0], 1), "findNearestValue", "1 as middle index in small array")
		self.check(2, self.featureExtractor.findNearestValue([0,0,1], 1), "findNearestValue", "1 as last index in small array")
		self.check(0, self.featureExtractor.findNearestValue([1,0,0,0,0], 2), "findNearestValue", "1 as first index in big array")
		self.check(2, self.featureExtractor.findNearestValue([0,0,1,0,0], 2), "findNearestValue", "1 as middle index in big array")
		self.check(4, self.featureExtractor.findNearestValue([0,0,0,0,1], 2), "findNearestValue", "1 as last index in big array")
		
		
		
		# Assert the tarExtracted method
		# Expected outputs:	(same array with reduced values based on the minimum)|(same array if only NaN values)
		self.featureExtractor.currentExtracted = np.array([])
		self.featureExtractor.tarExtracted()
		self.check(np.array([]), self.featureExtractor.currentExtracted, "tarExtracted", "empty array")
		
		self.featureExtractor.currentExtracted = np.array([np.NaN,np.NaN])
		self.featureExtractor.tarExtracted()
		self.check(np.array([np.NaN,np.NaN]), self.featureExtractor.currentExtracted, "tarExtracted", "array of NaN values")
		
		self.featureExtractor.currentExtracted = np.array([-1,0,np.NaN])
		self.featureExtractor.tarExtracted()
		self.check(np.array([0,1,np.NaN]), self.featureExtractor.currentExtracted, "tarExtracted", "negative minimal value")
		
		self.featureExtractor.currentExtracted = np.array([0,2,np.NaN])
		self.featureExtractor.tarExtracted()
		self.check(np.array([0,2,np.NaN]), self.featureExtractor.currentExtracted, "tarExtracted", "zero minimal value")
		
		self.featureExtractor.currentExtracted = np.array([1,3,np.NaN])
		self.featureExtractor.tarExtracted()
		self.check(np.array([0,2,np.NaN]), self.featureExtractor.currentExtracted, "tarExtracted", "positive minimal value")
		
		self.featureExtractor.currentExtracted = np.array([[np.NaN,np.NaN],[np.NaN,np.NaN]])
		self.featureExtractor.tarExtracted()
		self.check(np.array([[np.NaN,np.NaN],[np.NaN,np.NaN]]), self.featureExtractor.currentExtracted, "tarExtracted", "2D array of NaN values")
		
		self.featureExtractor.currentExtracted = np.array([[1,0,np.NaN],[-1,0,np.NaN]])
		self.featureExtractor.tarExtracted()
		self.check(np.array([[2,1,np.NaN],[0,1,np.NaN]]), self.featureExtractor.currentExtracted, "tarExtracted", "2D array with negative minimal value")
		
		self.featureExtractor.currentExtracted = np.array([[0,2,np.NaN],[1,2,np.NaN]])
		self.featureExtractor.tarExtracted()
		self.check(np.array([[0,2,np.NaN],[1,2,np.NaN]]), self.featureExtractor.currentExtracted, "tarExtracted", "2D array with zero minimal value")
		
		self.featureExtractor.currentExtracted = np.array([[1,2,np.NaN],[3,4,np.NaN]])
		self.featureExtractor.tarExtracted()
		self.check(np.array([[0,1,np.NaN],[2,3,np.NaN]]), self.featureExtractor.currentExtracted, "tarExtracted", "2D array with positive minimal value")
		
		
		
		# Assert the removeEmptyColumnsRows method
		# Expected outputs:	same arrays without empty rows and columns
		self.featureExtractor.currentExtracted = np.array([])
		self.featureExtractor.currentBinary = np.array([])
		self.featureExtractor.removeEmptyColumnsRows()
		self.check(np.array([]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "empty array: currentExtracted")
		self.check(np.array([]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "empty array: currentBinary")
		
		self.featureExtractor.currentExtracted = np.array([[0,0],[0,0]])
		self.featureExtractor.currentBinary = np.array([[0,0],[0,0]])
		self.featureExtractor.removeEmptyColumnsRows()
		self.check(np.array([[]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "array of 0: currentExtracted")
		self.check(np.array([[]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "array of 0: currentBinary")
		
		self.featureExtractor.currentExtracted = np.array([[1,2],[3,4]])
		self.featureExtractor.currentBinary = np.array([[1,2],[3,4]])
		self.featureExtractor.removeEmptyColumnsRows()
		self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "array of non-zero values: currentExtracted")
		self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "array of non-zero values: currentBinary")
		
		self.featureExtractor.currentExtracted = np.array([[0,0],[1,2]])
		self.featureExtractor.currentBinary = np.array([[0,0],[1,2]])
		self.featureExtractor.removeEmptyColumnsRows()
		self.check(np.array([[1,2]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "first row empty: currentExtracted")
		self.check(np.array([[1,2]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "first row empty: currentBinary")
		
		self.featureExtractor.currentExtracted = np.array([[1,2],[0,0]])
		self.featureExtractor.currentBinary = np.array([[1,2],[0,0]])
		self.featureExtractor.removeEmptyColumnsRows()
		self.check(np.array([[1,2]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "last row empty: currentExtracted")
		self.check(np.array([[1,2]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "last row empty: currentBinary")
		
		self.featureExtractor.currentExtracted = np.array([[0,1],[0,2]])
		self.featureExtractor.currentBinary = np.array([[0,1],[0,2]])
		self.featureExtractor.removeEmptyColumnsRows()
		self.check(np.array([[1],[2]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "first column empty: currentExtracted")
		self.check(np.array([[1],[2]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "first column empty: currentBinary")
		
		self.featureExtractor.currentExtracted = np.array([[1,0],[2,0]])
		self.featureExtractor.currentBinary = np.array([[1,0],[2,0]])
		self.featureExtractor.removeEmptyColumnsRows()
		self.check(np.array([[1],[2]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "last column empty: currentExtracted")
		self.check(np.array([[1],[2]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "last column empty: currentBinary")
		
		
		
		# Assert the rotate method
		# Expected outputs:	rotated matrice by the rotationAngle
		# Expected outputs:	(-1|0|1|2) for rotationAngle
		self.featureExtractor.currentExtracted = np.array([[1,2],[3,4]])
		self.featureExtractor.currentBinary = np.array([[1,2],[3,4]])
		
		self.featureExtractor.rotate([0,0],[0,0])
		self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentExtracted, "rotate", "elbow/hand on same position: currentExtracted")
		self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentBinary, "rotate", "elbow/hand on same position: currentBinary")
		self.check(0, self.featureExtractor.rotationAngle, "rotate", "elbow/hand on same position: rotationAngle")
		
		self.featureExtractor.rotate([1,1],[0,0])
		self.check(np.array([[2,4],[1,3]]), self.featureExtractor.currentExtracted, "rotate", "elbow up left: currentExtracted")
		self.check(np.array([[2,4],[1,3]]), self.featureExtractor.currentBinary, "rotate", "elbow up left: currentBinary")
		self.check(1, self.featureExtractor.rotationAngle, "rotate", "elbow up left: rotationAngle")
		
		self.featureExtractor.rotate([0,1],[0,0])
		self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentExtracted, "rotate", "elbow up: currentExtracted")
		self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentBinary, "rotate", "elbow up: currentBinary")
		self.check(1, self.featureExtractor.rotationAngle, "rotate", "elbow up: rotationAngle")
		
		self.featureExtractor.rotate([0,1],[1,0])
		self.check(np.array([[3,1],[4,2]]), self.featureExtractor.currentExtracted, "rotate", "elbow up right: currentExtracted")
		self.check(np.array([[3,1],[4,2]]), self.featureExtractor.currentBinary, "rotate", "elbow up right: currentBinary")
		self.check(1, self.featureExtractor.rotationAngle, "rotate", "elbow up right: rotationAngle")
		
		self.featureExtractor.rotate([0,0],[1,0])
		self.check(np.array([[3,1],[4,2]]), self.featureExtractor.currentExtracted, "rotate", "elbow right: currentExtracted")
		self.check(np.array([[3,1],[4,2]]), self.featureExtractor.currentBinary, "rotate", "elbow right: currentBinary")
		self.check(0, self.featureExtractor.rotationAngle, "rotate", "elbow right: rotationAngle")
		
		self.featureExtractor.rotate([0,0],[1,1])
		self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentExtracted, "rotate", "elbow down right: currentExtracted")
		self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentBinary, "rotate", "elbow down right: currentBinary")
		self.check(-1, self.featureExtractor.rotationAngle, "rotate", "elbow down right: rotationAngle")
		
		self.featureExtractor.rotate([0,0],[0,1])
		self.check(np.array([[2,4],[1,3]]), self.featureExtractor.currentExtracted, "rotate", "elbow down: currentExtracted")
		self.check(np.array([[2,4],[1,3]]), self.featureExtractor.currentBinary, "rotate", "elbow down: currentBinary")
		self.check(-1, self.featureExtractor.rotationAngle, "rotate", "elbow down: rotationAngle")
		
		self.featureExtractor.rotate([1,0],[0,1])
		self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentExtracted, "rotate", "elbow down left: currentExtracted")
		self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentBinary, "rotate", "elbow down left: currentBinary")
		self.check(-1, self.featureExtractor.rotationAngle, "rotate", "elbow down left: rotationAngle")
		
		self.featureExtractor.rotate([1,0],[0,0])
		self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentExtracted, "rotate", "elbow left: currentExtracted")
		self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentBinary, "rotate", "elbow left: currentBinary")
		self.check(2, self.featureExtractor.rotationAngle, "rotate", "elbow left: rotationAngle")
		
		
		
		# Assert the keepRange method
		# Expected outputs:	integer value between 0 and max
		self.check(0, self.featureExtractor.keepRange(-1, 2), "keepRange", "negative value")
		self.check(0, self.featureExtractor.keepRange(0, 2), "keepRange", "zero value")
		self.check(1, self.featureExtractor.keepRange(1, 2), "keepRange", "positive value < max")
		self.check(2, self.featureExtractor.keepRange(2, 2), "keepRange", "positive value == max")
		self.check(2, self.featureExtractor.keepRange(3, 2), "keepRange", "positive value > max")
		self.check(0, self.featureExtractor.keepRange(-1, 0), "keepRange", "negative value and max==0")
		self.check(0, self.featureExtractor.keepRange(0, 0), "keepRange", "zero value and max==0")
		self.check(0, self.featureExtractor.keepRange(1, 0), "keepRange", "positive value and max==0")
		self.check(0, self.featureExtractor.keepRange(-1, -1), "keepRange", "negative value and negative max")
		self.check(0, self.featureExtractor.keepRange(0, -1), "keepRange", "zero value and negative max")
		self.check(0, self.featureExtractor.keepRange(1, -1), "keepRange", "positive value and negative max")
		
		
		
		# Assert the keepRange method
		# Expected outputs:	(percentage of actual data within a restricted area)|(0 as a fallback)
		self.featureExtractor.currentW = 0
		self.featureExtractor.currentH = 0
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),1,0,0,1,1), "countWithinArea", "zero currentW and currentH")
		
		self.featureExtractor.currentW = 0
		self.featureExtractor.currentH = 1
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),1,0,0,1,1), "countWithinArea", "zero currentW")
		
		self.featureExtractor.currentW = 1
		self.featureExtractor.currentH = 0
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),1,0,0,1,1), "countWithinArea", "zero currentH")
		
		self.featureExtractor.currentW = -1
		self.featureExtractor.currentH = -1
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),1,0,0,1,1), "countWithinArea", "negative currentW and currentH")
		
		self.featureExtractor.currentW = -1
		self.featureExtractor.currentH = 1
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),1,0,0,1,1), "countWithinArea", "negative currentW")
		
		self.featureExtractor.currentW = 1
		self.featureExtractor.currentH = -1
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),1,0,0,1,1), "countWithinArea", "negative currentH")
		
		self.featureExtractor.currentW = 1
		self.featureExtractor.currentH = 1
		self.check(0, self.featureExtractor.countWithinArea(np.array([]),1,0,0,1,1), "countWithinArea", "empty array")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),0,0,0,1,1), "countWithinArea", "zero total")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 0,0, 0,0), "countWithinArea", "v1==v2 and h1==h2")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 0,0, 1,0), "countWithinArea", "v1==v2 and h1<h2")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 1,0, 0,0), "countWithinArea", "v1==v2 and h1>h2")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 0,0, 0,1), "countWithinArea", "v1<v2 and h1==h2")
		self.check(10, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 0,0, 1,1), "countWithinArea", "v1<v2 and h1<h2")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 1,0, 0,1), "countWithinArea", "v1<v2 and h1>h2")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 0,1, 0,0), "countWithinArea", "v1>v2 and h1==h2")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 0,1, 1,0), "countWithinArea", "v1>v2 and h1<h2")
		self.check(0, self.featureExtractor.countWithinArea(np.array([[1,2],[3,4]]),10, 1,1, 0,0), "countWithinArea", "v1>v2 and h1>h2")
		
		
		
		# Assert the getElbowHandAlignment method
		# Expected outputs:	[(-1|0|1),(-1|0|1)]
		self.check([0,0], self.featureExtractor.getElbowHandAlignment(-1, 0,0,0,0, 0), "getElbowHandAlignment", "negative depth")
		self.check([0,0], self.featureExtractor.getElbowHandAlignment(0, 0,0,0,0, 0), "getElbowHandAlignment", "zero depth")
		self.check([0,0], self.featureExtractor.getElbowHandAlignment(1, 0,0,0,0, 0), "getElbowHandAlignment", "positive depth")
		
		self.check([1,0-1], self.featureExtractor.getElbowHandAlignment(1000,  61,61, 0,0, 0), "getElbowHandAlignment", "left down")
		self.check([-1,0-1], self.featureExtractor.getElbowHandAlignment(1000, 61,0,  0,61, 0), "getElbowHandAlignment", "right down")
		self.check([0,0-1], self.featureExtractor.getElbowHandAlignment(1000,  61,1,  0,0, 0), "getElbowHandAlignment", "front down")
		
		self.check([1,1], self.featureExtractor.getElbowHandAlignment(1000,  0,61, 61,0, 0), "getElbowHandAlignment", "left up")
		self.check([-1,1], self.featureExtractor.getElbowHandAlignment(1000, 0,0,  61,61, 0), "getElbowHandAlignment", "right up")
		self.check([0,1], self.featureExtractor.getElbowHandAlignment(1000,  0,1,  61,0, 0),  "getElbowHandAlignment", "front up")
		
		self.check([1,0], self.featureExtractor.getElbowHandAlignment(1000,  0,61, 0,0, 0), "getElbowHandAlignment", "left lateral")
		self.check([-1,0], self.featureExtractor.getElbowHandAlignment(1000, 0,0,  0,61, 0), "getElbowHandAlignment", "right lateral")
		self.check([0,0], self.featureExtractor.getElbowHandAlignment(1000,  0,1,  0,0, 0), "getElbowHandAlignment", "front lateral")
		
		
		
		# Assert the normalizeInput method
		# Expected outputs:	[normalized values in the range -1 to 1]
		self.check([], self.featureExtractor.normalizeInput([]), "normalizeInput", "empty array")
		self.check([-1,-1,-1], self.featureExtractor.normalizeInput([0,0,0],0,2), "normalizeInput", "low range values")
		self.check([0,0,0], self.featureExtractor.normalizeInput([1,1,1],0,2), "normalizeInput", "middle range values")
		self.check([1,1,1], self.featureExtractor.normalizeInput([2,2,2],0,2), "normalizeInput", "top range values")
		
		
		
		# Assert the processFeatures method
		# Expected outputs:	[6 normalized features]
		self.check([-1,-1,-1,-1,-1,-1], self.featureExtractor.processFeatures(0,0,0, 0,0,0, np.array([]), [0,0,0]), "processFeatures", "empty array")
		self.check([-1,-1,-1,-1,-1,-1], self.featureExtractor.processFeatures(0,0,0, 0,0,0, np.array([0,0,0]), [0,0,0]), "processFeatures", "1 dimensional array")
		self.check([-1,-1,-1,-1,-1,-1], self.featureExtractor.processFeatures(0,0,0, 0,0,0, np.array([[0,0,0],[0,0,0],[0,0,0]]), [0,0,0]), "processFeatures", "zero array")
		
		
		
		# Assert the getFingerTip method
		# Expected outputs:	[v,h] non negative values
		self.featureExtractor.cropLeft = 0
		self.featureExtractor.emptyLeft = 0
		self.featureExtractor.cropTop = 0
		self.featureExtractor.emptyTop = 0
		self.featureExtractor.rotationAngle = 0
		self.featureExtractor.currentBinary = np.array([[]])
		self.check([0,0], self.featureExtractor.getFingerTip(), "getFingerTip", "empty array")
		
		self.featureExtractor.currentBinary = np.array([[0,0,0,0,0]])
		self.check([0,0], self.featureExtractor.getFingerTip(), "getFingerTip", "zero array")
		
		self.featureExtractor.rotationAngle = -1
		self.featureExtractor.currentBinary = np.array([[1,0,0,0,0]])
		self.check([0,4], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=-1 and extrem left value")
		self.featureExtractor.currentBinary = np.array([[0,0,1,0,0]])                     
		self.check([0,2], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=-1 and middle value")
		self.featureExtractor.currentBinary = np.array([[0,0,0,0,1]])                     
		self.check([0,0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=-1 and extrem right value")
		
		self.featureExtractor.rotationAngle = 0
		self.featureExtractor.currentBinary = np.array([[1,0,0,0,0]])
		self.check([0,0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=0 and extrem left value")
		self.featureExtractor.currentBinary = np.array([[0,0,1,0,0]])
		self.check([2,0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=0 and middle value")
		self.featureExtractor.currentBinary = np.array([[0,0,0,0,1]])
		self.check([4,0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=0 and extrem right value")
		
		self.featureExtractor.rotationAngle = 1
		self.featureExtractor.currentBinary = np.array([[1,0,0,0,0]])
		self.check([0,0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=1 and extrem left value")
		self.featureExtractor.currentBinary = np.array([[0,0,1,0,0]])
		self.check([0,2], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=1 and middle value")
		self.featureExtractor.currentBinary = np.array([[0,0,0,0,1]])
		self.check([0,4], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=1 and extrem right value")
		
		self.featureExtractor.rotationAngle = 2
		self.featureExtractor.currentBinary = np.array([[1,0,0,0,0]])
		self.check([4,1], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=2 and extrem left value")
		self.featureExtractor.currentBinary = np.array([[0,0,1,0,0]])
		self.check([2,1], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=2 and middle value")
		self.featureExtractor.currentBinary = np.array([[0,0,0,0,1]])
		self.check([0,1], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=2 and extrem right value")
		
		
		
		# Assert the getEyePosition method
		# Expected outputs:	[v,h] non negative values
		self.check([0,0], self.featureExtractor.getEyePosition(np.array([[]]), [0,0,0], [0,0]), "getEyePosition", "empty array")
		self.check([0,0], self.featureExtractor.getEyePosition(np.array([0,0,0]), [0,0,0], [0,0]), "getEyePosition", "1 dimensional array")
		self.check([0,0], self.featureExtractor.getEyePosition(np.array([[0,0,0],[0,0,0],[0,0,0]]), [0,0,0], [0,0]), "getEyePosition", "zero array")
	
	
	# Assert the Utils class
	# 
	# @param	None
	# @return	None
	def assertUtils(self):
		print "\n--- Utils ---"
		
		
		
		# Assert the getDepthFromMap method
		# Expected outputs:	non negative integer
		self.check(0, self.utils.getDepthFromMap(np.array([]), [0,0]), "getDepthFromMap", "empty array")
		self.check(0, self.utils.getDepthFromMap(np.array([[0,0],[0,0]]), [-1,0]), "getDepthFromMap", "y index out of bond (negative)")
		self.check(0, self.utils.getDepthFromMap(np.array([[0,0],[0,0]]), [2,0]), "getDepthFromMap", "y index out of bond (>=len)")
		self.check(0, self.utils.getDepthFromMap(np.array([[0,0],[0,0]]), [0,-1]), "getDepthFromMap", "x index out of bond (negative)")
		self.check(0, self.utils.getDepthFromMap(np.array([[0,0],[0,0]]), [0,2]), "getDepthFromMap", "x index out of bond (>=len)")
		self.check(0, self.utils.getDepthFromMap(np.array([[0,0],[0,0]]), []), "getDepthFromMap", "empty position array")
		self.check(0, self.utils.getDepthFromMap(np.array([[0,0],[0,0]]), [1]), "getDepthFromMap", "unexpected position array")
		self.check(42, self.utils.getDepthFromMap(np.array([[0,0],[0,42]]), [1,1]), "getDepthFromMap", "correct value")
		
		
		
		# Assert the getHandBoundShift method
		# Expected outputs:	integer
		self.check(-90, self.utils.getHandBoundShift(-1000), "getHandBoundShift", "negative depth")
		self.check(90, self.utils.getHandBoundShift(0), "getHandBoundShift", "zero depth")
		self.check(90, self.utils.getHandBoundShift(1000), "getHandBoundShift", "positive depth")
	
	
	# Assert the BPNHandler class
	# 
	# @param	None
	# @return	None
	def assertBPNHandler(self):
		print "\n--- BPNHandler ---"
		
		
		
		# Assert the check method
		# Expected outputs:	[Boolean, (0|1)]
		self.check([False,0], self.bpn.check([[0,0,0,0,0,0]]), "check", "zero array")
예제 #3
0
class Validating():
	
	# Load required classes
	bpn = BPNHandler(True)
	datasetManager = DatasetManager()
	featureExtractor = FeatureExtractor()
	settings = Settings()
	utils = Utils()
	
	
	# Evaluate the complete dataset
	# 
	# @param	type					Type of dataset to be evaluated
	# @return	None
	def complete(self, type):
		positiveValidating = self.datasetManager.getPositiveCompleteMixed(type)
		negativeValidating = self.datasetManager.getMainNegative(type)
		
		# run the network
		self.run(positiveValidating, negativeValidating)
	
	
	# Evaluate the restrained dataset
	# 
	# @param	type					Type of dataset to be evaluated
	# @return	None
	def restrained(self, type):
		positiveValidating = self.datasetManager.getPositiveRestrainedMixed(type)
		negativeValidating = self.datasetManager.getNegativeMainRestrained(type)
		
		# run the network
		self.run(positiveValidating, negativeValidating)
		
		
	# Evaluate the given informations
	# 
	# @param	positiveValidating		Array of all positive files to process
	# @param	negativeValidating		Array of all negative files to process
	# @param	getData					Flag to retrieve the data in order to bypass a future loading
	# @return	None
	def run(self, positiveValidating, negativeValidating, getData=False):
		# Load all dataset files
		positive = self.datasetManager.loadDataset(positiveValidating)
		negative = self.datasetManager.loadDataset(negativeValidating)
		
		# Process all features
		print "Processing features..."
		positiveInput = []
		for data in positive:
			positiveInput.extend(self.featureExtractor.getFeatures(data))

		
		negativeInput = []
		for data in negative:
			negativeInput.extend(self.featureExtractor.getFeatures(data))
		
		
		# Check if we need to print the data or run the network
		if getData:
			self.utils.getPythonInitCode(positiveInput, "positiveInput")
			self.utils.getPythonInitCode(negativeInput, "negativeInput")
			
		else:
			# Run the validation against the network
			
			if len(positiveInput)>0:
				print "Positive validation"
				
				goodPositive = 0
				badPositive = 0
				count = 0
				for positive in positiveInput:
					result = self.bpn.check([positive])
					
					if result[0] == False:
						badPositive += 1
						print("{0} is erroneous".format(count))
					else:
						goodPositive += 1
					
					count += 1
				print
				print "{0} corrects and {1} bad --> {2:0.2f}%".format(goodPositive, badPositive, (goodPositive/float(goodPositive+badPositive)*100))
				print
			
			if len(negativeInput)>0:
				print "Negative validation"
				goodNegative = 0
				badNegative = 0
				count = 0
				for negative in negativeInput:
					result = self.bpn.check([negative])
					
					if result[0] == True:
						badNegative += 1
						print("{0} is erroneous".format(count))
					else:
						goodNegative += 1
					
					count += 1
				print
				print "{0} corrects and {1} bad --> {2:0.2f}%".format(goodNegative, badNegative, (goodNegative/float(goodNegative+badNegative)*100))
				print "Final score = {0:0.2f}%".format(((goodPositive+goodNegative)/float(goodPositive+badPositive+goodNegative+badNegative))*100)
		
			if len(positiveInput)==0 and len(negativeInput)==0:
				print "No input to validate..."
class Training():
	
	# Load required classes
	bpn = BPNHandler()
	datasetManager = DatasetManager()
	featureExtractor = FeatureExtractor()
	settings = Settings()
	utils = Utils()
	
	
	# Returns the array of the positive targets based on the parameter
	# 
	# @param	data				Data to evaluate
	# @param	positiveTarget		Array of the positive targets
	# @return	array				Array of the positive targets based on the parameter
	def getPositiveTargetArray(self, data, positiveTarget):
		output = []
		for i in range(len(data)):
			for j in range(len(data[i])):
				output.append(positiveTarget[i])
		
		return output
	
	
	# Returns the array of the negative targets based on the parameter
	# 
	# @param	data					Data to evaluate
	# @param	positiveTargetLength	Length of the array of positive targets
	# @return	array					Array of the negative targets based on the parameter
	def getNegativeTargetArray(self, data, positiveTargetLength):
		# Create the negative target thanks to the lenth of the positive one
		negativeTarget = np.zeros(positiveTargetLength).astype(int)
		
		output = []
		for i in range(len(data)):
			for j in range(len(data[i])):
				output.append(negativeTarget)
		
		return output
	
	
	# Train the network with the complete set of data
	# 
	# @param	None
	# @return	None
	def complete(self):
		positiveTraining = self.datasetManager.getPositiveCompleteMixed("training")
		negativeTraining = self.datasetManager.getMainNegative("training")
		positiveTesting = self.datasetManager.getPositiveCompleteMixed("testing")
		negativeTesting = self.datasetManager.getMainNegative("testing")
		positiveTarget = self.datasetManager.getCompleteMixedTarget()
		
		# run the network
		self.run(positiveTraining, negativeTraining, positiveTesting, negativeTesting, positiveTarget, True)
	
	
	# Train the network with the restrained set of data
	# 
	# @param	None
	# @return	None
	def restrained(self):
		positiveTraining = self.datasetManager.getPositiveRestrained("training")
		negativeTraining = self.datasetManager.getNegativeMainRestrained("training")
		positiveTesting = self.datasetManager.getPositiveRestrained("testing")
		negativeTesting = self.datasetManager.getNegativeMainRestrained("testing")
		positiveTarget = self.datasetManager.getRestrainedTarget()
		
		# run the network
		self.run(positiveTraining, negativeTraining, positiveTesting, negativeTesting, positiveTarget, True)
		
	
	# Train the network with pre-computed recent values to bypass loading
	# 
	# @param	None
	# @return	None
	def recentValues(self):
		trainingInput = self.datasetManager.getRecentValuesRestrained(trainingInput=True)
		trainingTarget = self.datasetManager.getRecentValuesRestrained(trainingTarget=True)
		testingInput = self.datasetManager.getRecentValuesRestrained(testingInput=True)
		testingTarget = self.datasetManager.getRecentValuesRestrained(testingTarget=True)
		
		# run the network
		self.bpn.run(trainingInput, trainingTarget, testingInput, testingTarget, learningRate=0.05, momentum=0.1, optimal=True)
		
	
	# Train the network with the complete set of data
	# 
	# @param	positiveTraining		Array of positive data from the training set
	# @param	negativeTraining		Array of negative data from the training set
	# @param	positiveTesting			Array of positive data from the testing set
	# @param	negativeTesting			Array of negative data from the testing set
	# @param	positiveTarget			Array of positive targets to reach
	# @param	getData					Flag to output the processed features in order to bypass loading the next time
	# @return	None
	def run(self, positiveTraining, negativeTraining, positiveTesting, negativeTesting, positiveTarget, getData=False):
		# Load all dataset files and gather them accordingly
		training = self.datasetManager.loadDataset(positiveTraining)
		training.extend(self.datasetManager.loadDataset(negativeTraining))
		
		testing = self.datasetManager.loadDataset(positiveTesting)
		testing.extend(self.datasetManager.loadDataset(negativeTesting))
		
		# Process all features
		print "Processing features..."
		trainingInput = []
		for data in training:
			trainingInput.extend(self.featureExtractor.getFeatures(data))
		
		testingInput = []
		for data in testing:
			testingInput.extend(self.featureExtractor.getFeatures(data))
		
		
		# Build the target arrays
		trainingTarget = self.getPositiveTargetArray(positiveTraining, positiveTarget)
		trainingTarget.extend(self.getNegativeTargetArray(negativeTraining, len(positiveTarget)))
		
		testingTarget = self.getPositiveTargetArray(positiveTesting, positiveTarget)
		testingTarget.extend(self.getNegativeTargetArray(negativeTesting, len(positiveTarget)))
		
		
		# Check if we need to print the data or run the network
		if getData:
			self.utils.getPythonInitCode(trainingInput, "trainingInput")
			self.utils.getPythonInitCode(trainingTarget, "trainingTarget")
			self.utils.getPythonInitCode(testingInput, "testingInput")
			self.utils.getPythonInitCode(testingTarget, "testingTarget")
			
		else:
			# Run the network
			self.bpn.run(trainingInput, trainingTarget, testingInput, testingTarget, learningRate=0.05, momentum=0.1, optimal=False)
class Accuracy:
    # Shared helper instances used by every accuracy-evaluation method
    bpn = BPNHandler(True)
    datasetManager = DatasetManager()
    featureExtractor = FeatureExtractor()
    settings = Settings()
    trigonometry = Trigonometry()
    utils = Utils()

    # Radius of the sphere used when searching for ray/target intersections
    # (presumably millimetres, matching the distance outputs -- TODO confirm)
    expectedRadius = 2000

    # Labels of the pointing-direction categories, indexed by dataset order
    direction = [
        "back-right", "right", "front-right", "front", "front-left", "left",
        "back-left"
    ]

    # Evaluate the pointed direction and display the average distance and angle
    #
    # @param	None
    # @return	None
    def processedPointedDirection(self):
        dataset = self.datasetManager.loadDataset(
            self.datasetManager.getAccuracyComplete())
        outputDistance = []
        outputAngle = []
        outputAngleCamera = []
        outputDistanceAt2m = []

        for data in dataset:
            features = self.featureExtractor.getFeatures(data)

            depthMap = data.depth_map
            targetCoordinates = data.target

            fingerTipCoordinates = self.featureExtractor.fingerTip[0]
            fingerTipCoordinates.append(
                self.utils.getDepthFromMap(depthMap, fingerTipCoordinates))

            eyeCoordinates = self.featureExtractor.eyePosition[0]
            eyeCoordinates.append(
                self.utils.getDepthFromMap(depthMap, eyeCoordinates))

            # Retrieve the distance between the actual target and the closest impact
            distance = self.trigonometry.findIntersectionDistance(
                fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                self.expectedRadius)
            if distance == None:
                print "Missed..."
            else:
                outputDistance.append(distance)

                # Retrieve the distance between the target and the fingertip:
                targetDistance = float(data.distance)

                # Calculate the error angles
                angle = math.degrees(math.asin(distance / targetDistance))
                outputAngle.append(angle)

                angleCamera = math.degrees(
                    math.asin(distance / targetCoordinates[2]))
                outputAngleCamera.append(angleCamera)

                distanceAt2m = math.asin(distance / targetDistance) * 2000
                outputDistanceAt2m.append(distanceAt2m)

                print "--- Impact distance: {0:0.1f} mm\t Impact at 2m: {1:0.1f}\t Error angle (fingertip): {2:0.1f} deg\t Error angle (camera): {3:0.1f} deg".format(
                    distance, distanceAt2m, angle, angleCamera)

        print "---\n--- Average impact distance of {0:0.1f} mm.".format(
            np.average(outputDistance))
        print "--- Average impact distance at 2 m of {0:0.1f} mm.".format(
            np.average(outputDistanceAt2m))
        print "--- Average eror angle of {0:0.1f} deg at the fingertip.".format(
            np.average(outputAngle))
        print "--- Average eror angle of {0:0.1f} deg at the camera.".format(
            np.average(outputAngleCamera))

    # Evaluate the pointed direction by category and display the average distance and angle
    #
    # @param	None
    # @return	None
    def processedPointedDirectionByCategory(self):
        datasets = self.datasetManager.getAccuracyComplete()

        # Load all categories separately
        dataset = []
        for data in datasets:
            dataset.append(self.datasetManager.loadDataset([data]))

        for category in range(len(dataset)):
            outputDistance = []
            outputAngle = []
            outputAngleCamera = []
            outputDistanceAt2m = []

            print "\n--- {0}".format(self.direction[category])

            for data in dataset[category]:
                features = self.featureExtractor.getFeatures(data)

                depthMap = data.depth_map
                targetCoordinates = data.target

                fingerTipCoordinates = self.featureExtractor.fingerTip[0]
                fingerTipCoordinates.append(
                    self.utils.getDepthFromMap(depthMap, fingerTipCoordinates))

                eyeCoordinates = self.featureExtractor.eyePosition[0]
                eyeCoordinates.append(
                    self.utils.getDepthFromMap(depthMap, eyeCoordinates))

                # Retrieve the distance between the actual target and the closest impact
                distance = self.trigonometry.findIntersectionDistance(
                    fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                    self.expectedRadius)
                if distance == None:
                    print "Missed..."
                else:
                    outputDistance.append(distance)

                    # Retrieve the distance between the target and the fingertip:
                    targetDistance = float(data.distance)

                    # Calculate the error angles
                    angle = math.degrees(math.asin(distance / targetDistance))
                    outputAngle.append(angle)

                    angleCamera = math.degrees(
                        math.asin(distance / targetCoordinates[2]))
                    outputAngleCamera.append(angleCamera)

                    distanceAt2m = math.asin(distance / targetDistance) * 2000
                    outputDistanceAt2m.append(distanceAt2m)

                    print "--- Impact distance: {0:0.1f} mm\t Impact at 2m: {1:0.1f}\t Error angle (fingertip): {2:0.1f} deg\t Error angle (camera): {3:0.1f} deg".format(
                        distance, distanceAt2m, angle, angleCamera)

            print "---\n--- Average impact distance of {0:0.1f} mm.".format(
                np.average(outputDistance))
            print "--- Average impact distance at 2 m of {0:0.1f} mm.".format(
                np.average(outputDistanceAt2m))
            print "--- Average eror angle of {0:0.1f} deg at the fingertip.".format(
                np.average(outputAngle))
            print "--- Average eror angle of {0:0.1f} deg at the camera.".format(
                np.average(outputAngleCamera))

    # Draw a graphic with centered trajectories' origins
    #
    # @param	None
    # @return	None
    def drawUnifiedTrajectories(self):
        # Load the dataset
        dataset = self.datasetManager.loadDataset(
            self.datasetManager.getAccuracyComplete())

        # Create the 3D scene
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.set_aspect("equal")

        ax.set_xlabel('X (horizontal)')
        ax.set_ylabel('Y (vertical)')
        ax.set_zlabel('Z (depth)')

        for data in dataset:

            # Feature extraction also refreshes fingerTip/eyePosition as a
            # side effect; the returned features themselves are unused here
            result = self.featureExtractor.getFeatures(data)

            # Processed data
            fingerTipCoordinates = self.featureExtractor.fingerTip[0]
            eyeCoordinates = self.featureExtractor.eyePosition[0]
            targetCoordinates = data.target
            depthMap = data.depth_map

            # Complete the 2D coordinates with the depth read from the map
            fingerTipCoordinates.append(
                self.utils.getDepthFromMap(depthMap, fingerTipCoordinates))
            eyeCoordinates.append(
                self.utils.getDepthFromMap(depthMap, eyeCoordinates))

            closest = self.trigonometry.findIntersection(
                fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                self.expectedRadius)

            # Identity comparison: None signals that the pointing ray missed
            if closest is not None:
                # Translate the trajectory so every target sits at the origin
                x = [
                    fingerTipCoordinates[0] - targetCoordinates[0],
                    closest[0] - targetCoordinates[0]
                ]
                y = [
                    fingerTipCoordinates[1] - targetCoordinates[1],
                    closest[1] - targetCoordinates[1]
                ]
                z = [
                    fingerTipCoordinates[2] - targetCoordinates[2],
                    closest[2] - targetCoordinates[2]
                ]

                # Draw the trajectory
                ax.plot(x, y, z)

        # Draw the target point
        ax.scatter(0, 0, 0, c="#000000", marker="o", s=2000)
        plt.show()

    # Draw a 3D graphic with the closests impacts
    #
    # @param	None
    # @return	None
    def drawImpacts(self):
        # Load the dataset
        dataset = self.datasetManager.loadDataset(
            self.datasetManager.getAccuracyComplete())

        # Create the 3D scene
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.set_aspect("equal")

        ax.set_xlabel('X (horizontal in mm)')
        ax.set_ylabel('Y (vertical in mm)')
        ax.set_zlabel('Z (depth in mm)')

        colorConverter = ColorConverter()

        for data in dataset:
            # Feature extraction also refreshes fingerTip/eyePosition as a
            # side effect; the returned features themselves are unused here
            result = self.featureExtractor.getFeatures(data)

            # Processed data
            fingerTipCoordinates = self.featureExtractor.fingerTip[0]
            eyeCoordinates = self.featureExtractor.eyePosition[0]
            targetCoordinates = data.target
            depthMap = data.depth_map

            # Complete the 2D coordinates with the depth read from the map
            fingerTipCoordinates.append(
                self.utils.getDepthFromMap(depthMap, fingerTipCoordinates))
            eyeCoordinates.append(
                self.utils.getDepthFromMap(depthMap, eyeCoordinates))

            closest = self.trigonometry.findIntersection(
                fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                self.expectedRadius)

            # Identity comparison: None signals that the pointing ray missed
            if closest is not None:
                # Plot the impact relative to the target (target at origin)
                x = closest[0] - targetCoordinates[0]
                y = closest[1] - targetCoordinates[1]
                z = closest[2] - targetCoordinates[2]

                distance = self.trigonometry.findIntersectionDistance(
                    fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                    self.expectedRadius)

                # Map the impact distance to a red (close) -> blue (far)
                # gradient, clamped to the valid [0, 1] colour range
                red = min(max(1 - (distance / 200), 0), 1)
                blue = min(max(0 + (distance / 200), 0), 1)

                cc = colorConverter.to_rgba((red, 0, blue), 0.4)

                # Draw the impact point
                ax.scatter(x, y, z, color=cc, marker="o", s=50)

        # Draw the target point (fixed: both 'c' and 'color' were passed,
        # which newer matplotlib versions reject as conflicting)
        ax.scatter(0, 0, 0, color="#000000", marker="o", s=100)

        plt.show()

    # Draw a 2D graphic with the closest impacts
    #
    # @param	x					Flag to display the horizontal axis
    # @param	y					Flag to display the vertical axis
    # @param	z					Flag to display the depth axis
    # @return	None
    def drawImpacts2D(self, x=True, y=True, z=False):
        # Load the dataset
        dataset = self.datasetManager.loadDataset(
            self.datasetManager.getAccuracyComplete())

        plt.axis("equal")
        colorConverter = ColorConverter()

        for data in dataset:
            # Run the feature extraction to refresh fingerTip/eyePosition
            self.featureExtractor.getFeatures(data)

            # Processed data
            fingerTipCoordinates = self.featureExtractor.fingerTip[0]
            eyeCoordinates = self.featureExtractor.eyePosition[0]
            targetCoordinates = data.target
            depthMap = data.depth_map

            fingerTipCoordinates.append(
                self.utils.getDepthFromMap(depthMap, fingerTipCoordinates))
            eyeCoordinates.append(
                self.utils.getDepthFromMap(depthMap, eyeCoordinates))

            closest = self.trigonometry.findIntersection(
                fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                self.expectedRadius)

            if closest is not None:
                # Offsets of the impact relative to the target. Held in
                # dedicated names (dx/dy/dz): the previous code assigned
                # x/y/z here, shadowing the axis-selection flags and
                # breaking the "if not x / elif not y" dispatch below.
                dx = closest[0] - targetCoordinates[0]
                dy = closest[1] - targetCoordinates[1]
                dz = closest[2] - targetCoordinates[2]

                distance = self.trigonometry.findIntersectionDistance(
                    fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                    self.expectedRadius)

                # Map the distance to a red-to-blue gradient over 0..200 mm,
                # clamping both channels to the valid [0, 1] range
                red = min(max(1 - (distance / 200), 0), 1)
                blue = min(max(distance / 200, 0), 1)

                cc = colorConverter.to_rgba((red, 0, blue), 0.4)

                # Draw the impact point on the two axes enabled by the flags
                if not x:
                    plt.scatter(dy, dz, color=cc, marker="o", s=50)
                elif not y:
                    plt.scatter(dx, dz, color=cc, marker="o", s=50)
                else:
                    plt.scatter(dx, dy, color=cc, marker="o", s=50)

        plt.show()

    # Draw a 3D heatmap from several recorded points
    #
    # @param	points				Array of all recorded points
    # @param	blurred				Flag to add a blurry effect to all points
    # @return	numeric				Depth of the given coordinates
    def createHeatmap(self, points, blurred=False):
        # Create the scene
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.set_aspect("equal")

        # Create axes
        ax.set_xlabel('X (horizontal in mm)')
        ax.set_ylabel('Y (vertical in mm)')
        ax.set_zlabel('Z (depth in mm)')

        points = np.array(points)

        # Retrieve extrem values
        maxX, maxY, maxZ, tmp = points.max(axis=0)
        minX, minY, minZ, tmp = points.min(axis=0)

        # Retrieve middle values
        midX = minX + (maxX - minX) / 2
        midY = minY + (maxY - minY) / 2
        midZ = minZ + (maxZ - minZ) / 2

        # Draw center axis
        ax.plot([minX, maxX], [midY, midY], [midZ, midZ])
        ax.plot([midX, midX], [minY, maxY], [midZ, midZ])
        ax.plot([midX, midX], [midY, midY], [minZ, maxZ])

        # Add points
        for point in points:
            print "[{0},{1},{2},{3}],".format(point[0], point[1], point[2],
                                              point[3])

            # Add a blurr effect to points if needed
            if blurred:
                for i in np.arange(0.1, 1.01, 0.1):
                    if point[3] == True:
                        c = (1, 0, 0, 0.3 / i / 10)
                    else:
                        c = (0, 0, 1, 0.3 / i / 10)
                    ax.scatter(point[0],
                               point[1],
                               point[2],
                               s=(50 * i * (0.55))**2,
                               color=c)
            else:
                if point[3] == True:
                    c = (1, 0, 0, 0.3)
                else:
                    c = (0, 0, 1, 0.3)
                ax.scatter(point[0], point[1], point[2], s=50, color=c)

        # Set the correct view
        #ax.view_init(azim=-128, elev=-163)
        ax.view_init(azim=-89, elev=-74)

        # Display the graph
        plt.show()

    # Draw a 2D heatmap from several recorded points
    #
    # @param	points				Array of all recorded points
    # @param	view				Point of view to display the proper axes
    # @param	blurred				Flag to add a blurry effect to all points
    # @return	numeric				Depth of the given coordinates
    def showHeatmap(self, points, view, blurred=True):
        if view != "top" and view != "front":
            raise ValueError("Invalid view.. Please specify 'top' or 'front'.",
                             view)

        # Create the scene
        if view == "top":
            ax = plt.subplot(111, aspect=0.3)
        else:
            ax = plt.subplot(111, aspect=1)

        points = np.array(points)

        # Retrieve extrem values
        maxX, maxY, maxZ, tmp = points.max(axis=0)
        minX, minY, minZ, tmp = points.min(axis=0)

        margin = 50

        ax.set_xlim([minX - margin, maxX + margin])
        if view == "top":
            ax.set_ylim([minZ - margin, maxZ + margin])
        else:
            ax.set_ylim([minY - margin, maxY + margin])

        # Retrieve middle values
        midX = minX + (maxX - minX) / 2
        midY = minY + (maxY - minY) / 2
        midZ = minZ + (maxZ - minZ) / 2

        # Draw center axis
        if view == "top":
            ax.plot([minX + (midX / 2), maxX - (midX / 2)], [midZ, midZ])
            ax.plot([midX, midX], [minZ + (midZ / 2), maxZ - (midZ / 2)])
        else:
            ax.plot([minX + (midX / 2), maxX - (midX / 2)], [midY, midY])
            ax.plot([midX, midX], [minY + (midY / 2), maxY - (midY / 2)])

        # Add points
        for point in points:
            print "[{0},{1},{2},{3}],".format(point[0], point[1], point[2],
                                              point[3])

            # Add a blurr effect to points if needed
            if blurred:
                for i in np.arange(0.1, 1.01, 0.1):
                    if point[3] == True:
                        c = (1, 0, 0, 0.3 / i / 10)
                    else:
                        c = (0, 0, 1, 0.1 / i / 10)

                    if view == "top":
                        ax.scatter(point[0],
                                   point[2],
                                   s=(50 * i * (0.55))**2,
                                   color=c)
                    else:
                        ax.scatter(point[0],
                                   point[1],
                                   s=(50 * i * (0.55))**2,
                                   color=c)
            else:
                if point[3] == True:
                    c = (1, 0, 0, 0.3)
                else:
                    c = (0, 0, 1, 0.1)

                if view == "top":
                    ax.scatter(point[0], point[2], s=50, color=c)
                else:
                    ax.scatter(point[0], point[1], s=50, color=c)

        # Display the graph
        ax.invert_xaxis()
        ax.invert_yaxis()
        plt.show()
class LiveGui(QtWidgets.QWidget):
    """Qt window that runs live pointing-gesture recognition on the sensor feed."""

    # Shared helpers: geometry utilities, feature extraction and the
    # pre-trained back-propagation neural network (class-level, shared
    # by all instances)
    utils = Utils()
    featureExtractor = FeatureExtractor()
    bpn = BPNHandler(True)

    # Constructor of the LiveGui class
    #
    # @param	None
    # @return	None
    def __init__(self):
        super(LiveGui, self).__init__()
        self.setWindowTitle("Pointing Gesture Recognition - Live")

        # Retrieve all settings
        self.settings = Settings()

        # Get the OpenNI context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()
        print "Starting to detect users.."

        # Create a new dataset item to hold the live frame data
        self.data = LiveDataset()

        # Create the global layout
        self.layout = QtWidgets.QVBoxLayout(self)

        # Create custom widgets to hold sensor's images
        self.depthImage = SensorWidget()
        self.depthImage.setGeometry(10, 10, 640, 480)

        # Add these custom widgets to the global layout
        self.layout.addWidget(self.depthImage)

        # Set the default result text (updated with the BPN output per frame)
        self.resultLabel = QtWidgets.QLabel()
        self.resultLabel.setText("No")

        # Create the acquisition form elements
        self.createAcquisitionForm()

        # Create and launch a single-shot timer to update the images;
        # updateImage() re-arms it after each frame (see below)
        self.timerScreen = QtCore.QTimer()
        self.timerScreen.setInterval(30)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.updateImage)
        self.timerScreen.start()

    # Update the depth image displayed within the main window: grab a frame,
    # track the skeleton, run the gesture classifier and redraw the overlays
    #
    # @param	None
    # @return	None
    def updateImage(self):

        # Update to next frame (blocks until new sensor data is available)
        self.context.wait_and_update_all()

        # Extract informations of each tracked user
        self.data = skeleton.track(self.user, self.depth, self.data)

        # Get the whole depth map as a 480x640 array
        self.data.depth_map = np.asarray(
            self.depth.get_tuple_depth_map()).reshape(480, 640)

        # Create the frame from the raw depth map string and convert it to RGB
        frame = np.fromstring(self.depth.get_raw_depth_map_8(),
                              np.uint8).reshape(480, 640)
        frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_GRAY2RGB)

        # Will be used to specify the depth of the current hand wished
        currentDepth, showCurrentDepth = 0, ""

        # Only process when a user is tracked and a head position is known
        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Highlight the head
            #ui.drawPoint(frame, self.data.skeleton["head"][0], self.data.skeleton["head"][1], 5)

            # Display lines from elbows to the respective hands
            #ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"], self.data.skeleton["hand"]["left"])
            #ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"], self.data.skeleton["hand"]["right"])

            # Get the pixel's depth from the coordinates of the hands
            leftPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["left"])
            rightPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["right"])

            # Get the shift of the boundaries around both hands
            leftShift = self.utils.getHandBoundShift(leftPixel)
            rightShift = self.utils.getHandBoundShift(rightPixel)

            if self.data.hand == self.settings.LEFT_HAND:
                currentDepth = leftPixel
                # Display a rectangle around the current hand
                #ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (200, 70, 30))
            elif self.data.hand == self.settings.RIGHT_HAND:
                currentDepth = rightPixel
                # Display a rectangle around the current hand
                #ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))
            #else:
            # Display a rectangle around both hands
            #ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (200, 70, 30))
            #ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))

            # Test the data against the neural network if possible
            if self.data.hand != self.settings.NO_HAND:
                result = self.bpn.check(
                    self.featureExtractor.getFeatures(self.data))
                # result[0] is the recognition outcome, result[1] the hand
                # index used to pick the matching extracted features
                self.resultLabel.setText(str(result[0]))

                # Highlight the finger tip
                if result[0] != False:
                    ui.drawPoint(frame,
                                 self.featureExtractor.fingerTip[result[1]][0],
                                 self.featureExtractor.fingerTip[result[1]][1],
                                 5)

                    # Highlight the eye
                    ui.drawPoint(
                        frame, self.featureExtractor.eyePosition[result[1]][0],
                        self.featureExtractor.eyePosition[result[1]][1], 5)

                    # Line of sight
                    ui.drawElbowLine(
                        frame, self.featureExtractor.eyePosition[result[1]],
                        self.featureExtractor.fingerTip[result[1]])

                    # Indicate orientation
                    cv2.putText(frame,
                                self.featureExtractor.orientation[result[1]],
                                (5, 60), cv2.FONT_HERSHEY_SIMPLEX, 2,
                                (50, 100, 255), 5)

        # Update the frame displayed in the window
        self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))

        # Re-arm the single-shot timer to schedule the next frame
        self.timerScreen.start()

    # Create the acquisition form of the main window
    #
    # @param	None
    # @return	None
    def createAcquisitionForm(self):
        globalLayout = QtWidgets.QHBoxLayout()

        # Drop-down to choose which hand is considered the pointing hand;
        # NOTE(review): item order (Left/Right/None/Both) is presumably the
        # index mapping expected by LiveDataset.toggleHand — confirm there
        hlayout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Pointing hand")
        label.setFixedWidth(100)
        comboBox = QtWidgets.QComboBox()
        comboBox.currentIndexChanged.connect(self.data.toggleHand)
        comboBox.setFixedWidth(200)
        comboBox.addItem("Left")
        comboBox.addItem("Right")
        comboBox.addItem("None")
        comboBox.addItem("Both")
        comboBox.setCurrentIndex(3)
        hlayout.addWidget(label)
        hlayout.addWidget(comboBox)
        globalLayout.addLayout(hlayout)

        # Centered label showing the latest recognition result
        self.resultLabel.setAlignment(QtCore.Qt.AlignCenter)
        globalLayout.addWidget(self.resultLabel)

        self.layout.addLayout(globalLayout)
class Live():
    """Headless live recognition loop: grabs frames, classifies pointing
    gestures and prints one JSON-like result line per frame."""

    # Shared helpers: geometry utilities, feature extraction, the pre-trained
    # neural network and a timing harness (class-level, shared by instances)
    utils = Utils()
    featureExtractor = FeatureExtractor()
    bpn = BPNHandler(True)
    testing = Testing()

    # Constructor of the Live class
    #
    # @param	None
    # @return	None
    def __init__(self):
        # Retrieve all settings
        self.settings = Settings()

        # Get the OpenNI context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()

        # Create a new dataset item; both hands are tracked by default
        self.data = LiveDataset()
        self.data.hand = self.settings.BOTH_HAND

        # Update the frame: schedule the first iteration of the capture loop
        # (updateImage re-schedules itself at the end of every frame)
        Timer(0.001, self.updateImage, ()).start()

    # Update the captured depth image: grab a frame, run the classifier and
    # print the recognition result, then schedule the next iteration
    #
    # @param	None
    # @return	None
    def updateImage(self):
        # Update to next frame (blocks until new sensor data is available)
        self.context.wait_and_update_all()

        # Extract informations of each tracked user
        self.data = skeleton.track(self.user, self.depth, self.data)

        # Get the whole depth map as a 480x640 array (timed for profiling)
        self.testing.startTimer()
        self.data.depth_map = np.asarray(
            self.depth.get_tuple_depth_map()).reshape(480, 640)
        self.testing.timerMarker("Depth map acquisition and conversion")
        self.testing.stopTimer()

        # Create dummy values emitted when no gesture is recognised
        recognition = False
        hand = None
        origin = [0, 0, 0]
        end = [0, 0, 0]

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Test the data against the neural network if possible
            if self.data.hand != self.settings.NO_HAND:
                result = self.bpn.check(
                    self.featureExtractor.getFeatures(self.data))

                # result[0] is the recognition outcome, result[1] the hand
                # index used to pick the matching extracted features
                if result[0] != False:
                    recognition = True
                    hand = result[1]
                    # Pointing ray: from the eye (origin) through the finger
                    # tip (end), each as [x, y, depth-from-map]
                    origin = [
                        self.featureExtractor.eyePosition[result[1]][0],
                        self.featureExtractor.eyePosition[result[1]][1],
                        self.utils.getDepthFromMap(
                            self.data.depth_map,
                            self.featureExtractor.eyePosition[result[1]])
                    ]
                    end = [
                        self.featureExtractor.fingerTip[result[1]][0],
                        self.featureExtractor.fingerTip[result[1]][1],
                        self.utils.getDepthFromMap(
                            self.data.depth_map,
                            self.featureExtractor.fingerTip[result[1]])
                    ]

        # Output the result as one JSON-like line per frame
        print '{{"pointing":{0},"hand":{1},"origin":{2},"end":{3}}}'.format(
            recognition, hand, origin, end)

        # Schedule the next iteration of the capture loop
        Timer(0.001, self.updateImage, ()).start()