def hack(self):
    print("Applying Trinity Hack:\n\n\n")
    # Decrypt the game backup, inject the Trinity PBOOT.PBP, then re-encrypt it
    Utils.decrypt_game(self.user.decrypt_key, self.decrypt_dir,
                       self.working_dir / "PBOOT.PBP", self.game.id)
    Utils.encrypt_game(self.user.decrypt_key, self.decrypt_dir, self.hack_dir)
    # A suspiciously small image means the rebuild failed
    Utils.check_issue(
        os.path.getsize(self.hack_dir / 'game' / 'game.psvimg') > 1000000,
        "Hack Too Small")
    Utils.replace_folder(self.hack_dir, self.game.path)
    print("\n\n\nTrinity Applied. Please refresh your QCMA database and "
          "transfer your game back to your Vita.")
def main(args):
    parser = argparse.ArgumentParser(description='PRM Path Planning Algorithm')
    parser.add_argument('--numSamples', type=int, default=1000, metavar='N',
                        help='Number of sampled points')
    args = parser.parse_args()
    numSamples = args.numSamples

    # The first line of the environment file holds "current;destination",
    # every following line describes one rectangular obstacle
    with open("environment.txt", "r") as env:
        l1 = env.readline().split(";")
        current = list(map(int, l1[0].split(",")))
        destination = list(map(int, l1[1].split(",")))
        print("Current: {} Destination: {}".format(current, destination))

        print("****Obstacles****")
        allObs = []
        for l in env:
            if ";" in l:
                line = l.strip().split(";")
                topLeft = list(map(int, line[0].split(",")))
                bottomRight = list(map(int, line[1].split(",")))
                obs = Obstacle(topLeft, bottomRight)
                obs.printFullCords()
                allObs.append(obs)

    utils = Utils()
    utils.drawMap(allObs, current, destination)

    prm = PRMController(numSamples, allObs, current, destination)
    # Initial random seed to try
    initialRandomSeed = 0
    prm.runPRM(initialRandomSeed)
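# For reference, a minimal hypothetical "environment.txt" matching the parser
# above: the first line is "currentX,currentY;destinationX,destinationY", and
# each later line is "topLeftX,topLeftY;bottomRightX,bottomRightY" for one
# rectangular obstacle (the values below are made up for illustration):
#
#   5,5;95,95
#   20,20;35,40
#   50,10;60,70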
def setup_dirs(self):
    base_dir = Path(os.getcwd())
    if Utils.check_issue(os.access(Utils.get_home() / 'Desktop', os.W_OK),
                         "No Working Dir Permissions", False):
        base_dir = Utils.get_home() / 'Desktop'
    else:
        Utils.check_issue(os.access(base_dir, os.W_OK),
                          "No Working Dir Permissions")
    self.working_dir = Utils.make_dir(base_dir / f"FinTrinity{Utils.get_timestamp()}")
    self.hack_dir = Utils.make_dir(self.working_dir / f"{self.game.id}.hacked")
    self.decrypt_dir = self.working_dir / f"{self.game.id}.decrypted"
    self.backup_dir = self.working_dir / f"{self.game.id}.backup"
    print(f"Created Working Directory: {self.working_dir}")
def download_dependencies(self):
    print("Downloading and Extracting Dependencies")
    # Pick the psvimgtools build that matches the host platform
    psvimgtools = None
    if platform.system() == "Windows":
        if platform.machine() == "AMD64":
            psvimgtools = "https://github.com/yifanlu/psvimgtools/releases/download/v0.1/psvimgtools-0.1-win64.zip"
        else:
            psvimgtools = "https://github.com/yifanlu/psvimgtools/releases/download/v0.1/psvimgtools-0.1-win32.zip"
    elif platform.system() == "Linux":
        psvimgtools = "https://github.com/yifanlu/psvimgtools/releases/download/v0.1/psvimgtools-0.1-linux64.zip"
    elif platform.system() == "Darwin":
        psvimgtools = "https://github.com/yifanlu/psvimgtools/releases/download/v0.1/psvimgtools-0.1-osx.zip"
    # Bail out before downloading if no build matches this platform
    Utils.check_issue(psvimgtools is not None, "Unsupported Platform")
    Utils.download(
        'https://github.com/TheOfficialFloW/Trinity/releases/download/v1.0/PBOOT.PBP',
        self.working_dir)
    Utils.download(psvimgtools, self.working_dir)
    Utils.extract(self.working_dir / psvimgtools.split("/")[-1], self.decrypt_dir)
class DatasetGui(QtWidgets.QWidget):
    utils = Utils()
    featureExtractor = FeatureExtractor()
    bpn = BPNHandler(True)
    accuracy = accuracy.Accuracy()

    # Constructor of the DatasetGui class
    #
    # @param	None
    # @return	None
    def __init__(self):
        super(DatasetGui, self).__init__()
        self.setWindowTitle("Pointing Gesture Recognition - Dataset recording")

        # Retrieve all settings
        self.settings = Settings()

        # Load sounds
        self.countdownSound = QtMultimedia.QSound(
            self.settings.getResourceFolder() + "countdown.wav")
        self.countdownEndedSound = QtMultimedia.QSound(
            self.settings.getResourceFolder() + "countdown-ended.wav")

        # Get the context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the image generator to get an RGB image of the scene
        self.image = ImageGenerator()
        self.image.create(self.context)
        self.image.set_resolution_preset(RES_VGA)
        self.image.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()
        print("Starting to detect users..")

        # Create a new dataset item
        self.data = Dataset()

        # Create a timer for an eventual countdown before recording the data
        self.countdownTimer = QtCore.QTimer()
        self.countdownRemaining = 10
        self.countdownTimer.setInterval(1000)
        self.countdownTimer.setSingleShot(True)
        self.countdownTimer.timeout.connect(self.recordCountdown)

        # Create a timer to eventually record data for a heat map
        self.heatmapRunning = False
        self.heatmapTimer = QtCore.QTimer()
        self.heatmapTimer.setInterval(10)
        self.heatmapTimer.setSingleShot(True)
        self.heatmapTimer.timeout.connect(self.recordHeatmap)

        # Create the global layout
        self.layout = QtWidgets.QVBoxLayout(self)

        # Create custom widgets to hold sensor's images
        self.depthImage = SensorWidget()
        self.depthImage.setGeometry(10, 10, 640, 480)

        # Add these custom widgets to the global layout
        self.layout.addWidget(self.depthImage)

        # Hold the label indicating the number of datasets taken
        self.numberLabel = QtWidgets.QLabel()
        self.updateDatasetNumberLabel()

        # Create the acquisition form elements
        self.createAcquisitionForm()

        # Register a dialog window to prompt the target position
        self.dialogWindow = DatasetDialog(self)

        # Allow to save the data when the right distance is reached
        self.recordIfReady = False

        # Create and launch a timer to update the images
        self.timerScreen = QtCore.QTimer()
        self.timerScreen.setInterval(30)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.updateImage)
        self.timerScreen.start()

    # Update the depth image displayed within the main window
    #
    # @param	None
    # @return	None
    def updateImage(self):
        # Update to next frame
        self.context.wait_and_update_all()

        # Extract information of each tracked user
        self.data = skeleton.track(self.user, self.depth, self.data)

        # Get the whole depth map
        self.data.depth_map = np.asarray(
            self.depth.get_tuple_depth_map()).reshape(480, 640)

        # Create the frame from the raw depth map string and convert it to RGB
        frame = np.fromstring(self.depth.get_raw_depth_map_8(),
                              np.uint8).reshape(480, 640)
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

        # Get the RGB image of the scene
        self.data.image = np.fromstring(self.image.get_raw_image_map_bgr(),
                                        dtype=np.uint8).reshape(480, 640, 3)

        # Will hold the depth of the currently selected hand
        currentDepth, showCurrentDepth = 0, ""

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Highlight the head
            ui.drawPoint(frame, self.data.skeleton["head"][0],
                         self.data.skeleton["head"][1], 5)

            # Display lines from elbows to the respective hands
            ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"],
                             self.data.skeleton["hand"]["left"])
            ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"],
                             self.data.skeleton["hand"]["right"])

            # Get the pixel's depth from the coordinates of the hands
            leftPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["left"])
            rightPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["right"])

            if self.data.hand == self.settings.LEFT_HAND:
                currentDepth = leftPixel
            elif self.data.hand == self.settings.RIGHT_HAND:
                currentDepth = rightPixel

            # Get the shift of the boundaries around both hands
            leftShift = self.utils.getHandBoundShift(leftPixel)
            rightShift = self.utils.getHandBoundShift(rightPixel)

            # Display a rectangle around both hands
            ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"],
                                  leftShift, (50, 100, 255))
            ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"],
                                  rightShift, (200, 70, 30))

            # Record the current data if the user is ready
            if self.recordIfReady:
                cv2.putText(frame, str(self.data.getWishedDistance()),
                            (470, 60), cv2.FONT_HERSHEY_SIMPLEX, 2,
                            (252, 63, 253), 5)

                # Save automatically once the hand is within 10 mm of the wished distance
                if abs(self.data.getWishedDistance() - int(currentDepth)) <= 10:
                    self.record([])
                    self.recordIfReady = False
                else:
                    if int(currentDepth) < self.data.getWishedDistance():
                        showCurrentDepth = str(currentDepth) + " +"
                    else:
                        showCurrentDepth = str(currentDepth) + " -"
            else:
                showCurrentDepth = str(currentDepth)

            cv2.putText(frame, showCurrentDepth, (5, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 100, 255), 5)

        # Update the frame
        self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))

        self.timerScreen.start()

    # Update the label indicating the number of dataset elements saved so far
    # for the current type
    #
    # @param	None
    # @return	None
    def updateDatasetNumberLabel(self):
        if self.data.type == Dataset.TYPE_POSITIVE:
            self.numberLabel.setText("Dataset #%d" % (
                self.utils.getFileNumberInFolder(self.settings.getPositiveFolder())))
        elif self.data.type == Dataset.TYPE_NEGATIVE:
            self.numberLabel.setText("Dataset #%d" % (
                self.utils.getFileNumberInFolder(self.settings.getNegativeFolder())))
        elif self.data.type == Dataset.TYPE_ACCURACY:
            self.numberLabel.setText("Dataset #%d" % (
                self.utils.getFileNumberInFolder(self.settings.getAccuracyFolder())))
        else:
            self.numberLabel.setText("Dataset #%d" % (
                self.utils.getFileNumberInFolder(self.settings.getDatasetFolder())))

    # Record the actual information
    #
    # @param	obj		Initiator of the event
    # @return	None
    def record(self, obj):
        # If the user collects data to check accuracy, prompt additional information
        if self.data.type == Dataset.TYPE_ACCURACY:
            self.saveForTarget()
        # If the user collects data for a heat map, let's do it
        elif self.data.type == Dataset.TYPE_HEATMAP:
            # The same button will be used to stop recording
            if not self.heatmapRunning:
                self.startRecordHeatmap()
            else:
                self.stopRecordHeatmap()
        else:
            # Directly save the dataset and update the label number
            self.data.save()
            self.countdownEndedSound.play()
            self.updateDatasetNumberLabel()

    # Handle a countdown as a mean to record the information with a delay
    #
    # @param	None
    # @return	None
    def recordCountdown(self):
        # Decrease the countdown and check if it needs to continue
        self.countdownRemaining -= 1

        if self.countdownRemaining <= 0:
            # Re-initialise the timer and record the data
            self.countdownTimer.stop()
            self.countdownButton.setText("Saving..")
            self.countdownRemaining = 10
            self.record([])
        else:
            self.countdownTimer.start()
            self.countdownSound.play()
            # Display the actual remaining time
            self.countdownButton.setText("Save in %ds" % (self.countdownRemaining))

    # Record a heatmap representation of the information by successive captures
    #
    # @param	None
    # @return	None
    def recordHeatmap(self):
        if self.data.hand == self.settings.NO_HAND:
            print("Unable to record as no hand is selected")
            return False

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Input the data into the feature extractor
            result = self.bpn.check(self.featureExtractor.getFeatures(self.data))

            # Add the depth of the finger tip
            point = self.featureExtractor.fingerTip[result[1]]
            point.append(self.utils.getDepthFromMap(self.data.depth_map, point))

            # Verify that the information is correct
            if point[0] != 0 and point[1] != 0 and point[2] != 0:
                # Add the result of the neural network
                point.append(result[0])
                self.heatmap.append(point)
                self.countdownSound.play()

        # Loop timer
        self.heatmapTimer.start()

    # Start the recording of the heatmap
    #
    # @param	None
    # @return	None
    def startRecordHeatmap(self):
        self.saveButton.setText("Stop recording")
        self.heatmapRunning = True
        self.heatmapTimer.start()

    # Stop the recording of the heatmap
    #
    # @param	None
    # @return	None
    def stopRecordHeatmap(self):
        self.heatmapTimer.stop()
        self.heatmapRunning = False
        self.countdownEndedSound.play()
        self.saveButton.setText("Record")

        self.accuracy.showHeatmap(self.heatmap, "front")
        self.heatmap = []

    # Raise a flag to record the information when the chosen distance is met
    #
    # @param	None
    # @return	None
    def startRecordWhenReady(self):
        self.recordIfReady = True

    # Hold the current information to indicate the position of the target
    # thanks to the dialog window
    #
    # @param	None
    # @return	None
    def saveForTarget(self):
        # Freeze the data
        self.timerScreen.stop()
        self.countdownEndedSound.play()

        # Translate the depth values to a frame and set it in the dialog window
        frame = np.fromstring(self.depth.get_raw_depth_map_8(),
                              np.uint8).reshape(480, 640)
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        self.dialogWindow.setFrame(frame)

        # Prompt the position of the target
        self.dialogWindow.exec_()

    # Toggle the type of dataset chosen
    #
    # @param	value	Identifier of the new type of dataset
    # @return	None
    def toggleType(self, value):
        self.data.toggleType(value)

        if value == self.data.TYPE_HEATMAP:
            self.saveButton.setText("Record")
            self.countdownButton.setText("Record in %ds" % (self.countdownRemaining))
            self.readyButton.setEnabled(False)

            # Create an array to hold all points
            self.heatmap = []
        else:
            self.updateDatasetNumberLabel()
            if hasattr(self, 'saveButton'):
                self.saveButton.setText("Save")
                self.countdownButton.setText("Save in %ds" % (self.countdownRemaining))
                self.readyButton.setEnabled(True)

    # Create the acquisition form of the main window
    #
    # @param	None
    # @return	None
    def createAcquisitionForm(self):
        globalLayout = QtWidgets.QHBoxLayout()
        vlayout = QtWidgets.QVBoxLayout()

        # Drop-down menu of the distance at which to record the information
        # when the pointing hand meets the corresponding value
        hlayout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Distance")
        label.setFixedWidth(100)
        comboBox = QtWidgets.QComboBox()
        comboBox.currentIndexChanged.connect(self.data.toggleDistance)
        comboBox.setFixedWidth(200)
        comboBox.addItem("550")
        comboBox.addItem("750")
        comboBox.addItem("1000")
        comboBox.addItem("1250")
        comboBox.addItem("1500")
        comboBox.addItem("1750")
        comboBox.addItem("2000")
        comboBox.setCurrentIndex(0)
        hlayout.addWidget(label)
        hlayout.addWidget(comboBox)
        vlayout.addLayout(hlayout)

        # Drop-down menu to select the pointing hand of the dataset
        hlayout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Pointing hand")
        label.setFixedWidth(100)
        comboBox = QtWidgets.QComboBox()
        comboBox.currentIndexChanged.connect(self.data.toggleHand)
        comboBox.setFixedWidth(200)
        comboBox.addItem("Left")
        comboBox.addItem("Right")
        comboBox.addItem("None")
        comboBox.setCurrentIndex(0)
        hlayout.addWidget(label)
        hlayout.addWidget(comboBox)
        vlayout.addLayout(hlayout)

        # Drop-down menu of the dataset type
        hlayout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Type")
        label.setFixedWidth(100)
        comboBox = QtWidgets.QComboBox()
        comboBox.currentIndexChanged.connect(self.toggleType)
        comboBox.setFixedWidth(200)
        comboBox.addItem("Positive")
        comboBox.addItem("Negative")
        comboBox.addItem("Accuracy")
        comboBox.addItem("Heat map")
        comboBox.setCurrentIndex(0)
        hlayout.addWidget(label)
        hlayout.addWidget(comboBox)
        vlayout.addLayout(hlayout)

        globalLayout.addLayout(vlayout)

        vlayout = QtWidgets.QVBoxLayout()
        self.numberLabel.setAlignment(QtCore.Qt.AlignCenter)
        vlayout.addWidget(self.numberLabel)

        # Action buttons to record in whichever way suits the most
        hLayout = QtWidgets.QHBoxLayout()
        self.readyButton = QtWidgets.QPushButton(
            'Save when ready', clicked=self.startRecordWhenReady)
        hLayout.addWidget(self.readyButton)
        vlayout.addLayout(hLayout)

        item_layout = QtWidgets.QHBoxLayout()
        self.countdownButton = QtWidgets.QPushButton(
            "Save in %ds" % (self.countdownRemaining),
            clicked=self.countdownTimer.start)
        self.saveButton = QtWidgets.QPushButton('Save', clicked=self.record)
        item_layout.addWidget(self.countdownButton)
        item_layout.addWidget(self.saveButton)
        vlayout.addLayout(item_layout)

        globalLayout.addLayout(vlayout)
        self.layout.addLayout(globalLayout)
class Validating():
    # Load required classes
    bpn = BPNHandler(True)
    datasetManager = DatasetManager()
    featureExtractor = FeatureExtractor()
    settings = Settings()
    utils = Utils()

    # Evaluate the complete dataset
    #
    # @param	type	Type of dataset to be evaluated
    # @return	None
    def complete(self, type):
        positiveValidating = self.datasetManager.getPositiveCompleteMixed(type)
        negativeValidating = self.datasetManager.getMainNegative(type)

        # Run the network
        self.run(positiveValidating, negativeValidating)

    # Evaluate the restrained dataset
    #
    # @param	type	Type of dataset to be evaluated
    # @return	None
    def restrained(self, type):
        positiveValidating = self.datasetManager.getPositiveRestrainedMixed(type)
        negativeValidating = self.datasetManager.getNegativeMainRestrained(type)

        # Run the network
        self.run(positiveValidating, negativeValidating)

    # Evaluate the given information
    #
    # @param	positiveValidating	Array of all positive files to process
    # @param	negativeValidating	Array of all negative files to process
    # @param	getData				Flag to retrieve the data in order to bypass a future loading
    # @return	None
    def run(self, positiveValidating, negativeValidating, getData=False):
        # Load all dataset files
        positive = self.datasetManager.loadDataset(positiveValidating)
        negative = self.datasetManager.loadDataset(negativeValidating)

        # Process all features
        print("Processing features...")
        positiveInput = []
        for data in positive:
            positiveInput.extend(self.featureExtractor.getFeatures(data))

        negativeInput = []
        for data in negative:
            negativeInput.extend(self.featureExtractor.getFeatures(data))

        # Check if we need to print the data or run the network
        if getData:
            self.utils.getPythonInitCode(positiveInput, "positiveInput")
            self.utils.getPythonInitCode(negativeInput, "negativeInput")
        else:
            # Run the validation against the network; counters are initialised
            # up front so the final score never hits an undefined name
            goodPositive, badPositive = 0, 0
            goodNegative, badNegative = 0, 0

            if len(positiveInput) > 0:
                print("Positive validation")
                count = 0
                for positive in positiveInput:
                    result = self.bpn.check([positive])
                    if result[0] == False:
                        badPositive += 1
                        print("{0} is erroneous".format(count))
                    else:
                        goodPositive += 1
                    count += 1
                print()
                print("{0} correct and {1} bad --> {2:0.2f}%".format(
                    goodPositive, badPositive,
                    (goodPositive / float(goodPositive + badPositive) * 100)))
                print()

            if len(negativeInput) > 0:
                print("Negative validation")
                count = 0
                for negative in negativeInput:
                    result = self.bpn.check([negative])
                    if result[0] == True:
                        badNegative += 1
                        print("{0} is erroneous".format(count))
                    else:
                        goodNegative += 1
                    count += 1
                print()
                print("{0} correct and {1} bad --> {2:0.2f}%".format(
                    goodNegative, badNegative,
                    (goodNegative / float(goodNegative + badNegative) * 100)))

                print("Final score = {0:0.2f}%".format(
                    ((goodPositive + goodNegative) /
                     float(goodPositive + badPositive + goodNegative + badNegative)) * 100))

            if len(positiveInput) == 0 and len(negativeInput) == 0:
                print("No input to validate...")
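# A minimal driver sketch (assumed usage, not part of the original file):
# evaluate the restrained network against the "validating" split, matching
# the repartition labels used elsewhere in the project.
if __name__ == "__main__":
    validator = Validating()
    validator.restrained("validating")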
class DatasetDialog(QtWidgets.QDialog):
    utils = Utils()

    # Constructor of the DatasetDialog class
    #
    # @param	parent	Parent instance to exchange information up to the main window
    # @return	None
    def __init__(self, parent=None):
        # Initialise a dialog window
        super(DatasetDialog, self).__init__(parent)
        self.parent = parent

        self.setWindowTitle("Indicate the position of the target")
        self.layout = QtWidgets.QVBoxLayout(self)

        # Reserve some space for the depth image
        self.depthImage = SensorWidget()
        self.depthImage.setGeometry(10, 10, 640, 480)

        # Register action for the depth image
        action = functools.partial(self.imageClicked, self.depthImage)
        self.depthImage.clicked.connect(action)

        # Create OK|Cancel buttons
        self.buttonBox = QtWidgets.QDialogButtonBox(self)
        self.buttonBox.setFixedWidth(170)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(
            QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)

        # Register actions for the buttons
        self.buttonBox.button(QtWidgets.QDialogButtonBox.Cancel).clicked.connect(self.reject)
        self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).clicked.connect(self.accept)

        hlayout = QtWidgets.QHBoxLayout()

        # Create the target distance text field and the output of the clicked depth
        groupbox = QtWidgets.QGroupBox()
        groupbox.setTitle("Target")
        groupbox_layout = QtWidgets.QVBoxLayout()
        self.targetDistance = self.add_text_field(
            groupbox_layout, "Distance between the target and the fingertip:",
            0, self.parent.data.setDistance)

        self.pickedDepth = QtWidgets.QLabel("")
        self.pickedDepth.setAlignment(QtCore.Qt.AlignLeft)
        groupbox_layout.addWidget(self.pickedDepth)

        groupbox.setLayout(groupbox_layout)
        hlayout.addWidget(groupbox)
        hlayout.addWidget(self.buttonBox)

        # Insert all elements to the layout
        self.layout.addWidget(self.depthImage)
        self.layout.addLayout(hlayout)

        # This will assert that the image has been clicked before saving
        self.target = []

        # If the user hits the save button without indicating the target,
        # display an alert
        self.messageBox = QtWidgets.QMessageBox()
        self.messageBox.setText("Please, indicate the position of the target.")

    # Add a text input and its corresponding label to the layout
    #
    # @param	parent_layout	Layout of the parent to add the widget accordingly
    # @param	title			Label of the input text field
    # @param	value			Default value of the text field
    # @param	function		Function to trigger when the value of the input changes
    # @return	QLineEdit		Instance of the text field
    def add_text_field(self, parent_layout, title, value, function):
        hlayout = QtWidgets.QHBoxLayout()

        text_label = QtWidgets.QLabel(title)
        text_label.setFixedWidth(270)
        text_field = QtWidgets.QLineEdit()
        text_field.setValidator(QtGui.QIntValidator(0, 31337))

        hlayout.addWidget(text_label)
        hlayout.addWidget(text_field)
        parent_layout.addLayout(hlayout)

        # Connect changed signal to the GUI element
        text_field.textChanged.connect(function)

        # Set the text field value and trigger the value update
        text_field.setText(str(value))

        return text_field

    # Hold a naked version and update the image of the window
    #
    # @param	frame	Image information
    # @return	None
    def setFrame(self, frame):
        self.naked_frame = frame
        self.updateImage(frame)

    # Update the image of the window
    #
    # @param	frame	Image information
    # @return	None
    def updateImage(self, frame):
        self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))

    # Slot triggered when the image receives a click event
    #
    # @param	obj		Initiator of the event
    # @param	event	Information about the current event
    # @return	None
    @QtCore.pyqtSlot()
    def imageClicked(self, obj, event):
        self.target = [event.x(), event.y()]

        # Get the depth value and show it
        depth = self.utils.getDepthFromMap(self.parent.data.depth_map, self.target)
        self.pickedDepth.setText(
            "Distance between the target and the camera: %d mm." % (int(depth)))

        # Ignore all previous drawings by doing a deep copy of the naked frame
        # and add the new position dot
        frame = deepcopy(self.naked_frame)
        ui.drawPoint(frame, self.target[0] - 2, self.target[1] - 2, 2)

        self.updateImage(frame)

    # Slot triggered when the OK button is used
    #
    # @param	None
    # @return	None
    @QtCore.pyqtSlot()
    def accept(self):
        if len(self.target) == 2:
            # Get the depth value of the target and set it to the dataset
            self.target.append(
                self.utils.getDepthFromMap(self.parent.data.depth_map, self.target))
            self.parent.data.target = self.target

            # Save the dataset
            self.parent.data.save()

            # Close the dialog
            self.reject()
        else:
            # Display an error
            self.messageBox.exec_()

    # Slot triggered when the Cancel button is used, the dialog window is
    # closed or the Escape key is pressed
    #
    # @param	None
    # @return	None
    @QtCore.pyqtSlot()
    def reject(self):
        # Reset an eventual finger tip position
        self.target = []
        self.pickedDepth.setText("")

        # Restart the GUI screen timer and update the dataset number label
        self.parent.timerScreen.start()
        self.parent.updateDatasetNumberLabel()

        # Close the dialog
        super(DatasetDialog, self).reject()
class Training():
    # Load required classes
    bpn = BPNHandler()
    datasetManager = DatasetManager()
    featureExtractor = FeatureExtractor()
    settings = Settings()
    utils = Utils()

    # Returns the array of the positive targets based on the parameter
    #
    # @param	data			Data to evaluate
    # @param	positiveTarget	Array of the positive targets
    # @return	array			Array of the positive targets based on the parameter
    def getPositiveTargetArray(self, data, positiveTarget):
        output = []
        for i in range(len(data)):
            for j in range(len(data[i])):
                output.append(positiveTarget[i])
        return output

    # Returns the array of the negative targets based on the parameter
    #
    # @param	data					Data to evaluate
    # @param	positiveTargetLength	Length of the array of positive targets
    # @return	array					Array of the negative targets based on the parameter
    def getNegativeTargetArray(self, data, positiveTargetLength):
        # Create the negative target thanks to the length of the positive one
        negativeTarget = np.zeros(positiveTargetLength).astype(int)

        output = []
        for i in range(len(data)):
            for j in range(len(data[i])):
                output.append(negativeTarget)
        return output

    # Train the network with the complete set of data
    #
    # @param	None
    # @return	None
    def complete(self):
        positiveTraining = self.datasetManager.getPositiveCompleteMixed("training")
        negativeTraining = self.datasetManager.getMainNegative("training")
        positiveTesting = self.datasetManager.getPositiveCompleteMixed("testing")
        negativeTesting = self.datasetManager.getMainNegative("testing")
        positiveTarget = self.datasetManager.getCompleteMixedTarget()

        # Run the network
        self.run(positiveTraining, negativeTraining, positiveTesting,
                 negativeTesting, positiveTarget, True)

    # Train the network with the restrained set of data
    #
    # @param	None
    # @return	None
    def restrained(self):
        positiveTraining = self.datasetManager.getPositiveRestrained("training")
        negativeTraining = self.datasetManager.getNegativeMainRestrained("training")
        positiveTesting = self.datasetManager.getPositiveRestrained("testing")
        negativeTesting = self.datasetManager.getNegativeMainRestrained("testing")
        positiveTarget = self.datasetManager.getRestrainedTarget()

        # Run the network
        self.run(positiveTraining, negativeTraining, positiveTesting,
                 negativeTesting, positiveTarget, True)

    # Train the network with pre-computed recent values to bypass loading
    #
    # @param	None
    # @return	None
    def recentValues(self):
        trainingInput = self.datasetManager.getRecentValuesRestrained(trainingInput=True)
        trainingTarget = self.datasetManager.getRecentValuesRestrained(trainingTarget=True)
        testingInput = self.datasetManager.getRecentValuesRestrained(testingInput=True)
        testingTarget = self.datasetManager.getRecentValuesRestrained(testingTarget=True)

        # Run the network
        self.bpn.run(trainingInput, trainingTarget, testingInput, testingTarget,
                     learningRate=0.05, momentum=0.1, optimal=True)

    # Train the network with the given set of data
    #
    # @param	positiveTraining	Array of positive data from the training set
    # @param	negativeTraining	Array of negative data from the training set
    # @param	positiveTesting		Array of positive data from the testing set
    # @param	negativeTesting		Array of negative data from the testing set
    # @param	positiveTarget		Array of positive targets to reach
    # @param	getData				Flag to output the processed features in order to bypass loading the next time
    # @return	None
    def run(self, positiveTraining, negativeTraining, positiveTesting,
            negativeTesting, positiveTarget, getData=False):
        # Load all dataset files and gather them accordingly
        training = self.datasetManager.loadDataset(positiveTraining)
        training.extend(self.datasetManager.loadDataset(negativeTraining))

        testing = self.datasetManager.loadDataset(positiveTesting)
        testing.extend(self.datasetManager.loadDataset(negativeTesting))

        # Process all features
        print("Processing features...")
        trainingInput = []
        for data in training:
            trainingInput.extend(self.featureExtractor.getFeatures(data))

        testingInput = []
        for data in testing:
            testingInput.extend(self.featureExtractor.getFeatures(data))

        # Build the target arrays
        trainingTarget = self.getPositiveTargetArray(positiveTraining, positiveTarget)
        trainingTarget.extend(self.getNegativeTargetArray(negativeTraining, len(positiveTarget)))

        testingTarget = self.getPositiveTargetArray(positiveTesting, positiveTarget)
        testingTarget.extend(self.getNegativeTargetArray(negativeTesting, len(positiveTarget)))

        # Check if we need to print the data or run the network
        if getData:
            self.utils.getPythonInitCode(trainingInput, "trainingInput")
            self.utils.getPythonInitCode(trainingTarget, "trainingTarget")
            self.utils.getPythonInitCode(testingInput, "testingInput")
            self.utils.getPythonInitCode(testingTarget, "testingTarget")
        else:
            # Run the network
            self.bpn.run(trainingInput, trainingTarget, testingInput, testingTarget,
                         learningRate=0.05, momentum=0.1, optimal=False)
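# A minimal driver sketch (assumed usage, not part of the original file).
# Note that complete() and restrained() pass getData=True, so as written
# they dump the processed features via getPythonInitCode rather than train;
# recentValues() is the path that actually trains on pre-computed features.
if __name__ == "__main__":
    trainer = Training()
    trainer.recentValues()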
async def on_chat_message(self, msg):
    content_type, chat_type, chat_id = telepot.glance(msg)
    print(chat_id)
    print(self.bot_state)

    # Reply keyboard offering preset zoom levels plus a Cancel option
    # (request_contact/request_location are left at their default of not requested)
    def build_zoom_keyboard():
        return ReplyKeyboardMarkup(
            keyboard=[[KeyboardButton(text="16"),
                       KeyboardButton(text="15"),
                       KeyboardButton(text="14")],
                      [KeyboardButton(text="12"),
                       KeyboardButton(text="10"),
                       KeyboardButton(text="8")],
                      [KeyboardButton(text="Cancel")]],
            resize_keyboard=True,
            one_time_keyboard=True,
            selective=True)

    if content_type == "location":
        self.location = (msg['location']['latitude'], msg['location']['longitude'])
        self.bot_state = BotStates.AWAITING_ZOOM
        await self.sender.sendMessage(
            "Select the zoom level (or write your desired zoom between 3 and 17)",
            reply_markup=build_zoom_keyboard())
    elif content_type == "text" and self.bot_state == BotStates.AWAITING_ZOOM and self.location:
        remove_keyboard = ReplyKeyboardRemove(remove_keyboard=True, selective=False)
        try:
            user_input = msg['text'].rstrip('\r\n')
        except ValueError:
            await self.sender.sendMessage(
                "It seems that your message is invalid. I'm sorry",
                reply_markup=remove_keyboard)
            return
        if user_input == "Cancel":
            self.bot_state = BotStates.AWAITING_LOCATION
            await self.sender.sendMessage(
                "Ok, I'll wait for you to send me another location. See you soon!",
                reply_markup=remove_keyboard)
        elif Utils.is_valid_int(user_input):
            await self.sender.sendMessage(
                "Hang on, I'll take your screenshot and send it to you in a few seconds.",
                reply_markup=remove_keyboard)
            (res, driver) = intel.prepare_intel(
                (self.location[0], self.location[1]),
                self.iitc_mail, self.iitc_pass, int(user_input))
            if res:
                screenshot = intel.screenshot(driver)
                with open(screenshot, 'rb') as Screenshot:
                    await self.sender.sendPhoto(Screenshot)
                os.remove(screenshot)
                self.close()
            else:
                screenshot = intel.screenshot(driver)
                await self.sender.sendMessage(
                    "I'm so sorry but there was an error taking your screenshot, "
                    "please try again later or contact my creator @d0nzok.",
                    parse_mode='Markdown')
                self.close()
        else:
            self.bot_state = BotStates.AWAITING_ZOOM
            await self.sender.sendMessage(
                "I'm sorry, but I need you to select one of the zoom levels in the "
                "keyboard below or write a zoom level between 3 and 17",
                reply_markup=build_zoom_keyboard())
    elif content_type == "text" and self.bot_state == BotStates.AWAITING_LOCATION:
        remove_keyboard = ReplyKeyboardRemove(remove_keyboard=True, selective=False)
        await self.sender.sendMessage(
            "I'm sorry, but I need you to send me a location first, you can do it by "
            "clicking the 'attach' button and selecting 'location'",
            reply_markup=remove_keyboard)
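# The enclosing handler class is not shown in this file. A typical telepot
# (async flavour) bootstrap for a per-chat ChatHandler of this shape is
# sketched below; "ScreenshotHandler" and TOKEN are placeholders, not names
# taken from the original code.
#
#     import asyncio
#     import telepot.aio
#     from telepot.aio.delegate import pave_event_space, per_chat_id, create_open
#
#     bot = telepot.aio.DelegatorBot(TOKEN, [
#         pave_event_space()(per_chat_id(), create_open, ScreenshotHandler,
#                            timeout=120),
#     ])
#     loop = asyncio.get_event_loop()
#     loop.create_task(bot.message_loop())
#     loop.run_forever()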
class Relevancy():
    # Load required classes
    datasetManager = DatasetManager()
    settings = Settings()
    utils = Utils()

    repartition = ["training", "testing", "validating"]
    direction = [
        "back-right", "right", "front-right", "front", "front-left", "left",
        "back-left"
    ]
    orientation = ["up", "lateral", "down"]
    negativeType = ["closed", "opened", "four", "three", "peace", "rock"]

    # Returns the repartition between positive and negative files
    #
    # @param	None
    # @return	tuple	Tuple of the repartition for positive and negative files
    def getRepartition(self):
        # Get detailed counts
        positive = {}
        negative = {}

        for repartition in self.repartition:
            positive[repartition] = {}
            negative[repartition] = {}

            for direction in self.direction:
                positive[repartition][direction] = {}
                negative[repartition][direction] = {}

                for orientation in self.orientation:
                    positive[repartition][direction][orientation] = (
                        self.getDetailedPositiveRepartition(
                            repartition, direction, orientation))

                for negativeType in self.negativeType:
                    negative[repartition][direction][negativeType] = (
                        self.getDetailedNegativeRepartition(
                            repartition, direction, negativeType))

        return (positive, negative)

    # Returns the number of files in a given positive folder
    #
    # @param	type		Type of dataset
    # @param	direction	Direction featured in the dataset
    # @param	orientation	Orientation featured in the dataset
    # @return	numeric		Number of files in a given positive folder
    def getDetailedPositiveRepartition(self, type, direction, orientation=""):
        return self.utils.getFileNumberInFolder(
            self.settings.getPositiveFolder() + type + "/" + direction + "/" +
            orientation + "/")

    # Returns the number of files in a given negative folder
    #
    # @param	type		Type of dataset
    # @param	direction	Direction featured in the dataset
    # @param	orientation	Orientation featured in the dataset
    # @return	numeric		Number of files in a given negative folder
    def getDetailedNegativeRepartition(self, type, direction, orientation=""):
        return self.utils.getFileNumberInFolder(
            self.settings.getNegativeFolder() + type + "/" + direction + "/" +
            orientation + "/")

    # Display the general repartition
    #
    # @param	None
    # @return	None
    def showRepartition(self):
        positive, negative = self.getRepartition()

        print("\n\nPositive repartition\n")
        positive = self.showPositiveRepartition(positive)

        print("\n\nNegative repartition\n")
        negative = self.showNegativeRepartition(negative)

        print("\n\nTotal repartition\n")
        self.showTotalRepartition(positive, negative)

    # Display and return the positive repartition
    #
    # @param	positive	Array of all positive file repartition
    # @return	dict		Information about the repartition of the positive dataset
    def showPositiveRepartition(self, positive):
        totalPositive = 0
        totalTraining = 0
        totalTesting = 0
        totalValidating = 0

        for direction in self.direction:
            training = 0
            testing = 0
            validating = 0

            for orientation in self.orientation:
                # Align the columns for short category names
                if len(direction + orientation) < 10:
                    shift = "\t"
                else:
                    shift = ""

                print("--- {0} {1}{2}\tTraining: {3} \t\tTesting: {4} \t\tValidating: {5}"
                      .format(direction, orientation, shift,
                              positive["training"][direction][orientation],
                              positive["testing"][direction][orientation],
                              positive["validating"][direction][orientation]))

                training += positive["training"][direction][orientation]
                testing += positive["testing"][direction][orientation]
                validating += positive["validating"][direction][orientation]

            tmp = training + testing + validating
            totalTraining += training
            totalTesting += testing
            totalValidating += validating

            print("--- {0}\t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)\n---"
                  .format(direction, training, (training / float(tmp)) * 100,
                          testing, (testing / float(tmp)) * 100,
                          validating, (validating / float(tmp)) * 100))

        totalPositive = totalTraining + totalTesting + totalValidating
        print("--- Total: {0} \t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)"
              .format(totalPositive,
                      totalTraining, (totalTraining / float(totalPositive)) * 100,
                      totalTesting, (totalTesting / float(totalPositive)) * 100,
                      totalValidating, (totalValidating / float(totalPositive)) * 100))

        return {
            "total": totalPositive,
            "totalTraining": totalTraining,
            "totalTesting": totalTesting,
            "totalValidating": totalValidating
        }

    # Display and return the negative repartition
    #
    # @param	negative	Array of all negative file repartition
    # @return	dict		Information about the repartition of the negative dataset
    def showNegativeRepartition(self, negative):
        totalNegative = 0
        totalTraining = 0
        totalTesting = 0
        totalValidating = 0

        for direction in self.direction:
            training = 0
            testing = 0
            validating = 0

            for negativeType in self.negativeType:
                # Align the columns for short category names
                if len(direction + negativeType) < 11:
                    shift = "\t"
                else:
                    shift = ""

                print("--- {0} {1}{2}\tTraining: {3} \t\tTesting: {4} \t\tValidating: {5}"
                      .format(direction, negativeType, shift,
                              negative["training"][direction][negativeType],
                              negative["testing"][direction][negativeType],
                              negative["validating"][direction][negativeType]))

                training += negative["training"][direction][negativeType]
                testing += negative["testing"][direction][negativeType]
                validating += negative["validating"][direction][negativeType]

            tmp = training + testing + validating
            totalTraining += training
            totalTesting += testing
            totalValidating += validating

            print("--- {0}\t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)\n---"
                  .format(direction, training, (training / float(tmp)) * 100,
                          testing, (testing / float(tmp)) * 100,
                          validating, (validating / float(tmp)) * 100))

        totalNegative = totalTraining + totalTesting + totalValidating
        print("--- Total: {0} \t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)"
              .format(totalNegative,
                      totalTraining, (totalTraining / float(totalNegative)) * 100,
                      totalTesting, (totalTesting / float(totalNegative)) * 100,
                      totalValidating, (totalValidating / float(totalNegative)) * 100))

        return {
            "total": totalNegative,
            "totalTraining": totalTraining,
            "totalTesting": totalTesting,
            "totalValidating": totalValidating
        }

    # Display the general repartition
    #
    # @param	positive	Array of all positive file repartition information
    # @param	negative	Array of all negative file repartition information
    # @return	None
    def showTotalRepartition(self, positive, negative):
        total = positive["total"] + negative["total"]
        totalTraining = positive["totalTraining"] + negative["totalTraining"]
        totalTesting = positive["totalTesting"] + negative["totalTesting"]
        totalValidating = positive["totalValidating"] + negative["totalValidating"]

        print("--- Positive:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)"
              .format(positive["total"],
                      positive["totalTraining"],
                      (positive["totalTraining"] / float(positive["total"])) * 100,
                      positive["totalTesting"],
                      (positive["totalTesting"] / float(positive["total"])) * 100,
                      positive["totalValidating"],
                      (positive["totalValidating"] / float(positive["total"])) * 100))

        print("--- Negative:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)"
              .format(negative["total"],
                      negative["totalTraining"],
                      (negative["totalTraining"] / float(negative["total"])) * 100,
                      negative["totalTesting"],
                      (negative["totalTesting"] / float(negative["total"])) * 100,
                      negative["totalValidating"],
                      (negative["totalValidating"] / float(negative["total"])) * 100))

        print("--- Total:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)"
              .format(total,
                      totalTraining, (totalTraining / float(total)) * 100,
                      totalTesting, (totalTesting / float(total)) * 100,
                      totalValidating, (totalValidating / float(total)) * 100))
def read_config(self):
    try:
        username = None
        account = None
        # QCMA stores its settings in a platform-specific location
        if platform.system() == "Windows":
            self.apps_path = Path(
                Utils.read_hkcu(r"Software\codestation\qcma", 'appsPath')) / 'PGAME'
            account = Utils.read_hkcu(r"Software\codestation\qcma", 'lastAccountId')
            username = Utils.read_hkcu(r"Software\codestation\qcma", 'lastOnlineId')
        elif platform.system() == "Linux":
            self.apps_path = Path(Utils.read_conf('appsPath')) / 'PGAME'
            account = Utils.read_conf('lastAccountId')
            username = Utils.read_conf('lastOnlineId')
        elif platform.system() == "Darwin":
            self.apps_path = Path(Utils.read_plist('appsPath')) / 'PGAME'
            account = Utils.read_plist('lastAccountId')
            username = Utils.read_plist('lastOnlineId')
        Utils.check_issue(os.path.exists(self.apps_path), "Apps Path Does Not Exist")
        if username and account:
            self.user.set_id(account)
            self.user.set_name(username)
            self.user.set_path(self.apps_path)
            # Wait for any in-progress QCMA backup to finish before touching the files
            while os.path.exists(self.apps_path / account / '_TEMP'):
                print('_TEMP folder found alongside game backups. QCMA appears to be backing up a game. We will')
                print('wait until this operation completes. Trying again in 15 seconds...')
                sleep(15)
    except FileNotFoundError:
        sys.exit("QCMA Settings Missing")
if __name__ == "__main__":
    try:
        Utils.check_version()
        fin = FinTrinity()
        fin.read_config()
        if fin.select_game():
            fin.setup_dirs()
            fin.backup_game()
            fin.download_dependencies()
            fin.hack()
    except SystemExit as e:
        print(Utils.pretty_exit_code(e.code))
    finally:
        input("[PRESS ENTER TO CLOSE]")
class LiveGui(QtWidgets.QWidget):
    utils = Utils()
    featureExtractor = FeatureExtractor()
    bpn = BPNHandler(True)

    # Constructor of the LiveGui class
    #
    # @param	None
    # @return	None
    def __init__(self):
        super(LiveGui, self).__init__()
        self.setWindowTitle("Pointing Gesture Recognition - Live")

        # Retrieve all settings
        self.settings = Settings()

        # Get the context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()
        print("Starting to detect users..")

        # Create a new dataset item
        self.data = LiveDataset()

        # Create the global layout
        self.layout = QtWidgets.QVBoxLayout(self)

        # Create custom widgets to hold sensor's images
        self.depthImage = SensorWidget()
        self.depthImage.setGeometry(10, 10, 640, 480)

        # Add these custom widgets to the global layout
        self.layout.addWidget(self.depthImage)

        # Set the default result text
        self.resultLabel = QtWidgets.QLabel()
        self.resultLabel.setText("No")

        # Create the acquisition form elements
        self.createAcquisitionForm()

        # Create and launch a timer to update the images
        self.timerScreen = QtCore.QTimer()
        self.timerScreen.setInterval(30)
        self.timerScreen.setSingleShot(True)
        self.timerScreen.timeout.connect(self.updateImage)
        self.timerScreen.start()

    # Update the depth image displayed within the main window
    #
    # @param	None
    # @return	None
    def updateImage(self):
        # Update to next frame
        self.context.wait_and_update_all()

        # Extract information of each tracked user
        self.data = skeleton.track(self.user, self.depth, self.data)

        # Get the whole depth map
        self.data.depth_map = np.asarray(
            self.depth.get_tuple_depth_map()).reshape(480, 640)

        # Create the frame from the raw depth map string and convert it to RGB
        frame = np.fromstring(self.depth.get_raw_depth_map_8(),
                              np.uint8).reshape(480, 640)
        frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_GRAY2RGB)

        # Will hold the depth of the currently selected hand
        currentDepth, showCurrentDepth = 0, ""

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Highlight the head
            # ui.drawPoint(frame, self.data.skeleton["head"][0], self.data.skeleton["head"][1], 5)

            # Display lines from elbows to the respective hands
            # ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"], self.data.skeleton["hand"]["left"])
            # ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"], self.data.skeleton["hand"]["right"])

            # Get the pixel's depth from the coordinates of the hands
            leftPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["left"])
            rightPixel = self.utils.getDepthFromMap(
                self.data.depth_map, self.data.skeleton["hand"]["right"])

            # Get the shift of the boundaries around both hands
            leftShift = self.utils.getHandBoundShift(leftPixel)
            rightShift = self.utils.getHandBoundShift(rightPixel)

            if self.data.hand == self.settings.LEFT_HAND:
                currentDepth = leftPixel
                # Display a rectangle around the current hand
                # ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (200, 70, 30))
            elif self.data.hand == self.settings.RIGHT_HAND:
                currentDepth = rightPixel
                # Display a rectangle around the current hand
                # ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))
            # else:
            #     # Display a rectangle around both hands
            #     ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (200, 70, 30))
            #     ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30))

            # Test the data against the neural network if possible
            if self.data.hand != self.settings.NO_HAND:
                result = self.bpn.check(self.featureExtractor.getFeatures(self.data))
                self.resultLabel.setText(str(result[0]))

                # Highlight the finger tip
                if result[0] != False:
                    ui.drawPoint(frame,
                                 self.featureExtractor.fingerTip[result[1]][0],
                                 self.featureExtractor.fingerTip[result[1]][1], 5)

                    # Highlight the eye
                    ui.drawPoint(frame,
                                 self.featureExtractor.eyePosition[result[1]][0],
                                 self.featureExtractor.eyePosition[result[1]][1], 5)

                    # Line of sight
                    ui.drawElbowLine(frame,
                                     self.featureExtractor.eyePosition[result[1]],
                                     self.featureExtractor.fingerTip[result[1]])

                    # Indicate orientation
                    cv2.putText(frame,
                                self.featureExtractor.orientation[result[1]],
                                (5, 60), cv2.FONT_HERSHEY_SIMPLEX, 2,
                                (50, 100, 255), 5)

        # Update the frame
        self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame))

        self.timerScreen.start()

    # Create the acquisition form of the main window
    #
    # @param	None
    # @return	None
    def createAcquisitionForm(self):
        globalLayout = QtWidgets.QHBoxLayout()

        # Drop-down menu to select the pointing hand
        hlayout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Pointing hand")
        label.setFixedWidth(100)
        comboBox = QtWidgets.QComboBox()
        comboBox.currentIndexChanged.connect(self.data.toggleHand)
        comboBox.setFixedWidth(200)
        comboBox.addItem("Left")
        comboBox.addItem("Right")
        comboBox.addItem("None")
        comboBox.addItem("Both")
        comboBox.setCurrentIndex(3)
        hlayout.addWidget(label)
        hlayout.addWidget(comboBox)
        globalLayout.addLayout(hlayout)

        self.resultLabel.setAlignment(QtCore.Qt.AlignCenter)
        globalLayout.addWidget(self.resultLabel)

        self.layout.addLayout(globalLayout)
def process():
    # Load the input image
    img = cv.imread('../cameraControllerPython/Image/map.jpg')
    OrigImag = img.copy()

    # Load image
    image_bgr = img
    # Convert to RGB
    image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)

    # Rectangle values: start x, start y, width, height
    rectangle = (25, 25, img.shape[1] - 50, img.shape[0] - 50)

    # Create initial mask
    mask = np.zeros(image_rgb.shape[:2], np.uint8)

    # Create temporary arrays used by grabCut
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)

    # Run grabCut
    cv.grabCut(
        image_rgb,  # Our image
        mask,  # The mask
        rectangle,  # Our rectangle
        bgdModel,  # Temporary array for the background model
        fgdModel,  # Temporary array for the foreground model
        5,  # Number of iterations
        cv.GC_INIT_WITH_RECT)  # Initialise using our rectangle

    # Create mask where sure and likely backgrounds are set to 0, otherwise 1
    mask_2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')

    # Multiply image with new mask to subtract background
    image_rgb_nobg = image_rgb * mask_2[:, :, np.newaxis]

    # Show image
    plt.imshow(image_rgb_nobg), plt.axis("off")
    plt.show()

    img = cv.cvtColor(image_rgb_nobg, cv.COLOR_RGB2BGR)
    imgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    ret, thresh = cv.threshold(imgray, 5, 255, 0)
    contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE,
                                          cv.CHAIN_APPROX_SIMPLE)
    contours = list(contours)  # findContours returns a tuple in OpenCV 4.x
    ctrs = img.copy()
    cv.drawContours(ctrs, contours, -1, (0, 255, 0), 3)

    decode_preds = {
        0: 'car',
        1: 'destination',
        2: 'house',
        3: 'origin',
        4: 'tree',
        5: 'truck',
        6: 'windmill'
    }
    model = load_model('drone_vision_vgg16.h5')

    # Scale factors from pixels to the 100x100 planning grid
    xL = OrigImag.shape[0] / 100
    yL = OrigImag.shape[1] / 100
    # Fallback start/goal positions in grid coordinates
    current = str(round(153 / xL)) + ", " + str(round(478 / yL))
    destination = str(round(1034 / xL)) + ", " + str(round(184 / yL))

    obstacles = np.array(["empty"])
    contoursNew = np.array([[1, 2, 3]])
    divisions = img.copy()
    i = 0
    while i < len(contours):
        (x, y, w, h) = cv.boundingRect(contours[i])
        # Keep only top-level contours that are big enough to matter
        if hierarchy[0][i][3] == -1 and contours[i].shape[0] > 3 and w > 10 and h > 10:
            print("Number: " + str(i))
            divisions = cv.rectangle(divisions, (x, y), (x + w, y + h),
                                     (255, 0, 0), 2)

            # Crop the bounding box and prepare it for the VGG model
            imagTopredict = OrigImag[y:y + h, x:x + w, :].copy()
            imagTopredict = cv.resize(imagTopredict, (150, 150))
            imagTopredict = imagTopredict / 255
            imagTopredict = np.array(imagTopredict)[:, :, 0:3]
            imagTopredict = imagTopredict.reshape(-1, 150, 150, 3)

            # Predict the probability across all output classes
            prediction = model.predict(imagTopredict)
            if contours[i].shape[0] == 4:
                prediction[0][3] = 1
            strPred = decode_preds[np.argmax(prediction)]
            strMsg = strPred + ' (' + str(
                round(prediction[0][np.argmax(prediction)] * 100, 2)) + '%)'
            divisions = cv.putText(divisions, strMsg, (x - 40, y),
                                   cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
            print(f'{strPred} ({round(prediction[0][np.argmax(prediction)] * 100, 2)}%)')
            print(cv.HuMoments(cv.moments(contours[i])).flatten())

            if strPred == 'origin':
                current = str(round((x + w / 2) / xL)) + "," + str(round((y + h / 2) / yL))
            elif strPred == 'destination':
                destination = str(round((x + w / 2) / xL)) + "," + str(round((y + h / 2) / yL))
            else:
                # Store the obstacle as "topLeft;bottomRight" in grid coordinates
                temp = (str(round(x / xL)) + "," + str(round(y / yL)) + ";" +
                        str(round((x + w) / xL)) + "," + str(round((y + h) / yL)))
                obstacles = np.concatenate((obstacles, [temp]), axis=0)
                # Remember the enclosing circle (centre and radius in pixels)
                contoursNew = np.concatenate(
                    (contoursNew,
                     [[x + w / 2, y + h / 2, math.sqrt(w**2 + h**2) / 2]]),
                    axis=0)
                divisions = cv.circle(divisions,
                                      (round(x + w / 2), round(y + h / 2)),
                                      int(math.sqrt(w**2 + h**2) / 2),
                                      (255, 0, 0), 2)
            i = i + 1
        else:
            contours.pop(i)
            hierarchy = np.delete(hierarchy, i, 1)

    ctrs = img.copy()
    cv.drawContours(ctrs, contours, -1, (0, 255, 0), 3)
    cv.imshow('Divisions', divisions)
    cv.imwrite("images/divisions.png", divisions)

    '''
    Probabilistic Road Map
    '''
    parser = argparse.ArgumentParser(description='PRM Path Planning Algorithm')
    parser.add_argument('--numSamples', type=int, default=500, metavar='N',
                        help='Number of sampled points')
    args = parser.parse_args()
    numSamples = args.numSamples

    current = list(map(int, current.split(",")))
    destination = list(map(int, destination.split(",")))
    print("Current: {} Destination: {}".format(current, destination))

    print("****Obstacles****")
    allObs = []
    for l in obstacles:
        if ";" in l:
            line = l.strip().split(";")
            topLeft = list(map(int, line[0].split(",")))
            bottomRight = list(map(int, line[1].split(",")))
            obs = Obstacle(topLeft, bottomRight)
            obs.printFullCords()
            allObs.append(obs)

    utils = Utils()
    utils.drawMap(allObs, current, destination)

    numSamples = 250
    prm = PRMController(numSamples, allObs, current, destination)
    # Initial random seed to try
    initialRandomSeed = 0
    xSol, ySol = prm.runPRM(initialRandomSeed)

    # Map the solution back from grid coordinates to pixels
    xSol = xL * np.array(xSol)
    ySol = yL * np.array(ySol)

    # Draw the raw solution on the annotated and original maps
    solutionImg = divisions.copy()
    for j in range(len(ySol) - 1):
        solutionImg = cv.line(solutionImg, (int(xSol[j]), int(ySol[j])),
                              (int(xSol[j + 1]), int(ySol[j + 1])),
                              (255, 0, 0), 2)

    solutionImgO = OrigImag.copy()
    for j in range(len(ySol) - 1):
        solutionImgO = cv.line(solutionImgO, (int(xSol[j]), int(ySol[j])),
                               (int(xSol[j + 1]), int(ySol[j + 1])),
                               (255, 0, 0), 2)

    contoursNew = np.delete(contoursNew, 0, axis=0)
    print("contoursNew")
    print(contoursNew)

    # Shortcut the path: drop intermediate waypoints whenever the straight
    # segment between two nodes stays clear of every obstacle circle
    i = 0
    while i < len(ySol):
        j = i + 2
        while j < len(ySol):
            mI = ySol[i]
            b = xSol[i]
            k = (ySol[j] - ySol[i]) / (xSol[j] - xSol[i])
            freeObst = True
            for cN in range(len(contoursNew)):
                # Closest point of the segment's line to the obstacle centre
                x = (contoursNew[cN, 0] + contoursNew[cN, 1] * k - mI * k +
                     b * k**2) / (1 + k**2)
                y = mI + (x - b) * k
                distObs = math.sqrt((contoursNew[cN, 0] - x)**2 +
                                    (contoursNew[cN, 1] - y)**2) - 2
                interX = (x < xSol[j] and x > xSol[i]) or (x > xSol[j] and x < xSol[i])
                interY = (y < ySol[j] and y > ySol[i]) or (y > ySol[j] and y < ySol[i])
                if contoursNew[cN, 2] > distObs and interX and interY:
                    freeObst = False
            if freeObst:
                xSol = np.delete(xSol, range(i + 1, j), axis=0)
                ySol = np.delete(ySol, range(i + 1, j), axis=0)
                j = i + 2
            else:
                j = j + 1
        i = i + 1

    # Draw the simplified solution
    for j in range(len(ySol) - 1):
        solutionImg = cv.line(solutionImg, (int(xSol[j]), int(ySol[j])),
                              (int(xSol[j + 1]), int(ySol[j + 1])),
                              (0, 255, 0), 2)
    cv.imshow('Solution', solutionImg)

    solutionImgO = OrigImag.copy()
    for j in range(len(ySol) - 1):
        solutionImgO = cv.line(solutionImgO, (int(xSol[j]), int(ySol[j])),
                               (int(xSol[j + 1]), int(ySol[j + 1])),
                               (0, 255, 0), 2)
    cv.imshow('Solution Map', solutionImgO)

    # Transform the pixel path into the drone's world frame with a
    # hard-coded affine transform (homogeneous coordinates)
    Sol = np.concatenate(([xSol], [ySol], np.ones((1, np.size(xSol, axis=0)))),
                         axis=0)
    Sol = np.matmul(
        np.array([[-0.064717, -0.000218, 10.2782],
                  [0.000218, -0.064717, 34.8483],
                  [0, 0, 1]]), Sol)

    cv.waitKey(0)
    return Sol[0, :], Sol[1, :]
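# Hypothetical caller sketch (not part of the original file): process()
# returns the simplified path already mapped into the drone's world frame by
# the hard-coded affine transform above.
if __name__ == "__main__":
    xWorld, yWorld = process()
    for wx, wy in zip(xWorld, yWorld):
        print("waypoint: ({:.2f}, {:.2f})".format(wx, wy))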
class Accuracy: bpn = BPNHandler(True) datasetManager = DatasetManager() featureExtractor = FeatureExtractor() settings = Settings() trigonometry = Trigonometry() utils = Utils() expectedRadius = 2000 direction = [ "back-right", "right", "front-right", "front", "front-left", "left", "back-left" ] # Evaluate the pointed direction and display the average distance and angle # # @param None # @return None def processedPointedDirection(self): dataset = self.datasetManager.loadDataset( self.datasetManager.getAccuracyComplete()) outputDistance = [] outputAngle = [] outputAngleCamera = [] outputDistanceAt2m = [] for data in dataset: features = self.featureExtractor.getFeatures(data) depthMap = data.depth_map targetCoordinates = data.target fingerTipCoordinates = self.featureExtractor.fingerTip[0] fingerTipCoordinates.append( self.utils.getDepthFromMap(depthMap, fingerTipCoordinates)) eyeCoordinates = self.featureExtractor.eyePosition[0] eyeCoordinates.append( self.utils.getDepthFromMap(depthMap, eyeCoordinates)) # Retrieve the distance between the actual target and the closest impact distance = self.trigonometry.findIntersectionDistance( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) if distance == None: print "Missed..." else: outputDistance.append(distance) # Retrieve the distance between the target and the fingertip: targetDistance = float(data.distance) # Calculate the error angles angle = math.degrees(math.asin(distance / targetDistance)) outputAngle.append(angle) angleCamera = math.degrees( math.asin(distance / targetCoordinates[2])) outputAngleCamera.append(angleCamera) distanceAt2m = math.asin(distance / targetDistance) * 2000 outputDistanceAt2m.append(distanceAt2m) print "--- Impact distance: {0:0.1f} mm\t Impact at 2m: {1:0.1f}\t Error angle (fingertip): {2:0.1f} deg\t Error angle (camera): {3:0.1f} deg".format( distance, distanceAt2m, angle, angleCamera) print "---\n--- Average impact distance of {0:0.1f} mm.".format( np.average(outputDistance)) print "--- Average impact distance at 2 m of {0:0.1f} mm.".format( np.average(outputDistanceAt2m)) print "--- Average eror angle of {0:0.1f} deg at the fingertip.".format( np.average(outputAngle)) print "--- Average eror angle of {0:0.1f} deg at the camera.".format( np.average(outputAngleCamera)) # Evaluate the pointed direction by category and display the average distance and angle # # @param None # @return None def processedPointedDirectionByCategory(self): datasets = self.datasetManager.getAccuracyComplete() # Load all categories separately dataset = [] for data in datasets: dataset.append(self.datasetManager.loadDataset([data])) for category in range(len(dataset)): outputDistance = [] outputAngle = [] outputAngleCamera = [] outputDistanceAt2m = [] print "\n--- {0}".format(self.direction[category]) for data in dataset[category]: features = self.featureExtractor.getFeatures(data) depthMap = data.depth_map targetCoordinates = data.target fingerTipCoordinates = self.featureExtractor.fingerTip[0] fingerTipCoordinates.append( self.utils.getDepthFromMap(depthMap, fingerTipCoordinates)) eyeCoordinates = self.featureExtractor.eyePosition[0] eyeCoordinates.append( self.utils.getDepthFromMap(depthMap, eyeCoordinates)) # Retrieve the distance between the actual target and the closest impact distance = self.trigonometry.findIntersectionDistance( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) if distance == None: print "Missed..." 
            print "---\n--- Average impact distance of {0:0.1f} mm.".format(
                np.average(outputDistance))
            print "--- Average impact distance at 2 m of {0:0.1f} mm.".format(
                np.average(outputDistanceAt2m))
            print "--- Average error angle of {0:0.1f} deg at the fingertip.".format(
                np.average(outputAngle))
            print "--- Average error angle of {0:0.1f} deg at the camera.".format(
                np.average(outputAngleCamera))

    # Draw a graphic with centered trajectories' origins
    #
    # @param None
    # @return None
    def drawUnifiedTrajectories(self):
        # Load the dataset
        dataset = self.datasetManager.loadDataset(
            self.datasetManager.getAccuracyComplete())

        # Create the scene
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.set_aspect("equal")
        ax.set_xlabel('X (horizontal)')
        ax.set_ylabel('Y (vertical)')
        ax.set_zlabel('Z (depth)')

        for data in dataset:
            result = self.featureExtractor.getFeatures(data)

            # Processed data
            fingerTipCoordinates = self.featureExtractor.fingerTip[0]
            eyeCoordinates = self.featureExtractor.eyePosition[0]
            targetCoordinates = data.target
            depthMap = data.depth_map

            fingerTipCoordinates.append(
                self.utils.getDepthFromMap(depthMap, fingerTipCoordinates))
            eyeCoordinates.append(
                self.utils.getDepthFromMap(depthMap, eyeCoordinates))

            closest = self.trigonometry.findIntersection(
                fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                self.expectedRadius)

            if closest is not None:
                x = [
                    fingerTipCoordinates[0] - targetCoordinates[0],
                    closest[0] - targetCoordinates[0]
                ]
                y = [
                    fingerTipCoordinates[1] - targetCoordinates[1],
                    closest[1] - targetCoordinates[1]
                ]
                z = [
                    fingerTipCoordinates[2] - targetCoordinates[2],
                    closest[2] - targetCoordinates[2]
                ]

                # Draw the trajectory
                ax.plot(x, y, z)

        # Draw the target point
        ax.scatter(0, 0, 0, c="#000000", marker="o", s=2000)
        plt.show()

    # Draw a 3D graphic with the closest impacts
    #
    # @param None
    # @return None
    def drawImpacts(self):
        # Load the dataset
        dataset = self.datasetManager.loadDataset(
            self.datasetManager.getAccuracyComplete())

        # Create the scene
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.set_aspect("equal")
        ax.set_xlabel('X (horizontal in mm)')
        ax.set_ylabel('Y (vertical in mm)')
        ax.set_zlabel('Z (depth in mm)')

        colorConverter = ColorConverter()

        for data in dataset:
            result = self.featureExtractor.getFeatures(data)

            # Processed data
            fingerTipCoordinates = self.featureExtractor.fingerTip[0]
            eyeCoordinates = self.featureExtractor.eyePosition[0]
            targetCoordinates = data.target
            depthMap = data.depth_map

            fingerTipCoordinates.append(
                self.utils.getDepthFromMap(depthMap, fingerTipCoordinates))
            eyeCoordinates.append(
                self.utils.getDepthFromMap(depthMap, eyeCoordinates))

            closest = self.trigonometry.findIntersection(
                fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                self.expectedRadius)

            if closest is not None:
                x = closest[0] - targetCoordinates[0]
                y = closest[1] - targetCoordinates[1]
                z = closest[2] - targetCoordinates[2]

                distance = self.trigonometry.findIntersectionDistance(
                    fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                    self.expectedRadius)
                # Shade the point from red (close) to blue (far) over a 200 mm ramp
                red = 1 - (distance / 200)
                if red < 0:
                    red = 0
                elif red > 1:
                    red = 1

                blue = 0 + (distance / 200)
                if blue < 0:
                    blue = 0
                elif blue > 1:
                    blue = 1

                cc = colorConverter.to_rgba((red, 0, blue), 0.4)

                # Draw the impact point
                ax.scatter(x, y, z, color=cc, marker="o", s=50)

        # Draw the target point (a single color argument; passing both c= and
        # color= raises an error in matplotlib)
        ax.scatter(0, 0, 0, color="#000000", marker="o", s=100)
        plt.show()

    # Draw a 2D graphic with the closest impacts
    #
    # @param x Flag to display the horizontal axis
    # @param y Flag to display the vertical axis
    # @param z Flag to display the depth axis
    # @return None
    def drawImpacts2D(self, x=True, y=True, z=False):
        # Load the dataset
        dataset = self.datasetManager.loadDataset(
            self.datasetManager.getAccuracyComplete())

        plt.axis("equal")
        colorConverter = ColorConverter()

        for data in dataset:
            result = self.featureExtractor.getFeatures(data)

            # Processed data
            fingerTipCoordinates = self.featureExtractor.fingerTip[0]
            eyeCoordinates = self.featureExtractor.eyePosition[0]
            targetCoordinates = data.target
            depthMap = data.depth_map

            fingerTipCoordinates.append(
                self.utils.getDepthFromMap(depthMap, fingerTipCoordinates))
            eyeCoordinates.append(
                self.utils.getDepthFromMap(depthMap, eyeCoordinates))

            closest = self.trigonometry.findIntersection(
                fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                self.expectedRadius)

            if closest is not None:
                # Use distinct names so the axis flags x, y and z are not overwritten
                dx = closest[0] - targetCoordinates[0]
                dy = closest[1] - targetCoordinates[1]
                dz = closest[2] - targetCoordinates[2]

                distance = self.trigonometry.findIntersectionDistance(
                    fingerTipCoordinates, eyeCoordinates, targetCoordinates,
                    self.expectedRadius)

                red = 1 - (distance / 200)
                if red < 0:
                    red = 0
                elif red > 1:
                    red = 1

                blue = 0 + (distance / 200)
                if blue < 0:
                    blue = 0
                elif blue > 1:
                    blue = 1

                cc = colorConverter.to_rgba((red, 0, blue), 0.4)

                if not x:
                    plt.scatter(dy, dz, color=cc, marker="o", s=50)
                elif not y:
                    plt.scatter(dx, dz, color=cc, marker="o", s=50)
                else:
                    plt.scatter(dx, dy, color=cc, marker="o", s=50)

        plt.show()

    # Draw a 3D heatmap from several recorded points
    #
    # @param points Array of all recorded points
    # @param blurred Flag to add a blurry effect to all points
    # @return None
    def createHeatmap(self, points, blurred=False):
        # Create the scene
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.set_aspect("equal")

        # Create axes
        ax.set_xlabel('X (horizontal in mm)')
        ax.set_ylabel('Y (vertical in mm)')
        ax.set_zlabel('Z (depth in mm)')

        points = np.array(points)

        # Retrieve extreme values
        maxX, maxY, maxZ, tmp = points.max(axis=0)
        minX, minY, minZ, tmp = points.min(axis=0)

        # Retrieve middle values
        midX = minX + (maxX - minX) / 2
        midY = minY + (maxY - minY) / 2
        midZ = minZ + (maxZ - minZ) / 2

        # Draw center axis
        ax.plot([minX, maxX], [midY, midY], [midZ, midZ])
        ax.plot([midX, midX], [minY, maxY], [midZ, midZ])
        ax.plot([midX, midX], [midY, midY], [minZ, maxZ])

        # Add points
        for point in points:
            print "[{0},{1},{2},{3}],".format(point[0], point[1], point[2],
                                              point[3])

            # Add a blur effect to points if needed
            if blurred:
                for i in np.arange(0.1, 1.01, 0.1):
                    if point[3] == True:
                        c = (1, 0, 0, 0.3 / i / 10)
                    else:
                        c = (0, 0, 1, 0.3 / i / 10)
                    ax.scatter(point[0], point[1], point[2],
                               s=(50 * i * (0.55))**2, color=c)
            else:
                if point[3] == True:
                    c = (1, 0, 0, 0.3)
                else:
                    c = (0, 0, 1, 0.3)
                ax.scatter(point[0], point[1], point[2], s=50, color=c)

        # Set the correct view
        # ax.view_init(azim=-128, elev=-163)
        ax.view_init(azim=-89, elev=-74)
        # Display the graph
        plt.show()

    # Draw a 2D heatmap from several recorded points
    #
    # @param points Array of all recorded points
    # @param view Point of view to display the proper axes
    # @param blurred Flag to add a blurry effect to all points
    # @return None
    def showHeatmap(self, points, view, blurred=True):
        if view != "top" and view != "front":
            raise ValueError("Invalid view. Please specify 'top' or 'front'.",
                             view)

        # Create the scene
        if view == "top":
            ax = plt.subplot(111, aspect=0.3)
        else:
            ax = plt.subplot(111, aspect=1)

        points = np.array(points)

        # Retrieve extreme values
        maxX, maxY, maxZ, tmp = points.max(axis=0)
        minX, minY, minZ, tmp = points.min(axis=0)

        margin = 50
        ax.set_xlim([minX - margin, maxX + margin])
        if view == "top":
            ax.set_ylim([minZ - margin, maxZ + margin])
        else:
            ax.set_ylim([minY - margin, maxY + margin])

        # Retrieve middle values
        midX = minX + (maxX - minX) / 2
        midY = minY + (maxY - minY) / 2
        midZ = minZ + (maxZ - minZ) / 2

        # Draw center axis
        if view == "top":
            ax.plot([minX + (midX / 2), maxX - (midX / 2)], [midZ, midZ])
            ax.plot([midX, midX], [minZ + (midZ / 2), maxZ - (midZ / 2)])
        else:
            ax.plot([minX + (midX / 2), maxX - (midX / 2)], [midY, midY])
            ax.plot([midX, midX], [minY + (midY / 2), maxY - (midY / 2)])

        # Add points
        for point in points:
            print "[{0},{1},{2},{3}],".format(point[0], point[1], point[2],
                                              point[3])

            # Add a blur effect to points if needed
            if blurred:
                for i in np.arange(0.1, 1.01, 0.1):
                    if point[3] == True:
                        c = (1, 0, 0, 0.3 / i / 10)
                    else:
                        c = (0, 0, 1, 0.1 / i / 10)
                    if view == "top":
                        ax.scatter(point[0], point[2],
                                   s=(50 * i * (0.55))**2, color=c)
                    else:
                        ax.scatter(point[0], point[1],
                                   s=(50 * i * (0.55))**2, color=c)
            else:
                if point[3] == True:
                    c = (1, 0, 0, 0.3)
                else:
                    c = (0, 0, 1, 0.1)
                if view == "top":
                    ax.scatter(point[0], point[2], s=50, color=c)
                else:
                    ax.scatter(point[0], point[1], s=50, color=c)

        # Display the graph
        ax.invert_xaxis()
        ax.invert_yaxis()
        plt.show()
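The accuracy metrics above treat the impact distance as the chord subtended by the pointing error, so angle = asin(distance / targetDistance), and the "impact at 2 m" figure is that angle (in radians) scaled back to millimetres at a fixed 2000 mm range. A quick standalone check with hypothetical values:

import math

distance = 50.0          # closest impact to target, in mm (hypothetical)
targetDistance = 2000.0  # fingertip-to-target distance, in mm (hypothetical)

angle = math.degrees(math.asin(distance / targetDistance))
distanceAt2m = math.asin(distance / targetDistance) * 2000

print("Error angle: {0:0.2f} deg".format(angle))          # ~1.43 deg
print("Impact at 2 m: {0:0.1f} mm".format(distanceAt2m))  # ~50.0 mm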
class UnitTesting:
    featureExtractor = FeatureExtractor()
    utils = Utils()
    bpn = BPNHandler(True)

    currentPassed = 0
    currentFailed = 0
    currentTotal = 0
    passed = 0
    failed = 0
    total = 0

    # Check that the result parameter conforms to another expectation parameter
    #
    # @param expectation Targeted result
    # @param result Actual result to test
    # @param method Name of the method currently tested
    # @param test Name of the current assertion
    # @return None
    def check(self, expectation, result, method, test):
        self.currentTotal += 1
        self.total += 1

        if type(expectation).__module__ == np.__name__:
            # np.array_equiv, np.array_equal and np.testing.assert_allclose
            # all fail with np.NaN values; compare element by element instead
            tmp = True
            if type(result).__module__ == np.__name__ and expectation.size == result.size:
                shape = expectation.shape
                # Only considers 2D arrays
                for i in range(shape[0]):
                    if len(shape) > 1:
                        for j in range(shape[1]):
                            if expectation[i][j] is not None and np.isnan(expectation[i][j]):
                                # A NaN expectation requires a NaN result
                                if result[i][j] is None or not np.isnan(result[i][j]):
                                    tmp = False
                            elif type(expectation[i][j]) == float:
                                # Handle float approximation at 5 decimal places
                                if ('%0.5f' % expectation[i][j]) != ('%0.5f' % result[i][j]):
                                    tmp = False
                            elif expectation[i][j] != result[i][j]:
                                tmp = False
                    elif expectation[i] is not None and np.isnan(expectation[i]):
                        if result[i] is None or not np.isnan(result[i]):
                            tmp = False
                    elif expectation[i] != result[i]:
                        tmp = False
            else:
                tmp = False
        elif type(expectation) == list:
            if type(result) == list and len(expectation) == len(result):
                tmp = True
                for i in range(len(expectation)):
                    if expectation[i] != result[i]:
                        tmp = False
            else:
                tmp = False
        elif expectation is not None and np.isnan(expectation):
            tmp = np.isnan(result)
        elif expectation == result:
            tmp = True
        else:
            tmp = False

        if tmp:
            self.currentPassed += 1
            self.passed += 1
            print "--- Success \t{0}: \t{1}".format(method, test)
        else:
            self.currentFailed += 1
            self.failed += 1
            print "--- Failure \t{0}: \t{1} \t{2} while expecting {3}".format(
                method, test, result, expectation)

    # Display the results of the unit-tests of a category
    #
    # @param None
    # @return None
    def getResults(self):
        print "------------------------\n--- Unit Testing results:"
        if self.currentTotal > 0:
            # Use a float division so the percentage is not truncated to 0 or 100
            print "--- {0} passed ({1}%)".format(
                self.currentPassed,
                int((self.currentPassed / float(self.currentTotal)) * 100))
            print "--- {0} failed ({1}%)".format(
                self.currentFailed,
                int((self.currentFailed / float(self.currentTotal)) * 100))
            print "--- Total asserted: {0}".format(self.currentTotal)
        else:
            print "--- None yet..."
        print "------------------------"

        self.currentPassed = 0
        self.currentFailed = 0
        self.currentTotal = 0

    # Display the final results of the unit-tests
    #
    # @param None
    # @return None
    def getFinalResults(self):
        print "\n------------------------\n--- Final Unit Testing results:"
        if self.total > 0:
            print "--- {0} passed ({1}%)".format(
                self.passed, int((self.passed / float(self.total)) * 100))
            print "--- {0} failed ({1}%)".format(
                self.failed, int((self.failed / float(self.total)) * 100))
            print "--- Total asserted: {0}".format(self.total)
        else:
            print "--- None yet..."
print "------------------------" self.passed = 0 self.failed = 0 self.total = 0 # Assert the FeaturesExtractor class # # @param None # @return None def assertFeatureExtractor(self): print "\n--- FeaturesExtractor ---" # Assert the thresholdBinary method # Expected outputs: 0|1 self.check(0, self.featureExtractor.thresholdBinary(1, 2, 1), "thresholdBinary", "x<start") self.check(0, self.featureExtractor.thresholdBinary(2, 2, 1), "thresholdBinary", "x>end") self.check(0, self.featureExtractor.thresholdBinary(0, -1, 0), "thresholdBinary", "x==0") self.check(1, self.featureExtractor.thresholdBinary(1, 1, 1), "thresholdBinary", "x==start and x==end") self.check(1, self.featureExtractor.thresholdBinary(2, 1, 2), "thresholdBinary", "x>start and x==end") self.check(1, self.featureExtractor.thresholdBinary(2, 2, 3), "thresholdBinary", "x==start and x<end") self.check(1, self.featureExtractor.thresholdBinary(2, 1, 3), "thresholdBinary", "x>start and x<end") # Assert the thresholdExtracted method # Expected outputs: np.NaN|(x>=start and x<=end and x!=0) self.check(np.NaN, self.featureExtractor.thresholdExtracted(1, 2, 1), "thresholdExtracted", "x<start") self.check(np.NaN, self.featureExtractor.thresholdExtracted(2, 2, 1), "thresholdExtracted", "x>end") self.check(np.NaN, self.featureExtractor.thresholdExtracted(0, -1, 0), "thresholdExtracted", "x==0") self.check(1, self.featureExtractor.thresholdExtracted(1, 1, 1), "thresholdExtracted", "x==start and x==end") self.check(2, self.featureExtractor.thresholdExtracted(2, 1, 2), "thresholdExtracted", "x>start and x==end") self.check(2, self.featureExtractor.thresholdExtracted(2, 2, 3), "thresholdExtracted", "x==start and x<end") self.check(2, self.featureExtractor.thresholdExtracted(2, 1, 3), "thresholdExtracted", "x>start and x<end") # Assert the thresholdExtracted method # Expected outputs: None|(index of nearest value 1) self.check(None, self.featureExtractor.findNearestValue([], 0), "findNearestValue", "empty array") self.check(None, self.featureExtractor.findNearestValue([0], 1), "findNearestValue", "index out of bound") self.check(None, self.featureExtractor.findNearestValue([0], -1), "findNearestValue", "index out of bound") self.check(None, self.featureExtractor.findNearestValue([0,0], 1), "findNearestValue", "no 1 in small even array") self.check(None, self.featureExtractor.findNearestValue([0,0,0], 1), "findNearestValue", "no 1 in small odd array") self.check(None, self.featureExtractor.findNearestValue([0,0,0,0,0], 1), "findNearestValue", "no 1 in big even array") self.check(None, self.featureExtractor.findNearestValue([0,0,0,0,0,0], 1), "findNearestValue", "no 1 in big odd array") self.check(None, self.featureExtractor.findNearestValue([0,0,0,0,1], 1), "findNearestValue", "far initial index not in middle of odd array") self.check(0, self.featureExtractor.findNearestValue([1,0,0,0,0], 1), "findNearestValue", "close initial index not in middle of odd array") self.check(3, self.featureExtractor.findNearestValue([0,0,0,1], 1), "findNearestValue", "far initial index not in middle of even array") self.check(0, self.featureExtractor.findNearestValue([1,0,0,0], 1), "findNearestValue", "close initial index not in middle of even array") self.check(0, self.featureExtractor.findNearestValue([1,0,0], 1), "findNearestValue", "1 as first index in small array") self.check(1, self.featureExtractor.findNearestValue([0,1,0], 1), "findNearestValue", "1 as middle index in small array") self.check(2, self.featureExtractor.findNearestValue([0,0,1], 1), 
"findNearestValue", "1 as last index in small array") self.check(0, self.featureExtractor.findNearestValue([1,0,0,0,0], 2), "findNearestValue", "1 as first index in big array") self.check(2, self.featureExtractor.findNearestValue([0,0,1,0,0], 2), "findNearestValue", "1 as middle index in big array") self.check(4, self.featureExtractor.findNearestValue([0,0,0,0,1], 2), "findNearestValue", "1 as last index in big array") # Assert the tarExtracted method # Expected outputs: (same array with reduced values based on the minimum)|(same array if only NaN values) self.featureExtractor.currentExtracted = np.array([]) self.featureExtractor.tarExtracted() self.check(np.array([]), self.featureExtractor.currentExtracted, "tarExtracted", "empty array") self.featureExtractor.currentExtracted = np.array([np.NaN,np.NaN]) self.featureExtractor.tarExtracted() self.check(np.array([np.NaN,np.NaN]), self.featureExtractor.currentExtracted, "tarExtracted", "array of NaN values") self.featureExtractor.currentExtracted = np.array([-1,0,np.NaN]) self.featureExtractor.tarExtracted() self.check(np.array([0,1,np.NaN]), self.featureExtractor.currentExtracted, "tarExtracted", "negative minimal value") self.featureExtractor.currentExtracted = np.array([0,2,np.NaN]) self.featureExtractor.tarExtracted() self.check(np.array([0,2,np.NaN]), self.featureExtractor.currentExtracted, "tarExtracted", "zero minimal value") self.featureExtractor.currentExtracted = np.array([1,3,np.NaN]) self.featureExtractor.tarExtracted() self.check(np.array([0,2,np.NaN]), self.featureExtractor.currentExtracted, "tarExtracted", "positive minimal value") self.featureExtractor.currentExtracted = np.array([[np.NaN,np.NaN],[np.NaN,np.NaN]]) self.featureExtractor.tarExtracted() self.check(np.array([[np.NaN,np.NaN],[np.NaN,np.NaN]]), self.featureExtractor.currentExtracted, "tarExtracted", "2D array of NaN values") self.featureExtractor.currentExtracted = np.array([[1,0,np.NaN],[-1,0,np.NaN]]) self.featureExtractor.tarExtracted() self.check(np.array([[2,1,np.NaN],[0,1,np.NaN]]), self.featureExtractor.currentExtracted, "tarExtracted", "2D array with negative minimal value") self.featureExtractor.currentExtracted = np.array([[0,2,np.NaN],[1,2,np.NaN]]) self.featureExtractor.tarExtracted() self.check(np.array([[0,2,np.NaN],[1,2,np.NaN]]), self.featureExtractor.currentExtracted, "tarExtracted", "2D array with zero minimal value") self.featureExtractor.currentExtracted = np.array([[1,2,np.NaN],[3,4,np.NaN]]) self.featureExtractor.tarExtracted() self.check(np.array([[0,1,np.NaN],[2,3,np.NaN]]), self.featureExtractor.currentExtracted, "tarExtracted", "2D array with positive minimal value") # Assert the removeEmptyColumnsRows method # Expected outputs: same arrays without empty rows and columns self.featureExtractor.currentExtracted = np.array([]) self.featureExtractor.currentBinary = np.array([]) self.featureExtractor.removeEmptyColumnsRows() self.check(np.array([]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "empty array: currentExtracted") self.check(np.array([]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "empty array: currentBinary") self.featureExtractor.currentExtracted = np.array([[0,0],[0,0]]) self.featureExtractor.currentBinary = np.array([[0,0],[0,0]]) self.featureExtractor.removeEmptyColumnsRows() self.check(np.array([[]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "array of 0: currentExtracted") self.check(np.array([[]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", 
"array of 0: currentBinary") self.featureExtractor.currentExtracted = np.array([[1,2],[3,4]]) self.featureExtractor.currentBinary = np.array([[1,2],[3,4]]) self.featureExtractor.removeEmptyColumnsRows() self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "array of non-zero values: currentExtracted") self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "array of non-zero values: currentBinary") self.featureExtractor.currentExtracted = np.array([[0,0],[1,2]]) self.featureExtractor.currentBinary = np.array([[0,0],[1,2]]) self.featureExtractor.removeEmptyColumnsRows() self.check(np.array([[1,2]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "first row empty: currentExtracted") self.check(np.array([[1,2]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "first row empty: currentBinary") self.featureExtractor.currentExtracted = np.array([[1,2],[0,0]]) self.featureExtractor.currentBinary = np.array([[1,2],[0,0]]) self.featureExtractor.removeEmptyColumnsRows() self.check(np.array([[1,2]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "last row empty: currentExtracted") self.check(np.array([[1,2]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "last row empty: currentBinary") self.featureExtractor.currentExtracted = np.array([[0,1],[0,2]]) self.featureExtractor.currentBinary = np.array([[0,1],[0,2]]) self.featureExtractor.removeEmptyColumnsRows() self.check(np.array([[1],[2]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "first column empty: currentExtracted") self.check(np.array([[1],[2]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "first column empty: currentBinary") self.featureExtractor.currentExtracted = np.array([[1,0],[2,0]]) self.featureExtractor.currentBinary = np.array([[1,0],[2,0]]) self.featureExtractor.removeEmptyColumnsRows() self.check(np.array([[1],[2]]), self.featureExtractor.currentExtracted, "removeEmptyColumnsRows", "last column empty: currentExtracted") self.check(np.array([[1],[2]]), self.featureExtractor.currentBinary, "removeEmptyColumnsRows", "last column empty: currentBinary") # Assert the removeEmptyColumnsRows method # Expected outputs: rotated matrice by the rotationAngle # Expected outputs: (-1|0|1|2) for rotationAngle self.featureExtractor.currentExtracted = np.array([[1,2],[3,4]]) self.featureExtractor.currentBinary = np.array([[1,2],[3,4]]) self.featureExtractor.rotate([0,0],[0,0]) self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentExtracted, "rotate", "elbow/hand on same position: currentExtracted") self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentBinary, "rotate", "elbow/hand on same position: currentBinary") self.check(0, self.featureExtractor.rotationAngle, "rotate", "elbow/hand on same position: rotationAngle") self.featureExtractor.rotate([1,1],[0,0]) self.check(np.array([[2,4],[1,3]]), self.featureExtractor.currentExtracted, "rotate", "elbow up left: currentExtracted") self.check(np.array([[2,4],[1,3]]), self.featureExtractor.currentBinary, "rotate", "elbow up left: currentBinary") self.check(1, self.featureExtractor.rotationAngle, "rotate", "elbow up left: rotationAngle") self.featureExtractor.rotate([0,1],[0,0]) self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentExtracted, "rotate", "elbow up: currentExtracted") self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentBinary, "rotate", "elbow 
up: currentBinary") self.check(1, self.featureExtractor.rotationAngle, "rotate", "elbow up: rotationAngle") self.featureExtractor.rotate([0,1],[1,0]) self.check(np.array([[3,1],[4,2]]), self.featureExtractor.currentExtracted, "rotate", "elbow up right: currentExtracted") self.check(np.array([[3,1],[4,2]]), self.featureExtractor.currentBinary, "rotate", "elbow up right: currentBinary") self.check(1, self.featureExtractor.rotationAngle, "rotate", "elbow up right: rotationAngle") self.featureExtractor.rotate([0,0],[1,0]) self.check(np.array([[3,1],[4,2]]), self.featureExtractor.currentExtracted, "rotate", "elbow right: currentExtracted") self.check(np.array([[3,1],[4,2]]), self.featureExtractor.currentBinary, "rotate", "elbow right: currentBinary") self.check(0, self.featureExtractor.rotationAngle, "rotate", "elbow right: rotationAngle") self.featureExtractor.rotate([0,0],[1,1]) self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentExtracted, "rotate", "elbow down right: currentExtracted") self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentBinary, "rotate", "elbow down right: currentBinary") self.check(-1, self.featureExtractor.rotationAngle, "rotate", "elbow down right: rotationAngle") self.featureExtractor.rotate([0,0],[0,1]) self.check(np.array([[2,4],[1,3]]), self.featureExtractor.currentExtracted, "rotate", "elbow down: currentExtracted") self.check(np.array([[2,4],[1,3]]), self.featureExtractor.currentBinary, "rotate", "elbow down: currentBinary") self.check(-1, self.featureExtractor.rotationAngle, "rotate", "elbow down: rotationAngle") self.featureExtractor.rotate([1,0],[0,1]) self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentExtracted, "rotate", "elbow down left: currentExtracted") self.check(np.array([[1,2],[3,4]]), self.featureExtractor.currentBinary, "rotate", "elbow down left: currentBinary") self.check(-1, self.featureExtractor.rotationAngle, "rotate", "elbow down left: rotationAngle") self.featureExtractor.rotate([1,0],[0,0]) self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentExtracted, "rotate", "elbow left: currentExtracted") self.check(np.array([[4,3],[2,1]]), self.featureExtractor.currentBinary, "rotate", "elbow left: currentBinary") self.check(2, self.featureExtractor.rotationAngle, "rotate", "elbow left: rotationAngle") # Assert the keepRange method # Expected outputs: integer value between 0 and max self.check(0, self.featureExtractor.keepRange(-1, 2), "keepRange", "negative value") self.check(0, self.featureExtractor.keepRange(0, 2), "keepRange", "zero value") self.check(1, self.featureExtractor.keepRange(1, 2), "keepRange", "positive value < max") self.check(2, self.featureExtractor.keepRange(2, 2), "keepRange", "positive value == max") self.check(2, self.featureExtractor.keepRange(3, 2), "keepRange", "positive value > max") self.check(0, self.featureExtractor.keepRange(-1, 0), "keepRange", "negative value and max==0") self.check(0, self.featureExtractor.keepRange(0, 0), "keepRange", "zero value and max==0") self.check(0, self.featureExtractor.keepRange(1, 0), "keepRange", "positive value and max==0") self.check(0, self.featureExtractor.keepRange(-1, -1), "keepRange", "negative value and negative max") self.check(0, self.featureExtractor.keepRange(0, -1), "keepRange", "zero value and negative max") self.check(0, self.featureExtractor.keepRange(1, -1), "keepRange", "positive value and negative max") # Assert the keepRange method # Expected outputs: (percentage of actual data within a restricted area)|(0 as a fallback) 
        self.featureExtractor.currentW = 0
        self.featureExtractor.currentH = 0
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 1, 0, 0, 1, 1), "countWithinArea", "zero currentW and currentH")
        self.featureExtractor.currentW = 0
        self.featureExtractor.currentH = 1
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 1, 0, 0, 1, 1), "countWithinArea", "zero currentW")
        self.featureExtractor.currentW = 1
        self.featureExtractor.currentH = 0
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 1, 0, 0, 1, 1), "countWithinArea", "zero currentH")
        self.featureExtractor.currentW = -1
        self.featureExtractor.currentH = -1
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 1, 0, 0, 1, 1), "countWithinArea", "negative currentW and currentH")
        self.featureExtractor.currentW = -1
        self.featureExtractor.currentH = 1
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 1, 0, 0, 1, 1), "countWithinArea", "negative currentW")
        self.featureExtractor.currentW = 1
        self.featureExtractor.currentH = -1
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 1, 0, 0, 1, 1), "countWithinArea", "negative currentH")

        self.featureExtractor.currentW = 1
        self.featureExtractor.currentH = 1
        self.check(0, self.featureExtractor.countWithinArea(np.array([]), 1, 0, 0, 1, 1), "countWithinArea", "empty array")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 0, 0, 0, 1, 1), "countWithinArea", "zero total")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 0, 0, 0, 0), "countWithinArea", "v1==v2 and h1==h2")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 0, 0, 1, 0), "countWithinArea", "v1==v2 and h1<h2")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 1, 0, 0, 0), "countWithinArea", "v1==v2 and h1>h2")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 0, 0, 0, 1), "countWithinArea", "v1<v2 and h1==h2")
        self.check(10, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 0, 0, 1, 1), "countWithinArea", "v1<v2 and h1<h2")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 1, 0, 0, 1), "countWithinArea", "v1<v2 and h1>h2")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 0, 1, 0, 0), "countWithinArea", "v1>v2 and h1==h2")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 0, 1, 1, 0), "countWithinArea", "v1>v2 and h1<h2")
        self.check(0, self.featureExtractor.countWithinArea(np.array([[1, 2], [3, 4]]), 10, 1, 1, 0, 0), "countWithinArea", "v1>v2 and h1>h2")

        # Assert the getElbowHandAlignment method
        # Expected outputs: [(-1|0|1), (-1|0|1)]
        self.check([0, 0], self.featureExtractor.getElbowHandAlignment(-1, 0, 0, 0, 0, 0), "getElbowHandAlignment", "negative depth")
        self.check([0, 0], self.featureExtractor.getElbowHandAlignment(0, 0, 0, 0, 0, 0), "getElbowHandAlignment", "zero depth")
        self.check([0, 0], self.featureExtractor.getElbowHandAlignment(1, 0, 0, 0, 0, 0), "getElbowHandAlignment", "positive depth")
        self.check([1, -1], self.featureExtractor.getElbowHandAlignment(1000, 61, 61, 0, 0, 0), "getElbowHandAlignment", "left down")
        self.check([-1, -1], self.featureExtractor.getElbowHandAlignment(1000, 61, 0, 0, 61, 0), "getElbowHandAlignment", "right down")
        self.check([0, -1], self.featureExtractor.getElbowHandAlignment(1000, 61, 1, 0, 0, 0), "getElbowHandAlignment", "front down")
        self.check([1, 1], self.featureExtractor.getElbowHandAlignment(1000, 0, 61, 61, 0, 0), "getElbowHandAlignment", "left up")
        self.check([-1, 1], self.featureExtractor.getElbowHandAlignment(1000, 0, 0, 61, 61, 0), "getElbowHandAlignment", "right up")
        self.check([0, 1], self.featureExtractor.getElbowHandAlignment(1000, 0, 1, 61, 0, 0), "getElbowHandAlignment", "front up")
        self.check([1, 0], self.featureExtractor.getElbowHandAlignment(1000, 0, 61, 0, 0, 0), "getElbowHandAlignment", "left lateral")
        self.check([-1, 0], self.featureExtractor.getElbowHandAlignment(1000, 0, 0, 0, 61, 0), "getElbowHandAlignment", "right lateral")
        self.check([0, 0], self.featureExtractor.getElbowHandAlignment(1000, 0, 1, 0, 0, 0), "getElbowHandAlignment", "front lateral")

        # Assert the normalizeInput method
        # Expected outputs: [normalized values in the range -1 to 1]
        self.check([], self.featureExtractor.normalizeInput([]), "normalizeInput", "empty array")
        self.check([-1, -1, -1], self.featureExtractor.normalizeInput([0, 0, 0], 0, 2), "normalizeInput", "low range values")
        self.check([0, 0, 0], self.featureExtractor.normalizeInput([1, 1, 1], 0, 2), "normalizeInput", "middle range values")
        self.check([1, 1, 1], self.featureExtractor.normalizeInput([2, 2, 2], 0, 2), "normalizeInput", "top range values")

        # Assert the processFeatures method
        # Expected outputs: [6 normalized features]
        self.check([-1, -1, -1, -1, -1, -1], self.featureExtractor.processFeatures(0, 0, 0, 0, 0, 0, np.array([]), [0, 0, 0]), "processFeatures", "empty array")
        self.check([-1, -1, -1, -1, -1, -1], self.featureExtractor.processFeatures(0, 0, 0, 0, 0, 0, np.array([0, 0, 0]), [0, 0, 0]), "processFeatures", "1 dimensional array")
        self.check([-1, -1, -1, -1, -1, -1], self.featureExtractor.processFeatures(0, 0, 0, 0, 0, 0, np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), [0, 0, 0]), "processFeatures", "zero array")

        # Assert the getFingerTip method
        # Expected outputs: [v, h] non-negative values
        self.featureExtractor.cropLeft = 0
        self.featureExtractor.emptyLeft = 0
        self.featureExtractor.cropTop = 0
        self.featureExtractor.emptyTop = 0
        self.featureExtractor.rotationAngle = 0

        self.featureExtractor.currentBinary = np.array([[]])
        self.check([0, 0], self.featureExtractor.getFingerTip(), "getFingerTip", "empty array")
        self.featureExtractor.currentBinary = np.array([[0, 0, 0, 0, 0]])
        self.check([0, 0], self.featureExtractor.getFingerTip(), "getFingerTip", "zero array")

        self.featureExtractor.rotationAngle = -1
        self.featureExtractor.currentBinary = np.array([[1, 0, 0, 0, 0]])
        self.check([0, 4], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=-1 and extreme left value")
        self.featureExtractor.currentBinary = np.array([[0, 0, 1, 0, 0]])
        self.check([0, 2], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=-1 and middle value")
        self.featureExtractor.currentBinary = np.array([[0, 0, 0, 0, 1]])
        self.check([0, 0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=-1 and extreme right value")

        self.featureExtractor.rotationAngle = 0
        self.featureExtractor.currentBinary = np.array([[1, 0, 0, 0, 0]])
        self.check([0, 0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=0 and extreme left value")
        self.featureExtractor.currentBinary = np.array([[0, 0, 1, 0, 0]])
        self.check([2, 0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=0 and middle value")
        self.featureExtractor.currentBinary = np.array([[0, 0, 0, 0, 1]])
        self.check([4, 0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=0 and extreme right value")

        self.featureExtractor.rotationAngle = 1
        self.featureExtractor.currentBinary = np.array([[1, 0, 0, 0, 0]])
        self.check([0, 0], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=1 and extreme left value")
        self.featureExtractor.currentBinary = np.array([[0, 0, 1, 0, 0]])
        self.check([0, 2], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=1 and middle value")
        self.featureExtractor.currentBinary = np.array([[0, 0, 0, 0, 1]])
        self.check([0, 4], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=1 and extreme right value")

        self.featureExtractor.rotationAngle = 2
        self.featureExtractor.currentBinary = np.array([[1, 0, 0, 0, 0]])
        self.check([4, 1], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=2 and extreme left value")
        self.featureExtractor.currentBinary = np.array([[0, 0, 1, 0, 0]])
        self.check([2, 1], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=2 and middle value")
        self.featureExtractor.currentBinary = np.array([[0, 0, 0, 0, 1]])
        self.check([0, 1], self.featureExtractor.getFingerTip(), "getFingerTip", "rotationAngle=2 and extreme right value")

        # Assert the getEyePosition method
        # Expected outputs: [v, h] non-negative values
        self.check([0, 0], self.featureExtractor.getEyePosition(np.array([[]]), [0, 0, 0], [0, 0]), "getEyePosition", "empty array")
        self.check([0, 0], self.featureExtractor.getEyePosition(np.array([0, 0, 0]), [0, 0, 0], [0, 0]), "getEyePosition", "1 dimensional array")
        self.check([0, 0], self.featureExtractor.getEyePosition(np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), [0, 0, 0], [0, 0]), "getEyePosition", "zero array")

    # Assert the Utils class
    #
    # @param None
    # @return None
    def assertUtils(self):
        print "\n--- Utils ---"

        # Assert the getDepthFromMap method
        # Expected outputs: non-negative integer
        self.check(0, self.utils.getDepthFromMap(np.array([]), [0, 0]), "getDepthFromMap", "empty array")
        self.check(0, self.utils.getDepthFromMap(np.array([[0, 0], [0, 0]]), [-1, 0]), "getDepthFromMap", "y index out of bound (negative)")
        self.check(0, self.utils.getDepthFromMap(np.array([[0, 0], [0, 0]]), [2, 0]), "getDepthFromMap", "y index out of bound (>=len)")
        self.check(0, self.utils.getDepthFromMap(np.array([[0, 0], [0, 0]]), [0, -1]), "getDepthFromMap", "x index out of bound (negative)")
        self.check(0, self.utils.getDepthFromMap(np.array([[0, 0], [0, 0]]), [0, 2]), "getDepthFromMap", "x index out of bound (>=len)")
        self.check(0, self.utils.getDepthFromMap(np.array([[0, 0], [0, 0]]), []), "getDepthFromMap", "empty position array")
        self.check(0, self.utils.getDepthFromMap(np.array([[0, 0], [0, 0]]), [1]), "getDepthFromMap", "unexpected position array")
        self.check(42, self.utils.getDepthFromMap(np.array([[0, 0], [0, 42]]), [1, 1]), "getDepthFromMap", "correct value")

        # Assert the getHandBoundShift method
        # Expected outputs: integer
        self.check(-90, self.utils.getHandBoundShift(-1000), "getHandBoundShift", "negative depth")
        self.check(90, self.utils.getHandBoundShift(0), "getHandBoundShift", "zero depth")
        self.check(90, self.utils.getHandBoundShift(1000), "getHandBoundShift", "positive depth")

    # Assert the BPNHandler class
    #
    # @param None
    # @return None
    def assertBPNHandler(self):
        print "\n--- BPNHandler ---"

        # Assert the check method
        # Expected outputs: [Boolean, (0|1)]
        self.check([False, 0], self.bpn.check([[0, 0, 0, 0, 0, 0]]), "check", "zero array")
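The element-wise comparison inside UnitTesting.check exists because NaN never compares equal to itself, so the stock NumPy equality helpers reject arrays that these tests treat as matching. A minimal standalone illustration of the behaviour the method works around (np.allclose's equal_nan flag, available in any reasonably recent NumPy, is the off-the-shelf alternative):

import numpy as np

a = np.array([1.0, np.nan])
b = np.array([1.0, np.nan])

print(np.array_equal(a, b))               # False: NaN != NaN element-wise
print(np.allclose(a, b, equal_nan=True))  # True: NaN positions must match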
class Live:
    utils = Utils()
    featureExtractor = FeatureExtractor()
    bpn = BPNHandler(True)
    testing = Testing()

    # Constructor of the Live class
    #
    # @param None
    # @return None
    def __init__(self):
        # Retrieve all settings
        self.settings = Settings()

        # Get the context and initialise it
        self.context = Context()
        self.context.init()

        # Create the depth generator to get the depth map of the scene
        self.depth = DepthGenerator()
        self.depth.create(self.context)
        self.depth.set_resolution_preset(RES_VGA)
        self.depth.fps = 30

        # Create the user generator to detect skeletons
        self.user = UserGenerator()
        self.user.create(self.context)

        # Initialise the skeleton tracking
        skeleton.init(self.user)

        # Start generating
        self.context.start_generating_all()

        # Create a new dataset item
        self.data = LiveDataset()
        self.data.hand = self.settings.BOTH_HAND

        # Update the frame
        Timer(0.001, self.updateImage, ()).start()

    # Update the captured depth image
    #
    # @param None
    # @return None
    def updateImage(self):
        # Update to next frame
        self.context.wait_and_update_all()

        # Extract information about each tracked user
        self.data = skeleton.track(self.user, self.depth, self.data)

        # Get the whole depth map
        self.testing.startTimer()
        self.data.depth_map = np.asarray(
            self.depth.get_tuple_depth_map()).reshape(480, 640)
        self.testing.timerMarker("Depth map acquisition and conversion")
        self.testing.stopTimer()

        # Create dummy values
        recognition = False
        hand = None
        origin = [0, 0, 0]
        end = [0, 0, 0]

        if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0:
            # Test the data against the neural network if possible
            if self.data.hand != self.settings.NO_HAND:
                result = self.bpn.check(
                    self.featureExtractor.getFeatures(self.data))

                if result[0] != False:
                    recognition = True
                    hand = result[1]
                    origin = [
                        self.featureExtractor.eyePosition[result[1]][0],
                        self.featureExtractor.eyePosition[result[1]][1],
                        self.utils.getDepthFromMap(
                            self.data.depth_map,
                            self.featureExtractor.eyePosition[result[1]])
                    ]
                    end = [
                        self.featureExtractor.fingerTip[result[1]][0],
                        self.featureExtractor.fingerTip[result[1]][1],
                        self.utils.getDepthFromMap(
                            self.data.depth_map,
                            self.featureExtractor.fingerTip[result[1]])
                    ]

        # Output the result as a JSON-like status line
        print '{{"pointing":{0},"hand":{1},"origin":{2},"end":{3}}}'.format(
            recognition, hand, origin, end)

        # Schedule the next frame update
        Timer(0.001, self.updateImage, ()).start()
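updateImage never loops: it does one frame of work and then arms a fresh one-shot Timer, so each frame runs on its own short-lived thread. A minimal sketch of that self-rescheduling pattern, with hypothetical names (Poller, tick) and a stop condition added so the demo terminates:

import threading

class Poller:
    def __init__(self, interval=0.001):
        self.interval = interval
        self.ticks = 0

    def tick(self):
        # Stand-in for one frame of processing
        self.ticks += 1
        # Re-arm a one-shot timer instead of sleeping in a loop
        if self.ticks < 5:
            threading.Timer(self.interval, self.tick).start()

p = Poller()
p.tick()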