def __init__(self, camera_height=None, hand=None, skeleton=None, depth_map=None, image=None, type=None, distance=None, target=None): self.settings = Settings() self.utils = Utils() # Initialise each attributes with respective parameters; otherwise with a default value if camera_height is None: camera_height = 1500 self.camera_height = camera_height if hand is None: hand = self.settings.LEFT_HAND self.hand = hand if skeleton is None: skeleton = { "head": [], "shoulder": { "left": [], "right": [], "center": [] }, "elbow": { "left": [], "right": [] }, "hand": { "left": [], "right": [] } } self.skeleton = skeleton if depth_map is None: depth_map = [] self.depth_map = np.array(depth_map) if image is None: image = "" self.image = image if type is None: type = Dataset.TYPE_POSITIVE self.type = type if distance is None: distance = Dataset.DISTANCE_550 self.distance = distance if target is None: target = [] self.target = target
def run_game(): ''' Inicjalizacja gry i utworzenie ekranu ''' pygame.init() kk_settings = Settings() screen = pygame.display.set_mode( (kk_settings.screen_width, kk_settings.screen_height)) pygame.display.set_caption('Kołko i krzyżyk') rectangles = Group() circles = Group() crosses = Group() # utworzenie menu gry functions.create_menu(screen, kk_settings) #utworzenie prostokatów planszy functions.create_rectangles(kk_settings, screen, rectangles) #rozpoczęcie pętli głównej gry while kk_settings.game_running: #sprawdzanie myszki i klawiatury functions.check_events(rectangles, screen, circles, kk_settings, crosses) #odwiezenie ekranu po przejsciu kazdej iteracji functions.update_screen(kk_settings, screen, rectangles, circles, crosses)
def __init__(self): # Retrieve all settings self.settings = Settings() # Get the context and initialise it self.context = Context() self.context.init() # Create the depth generator to get the depth map of the scene self.depth = DepthGenerator() self.depth.create(self.context) self.depth.set_resolution_preset(RES_VGA) self.depth.fps = 30 # Create the user generator to detect skeletons self.user = UserGenerator() self.user.create(self.context) # Initialise the skeleton tracking skeleton.init(self.user) # Start generating self.context.start_generating_all() # Create a new dataset item self.data = LiveDataset() self.data.hand = self.settings.BOTH_HAND # Update the frame Timer(0.001, self.updateImage, ()).start()
def initialise_game(): """This function initialises the game""" pygame.init() pygame.key.set_repeat(250, 30) pygame.display.set_caption("Spacegame") settings = Settings() return settings, pygame.display.set_mode( (settings.settings["Resolution"]["X"], settings.settings["Resolution"]["Y"]))
def __init__(self): super(LiveGui, self).__init__() self.setWindowTitle("Pointing Gesture Recognition - Live") # Retrieve all settings self.settings = Settings() # Get the context and initialise it self.context = Context() self.context.init() # Create the depth generator to get the depth map of the scene self.depth = DepthGenerator() self.depth.create(self.context) self.depth.set_resolution_preset(RES_VGA) self.depth.fps = 30 # Create the user generator to detect skeletons self.user = UserGenerator() self.user.create(self.context) # Initialise the skeleton tracking skeleton.init(self.user) # Start generating self.context.start_generating_all() print "Starting to detect users.." # Create a new dataset item self.data = LiveDataset() # Create the global layout self.layout = QtWidgets.QVBoxLayout(self) # Create custom widgets to hold sensor's images self.depthImage = SensorWidget() self.depthImage.setGeometry(10, 10, 640, 480) # Add these custom widgets to the global layout self.layout.addWidget(self.depthImage) # Set the default result text self.resultLabel = QtWidgets.QLabel() self.resultLabel.setText("No") # Create the acquisition form elements self.createAcquisitionForm() # Create and launch a timer to update the images self.timerScreen = QtCore.QTimer() self.timerScreen.setInterval(30) self.timerScreen.setSingleShot(True) self.timerScreen.timeout.connect(self.updateImage) self.timerScreen.start()
def generateSettings(settingsConfig): """ Helper function to decouple the argument parsing from the Settings object creation Input:: settingsConfig - list of ('key', 'value') tuples. workdir - string i.e. '/nas/run/rcmet/work/' cachedir - string i.e. '/tmp/rcmet/cache/' Output:: settings - Settings Object """ # Setup the config Data Dictionary to make parsing easier later configData = {} for entry in settingsConfig: configData[entry[0]] = entry[1] return Settings(**configData)
class Dataset: TYPE_POSITIVE = 0 TYPE_NEGATIVE = 1 TYPE_ACCURACY = 2 TYPE_HEATMAP = 3 LEFT_HAND = 0 RIGHT_HAND = 1 NO_HAND = 2 DISTANCE_550 = 0 DISTANCE_750 = 4 DISTANCE_1000 = 1 DISTANCE_1250 = 5 DISTANCE_1500 = 2 DISTANCE_1750 = 6 DISTANCE_2000 = 3 # Constructor of the Dataset class # # @param camera_height Value of the camera height while gathering informations # @param hand Identifier of the hand (Ø|1|2) # @param skeleton Skeletal joints of the detected subject # @param depth_map Depth map of the captured scene # @param image RGB image of the captured scene # @param type Identifier of the type of data (0|1|2|3) # @param distance Identifier of the type of distance chosen (0|1|2|3|4|5|6) or the actual distance between the fingertip and the target # @param target Coordinates of the target # @return None def __init__(self, camera_height=None, hand=None, skeleton=None, depth_map=None, image=None, type=None, distance=None, target=None): self.settings = Settings() self.utils = Utils() # Initialise each attributes with respective parameters; otherwise with a default value if camera_height is None: camera_height = 1500 self.camera_height = camera_height if hand is None: hand = self.settings.LEFT_HAND self.hand = hand if skeleton is None: skeleton = { "head": [], "shoulder": { "left": [], "right": [], "center": [] }, "elbow": { "left": [], "right": [] }, "hand": { "left": [], "right": [] } } self.skeleton = skeleton if depth_map is None: depth_map = [] self.depth_map = np.array(depth_map) if image is None: image = "" self.image = image if type is None: type = Dataset.TYPE_POSITIVE self.type = type if distance is None: distance = Dataset.DISTANCE_550 self.distance = distance if target is None: target = [] self.target = target # Returns a JSON encoded string of the dataset object # # @param None # @return string JSON encoded string of the dataset object def to_JSON(self): # Convert the depth map to a serializable state self.depth_map = self.depth_map.tolist() # Encode the RGB image in a base64 string self.image = self.utils.getBase64(self.image) # Get rid of extra attributes to clean the output obj = deepcopy(self) del obj.settings del obj.utils return json.dumps(obj, default=lambda o: o.__dict__, separators=(',', ':')) # Save the dataset informations as a file # # @param None # @return None def save(self): print "Saving dataset informations..." # Save the dataset to the right folder if self.type == Dataset.TYPE_POSITIVE: filename = self.settings.getPositiveFolder() elif self.type == Dataset.TYPE_NEGATIVE: filename = self.settings.getNegativeFolder() elif self.type == Dataset.TYPE_ACCURACY: filename = self.settings.getAccuracyFolder() else: raise ValueError("Invalid type of dataset to save", self.type) # Retrieve the number of files saved so far # Be careful that due to the sample file, the counter does not need to be incremented. 
Otherwise, the files would replace each others filename += str(self.utils.getFileNumberInFolder(filename)).zfill(3)+".json" self.utils.dumpJsonToFile(self.to_JSON(), filename) # Toggle the type identifier of the dataset # # @param value Identifier of the new type of the dataset # @return None def toggleType(self, value): self.type = value print "type toggled to {0}".format(value) # Toggle the distance identifier of the dataset # # @param value Identifier of the new distance of the dataset # @return None def toggleDistance(self, value): self.distance = value print "distance toggled to {0}".format(value) # Update the distance of the dataset # # @param value Distance value # @return None def setDistance(self, value): self.distance = value print "distance changed to {0}".format(value) # Toggle the hand identifier of the dataset # # @param value Identifier of the new hand of the dataset # @return None def toggleHand(self, value): self.hand = value print "hand toggled" # Returns the actual distance # # @param None # @return numeric Actual distance value (translated if identifier) def getWishedDistance(self): if self.distance == Dataset.DISTANCE_550: return 550 elif self.distance == Dataset.DISTANCE_750: return 750 elif self.distance == Dataset.DISTANCE_1000: return 1000 elif self.distance == Dataset.DISTANCE_1250: return 1250 elif self.distance == Dataset.DISTANCE_1500: return 1500 elif self.distance == Dataset.DISTANCE_1750: return 1750 elif self.distance == Dataset.DISTANCE_2000: return 2000 else: return self.distance
def run_game(): pg.init() bullet_sound = pg.mixer.Sound("Bullet.wav") pg.mixer.music.load("music.wav") AI = Settings() stats = Gamestats(AI) # to store the bullets alien_bullets_shoot = Group() bullets = Group() aliens = Group() screen = pg.display.set_mode((AI.screen_width, AI.screen_height)) pg.display.set_caption("Space Wars") print(str(AI.screen_width) + " " + str(AI.screen_height)) my_ship = ship(AI, screen, "my_ship.png", AI.screen_width / 2, AI.screen_height - 30) wallpaper = ship(AI, screen, "space.png", AI.screen_width / 2, AI.screen_height) # To make a play button play_button = Button(AI, screen, "Play", AI.screen_width / 2 - 50, AI.screen_height / 2 - 150, 42, (0, 0, 255), (0, 255, 0), 0) exit_button = Button(AI, screen, "Exit", AI.screen_width / 2 - 50, AI.screen_height / 2 + 50, 42, (0, 0, 255), (0, 255, 0), 0) scoreboard = Button(AI, screen, "Leaderboard", AI.screen_width / 2 - 50, AI.screen_height / 2 - 50, 40, (0, 0, 255), (0, 255, 0), 50) GameOver = Button(AI, screen, "Game over", AI.screen_width / 2 - 50, AI.screen_height / 2, 50, (0, 0, 0), (0, 255, 0), 0) bullet = Bullet(AI, screen, my_ship) no_aliens = f.find_no_aliens(AI, screen, "alienship.png") temp = 0 x = [] count = 0 tmp = random.randint(8, no_aliens) for c in range(tmp): x.append(c + 1) random.shuffle(x) pg.mixer.music.play(-1) while True: with open("file.txt") as score: last_score = score.read() if stats.ship_left > 0 and stats.game_active: count = 1 if len(aliens) == 0: bullets.empty() if stats.game_active: AI.increase_speed() AI.score_increase += 2 AI.player_level += 1 temp = 0 x = [] tmp = random.randint(no_aliens - 4, no_aliens) for c in range(tmp): x.append(c + 1) random.shuffle(x) f.event_check(AI, screen, my_ship, bullets, stats, play_button, aliens, bullets, bullet_sound, exit_button) if stats.game_active: if temp < tmp: temp = f.create_fleet(AI, screen, aliens, "alienship.png", no_aliens, temp, x) my_ship.update() f.bullets_collision_check(AI, aliens, bullets) f.update_screen(AI, screen, my_ship, bullets, aliens, stats, "my_ship.png", alien_bullets_shoot, last_score) delete_bullets(bullet, bullets) f.display_aliens(screen, AI, aliens, my_ship, stats, bullets) if not stats.game_active: wallpaper.blitme() play_button.draw_button() exit_button.draw_button() scoreboard.draw_button() pg.display.update() else: if count == 1: c = 5 if int(last_score) < int(AI.player_score): with open("file.txt", 'w') as file_object: file_object.write(str(AI.player_score)) pg.mixer.music.pause() while c > 0: screen.fill((0, 0, 0)) msg = "Re-directing to main menu in....... " + str(c) new_game = Button(AI, screen, msg, AI.screen_width / 2 - 100, 30, 25, (0, 0, 0), (255, 0, 0), 0) new_game.draw_button() GameOver.draw_button() pg.display.update() time.sleep(1) c -= 1 count = 0 elif count == 0: wallpaper.blitme() play_button.draw_button() exit_button.draw_button() scoreboard.draw_button() pg.display.update() stats.game_active = False f.event_check(AI, screen, my_ship, bullets, stats, play_button, aliens, bullets, bullet_sound, exit_button) pg.mixer.music.unpause() pg.display.flip()
def __init__(self): super(DatasetGui, self).__init__() self.setWindowTitle("Pointing Gesture Recognition - Dataset recording") # Retrieve all settings self.settings = Settings() # Load sounds self.countdownSound = QtMultimedia.QSound( self.settings.getResourceFolder() + "countdown.wav") self.countdownEndedSound = QtMultimedia.QSound( self.settings.getResourceFolder() + "countdown-ended.wav") # Get the context and initialise it self.context = Context() self.context.init() # Create the depth generator to get the depth map of the scene self.depth = DepthGenerator() self.depth.create(self.context) self.depth.set_resolution_preset(RES_VGA) self.depth.fps = 30 # Create the image generator to get an RGB image of the scene self.image = ImageGenerator() self.image.create(self.context) self.image.set_resolution_preset(RES_VGA) self.image.fps = 30 # Create the user generator to detect skeletons self.user = UserGenerator() self.user.create(self.context) # Initialise the skeleton tracking skeleton.init(self.user) # Start generating self.context.start_generating_all() print "Starting to detect users.." # Create a new dataset item self.data = Dataset() # Create a timer for an eventual countdown before recording the data self.countdownTimer = QtCore.QTimer() self.countdownRemaining = 10 self.countdownTimer.setInterval(1000) self.countdownTimer.setSingleShot(True) self.countdownTimer.timeout.connect(self.recordCountdown) # Create a timer to eventually record data for a heat map self.heatmapRunning = False self.heatmapTimer = QtCore.QTimer() self.heatmapTimer.setInterval(10) self.heatmapTimer.setSingleShot(True) self.heatmapTimer.timeout.connect(self.recordHeatmap) # Create the global layout self.layout = QtWidgets.QVBoxLayout(self) # Create custom widgets to hold sensor's images self.depthImage = SensorWidget() self.depthImage.setGeometry(10, 10, 640, 480) # Add these custom widgets to the global layout self.layout.addWidget(self.depthImage) # Hold the label indicating the number of dataset taken self.numberLabel = QtWidgets.QLabel() self.updateDatasetNumberLabel() # Create the acquisition form elements self.createAcquisitionForm() # Register a dialog window to prompt the target position self.dialogWindow = DatasetDialog(self) # Allow to save the data when the right distance is reached self.recordIfReady = False # Create and launch a timer to update the images self.timerScreen = QtCore.QTimer() self.timerScreen.setInterval(30) self.timerScreen.setSingleShot(True) self.timerScreen.timeout.connect(self.updateImage) self.timerScreen.start()
class Validating(): # Load required classes bpn = BPNHandler(True) datasetManager = DatasetManager() featureExtractor = FeatureExtractor() settings = Settings() utils = Utils() # Evaluate the complete dataset # # @param type Type of dataset to be evaluated # @return None def complete(self, type): positiveValidating = self.datasetManager.getPositiveCompleteMixed(type) negativeValidating = self.datasetManager.getMainNegative(type) # run the network self.run(positiveValidating, negativeValidating) # Evaluate the restrained dataset # # @param type Type of dataset to be evaluated # @return None def restrained(self, type): positiveValidating = self.datasetManager.getPositiveRestrainedMixed(type) negativeValidating = self.datasetManager.getNegativeMainRestrained(type) # run the network self.run(positiveValidating, negativeValidating) # Evaluate the given informations # # @param positiveValidating Array of all positive files to process # @param negativeValidating Array of all negative files to process # @param getData Flag to retrieve the data in order to bypass a future loading # @return None def run(self, positiveValidating, negativeValidating, getData=False): # Load all dataset files positive = self.datasetManager.loadDataset(positiveValidating) negative = self.datasetManager.loadDataset(negativeValidating) # Process all features print "Processing features..." positiveInput = [] for data in positive: positiveInput.extend(self.featureExtractor.getFeatures(data)) negativeInput = [] for data in negative: negativeInput.extend(self.featureExtractor.getFeatures(data)) # Check if we need to print the data or run the network if getData: self.utils.getPythonInitCode(positiveInput, "positiveInput") self.utils.getPythonInitCode(negativeInput, "negativeInput") else: # Run the validation against the network if len(positiveInput)>0: print "Positive validation" goodPositive = 0 badPositive = 0 count = 0 for positive in positiveInput: result = self.bpn.check([positive]) if result[0] == False: badPositive += 1 print("{0} is erroneous".format(count)) else: goodPositive += 1 count += 1 print print "{0} corrects and {1} bad --> {2:0.2f}%".format(goodPositive, badPositive, (goodPositive/float(goodPositive+badPositive)*100)) print if len(negativeInput)>0: print "Negative validation" goodNegative = 0 badNegative = 0 count = 0 for negative in negativeInput: result = self.bpn.check([negative]) if result[0] == True: badNegative += 1 print("{0} is erroneous".format(count)) else: goodNegative += 1 count += 1 print print "{0} corrects and {1} bad --> {2:0.2f}%".format(goodNegative, badNegative, (goodNegative/float(goodNegative+badNegative)*100)) print "Final score = {0:0.2f}%".format(((goodPositive+goodNegative)/float(goodPositive+badPositive+goodNegative+badNegative))*100) if len(positiveInput)==0 and len(negativeInput)==0: print "No input to validate..."
class DatasetGui(QtWidgets.QWidget): utils = Utils() featureExtractor = FeatureExtractor() bpn = BPNHandler(True) accuracy = accuracy.Accuracy() # Constructor of the DatasetGui class # # @param None # @return None def __init__(self): super(DatasetGui, self).__init__() self.setWindowTitle("Pointing Gesture Recognition - Dataset recording") # Retrieve all settings self.settings = Settings() # Load sounds self.countdownSound = QtMultimedia.QSound(self.settings.getResourceFolder()+"countdown.wav") self.countdownEndedSound = QtMultimedia.QSound(self.settings.getResourceFolder()+"countdown-ended.wav") # Get the context and initialise it self.context = Context() self.context.init() # Create the depth generator to get the depth map of the scene self.depth = DepthGenerator() self.depth.create(self.context) self.depth.set_resolution_preset(RES_VGA) self.depth.fps = 30 # Create the image generator to get an RGB image of the scene self.image = ImageGenerator() self.image.create(self.context) self.image.set_resolution_preset(RES_VGA) self.image.fps = 30 # Create the user generator to detect skeletons self.user = UserGenerator() self.user.create(self.context) # Initialise the skeleton tracking skeleton.init(self.user) # Start generating self.context.start_generating_all() print "Starting to detect users.." # Create a new dataset item self.data = Dataset() # Create a timer for an eventual countdown before recording the data self.countdownTimer = QtCore.QTimer() self.countdownRemaining = 10 self.countdownTimer.setInterval(1000) self.countdownTimer.setSingleShot(True) self.countdownTimer.timeout.connect(self.recordCountdown) # Create a timer to eventually record data for a heat map self.heatmapRunning = False self.heatmapTimer = QtCore.QTimer() self.heatmapTimer.setInterval(10) self.heatmapTimer.setSingleShot(True) self.heatmapTimer.timeout.connect(self.recordHeatmap) # Create the global layout self.layout = QtWidgets.QVBoxLayout(self) # Create custom widgets to hold sensor's images self.depthImage = SensorWidget() self.depthImage.setGeometry(10, 10, 640, 480) # Add these custom widgets to the global layout self.layout.addWidget(self.depthImage) # Hold the label indicating the number of dataset taken self.numberLabel = QtWidgets.QLabel() self.updateDatasetNumberLabel() # Create the acquisition form elements self.createAcquisitionForm() # Register a dialog window to prompt the target position self.dialogWindow = DatasetDialog(self) # Allow to save the data when the right distance is reached self.recordIfReady = False # Create and launch a timer to update the images self.timerScreen = QtCore.QTimer() self.timerScreen.setInterval(30) self.timerScreen.setSingleShot(True) self.timerScreen.timeout.connect(self.updateImage) self.timerScreen.start() # Update the depth image displayed within the main window # # @param None # @return None def updateImage(self): # Update to next frame self.context.wait_and_update_all() # Extract informations of each tracked user self.data = skeleton.track(self.user, self.depth, self.data) # Get the whole depth map self.data.depth_map = np.asarray(self.depth.get_tuple_depth_map()).reshape(480, 640) # Create the frame from the raw depth map string and convert it to RGB frame = np.fromstring(self.depth.get_raw_depth_map_8(), np.uint8).reshape(480, 640) frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB) # Get the RGB image of the scene self.data.image = np.fromstring(self.image.get_raw_image_map_bgr(), dtype=np.uint8).reshape(480, 640, 3) # Will be used to specify the depth of the current 
hand wished currentDepth, showCurrentDepth = 0, "" if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0: # Highlight the head ui.drawPoint(frame, self.data.skeleton["head"][0], self.data.skeleton["head"][1], 5) # Display lines from elbows to the respective hands ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"], self.data.skeleton["hand"]["left"]) ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"], self.data.skeleton["hand"]["right"]) # Get the pixel's depth from the coordinates of the hands leftPixel = self.utils.getDepthFromMap(self.data.depth_map, self.data.skeleton["hand"]["left"]) rightPixel = self.utils.getDepthFromMap(self.data.depth_map, self.data.skeleton["hand"]["right"]) if self.data.hand == self.settings.LEFT_HAND: currentDepth = leftPixel elif self.data.hand == self.settings.RIGHT_HAND: currentDepth = rightPixel # Get the shift of the boundaries around both hands leftShift = self.utils.getHandBoundShift(leftPixel) rightShift = self.utils.getHandBoundShift(rightPixel) # Display a rectangle around both hands ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (50, 100, 255)) ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30)) # Record the current data if the user is ready if self.recordIfReady: cv2.putText(frame, str(self.data.getWishedDistance()), (470, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (252, 63, 253), 5) if self.data.getWishedDistance()>=int(currentDepth)-10 and self.data.getWishedDistance()<=int(currentDepth)+10: self.record([]) self.recordIfReady = False else: if int(currentDepth)<self.data.getWishedDistance(): showCurrentDepth = str(currentDepth)+" +" else: showCurrentDepth = str(currentDepth)+" -" else: showCurrentDepth = str(currentDepth) cv2.putText(frame, showCurrentDepth, (5, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 100, 255), 5) # Update the frame self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame)) self.timerScreen.start() # Update the label indicating the number of dataset elements saved so far for the current type # # @param None # @return None def updateDatasetNumberLabel(self): if self.data.type == Dataset.TYPE_POSITIVE: self.numberLabel.setText("Dataset #%d" % (self.utils.getFileNumberInFolder(self.settings.getPositiveFolder()))) elif self.data.type == Dataset.TYPE_NEGATIVE: self.numberLabel.setText("Dataset #%d" % (self.utils.getFileNumberInFolder(self.settings.getNegativeFolder()))) elif self.data.type == Dataset.TYPE_ACCURACY: self.numberLabel.setText("Dataset #%d" % (self.utils.getFileNumberInFolder(self.settings.getAccuracyFolder()))) else: self.numberLabel.setText("Dataset #%d" % (self.utils.getFileNumberInFolder(self.settings.getDatasetFolder()))) # Record the actual informations # # @param obj Initiator of the event # @return None def record(self, obj): # If the user collects data to check accuracy, prompts additional informations if self.data.type == Dataset.TYPE_ACCURACY: self.saveForTarget() # If the user collects data for a heat map, let's do it elif self.data.type == Dataset.TYPE_HEATMAP: # The same button will be used to stop recording if not self.heatmapRunning: self.startRecordHeatmap() else: self.stopRecordHeatmap() else: # Directly save the dataset and update the label number self.data.save() self.countdownEndedSound.play() self.updateDatasetNumberLabel() # Handle a countdown as a mean to record the informations with a delay # # @param None # @return None def recordCountdown(self): # Decrease the countdown and check if it needs to 
continue self.countdownRemaining -= 1 if self.countdownRemaining <= 0: # Re-initialise the timer and record the data self.countdownTimer.stop() self.countdownButton.setText("Saving..") self.countdownRemaining = 10 self.record([]) else: self.countdownTimer.start() self.countdownSound.play() # Display the actual reminaining self.countdownButton.setText("Save in %ds"%(self.countdownRemaining)) # Record a heatmap representation of the informations by successive captures # # @param None # @return None def recordHeatmap(self): if self.data.hand == self.settings.NO_HAND: print "Unable to record as no hand is selected" return False if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0: # Input the data into the feature extractor result = self.bpn.check(self.featureExtractor.getFeatures(self.data)) # Add the depth of the finger tip point = self.featureExtractor.fingerTip[result[1]] point.append(self.utils.getDepthFromMap(self.data.depth_map, point)) # Verify that informations are correct if point[0]!=0 and point[1]!=0 and point[2]!=0: # Add the result of the neural network point.append(result[0]) self.heatmap.append(point) self.countdownSound.play() # Loop timer self.heatmapTimer.start() # Start the recording of the heatmap # # @param None # @return None def startRecordHeatmap(self): self.saveButton.setText("Stop recording") self.heatmapRunning = True self.heatmapTimer.start() # Stop the recording of the heatmap # # @param None # @return None def stopRecordHeatmap(self): self.heatmapTimer.stop() self.heatmapRunning = False self.countdownEndedSound.play() self.saveButton.setText("Record") self.accuracy.showHeatmap(self.heatmap, "front") self.heatmap = [] # Raise a flag to record the informations when the chosen distance will be met # # @param None # @return None def startRecordWhenReady(self): self.recordIfReady = True # Hold the current informations to indicate the position of the target thanks to the dialog window # # @param None # @return None def saveForTarget(self): # Freeze the data self.timerScreen.stop() self.countdownEndedSound.play() # Translate the depth values to a frame and set it in the dialog window frame = np.fromstring(self.depth.get_raw_depth_map_8(), np.uint8).reshape(480, 640) frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB) self.dialogWindow.setFrame(frame) # Prompt the position of the target self.dialogWindow.exec_() # Toggle the type of dataset chosen # # @param value Identifier of the new type of dataset # @return None def toggleType(self, value): self.data.toggleType(value) if value == self.data.TYPE_HEATMAP: self.saveButton.setText("Record") self.countdownButton.setText("Record in %ds"%(self.countdownRemaining)) self.readyButton.setEnabled(False) # Create an array to hold all points self.heatmap = [] else: self.updateDatasetNumberLabel() if hasattr(self, 'saveButton'): self.saveButton.setText("Save") self.countdownButton.setText("Save in %ds"%(self.countdownRemaining)) self.readyButton.setEnabled(True) # Create the acquisition form of the main window # # @param None # @return None def createAcquisitionForm(self): globalLayout = QtWidgets.QHBoxLayout() vlayout = QtWidgets.QVBoxLayout() # Drop down menu of the distance to record the informations when the pointing hand meet the corresponding value hlayout = QtWidgets.QHBoxLayout() label = QtWidgets.QLabel("Distance") label.setFixedWidth(100) comboBox = QtWidgets.QComboBox() comboBox.currentIndexChanged.connect(self.data.toggleDistance) comboBox.setFixedWidth(200) comboBox.addItem("550") comboBox.addItem("750") 
comboBox.addItem("1000") comboBox.addItem("1250") comboBox.addItem("1500") comboBox.addItem("1750") comboBox.addItem("2000") comboBox.setCurrentIndex(0) hlayout.addWidget(label) hlayout.addWidget(comboBox) vlayout.addLayout(hlayout) # Drop down menu to select the type of hand of the dataset hlayout = QtWidgets.QHBoxLayout() label = QtWidgets.QLabel("Pointing hand") label.setFixedWidth(100) comboBox = QtWidgets.QComboBox() comboBox.currentIndexChanged.connect(self.data.toggleHand) comboBox.setFixedWidth(200) comboBox.addItem("Left") comboBox.addItem("Right") comboBox.addItem("None") comboBox.setCurrentIndex(0) hlayout.addWidget(label) hlayout.addWidget(comboBox) vlayout.addLayout(hlayout) # Drop down menu of the dataset type hlayout = QtWidgets.QHBoxLayout() label = QtWidgets.QLabel("Type") label.setFixedWidth(100) comboBox = QtWidgets.QComboBox() comboBox.currentIndexChanged.connect(self.toggleType) comboBox.setFixedWidth(200) comboBox.addItem("Positive") comboBox.addItem("Negative") comboBox.addItem("Accuracy") comboBox.addItem("Heat map") comboBox.setCurrentIndex(0) hlayout.addWidget(label) hlayout.addWidget(comboBox) vlayout.addLayout(hlayout) globalLayout.addLayout(vlayout) vlayout = QtWidgets.QVBoxLayout() self.numberLabel.setAlignment(QtCore.Qt.AlignCenter) vlayout.addWidget(self.numberLabel) # Action buttons to record the way that suits the most hLayout = QtWidgets.QHBoxLayout() self.readyButton = QtWidgets.QPushButton('Save when ready', clicked=self.startRecordWhenReady) self.saveButton = QtWidgets.QPushButton('Save', clicked=self.record) hLayout.addWidget(self.readyButton) vlayout.addLayout(hLayout) item_layout = QtWidgets.QHBoxLayout() self.countdownButton = QtWidgets.QPushButton("Save in %ds"%(self.countdownRemaining), clicked=self.countdownTimer.start) self.saveButton = QtWidgets.QPushButton('Save', clicked=self.record) item_layout.addWidget(self.countdownButton) item_layout.addWidget(self.saveButton) vlayout.addLayout(item_layout) globalLayout.addLayout(vlayout) self.layout.addLayout(globalLayout)
class Accuracy: bpn = BPNHandler(True) datasetManager = DatasetManager() featureExtractor = FeatureExtractor() settings = Settings() trigonometry = Trigonometry() utils = Utils() expectedRadius = 2000 direction = [ "back-right", "right", "front-right", "front", "front-left", "left", "back-left" ] # Evaluate the pointed direction and display the average distance and angle # # @param None # @return None def processedPointedDirection(self): dataset = self.datasetManager.loadDataset( self.datasetManager.getAccuracyComplete()) outputDistance = [] outputAngle = [] outputAngleCamera = [] outputDistanceAt2m = [] for data in dataset: features = self.featureExtractor.getFeatures(data) depthMap = data.depth_map targetCoordinates = data.target fingerTipCoordinates = self.featureExtractor.fingerTip[0] fingerTipCoordinates.append( self.utils.getDepthFromMap(depthMap, fingerTipCoordinates)) eyeCoordinates = self.featureExtractor.eyePosition[0] eyeCoordinates.append( self.utils.getDepthFromMap(depthMap, eyeCoordinates)) # Retrieve the distance between the actual target and the closest impact distance = self.trigonometry.findIntersectionDistance( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) if distance == None: print "Missed..." else: outputDistance.append(distance) # Retrieve the distance between the target and the fingertip: targetDistance = float(data.distance) # Calculate the error angles angle = math.degrees(math.asin(distance / targetDistance)) outputAngle.append(angle) angleCamera = math.degrees( math.asin(distance / targetCoordinates[2])) outputAngleCamera.append(angleCamera) distanceAt2m = math.asin(distance / targetDistance) * 2000 outputDistanceAt2m.append(distanceAt2m) print "--- Impact distance: {0:0.1f} mm\t Impact at 2m: {1:0.1f}\t Error angle (fingertip): {2:0.1f} deg\t Error angle (camera): {3:0.1f} deg".format( distance, distanceAt2m, angle, angleCamera) print "---\n--- Average impact distance of {0:0.1f} mm.".format( np.average(outputDistance)) print "--- Average impact distance at 2 m of {0:0.1f} mm.".format( np.average(outputDistanceAt2m)) print "--- Average eror angle of {0:0.1f} deg at the fingertip.".format( np.average(outputAngle)) print "--- Average eror angle of {0:0.1f} deg at the camera.".format( np.average(outputAngleCamera)) # Evaluate the pointed direction by category and display the average distance and angle # # @param None # @return None def processedPointedDirectionByCategory(self): datasets = self.datasetManager.getAccuracyComplete() # Load all categories separately dataset = [] for data in datasets: dataset.append(self.datasetManager.loadDataset([data])) for category in range(len(dataset)): outputDistance = [] outputAngle = [] outputAngleCamera = [] outputDistanceAt2m = [] print "\n--- {0}".format(self.direction[category]) for data in dataset[category]: features = self.featureExtractor.getFeatures(data) depthMap = data.depth_map targetCoordinates = data.target fingerTipCoordinates = self.featureExtractor.fingerTip[0] fingerTipCoordinates.append( self.utils.getDepthFromMap(depthMap, fingerTipCoordinates)) eyeCoordinates = self.featureExtractor.eyePosition[0] eyeCoordinates.append( self.utils.getDepthFromMap(depthMap, eyeCoordinates)) # Retrieve the distance between the actual target and the closest impact distance = self.trigonometry.findIntersectionDistance( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) if distance == None: print "Missed..." 
else: outputDistance.append(distance) # Retrieve the distance between the target and the fingertip: targetDistance = float(data.distance) # Calculate the error angles angle = math.degrees(math.asin(distance / targetDistance)) outputAngle.append(angle) angleCamera = math.degrees( math.asin(distance / targetCoordinates[2])) outputAngleCamera.append(angleCamera) distanceAt2m = math.asin(distance / targetDistance) * 2000 outputDistanceAt2m.append(distanceAt2m) print "--- Impact distance: {0:0.1f} mm\t Impact at 2m: {1:0.1f}\t Error angle (fingertip): {2:0.1f} deg\t Error angle (camera): {3:0.1f} deg".format( distance, distanceAt2m, angle, angleCamera) print "---\n--- Average impact distance of {0:0.1f} mm.".format( np.average(outputDistance)) print "--- Average impact distance at 2 m of {0:0.1f} mm.".format( np.average(outputDistanceAt2m)) print "--- Average eror angle of {0:0.1f} deg at the fingertip.".format( np.average(outputAngle)) print "--- Average eror angle of {0:0.1f} deg at the camera.".format( np.average(outputAngleCamera)) # Draw a graphic with centered trajectories' origins # # @param None # @return None def drawUnifiedTrajectories(self): # Load the dataset dataset = self.datasetManager.loadDataset( self.datasetManager.getAccuracyComplete()) # Create the scene fig = plt.figure() ax = fig.gca(projection='3d') ax.set_aspect("equal") ax.set_xlabel('X (horizontal)') ax.set_ylabel('Y (vertical)') ax.set_zlabel('Z (depth)') for data in dataset: result = self.featureExtractor.getFeatures(data) # Processed data fingerTipCoordinates = self.featureExtractor.fingerTip[0] eyeCoordinates = self.featureExtractor.eyePosition[0] targetCoordinates = data.target depthMap = data.depth_map fingerTipCoordinates.append( self.utils.getDepthFromMap(depthMap, fingerTipCoordinates)) eyeCoordinates.append( self.utils.getDepthFromMap(depthMap, eyeCoordinates)) closest = self.trigonometry.findIntersection( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) if closest != None: x = [ fingerTipCoordinates[0] - targetCoordinates[0], closest[0] - targetCoordinates[0] ] y = [ fingerTipCoordinates[1] - targetCoordinates[1], closest[1] - targetCoordinates[1] ] z = [ fingerTipCoordinates[2] - targetCoordinates[2], closest[2] - targetCoordinates[2] ] # Draw the trajectory ax.plot(x, y, z) # Draw the target point ax.scatter(0, 0, 0, c="#000000", marker="o", s=2000) plt.show() # Draw a 3D graphic with the closests impacts # # @param None # @return None def drawImpacts(self): # Load the dataset dataset = self.datasetManager.loadDataset( self.datasetManager.getAccuracyComplete()) # Create the scene fig = plt.figure() ax = fig.gca(projection='3d') ax.set_aspect("equal") ax.set_xlabel('X (horizontal in mm)') ax.set_ylabel('Y (vertical in mm)') ax.set_zlabel('Z (depth in mm)') colorConverter = ColorConverter() for data in dataset: result = self.featureExtractor.getFeatures(data) # Processed data fingerTipCoordinates = self.featureExtractor.fingerTip[0] eyeCoordinates = self.featureExtractor.eyePosition[0] targetCoordinates = data.target depthMap = data.depth_map fingerTipCoordinates.append( self.utils.getDepthFromMap(depthMap, fingerTipCoordinates)) eyeCoordinates.append( self.utils.getDepthFromMap(depthMap, eyeCoordinates)) closest = self.trigonometry.findIntersection( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) if closest != None: x = closest[0] - targetCoordinates[0] y = closest[1] - targetCoordinates[1] z = closest[2] - targetCoordinates[2] distance = 
self.trigonometry.findIntersectionDistance( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) red = 1 - (distance / 200) if red < 0: red = 0 elif red > 1: red = 1 blue = 0 + (distance / 200) if blue < 0: blue = 0 elif blue > 1: blue = 1 cc = colorConverter.to_rgba((red, 0, blue), 0.4) # Draw the impact point ax.scatter(x, y, z, color=cc, marker="o", s=50) # Draw the target point ax.scatter(0, 0, 0, c="#000000", marker="o", color="#000000", s=100) plt.show() # Draw a 2D graphic with the closests impacts # # @param x Flag to display the horizontal axis # @param y Flag to display the vertical axis # @param z Flag to display the depth axis # @return None def drawImpacts2D(self, x=True, y=True, z=False): # Load the dataset dataset = self.datasetManager.loadDataset( self.datasetManager.getAccuracyComplete()) plt.axis("equal") colorConverter = ColorConverter() for data in dataset: result = self.featureExtractor.getFeatures(data) # Processed data fingerTipCoordinates = self.featureExtractor.fingerTip[0] eyeCoordinates = self.featureExtractor.eyePosition[0] targetCoordinates = data.target depthMap = data.depth_map fingerTipCoordinates.append( self.utils.getDepthFromMap(depthMap, fingerTipCoordinates)) eyeCoordinates.append( self.utils.getDepthFromMap(depthMap, eyeCoordinates)) closest = self.trigonometry.findIntersection( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) if closest != None: x = closest[0] - targetCoordinates[0] y = closest[1] - targetCoordinates[1] z = closest[2] - targetCoordinates[2] distance = self.trigonometry.findIntersectionDistance( fingerTipCoordinates, eyeCoordinates, targetCoordinates, self.expectedRadius) red = 1 - (distance / 200) if red < 0: red = 0 elif red > 1: red = 1 blue = 0 + (distance / 200) if blue < 0: blue = 0 elif blue > 1: blue = 1 cc = colorConverter.to_rgba((red, 0, blue), 0.4) if not x: plt.scatter(y, z, color=cc, marker="o", s=50) elif not y: plt.scatter(x, z, color=cc, marker="o", s=50) else: plt.scatter(x, y, color=cc, marker="o", s=50) plt.show() # Draw a 3D heatmap from several recorded points # # @param points Array of all recorded points # @param blurred Flag to add a blurry effect to all points # @return numeric Depth of the given coordinates def createHeatmap(self, points, blurred=False): # Create the scene fig = plt.figure() ax = fig.gca(projection='3d') ax.set_aspect("equal") # Create axes ax.set_xlabel('X (horizontal in mm)') ax.set_ylabel('Y (vertical in mm)') ax.set_zlabel('Z (depth in mm)') points = np.array(points) # Retrieve extrem values maxX, maxY, maxZ, tmp = points.max(axis=0) minX, minY, minZ, tmp = points.min(axis=0) # Retrieve middle values midX = minX + (maxX - minX) / 2 midY = minY + (maxY - minY) / 2 midZ = minZ + (maxZ - minZ) / 2 # Draw center axis ax.plot([minX, maxX], [midY, midY], [midZ, midZ]) ax.plot([midX, midX], [minY, maxY], [midZ, midZ]) ax.plot([midX, midX], [midY, midY], [minZ, maxZ]) # Add points for point in points: print "[{0},{1},{2},{3}],".format(point[0], point[1], point[2], point[3]) # Add a blurr effect to points if needed if blurred: for i in np.arange(0.1, 1.01, 0.1): if point[3] == True: c = (1, 0, 0, 0.3 / i / 10) else: c = (0, 0, 1, 0.3 / i / 10) ax.scatter(point[0], point[1], point[2], s=(50 * i * (0.55))**2, color=c) else: if point[3] == True: c = (1, 0, 0, 0.3) else: c = (0, 0, 1, 0.3) ax.scatter(point[0], point[1], point[2], s=50, color=c) # Set the correct view #ax.view_init(azim=-128, elev=-163) ax.view_init(azim=-89, elev=-74) # Display the 
graph plt.show() # Draw a 2D heatmap from several recorded points # # @param points Array of all recorded points # @param view Point of view to display the proper axes # @param blurred Flag to add a blurry effect to all points # @return numeric Depth of the given coordinates def showHeatmap(self, points, view, blurred=True): if view != "top" and view != "front": raise ValueError("Invalid view.. Please specify 'top' or 'front'.", view) # Create the scene if view == "top": ax = plt.subplot(111, aspect=0.3) else: ax = plt.subplot(111, aspect=1) points = np.array(points) # Retrieve extrem values maxX, maxY, maxZ, tmp = points.max(axis=0) minX, minY, minZ, tmp = points.min(axis=0) margin = 50 ax.set_xlim([minX - margin, maxX + margin]) if view == "top": ax.set_ylim([minZ - margin, maxZ + margin]) else: ax.set_ylim([minY - margin, maxY + margin]) # Retrieve middle values midX = minX + (maxX - minX) / 2 midY = minY + (maxY - minY) / 2 midZ = minZ + (maxZ - minZ) / 2 # Draw center axis if view == "top": ax.plot([minX + (midX / 2), maxX - (midX / 2)], [midZ, midZ]) ax.plot([midX, midX], [minZ + (midZ / 2), maxZ - (midZ / 2)]) else: ax.plot([minX + (midX / 2), maxX - (midX / 2)], [midY, midY]) ax.plot([midX, midX], [minY + (midY / 2), maxY - (midY / 2)]) # Add points for point in points: print "[{0},{1},{2},{3}],".format(point[0], point[1], point[2], point[3]) # Add a blurr effect to points if needed if blurred: for i in np.arange(0.1, 1.01, 0.1): if point[3] == True: c = (1, 0, 0, 0.3 / i / 10) else: c = (0, 0, 1, 0.1 / i / 10) if view == "top": ax.scatter(point[0], point[2], s=(50 * i * (0.55))**2, color=c) else: ax.scatter(point[0], point[1], s=(50 * i * (0.55))**2, color=c) else: if point[3] == True: c = (1, 0, 0, 0.3) else: c = (0, 0, 1, 0.1) if view == "top": ax.scatter(point[0], point[2], s=50, color=c) else: ax.scatter(point[0], point[1], s=50, color=c) # Display the graph ax.invert_xaxis() ax.invert_yaxis() plt.show()
def initialize() -> None: """Starts a new game session. Class objects used in the session: Settings: .title: Session title .intro: Session intro .card_shuffle: Shuffle cards or not .feature1: Feature 1 parameters .feature2: Feature 2 parameters .feature3: Feature 3 parameters .feature4: Feature 4 parameters .win: Win conditions .lose: Lose conditions CardDeck: .cards: Current number of cards in the deck Card: .cta: Current card's Call To Action .left_action: Left action parameters .right_action: Right action parameters """ # Welcome screen clear() logging.debug("Initialization flow invoked") print("=" * 20) print("Welcome to Whyssk!") print("=" * 20 + "\n") # Ask for session settings file while True: filename = str(input("Please enter session settings file name (must be located inside sessions\\ folder):\n")) if not filename.endswith(".json"): filename += ".json" path = str(os.path.realpath(sys.argv[0])) + "\\sessions\\" + filename print(path) if not os.path.exists(path): print(f"File {filename} does not exist in sessions\\ folder!\n") else: print(f"File {filename} found! Importing...\n") break # Import settings from json file and create class instances logging.debug(f"Getting settings from {path}...") sett, feat, cond, cards = get_settings(path) global settings, feature1, feature2, feature3, feature4, win, lose, deck, features_total settings = Settings(sett) feature1 = Feature(feat['feature1']) feature2 = Feature(feat['feature2']) feature3 = Feature(feat['feature3']) feature4 = Feature(feat['feature4']) win = Condition(cond['win']) lose = Condition(cond['lose']) deck = CardDeck(cards) if win.out_of_cards == lose.out_of_cards: raise ValueError("Win and lose conditions should have opposite values for 'out of cards' parameter!") # Generate list of features that will be used in game for feature in [feature1, feature2, feature3, feature4]: if feature.name: features_total.append(feature) logging.debug(f"Successfully imported setting from {path}") print("Import successful!") input("Press ENTER to start the session!") clear()
class Relevancy(): # Load required classes datasetManager = DatasetManager() settings = Settings() utils = Utils() repartition = ["training", "testing", "validating"] direction = [ "back-right", "right", "front-right", "front", "front-left", "left", "back-left" ] orientation = ["up", "lateral", "down"] negativeType = ["closed", "opened", "four", "three", "peace", "rock"] # Returns the repartition between positive and negative files # # @param None # @return tuple Tuple of the repartition for positive and negative files def getRepartition(self): # Get detailed counts positive = {} negative = {} for repartition in self.repartition: positive[repartition] = {} negative[repartition] = {} for direction in self.direction: positive[repartition][direction] = {} negative[repartition][direction] = {} for orientation in self.orientation: positive[repartition][direction][ orientation] = self.getDetailedPositiveRepartition( repartition, direction, orientation) for negativeType in self.negativeType: negative[repartition][direction][ negativeType] = self.getDetailedNegativeRepartition( repartition, direction, negativeType) return (positive, negative) # Returns the number of files in a given positive folder # # @param type Type of dataset # @param direction Direction featured in the dataset # @param orientation Orientation featured in the dataset # @return numeric Number of files in a given positive folder def getDetailedPositiveRepartition(self, type, direction, orientation=""): return self.utils.getFileNumberInFolder( self.settings.getPositiveFolder() + type + "/" + direction + "/" + orientation + "/") # Returns the number of files in a given negative folder # # @param type Type of dataset # @param direction Direction featured in the dataset # @param orientation Orientation featured in the dataset # @return numeric Number of files in a given negative folder def getDetailedNegativeRepartition(self, type, direction, orientation=""): return self.utils.getFileNumberInFolder( self.settings.getNegativeFolder() + type + "/" + direction + "/" + orientation + "/") # Display the general repartition # # @param None # @return None def showRepartition(self): positive, negative = self.getRepartition() print "\n\nPositive repartition\n" positive = self.showPositiveRepartition(positive) print "\n\nNegative repartition\n" negative = self.showNegativeRepartition(negative) print "\n\nTotal repartition\n" self.showTotalRepartition(positive, negative) # Display and returns the positive repartition # # @param positive Array of all positive file repartition # @return dict Informations about the repartition of the positive dataset def showPositiveRepartition(self, positive): totalPositive = 0 totalTraining = 0 totalTesting = 0 totalValidating = 0 for direction in self.direction: training = 0 testing = 0 validating = 0 for orientation in self.orientation: if len(direction + orientation) < 10: shift = "\t" else: shift = "" print( "--- {0} {1}{2}\tTraining: {3} \t\tTesting: {4} \t\tValidating: {5}" .format(direction, orientation, shift, positive["training"][direction][orientation], positive["testing"][direction][orientation], positive["validating"][direction][orientation])) training += positive["training"][direction][orientation] testing += positive["testing"][direction][orientation] validating += positive["validating"][direction][orientation] tmp = training + testing + validating totalTraining += training totalTesting += testing totalValidating += validating print( "--- {0}\t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) 
\tValidating: {5} ({6:0.0f}%)\n---" .format(direction, training, (training / float(tmp)) * 100, testing, (testing / float(tmp)) * 100, validating, (validating / float(tmp)) * 100)) totalPositive = totalTraining + totalTesting + totalValidating print( "--- Total: {0} \t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)" .format(totalPositive, totalTraining, (totalTraining / float(totalPositive)) * 100, totalTesting, (totalTesting / float(totalPositive)) * 100, totalValidating, (totalValidating / float(totalPositive)) * 100)) return { "total": totalPositive, "totalTraining": totalTraining, "totalTesting": totalTesting, "totalValidating": totalValidating } # Display and returns the negative repartition # # @param negative Array of all negative file repartition # @return dict Informations about the repartition of the negative dataset def showNegativeRepartition(self, negative): totalNegative = 0 totalTraining = 0 totalTesting = 0 totalValidating = 0 for direction in self.direction: training = 0 testing = 0 validating = 0 for negativeType in self.negativeType: if len(direction + negativeType) < 11: shift = "\t" else: shift = "" print( "--- {0} {1}{2}\tTraining: {3} \t\tTesting: {4} \t\tValidating: {5}" .format(direction, negativeType, shift, negative["training"][direction][negativeType], negative["testing"][direction][negativeType], negative["validating"][direction][negativeType])) training += negative["training"][direction][negativeType] testing += negative["testing"][direction][negativeType] validating += negative["validating"][direction][negativeType] tmp = training + testing + validating totalTraining += training totalTesting += testing totalValidating += validating print( "--- {0}\t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)\n---" .format(direction, training, (training / float(tmp)) * 100, testing, (testing / float(tmp)) * 100, validating, (validating / float(tmp)) * 100)) totalNegative = totalTraining + totalTesting + totalValidating print( "--- Total: {0} \t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)" .format(totalNegative, totalTraining, (totalTraining / float(totalNegative)) * 100, totalTesting, (totalTesting / float(totalNegative)) * 100, totalValidating, (totalValidating / float(totalNegative)) * 100)) return { "total": totalNegative, "totalTraining": totalTraining, "totalTesting": totalTesting, "totalValidating": totalValidating } # Display the general repartition # # @param positive Array of all positive file repartition informations # @param negative Array of all negative file repartition informations # @return None def showTotalRepartition(self, positive, negative): total = positive["total"] + negative["total"] totalTraining = positive["totalTraining"] + negative["totalTraining"] totalTesting = positive["totalTesting"] + negative["totalTesting"] totalValidating = positive["totalValidating"] + negative[ "totalValidating"] print( "--- Positive:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)" .format( positive["total"], positive["totalTraining"], (positive["totalTraining"] / float(positive["total"])) * 100, positive["totalTesting"], (positive["totalTesting"] / float(positive["total"])) * 100, positive["totalValidating"], (positive["totalValidating"] / float(positive["total"])) * 100)) print( "--- Negative:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)" .format( negative["total"], 
negative["totalTraining"], (negative["totalTraining"] / float(negative["total"])) * 100, negative["totalTesting"], (negative["totalTesting"] / float(negative["total"])) * 100, negative["totalValidating"], (negative["totalValidating"] / float(negative["total"])) * 100)) print( "--- Total:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)" .format(total, totalTraining, (totalTraining / float(total)) * 100, totalTesting, (totalTesting / float(total)) * 100, totalValidating, (totalValidating / float(total)) * 100))
class Dataset: TYPE_POSITIVE = 0 TYPE_NEGATIVE = 1 TYPE_ACCURACY = 2 TYPE_HEATMAP = 3 LEFT_HAND = 0 RIGHT_HAND = 1 NO_HAND = 2 DISTANCE_550 = 0 DISTANCE_750 = 4 DISTANCE_1000 = 1 DISTANCE_1250 = 5 DISTANCE_1500 = 2 DISTANCE_1750 = 6 DISTANCE_2000 = 3 # Constructor of the Dataset class # # @param camera_height Value of the camera height while gathering informations # @param hand Identifier of the hand (Ø|1|2) # @param skeleton Skeletal joints of the detected subject # @param depth_map Depth map of the captured scene # @param image RGB image of the captured scene # @param type Identifier of the type of data (0|1|2|3) # @param distance Identifier of the type of distance chosen (0|1|2|3|4|5|6) or the actual distance between the fingertip and the target # @param target Coordinates of the target # @return None def __init__(self, camera_height=None, hand=None, skeleton=None, depth_map=None, image=None, type=None, distance=None, target=None): self.settings = Settings() self.utils = Utils() # Initialise each attributes with respective parameters; otherwise with a default value if camera_height is None: camera_height = 1500 self.camera_height = camera_height if hand is None: hand = self.settings.LEFT_HAND self.hand = hand if skeleton is None: skeleton = { "head": [], "shoulder": { "left": [], "right": [], "center": [] }, "elbow": { "left": [], "right": [] }, "hand": { "left": [], "right": [] } } self.skeleton = skeleton if depth_map is None: depth_map = [] self.depth_map = np.array(depth_map) if image is None: image = "" self.image = image if type is None: type = Dataset.TYPE_POSITIVE self.type = type if distance is None: distance = Dataset.DISTANCE_550 self.distance = distance if target is None: target = [] self.target = target # Returns a JSON encoded string of the dataset object # # @param None # @return string JSON encoded string of the dataset object def to_JSON(self): # Convert the depth map to a serializable state self.depth_map = self.depth_map.tolist() # Encode the RGB image in a base64 string self.image = self.utils.getBase64(self.image) # Get rid of extra attributes to clean the output obj = deepcopy(self) del obj.settings del obj.utils return json.dumps(obj, default=lambda o: o.__dict__, separators=(',', ':')) # Save the dataset informations as a file # # @param None # @return None def save(self): print "Saving dataset informations..." # Save the dataset to the right folder if self.type == Dataset.TYPE_POSITIVE: filename = self.settings.getPositiveFolder() elif self.type == Dataset.TYPE_NEGATIVE: filename = self.settings.getNegativeFolder() elif self.type == Dataset.TYPE_ACCURACY: filename = self.settings.getAccuracyFolder() else: raise ValueError("Invalid type of dataset to save", self.type) # Retrieve the number of files saved so far # Be careful that due to the sample file, the counter does not need to be incremented. 
Otherwise, the files would replace each others filename += str( self.utils.getFileNumberInFolder(filename)).zfill(3) + ".json" self.utils.dumpJsonToFile(self.to_JSON(), filename) # Toggle the type identifier of the dataset # # @param value Identifier of the new type of the dataset # @return None def toggleType(self, value): self.type = value print "type toggled to {0}".format(value) # Toggle the distance identifier of the dataset # # @param value Identifier of the new distance of the dataset # @return None def toggleDistance(self, value): self.distance = value print "distance toggled to {0}".format(value) # Update the distance of the dataset # # @param value Distance value # @return None def setDistance(self, value): self.distance = value print "distance changed to {0}".format(value) # Toggle the hand identifier of the dataset # # @param value Identifier of the new hand of the dataset # @return None def toggleHand(self, value): self.hand = value print "hand toggled" # Returns the actual distance # # @param None # @return numeric Actual distance value (translated if identifier) def getWishedDistance(self): if self.distance == Dataset.DISTANCE_550: return 550 elif self.distance == Dataset.DISTANCE_750: return 750 elif self.distance == Dataset.DISTANCE_1000: return 1000 elif self.distance == Dataset.DISTANCE_1250: return 1250 elif self.distance == Dataset.DISTANCE_1500: return 1500 elif self.distance == Dataset.DISTANCE_1750: return 1750 elif self.distance == Dataset.DISTANCE_2000: return 2000 else: return self.distance
class DatasetGui(QtWidgets.QWidget): utils = Utils() featureExtractor = FeatureExtractor() bpn = BPNHandler(True) accuracy = accuracy.Accuracy() # Constructor of the DatasetGui class # # @param None # @return None def __init__(self): super(DatasetGui, self).__init__() self.setWindowTitle("Pointing Gesture Recognition - Dataset recording") # Retrieve all settings self.settings = Settings() # Load sounds self.countdownSound = QtMultimedia.QSound( self.settings.getResourceFolder() + "countdown.wav") self.countdownEndedSound = QtMultimedia.QSound( self.settings.getResourceFolder() + "countdown-ended.wav") # Get the context and initialise it self.context = Context() self.context.init() # Create the depth generator to get the depth map of the scene self.depth = DepthGenerator() self.depth.create(self.context) self.depth.set_resolution_preset(RES_VGA) self.depth.fps = 30 # Create the image generator to get an RGB image of the scene self.image = ImageGenerator() self.image.create(self.context) self.image.set_resolution_preset(RES_VGA) self.image.fps = 30 # Create the user generator to detect skeletons self.user = UserGenerator() self.user.create(self.context) # Initialise the skeleton tracking skeleton.init(self.user) # Start generating self.context.start_generating_all() print "Starting to detect users.." # Create a new dataset item self.data = Dataset() # Create a timer for an eventual countdown before recording the data self.countdownTimer = QtCore.QTimer() self.countdownRemaining = 10 self.countdownTimer.setInterval(1000) self.countdownTimer.setSingleShot(True) self.countdownTimer.timeout.connect(self.recordCountdown) # Create a timer to eventually record data for a heat map self.heatmapRunning = False self.heatmapTimer = QtCore.QTimer() self.heatmapTimer.setInterval(10) self.heatmapTimer.setSingleShot(True) self.heatmapTimer.timeout.connect(self.recordHeatmap) # Create the global layout self.layout = QtWidgets.QVBoxLayout(self) # Create custom widgets to hold sensor's images self.depthImage = SensorWidget() self.depthImage.setGeometry(10, 10, 640, 480) # Add these custom widgets to the global layout self.layout.addWidget(self.depthImage) # Hold the label indicating the number of dataset taken self.numberLabel = QtWidgets.QLabel() self.updateDatasetNumberLabel() # Create the acquisition form elements self.createAcquisitionForm() # Register a dialog window to prompt the target position self.dialogWindow = DatasetDialog(self) # Allow to save the data when the right distance is reached self.recordIfReady = False # Create and launch a timer to update the images self.timerScreen = QtCore.QTimer() self.timerScreen.setInterval(30) self.timerScreen.setSingleShot(True) self.timerScreen.timeout.connect(self.updateImage) self.timerScreen.start() # Update the depth image displayed within the main window # # @param None # @return None def updateImage(self): # Update to next frame self.context.wait_and_update_all() # Extract informations of each tracked user self.data = skeleton.track(self.user, self.depth, self.data) # Get the whole depth map self.data.depth_map = np.asarray( self.depth.get_tuple_depth_map()).reshape(480, 640) # Create the frame from the raw depth map string and convert it to RGB frame = np.fromstring(self.depth.get_raw_depth_map_8(), np.uint8).reshape(480, 640) frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB) # Get the RGB image of the scene self.data.image = np.fromstring(self.image.get_raw_image_map_bgr(), dtype=np.uint8).reshape(480, 640, 3) # Will be used to specify the depth of the 
current hand wished currentDepth, showCurrentDepth = 0, "" if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0: # Highlight the head ui.drawPoint(frame, self.data.skeleton["head"][0], self.data.skeleton["head"][1], 5) # Display lines from elbows to the respective hands ui.drawElbowLine(frame, self.data.skeleton["elbow"]["left"], self.data.skeleton["hand"]["left"]) ui.drawElbowLine(frame, self.data.skeleton["elbow"]["right"], self.data.skeleton["hand"]["right"]) # Get the pixel's depth from the coordinates of the hands leftPixel = self.utils.getDepthFromMap( self.data.depth_map, self.data.skeleton["hand"]["left"]) rightPixel = self.utils.getDepthFromMap( self.data.depth_map, self.data.skeleton["hand"]["right"]) if self.data.hand == self.settings.LEFT_HAND: currentDepth = leftPixel elif self.data.hand == self.settings.RIGHT_HAND: currentDepth = rightPixel # Get the shift of the boundaries around both hands leftShift = self.utils.getHandBoundShift(leftPixel) rightShift = self.utils.getHandBoundShift(rightPixel) # Display a rectangle around both hands ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["left"], leftShift, (50, 100, 255)) ui.drawHandBoundaries(frame, self.data.skeleton["hand"]["right"], rightShift, (200, 70, 30)) # Record the current data if the user is ready if self.recordIfReady: cv2.putText(frame, str(self.data.getWishedDistance()), (470, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (252, 63, 253), 5) if self.data.getWishedDistance( ) >= int(currentDepth) - 10 and self.data.getWishedDistance( ) <= int(currentDepth) + 10: self.record([]) self.recordIfReady = False else: if int(currentDepth) < self.data.getWishedDistance(): showCurrentDepth = str(currentDepth) + " +" else: showCurrentDepth = str(currentDepth) + " -" else: showCurrentDepth = str(currentDepth) cv2.putText(frame, showCurrentDepth, (5, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (50, 100, 255), 5) # Update the frame self.depthImage.setPixmap(ui.convertOpenCVFrameToQPixmap(frame)) self.timerScreen.start() # Update the label indicating the number of dataset elements saved so far for the current type # # @param None # @return None def updateDatasetNumberLabel(self): if self.data.type == Dataset.TYPE_POSITIVE: self.numberLabel.setText("Dataset #%d" % (self.utils.getFileNumberInFolder( self.settings.getPositiveFolder()))) elif self.data.type == Dataset.TYPE_NEGATIVE: self.numberLabel.setText("Dataset #%d" % (self.utils.getFileNumberInFolder( self.settings.getNegativeFolder()))) elif self.data.type == Dataset.TYPE_ACCURACY: self.numberLabel.setText("Dataset #%d" % (self.utils.getFileNumberInFolder( self.settings.getAccuracyFolder()))) else: self.numberLabel.setText("Dataset #%d" % (self.utils.getFileNumberInFolder( self.settings.getDatasetFolder()))) # Record the actual informations # # @param obj Initiator of the event # @return None def record(self, obj): # If the user collects data to check accuracy, prompts additional informations if self.data.type == Dataset.TYPE_ACCURACY: self.saveForTarget() # If the user collects data for a heat map, let's do it elif self.data.type == Dataset.TYPE_HEATMAP: # The same button will be used to stop recording if not self.heatmapRunning: self.startRecordHeatmap() else: self.stopRecordHeatmap() else: # Directly save the dataset and update the label number self.data.save() self.countdownEndedSound.play() self.updateDatasetNumberLabel() # Handle a countdown as a mean to record the informations with a delay # # @param None # @return None def recordCountdown(self): # Decrease the countdown 
and check if it needs to continue self.countdownRemaining -= 1 if self.countdownRemaining <= 0: # Re-initialise the timer and record the data self.countdownTimer.stop() self.countdownButton.setText("Saving..") self.countdownRemaining = 10 self.record([]) else: self.countdownTimer.start() self.countdownSound.play() # Display the actual reminaining self.countdownButton.setText("Save in %ds" % (self.countdownRemaining)) # Record a heatmap representation of the informations by successive captures # # @param None # @return None def recordHeatmap(self): if self.data.hand == self.settings.NO_HAND: print "Unable to record as no hand is selected" return False if len(self.user.users) > 0 and len(self.data.skeleton["head"]) > 0: # Input the data into the feature extractor result = self.bpn.check( self.featureExtractor.getFeatures(self.data)) # Add the depth of the finger tip point = self.featureExtractor.fingerTip[result[1]] point.append(self.utils.getDepthFromMap(self.data.depth_map, point)) # Verify that informations are correct if point[0] != 0 and point[1] != 0 and point[2] != 0: # Add the result of the neural network point.append(result[0]) self.heatmap.append(point) self.countdownSound.play() # Loop timer self.heatmapTimer.start() # Start the recording of the heatmap # # @param None # @return None def startRecordHeatmap(self): self.saveButton.setText("Stop recording") self.heatmapRunning = True self.heatmapTimer.start() # Stop the recording of the heatmap # # @param None # @return None def stopRecordHeatmap(self): self.heatmapTimer.stop() self.heatmapRunning = False self.countdownEndedSound.play() self.saveButton.setText("Record") self.accuracy.showHeatmap(self.heatmap, "front") self.heatmap = [] # Raise a flag to record the informations when the chosen distance will be met # # @param None # @return None def startRecordWhenReady(self): self.recordIfReady = True # Hold the current informations to indicate the position of the target thanks to the dialog window # # @param None # @return None def saveForTarget(self): # Freeze the data self.timerScreen.stop() self.countdownEndedSound.play() # Translate the depth values to a frame and set it in the dialog window frame = np.fromstring(self.depth.get_raw_depth_map_8(), np.uint8).reshape(480, 640) frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB) self.dialogWindow.setFrame(frame) # Prompt the position of the target self.dialogWindow.exec_() # Toggle the type of dataset chosen # # @param value Identifier of the new type of dataset # @return None def toggleType(self, value): self.data.toggleType(value) if value == self.data.TYPE_HEATMAP: self.saveButton.setText("Record") self.countdownButton.setText("Record in %ds" % (self.countdownRemaining)) self.readyButton.setEnabled(False) # Create an array to hold all points self.heatmap = [] else: self.updateDatasetNumberLabel() if hasattr(self, 'saveButton'): self.saveButton.setText("Save") self.countdownButton.setText("Save in %ds" % (self.countdownRemaining)) self.readyButton.setEnabled(True) # Create the acquisition form of the main window # # @param None # @return None def createAcquisitionForm(self): globalLayout = QtWidgets.QHBoxLayout() vlayout = QtWidgets.QVBoxLayout() # Drop down menu of the distance to record the informations when the pointing hand meet the corresponding value hlayout = QtWidgets.QHBoxLayout() label = QtWidgets.QLabel("Distance") label.setFixedWidth(100) comboBox = QtWidgets.QComboBox() comboBox.currentIndexChanged.connect(self.data.toggleDistance) comboBox.setFixedWidth(200) 
comboBox.addItem("550") comboBox.addItem("750") comboBox.addItem("1000") comboBox.addItem("1250") comboBox.addItem("1500") comboBox.addItem("1750") comboBox.addItem("2000") comboBox.setCurrentIndex(0) hlayout.addWidget(label) hlayout.addWidget(comboBox) vlayout.addLayout(hlayout) # Drop down menu to select the type of hand of the dataset hlayout = QtWidgets.QHBoxLayout() label = QtWidgets.QLabel("Pointing hand") label.setFixedWidth(100) comboBox = QtWidgets.QComboBox() comboBox.currentIndexChanged.connect(self.data.toggleHand) comboBox.setFixedWidth(200) comboBox.addItem("Left") comboBox.addItem("Right") comboBox.addItem("None") comboBox.setCurrentIndex(0) hlayout.addWidget(label) hlayout.addWidget(comboBox) vlayout.addLayout(hlayout) # Drop down menu of the dataset type hlayout = QtWidgets.QHBoxLayout() label = QtWidgets.QLabel("Type") label.setFixedWidth(100) comboBox = QtWidgets.QComboBox() comboBox.currentIndexChanged.connect(self.toggleType) comboBox.setFixedWidth(200) comboBox.addItem("Positive") comboBox.addItem("Negative") comboBox.addItem("Accuracy") comboBox.addItem("Heat map") comboBox.setCurrentIndex(0) hlayout.addWidget(label) hlayout.addWidget(comboBox) vlayout.addLayout(hlayout) globalLayout.addLayout(vlayout) vlayout = QtWidgets.QVBoxLayout() self.numberLabel.setAlignment(QtCore.Qt.AlignCenter) vlayout.addWidget(self.numberLabel) # Action buttons to record the way that suits the most hLayout = QtWidgets.QHBoxLayout() self.readyButton = QtWidgets.QPushButton( 'Save when ready', clicked=self.startRecordWhenReady) self.saveButton = QtWidgets.QPushButton('Save', clicked=self.record) hLayout.addWidget(self.readyButton) vlayout.addLayout(hLayout) item_layout = QtWidgets.QHBoxLayout() self.countdownButton = QtWidgets.QPushButton( "Save in %ds" % (self.countdownRemaining), clicked=self.countdownTimer.start) self.saveButton = QtWidgets.QPushButton('Save', clicked=self.record) item_layout.addWidget(self.countdownButton) item_layout.addWidget(self.saveButton) vlayout.addLayout(item_layout) globalLayout.addLayout(vlayout) self.layout.addLayout(globalLayout)
class Training():
    # Load required classes
    bpn = BPNHandler()
    datasetManager = DatasetManager()
    featureExtractor = FeatureExtractor()
    settings = Settings()
    utils = Utils()

    # Returns the array of the positive targets based on the parameter
    #
    # @param data Data to evaluate
    # @param positiveTarget Array of the positive targets
    # @return array Array of the positive targets based on the parameter
    def getPositiveTargetArray(self, data, positiveTarget):
        output = []
        for i in range(len(data)):
            for j in range(len(data[i])):
                output.append(positiveTarget[i])
        return output

    # Returns the array of the negative targets based on the parameter
    #
    # @param data Data to evaluate
    # @param positiveTargetLength Length of the array of positive targets
    # @return array Array of the negative targets based on the parameter
    def getNegativeTargetArray(self, data, positiveTargetLength):
        # Create the negative target thanks to the length of the positive one
        negativeTarget = np.zeros(positiveTargetLength).astype(int)

        output = []
        for i in range(len(data)):
            for j in range(len(data[i])):
                output.append(negativeTarget)
        return output

    # Train the network with the complete set of data
    #
    # @param None
    # @return None
    def complete(self):
        positiveTraining = self.datasetManager.getPositiveCompleteMixed("training")
        negativeTraining = self.datasetManager.getMainNegative("training")
        positiveTesting = self.datasetManager.getPositiveCompleteMixed("testing")
        negativeTesting = self.datasetManager.getMainNegative("testing")
        positiveTarget = self.datasetManager.getCompleteMixedTarget()

        # Run the network
        self.run(positiveTraining, negativeTraining, positiveTesting, negativeTesting, positiveTarget, True)

    # Train the network with the restrained set of data
    #
    # @param None
    # @return None
    def restrained(self):
        positiveTraining = self.datasetManager.getPositiveRestrained("training")
        negativeTraining = self.datasetManager.getNegativeMainRestrained("training")
        positiveTesting = self.datasetManager.getPositiveRestrained("testing")
        negativeTesting = self.datasetManager.getNegativeMainRestrained("testing")
        positiveTarget = self.datasetManager.getRestrainedTarget()

        # Run the network
        self.run(positiveTraining, negativeTraining, positiveTesting, negativeTesting, positiveTarget, True)

    # Train the network with pre-computed recent values to bypass loading
    #
    # @param None
    # @return None
    def recentValues(self):
        trainingInput = self.datasetManager.getRecentValuesRestrained(trainingInput=True)
        trainingTarget = self.datasetManager.getRecentValuesRestrained(trainingTarget=True)
        testingInput = self.datasetManager.getRecentValuesRestrained(testingInput=True)
        testingTarget = self.datasetManager.getRecentValuesRestrained(testingTarget=True)

        # Run the network
        self.bpn.run(trainingInput, trainingTarget, testingInput, testingTarget, learningRate=0.05, momentum=0.1, optimal=True)

    # Train the network with the given sets of data
    #
    # @param positiveTraining Array of positive data from the training set
    # @param negativeTraining Array of negative data from the training set
    # @param positiveTesting Array of positive data from the testing set
    # @param negativeTesting Array of negative data from the testing set
    # @param positiveTarget Array of positive targets to reach
    # @param getData Flag to output the processed features in order to bypass loading the next time
    # @return None
    def run(self, positiveTraining, negativeTraining, positiveTesting, negativeTesting, positiveTarget, getData=False):
        # Load all dataset files and gather them accordingly
        training = self.datasetManager.loadDataset(positiveTraining)
        training.extend(self.datasetManager.loadDataset(negativeTraining))

        testing = self.datasetManager.loadDataset(positiveTesting)
        testing.extend(self.datasetManager.loadDataset(negativeTesting))

        # Process all features
        print "Processing features..."
        trainingInput = []
        for data in training:
            trainingInput.extend(self.featureExtractor.getFeatures(data))

        testingInput = []
        for data in testing:
            testingInput.extend(self.featureExtractor.getFeatures(data))

        # Build the target arrays
        trainingTarget = self.getPositiveTargetArray(positiveTraining, positiveTarget)
        trainingTarget.extend(self.getNegativeTargetArray(negativeTraining, len(positiveTarget)))

        testingTarget = self.getPositiveTargetArray(positiveTesting, positiveTarget)
        testingTarget.extend(self.getNegativeTargetArray(negativeTesting, len(positiveTarget)))

        # Check if we need to print the data or run the network
        if getData:
            self.utils.getPythonInitCode(trainingInput, "trainingInput")
            self.utils.getPythonInitCode(trainingTarget, "trainingTarget")
            self.utils.getPythonInitCode(testingInput, "testingInput")
            self.utils.getPythonInitCode(testingTarget, "testingTarget")
        else:
            # Run the network
            self.bpn.run(trainingInput, trainingTarget, testingInput, testingTarget, learningRate=0.05, momentum=0.1, optimal=False)
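# Hedged sketch with assumed data shapes, not part of Training: it illustrates how
# getPositiveTargetArray / getNegativeTargetArray replicate one target vector per
# dataset file so the target list stays aligned with the flattened feature list.
# File names and the two-class target below are made up for the example.
import numpy as np

positive_files = [["a.json", "b.json"], ["c.json"]]   # positive files grouped per class
positive_target = [[1, 0], [0, 1]]                    # one target vector per group
negative_files = [["d.json", "e.json"]]               # negative files in one group

# One positive target per file of its group, in order:
positive_targets = [positive_target[i] for i, group in enumerate(positive_files) for _ in group]
# A zero vector of the same length for every negative file:
negative_targets = [np.zeros(len(positive_target)).astype(int) for group in negative_files for _ in group]
# positive_targets -> [[1, 0], [1, 0], [0, 1]]; negative_targets -> two arrays equal to [0, 0]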
class FeatureExtractor(): # Load settings settings = Settings() # Compatibility with testing mode testing = Testing() # Hold all input feature data trainingInput, testingInput, validatingInput = [], [], [] # Hold all target data trainingTarget, testingTarget = [], [] # Hold eventual results resultTestingScore, resultTrainingScore, resultInitialWeights, resultWeights, resultConfig, resultIteration = [], [], [], [], [], [] # Hold informations about the current input currentH, currentW = 0, 0 currentBinary, currentExtracted = [], [] # Hold the transformations applied to the current input cropTop, cropLeft, cropBottom, cropRight = 0, 0, 0, 0 emptyTop, emptyLeft, emptyBottom, emptyRight = 0, 0, 0, 0 rotationAngle = 0 tar = 0 # The current fingertip and eye (if any) fingerTip = [[0, 0], [0, 0]] eyePosition = [[0, 0], [0, 0]] orientation = ["", ""] bpnValidating = None # Constructor of the FeaturesExtractor class # # @param None # @return None def __init__(self): # Vectorize threshold functions to speed the process self.thresholdBinary = np.vectorize(self.thresholdBinary) self.thresholdExtracted = np.vectorize(self.thresholdExtracted) # Apply a binary output after a threshold (This function is meant to be vectorialised) # # @param x Value to process # @param start Minimal value of the threshold # @param end Maximal value of the threshold # @return integer (0|1) def thresholdBinary(self, x, start, end): return 0 if x < start or x > end or x == 0 else 1 # Apply a threshold to transform unwanted values to NaN (This function is meant to be vectorialised) # # @param x Value to process # @param start Minimal value of the threshold # @param end Maximal value of the threshold # @return numeric (NaN|x) def thresholdExtracted(self, x, start, end): return np.NaN if x < start or x > end or x == 0 else x # Find the nearest position of the value 1 in an array, starting ideally from its middle # # @param data Array of values # @param index Current index within the array # @param orientation Orientation of next lookup (-1|1) # @param shift Shift to push lookup on sides # @return integer|None Index of the nearest 1 value; otherwise None def findNearestValue(self, data, index, orientation=-1, shift=0): # Returns the index of the nearest value around the index parameter tmp = index + (orientation * shift) if tmp >= len(data) or tmp < 0: return None elif data[tmp] == 1: return tmp else: # Invert the orientation orientation *= -1 # Increase the shift every two checks (except for the first one) if orientation == 1: shift += 1 return self.findNearestValue(data, index, orientation, shift) # Rebase the values of the extracted array based on its minimum value # # @param None # @return None def tarExtracted(self): # Determine the minimal non-zero value of the matrice try: #min = np.nanmin(self.currentExtracted) # fmin identity seems buggy self.tar = np.min( self.currentExtracted[~np.isnan(self.currentExtracted)] ) # use this hack instead except ValueError: self.tar = 0 # Remove this value to all elements of the matrice self.currentExtracted = self.currentExtracted - np.array([self.tar]) # For testing purpose self.testing.timerMarker("Tar the values of the extracted hand") # Remove all empty columns and rows of the extracted and binary arrays # # @param None # @return None def removeEmptyColumnsRows(self): # Re-initialise empty holders self.emptyTop, self.emptyLeft, self.emptyBottom, self.emptyRight = 0, 0, 0, 0 # Verify the content of the matrices if self.currentExtracted.size == 0 or self.currentBinary.size == 0: return 
False # Count along columns and rows from both matrices to detect empty ones column = self.currentBinary.sum(axis=0).astype(int) row = self.currentBinary.sum(axis=1).astype(int) # Remove empty left columns i = 0 while i < len(column) and column[i] == 0: self.currentExtracted = self.currentExtracted[:, 1:] self.currentBinary = self.currentBinary[:, 1:] self.emptyLeft += 1 i += 1 # Remove empty right columns i = len(column) - 1 while i >= 0 and column[i] == 0: self.currentExtracted = self.currentExtracted[:, :-1] self.currentBinary = self.currentBinary[:, :-1] self.emptyRight += 1 i -= 1 # Remove empty top rows i = 0 while i < len(row) and row[i] == 0: self.currentExtracted = self.currentExtracted[1:, :] self.currentBinary = self.currentBinary[1:, :] self.emptyTop += 1 i += 1 # Remove empty bottom rows i = len(row) - 1 while i >= 0 and row[i] == 0: self.currentExtracted = self.currentExtracted[:-1, :] self.currentBinary = self.currentBinary[:-1, :] self.emptyBottom += 1 i -= 1 # For testing purpose self.testing.timerMarker( "Remove all zeros columns and rows from both matrices") # Rotate the extracted and binary matrices to place the fingertip at their top # # @param hand Coordinates of the hand # @param elbow Coordinates of the elbow # @return None def rotate(self, hand, elbow): # The dataset can be oriented in 4 different ways that can be rotated back to form a vertical line between the hand and the elbow joints # First, determine the relative position of the hand if hand[0] - elbow[0] < 0: # the hand is located in the lower part v = elbow[0] - hand[0] up = False else: # the hand is located in the upper part v = hand[0] - elbow[0] up = True if hand[1] - elbow[1] < 0: # the hand is located in the right part h = elbow[1] - hand[1] left = False else: # the hand is located in the left part h = hand[1] - elbow[1] left = True # Check if the elbow is on top/bottom extrems to determine the rotation degree if hand[0] == elbow[0] and hand[1] == elbow[1]: self.rotationAngle = 0 elif v > h: if not up: self.rotationAngle = 0 else: self.rotationAngle = 2 else: if left: self.rotationAngle = 1 else: self.rotationAngle = -1 # Apply rotation self.currentBinary = np.rot90(self.currentBinary, self.rotationAngle) self.currentExtracted = np.rot90(self.currentExtracted, self.rotationAngle) # For testing purpose self.testing.timerMarker("Matrice rotation") # Display the content of either the extracted or the binary array in ASCII # # @param b Array to display (self.extracted|self.binary) # @return None def display(self, b): x, y = b.shape for i in range(x): text = "" for j in range(y): if np.isnan(b[i, j]) or int(b[i, j]) == 0: text += " " else: text += str(int(b[i, j])) text += "," print text print # Restrain a value between 0 and a maximum # # @param value Value to process # @param max Maximum limit # @return numeric Value between the range 0 to max def keepRange(self, value, max): if value < 0: return 0 elif value > max: if max > 0: return max else: return 0 else: return value # Get the sum of all numbers within a 2D array # # @param data Array to process # @param total Total number of data to get the percentage # @param h1 Horizontal coordinate to start # @param v1 Vertical coordinate to start # @param h2 Horizontal coordinate to finish # @param v2 Vertical coordinate to finish # @return float Percentage of data def countWithinArea(self, data, total, h1, v1, h2, v2): # Return the percentage of actual data within a restricted area if self.currentW > 0 and self.currentH > 0 and total > 0 and data.size > 0 and 
data.shape[ 0] >= v2 and data.shape[1] >= h2: return np.sum(data[v1:v2, h1:h2], dtype=np.int32) / float(total) * 100 else: return 0 # Divise an array in 6 sub regions to get respective sub-percents # # @param None # @return array Array of 6 sub-percents def diviseInSix(self): h, w = self.currentBinary.shape total = np.sum(self.currentBinary) output = [] output.append( self.countWithinArea(self.currentBinary, total, 0, 0, w / 2, h / 3)) # upper left output.append( self.countWithinArea(self.currentBinary, total, 0, h / 3, w / 2, 2 * (h / 3))) # middle left output.append( self.countWithinArea(self.currentBinary, total, 0, 2 * (h / 3), w / 2, h)) # lower left output.append( self.countWithinArea(self.currentBinary, total, w / 2, 0, w, h / 3)) # upper right output.append( self.countWithinArea(self.currentBinary, total, w / 2, h / 3, w, 2 * (h / 3))) # middle right output.append( self.countWithinArea(self.currentBinary, total, w / 2, 2 * (h / 3), w, h)) # lower right return self.normalizeInput(output) # Retrieve the alignement properties of the elbow and the pointing hand # # @param depth Depth of the hand to adjust the threshold # @param hand_v Vertical coordinate of the hand # @param hand_h Horizontal coordinate of the hand # @param elbow_v Vertical coordinate of the elbow # @param elbow_h Horizontal coordinate of the elbow # @param handId Identifier of the hand currently processed # @return array Array of (-1|0|1) def getElbowHandAlignment(self, depth, hand_v, hand_h, elbow_v, elbow_h, handId): # Allow to discriminate gestures pointing left/right up, lateral and down # Uses the disposition of the hand and the elbow # At 1m, a variation of 60 pixels indicates a position change if depth > 0: threshold = (60 / float(depth)) * 1000 else: threshold = 60 # Right, Left or Front? if hand_h > elbow_h + threshold: h = 1 # Left (from user point of view) self.orientation[handId] = "Left " elif hand_h + threshold < elbow_h: h = -1 # Right (from user point of view) self.orientation[handId] = "Right " else: h = 0 # Front self.orientation[handId] = "Front " # Up, Down or Lateral? 
if hand_v > elbow_v + threshold: v = -1 # down self.orientation[handId] += "down" elif hand_v + threshold < elbow_v: v = 1 # up self.orientation[handId] += "up" else: v = 0 # lateral self.orientation[handId] += "lateral" # For testing purpose self.testing.timerMarker("Get elbow / hand alignment") return [h, v] # Normalize the input array in a defined range # # @param input Array to process # @param old_min Minimal range limit to rebase # @param old_max Maximal range limit to rebase # @return array The processed input array of floats def normalizeInput(self, input, old_min=0, old_max=100): # Normalize the data in a range from -1 to 1 old_range = old_max - old_min new_min = -1 new_range = 1 - new_min if old_range == 0: raise ValueError("Invalid range") return [ float((n - old_min) / float(old_range) * new_range + new_min) for n in input ] # Main function to get the features of a dataset item # # @param data Dataset item # @return array Array of input features def getFeatures(self, data): result = [] # Retrieve the position of the pointing hand if data.hand == self.settings.LEFT_HAND or data.hand == self.settings.BOTH_HAND: h, v, d = map(int, data.skeleton["hand"]["left"]) h2, v2, d2 = map(int, data.skeleton["elbow"]["left"]) result.append( self.processFeatures(h, v, d, h2, v2, d2, data.depth_map, data.skeleton["head"], 0)) if data.hand == self.settings.RIGHT_HAND or data.hand == self.settings.BOTH_HAND: h, v, d = map(int, data.skeleton["hand"]["right"]) h2, v2, d2 = map(int, data.skeleton["elbow"]["right"]) result.append( self.processFeatures(h, v, d, h2, v2, d2, data.depth_map, data.skeleton["head"], len(result))) # Then, return the corresponding features return result # Retrieve the input features # # @param h Horizontal coordinate of the pointing hand # @param v Vertical coordinate of the pointing hand # @param d Depth of the pointing hand # @param h2 Horizontal coordinate of the elbow # @param v2 Vertical coordinate of the elbow # @param d2 Depth of the elbow # @param depthMap Array of the depth map of the captured scene # @param head Cordinates of the head # @param handId Identifier of the hand currently processed # @return array Array of input features def processFeatures(self, h, v, d, h2, v2, d2, depthMap, head, handId=0): # Assert the validaity of the values if depthMap.size == 0 or len(depthMap.shape) <= 1: return [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0] # Determine the bounding box around the pointing hand regarding to the depth of the hand if d != 0: shift = int((1000.0 / d) * 90) else: shift = 1 # For testing purpose self.testing.startTimer() # Determine the coordinates of the bounding box to extract self.cropTop = self.keepRange(int(v - shift), 480) self.cropLeft = self.keepRange(int(h - shift), 640) self.cropBottom = self.keepRange(int(v + shift) + 1, 480) self.cropRight = self.keepRange(int(h + shift) + 1, 640) # For testing purpose self.testing.timerMarker( "Determine the coordinates of the bounding box to extract") # Extract the informations within the bounding box startV = shift - v + self.cropTop startH = shift - h + self.cropLeft endV = shift + self.cropBottom - v endH = shift + self.cropRight - h max = (2 * shift) + 1 self.currentExtracted = np.zeros(max * max).reshape(max, max) self.currentExtracted[startV:endV, startH:endH] = depthMap[ self.cropTop:self.cropBottom, self.cropLeft:self.cropRight] self.currentBinary = np.copy(self.currentExtracted) # For testing purpose self.testing.timerMarker( "Extract the informations within the bounding box") # Extract the hand from the 
background with a threshold start = d - 100 end = d + 100 self.currentBinary = self.thresholdBinary(self.currentBinary, start, end) self.currentExtracted = self.thresholdExtracted( self.currentExtracted, start, end) # For testing purpose self.testing.timerMarker( "Extract the hand from the background with a threshold") # Remove all zeros columns and rows from both matrices self.removeEmptyColumnsRows() # Initialize the input features input = [] # Rotate the hand to form a vertical line between the hand and the elbow self.rotate([v, h], [v2, h2]) self.tarExtracted() # Calculate the elbow/hand alignment alignment = self.getElbowHandAlignment(d, v, h, v2, h2, handId) # Retrieve eyes position self.eyePosition[handId] = self.getEyePosition(depthMap, head, alignment) # For testing purpose self.testing.timerMarker("Get eye position") # Retrieve the finger tip position self.fingerTip[handId] = self.getFingerTip() # For testing purpose self.testing.timerMarker("Get fingertip") # Hold the ratio self.currentH, self.currentW = self.currentBinary.shape # /------------------------------------\ # | Feature 1 to 6 | # | Percent of data in sub-regions | # \------------------------------------/ # Hold the percentage of actual data within sub-areas input.extend(self.diviseInSix()) # For testing purpose self.testing.timerMarker("Process hand histogram") self.testing.stopTimer() return input # Retrieve the coordinates of the fingertip # # @param None # @return array Array of the coordinates of the fingertip def getFingerTip(self): # Prevent empty calls if len(self.currentBinary) == 0: return [0, 0] # Retrieve non-empty values of the first row index = np.nonzero(self.currentBinary[0] == 1) output = [] if len(index) < 1 or len(index[0]) < 1: return [0, 0] # Finger tip coordinates (once rotated!) 
v = 0 h = self.findNearestValue( self.currentBinary[0], index[0][0] + int( (index[0][-1] - index[0][0]) / 2)) if h == None: return [0, 0] # Revert rotation to get the real coordinates if self.rotationAngle == -1: v = len(self.currentBinary[0]) - 1 - h h = 0 elif self.rotationAngle == 2: h = len(self.currentBinary[0]) - 1 - h v = len(self.currentBinary) elif self.rotationAngle == 1: v = h h = len(self.currentBinary) - 1 # Revert empty columns/rows and initial crop return [ self.cropLeft + h + self.emptyLeft, self.cropTop + v + self.emptyTop ] # Retrieve the position of the virtual master eye # # @param depthMap Array of the depth map of the captured scene # @param head Coordinates of the head # @param elbowHand Orientation of the pointing forearm # @return array Array of the coordinates of the virtual master eye def getEyePosition(self, depthMap, head, elbowHand): # Assert the validaity of the values if depthMap.size == 0 or len(depthMap.shape) <= 1: return [0, 0] # First extract a sub-area based on the depth h, v, d = map(int, head) # Determine the bounding box around the head regarding its depth if d != 0: shift = int((1000.0 / d) * 90) line = int((1000.0 / d) * 80) else: shift = 1 line = 1 # Determine the coordinates of the bounding box to extract cropTop = self.keepRange(int(v - shift), 480) cropLeft = self.keepRange(int(h - shift), 640) cropBottom = self.keepRange(int(v + shift) + 1, 480) cropRight = self.keepRange(int(h + shift) + 1, 640) # Extract the informations within the bounding box startV = shift - v + cropTop startH = shift - h + cropLeft endV = shift + cropBottom - v endH = shift + cropRight - h max = (2 * shift) + 1 extracted = np.zeros(max * max).reshape(max, max) extracted[startV:endV, startH:endH] = depthMap[cropTop:cropBottom, cropLeft:cropRight] # Extract the head from the background with a threshold start = d - 100 end = d + 100 extracted = self.thresholdBinary(extracted, start, end) # Re-initialise empty holders emptyTop, emptyLeft, emptyBottom, emptyRight = 0, 0, 0, 0 # Remove all zeros columns and rows from both matrices column = extracted.sum(axis=0).astype(int) row = extracted.sum(axis=1).astype(int) # Remove empty left columns i = 0 while i < len(column) and column[i] == 0: extracted = extracted[:, 1:] emptyLeft += 1 i += 1 # Remove empty right columns i = len(column) - 1 while i >= 0 and column[i] == 0: extracted = extracted[:, :-1] emptyRight += 1 i -= 1 # Remove empty top rows i = 0 while i < len(row) and row[i] == 0: extracted = extracted[1:, :] emptyTop += 1 i += 1 # The eyes are assumed to look at the finger tip # Based on the alignment of the hand and the elbow, we can extrapolate their relative position # First, let's make sure the chosen line is accessible if len(extracted) <= line: return [0, 0] else: proportion = 0.15 total = np.sum(extracted[line]) index = np.nonzero(extracted[line] == 1) if len(index[0]) == 0: return [0, 0] else: # left side (from the user point of view) if elbowHand[0] == 1: h = index[0][0] + total - (total * proportion) # right side (from the user point of view) elif elbowHand[0] == -1: h = index[0][0] + (total * proportion) # center else: h = index[0][0] + (total * 0.5) # Return the coordinates v = int(cropTop + emptyTop + line) h = int(cropLeft + emptyLeft + h) return [h, v]
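# Hedged sketch, not part of FeatureExtractor: normalizeInput() rescales each value
# from the [old_min, old_max] range (sub-region percentages by default) into [-1, 1]
# before the features are fed to the network. Stand-alone version of the same formula;
# the normalize() name is illustrative.
def normalize(values, old_min=0, old_max=100):
    old_range = old_max - old_min
    if old_range == 0:
        raise ValueError("Invalid range")
    # (n - old_min) / old_range * new_range + new_min, with new_min = -1 and new_range = 2
    return [float(v - old_min) / old_range * 2 - 1 for v in values]

# normalize([0, 50, 100]) -> [-1.0, 0.0, 1.0]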
def updateThreshold(data, currSettings, adminSettings, db, cur):
    global ThresholdList, LimitList, MasterHitList, BooleanList, currData, loopTester

    newSettings = Settings('', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '')
    currData = []
    intializeValues()

    ThresholdList.append(int(currSettings.tcpThreshold))
    ThresholdList.append(int(currSettings.udpThreshold))
    ThresholdList.append(int(currSettings.icmpThreshold))

    LimitList.append(int(currSettings.tcplimit))
    LimitList.append(int(currSettings.udplimit))
    LimitList.append(int(currSettings.icmplimit))

    setattr(newSettings, 'maxTime', currSettings.maxTime)
    setattr(newSettings, 'device', currSettings.device)
    setattr(newSettings, 'network', currSettings.network)
    setattr(newSettings, 'bandwidth', currSettings.bandwidth)
    setattr(newSettings, 'cycle_time', currSettings.cycle_time)
    setattr(newSettings, 'maxFlows', currSettings.maxFlows)
    setattr(newSettings, 'tcplimit', currSettings.tcplimit)
    setattr(newSettings, 'udplimit', currSettings.udplimit)
    setattr(newSettings, 'icmplimit', currSettings.icmplimit)

    # Take note of assigned IDs: 0 = TCP; 1 = UDP; 2 = ICMP
    currData.append(pht(data[0]))
    currData.append(pht(data[1]))
    currData.append(pht(data[2]))

    if (loopTester == 0):
        loopTester = 1
        setInitStateList(currData)

    if currData[0] < adminSettings.tcplimit and currData[1] < adminSettings.udplimit and currData[2] < adminSettings.icmplimit:
        setattr(newSettings, 'tcpThreshold', float("%.2f" % adminSettings.tcpThreshold))
        setattr(newSettings, 'udpThreshold', float("%.2f" % adminSettings.udpThreshold))
        setattr(newSettings, 'icmpThreshold', float("%.2f" % adminSettings.icmpThreshold))

        #'''
        cur.execute("SELECT MAX(idcycle) FROM cycle")
        obtainedcurID = cur.fetchall()
        curID = int(obtainedcurID[0][0])
        cur.execute("INSERT INTO threshold (idcycle,old_tcp,old_udp,old_icmp,new_tcp,new_udp,new_icmp) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s)",
                    (curID, currSettings.tcpThreshold, currSettings.udpThreshold, currSettings.icmpThreshold,
                     float("%.2f" % adminSettings.tcpThreshold), float("%.2f" % adminSettings.udpThreshold),
                     float("%.2f" % adminSettings.icmpThreshold)))
        db.commit()
        # '''
    else:
        for x in range(0, 3):
            if (currData[x] > ThresholdList[x]):
                MasterHitList[x].append(currData[x])
                BooleanList[x][0] = 1
                BooleanList[x][1] = 0
            elif (currData[x] < ThresholdList[x]):
                MasterHitList[x].append(currData[x])
                BooleanList[x][0] = 0
                BooleanList[x][1] = 1

            if (BooleanList[x][0] == 1):
                StateList[x] = 1
                NeededBW = currData[x] - ThresholdList[x]
                getFloodingAdjustment(ThresholdList, x, NeededBW, OriginalList)
                MasterHitList[x] = []
            elif (BooleanList[x][1] == 1):
                StateList[x] = 0
                NeededBW = ThresholdList[x] - currData[x]
                getBackAdjustment(ThresholdList, x, NeededBW, OriginalList)
                MasterHitList[x] = []

        if ThresholdList[2] < currSettings.icmplimit:
            if currData[1] > currSettings.udpThreshold:
                value = currSettings.icmpThreshold - currSettings.icmplimit
                ThresholdList[0] = currSettings.tcpThreshold + value
                ThresholdList[1] = currSettings.udpThreshold
                ThresholdList[2] = currSettings.icmpThreshold - value
            else:
                value1 = currSettings.udpThreshold - currSettings.udplimit
                value2 = currSettings.icmpThreshold - currSettings.icmplimit
                ThresholdList[0] = currSettings.tcpThreshold + value1 + value2
                ThresholdList[1] = currSettings.udpThreshold - value1
                ThresholdList[2] = currSettings.icmpThreshold - value2

        setattr(newSettings, 'tcpThreshold', float("%.2f" % ThresholdList[0]))
        setattr(newSettings, 'udpThreshold', float("%.2f" % ThresholdList[1]))
        setattr(newSettings, 'icmpThreshold', float("%.2f" % ThresholdList[2]))

        #'''
        cur.execute("SELECT MAX(idcycle) FROM cycle")
        obtainedcurID = cur.fetchall()
        curID = int(obtainedcurID[0][0])
        cur.execute("INSERT INTO threshold (idcycle,old_tcp,old_udp,old_icmp,new_tcp,new_udp,new_icmp) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s)",
                    (curID, currSettings.tcpThreshold, currSettings.udpThreshold, currSettings.icmpThreshold,
                     float("%.2f" % ThresholdList[0]), float("%.2f" % ThresholdList[1]),
                     float("%.2f" % ThresholdList[2])))
        db.commit()
        #'''

    setattr(newSettings, 'synThresh', currSettings.synThresh)
    setattr(newSettings, 'synackThresh', currSettings.synackThresh)
    setattr(newSettings, 'httpThresh', currSettings.httpThresh)
    setattr(newSettings, 'dnsThresh', currSettings.dnsThresh)
    setattr(newSettings, 'dhcpThresh', currSettings.dhcpThresh)

    return newSettings
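# Hedged sketch, not part of updateThreshold(): it isolates the reallocation rule from
# the else branch above, where spare ICMP (and possibly UDP) threshold is shifted to TCP
# once the ICMP threshold has dropped below its limit. The reallocate() name and its
# parameters are illustrative assumptions; thresholds and limits are in the same units
# as the ThresholdList entries.
def reallocate(tcp, udp, icmp, udp_limit, icmp_limit, udp_rate, udp_threshold):
    if udp_rate > udp_threshold:
        # Only ICMP has spare capacity: move it to TCP.
        spare = icmp - icmp_limit
        return tcp + spare, udp, icmp - spare
    else:
        # Both UDP and ICMP have spare capacity: move both to TCP.
        spare_udp = udp - udp_limit
        spare_icmp = icmp - icmp_limit
        return tcp + spare_udp + spare_icmp, udp - spare_udp, icmp - spare_icmp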