def FindHandFromTrack(self):
    # Get brightness from tracked hand
    kernel = np.ones((5, 5), np.float32) / 25
    blurred = cv2.filter2D(self.currentFrame.copy(), -1, kernel)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    self.hand_lower_blue = self.AddValueToColorArray(
        [-config['hand']['hsv_hand_dec'][0],
         -config['hand']['hsv_hand_dec'][1],
         -config['hand']['hsv_hand_dec'][2]], self.handPointHSV.copy())
    self.hand_upper_blue = self.AddValueToColorArray(
        [config['hand']['hsv_hand_inc'][0],
         config['hand']['hsv_hand_inc'][1],
         config['hand']['hsv_hand_inc'][2]], self.handPointHSV.copy())
    mask = cv2.inRange(hsv, self.hand_lower_blue, self.hand_upper_blue)
    self.mask_rafined = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    search_hand_mask = self.mask_rafined.copy()
    search_hand = Gesture()
    if search_hand.InitFromMaskAndPosition(
            search_hand_mask, self.handTracked.centerX,
            self.handTracked.centerY) is False:
        self.foundHand = False
    else:
        self.timeSinceFoundHandTracked = time.time()
        self.handTracked = search_hand
        self.handPointHSV = hsv[self.handTracked.centerY][self.handTracked.centerX]
        self.foundHand = True
def TryToTrackHand(self):
    lower_blue_brightness = 255
    search_hand = Gesture()
    # Scan downward in brightness until a palm is found or the floor is hit
    while lower_blue_brightness > 15:
        # define range of blue color in HSV
        lower_blue = np.array([config['hand']['hsv_lower_blue'][0],
                               config['hand']['hsv_lower_blue'][1],
                               lower_blue_brightness])
        upper_blue = np.array([config['hand']['hsv_upper_blue'][0],
                               config['hand']['hsv_upper_blue'][1], 255])
        kernel = np.ones((5, 5), np.float32) / 25
        blurred = cv2.filter2D(self.currentFrame.copy(), -1, kernel)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # Threshold the HSV image to get only blue colors
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        self.mask_rafined = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        search_hand_mask = self.mask_rafined.copy()
        foundPalm = search_hand.SearchPalmFromMask(search_hand_mask)
        if foundPalm:
            # Set info from tracked hand
            self.handTracked = search_hand
            self.timeSinceFoundHandTracked = time.time()
            self.handPointHSV = hsv[self.handTracked.centerY][self.handTracked.centerX]
            self.foundHand = True
            return
        lower_blue_brightness -= 10
    self.foundHand = False
def fetch_pixels():
    '''Return a list of Gesture objects holding normalized pixel values
    (inputs) and their generated output labels.'''
    gestures = []
    for user in user_number:
        path_to_csv = 'Dataset/user_' + str(user) + '/user_' + str(user) + '_loc.csv'
        print(path_to_csv)
        path_to_images = 'Dataset/user_' + str(user) + '/'
        list_of_images = os.listdir(path_to_images)
        list_of_images = filter(lambda x: '.csv' not in x, list_of_images)
        for image in list_of_images:
            im = Image.open(path_to_images + image)
            pix = im.load()
            rows, cols = im.size
            pixels = list()
            for row in range(rows):
                for col in range(cols):
                    for p in pix[row, col]:
                        pixels.append(p / 256)
            gesture = Gesture(image[0], pixels)
            gesture.generate_output()
            gestures.append(gesture)
    return gestures
def classifyGesture(self):
    minError = 2**31 - 1  # a large value
    minErrorIndex = -1
    self.humanGesture = Gesture(self.gesturePoints, "Human Gesture")
    likelihoodScores = [0] * len(self.gestures)
    assessments = [{}] * len(self.gestures)
    for i in xrange(len(self.gestures)):
        assessments[i] = Gesture.compareGestures(self.gestures[i],
                                                 self.humanGesture)
    errorList = [assessments[i][Gesture.totalError]
                 for i in xrange(len(assessments))]
    index = errorList.index(min(errorList))
    # Basic elimination to figure out if the result is valid;
    # show appropriate images in windows
    templateGestureRatio = max(
        (self.gestures[index].distance / self.humanGesture.distance),
        (self.humanGesture.distance / self.gestures[index].distance))
    distanceDiffRatio = assessments[index][Gesture.totalDistance] / min(
        self.gestures[index].distance, self.humanGesture.distance)
    if templateGestureRatio < 1.25 and distanceDiffRatio < 2:
        return index
def classifyGesture(self):
    minError = 2**31 - 1  # a large value
    minErrorIndex = -1
    self.humanGesture = Gesture(self.gesturePoints, "Human Gesture")
    likelihoodScores = [0] * len(self.gestures)
    assessments = [{}] * len(self.gestures)
    for i in xrange(len(self.gestures)):
        # print "Calling:", self.gestures[i].name, self.humanGesture.name
        assessments[i] = Gesture.compareGestures(self.gestures[i],
                                                 self.humanGesture)
        # print self.gestures[i].name
        # print assessments[i]
    errorList = [
        assessments[i][Gesture.totalError] for i in xrange(len(assessments))
    ]
    index = errorList.index(min(errorList))
    # Basic elimination to figure out if the result is valid
    templateGestureRatio = max(
        (self.gestures[index].distance / self.humanGesture.distance),
        (self.humanGesture.distance / self.gestures[index].distance))
    distanceDiffRatio = assessments[index][Gesture.totalDistance] / min(
        self.gestures[index].distance, self.humanGesture.distance)
    if templateGestureRatio < 1.25 and distanceDiffRatio < 2:
        self.gestures[index].action()
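# Worked example of the two validity checks above (illustrative numbers,
# not from the original source): with a template gesture distance of 800
# and a human gesture distance of 1000, templateGestureRatio =
# max(800/1000, 1000/800) = 1.25, which just fails the `< 1.25` scale
# check; a totalDistance of 1500 against min(800, 1000) = 800 gives
# distanceDiffRatio = 1.875, which passes the `< 2` shape check. Both
# checks must pass before the matched gesture's action() is triggered.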
def makeCircles():
    radius = 512
    pointCount = 256
    # The names are swapped relative to the variable names, presumably
    # because in screen coordinates (y axis pointing down) the
    # mathematically counterclockwise parametrization traces clockwise.
    ccwCirclePoints = [(radius * math.cos(t), radius * math.sin(t))
                       for t in np.linspace(0, 2 * math.pi, num=pointCount)]
    ccwCircle = Gesture(ccwCirclePoints, name="CW Circle")
    cwCirclePoints = [(radius * math.cos(t), -radius * math.sin(t))
                      for t in np.linspace(0, 2 * math.pi, num=pointCount)]
    cwCircle = Gesture(cwCirclePoints, name="CCW Circle")
    defaultGestures.append(ccwCircle)
    defaultGestures.append(cwCircle)
def makeLines():
    pointCount = 256
    hLineLR = Gesture([(-t, 0) for t in xrange(pointCount)],
                      name="Horizontal Line Right to Left")
    hLineRL = Gesture([(t, 0) for t in xrange(pointCount)],
                      name="Horizontal Line Left to Right")
    vLineTB = Gesture([(0, t) for t in xrange(pointCount)],
                      name="Vertical Line Top to Bottom")
    vLineBT = Gesture([(0, -t) for t in xrange(pointCount)],
                      name="Vertical Line Bottom to Top")
    diagonalTLtoBR = Gesture([(t, t) for t in xrange(pointCount)],
                             name="Diagonal Top Left to Bottom Right")
    diagonalBRtoTL = Gesture([(-t, -t) for t in xrange(pointCount)],
                             name="Diagonal Bottom Right to Top Left")
    diagonalTRtoBL = Gesture([(-t, t) for t in xrange(pointCount)],
                             name="Diagonal Top Right to Bottom Left")
    diagonalBLtoTR = Gesture([(t, -t) for t in xrange(pointCount)],
                             name="Diagonal Bottom Left to Top Right")
    defaultGestures.append(hLineLR)
    defaultGestures.append(hLineRL)
    defaultGestures.append(vLineTB)
    defaultGestures.append(vLineBT)
    defaultGestures.append(diagonalTLtoBR)
    defaultGestures.append(diagonalBRtoTL)
    defaultGestures.append(diagonalTRtoBL)
    defaultGestures.append(diagonalBLtoTR)
def save(self, skeletons):
    if len(skeletons) < 1:
        return False
    for skeleton in skeletons:
        gesture = Gesture("GESTURE_%d" % (self.__gesture_id_counter),
                          recording=skeleton.gesture_data())
        self.__gestures.append(gesture)
        print("Gesture added: %s" % gesture.id())
        self.__gesture_id_counter += 1
        if self.matcher:
            self.matcher.learn(gesture.averages().data(), gesture.id())
    return self.__gestures[-1]
def main():
    g = Gesture()
    point_list = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 2), (2, 2), (4, 2),
                  (5, 3), (4, 3), (3, 3), (0, 3), (1, 2), (2, 3), (4, 5),
                  (2, 4), (1, 4)]
    point_list.extend([(point[0] + 1, point[1] + 2) for point in point_list])
    point_list.extend([(point[0] + 10, point[1] + 20) for point in point_list])
    print len(point_list)
    g.add_stroke(point_list=point_list)
    g.normalize()
    points = g.strokes[0].points
    x = [point.x for point in points]
    y = [point.y for point in points]
    plot(x, y, 'bo-')
    pyp.show()
def loadGesturesFromFile(self):
    self.gestures = []
    self.gestureNames = []
    read = ""
    with open(self.gestureFile, 'r') as fin:
        read = fin.read()
    data = read.split('\n')
    # Basic check; should be replaced later with a bytestream instead
    if len(data) < len(self.gestureHeader):
        self.loadDefaultGestures()
    else:
        gestureName = ""
        gesturePoints = []
        cutoff = len(self.gestureHeader)
        for item in data:
            if item[:cutoff] == self.gestureHeader:
                gestureName = item[cutoff:]
            elif item == self.gestureEnd:
                self.gestures.append(Gesture(gesturePoints, gestureName))
                self.gestureNames.append(gestureName)
                gestureName = ""
                gesturePoints = []
            else:
                gesturePoints.append(map(float, item.split()))
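# A minimal save counterpart (hedged sketch, not from the original source):
# it assumes gestures expose their point list as `gesturePoints` with (x, y)
# pairs, and that self.gestureHeader / self.gestureEnd are the same marker
# strings the loader above checks for. It emits exactly the layout being
# parsed: one "<header><name>" line, one space-separated point per line,
# then the end marker.
def saveGesturesToFile(self):
    with open(self.gestureFile, 'w') as fout:
        for gesture in self.gestures:
            fout.write(self.gestureHeader + gesture.name + '\n')
            for x, y in gesture.gesturePoints:
                fout.write("%f %f\n" % (x, y))
            fout.write(self.gestureEnd + '\n')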
def makeInfinity():
    scale = 30
    pointCount = 256
    lemniscatePoints = [
        (((scale * math.sqrt(2) * math.cos(t)) / (math.sin(t)**2 + 1)),
         -((scale * math.sqrt(2) * math.cos(t) * math.sin(t)) /
           (math.sin(t)**2 + 1)))
        for t in np.linspace(math.pi / 2, 2 * math.pi + math.pi / 2,
                             num=pointCount)
    ]
    infinity = Gesture(lemniscatePoints, name="Infinity")
    defaultGestures.append(infinity)
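# Sanity-check sketch (not from the original source): the points above are
# the standard parametrization of the lemniscate of Bernoulli with
# a = scale * sqrt(2), i.e. they satisfy (x^2 + y^2)^2 = a^2 (x^2 - y^2).
# The y sign flip only mirrors the curve for screen coordinates.
import math
import numpy as np

scale = 30
a2 = 2 * scale**2  # a^2 with a = scale * sqrt(2)
t = np.linspace(math.pi / 2, 2 * math.pi + math.pi / 2, num=256)
x = scale * math.sqrt(2) * np.cos(t) / (np.sin(t)**2 + 1)
y = scale * math.sqrt(2) * np.cos(t) * np.sin(t) / (np.sin(t)**2 + 1)
assert np.allclose((x**2 + y**2)**2, a2 * (x**2 - y**2), atol=1e-6)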
def addRecordedGesture(self):
    gestureName = ""
    while True:
        gestureName = "".join([chr(random.randint(ord('a'), ord('z')))
                               for i in xrange(20)])
        if gestureName not in self.getGestureNames():
            break
    newGesture = Gesture(self.gesturePoints, name=gestureName)
    self.gestures.append(newGesture)
    print "RECORDED NEW ONE", gestureName
    self.lastAction = gestureName
    return gestureName
def __init__(self, name):
    self.availableGestures = [
        Gesture("rock"),
        Gesture("paper"),
        Gesture("scissors"),
        Gesture("lizard"),
        Gesture("spock")
    ]
    self.chosenGesture = Gesture("NULL")
    self.name = name
    self.score = 0
def GetGesture(self):
    # Retry a few times before initing with the palm again
    timeElapsedSinceLastFoundHand = time.time() - self.timeSinceFoundHandTracked
    stillTryingToFindHandFromTrack = (
        timeElapsedSinceLastFoundHand <
        config['hand']['timeToKeepSearchingHandWhenLostTracking'])
    if not stillTryingToFindHandFromTrack:
        self.handTracked = None
    if self.handTracked is not None or stillTryingToFindHandFromTrack:
        self.FindHandFromTrack()
    else:
        self.TryToTrackHand()
    self.currentGesture = self.handTracked
    if not self.foundHand:
        self.currentGesture = Gesture()
        self.currentGesture.properties['needInitPalm'] = (
            not stillTryingToFindHandFromTrack)
    self.SetTimeElapsedSinceSameGesture()
    return self.currentGesture
def predict(filename):
    gesture = Gesture.from_abs_file(filename)[0]
    return gesture.predict()
class CascadeGestureDetector():
    def __init__(self):
        self.gestures = Gesture()
        self.gestures.set_values("bestfist.xml", 1.2, 5)
        self.gestures.set_values("cascade.xml", 1.2, 30)
        self.gestures.set_values("eye3.xml", 1.1, 36)
        self.gestures.set_values("face.xml", 1.3, 5)

    def get_last_detected_gesture(self):
        return self.gestures.gesture_detected_frame

    def set_perimeters(self, perimeters):
        self.gestures.set_perimeters(perimeters)

    def detect_face(self, frame):
        return self.gestures.detect_face(frame)

    def detect_eyes(self, frame):
        return self.gestures.detect_eyes(frame)

    def detect_hand_gesture(self, frame):
        # Try whichever hand cascade matched first before falling back
        # to the other one
        if self.gestures.first_hand_gesture_index_detected == 0:
            if self.gestures.detect_hand_gesture(frame, 0):
                return True
            return self.gestures.detect_hand_gesture(frame, 1)
        else:
            if self.gestures.detect_hand_gesture(frame, 1):
                return True
            return self.gestures.detect_hand_gesture(frame, 0)
class Tracker(object):
    """
    This is the main program which gives a high-level view of all the
    running subsystems. It connects camera input with output in the form
    of "actions" (such as keyboard shortcuts on the user's behalf).
    This is done by locating a hand in an image and detecting features,
    like the number of fingers, and trying to match that data with a
    known gesture.
    """

    def __init__(self):
        """
        Configuration
        """
        # Camera settings
        self.FRAME_WIDTH = 341
        self.FRAME_HEIGHT = 256
        self.flip_camera = True  # Mirror image
        self.camera = cv2.VideoCapture(1)

        # ...you can also use a test video for input
        #video = "/Users/matthiasendler/Code/snippets/python/tracker/final/assets/test_video/10.mov"
        #self.camera = cv2.VideoCapture(video)
        #self.skip_input(400) # Skip to an interesting part of the video

        if not self.camera.isOpened():
            print "couldn't load webcam"
            return
        #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.FRAME_WIDTH)
        #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.FRAME_HEIGHT)

        self.filters_dir = "filters/"  # Filter settings in trackbar
        self.filters_file = "filters_default"

        # Load filter settings
        current_config = self.filters_dir + self.filters_file
        self.filters = Filters(current_config)

        # No actions will be triggered in test mode
        # (can be used to adjust settings at runtime)
        self.test_mode = False

        # Create a hand detector
        # In fact, this is a wrapper for many detectors
        # to increase detection confidence
        self.detector = Detector(self.filters.config)

        # Knowledge base for all detectors
        self.kb = KB()

        # Create gesture recognizer.
        # A gesture consists of a motion and a hand state.
        self.gesture = Gesture()

        # The action module executes keyboard and mouse commands
        self.action = Action()

        # Show output of detectors
        self.output = Output()

        self.run()

    def run(self):
        """
        In each step: read the input image and keys,
        process them and react (e.g. with an action).
        """
        while True:
            img = self.get_input()
            hand = self.process(img)
            ref = self.action.get_reference_point()
            self.output.show(img, hand, ref)

    def process(self, img):
        """ Process input """
        # Run detection
        hand = self.detector.detect(img)
        # Store result in knowledge base
        self.kb.update(hand)
        if not self.test_mode:
            # Try to interpret as gesture
            self.interpret(hand)
        return hand

    def interpret(self, hand):
        """ Try to interpret the input as a gesture """
        self.gesture.add_hand(hand)
        operation = self.gesture.detect_gesture()
        self.action.execute(operation)

    def get_input(self):
        """ Get input from camera and keyboard """
        self.get_key()
        _, img = self.camera.read()
        img = cv2.resize(img, (self.FRAME_WIDTH, self.FRAME_HEIGHT))
        if self.flip_camera:
            img = cv2.flip(img, 1)
        return img

    def get_key(self):
        """ Read keyboard input """
        key = cv2.waitKey(self.filters.config["wait_between_frames"])
        if key == ord('+'):
            # Reduce program speed
            self.filters.config["wait_between_frames"] += 500
        if key == ord('-'):
            # Increase program speed
            if self.filters.config["wait_between_frames"] >= 500:
                self.filters.config["wait_between_frames"] -= 500
        #if key == ord('s'): # Save config
        #    self.filters.save()
        if key == ord('r'):
            # Reset all detectors
            self.detector.reset()
            self.action.reset()
        if key == ord('d'):
            # Make a screenshot
            self.output.make_screenshot()
        if key == ord('p') or key == ord(' '):
            # Pause
            cv2.waitKey()
        if key == ord('t'):
            # Test mode
            self.test_mode = not self.test_mode
        if key == ord('1'):
            self.output.toggle_estimate()
        if key == ord('2'):
            self.output.toggle_detectors()
        if key == ord('3'):
            self.output.toggle_skin()
        if key == ord('f'):
            self.toggle_filters()
        if key == 63235:
            # Right arrow
            self.skip_input(20)
        if key == 27 or key == ord('q'):
            # Abort program on ESC or q
            exit()

    def toggle_filters(self):
        """ Load the next filter settings """
        self.filters_file = self.next_filters_file()
        current_config = self.filters_dir + self.filters_file
        self.filters.set_config(current_config)

    def next_filters_file(self):
        """ Get the next filter settings """
        filters = listdir(self.filters_dir)
        for i, f in enumerate(filters):
            if f == self.filters_file:
                return filters[(i + 1) % len(filters)]

    def skip_input(self, x=1):
        """ Skip to a different part of a video sequence. """
        for i in range(0, x):
            self.camera.grab()
class Game:
    def __init__(self):
        self.define_contestants()
        self.number_of_rounds = 0
        self.gestures = Gesture(self.player_one, self.player_two)

    def define_contestants(self):
        # prompts the user to pick a human or AI opponent
        self.opening_statement()
        opponent = input("\nPress 1 to play against a Human or type anything else to play against an AI"
                         "\n >")
        if opponent == '1':
            self.player_one = Human()
            self.player_two = Human()
        else:
            self.player_one = Human()
            self.player_two = Ai()

    def run_game(self):
        self.player_one.select_name()
        self.player_two.select_name()
        self.battle()
        self.display_winner()

    def battle(self):
        win_limit = self.best_of()
        while self.player_one.score < win_limit and self.player_two.score < win_limit:
            self.player_one.pick_gesture(self.player_one.name)
            self.player_two.pick_gesture(self.player_two.name)
            self.display_gestures(self.player_one, self.player_two)
            while self.player_one.chosen_gesture == self.player_two.chosen_gesture:
                print("\nDraw! Choose again!")
                self.number_of_rounds += 1
                self.player_one.pick_gesture(self.player_one.name)
                self.player_two.pick_gesture(self.player_two.name)
                self.display_gestures(self.player_one, self.player_two)
            self.decide_round_winner()
            self.number_of_rounds += 1

    def best_of(self):
        # lets the user pick best-of-N rounds and converts that into the
        # score needed to win; falls back to 2 (best of 3) for inputs
        # below 3
        while True:
            try:
                chosen_best_of = input("\nBest of how many rounds? Minimum = 3"
                                       "\n>")
                chosen_best_of = int(chosen_best_of)
            except (ValueError, TypeError):
                print("\nMake sure you type a positive integer!")
                continue
            else:
                if chosen_best_of >= 3 and chosen_best_of % 2 == 0:
                    return (chosen_best_of // 2) + 1
                elif chosen_best_of >= 3 and chosen_best_of % 2 == 1:
                    return math.ceil(chosen_best_of / 2)
                else:
                    return 2

    def ready_check(self):
        pass

    def display_winner(self):
        if self.player_one.score > self.player_two.score:
            print("\n*************************************")
            print(f"{self.player_one.name} wins in {self.number_of_rounds} rounds!"
                  f"\nBetter luck next time {self.player_two.name}")
            print("*************************************")
        else:
            print("\n*************************************")
            print(f"{self.player_two.name} wins in {self.number_of_rounds} rounds!"
                  f"\nBetter luck next time {self.player_one.name}")
            print("*************************************")

    def opening_statement(self):
        # General opening lines to start the game
        print("\nWelcome to Rock, Paper, Scissors, Lizard, Spock!"
              "\n\nHere are the rules:"
              "\n\nRock crushes Scissors"
              "\nScissors cuts Paper"
              "\nPaper covers Rock"
              "\nRock crushes Lizard"
              "\nLizard poisons Spock"
              "\nSpock smashes Scissors"
              "\nScissors decapitates Lizard"
              "\nLizard eats Paper"
              "\nPaper disproves Spock"
              "\nSpock vaporizes Rock")

    def decide_round_winner(self):
        winner = self.gestures.define_winner(self.player_one.chosen_gesture,
                                             self.player_two.chosen_gesture)
        winner.score += 1
        print("--------------------------------------------------")
        print(f"{winner.name}'s score is: {winner.score}")

    def display_gestures(self, player1, player2):
        print(f"\n{player1.name} chooses {player1.chosen_gesture}!")
        print(f"{player2.name} chooses {player2.chosen_gesture}!")
        print("--------------------------------------------------")
#!/usr/bin/env python3
import os, sys, time
import numpy as np
from gesture import Gesture

pos_files = [x for x in os.listdir(os.getcwd()) if ".pos" in x]
print(pos_files)

gestures = []
for pos in pos_files:
    gestures.append(Gesture(pos))

for gesture in gestures:
    print(gesture.name, gesture.filename)

lock = time.time()
for line in sys.stdin:
    line = line.rstrip().split(',')
    if line[0] == 'ORT':
        for gesture in gestures:
            if gesture.in_position(float(line[1]), float(line[2]), float(line[3])):
                if time.time() - 1.5 > gesture.lock:
                    if gesture.name == 'time':
                        os.system("say it is now `date +%I`, `date +%M`")
                        print('time gesture')
                    elif gesture.name == 'dab':
class Server(Thread):
    """Thread-based server for midi input, output, and gesture processing"""

    def __init__(self, in_device, in_port, out_device, out_port):
        Thread.__init__(self)
        self.setDaemon(True)
        # ports
        self.in_port = in_port
        self.in_device = in_device
        self.in_name = in_device.getPortName(in_port)
        self.out_port = out_port
        self.out_device = out_device
        self.out_name = out_device.getPortName(out_port)
        # gesture
        self.gesture = None
        self.g_start = None
        self.g_last = None
        self.g_ongoing = False
        self.g_break = 1.0
        # state
        self.sustained = [False] * 120
        self.pedal = False
        self.quit = False

    def run(self):
        """The server's main loop"""
        Xs = []
        self.in_device.openPort(self.in_port)
        self.out_device.openPort(self.out_port)
        self.in_device.ignoreTypes(True, False, True)
        while True:
            if self.quit:
                return
            msg = self.in_device.getMessage()
            if msg:
                self.add_to_gesture(msg)
            if self.g_ongoing and not any(self.sustained) and not self.pedal \
                    and time.time() - self.g_last > self.g_break:
                self.g_ongoing = False
                c = Collecture(self.gesture)
                c = randomize(c)
                c.play(self.out_device)
                # compress and decompress gesture
                # g = Gesture(self.gesture.dist_vector(), "dist")
                # play decompressed vector
                # g.play(self.out_device)

    def add_to_gesture(self, msg):
        """Add messages to the ongoing gesture"""
        # start a new gesture if one is not ongoing
        if not self.g_ongoing:
            self.gesture = Gesture()
            self.g_start = time.time()
            self.g_ongoing = True
        # set sustain pedal control
        if msg.isController() and msg.getControllerNumber() == 64:
            if msg.getControllerValue() == 127:
                self.pedal = True
            else:
                self.pedal = False
        # mark note as sustained or completed
        if msg.isNoteOn():
            self.sustained[msg.getNoteNumber()] = True
        if msg.isNoteOff():
            self.sustained[msg.getNoteNumber()] = False
        # add midi message and time since start to gesture
        self.gesture.add_message(msg, time.time() - self.g_start)
        # record event time as last to inform gesture ending
        self.g_last = time.time()
        print_message(msg, self.in_name)

    def end(self):
        self.quit = True
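# Hedged usage sketch (not from the original source): the calls the Server
# makes (openPort, ignoreTypes, getMessage, getPortName, isController, ...)
# match the pyrtmidi bindings (rtmidi.RtMidiIn / RtMidiOut), so construction
# would look roughly like this. Port number 0 is an arbitrary example, and
# the module's helpers (Collecture, randomize, print_message) are assumed
# to be importable alongside Server.
import time
import rtmidi

midi_in = rtmidi.RtMidiIn()
midi_out = rtmidi.RtMidiOut()
server = Server(midi_in, 0, midi_out, 0)
server.start()  # runs the main loop on the daemon thread
time.sleep(60)  # collect and play back gestures for a minute
server.end()    # sets self.quit, letting run() return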
import os
from datalist import DataList
from names import shortNames
from gesture import Gesture
from matplotlib import pyplot as plt

name = "dtap"
gest = Gesture("data/40_" + name + ".gest")
gest.getFeatureSet()
plt.show()

# dl = DataList()
# name = "waveout"
# for i in range(7, 17):
#     gest = Gesture("data/"+str(i)+"_"+name+".gest")
#     gest.plotSpecgram()
#     print str(i) + ":" + str(gest.getFeatureSet()[80:])
#     plt.show()
# for i in range(17, 27):
#     gest = Gesture("data/"+str(i)+"_"+name+".gest")
#     gest.plotSpecgram()
#     print str(i) + ":" + str(gest.getFeatureSet()[80:])
#     plt.show()
# for i in range(27, 37):
#     gest = Gesture("data/"+str(i)+"_"+name+".gest")
class Game:
    def __init__(self):
        self.gesture = Gesture()
        self.run()

    def run(self):
        done = False
        self.reset()
        while not done:
            self.takeTurn()
            if self.gameOver():
                value = input("Would you like to play again? y/n\n")
                while value != "y" and value != "n":
                    os.system('cls')
                    value = input("Would you like to play again? y/n\n")
                if value == 'n':
                    done = True
                else:
                    self.reset()
            else:
                print(f"Player1: {self.p1}, Player2: {self.p2}. {self.rtw} points to win")
                input("Press enter to continue\n")

    def gameOver(self):
        if self.p1 == self.rtw:
            print("Player 1 is the winner!\n")
            return True
        elif self.p2 == self.rtw:
            print("Player 2 is the winner!\n")
            return True
        return False

    def reset(self):
        os.system('cls')
        self.welcome()
        self.vsNPC = self.selectMode()
        self.rtw = self.roundsToWin()
        self.p1 = 0
        self.p2 = 0

    def takeTurn(self):
        os.system('cls')
        p1_action = self.player1.takeAction()
        os.system('cls')
        p2_action = self.player2.takeAction()
        os.system('cls')
        print(f"player1 chose {p1_action}, player2 chose {p2_action}.")
        result = self.gesture.compare_value(p1_action, p2_action)
        print(result[1])
        if result[0] == -1:
            self.p1 += 1
            print("Player 1 wins this round")
        elif result[0] == 1:
            self.p2 += 1
            print("Player 2 wins this round")

    def roundsToWin(self):
        os.system('cls')
        value = input("Please enter a number 2-9 for number of rounds to win\n")
        valid = set(list("23456789"))
        while value not in valid:
            os.system('cls')
            value = input("Invalid input: Please enter a number 2-9 for number of rounds to win\n")
        return int(value)

    def selectMode(self):
        value = input("Please enter 1 for vs npc, or 2 for player vs player\n")
        while value != "1" and value != "2":
            os.system('cls')
            value = input("Invalid input. Please enter 1 for vs npc, or 2 for player vs player\n")
        self.vsNPC = value == "1"
        self.player1 = HumanPlayer(1)
        if self.vsNPC:
            self.player2 = NPC(2)
        else:
            self.player2 = HumanPlayer(2)
        # return the mode so reset() stores the same flag
        return self.vsNPC

    def welcome(self):
        print("Welcome to Rock Paper Scissors Lizard Spock!")
        print("Rock crushes Scissors, Scissors cuts Paper, Paper covers Rock")
        print("Rock crushes Lizard, Lizard poisons Spock, Spock smashes Scissors")
        print("Scissors decapitates Lizard, Lizard eats Paper, Paper disproves Spock,")
        print("and Spock vaporizes Rock")
        print()
# read from file
with open('../data/Data/0' + str(i) + '/glasses/raw_2_0_unsync.txt') as f:
    x = f.readlines()

data = []
for e in x:
    data.append(map(float, e[:-1].split(' ')[1:-1]))
#print len(data), len(data[0]), data[0]

# --- get a gesture chunk ---
d_obj = Descent()
g_obj = Gesture()
swipe_threshold = int(d_obj.median(data[0])) * 0.80
touch_threshold = int(d_obj.median(data[0])) * 0.10
#swipe_threshold, touch_threshold = d_obj.auto_thresh(data)

y = g_obj.gd(data, swipe_threshold, 2)
if len(y) > 3:
    print '~~~~\nSwipe Detected, Swipe gesture direction\n'
    print len(y), y
else:
    y = g_obj.gd_double_touch(data, touch_threshold, 0)
    if y:
        print '~~~~\nTouch Detected, index of touch\n'
        print len(y), y
from gesture import Gesture

# g = Gesture.from_file("круг.json")
#
# print('g', g)
# print('gm', g.measurements)
# print('proj', g.select_proj_on_plane())
# print('res', g.select_proj_2d())

import matplotlib.pyplot as plt

g = Gesture.from_file("cirl_test.json")[0]
# g.draw_accel_3d()
# g.draw_accel_3d(with_surf=True)
# g.to_image()
g.predict()

# data = g.select_proj_2d()
# plt.scatter(data[:, 0], data[:, 1])
# plt.show()
def __init__(self):
    self.process_manager = ProcessManager()
    self.fist = Gesture("fist.xml")
    self.palm = Gesture("palm.xml")
import numpy as np
import os
from names import *
from gesture import Gesture

print("gesture|avg |had |u |d |l |r ")
print("-------+----+----+--+--+--+--")
for name in shortNames:
    total = 0
    had = 0
    angles = [0, 0, 0, 0]  # up, down, left, right
    for i in range(7, 37):
        # Get gestures
        gest = Gesture("data/" + str(i) + "_" + name + ".gest", True)
        # Get the total number of direction samples
        total += gest.numDirSamples
        # If there were any, increment the 'had' counter
        if gest.numDirSamples > 0:
            had += 1
        # Get general directions
        for x in range(gest.numDirSamples):
            angle = gest.sgestures[x][2]
            if angle >= 45 and angle < 135:
                angles[3] += 1
            elif angle >= 135 and angle < 225:
                angles[0] += 1
            elif angle >= 225 and angle < 315:
                angles[2] += 1
def analyse(bw, image):
    """ Main logic. """
    bw = cv2.pyrUp(bw)
    contours, hierarchy = cv2.findContours(bw, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    biggest = find_biggest_contour(contours)
    if biggest != -1:
        gesture = Gesture()
        gesture.contours = contours
        gesture.biggest = biggest
        gesture.bounding = bounding_rect(contours[biggest])
        gesture.hull_p = cv2.convexHull(contours[biggest])
        gesture.hull_i = cv2.convexHull(contours[biggest], returnPoints=False)
        gesture.hull_p = cv2.approxPolyDP(gesture.hull_p, 18, True)
        if len(contours[biggest]) > 3:
            gesture.defects = cv2.convexityDefects(contours[biggest],
                                                   gesture.hull_i)
            gesture.check_convexity()
        is_hand = gesture.is_hand()
        if is_hand:
            global counter, clicked
            dx = abs(gesture.bounding[0] - gesture.bounding[2])
            dy = abs(gesture.bounding[1] - gesture.bounding[3])
            #print abs(dx - dy)
            #print dy * 0.3
            # a roughly square bounding box (|dx - dy| below 30% of dy)
            # is treated as a click pose
            thresh = dy * 0.3
            if abs(dx - dy) < thresh and counter == 0:
                click()
                print "click"
                counter += 1
            if abs(dx - dy) < thresh:
                counter += 1
            if counter == 20:
                counter = 0
            if abs(dx - dy) > thresh:
                sh = bw.shape
                width = sh[0]
                height = sh[1]
                move_mouse(gesture.get_center(), height, width)
        draw_contours(image, gesture)
    return image
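# Hedged usage sketch (not from the original source): analyse() expects a
# binary mask plus the color frame it came from, and the two-value
# findContours call above implies OpenCV 2.x. The HSV skin range below is
# illustrative only; tune it for your lighting and camera.
import numpy as np
import cv2

camera = cv2.VideoCapture(0)
while True:
    _, frame = camera.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # rough skin-tone band
    bw = cv2.inRange(hsv, np.array([0, 48, 80]), np.array([20, 255, 255]))
    cv2.imshow("tracker", analyse(bw, frame))
    if cv2.waitKey(10) == 27:  # ESC quits
        break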
def load(self):
    self.gestures = {}
    for name in shortNames:
        self.gestures[name] = Gesture(
            "data/" + str(self.id) + "_" + name + ".gest", True)