def recognize(self, contours): # using maximal area and convexity defect depths to # recognize between palm and fist. x, y, r, b = im.find_max_rectangle(contours) max_area, contours = im.max_area(contours) print 'area: ', float(max_area)/((r-x)*(b-y)) hull = im.find_convex_hull(contours) mean_depth = 0 if hull: cds = im.find_convex_defects(contours, hull) if len(cds) != 0: mean_depth = sum([cd[3] for cd in cds])/len(cds) if not self.isFist(max_area, mean_depth) and not self.isPalm(max_area, mean_depth): if self.gestures_buffer: self.gestures_buffer.pop(0) return Gesture('Not Sure'), max_area, mean_depth # The majority of votes in self.gestures_buffer will # determine which gesture should be in this frame FIST = 0 PALM = 1 if self.isFist(max_area, mean_depth): self.gestures_buffer.append(FIST) elif self.isPalm(max_area, mean_depth): self.gestures_buffer.append(PALM) self.gestures_buffer = self.gestures_buffer[-self.buffer_size:] avg = float(sum(self.gestures_buffer))/self.buffer_size if avg > 0.5: ges = 'Palm' else: ges = 'Fist' return Gesture(ges, 'Short'), max_area, mean_depth
def recognize(self, contours): # using maximal area and convexity defect depths to # recognize between palm and fist. x, y, r, b = im.find_max_rectangle(contours) max_area, contours = im.max_area(contours) print 'area: ', float(max_area) / ((r - x) * (b - y)) hull = im.find_convex_hull(contours) mean_depth = 0 if hull: cds = im.find_convex_defects(contours, hull) if len(cds) != 0: mean_depth = sum([cd[3] for cd in cds]) / len(cds) if not self.isFist(max_area, mean_depth) and not self.isPalm( max_area, mean_depth): if self.gestures_buffer: self.gestures_buffer.pop(0) return Gesture('Not Sure'), max_area, mean_depth # The majority of votes in self.gestures_buffer will # determine which gesture should be in this frame FIST = 0 PALM = 1 if self.isFist(max_area, mean_depth): self.gestures_buffer.append(FIST) elif self.isPalm(max_area, mean_depth): self.gestures_buffer.append(PALM) self.gestures_buffer = self.gestures_buffer[-self.buffer_size:] avg = float(sum(self.gestures_buffer)) / self.buffer_size if avg > 0.5: ges = 'Palm' else: ges = 'Fist' return Gesture(ges, 'Short'), max_area, mean_depth
def mainLoop():
    """Capture loop: read frames (camera or file), detect skin/contours,
    recognize gestures, and authenticate against a grammar file.

    Exits when the video ends, a quit key is handled, or the entered
    gesture sequence matches the answer grammar.
    """
    input_video_fn = get_input_video_filename()
    print 'input video filename:', input_video_fn
    # Setting up the window objects and environment
    # (legacy OpenCV 1.x `cv` API throughout this function).
    proc_win_name = "Processing window"
    cam_win_name = "Capture from camera"
    proc_win = cv.NamedWindow(proc_win_name, 1)
    cam_win = cv.NamedWindow(cam_win_name, 1)
    # Source is a video file when one was given on the command line,
    # otherwise the first attached camera.
    if input_video_fn:
        cam = cv.CaptureFromFile(input_video_fn)
    else:
        cam = cv.CaptureFromCAM(0)
    cv.SetMouseCallback(proc_win_name, handle_mouse)
    cv.SetMouseCallback(cam_win_name, handle_mouse)
    # Per-frame WaitKey delay in milliseconds.
    msdelay = 3
    # Initial trackbar positions for the skin-detector thresholds.
    initHueThreshold = 42
    initIntensityThreshold = 191
    skin_detector = skin.SkinDetector()
    skin_detector.setHueThreshold(initHueThreshold)
    skin_detector.setIntensityThreshold(initIntensityThreshold)
    # Trackbars let the user tune the thresholds live; the setter
    # methods are used directly as the change callbacks.
    cv.CreateTrackbar('hueThreshold',
                      proc_win_name,
                      initHueThreshold,
                      255,
                      skin_detector.setHueThreshold)
    cv.CreateTrackbar('intensityThreshold',
                      proc_win_name,
                      initIntensityThreshold,
                      255,
                      skin_detector.setIntensityThreshold)
    session = ImageProcessSession(skin_detector)
    ga = gesture.GestureAnalyzer()
    grammar = Grammar()
    gfn = get_grammar_filename()
    # The grammar file is mandatory; bail out with usage text otherwise.
    if not gfn:
        print 'usage: python GestureLock.py -g grammar_file.gmr'
        exit(0)
    answer_grammer = read_grammar(gfn)
    # Two writers: one for raw input frames, one for annotated
    # contour frames (hard-coded to folder 'out2').
    im_orig_writer = ImageWriter(output_folder=get_output_folder())
    im_contour_writer = ImageWriter(output_folder='out2')
    # NOTE(review): `prev` appears to act as a short frame-delay queue
    # so text is drawn onto a frame two iterations old -- confirm.
    prev = []
    while True:
        k = cv.WaitKey(msdelay)
        # WaitKey returns a negative value when no key was pressed;
        # map that case to 0 and otherwise convert to a character.
        k = chr(k) if k > 0 else 0
        if handle_keyboard(k) < 0:
            break
        bgrimg = cv.QueryFrame(cam)
        # A null frame means the stream/video is exhausted.
        if not bgrimg:
            break
        im_orig_writer.write(bgrimg)
        # Mirror horizontally, in place, so the display matches the
        # user's movements.
        cv.Flip(bgrimg, None, 1)
        contours = session.process(bgrimg)
        # Fresh 8-bit 3-channel canvas for the annotated output frame.
        img = cv.CreateImage((bgrimg.width, bgrimg.height), 8, 3)
        if contours:
            ges, area, depth = ga.recognize(contours)
            x, y, r, b = im.find_max_rectangle(contours)
            cv.Rectangle(img, (x,y), (r, b), im.color.RED)
            cv.DrawContours(img, contours, im.color.RED, im.color.GREEN, 1,
                            thickness=3)
            print ges
            # Feed the recognized gesture into the grammar and get the
            # sequence entered so far.
            currentInput = grammar.instantGes(ges)
            print currentInput
            # Once the delay queue holds two frames, annotate and flush
            # the oldest one, then rotate the queue.
            if len(prev)>=2:
                for i,g in enumerate(currentInput):
                    im.puttext(prev[0], str(g), 30, 70+40*i)
                im_contour_writer.write(prev[0])
                prev.append( img )
                prev.pop(0)
            else:
                prev.append( img )
            # Entered sequence matches the answer grammar: annotate,
            # record the success frames, and leave the loop.
            if grammar == answer_grammer:
                for i,g in enumerate(currentInput):
                    im.puttext(prev[0], str(g), 30, 70+40*i)
                im_contour_writer.write(prev[0])
                im.puttext(prev[0], 'AUTHENTICATED!', 30,
                           70+40*len(currentInput))
                im_contour_writer.write(prev[0])
                print 'AUTHENTICATED!!!!'
                break
        cv.ShowImage(proc_win_name, img)