def build_list(self):
    """Instantiate all gestures and their relationships, then build the
    parallel name list.

    Each Gestures entry carries (name, index, opponents, verbs) where
    opponents/verbs indexes 0 and 1 are wins and 2 and 3 are losses
    (the verb is what the *winning* gesture does, e.g. Rock crushes
    Lizard and Scissors; Spock vaporizes and Paper covers Rock).

    Returns:
        self, so the call can be chained after construction.
    """
    self.gesture_obj_list.append(Gestures('Rock', 0, ['Lizard', 'Scissors', 'Spock', 'Paper'], ['crushes', 'crushes', 'vaporizes', 'covers']))
    self.gesture_obj_list.append(Gestures('Paper', 1, ['Rock', 'Spock', 'Lizard', 'Scissors'], ['covers', 'disproves', 'eats', 'cuts']))
    self.gesture_obj_list.append(Gestures('Scissors', 2, ['Lizard', 'Paper', 'Rock', 'Spock'], ['decapitates', 'cuts', 'crushes', 'smashes']))
    self.gesture_obj_list.append(Gestures('Lizard', 3, ['Paper', 'Spock', 'Rock', 'Scissors'], ['eats', 'poisons', 'crushes', 'decapitates']))
    # Fix: Spock's loss verbs were empty strings; per the scheme used by
    # every other entry, Lizard poisons Spock and Paper disproves Spock.
    self.gesture_obj_list.append(Gestures('Spock', 4, ['Rock', 'Scissors', 'Lizard', 'Paper'], ['vaporizes', 'smashes', 'poisons', 'disproves']))
    # Now build the name list
    for gesture in self.gesture_obj_list:
        self.gesture_name_list.append(gesture.gesture_name)
    return self
def FindHandFromTrack(self):
    """Re-locate the tracked hand near its last known position using an
    HSV window centred on the previously sampled hand colour.

    On success updates the track (gesture, timestamp, HSV sample) and
    sets self.foundHandGesture; on failure only clears the found flag.
    """
    # 5x5 averaging kernel, reused for both the blur and the opening.
    avg_kernel = np.ones((5, 5), np.float32) / 25
    smoothed = cv2.filter2D(self.currFrame.copy(), -1, avg_kernel)
    hsv = cv2.cvtColor(smoothed, cv2.COLOR_BGR2HSV)

    # Build the HSV search window around the last sampled hand colour.
    dec = constant.HSVConf['hsv_hand_dec']
    inc = constant.HSVConf['hsv_hand_inc']
    self.hand_lower_blue = self.AddValueToColorArray(
        [-dec[0], -dec[1], -dec[2]], self.HSVHandPoint.copy())
    self.hand_upper_blue = self.AddValueToColorArray(
        [inc[0], inc[1], inc[2]], self.HSVHandPoint.copy())

    # Threshold to the hand colours, then open to drop small artifacts.
    mask = cv2.inRange(hsv, self.hand_lower_blue, self.hand_upper_blue)
    self.workedMask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, avg_kernel)

    candidate = Gestures()
    located = candidate.FindHandFromMaskAndPositions(
        self.workedMask.copy(),
        self.trackedHandGesture.CenterX,
        self.trackedHandGesture.CenterY)
    if located is False:
        self.foundHandGesture = False
    else:
        # Refresh the track and re-sample the hand colour at its centre.
        self.timeSinceFoundHandTracked = time.time()
        self.trackedHandGesture = candidate
        self.HSVHandPoint = hsv[self.trackedHandGesture.CenterY][
            self.trackedHandGesture.CenterX]
        self.foundHandGesture = True
def __init__(self, **kwargs):
    """Build the OSC client and, when requested, gesture recognition.

    Keyword args:
        ip: OSC host (default b'localhost').
        port: OSC port (default 8003).
        gestures: truthy to enable the Gestures helper (default 0).
        specific_message: message-format flag (default 0).
    """
    get = kwargs.get
    self.ip = get('ip', b'localhost')
    self.port = get('port', 8003)
    self.gest = get('gestures', 0)
    self.specific_message = get('specific_message', 0)
    # The client is shared with the Gestures helper when enabled.
    self.clt = OscClt(self.ip, self.port)
    if self.gest:
        self.gestures = Gestures(self.clt)
def TryHandTrack(self):
    """Scan the current frame for a palm, sweeping the HSV value (brightness)
    lower bound downward until a palm is found or the floor (15) is reached.

    On success stores the tracked gesture, timestamp and sampled hand colour
    and sets self.foundHandGesture; otherwise clears the flag.
    """
    handSearching = Gestures()
    # Hoisted loop invariants: the frame blur, its HSV conversion and the
    # upper HSV bound never change across the brightness sweep, so compute
    # them once instead of once per iteration.
    kernelMask = np.ones((5, 5), np.float32) / 25
    blurredMask = cv2.filter2D(self.currFrame.copy(), -1, kernelMask)
    hsv = cv2.cvtColor(blurredMask, cv2.COLOR_BGR2HSV)
    upper_blue = np.array([
        constant.HSVConf['hsv_palm_max'][0],
        constant.HSVConf['hsv_palm_max'][1],
        constant.HSVConf['hsv_palm_max'][2]
    ])
    lower_blue_brightness = constant.HSVConf['hsv_palm_max'][2]
    while lower_blue_brightness > 15:
        # Range of palm colours in HSV; only the brightness floor varies.
        lower_blue = np.array([
            constant.HSVConf['hsv_palm_min'][0],
            constant.HSVConf['hsv_palm_min'][1],
            lower_blue_brightness
        ])
        # Threshold the HSV image, then open to remove small artifacts.
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        self.workedMask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelMask)
        # Debug palm detection
        if self.gestPalmDebug:
            cv2.imshow('Mask from HSV Range', self.workedMask)
            cv2.waitKey(5)
        search_hand_mask = self.workedMask.copy()
        foundPalm = handSearching.PalmGestureSearchFromMask(search_hand_mask)
        if foundPalm:
            # Record the tracked hand and sample its colour at the centre.
            self.trackedHandGesture = handSearching
            self.timeSinceFoundHandTracked = time.time()
            self.HSVHandPoint = hsv[self.trackedHandGesture.CenterY][
                self.trackedHandGesture.CenterX]
            self.foundHandGesture = True
            return
        lower_blue_brightness -= 10
    self.foundHandGesture = False
def compare_result_improved(self):
    """Score one round between the two players and display the outcome.

    Queries gesture one for a winning verb; if none ("None"), player two
    won, so the round is re-queried from player two's perspective.
    """
    p1, p2 = self.player_one, self.player_two
    g1 = Gestures(p1.choice)
    g2 = Gestures(p2.choice)
    verb = g1.result(p1.choice, p2.choice)
    if verb == "None":
        # Player one did not win: credit player two and fetch its verb.
        p2.wins += 1
        verb = g2.result(p2.choice, p1.choice)
        self.display_round_results(p2.name, p2.choice, p1.name, p1.choice, verb)
    else:
        p1.wins += 1
        self.display_round_results(p1.name, p1.choice, p2.name, p2.choice, verb)
def TakeGesture(self):
    """Return the current gesture, re-tracking or re-scanning the hand.

    Keeps following the last tracked hand for up to one second after it
    was last seen; after that the track is dropped and a full palm scan
    is attempted instead.
    """
    elapsed = time.time() - self.timeSinceFoundHandTracked
    recently_tracked = elapsed < 1
    if not recently_tracked:
        # Track went stale: forget the previous hand.
        self.trackedHandGesture = None
    if recently_tracked or self.trackedHandGesture is not None:
        self.FindHandFromTrack()
    else:
        self.TryHandTrack()
    if self.foundHandGesture:
        self.currentGesture = self.trackedHandGesture
    else:
        # Nothing found: hand back an empty gesture.
        self.currentGesture = Gestures()
    # A palm re-scan is needed once the track has been lost for >1s.
    self.currentGesture.features['gestNeedPalm'] = not recently_tracked
    self.SetTimeSameGesture()
    return self.currentGesture
def __init__(self):
    """Set up default rendering state, URL prefixes and gestures for the
    markdown view."""
    # Styling defaults.
    self.css = self.default_css
    self.extras = []
    self.font = ('Helvetica', 12)
    self.text_color = (.02, .19, .52, 1)
    self.highlight_color = (1, 1, 1, 1)
    self.alignment = None
    self.margins = (10, 10, 10, 10)
    # Callbacks and transient state.
    self.click_func = None
    self.scroll_pos = None
    self.in_doc_prefix = None
    self.last_contents = None
    # Custom URL scheme fragments used to route events from the web view.
    self.link_prefix = 'pythonista-markdownview:relay?content='
    self.debug_prefix = 'pythonista-markdownview:debug?content='
    self.init_postfix = '#pythonista-markdownview-initialize'
    self.scroll_prefix = 'pythonista-markdownview:scroll?content='
    self.delegate = self
    # Two-finger long press triggers the double_tap handler.
    Gestures().add_long_press(self, self.double_tap, number_of_touches_required=2)
def __init__(self, content=None, parent=AppWindows.root(), *args, **kwargs):
    """Build the overlay: toolbar (close / minimize / zoom), content area,
    resize grip, cascading position, and pan-gesture wiring.

    Args:
        content: optional ui.View placed inside the content area; the
            overlay frame is derived from its bounds plus toolbar height.
        parent: window the overlay attaches to.
    """
    if content:
        kwargs['frame'] = content.bounds.inset(-self.TOOLBAR_HEIGHT, 0, 0, 0)
    self.corner_radius = 5
    ui.View.__init__(self, *args, **kwargs)
    self._pt = None
    self.actions = {}
    # Remember the un-minimized size so zoom/toggle can restore it.
    self.w0 = self.width
    self.h0 = self.height
    # --- toolbar ---
    H = self.TOOLBAR_HEIGHT
    toolbar = ui.View(frame=(0, 0, self.width, self.TOOLBAR_HEIGHT),
                      bg_color=(.7, .8, .8, .8))
    toolbar.flex = 'w'
    self.add_subview(toolbar)
    close = ui.Button(frame=(0, 0, H, H))
    close.image = ui.Image.named('iob:close_32')
    close.action = self.remove
    toolbar.add_subview(close)
    minimize = ui.Button(frame=(self.width, 0, -H, H))
    minimize.image = ui.Image.named('iob:chevron_down_32')
    toolbar.add_subview(minimize)
    minimize.flex = 'L'
    minimize.action = self.toggle
    zoom = ui.Button(frame=(minimize.x - 8, 0, -H, H))
    zoom.image = ui.Image.named('iob:arrow_expand_32')
    zoom.flex = 'L'
    zoom.action = self.zoom
    toolbar.add_subview(zoom)
    self.lbl = ui.Label(frame=(H + 8, 0, zoom.x - 2 * 8 - 2 * H, H), flex='wr')
    toolbar.add_subview(self.lbl)
    # --- content area ---
    self.content_view = \
        ui.View(frame=(0, H, self.width, self.height - H),
                flex='wh', bg_color=(.9, .99, .99, .8))
    self.content_view.touch_enabled = False
    self.content_view.content_mode = ui.CONTENT_BOTTOM
    self.add_subview(self.content_view)
    # --- resize grip (bottom-right corner) ---
    self.resize = ui.ImageView(frame=(self.width - H, self.height - H, H, H))
    self.resize.image = ui.Image.named('iob:arrow_resize_32')
    self.resize.transform = ui.Transform.rotation(pi / 2.)
    self.resize.flex = 'tl'
    self.add_subview(self.resize)
    self.resize.send_to_back()
    self.content_view.send_to_back()
    self.resizing = False
    # Cascade successive overlays down-right. Fix: y previously read
    # last_offset[0] (the x slot), so the tracked y offset was ignored.
    self.x = self.last_offset[0]
    self.y = self.last_offset[1]
    self.last_offset[0] += H
    self.last_offset[1] += H
    if content:
        self.content_view.add_subview(content)
        content.flex = 'wh'
        self.lbl.text = content.name
    # Attach to the top window and wire up drag handling.
    self.parent = parent
    self.attach()
    self.g = Gestures(delegate=self)
    self.g.add_pan(self, self.handle_touch_moved)
class Overlay(ui.View, OverlayEvent):
    '''Overlay class. User adds content to content_view, or as argument.

    A small floating window with a toolbar (close / minimize / zoom),
    a content area, a bottom-right resize grip, and pan-based dragging.
    '''
    TOOLBAR_HEIGHT = 20
    # Class-level cascade position shared by all overlays so each new one
    # opens slightly offset from the previous.
    last_offset = [60, 20]

    def __new__(cls, content=None, parent=AppWindows.root(), *args, **kwargs):
        # Refuse to create an overlay without both content and a parent.
        if not content or not parent:
            return None
        else:
            return ui.View.__new__(cls, content=content, parent=parent,
                                   *args, **kwargs)

    def recognizer_should_simultaneously_recognize(self, gr, othergr):
        # Gestures delegate hook: never let two recognizers fire together.
        return False

    def __init__(self, content=None, parent=AppWindows.root(), *args, **kwargs):
        """Build the overlay chrome, position it, and wire up gestures."""
        if content:
            kwargs['frame'] = content.bounds.inset(-self.TOOLBAR_HEIGHT, 0, 0, 0)
        self.corner_radius = 5
        ui.View.__init__(self, *args, **kwargs)
        self._pt = None
        self.actions = {}
        # Remember the un-minimized size so zoom/toggle can restore it.
        self.w0 = self.width
        self.h0 = self.height
        # --- toolbar ---
        H = self.TOOLBAR_HEIGHT
        toolbar = ui.View(frame=(0, 0, self.width, self.TOOLBAR_HEIGHT),
                          bg_color=(.7, .8, .8, .8))
        toolbar.flex = 'w'
        self.add_subview(toolbar)
        close = ui.Button(frame=(0, 0, H, H))
        close.image = ui.Image.named('iob:close_32')
        close.action = self.remove
        toolbar.add_subview(close)
        minimize = ui.Button(frame=(self.width, 0, -H, H))
        minimize.image = ui.Image.named('iob:chevron_down_32')
        toolbar.add_subview(minimize)
        minimize.flex = 'L'
        minimize.action = self.toggle
        zoom = ui.Button(frame=(minimize.x - 8, 0, -H, H))
        zoom.image = ui.Image.named('iob:arrow_expand_32')
        zoom.flex = 'L'
        zoom.action = self.zoom
        toolbar.add_subview(zoom)
        self.lbl = ui.Label(frame=(H + 8, 0, zoom.x - 2 * 8 - 2 * H, H),
                            flex='wr')
        toolbar.add_subview(self.lbl)
        # --- content area ---
        self.content_view = \
            ui.View(frame=(0, H, self.width, self.height - H),
                    flex='wh', bg_color=(.9, .99, .99, .8))
        self.content_view.touch_enabled = False
        self.content_view.content_mode = ui.CONTENT_BOTTOM
        self.add_subview(self.content_view)
        # --- resize grip (bottom-right corner) ---
        self.resize = ui.ImageView(frame=(self.width - H, self.height - H, H, H))
        self.resize.image = ui.Image.named('iob:arrow_resize_32')
        self.resize.transform = ui.Transform.rotation(pi / 2.)
        self.resize.flex = 'tl'
        self.add_subview(self.resize)
        self.resize.send_to_back()
        self.content_view.send_to_back()
        self.resizing = False
        # Cascade successive overlays down-right. Fix: y previously read
        # last_offset[0] (the x slot), so the tracked y offset was ignored.
        self.x = self.last_offset[0]
        self.y = self.last_offset[1]
        self.last_offset[0] += H
        self.last_offset[1] += H
        if content:
            self.content_view.add_subview(content)
            content.flex = 'wh'
            self.lbl.text = content.name
        # Attach to the top window and wire up drag handling.
        self.parent = parent
        self.attach()
        self.g = Gestures(delegate=self)
        self.g.add_pan(self, self.handle_touch_moved)

    def handle_touch_moved(self, data):
        """Translate pan-gesture callbacks into touch_moved/touch_ended calls.

        The gesture reports cumulative translation, so the delta since the
        previous callback is derived from the stored last data point.
        """
        if hasattr(self, 'data'):
            dt = data.translation - self.data.translation
        else:
            dt = data.translation
        self.touch_moved(Touch(data.location, data.location - dt, data.state))
        self.data = data
        if data.state == Gestures.ENDED:
            # Reset the stored translation so the next pan starts from zero.
            self.data.translation = ui.Point(0, 0)
            self.touch_ended(Touch(data.location, data.location - dt,
                                   data.state))

    def connect(self, event, callback):
        """Register a callback for an overlay event (resize/close/...)."""
        try:
            self.actions[event].append(callback)
        except (KeyError, AttributeError):
            self.actions[event] = [callback]

    def draw(self):
        # Visual feedback: a small square at the active touch point.
        if self._pt:
            ui.fill_rect(self._pt[0] - 10, self._pt[1] - 10, 20, 20)

    def zoom(self, sender):
        """Toggle between the remembered full size and a small fixed size."""
        if self.width < self.w0:
            # Currently minimized: restore, un-hiding the bar-only state first.
            if self.height == self.TOOLBAR_HEIGHT:
                self.toggle(self)
            self.width = self.w0
            self.height = self.h0
            self.content_view.frame = (0, self.TOOLBAR_HEIGHT, self.width,
                                       self.height - self.TOOLBAR_HEIGHT)
        else:
            self.width = 200
            self.height = 75 + self.TOOLBAR_HEIGHT
            self.content_view.frame = (0, self.TOOLBAR_HEIGHT, self.width,
                                       self.height - self.TOOLBAR_HEIGHT)
        if not self.content_view.superview and self.height > self.TOOLBAR_HEIGHT:
            self.add_subview(self.content_view)
            self.content_view.send_to_back()

    def toggle(self, sender):
        """Collapse the overlay to its toolbar, or reveal it again."""
        if self.height == self.TOOLBAR_HEIGHT:
            # reveal
            self.height = self.h0
            sender.image = ui.Image.named('iob:chevron_down_32')
            self.content_view.frame = (0, self.TOOLBAR_HEIGHT, self.width,
                                       self.height - self.TOOLBAR_HEIGHT)
            if not self.content_view.superview:
                self.add_subview(self.content_view)
                self.content_view.send_to_back()
            self.process_events(self.EVENT_RESIZE)
        else:
            # hide
            sender.image = ui.Image.named('iob:chevron_up_32')
            self.remove_subview(self.content_view)
            self.height = self.TOOLBAR_HEIGHT

    def touch_began(self, touch):
        """Start a drag or a resize depending on where the touch landed."""
        l = list(touch.location)
        self._pt = l
        self.set_needs_display()
        if ui.Point(*l) in self.resize.frame:
            self.resizing = True
        else:
            self.resizing = False
            self.parent.bringSubviewToFront_(self)

    def touch_moved(self, touch):
        """Resize (bottom-corner drag) or move the overlay by the delta."""
        l = touch.location
        p = touch.prev_location
        t = l - p
        if self.resizing:
            # Touching the bottom corner: update size, limiting width and
            # height to reasonable minimums.
            self.width += max(-self.width + 3.5 * self.TOOLBAR_HEIGHT, t.x)
            self.height += max(-self.height + 2 * self.TOOLBAR_HEIGHT, t.y)
            self.w0 = self.width
            self.h0 = self.height
        else:
            self.x += t.x
            self.y += max(-self.y + 20, t.y)  # do not push above status bar
        self.alpha = .5
        self.set_needs_display()

    def process_events(self, evt):
        """Invoke every callback registered for the given event."""
        try:
            callbacks = self.actions.get(evt, [])
        except AttributeError:
            callbacks = []
        for c in callbacks:
            c(self)

    def touch_ended(self, touch):
        """Finish a drag/resize: restore opacity and fire resize callbacks."""
        self._pt = None
        if self.resizing:
            self.process_events(self.EVENT_RESIZE)
        self.alpha = 1.
        self.set_needs_display()

    def remove(self, sender):
        """Detach the overlay from its native superview and notify close."""
        import objc_util
        objc_util.ObjCInstance(self).removeFromSuperview()
        self.process_events(self.EVENT_CLOSE)

    def attach(self):
        # Attach via the native API so the overlay sits above the app window.
        self.parent.addSubview_(self)

    def __del__(self):
        self.remove(self)
class CubemosSkeleton:
    """Skeleton detection with Cubemos using an Intel RealSense sensor.

    Streams depth+color frames, tracks 2D skeletons with Cubemos, lifts the
    joints to 3D via the depth map, and forwards them over OSC.
    """

    def __init__(self, **kwargs):
        """Keyword args: ip (default b'localhost'), port (8003),
        gestures (truthy enables the Gestures helper), specific_message
        (message-format flag)."""
        self.ip = kwargs.get('ip', b'localhost')
        self.port = kwargs.get('port', 8003)
        self.gest = kwargs.get('gestures', 0)
        self.specific_message = kwargs.get('specific_message', 0)
        self.clt = OscClt(self.ip, self.port)
        if self.gest:
            self.gestures = Gestures(self.clt)

    def render_ids_3d(self, color_image, skeletons_2d, depth_map,
                      depth_intrinsic, joint_confidence):
        """Compute 3D coordinates of the skeleton joints and send them.

        For each sufficiently confident 2D joint, samples a small kernel of
        depth values around it, takes the median distance, and deprojects
        the pixel to a 3D point. Points are sent over OSC per skeleton and,
        when enabled, fed to gesture recognition.
        """
        rows, cols, channel = color_image.shape[:3]
        # kernel = distance_kernel_size
        kernel = 5
        joints_2D = 0
        point_3d = None
        # points_3D ends up as a list of 18 joints (None when undetected).
        points_3D = [None] * 18
        for skeleton_index in range(len(skeletons_2d)):
            skeleton_2D = skeletons_2d[skeleton_index]
            joints_2D = skeleton_2D.joints
            skeleton_id = skeleton_2D.id
            for joint_index in range(len(joints_2D)):
                # Check if the joint was detected with a valid coordinate.
                if skeleton_2D.confidences[joint_index] > joint_confidence:
                    distance_in_kernel = []
                    l = int(joints_2D[joint_index].x - math.floor(kernel / 2))
                    low_bound_x = max(0, l)
                    m = int(joints_2D[joint_index].x + math.ceil(kernel / 2))
                    upper_bound_x = min(cols - 1, m)
                    n = int(joints_2D[joint_index].y - math.floor(kernel / 2))
                    low_bound_y = max(0, n)
                    o = int(joints_2D[joint_index].y + math.ceil(kernel / 2))
                    upper_bound_y = min(rows - 1, o)
                    for x in range(low_bound_x, upper_bound_x):
                        for y in range(low_bound_y, upper_bound_y):
                            distance_in_kernel.append(
                                depth_map.get_distance(x, y))
                    median_distance = np.percentile(
                        np.array(distance_in_kernel), 50)
                    depth_pixel = [
                        int(joints_2D[joint_index].x),
                        int(joints_2D[joint_index].y)
                    ]
                    # Distances below 0.3 m are treated as sensor noise.
                    if median_distance >= 0.3:
                        point_3d = rs.rs2_deproject_pixel_to_point(
                            depth_intrinsic, depth_pixel, median_distance)
                        points_3D[joint_index] = point_3d
            # Send the cubemos points over OSC (one message set per skeleton).
            if not self.specific_message:
                self.clt.send_global_message(points_3D, skeleton_id)
            else:
                self.clt.send_mutiples_message(points_3D, skeleton_id)
            # Gesture recognition. Fix: guard added — self.gestures only
            # exists when gesture tracking was requested in __init__.
            if self.gest:
                self.gestures.add_points(points_3D)

    def run(self):
        """Main loop: acquire frames, track skeletons, display and send."""
        # Configure depth and color streams of the Intel RealSense.
        config = rs.config()
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
        # Start the realsense pipeline.
        # Fix: config was built but never passed to start(), so the stream
        # settings above were silently ignored.
        pipeline = rs.pipeline()
        pipeline.start(config)
        # Create align object to align depth frames to color frames.
        align = rs.align(rs.stream.color)
        # Get the intrinsics information for calculation of 3D points.
        unaligned_frames = pipeline.wait_for_frames()
        frames = align.process(unaligned_frames)
        depth = frames.get_depth_frame()
        depth_intrinsic = depth.profile.as_video_stream_profile().intrinsics
        # Initialize the cubemos api with a valid license key in
        # default_license_dir().
        skeletrack = skeletontracker(cloud_tracking_api_key="")
        joint_confidence = 0.2
        # Create the display window.
        window_name = "Cubemos skeleton tracking with RealSense D400 series"
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL + cv2.WINDOW_KEEPRATIO)
        t0 = time()
        n = 0
        while True:
            # Pull the next aligned frame pair; skip incomplete frames.
            unaligned_frames = pipeline.wait_for_frames()
            frames = align.process(unaligned_frames)
            depth = frames.get_depth_frame()
            color = frames.get_color_frame()
            if not depth or not color:
                continue
            # Convert the color frame to a numpy array.
            color_image = np.asanyarray(color.get_data())
            # Perform inference and update the tracking ids.
            skeletons = skeletrack.track_skeletons(color_image)
            # Render the skeletons on top of the acquired image.
            color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            cm.render_result(skeletons, color_image, joint_confidence)
            self.render_ids_3d(color_image, skeletons, depth,
                               depth_intrinsic, joint_confidence)
            cv2.imshow(window_name, color_image)
            # Simple FPS counter printed every 10 seconds.
            n += 1
            t = time()
            if t - t0 > 10:
                print("FPS =", round(n / 10, 1))
                t0 = t
                n = 0
            # ESC quits.
            if cv2.waitKey(1) == 27:
                break
        pipeline.stop()
        cv2.destroyAllWindows()
        self.clt.save()
def create_accessory_toolbar(self):
    """Build the keyboard accessory toolbar with markdown shortcut buttons
    and install it as the view's inputAccessoryView.

    Also wires a swipe-down gesture (hide keyboard) and a pan gesture
    (move caret) onto the toolbar.
    """

    def create_button(label, func):
        # Each toolbar item wraps a hidden ui.Button whose action does the
        # actual work; the UIBarButtonItem forwards taps via invokeAction:.
        button_width = 25
        black = ObjCClass('UIColor').alloc().initWithWhite_alpha_(0.0, 1.0)
        action_button = ui.Button()
        action_button.action = func
        accessory_button = ObjCClass(
            'UIBarButtonItem').alloc().initWithTitle_style_target_action_(
                label, 0, action_button, sel('invokeAction:'))
        accessory_button.width = button_width
        accessory_button.tintColor = black
        return (action_button, accessory_button)

    vobj = ObjCInstance(self)
    keyboardToolbar = ObjCClass('UIToolbar').alloc().init()
    keyboardToolbar.sizeToFit()
    Gestures().add_swipe(keyboardToolbar, self.hide_keyboard, Gestures.DOWN)
    Gestures().add_pan(keyboardToolbar, self.move_caret)
    # (Removed dead outer-scope duplicates of button_width/black — they were
    # shadowed inside create_button and never used here.)
    # Create the buttons. References to the ui.Buttons must be retained on
    # self so the click handlers stay alive.
    (self.indentButton, indentBarButton) = create_button(u'\u21E5', self.indent)
    (self.outdentButton, outdentBarButton) = create_button(u'\u21E4', self.outdent)
    (self.quoteButton, quoteBarButton) = create_button('>', self.block_quote)
    (self.linkButton, linkBarButton) = create_button('[]', self.link)
    (self.hashButton, hashBarButton) = create_button('#', self.heading)
    (self.numberedButton, numberedBarButton) = create_button('1.', self.numbered_list)
    (self.listButton, listBarButton) = create_button('•', self.unordered_list)
    (self.underscoreButton, underscoreBarButton) = create_button('_', self.insert_underscore)
    (self.backtickButton, backtickBarButton) = create_button('`', self.insert_backtick)
    (self.newButton, newBarButton) = create_button('+', self.new_item)
    # Flexible space between buttons (UIBarButtonSystemItem value 5).
    f = ObjCClass('UIBarButtonItem').alloc(
    ).initWithBarButtonSystemItem_target_action_(5, None, None)
    keyboardToolbar.items = [
        indentBarButton, f, outdentBarButton, f, quoteBarButton, f,
        linkBarButton, f, hashBarButton, f, numberedBarButton, f,
        listBarButton, f, underscoreBarButton, f, backtickBarButton, f,
        newBarButton
    ]
    vobj.inputAccessoryView = keyboardToolbar
def auto_gesture(self):
    """Pick a random gesture for the computer player and print it.

    Stores the full gesture list on self.gestures and the chosen
    gesture on self.random.
    """
    self.gestures = [Gestures("rock"), Gestures("paper"), Gestures("scissors"),
                     Gestures("lizard"), Gestures("spock")]
    # Fix: random.choices() was called with no population, which raises
    # TypeError unconditionally; pick one gesture from the list instead.
    self.random = random.choice(self.gestures)
    print(self.random)
def main():
    """Entry point: create the gesture application and run it."""
    app = Gestures()
    app.start()
def __init__(self):
    """Initialize a player with an empty name and a fresh gesture set."""
    self.name = ""
    self.gestures = Gestures()
    # (Removed redundant trailing `pass` — the body already has statements.)