Code example #1
    def __shapes(self):
        for id, shape in shapetracker().shapes.items():
            # Dispatch to the drawing routine that matches the shape's current state
            {
                'fresh': self.__fresh_shape,
                'visible': self.__visible_shape,
                'covered': self.__covered_shape,
            }[shape.state](id, shape)
            if shape.pressed:
                self.__pressed_shape(shape)
            # Mark every tracked keypoint of the shape
            for kps in shape.keypoints.values():
                for kp in kps:
                    cv2.drawMarker(self.frame, tuple(kp.astype(int)), (0, 255, 0))
        for shape in shapetracker().pending_shapes:
            self.__pending_shape(shape)
Code example #2
    def next_frame(self):
        self.frame += 1
        self.publish('frame_begins', {'frame': self.frame})

        if not realsensecam().acquire_frames():
            return None

        # Detect the hand first; shapes are only (re)detected when tracking succeeds and no menu is open
        handdetector().determine_hand_cnt()
        if handtracker().update() and not ui().menu_active:
            detected_shapes = shapedetector().detect_shapes()
            shapetracker().process_detected_shapes(detected_shapes)
            touchedshapetracker().update()
        if self.logger is not None:
            self.logger.logAll()
        return visualizer().visualize()
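Every listing in this section obtains the system's modules through parameterless accessors such as realsensecam(), handdetector(), shapetracker() and visualizer(). Their implementation is not part of these listings; the sketch below only illustrates how such lazily constructed singleton accessors are commonly written (the helper name lazy_singleton and the usage line are assumptions, not the project's actual code).

# Hypothetical sketch, not from the original code: a lazily constructed
# module-level singleton behind a parameterless accessor.
def lazy_singleton(cls):
    instance = None

    def accessor():
        nonlocal instance
        if instance is None:
            instance = cls()  # Created on first access, reused on every later call
        return instance

    return accessor

# Usage sketch (the class name ShapeTracker is assumed):
# shapetracker = lazy_singleton(ShapeTracker)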
Code example #3
    def logAll(self):
        l = {'fingertip': {'x': -1, 'y': -1}, 'fingerdown': False, 'shape': []}
        if handdetector().hand_cnt is not None and handdetector().hand_valid:
            l['fingertip'] = {
                'x': int(handdetector().fingertip_pos[0]),
                'y': int(handdetector().fingertip_pos[1])
            }
        if handtracker().finger_down:
            l['fingerdown'] = True
        if len(shapetracker().shapes) > 0:
            # Log a simplified polygon of the first tracked shape's contour
            cnt = next(iter(shapetracker().shapes.values())).cnt
            eps = .1 * cv2.arcLength(cnt, True)
            approxPts = cv2.approxPolyDP(cnt, eps, True)[:, 0]
            for pos in approxPts:
                l['shape'].append({'x': int(pos[0]), 'y': int(pos[1])})
        self.log.append(l)
Code example #4
    def __lost_shapes(self):
        for idx, lost_shape in enumerate(shapetracker().lost_shapes.values()):
            # Scale each lost shape's footprint to thumbnail height and stamp its pixels into the frame
            fac = self.lost_shapes_h / max(lost_shape.footprint.shape)
            scaled_footprint = cv2.resize(lost_shape.footprint, (0, 0), fx=fac, fy=fac)
            nz_idx = list(np.nonzero(scaled_footprint))
            nz_idx[0] += self.lost_shapes_y
            nz_idx[1] += idx * (self.lost_shapes_h + 2)
            self.frame[tuple(nz_idx)] = (255, 255, 255)
Code example #5
    def update(self):
        touched_shape = handtracker().touched_shape
        if touched_shape is None:
            return

        # Create a mask that should only contain the shape that is being moved, without the finger
        new_hand_mask = handdetector().most_recent_mask
        shape_mask = shapedetector().most_recent_mask
        new_mask_to_search = cv2.bitwise_and(shape_mask,
                                             shape_mask,
                                             mask=(255 - new_hand_mask))
        for shape in shapetracker().shapes.values():
            if shape != touched_shape:
                cv2.drawContours(new_mask_to_search, [shape.cnt], 0, 0, -1)
                cv2.drawContours(new_mask_to_search, [shape.cnt], 0, 0, 5)
        new_mask_to_search = cv2.morphologyEx(new_mask_to_search,
                                              cv2.MORPH_OPEN,
                                              np.ones((5, 5), np.uint8))

        if self.old_mask_to_search is None:
            self.old_mask_to_search = new_mask_to_search
            self.initial_mask_to_search = new_mask_to_search
            self.old_hand_mask = new_hand_mask
            return  # The prediction will only be possible in the next frame

        # XOR the footprint from the last frame in which the shape moved (swipes are ignored) with its current footprint
        xored_mask = cv2.bitwise_xor(self.old_mask_to_search,
                                     new_mask_to_search)
        xored_mask = cv2.bitwise_and(xored_mask, 255 - self.old_hand_mask)
        xored_mask = cv2.bitwise_and(xored_mask, 255 - new_hand_mask)
        xored_mask = cv2.morphologyEx(xored_mask, cv2.MORPH_OPEN,
                                      np.ones((2, 2)))
        # Use this metric to determine if the shape has moved significantly
        amount_moved_pixels = np.count_nonzero(xored_mask)
        if amount_moved_pixels > 200:
            self.move = True
            if not touched_shape.moving:
                touched_shape.start_moving(
                    np.array(handdetector().fingertip_pos))

            # Calculate and apply transform
            angle = touched_shape.transform_to_fit_masks(
                self.initial_mask_to_search, new_mask_to_search,
                -handtracker().finger_deg_delta)[1]

            # Prepare next iteration
            self.old_mask_to_search = new_mask_to_search
            self.old_hand_mask = new_hand_mask
        else:
            self.move = False
            touched_shape.stop_moving()
Code example #6
    def __init__(self, logger=None):
        super().__init__()
        if conf()['compile_pyx_on_startup']:
            os.system("python setup.py build_ext --inplace")

        self.frame = 0
        self.logger = logger

        # Initialize all needed modules
        shapedetector()
        shapetracker()
        touchedshapetracker()
        handdetector()
        handtracker()
        visualizer()
        ui()
        shapepicker()
        shaperegionpicker()

        # Put the wires together
        handtracker().subscribe('hand_exit', shapetracker().clear_lost_shapes)
        handtracker().subscribe('finger_up',
                                touchedshapetracker().on_finger_up)
        handtracker().subscribe('finger_pressing', ui().on_finger_pressing)
        handtracker().subscribe('finger_up', ui().on_finger_up)
        handtracker().subscribe('finger_down', ui().on_finger_down)
        handtracker().subscribe('finger_pressing',
                                shapepicker().on_finger_pressing)
        handtracker().subscribe('finger_up', shapepicker().on_finger_up)
        handtracker().subscribe('hand_exit', handdetector().on_hand_exit)
        handtracker().subscribe('finger_pressing',
                                shaperegionpicker().on_finger_pressing)
        handtracker().subscribe('finger_up', shaperegionpicker().on_finger_up)
        handtracker().subscribe('hand_exit', shaperegionpicker().on_hand_exit)
        handtracker().subscribe('hand_exit',
                                shapepositionpicker().on_hand_exit)
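Code example #6 wires the modules together with subscribe(), and code examples #2 and #12 raise events with publish(); the base class providing these methods (the super().__init__() call above) is not included in these listings. The following is only a minimal sketch consistent with the call sites, in which each subscriber receives the event name and a payload dict (the class name EventPublisher is an assumption).

# Hypothetical sketch of a publish/subscribe base class matching the calls
# seen in the listings; the project's real base class may differ.
from collections import defaultdict

class EventPublisher:
    def __init__(self):
        self._subscribers = defaultdict(list)

    def subscribe(self, event, callback):
        # Register a callback for an event name such as 'finger_up' or 'hand_exit'
        self._subscribers[event].append(callback)

    def publish(self, event, data):
        # Invoke every subscriber with the event name and its payload dict
        for callback in self._subscribers[event]:
            callback(event, data)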
Code example #7
    def __init__(self, cnt):
        super().__init__()

        # Calculate the bbox
        bbox = Bbox(*cv2.boundingRect(cnt))

        # Draw an isolated footprint of the shape
        offset = tuple(- np.array(bbox.position()))
        isolated = np.zeros(bbox.size(True), np.uint8)
        cv2.drawContours(isolated, [cnt], 0, 255, -1, offset=offset)
        footprint = cv2.copyMakeBorder(isolated, 15, 15, 15, 15, cv2.BORDER_CONSTANT, value=0)

        # Determine the color of the shape by sampling the blurred center of its bounding box
        x, y, w, h = bbox.xywh()
        patch = realsensecam().bgr[y:y + h, x:x + w, :][int(h / 3):int(2 * h / 3), int(w / 3):int(2 * w / 3), :]
        if patch.size == 0:
            color = (0, 0, 0)
        else:
            patch = cv2.GaussianBlur(patch, (51, 51), 0)
            ph, pw, _ = patch.shape
            color = patch[int(ph / 2), int(pw / 2)]
            color = tuple(int(c) for c in color)
        color_hsv = cv2.cvtColor(np.array([[color]], np.uint8), cv2.COLOR_BGR2HSV)[0][0]

        self.cnt = cnt
        self.bbox = bbox
        self.color = color
        self.color_hsv = color_hsv
        self.footprint = footprint
        self.angle = 0
        self.state = 'fresh'
        self.state_stable_since = shapetracker().epoch
        self.pressed = False
        self.moving = False
        self.initial_swipe_xy = None
        self.current_swipe_xy = None
        self.initial_move_xy = None
        self.current_move_xy = None
        self.initial_degs = 0
        self.current_degs = None
        self.cnt_on_down = None
        self.needs_transform_to_fit_shape = False
        self.keypoints = {}
        self.keypoints_on_down = None
        self.action_name = ""
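Code example #7 constructs a Bbox from cv2.boundingRect, and the other listings query it via position(), size(True), xywh(), center() and contains(). The class itself is not reproduced here; the sketch below is a hypothetical minimal implementation inferred from those call sites (in particular, the boolean passed to size() is assumed to request numpy (rows, cols) order so that the result can be fed to np.zeros).

# Hypothetical Bbox helper reconstructed from its call sites; the original
# implementation is not part of these listings.
class Bbox:
    def __init__(self, x, y, w, h):
        self.x, self.y, self.w, self.h = x, y, w, h

    def xywh(self):
        return self.x, self.y, self.w, self.h

    def position(self):
        return self.x, self.y

    def size(self, numpy_order=False):
        # numpy_order=True returns (rows, cols) so the tuple can be passed to np.zeros()
        return (self.h, self.w) if numpy_order else (self.w, self.h)

    def center(self):
        return self.x + self.w // 2, self.y + self.h // 2

    def contains(self, px, py):
        return self.x <= px < self.x + self.w and self.y <= py < self.y + self.h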
Code example #8
    def on_finger_pressing(self, _, data):
        if not self.active or self.must_lift_finger:
            return

        xy = data['fingertip_pos']
        found = False
        for id, shape in shapetracker().shapes.items():
            if shape.bbox.contains(*xy):
                if shape != self.shape:
                    self.shape = shape
                    self.stable_since = time.time()
                found = True
                break
        if not found:
            self.shape = None
            self.stable_since = time.time()

        if time.time() - self.stable_since >= 1:
            picked_shape = self.shape  # Remember the picked shape before clearing the state
            self.active = False
            self.shape = None
            self.callback(picked_shape, *self.args)  # Picking completed, call the callback
            return
Code example #9
    def __stats(self):
        self.nth_frame += 1
        elapsed = time.time() - self.start_time
        if elapsed > .1:
            self.fps = int(self.nth_frame / elapsed)
            self.nth_frame = 0
            self.start_time = time.time()

        self.__print("Tracking {:2d} shapes".format(len(shapetracker().shapes)), 2, self.text_l)
        self.__print("{:2d} FPS".format(self.fps), 2, self.text_r - 86)

        if not handdetector().hand_valid:
            self.__print("HAND ERROR! Too many or too long hands.", 1, self.text_l, color=(0, 0, 255))
        elif ui().menu_armed:
            self.__print("Hold the blue square to enter the menu.", 1, self.text_l, color=(255, 50, 50))
        elif ui().menu_active:
            self.__print("Pick an option from the menu.", 1, self.text_l, color=(255, 50, 50))
        elif ui().action_menu_armed:
            self.__print("Hold the blue square to enter the menu.", 1, self.text_l, color=(50, 255, 50))
        elif ui().action_menu_active:
            self.__print("Pick an option from the menu.", 1, self.text_l, color=(50, 255, 50))
        elif shapepicker().active:
            if shapepicker().must_lift_finger:
                self.__print("Please lift your finger.", 0, self.text_l, color=(0, 255, 255))
            else:
                self.__print(shapepicker().hint, 0, self.text_l, color=(0, 255, 0))
                self.__print("Press and hold the shape you want to pick ({}%)".format(int(100 * shapepicker().progress())), 1, self.text_l, color=(0, 255, 0))
        elif shaperegionpicker().active:
            if shaperegionpicker().must_lift_finger:
                self.__print("Please lift your finger (keep hand under camera).", 0, self.text_l, color=(0, 255, 255))
            else:
                self.__print("Press and hold {} in the shape ({}%)".format(shaperegionpicker().current_region_description(), int(100 * shaperegionpicker().progress())), 0, self.text_l, color=(0, 255, 0))
                self.__print("When all regions are selected, remove your hand.", 1, self.text_l, color=(0, 255, 0))
        elif shapepositionpicker().active:
            self.__print(shapepositionpicker().hint, 0, self.text_l, color=(0, 255, 0))
            self.__print("When the position has been reached, remove hand.", 1, self.text_l, color=(0, 255, 0))
Code example #10
    def __fresh_shape(self, id, shape):
        # Draw a progress bar (filled according to percentage_for_shape) at the shape's center and outline the contour
        x, y = shape.bbox.center()
        cv2.rectangle(self.frame, (x - 15, y - 4), (x - 15 + int(shapetracker().percentage_for_shape(shape) * 30), y + 4), (0, 200, 0), cv2.FILLED)
        cv2.rectangle(self.frame, (x - 15, y - 4), (x + 15, y + 4), (200, 200, 200))
        cv2.drawContours(self.frame, [shape.cnt], 0, (0, 255, 0), 1)
Code example #11
    def set_state(self, new_state):
        self.state = new_state
        self.state_stable_since = shapetracker().epoch
Code example #12
    def update(self):
        if not handdetector().hand_valid:
            return False

        # Check if the visibility of the hand has changed
        now_visible = handdetector().hand_cnt is not None
        was_visible = self.hand_visible
        self.hand_visible = now_visible
        if now_visible and not was_visible:
            self.publish('hand_enter', None)
        if not now_visible and was_visible:
            self.publish('hand_exit', None)

        # Check if finger is down or not
        now_down = handdetector().fingertip_height < conf()['finger_height_threshold']
        was_down = self.finger_down
        self.finger_down = now_down

        if now_down and not was_down:
            self.__start_of_tracking()
            self.__update_of()
            data = {
                'fingertip_pos': self.enhanced_fingertip_pos(),
                'finger_delta': self.finger_delta,
                'finger_deg_delta': self.finger_deg_delta
            }
            self.publish('finger_down', data)
            touchetmanager().emit_global_event('finger_down', data)
            for shape in shapetracker().shapes.values():
                shape.on_finger_down(data)  # The shape will figure out if it is the target

        if not now_down and was_down:
            self.__stop_of_tracking()
            data = {
                'fingertip_pos': self.enhanced_fingertip_pos(),
                'finger_delta': self.finger_delta,
                'finger_deg_delta': self.finger_deg_delta
            }
            self.publish('finger_up', data)
            touchetmanager().emit_global_event('finger_up', data)
            for shape in shapetracker().shapes.values():
                shape.on_finger_up(data)  # The shape will figure out if it is the target

        if was_down and now_down:
            data = {
                'fingertip_pos': self.enhanced_fingertip_pos(),
                'finger_delta': self.finger_delta,
                'finger_deg_delta': self.finger_deg_delta
            }
            self.publish('finger_pressing', data)
            touchetmanager().emit_global_event('finger_pressing', data)
            for shape in shapetracker().shapes.values():
                shape.on_finger_pressing(data)  # The shape will figure out if it is the target
            if self.__update_of():  # Only trigger if the finger actually moved enough
                self.publish('finger_moved', data)
                touchetmanager().emit_global_event('finger_moved', data)
                for shape in shapetracker().shapes.values():
                    shape.on_finger_moved(data)  # The shape will figure out if it is the target
        return True