Example #1
    def test_roi_from_frame_center(self):
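        # `frame` is assumed to be a 640x480 test image prepared by the test fixture (not shown in this excerpt)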

        r = Roi.from_frame(frame, SIDE.CENTER, 100)
        self.assertEqual(r, [0, 0, 640, 480])
        r = Roi.from_frame(frame, SIDE.CENTER, 90)
        self.assertEqual(r, [(64 / 2), (48 / 2), 640 - 64, 480 - 48])
        r = Roi.from_frame(frame, SIDE.CENTER, 10)
        self.assertEqual(r, [640 / 2 - 64 / 2, 480 / 2 - 48 / 2, 64, 48])
Example #2
    def test_roi_from_frame_left(self):

        r = Roi.from_frame(frame, SIDE.LEFT, 100)
        self.assertEqual(r, [0, 0, 640, 480])
        r = Roi.from_frame(frame, SIDE.LEFT, 90)
        self.assertEqual(r, [0, 0, 640 - 64, 480])
        r = Roi.from_frame(frame, SIDE.LEFT, 10)
        self.assertEqual(r, [0, 0, 64, 480])
Example #3
    def test_roi_from_frame_bottom(self):

        r = Roi.from_frame(frame, SIDE.BOTTOM, 100)
        self.assertEqual(r, [0, 0, 640, 480])
        r = Roi.from_frame(frame, SIDE.BOTTOM, 90)
        self.assertEqual(r, [0, 48, 640, 480 - 48])
        r = Roi.from_frame(frame, SIDE.BOTTOM, 10)
        self.assertEqual(r, [0, 480 - 48, 640, 48])
Example #4
    def test_roi_from_frame_right(self):

        r = Roi.from_frame(frame, SIDE.RIGHT, 100)
        self.assertEqual(r, [0, 0, 640, 480])

        r = Roi.from_frame(frame, SIDE.RIGHT, 90)
        self.assertEqual(r, [64, 0, 640 - 64, 480])

        r = Roi.from_frame(frame, SIDE.RIGHT, 10)
        self.assertEqual(r, [640 - 64, 0, 64, 480])
Example #5
    def test_roi_from_frame_top(self):
        """
        Test that checks the creation of ROIs from frames
        """

        r = Roi.from_frame(frame, SIDE.TOP, 100)
        self.assertEqual(r, [0, 0, 640, 480])
        r = Roi.from_frame(frame, SIDE.TOP, 90)
        self.assertEqual(r, [0, 0, 640, 480 - 48])
        r = Roi.from_frame(frame, SIDE.TOP, 10)
        self.assertEqual(r, [0, 0, 640, 48])
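Taken together, the assertions above pin down the geometry Roi.from_frame appears to implement: the result reads as an [x, y, width, height] box (for a 640x480 frame) covering `percent` percent of the frame, anchored at the given side; both dimensions shrink for SIDE.CENTER, only the width for LEFT/RIGHT, only the height for TOP/BOTTOM. The sketch below is a reconstruction from those expected values only, assuming the frame exposes width/height attributes and using a plain list in place of the real Roi; it is not the component's actual implementation:

def from_frame_sketch(frame, side, percent):
    # Hypothetical reconstruction of Roi.from_frame, derived from the test expectations above.
    w, h = frame.width, frame.height  # assumption: the frame exposes its size this way
    new_w = int(w * percent / 100) if side in (SIDE.CENTER, SIDE.LEFT, SIDE.RIGHT) else w
    new_h = int(h * percent / 100) if side in (SIDE.CENTER, SIDE.TOP, SIDE.BOTTOM) else h
    if side == SIDE.CENTER:
        x, y = (w - new_w) // 2, (h - new_h) // 2
    elif side == SIDE.RIGHT:
        x, y = w - new_w, 0
    elif side == SIDE.BOTTOM:
        x, y = 0, h - new_h
    else:  # LEFT and TOP keep the origin
        x, y = 0, 0
    return [x, y, new_w, new_h]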
Example #6
 def get_roi_to_use(self, frame):
     """
     Calculate the ROI to use depending on the state of the hand (initial, detected or tracked).
     :param frame: frame currently being processed
     :return: the Roi to use for the next detection/tracking step
     """
     current_roi = None
     if self._detected:
         current_roi = self.detection_roi
     else:
          # previous detections have failed: keep the tracking ROI if tracking still holds, otherwise fall back to the extended ROI
         if self._consecutive_detection_fails > 0:
             if self._tracked:
                 current_roi = self.tracking_roi
             else:
                 current_roi = self.extended_roi
         else:
              # Not detected and no consecutive detection failures yet:
              # it's probably the first detection attempt.
              # If no initial_roi is given, a centered ROI covering 50% of the frame is used.
             if self.initial_roi is not None and self.initial_roi != Roi():
                 current_roi = self.initial_roi
             else:
                 current_roi = Roi.from_frame(frame, SIDE.CENTER, 50)
     assert current_roi != Roi(), "hand can't be detected with an empty ROI (%s)" % str(current_roi)
     return current_roi
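Whatever ROI this method returns is typically used to crop the incoming frame before detection runs on the sub-image. A minimal sketch of that pattern, assuming the Roi exposes x, y, width and height attributes and that the frame is a NumPy array indexed as [row, column] (both assumptions of this sketch; crop_to_roi is a hypothetical helper, not part of the component):

def crop_to_roi(frame, roi):
    # Hypothetical helper: slice out the region selected by get_roi_to_use.
    return frame[roi.y:roi.y + roi.height, roi.x:roi.x + roi.width]

# Typical call site (illustrative only):
# roi = hand.get_roi_to_use(frame)
# sub_image = crop_to_roi(frame, roi)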
 def test_hand_detect(self):
     """
      Test hand detection and tracking against expected results for a set of recorded depth images.
     """
     hand = Hand()
     hand.depth_threshold = 130
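      # expected [detected, tracked] flags per image, checked against hand.detected / hand.tracked below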
     expected_results = {
         "20190725130248.png": [False, False],
         "20190725130249.png": [False, False],
         "20190725130250.png": [True, True],
         "20190725130251.png":
         [False, True],  # fingers too close and it's considered not a hand
         "20190725130252.png": [True, True],
         "20190725130253.png": [True, True],
         "20190725130254.png": [False, True],
         "20190725130255.png": [False, True],
         "20190725130256.png": [False, True],
         "20190725130257.png": [False, True],
         "20190725130258.png": [False, True]
     }
     full_path = "/home/robolab/robocomp/components/robocomp-robolab/components/detection/handDetection/src/images/depth_images"
     for file in sorted(os.listdir(full_path)):
         if file.endswith(".png") and file in expected_results:
             frame = RGBDFrame(cv2.imread(os.path.join(full_path, file), 0))
             hand.initial_roi = Roi.from_frame(frame, SIDE.CENTER, 50)
             hand.detect_and_track(frame)
             print("testing file %s" % file)
             self.assertEqual(hand.detected, expected_results[file][0])
             self.assertEqual(hand.tracked, expected_results[file][1])
             frame = self.draw_in_frame(hand, frame)
             cv2.imshow("final", frame)
             key = cv2.waitKey(5000)
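              # 112 == ord('p'): pause here until 'p' is pressed again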
             if key == 112:
                 while cv2.waitKey(1000) != 112:
                     pass
Example #8
    def update_hand_with_contour(self, hand_contour):
        """
        Attributes of the hand are calculated from the hand contour.
        TODO: calculate a truth value
        A score of 100 is the maximum value for the hand truth.
        This value is calculated like this:
        A hand is expected to have 5 finger tips, 4 intertips, a center of mass

        :param hand_contour: calculated contour that is expected to describe a hand
        :return: None
        """
        hull2 = cv2.convexHull(hand_contour, returnPoints=False)
        # Get defect points
        defects = cv2.convexityDefects(hand_contour, hull2)

        if defects is not None:
            (estimated_fingertips_coords,
             estimated_fingertips_indexes,
             estimated_intertips_coords,
             estimated_intertips_indexes) = self._calculate_fingertips(hand_contour, defects)

            is_hand = self.is_hand(estimated_fingertips_coords, estimated_intertips_coords, strict=True)
            if is_hand:
                self._fingertips = estimated_fingertips_coords
                self._intertips = estimated_intertips_coords

                if len(estimated_fingertips_coords) == 5:
                    fingers_contour = np.take(hand_contour,
                                              estimated_fingertips_indexes + estimated_intertips_indexes,
                                              axis=0,
                                              mode="wrap")
                    bounding_rect, hand_circle, self._contour = self.get_hand_bounding_rect_from_fingers(
                        hand_contour,
                        fingers_contour)
                    # detection roi is the bounding rect of the fingers, upscaled by 10 within the full-frame ROI
                    # self.detection_roi = Roi(bounding_rect)
                    self.detection_roi = Roi(bounding_rect).upscaled(Roi.from_frame(self._last_frame, SIDE.CENTER, 100), 10)
                    if self._debug:
                        to_show = self._last_frame.copy()
                        cv2.drawContours(to_show, [hand_contour], -1, (255, 255, 255), 2)
                        cv2.drawContours(to_show, [fingers_contour], -1, (200, 200, 200), 2)
                        to_show = self.detection_roi.draw_on_frame(to_show)
                        # cv2.rectangle(to_show, (self.detection_roi.y, self.detection_roi.x), (self.detection_roi.y + self.detection_roi.height, self.detection_roi.x + self.detection_roi.width), [255, 255, 0])
                        # (x, y, w, h) = cv2.boundingRect(hand_contour)
                        # cv2.rectangle(to_show, (self.detection_roi.y, self.detection_roi.x), (self.detection_roi.x + self.detection_roi.height, self.detection_roi.x + self.detection_roi.width), [255, 255, 0])
                        cv2.imshow("update_hand_with_contour", to_show)


                    self._detected = True
                    self._detection_status = 1
                    self._ever_detected = True
                    self._confidence = 100
                else:
                    self._detection_status = -1
                    self._detected = False
                    self._confidence = 0
                    return
            else:
                self._detection_status = -1
                self._detected = False
                self._confidence = 0
                return
            # Find moments of the largest contour
            moments = cv2.moments(hand_contour)
            center_of_mass = None
            finger_distances = []
            average_defect_distance = None
            # Central mass of first order moments
            if moments['m00'] != 0:
                cx = int(moments['m10'] / moments['m00'])  # cx = M10/M00
                cy = int(moments['m01'] / moments['m00'])  # cy = M01/M00
                center_of_mass = (cx, cy)
                self._center_of_mass = center_of_mass
                self._position_history.append(center_of_mass)

            if center_of_mass is not None and len(estimated_intertips_coords) > 0:
                # Distance from each finger defect(finger webbing) to the center mass
                distance_between_defects_to_center = []
                for far in estimated_intertips_coords:
                    x = np.array(far)
                    center_mass_array = np.array(center_of_mass)
                    distance = np.sqrt(
                        np.power(x[0] - center_mass_array[0],
                                 2) + np.power(x[1] - center_mass_array[1], 2)
                    )
                    distance_between_defects_to_center.append(distance)

                # Get an average of the two shortest distances from finger webbing to center mass
                sorted_defects_distances = sorted(distance_between_defects_to_center)
                average_defect_distance = np.mean(sorted_defects_distances[0:2])
                self._average_defect_distance = average_defect_distance
                # # Get fingertip points from contour hull
                # # If points are in proximity of 80 pixels, consider as a single point in the group
                # finger = []
                # for i in range(0, len(hull) - 1):
                #     if (np.absolute(hull[i][0][0] - hull[i + 1][0][0]) > 10) or (
                #             np.absolute(hull[i][0][1] - hull[i + 1][0][1]) > 10):
                #         if hull[i][0][1] < 500:
                #             finger.append(hull[i][0])
                #
                #
                # # The fingertip points are 5 hull points with largest y coordinates
                # finger = sorted(finger, key=lambda x: x[1])
                # fingers = finger[0:5]
            if center_of_mass is not None and len(estimated_fingertips_coords) > 0:
                # Calculate distance of each finger tip to the center mass
                finger_distances = []
                for i in range(0, len(estimated_fingertips_coords)):
                    distance = np.sqrt(
                        np.power(estimated_fingertips_coords[i][0] - center_of_mass[0], 2) + np.power(
                            estimated_fingertips_coords[i][1] - center_of_mass[1], 2))
                    finger_distances.append(distance)
                self._finger_distances = finger_distances
        else:
            self._detection_status = -2
            self._detected = False
            self._confidence = 0
            return
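The graded truth value promised in the docstring's TODO never materialises above: _confidence jumps straight between 0 and 100. One way to read the description (5 fingertips, 4 intertips and a center of mass) is as a ten-feature score; the sketch below only illustrates that reading and is not the component's implementation:

def estimate_hand_confidence(fingertips, intertips, center_of_mass):
    # Illustrative scoring only: 5 fingertips + 4 intertips + 1 center of mass = 10 features,
    # each worth 10 points, so a fully described hand reaches the maximum of 100.
    score = 10 * min(len(fingertips), 5)
    score += 10 * min(len(intertips), 4)
    if center_of_mass is not None:
        score += 10
    return score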