def __get_slice_img(self, lowest_layer, highest_layer):
        marker_points = []
        for layer in range(lowest_layer, highest_layer):
            largest_cnt = self.__get_layer_cnt(layer)
            if largest_cnt is not None:
                hull = cv2.convexHull(largest_cnt, returnPoints=False)
                defects = cv2.convexityDefects(largest_cnt, hull)

                if defects is None:
                    continue

                for j in range(defects.shape[0]):
                    s, e, _, _ = defects[j, 0]
                    marker_points.append(tuple(largest_cnt[s][0]))
                    marker_points.append(tuple(largest_cnt[e][0]))

        marker_img = np.zeros((realsensecam().H, realsensecam().W), np.uint8)
        for op in marker_points:
            cv2.circle(marker_img, op, 1, 255, -1)
        marker_img = cv2.morphologyEx(
            marker_img, cv2.MORPH_CLOSE,
            cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15)))
        return marker_img
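A minimal standalone sketch (not project code) of the convexity-defect extraction used above, run on a synthetic star-shaped contour; each defect contributes its hull start and end points:

import cv2
import numpy as np

cnt = np.array([[[50, 0]], [[60, 40]], [[100, 50]], [[60, 60]],
                [[50, 100]], [[40, 60]], [[0, 50]], [[40, 40]]], np.int32)
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
if defects is not None:
    for j in range(defects.shape[0]):
        s, e, f, _ = defects[j, 0]
        # s and e lie on the hull, f marks the deepest point between them
        print(tuple(cnt[s][0]), tuple(cnt[e][0]), tuple(cnt[f][0]))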
Example #2
    def transform_to_fit_shape(self, other_shape):
        my_mask = np.zeros((realsensecam().H, realsensecam().W), np.uint8)
        other_mask = np.zeros_like(my_mask)
        cv2.drawContours(my_mask, [self.cnt], 0, 255, -1)
        cv2.drawContours(other_mask, [other_shape.cnt], 0, 255, -1)
        translation, angle = self.transform_to_fit_masks(my_mask, other_mask, origin_is_on_down=False)
        if abs(angle) < 8:  # Otherwise we will need to do this again
            self.needs_transform_to_fit_shape = False
        self.publish('transformation_adjusted', {'shape': self, 'shape_move_delta': translation, 'shape_degs': -angle})
Example #3
    def __init__(self, bg_color):
        self.bg_color = bg_color
        self.items = {}
        self.most_recent_mc = None
        self.mc_stable_since = 0

        self.margin = 20
        self.outter_w = realsensecam().W / 3
        self.inner_w = self.outter_w - 2 * self.margin
        self.outter_h = realsensecam().H / 6
        self.inner_h = self.outter_h - 2 * self.margin
    def __init__(self):
        self.lost_shapes_y = realsensecam().H + 2
        self.lost_shapes_h = 30
        self.text_h = 40
        self.text_l = 0
        self.text_r = realsensecam().W
        self.text_y0 = self.lost_shapes_y + self.lost_shapes_h + 2
        self.text_y = [self.text_y0, self.text_y0 + self.text_h - 23, self.text_y0 + self.text_h - 4]
        self.text_size = 0.9
        self.start_time = time.time()
        self.nth_frame = 0
        self.fps = 0
Example #5
    def on_finger_down(self, _, data):
        xy = data['fingertip_pos']
        if xy[1] < 50:
            if xy[0] < 50:
                self.menu_armed = True
                self.finger_down_ts = time.time()
            elif xy[0] > realsensecam().W - 50:
                self.action_menu_armed = True
                self.finger_down_ts = time.time()
    def __get_layer_cnt(self, layer):
        _, depth_th = cv2.threshold(realsensecam().depth_blurred, layer, 255,
                                    cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(depth_th, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) > 0:
            return max(contours, key=cv2.contourArea)
        else:
            return None
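A minimal sketch (not project code) of the layer threshold in __get_layer_cnt, on a hypothetical 2x2 depth image: only pixels above the layer value survive the binary threshold.

import cv2
import numpy as np

depth = np.array([[0, 5], [12, 30]], np.uint8)
_, th = cv2.threshold(depth, 10, 255, cv2.THRESH_BINARY)
print(th)  # [[0 0] [255 255]]: only 12 and 30 exceed the layer value 10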
    def cnt_intersects_with_hand(self, cnt, include_secondary_contours=True):
        if self.hand_cnt is None:
            return False

        # Create two empty frames, draw the contours and perform bitwise and
        hand_img = np.zeros((realsensecam().H, realsensecam().W, 1), np.uint8)
        cnt_img = hand_img.copy()
        contours_to_check = [self.hand_cnt]
        if include_secondary_contours:
            contours_to_check += self.secondary_hand_cnts
        cv2.drawContours(hand_img, contours_to_check, -1, 255, -1)
        cv2.drawContours(hand_img, contours_to_check, -1, 255,
                         conf()['hand_shape_intersection_border'])
        cv2.drawContours(cnt_img, [cnt], 0, 255, cv2.FILLED)
        anded = np.bitwise_and(hand_img, cnt_img)

        # If there are non-zero pixels left after "and"ing, there is an intersection
        return np.any(anded)
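A minimal sketch of the same intersection test with two hypothetical square contours; any pixel surviving the AND makes np.any(...) return True:

import cv2
import numpy as np

a = np.array([[[10, 10]], [[60, 10]], [[60, 60]], [[10, 60]]], np.int32)
b = np.array([[[50, 50]], [[90, 50]], [[90, 90]], [[50, 90]]], np.int32)
img_a = np.zeros((100, 100), np.uint8)
img_b = np.zeros_like(img_a)
cv2.drawContours(img_a, [a], 0, 255, cv2.FILLED)
cv2.drawContours(img_b, [b], 0, 255, cv2.FILLED)
print(np.any(np.bitwise_and(img_a, img_b)))  # True: the squares overlap at (50..60, 50..60)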
Example #8
    def __start_of_tracking(self):
        self.of_enabled = True

        self.old_gray = cv2.cvtColor(realsensecam().bgr, cv2.COLOR_BGR2GRAY)
        mask = np.zeros((realsensecam().H, realsensecam().W), np.uint8)
        cv2.circle(mask, handdetector().fingertip_pos, 80, 255, -1)
        cnt_mask = np.zeros_like(mask)
        cv2.drawContours(cnt_mask, [handdetector().hand_cnt], 0, 255, -1)
        mask = cv2.bitwise_and(mask, cnt_mask)

        self.of_old_pts = cv2.goodFeaturesToTrack(
            self.old_gray, mask=mask, **self.of_feature_params)
        if self.of_old_pts is None:
            self.__stop_of_tracking()
            return
        self.of_orig_existing_pts = self.of_old_pts.copy()
        self.of_is_fingertip = np.array([0] * len(self.of_orig_existing_pts))

        fingertip = np.array(handdetector().fingertip_pos)
        for i, pt in enumerate(self.of_old_pts[:, 0]):
            if np.linalg.norm(np.array(pt) - fingertip) < 30:
                self.of_is_fingertip[i] = 1
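A minimal standalone sketch of the masked Shi-Tomasi search above; the feature parameters here are assumptions (the project keeps its own in self.of_feature_params), and the mask restricts detection to one corner of a synthetic rectangle:

import cv2
import numpy as np

gray = np.zeros((100, 100), np.uint8)
cv2.rectangle(gray, (30, 30), (70, 70), 255, -1)
mask = np.zeros_like(gray)
cv2.circle(mask, (30, 30), 15, 255, -1)  # only search near the top-left corner
pts = cv2.goodFeaturesToTrack(gray, maxCorners=10, qualityLevel=0.3,
                              minDistance=7, mask=mask)
print(pts)  # roughly [[[30., 30.]]], the one rectangle corner inside the mask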
def acquire_masks():
    # Find the hand
    _, depth_th_hand = cv2.threshold(realsensecam().depth_blurred, 1, 255,
                                     cv2.THRESH_BINARY)

    # Create the hand mask, the largest depth contour is assumed to be the hand
    hand_cnt = None
    hand_mask = np.zeros((realsensecam().H, realsensecam().W), np.uint8)
    contours, _ = cv2.findContours(depth_th_hand, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        largest_cnt = max(contours, key=lambda c: cv2.contourArea(c))
        if cv2.contourArea(largest_cnt) > 500:
            hand_cnt = largest_cnt
    if hand_cnt is not None:
        cv2.drawContours(hand_mask, [hand_cnt], 0, 255, cv2.FILLED)

    # Create the shape mask
    # Get mask by filtering by color saturation in HSV color space
    hsv = cv2.cvtColor(realsensecam().bgr, cv2.COLOR_BGR2HSV)
    shape_mask_unfiltered = cv2.inRange(hsv, np.array([0, 115, 0]),
                                        np.array([255, 255, 255]))
    shape_contours, _ = cv2.findContours(
        cv2.bitwise_and(shape_mask_unfiltered,
                        shape_mask_unfiltered,
                        mask=(255 - hand_mask)), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    if len(shape_contours) == 0:
        return None, None
    largest_shape_cnt = max(shape_contours, key=lambda c: cv2.contourArea(c))

    # Create a mask that should only contain the largest shape without hand
    shape_mask_filtered = np.zeros_like(hand_mask)
    cv2.drawContours(shape_mask_filtered, [largest_shape_cnt], 0, 255,
                     cv2.FILLED)
    shape_mask_filtered[shape_mask_filtered != 0] = 1
    return shape_mask_filtered, largest_shape_cnt
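A minimal sketch of the saturation filter used above, on a hypothetical 4x4 BGR frame: colorful (saturated) pixels pass the inRange test, gray ones do not.

import cv2
import numpy as np

frame = np.zeros((4, 4, 3), np.uint8)
frame[:2] = (0, 0, 255)      # saturated red rows
frame[2:] = (128, 128, 128)  # gray rows
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([0, 115, 0]), np.array([255, 255, 255]))
print(mask[:, 0])  # [255 255 0 0]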
    def next_frame(self):
        self.frame += 1
        self.publish('frame_begins', {'frame': self.frame})

        if not realsensecam().acquire_frames():
            return None

        handdetector().determine_hand_cnt()
        if handtracker().update() and not ui().menu_active:
            detected_shapes = shapedetector().detect_shapes()
            shapetracker().process_detected_shapes(detected_shapes)
            touchedshapetracker().update()
        if self.logger is not None:
            self.logger.logAll()
        return visualizer().visualize()
Example #11
    def __init__(self, cnt):
        super().__init__()

        # Calculate the bbox
        bbox = Bbox(*cv2.boundingRect(cnt))

        # Draw an isolated footprint of the shape
        offset = tuple(- np.array(bbox.position()))
        isolated = np.zeros(bbox.size(True), np.uint8)
        cv2.drawContours(isolated, [cnt], 0, 255, -1, offset=offset)
        footprint = cv2.copyMakeBorder(isolated, 15, 15, 15, 15, cv2.BORDER_CONSTANT, 0)

        # Determine the color of the shape
        x, y, w, h = bbox.xywh()
        patch = realsensecam().bgr[y:y + h, x:x + w, :][int(h / 3):int(2 * h / 3), int(w / 3):int(2 * w / 3), :]
        if patch.size == 0:  # Guard before blurring; an empty patch would crash GaussianBlur
            color = (0, 0, 0)
        else:
            patch = cv2.GaussianBlur(patch, (51, 51), 0)
            ph, pw, _ = patch.shape
            color = patch[int(ph / 2), int(pw / 2)]
            color = tuple([int(x) for x in color])
        color_hsv = cv2.cvtColor(np.array([[color]], np.uint8), cv2.COLOR_BGR2HSV)[0][0]

        self.cnt = cnt
        self.bbox = bbox
        self.color = color
        self.color_hsv = color_hsv
        self.footprint = footprint
        self.angle = 0
        self.state = 'fresh'
        self.state_stable_since = shapetracker().epoch
        self.pressed = False
        self.moving = False
        self.initial_swipe_xy = None
        self.current_swipe_xy = None
        self.initial_move_xy = None
        self.current_move_xy = None
        self.initial_degs = 0
        self.current_degs = None
        self.cnt_on_down = None
        self.needs_transform_to_fit_shape = False
        self.keypoints = {}
        self.keypoints_on_down = None
        self.action_name = ""
Example #12
    def __update_of(self):
        if not self.of_enabled:
            return

        # Calculate optical flow
        new_gray = cv2.cvtColor(realsensecam().bgr, cv2.COLOR_BGR2GRAY)
        new_pts, st, err = cv2.calcOpticalFlowPyrLK(self.old_gray, new_gray,
                                                    self.of_old_pts, None,
                                                    **self.of_lk_params)
        if new_pts is None:
            self.__stop_of_tracking()
            return

        # Mark points that are not on the hand as invalid
        for i, pt in enumerate(new_pts):
            if not handdetector().cnt_intersects_with_hand(
                    np.array([pt]).astype(int)):
                st[i][0] = 0

        # Delete lost points
        self.of_orig_existing_pts = self.of_orig_existing_pts[st == 1]
        self.of_old_pts = self.of_old_pts[st == 1]
        self.of_is_fingertip = self.of_is_fingertip[st.flatten() == 1]
        new_pts = new_pts[st == 1]
        if len(new_pts) == 0:
            self.__stop_of_tracking()
            return

        old_finger_delta = self.finger_delta
        old_finger_deg_delta = self.finger_deg_delta

        T, R, t = icp.best_fit_transform(self.of_orig_existing_pts, new_pts)
        self.finger_delta = tuple(t)
        self.finger_deg_delta = np.rad2deg(np.arctan2(R[1, 0], R[0, 0]))
        self.finger_transform = T

        dt = np.linalg.norm(np.array(t) - np.array(old_finger_delta))
        dr = abs(old_finger_deg_delta - self.finger_deg_delta)

        # Prepare next iteration
        self.old_gray = new_gray.copy()
        self.of_old_pts = new_pts.reshape(-1, 1, 2)
        self.of_orig_existing_pts = self.of_orig_existing_pts.reshape(-1, 1, 2)

        return dt > 1 or dr > .3
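icp.best_fit_transform is project code; the following is a hedged sketch of the standard SVD-based (Kabsch) rigid fit it presumably performs for Nx2 point sets A -> B:

import numpy as np

def rigid_fit(A, B):
    ca, cb = A.mean(axis=0), B.mean(axis=0)
    H = (A - ca).T @ (B - cb)      # cross-covariance of the centered sets
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:       # guard against a reflection
        Vt[-1] *= -1
        R = Vt.T @ U.T
    t = cb - R @ ca
    return R, t

A = np.array([[0., 0.], [1., 0.], [0., 1.]])
theta = np.deg2rad(10)
R_true = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta), np.cos(theta)]])
B = A @ R_true.T + [2., 3.]
R, t = rigid_fit(A, B)
print(np.rad2deg(np.arctan2(R[1, 0], R[0, 0])))  # ~10.0, like finger_deg_delta above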
Example #13
    def on_finger_pressing(self, _, data):
        xy = data['fingertip_pos']
        if self.menu_active:
            self.menu.on_finger_pressing(xy)
        elif self.action_menu_active:
            self.action_menu.on_finger_pressing(xy)
        elif 50 <= xy[0] <= realsensecam().W - 50 or xy[1] >= 50:
            self.on_finger_up(None, data)
        elif self.menu_armed:
            self.menu_progress = min(1, time.time() - self.finger_down_ts)
            if self.menu_progress >= 1:
                self.__menu_open()
        elif self.action_menu_armed:
            self.action_menu_progress = min(1, time.time() - self.finger_down_ts)
            if self.action_menu_progress >= 1:
                self.__action_menu_open()
        else:
            self.on_finger_down(None, data)
    def visualize(self):
        self.frame = realsensecam().bgr.copy()

        if handdetector().hand_valid:
            self.__shapes()
            self.__touchets()
            ui().visualize_menu(self)  # Due to an import cycle, this must be done in UI
            self.__hand()
            self.__shaperegionpicker()
        else:
            red_pic = np.full(self.frame.shape, (0, 0, 255), dtype=self.frame.dtype)
            self.frame = cv2.addWeighted(self.frame, 1, red_pic, 0.5, 0)

        # From here on, work with border
        self.frame = cv2.copyMakeBorder(self.frame, 0, self.text_h + self.lost_shapes_h + 4, 0, 0, cv2.BORDER_CONSTANT, 0)
        self.__lost_shapes()
        self.__stats()

        self.ts_of_last_frame = time.time()
        return self.frame
Example #15
    def detect_shapes(self):
        # Get mask by filtering by color saturation in HSV color space
        hsv = cv2.cvtColor(realsensecam().bgr, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(
            hsv, np.array([0, conf()['shape_saturation_threshold'], 0]),
            np.array([255, 255, 255]))
        self.most_recent_mask = mask

        # Find contours in mask in order to isolate individual shapes
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        detected_shapes = []
        for cnt in contours:
            if cv2.contourArea(cnt) < 750:
                continue
            if handdetector().cnt_intersects_with_hand(cnt):
                continue
            detected_shapes.append(Shape(cnt))
        return detected_shapes
    def determine_hand_cnt(self):
        # Cut along the table surface to get all objects lying above it
        _, depth_th_hand = cv2.threshold(realsensecam().depth_blurred,
                                         conf()['hand_depth_threshold'], 255,
                                         cv2.THRESH_BINARY)
        self.most_recent_mask = depth_th_hand

        # Detect the contours, the largest is assumed to be the hand
        self.hand_cnt = None
        self.secondary_hand_cnts = []
        self.hand_valid = True
        contours, _ = cv2.findContours(depth_th_hand, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) > 0:
            sorted_cnts = sorted(contours, key=lambda c: -cv2.contourArea(c))
            if cv2.contourArea(sorted_cnts[0]) > 500:
                # The largest contour is assumed to be the hand
                self.hand_cnt = sorted_cnts[0]
            # Any other large contours are saved as secondary hands
            for cnt in sorted_cnts[1:]:
                if cv2.contourArea(cnt) < 500:
                    break
                self.secondary_hand_cnts.append(cnt)
        if self.hand_cnt is None:
            return  # No hand detected

        # The following code is used to find out where the hand enters the camera

        # Initialize / reset datastructures
        # This set will hold the edges of the frame that are touched by the hand
        touched_corners = set()
        self.edgepts = []
        self.edgeextrem1 = None
        self.edgeextrem2 = None
        self.edgeextremcenter = None

        # Detect points touching an edge and account which edges are touched
        for pt in self.hand_cnt[:, 0]:
            iscorner = False
            if pt[1] <= 1:  # Top edge
                touched_corners.add(0)
                iscorner = True
            if pt[0] <= 1:  # Left edge
                touched_corners.add(1)
                iscorner = True
            if pt[1] >= realsensecam().H - 1:  # Bottom edge
                touched_corners.add(2)
                iscorner = True
            if pt[0] >= realsensecam().W - 1:  # Right edge
                touched_corners.add(3)
                iscorner = True
            if iscorner:  # If the point has touched an edge, add it to edgepts
                self.edgepts.append(pt)

        # Make sure that top and bottom (or left and right) edge are not touched simultaneously
        for i in touched_corners:
            for j in touched_corners:
                if i != j and i % 2 == j % 2:
                    # In this case, the hand is larger than the recorded area and we cannot infer anything
                    self.hand_valid = False
                    print("Hand error: Hand too long")
                    return

        # Detect where the hand is touching the edge(s)
        if len(self.edgepts) < 2:
            self.hand_valid = False
            print("Hand error: Edge points detection failed")
            return
        # Find the two points touching an edge that are furthest apart, as well as their center
        pairwise_1_norm_dists = cdist(self.edgepts, self.edgepts, 'cityblock')
        furthest_pts = np.unravel_index(np.argmax(pairwise_1_norm_dists),
                                        pairwise_1_norm_dists.shape)
        self.edgeextrem1 = tuple(self.edgepts[furthest_pts[0]])
        self.edgeextrem2 = tuple(self.edgepts[furthest_pts[1]])
        self.edgeextremcenter = (int((self.edgeextrem1[0] + self.edgeextrem2[0]) / 2),
                                 int((self.edgeextrem1[1] + self.edgeextrem2[1]) / 2))

        # The following code is for finger detection

        # Acquire slice
        slice_img = self.__get_slice_img(3, 20)
        if slice_img is None:
            self.hand_valid = False
            print("Hand error: Invalid slice img")
            return
        slice_cnts, _ = cv2.findContours(slice_img, cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
        if len(slice_cnts) == 0:
            self.hand_valid = False
            print("Hand error: Invalid slice contours")
            return

        # Get furthest point from arm entry point
        cnts_arr = np.concatenate(slice_cnts)[:, 0]
        dists = cdist(cnts_arr, [self.edgeextremcenter])
        furthest_pt = cnts_arr[np.argmax(dists, axis=0)][0]
        self.fingertip_pos = tuple(furthest_pt)

        # Calculate height of fingertip
        x = self.fingertip_pos[0]
        y = self.fingertip_pos[1]
        r = conf()['finger_height_measure_radius']
        left = max(0, x - r)
        top = max(0, y - r)
        right = min(realsensecam().W, x + r)
        bottom = min(realsensecam().H, y + r)
        # Let the highest pixel of that surface be the fingertip height
        cropped = realsensecam().depth_blurred[top:bottom, left:right]
        observed_height = np.max(cropped)
        if self.fingertip_height == np.inf:
            self.fingertip_height = observed_height
        else:
            self.fingertip_height = 0.5 * observed_height + 0.5 * self.fingertip_height
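A minimal sketch of the furthest-pair search used for the edge points above, with scipy's pairwise city-block distances on hypothetical points:

import numpy as np
from scipy.spatial.distance import cdist

pts = np.array([[0, 0], [5, 1], [100, 80], [3, 3]])
d = cdist(pts, pts, 'cityblock')
i, j = np.unravel_index(np.argmax(d), d.shape)
print(pts[i], pts[j])  # [0 0] [100 80], the pair furthest apart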
import cv2
from conf import *
from controller import controller
from realsensecam import realsensecam

NAME = 'sample_videos/test'

if __name__ == "__main__":
    with Conf():
        # Initialize camera
        realsensecam([NAME + '_rgb.mp4', NAME + '_depth.mp4'])
        # Initialize controller
        controller()

        while True:
            img = controller().next_frame()
            if img is None:
                print("Controller reports end of capture. Terminating.")
                break
            cv2.imshow('DynamicUIs', img)
            k = cv2.waitKey(1)
            if ord('q') == k:
                break
            else:
                controller().on_key(k)
        realsensecam().stop()  # Stop camera


def visualize_pts(img, pts, color):
    for pt in pts.astype(int):
        cv2.circle(img, tuple(pt), 1, color)


realsensecam()
rof = None
new_img = None
new_shape_mask_filtered = None
while True:
    # Read a new frame from the camera
    realsensecam().acquire_frames()
    new_shape_mask_filtered, largest_shape_cnt = acquire_masks()
    if new_shape_mask_filtered is None:
        continue

    new_img = cv2.cvtColor(realsensecam().bgr, cv2.COLOR_BGR2GRAY)
    if rof is None:
        rof = ReconstructiveOpticalFlow(new_img, new_shape_mask_filtered)
        continue
Example #19
        '--breakpoints',
        nargs='+',
        type=int,
        help="Pause at the specified frames and wait for a key to be pressed")
    parser.add_argument('-l', '--logfile')
    args = parser.parse_args()

    if args.video_output is not None and args.video_input is not None:
        print("Error: -i and -o cannot be used at the same time.")
        exit(-1)

    # Initialize camera
    if args.video_input is not None:
        realsensecam(list(filename_from_name(args.video_input)))
    else:
        realsensecam()

    if args.logfile is not None:
        logger = Logger()
    else:
        logger = None
    controller(logger)

    if args.video_output is not None:
        videowriter = VideoWriter(*filename_from_name(args.video_output),
                                  30, (realsensecam().W, realsensecam().H),
                                  threaded=False)

    breakpoints = args.breakpoints or []
Example #20
    def position_difference(self, other_shape):
        return np.linalg.norm(other_shape.bbox.center_nparr() - self.bbox.center_nparr()) / realsensecam().diagonal
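Since the distance is normalized by the camera diagonal, the result is resolution-independent. A tiny sketch with a hypothetical 320x240 frame (diagonal 400): two centers in opposite corners yield 1.0.

import numpy as np

center_a, center_b = np.array([0, 0]), np.array([320, 240])
print(np.linalg.norm(center_b - center_a) / 400.0)  # 1.0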