Example #1
    def cv_init(self):
        WINDOW = "Hand Tracking"
        # MediaPipe palm-detection / hand-landmark models and their SSD anchors
        PALM_MODEL_PATH = "models/palm_detection_without_custom_op.tflite"
        LANDMARK_MODEL_PATH = "models/hand_landmark.tflite"
        ANCHORS_PATH = "models/anchors.csv"
        detector = HandTracker(PALM_MODEL_PATH,
                               LANDMARK_MODEL_PATH,
                               ANCHORS_PATH,
                               box_shift=0.2,
                               box_enlarge=1.3)
        # Keras model that classifies a detected hand into one of the SIGNS below
        sign_classifier = load_model('models/model2.h5')
        SIGNS = [
            'one', 'two', 'three', 'four', 'five', 'ok', 'rock', 'thumbs_up'
        ]
        SIGNS_dict = {
            'one': 1,
            'two': 2,
            'three': 3,
            'four': 4,
            'five': 5,
            'ok': 6,
            'rock': 7,
            'thumbs_up': 8
        }
        POINT_COLOR = (0, 255, 0)
        CONNECTION_COLOR = (255, 0, 0)
        THICKNESS = 2

        cv2.namedWindow(WINDOW)
        capture = cv2.VideoCapture(1)

        if capture.isOpened():
            hasFrame, frame = capture.read()
        else:
            hasFrame = False

        # Landmark index pairs used to draw the hand skeleton
        self.connections = [(0, 1), (1, 2), (2, 3), (3, 4), (5, 6), (6, 7),
                            (7, 8), (9, 10), (10, 11), (11, 12), (13, 14),
                            (14, 15), (15, 16), (17, 18), (18, 19), (19, 20),
                            (0, 5), (5, 9), (9, 13), (13, 17), (0, 17)]
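
The snippet above assumes imports that are not shown. A minimal sketch of them follows; the module path for HandTracker and the Keras package are assumptions based on common layouts for this hand-tracking code, so adjust them to the actual project:

import cv2
from keras.models import load_model       # assumption: may be tensorflow.keras.models
from src.hand_tracker import HandTracker  # assumption: actual module path may differ
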
Example #2
    def __init__(self):
        self.palm_model_path = "./models/palm_detection_without_custom_op.tflite"
        self.landmark_model_path = "./models/hand_landmark.tflite"
        self.anchors_path = "./models/anchors.csv"

        self.detector = HandTracker(self.palm_model_path,
                                    self.landmark_model_path,
                                    self.anchors_path,
                                    box_shift=0.2,
                                    box_enlarge=1.3)

        self.POINT_COLOR = (0, 255, 0)
        self.CONNECTION_COLOR = (255, 0, 0)
        self.THICKNESS = 2
        self.connections = [(0, 1), (1, 2), (2, 3), (3, 4), (5, 6), (6, 7),
                            (7, 8), (9, 10), (10, 11), (11, 12), (13, 14),
                            (14, 15), (15, 16), (17, 18), (18, 19), (19, 20),
                            (0, 5), (5, 9), (9, 13), (13, 17), (0, 17)]

        self.palm_pos = [0, 0]   # last known palm position (x, y)
        self.gesture = "first"   # current gesture label (initial placeholder)
        self.palm_depth = 0      # estimated palm depth
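
For reference, the detector built in this __init__ is typically called once per frame; a minimal sketch of such a step using the attributes defined above, following the call pattern seen in the later examples (the method name process_frame is invented for illustration, and cv2 is assumed to be imported):

    def process_frame(self, frame):
        # HandTracker expects RGB input, while OpenCV captures BGR frames
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        points, _ = self.detector(image)
        if points is not None:
            for x, y in points:
                cv2.circle(frame, (int(x), int(y)), self.THICKNESS * 2,
                           self.POINT_COLOR, self.THICKNESS)
            for a, b in self.connections:
                x0, y0 = points[a]
                x1, y1 = points[b]
                cv2.line(frame, (int(x0), int(y0)), (int(x1), int(y1)),
                         self.CONNECTION_COLOR, self.THICKNESS)
        return frame
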
Example #3
#        8   12  16  20
#        |   |   |   |
#        7   11  15  19
#    4   |   |   |   |
#    |   6   10  14  18
#    3   |   |   |   |
#    |   5---9---13--17
#    2    \         /
#     \    \       /
#      1    \     /
#       \    \   /
#        ------0-
connections = [(0, 1), (1, 2), (2, 3), (3, 4), (5, 6), (6, 7), (7, 8), (9, 10),
               (10, 11), (11, 12), (13, 14), (14, 15), (15, 16), (17, 18),
               (18, 19), (19, 20), (0, 5), (5, 9), (9, 13), (13, 17), (0, 17)]

detector = HandTracker(PALM_MODEL_PATH,
                       LANDMARK_MODEL_PATH,
                       ANCHORS_PATH,
                       box_shift=0.2,
                       box_enlarge=1.3)


def get_size_pulm(pulm_key_point):
    # calculate the size of the palm: total length of the segments between
    # consecutive key points
    k_pulm = len(pulm_key_point)
    distance_pulm = np.zeros(k_pulm - 1)
    size_pulm = 0
    for i in range(k_pulm - 1):
        x1 = pulm_key_point[i][0]
        y1 = pulm_key_point[i][1]
        x2 = pulm_key_point[i + 1][0]
        y2 = pulm_key_point[i + 1][1]
        distance_pulm[i] = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
        size_pulm += distance_pulm[i]
    return size_pulm
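
A plausible call site for get_size_pulm, given a BGR frame from the capture loop and the detector created above; the palm_ids subset is an assumption based on the palm outline in the diagram, not something taken from the original code:

palm_ids = [0, 1, 2, 5, 9, 13, 17]  # assumed palm-outline landmarks
points, _ = detector(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if points is not None:
    palm_size = get_size_pulm([points[i] for i in palm_ids])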
Example #4
def main():
    cv2.namedWindow(WINDOW)
    capture = cv2.VideoCapture(0)

    if capture.isOpened():
        hasFrame, frame = capture.read()
    else:
        hasFrame = False

    detector = HandTracker(PALM_MODEL_PATH,
                           LANDMARK_MODEL_PATH,
                           ANCHORS_PATH,
                           box_shift=0.2,
                           box_enlarge=1.3)

    while hasFrame:
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        scale = np.array(np.max(frame.shape[:2]) / 256.0)
        # hard-coded vertical padding; matches a 1280x720 frame padded to a
        # 1280x1280 square ((1280 - 720) / 2 = 280)
        padding = [0, 280]

        img_pad, img_norm, pad = detector.preprocess_img(image)
        source, keypoints, debug_info = detector.detect_hand(img_norm)

        if debug_info is not None:
            candidate_detect = debug_info["detection_candidates"]
            candidate_anchors = debug_info["anchor_candidates"]
            selected_box_id = debug_info["selected_box_id"]

        if DRAW_ANCHORS and debug_info is not None:
            for anchor in candidate_anchors:
                dx, dy = anchor[:2] * 256
                # 0.2 is a guess; it may not be the correct size multiplier
                w, h = anchor[2:] * 256 * 0.2
                box = box_from_dimensions(dx - (w / 2), dy - (h / 2), h, w)
                box *= scale
                box -= padding
                frame = draw_box(frame, box, color=(200, 0, 0))

        if DRAW_DETECTION_BOXES and debug_info is not None:
            for i, detection in enumerate(candidate_detect):
                dx, dy, w, h = detection[:4]
                center_wo_offst = candidate_anchors[i, :2] * 256
                box = box_from_dimensions(dx - (w / 2), dy - (h / 2), h, w)
                box += center_wo_offst
                box *= scale
                box -= padding
                frame = draw_box(frame, box)

        if DRAW_HAND_KEYPOINTS and debug_info is not None:
            detection = candidate_detect[selected_box_id]
            center_wo_offst = candidate_anchors[selected_box_id, :2] * 256
            hand_key_points = center_wo_offst + detection[4:].reshape(-1, 2)
            for key_point in hand_key_points:
                key_point *= scale
                key_point -= padding
                cv2.circle(frame,
                           tuple(key_point.astype("int")),
                           color=(255, 255, 255),
                           radius=5,
                           thickness=2)

        if DRAW_BEST_DETECTION_BOX_NMS and debug_info is not None:
            detection = candidate_detect[selected_box_id]
            dx, dy, w, h = detection[:4]
            center_wo_offst = candidate_anchors[selected_box_id, :2] * 256
            box = box_from_dimensions(dx - (w / 2), dy - (h / 2), h, w)
            box += center_wo_offst
            box *= scale
            box -= padding
            frame = draw_box(frame, box, color=(0, 0, 255))

        cv2.imshow(WINDOW, frame)
        hasFrame, frame = capture.read()
        key = cv2.waitKey(20)
        if key == ESCAPE_KEY_CODE:
            break

    capture.release()
    cv2.destroyAllWindows()
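
main() also relies on module-level names the excerpt does not show: the window title, model paths, drawing toggles, and the helpers box_from_dimensions and draw_box. A sketch of plausible definitions for the constants follows; the toggle values and ESCAPE_KEY_CODE are assumptions, the paths mirror the other examples, and the two helper functions live elsewhere in that project and are not reproduced here:

WINDOW = "Hand Tracking"
PALM_MODEL_PATH = "models/palm_detection_without_custom_op.tflite"
LANDMARK_MODEL_PATH = "models/hand_landmark.tflite"
ANCHORS_PATH = "models/anchors.csv"
ESCAPE_KEY_CODE = 27               # ASCII code for the Esc key
# Debug-drawing toggles checked inside the frame loop (values assumed)
DRAW_ANCHORS = False
DRAW_DETECTION_BOXES = True
DRAW_HAND_KEYPOINTS = True
DRAW_BEST_DETECTION_BOX_NMS = True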
Example #5
def gesture_function():
    cv2.namedWindow(WINDOW)
    capture = cv2.VideoCapture(1)

    if capture.isOpened():
        hasFrame, frame = capture.read()
    else:
        hasFrame = False

    #        8   12  16  20
    #        |   |   |   |
    #        7   11  15  19
    #    4   |   |   |   |
    #    |   6   10  14  18
    #    3   |   |   |   |
    #    |   5---9---13--17
    #    2    \         /
    #     \    \       /
    #      1    \     /
    #       \    \   /
    #        ------0-
    connections = [(0, 1), (1, 2), (2, 3), (3, 4), (5, 6), (6, 7), (7, 8),
                   (9, 10), (10, 11), (11, 12), (13, 14), (14, 15), (15, 16),
                   (17, 18), (18, 19), (19, 20), (0, 5), (5, 9), (9, 13),
                   (13, 17), (0, 17)]

    detector = HandTracker(PALM_MODEL_PATH,
                           LANDMARK_MODEL_PATH,
                           ANCHORS_PATH,
                           box_shift=0.2,
                           box_enlarge=1.3)

    def get_euclidean_distance(ax, ay, bx, by):
        dist = ((ax - bx)**2) + ((ay - by)**2)
        return math.sqrt(dist)

    def isThumbNearFirstFinger(p1, p2):
        distance = get_euclidean_distance(p1[0], p1[1], p2[0], p2[1])
        return distance < 0.1

    # per-gesture frame counters; index order matches the labels in the comments below
    gesture = [0 for i in range(9)]
    global_vars.acquire()
    while hasFrame and global_vars.do_react:
        global_vars.release()

        thumbIsOpen = False
        firstFingerIsOpen = False
        secondFingerIsOpen = False
        thirdFingerIsOpen = False
        fourthFingerIsOpen = False

        k = 0

        landmarkList = [(0, 0)] * 21

        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        points, _ = detector(image)
        if points is not None:
            for point in points:
                x, y = point
                landmarkList[k] = (x, y)
                k = k + 1
                cv2.circle(frame, (int(x), int(y)), THICKNESS * 2, POINT_COLOR,
                           THICKNESS)
            for connection in connections:
                x0, y0 = points[connection[0]]
                x1, y1 = points[connection[1]]
                cv2.line(frame, (int(x0), int(y0)), (int(x1), int(y1)),
                         CONNECTION_COLOR, THICKNESS)

            j = landmarkList[2][0]
            if landmarkList[3][0] < j and landmarkList[4][0] < j:
                thumbIsOpen = True

            j = landmarkList[6][1]
            if landmarkList[7][1] < j and landmarkList[8][1] < j:
                firstFingerIsOpen = True

            j = landmarkList[10][1]
            if landmarkList[11][1] < j and landmarkList[12][1] < j:
                secondFingerIsOpen = True

            j = landmarkList[14][1]
            if landmarkList[15][1] < j and landmarkList[16][1] < j:
                thirdFingerIsOpen = True

            j = landmarkList[18][1]
            if landmarkList[19][1] < j and landmarkList[20][1] < j:
                fourthFingerIsOpen = True

            if thumbIsOpen and firstFingerIsOpen and secondFingerIsOpen and thirdFingerIsOpen and fourthFingerIsOpen:
                gesture[5] += 1  # "FIVE"
            elif not thumbIsOpen and firstFingerIsOpen and secondFingerIsOpen and thirdFingerIsOpen and fourthFingerIsOpen:
                gesture[4] += 1  # "FOUR"
            elif thumbIsOpen and firstFingerIsOpen and secondFingerIsOpen and not thirdFingerIsOpen and not fourthFingerIsOpen:
                gesture[3] += 1  # "THREE"
            elif thumbIsOpen and firstFingerIsOpen and not secondFingerIsOpen and not thirdFingerIsOpen and not fourthFingerIsOpen:
                gesture[2] += 1  # "TWO"
            elif not thumbIsOpen and firstFingerIsOpen and not secondFingerIsOpen and not thirdFingerIsOpen and not fourthFingerIsOpen:
                gesture[1] += 1  # "ONE"
            elif not thumbIsOpen and firstFingerIsOpen and secondFingerIsOpen and not thirdFingerIsOpen and not fourthFingerIsOpen:
                gesture[0] += 1  # "YEAH"
            elif not thumbIsOpen and firstFingerIsOpen and not secondFingerIsOpen and not thirdFingerIsOpen and fourthFingerIsOpen:
                gesture[6] += 1  # "ROCK"
            elif thumbIsOpen and firstFingerIsOpen and not secondFingerIsOpen and not thirdFingerIsOpen and fourthFingerIsOpen:
                gesture[7] += 1  # "SPIDERMAN"
            elif not thumbIsOpen and not firstFingerIsOpen and not secondFingerIsOpen and not thirdFingerIsOpen and not fourthFingerIsOpen:
                gesture[8] += 1  # "FIST"

            print(gesture)

            if any([i >= 3 for i in gesture]):
                cur_gesture = gesture.index(3)
                print("Detected gesture: ", cur_gesture)
                global_vars.acquire()
                # print("inside lock from gesture!")
                cur_action = global_vars.gesture_assignments[cur_gesture]
                if cur_action != 7:
                    try:
                        # print(global_vars.gesture_assignments)
                        zoom_bridge_functions.zoom_function_wrap(cur_action)
                    except (pywinauto.findwindows.ElementNotFoundError,
                            pywinauto.findbestmatch.MatchError, AttributeError,
                            RuntimeError) as e:
                        print(e.args)
                else:
                    global_vars.do_live_transcribe = not global_vars.do_live_transcribe

                global_vars.release()
                # print("outside lock from gesture!")
                gesture = [0 for i in range(9)]

        # cv2.imshow(WINDOW, frame)
        hasFrame, frame = capture.read()
        key = cv2.waitKey(1)
        if key == 27:
            break
        time.sleep(0.01)
        global_vars.acquire()

    capture.release()
    cv2.destroyAllWindows()
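
gesture_function() depends on a global_vars module (a shared lock plus state) and on zoom_bridge_functions for the Zoom actions, neither of which is shown. A hypothetical minimal stand-in for the shared-state side, covering only the attributes the snippet touches (every value here is an assumption):

# global_vars.py -- hypothetical stub
import threading

_lock = threading.Lock()
acquire = _lock.acquire
release = _lock.release

do_react = True               # the gesture loop keeps running while this is True
do_live_transcribe = False    # toggled when the detected gesture maps to action 7
# maps a detected gesture index (0-8) to an action id understood by
# zoom_bridge_functions.zoom_function_wrap(); the identity mapping is a placeholder
gesture_assignments = {i: i for i in range(9)}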
Example #6
def handtracking():
    #Initialize webcam capture
    cv2.namedWindow(WINDOW)
    capture = cv2.VideoCapture(0)
    record = False

    if capture.isOpened():
        hasFrame, frame = capture.read()
    else:
        hasFrame = False

    #        8   12  16  20
    #        |   |   |   |
    #        7   11  15  19
    #    4   |   |   |   |
    #    |   6   10  14  18
    #    3   |   |   |   |
    #    |   5---9---13--17
    #    2    \         /
    #     \    \       /
    #      1    \     /
    #       \    \   /
    #        ------0-

    #All connections used to draw the hand, based on the diagram above
    connections = [(1, 2), (2, 3), (3, 4), (5, 6), (6, 7), (7, 8), (9, 10),
                   (10, 11), (11, 12), (13, 14), (14, 15), (15, 16), (17, 18),
                   (18, 19), (19, 20), (5, 9), (9, 13), (13, 17), (0, 5),
                   (0, 17), (0, 9), (0, 13), (0, 2), (0, 1), (1, 5), (0, 6),
                   (0, 10), (0, 14), (0, 18), (1, 9), (1, 13), (1, 17)]

    #Palm connections
    palm = [(0, 9), (0, 13), (0, 17), (0, 1), (1, 5), (1, 9), (1, 13), (1, 17),
            (0, 5), (0, 17), (5, 9), (9, 13), (13, 17)]

    detector = HandTracker(PALM_MODEL_PATH,
                           LANDMARK_MODEL_PATH,
                           ANCHORS_PATH,
                           box_shift=0.2,
                           box_enlarge=1.3)

    #Initialize Video Writer to save video clip for model to predict on
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
    size = (width, height)
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')
    out = cv2.VideoWriter('testvideo.mp4', fourcc, 5, size)

    while hasFrame:

        #Convert images to different color channels for processing
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        #Masking image to isolate the hand. Note that cv2.inRange only matches
        #pixels when every lower-bound channel is <= the corresponding
        #upper-bound channel, so the bounds below are ordered low-to-high.
        hsv_color1 = np.asarray([0, 0, 133])
        hsv_color2 = np.asarray([76, 36, 255])

        mask = cv2.inRange(image_hsv, hsv_color1, hsv_color2)
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

        if not record:
            cv2.putText(mask, "Press Space To Start Recording or Q to quit",
                        (50, 50), 0, 1, (255, 255, 255), 2, cv2.LINE_AA)

        #Detect points on hand
        points, _ = detector(image)
        if points is not None:
            for point in points:
                x, y = point

            #Join points using connections
            for connection in connections:

                x0, y0 = points[connection[0]]
                x1, y1 = points[connection[1]]

                if connection in palm:
                    cv2.line(mask, (int(x0), int(y0)), (int(x1), int(y1)),
                             CONNECTION_COLOR, THICKNESS * 5)

                else:
                    cv2.line(mask, (int(x0), int(y0)), (int(x1), int(y1)),
                             CONNECTION_COLOR, THICKNESS * 3)

        cv2.namedWindow("Sign2Text", cv2.WINDOW_NORMAL)

        horizontal_concat = np.concatenate((mask, frame), axis=1)

        cv2.resizeWindow('Sign2Text', 1000, 1000)

        cv2.imshow('Sign2Text', horizontal_concat)

        hasFrame, frame = capture.read()
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
        if key % 256 == 32:
            record = True
            cv2.rectangle(mask, (0, 0), (1240, 100), (0, 0, 0), -1)
        if record:
            out.write(mask)

    capture.release()
    out.release()
    cv2.destroyAllWindows()
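
The writer above records the masked frames so that a sign-recognition model can run on the clip afterwards; that step is not part of the excerpt. A hypothetical follow-up that simply reads the recorded clip back (the prediction call itself is omitted because the model is not shown):

# Hypothetical: load the recorded clip back for prediction
clip = cv2.VideoCapture('testvideo.mp4')
frames = []
while True:
    ok, f = clip.read()
    if not ok:
        break
    frames.append(f)
clip.release()
# `frames` would then be fed to whatever model Sign2Text uses (not shown here)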
Example #7
def hand_tracking(frame, detector):
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    points, bbox = detector(image)
    return (frame, bbox)


cv2.namedWindow("New")

PALM_MODEL_PATH = "models/palm_detection_without_custom_op.tflite"
CONNECTION_COLOR = (0, 255, 0)
THICKNESS = 4
ANCHORS_PATH = "models/anchors.csv"

detector = HandTracker("false",
                       PALM_MODEL_PATH,
                       "",
                       ANCHORS_PATH,
                       box_shift=0.2,
                       box_enlarge=1)

cap = cv2.VideoCapture(0)

results, answer = run(n=0)  # find puzzles with as few givens as possible.
puzzle = best(results)  # use the best one of those puzzles.
#print(puzzle)

if cap.isOpened():
    hasFrame, frame = cap.read()
else:
    hasFrame = False

bbox = []
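
The excerpt ends right after initialisation, so the per-frame loop is missing. A sketch of what would plausibly follow, reusing the hand_tracking() helper defined above; treating bbox as a 4x2 array of box corner points is an assumption about this library's return value:

# Hypothetical continuation of the setup above
while hasFrame:
    frame, bbox = hand_tracking(frame, detector)
    if bbox is not None:
        pts = bbox.astype(int)  # assumed: four (x, y) corners of the palm box
        for i in range(4):
            p1 = (int(pts[i][0]), int(pts[i][1]))
            p2 = (int(pts[(i + 1) % 4][0]), int(pts[(i + 1) % 4][1]))
            cv2.line(frame, p1, p2, CONNECTION_COLOR, THICKNESS)
    cv2.imshow("New", frame)
    hasFrame, frame = cap.read()
    if cv2.waitKey(1) & 0xFF == 27:  # Esc
        break

cap.release()
cv2.destroyAllWindows()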