Example #1
    def detect_hands(self, background, frame):
        """Detect hands in `frame` with the SSD hand detector."""
        # Run the detector on the current frame.
        boxes, scores = detector_utils.detect_objects(frame,
                                                      self.detection_graph,
                                                      self.sess)
        # Filter the detections and locate the hands against the background.
        return detector_utils.find_hand_in_image(
            self.params['num_hands_detect'], self.params["score_thresh"],
            scores, boxes, background, not self.game_start)
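
find_hand_in_image is part of this project's detector_utils. The raw output of detect_objects, though, follows the usual TensorFlow Object Detection API shape, so thresholding it by hand looks roughly like this (a sketch; the normalized box order is an assumption):

# A sketch built on the usual TensorFlow Object Detection API convention:
# detect_objects() is assumed to return normalized [ymin, xmin, ymax, xmax]
# boxes and a parallel array of confidence scores.
def hands_above_threshold(boxes, scores, im_width, im_height, thresh=0.5):
    hands = []
    for box, score in zip(boxes, scores):
        if score < thresh:
            continue
        ymin, xmin, ymax, xmax = box
        # Convert normalized coordinates to pixel coordinates.
        hands.append((int(xmin * im_width), int(ymin * im_height),
                      int(xmax * im_width), int(ymax * im_height)))
    return hands
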
Example #2
def main():
    cap = cv2.VideoCapture(0)
    detection_graph, sess = detector_utils.load_inference_graph()

    # Read one frame to learn the capture size, then work at half
    # resolution to keep detection fast.
    ret, frame = cap.read()
    height, width, _ = frame.shape
    frame = cv2.resize(frame, (width // 2, height // 2))
    height, width, _ = frame.shape

    pong = Pong(h=height,
                w=width,
                default_ball_dx=width // 100,
                default_ball_dy=height // 100,
                default_paddle_speed=height // 100,
                default_half_paddle_height=height // 10)

    while True:
        ret, frame = cap.read()
        frame = cv2.resize(frame, (width, height))
        frame = cv2.flip(frame, 1)  # flip across vertical axis

        # wait for keys
        key = cv2.waitKey(100)
        pong.on_key(key)

        boxes, scores = detector_utils.detect_objects(
            cv2.resize(frame, (320, 180)), detection_graph, sess)
        if boxes is not None and scores is not None:
            # Draw bounding boxes (at most 1 hand, score threshold 0.5).
            detector_utils.draw_box_on_image(1, 0.5, scores, boxes, width,
                                             height, frame)
            update_pong_with_boxes_scores(boxes, scores, pong, height)

        # update game
        ended = pong.update()
        pong.draw(frame)

        # Display the resulting frame
        cv2.imshow('frame', frame)
        if pong.is_key(key, 'q') or ended:
            break

    cap.release()
    cv2.destroyAllWindows()
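
update_pong_with_boxes_scores is not shown in this listing. A hypothetical sketch of what it might do, assuming normalized boxes, numpy arrays, and a paddle-positioning method on Pong (move_paddle_to is an invented name, not the project's API):

# Hypothetical helper, not the project's actual implementation: steer the
# paddle toward the vertical center of the strongest detection.
def update_pong_with_boxes_scores(boxes, scores, pong, height, thresh=0.5):
    best = int(scores.argmax())  # assumes numpy arrays
    if scores[best] < thresh:
        return
    ymin, _, ymax, _ = boxes[best]  # assumed normalized coordinates
    center_y = int((ymin + ymax) / 2 * height)
    pong.move_paddle_to(center_y)  # assumed Pong method
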
Example #3
def worker(input_q, output_q, cap_params, frame_processed):
    print(">> loading frozen model for worker")
    # load_inference_graph() already returns a session bound to the graph,
    # so opening a second tf.Session here would leak the first one.
    detection_graph, sess = detector_utils.load_inference_graph()
    while True:
        frame = input_q.get()
        if frame is not None:
            # actual detection
            boxes, scores = detector_utils.detect_objects(
                frame, detection_graph, sess)

            print(boxes, scores)
            # draw bounding boxes
            detector_utils.draw_box_on_image(
                cap_params['num_hands_detect'], cap_params["score_thresh"],
                scores, boxes, cap_params['im_width'],
                cap_params['im_height'], frame)
            output_q.put(frame)
            frame_processed += 1
        else:
            # A None frame is treated as a shutdown sentinel: forward it so
            # downstream consumers stop too, then leave the loop so the
            # session actually gets closed.
            output_q.put(frame)
            break
    sess.close()
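
For context, a minimal sketch of driving this worker, assuming standard multiprocessing queues feed it frames and None serves as the shutdown sentinel (queue sizes and frame dimensions are illustrative):

# Minimal wiring sketch for the worker above; values are illustrative.
from multiprocessing import Process, Queue

import cv2

if __name__ == '__main__':
    input_q, output_q = Queue(maxsize=5), Queue(maxsize=5)
    cap_params = {'num_hands_detect': 2, 'score_thresh': 0.5,
                  'im_width': 320, 'im_height': 180}
    Process(target=worker, args=(input_q, output_q, cap_params, 0),
            daemon=True).start()

    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        input_q.put(cv2.resize(frame, (320, 180)))
        cv2.imshow('detections', output_q.get())
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    input_q.put(None)  # shutdown sentinel for the worker
    cap.release()
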
Example #4
def test_nn():
    camera = cv2.VideoCapture(0)
    # Load the detector here so the names used below are defined in scope.
    detection_graph, sess = detector_utils.load_inference_graph()
    handset = PersistentHandset()
    background_set = False

    while True:
        ret, frame = camera.read()

        # Capture a blurred background once, from the first frame.
        if not background_set:
            blurred_background = cv2.medianBlur(frame, 7)
            handset.background = blurred_background
            # time.clock() was removed in Python 3.8; use perf_counter().
            handset.bg_update = time.perf_counter()
            background_set = True

        boxes, scores = detector_utils.detect_objects(frame, detection_graph,
                                                      sess)
        blurred_frame = cv2.GaussianBlur(frame, (9, 9), 3)
        handset.update(boxes, scores, blurred_frame)

        # print("Total Hands in Handset: ", len(handset.hands))

        # Debug views
        for hand in handset.hands:
            draw_hand_bb_on_image(frame, hand)
            cv2.circle(frame, hand.b_center, 10, (255, 0, 0), -1)
            cv2.imshow("Mask", hand.mask)
            hullcopy = cv2.cvtColor(hand.mask.copy(), cv2.COLOR_GRAY2BGR)
            for point in hand.hull.tolist():
                p = point[0]
                cv2.circle(hullcopy, (p[0], p[1]), MINIMUM_TIP_RADIUS,
                           (0, 0, 255), -1)
            cv2.imshow("Hull", hullcopy)
        cv2.imshow("Output", frame)

        key = cv2.waitKey(1)
        if key == 27:  # Esc
            break

    camera.release()
    cv2.destroyAllWindows()
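
draw_hand_bb_on_image is referenced but not defined in the snippet. A plausible sketch, assuming the hand object carries a pixel-space (left, top, right, bottom) bounding box (the attribute name bbox is an assumption):

# Hypothetical drawing helper; `hand.bbox` is an assumed attribute.
import cv2

def draw_hand_bb_on_image(frame, hand):
    left, top, right, bottom = hand.bbox  # assumed pixel coordinates
    cv2.rectangle(frame, (int(left), int(top)), (int(right), int(bottom)),
                  (0, 255, 0), 2)
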
Example #5
def test_hand(iou_threshold=0.5, size=1):
    """tests the hand detector with given iou and scale"""
    # load_inference_graph() already returns a usable session; do not
    # open (and leak) a second tf.Session here.
    detection_graph, sess = detector_utils.load_inference_graph(
        "/frozen_inference_graph.pb")
    total_time = 0
    num_pic = 0
    labels = open("test/hand_label.txt", "r")
    path = labels.readline().rstrip()
    total = 0
    count = 0
    true_positive = 0
    false_positive = 0
    maximum = 0
    minimum = float("inf")
    while path:
        image = cv2.imread('test/image_hand/' + path, 1)
        num_pic += 1
        num_face = labels.readline().rstrip()  # label count per image (the file reuses the face-test format)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        image = cv2.resize(image, (0, 0), fx=size, fy=size)
        start_time = time.time()
        boxes, scores = detector_utils.detect_objects(image, detection_graph,
                                                      sess)
        total_time += (time.time() - start_time)
        _, hand_boxes = detector_utils.find_hand_in_image(
            0, 0.25, scores, boxes, image, False)

        rect_list = []
        for i in range(int(num_face)):
            rect = labels.readline().rstrip().split()
            rect = list(map(int, rect))
            rect_list.append(rect)
        for top, right, bottom, left in hand_boxes:
            iou = 0
            # Scale detections back to the original image size.
            top /= size
            right /= size
            bottom /= size
            left /= size
            for rect in rect_list:
                iou = max(iou, IOU(rect, [left, top, right, bottom]))
            if iou < iou_threshold:
                false_positive += 1
            else:
                true_positive += 1
            total += iou
            minimum = min(minimum, iou)
            maximum = max(maximum, iou)
        count += int(num_face)
        path = labels.readline().rstrip()

    labels.close()
    print("hand precision@{}: {:.3f}".format(
        iou_threshold, true_positive / (true_positive + false_positive)))
    print("hand recall@{}: {:.3}".format(iou_threshold, true_positive / count))
    print("average_time:", total_time / num_pic)
    return true_positive / (true_positive +
                            false_positive), total_time / num_pic
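
The IOU helper this test depends on is not shown. A standard intersection-over-union for [x1, y1, x2, y2] boxes, matching the [left, top, right, bottom] order used above, would look like this (a sketch; the project's own IOU may differ):

# Standard intersection-over-union for [x1, y1, x2, y2] boxes.
# A sketch: the project's own IOU() may order coordinates differently.
def IOU(box_a, box_b):
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    # Overlap rectangle (zero if the boxes are disjoint).
    iw = max(0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    union = ((ax2 - ax1) * (ay2 - ay1) +
             (bx2 - bx1) * (by2 - by1) - inter)
    return inter / union if union else 0.0
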
Example #6
    # time.clock() was removed in Python 3.8; use perf_counter().
    handset.bg_update = time.perf_counter()

    # Build the arena obstacles from edges in the background frame.
    canny_edges = cv2.Canny(frame, 40, 200, 3)
    blur_edges = cv2.GaussianBlur(canny_edges, (3, 3), 3)
    edge_contours, _ = cv2.findContours(blur_edges, cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for cnt in edge_contours:
        rect = cv2.minAreaRect(cnt)
        # np.int0 is a deprecated NumPy alias; cast explicitly instead.
        box = cv2.boxPoints(rect).astype(np.intp)
        boxes.append(box)
    arena = TouchArena(width, height, boxes)

    while True:
        # Process Handset using opencv
        ret, frame = camera.read()
        boxes, scores = detector_utils.detect_objects(
            frame, hand_analyzer.detection_graph, hand_analyzer.sess)
        blurred_frame = cv2.GaussianBlur(frame, (9, 9), 3)
        handset.update(boxes, scores, blurred_frame)

        # Debug window from opencv
        for hand in handset.hands:
            hand_analyzer.draw_hand_bb_on_image(frame, hand)
            cv2.circle(frame, hand.b_center, 10, (255, 0, 0), -1)
            # cv2.imshow("Mask", hand.mask)
            hullcopy = cv2.cvtColor(hand.mask.copy(), cv2.COLOR_GRAY2BGR)
            for point in hand.hull.tolist():
                p = point[0]
                cv2.circle(hullcopy, (p[0], p[1]),
                           hand_analyzer.MINIMUM_TIP_RADIUS, (0, 0, 255), -1)
            cv2.imshow("Hull", hullcopy)
Example #7
    brake_hand = [0, 0, 0, 0]
    gas_hand = [1, 1, 1, 1]
    while True:
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        ret, image_np = cap.read()
        # image_np = cv2.flip(image_np, 1)
        try:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        except cv2.error:  # avoid a bare except
            print("Error converting to RGB")

        # Actual detection. `boxes` holds the bounding-box coordinates for
        # the detected hands, and `scores` the confidence for each box.
        # Hint: if len(boxes) > 1, you may assume at least one hand was
        # found (within your score threshold).

        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)

        # find boxes with high enough score
        valid_boxes = [
            boxes[i] for i in range(len(boxes))
            if scores[i] > args.score_thresh
        ]

        # update controller values
        if len(valid_boxes) >= 2:
            for i in range(2):
                left, top, right, bottom = (valid_boxes[i][1] * im_width,
                                            valid_boxes[i][0] * im_height,
                                            valid_boxes[i][3] * im_width,
                                            valid_boxes[i][2] * im_height)
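
The snippet ends while unpacking the two highest-scoring hand boxes. A purely illustrative continuation, assuming a steering-wheel style controller where the vertical offset between the two hands sets the steering value (steering_from_hands and its sign convention are invented for this sketch):

# Invented continuation, not the project's code: derive a steering value
# in roughly [-1, 1] from the vertical offset between two hand boxes.
def steering_from_hands(left_box, right_box, im_height):
    # Each box is (left, top, right, bottom) in pixel coordinates.
    left_cy = (left_box[1] + left_box[3]) / 2
    right_cy = (right_box[1] + right_box[3]) / 2
    return (left_cy - right_cy) / im_height
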
Example #8
while True:
    background = cv2.imread("images/background.jpg", 1)
    # video_capture appears to be a threaded capture wrapper whose read()
    # returns the frame directly (no ret flag).
    frame = video_capture.read()

    background = cv2.resize(background, video_capture.size())
    num_frames += 1
    # Process only every other frame to save time.
    toggle = 1 - toggle
    if toggle:
        continue
    frame = cv2.flip(frame, 1)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Detect on a quarter-size frame for speed.
    frame_small = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    if frame_small is not None:
        # actual detection
        boxes, scores = detector_utils.detect_objects(frame_small,
                                                      detection_graph, sess)
        face_locations = face_recognition.face_locations(frame_small)
        # draw bounding boxes
        hand_location=detector_utils.draw_box_on_image(
            params['num_hands_detect'],
            params["score_thresh"], scores, boxes,
            params['im_width'], params['im_height'], background)

        for top, right, bottom, left in face_locations:
            # Scale face locations back up, since detection ran on a
            # 1/4-size frame.
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face (on the background image, where
            # the hand boxes above are drawn)
            cv2.rectangle(background, (left, top), (right, bottom),
                          (0, 0, 255), 2)
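
face_recognition.face_locations returns boxes in (top, right, bottom, left) order, which is why the loop above unpacks them that way; multiplying by 4 undoes the fx=0.25 downscale. A self-contained sketch of the same detect-small, draw-full-size pattern (the file names are placeholders):

# Minimal sketch of the quarter-size detect / full-size draw pattern.
import cv2
import face_recognition

frame = cv2.imread("person.jpg")  # placeholder input
small = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
rgb_small = cv2.cvtColor(small, cv2.COLOR_BGR2RGB)  # the library expects RGB
for top, right, bottom, left in face_recognition.face_locations(rgb_small):
    # Undo the 1/4 downscale before drawing on the full-size frame.
    cv2.rectangle(frame, (left * 4, top * 4), (right * 4, bottom * 4),
                  (0, 0, 255), 2)
cv2.imwrite("person_boxes.jpg", frame)
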