Example #1
    def get_landmarks_from_image(self, image_file):
        frame = cv2.imread(image_file)
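        # resize and pad the frame for the detector; scale and pad are kept to map results back to the original image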
        img1, img2, scale, pad = resize_pad(frame)

        normalized_palm_detections = self.palm_detector.predict_on_image(img1)
        palm_detections = denormalize_detections(normalized_palm_detections,
                                                 scale, pad)

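        # convert detections to oriented ROIs, crop them, and run the hand landmark regressor; the affine maps landmarks back to the frame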
        xc, yc, scale, theta = self.palm_detector.detection2roi(
            palm_detections)
        img, affine2, box2 = self.hand_regressor.extract_roi(
            frame, xc, yc, theta, scale)
        flags2, handed2, normalized_landmarks2 = self.hand_regressor(img)
        landmarks2 = self.hand_regressor.denormalize_landmarks(
            normalized_landmarks2, affine2)

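        # draw 2-D landmarks only for crops whose confidence flag exceeds 0.5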
        for i in range(len(flags2)):
            landmark, flag = landmarks2[i], flags2[i]
            print(flag)  # debug: confidence score for this crop
            if flag > 0.5:
                draw_landmarks(frame,
                               landmark[:, :2],
                               HAND_CONNECTIONS,
                               size=2)

        return flags2, handed2, normalized_landmarks2
Example #2
File: tests.py Project: guo-ou/mit-6.834
def test_my_map(map_file_name):
    world, path = execute.load_map_from_json(map_file_name)
    if not (isinstance(path, list) and len(path) > 0):
        raise ValueError("Path must be at least length 1")
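    # plot the world bounds, landmarks, planned path, and robot pose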
    fig, ax = plt.subplots()
    ax.set_xlim([0, world.width])
    ax.set_ylim([0, world.height])
    visualization.draw_landmarks(ax, world.landmarks)
    visualization.draw_path(path)
    Q_robot = visualization.draw_robot(world.robot_pose)
    plt.show()
    green_test_message()
Example #3
    face_detections = denormalize_detections(normalized_face_detections, scale,
                                             pad)

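    # crop an oriented ROI for each face detection and run the landmark regressor on the GPU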
    xc, yc, scale, theta = face_detector.detection2roi(face_detections.cpu())
    img, affine, box = face_regressor.extract_roi(frame, xc, yc, theta, scale)
    flags, normalized_landmarks = face_regressor(img.to(gpu))
    landmarks = face_regressor.denormalize_landmarks(
        normalized_landmarks.cpu(), affine)

    #frame = cv2.rectangle(frame, (0,0), (400,400), (120, 120, 120), 400)

    for i in range(len(flags)):
        landmark, flag = landmarks[i], flags[i]
        if flag > .5:
            draw_landmarks(frame, landmark[:, :2], FACE_CONNECTIONS, size=1)

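    # overlay the ROI boxes and the raw face detections on the frame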
    draw_roi(frame, box)
    draw_detections(frame, face_detections)

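    # show the annotated frame, grab the next one, and exit on Esc (key code 27)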
    cv2.imshow(WINDOW, frame)
    # cv2.imwrite('sample/%04d.jpg'%frame_ct, frame[:,:,::-1])

    hasFrame, frame = capture.read()
    key = cv2.waitKey(1)
    if key == 27:
        break

capture.release()
cv2.destroyAllWindows()
Example #4
    img1, img2, scale, pad = resize_pad(frame)

    normalized_pose_detections = pose_detector.predict_on_image(img2)
    pose_detections = denormalize_detections(normalized_pose_detections, scale,
                                             pad)

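    # pose variant of the same detect-then-regress pipeline; this regressor also returns a mask output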
    xc, yc, scale, theta = pose_detector.detection2roi(pose_detections)
    img, affine, box = pose_regressor.extract_roi(frame, xc, yc, theta, scale)
    flags, normalized_landmarks, mask = pose_regressor(img.to(gpu))
    landmarks = pose_regressor.denormalize_landmarks(normalized_landmarks,
                                                     affine)

    draw_detections(frame, pose_detections)
    draw_roi(frame, box)

    for i in range(len(flags)):
        landmark, flag = landmarks[i], flags[i]
        if flag > .5:
            draw_landmarks(frame, landmark, POSE_CONNECTIONS, size=2)

    cv2.imshow(WINDOW, frame[:, :, ::-1])
    # cv2.imwrite('sample/%04d.jpg'%frame_ct, frame[:,:,::-1])

    hasFrame, frame = capture.read()
    key = cv2.waitKey(1)
    if key == 27:
        break

capture.release()
cv2.destroyAllWindows()
Example #5
    img1 = torch.tensor(img1, device=gpu).byte()
    img1 = img1.unsqueeze(0)
    normalized_landmarks2, flags2 = hand_regressor(img1)

    #for i in range(len(flags)):
    #    landmark, flag = landmarks[i], flags[i]
    #    if flag>.5:
    #        draw_landmarks(frame, landmark[:,:2], FACE_CONNECTIONS, size=1)


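    # NOTE: these are the regressor's raw normalized landmarks; the denormalize step is commented out, so they are not in frame coordinates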
    for i in range(len(flags2)):
        landmark, flag = normalized_landmarks2[i], flags2[i]
        #landmark, flag = landmarks2[i], flags2[i]
        #if flag>.01:
        if True:
            draw_landmarks(frame, landmark[:,:2], HAND_CONNECTIONS, size=2)
            print(flag)
            #draw_landmarks(frame, landmark[:,:2] * 200, HAND_CONNECTIONS, size=2)

    #draw_roi(frame, box)
    #draw_roi(frame, box2)
    #draw_detections(frame, face_detections)
    #draw_detections(frame, palm_detections)

    cv2.imshow(WINDOW, frame)
    # cv2.imwrite('sample/%04d.jpg'%frame_ct, frame[:,:,::-1])

    hasFrame, frame = capture.read()
    key = cv2.waitKey(1)
    if key == 27:
        break
Example #6
    if imgs:
        imgs = torch.stack(
            imgs)  #.permute(0,3,1,2).float() / 255. #/ 127.5 - 1.0
        affines = torch.stack(affines)
    else:
        imgs = torch.zeros((0, 3, res, res)).to(gpu)
        affines = torch.zeros((0, 2, 3)).to(gpu)

    #img, affine2, box2 = hand_regressor.extract_roi(img1, xc, yc, theta, scale)
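    # run the hand landmark regressor on the whole batch of crops and map landmarks back through each crop's affine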
    flags2, handed2, normalized_landmarks2 = hand_regressor(imgs)
    landmarks2 = hand_regressor.denormalize_landmarks(normalized_landmarks2,
                                                      affines)

    for i in range(len(flags2)):
        landmark, flag = landmarks2[i], flags2[i]
        if flag > .5:
            draw_landmarks(img1, landmark[:, :2], HAND_CONNECTIONS, size=2)

    draw_roi(img1, points)

    cv2.imshow(WINDOW, img1)

    hasFrame, frame = capture.read()
    key = cv2.waitKey(1)
    if key == 27:
        break

capture.release()
cv2.destroyAllWindows()
Example #7
input_name = ort_session.get_inputs()[0].name

while hasFrame:
    frame_ct += 1

    img1, img2, scale, pad = resize_pad(frame)
    img3 = cv2.resize(img1, (192, 192))

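    # add a batch dimension and cast to uint8 before running the ONNX session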
    img_in = np.expand_dims(img3, axis=0).astype(np.uint8)
    ort_inputs = {input_name: img_in}

    ort_outs = ort_session.run(None, ort_inputs)

    frame = cv2.rectangle(img3, (30, 30), (162, 162), (255, 255, 255), -1)
    img3 = cv2.resize(img3, (960, 960))

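    # ort_outs[0] holds the landmarks and ort_outs[1] the confidence flags; coordinates are scaled by 5 to match the 192 -> 960 resize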
    for i in range(len(ort_outs[0])):
        landmark, flag = ort_outs[0][i], ort_outs[1][i]
        if flag > .5:
            draw_landmarks(img3, landmark[:, :2] * 5, FACE_CONNECTIONS, size=1)

    cv2.imshow(WINDOW, img3)

    hasFrame, frame = capture.read()
    key = cv2.waitKey(1)
    if key == 27:
        break

capture.release()
cv2.destroyAllWindows()