Example #1
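These snippets are excerpted from a larger pose-estimation module: helpers such as get_curr_screen_geometry, getPoseParameters, selectJoints, organizeBiggestPerson, poseDATAtoFrame, setOutputPath, setMetadata, writeToDATA, getMmppInterface, getMmppAutomatically, saveCalibrations and saveSelectedFolder, as well as the openpose_path and repo_path globals, are defined elsewhere in that module. A minimal sketch of the imports the snippets assume, with placeholder paths (the location of the OpenPose Python bindings depends on your build):

import sys
import time

import cv2
import numpy as np

openpose_path = "/path/to/openpose/"   # placeholder: your OpenPose install root
repo_path = "/path/to/repo/"           # placeholder: this project's root
# Typical location of the Python bindings in an OpenPose build; adjust for your setup.
sys.path.append(openpose_path + "build/python")
from openpose import pyopenpose as op
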
def doPoseAdjustment(imageToProcess,
                     folder_path,
                     webcam_number=1,
                     nn_model="BODY_25",
                     pose_model="SL",
                     fitted=True):

    if fitted:
        width, height = get_curr_screen_geometry()
    else:
        width, height = 640, 480

    nn_mapping, new_mapping, new_pairs = getPoseParameters(
        nn_model, pose_model)

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = openpose_path + "models/"

    try:
        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()
        datum = op.Datum()
        datum.cvInputData = imageToProcess
        opWrapper.emplaceAndPop([datum])
        pose_keypoints = datum.poseKeypoints
        try:
            # Keep only the (x, y) columns and drop the confidence score.
            pose_keypoints = pose_keypoints[:, :, :2]
        except Exception:
            # No person detected: poseKeypoints is empty or None, fall back to zeros.
            pose_keypoints = np.zeros([1, len(new_mapping), 2])

        pose_keypoints = selectJoints(pose_keypoints, nn_mapping, new_mapping)
        pose_keypoints = organizeBiggestPerson(pose_keypoints)
        pose_keypoints = pose_keypoints[0]
        for i in range(len(pose_keypoints)):
            if 0 in pose_keypoints[i]:
                # Undetected joint: start it at the image centre so it can be dragged
                # into place (shape is (height, width); keypoints are (x, y)).
                pose_keypoints[i, 0] = imageToProcess.shape[1] / 2
                pose_keypoints[i, 1] = imageToProcess.shape[0] / 2

        joint_n = 0
        k = 1
        while True:
            image = np.copy(imageToProcess)
            img_out = poseDATAtoFrame(image,
                                      pose_keypoints,
                                      0,
                                      new_mapping,
                                      new_pairs,
                                      thickness=1,
                                      color=-1)
            for i in range(len(pose_keypoints)):
                A = tuple(pose_keypoints[i].astype(int))
                if (i == joint_n):
                    cv2.circle(img_out, (A[0], A[1]), 1, (255, 255, 255), -1)
                else:
                    cv2.circle(img_out, (A[0], A[1]), 1, (0, 0, 0), -1)
            img_processed = np.copy(img_out)
            img_out = cv2.copyMakeBorder(img_out,
                                         50,
                                         0,
                                         0,
                                         0,
                                         cv2.BORDER_CONSTANT,
                                         value=[0, 0, 0])
            cv2.putText(img_out,
                        "{}".format(new_mapping[joint_n]), (200, 30),
                        cv2.FONT_HERSHEY_COMPLEX,
                        .8, (255, 255, 255),
                        2,
                        lineType=cv2.LINE_AA)
            cv2.putText(img_out,
                        "k: {}".format(k), (450, 30),
                        cv2.FONT_HERSHEY_COMPLEX,
                        .8, (255, 255, 255),
                        2,
                        lineType=cv2.LINE_AA)

            img_out[40:45, 450:450 + k, :] = 255
            cv2.namedWindow('OpenPose', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('OpenPose', (width, height))
            cv2.imshow("OpenPose", img_out)
            key = cv2.waitKey(30)
            if key == ord('q'):
                print("Not saving...")
                break
            elif key == 81:  #left
                pose_keypoints[joint_n, 0] -= k
            elif key == 82:  #up
                pose_keypoints[joint_n, 1] -= k
            elif key == 83:  #right
                pose_keypoints[joint_n, 0] += k
            elif key == 84:  #down
                pose_keypoints[joint_n, 1] += k
            elif key == 171:  #+
                k += 1
            elif key == 173:  #-
                if (k > 1):
                    k -= 1
            elif (key == 32):  #space
                if (joint_n >= pose_keypoints.shape[0] - 1):
                    joint_n = 0
                else:
                    joint_n += 1
            elif (key == 8):  #backspace
                if (joint_n <= 0):
                    # Wrap around to the last joint (shape[0] itself would be out of range).
                    joint_n = pose_keypoints.shape[0] - 1
                else:
                    joint_n -= 1
            elif (key == 13 or key == 141):  #enter
                print("Saving dimensions")
                cv2.destroyAllWindows()
                mmppx, mmppy = getMmppInterface(img_processed)
                saveCalibrations(img_processed,
                                 pose_keypoints,
                                 new_mapping,
                                 folder_path,
                                 mmppx,
                                 mmppy,
                                 mode="manual")
                break
    except KeyboardInterrupt:
        print("Not saving...")
        cv2.destroyAllWindows()
        sys.exit(-1)
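A hedged usage sketch for the interactive adjustment above (not part of the original project): grab a single webcam frame, pass it to doPoseAdjustment, then nudge the highlighted joint with the arrow keys, change the step size k with +/-, cycle joints with space/backspace, press Enter to save the calibration or q to abort. The folder path follows the Calib/Tmp convention used in Example #3.

# Assumed usage, placeholder paths.
cap = cv2.VideoCapture(1)              # same webcam index as the default above
ret, frame = cap.read()
cap.release()
if ret:
    doPoseAdjustment(frame, folder_path=repo_path + "Calib/Tmp")
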
Example #2

def realtimePE(webcam_number=1,
               video_name="Webcam",
               output_name="Webcam",
               nn_model="BODY_25",
               pose_model="BODY_25",
               process_params="B",
               save_video=False,
               show_video=True,
               show_frame=False,  # referenced below but missing from the original signature
               save_data=False,
               fitted=True):
    if fitted:
        width, height = get_curr_screen_geometry()
    else:
        width, height = 640, 480

    nn_mapping, new_mapping, new_pairs = getPoseParameters(
        nn_model, pose_model)

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = openpose_path + "models/"

    try:
        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()
        datum = op.Datum()

        video_out_path, output_path = setOutputPath(video_name, output_name)
        file_metadata = setMetadata(0, new_mapping, new_pairs)
        frame_width = file_metadata["frame_width"]
        frame_height = file_metadata["frame_height"]
        fps = file_metadata["fps"]

        cap = cv2.VideoCapture(webcam_number)
        if save_data:
            writeToDATA(output_path, file_metadata, write_mode='w')
        if save_video:
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            vid_writer = cv2.VideoWriter(video_out_path, fourcc, fps,
                                         (frame_width, frame_height))

        if not cap.isOpened():
            print("Error opening video stream or file")
        while cap.isOpened():
            # Process Image
            ret, imageToProcess = cap.read()
            if not ret:
                break

            # Start timer
            timer = cv2.getTickCount()

            datum.cvInputData = imageToProcess
            opWrapper.emplaceAndPop([datum])
            pose_keypoints = datum.poseKeypoints
            try:
                # Keep only (x, y); drop the confidence score.
                pose_keypoints = pose_keypoints[:, :, :2]
            except Exception:
                # No detection in this frame: fall back to zeros.
                pose_keypoints = np.zeros([1, len(new_mapping), 2])
            pose_keypoints = selectJoints(pose_keypoints, nn_mapping,
                                          new_mapping)
            if 'B' in process_params:
                pose_keypoints = organizeBiggestPerson(pose_keypoints)
                pose_keypoints = pose_keypoints[0]
            if show_video or save_video or show_frame:
                if 'B' in process_params:
                    img_out = poseDATAtoFrame(imageToProcess,
                                              pose_keypoints,
                                              0,
                                              new_mapping,
                                              new_pairs,
                                              thickness=3,
                                              color=-1)
                else:
                    img_out = poseDATAtoFrame(imageToProcess,
                                              pose_keypoints, [0],
                                              new_mapping,
                                              new_pairs,
                                              thickness=3,
                                              color=-1)
                if save_video:
                    vid_writer.write(img_out)
            if save_data:
                pose_keypoints = np.round(pose_keypoints).astype(int)
                file_data = {'keypoints': pose_keypoints.tolist()}
                writeToDATA(output_path, file_data, write_mode='a')

            if show_video or show_frame:
                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
                # Display FPS on frame
                cv2.putText(img_out, "FPS : " + str(int(fps)), (100, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

                # Display Image
                cv2.namedWindow('OpenPose', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('OpenPose', (width, height))
                cv2.imshow("OpenPose", img_out)

            if (cv2.waitKey(25) & 0xFF == ord('q')):
                break

    except KeyboardInterrupt:
        cap.release()
        cv2.destroyAllWindows()
        sys.exit(-1)
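A hedged example call for the real-time loop above; the webcam index is an assumption, and saving is left disabled so neither the X264 video writer nor the keypoint file is needed:

# Assumed usage: display only, nothing written to disk.
realtimePE(webcam_number=0,
           video_name="Webcam",
           output_name="Webcam",
           save_video=False,
           show_video=True,
           save_data=False)
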
Example #3
def poseAutomaticAdjustmentInterface(n_mean=5,
                                     enter_folder=True,
                                     webcam_number=1,
                                     nn_model="BODY_25",
                                     pose_model="SL",
                                     fitted=True):
    if fitted:
        width, height = get_curr_screen_geometry()
    else:
        width, height = 640, 480
    if enter_folder:
        folder_path = saveSelectedFolder()
    else:
        folder_path = repo_path + "Calib/" + "Tmp"
    print("Get set...")
    time.sleep(10)
    print("Started")

    nn_mapping, new_mapping, new_pairs = getPoseParameters(
        nn_model, pose_model)

    # Custom Params (refer to include/openpose/flags.hpp for more parameters)
    params = dict()
    params["model_folder"] = openpose_path + "models/"

    try:
        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        cap = cv2.VideoCapture(webcam_number)
        if not cap.isOpened():
            print("Error opening video stream or file")

        keep_running = True
        while keep_running:
            ret, imageToProcess = cap.read()
            if not ret:
                break
            # Keep an unannotated copy of the frame for the mm-per-pixel step below.
            frame = np.copy(imageToProcess)
            datum = op.Datum()
            datum.cvInputData = imageToProcess
            opWrapper.emplaceAndPop([datum])
            pose_keypoints = datum.poseKeypoints
            try:
                # Keep only (x, y); drop the confidence score.
                pose_keypoints = pose_keypoints[:, :, :2]
            except Exception:
                # No detection in this frame: fall back to zeros.
                pose_keypoints = np.zeros([1, len(new_mapping), 2])

            pose_keypoints = selectJoints(pose_keypoints, nn_mapping,
                                          new_mapping)
            pose_keypoints = organizeBiggestPerson(pose_keypoints)
            pose_keypoints = pose_keypoints[0]
            img_processed = poseDATAtoFrame(imageToProcess,
                                            pose_keypoints,
                                            0,
                                            new_mapping,
                                            new_pairs,
                                            thickness=1,
                                            color=-1)
            # Stop only once every joint has been detected (no zeroed coordinates).
            if any(0 in joint for joint in pose_keypoints):
                print("Wait till all joints are found")
            else:
                keep_running = False
                print("Joints found")

    except KeyboardInterrupt:
        print("Not saving...")
        cv2.destroyAllWindows()
        sys.exit(-1)

    mmppx, mmppy = getMmppAutomatically(frame)
    saveCalibrations(img_processed,
                     pose_keypoints,
                     new_mapping,
                     folder_path,
                     mmppx,
                     mmppy,
                     mode="automatic",
                     show_plot=False)
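
A hedged call for the automatic variant above; with enter_folder=False it writes the calibration to the repo's Calib/Tmp folder instead of prompting for one, and the 10-second countdown gives you time to step fully into the frame:

# Assumed usage: wait for the countdown, then stand so all joints are visible.
poseAutomaticAdjustmentInterface(enter_folder=False,
                                 webcam_number=0,
                                 nn_model="BODY_25",
                                 pose_model="SL")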