Code example #1
import time
import tensorflow as tf

# TF1-era API: ConfigProto/InteractiveSession were removed in TF2's default mode.
# allow_growth stops TensorFlow from grabbing all GPU memory up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.InteractiveSession(config=config)

# Custom Params (refer to include/openpose/flags.hpp for more parameters)
params = dict()
params["model_folder"] = "../../../models/"

# NOTE(review): `keras` and `keras_metrics` are not imported in this snippet —
# presumably imported elsewhere in the file; verify before running.
mpose = keras.models.load_model('weights-improvement-109-0.98.hdf5',custom_objects={'binary_precision':keras_metrics.precision(), 'binary_recall':keras_metrics.recall()})

# Map BODY_25 part names to their numeric indices (the binding returns
# index -> name, so invert it here).
poseModel = op.PoseModel.BODY_25
original_keypoints_index = op.getPoseBodyPartMapping(poseModel)
keypoints_index = dict((bp, num) for num, bp in original_keypoints_index.items())

# Capture from the default webcam (device 0).
vs = cv2.VideoCapture(0)

# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()

# Reusable datum object that carries frames in and keypoints out of OpenPose.
datum = op.Datum()

# Timestamp of the previous frame, used for FPS display in the (truncated) loop below.
fps_time = 0


while True:
Code example #2
 def getPoseBodyPartMapping(poseModel):
     """Return the body-part index-to-name mapping for *poseModel*.

     Thin pass-through to the pyopenpose binding of the same name.
     """
     mapping = pyopenpose.getPoseBodyPartMapping(poseModel)
     return mapping
Code example #3
def main():
    """Run OpenPose on a video file and save the annotated frames to a new video.

    Reads ``media/side_landscape_2.mp4``, overlays the frame number, runs pose
    estimation on every frame, prints/shows the keypoints, and writes the
    rendered frames to ``media/output.mp4`` or ``media/output.avi`` depending
    on the input extension. Per-frame keypoints are also written as JSON files
    into ``json_output``. Exits the process with status -1 on any error or an
    unsupported video format. Press 'q' in the display window to stop early.
    """
    try:
        parser = argparse.ArgumentParser()
        # Unrecognized "--flag [value]" arguments are collected and forwarded
        # to OpenPose below.
        args = parser.parse_known_args()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "../openpose/models/"
        # Save keypoint data as JSON into this folder.
        # TODO: find a better way — this currently saves one JSON file per frame.
        params["write_json"] = "json_output"

        # Fold any extra "--key [value]" CLI arguments into params.
        extra = args[1]
        for i, curr_item in enumerate(extra):
            next_item = extra[i + 1] if i != len(extra) - 1 else "1"
            if "--" in curr_item:
                key = curr_item.replace('-', '')
                if key not in params:
                    # A flag followed by another flag has no value: default to "1".
                    params[key] = "1" if "--" in next_item else next_item

        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Video location as a string
        vid_location = "media/side_landscape_2.mp4"
        cap = cv2.VideoCapture(vid_location)

        # Named property constants instead of the magic indices 3/4/5.
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        fps = cap.get(cv2.CAP_PROP_FPS)

        font = cv2.FONT_HERSHEY_SIMPLEX

        # Choose the output codec/container from the input extension.
        video_type = vid_location.split(".")[-1]
        if video_type == "mp4":
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter('media/output.mp4', fourcc, fps,
                                  (int(width), int(height)))
        elif video_type == "avi":
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            out = cv2.VideoWriter('media/output.avi', fourcc, fps,
                                  (int(width), int(height)))
        else:
            print("Video format not supported")
            sys.exit(-1)

        frame_num = 0

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Process Image
            datum = op.Datum()
            imageToProcess = frame

            # Overlay the 1-based frame number in the top-left area.
            frame_num += 1
            cv2.putText(imageToProcess, str(frame_num), (100, 100), font,
                        1, (255, 255, 255), 1)

            datum.cvInputData = imageToProcess
            opWrapper.emplaceAndPop([datum])

            # Display Image
            print("Body keypoints: \n" + str(datum.poseKeypoints))
            cv2.imshow("OpenPose 1.5.1 - Tutorial Python API",
                       datum.cvOutputData)

            # TODO(review): sway computation (nose/neck vs. midhip x-offset)
            # was commented out in the original; reinstate from version control
            # if still needed.

            # Save frame to output video
            out.write(datum.cvOutputData)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # Prints model part numbers
        poseModel = op.PoseModel.BODY_25
        print(op.getPoseBodyPartMapping(poseModel))

        cap.release()
        out.release()
        # BUG FIX: destroyAllWindows was referenced without parentheses and
        # therefore never actually called.
        cv2.destroyAllWindows()

    except Exception as e:
        print(e)
        sys.exit(-1)
Code example #4
def main(video_path, save_path):
    """Extract BODY_25 keypoints from every .mp4 under *video_path*.

    For each video, runs OpenPose frame by frame, keeps the (25, 3) keypoints
    of the first detected person whenever a detection succeeds, stacks them
    into a (frames, 25, 3) array, and saves it as
    ``<save_path>/<video name>.npy``. Prints the processing FPS per video.
    Press 'q' to abort the current video early.

    Args:
        video_path: directory containing the input .mp4 files.
        save_path: directory that receives one .npy file per video.
    """
    for video in os.listdir(video_path):
        if not video.endswith('.mp4'):
            continue
        print('Processing' + video + '...........')
        vs = cv2.VideoCapture(os.path.join(video_path, video))

        # NOTE(review): setting frame width/height on a file-backed capture is
        # usually a no-op in OpenCV — confirm this has any effect here.
        width = 1280
        height = 720
        vs.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        vs.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = {
            "model_folder": "../../../../models/",
            "model_pose": "BODY_25",
            "fps_max": -1,
            "write_video_fps": -1,
            "number_people_max": 1,
        }
        poseModel = op.PoseModel.BODY_25
        original_keypoints_index = op.getPoseBodyPartMapping(poseModel)

        # Starting OpenPose
        opWrapper = op.WrapperPython()
        opWrapper.configure(params)
        opWrapper.start()

        # Single datum object reused for every frame of this video.
        datum = op.Datum()

        # Accumulates one (25, 3) keypoint row per frame with a detection.
        KeypointFrame = np.array([])
        image_count = 0
        start = time.time()
        while vs.isOpened():
            # Get frame from video or webcam
            ret, frame = vs.read()
            if not ret:
                break
            # Hand the frame to OpenPose for processing.
            datum.cvInputData = frame
            opWrapper.emplaceAndPop([datum])

            # Keep the frame only when OpenPose detected at least one person.
            if datum.poseKeypoints.any() and datum.poseKeypoints.ndim == 3:
                keypoints = datum.poseKeypoints[0].reshape(1, 25, 3)
                if KeypointFrame.size != 0:
                    KeypointFrame = np.vstack((KeypointFrame, keypoints))
                else:
                    KeypointFrame = keypoints

            image_count += 1
            if cv2.waitKey(1) == ord('q'):
                break

        total_time = time.time() - start
        # BUG FIX: guard against ZeroDivisionError for empty or
        # instantly-read videos.
        if total_time > 0:
            print('FPS:', image_count / total_time)

        # Save data as a NumPy array.
        np.save(os.path.join(save_path, video.replace(".mp4", ".npy")),
                KeypointFrame)
        print('Successful get ' + video + ' KeyPoints')

        vs.release()
        cv2.destroyAllWindows()