Example #1
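All of the examples below drive the same PoseEstimator API from the "Lifting from the Deep" code base. A minimal sketch of the shared setup they assume is shown here; the module layout and checkpoint paths vary between forks, so treat these values as placeholders:

import cv2
import json
from os.path import basename, join, splitext

from lifting import PoseEstimator  # assumed module layout

# assumed checkpoint locations; substitute the paths used by your fork
SESSION_PATH = 'data/saved_sessions/init_session/init'
PROB_MODEL_PATH = 'data/saved_sessions/prob_model/prob_model_params.mat'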
def get_pose_from_image(file_path):
    image = cv2.imread(file_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb

    # create pose estimator
    image_size = image.shape

    pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    # estimation
    pose_2d, visibility, pose_3d = pose_estimator.estimate(image)

    # close model
    pose_estimator.close()

    # Save to JSON
    if pose_2d is None:
        pose_2d = []
    else:
        pose_2d = pose_2d.tolist()
    if pose_3d is None:
        pose_3d = []
    else:
        pose_3d = pose_3d.tolist()

    results = {'2d': pose_2d, '3d': pose_3d}

    file_name, file_ext = splitext(file_path)
    out_path = join('/shared/estimations', basename(file_name) + '.json')
    with open(out_path, 'w') as results_file:
        print('Writing to file ' + out_path)
        results_file.write(json.dumps(results))
Example #2
def upload_file():
    if request.method == 'POST':
        code = request.get_data()
        img = base64.b64decode(code.decode().split(',')[1])
        fstr = IMAGE_FILE_DIR + '/' + time.strftime("%Y%m%d%H%M%S") + '.jpg'
        file = open(fstr, 'wb')
        file.write(img)

        image = cv2.imread(fstr)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb
        # create pose estimator

        image_size = image.shape
        pose_estimator = PoseEstimator(image_size, SESSION_PATH,
                                       PROB_MODEL_PATH)
        # load model

        pose_estimator.initialise()
        # estimation

        pose_2d, visibility, pose_3d = pose_estimator.estimate(image)
        # close model

        pose_estimator.close()
        # Show 2D and 3D poses

        file.close()
        print(pose_2d)
        draw_limbs(image, pose_2d, visibility)
        # encode the annotated frame as JPEG before base64-encoding the response
        _, buf = cv2.imencode('.jpg', image)
        return (base64.b64encode(buf), 200)
Example #3
def vmdlifting(image_file, vmd_file, position_file):
    image_file_path = realpath(DIR_PATH) + '/' + image_file
    image = cv2.imread(image_file_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb

    # create pose estimator
    image_size = image.shape

    pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    # estimation
    pose_2d, visibility, pose_3d = pose_estimator.estimate(image)

    # close model
    pose_estimator.close()

    if position_file is not None:
        # dump 3d joint position data to position_file
        with open(position_file, "w") as fout:
            for pose in pose_3d:
                for j in range(pose.shape[1]):
                    print(j, pose[0, j], pose[1, j], pose[2, j], file=fout)

    # head position & face expression
    head_rotation, expression_frames = head_face_estimation(image_file_path)
    pos2vmd(pose_3d, vmd_file, head_rotation, expression_frames)
Example #4
def main():
    start = time.time()
    image = cv2.imread(IMAGE_FILE_PATH)
    #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb

    # create pose estimator
    image_size = image.shape

    pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    try:
        # estimation
        pose_2d, visibility, pose_3d = pose_estimator.estimate(image)
        timeElapsed = time.time() - start
        print("Execution time in Seconds: ", timeElapsed)

        # Show 2D and 3D poses
        display_results(image, pose_2d, visibility, pose_3d)
    except ValueError:
        print(
            'No visible people in the image. Change CENTER_TR in packages/lifting/utils/config.py ...'
        )

    # close model
    pose_estimator.close()
Example #5
def parse_rosbag(): 
  rospy.init_node("parse_rosbag", anonymous=True)
  participant_name = rospy.get_param("~participant") # can make this for multiple people
  topic_names = rospy.get_param('~topic_names')

  # Fetch image_topic
  for topic in topic_names: 
    if 'image_raw' in topic:
      image_topic = topic 

  # Process all ROSBAG recordings for each participant. 
  for file_name in os.listdir(os.path.join(BAG_DIR_PATH)):
    if participant_name in file_name:
      print("Processing " + file_name + "...")
      last_mocap_msgs = {}  # Used to downsample motion capture to camera sampling rate
      modelInitialized = False

      bag_file_path = os.path.join(os.path.join(BAG_DIR_PATH), file_name)
      bag = rosbag.Bag(bag_file_path)
      msgs = bag.read_messages(topic_names)
      
      csv_dir = os.path.join(CSV_DIR_PATH)
      csv_file_name = file_name.split('.bag')[0] + ".csv"

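      # note: 'wb' is the Python 2 idiom for csv.writer; on Python 3 use open(..., 'w', newline='')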
      with open(os.path.join(csv_dir, csv_file_name), 'wb') as csvfile:
        csv_writer = csv.writer(csvfile, delimiter=',', quotechar=' ', quoting=csv.QUOTE_MINIMAL)
        write_csv_field_names(csv_writer, UCL_JOINT_NAMES, topic_names)

        for subtopic, msg, t in msgs:
          # Downsample to rate of camera. Take last sample for motion capture. 
          if subtopic == image_topic:   
            image = CvBridge().imgmsg_to_cv2(msg)

            if not modelInitialized:
              pose_estimator = PoseEstimator(image.shape, SESSION_PATH, PROB_MODEL_PATH)
              pose_estimator.initialise()
              modelInitialized = True
              print("Processing " + file_name + "...")

            # Run 'lifting from the deep algorithm'
            pose_2d, visibility, pose_3d = pose_estimator.estimate(image) 

            # Write motion capture and 'Lifting from the deep' output to .csv.
            write_data_to_csv(csv_writer, pose_3d, last_mocap_msgs, topic_names, t)
          else:
            last_mocap_msgs[subtopic] = msg  # Save most recent sample for downsampling

  # close model for the 'lifting from the deep' algorithm
  pose_estimator.close()
Example #6
def main(argv):
    cap = cv2.VideoCapture(FLAGS.VIDEO_FILE_PATH)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    image_size = (height, width, 3)
    ret, image = cap.read()
    pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)
    pose_estimator.initialise()

    frame_num = 0
    poses = {}
    with open(FLAGS.JSON_OUTPUT, 'w') as outfile:
        try:
            while True:
                ret, image = cap.read()
                if not ret:
                    print("End of video")
                    break
                image = cv2.cvtColor(image,
                                     cv2.COLOR_BGR2RGB)  # conversion to rgb

                try:
                    # estimation
                    pose_2d, visibility, pose_3d = pose_estimator.estimate(
                        image)

                    print(
                        str(
                            int(
                                cap.get(cv2.CAP_PROP_POS_FRAMES) * 100 /
                                cap.get(cv2.CAP_PROP_FRAME_COUNT))) + "%")
                    poses[str(frame_num)] = pose_3d.tolist()
                    #print(poses[str(frame_num)])

                    if FLAGS.DISPLAY:
                        # Show 2D and 3D poses
                        display_results(image, pose_2d, visibility, pose_3d)
                except ValueError:
                    print(
                        'No visible people in the image. Change CENTER_TR in packages/lifting/utils/config.py ...'
                    )
                frame_num += 1
        except KeyboardInterrupt:
            pass
        json.dump(poses, outfile)

    # close model
    pose_estimator.close()
Example #7
def estimate_image():
    image = cv2.imread(IMAGE_FILE_PATH)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb

    # create pose estimator
    image_size = image.shape

    pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    # estimation
    pose_2d, visibility, pose_3d = pose_estimator.estimate(image)

    # close model
    pose_estimator.close()

    # Show 2D and 3D poses
    display_results(image, pose_2d, visibility, pose_3d)
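For reference, a hedged sketch of what estimate() hands back; the shapes below follow the reference implementation, but joint count and dtypes may differ across forks:

# assumed return shapes (reference implementation):
#   pose_2d:    (n_people, n_joints, 2)  pixel coordinates per joint
#   visibility: (n_people, n_joints)     per-joint visibility flags
#   pose_3d:    (n_people, 3, n_joints)  3D joint coordinates
print(pose_2d.shape, visibility.shape, pose_3d.shape)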
Example #8
def estimate_video_and_save_pkl():

    pose_list = []
    cap = cv2.VideoCapture(PROJECT_PATH + '/data/videos/test_video.mp4')

    if not cap.isOpened():
        print("Error opening video stream or file")

    # create pose estimator
    pose_estimator = PoseEstimator((480, 640, 3), SESSION_PATH,
                                   PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    count = 0
    while cap.isOpened():
        count += 1
        if count == 300:
            break
        print('count:{}'.format(str(count)))
        ret_val, frame = cap.read()
        frame = cv2.resize(frame, (640, 480))

        try:
            # estimation
            start = time.time()
            pose_2d, visibility, pose_3d = pose_estimator.estimate(frame)
            finish = time.time() - start
            print("time:", finish)
        except Exception:
            # skip frames where no pose could be estimated
            continue

        pose_list.append(pose_3d[0])
    # close model
    pose_estimator.close()

    with open(
            PROJECT_PATH + '/data/videos/' +
            '3d_joints_{}'.format(str(time.time())), 'wb') as fo:
        pickle.dump(pose_list, fo)
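A minimal sketch of reading the dump back; the file name here is hypothetical, since the real one carries the time.time() suffix written above:

import pickle

# hypothetical path; substitute the timestamped name produced by the function above
with open(PROJECT_PATH + '/data/videos/3d_joints_1234567890.0', 'rb') as fi:
    pose_list = pickle.load(fi)
print(len(pose_list))  # one 3D pose array per successfully processed frame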
Example #9
def main(image):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb

    # create pose estimator
    image_size = image.shape

    pose_estimator = PoseEstimator(image_size, SESSION_PATH, PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    # estimation (estimate() returns three values; only the 2D pose is used below)
    pose_2d, visibility, pose_3d = pose_estimator.estimate(image)

    # close model
    pose_estimator.close()

    # Show 2D and 3D poses
    # print pose_2d
    # display_results(image, pose_2d, visibility)
    if len(pose_2d) == 0:
        return []
    return pose_2d[0]
Example #10
    image = size_image(image)
    plt2d = None
    plt3d = None
    try:
    #if True:
        pose_2d, visibility, pose_3d = pose_estimator.estimate(image)
        if 'display' in args:
            plt3d, plt2d = display_results(image, pose_2d, visibility, pose_3d)

        lift = {}
        lift["pose2d"] = pose_2d.round(2).tolist()
        lift["pose3d"] = pose_3d.round(2).tolist()
        lift["visibility"] = visibility.tolist()
        #cv2.imshow('req',plt3d)
        #cv2.waitKey(0)
        zmqa.send(socket, plt3d, extra=lift)
    except Exception:
    #else:
        print("!@*!! error:", sys.exc_info()[0])
        lift = {}
        lift['error'] = str(sys.exc_info()[0])
        lift['fname'] = fname
        zmqa.send(socket, None, extra=lift)

# close model
pose_estimator.close()

Example #11
def vmdlifting_multi(video_file, vmd_file, position_file):
    video_file_path = realpath(video_file)

    cap = cv2.VideoCapture(video_file_path)

    pose_3d_list = []
    head_rotation_list = []
    expression_frames_list = []
    idx = 0
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()

        # stop once no more frames can be read
        if not ret:
            break

        print("frame load idx={0}".format(idx))

        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # conversion to rgb

        # dump the frame image to a file, just in case
        image_file_path = "{0}/frame_{1:012d}.png".format(
            dirname(video_file_path), idx)
        cv2.imwrite(image_file_path, image)

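        # note: the estimator is rebuilt and re-initialised for every frame here;
        # hoisting it out of the loop (as in Example #14) avoids reloading the model per frame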
        # create pose estimator
        image_size = image.shape

        pose_estimator = PoseEstimator(image_size, SESSION_PATH,
                                       PROB_MODEL_PATH)

        # load model
        pose_estimator.initialise()

        # estimation
        pose_2d, visibility, pose_3d = pose_estimator.estimate(image)

        # close model
        pose_estimator.close()

        if position_file is not None:
            # dump 3d joint position data to position_file
            with open(position_file, "w") as fout:
                for pose in pose_3d:
                    for j in range(pose.shape[1]):
                        print(j, pose[0, j], pose[1, j], pose[2, j], file=fout)

        # head position & face expression
        head_rotation, expression_frames = head_face_estimation(
            image_file_path)
        head_rotation_list.append(head_rotation)
        expression_frames_list.append(expression_frames)

        pose_3d_list.append(pose_3d)

        idx += 1

    # When everything done, release the capture
    cap.release()

    pos2vmd_multi(pose_3d_list, vmd_file, head_rotation_list,
                  expression_frames_list)
Example #12
def estimate_video():

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video_writer1 = cv2.VideoWriter(
        PROJECT_PATH +
        '/data/videos/test_result_{}.mp4'.format(str(time.time())), fourcc,
        30.0, (640, 480))
    cap = cv2.VideoCapture(PROJECT_PATH + '/data/videos/test_video.mp4')

    import mpl_toolkits.mplot3d.axes3d as p3
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')  # fig.gca(projection=...) was removed in newer Matplotlib

    pose_list = []

    def animate(pose):

        assert (pose.ndim == 2)
        assert (pose.shape[0] == 3)

        ax.clear()
        for c in _CONNECTION:
            col = '#%02x%02x%02x' % joint_color(c[0])
            ax.plot([pose[0, c[0]], pose[0, c[1]]],
                    [pose[1, c[0]], pose[1, c[1]]],
                    [pose[2, c[0]], pose[2, c[1]]],
                    c=col)
        for j in range(pose.shape[1]):
            col = '#%02x%02x%02x' % joint_color(j)
            ax.scatter(pose[0, j],
                       pose[1, j],
                       pose[2, j],
                       c=col,
                       marker='o',
                       edgecolor=col)
        # smallest = pose.min()
        # largest = pose.max()
        # print('smallest:', smallest)  # -885.36476784
        # print('largest:', largest)  # 809.97294291
        #
        # ax.set_xlim3d(smallest, largest)
        # ax.set_ylim3d(smallest, largest)
        # ax.set_zlim3d(smallest, largest)

        ax.set_xlim3d(-1000, 1000)
        ax.set_ylim3d(-1000, 1000)
        ax.set_zlim3d(-1000, 1000)

    if not cap.isOpened():
        print("Error opening video stream or file")

    import matplotlib.animation as manimation

    FFMpegWriter = manimation.writers['ffmpeg']
    metadata = dict(title='Movie Test',
                    artist='Matplotlib',
                    comment='Movie support!')
    writer = FFMpegWriter(fps=30, metadata=metadata)

    # create pose estimator

    pose_estimator = PoseEstimator((480, 640, 3), SESSION_PATH,
                                   PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    count = 0
    while cap.isOpened():
        count += 1
        if count == 300:
            break
        print('count:{}'.format(str(count)))
        ret_val, frame = cap.read()
        frame = cv2.resize(frame, (640, 480))

        try:
            # estimation
            start = time.time()
            pose_2d, visibility, pose_3d = pose_estimator.estimate(frame)
            print(time.time() - start)
        except Exception:
            # skip frames where no pose could be estimated
            continue

        pose_list.append(pose_3d[0])
        draw_limbs(frame, pose_2d, visibility)
        video_writer1.write(frame)

    # close model
    pose_estimator.close()

    with writer.saving(
            fig, PROJECT_PATH +
            '/data/videos/test_result_3d_{}.mp4'.format(str(time.time())),
            100):
        for i in range(len(pose_list)):
            animate(pose_list[i])
            writer.grab_frame()
Example #13
def main():

    # Get video IDs (VIDs) already processed
    VIDsDONE = [x.split('/')[-1][:-4] for x in glob.glob('./*.npz')]
    print(VIDsDONE)

    # Extract 3D joints for all VIDs
    for dir_path in sorted(glob.glob('/home/jo356/rds/hpc-work/P3/ImgSeq/*')):

        VID = dir_path.split('/')[-1]
        # Skip VIDs that were already processed
        if VID in VIDsDONE:
            continue
        print(VID)

        # If the first frame is missing, record how many initial frames were skipped;
        # when the first non-missing frame arrives, include it (1 + N_first_skipped) times
        N_first_skipped = 0

        joints_3D = []
        # Count missing/skipped frames
        cnt = 0

        img_filenames = sorted(glob.glob(dir_path + '/*.jpg'))
        # Create pose estimator
        pose_estimator = PoseEstimator(IMAGE_SIZE, SESSION_PATH,
                                       PROB_MODEL_PATH)
        # Load model
        pose_estimator.initialise()

        for frame_num, img_filename in enumerate(img_filenames):

            image = cv2.imread(img_filename)
            # Conversion to RGB
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Estimation
            try:
                pose_2d, visibility, pose_3d = pose_estimator.estimate(image)
                joints_3D.append(pose_3d[0].T)

                if N_first_skipped > 0:
                    for i in range(N_first_skipped):
                        joints_3D.append(pose_3d[0].T)
                    N_first_skipped = 0

                #print pose_3d
                #print pose_3d[0].T
                #print np.reshape( np.reshape(pose_3d[0], (17, 3)), (3,17) )
                #print "wrong reshaping !!!"

                print(img_filename.split('/')[-1])

            # Frame missing => pose was not recognised/estimated reliably
            except ValueError:
                cnt += 1
                print(img_filename.split('/')[-1], " is missing")

                # If no valid frame has arrived yet, just keep counting so the
                # frames can be replicated once a valid one comes in
                if frame_num == 0 or N_first_skipped > 0:
                    N_first_skipped += 1
                # Replicate previous frame
                else:
                    joints_3D.append(joints_3D[-1])
            # Treated as above
            except Exception:
                print("2D joints not identified:", sys.exc_info()[0])
                cnt += 1
                print(img_filename.split('/')[-1], " is missing")

                # If no valid frame has arrived yet, just keep counting so the
                # frames can be replicated once a valid one comes in
                if frame_num == 0 or N_first_skipped > 0:
                    N_first_skipped += 1
                # Replicate previous frame
                else:
                    joints_3D.append(joints_3D[-1])

            # print pose_2d
            # print pose_3d
            # print visibility

        # Close model
        pose_estimator.close()

        np.savez('./' + VID + '.npz',
                 joints_3D=np.reshape(joints_3D, (-1, 17, 3)),
                 skipped_frames=cnt)
        print "Skipped frames: ", cnt, " / ", len(img_filenames)
Example #14
def vmdlifting(image_file, vmd_file, center_enabled=False):
    image_file_path = realpath(image_file)
    cap = cv2.VideoCapture(image_file_path)
    initialized = False
    positions_list = []
    head_rotation_list = []
    visibility_list = []
    frame_num = 0
    print("pose estimation start")
    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            break

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb
        #debug_img = "debug/" + str(frame_num) + ".png"
        #cv2.imwrite(debug_img, image)

        # create pose estimator
        image_size = image.shape

        if not initialized:
            pose_estimator = PoseEstimator(image_size, SESSION_PATH,
                                           PROB_MODEL_PATH)
            # load model
            pose_estimator.initialise()
            initialized = True

        # pose estimation
        try:
            pose_2d, visibility, pose_3d = pose_estimator.estimate(image)
        except Exception as ex:
            frame_num += 1
            continue

        if pose_2d is None or visibility is None or pose_3d is None:
            frame_num += 1
            continue

        dump_positions(pose_2d, visibility, pose_3d)
        positions = convert_position(pose_3d)
        #print(positions)
        adjust_center(pose_2d, positions, image)
        #print(positions)
        positions_list.append(positions)
        visibility_list.append(visibility[0])
        print("frame_num: ", frame_num)
        frame_num += 1

    # close model
    pose_estimator.close()

    refine_position(positions_list)

    bone_frames = []
    frame_num = 0
    for positions, visibility in zip(positions_list, visibility_list):
        if positions is None:
            frame_num += 1
            continue
        bf = positions_to_frames(positions, visibility, frame_num,
                                 center_enabled)
        bone_frames.extend(bf)
        frame_num += 1

    showik_frames = make_showik_frames()
    writer = VmdWriter()
    writer.write_vmd_file(vmd_file, bone_frames, showik_frames)
Example #15
def main():
    # class definition
    # cmpr_results = get_statistics()
    # data flower
    # NOTE: to read data from camera set data flow input to "0"
    dflower_1 = data_flow(ap.VIDEO_PATH + ap.Vid_dir_1 + ap.VIDEO_NAME_1)
    # dflower_2 = data_flow(ap.VIDEO_PATH + ap.Vid_dir_2 +  ap.VIDEO_NAME_2)
    images_1 = dflower_1.flow_from_video()  # comparing two different results
    # images_2 = dflower_2.flow_from_video()
    # IMAGE_FILE_PATH = '/home/ali/CLionProjects/PoseEstimation/full_flow/data/images/'
    # img_list = glob.glob(IMAGE_FILE_PATH + '*.jpg')

    # Model setting
    std_shape = (1280, 720, 3)
    pose_estimator = PoseEstimator(
        std_shape, PROJECT_PATH + ap.SAVED_SESSIONS_DIR + ap.SESSION_PATH,
        PROJECT_PATH + ap.SAVED_SESSIONS_DIR + ap.PROB_MODEL_PATH)

    # load model
    pose_estimator.initialise()

    # loading postprocessors
    # ppop = smoothing_prediction(ap.start, ap.end)
    # stdsp = stats(ap.VIDEO_NAME_1, ap.start, ap.end)

    # flowing data
    ret = True
    ii = 0
    num_keypoints = 17
    pose_2d_s = []
    pose_3d_s = []
    visibility_s = []
    image_s = []
    #
    while ret:
        # create pose estimator
        try:
            while ii < ap.start:
                ii += 1
                next(images_1)
                # next( images_2 )
                continue
            image_1 = cv2.resize(next(images_1), (std_shape[1], std_shape[0]))
            # image_2 = cv2.resize( next( images_2 ), (std_shape[1], std_shape[0]) )
            ii += 1
        except Exception:
            ret = False
        # for item in  img_list:
        # image = cv2.cvtColor( cv2.resize( cv2.imread( item ), (std_shape[1], std_shape[0]) ) , cv2.COLOR_BGR2RGB )
        # image_size = image_1.shape # (720, 1280, 3)

        # estimation
        try:
            if ii % 2 == 0:
                continue
            pose_2d_1, visibility_1, pose_3d_1 = pose_estimator.estimate(
                image_1)
            # pose_2d_2, visibility_2, pose_3d_2 = pose_estimator.estimate(image_2)
            #
            pose_2d_s.append(pose_2d_1[0])
            pose_3d_s.append(pose_3d_1[0])
            visibility_s.append(visibility_1)
            image_s.append(image_1)
            # writing images
            # img = Image.fromarray( image_1 )
            # img.save(ap.RESULTS + 'imagespng/' + 'img_frame_' + str(ii) + '.png')
            print('frame number: ', ii)
            #
        except Exception:
            print('No player detected yet... ')
            continue
        # Single 2D and 3D poses representation

        print('Processing the frame {0}'.format(ii))
        if ii > ap.end:
            break
        if ap.viz:
            display_results(image_1, pose_2d_1, visibility_1, pose_3d_1, ii)

        # collecting data
        # cmpr_results.get_result( pose_3d_1[0], pose_3d_2[0] )
        # display_results(image_2, pose_2d_2, visibility_2, pose_3d_2, ii)
        # fig, fig_2 = comparing_result( pose_3d_1[0], pose_3d_2[0] )
        # fig.show()
        # fig_2.show()
        # working on 2D and 3D correspondness
        # postprocessing the poses
        # getting the data
        # stdsp.get_data()

    # optimized_pose = []
    # posposj = [pose_3d_s[i][:,13] for i in range(len(pose_3d_s))]
    # ppop.optimize_pose(13, posposj)
    # for j in range(num_keypoints):
    #     posposj = [pose_3d_s[i][:,j] for i in range(len(pose_3d_s))]
    #     optimized_pose.append( ppop.optimize_pose(j, posposj) )

    # ppop.do_post_process(optimized_pose)
    # stdsp.update_state(optimized_pose)
    # stdsp.save_stats()

    # saving images and 2d poses
    with open(
            ap.RESULTS + 'pickles/' + ap.VIDEO_NAME_1.split('.')[0] +
            '_pose2Ds.pickle', 'wb') as f:
        pickle.dump(pose_2d_s, f)
    with open(
            ap.RESULTS + 'pickles/' + ap.VIDEO_NAME_1.split('.')[0] +
            '_pose3Ds.pickle', 'wb') as f:
        pickle.dump(pose_3d_s, f)
    with open(
            ap.RESULTS + 'pickles/' + ap.VIDEO_NAME_1.split('.')[0] +
            '_image_s.pickle', 'wb') as f:
        pickle.dump(image_s, f)
    #
    # optimized_pose = np.array(optimized_pose).transpose(2,0,1)

    # plotting the smoothed poses ...
    # plotting frame by frame
    #path_to_ref_pos = ['golf_03_best_pos.pickle', 'golf_04_best_pos.pickle']
    # stdsp.load_opt_stat(path_to_ref_pos)
    # stdsp.comparing_poses(optimized_pose)
    #indx = []
    # stdsp.draw_3D_smooth( optimized_pose, pose_2d_s, image_s )

    # close model
    pose_estimator.close()
Example #16
def main():
    for i in TEST_IMAGE_LIST:
        image = cv2.imread(i)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # conversion to rgb

        # create pose estimator
        image_size = image.shape
        pose_estimator = PoseEstimator(image_size, SESSION_PATH,
                                       PROB_MODEL_PATH)

        # load model
        pose_estimator.initialise()

        try:
            # estimation
            print('START')
            pose_2d, visibility, pose_3d = pose_estimator.estimate(image)
            print('=================================')
            print(str(i))
            print('pose 3d:', pose_3d)
            print('pose_2d:', pose_2d)
            angle_target_2d = pose_2d[0].copy()
            angle_target_3d = pose_3d[0].copy()

            # project the head points onto the x-z plane
            angle_2d = calculate_angle_2d(
                [angle_target_3d[0][8], angle_target_3d[2][8]],
                [angle_target_3d[0][10], angle_target_3d[2][10]])
            print(angle_2d)

            # 3D head
            standard_v = np.array([0, 0, -1])
            v_t = np.array([
                angle_target_3d[0][8] - angle_target_3d[0][10],
                angle_target_3d[1][8] - angle_target_3d[1][10],
                angle_target_3d[2][8] - angle_target_3d[2][10]
            ])
            print('==== head angle ===', vg.angle(standard_v, v_t))
            # 3D shoulders
            vector = np.array([
                abs(angle_target_3d[0][6] - angle_target_3d[0][3]),
                abs(angle_target_3d[1][6] - angle_target_3d[1][3]),
                abs(angle_target_3d[2][6] - angle_target_3d[2][3])
            ])

            if vector[0] > vector[1] and vector[0] > vector[2]:
                standard_v = np.array([1, 0, 0])
            elif vector[1] > vector[0] and vector[1] > vector[2]:
                standard_v = np.array([0, -1, 0])
            else:
                print('Could not tell whether the shoulders face front or sideways; defaulting to front-facing')
                standard_v = np.array([1, 0, 0])
            v_t = np.array([
                angle_target_3d[0][11] - angle_target_3d[0][14],
                angle_target_3d[1][11] - angle_target_3d[1][14],
                angle_target_3d[2][11] - angle_target_3d[2][14]
            ])
            print('==== shoulder angle ===', vg.angle(standard_v, v_t))
            # 3D spine (mid-spine-to-neck line vs. mid-spine-to-hip line)
            # standard_v = np.array([angle_target_3d[0][7] - angle_target_3d[0][8],
            #     angle_target_3d[1][7] - angle_target_3d[1][8],
            #     angle_target_3d[2][7] - angle_target_3d[2][8]])
            # v_t = np.array([angle_target_3d[0][7] - angle_target_3d[0][0],
            #     angle_target_3d[1][7] - angle_target_3d[1][0],
            #     angle_target_3d[2][7] - angle_target_3d[2][0]])
            # print('==== spine angle ===', vg.angle(standard_v, v_t))

            # 3D spine (line from the midpoint between the feet to the spine base vs. mid-spine-to-neck line)
            # foot_center_x = (angle_target_3d[0][3] + angle_target_3d[0][6])/2
            # foot_center_y = (angle_target_3d[1][3] + angle_target_3d[1][6])/2
            # foot_center_z = (angle_target_3d[2][3] + angle_target_3d[2][6])/2
            # standard_v = np.array([foot_center_x - angle_target_3d[0][0],
            #     foot_center_y - angle_target_3d[1][0],
            #     foot_center_z - angle_target_3d[2][0]])
            # v_t = np.array([angle_target_3d[0][8] - angle_target_3d[0][0],
            #     angle_target_3d[1][8] - angle_target_3d[1][0],
            #     angle_target_3d[2][8] - angle_target_3d[2][0]])
            # print('==== spine angle ===', vg.angle(standard_v, v_t))

            # equestrian 3D spine (line from the midpoint between the feet to the spine base vs. mid-spine-to-neck line)
            standard_v = np.array([0, 0, 1])
            v_t = np.array([
                angle_target_3d[0][8] - angle_target_3d[0][0],
                angle_target_3d[1][8] - angle_target_3d[1][0],
                angle_target_3d[2][8] - angle_target_3d[2][0]
            ])
            print('==== spine angle ===', vg.angle(standard_v, v_t))

            # 3D left knee
            standard_v = np.array([
                angle_target_3d[0][5] - angle_target_3d[0][4],
                angle_target_3d[1][5] - angle_target_3d[1][4],
                angle_target_3d[2][5] - angle_target_3d[2][4]
            ])
            v_t = np.array([
                angle_target_3d[0][5] - angle_target_3d[0][6],
                angle_target_3d[1][5] - angle_target_3d[1][6],
                angle_target_3d[2][5] - angle_target_3d[2][6]
            ])
            print('==== left knee angle ===', vg.angle(standard_v, v_t))
            # 3D right knee
            standard_v = np.array([
                angle_target_3d[0][2] - angle_target_3d[0][1],
                angle_target_3d[1][2] - angle_target_3d[1][1],
                angle_target_3d[2][2] - angle_target_3d[2][1]
            ])
            v_t = np.array([
                angle_target_3d[0][2] - angle_target_3d[0][3],
                angle_target_3d[1][2] - angle_target_3d[1][3],
                angle_target_3d[2][2] - angle_target_3d[2][3]
            ])
            print('==== right knee angle ===', vg.angle(standard_v, v_t))

            # projection method

            # Show 2D and 3D poses

            display_results(image, pose_2d, visibility, pose_3d)
        except ValueError:
            print(
                'No visible people in the image. Change CENTER_TR in packages/lifting/utils/config.py ...'
            )

        # close model
        pose_estimator.close()