Example #1
def predict_on_tracks(model, img_dir, poseflow_path, output_path, track_id,
                      trim_length):
    # Get all the images
    im_paths = sorted(glob(osp.join(img_dir, '*.png')))
    all_kps = get_labels_poseflow(poseflow_path, len(im_paths))

    # Here we set which track to use.
    track_id = min(track_id, len(all_kps) - 1)
    print('Total number of PoseFlow tracks:', len(all_kps))
    print('Processing track_id:', track_id)
    kps = all_kps[track_id]

    bbox_params_smooth, s, e = get_smooth_bbox_params(kps, vis_thresh=0.1)

    images = []
    images_orig = []
    min_f = max(s, 0)
    max_f = min(e, len(kps))

    print('----------')
    print('Preprocessing frames.')
    print('----------')

    for i in range(min_f, max_f):
        proc_params = process_image(
            im_path=im_paths[i],
            bbox_param=bbox_params_smooth[i],
        )
        images.append(proc_params.pop('image'))
        images_orig.append(proc_params)

    if track_id > 0:
        output_path += '_{}'.format(track_id)

    mkdir(output_path)
    pred_path = osp.join(output_path, 'hmmr_output.pkl')
    if osp.exists(pred_path):
        print('----------')
        print('Loading pre-computed prediction.')
        print('----------')

        with open(pred_path, 'rb') as f:
            preds = pickle.load(f)
    else:
        print('----------')
        print('Running prediction.')
        print('----------')

        preds = model.predict_all_images(images)

        with open(pred_path, 'wb') as f:
            print('Saving prediction results to', pred_path)
            pickle.dump(preds, f)
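The snippet above caches the model's output in hmmr_output.pkl next to the rendered results. As a rough orientation, here is a minimal sketch of reloading that pickle and inspecting the prediction fields the later examples rely on (cams, poses, kps); the exact keys and array shapes depend on the HMMR model being used.

import os.path as osp
import pickle

def load_hmmr_preds(output_path):
    # Reload the cached predictions written by predict_on_tracks.
    pred_path = osp.join(output_path, 'hmmr_output.pkl')
    with open(pred_path, 'rb') as f:
        preds = pickle.load(f)
    # Fields referenced by the other examples in this section.
    for key in ('cams', 'poses', 'kps'):
        if key in preds:
            print(key, getattr(preds[key], 'shape', type(preds[key])))
    return preds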
Example #2
File: demo.py  Project: timerobin/phd
def main(model):
    # Keypoints are only used to compute the bounding box around human tracks.
    # They are not fed into the model. Keypoint format is [x, y, vis]. Keypoint
    # order doesn't matter.
    if config.dataset == '':
        im_paths, kps = load_poseflow_video(config.vid_path, config.out_dir)
        vis_thresh = 0.1
    elif config.dataset == 'penn_action':
        im_paths, kps = load_penn_video(config.penn_dir, config.vid_id)
        vis_thresh = 0.5
    else:
        raise Exception('Dataset {} not recognized'.format(config.dataset))
    bbox_params_smooth, s, e = get_smooth_bbox_params(kps, vis_thresh)
    images = []
    min_f = max(s, 0)
    max_f = min(e, len(kps))
    for i in range(min_f, max_f):
        images.append(
            process_image(im_path=im_paths[i],
                          bbox_param=bbox_params_smooth[i]))
    all_images, vid_paths = process_videos(
        config=config,
        images=images,
        T=(NUM_CONDITION + config.ar_length),
        suffix='AR{}'.format(config.ar_length),
    )
    if not osp.exists(config.out_dir):
        os.mkdir(config.out_dir)
    renderer = VisRenderer(img_size=224)
    for i in range(0, len(all_images), config.batch_size):
        run_predictions(
            config=config,
            renderer=renderer,
            model=model,
            images=all_images[i:i + config.batch_size],
            vid_paths=vid_paths[i:i + config.batch_size],
            num_condition=NUM_CONDITION,
        )
Example #3
def predict_on_tracks(model, img_dir, poseflow_path, output_path, track_id,
                      trim_length):
    # Get all the images
    im_paths = sorted(glob(osp.join(img_dir, '*.png')))
    all_kps = get_labels_poseflow(poseflow_path, len(im_paths))

    # Here we set which track to use.
    track_id = min(track_id, len(all_kps) - 1)
    print('Total number of PoseFlow tracks:', len(all_kps))
    print('Processing track_id:', track_id)
    kps = all_kps[track_id]

    bbox_params_smooth, s, e = get_smooth_bbox_params(kps, vis_thresh=0.1)

    images = []
    images_orig = []
    min_f = max(s, 0)
    max_f = min(e, len(kps))

    print('----------')
    print('Preprocessing frames.')
    print('----------')

    for i in range(min_f, max_f):
        proc_params = process_image(
            im_path=im_paths[i],
            bbox_param=bbox_params_smooth[i],
        )
        images.append(proc_params.pop('image'))
        images_orig.append(proc_params)

    if track_id > 0:
        output_path += '_{}'.format(track_id)

    mkdir(output_path)
    pred_path = osp.join(output_path, 'hmmr_output.pkl')
    if osp.exists(pred_path):
        print('----------')
        print('Loading pre-computed prediction.')
        print('----------')

        with open(pred_path, 'rb') as f:
            preds = pickle.load(f)
    else:
        print('----------')
        print('Running prediction.')
        print('----------')

        preds = model.predict_all_images(images)

        with open(pred_path, 'wb') as f:
            print('Saving prediction results to', pred_path)
            pickle.dump(preds, f)

    human_body_info = {}
    human_body_info['anim_len'] = len(preds['cams'])
    human_body_info['cam_array'] = preds['cams']
    # Pre-allocate the per-frame axis-angle poses, (num_frames, 24, 3); filled below.
    human_body_info['smpl_array'] = np.zeros((len(preds['cams']), 24, 3),
                                             dtype='float32')
    cams = np.zeros(3, dtype='float32')
    for i in range(len(preds['cams'])):
        # Convert this frame's 24 joint rotation matrices to axis-angle form.
        temp = rot_mat_to_axis_angle(preds['poses'][i])
        temp = np.reshape(temp, (24, 3))
        human_body_info['smpl_array'][i] = temp

        cams = cams + preds['cams'][i]
    # Use the mean camera over all frames for every frame.
    cams = cams / len(preds['cams'])
    human_body_info['cam_array'][:] = cams
    with open('../demo/transfer_data/human_body_info.pkl', 'wb') as f:
        print('Saving prediction results to',
              '../demo/transfer_data/human_body_info.pkl')
        pickle.dump(human_body_info, f)
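rot_mat_to_axis_angle is assumed above to turn each frame's stack of 3x3 joint rotation matrices into 24 axis-angle vectors. If such a helper were unavailable, an equivalent conversion could be sketched with SciPy (1.4+) as below; this is a hypothetical stand-in, not the project's own implementation.

import numpy as np
from scipy.spatial.transform import Rotation

def rot_mats_to_axis_angle(rot_mats):
    # rot_mats: (24, 3, 3) rotation matrices for one frame.
    # Returns a (24, 3) array of axis-angle (rotation vector) poses.
    mats = np.asarray(rot_mats).reshape(-1, 3, 3)
    return Rotation.from_matrix(mats).as_rotvec()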
Example #4
def predict_on_tracks(model, img_dir, poseflow_path, output_path, track_id,
                      trim_length):
    # Get all the images
    im_paths = sorted(glob(osp.join(img_dir, '*.png')))
    all_kps = get_labels_poseflow(poseflow_path, len(im_paths))

    # Here we set which track to use.
    track_id = min(track_id, len(all_kps) - 1)
    print('Total number of PoseFlow tracks:', len(all_kps))
    print('Processing track_id:', track_id)
    kps = all_kps[track_id]

    bbox_params_smooth, s, e = get_smooth_bbox_params(kps, vis_thresh=0.1)

    images = []
    images_orig = []
    min_f = max(s, 0)
    max_f = min(e, len(kps))

    print('----------')
    print('Preprocessing frames.')
    print('----------')

    for i in range(min_f, max_f):
        proc_params = process_image(
            im_path=im_paths[i],
            bbox_param=bbox_params_smooth[i],
        )
        images.append(proc_params.pop('image'))
        images_orig.append(proc_params)

    if track_id > 0:
        output_path += '_{}'.format(track_id)

    mkdir(output_path)

    # Make a directory for saving the per-joint rotation matrices as JSON.
    import os
    # Drop the final path component (hmmr_output); only a single parent
    # directory is supported here.
    without = output_path.split(os.sep)[:-1]
    if len(without) > 1:
        print('Expected output_path with a single parent directory, got:', output_path)
        sys.exit(1)
    else:
        without = without[0]
    myjson_dir = osp.join(without, 'rot_output')
    myrot_path = osp.join(myjson_dir, 'rot_output.json')
    mykps_path = osp.join(myjson_dir, 'kps_output.json')
    mkdir(myjson_dir)

    # george's revision

    pred_path = osp.join(output_path, 'hmmr_output.pkl')

    if osp.exists(pred_path):
        print('----------')
        print('Loading pre-computed prediction.')
        print('----------')

        with open(pred_path, 'rb') as f:
            preds = pickle.load(f)
    else:
        print('----------')
        print('Running prediction.')
        print('----------')

        preds = model.predict_all_images(images)

        with open(pred_path, 'wb') as f:
            print('Saving prediction results to', pred_path)
            pickle.dump(preds, f)
    # get the kps
    mykps = preds['kps']
    totalkpsdict = {}
    totalkpsdict['frame_Count'] = mykps.shape[0]
    for i in range(0, mykps.shape[0]):
        frame_index = "frame_" + "%04d" % i
        framedict = {}
        for j in range(0, mykps.shape[1]):
            _kps = mykps[i][j]
            # Convert this keypoint's values to plain floats for JSON serialization.
            kpslist = [float(v) for v in _kps]
            kps_index = 'kps_' + "%02d" % j
            framedict[kps_index] = kpslist
        totalkpsdict[frame_index] = framedict
    print('Saving kps results to', mykps_path)
    with open(mykps_path, 'w') as jf:
        json.dump(totalkpsdict, jf, sort_keys=True)
    # get the poses
    myposes = preds['poses']
    totaldict = {}
    totaldict['frame_Count'] = myposes.shape[0]
    print("There are totally {} frames ".format(myposes.shape[0]))
    print('----------')
    for i in range(0, myposes.shape[0]):
        frame_index = "frame_" + "%04d" % i
        framedict = {}
        print('processing frame : {}'.format(frame_index))
        for j in range(0, myposes.shape[1]):
            rotmat = myposes[i][j]
            # Flatten the 3x3 rotation matrix to a list of 9 floats for JSON.
            rotlist = [float(v) for v in np.reshape(rotmat, -1)]
            rot_index = 'rot_' + "%02d" % j
            framedict[rot_index] = rotlist
        totaldict[frame_index] = framedict
        print('----------')
    print('Saving rot results to', myrot_path)

    with open(myrot_path, 'w') as jf:
        json.dump(totaldict, jf, sort_keys=True)
    # george's revision

    if trim_length > 0:
        output_path += '_trim'
    print('----------')
    print('Rendering results to {}.'.format(output_path))
    print('----------')
    # preds holds the model predictions; next, render the SMPL model from them.
    render_preds(
        output_path=output_path,
        config=config,
        preds=preds,
        images=images,
        images_orig=images_orig,
        trim_length=trim_length,
    )