Code Example #1
def smpl_from_skel(path, sub, out, skel3d, args):
    config = CONFIG[args.body]
    # read all 3D skeleton files and group the keypoints by person id
    results3d, filenames = read_keypoints3d_all(skel3d)
    pids = list(results3d.keys())
    # loss weights for the shape and pose stages of the fitting
    weight_shape = load_weight_shape(args.model, args.opts)
    weight_pose = load_weight_pose(args.model, args.opts)
    with Timer('Loading {}, {}'.format(args.model, args.gender)):
        body_model = load_model(args.gender, model_type=args.model)
    # fit SMPL parameters to each person's 3D keypoint sequence
    for pid, result in results3d.items():
        body_params = smpl_from_keypoints3d(body_model,
                                            result['keypoints3d'],
                                            config,
                                            args,
                                            weight_shape=weight_shape,
                                            weight_pose=weight_pose)
        result['body_params'] = body_params

    # write for each frame
    for nf, skelname in enumerate(tqdm(filenames, desc='writing')):
        basename = os.path.basename(skelname)
        outname = join(out, basename)
        res = []
        for pid, result in results3d.items():
            frames = result['frames']
            if nf in frames:
                nnf = frames.index(nf)
                val = {'id': pid}
                # slice out this person's parameters for the current frame
                params = select_nf(result['body_params'], nnf)
                val.update(params)
                res.append(val)
        write_smpl(outname, res)
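
The per-frame writing above relies on select_nf to pull a single frame out of the fitted parameters. Below is a minimal sketch of that kind of slicing, assuming body_params is a dict of numpy arrays whose time-varying entries (poses, Rh, Th) carry a leading frame axis while a single shapes vector is shared across the sequence; the actual EasyMocap helper may differ in detail, and select_nf_sketch is a hypothetical name.

import numpy as np

def select_nf_sketch(body_params, nf):
    # take frame nf from the time-varying parameters
    out = {key: body_params[key][nf:nf + 1] for key in ['poses', 'Rh', 'Th']}
    # one shape vector is estimated for the whole sequence, so keep it as-is
    out['shapes'] = body_params['shapes']
    return out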
Code Example #2
File: mv1p.py, Project: sergeyprokudin/EasyMocap
def mv1pmf_smpl(dataset, args, weight_pose=None, weight_shape=None):
    dataset.skel_path = args.skel
    kp3ds = []
    start, end = args.start, min(args.end, len(dataset))
    keypoints2d, bboxes = [], []
    # skip image loading while only the annotations are needed
    dataset.no_img = True
    for nf in tqdm(range(start, end), desc='loading'):
        images, annots = dataset[nf]
        keypoints2d.append(annots['keypoints'])
        bboxes.append(annots['bbox'])
    # triangulated 3D skeleton for the selected frame range
    kp3ds = dataset.read_skeleton(start, end)
    keypoints2d = np.stack(keypoints2d)
    bboxes = np.stack(bboxes)
    kp3ds = check_keypoints(kp3ds, 1)
    # optimize the human shape
    with Timer('Loading {}, {}'.format(args.model, args.gender),
               not args.verbose):
        body_model = load_model(gender=args.gender, model_type=args.model)
    # jointly fit SMPL to the 3D skeleton and the 2D detections from all views
    params = smpl_from_keypoints3d2d(body_model,
                                     kp3ds,
                                     keypoints2d,
                                     bboxes,
                                     dataset.Pall,
                                     config=dataset.config,
                                     args=args,
                                     weight_shape=weight_shape,
                                     weight_pose=weight_pose)
    # write out the results
    # load images again only if some visualization was requested
    dataset.no_img = not (args.vis_smpl or args.vis_repro)
    for nf in tqdm(range(start, end), desc='render'):
        images, annots = dataset[nf]
        param = select_nf(params, nf - start)
        dataset.write_smpl(param, nf)
        # render the posed SMPL mesh over the selected views
        if args.vis_smpl:
            vertices = body_model(return_verts=True,
                                  return_tensor=False,
                                  **param)
            dataset.vis_smpl(vertices=vertices[0],
                             faces=body_model.faces,
                             images=images,
                             nf=nf,
                             sub_vis=args.sub_vis,
                             add_back=True)
        # reproject the regressed 3D joints back into every view
        if args.vis_repro:
            keypoints = body_model(return_verts=False,
                                   return_tensor=False,
                                   **param)[0]
            kpts_repro = projectN3(keypoints, dataset.Pall)
            dataset.vis_repro(images, kpts_repro, nf=nf, sub_vis=args.sub_vis)
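
For reference, the reprojection step (projectN3 with dataset.Pall) boils down to multiplying homogeneous 3D joints by each view's 3x4 projection matrix and dividing by depth. A minimal sketch, under the assumption that the keypoints are a (J, 4) array with a confidence column and Pall is (nViews, 3, 4); project_to_views_sketch is a hypothetical name and the real EasyMocap implementation may differ.

import numpy as np

def project_to_views_sketch(keypoints3d, Pall):
    # keypoints3d: (J, 4) = [X, Y, Z, conf]; Pall: (nViews, 3, 4) projection matrices
    kp_homo = np.hstack([keypoints3d[:, :3], np.ones((keypoints3d.shape[0], 1))])
    projected = []
    for P in Pall:
        uvz = kp_homo @ P.T               # (J, 3) homogeneous image coordinates
        uv = uvz[:, :2] / uvz[:, 2:3]     # perspective division
        projected.append(np.hstack([uv, keypoints3d[:, 3:4]]))  # carry confidence through
    return np.stack(projected)            # (nViews, J, 3)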
Code Example #3
    from easymocap.mytools import load_parser, parse_parser
    parser = load_parser()
    parser.add_argument('--skel', type=str, default=None, 
        help='path to keypoints3d')
    parser.add_argument('--direct', action='store_true')
    parser.add_argument('--video', action='store_true')
    parser.add_argument('--gtK', action='store_true')
    parser.add_argument('--normal', action='store_true',
        help='set to use the normal of the mirror')
    args = parse_parser(parser)
    
    helps = '''
  Demo code for single view and one person with mirror:

    - Input : {}: [{}]
    - Output: {}
    - Body  : {} => {}, {}
    '''.format(args.path, ', '.join(args.sub), args.out,
        args.model, args.gender, args.body)
    print(helps)
    with Timer('Loading {}, {}'.format(args.model, args.gender)):
        body_model = load_model(args.gender, model_type=args.model)
    # SPIN regresses an initial SMPL estimate from a single image
    with Timer('Loading SPIN'):
        spin_model = SPIN(
            SMPL_MEAN_PARAMS='data/models/smpl_mean_params.npz', 
            checkpoint='data/models/spin_checkpoint.pt', 
            device=body_model.device)
    if args.video:
        # one view, one person, multiple frames (video) with a mirror
        demo_1v1pmf_smpl_mirror(args.path, body_model, spin_model, args)
    else:
        # one view, one person, a single frame with a mirror
        demo_1v1p1f_smpl_mirror(args.path, body_model, spin_model, args)
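
All three examples wrap model loading in a Timer context manager; in Code Example #2 it takes a second positional argument (not args.verbose) that presumably silences the printout. The sketch below shows how such a timing helper could look; TimerSketch is a hypothetical stand-in, and the actual easymocap.mytools.Timer may behave differently.

import time

class TimerSketch:
    # prints the elapsed wall-clock time of a 'with' block unless silenced
    def __init__(self, name, silent=False):
        self.name = name
        self.silent = silent
    def __enter__(self):
        self.start = time.time()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        if not self.silent:
            print('[Timer] {}: {:.3f}s'.format(self.name, time.time() - self.start))

Usage mirrors the snippets above: with TimerSketch('Loading smpl, neutral'): body_model = load_model(...).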