Example #1
def smpl_from_skel(path, sub, out, skel3d, args):
    config = CONFIG[args.body]
    results3d, filenames = read_keypoints3d_all(skel3d)
    pids = list(results3d.keys())
    weight_shape = load_weight_shape(args.model, args.opts)
    weight_pose = load_weight_pose(args.model, args.opts)
    with Timer('Loading {}, {}'.format(args.model, args.gender)):
        body_model = load_model(args.gender, model_type=args.model)
    for pid, result in results3d.items():
        body_params = smpl_from_keypoints3d(body_model,
                                            result['keypoints3d'],
                                            config,
                                            args,
                                            weight_shape=weight_shape,
                                            weight_pose=weight_pose)
        result['body_params'] = body_params

    # write out the per-frame results
    for nf, skelname in enumerate(tqdm(filenames, desc='writing')):
        basename = os.path.basename(skelname)
        outname = join(out, basename)
        res = []
        for pid, result in results3d.items():
            frames = result['frames']
            if nf in frames:
                nnf = frames.index(nf)
                val = {'id': pid}
                params = select_nf(result['body_params'], nnf)
                val.update(params)
                res.append(val)
        write_smpl(outname, res)
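
The per-frame write loop above hinges on mapping the global frame id nf to each person's local index before slicing that person's parameters (select_nf does the slicing in the real code). A self-contained sketch of the pattern on toy data:

results3d = {
    0: {'frames': [0, 1, 2, 4], 'poses': ['p0', 'p1', 'p2', 'p4']},
    1: {'frames': [2, 3, 4], 'poses': ['q2', 'q3', 'q4']},
}
for nf in range(5):
    res = []
    for pid, result in results3d.items():
        frames = result['frames']
        if nf in frames:
            nnf = frames.index(nf)  # global frame id -> local index
            res.append({'id': pid, 'poses': result['poses'][nnf]})
    print(nf, res)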
Example #2
def calib_intri(path, step):
    camnames = sorted(os.listdir(join(path, 'images')))
    cameras = {}
    for cam in camnames:
        imagenames = sorted(glob(join(path, 'images', cam, '*.jpg')))
        chessnames = sorted(glob(join(path, 'chessboard', cam, '*.json')))
        k3ds, k2ds = [], []
        for chessname in chessnames[::step]:
            data = read_json(chessname)
            k3d = np.array(data['keypoints3d'], dtype=np.float32)
            k2d = np.array(data['keypoints2d'], dtype=np.float32)
            if k2d[:, -1].sum() < 0.01:  # skip frames where the board was not detected
                continue
            k3ds.append(k3d)
            k2ds.append(np.ascontiguousarray(k2d[:, :-1]))
        # read a single frame only to obtain the image resolution
        gray = cv2.imread(imagenames[0], 0)
        print('>> Detect {}/{:3d} frames'.format(cam, len(k2ds)))
        with Timer('calibrate'):
            ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
                k3ds, k2ds, gray.shape[::-1], None, None)
            cameras[cam] = {
                'K': K,
                'dist': dist  # dist: (1, 5)
            }
    write_intri(join(path, 'output', 'intri.yml'), cameras)
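
For reference, here is a minimal, self-contained sketch of the same cv2.calibrateCamera call on a synthetic 9x6 board with fake detections; it assumes nothing from the repo and exists only to show the expected shapes and dtypes:

import cv2
import numpy as np

# object points of a 9x6 board on the z=0 plane, square size 1.0
objp = np.zeros((9 * 6, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

K_gt = np.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
k3ds, k2ds = [], []
for rvec in ([0.3, 0., 0.], [-0.3, 0., 0.], [0., 0.3, 0.],
             [0., -0.3, 0.], [0.2, 0.2, 0.]):
    pts, _ = cv2.projectPoints(objp, np.float32(rvec),
                               np.float32([-4., -3., 12.]), K_gt, None)
    k3ds.append(objp)
    k2ds.append(np.ascontiguousarray(pts.reshape(-1, 2).astype(np.float32)))

ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
    k3ds, k2ds, (640, 480), None, None)
print('rms reprojection error:', ret)
print('estimated K:', K.round(1), sep='\n')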
Example #3
def mvposev1(dataset, args, cfg):
    dataset.no_img = not (args.vis_det or args.vis_match or args.vis_repro
                          or args.ret_crop)
    start, end = args.start, min(args.end, len(dataset))
    affinity_model = ComposedAffinity(cameras=dataset.cameras,
                                      basenames=dataset.cams,
                                      cfg=cfg.affinity)
    group = PeopleGroup(Pall=dataset.Pall, cfg=cfg.group)

    if args.vis3d:
        from easymocap.socket.base_client import BaseSocketClient
        vis3d = BaseSocketClient(args.host, args.port)
    for nf in tqdm(range(start, end), desc='reconstruction'):
        group.clear()
        with Timer('load data', not args.time):
            images, annots = dataset[nf]
        if args.vis_det:
            dataset.vis_detections(images, annots, nf, sub_vis=args.sub_vis)
        # compute the affinity between detections from different views
        with Timer('compute affinity', not args.time):
            affinity, dimGroups = affinity_model(annots, images=images)
        with Timer('associate', not args.time):
            group = simple_associate(annots,
                                     affinity,
                                     dimGroups,
                                     dataset.Pall,
                                     group,
                                     cfg=cfg.associate)
            results = group
        if args.vis_match:
            dataset.vis_detections(images,
                                   annots,
                                   nf,
                                   mode='match',
                                   sub_vis=args.sub_vis)
        if args.vis_repro:
            dataset.vis_repro(images, results, nf, sub_vis=args.sub_vis)
        dataset.write_keypoints2d(annots, nf)
        dataset.write_keypoints3d(results, nf)
        if args.vis3d:
            vis3d.send(group.results)
    Timer.report()
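
mvposev1 consumes pre-built objects; the args fields it actually reads are collected in the hypothetical namespace below (values are examples, and dataset/cfg must come from the repo's dataset and config loaders):

from argparse import Namespace

# every field below is read somewhere in mvposev1 above
args = Namespace(start=0, end=10000, time=False, sub_vis=[],
                 vis_det=False, vis_match=False, vis_repro=False,
                 vis3d=False, ret_crop=False,
                 host='127.0.0.1', port=9999)
# mvposev1(dataset, args, cfg)  # dataset/cfg: EasyMocap dataset + config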
Example #4
def mv1pmf_smpl(dataset, args, weight_pose=None, weight_shape=None):
    dataset.skel_path = args.skel
    kp3ds = []
    start, end = args.start, min(args.end, len(dataset))
    keypoints2d, bboxes = [], []
    dataset.no_img = True
    for nf in tqdm(range(start, end), desc='loading'):
        images, annots = dataset[nf]
        keypoints2d.append(annots['keypoints'])
        bboxes.append(annots['bbox'])
    kp3ds = dataset.read_skeleton(start, end)
    keypoints2d = np.stack(keypoints2d)
    bboxes = np.stack(bboxes)
    kp3ds = check_keypoints(kp3ds, 1)
    # optimize the human shape
    with Timer('Loading {}, {}'.format(args.model, args.gender),
               not args.verbose):
        body_model = load_model(gender=args.gender, model_type=args.model)
    params = smpl_from_keypoints3d2d(body_model,
                                     kp3ds,
                                     keypoints2d,
                                     bboxes,
                                     dataset.Pall,
                                     config=dataset.config,
                                     args=args,
                                     weight_shape=weight_shape,
                                     weight_pose=weight_pose)
    # write out the results
    dataset.no_img = not (args.vis_smpl or args.vis_repro)
    for nf in tqdm(range(start, end), desc='render'):
        images, annots = dataset[nf]
        param = select_nf(params, nf - start)
        dataset.write_smpl(param, nf)
        if args.vis_smpl:
            vertices = body_model(return_verts=True,
                                  return_tensor=False,
                                  **param)
            dataset.vis_smpl(vertices=vertices[0],
                             faces=body_model.faces,
                             images=images,
                             nf=nf,
                             sub_vis=args.sub_vis,
                             add_back=True)
        if args.vis_repro:
            keypoints = body_model(return_verts=False,
                                   return_tensor=False,
                                   **param)[0]
            kpts_repro = projectN3(keypoints, dataset.Pall)
            dataset.vis_repro(images, kpts_repro, nf=nf, sub_vis=args.sub_vis)
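
The reprojection step relies on projectN3; below is a minimal numpy sketch of that operation under the usual pinhole model (illustrative only: the repo's implementation also carries confidence values):

import numpy as np

def project_n3(keypoints3d, Pall):
    """Project (J, 3) keypoints with (V, 3, 4) matrices -> (V, J, 2)."""
    homo = np.hstack([keypoints3d, np.ones((len(keypoints3d), 1))])
    uvw = np.einsum('vij,kj->vki', Pall, homo)  # (V, J, 3)
    return uvw[..., :2] / uvw[..., 2:3]         # perspective divide

Pall = np.tile(np.hstack([np.eye(3), np.zeros((3, 1))]), (2, 1, 1))
print(project_n3(np.array([[0., 0., 2.], [0.5, -0.5, 4.]]), Pall))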
Example #5
def calib_intri_share(path, step):
    camnames = sorted(os.listdir(join(path, 'images')))
    imagenames = sorted(glob(join(path, 'images', '*', '*.jpg')))
    chessnames = sorted(glob(join(path, 'chessboard', '*', '*.json')))
    k3ds_, k2ds_, imgs = [], [], []
    valid_idx = []
    for i, chessname in enumerate(chessnames):
        flag, k2d, k3d = read_chess(chessname)
        # append every frame so valid_idx can index the full,
        # image-aligned lists; invalid frames are simply never selected
        k3ds_.append(k3d)
        k2ds_.append(k2d)
        if flag:
            valid_idx.append(i)
    MAX_ERROR_PIXEL = 1.
    lines, line_cols = get_lines_chessboard()
    valid_idx = valid_idx[::step]
    len_valid = len(valid_idx)
    cameras = {}
    while True:
        # sample
        imgs = [imagenames[i] for i in valid_idx]
        k3ds = [k3ds_[i] for i in valid_idx]
        k2ds = [np.ascontiguousarray(k2ds_[i][:, :-1]) for i in valid_idx]
        gray = cv2.imread(imgs[0], 0)
        print('>> Detect {:3d} frames'.format(len(valid_idx)))
        with Timer('calibrate'):
            ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
                k3ds, k2ds, gray.shape[::-1], None, None)
        with Timer('check'):
            removed = []
            for i in range(len(imgs)):
                img = cv2.imread(imgs[i])
                points2d_repro, _ = cv2.projectPoints(k3ds[i], rvecs[i],
                                                      tvecs[i], K, dist)
                points2d_repro = points2d_repro.squeeze()
                points2d = k2ds_[valid_idx[i]]
                err = np.linalg.norm(points2d_repro - points2d[:, :2],
                                     axis=1).mean()
                plot_points2d(img,
                              points2d_repro,
                              lines,
                              col=(0, 0, 255),
                              lw=1,
                              putText=False)
                plot_points2d(img, points2d, lines, lw=1, putText=False)
                print(imgs[i], err)
                # cv2.imshow('vis', img)
                # cv2.waitKey(0)
                if err > MAX_ERROR_PIXEL:
                    removed.append(i)
            for i in removed[::-1]:
                valid_idx.pop(i)
        # NOTE: `args` here is the script's module-level namespace
        # (it is expected to provide a --remove flag)
        if len_valid == len(valid_idx) or not args.remove:
            print(K)
            print(dist)
            for cam in camnames:
                cameras[cam] = {
                    'K': K,
                    'dist': dist  # dist: (1, 5)
                }
            break
        len_valid = len(valid_idx)
    write_intri(join(path, 'output', 'intri.yml'), cameras)
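
The heart of the removal loop is the per-frame reprojection check. A self-contained sketch with synthetic detections (the real code reads detections from the chessboard JSON files):

import cv2
import numpy as np

objp = np.zeros((9 * 6, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
K = np.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
rvec, tvec = np.float32([0.1, 0.2, 0.]), np.float32([-4., -3., 12.])

repro, _ = cv2.projectPoints(objp, rvec, tvec, K, None)
repro = repro.squeeze()                                   # (N, 2)
detected = repro + np.random.normal(0, 0.5, repro.shape)  # noisy stand-ins
err = np.linalg.norm(repro - detected, axis=1).mean()
print('mean reprojection error: {:.2f}px -> {}'.format(
    err, 'keep' if err <= 1.0 else 'drop'))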
Example #6
if __name__ == "__main__":
    from easymocap.mytools import load_parser, parse_parser
    parser = load_parser()
    parser.add_argument('--skel', type=str, default=None, 
        help='path to keypoints3d')
    parser.add_argument('--direct', action='store_true')
    parser.add_argument('--video', action='store_true')
    parser.add_argument('--gtK', action='store_true')
    parser.add_argument('--normal', action='store_true',
        help='set to use the normal of the mirror')
    args = parse_parser(parser)
    
    helps = '''
  Demo code for single view and one person with mirror:

    - Input : {}: [{}]
    - Output: {}
    - Body  : {} => {}, {}
    '''.format(args.path, ', '.join(args.sub), args.out,
        args.model, args.gender, args.body)
    print(helps)
    with Timer('Loading {}, {}'.format(args.model, args.gender)):
        body_model = load_model(args.gender, model_type=args.model)
    with Timer('Loading SPIN'):
        spin_model = SPIN(
            SMPL_MEAN_PARAMS='data/models/smpl_mean_params.npz', 
            checkpoint='data/models/spin_checkpoint.pt', 
            device=body_model.device)
    if args.video:
        demo_1v1pmf_smpl_mirror(args.path, body_model, spin_model, args)
    else:
        demo_1v1p1f_smpl_mirror(args.path, body_model, spin_model, args)
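
A hypothetical invocation, assuming this block is the __main__ body of an EasyMocap mirror demo script (the script name and data path are illustrative; only the flags registered above are certain):

# python demo_mirror.py --path data/mirror-demo --video --normal
# drop --video to run the single-frame path (demo_1v1p1f_smpl_mirror)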