Code Example #1
File: run.py  Project: flybiubiu/bihand
def validate(val_loader, model, vis=False):
    # switch to evaluate mode
    evaluator = EvalUtil()
    drawer = HandDrawer(reslu=256)
    model.eval()
    if vis:
        drawer.daemon = True
        drawer.start()
    bar = Bar(colored("EVAL", color='yellow'), max=len(val_loader))
    with torch.no_grad():
        for i, metas in enumerate(val_loader):
            results, targets = one_forward_pass(metas, model)
            pred_jointRS = results['jointRS']  # B, 21, 3
            targ_joint = targets['joint']  # B, 21, 3
            joint_bone = targets['joint_bone'].unsqueeze(1)  # B, 1, 1
            joint_root = targets['joint_root'].unsqueeze(1)  # B, 1, 3
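            # de-normalize: jointRS is presumably root-relative and bone-length-normalized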
            pred_joint = pred_jointRS * joint_bone + joint_root  # B, 21, 3

            # quantitative
            for targj, predj in zip(targ_joint, pred_joint):
                evaluator.feed(targj * 1000.0, predj * 1000.0)

            pck20 = evaluator.get_pck_all(20)
            pck30 = evaluator.get_pck_all(30)
            pck40 = evaluator.get_pck_all(40)
            bar.suffix = ('({batch}/{size}) '
                          'pck20avg: {pck20:.3f} | '
                          'pck30avg: {pck30:.3f} | '
                          'pck40avg: {pck40:.3f} | ').format(
                              batch=i + 1,
                              size=len(val_loader),
                              pck20=pck20,
                              pck30=pck30,
                              pck40=pck40,
                          )
            bar.next()

            ## visualize
            if vis:  # a little time-consuming
                clr = targets['clr'].detach().cpu()
                uvd = handutils.xyz2uvd(pred_joint,
                                        targets['joint_root'],
                                        targets['joint_bone'],
                                        intr=targets['intr'],
                                        mode='persp').detach().cpu()
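                # uvd[..., :2] appears to be normalized to [0, 1]; scale by the image size to get pixel coords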
                uv = uvd[:, :, :2] * clr.shape[-1]

                vertsRS = results['vertsRS'].detach().cpu()
                mean_bone_len = torch.Tensor([0.1])  # 0.1 m
                fixed_root = torch.Tensor([0.0, 0.0, 0.5])  # 0.5 m
                verts = vertsRS * mean_bone_len + fixed_root
                drawer.feed(clr, verts, uv)

        bar.finish()
        drawer.set_stop()
        (_1, _2, _3, auc_all, pck_curve_all,
         thresholds) = evaluator.get_measures(20, 50, 20)
        print("AUC all: {}".format(auc_all))

    return auc_all
Code Example #2
def validate(val_loader, model, criterion, args, stop=-1):
    # switch to evaluate mode
    am_accH = AverageMeter()

    model.eval()
    bar = Bar('\033[33m Eval  \033[0m', max=len(val_loader))
    with torch.no_grad():
        for i, metas in enumerate(val_loader):
            results, targets, _1, _2 = one_forward_pass(
                metas, model, criterion, args=None, train=False
            )
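            # heatmap accuracy on the last stack output; 'hm_veil' presumably masks invisible joints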
            avg_acc_hm, _ = evalutils.accuracy_heatmap(
                results['l_hm'][-1],
                targets['hm'],
                targets['hm_veil']
            )
            bar.suffix = (
                '({batch}/{size}) '
                'accH: {accH:.4f} | '
            ).format(
                batch=i + 1,
                size=len(val_loader),
                accH=avg_acc_hm,
            )
            am_accH.update(avg_acc_hm, targets['batch_size'])
            bar.next()
            if stop != -1 and i >= stop:
                break
        bar.finish()
        print("accH: {}".format(am_accH.avg))
    return am_accH.avg
Code Example #3
def validate(val_loader, model, criterion, args, stop=-1):
    am_quat_norm = AverageMeter()
    am_quat_l2 = AverageMeter()
    am_quat_cos = AverageMeter()
    evaluator = EvalUtil()
    model.eval()
    total_quat = []
    total_beta = []
    bar = Bar('\033[33m Eval  \033[0m', max=len(val_loader))
    with torch.no_grad():
        for i, metas in enumerate(val_loader):
            results, targets, total_loss, losses = one_forward_pass(metas,
                                                                    model,
                                                                    criterion,
                                                                    args,
                                                                    train=True)
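            # train=True, presumably so the pass also returns the quaternion losses logged below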
            am_quat_norm.update(losses['quat_norm'].item(),
                                targets['batch_size'])
            am_quat_l2.update(losses['quat_l2'].item(), targets['batch_size'])
            am_quat_cos.update(losses['quat_cos'].item(),
                               targets['batch_size'])
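            # multiply by the bone length to recover root-relative joints (presumably in metres)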
            predjointRS = results['jointRS']
            joint_bone = targets['joint_bone'].unsqueeze(1)
            predjointR = predjointRS * joint_bone

            targjointRS = targets['jointRS']
            targjointR = targjointRS * joint_bone

            predjointR = predjointR.detach().cpu()
            targjointR = targjointR.detach().cpu()
            for targj, predj in zip(targjointR, predjointR):
                evaluator.feed(targj * 1000.0, predj * 1000.0)

            pck20 = evaluator.get_pck_all(20)
            pck30 = evaluator.get_pck_all(30)
            pck40 = evaluator.get_pck_all(40)

            bar.suffix = ('({batch}/{size}) '
                          'lN: {lossN:.5f} | '
                          'lL2: {lossL2:.5f} | '
                          'lC: {lossC:.3f} |'
                          'pck20avg: {pck20:.3f} | '
                          'pck30avg: {pck30:.3f} | '
                          'pck40avg: {pck40:.3f} | ').format(
                              batch=i + 1,
                              size=len(val_loader),
                              pck20=pck20,
                              pck30=pck30,
                              pck40=pck40,
                              lossN=am_quat_norm.avg,
                              lossL2=am_quat_l2.avg,
                              lossC=am_quat_cos.avg,
                          )

            bar.next()
        bar.finish()

    return 0
Code Example #4
def train(train_loader, model, criterion, optimizer, args):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    am_loss_hm = AverageMeter()
    am_loss_mask = AverageMeter()
    am_loss_all = AverageMeter()

    last = time.time()
    # switch to train mode
    model.train()
    bar = Bar('\033[31m Train \033[0m', max=len(train_loader))
    for i, metas in enumerate(train_loader):
        data_time.update(time.time() - last)
        results, targets, total_loss, losses = one_forward_pass(
            metas, model, criterion, args, train=True
        )
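        # track running averages of the heatmap / mask losses and the total loss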
        am_loss_hm.update(
            losses['ups_hm'].item(), targets['batch_size']
        )
        am_loss_mask.update(
            losses['ups_mask'].item(), targets['batch_size']
        )
        am_loss_all.update(
            total_loss.item(), targets['batch_size']
        )

        ''' backward and step '''
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        ''' progress '''
        batch_time.update(time.time() - last)
        last = time.time()
        bar.suffix = (
            '({batch}/{size}) '
            'd: {data:.2f}s | '
            'b: {bt:.2f}s | '
            't: {total:}s | '
            'eta:{eta:}s | '
            'lH: {lossH:.5f} | '
            'lM: {lossM:.5f} | '
            'lA: {lossA:.3f} |'
        ).format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            lossH=am_loss_hm.avg,
            lossM=am_loss_mask.avg,
            lossA=am_loss_all.avg,
        )
        bar.next()
    bar.finish()
Code Example #5
def train(train_loader, model, criterion, optimizer, args):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    am_quat_norm = AverageMeter()
    am_quat_l2 = AverageMeter()
    am_quat_cos = AverageMeter()
    am_joint = AverageMeter()
    am_kin_len = AverageMeter()

    last = time.time()
    # switch to train mode
    model.train()
    bar = Bar('\033[31m Train \033[0m', max=len(train_loader))
    for i, metas in enumerate(train_loader):
        data_time.update(time.time() - last)
        results, targets, total_loss, losses = one_forward_pass(metas,
                                                                model,
                                                                criterion,
                                                                args,
                                                                train=True)
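        # track running averages of the quaternion loss terms (norm / L2 / cosine)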
        am_quat_norm.update(losses['quat_norm'].item(), targets['batch_size'])
        am_quat_l2.update(losses['quat_l2'].item(), targets['batch_size'])
        am_quat_cos.update(losses['quat_cos'].item(), targets['batch_size'])
        ''' backward and step '''
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        ''' progress '''
        batch_time.update(time.time() - last)
        last = time.time()
        bar.suffix = (
            '({batch}/{size}) '
            'd: {data:.2f}s | '
            'b: {bt:.2f}s | '
            't: {total:}s | '
            'eta:{eta:}s | '
            # 'lJ: {lossJ:.5f} | '
            # 'lK: {lossK:.5f} | '
            'lN: {lossN:.5f} | '
            'lL2: {lossL2:.5f} | '
            'lC: {lossC:.3f} |').format(
                batch=i + 1,
                size=len(train_loader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                lossJ=am_joint.avg,
                lossK=am_kin_len.avg,
                lossN=am_quat_norm.avg,
                lossL2=am_quat_l2.avg,
                lossC=am_quat_cos.avg,
            )
        bar.next()
    bar.finish()
Code Example #6
File: train_siknet.py  Project: lixiny/bihand
def validate(val_loader, model, criterion, args, stop=-1):
    am_quat_norm = AverageMeter()
    evaluator = EvalUtil()
    model.eval()
    bar = Bar(colored('Eval', 'yellow'), max=len(val_loader))

    with torch.no_grad():
        for i, metas in enumerate(val_loader):
            results, targets, total_loss, losses = one_fowrard_pass(
                metas, model, criterion, args, train=True
            )
            am_quat_norm.update(
                losses['quat_norm'].item(), targets['batch_size']
            )

            joint_bone = targets['joint_bone'].unsqueeze(1)

            predjointR = results['jointRS'] * joint_bone
            targjointR = targets['jointRS'] * joint_bone
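            # multiplying by the bone length restores metric, root-relative joints (presumably metres)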

            for targj, predj in zip(targjointR, predjointR):
                evaluator.feed(targj * 1000.0, predj * 1000.0)

            pck20 = evaluator.get_pck_all(20)
            pck30 = evaluator.get_pck_all(30)
            pck40 = evaluator.get_pck_all(40)

            bar.suffix = (
                '({batch}/{size}) '
                'pck20avg: {pck20:.3f} | '
                'pck30avg: {pck30:.3f} | '
                'pck40avg: {pck40:.3f} | '
            ).format(
                batch=i + 1,
                size=len(val_loader),
                pck20=pck20,
                pck30=pck30,
                pck40=pck40,
            )

            bar.next()
        bar.finish()
        (
            _1, _2, _3,
            auc_all,
            pck_curve_all,
            thresholds
        ) = evaluator.get_measures(
            20, 50, 20
        )
        print(pck_curve_all)
        print("AUC all: {}".format(auc_all))

    return auc_all
Code Example #7
def train(train_loader, model, criterion, optimizer,
          lr_init=None, lr_now=None, glob_step=None, lr_decay=None, gamma=None,
          max_norm=True):
    losses = utils.AverageMeter()

    model.train()

    start = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(train_loader))

    for i, (inps, tars) in enumerate(train_loader):
        glob_step += 1
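        # step-based learning-rate decay every lr_decay global steps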
        if glob_step % lr_decay == 0 or glob_step == 1:
            lr_now = utils.lr_decay(optimizer, glob_step, lr_init, lr_decay, gamma)
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda(non_blocking=True))  # 'async' became a reserved word in Python 3.7; use non_blocking

        outputs = model(inputs)

        optimizer.zero_grad()
        loss = criterion(outputs, targets)
        losses.update(loss.item(), inputs.size(0))
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)  # in-place variant; clip_grad_norm is deprecated
        optimizer.step()

        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()
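        # batch_time covers 100 iterations, so * 10.0 below reports per-batch time in ms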

        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.4f}' \
            .format(batch=i + 1,
                    size=len(train_loader),
                    batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg)
        bar.next()

    bar.finish()
    return glob_step, lr_now, losses.avg
Code Example #8
def validate(val_loader, model, criterion, args, stop=-1):
    # switch to evaluate mode
    evaluator = EvalUtil()
    model.eval()
    bar = Bar(colored('Eval', 'yellow'), max=len(val_loader))
    with torch.no_grad():
        for i, metas in enumerate(val_loader):
            results, targets, _1, _2 = one_forward_pass(metas,
                                                        model,
                                                        criterion,
                                                        args=None,
                                                        train=False)
            pred_joint = results['l_joint'][-1].detach().cpu()
            targ_joint = targets['joint'].detach().cpu()
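            # feed the evaluator errors in millimetres (joints presumably stored in metres)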

            for targj, predj in zip(targ_joint, pred_joint):
                evaluator.feed(targj * 1000.0, predj * 1000.0)

            pck20 = evaluator.get_pck_all(20)
            pck30 = evaluator.get_pck_all(30)
            pck40 = evaluator.get_pck_all(40)
            bar.suffix = ('({batch}/{size}) '
                          'pck20avg: {pck20:.3f} | '
                          'pck30avg: {pck30:.3f} | '
                          'pck40avg: {pck40:.3f} | ').format(
                              batch=i + 1,
                              size=len(val_loader),
                              pck20=pck20,
                              pck30=pck30,
                              pck40=pck40,
                          )
            bar.next()
            if stop != -1 and i >= stop:
                break
        bar.finish()
        (_1, _2, _3, auc_all, pck_curve_all,
         thresholds) = evaluator.get_measures(20, 50, 20)
        print("AUC all: {}".format(auc_all))

    return auc_all
Code Example #9
def test(test_loader, model, criterion, stat_3d, procrustes=True):
    losses = utils.AverageMeter()

    model.eval()

    all_dist = []
    start = time.time()
    batch_time = 0
    bar = Bar('>>>', fill='>', max=len(test_loader))

    for i, (inps, tars) in enumerate(test_loader):
        inputs = Variable(inps.cuda())
        targets = Variable(tars.cuda(non_blocking=True))  # 'async' became a reserved word in Python 3.7; use non_blocking
        outputs = model(inputs)

        # calculate loss
        outputs_coord = outputs
        loss = criterion(outputs_coord, targets)

        losses.update(loss.item(), inputs.size(0))

        tars = targets

        # calculate
        targets_unnorm = data_process.unNormalizeData(tars.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'], stat_3d['dim_use'])
        outputs_unnorm = data_process.unNormalizeData(outputs.data.cpu().numpy(), stat_3d['mean'], stat_3d['std'], stat_3d['dim_use'])

        # remove dim ignored
        dim_use = np.hstack((np.arange(3), stat_3d['dim_use']))

        outputs_use = outputs_unnorm[:, dim_use]
        targets_use = targets_unnorm[:, dim_use]

        if procrustes:
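            # rigid-align each prediction to its ground truth (Procrustes: scale, rotation, translation) before measuring error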
            for ba in range(inps.size(0)):
                gt = targets_use[ba].reshape(-1, 3)
                out = outputs_use[ba].reshape(-1, 3)
                _, Z, T, b, c = get_transformation(gt, out, True)
                out = (b * out.dot(T)) + c
                outputs_use[ba, :] = out.reshape(1, 51)

        sqerr = (outputs_use - targets_use) ** 2
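        # per-joint Euclidean error over the 17 joints (presumably in millimetres)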

        distance = np.zeros((sqerr.shape[0], 17))
        dist_idx = 0
        for k in np.arange(0, 17 * 3, 3):
            distance[:, dist_idx] = np.sqrt(np.sum(sqerr[:, k:k + 3], axis=1))
            dist_idx += 1
        all_dist.append(distance)

        # update summary
        if (i + 1) % 100 == 0:
            batch_time = time.time() - start
            start = time.time()

        bar.suffix = '({batch}/{size}) | batch: {batchtime:.4}ms | Total: {ttl} | ETA: {eta:} | loss: {loss:.6f}' \
            .format(batch=i + 1,
                    size=len(test_loader),
                    batchtime=batch_time * 10.0,
                    ttl=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg)
        bar.next()

    all_dist = np.vstack(all_dist)
    joint_err = np.mean(all_dist, axis=0)
    ttl_err = np.mean(all_dist)
    bar.finish()
    print(">>> error: {} <<<".format(ttl_err))
    return losses.avg, ttl_err
Code Example #10
File: gen_sikdata_offline.py  Project: lixiny/bihand
def validate(val_loader, model, criterion, args, stop=-1):
    # switch to evaluate mode
    evaluator = EvalUtil()
    model.eval()
    bar = Bar(colored('Eval', 'yellow'), max=len(val_loader))
    jointImpl_ = []
    jointGt_ = []
    with torch.no_grad():
        for i, metas in enumerate(val_loader):
            results, targets, _1, _2 = one_forward_pass(
                metas, model, criterion, args=None, train=False
            )
            joint_root = targets['joint_root'].unsqueeze(1)  # (B, 1, 3)
            predjoint = results['l_joint'][-1]  # (B, 21, 3)
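            # subtract the root joint to obtain root-relative coordinates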
            predjointR = predjoint - joint_root
            predjointR = np.array(predjointR.detach().cpu())

            jointImpl_.append(predjointR)

            targjointR = np.array(targets['jointR'].detach().cpu())
            jointGt_.append(targjointR)

            for targj, predj in zip(targjointR, predjointR):
                evaluator.feed(targj * 1000.0, predj * 1000.0)

            pck20 = evaluator.get_pck_all(20)
            pck30 = evaluator.get_pck_all(30)
            pck40 = evaluator.get_pck_all(40)
            bar.suffix = (
                '({batch}/{size}) '
                't: {total:}s | '
                'eta:{eta:}s | '
                'pck20avg: {pck20:.3f} | '
                'pck30avg: {pck30:.3f} | '
                'pck40avg: {pck40:.3f} | '
            ).format(
                batch=i + 1,
                size=len(val_loader),
                total=bar.elapsed_td,
                eta=bar.eta_td,
                pck20=pck20,
                pck30=pck30,
                pck40=pck40,
            )
            bar.next()
    bar.finish()
    (
        _1, _2, _3,
        auc_all,
        pck_curve_all,
        thresholds
    ) = evaluator.get_measures(
        20, 50, 20
    )
    print("AUC all: {}".format(auc_all))
    jointGt_ = np.concatenate(jointGt_, axis=0)
    jointImpl_ = np.concatenate(jointImpl_, axis=0)
    saving = {
        'jointGt_': jointGt_,
        'jointImpl_': jointImpl_
    }
    return saving
Code Example #11
File: rhd.py  Project: flybiubiu/bihand
    def __init__(
        self,
        data_root="/disk1/data/RHD/RHD_published_v2",
        data_split='train',
        hand_side='right',
        njoints=21,
        use_cache=True,
    ):

        if not os.path.exists(data_root):
            raise ValueError("data_root: %s not exist" % data_root)
        self.name = 'rhd'
        self.data_split = data_split
        self.hand_side = hand_side
        self.clr_paths = []
        self.dep_paths = []
        self.mask_paths = []
        self.joints = []
        self.kp2ds = []
        self.centers = []
        self.scales = []
        self.sides = []
        self.intrs = []
        self.njoints = njoints  # total 21 hand parts
        self.reslu = [320, 320]

        self.root_id = snap_joint_name2id['loc_bn_palm_L']  # 0
        self.mid_mcp_id = snap_joint_name2id['loc_bn_mid_L_01']  # 9

        # [train|test|val|train_val|all]
        if data_split == 'train':
            self.sequence = [
                'training',
            ]
        elif data_split == 'test':
            self.sequence = [
                'evaluation',
            ]
        elif data_split == 'val':
            self.sequence = [
                'evaluation',
            ]
        elif data_split == 'train_val':
            self.sequence = [
                'training',
            ]
        elif data_split == 'all':
            self.sequence = ['training', 'evaluation']
        else:
            raise ValueError(
                "split {} not in [train|test|val|train_val|all]".format(
                    data_split))

        self.cache_folder = os.path.join(CACHE_HOME, "bihand-train", "rhd")
        os.makedirs(self.cache_folder, exist_ok=True)
        cache_path = os.path.join(self.cache_folder,
                                  "{}.pkl".format(self.data_split))
        if os.path.exists(cache_path) and use_cache:
            with open(cache_path, "rb") as fid:
                annotations = pickle.load(fid)
                self.sides = annotations["sides"]
                self.clr_paths = annotations["clr_paths"]
                self.dep_paths = annotations["dep_paths"]
                self.mask_paths = annotations["mask_paths"]
                self.joints = annotations["joints"]
                self.kp2ds = annotations["kp2ds"]
                self.intrs = annotations["intrs"]
                self.centers = annotations["centers"]
                self.scales = annotations["scales"]
            print("rhd {} gt loaded from {}".format(self.data_split,
                                                    cache_path))
            return

        datapath_list = [os.path.join(data_root, seq) for seq in self.sequence]
        annoname_list = ["anno_{}.pickle".format(seq) for seq in self.sequence]
        anno_list = [
            os.path.join(datapath, annoname) \
            for datapath, annoname in zip(datapath_list, annoname_list)
        ]
        clr_root_list = [
            os.path.join(datapath, "color") for datapath in datapath_list
        ]
        dep_root_list = [
            os.path.join(datapath, "depth") for datapath in datapath_list
        ]
        mask_root_list = [
            os.path.join(datapath, "mask") for datapath in datapath_list
        ]

        print("init RHD {}, It will take a while at first time".format(
            data_split))
        for anno, clr_root, dep_root, mask_root in zip(
                anno_list, clr_root_list, dep_root_list, mask_root_list):

            with open(anno, 'rb') as fi:
                rawdatas = pickle.load(fi)

            bar = Bar('RHD', max=len(rawdatas))
            for i in range(len(rawdatas)):
                raw = rawdatas[i]
                rawkp2d = raw['uv_vis'][:, :2]  # kp 2d left & right hand
                rawvis = raw['uv_vis'][:, 2]
                rawjoint = raw['xyz']  # x, y, z coordinates of the keypoints, in meters
                rawintr = raw['K']
                ''' "both" means left, right'''
                kp2dboth = [
                    rawkp2d[:21][rhd_to_snap_id, :],
                    rawkp2d[21:][rhd_to_snap_id, :]
                ]
                visboth = [
                    rawvis[:21][rhd_to_snap_id], rawvis[21:][rhd_to_snap_id]
                ]
                jointboth = [
                    rawjoint[:21][rhd_to_snap_id, :],
                    rawjoint[21:][rhd_to_snap_id, :]
                ]
                intrboth = [rawintr, rawintr]
                sideboth = ['l', 'r']

                maskpth = os.path.join(mask_root, '%.5d.png' % i)
                mask = Image.open(maskpth).convert("RGB")
                mask = np.array(mask)[:, :, 2:]
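                # mask ids 2-17 are left-hand parts, 18-33 right-hand parts; relabel them to 128 / 255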
                id_left = [i for i in range(2, 18)]
                id_right = [i for i in range(18, 34)]
                np.putmask(
                    mask,
                    np.logical_and(mask >= id_left[0], mask <= id_left[-1]),
                    128)
                np.putmask(
                    mask,
                    np.logical_and(mask >= id_right[0], mask <= id_right[-1]),
                    255)
                area_left = np.sum(mask == 128)
                area_right = np.sum(mask == 255)
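                # the hand with the larger visible mask area is treated as the visible one; only it is kept below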
                vis_side = 'l' if area_left > area_right else 'r'

                for kp2d, vis, joint, side, intr \
                        in zip(kp2dboth, visboth, jointboth, sideboth, intrboth):
                    vis_sum = vis.sum()
                    if side != vis_side:
                        continue
                    clrpth = os.path.join(clr_root, '%.5d.png' % i)
                    deppth = os.path.join(dep_root, '%.5d.png' % i)
                    maskpth = os.path.join(mask_root, '%.5d.png' % i)
                    name = '%.5d' % i + side
                    self.clr_paths.append(clrpth)
                    self.dep_paths.append(deppth)
                    self.mask_paths.append(maskpth)
                    self.sides.append(side)

                    joint = joint[np.newaxis, :, :]
                    self.joints.append(joint)
                    center = handutils.get_annot_center(kp2d)
                    scale = handutils.get_annot_scale(kp2d)
                    kp2d = kp2d[np.newaxis, :, :]
                    self.kp2ds.append(kp2d)
                    center = center[np.newaxis, :]
                    self.centers.append(center)
                    scale = (np.atleast_1d(scale))[np.newaxis, :]
                    self.scales.append(scale)
                    intr = intr[np.newaxis, :]
                    self.intrs.append(intr)

                bar.suffix = ('({n}/{all}), total:{t:}s, eta:{eta:}s').format(
                    n=i + 1,
                    all=len(rawdatas),
                    t=bar.elapsed_td,
                    eta=bar.eta_td)
                bar.next()
            bar.finish()
        self.joints = np.concatenate(self.joints, axis=0).astype(
            np.float32)  # (59629, 21, 3)
        self.kp2ds = np.concatenate(self.kp2ds, axis=0).astype(
            np.float32)  # (59629, 21, 2)
        self.centers = np.concatenate(self.centers, axis=0).astype(
            np.float32)  # (59629, 2)
        self.scales = np.concatenate(self.scales, axis=0).astype(
            np.float32)  # (59629, 1)
        self.intrs = np.concatenate(self.intrs,
                                    axis=0).astype(np.float32)  # (59629, 3, 3)

        if use_cache:
            full_info = {
                "sides": self.sides,
                "clr_paths": self.clr_paths,
                "dep_paths": self.dep_paths,
                "mask_paths": self.mask_paths,
                "joints": self.joints,
                "kp2ds": self.kp2ds,
                "intrs": self.intrs,
                "centers": self.centers,
                "scales": self.scales,
            }
            with open(cache_path, "wb") as fid:
                pickle.dump(full_info, fid)
                print("Wrote cache for dataset rhd {} to {}".format(
                    self.data_split, cache_path))
        return