Example #1
    def validation_step(self, batch, batch_idx):

        drec = self(batch['pose_body'].view(-1, 63))

        loss = self._compute_loss(batch, drec)
        val_loss = loss['unweighted_loss']['loss_total']

        if self.renderer is not None and self.global_rank == 0 and batch_idx % 500 == 0 and np.random.rand() > 0.5:
            out_fname = makepath(
                self.work_dir,
                'renders/vald_rec_E{:03d}_It{:04d}_val_loss_{:.2f}.png'.format(
                    self.current_epoch, batch_idx, val_loss.item()),
                isfile=True)
            self.renderer([batch, drec], out_fname=out_fname)
            dgen = self.vp_model.sample_poses(
                self.vp_ps.logging.num_bodies_to_display)
            out_fname = makepath(self.work_dir,
                                 'renders/vald_gen_E{:03d}_I{:04d}.png'.format(
                                     self.current_epoch, batch_idx),
                                 isfile=True)
            self.renderer([dgen], out_fname=out_fname)

        progress_bar = {'v2v': val_loss}
        return {
            'val_loss': c2c(val_loss),
            'progress_bar': progress_bar,
            'log': progress_bar
        }
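makepath from human_body_prior.tools.omni_tools is used throughout these examples. A minimal sketch of its assumed behavior, inferred from how it is called here (not the library's verbatim source): join the path parts, create the missing directory (or the file's parent directory when isfile=True), and return the joined path.

import os

def makepath_sketch(*args, isfile=False):
    # join the parts into one path, mirroring the assumed makepath semantics
    path = os.path.join(*args)
    # with isfile=True only the parent directory is created, not the file itself
    dirname = os.path.dirname(path) if isfile else path
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname, exist_ok=True)
    return path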
Example #2
def smpl_params2ply(bm, out_dir, pose_body, pose_hand=None, trans=None, betas=None, root_orient=None):
    '''
    :param bm: pytorch body model with batch_size 1
    :param pose_body: a single list of pose parameters, or a list of lists of pose parameters
    :param trans: Nx3
    :param betas: Nxnum_betas
    :return: dumps each frame as a ply mesh file
    '''

    faces = c2c(bm.f)

    makepath(out_dir)

    for fIdx in range(0, len(pose_body)):

        bm.pose_body.data[0,:] = bm.pose_body.new(pose_body[fIdx].reshape(1,-1))
        if pose_hand is not None: bm.pose_hand.data[0,:] = bm.pose_hand.new(pose_hand[fIdx].reshape(1,-1))
        if trans is not None: bm.trans.data[0,:] = bm.trans.new(trans[fIdx].reshape(1,-1))
        if betas is not None: bm.betas.data[0,:len(betas[fIdx])] = bm.betas.new(betas[fIdx])
        if root_orient is not None: bm.root_orient.data[0,:] = bm.root_orient.new(root_orient[fIdx])

        v = c2c(bm.forward().v)[0]

        mesh = trimesh.base.Trimesh(v, faces)
        mesh.export(os.path.join(out_dir, '%03d.ply' % fIdx))
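A minimal usage sketch for smpl_params2ply; the model path is hypothetical and the call assumes human_body_prior's BodyModel loaded with batch_size 1, as the docstring requires.

import numpy as np
from human_body_prior.body_model.body_model import BodyModel

bm = BodyModel('PATH_TO_SMPLH_MODEL.npz')  # hypothetical model file, batch_size 1
pose_body = np.zeros((5, 63), dtype=np.float32)  # five frames of the neutral body pose
smpl_params2ply(bm, out_dir='dumps/ply', pose_body=pose_body)  # writes 000.ply .. 004.ply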
Example #3
def dump_amass2pytroch(datasets, amass_dir, out_posepath, logger = None, rnd_seed = 100, keep_rate = 0.01):
    '''
    Select a random number of frames from the central 80 percent of each mocap sequence.
    Save individual data features like pose and shape per frame in pytorch pt files.
    The test set will have an extra field for the original markers.

    :param datasets: a list of dataset names
    :param amass_dir: directory of downloaded amass npz files. should be in this structure: path/datasets/subjects/*_poses.npz
    :param out_posepath: the path for the final pose.pt file
    :param logger: an instance of human_body_prior.tools.omni_tools.log2file
    :param rnd_seed: random seed
    :return: Number of datapoints dumped using the out_posepath address pattern
    '''
    import glob

    np.random.seed(rnd_seed)

    makepath(out_posepath, isfile=True)

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(out_posepath.replace('pose.pt', '%s.log' % (log_name)))
        logger('Creating pytorch dataset at %s' % out_posepath)

    data_pose = []
    data_dmpl = []
    data_betas = []
    data_gender = []
    data_trans = []

    for ds_name in datasets:
        npz_fnames = glob.glob(os.path.join(amass_dir, ds_name, '*/*_poses.npz'))
        logger('randomly selecting data points from %s.' % (ds_name))
        for npz_fname in tqdm(npz_fnames):
            try:
                cdata = np.load(npz_fname)
            except Exception:
                logger('Could not read %s! skipping..'%npz_fname)
                continue
            N = len(cdata['poses'])

            # skip the first and last 10% of each sequence to avoid repetitive initial poses
            cdata_ids = np.random.choice(list(range(int(0.1 * N), int(0.9 * N), 1)),
                                         int(keep_rate * 0.8 * N), replace=False)
            if len(cdata_ids)<1: continue

            data_pose.extend(cdata['poses'][cdata_ids].astype(np.float32))
            data_dmpl.extend(cdata['dmpls'][cdata_ids].astype(np.float32))
            data_trans.extend(cdata['trans'][cdata_ids].astype(np.float32))
            data_betas.extend(np.repeat(cdata['betas'][np.newaxis].astype(np.float32), repeats=len(cdata_ids), axis=0))
            data_gender.extend([gdr2num[str(cdata['gender'].astype(str))] for _ in cdata_ids])

    assert len(data_pose) != 0

    torch.save(torch.tensor(np.asarray(data_pose, np.float32)), out_posepath)
    torch.save(torch.tensor(np.asarray(data_dmpl, np.float32)), out_posepath.replace('pose.pt', 'dmpl.pt'))
    torch.save(torch.tensor(np.asarray(data_betas, np.float32)), out_posepath.replace('pose.pt', 'betas.pt'))
    torch.save(torch.tensor(np.asarray(data_trans, np.float32)), out_posepath.replace('pose.pt', 'trans.pt'))
    torch.save(torch.tensor(np.asarray(data_gender, np.int32)), out_posepath.replace('pose.pt', 'gender.pt'))

    return len(data_pose)
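A call sketch with hypothetical paths; it assumes the module-level gdr2num gender mapping used above is in scope. Note that out_posepath must end in 'pose.pt', since the companion tensors (dmpl.pt, betas.pt, trans.pt, gender.pt) are derived from it by string replacement.

n_frames = dump_amass2pytroch(['HumanEva'], 'PATH_TO_AMASS_NPZ_ROOT',
                              'data/stage_I/train/pose.pt', keep_rate=0.01)
print('dumped %d frames' % n_frames)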
Example #4
def registration2markers(registration_dir, out_marker_dir):
    np.random.seed(100)
    m2b_distance = 0.0095

    genders = {
        '50002': 'male',
        '50004': 'female',
        '50007': 'male',
        '50009': 'male',
        '50020': 'female',
        '50021': 'female',
        '50022': 'female',
        '50025': 'female',
        '50026': 'male',
        '50027': 'male'
    }

    with open('./ssm_all_marker_placements.json') as f:
        all_marker_placements = json.load(f)
    all_mrks_keys = list(all_marker_placements.keys())

    for dfaust_subject in genders.keys():
        subject_reg_pkls = glob.glob(
            os.path.join(registration_dir, dfaust_subject, '*.pkl'))

        chosen_k = all_mrks_keys[np.random.choice(len(all_marker_placements))]
        chosen_marker_set = all_marker_placements[chosen_k]
        print('chose %s markerset for dfaust subject %s' %
              (chosen_k, dfaust_subject))
        for reg_pkl in subject_reg_pkls:
            with open(reg_pkl, 'rb') as f:
                data = pickle.load(f, encoding='latin-1')

            marker_data = np.zeros([len(data['v']), len(chosen_marker_set), 3])

            cur_m2b_distance = m2b_distance + abs(
                np.random.normal(0, m2b_distance / 3., size=[3]))  # noise in 3D

            for fIdx in range(0, len(data['v'])):
                vertices = rotate_mesh(data['v'][fIdx].copy(), 90)
                vn = c2c(
                    compute_vertex_normal(torch.Tensor(vertices),
                                          torch.Tensor(data['f'])))

                for mrk_id, vid in enumerate(chosen_marker_set.values()):
                    marker_data[fIdx, mrk_id] = vertices[vid] + cur_m2b_distance * vn[vid]

            outpath = makepath(os.path.join(out_marker_dir, dfaust_subject,
                                            os.path.basename(reg_pkl)),
                               isfile=True)
            np.savez(
                outpath, **{
                    'markers': marker_data,
                    'labels': list(chosen_marker_set.keys()),
                    'frame_rate': 60,
                    'gender': genders[dfaust_subject]
                })
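A usage sketch with hypothetical directories, plus reading one of the generated marker files back; the npz keys follow the np.savez call above, and the function expects ./ssm_all_marker_placements.json next to the script.

registration2markers('PATH_TO_DFAUST_REGISTRATIONS', 'out/markers')

import numpy as np
data = np.load('out/markers/50002/some_sequence.npz')  # hypothetical file name
markers = data['markers']  # num_frames x num_markers x 3 marker positions
labels = data['labels']    # marker names from the chosen marker set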
Example #5
    def render_once(body_parms,
                    body_colors=[colors['grey'], colors['brown-light']],
                    out_fname=None):
        '''
        :param body_parms: list of dictionaries of body parameters.
        :param body_colors: list of np arrays of rgb color values
        :param out_fname: output image path
        :return:
        '''

        if out_fname is not None: makepath(out_fname, isfile=True)
        assert len(body_parms) <= len(body_colors), ValueError(
            'Not enough colors provided for #{} body_parms'.format(
                len(body_parms)))

        bs = body_parms[0]['pose_body'].shape[0]

        body_ids = np.random.choice(bs, num_bodies_to_display)

        body_evals = [
            c2c(
                bm(root_orient=v['root_orient'].view(bs, -1) if 'root_orient'
                   in v else torch.zeros(bs, 3).type_as(v['pose_body']),
                   pose_body=v['pose_body'].contiguous().view(bs, -1)).v)
            for v in body_parms
        ]
        num_verts = body_evals[0].shape[1]

        render_meshes = []
        for bId in body_ids:
            concat_cur_meshes = None
            for body, body_color in zip(body_evals, body_colors):
                cur_body_mesh = Mesh(body[bId],
                                     faces,
                                     vertex_colors=np.ones([num_verts, 3]) *
                                     body_color)
                concat_cur_meshes = cur_body_mesh if concat_cur_meshes is None else mesh_cat(
                    concat_cur_meshes, cur_body_mesh)
            render_meshes.append(concat_cur_meshes)

        img = renderer(render_meshes)

        if out_fname is not None: imagearray2file(img, out_fname, fps=10)

        return
Example #6
    def perform_training(self, num_epochs=None, message=None):
        starttime = datetime.now().replace(microsecond=0)
        if num_epochs is None: num_epochs = self.ps.num_epochs

        self.logger(
            'Started Training at %s for %d epochs' % (datetime.strftime(starttime, '%Y-%m-%d_%H:%M:%S'), num_epochs))

        vis_bm =  BodyModel(self.ps.bm_path, 'smplh', num_betas=16).to(self.comp_device)
        prev_lr = np.inf
        scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=int(num_epochs // 3), gamma=0.5)
        for epoch_num in range(1, num_epochs + 1):
            scheduler.step()
            cur_lr = self.optimizer.param_groups[0]['lr']
            if cur_lr != prev_lr:
                self.logger('--- Optimizer learning rate changed from %.2e to %.2e ---' % (prev_lr, cur_lr))
                prev_lr = cur_lr
            self.epochs_completed += 1
            train_loss_dict = self.train()
            eval_loss_dict = self.evaluate()

            with torch.no_grad():
                eval_msg = VPoserTrainer.creat_loss_message(eval_loss_dict, expr_code=self.ps.expr_code,
                                                            epoch_num=self.epochs_completed, it=len(self.ds_val),
                                                            try_num=self.try_num, mode='evald')
                if eval_loss_dict['loss_total'] < self.best_loss_total:
                    self.ps.best_model_fname = makepath(os.path.join(self.ps.work_dir, 'snapshots', 'TR%02d_E%03d.pt' % (
                    self.try_num, self.epochs_completed)), isfile=True)
                    self.logger(eval_msg + ' ** ')
                    self.best_loss_total = eval_loss_dict['loss_total']
                    torch.save(self.vposer_model.module.state_dict() if isinstance(self.vposer_model, torch.nn.DataParallel) else self.vposer_model.state_dict(), self.ps.best_model_fname)

                    imgname = '[%s]_TR%02d_E%03d.png' % (self.ps.expr_code, self.try_num, self.epochs_completed)
                    imgpath = os.path.join(self.ps.work_dir, 'images', imgname)
                    try:
                        VPoserTrainer.vis_results(self.vis_dorig, self.vposer_model, bm=vis_bm, imgpath=imgpath)
                    except Exception:
                        print('The visualization failed.')
                else:
                    self.logger(eval_msg)

                self.swriter.add_scalars('total_loss/scalars', {'train_loss_total': train_loss_dict['loss_total'],
                                                                'evald_loss_total': eval_loss_dict['loss_total'], },
                                         self.epochs_completed)

            # if early_stopping(eval_loss_dict['loss_total']):
            #     self.logger("Early stopping at epoch %d"%self.epochs_completed)
            #     break

        endtime = datetime.now().replace(microsecond=0)

        self.logger('Finished Training at %s\n' % (datetime.strftime(endtime, '%Y-%m-%d_%H:%M:%S')))
        self.logger(
            'Training done in %s! Best val total loss achieved: %.2e\n' % (endtime - starttime, self.best_loss_total))
        self.logger('Best model path: %s\n' % self.ps.best_model_fname)
Example #7
def train_vposer_once(_config):

    resume_training_if_possible = True

    model = VPoserTrainer(_config)
    model.vp_ps.logging.expr_msg = create_expr_message(model.vp_ps)
    # model.text_logger(model.vp_ps.logging.expr_msg.replace(". ", '.\n'))
    dump_config(model.vp_ps,
                osp.join(model.work_dir, '{}.yaml'.format(model.expr_id)))

    logger = TensorBoardLogger(model.work_dir, name='tensorboard')
    lr_monitor = LearningRateMonitor()

    snapshots_dir = osp.join(model.work_dir, 'snapshots')
    checkpoint_callback = ModelCheckpoint(
        dirpath=makepath(snapshots_dir, isfile=True),
        filename="%s_{epoch:02d}_{val_loss:.2f}" % model.expr_id,
        save_top_k=1,
        verbose=True,
        monitor='val_loss',
        mode='min',
    )
    early_stop_callback = EarlyStopping(
        **model.vp_ps.train_parms.early_stopping)

    resume_from_checkpoint = None
    if resume_training_if_possible:
        available_ckpts = sorted(glob.glob(osp.join(snapshots_dir, '*.ckpt')),
                                 key=os.path.getmtime)
        if len(available_ckpts) > 0:
            resume_from_checkpoint = available_ckpts[-1]
            model.text_logger(
                'Resuming the training from {}'.format(resume_from_checkpoint))

    trainer = pl.Trainer(
        gpus=1,
        weights_summary='top',
        distributed_backend='ddp',
        # replace_sampler_ddp=False,
        # accumulate_grad_batches=4,
        # profiler=False,
        # overfit_batches=0.05,
        # fast_dev_run = True,
        # limit_train_batches=0.02,
        # limit_val_batches=0.02,
        # num_sanity_val_steps=2,
        plugins=[DDPPlugin(find_unused_parameters=False)],
        callbacks=[lr_monitor, early_stop_callback, checkpoint_callback],
        max_epochs=model.vp_ps.train_parms.num_epochs,
        logger=logger,
        resume_from_checkpoint=resume_from_checkpoint)

    trainer.fit(model)
Example #8
    def prepare_data(self):
        ''' Similar to the standard AMASS dataset preparation pipeline:
        Download the npz files corresponding to body data from https://amass.is.tue.mpg.de/ and place them under amass_dir
        '''
        self.text_logger = log2file(makepath(self.work_dir,
                                             '{}.log'.format(self.expr_id),
                                             isfile=True),
                                    prefix=self._log_prefix)

        prepare_vposer_datasets(self.dataset_dir,
                                self.vp_ps.data_parms.amass_splits,
                                self.vp_ps.data_parms.amass_dir,
                                logger=self.text_logger)
Example #9
def dump_vposer_samples(bm, pose_body, out_imgpath=False, save_ply=False):
    '''
    :param bm: the BodyModel instance
    :param pose_body: Nx63 body pose parameters to pose the body with
    :param out_imgpath: the final png path
    :param save_ply: if True, also dump the posed bodies as ply files
    :return:
    '''

    view_angles = [0, 90, -90]
    imw, imh = 400, 400
    mv = MeshViewer(width=imw, height=imh, use_offscreen=True)

    images = np.zeros([len(view_angles), len(pose_body), 1, imw, imh, 3])
    for cId in range(0, len(pose_body)):

        bm.pose_body.data[:] = bm.pose_body.new(pose_body[cId].reshape(-1))

        body_mesh = trimesh.Trimesh(vertices=c2c(bm().v[0]),
                                    faces=c2c(bm.f),
                                    vertex_colors=np.tile(
                                        colors['grey'], (6890, 1)))

        for rId, angle in enumerate(view_angles):
            apply_mesh_tranfsormations_(
                [body_mesh],
                trimesh.transformations.rotation_matrix(
                    np.radians(angle), (0, 1, 0)))
            mv.set_meshes([body_mesh], group_name='static')
            images[rId, cId, 0] = mv.render()
            apply_mesh_tranfsormations_(
                [body_mesh],
                trimesh.transformations.rotation_matrix(
                    np.radians(-angle), (0, 1, 0)))

    if out_imgpath:
        imagearray2file(images, out_imgpath)

        np.savez(out_imgpath.replace('.png', '.npz'), pose=pose_body)

        if save_ply:
            im_id = os.path.basename(out_imgpath).split('.')[0]
            out_dir = makepath(
                os.path.join(os.path.dirname(out_imgpath), '%s_ply' % im_id))
            smpl_params2ply(bm, out_dir=out_dir, pose_body=pose_body)

        print('Saved image: %s' % out_imgpath)

    return images
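A sketch of driving dump_vposer_samples with a body model and a few poses; the model path is hypothetical, and the Nx63 pose shape follows the docstring (the hard-coded 6890 vertex count above assumes a SMPL-family body).

import torch
from human_body_prior.body_model.body_model import BodyModel

bm = BodyModel('PATH_TO_SMPLH_MODEL.npz')     # hypothetical model file
pose_body = torch.randn(4, 63).numpy() * 0.1  # four mildly perturbed body poses
images = dump_vposer_samples(bm, pose_body, out_imgpath='renders/samples.png')
print(images.shape)  # (3 view angles, 4 poses, 1, 400, 400, 3)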
Example #10
def extract_weights_asnumpy(exp_id, vp_model=False):
    from human_body_prior.tools.omni_tools import makepath
    from human_body_prior.tools.omni_tools import copy2cpu as c2c

    vposer_pt, vposer_ps = load_vposer(exp_id, vp_model=vp_model)

    save_wt_dir = makepath(os.path.join(vposer_ps.work_dir, 'weights_npy'))

    weights = {}
    for var_name, var in vposer_pt.named_parameters():
        weights[var_name] = c2c(var)
    np.savez(os.path.join(save_wt_dir, 'vposerWeights.npz'), **weights)

    print('Dumped weights as numpy arrays to %s' % save_wt_dir)
    return vposer_ps, weights
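Reading the dumped weights back is a plain npz load (hypothetical path):

import numpy as np

weights = np.load('SOME_WORK_DIR/weights_npy/vposerWeights.npz')
for name in weights.files:
    print(name, weights[name].shape)  # one array per named model parameter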
Example #11
def sample_vposer(expr_dir, bm, num_samples=5, vp_model='snapshot'):
    from human_body_prior.tools.omni_tools import id_generator, makepath
    from human_body_prior.tools.model_loader import load_vposer
    from human_body_prior.tools.omni_tools import copy2cpu

    vposer_pt, ps = load_vposer(expr_dir, vp_model=vp_model)

    sampled_pose_body = copy2cpu(vposer_pt.sample_poses(num_poses=num_samples))

    out_dir = makepath(
        os.path.join(ps.work_dir, 'evaluations', 'pose_generation'))
    out_imgpath = os.path.join(out_dir, '%s.png' % id_generator(6))

    dump_vposer_samples(bm, sampled_pose_body, out_imgpath)
    print('Dumped samples at %s' % out_dir)
    return sampled_pose_body
Example #12
    def on_train_start(self):
        if self.global_rank != 0: return
        self.train_starttime = dt.now().replace(microsecond=0)

        ######## make a backup of vposer
        git_repo_dir = os.path.abspath(__file__).split('/')
        git_repo_dir = '/'.join(
            git_repo_dir[:git_repo_dir.index('human_body_prior') + 1])
        starttime = dt.strftime(self.train_starttime, '%Y_%m_%d_%H_%M_%S')
        archive_path = makepath(self.work_dir,
                                'code',
                                'vposer_{}.tar.gz'.format(starttime),
                                isfile=True)
        cmd = 'cd %s && git ls-files -z | xargs -0 tar -czf %s' % (
            git_repo_dir, archive_path)
        os.system(cmd)
        ########
        self.text_logger(
            'Created a git archive backup at {}'.format(archive_path))
        dump_config(self.vp_ps,
                    osp.join(self.work_dir, '{}.yaml'.format(self.expr_id)))
Example #13
    def __init__(self, _config):
        super(VPoserTrainer, self).__init__()

        _support_data_dir = get_support_data_dir()

        vp_ps = load_config(**_config)

        make_deterministic(vp_ps.general.rnd_seed)

        self.expr_id = vp_ps.general.expr_id
        self.dataset_id = vp_ps.general.dataset_id

        self.work_dir = vp_ps.logging.work_dir = makepath(
            vp_ps.general.work_basedir, self.expr_id)
        self.dataset_dir = vp_ps.logging.dataset_dir = osp.join(
            vp_ps.general.dataset_basedir, vp_ps.general.dataset_id)

        self._log_prefix = '[{}]'.format(self.expr_id)
        self.text_logger = log2file(prefix=self._log_prefix)

        self.seq_len = vp_ps.data_parms.num_timeseq_frames

        self.vp_model = VPoser(vp_ps)

        with torch.no_grad():
            self.bm_train = BodyModel(vp_ps.body_model.bm_fname)

        if vp_ps.logging.render_during_training:
            self.renderer = vposer_trainer_renderer(
                self.bm_train, vp_ps.logging.num_bodies_to_display)
        else:
            self.renderer = None

        self.example_input_array = {
            'pose_body': torch.ones(vp_ps.train_parms.batch_size, 63),
        }
        self.vp_ps = vp_ps
Example #14
import os
from human_body_prior.tools.omni_tools import makepath, log2file
from human_body_prior.data.prepare_data import prepare_vposer_datasets
from datetime import datetime

expr_code = datetime.now().strftime("%d %m %Y %H:%M:%S")

amass_dir = r'/content/drive/My Drive/LAZAR/AMASS'

vposer_datadir = makepath('prepared/%s' % (expr_code))

logger = log2file(os.path.join(vposer_datadir, '%s.log' % (expr_code)))
logger('[%s] Preparing data for training VPoser.'%expr_code)

amass_splits = {
    'vald': ['HumanEva', 'MPIHDM05', 'SFU', 'MPImosh'],
    'test': ['Transitions_mocap', 'SSMsynced'],
    'train': ['CMU', 'MPILimits', 'TotalCapture', 'EyesJapanDataset', 'KIT', 'BMLrub', 'EKUT', 'TCDhandMocap', 'ACCAD']
}
amass_splits['train'] = list(set(amass_splits['train']).difference(set(amass_splits['test'] + amass_splits['vald'])))

prepare_vposer_datasets(amass_splits, amass_dir, vposer_datadir, logger=logger)
Example #15
    def __init__(self, work_dir, ps):
        from tensorboardX import SummaryWriter

        from human_body_prior.data.dataloader import VPoserDS

        self.pt_dtype = torch.float64 if ps.fp_precision == '64' else torch.float32

        torch.manual_seed(ps.seed)

        ps.work_dir = makepath(work_dir, isfile=False)

        logger = log2file(os.path.join(work_dir, '%s.log' % ps.expr_code))

        summary_logdir = os.path.join(work_dir, 'summaries')
        self.swriter = SummaryWriter(log_dir=summary_logdir)
        logger('tensorboard --logdir=%s' % summary_logdir)
        logger('Torch Version: %s\n' % torch.__version__)

        shutil.copy2(os.path.realpath(__file__), work_dir)

        use_cuda = torch.cuda.is_available()
        if use_cuda: torch.cuda.empty_cache()
        self.comp_device = torch.device(
            "cuda:%d" % ps.cuda_id if torch.cuda.is_available() else "cpu")

        logger('%d CUDAs available!' % torch.cuda.device_count())

        gpu_brand = torch.cuda.get_device_name(ps.cuda_id) if use_cuda else None
        if use_cuda:
            logger('Training with %s [%s]' % (self.comp_device, gpu_brand))
        else:
            logger('Training on CPU!!!')
        logger('Base dataset_dir is %s' % ps.dataset_dir)

        kwargs = {'num_workers': ps.n_workers}
        ds_train = VPoserDS(dataset_dir=os.path.join(ps.dataset_dir, 'train'))
        self.ds_train = DataLoader(ds_train,
                                   batch_size=ps.batch_size,
                                   shuffle=True,
                                   drop_last=True,
                                   **kwargs)
        ds_val = VPoserDS(dataset_dir=os.path.join(ps.dataset_dir, 'vald'))
        self.ds_val = DataLoader(ds_val,
                                 batch_size=ps.batch_size,
                                 shuffle=True,
                                 drop_last=True,
                                 **kwargs)
        ds_test = VPoserDS(dataset_dir=os.path.join(ps.dataset_dir, 'test'))
        self.ds_test = DataLoader(ds_test,
                                  batch_size=ps.batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  **kwargs)
        logger('Train dataset size %.2f M' %
               (len(self.ds_train.dataset) * 1e-6))
        logger('Validation dataset size %d' % len(self.ds_val.dataset))
        logger('Test dataset size %d' % len(self.ds_test.dataset))

        ps.data_shape = list(ds_val[0]['pose_aa'].shape)
        self.vposer_model = VPoser(num_neurons=ps.num_neurons,
                                   latentD=ps.latentD,
                                   data_shape=ps.data_shape,
                                   use_cont_repr=ps.use_cont_repr)

        if ps.use_multigpu:
            self.vposer_model = nn.DataParallel(self.vposer_model)

        self.vposer_model.to(self.comp_device)

        varlist = [var[1] for var in self.vposer_model.named_parameters()]

        params_count = sum(p.numel() for p in varlist if p.requires_grad)
        logger('Total Trainable Parameters Count is %2.2f M.' %
               ((params_count) * 1e-6))

        self.optimizer = optim.Adam(varlist,
                                    lr=ps.base_lr,
                                    weight_decay=ps.reg_coef)

        self.logger = logger
        self.best_loss_total = np.inf
        self.try_num = ps.try_num
        self.epochs_completed = 0
        self.ps = ps

        if ps.best_model_fname is not None:
            if isinstance(self.vposer_model, torch.nn.DataParallel):
                self.vposer_model.module.load_state_dict(
                    torch.load(ps.best_model_fname,
                               map_location=self.comp_device))
            else:
                self.vposer_model.load_state_dict(
                    torch.load(ps.best_model_fname,
                               map_location=self.comp_device))

            logger('Restored model from %s' % ps.best_model_fname)

        chose_ids = np.random.choice(list(range(len(ds_val))),
                                     size=ps.num_bodies_to_display,
                                     replace=False,
                                     p=None)
        data_all = {}
        for id in chose_ids:
            for k, v in ds_val[id].items():
                if k in data_all.keys():
                    data_all[k] = torch.cat([data_all[k], v[np.newaxis]],
                                            dim=0)
                else:
                    data_all[k] = v[np.newaxis]

        self.vis_dorig = {
            k: data_all[k].to(self.comp_device)
            for k in data_all.keys()
        }

        self.bm = BodyModel(self.ps.bm_path,
                            'smplh',
                            batch_size=self.ps.batch_size,
                            use_posedirs=True).to(self.comp_device)
Example #16
def dump_amass2pytroch(datasets,
                       amass_dir,
                       out_dir,
                       split_name,
                       logger=None,
                       rnd_seed=100):
    '''
    Select a random number of frames from the central 80 percent of each mocap sequence

    :param datasets: a list of dataset names
    :param amass_dir: directory of downloaded amass npz files
    :param out_dir: output directory for the pt files
    :param split_name: one of 'train', 'vald', 'test'
    :param logger: an instance of human_body_prior.tools.omni_tools.log2file
    :param rnd_seed: random seed
    :return:
    '''
    import glob
    from tqdm import tqdm

    assert split_name in ['train', 'vald', 'test']
    np.random.seed(rnd_seed)

    makepath(out_dir, isfile=False)

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(os.path.join(out_dir, '%s.log' % (log_name)))
        logger('Creating pytorch dataset at %s' % out_dir)

    if split_name in ['vald', 'test']:
        keep_rate = 0.3  # this should be fixed for vald and test datasets
    elif split_name == 'train':
        keep_rate = 0.3  # 30 percent, which would give you around 3.5 M training data points

    data_pose = []
    data_betas = []
    data_gender = []
    data_trans = []
    data_markers = []

    for ds_name in datasets:
        npz_fnames = glob.glob(os.path.join(amass_dir, ds_name, '*/*.npz'))
        logger('randomly selecting data points from %s.' % (ds_name))
        for npz_fname in tqdm(npz_fnames):
            cdata = np.load(npz_fname)
            N = len(cdata['poses'])

            # skip first and last frames to avoid initial standard poses, e.g. T pose
            cdata_ids = np.random.choice(list(
                range(int(0.1 * N), int(0.9 * N), 1)),
                                         int(keep_rate * 0.8 * N),
                                         replace=False)
            if len(cdata_ids) < 1: continue

            data_pose.extend(cdata['poses'][cdata_ids].astype(np.float32))
            data_trans.extend(cdata['trans'][cdata_ids].astype(np.float32))
            data_betas.extend(
                np.repeat(cdata['betas'][np.newaxis].astype(np.float32),
                          repeats=len(cdata_ids),
                          axis=0))
            data_gender.extend([{
                'male': -1,
                'neutral': 0,
                'female': 1
            }[str(cdata['gender'].astype(str))] for _ in cdata_ids])
            if split_name == 'test':
                data_markers.extend(
                    np.repeat(cdata['betas'][np.newaxis].astype(np.float32),
                              repeats=len(cdata_ids),
                              axis=0))

    outdir = makepath(os.path.join(out_dir, split_name))

    assert len(data_pose) != 0

    outpath = os.path.join(outdir, 'pose.pt')
    torch.save(torch.tensor(np.asarray(data_pose, np.float32)), outpath)

    outpath = os.path.join(outdir, 'betas.pt')
    torch.save(torch.tensor(np.asarray(data_betas, np.float32)), outpath)

    outpath = os.path.join(outdir, 'trans.pt')
    torch.save(torch.tensor(np.asarray(data_trans, np.float32)), outpath)

    outpath = os.path.join(outdir, 'gender.pt')
    torch.save(torch.tensor(np.asarray(data_gender, np.int32)), outpath)

    logger('Len. split %s %d' % (split_name, len(data_pose)))
Example #17
def prepare_vposer_datasets(amass_splits,
                            amass_dir,
                            vposer_datadir,
                            logger=None):

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(os.path.join(vposer_datadir, '%s.log' % (log_name)))
        logger('Creating pytorch dataset at %s' % vposer_datadir)

    stageI_outdir = os.path.join(vposer_datadir, 'stage_I')

    shutil.copy2(sys.argv[0],
                 os.path.join(vposer_datadir, os.path.basename(sys.argv[0])))

    logger('Stage I: Fetch data from AMASS npz files')

    for split_name, datasets in amass_splits.items():
        if os.path.exists(os.path.join(stageI_outdir, split_name, 'pose.pt')):
            continue
        dump_amass2pytroch(datasets,
                           amass_dir,
                           stageI_outdir,
                           split_name=split_name,
                           logger=logger)

    logger(
        'Stage II: augment the data with noise and save it into h5 files to be used in a cross-framework scenario.'
    )
    ## Writing to h5 files is also convenient since appending to existing files is possible
    from torch.utils.data import DataLoader
    import tables as pytables
    from tqdm import tqdm

    class AMASS_ROW(pytables.IsDescription):

        gender = pytables.Int16Col(1)  # encoded gender (int16)
        pose = pytables.Float32Col(52 * 3)  # axis-angle pose (single-precision)
        pose_matrot = pytables.Float32Col(52 * 9)  # rotation-matrix pose (single-precision)
        betas = pytables.Float32Col(16)  # shape coefficients (single-precision)
        trans = pytables.Float32Col(3)  # root translation (single-precision)

    stageII_outdir = makepath(os.path.join(vposer_datadir, 'stage_II'))

    batch_size = 256
    max_num_epochs = 1  # how much augmentation we would get

    for split_name in amass_splits.keys():
        h5_outpath = os.path.join(stageII_outdir, '%s.h5' % split_name)
        if os.path.exists(h5_outpath): continue

        ds = AMASS_Augment(dataset_dir=os.path.join(stageI_outdir, split_name))
        logger('%s has %d data points!' % (split_name, len(ds)))
        dataloader = DataLoader(ds,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=32,
                                drop_last=False)
        with pytables.open_file(h5_outpath, mode="w") as h5file:
            table = h5file.create_table('/', 'data', AMASS_ROW)

            for epoch_num in range(max_num_epochs):
                for bId, bData in tqdm(enumerate(dataloader)):
                    for i in range(len(bData['trans'])):
                        for k in bData.keys():
                            table.row[k] = c2c(bData[k][i])
                        table.row.append()
                    table.flush()

    logger('Stage III: dump everything to final pt files')
    # we use pt files because they can be read from multiple worker threads
    stageIII_outdir = makepath(os.path.join(vposer_datadir, 'stage_III'))

    for split_name in amass_splits.keys():
        h5_filepath = os.path.join(stageII_outdir, '%s.h5' % split_name)
        if not os.path.exists(h5_filepath): continue

        with pytables.open_file(h5_filepath, mode="r") as h5file:
            data = h5file.get_node('/data')
            data_dict = {k: [] for k in data.colnames}
            for id in range(len(data)):
                cdata = data[id]
                for k in data_dict.keys():
                    data_dict[k].append(cdata[k])

        for k, v in data_dict.items():
            outfname = makepath(os.path.join(stageIII_outdir, split_name,
                                             '%s.pt' % k),
                                isfile=True)
            if os.path.exists(outfname): continue
            torch.save(torch.from_numpy(np.asarray(v)), outfname)

    logger('Dumped final pytorch dataset at %s' % stageIII_outdir)
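The stage_II h5 files can be inspected directly with pytables; a minimal read sketch against the AMASS_ROW schema defined above, with a hypothetical path:

import tables as pytables

with pytables.open_file('SOME_DATA_DIR/stage_II/train.h5', mode='r') as h5file:
    data = h5file.get_node('/data')
    print(data.colnames)          # e.g. ['betas', 'gender', 'pose', 'pose_matrot', 'trans']
    print(data[0]['pose'].shape)  # (156,), i.e. 52 joints x 3 axis-angle values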
Example #18
def imagearray2file(img_array, outpath=None, fps=30):
    '''
    :param img_array: numpy array of shape RxCxTxwidthxheightx3
    :param outpath: the path where T images will be dumped, one for each time point in range T
    :param fps: fps of the gif/video file
    :return:
        an image list of length T.
        if outpath is given as a png file, an image is saved for each t in T.
        if outpath is given as a gif file, an animated image with T frames is created.
    '''
    import cv2
    from human_body_prior.tools.omni_tools import makepath

    if outpath is not None: makepath(outpath, isfile=True)

    if not isinstance(img_array, np.ndarray) or img_array.ndim < 6:
        raise ValueError(
            'img_array should be a numpy array of shape RxCxTxwidthxheightx3')

    R, C, T, img_h, img_w, img_c = img_array.shape

    out_images = []
    for tIdx in range(T):
        row_images = []
        for rIdx in range(R):
            col_images = []
            for cIdx in range(C):
                col_images.append(img_array[rIdx, cIdx, tIdx])
            row_images.append(np.hstack(col_images))
        t_image = np.vstack(row_images)
        out_images.append(t_image)

    if outpath is not None:
        if '.png' in outpath:
            for tIdx in range(T):
                if T > 1:
                    cur_outpath = outpath.replace('.png', '_%03d.png' % tIdx)
                else:
                    cur_outpath = outpath
                cv2.imwrite(cur_outpath, out_images[tIdx])
                while not os.path.exists(cur_outpath):
                    continue  # wait until the snapshot is written to the disk
        elif '.gif' in outpath:
            import imageio
            with imageio.get_writer(outpath, mode='I', fps=fps) as writer:
                for tIdx in range(T):
                    img = out_images[tIdx].astype(np.uint8)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    writer.append_data(img)
        elif '.avi' in outpath or '.mp4' in outpath:
            # identical pipeline for avi and mp4; only the codec differs
            fourcc = cv2.VideoWriter_fourcc(
                *('XVID' if '.avi' in outpath else 'mp4v'))
            video = cv2.VideoWriter(outpath, fourcc, fps, (img_w, img_h), True)
            for tIdx in range(T):
                img = out_images[tIdx].astype(np.uint8)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                video.write(img)

            video.release()
            cv2.destroyAllWindows()

    return out_images
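A usage sketch; the array shape follows the RxCxTxwidthxheightx3 contract from the docstring, and the content here is hypothetical noise.

import numpy as np

# a 1-row x 1-column grid with 10 frames of 400x400 RGB noise
imgs = (np.random.rand(1, 1, 10, 400, 400, 3) * 255).astype(np.uint8)
imagearray2file(imgs, 'out/preview.gif', fps=10)  # one animated gif with 10 frames
imagearray2file(imgs, 'out/preview.png')          # ten numbered png files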
Example #19

if __name__ == '__main__':
    # ['CMU', 'Transitions_mocap', 'MPI_Limits', 'SSM_synced', 'TotalCapture', 'Eyes_Japan_Dataset', 'MPI_mosh', 'MPI_HDM05', 'HumanEva', 'ACCAD', 'EKUT', 'SFU', 'KIT', 'H36M', 'TCD_handMocap', 'BML']

    msg = ''' Using the standard AMASS dataset preparation pipeline:
    0) Download all npz files from https://amass.is.tue.mpg.de/
    1) Convert npz files to pytorch-readable pt files.
    2) Either use these files directly, or augment them in parallel and write them into h5 files
    3) [optional] If you have augmented your data, dump the augmented results into final pt files and use them with your dataloader'''

    expr_code = 'VXX_SVXX_TXX'  #VERSION_SUBVERSION_TRY

    amass_dir = 'PATH_TO_DOWNLOADED_NPZFILES/*/*_poses.npz'

    work_dir = makepath('WHERE_YOU_WANT_YOUR_FILE_TO_BE_DUMPED/%s' %
                        (expr_code))

    logger = log2file(os.path.join(work_dir, '%s.log' % (expr_code)))
    logger('[%s] AMASS Data Preparation Began.' % expr_code)
    logger(msg)

    amass_splits = {
        'vald': ['HumanEva', 'MPI_HDM05', 'SFU', 'MPI_mosh'],
        'test': ['Transitions_mocap', 'SSM_synced'],
        'train': [
            'CMU', 'MPI_Limits', 'TotalCapture', 'Eyes_Japan_Dataset', 'KIT',
            'BML', 'EKUT', 'TCD_handMocap'
        ]  #ACCAD
    }
    amass_splits['train'] = list(
        set(amass_splits['train']).difference(
            set(amass_splits['test'] + amass_splits['vald'])))
Example #20
def dump_amass2pytroch(datasets,
                       amass_dir,
                       out_posepath,
                       logger=None,
                       rnd_seed=100,
                       keep_rate=0.01):
    '''
    Select a random number of frames from the central 80 percent of each mocap sequence.
    Save individual data features like pose and shape per frame in pytorch pt files.
    The test set will have an extra field for the original markers.

    :param datasets: a list of dataset names
    :param amass_dir: directory of the downloaded data files. should be in this structure: path/datasets/subjects/*
    :param out_posepath: the path for the final pose.pt file
    :param logger: an instance of human_body_prior.tools.omni_tools.log2file
    :param rnd_seed: random seed
    :return: Number of datapoints dumped using the out_posepath address pattern
    '''
    import glob

    np.random.seed(rnd_seed)

    makepath(out_posepath, isfile=True)

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(
            out_posepath.replace('pose.pt', '%s.log' % (log_name)))
        logger('Creating pytorch dataset at %s' % out_posepath)

    data_pose = []
    data_betas = []
    data_gender = []
    data_trans = []

    data_idx = []
    data_frame = []

    data_tightness = []
    data_outfit = []

    for ds_name in datasets:
        npz_fnames = glob.glob(os.path.join(amass_dir, ds_name, '*/info.mat'))
        logger('randomly selecting data points from %s.' % (ds_name))
        for npz_fname in tqdm(npz_fnames):
            try:
                cdata = loadInfo(npz_fname)
                cdata['idx'] = int(npz_fname.split("/")[-2])
            except Exception:
                logger('Could not read %s! skipping..' % npz_fname)
                continue

            cdata['poses'] = cdata['poses'].T
            cdata['trans'] = cdata['trans'].T

            outfit_arr = np.zeros(len(outfit_types))
            for key in cdata['outfit'].keys():
                outfit_arr[outfit_types.index(key)] = fabric_types.index(
                    cdata['outfit'][key]['fabric']) + 1

            if len(cdata['poses'].shape) < 2: continue

            N = len(cdata['poses'])

            cdata_ids = np.arange(N)
            np.random.shuffle(cdata_ids)

            if len(cdata_ids) < 1: continue

            # try:
            data_frame.extend(np.array(cdata_ids).astype(np.int32))
            data_idx.extend(np.array([cdata['idx'] for _ in cdata_ids]))
            data_pose.extend(cdata['poses'][cdata_ids].astype(np.float32))
            data_trans.extend(cdata['trans'][cdata_ids].astype(np.float32))
            data_betas.extend(
                np.repeat(cdata['shape'][np.newaxis].astype(np.float32),
                          repeats=len(cdata_ids),
                          axis=0))
            data_gender.extend(np.array([cdata['gender'] for _ in cdata_ids]))
            data_tightness.extend(
                np.repeat(cdata['tightness'][np.newaxis].astype(np.float32),
                          repeats=len(cdata_ids),
                          axis=0))
            data_outfit.extend(
                np.repeat(outfit_arr[np.newaxis].astype(np.int32),
                          repeats=len(cdata_ids),
                          axis=0))

            # except:
            #     print(N, cdata['poses'].shape)

    assert len(data_pose) != 0

    torch.save(torch.tensor(np.asarray(data_pose, np.float32)), out_posepath)
    torch.save(torch.tensor(np.asarray(data_betas, np.float32)),
               out_posepath.replace('pose.pt', 'betas.pt'))
    torch.save(torch.tensor(np.asarray(data_trans, np.float32)),
               out_posepath.replace('pose.pt', 'trans.pt'))
    torch.save(torch.tensor(np.asarray(data_gender, np.int32)),
               out_posepath.replace('pose.pt', 'gender.pt'))
    torch.save(torch.tensor(np.asarray(data_frame, np.int32)),
               out_posepath.replace('pose.pt', 'frame.pt'))
    torch.save(torch.tensor(np.asarray(data_idx, np.int32)),
               out_posepath.replace('pose.pt', 'idx.pt'))
    torch.save(torch.tensor(np.asarray(data_tightness, np.float32)),
               out_posepath.replace('pose.pt', 'tightness.pt'))
    torch.save(torch.tensor(np.asarray(data_outfit, np.int32)),
               out_posepath.replace('pose.pt', 'outfit.pt'))

    return len(data_pose)
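Loading the dumped tensors back mirrors the torch.save calls above; the path is hypothetical and every companion file sits next to pose.pt.

import torch

poses = torch.load('data/pose.pt')      # N x pose-dim float32 poses
outfits = torch.load('data/outfit.pt')  # N x len(outfit_types) int32 fabric ids
frames = torch.load('data/frame.pt')    # per-datapoint frame index within its sequence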
Example #21
def prepare_amass(amass_splits,
                  amass_dir,
                  work_dir,
                  logger=None,
                  betas_range=None,
                  betas_limit=None,
                  frame_len=None,
                  max_len=None,
                  downsample_rate=None):

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(os.path.join(work_dir, '%s.log' % (log_name)))
        logger('Creating pytorch dataset at %s' % work_dir)

    stageI_outdir = os.path.join(work_dir, 'stage_I')

    shutil.copy2(sys.argv[0],
                 os.path.join(work_dir, os.path.basename(sys.argv[0])))

    logger('Stage I: Fetch data from AMASS npz files')

    # split mode - split a single dataset into train/vald/test with specified proportions
    # e.g.
    # amass_splits = {
    #       'dataset': 'HumanEva',
    #       'splits': (.85, .05, .1)  # train, vald, test
    # }
    if 'splits' in amass_splits.keys():
        import numbers
        splits = amass_splits['splits']
        _amass_splits = {}
        assert all(isinstance(s, numbers.Number) for s in splits), \
            "amass_splits['splits'] must be (number, number, number)"
        assert sum(splits) <= 1., \
            "sum of amass_splits['splits'] must be less than or equal to 1.0"

        for split_idx, split_name in enumerate(('train', 'vald', 'test')):
            # if a split's proportion is zero, skip creating its dataset
            if split_idx > 0 and splits[split_idx] == 0: continue

            final_splits = (0., 1.)
            outpath = makepath(os.path.join(stageI_outdir, split_name,
                                            'pose.pt'),
                               isfile=True)
            # reconstruct amass_splits as normal mode for stage II and III
            _amass_splits[split_name] = amass_splits['dataset']
            if os.path.exists(outpath): continue
            if split_name == 'train': final_splits = (0., splits[0])
            elif split_name == 'vald':
                final_splits = (splits[0], splits[0] + splits[1])
            else:
                final_splits = (splits[0] + splits[1],
                                splits[0] + splits[1] + splits[2])

            if frame_len:
                downsample_amass2pytroch(amass_splits['dataset'],
                                         amass_dir,
                                         outpath,
                                         logger=logger,
                                         betas_range=betas_range,
                                         betas_limit=betas_limit,
                                         splits=final_splits,
                                         frame_len=frame_len,
                                         max_len=max_len,
                                         downsample_rate=downsample_rate)
            else:
                dump_amass2pytroch(amass_splits['dataset'],
                                   amass_dir,
                                   outpath,
                                   logger=logger,
                                   betas_range=betas_range,
                                   betas_limit=betas_limit,
                                   splits=final_splits,
                                   max_len=max_len)

        # assign the reconstructed amass_splits back after stage I completion
        amass_splits = _amass_splits

    # normal mode - using different datasets as train/vald/test
    # e.g.
    # amass_splits = {
    #       'vald': ['HumanEva'],
    #       'test': ['SSM_synced'],
    #       'train': ['CMU']
    # }
    else:
        for split_name, datasets in amass_splits.items():
            outpath = makepath(os.path.join(stageI_outdir, split_name,
                                            'pose.pt'),
                               isfile=True)
            if os.path.exists(outpath): continue
            if frame_len:
                downsample_amass2pytroch(datasets,
                                         amass_dir,
                                         outpath,
                                         logger=logger,
                                         betas_range=betas_range,
                                         betas_limit=betas_limit,
                                         frame_len=frame_len,
                                         max_len=max_len,
                                         downsample_rate=downsample_rate)
            else:
                dump_amass2pytroch(datasets,
                                   amass_dir,
                                   outpath,
                                   logger=logger,
                                   betas_range=betas_range,
                                   betas_limit=betas_limit,
                                   max_len=max_len)

    logger(
        'Stage II: augment the data and save into h5 files to be used in a cross framework scenario.'
    )

    class AMASS_ROW(pytables.IsDescription):
        fid = pytables.Int16Col(1)  # frame id (int16)
        fname = pytables.Int32Col(1)  # file name id (int32)
        gender = pytables.Int16Col(1)  # encoded gender (int16)
        pose = pytables.Float32Col(52 * 3)  # axis-angle pose (single-precision)
        dmpl = pytables.Float32Col(8)  # dmpl coefficients (single-precision)
        pose_matrot = pytables.Float32Col(52 * 9)  # rotation-matrix pose (single-precision)
        betas = pytables.Float32Col(16)  # shape coefficients (single-precision)
        trans = pytables.Float32Col(3)  # root translation (single-precision)

    stageII_outdir = makepath(os.path.join(work_dir, 'stage_II'))

    batch_size = 256
    max_num_epochs = 1  # how much augmentation we would get

    for split_name in amass_splits.keys():
        h5_outpath = os.path.join(stageII_outdir, '%s.h5' % split_name)
        if os.path.exists(h5_outpath): continue

        ds = AMASS_Augment(dataset_dir=os.path.join(stageI_outdir, split_name))
        logger('%s has %d data points!' % (split_name, len(ds)))
        dataloader = DataLoader(ds,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=32,
                                drop_last=False)
        with pytables.open_file(h5_outpath, mode="w") as h5file:
            table = h5file.create_table('/', 'data', AMASS_ROW)

            for epoch_num in range(max_num_epochs):
                for bId, bData in tqdm(enumerate(dataloader)):
                    for i in range(len(bData['trans'])):
                        for k in bData.keys():
                            table.row[k] = c2c(bData[k][i])
                        table.row.append()
                    table.flush()

    logger(
        '\nStage III: dump every data field for all the splits as final pytorch pt files'
    )
    # we use pt files because they can be read from multiple worker threads
    stageIII_outdir = makepath(os.path.join(work_dir, 'stage_III'))

    for split_name in amass_splits.keys():
        h5_filepath = os.path.join(stageII_outdir, '%s.h5' % split_name)
        if not os.path.exists(h5_filepath): continue

        with pytables.open_file(h5_filepath, mode="r") as h5file:
            data = h5file.get_node('/data')
            data_dict = {k: [] for k in data.colnames}
            for id in range(len(data)):
                cdata = data[id]
                for k in data_dict.keys():
                    data_dict[k].append(cdata[k])

        for k, v in data_dict.items():
            outfname = makepath(os.path.join(stageIII_outdir, split_name,
                                             '%s.pt' % k),
                                isfile=True)
            if os.path.exists(outfname): continue
            torch.save(torch.from_numpy(np.asarray(v)), outfname)

    logger('Dumped final pytorch dataset at %s' % stageIII_outdir)
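The two accepted shapes of amass_splits, as the comments above describe (dataset names and paths are hypothetical); note that in split mode the 'dataset' value is iterated like a list of dataset names, so a list is the safer choice.

# split mode: one dataset carved into (train, vald, test) proportions
amass_splits = {'dataset': ['HumanEva'], 'splits': (.85, .05, .10)}

# normal mode: disjoint datasets per split
# amass_splits = {'train': ['CMU'], 'vald': ['HumanEva'], 'test': ['SSM_synced']}

prepare_amass(amass_splits, 'PATH_TO_AMASS', 'SOME_WORK_DIR')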
Example #22
def dump_amass2pytroch(datasets,
                       amass_dir,
                       out_posepath,
                       logger=None,
                       betas_range=None,
                       betas_limit=None,
                       splits=None,
                       rnd_seed=100,
                       keep_rate=0.01,
                       max_len=None):
    '''
    Select a random number of frames from the central 80 percent of each mocap sequence.
    Save individual data features like pose and shape per frame in pytorch pt files.
    The test set will have an extra field for the original markers.

    :param datasets: a list of dataset names
    :param amass_dir: directory of downloaded amass npz files. should be in this structure: path/datasets/subjects/*_poses.npz
    :param out_posepath: the path for the final pose.pt file
    :param logger: an instance of human_body_prior.tools.omni_tools.log2file
    :param betas_range: variance of each beta
    :param betas_limit: betas vary from -betas_limit to betas_limit. only works with an integer betas_range
    :param splits: (split_start, split_end), e.g. (.85, .90) means a split covering 5% of the dataset, starting at 85%
    :param rnd_seed: random seed
    :param max_len: max number of frames allowed
    :return: Number of datapoints dumped using the out_posepath address pattern
    '''
    import glob

    np.random.seed(rnd_seed)

    makepath(out_posepath, isfile=True)

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(
            out_posepath.replace('pose.pt', '%s.log' % (log_name)))
        logger('Creating pytorch dataset at %s' % out_posepath)

    data_pose = []
    data_dmpl = []
    data_betas = []
    data_gender = []
    data_trans = []
    data_fname = []
    data_fid = []

    for ds_name in datasets:
        npz_fnames = glob.glob(
            os.path.join(amass_dir, ds_name, '*/*_poses.npz'))
        if splits:
            logger(
                f'randomly selecting {"%.1f" % ((splits[1] - splits[0]) * 100)}% data points from {ds_name}.'
            )
        else:
            logger(f'randomly selecting data points from {ds_name}.')

        for dir_id, npz_fname in enumerate(tqdm(npz_fnames)):
            try:
                cdata = np.load(npz_fname)
            except Exception:
                logger('Could not read %s! skipping..' % npz_fname)
                continue
            N = len(cdata['poses'])

            # fname = abs(hash(npz_fname.split('/')[-1].split('.')[0])) % (10 ** 8) # hash filename to a unique positive 8-digits integer
            fname = dir_id * 1000
            cdata_ids = np.random.choice(
                list(range(int(0.1 * N), int(0.9 * N), 1)),
                int(keep_rate * 0.8 * N),
                replace=False
            )  # removing first and last 10% of the data to avoid repetitive initial poses
            if len(cdata_ids) < 1: continue

            if 'int' in str(type(betas_range)) or 'numpy.ndarray' in str(
                    type(betas_range)):
                if 'int' in str(type(betas_range)):
                    if betas_range == 0:
                        data_pose.extend(cdata['poses'][cdata_ids].astype(
                            np.float32))
                        data_dmpl.extend(cdata['dmpls'][cdata_ids].astype(
                            np.float32))
                        data_trans.extend(cdata['trans'][cdata_ids].astype(
                            np.float32))
                        data_betas.extend(
                            np.repeat(cdata['betas'][np.newaxis].astype(
                                np.float32),
                                      repeats=len(cdata_ids),
                                      axis=0))
                        data_gender.extend([
                            gdr2num[str(cdata['gender'].astype(str))]
                            for _ in cdata_ids
                        ])
                        data_fname.extend([fname for _ in cdata_ids])
                        data_fid.extend([i for i, _ in enumerate(cdata_ids)])
                    else:
                        assert betas_range % 2 == 0, ValueError(
                            'betas_range should be a multiple of 2')
                        if betas_limit is None: betas_limit = 2.
                        # if `betas_range` is an integer, sample that many
                        # values of beta1 and beta2, varying from
                        # -betas_limit to betas_limit, as follows:
                        beta1, beta2 = cdata['betas'][0], cdata['betas'][1]
                        # left range, right range
                        beta1_lr, beta1_rr = max(0., betas_limit + beta1), max(
                            0., betas_limit - beta1)
                        beta2_lr, beta2_rr = max(0., betas_limit + beta2), max(
                            0., betas_limit - beta2)
                        # left range percentage, right range percentage
                        beta1_lp, beta1_rp = beta1_lr / (
                            beta1_lr + beta1_rr), beta1_rr / (beta1_lr +
                                                              beta1_rr)
                        beta2_lp, beta2_rp = beta2_lr / (
                            beta2_lr + beta2_rr), beta2_rr / (beta2_lr +
                                                              beta2_rr)
                        # left range sample number
                        beta1_ln, beta2_ln = int(betas_range * beta1_lp), int(
                            betas_range * beta2_lp)
                        # do sampling for beta1 range
                        beta1_range = betas_range_sample(
                            betas_range, beta1, beta1_ln, betas_limit)
                        beta2_range = betas_range_sample(
                            betas_range, beta2, beta2_ln, betas_limit)
                        # reconstruct betas_range as a numpy.ndarray
                        betas_range = np.zeros(
                            (betas_range, len(cdata['betas'])))
                        betas_range[:, 0] = beta1_range
                        betas_range[:, 1] = beta2_range

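                # each row of betas_range is a per-sample offset added to this
                # sequence's betas (non-zero only in the first two components
                # when constructed from an integer above)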
                for i, beta_delta in enumerate(betas_range):
                    cdata_betas = np.array(cdata['betas']).astype(np.float32)
                    data_pose.extend(cdata['poses'][cdata_ids].astype(
                        np.float32))
                    data_dmpl.extend(cdata['dmpls'][cdata_ids].astype(
                        np.float32))
                    data_trans.extend(cdata['trans'][cdata_ids].astype(
                        np.float32))
                    data_betas.extend(
                        np.repeat((cdata_betas +
                                   beta_delta)[np.newaxis].astype(np.float32),
                                  repeats=len(cdata_ids),
                                  axis=0))
                    data_gender.extend([
                        gdr2num[str(cdata['gender'].astype(str))]
                        for _ in cdata_ids
                    ])
                    data_fname.extend([fname + i for _ in cdata_ids])
                    data_fid.extend(list(range(len(cdata_ids))))
            else:
                data_pose.extend(cdata['poses'][cdata_ids].astype(np.float32))
                data_dmpl.extend(cdata['dmpls'][cdata_ids].astype(np.float32))
                data_trans.extend(cdata['trans'][cdata_ids].astype(np.float32))
                data_betas.extend(
                    np.repeat(cdata['betas'][np.newaxis].astype(np.float32),
                              repeats=len(cdata_ids),
                              axis=0))
                data_gender.extend([
                    gdr2num[str(cdata['gender'].astype(str))]
                    for _ in cdata_ids
                ])
                data_fname.extend([fname for _ in cdata_ids])
                data_fid.extend(list(range(len(cdata_ids))))

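    # sanity checks: every per-frame field must have the same length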
    assert len(data_pose) != 0
    assert len(data_pose) == len(data_dmpl) == len(data_betas) == len(
        data_trans) == len(data_gender) == len(data_fname) == len(data_fid)

    if splits:
        import math

        # keep only the (splits[0], splits[1]) proportion of the concatenated data
        split_start = math.floor(len(data_pose) * splits[0])
        split_end = math.floor(len(data_pose) * splits[1])

        data_pose = data_pose[split_start:split_end]
        data_dmpl = data_dmpl[split_start:split_end]
        data_betas = data_betas[split_start:split_end]
        data_trans = data_trans[split_start:split_end]
        data_gender = data_gender[split_start:split_end]
        data_fname = data_fname[split_start:split_end]
        data_fid = data_fid[split_start:split_end]

        assert len(data_pose) > 0
        logger(
            f'data length: {len(data_pose)}, parsing from proportion '
            f'({splits[0]:.1f}, {splits[1]:.1f}) to index ({split_start}, {split_end})\n\n'
        )

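    # dump each field next to pose.pt, deriving sibling filenames from it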
    torch.save(torch.tensor(np.asarray(data_pose, np.float32)), out_posepath)
    torch.save(torch.tensor(np.asarray(data_dmpl, np.float32)),
               out_posepath.replace('pose.pt', 'dmpl.pt'))
    torch.save(torch.tensor(np.asarray(data_betas, np.float32)),
               out_posepath.replace('pose.pt', 'betas.pt'))
    torch.save(torch.tensor(np.asarray(data_trans, np.float32)),
               out_posepath.replace('pose.pt', 'trans.pt'))
    torch.save(torch.tensor(np.asarray(data_gender, np.int32)),
               out_posepath.replace('pose.pt', 'gender.pt'))
    torch.save(torch.tensor(np.asarray(data_fname, np.int32)),
               out_posepath.replace('pose.pt', 'fname.pt'))
    torch.save(torch.tensor(np.asarray(data_fid, np.int32)),
               out_posepath.replace('pose.pt', 'fid.pt'))

    return len(data_pose)
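
# NOTE: `betas_range_sample` is called above but defined elsewhere; the sketch
# below is an assumption inferred from the call sites, not the original
# implementation. It draws `n` beta offsets such that the perturbed beta stays
# within [-betas_limit, betas_limit], with `left_n` of them falling at or
# below the original beta and the remainder above it.
def betas_range_sample(n, beta, left_n, betas_limit):
    # offsets in [-(betas_limit + beta), 0] keep beta + offset >= -betas_limit
    left = np.random.uniform(-max(0., betas_limit + beta), 0., size=left_n)
    # offsets in [0, betas_limit - beta] keep beta + offset <= betas_limit
    right = np.random.uniform(0., max(0., betas_limit - beta), size=n - left_n)
    return np.concatenate([left, right])
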
Ejemplo n.º 23
0
def prepare_vposer_datasets(vposer_dataset_dir,
                            amass_splits,
                            amass_dir,
                            logger=None):

    if dataset_exists(vposer_dataset_dir):
        if logger is not None:
            logger('VPoser dataset already exists at {}'.format(
                vposer_dataset_dir))
        return

    ds_logger = log2file(makepath(vposer_dataset_dir,
                                  'dataset.log',
                                  isfile=True),
                         write2file_only=True)
    logger = ds_logger if logger is None else logger_sequencer(
        [ds_logger, logger])

    logger('Creating pytorch dataset at %s' % vposer_dataset_dir)
    logger('Using AMASS body parameters from {}'.format(amass_dir))

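    # keep a copy of this script alongside the dataset for reproducibility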
    shutil.copy2(__file__, vposer_dataset_dir)

    # class AMASS_ROW(pytables.IsDescription):
    #
    #     # gender = pytables.Int16Col(1)  # 1-character String
    #     root_orient = pytables.Float32Col(3)  # float  (single-precision)
    #     pose_body = pytables.Float32Col(21 * 3)  # float  (single-precision)
    #     # pose_hand = pytables.Float32Col(2 * 15 * 3)  # float  (single-precision)
    #
    #     # betas = pytables.Float32Col(16)  # float  (single-precision)
    #     # trans = pytables.Float32Col(3)  # float  (single-precision)

    def fetch_from_amass(ds_names):
        keep_rate = 0.3

        npz_fnames = []
        for ds_name in ds_names:
            mosh_stageII_fnames = glob.glob(
                osp.join(amass_dir, ds_name, '*/*_poses.npz'))
            npz_fnames.extend(mosh_stageII_fnames)
            logger('Found {} sequences from {}.'.format(
                len(mosh_stageII_fnames), ds_name))

        # iterate once over all collected sequences; nesting this loop inside
        # the dataset loop above would re-process earlier datasets' files
        for npz_fname in npz_fnames:
            cdata = np.load(npz_fname)
            N = len(cdata['poses'])

            # skip the first and last 10% of frames to avoid repetitive
            # initial poses, e.g. a T-pose
            cdata_ids = np.random.choice(
                list(range(int(0.1 * N), int(0.9 * N), 1)),
                int(keep_rate * 0.8 * N),
                replace=False)
            if len(cdata_ids) < 1: continue
            fullpose = cdata['poses'][cdata_ids].astype(np.float32)
            yield {
                'pose_body': fullpose[:, 3:66],
                'root_orient': fullpose[:, :3]
            }

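    # build each split by stacking the per-sequence arrays yielded above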
    for split_name, ds_names in amass_splits.items():
        if dataset_exists(vposer_dataset_dir, split_names=[split_name]):
            continue
        logger('Preparing VPoser data for split {}'.format(split_name))

        data_fields = {}
        for data in fetch_from_amass(ds_names):
            for k in data.keys():
                if k not in data_fields: data_fields[k] = []
                data_fields[k].append(data[k])

        for k, v in data_fields.items():
            outpath = makepath(vposer_dataset_dir,
                               split_name,
                               '{}.pt'.format(k),
                               isfile=True)
            v = np.concatenate(v)
            torch.save(torch.tensor(v), outpath)

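        # len(v) leaks from the loop above: it is the frame count of the last
        # saved field, and every field shares the same first dimension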
        logger('{} datapoints dumped for split {}. ds_meta_pklpath: {}'.format(
            len(v), split_name, osp.join(vposer_dataset_dir, split_name)))

    Configer(**{
        'amass_splits': amass_splits.toDict(),
        'amass_dir': amass_dir,
    }).dump_settings(makepath(vposer_dataset_dir, 'settings.ini', isfile=True))

    logger('Dumped final pytorch dataset at %s' % vposer_dataset_dir)
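
# A hypothetical invocation of `prepare_vposer_datasets` (a sketch, not from
# the original repository). It assumes `amass_splits` is a dotted-dict with a
# `.toDict()` method, e.g. dotmap.DotMap, since the code above calls
# `amass_splits.toDict()`; the split contents and directory paths below are
# placeholders:
from dotmap import DotMap

amass_splits = DotMap({
    'vald': ['HumanEva'],
    'test': ['SSM_synced'],
    'train': ['CMU', 'MPI_Limits'],
})
prepare_vposer_datasets('data/vposer_dataset', amass_splits, 'data/amass_npz')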