def __init__(self, smpl_mean_params=SMPL_MEAN_PARAMS):
    """Build the iterative-error-feedback regressor head.

    A two-layer MLP (with dropout) maps image features concatenated with
    the current pose/shape/camera estimate to additive corrections.  The
    SMPL mean parameters are loaded from ``smpl_mean_params`` and stored
    as buffers so they travel with the module across devices.
    """
    super(Regressor, self).__init__()
    pose_dim = 24 * 6  # 24 joints in the 6D rotation representation

    # Input: 512*4 image features + pose + (10 shape + 3 camera) = +13.
    self.fc1 = nn.Linear(512 * 4 + pose_dim + 13, 1024)
    self.drop1 = nn.Dropout()
    self.fc2 = nn.Linear(1024, 1024)
    self.drop2 = nn.Dropout()

    # Separate decoders for pose, shape and weak-perspective camera.
    self.decpose = nn.Linear(1024, pose_dim)
    self.decshape = nn.Linear(1024, 10)
    self.deccam = nn.Linear(1024, 3)

    # Small gain keeps the first iterations close to the mean parameters.
    for decoder in (self.decpose, self.decshape, self.deccam):
        nn.init.xavier_uniform_(decoder.weight, gain=0.01)

    self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=64, create_transl=False)

    # Mean SMPL parameters used as the starting point of the refinement.
    params = np.load(smpl_mean_params)
    mean_pose = torch.from_numpy(params['pose'][:]).unsqueeze(0)
    mean_shape = torch.from_numpy(
        params['shape'][:].astype('float32')).unsqueeze(0)
    mean_cam = torch.from_numpy(params['cam']).unsqueeze(0)
    self.register_buffer('init_pose', mean_pose)
    self.register_buffer('init_shape', mean_shape)
    self.register_buffer('init_cam', mean_cam)
def set_gender(self, gender="neutral", use_smplx=False):
    """Replace the current SMPL body model with a gendered one.

    Args:
        gender: SMPL gender to load ("neutral", "male" or "female").
        use_smplx: if True, use the ``smplx`` package's SMPL class;
            otherwise the project-local ``meva.lib.smpl.SMPL``.

    The new model is moved to the same device as the model it replaces.
    """
    # Both branches built the model identically; only the import differs,
    # so select the class first and construct once.
    if use_smplx:
        from smplx import SMPL as SMPLClass
    else:
        from meva.lib.smpl import SMPL as SMPLClass
    # Read the device from the old model before replacing it.
    device = next(self.smpl.parameters()).device
    self.smpl = SMPLClass(SMPL_MODEL_DIR,
                          batch_size=64,
                          create_transl=False,
                          gender=gender).to(device)
def __init__(self,
             seqlen,
             batch_size=64,
             n_layers=1,
             hidden_size=2048,
             add_linear=False,
             bidirectional=False,
             use_residual=True,
             cfg="vae_rec_1"):
    """Build the MEVA temporal model.

    Args:
        seqlen: number of frames per input sequence.
        batch_size: training batch size (stored for later use).
        n_layers: GRU depth of the feature encoder.
        hidden_size: hidden size of the feature encoder.
        add_linear: add a linear projection to the feature encoder.
        bidirectional: make the encoders bidirectional.
        use_residual: residual connection in the feature encoder.
        cfg: name of the VAE config to load the pretrained motion VAE.
    """
    super(MEVA, self).__init__()
    self.vae_cfg = vae_cfg = Config(cfg)
    self.seqlen = seqlen
    self.batch_size = batch_size

    # Pretrained motion VAE; frozen so only the encoders/regressor train.
    self.vae_model, _, _ = get_models(vae_cfg, iter=-2)
    for param in self.vae_model.parameters():
        param.requires_grad = False

    self.feat_encoder = TemporalEncoder(
        n_layers=n_layers,
        hidden_size=hidden_size,
        bidirectional=bidirectional,
        add_linear=add_linear,
        use_residual=use_residual,
    )

    vae_hidden_size = 512
    self.motion_encoder = TemporalEncoder(
        n_layers=n_layers,
        hidden_size=512,
        bidirectional=bidirectional,
        add_linear=True,
        output_size=vae_hidden_size,
        use_residual=False,
    )

    # Maps the motion-encoder output to the VAE's 144-dim initial state.
    fc1 = nn.Linear(vae_hidden_size, 256)
    act = nn.Tanh()
    fc2 = nn.Linear(256, 144)
    self.vae_init_mlp = nn.Sequential(fc1, act, fc2)

    self.regressor = Regressor()
    # NOTE: the original code loaded SMPL_MEAN_PARAMS here into an unused
    # local; the dead load has been removed (the Regressor loads it itself).
    self.first_in_flag = True
    self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=64, create_transl=False)
    self.set_gender()
def __init__(self, block, layers, smpl_mean_params):
    """Build the HMR network: a ResNet backbone plus an IEF regressor head.

    Args:
        block: residual block class (e.g. Bottleneck); ``block.expansion``
            determines the backbone's output width.
        layers: list with the number of blocks per ResNet stage.
        smpl_mean_params: path to the .npz file with mean SMPL parameters.
    """
    self.inplanes = 64
    super(HMR, self).__init__()
    pose_dim = 24 * 6  # 24 joints, 6D rotation representation

    # --- ResNet backbone -------------------------------------------------
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AvgPool2d(7, stride=1)

    # --- Iterative-error-feedback regressor head -------------------------
    # Input: backbone features + current pose + (10 shape + 3 cam) = +13.
    self.fc1 = nn.Linear(512 * block.expansion + pose_dim + 13, 1024)
    self.drop1 = nn.Dropout()
    self.fc2 = nn.Linear(1024, 1024)
    self.drop2 = nn.Dropout()
    self.decpose = nn.Linear(1024, pose_dim)
    self.decshape = nn.Linear(1024, 10)
    self.deccam = nn.Linear(1024, 3)
    # Small gain keeps early estimates close to the mean parameters.
    for head in (self.decpose, self.decshape, self.deccam):
        nn.init.xavier_uniform_(head.weight, gain=0.01)

    self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=64,
                     create_transl=False).to('cpu')

    # He-style init for convolutions, constant init for batch norms.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            fan_out = (module.kernel_size[0] * module.kernel_size[1]
                       * module.out_channels)
            module.weight.data.normal_(0, math.sqrt(2. / fan_out))
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()

    # Mean SMPL parameters, registered as buffers so they follow the module.
    params = np.load(smpl_mean_params)
    mean_pose = torch.from_numpy(params['pose'][:]).unsqueeze(0)
    mean_shape = torch.from_numpy(
        params['shape'][:].astype('float32')).unsqueeze(0)
    mean_cam = torch.from_numpy(params['cam']).unsqueeze(0)
    self.register_buffer('init_pose', mean_pose)
    self.register_buffer('init_shape', mean_shape)
    self.register_buffer('init_cam', mean_cam)
def compute_error_verts(pred_verts, target_verts=None, target_theta=None):
    """Compute the mean per-vertex Euclidean error over the SMPL surface.

    Args:
        pred_verts (np.ndarray): predicted vertices, shape (N, 6890, 3).
        target_verts (np.ndarray, optional): ground-truth vertices,
            shape (N, 6890, 3). If None, they are regenerated from
            ``target_theta`` with a neutral SMPL model on CPU.
        target_theta (np.ndarray, optional): SMPL parameters of shape
            (N, 85) laid out as [cam(3), pose(72), shape(10)]; only used
            when ``target_verts`` is None.

    Returns:
        np.ndarray: per-frame mean vertex error, shape (N,).
    """
    if target_verts is None:
        from meva.lib.smpl import SMPL_MODEL_DIR
        from meva.lib.smpl import SMPL
        device = 'cpu'
        smpl = SMPL(
            SMPL_MODEL_DIR,
            batch_size=1,  # irrelevant here; inputs are chunked below
        ).to(device)

        # theta layout: [:3] camera, [3:75] pose (axis-angle), [75:] betas.
        betas = torch.from_numpy(target_theta[:, 75:]).to(device)
        pose = torch.from_numpy(target_theta[:, 3:75]).to(device)

        # Run SMPL in chunks of 5000 frames to bound peak memory.
        target_verts = []
        b_ = torch.split(betas, 5000)
        p_ = torch.split(pose, 5000)
        for b, p in zip(b_, p_):
            output = smpl(betas=b,
                          body_pose=p[:, 3:],
                          global_orient=p[:, :3],
                          pose2rot=True)
            target_verts.append(output.vertices.detach().cpu().numpy())
        target_verts = np.concatenate(target_verts, axis=0)

    assert len(pred_verts) == len(target_verts)
    # Euclidean distance per vertex, then mean over vertices per frame.
    error_per_vert = np.sqrt(np.sum((target_verts - pred_verts)**2, axis=2))
    return np.mean(error_per_vert, axis=1)
def db_2_dataset(dataset_data):
    # Identity passthrough — presumably a placeholder for a real db-to-dataset
    # conversion; TODO confirm against callers.
    return dataset_data


if __name__ == "__main__":
    cfg, cfg_file = parse_args()
    # Joint indices selected from the SMPL skeleton; presumably the 12 "major"
    # limb/torso joints used for evaluation — verify against the metric code.
    SMPL_MAJOR_JOINTS = np.array([1, 2, 4, 5, 7, 8, 16, 17, 18, 19, 20, 21])
    # Prefer GPU 0 when available, otherwise fall back to CPU.
    device = (torch.device("cuda", index=0)
              if torch.cuda.is_available() else torch.device("cpu"))
    smpl = SMPL(
        SMPL_MODEL_DIR,
        batch_size=64,
        create_transl=False,
    )
    # Build the MEVA model from the experiment configuration.
    meva_model = MEVA(
        n_layers=cfg.MODEL.TGRU.NUM_LAYERS,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        seqlen=cfg.DATASET.SEQLEN,
        hidden_size=cfg.MODEL.TGRU.HIDDEN_SIZE,
        add_linear=cfg.MODEL.TGRU.ADD_LINEAR,
        bidirectional=cfg.MODEL.TGRU.BIDIRECTIONAL,
        use_residual=cfg.MODEL.TGRU.RESIDUAL,
        cfg=cfg.VAE_CFG,
    ).to(device)
    # NOTE(review): hard-coded checkpoint path — consider making it a CLI arg.
    meva_dir = 'results/meva/train_meva_2/model_best.pth.tar'
class Regressor(nn.Module):
    """SMPL parameter regressor using iterative error feedback (HMR-style).

    Starting from the mean SMPL parameters, an MLP repeatedly predicts
    additive corrections to pose (6D rotations), shape (betas) and a
    weak-perspective camera, then decodes the final estimate through SMPL
    to vertices, joints and 2D keypoint projections.
    """

    def __init__(self, smpl_mean_params=SMPL_MEAN_PARAMS):
        """Build the MLP head and load mean SMPL parameters as buffers."""
        super(Regressor, self).__init__()
        npose = 24 * 6  # 24 joints in the 6D rotation representation

        # Input: 512*4 image features + pose + (10 shape + 3 camera) = +13.
        self.fc1 = nn.Linear(512 * 4 + npose + 13, 1024)
        self.drop1 = nn.Dropout()
        self.fc2 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout()
        self.decpose = nn.Linear(1024, npose)
        self.decshape = nn.Linear(1024, 10)
        self.deccam = nn.Linear(1024, 3)
        # Small gain keeps the first iterations close to the mean parameters.
        nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
        nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
        nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)

        self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=64, create_transl=False)

        # Mean parameters used as the refinement starting point.
        mean_params = np.load(smpl_mean_params)
        init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
        init_shape = torch.from_numpy(
            mean_params['shape'][:].astype('float32')).unsqueeze(0)
        init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)
        self.register_buffer('init_pose', init_pose)
        self.register_buffer('init_shape', init_shape)
        self.register_buffer('init_cam', init_cam)

    def set_gender(self, gender="neutral", use_smplx=False):
        """Replace the SMPL model with a gendered one on the same device.

        Args:
            gender: "neutral", "male" or "female".
            use_smplx: if True, use ``smplx``'s SMPL class; otherwise the
                project-local ``meva.lib.smpl.SMPL``.
        """
        # Both variants are constructed identically; only the import differs.
        if use_smplx:
            from smplx import SMPL as SMPLClass
        else:
            from meva.lib.smpl import SMPL as SMPLClass
        device = next(self.smpl.parameters()).device
        self.smpl = SMPLClass(SMPL_MODEL_DIR,
                              batch_size=64,
                              create_transl=False,
                              gender=gender).to(device)

    def iter_refine(self,
                    x,
                    init_pose=None,
                    init_shape=None,
                    init_cam=None,
                    n_iter=3,
                    J_regressor=None):
        """Run ``n_iter`` rounds of additive refinement from the init params.

        Args:
            x: image feature batch, shape (batch, feat_dim).
            init_pose/init_shape/init_cam: optional starting estimates;
                default to the registered mean-parameter buffers.
            n_iter: number of feedback iterations.
            J_regressor: unused here; accepted for interface symmetry.

        Returns:
            Tuple (pred_pose, pred_shape, pred_cam).
        """
        batch_size = x.shape[0]

        if init_pose is None:
            init_pose = self.init_pose.expand(batch_size, -1)
        if init_shape is None:
            init_shape = self.init_shape.expand(batch_size, -1)
        if init_cam is None:
            init_cam = self.init_cam.expand(batch_size, -1)

        pred_pose = init_pose
        pred_shape = init_shape
        pred_cam = init_cam
        for i in range(n_iter):
            # Condition the MLP on features and the current estimate.
            xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)
            xc = self.fc1(xc)
            xc = self.drop1(xc)
            xc = self.fc2(xc)
            xc = self.drop2(xc)
            # Predict corrections, not absolute values.
            pred_pose = self.decpose(xc) + pred_pose
            pred_shape = self.decshape(xc) + pred_shape
            pred_cam = self.deccam(xc) + pred_cam
        return pred_pose, pred_shape, pred_cam

    def forward(self,
                x,
                init_pose=None,
                init_shape=None,
                init_cam=None,
                n_iter=3,
                J_regressor=None):
        """Refine SMPL parameters from features and decode to outputs.

        Returns a one-element list of dicts with keys 'theta', 'verts',
        'kp_2d', 'kp_3d' and 'rotmat' (see ``smpl_to_kpts``).
        """
        batch_size = x.shape[0]
        pred_pose, pred_shape, pred_cam = self.iter_refine(
            x,
            init_pose=init_pose,
            init_shape=init_shape,
            init_cam=init_cam,
            n_iter=n_iter,
            J_regressor=J_regressor)
        # 6D rotation representation -> per-joint 3x3 rotation matrices.
        pred_rotmat = convert_orth_6d_to_mat(pred_pose).view(
            batch_size, 24, 3, 3)
        return self.smpl_to_kpts(pred_rotmat, pred_shape, pred_cam,
                                 J_regressor)

    def smpl_to_kpts(self, pred_rotmat, pred_shape, pred_cam, J_regressor):
        """Decode rotations/shape/camera through SMPL to the output dict.

        Args:
            pred_rotmat: (batch, 24, 3, 3) rotation matrices; index 0 is the
                global orientation, 1..23 the body pose.
            pred_shape: (batch, 10) betas.
            pred_cam: (batch, 3) weak-perspective camera.
            J_regressor: optional (J, 6890) regressor; when given, joints are
                regressed from vertices and mapped to the 14 H36M joints.
        """
        pred_output = self.smpl(betas=pred_shape,
                                body_pose=pred_rotmat[:, 1:],
                                global_orient=pred_rotmat[:, 0].unsqueeze(1),
                                pose2rot=False)
        pred_vertices = pred_output.vertices
        pred_joints = pred_output.joints

        if J_regressor is not None:
            J_regressor_batch = J_regressor[None, :].expand(
                pred_vertices.shape[0], -1, -1).to(pred_vertices.device)
            pred_joints = torch.matmul(J_regressor_batch, pred_vertices)
            pred_joints = pred_joints[:, H36M_TO_J14, :]

        pred_keypoints_2d = projection(pred_joints, pred_cam)

        # Back to axis-angle for the flat 72-dim 'theta' pose layout.
        pose = rotation_matrix_to_angle_axis(
            pred_rotmat.reshape(-1, 3, 3)).reshape(-1, 72)

        output = [{
            'theta': torch.cat([pred_cam, pose, pred_shape], dim=1),
            'verts': pred_vertices,
            'kp_2d': pred_keypoints_2d,
            'kp_3d': pred_joints,
            'rotmat': pred_rotmat
        }]
        return output
# Evaluation-script setup: load config/model, build SMPL, prepare output dirs.
# (`args` is parsed earlier in the file, outside this chunk.)
dtype = torch.float32
torch.set_default_dtype(dtype)
cfg_name = args.cfg
cfg = Config(args.cfg)
gpu_index = args.gpu_index
device = torch.device('cuda', index=gpu_index)
image_size = args.image_size
has_smpl_root = cfg.data_specs['has_smpl_root']
# Load the model checkpoint selected by --iter and switch to eval mode.
model, _, run_batch = get_models(cfg, iter = args.iter)
model.to(device)
model.eval()
smpl = SMPL(
    SMPL_MODEL_DIR,
    batch_size=50,
    create_transl=False,
    dtype = dtype
).to(device)
# H36M joint regressor (maps 6890 SMPL vertices to H36M joints).
J_regressor = torch.from_numpy(np.load(osp.join(MEVA_DATA_DIR, 'J_regressor_h36m.npy'))).float()
# NOTE(review): hard-coded machine-specific paths below — parameterize for portability.
output_base = "/hdd/zen/data/ActmixGenenerator/output/3dpw"
output_path = osp.join(output_base, cfg_name)
if not osp.isdir(output_path):
    os.makedirs(output_path)
dataset_3dpw = joblib.load("/hdd/zen/data/ActBound/AMASS/3dpw_train_res.pkl")
# dataset_3dpw = joblib.load("/hdd/zen/data/ActBound/AMASS/3dpw_val_res.pkl")
# dataset_3dpw = joblib.load("/hdd/zen/data/ActBound/AMASS/3dpw_test_res.pkl")
# NOTE(review): this overwrites the `image_size` read from args above — confirm intended.
image_size = 400
total = cfg.data_specs['t_total']