Example no. 1
def run_model_single(gender, pose_params, shape_params, trans_params):
    smpl_params = torch.cat(
        (pose_params, shape_params, trans_params)).type(torch.float32)

    smpl_layer = SMPL_Layer(center_idx=0,
                            gender=gender,
                            model_root=settings.SMPL_MODEL_PATH)
    verts, Jtr = smpl_layer(
        torch.unsqueeze(smpl_params[:pose_size], 0),
        th_betas=torch.unsqueeze(smpl_params[pose_size:pose_size + beta_size],
                                 0),
        th_trans=torch.unsqueeze(smpl_params[pose_size + beta_size:], 0))
    return verts.numpy()[0], smpl_layer.th_faces.numpy()
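
The snippet above relies on module-level names defined elsewhere in its source file (pose_size, beta_size, settings.SMPL_MODEL_PATH). A minimal usage sketch, assuming the standard SMPL dimensions for those constants (the actual module may define them differently):

# Assumed constants (not shown in the snippet above): standard SMPL uses
# 24 joints x 3 axis-angle components = 72 pose values and 10 shape betas.
pose_size = 72
beta_size = 10

# Hypothetical call with a rest pose, a random body shape and a fixed translation.
import torch

verts, faces = run_model_single(gender='neutral',
                                pose_params=torch.zeros(pose_size),
                                shape_params=torch.rand(beta_size),
                                trans_params=torch.tensor([0.0, 1.0, 0.0]))
print(verts.shape, faces.shape)  # (6890, 3) vertices and (13776, 3) triangles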
Example no. 2
def get_init_joint_2d(intrinsics, extrinsics, gender):
    pose_params = torch.zeros(pose_size)
    shape_params = torch.rand(beta_size)
    trans_params = torch.tensor([0, 1, 0])

    smpl_layer = SMPL_Layer(center_idx=0,
                            gender=gender,
                            model_root=settings.SMPL_MODEL_PATH)
    smpl_params = torch.cat(
        (pose_params, shape_params, trans_params)).type(torch.float32)
    verts, Jtr = smpl_layer(
        torch.unsqueeze(smpl_params[:pose_size], 0),
        th_betas=torch.unsqueeze(smpl_params[pose_size:pose_size + beta_size],
                                 0),
        th_trans=torch.unsqueeze(smpl_params[pose_size + beta_size:], 0))
    cam_param = common_utils.get_camera_info(intrinsics, extrinsics)
    pred_joint2d = common_utils.projection_torch(vertices=Jtr[0],
                                                 cam_param=cam_param,
                                                 width=1920,
                                                 height=1080)
    return pred_joint2d.numpy()
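
common_utils.get_camera_info and common_utils.projection_torch are helpers from the same project and their implementations are not shown here. As a rough sketch only, under the assumption that they implement a standard pinhole projection (function and variable names below are illustrative, not the project's API):

import torch

def pinhole_project(points3d, K, R, t):
    # points3d: (N, 3) world-space joints, K: (3, 3) intrinsics,
    # R: (3, 3) rotation and t: (3,) translation of the extrinsics.
    cam = points3d @ R.T + t         # world -> camera coordinates
    uvw = cam @ K.T                  # apply the intrinsic matrix
    return uvw[:, :2] / uvw[:, 2:3]  # perspective divide -> (N, 2) pixel coords

The width and height arguments passed to projection_torch suggest that the project additionally clips or normalises the projected joints to a 1920x1080 image.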
Example no. 3
import torch

from smplpytorch.pytorch.smpl_layer import SMPL_Layer
from display_utils import display_model
from pose_data import pose1, pose2, pose3, pose4

if __name__ == '__main__':
    cuda = False
    batch_size = 1

    # Create the SMPL layer
    smpl_layer = SMPL_Layer(center_idx=0,
                            gender='female',
                            model_root='smplpytorch/native/models')
    userInp = input(
        "Please enter the desired pose (hint: an integer from 1 to 4): ")
    pose_params = None
    if userInp == '1':
        pose_params = pose1
    elif userInp == '2':
        pose_params = pose2
    elif userInp == '3':
        pose_params = pose3
    elif userInp == '4':
        pose_params = pose4
    else:
        print("Please enter an integer between 1 and 4")

    # Preset (fixed) shape parameters
    shape_params = torch.tensor(
        [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]) * 20
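
The pose presets imported from pose_data are not included in this snippet; presumably each is a (1, 72) tensor of per-joint axis-angle rotations. A hypothetical example of such a preset:

import torch

# 24 joints x 3 axis-angle components = 72 values; all zeros is the rest pose.
# Real presets (pose1..pose4) would contain non-zero joint rotations.
pose1 = torch.zeros(1, 72)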
Example no. 4
    return args

# argument parsing
args = parse_args()
cfg.set_args(args.gpu_ids, args.stage)
cudnn.benchmark = True

# SMPL joint set
joint_num = 29 # original: 24. manually add nose, L/R eye, L/R ear
joints_name = ('Pelvis', 'L_Hip', 'R_Hip', 'Torso', 'L_Knee', 'R_Knee', 'Spine', 'L_Ankle', 'R_Ankle', 'Chest', 'L_Toe', 'R_Toe', 'Neck', 'L_Thorax', 'R_Thorax', 'Head', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hand', 'R_Hand', 'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear')
flip_pairs = ( (1,2), (4,5), (7,8), (10,11), (13,14), (16,17), (18,19), (20,21), (22,23) , (25,26), (27,28) )
skeleton = ( (0,1), (1,4), (4,7), (7,10), (0,2), (2,5), (5,8), (8,11), (0,3), (3,6), (6,9), (9,14), (14,17), (17,19), (19, 21), (21,23), (9,13), (13,16), (16,18), (18,20), (20,22), (9,12), (12,24), (24,15), (24,25), (24,26), (25,27), (26,28) )

# SMPL mesh
vertex_num = 6890
smpl_layer = SMPL_Layer(gender='neutral', model_root=cfg.smpl_path + '/smplpytorch/native/models')
face = smpl_layer.th_faces.numpy()
joint_regressor = smpl_layer.th_J_regressor.numpy()
root_joint_idx = 0

# snapshot load
model_path = './snapshot_%d.pth.tar' % int(args.test_epoch)
assert osp.exists(model_path), 'Cannot find model at ' + model_path
print('Load checkpoint from {}'.format(model_path))
model = get_model(vertex_num, joint_num, 'test')

model = DataParallel(model).cuda()
ckpt = torch.load(model_path)
model.load_state_dict(ckpt['network'], strict=False)
model.eval()
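
The joint_regressor loaded above is the SMPL joint regressor, a (24, 6890) matrix that maps the 6890 mesh vertices to the 24 base SMPL joints (the extra nose/eye/ear joints in joints_name are added separately by the project). A minimal sketch of how such a regressor is typically applied; the mesh_vertices name is illustrative:

def regress_joints(joint_regressor, mesh_vertices):
    # NumPy arrays: (24, 6890) @ (6890, 3) -> (24, 3) joint coordinates
    return joint_regressor @ mesh_vertices

# e.g. root-align a predicted mesh at the pelvis (root_joint_idx = 0):
# joints = regress_joints(joint_regressor, mesh_vertices)
# mesh_vertices = mesh_vertices - joints[root_joint_idx]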
Example no. 5
 def get_layer(self, gender='neutral'):
     return SMPL_Layer(gender=gender, model_root=cfg.smpl_path + '/smplpytorch/native/models')
Example no. 6
import torch

from smplpytorch.pytorch.smpl_layer import SMPL_Layer
from display_utils import display_model

if __name__ == '__main__':
    cuda = False
    batch_size = 1

    # Create the SMPL layer
    smpl_layer = SMPL_Layer(
        center_idx=0,
        gender='male',
        model_root=
        '/BS/bharat/work/installation/smplpytorch/smplpytorch/native/models')

    # Generate random pose and shape parameters
    pose_params = torch.rand(batch_size, 72) * 0.2
    shape_params = torch.rand(batch_size, 10) * 0.03
    offsets = torch.rand(batch_size, 6890, 3) * 0.05

    # GPU mode
    if cuda:
        pose_params = pose_params.cuda()
        shape_params = shape_params.cuda()
        smpl_layer.cuda()
        offsets = offsets.cuda()

    # Forward from the SMPL layer
    verts, Jtr = smpl_layer(pose_params,
                            th_betas=shape_params)
    # (the offsets tensor is presumably also passed to the layer; this call is
    # truncated in the source snippet)

Example no. 7
import torch
import cv2
import numpy as np
from smplpytorch.pytorch.smpl_layer import SMPL_Layer
from Test.smpl_pytorch.display_utils import display_model
import matplotlib.pyplot as plt
from pytorch3d.io import save_obj


if __name__ == '__main__':
    cuda = False
    batch_size = 1

    # Create the SMPL layer
    smpl_layer = SMPL_Layer(
        center_idx=0,
        gender='male',
        model_root='/Users/anupamtripathi/PycharmProjects/3d_body_mesurement/Test/smpl_pytorch/smlppytorch/native/models')

    # Generate random pose and shape parameters
    pose_params = torch.ones(batch_size, 72) * 0
    shape_params = torch.ones(batch_size, 10)
    pose_params = torch.tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -1, 0., 0., 1, 0., 0., 0.,
         0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]).float()


    # GPU mode
    if cuda:
        pose_params = pose_params.cuda()
        shape_params = shape_params.cuda()
        smpl_layer.cuda()
Example no. 8
 def get_layer(self, gender):
     return SMPL_Layer(gender=gender,
                       model_root=cfg.smpl_dir +
                       '/smplpytorch/native/models')
Example no. 9
def run_model(gt_dataset, gender, epochs=10):
    # parameter setting
    pose_params = torch.zeros(pose_size)
    shape_params = torch.rand(beta_size)
    trans_params = torch.tensor([0, 1, 0])
    # trans_params = torch.zeros(3)
    smpl_params = torch.cat(
        (pose_params, shape_params,
         trans_params)).type(torch.float32).requires_grad_(True)

    smpl_layer = SMPL_Layer(center_idx=0,
                            gender=gender,
                            model_root=settings.SMPL_MODEL_PATH)
    # verts, Jtr = smpl_layer(pose_params, th_betas=shape_params, th_trans=trans_params)
    # mesh = trimesh.Trimesh(vertice, faces)
    # display_utils.save_obj(mesh.vertices, mesh.faces, 'test.obj')
    datasets = []
    for gt_joint2d, intrinsics, extrinsics in gt_dataset:
        datasets.append({
            'joint2d': torch.Tensor(gt_joint2d),
            'intrinsics': torch.Tensor(intrinsics),
            'extrinsics': torch.Tensor(extrinsics)
        })

    # hyper parameter
    learning_rate = 1e-1
    optimizer = optim.Adam([smpl_params], lr=learning_rate)

    for epoch in tqdm(range(epochs)):
        for dataset in datasets:
            verts, Jtr = smpl_layer(
                torch.unsqueeze(smpl_params[:pose_size], 0),
                th_betas=torch.unsqueeze(
                    smpl_params[pose_size:pose_size + beta_size], 0),
                th_trans=torch.unsqueeze(smpl_params[pose_size + beta_size:],
                                         0))
            gt_joint2d = dataset['joint2d']
            intrinsics = dataset['intrinsics']
            extrinsics = dataset['extrinsics']
            cam_param = common_utils.get_camera_info(intrinsics, extrinsics)
            pred_joint2d = common_utils.projection_torch(vertices=Jtr[0],
                                                         cam_param=cam_param,
                                                         width=1920,
                                                         height=1080)
            loss = (pred_joint2d - gt_joint2d).pow(2).sum()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    pose_t = torch.unsqueeze(smpl_params[:pose_size],
                             0).reshape(-1, 3).detach().numpy()
    rotation = []
    for pose in pose_t:
        r = R.from_rotvec(pose)
        rotation.append(r.as_euler('xyz', degrees=True).tolist())
    # th_pose_rotmat = th_posemap_axisang(pose_t).reshape(-1, 9)
    # pose_t = th_pose_rotmat.detach().numpy()
    result = {
        'rotation': rotation,
        'shape_params':
            smpl_params.detach().numpy()[pose_size:pose_size + beta_size].tolist(),
        'trans_params':
            smpl_params.detach().numpy()[pose_size + beta_size:].tolist(),
        'joint_3d': Jtr[0].detach().numpy().squeeze().tolist()
    }
    return verts, Jtr, result
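
A hedged usage sketch for run_model: the ground-truth loading is project-specific, so the gt_dataset construction below is only indicative; the function expects an iterable of (gt_joint2d, intrinsics, extrinsics) triples and returns the fitted vertices, joints and a JSON-serialisable result dictionary.

import json

# gt_dataset: e.g. one entry per camera view of a multi-view capture.
# gt_dataset = [(joint2d_view0, K0, Rt0), (joint2d_view1, K1, Rt1), ...]
verts, Jtr, result = run_model(gt_dataset, gender='male', epochs=50)

with open('smpl_fit.json', 'w') as f:
    json.dump(result, f, indent=2)  # rotation, shape, translation and 3D joints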
Example no. 10
import torch

from smplpytorch.pytorch.smpl_layer import SMPL_Layer
from display_utils import display_model

if __name__ == '__main__':
    cuda = False
    batch_size = 1

    # Create the SMPL layer
    smpl_layer = SMPL_Layer(center_idx=0,
                            gender='neutral',
                            model_root='models')

    # Generate random pose and shape parameters
    pose_params = torch.rand(batch_size, 72) * 0.2
    shape_params = torch.rand(batch_size, 10) * 0.03

    # GPU mode
    if cuda:
        pose_params = pose_params.cuda()
        shape_params = shape_params.cuda()
        smpl_layer.cuda()

    # Forward from the SMPL layer
    verts, Jtr = smpl_layer(pose_params, th_betas=shape_params)

    # Draw output vertices and joints
    display_model({
        'verts': verts.cpu().detach(),
        'joints': Jtr.cpu().detach()