class Pose2MeshModel(nn.Module):
    def __init__(self):
        super(Pose2MeshModel, self).__init__()
        if torch.cuda.is_available():
            self.reg = AcosRegressor(hidden_dim=256).cuda()
            self.smpl = SMPLModel(device=torch.device('cuda'),
                                  model_path='./model_24_joints.pkl',
                                  simplify=True)
        else:
            self.reg = AcosRegressor(hidden_dim=256).cpu()
            self.smpl = SMPLModel(device=torch.device('cpu'),
                                  model_path='./model_24_joints.pkl',
                                  simplify=True)
        ckpt_path = './checkpoints_0303_24_joints'
        state_dict = torch.load('%s/regressor_040.pth' % ckpt_path)
        self.reg.load_state_dict(state_dict)

    def forward(self, input):
        trans = torch.zeros((input.shape[0], 3), device=input.device)
        betas = torch.zeros((input.shape[0], 10), device=input.device)
        thetas = self.reg(input)
        print('Estimated theta:\n', thetas.detach().cpu().numpy())
        mesh, joints = self.smpl(betas, thetas, trans)
        return mesh, joints

    def evaluate(self, input, save_dir):
        mesh, joints = self.forward(input)
        self.smpl.write_obj(mesh[0].detach().cpu().numpy(), save_dir)
        np.savetxt('recon_pose.xyz',
                   joints[0].detach().cpu().numpy().reshape(24, 3),
                   delimiter=' ')
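# A minimal usage sketch for Pose2MeshModel (hedged: the regressor's expected
# input layout is not shown in this file; a (batch, 24, 3) float64 joint
# tensor is an assumption — adjust to match the checkpoint's training layout).
p2m = Pose2MeshModel()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_joints = torch.zeros((1, 24, 3), dtype=torch.float64, device=device)  # hypothetical batch
p2m.evaluate(input_joints, 'recon_mesh.obj')  # writes the mesh OBJ and recon_pose.xyz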
def create_dataset(num_samples, dataset_name, batch_size=32, theta_var=1.0, gpu_id=[0]):
    if len(gpu_id) > 0 and torch.cuda.is_available():
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id[0])
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    pose_size = 72
    beta_size = 10

    np.random.seed(9608)
    model = SMPLModel(device=device, model_path='model_24_joints.pkl', simplify=True)
    d_poses = torch.from_numpy((np.random.rand(num_samples, pose_size) - 0.5) * theta_var)\
        .type(torch.float64).to(device)
    # Betas are fixed to zero; random betas are left commented out:
    # d_betas = torch.from_numpy((np.random.rand(num_samples, beta_size) - 0.5) * 0.2) \
    #     .type(torch.float64).to(device)
    d_betas = torch.from_numpy(np.zeros((num_samples, beta_size)))\
        .type(torch.float64).to(device)
    __trans = torch.from_numpy(np.zeros((batch_size, 3))).type(torch.float64).to(device)

    joints = []
    for i in tqdm(range(num_samples // batch_size)):
        __poses = d_poses[i * batch_size:(i + 1) * batch_size]
        __betas = d_betas[i * batch_size:(i + 1) * batch_size]
        with torch.no_grad():
            __result, __joints = model(__betas, __poses, __trans)
        joints.append(__joints)
        # outmesh_path = './samples/smpl_torch_{}.obj'
        # for i in range(result.shape[0]):
        #     model.write_obj(result[i], outmesh_path.format(i))

    d_joints = torch.cat(joints, dim=0)
    dataset = {
        'joints': d_joints.detach().cpu().numpy(),
        'thetas': d_poses.detach().cpu().numpy(),
        # 'betas': d_betas.detach().cpu().numpy()
    }
    with open(dataset_name, 'wb') as f:
        pickle.dump(dataset, f)
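# Example invocation (hedged: the sample count is arbitrary). Keep num_samples
# divisible by batch_size — only num_samples // batch_size full batches of
# joints are generated, while all thetas are saved, so the arrays would
# otherwise disagree in length.
create_dataset(num_samples=32000,
               dataset_name='train_dataset_24_joints_1.0.pickle',
               batch_size=32, theta_var=1.0)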
def run_test():
    if platform == 'linux':
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'
    data_type = torch.float32
    device = torch.device('cuda')
    pose_size = 72
    beta_size = 10

    np.random.seed(9608)
    model = SMPLModel(
        device=device,
        model_path='./model_lsp.pkl',
        data_type=data_type,
    )
    dataset = Human36MDataset(model, max_item=100, calc_mesh=True)

    # generate meshes, aligned with the 14-point ground truth
    case_num = 10
    data = dataset[:case_num]
    meshes = data['meshes']
    input = data['lsp_joints']
    target_2d = data['gt2d']
    target_3d = data['gt3d']

    transforms = map_3d_to_2d(input, target_2d, target_3d)

    # Important: the mesh should be centered at the origin!
    deformed_meshes = transforms(meshes)
    mesh_3d = deformed_meshes.detach().cpu().numpy()
    # visualize(data['imagename'], mesh_3d[:, :, :2].astype(int),
    #           target_2d.detach().cpu().numpy().astype(int))

    for i, mesh in enumerate(mesh_3d):
        model.write_obj(mesh, '_test_cache/real_mesh_{}.obj'.format(i))  # weird.
        UV_position_map, UV_scatter, rgbs_backup = get_UV(mesh, 300)

        # write colorized coordinates to ply
        write_ply('_test_cache/colored_mesh_{}.ply'.format(i), mesh, rgbs_backup)
        out = np.concatenate((UV_position_map, UV_scatter), axis=1)
        imsave('_test_cache/UV_position_map_{}.png'.format(i), out)
        resampled_mesh = resample(UV_position_map)
        model.write_obj(resampled_mesh, '_test_cache/recon_mesh_{}.obj'.format(i))
def create_UV_maps(UV_label_root=None, uv_prefix='smpl_fbx_template'):
    if platform == 'linux':
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    data_type = torch.float32
    device = torch.device('cuda')
    pose_size = 72
    beta_size = 10

    np.random.seed(9608)
    model = SMPLModel(
        device=device,
        model_path='./model_lsp.pkl',
        data_type=data_type,
    )
    dataset = Human36MWashedDataset(model, calc_mesh=True,
                                    root_dir='/home/wzeng/mydata/h3.6m/images_washed')
    generator = UV_Map_Generator(
        UV_height=256,
        UV_pickle=uv_prefix + '.pickle'
    )

    # create root folder for UV labels
    if UV_label_root is None:
        UV_label_root = dataset.root_dir.replace(
            '_washed', '_UV_map_{}'.format(uv_prefix[:-9]))

    if not os.path.isdir(UV_label_root):
        os.makedirs(UV_label_root)
        subs = [sub for sub in os.listdir(dataset.root_dir)
                if os.path.isdir(dataset.root_dir + '/' + sub)]
        for sub in subs:
            os.makedirs(UV_label_root + '/' + sub)
    else:
        print('{} folder exists, process terminated...'.format(UV_label_root))
        return

    # generate meshes, aligned with the 14-point ground truth
    batch_size = 64
    total_batch_num = dataset.length // batch_size + 1
    _loop = tqdm(range(total_batch_num), ncols=80)
    for batch_id in _loop:
        data = dataset[batch_id * batch_size: (batch_id + 1) * batch_size]
        meshes = data['meshes']
        input = data['lsp_joints']
        target_2d = data['gt2d']
        target_3d = data['gt3d']
        imagename = [UV_label_root + name for name in data['imagename']]

        transforms = map_3d_to_2d(input, target_2d, target_3d)

        # Important: the mesh should be centered at the origin!
        deformed_meshes = transforms(meshes)
        mesh_3d = deformed_meshes.detach().cpu().numpy()
        '''
        test_folder = '_test_radvani'
        if not os.path.isdir(test_folder):
            os.makedirs(test_folder)
        visualize(test_folder, data['imagename'], mesh_3d[:, :, :2].astype(int),
                  target_2d.detach().cpu().numpy().astype(int), dataset.root_dir)
        '''
        s = time()
        for name, mesh in zip(imagename, mesh_3d):
            UV_position_map, verts_backup = \
                generator.get_UV_map(mesh)
            imwrite(name, (UV_position_map * 255).astype(np.uint8))
            # write colorized coordinates to ply
if __name__ == '__main__':
    torch.backends.cudnn.enabled = True
    batch_size = 16
    max_batch_num = 40
    dataset = Joint2SMPLDataset('train_dataset.pickle', batch_size)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                            num_workers=0, drop_last=True)
    torch.set_default_dtype(torch.float64)
    device = torch.device('cuda')
    reg = Regressor(batch_size=batch_size, hidden_layer=3, hidden_dim=512).cuda()
    smpl = SMPLModel(device=device)
    loss_op = nn.L1Loss()
    optimizer = optim.Adagrad(reg.parameters(), lr=0.0002, weight_decay=1e-5)
    # optimizer = optim.Adam(reg.parameters(), lr=0.0002, betas=(0.5, 0.999),
    #                        weight_decay=1e-5)
    batch_num = 0
    ckpt_path = 'checkpoints_0217'
    if not os.path.isdir(ckpt_path):
        os.mkdir(ckpt_path)
    if batch_num > 0 and os.path.isfile('%s/regressor_%03d.pth' % (ckpt_path, batch_num)):
        state_dict = torch.load('%s/regressor_%03d.pth' % (ckpt_path, batch_num))
        reg.load_state_dict(state_dict)
    # copy current file into checkpoint folder to record parameters, ugly.
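    # The training loop itself is elided above. A minimal sketch of one
    # plausible shape for it (hedged: the batch keys follow the pickle written
    # by create_dataset, and supervising thetas directly — rather than joints
    # reconstructed through `smpl` — is an assumption).
    for epoch in range(max_batch_num):
        for batch in dataloader:
            joints_gt = batch['joints'].to(device)
            thetas_gt = batch['thetas'].to(device)
            thetas_pred = reg(joints_gt)
            loss = loss_op(thetas_pred, thetas_gt)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        torch.save(reg.state_dict(), '%s/regressor_%03d.pth' % (ckpt_path, epoch + 1))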
    'punching', 'shake_shoulders', 'shake_hips', 'jumping_jacks',
    'one_leg_jump', 'running_on_spot'
]
femaleids = sids[:5]
maleids = sids[5:]

# sidpid = sid + '_' + pid
regis_dir = './dyna_registrations/dyna_female.h5'
data_dir = './DFaust_67'
f = h5py.File(regis_dir, 'r')
comp_device = torch.device('cpu')
smplmodel = SMPLModel(device=comp_device)
# dmplmodel = DMPLModel(device=comp_device)
# dbsmodel = DBSModel(device=comp_device)

dataset = []
for femaleid in femaleids:
    # print('\n{} now is processing:'.format(maleid))
    data_fnames = glob.glob(os.path.join(data_dir, femaleid, '*_poses.npz'))
    for data_fname in tqdm(data_fnames):
        sidpid = data_fname[18:-10]
        verts = f[sidpid][()].transpose([2, 0, 1])  # h5py: [()] replaces the removed .value
        verts = torch.Tensor(verts).type(torch.float64)
        bdata = np.load(data_fname)
        betas = torch.Tensor(bdata['betas'][:10][np.newaxis]).squeeze().type(torch.float64)
import os
import torch
import pickle
import numpy as np
from smpl_torch_batch import SMPLModel
from tqdm import tqdm

is_cuda = torch.cuda.is_available()
device = torch.device("cuda" if is_cuda else "cpu")

male = ['50002', '50007', '50009', '50026', '50027']
female = ['50004', '50020', '50022', '50025']
hid = male + female

model_m = SMPLModel(
    model_path='data/human_dataset/smpl_models/model_300_m.pkl', device=device)
model_f = SMPLModel(
    model_path='data/human_dataset/smpl_models/model_300_f.pkl', device=device)

with open('data/human_dataset/test/D-FAUST/train.lst', 'r') as f:
    all_seq = f.read().split('\n')
all_seq = list(filter(lambda x: len(x) > 0, all_seq))
all_id = pickle.load(open('data/human_dataset/all_betas.pkl', 'rb'))
out_path = 'data/human_dataset/all_train_mesh'

for identity in hid:
    print('human id: ', identity)
    model = model_m if identity in male else model_f
    for seq in tqdm(all_seq):
        code_path = os.path.join('data/human_dataset/smpl_params', seq)
        seq_len = len(os.listdir(code_path))
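        # Hypothetical continuation of the export loop (hedged: the per-frame
        # file naming, the 'pose' key inside smpl_params, and the layout of
        # all_betas.pkl are all assumptions, not the original code).
        out_dir = os.path.join(out_path, identity, seq)
        os.makedirs(out_dir, exist_ok=True)
        beta = torch.tensor(all_id[identity][:10], dtype=torch.float64,
                            device=device).unsqueeze(0)
        for frame in range(seq_len):
            params = np.load(os.path.join(code_path, '%06d.npz' % frame))
            pose = torch.tensor(params['pose'], dtype=torch.float64,
                                device=device).view(1, -1)
            trans = torch.zeros((1, 3), dtype=torch.float64, device=device)
            with torch.no_grad():
                verts, _ = model(beta, pose, trans)
            model.write_obj(verts[0].cpu().numpy(),
                            os.path.join(out_dir, '%06d.obj' % frame))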
'''
Given a randomly generated human mesh, try to find the thetas by matching joints only.
'''
if __name__ == '__main__':
    if torch.cuda.is_available():
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    pose_size = 72
    beta_size = 10

    np.random.seed(9608)
    model = SMPLModel(device=device, model_path='./model_24_joints.pkl', simplify=True)
    if not os.path.isdir('joint2pose_result'):
        os.makedirs('joint2pose_result')
    loss_op = nn.L1Loss()
    betas = torch.zeros((1, beta_size), dtype=torch.float64, device=device)
    trans = torch.zeros((1, 3), dtype=torch.float64, device=device)
    for i in range(10):
        print('Test case %d:' % (i + 1))
        real_pose = torch.from_numpy((np.random.rand(1, pose_size) - 0.5) * 1)\
            .type(torch.float64).to(device)
        real_result, real_joints = model(betas, real_pose, trans)
        model.write_obj(real_result[0].detach().cpu().numpy(),
                        'joint2pose_result/real_mesh_{}.obj'.format(i))
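        # The per-case fitting loop is elided. A minimal sketch of recovering
        # theta by gradient descent on the joint error (hedged: the step count
        # and learning rate are assumptions, not the original settings).
        theta = torch.zeros((1, pose_size), dtype=torch.float64,
                            device=device, requires_grad=True)
        inner_opt = optim.Adam([theta], lr=0.01)
        for step in range(500):
            _, joints = model(betas, theta, trans)
            loss = loss_op(joints, real_joints.detach())
            inner_opt.zero_grad()
            loss.backward()
            inner_opt.step()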
def run_test():
    if platform == 'linux':
        os.environ['CUDA_VISIBLE_DEVICES'] = '2'
    data_type = torch.float32
    device = torch.device('cuda')
    pose_size = 72
    beta_size = 10

    np.random.seed(9608)
    model = SMPLModel(
        device=device,
        model_path='./model_lsp.pkl',
        data_type=data_type,
    )
    dataset = Human36MDataset(model, max_item=100, calc_mesh=True)

    # generate meshes, aligned with the 14-point ground truth
    case_num = 10
    data = dataset[:case_num]
    meshes = data['meshes']
    input = data['lsp_joints']
    target_2d = data['gt2d']
    target_3d = data['gt3d']

    transforms = map_3d_to_2d(input, target_2d, target_3d)

    # Important: the mesh should be centered at the origin!
    deformed_meshes = transforms(meshes)
    mesh_3d = deformed_meshes.detach().cpu().numpy()

    file_prefix = 'smpl_fbx_template'
    generator = UV_Map_Generator(UV_height=256, UV_pickle=file_prefix + '.pickle')
    test_folder = '_test_smpl_fbx'
    if not os.path.isdir(test_folder):
        os.makedirs(test_folder)

    # np.int is removed in recent NumPy; plain int is the drop-in replacement.
    visualize(test_folder, data['imagename'], mesh_3d[:, :, :2].astype(int),
              target_2d.detach().cpu().numpy().astype(int))

    s = time()
    for i, mesh in enumerate(mesh_3d):
        model.write_obj(mesh, '{}/real_mesh_{}.obj'.format(test_folder, i))  # weird.
        UV_position_map, verts_backup = \
            generator.get_UV_map(mesh)

        # write colorized coordinates to ply
        UV_scatter, _, _ = generator.render_point_cloud(verts=mesh)
        generator.write_ply('{}/colored_mesh_{}.ply'.format(test_folder, i), mesh)
        out = np.concatenate((UV_position_map, UV_scatter), axis=1)
        imsave('{}/UV_position_map_{}.png'.format(test_folder, i), out)
        resampled_mesh = generator.resample(UV_position_map)
        model.write_obj(resampled_mesh, '{}/recon_mesh_{}.obj'.format(test_folder, i))
    print('{} cases for {}s'.format(case_num, time() - s))
def main(args):
    base_dir = args.base_dir
    dataset = args.dataset
    dataset_dir = os.path.join(base_dir, dataset)
    annot = load_json(os.path.join(dataset_dir, 'annots.json'))
    model = SMPLModel(model_path='./data/model.pkl')
    samples = random.sample(range(1, len(annot)), 100)
    for item in samples:
        item = '%05d' % item
        param = annot[item]

        # load SMPL parameters
        pose = np.array(param['pose'])
        shape = np.array(param['betas'])
        scale = np.array(param['scale'])
        trans = np.array(param['trans'])

        # load camera parameters
        intri = np.array(param['intri'])
        extri = np.array(param['extri'])

        # load image
        img_path = os.path.join(dataset_dir, param['img_path'])
        img = cv2.imread(img_path)

        # Tensor.resize is deprecated; reshape gives the same (1, N) views here.
        shape = torch.from_numpy(shape).type(torch.FloatTensor).reshape(1, 10)
        pose = torch.from_numpy(pose).type(torch.FloatTensor).reshape(1, 72)
        trans = torch.from_numpy(trans).type(torch.FloatTensor).reshape(1, 3)
        scale = torch.from_numpy(scale).type(torch.FloatTensor)
        mesh, joints = model(shape, pose, trans, scale)
        mesh = mesh.numpy().reshape(6890, 3)
        joints = joints.numpy().reshape(24, 3)

        # We also provide 2D keypoints in the LSP format:
        # kp_2d = np.array(param['lsp_joints_2d'])

        # draw mask
        if dataset == 'trainset':
            img = draw_mask(dataset_dir, param, img)

        # draw keypoints
        im = draw_keypoints(joints, extri, intri, img)

        # draw bbox
        im = draw_bbox(param, im)

        # visualize, scaled so the longer image side fits an 800 px window
        ratio = min(800 / im.shape[0], 800 / im.shape[1])
        cv2.namedWindow("sample", 0)
        cv2.resizeWindow("sample", int(im.shape[1] * ratio), int(im.shape[0] * ratio))
        cv2.moveWindow("sample", 0, 0)
        cv2.imshow('sample', im / 255.)
        print(img_path)
        cv2.waitKey()
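# Hypothetical entry point for main() above (hedged: the flag names simply
# mirror the attributes it reads, base_dir and dataset; defaults are guesses).
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--base_dir', type=str, default='./data')
    parser.add_argument('--dataset', type=str, default='trainset')
    main(parser.parse_args())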
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
torch.backends.cudnn.enabled = True
batch_size = 64
max_batch_num = 40
# dataset = Joint2SMPLDataset('train_dataset.pickle', batch_size)
theta_var = 1.0
training_stage = 5
dataset = Joint2SMPLDataset('train_dataset_24_joints_1.0.pickle', batch_size,
                            fix_beta_zero=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                        num_workers=0, drop_last=True)
torch.set_default_dtype(torch.float64)
device = torch.device('cuda')
reg = AcosRegressor(batch_size=batch_size).cuda()
smpl = SMPLModel(device=device,
                 model_path='./model_24_joints.pkl',
                 simplify=True)
loss_op = nn.L1Loss()
optimizer = optim.Adam(reg.parameters(), lr=0.0005, betas=(0.5, 0.999),
                       weight_decay=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.25,
                                                 patience=1, verbose=True)
batch_num = 0
ckpt_path = 'checkpoints_0303_24_joints'
if not os.path.isdir(ckpt_path):
    os.mkdir(ckpt_path)
if batch_num > 0 and os.path.isfile('%s/regressor_%03d.pth' % (ckpt_path, batch_num)):
    state_dict = torch.load('%s/regressor_%03d.pth' % (ckpt_path, batch_num))
    reg.load_state_dict(state_dict)
# copy current file into checkpoint folder to record parameters, ugly.
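# This variant adds ReduceLROnPlateau, which must be fed a metric once per
# epoch. A hedged sketch of the stepping (the loss bookkeeping and batch keys
# are assumptions, mirroring the pickle written by create_dataset):
for epoch in range(max_batch_num):
    epoch_loss = 0.0
    for batch in dataloader:
        thetas_pred = reg(batch['joints'].to(device))
        loss = loss_op(thetas_pred, batch['thetas'].to(device))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    scheduler.step(epoch_loss / len(dataloader))  # plateau scheduler consumes the metric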
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers),
                                         drop_last=True)
dataset_smpl_test = SURREAL(train=False)
dataloader_smpl_test = torch.utils.data.DataLoader(dataset_smpl_test,
                                                   batch_size=opt.batchSize,
                                                   shuffle=False,
                                                   num_workers=int(opt.workers))
len_dataset = len(dataset)
# ========================================================== #

# ===================CREATE network================================= #
smpl = SMPLModel(device=torch.device('cuda'),
                 model_path='./data/basicModel_m_lbs_10_207_0_v1.1.0.pkl')
# takes a cuda torch variable repeated batch times
vertices = smpl.v_template  # network.mesh.vertices
vertices = vertices.cpu().detach().numpy()
network = AE_AtlasNet_Humans(inputmesh=vertices)
faces = network.mesh.faces  # smpl.faces
faces = [faces for i in range(opt.batchSize)]
faces = np.array(faces)
# faces.dtype = "float32"
faces = torch.from_numpy(faces).cuda()
print("face", faces)
print("vertices", vertices)
# vertices = np.array(vertices)
vertices = [vertices for i in range(opt.batchSize)]
            triface_pointer += 2
        elif len(ls) == 3:  # a triangle
            tri_face_lines.append(line + '\n')
            triface_pointer += 1
        else:
            print('unexpected face format')
    print(len(tri_face_lines), triface_pointer)
    assert(len(tri_face_lines) == trifaces.shape[0])
    with open(out_obj, 'w') as f:
        f.writelines(other_lines + tri_face_lines)

if __name__ == '__main__':
    device = torch.device('cuda')
    data_type = torch.float32
    pose_size = 72
    beta_size = 10
    model = SMPLModel(
        device=device,
        model_path='./model_lsp.pkl',
        data_type=data_type,
        simplify=True
    )
    quad_obj = 'untitled.obj'
    out_obj = 'smpl_fbx_template.obj'
    fbx_obj_verts = import_verts(quad_obj)
    # model.write_obj(fbx_obj_verts, 'hybrid_fbx_verts_SMPL_face.obj')
    triangulation(quad_obj, out_obj, model.faces)