Example #1
    def _init_create_networks(self):
        # generator network
        self._G = self._create_generator()
        #self._G.init_weights()
        if len(self._gpu_ids) > 1:
            self._G = torch.nn.DataParallel(self._G, device_ids=self._gpu_ids)
        if torch.cuda.is_available():
            self._G.cuda()

        self._FC = self._create_fcnet()
        self._FC.init_weights()
        if len(self._gpu_ids) > 1:
            self._FC = torch.nn.DataParallel(self._FC,
                                             device_ids=self._gpu_ids)
        if torch.cuda.is_available():
            self._FC.cuda()

        # Initialize MANO layer
        mano_layer_right = ManoLayer(
            mano_root='/home/enric/libraries/manopth/mano/models/',
            side='right',
            use_pca=True,
            ncomps=45,
            flat_hand_mean=True)
        if torch.cuda.is_available():
            mano_layer_right = mano_layer_right.cuda()
        self._MANO = mano_layer_right

        # Discriminator network
        self._D = self._create_discriminator()
        self._D.init_weights()
        if torch.cuda.is_available():
            self._D.cuda()
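Example #1 only constructs the networks; below is a minimal standalone sketch of how the MANO layer it creates could be called afterwards. The batch size, the zero pose/shape tensors, and the forward call are illustrative assumptions, not part of the original snippet.

import torch
from manopth.manolayer import ManoLayer

# Same constructor arguments as in _init_create_networks above.
mano = ManoLayer(
    mano_root='/home/enric/libraries/manopth/mano/models/',
    side='right', use_pca=True, ncomps=45, flat_hand_mean=True)

batch_size = 4
pose = torch.zeros(batch_size, 48)   # 3 global rotation params + 45 PCA pose components
shape = torch.zeros(batch_size, 10)  # MANO shape coefficients
verts, joints = mano(pose, shape)    # verts: (4, 778, 3), joints: (4, 21, 3), in millimetres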
Example #2
# NOTE: project-specific helpers used below (Encoder_BH2MANO, get_hand_colors,
# gen_3d_pose_flow, get_ss_tu_tv, get_bg, create_synth) come from the
# surrounding repository and are not shown here.
import os
import pickle
import random

import imageio
import numpy as np
import torch
from tqdm import tqdm

from manopth.manolayer import ManoLayer


def main():
       mano_layer = ManoLayer(mano_root='mano/models', use_pca=True, ncomps=6, flat_hand_mean=False, side='right')
       gpu = True
       num_flows = 2
       flow_length = 10

       pd = np.load('normalized_bh_inMANOorder.npy') ## Need to Download it
       model_path = 'BH2MANO_model/model_BH2MANO.pth'

       bg_path = 'BG_data/'
       bg_files = os.listdir(os.path.join(os.getcwd(), bg_path))

       data_dir = 'results/'

       enc_layer = Encoder_BH2MANO()
       enc_layer.load_state_dict(torch.load(model_path))
       if gpu:
              enc_layer.cuda()
              mano_layer.cuda()

       mano_layer.eval()
       enc_layer.eval()

       colors = get_hand_colors()

       gt = {}
       gt['joints_2d'] = []
       gt['cam_params'] = []
       gt['joints_3d'] = []
       gt['verts_3d'] = []
       for frame_idx in tqdm(range(num_flows)):

              pose_flow = gen_3d_pose_flow(pd.reshape((pd.shape[0], pd.shape[1] * pd.shape[2])), flow_length=flow_length)

              samples = pose_flow
              input_pose_joints_ = torch.tensor(samples).float().cuda()
              input_pose_joints = input_pose_joints_.view(input_pose_joints_.size(0), -1)
              pose_param,shape_param = enc_layer(input_pose_joints)
              pose_param[:, :3] *= 0.
              pose_param[:, 0] += np.pi
              shape_param = torch.rand(1, 10).expand(pose_param.size(0), -1).cuda().float() * 4. - 2.

              hand_verts, hand_joints = mano_layer(pose_param.clone(), shape_param.clone())
              hand_verts = hand_verts.cpu().detach().numpy() / 1000.
              hand_joints = hand_joints.cpu().detach().numpy() / 1000.

              f = mano_layer.th_faces.cpu().detach().numpy()

              color = colors[random.randint(0, 26)]
              size = 224
              w, h = size, size

              # flow_length = pose_flow.shape[0]
              assert flow_length == pose_flow.shape[0]

              ss, tu, tv, rot = get_ss_tu_tv(hand_verts[0], hand_joints[0], w, h)

              ss_end, tu_end, tv_end, rot_end = get_ss_tu_tv(hand_verts[-1], hand_joints[-1], w, h)
              rot_var_speed = random.uniform(0, 0.6) # random rotation speed

              ## Get Background
              while True:
                     bg_orig = imageio.imread(os.path.join(bg_path, random.choice(bg_files)))
                     if (bg_orig.shape[0] > size) and (bg_orig.shape[1] > size):  # get a background that is large enough
                            break
              bg_cent_x, bg_cent_y = get_bg(bg_orig.shape, size)
              bg_cent_end_x, bg_cent_end_y = get_bg(bg_orig.shape, size)
              bg = bg_orig[bg_cent_x - int(size / 2): bg_cent_x + int(size / 2),
                   bg_cent_y - int(size / 2): bg_cent_y + int(size / 2), :]

              ## Collect GTs
              images = []
              masks = []
              joints_2d = np.zeros((flow_length, 42))
              cam_params = np.zeros((flow_length, 27))
              joints_3d  = np.zeros((flow_length, 21, 3))
              verts_3d = np.zeros((flow_length, 778, 3))
              for i in range(flow_length):

                     img, mask, vert_3d, joint_3d, vert, joint = create_synth(hand_verts[i], hand_joints[i], color, f, ss, tu, tv, rot, w, h, bg)
                     images.append(img)
                     masks.append(mask * 255)
                     cam_params[i, :] = np.concatenate([np.array([1., ss, tu, tv]), rot,
                                                        pose_param[i, 3:].detach().cpu().numpy(),
                                                        shape_param[i].detach().cpu().numpy()], 0)
                     joints_2d[i, :] = joint[:,:2].reshape((42))
                     joints_3d[i, :, :] = joint_3d
                     verts_3d[i, :, :] = vert_3d

                     ss = ss + (ss_end - ss) / flow_length * 0.5
                     tu = tu + (tu_end - tu) / flow_length * 0.2
                     tv = tv + (tv_end - tv) / flow_length * 0.2
                     rot = rot + (rot_end - rot) / flow_length * rot_var_speed
                     bg_cent_x = int(bg_cent_x + (bg_cent_end_x - bg_cent_x) / flow_length)
                     bg_cent_y = int(bg_cent_y + (bg_cent_end_y - bg_cent_y) / flow_length)
                     bg = bg_orig[bg_cent_x - int(size / 2):bg_cent_x + int(size / 2),
                          bg_cent_y - int(size / 2):bg_cent_y + int(size / 2), :]

              gt['joints_2d'].append(joints_2d)
              gt['joints_3d'].append(joints_3d)
              gt['verts_3d'].append(verts_3d)
              gt['cam_params'].append(cam_params)
              imageio.mimsave(data_dir + 'gifs/%s.gif' % (frame_idx), images)
              imageio.mimsave(data_dir + 'masks/synth_%s_mask.gif' % (frame_idx), masks)
       with open(data_dir + 'ground_truths.pickle', 'wb') as fo:
              pickle.dump(gt, fo, protocol=pickle.HIGHEST_PROTOCOL)
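The ground truths pickled at the end of main() can be read back later. A short sketch, assuming the same results/ directory layout used above; the shapes simply echo the arrays allocated in main():

import pickle

with open('results/ground_truths.pickle', 'rb') as fi:
    gt = pickle.load(fi)

# One array per generated flow, shaped as allocated in main().
print(len(gt['joints_2d']))        # num_flows entries
print(gt['joints_2d'][0].shape)    # (flow_length, 42)
print(gt['joints_3d'][0].shape)    # (flow_length, 21, 3)
print(gt['verts_3d'][0].shape)     # (flow_length, 778, 3)
print(gt['cam_params'][0].shape)   # (flow_length, 27)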
Example #3
    layer = ManoLayer(  # the start of this snippet is truncated; 'layer = ManoLayer(' is inferred from its use below
        mano_root=args.mano_root,
        ncomps=args.mano_ncomps)  # instantiation only runs __init__; forward() is not executed yet
    n_components = 6
    rot = 3

    # Generate random pose coefficients
    pose_params = torch.rand(args.batch_size, n_components + rot)
    pose_params.requires_grad = True
    if args.random_shape:
        shape = torch.rand(args.batch_size, 10)
    else:
        shape = torch.zeros(1)  # Hack to act like None for PyTorch JIT
    if args.cuda:
        pose_params = pose_params.cuda()  # note: tensors must be reassigned after .cuda(), unlike layer.cuda(), which moves the module in place
        shape = shape.cuda()
        layer.cuda()

    # Loop for forward/backward quick profiling (left commented out here)
    # for idx in tqdm(range(args.iters)):
    #     # Forward pass
    #     verts, Jtr = layer(pose_params, th_betas=shape)  # forward() is executed here
    #
    #     # Backward pass
    #     loss = torch.norm(verts)
    #     loss.backward()

    if not args.no_display:
        verts, Jtr = layer(pose_params, th_betas=shape)
        joints = Jtr.cpu().detach()
        verts = verts.cpu().detach()
        # Draw obtained vertices and joints
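        # NOTE: the original drawing call is truncated here; the matplotlib
        # scatter below is an illustrative assumption, not the original code.
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the 3d projection)

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(verts[0, :, 0], verts[0, :, 1], verts[0, :, 2], s=1, alpha=0.3)
        ax.scatter(joints[0, :, 0], joints[0, :, 1], joints[0, :, 2], c='r', s=20)
        plt.show()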
smallfinger_secondjoint_vertices = [618,
621, 624, 625, 626, 628, 629, 631, 632, 633,
634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646,
647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659,
660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672,
673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685,
686, 687, 688, 689, 690, 691, 692, 693, 694, 695]
smallfinger_thirdjoint_vertices = [644, 645, 646,
651, 652, 653, 654, 656, 657,
660, 661, 662, 663, 664, 665, 666, 667, 670, 671, 672,
673, 674, 675, 676, 677, 678, 679,
686, 687, 688, 689, 690, 691, 692, 693, 694, 695]

bigfinger_secondjoint_vertices = [697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724,
725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737,
738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750,
751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763,
764, 765, 766, 767, 768]
bigfinger_thirdjoint_vertices = [745, 744, 766, 729, 735, 751, 765, 730, 752, 764, 738, 728, 768,
       727, 767, 743, 747, 720, 748, 717, 750, 734, 761, 737, 724, 762,
       763, 726, 740, 719, 746, 718, 725, 722, 723, 733, 749, 716, 731,
       721, 736, 759, 739, 760, 756]

# Initialize MANO layer
MANO = ManoLayer(
    mano_root='/home/enric/libraries/manopth/mano/models/', side='right', use_pca=True, ncomps=45, flat_hand_mean=True)
#if torch.cuda.device_count() > 1:
    #print("Let's use", torch.cuda.device_count(), "GPUs!")
    #MANO = torch.nn.DataParallel(MANO)
MANO = MANO.cuda()
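A brief sketch of how the vertex index lists above might be used with the layer just created; the zero pose/shape tensors and the variable names below are assumptions for illustration, not part of the original snippet.

import torch

batch_size = 1
pose = torch.zeros(batch_size, 48).cuda()   # 3 global rotation params + 45 PCA components
shape = torch.zeros(batch_size, 10).cuda()  # MANO shape coefficients
verts, joints = MANO(pose, shape)           # verts: (1, 778, 3), joints: (1, 21, 3)

# Select the vertex groups defined above (list indexing works on torch tensors).
bigfinger_tip_verts = verts[:, bigfinger_thirdjoint_vertices, :]
smallfinger_tip_verts = verts[:, smallfinger_thirdjoint_vertices, :]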