Example #1
0
def compute_gpu_smpl(poses, shapes, get_joints=False):
    """Run the SMPL model once on a batch of pose/shape parameters.

    Args:
        poses: array of axis-angle pose parameters, one row per sample
            (assumed (N, 72) -- TODO confirm against the SMPL wrapper).
        shapes: array of shape (beta) parameters (assumed (N, 10)).
        get_joints: if True, also return the regressed 'cocoplus' joints.

    Returns:
        verts as a numpy array, or (verts, joints) when `get_joints` is True.
    """
    smpl = SMPL(SMPL_MODEL_PATH, joint_type='cocoplus')
    verts_tensor, Js_tensor, _ = smpl(tf.constant(shapes, dtype=tf.float32),
                                      tf.constant(poses, dtype=tf.float32),
                                      get_skin=True)
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())

    # allow_soft_placement must be True so ops without a GPU kernel fall
    # back to CPU.  BUGFIX: `allow_growth` is a field of tf.GPUOptions, not
    # tf.ConfigProto -- passing it to ConfigProto raises a ValueError.
    sess_config = tf.ConfigProto(
            gpu_options=tf.GPUOptions(allow_growth=True),
            allow_soft_placement=True,
            log_device_placement=False)
    # `with` guarantees the session is closed even if sess.run raises.
    with tf.Session(config=sess_config) as sess:
        sess.run(init)
        if get_joints:
            # Fetch both tensors in a single run so the graph executes once.
            verts, joints = sess.run([verts_tensor, Js_tensor])
            return verts, joints
        else:
            verts = sess.run(verts_tensor)
            return verts
def buid_graph_body(trans_0, pose_0, model_path, joints_0):
    """Build a graph that refines SMPL pose/translation to match 3D joints.

    Args:
        trans_0: (N, 3) initial per-frame global translations.
        pose_0: initial SMPL pose parameters (assumed (N, 72) -- TODO
            confirm against the SMPL wrapper).
        model_path: path to the SMPL model pickle.
        joints_0: target 3D joints to fit (assumed (N, 24, 3)).

    Returns:
        Tuple (graph, betas placeholder, pose variable, trans variable,
        Adam train op, scalar squared-error loss).

    NOTE(review): the (sic) name `buid_graph_body` is kept unchanged so
    existing callers keep working.
    """
    tf.reset_default_graph()
    g_2 = tf.Graph()

    with g_2.as_default():
        betas = tf.placeholder(dtype=tf.float32, shape=(trans_0.shape[0], 10))
        pose_new = tf.Variable(pose_0, dtype=tf.float32)
        trans_new = tf.Variable(trans_0, dtype=tf.float32)

        joints = tf.constant(joints_0, dtype=tf.float32)

        model = SMPL(model_path, joint_type=config.joint_type)
        # Called for its side effect of populating model.J_transformed.
        Js = model(betas, pose_new, get_skin=False)

        # Broadcast the (N, 3) translation over the 24 joints in one op
        # instead of stacking N*24 slices in a Python double loop.
        joints_new = model.J_transformed + tf.expand_dims(trans_new, axis=1)

        loss_restriction_3d = tf.square(joints - joints_new)

        loss = tf.reduce_sum(loss_restriction_3d)

        # `error` is the Adam minimize op (name kept for callers).
        error = tf.train.AdamOptimizer(1e-2).minimize(loss)

        return g_2, betas, pose_new, trans_new, error, loss
Example #3
0
    def __init__(self, config, pretrained_resnet_path=''):
        """Build the test-time model graph and session.

        Args:
            config: parsed experiment configuration; must provide
                `load_path` pointing at a pretrained checkpoint.
            pretrained_resnet_path: optional path to pretrained ResNet
                weights, forwarded to `self.prepare`.

        Raises:
            Exception: if `config.load_path` is not set.
        """
        self.config = config
        self.load_path = config.load_path

        # Config + path.
        if not config.load_path:
            raise Exception(
                '[!] You need to specify `load_path` to load a pretrained model'
            )
        if not os.path.exists(config.load_path + '.index'):
            print('{} doesnt exist..'.format(config.load_path))
            # NOTE(review): interactive debugger trap left in so a missing
            # checkpoint can be inspected; remove for non-interactive runs.
            import ipdb
            ipdb.set_trace()

        # Model parameters.
        self.batch_size = config.batch_size
        self.sequence_length = config.sequence_length
        self.pred_mode = config.pred_mode
        self.num_conv_layers = config.num_conv_layers
        # Temporal field of view: 4 frames per conv layer plus the center.
        self.fov = self.num_conv_layers * 4 + 1
        self.use_optcam = config.use_optcam
        self.delta_t_values = [int(dt) for dt in config.delta_t_values]

        # Data parameters.
        self.img_size = 224

        # Other parameters.
        self.num_output = 85
        self.smpl_model_path = config.smpl_model_path
        self.smpl = SMPL(self.smpl_model_path)
        # (removed a duplicated `self.smpl_model_path` assignment here)

        self.encoder_vars = []

        # Prepare model: placeholder for batched image sequences.
        input_size = (self.batch_size, self.sequence_length, self.img_size,
                      self.img_size, 3)
        self.images_pl = tf.placeholder(tf.float32, shape=input_size)
        self.f_hal = get_hallucinator_model()
        self.f_image_enc = get_image_encoder()
        self.f_temporal_enc = get_temporal_encoder()

        # One omega predictor for the current frame (dt=0) plus one per
        # requested temporal offset.
        self.omegas_pred = {0: self.make_omega_pred(use_optcam=False)}
        for dt in self.delta_t_values:
            self.omegas_pred[dt] = self.make_omega_pred(
                use_optcam=self.use_optcam)

        # Starting point for IEF: mean camera + 72-D pose + shape params.
        mean_cams, mean_shape, mean_pose = self.load_mean_params()
        self.theta_mean = tf.concat(
            (mean_cams, tf.reshape(mean_pose, (-1, 72)), mean_shape), axis=1)
        self.build_test_model()

        # Smaller fraction will take up less GPU space, but might be slower.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        self.prepare(resnet_path=pretrained_resnet_path)
def smpl_regression(model_path):
    """Construct a fresh graph that regresses SMPL joints for one sample.

    Args:
        model_path: path to the SMPL model pickle.

    Returns:
        Tuple (graph, betas placeholder, thetas placeholder, trans
        placeholder, translated joints tensor, global joint transforms).
    """
    tf.reset_default_graph()
    graph = tf.Graph()

    with graph.as_default():
        # Single-sample placeholders: shape betas, axis-angle pose, and a
        # global translation added after the skeleton regression.
        shape_ph = tf.placeholder(dtype=tf.float32, shape=[1, 10])
        pose_ph = tf.placeholder(dtype=tf.float32, shape=[1, 72])
        trans_ph = tf.placeholder(dtype=tf.float32, shape=[1, 3])

        smpl_model = SMPL(model_path, joint_type=config.joint_type)
        # The call populates smpl_model.J_transformed as a side effect;
        # the first return value is not used here.
        joints_raw, a_global = smpl_model(shape_ph,
                                          pose_ph,
                                          get_skin=False,
                                          get_A_global=True)
        translated_joints = smpl_model.J_transformed + trans_ph
        return graph, shape_ph, pose_ph, trans_ph, translated_joints, a_global
Example #5
0
    def __init__(self, config, data_loader, mocap_loader):
        """Set up the HMR training graph, losses, and supervisor.

        Args:
            config: experiment configuration (paths, sizes, weights, lrs).
            data_loader: dict with keys 'image' and 'label'; when 3D labels
                are available it additionally has 'label3d' and 'has3d'.
            mocap_loader: tuple (pose, shape) of MoCap data, consumed by
                the discriminator.
        """
        # Config + path
        self.config = config
        self.model_dir = config.model_dir
        self.load_path = config.load_path

        self.data_format = config.data_format
        self.smpl_model_path = config.smpl_model_path
        self.pretrained_model_path = config.pretrained_model_path
        self.encoder_only = config.encoder_only
        self.use_3d_label = config.use_3d_label

        # Data size
        self.img_size = config.img_size
        self.num_stage = config.num_stage
        self.batch_size = config.batch_size
        self.max_epoch = config.epoch

        self.num_cam = 3
        self.proj_fn = batch_orth_proj_idrot

        self.num_theta = 72  # 24 * 3
        self.total_params = self.num_theta + self.num_cam + 10

        # Data
        num_images = num_examples(config.datasets)
        num_mocap = num_examples(config.mocap_datasets)

        # BUGFIX: these are iteration counts, so use integer division --
        # under Python 3 (this file uses py3-only pickle options), `/`
        # would yield floats.
        self.num_itr_per_epoch = num_images // self.batch_size
        self.num_mocap_itr_per_epoch = num_mocap // self.batch_size

        # First make sure data_format is right
        if self.data_format == 'NCHW':
            # B x H x W x 3 --> B x 3 x H x W
            data_loader['image'] = tf.transpose(data_loader['image'],
                                                [0, 3, 1, 2])

        self.image_loader = data_loader['image']
        self.kp_loader = data_loader['label']

        if self.use_3d_label:
            self.poseshape_loader = data_loader['label3d']
            # image_loader[3] is N x 2, first column is 3D_joints gt existence,
            # second column is 3D_smpl gt existence
            self.has_gt3d_joints = data_loader['has3d'][:, 0]
            self.has_gt3d_smpl = data_loader['has3d'][:, 1]

        self.pose_loader = mocap_loader[0]
        self.shape_loader = mocap_loader[1]

        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.log_img_step = config.log_img_step

        # For visualization:
        num2show = np.minimum(6, self.batch_size)
        # Take half from front & back.  BUGFIX: `//` keeps the np.arange
        # argument an int; a float argument is deprecated and the resulting
        # float64 hstack cannot be fed to an int32 tf.constant.
        self.show_these = tf.constant(
            np.hstack(
                [np.arange(num2show // 2), self.batch_size - np.arange(3) - 1]),
            tf.int32)

        # Model spec
        self.model_type = config.model_type
        self.keypoint_loss = keypoint_l1_loss

        # Optimizer, learning rate
        self.e_lr = config.e_lr
        self.d_lr = config.d_lr
        # Weight decay
        self.e_wd = config.e_wd
        self.d_wd = config.d_wd
        self.e_loss_weight = config.e_loss_weight
        self.d_loss_weight = config.d_loss_weight
        self.e_3d_weight = config.e_3d_weight

        self.optimizer = tf.train.AdamOptimizer

        # Instantiate SMPL
        self.smpl = SMPL(self.smpl_model_path)
        self.E_var = []
        self.build_model()

        # Logging: choose which pretrained variables to restore, based on
        # the checkpoint flavor named in the path.
        init_fn = None
        if self.use_pretrained():
            # Make custom init_fn
            print("Fine-tuning from %s" % self.pretrained_model_path)
            if 'resnet_v2_50' in self.pretrained_model_path:
                resnet_vars = [
                    var for var in self.E_var if 'resnet_v2_50' in var.name
                ]
                self.pre_train_saver = tf.train.Saver(resnet_vars)
            elif 'pose-tensorflow' in self.pretrained_model_path:
                resnet_vars = [
                    var for var in self.E_var if 'resnet_v1_101' in var.name
                ]
                self.pre_train_saver = tf.train.Saver(resnet_vars)
            else:
                # Unknown flavor: restore everything the checkpoint has.
                self.pre_train_saver = tf.train.Saver()

            def load_pretrain(sess):
                # Supervisor calls this once after variable initialization.
                self.pre_train_saver.restore(sess, self.pretrained_model_path)

            init_fn = load_pretrain

        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=5)
        self.summary_writer = tf.summary.FileWriter(self.model_dir)
        self.sv = tf.train.Supervisor(logdir=self.model_dir,
                                      global_step=self.global_step,
                                      saver=self.saver,
                                      summary_writer=self.summary_writer,
                                      init_fn=init_fn)
        # Session config only; the Supervisor creates the session later.
        gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess_config = tf.ConfigProto(allow_soft_placement=False,
                                          log_device_placement=False,
                                          gpu_options=gpu_options)
            joints.append(joint)

    return joints


if __name__ == '__main__':
    # Parse command-line arguments into the module-level `config` object.
    config(sys.argv)

    # Setup models: gender-specific SMPL bodies.  The differing
    # capitalization of the two pickle filenames matches the upstream
    # SMPL release -- do not "fix" it.
    smpl_male = load_model('{}/basicmodel_m_lbs_10_207_0_v1.0.0.pkl'.format(
        config.smpl_model_dir))
    smpl_female = load_model('{}/basicModel_f_lbs_10_207_0_v1.0.0.pkl'.format(
        config.smpl_model_dir))

    # Gender-neutral model, loaded both through the original loader and
    # through the TF SMPL wrapper.
    smpl_neutral_py = load_model(config.neutral_model_path)
    smpl_neutral = SMPL(config.neutral_model_path)

    # Pull the cocoplus joint regressor straight out of the model pickle.
    # encoding='latin1' is required to read the py2-era pickle under py3.
    with open(config.neutral_model_path, 'rb') as f:
        dd = pickle.load(f, encoding='latin1')
        regressor = dd['cocoplus_regressor']

    # Get original labels.
    all_labels = sorted(glob('{}/sequenceFiles/*.pkl'.format(config.base_dir)))

    sess = tf.Session()

    if not exists(config.out_dir):
        makedirs(config.out_dir)

    # Convert each sequence's shape to the neutral model and save it.
    for labels_pkl in all_labels:
        save_neutral_shape(labels_pkl, config.out_dir, sess)
Example #7
0
#surreal_util.draw_joints2D(
#                rgb,
#        #        # 24 joints
#                surreal_util.project_vertices(smpl_joints3D, intrinsic, extrinsic),
#                None, m.kintree_table, color = 'b')

#smpl_model_path = "./models/basicModel_m_lbs_10_207_0_v1.0.0.pkl,neutral_smpl_with_cocoplus_reg.pkl"
# NOTE(review): the path below ends with a stray comma inside the string --
# probably a leftover from editing the commented line above; confirm that
# SMPL() tolerates (or strips) it.
smpl_model_path = "./models/neutral_smpl_with_cocoplus_reg.pkl,"
# Placeholders for one batch of ground-truth SMPL shape/pose parameters.
# `batch_size`, `gt_shape`, `gt_poses` are assumed to be defined earlier
# in this script -- TODO confirm.
shapes_gt_pl = tf.placeholder(tf.float32, shape=[batch_size, 10])
poses_gt_pl = tf.placeholder(tf.float32, shape=[batch_size, 72])

with tf.Session() as sess:
    # NOTE(review): the initializers run before the SMPL graph below is
    # built; if SMPL() creates tf.Variables they would be uninitialized
    # here -- verify against the SMPL wrapper implementation.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    # Build the SMPL layer with LSP-style joints and fetch the transformed
    # joint locations for the ground-truth pose/shape.
    smpl = SMPL(smpl_model_path, joint_type='lsp')
    verts_gt, Js_gt, _, J_transformed_gt = smpl(shapes_gt_pl,
                                                poses_gt_pl,
                                                get_skin=True,
                                                trans=None,
                                                idx=1)

    #verts, Js, _, J_transformed = smpl(shapes_gt_pl, self.poses_gt_pl,
    #         get_skin = True, trans = None, idx = 1)
    feed_dict = {
        shapes_gt_pl: np.expand_dims(gt_shape, 0),
        poses_gt_pl: np.expand_dims(gt_poses, 0),
    }
    # Evaluate only the joints; the vertices are built but not fetched.
    J_transformed_np = sess.run(J_transformed_gt, feed_dict)
    print("J_transformed_np shape ", J_transformed_np.shape)
Example #8
0
    fig = plt.figure(1)
    plt.imshow(rend_img)
    return fig


# define parser
parser = argparse.ArgumentParser()
# Index of the saved latent files (latents/pose<num>.npy etc.) to load.
parser.add_argument('num', type=int)
args = parser.parse_args()

# define some paths (cluster-specific absolute paths)
smpl_model_path = '/gpfs/milgram/project/yildirim/hakan/hmr/models/neutral_smpl_with_cocoplus_reg.pkl'
smpl_face_path = '/gpfs/milgram/project/yildirim/hakan/hmr/src/tf_smpl/smpl_faces.npy'

# define SMPL model and renderer
smpl = SMPL(smpl_model_path)
renderer = vis_util.SMPLRenderer(face_path=smpl_face_path)

# define camera, position and shape parameters
# cam = np.load('latents/cam'+str(args.num)+'.npy').flatten()
# Fixed camera [1., 0, 0] -- presumably [scale, tx, ty] in the HMR
# weak-perspective convention; confirm against the renderer.
cam = np.array([1., 0, 0], np.float32)
pose = np.load('latents/pose' + str(args.num) + '.npy')
shape = np.load('latents/shape' + str(args.num) + '.npy')
# Wrapped as TF variables, presumably so they can be optimized further
# down the script -- TODO confirm.
cam_tf = tf.Variable(cam)
pose = tf.Variable(pose)
shape = tf.Variable(shape)

# define pre-processing dict, no clue why they require it
proc_param = {
    'end_pt': np.array([339, 339]),
    'img_size': 230,
Example #9
0
    def __init__(self,
                 config,
                 sequence_length,
                 resnet_path='',
                 sess=None,
                 precomputed_phi=False):
        """Build the temporal-prediction test graph and session.

        Args:
            config: experiment configuration; must provide `load_path`
                pointing at a pretrained checkpoint (checked below).
            sequence_length: number of frames per input sequence.
            resnet_path: optional ResNet weights path passed to `prepare`.
            sess: existing tf.Session to reuse; a new one is created when
                None.
            precomputed_phi: if True, inputs are precomputed 2048-D image
                features instead of raw image frames.
        """
        self.config = config
        self.load_path = config.load_path
        tf.set_random_seed(config.seed)

        self.num_conv_layers = 3
        # Temporal field of view: 4 frames per conv layer plus the center.
        self.fov = self.num_conv_layers * 4 + 1
        self.sequence_length = sequence_length
        self.use_delta_from_pred = config.use_delta_from_pred
        self.use_optcam = config.use_optcam
        self.precomputed_phi = precomputed_phi

        # Config + path
        if not config.load_path:
            raise Exception(
                'You need to specify `load_path` to load a pretrained model')
        if not osp.exists(config.load_path + '.index'):
            print('{} doesnt exist'.format(config.load_path))
            # NOTE(review): interactive debugger trap for a missing
            # checkpoint; remove for non-interactive runs.
            import ipdb
            ipdb.set_trace()

        # Data
        self.batch_size = config.batch_size
        self.img_size = config.img_size
        self.E_var = []
        self.pad_edges = config.pad_edges

        self.smpl_model_path = config.smpl_model_path
        self.use_hmr_ief = False

        # 85 outputs -- presumably 3 cam + 72 pose + 10 shape; confirm.
        self.num_output = 85

        if precomputed_phi:
            # Sequences of 2048-D image features (phi).
            input_size = (self.batch_size, self.sequence_length, 2048)
        else:
            # Sequences of raw RGB frames.
            input_size = (self.batch_size, self.sequence_length, self.img_size,
                          self.img_size, 3)
        self.images_pl = tf.placeholder(tf.float32, shape=input_size)

        # A "movie strip" is one fov-sized window of 2048-D features.
        strip_size = (self.batch_size, self.fov, 2048)
        self.movie_strips_pl = tf.placeholder(tf.float32, shape=strip_size)

        # Model Spec
        self.f_image_enc = get_image_encoder()
        self.f_temporal_enc = get_temporal_encoder()
        self.f_prediction_ar = get_prediction_model()

        self.smpl = SMPL(self.smpl_model_path)
        self.omegas_movie_strip = self.make_omega_pred()
        self.omegas_pred = self.make_omega_pred(use_optcam=True)

        # HMR Model Params
        self.num_stage = 3
        self.total_params = 85

        # Order matters: mean omega must exist before the graphs are built,
        # and E_vars are collected only after both graphs exist.
        self.load_mean_omega()
        self.build_temporal_encoder_model()
        self.build_auto_regressive_model()
        self.update_E_vars()

        if sess is None:
            # Cap GPU memory so other processes can share the device.
            options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
            self.sess = tf.Session(config=tf.ConfigProto(gpu_options=options))
        else:
            self.sess = sess

        # Load data.
        self.prepare(resnet_path)