Example #1
def make_predicted_mesh_neutral(predicted_params_path, flame_model_path):
    params = np.load(predicted_params_path,
                     allow_pickle=True,
                     encoding='latin1')
    #print(params)
    params = params[()]
    pose = np.zeros(15)
    #expression = np.zeros(100)
    shape = np.hstack(
        (params['shape'], np.zeros(300 - params['shape'].shape[0])))
    #pose = np.hstack((params['pose'], np.zeros(15-params['pose'].shape[0])))
    expression = np.hstack(
        (params['expression'], np.zeros(100 - params['expression'].shape[0])))
    flame_general_model = load_model(flame_model_path)
    generated_neutral_mesh = verts_decorated(
        #ch.array([0.0,0.0,0.0]),
        ch.array(params['cam']),
        ch.array(pose),
        ch.array(flame_general_model.r),
        flame_general_model.J_regressor,
        ch.array(flame_general_model.weights),
        flame_general_model.kintree_table,
        flame_general_model.bs_style,
        flame_general_model.f,
        bs_type=flame_general_model.bs_type,
        posedirs=ch.array(flame_general_model.posedirs),
        betas=ch.array(
            np.hstack((shape, expression))
        ),  #betas=ch.array(np.concatenate((theta[0,75:85], np.zeros(390)))), #
        shapedirs=ch.array(flame_general_model.shapedirs),
        want_Jtr=True)
    # neutral_mesh = Mesh(v=generated_neutral_mesh.r, f=generated_neutral_mesh.f)
    neutral_mesh = trimesh.Trimesh(vertices=generated_neutral_mesh.r,
                                   faces=generated_neutral_mesh.f)
    return neutral_mesh
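A minimal standalone sketch of the zero-padding pattern used above, assuming FLAME expects 300 shape and 100 expression coefficients (the predicted size of 100 below is illustrative):

import numpy as np

# Pad a shorter predicted coefficient vector with zeros so it matches the
# size the model expects (same np.hstack pattern as in the function above).
predicted_shape = np.random.rand(100)
shape_full = np.hstack((predicted_shape, np.zeros(300 - predicted_shape.shape[0])))
assert shape_full.shape == (300,)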
Example #2
def load_shape_models(nViews, opt_model_dir, dv, model, frameId,
                      mesh_v_opt_save_path, pose, trans, betas):

    shape_model = [None] * nViews
    v_template = ch.array(model.v_template) + dv

    if exists(mesh_v_opt_save_path):
        opti_mesh = Mesh(filename=mesh_v_opt_save_path)
        v_opt = ch.array(opti_mesh.v.copy())
        compute = False
    else:
        v_opt = None
        compute = True

    for i in range(nViews):
        if v_opt is not None:
            v_template = v_opt
            model.betas[:] = 0
            np_betas = np.zeros_like(model.betas)
        else:
            v_template = ch.array(model.v_template) + dv
            np_betas = np.zeros_like(model.betas)
            np_betas[:len(betas[i])] = betas[i]

        shape_model[i] = verts_decorated(v_template=v_template,
                                         pose=ch.array(pose[i]),
                                         trans=ch.array(trans[i]),
                                         J=model.J_regressor,
                                         kintree_table=model.kintree_table,
                                         betas=ch.array(np_betas),
                                         weights=model.weights,
                                         posedirs=model.posedirs,
                                         shapedirs=model.shapedirs,
                                         bs_type='lrotmin',
                                         bs_style='lbs',
                                         f=model.f)

    return shape_model, compute
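A hedged sketch of the cache-or-compute pattern above; load_vertices is a hypothetical loader standing in for Mesh(filename=...).v:

from os.path import exists

def cached_template(mesh_path, load_vertices):
    # Reuse the vertices of a previously optimized mesh if one was saved;
    # otherwise tell the caller it still has to run the optimization.
    if exists(mesh_path):
        return load_vertices(mesh_path), False  # (v_opt, compute=False)
    return None, True                           # (no cache, compute=True)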
Example #3
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       n_betas=10,
                       regs=None,
                       conf=None,
                       viz=False):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joints ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    if try_both_orient:
        flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
            cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
        flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        # store here the final error for both orientations,
        # and pick the orientation resulting in the lowest error
        errors = []

    svs = []
    cams = []
    for o_id, orient in enumerate(orientations):
        # initialize the shape to the mean shape in the SMPL training set
        betas = ch.zeros(n_betas)

        # initialize the pose by using the optimized body orientation and the
        # pose prior
        init_pose = np.hstack((orient, prior.weights.dot(prior.means)))

        # instantiate the model:
        # verts_decorated allows us to define how many
        # shape coefficients (directions) we want to consider (here, n_betas)
        sv = verts_decorated(trans=ch.zeros(3),
                             pose=ch.array(init_pose),
                             v_template=model.v_template,
                             J=model.J_regressor,
                             betas=betas,
                             shapedirs=model.shapedirs[:, :, :n_betas],
                             weights=model.weights,
                             kintree_table=model.kintree_table,
                             bs_style=model.bs_style,
                             f=model.f,
                             bs_type=model.bs_type,
                             posedirs=model.posedirs)

        # make the SMPL joints depend on betas
        Jdirs = np.dstack([
            model.J_regressor.dot(model.shapedirs[:, :, i])
            for i in range(len(betas))
        ])
        J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = global_rigid_transformation(sv.pose,
                                                    J_onbetas,
                                                    model.kintree_table,
                                                    xp=ch)
        Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        # add the head joint, corresponding to a vertex...
        Jtr = ch.vstack((Jtr, sv[head_id]))

        # ... and add the joint id to the list
        if o_id == 0:
            smpl_ids.append(len(Jtr) - 1)

        # update the weights using confidence values
        weights = base_weights * conf[
            cids] if conf is not None else base_weights

        # project SMPL joints on the image plane using the estimated camera
        cam.v = Jtr

        # data term: distance between observed and estimated joints in 2D
        obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
            (j2d[cids] - cam[smpl_ids]), sigma))

        # mixture of gaussians pose prior
        pprior = lambda w: w * prior(sv.pose)
        # joint angles pose prior, defined over a subset of pose parameters:
        # 55: left elbow,  90deg bend at -np.pi/2
        # 58: right elbow, 90deg bend at np.pi/2
        # 12: left knee,   90deg bend at np.pi/2
        # 15: right knee,  90deg bend at np.pi/2
        alpha = 10
        my_exp = lambda x: alpha * ch.exp(x)
        obj_angle = lambda w: w * ch.concatenate([
            my_exp(sv.pose[55]),
            my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]),
            my_exp(-sv.pose[15])
        ])

        if viz:
            import matplotlib.pyplot as plt
            plt.ion()

            def on_step(_):
                """Create visualization."""
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        np.around(cam.r[smpl_ids]).astype(int),
                        np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0
                            and coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1]
                            and target_coord[0] >= 0
                            and target_coord[1] < tmp_img.shape[0]
                            and target_coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(target_coord), 3,
                                   [0, 255, 0])
                plt.imshow(tmp_img[:, :, ::-1])
                plt.draw()
                plt.show()
                plt.pause(1e-2)

            on_step(None)
        else:
            on_step = None

        if regs is not None:
            # interpenetration term
            sp = SphereCollisions(pose=sv.pose,
                                  betas=sv.betas,
                                  model=model,
                                  regs=regs)
            sp.no_hands = True
        # weight configuration used in the paper, with joints + confidence values from the CNN
        # (all the weights used in the code were obtained via grid search, see the paper for more details)
        # the first list contains the weights for the pose priors,
        # the second list contains the weights for the shape prior
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        # run the optimization in 4 stages, progressively decreasing the
        # weights for the priors
        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}

            objs['j2d'] = obj_j2d(1., 100)

            objs['pose'] = pprior(w)

            objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas

            if regs is not None:
                objs['sph_coll'] = 1e3 * sp

            ch.minimize(objs,
                        x0=[sv.betas, sv.pose],
                        method='dogleg',
                        callback=on_step,
                        options={
                            'maxiter': 100,
                            'e_3': .0001,
                            'disp': 0
                        })

        t1 = time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)

    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r, Jtr)
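For reference, a NumPy sketch of the Geman-McClure robustifier that GMOf is understood to implement in SMPLify (the chumpy version may return the square root of this value so that squaring the residuals yields the robust cost):

import numpy as np

def gmof(x, sigma):
    # Geman-McClure penalty: quadratic near zero, saturating toward sigma**2
    # for large residuals, which limits the influence of outlier joints.
    x_sq = np.asarray(x) ** 2
    return (sigma ** 2) * x_sq / (sigma ** 2 + x_sq)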
Example #4
def optimize_smal(fposes,
                  ftrans,
                  fbetas,
                  model,
                  cams,
                  segs,
                  imgs,
                  landmarks,
                  landmarks_names,
                  key_vids,
                  symIdx=None,
                  frameId=0,
                  opt_model_dir=None,
                  save_name=None,
                  COMPUTE_OPT=True,
                  img_paths=None,
                  img_offset=None,
                  img_scales=None):

    mesh_v_opt_save_path = join(opt_model_dir,
                                'mesh_v_opt_no_mc_' + str(frameId) + '.ply')
    mesh_v_opt_mc_save_path = join(opt_model_dir,
                                   'mesh_v_opt_' + str(frameId) + '.ply')
    mesh_init_save_path = join(opt_model_dir,
                               'mesh_init_' + str(frameId) + '.ply')
    nViews = len(fposes)
    if not COMPUTE_OPT:
        if not exists(opt_model_dir):
            makedirs(opt_model_dir)
        dv = 0
        compute_texture(nViews, opt_model_dir, dv, model, frameId,
                        mesh_init_save_path, fposes, ftrans, fbetas,
                        '_no_refine', cams, imgs, segs, img_paths, img_offset,
                        img_scales)
        return

    # Write the initial mesh
    np_betas = np.zeros_like(model.betas)
    np_betas[:len(fbetas[0])] = fbetas[0]
    tmp = verts_decorated(v_template=model.v_template,
                          pose=ch.zeros_like(model.pose.r),
                          trans=ch.zeros_like(model.trans),
                          J=model.J_regressor,
                          kintree_table=model.kintree_table,
                          betas=ch.array(np_betas),
                          weights=model.weights,
                          posedirs=model.posedirs,
                          shapedirs=model.shapedirs,
                          bs_type='lrotmin',
                          bs_style='lbs',
                          f=model.f)
    tmp_mesh = Mesh(v=tmp.r, f=tmp.f)

    tmp_path = join(opt_model_dir, 'mesh_init_' + str(frameId) + '.ply')
    tmp_mesh.write_ply(tmp_path)
    del tmp

    assert (nViews == len(cams))
    assert (nViews == len(segs))
    assert (nViews == len(imgs))

    # Define a displacement vector. We set a small non zero displacement as initialization
    dv = ch.array(np.random.rand(model.r.shape[0], 3) / 1000.)

    # Cell structure for ARAP
    f = model.f
    _, A3, A = edgesIdx(nV=dv.shape[0], f=f, save_dir='.', name='smal')
    wedge = wedges(A3, dv)

    s = np.zeros_like(dv)
    arap = ARAP(reg_e=MatVecMult(A3.T,
                                 model.ravel() + dv.ravel()).reshape(-1, 3),
                model_e=MatVecMult(A3.T, model.ravel()).reshape(-1, 3),
                w=wedge,
                A=A)

    k_arap = settings['ref_k_arap_per_view'] * nViews
    for weight, part in zip(settings['ref_W_arap_values'],
                            settings['ref_W_arap_parts']):
        k_arap, W_per_vertex = get_arap_part_weights(
            A, k_arap, [part], [weight])  #, animal_name) # was only Head

    W = np.zeros((W_per_vertex.shape[0], 3))
    for i in range(3):
        W[:, i] = W_per_vertex

    k_lap = settings['ref_k_lap'] * nViews * W
    k_sym = settings['ref_k_sym'] * nViews
    k_keyp = settings['ref_k_keyp_weight'] * nViews

    # Load already computed mesh
    if not exists(opt_model_dir):
        makedirs(opt_model_dir)
    shape_model, compute = load_shape_models(nViews, opt_model_dir, dv, model,
                                             frameId, mesh_v_opt_save_path,
                                             fposes, ftrans, fbetas)

    mv = None

    # Remove inside mouth faces
    '''
    if settings['ref_remove_inside_mouth']:
        # Giraffe
        faces_orig = shape_model[0].f.copy()
        im_v = im_up_v + im_down_v
        idx = [np.where(model.f == ix)[0] for ix in im_v]
        idx = np.concatenate(idx).ravel()
        for i in range(nViews):
            shape_model[i].f = np.delete(shape_model[i].f, idx, 0)
    '''

    if compute:
        objs = {}

        FIX_CAM = True
        free_variables = []
        kp_weights = k_keyp * np.ones((landmarks[0].shape[0], 1))
        print('removing shoulders, often badly annotated')
        kp_weights[landmarks_names.index('leftShoulder'), :] *= 0
        kp_weights[landmarks_names.index('rightShoulder'), :] *= 0
        objs_pose = None
        j2d = None
        #k_silh_term = settings['ref_k_silh_term']
        k_m2s = settings['ref_k_m2s']
        k_s2m = settings['ref_k_s2m']

        objs, params_, j2d = set_pose_objs(shape_model,
                                           cams,
                                           landmarks,
                                           key_vids,
                                           kp_weights=kp_weights,
                                           FIX_CAM=FIX_CAM,
                                           ONLY_KEYP=True,
                                           OPT_SHAPE=False)

        if np.any(k_arap != 0):
            objs['arap'] = k_arap * arap
        if k_sym != 0:
            objs['sym_0'] = k_sym * (ch.abs(dv[:, 0] - dv[symIdx, 0]))
            objs['sym_1'] = k_sym * (ch.abs(dv[:, 1] + dv[symIdx, 1] -
                                            0.00014954))
            objs['sym_2'] = k_sym * (ch.abs(dv[:, 2] - dv[symIdx, 2]))
        if np.any(k_lap != 0):
            lap_op = np.asarray(
                laplacian(Mesh(v=dv, f=shape_model[0].f)).todense())
            objs['lap'] = k_lap * ch.dot(lap_op, dv)

        mv = None
        mv2 = MeshViewers(shape=(1, nViews))  #None
        vc = np.ones_like(dv)
        dv_r = fit_silhouettes_pyramid_opt(objs,
                                           shape_model,
                                           dv,
                                           segs,
                                           cams,
                                           j2d=j2d,
                                           weights=1.,
                                           mv=mv,
                                           imgs=imgs,
                                           s2m_weights=k_s2m,
                                           m2s_weights=k_m2s,
                                           max_iter=100,
                                           free_variables=free_variables,
                                           vc=vc,
                                           symIdx=symIdx,
                                           mv2=mv2,
                                           objs_pose=objs_pose)

        # Save result image
        for i in range(nViews):
            img_res = render_mesh(Mesh(shape_model[i].r, shape_model[i].f),
                                  imgs[i].shape[1],
                                  imgs[i].shape[0],
                                  cams[i],
                                  img=imgs[i],
                                  world_frame=True)
            img_result = np.hstack((imgs[i], img_res * 255.))
            save_img_path = save_name[i].replace('.pkl', '_v_opt.png')
            cv2.imwrite(save_img_path, img_result)

        shape_model[0].pose[:] = 0
        shape_model[0].trans[:] = 0
        V = shape_model[0].r.copy()
        vm = V[symIdx, :].copy()
        vm[:, 1] = -1 * vm[:, 1]
        V2 = (V + vm) / 2.0

        mesh_out = Mesh(v=V2, f=shape_model[0].f)
        mesh_out.show()
        mesh_out.write_ply(mesh_v_opt_save_path)

        save_dv_data_path = mesh_v_opt_save_path.replace('.ply', '_dv.pkl')
        dv_data = {'betas': shape_model[0].betas.r, 'dv': dv_r}
        pkl.dump(dv_data, open(save_dv_data_path, 'wb'))

    compute_texture(nViews, opt_model_dir, dv, model, frameId,
                    mesh_v_opt_save_path, fposes, ftrans, fbetas, '_non_opt',
                    cams, imgs, segs, img_paths, img_offset, img_scales)

    return
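A small NumPy sketch of the final symmetrization step above; the mirror index map and the symmetry plane follow the code, but the toy symIdx below is hypothetical:

import numpy as np

V = np.random.rand(6, 3)               # toy vertex array
symIdx = np.array([1, 0, 3, 2, 5, 4])  # hypothetical left/right mirror pairs
vm = V[symIdx].copy()
vm[:, 1] *= -1.0                       # mirror across the symmetry plane
V_sym = (V + vm) / 2.0                 # average each vertex with its mirror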
Example #5
def optimize_on_joints_and_silhouette(j2d,
                                      sil,
                                      model,
                                      cam,
                                      img,
                                      prior,
                                      init_pose,
                                      init_shape,
                                      n_betas=10,
                                      conf=None):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param sil: h x w silhouette with soft boundaries (np.float32, range(-1, 1))
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image
    :param prior: mixture of gaussians pose prior
    :param init_pose: 72D vector, pose prediction results provided by HMR
    :param init_shape: 10D vector, shape prediction results provided by HMR
    :param n_betas: number of shape coefficients considered during optimization
    :param conf: 14D vector storing the confidence values from the CNN
    :returns: a tuple containing the optimized model, its joints projected on image space, the
              camera translation
    """
    # define the mapping LSP joints -> SMPL joints
    cids = list(range(12)) + [13]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    betas = ch.array(init_shape)

    # instantiate the model:
    sv = verts_decorated(trans=ch.zeros(3),
                         pose=ch.array(init_pose),
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=model.posedirs)

    # make the SMPL joints depend on betas
    Jdirs = np.dstack([
        model.J_regressor.dot(model.shapedirs[:, :, i])
        for i in range(len(betas))
    ])
    J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
        model.v_template.r)

    # get joint positions as a function of model pose, betas and trans
    (_, A_global) = global_rigid_transformation(sv.pose,
                                                J_onbetas,
                                                model.kintree_table,
                                                xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

    # add the head joint
    Jtr = ch.vstack((Jtr, sv[head_id]))
    smpl_ids.append(len(Jtr) - 1)

    # update the weights using confidence values
    weights = base_weights * conf[cids] if conf is not None else base_weights

    # project SMPL joints and vertex on the image plane using the estimated camera
    cam.v = ch.vstack([Jtr, sv])

    # obtain a gradient map of the soft silhouette
    grad_x = cv2.Sobel(sil, cv2.CV_32FC1, 1, 0) * 0.125
    grad_y = cv2.Sobel(sil, cv2.CV_32FC1, 0, 1) * 0.125

    # data term #1: distance between observed and estimated joints in 2D
    obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
        (j2d[cids] - cam[smpl_ids]), sigma))

    # data term #2: distance between the observed and projected boundaries
    obj_s2d = lambda w, sigma, flag, target_pose: (w * flag * GMOf(
        (target_pose - cam[len(Jtr):(len(Jtr) + 6890)]), sigma))

    # mixture of gaussians pose prior
    pprior = lambda w: w * prior(sv.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10
    my_exp = lambda x: alpha * ch.exp(x)
    obj_angle = lambda w: w * ch.concatenate([
        my_exp(sv.pose[55]),
        my_exp(-sv.pose[58]),
        my_exp(-sv.pose[12]),
        my_exp(-sv.pose[15])
    ])

    # run the optimization in 4 stages, progressively decreasing the
    # weights for the priors
    print('****** Optimization on joints')
    curr_pose = sv.pose.r
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1])
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas
        objs['thetas'] = wbetas * (sv.pose - curr_pose)  # constrain theta changes

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .001,
                        'disp': 0
                    })
    curr_pose = sv.pose.r
    # cam.v = ch.vstack([Jtr, sv.r])

    # run the optimization in 2 stages, progressively decreasing the
    # weights for the priors
    print('****** Optimization on silhouette and joints')
    opt_weights = zip([57.4, 4.78], [2e2, 1e2])
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        # find the boundary vertices and estimate their expected location
        smpl_vs = cam.r[len(Jtr):, :]
        boundary_flag = np.zeros((smpl_vs.shape[0], 1))
        expected_pos = np.zeros((smpl_vs.shape[0], 2))
        for vi, v in enumerate(smpl_vs):
            r, c = int(v[1]), int(v[0])
            if r < 0 or r >= sil.shape[0] or c < 0 or c >= sil.shape[1]:
                continue
            sil_v = sil[r, c]
            grad = np.array([grad_x[r, c], grad_y[r, c]])
            grad_n = np.linalg.norm(grad)
            if grad_n > 1e-1 and sil_v < 0.4:  # vertex on or out of the boundaries
                boundary_flag[vi] = 1.0
                step = (grad / grad_n) * (sil_v / grad_n)
                expected_pos[vi] = np.array([c - step[0], r - step[1]])

        # run optimization
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['s2d'] = obj_s2d(5., 100, boundary_flag, expected_pos)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas  # constrain beta changes
        objs['thetas'] = wbetas * (sv.pose - curr_pose)  # constrain theta changes
        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .001,
                        'disp': 0
                    })

    return sv, cam.r, cam.t.r
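The boundary search above takes one Newton-style step along the silhouette gradient toward the zero level set; a scalar sketch of that step with illustrative values:

import numpy as np

sil_v = 0.2                  # soft silhouette value at the projected vertex
grad = np.array([0.3, 0.4])  # (grad_x, grad_y) Sobel gradient at that pixel
grad_n = np.linalg.norm(grad)
# move sil_v / |grad| pixels against the gradient, i.e. toward sil == 0
step = (grad / grad_n) * (sil_v / grad_n)
expected = np.array([10.0, 20.0]) - step  # hypothetical (c, r) target position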
Example #6
def optimize_on_DensePose(ori_img,
                          tar_img,
                          dp_iuv,
                          op_j2d,
                          w_j2d,
                          model,
                          cam,
                          cam_old,
                          prior,
                          init_trans,
                          init_pose,
                          init_betas,
                          n_betas=10,
                          viz=False,
                          imageid=1):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    outPath = '/home/xiul/databag/net_images/smplify/vid_{:06d}_puredense'.format(
        imageid)
    if not os.path.isdir(outPath):
        os.mkdir(outPath)

    dp_iuv_tar = dp_iuv.copy()
    width = dp_iuv_tar.shape[1]
    height = dp_iuv_tar.shape[0]
    dp_iuv_weight = np.zeros(dp_iuv_tar.shape) + 10
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 0] = 10
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 1] = 10
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 2] = 15

    dp_iuv_weight = ch.array(dp_iuv_weight)
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joints ids for LSP:

    tarIUV = ch.array(dp_iuv)

    # initialize the shape with the provided estimate
    betas = ch.array(init_betas)

    # initialize the pose with the provided estimate

    pose = ch.array(init_pose)
    #init_pose = np.hstack((body_orient, body_init))

    # instantiate the model:
    # verts_decorated allows us to define how many
    # shape coefficients (directions) we want to consider (here, n_betas)
    sv = verts_decorated(trans=ch.array(init_trans),
                         pose=pose,
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=None)

    J_tmpx = MatVecMult(J_reg, sv[:, 0])
    J_tmpy = MatVecMult(J_reg, sv[:, 1])
    J_tmpz = MatVecMult(J_reg, sv[:, 2])
    Jtr = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T

    #Jtr = J_reg.dot(sv)
    cam.v = Jtr

    reIUV = render_model(sv,
                         model.f,
                         width,
                         height,
                         cam,
                         near=0.5,
                         far=25,
                         vc=dp_colors,
                         img=None)

    reModel = render_model(sv,
                           model.f,
                           width,
                           height,
                           cam,
                           near=0.5,
                           far=25,
                           vc=None,
                           img=None)

    fullModel = render_model(sv,
                             model.f,
                             ori_img.shape[1],
                             ori_img.shape[0],
                             cam_old,
                             near=0.5,
                             far=25,
                             vc=None,
                             img=None)

    #gaussian_pyramid(input_objective, imshape=None, normalization='SSE', n_levels=3, as_list=False, label=None):
    def obj_j2d(w, sigma):
        return (w * w_j2d.reshape((-1, 1)) * GMOf((op_j2d - cam), sigma))

    #input_objective, imshape, normalization, n_levels, as_list

    err = (tarIUV - reIUV) * dp_iuv_weight

    obj_dense = 100 * gaussian_pyramid(err, n_levels=4, normalization='SSE')

    #obj_dense = gaussian_pyramid(err, n_levels=4, normalization=SSE)
    #obj_dense = 10000*err

    # data term: distance between observed and estimated joints in 2D
    # obj_dense = lambda w, sigma: (
    #      w * GMOf((tarIUV - reIUV).reshape(-1,3), sigma))
    # mixture of gaussians pose prior
    def pprior(w):
        return w * prior(sv.pose)

    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10

    def my_exp(x):
        return alpha * ch.exp(x)

    def obj_angle(w):
        return w * ch.concatenate([
            my_exp(sv.pose[55]),
            my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]),
            my_exp(-sv.pose[15])
        ])

    def viz_func(stage_num):
        plt.figure(1, figsize=(10, 10))
        plt.clf()
        plt.subplot(2, 3, 1)
        # show optimized joints in 2D
        tmp_img = reIUV.r.copy()
        tmp_tar = tarIUV.copy()
        tmp_model = reModel.r.copy()
        full_model = fullModel.r.copy()
        # w = tmp_tar.shape[1]
        # h = tmp_tar.shape[0]
        plt.imshow(tar_img)
        plt.imshow(tmp_img, alpha=0.5)
        for j1, j2, w_ts in zip(cam.r, op_j2d, w_j2d):
            if (w_ts > 0):
                plt.plot([j1[0], j2[0]], [j1[1], j2[1]], 'r')
        plt.xlim([0, width])
        plt.ylim([height, 0])
        plt.subplot(2, 3, 2)
        plt.imshow(tar_img)
        plt.imshow(tmp_tar, alpha=0.5)
        plt.xlim([0, width])
        plt.ylim([height, 0])
        plt.subplot(2, 3, 3)
        plt.imshow(tar_img)
        tmp_model_alpha = np.ones((tmp_model.shape[0], tmp_model.shape[1]))
        tmp_model_alpha[tmp_model[:, :, 0] < 1e-2] = 0
        tmp_model = np.dstack((tmp_model, tmp_model_alpha))
        plt.imshow(tmp_model)
        plt.xlim([0, width])
        plt.ylim([height, 0])
        plt.subplot(2, 1, 2)
        plt.imshow(full_model)

        plt.draw()
        plt.savefig(os.path.join(outPath, 'stage-{}.png'.format(stage_num)),
                    bbox_inches='tight')
        full_model_int = full_model.copy()
        full_model_int *= 255
        full_model_int = full_model_int.astype(np.uint8)
        cv2.imwrite(os.path.join(outPath, 'model-{}.png'.format(stage_num)),
                    full_model_int)
        out_params = {
            'pose': sv.pose.r,
            'shape': sv.betas.r,
            'trans': sv.trans.r
        }
        with open(os.path.join(outPath, 'param-{}.pkl'.format(stage_num)),
                  'wb') as fio:
            pickle.dump(out_params, fio, pickle.HIGHEST_PROTOCOL)

    viz_func(0)
    # the per-step callback is intentionally disabled ('viz and False' never
    # holds), so per-stage output comes from viz_func instead
    if viz and False:

        def on_step(cstep):
            """Create visualization."""
            # TODO this function is in vis_func
            #plt.savefig(os.path.join(outPath,'{}.png'.format(strftime("%d_%H_%M_%S", gmtime()))),bbox_inches='tight')

        on_step(None)

    else:
        on_step = None

    # weight configuration used in the paper, with joints + confidence values from the CNN
    # (all the weights used in the code were obtained via grid search, see the paper for more details)
    # the first list contains the weights for the pose priors,
    # the second list contains the weights for the shape prior

    # opt_weights = zip([4.78, 3.78, 2.78, 1.78],
    #                   [5, 5, 5, 5],
    #                   [1, 0.25, 0.10, 0])
    opt_weights = zip([4.78, 4.78, 4.78, 4.78, 4.78], [50, 50, 50, 50, 50],
                      [0.1, 0.1, 0.5, 0.25, 0.1])

    # run the optimization in 5 stages, progressively decreasing the
    # weights for the priors
    for stage, (w, wbetas, w_joints) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        #if stage >= 1:
        objs['dense'] = obj_dense
        objs['j2d'] = obj_j2d(w_joints, 50)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose, sv.trans],
                    method='dogleg',
                    callback=on_step,
                    options={
                        'maxiter': 10000,
                        'e_3': .0001,
                        'disp': 1
                    })

        viz_func(stage + 1)

    t1 = time()
    _LOGGER.info('elapsed %.05f', (t1 - t0))

    if viz and False:  # never taken: interactive mode was not enabled
        plt.ioff()
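For intuition, a non-differentiable NumPy sketch of what a multi-level SSE pyramid over the IUV error measures; opendr's gaussian_pyramid builds the differentiable version, and the Gaussian blur is omitted here:

import numpy as np

def sse_pyramid(err, n_levels=4):
    # Sum squared error at several resolutions so coarse misalignment is
    # penalized even before fine details overlap.
    total = 0.0
    for _ in range(n_levels):
        total += np.sum(err ** 2)
        err = err[::2, ::2]  # crude decimation standing in for blur+downsample
    return total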
Example #7
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       init_pose,
                       init_shape,
                       n_betas=10,
                       conf=None):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image
    :param prior: mixture of gaussians pose prior
    :param init_pose: 72D vector, pose prediction results provided by HMR
    :param init_shape: 10D vector, shape prediction results provided by HMR
    :param n_betas: number of shape coefficients considered during optimization
    :param conf: 14D vector storing the confidence values from the CNN
    :returns: a tuple containing the optimized model, its joints projected on image space, the
              camera translation
    """
    # define the mapping LSP joints -> SMPL joints
    # cids are joints ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    # initialize the shape with the HMR prediction
    betas = ch.array(init_shape)

    # instantiate the model:
    # verts_decorated allows us to define how many
    # shape coefficients (directions) we want to consider (here, n_betas)
    sv = verts_decorated(trans=ch.zeros(3),
                         pose=ch.array(init_pose),
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=model.posedirs)

    # make the SMPL joints depend on betas
    Jdirs = np.dstack([
        model.J_regressor.dot(model.shapedirs[:, :, i])
        for i in range(len(betas))
    ])
    J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
        model.v_template.r)

    # get joint positions as a function of model pose, betas and trans
    (_, A_global) = global_rigid_transformation(sv.pose,
                                                J_onbetas,
                                                model.kintree_table,
                                                xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

    # add the head joint, corresponding to a vertex...
    Jtr = ch.vstack((Jtr, sv[head_id]))

    # ... and add the joint id to the list
    smpl_ids.append(len(Jtr) - 1)

    # update the weights using confidence values
    weights = base_weights * conf[cids] if conf is not None else base_weights

    # project SMPL joints on the image plane using the estimated camera
    cam.v = Jtr

    # data term: distance between observed and estimated joints in 2D
    obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
        (j2d[cids] - cam[smpl_ids]), sigma))

    # mixture of gaussians pose prior
    pprior = lambda w: w * prior(sv.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10
    my_exp = lambda x: alpha * ch.exp(x)
    obj_angle = lambda w: w * ch.concatenate([
        my_exp(sv.pose[55]),
        my_exp(-sv.pose[58]),
        my_exp(-sv.pose[12]),
        my_exp(-sv.pose[15])
    ])

    # weight configuration used in the paper, with joints + confidence values from the CNN
    # (all the weights used in the code were obtained via grid search, see the paper for more details)
    # the first list contains the weights for the pose priors,
    # the second list contains the weights for the shape prior
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1])

    # run the optimization in 4 stages, progressively decreasing the
    # weights for the priors
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .0001,
                        'disp': 0
                    })

    return sv, cam.r, cam.t.r
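The "joints depend on betas" construction above, isolated as a runnable NumPy sketch (dense random stand-ins replace the sparse J_regressor and the learned shapedirs):

import numpy as np

n_joints, n_verts, n_betas = 24, 6890, 10
J_regressor = np.random.rand(n_joints, n_verts)
shapedirs = np.random.rand(n_verts, 3, n_betas)
v_template = np.random.rand(n_verts, 3)

# one (n_joints, 3) joint-offset block per shape coefficient...
Jdirs = np.dstack([J_regressor.dot(shapedirs[:, :, i]) for i in range(n_betas)])
# ...so joints are the template joints plus a linear function of betas
betas = np.zeros(n_betas)
J_onbetas = Jdirs.dot(betas) + J_regressor.dot(v_template)
assert J_onbetas.shape == (n_joints, 3)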
Example #8
def main(args):
    """Set up paths to image and joint data, saves results.
    #Fit SMPL to a target 3D skeleton
    """
    modelpath = (abspath(dirname(__file__))) + '/smplify/models/'

    # Note that rendering many views can take a while.
    model = load_model(modelpath + args.model)
    n_betas = args.n_betas
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(111, projection='3d')
    out_dir = args.out_dir
    out_dir = None  # overrides the argument: show figures instead of saving them
    for idx in range(args.max_num):
        joint_file = 'body3DScene_{:08d}.json'.format(
            (idx + args.start_number) * 3)
        SMPL_file = 'bodySMPL_{:08d}.pkl'.format(idx + args.start_number)
        DP_file = 'DensePose_{:08d}.mat'.format(idx + args.start_number)

        #create visualization
        ax.clear()
        ax.set_aspect('equal')
        #load DensePose data
        #ax.set_xlim((x_max+x_min)/2.0 - plot_radius,(x_max+x_min)/2.0+plot_radius)
        #ax.set_ylim((y_max+y_min)/2.0 - plot_radius,(y_max+y_min)/2.0+plot_radius)
        #ax.set_zlim((z_max+z_min)/2.0 - plot_radius,(z_max+z_min)/2.0+plot_radius)

        DP_mat = sio.loadmat(join(args.DP, DP_file))
        DP_Points = DP_mat['cPoints']
        DP_Err = DP_mat['cErr']
        DP_valid = DP_Points[:, 3] > 0
        ax.scatter(DP_Points[DP_valid, 0] / 100.,
                   DP_Points[DP_valid, 1] / 100.,
                   DP_Points[DP_valid, 2] / 100.,
                   s=2,
                   c=DP_Err[DP_valid, 0])

        #Load SMPL and Joints
        if (os.path.isfile(join(args.joints, joint_file))):
            with open(join(args.joints, joint_file)) as f:
                rawData = json.load(f)
            Joint15 = rawData["bodies"][0]["joints15"]
            Joint15 = np.reshape(Joint15, (15, 4))
            joints = Joint15[dome2lsp, 0:3] / 100.
            plot_pose3d(joints, ax)

            #load SMPl
            with open(join(args.SMPL, SMPL_file), 'rb') as f:
                rawSMPL = pickle.load(f)

            sv = verts_decorated(trans=ch.array(rawSMPL['trans']),
                                 pose=ch.array(rawSMPL['pose']),
                                 v_template=model.v_template,
                                 J=model.J_regressor,
                                 betas=ch.array(rawSMPL['betas']),
                                 shapedirs=model.shapedirs[:, :, :n_betas],
                                 weights=model.weights,
                                 kintree_table=model.kintree_table,
                                 bs_style=model.bs_style,
                                 f=model.f,
                                 bs_type=model.bs_type,
                                 posedirs=model.posedirs)
            # Visualize the SMPL vertices
            ax.scatter(sv.r[::2, 0], sv.r[::2, 1], sv.r[::2, 2], s=0.1, c='k')

            #plot skeleton
        ax.set_xlim(-1.5, 1.5)
        ax.set_ylim(-3, 0)
        ax.set_zlim(-1.5, 1.5)
        ax.view_init(elev=110., azim=90.)
        if out_dir is not None:
            figureName = 'Figure_{:08d}.png'.format(idx + args.start_number)
            plt.savefig(join(out_dir, figureName), bbox_inches='tight')
        else:
            plt.show()
            raw_input('Press any key to continue...')
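dome2lsp is defined elsewhere in this repository; a hedged sketch of the joint re-ordering and unit conversion it supports (the permutation below is a placeholder, not the real mapping):

import numpy as np

Joint15 = np.random.rand(15, 4)         # per joint: x, y, z (in cm) + confidence
dome2lsp = np.arange(14)                # placeholder index map, dome -> LSP order
joints = Joint15[dome2lsp, 0:3] / 100.  # reorder and convert cm -> m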
Example #9
def optimize_on_DensePose(dp_iuv,
                          op_j2d,
                          w_j2d,
                          model,
                          cam,
                          prior,
                          body_orient,
                          body_trans,
                          body_init,
                          n_betas=10,
                          viz=False,
                          imageid=1):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    outPath = '/home/xiul/databag/dbfusion/record0/smplify/vid{}'.format(
        imageid)
    if not os.path.isdir(outPath):
        os.mkdir(outPath)

    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joints ids for LSP:

    tarIUV = ch.array(dp_iuv)

    # initialize the shape to the mean shape in the SMPL training set
    betas = ch.zeros(n_betas)

    # initialize the pose by using the optimized body orientation and the
    # pose prior

    init_pose = np.hstack((body_orient, prior.weights.dot(prior.means)))
    #init_pose = np.hstack((body_orient, body_init))

    # instantiate the model:
    # verts_decorated allows us to define how many
    # shape coefficients (directions) we want to consider (here, n_betas)
    sv = verts_decorated(trans=ch.array(body_trans),
                         pose=ch.array(init_pose),
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=model.posedirs)

    J_tmpx = MatVecMult(J_reg, sv[:, 0])
    J_tmpy = MatVecMult(J_reg, sv[:, 1])
    J_tmpz = MatVecMult(J_reg, sv[:, 2])
    Jtr = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T

    #Jtr = J_reg.dot(sv)
    cam.v = Jtr

    op_j2d = op_j2d * 1280 / 1920
    w = 1280
    h = 720

    reIUV = render_model(sv,
                         model.f,
                         w,
                         h,
                         cam,
                         near=0.5,
                         far=25,
                         vc=dp_colors,
                         img=None)

    #gaussian_pyramid(input_objective, imshape=None, normalization='SSE', n_levels=3, as_list=False, label=None):
    obj_j2d = lambda w, sigma: (w * w_j2d.reshape((-1, 1)) * GMOf(
        (op_j2d - cam), sigma))
    #input_objective, imshape, normalization, n_levels, as_list

    err = tarIUV - reIUV
    obj_dense = 100 * gaussian_pyramid(err, n_levels=6, normalization='SSE')

    # data term: distance between observed and estimated joints in 2D
    # obj_dense = lambda w, sigma: (
    #      w * GMOf((tarIUV - reIUV).reshape(-1,3), sigma))
    # mixture of gaussians pose prior
    pprior = lambda w: w * prior(sv.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10
    my_exp = lambda x: alpha * ch.exp(x)
    obj_angle = lambda w: w * ch.concatenate([
        my_exp(sv.pose[55]),
        my_exp(-sv.pose[58]),
        my_exp(-sv.pose[12]),
        my_exp(-sv.pose[15])
    ])

    if viz:
        import matplotlib.pyplot as plt
        from time import gmtime, strftime

        def on_step(cstep):
            """Create visualization."""
            plt.figure(1, figsize=(10, 10))
            plt.clf()
            plt.subplot(1, 2, 1)
            # show optimized joints in 2D
            tmp_img = reIUV.r.copy()
            tmp_tar = tarIUV.copy()
            plt.imshow(tmp_img)
            for j1, j2 in zip(cam.r, op_j2d):
                plt.plot([j1[0], j2[0]], [j1[1], j2[1]], 'r')
            plt.subplot(1, 2, 2)
            plt.imshow(tmp_tar)
            plt.draw()
            plt.savefig(os.path.join(
                outPath, '{}.png'.format(strftime("%d_%H_%M_%S", gmtime()))),
                        bbox_inches='tight')

        on_step(None)

    else:
        on_step = None

    # weight configuration used in the paper, with joints + confidence values from the CNN
    # (all the weights used in the code were obtained via grid search, see the paper for more details)
    # the first list contains the weights for the pose priors,
    # the second list contains the weights for the shape prior
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78, 4.78, 4.78, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1, 5, 5, 5])

    # run the optimization in 7 stages, progressively decreasing the
    # weights for the priors
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        if stage >= 4:
            objs['dense'] = obj_dense
        if stage <= 4:
            objs['j2d'] = obj_j2d(1, 100)
        else:
            objs['j2d'] = obj_j2d(0.1, 100)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=on_step,
                    options={
                        'maxiter': 10000,
                        'e_3': .001,
                        'disp': 1
                    })

    t1 = time()
    _LOGGER.info('elapsed %.05f', (t1 - t0))

    if viz:
        plt.ioff()
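The per-coordinate MatVecMult trick above keeps the joint regression differentiable in chumpy; the same computation in plain NumPy, with a dense stand-in for the sparse regressor:

import numpy as np

J_reg = np.random.rand(24, 6890)  # dense stand-in for the sparse joint regressor
verts = np.random.rand(6890, 3)
# regress each coordinate column separately, then stack and transpose
Jtr = np.vstack([J_reg.dot(verts[:, a]) for a in range(3)]).T
assert Jtr.shape == (24, 3)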
Example #10
def test_interplote():
    import cv2
    pkl_file = '/home/xiul/databag/net_images/hmr/kobe.pkl'
    im = cv2.imread('/home/xiul/databag/net_images/images/kobe.jpg')

    data_all = parse_hmr_data(pkl_file)
    #only use 1:4
    MODEL_DIR = join(abspath(dirname(__file__)), '../../models')
    # Model paths:
    MODEL_NEUTRAL_PATH = join(MODEL_DIR,
                              'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
    MODEL_FEMALE_PATH = join(MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    MODEL_MALE_PATH = join(MODEL_DIR, 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl')

    model = load_model(MODEL_MALE_PATH)

    sv = []
    f = []
    v_num = model.v_template.r.shape[0]
    fx = im.shape[1]
    fy = im.shape[1]
    rt = ch.zeros(3)
    t = ch.zeros(3)
    cx = im.shape[1] / 2
    cy = im.shape[0] / 2
    data_all = data_all[1:5]

    alpha_val = np.linspace(0, 1, num=10)
    cam = ProjectPoints(f=np.array([fx, fy]),
                        rt=rt,
                        t=t,
                        k=np.zeros(5),
                        c=[cx, cy])

    for alpha in alpha_val:
        c_pose = alpha * data_all[2]['pose'] + (1 - alpha) * data_all[3]['pose']
        c_pose[:3] = 0
        c_pose[0] = np.pi
        c_sv = verts_decorated(trans=ch.array(data_all[1]['trans']),
                               pose=ch.array(c_pose),
                               v_template=model.v_template,
                               J=model.J_regressor,
                               betas=ch.array(data_all[1]['betas']),
                               shapedirs=model.shapedirs[:, :, :10],
                               weights=model.weights,
                               kintree_table=model.kintree_table,
                               bs_style=model.bs_style,
                               f=model.f,
                               bs_type=model.bs_type,
                               posedirs=model.posedirs)

        #fullModel = render_model(sv, model.f, ori_img.shape[1], ori_img.shape[0],
        #                         cam_old, near=0.5, far=25, vc=None, img=None)

        im_render = render_model(c_sv,
                                 model.f,
                                 im.shape[1],
                                 im.shape[0],
                                 cam,
                                 near=0.5,
                                 far=25,
                                 vc=None,
                                 img=None)
        im_render = im_render.r * 255
        im_render = im_render.astype(np.uint8)

        im_out = model_im_overlay(im_render, im)
        print(im_out.shape)
        print(im_out.dtype)
        cv2.imshow('im', im_out)
        cv2.waitKey(-1)
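The loop above blends axis-angle pose vectors linearly, which is only approximate for large rotations; a hedged alternative sketch using per-joint quaternion slerp (SciPy's Rotation and Slerp are assumed available; they are not used elsewhere in this codebase):

import numpy as np
from scipy.spatial.transform import Rotation, Slerp

def slerp_pose(pose_a, pose_b, alpha):
    # interpolate each joint's 3-vector axis-angle rotation on the geodesic
    # instead of mixing the rotation vectors linearly
    out = np.empty_like(pose_a)
    for j in range(len(pose_a) // 3):
        rots = Rotation.from_rotvec([pose_a[3 * j:3 * j + 3],
                                     pose_b[3 * j:3 * j + 3]])
        out[3 * j:3 * j + 3] = Slerp([0.0, 1.0], rots)(
            np.array([alpha])).as_rotvec()[0]
    return out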
Example #11
def main():
    import cv2
    pkl_file = '/home/xiul/databag/net_images/hmr/kobe.pkl'
    im = cv2.imread('/home/xiul/databag/net_images/images/kobe.jpg')

    data_all = parse_hmr_data(pkl_file)
    #only use 1:4
    MODEL_DIR = join(abspath(dirname(__file__)), '../../models')
    # Model paths:
    MODEL_NEUTRAL_PATH = join(MODEL_DIR,
                              'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
    MODEL_FEMALE_PATH = join(MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    MODEL_MALE_PATH = join(MODEL_DIR, 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl')

    model = load_model(MODEL_MALE_PATH)

    sv = []
    f = []
    v_num = model.v_template.r.shape[0]
    fx = im.shape[1]
    fy = im.shape[1]
    rt = ch.zeros(3)
    t = ch.zeros(3)
    cx = im.shape[1] / 2
    cy = im.shape[0] / 2
    data_all = data_all[1:5]

    for idx, c_data_old in enumerate(data_all):
        c_data = normalize_cam_single(c_data_old['cam'], c_data_old, cx, cy,
                                      fx)
        c_sv = verts_decorated(trans=ch.array(c_data['trans']),
                               pose=ch.array(c_data['pose']),
                               v_template=model.v_template,
                               J=model.J_regressor,
                               betas=ch.array(c_data['betas']),
                               shapedirs=model.shapedirs[:, :, :10],
                               weights=model.weights,
                               kintree_table=model.kintree_table,
                               bs_style=model.bs_style,
                               f=model.f,
                               bs_type=model.bs_type,
                               posedirs=model.posedirs)
        sv.append(c_sv.r.copy())
        c_f = model.f + v_num * idx
        f.append(c_f.copy())

    sv = np.array(sv)
    f = np.array(f)

    sv_vec = np.reshape(sv, (-1, 3))
    f_vec = np.reshape(f, (-1, 3))

    # initialize the camera

    cam = ProjectPoints(f=np.array([fx, fy]),
                        rt=rt,
                        t=t,
                        k=np.zeros(5),
                        c=[cx, cy])

    #fullModel = render_model(sv, model.f, ori_img.shape[1], ori_img.shape[0],
    #                         cam_old, near=0.5, far=25, vc=None, img=None)

    im_render = render_model(sv_vec,
                             f_vec,
                             im.shape[1],
                             im.shape[0],
                             cam,
                             near=0.5,
                             far=25,
                             vc=None,
                             img=None)
    im_render = im_render.r * 255
    im_render = im_render.astype(np.uint8)

    im_out = model_im_overlay(im_render, im)
    print(im_out.shape)
    print(im_out.dtype)
    cv2.imshow('im', im_out)
    cv2.waitKey(-1)
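
The example above merges several posed SMPL instances into one buffer by offsetting each copy's face indices by v_num * idx, so every face keeps pointing at its own instance's vertices after concatenation. A self-contained sketch of the same trick (names are illustrative):

import numpy as np

def merge_meshes(verts_list, faces):
    """Stack N instances of the same topology into one vertex/face buffer.

    verts_list: list of (V, 3) vertex arrays; faces: shared (F, 3) int array.
    """
    v_num = verts_list[0].shape[0]
    all_verts = np.concatenate(verts_list)  # (N*V, 3)
    all_faces = np.concatenate(
        [faces + idx * v_num for idx in range(len(verts_list))])  # (N*F, 3)
    return all_verts, all_faces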
Ejemplo n.º 12
0
def vis_param():
    import cv2
    import matplotlib.pyplot as plt

    hsv_map = plt.get_cmap('hsv')
    from os.path import join, abspath, dirname

    seq = 'run'

    ori_pkl = '/home/xiul/databag/net_images/hmr/{}.pkl'.format(seq)
    im = cv2.imread('/home/xiul/databag/net_images/images/{}.jpg'.format(seq))

    data_all = parse_hmr_data(ori_pkl)
    MODEL_DIR = join(abspath(dirname(__file__)), '../../models')
    # Model paths:
    MODEL_NEUTRAL_PATH = join(MODEL_DIR,
                              'basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')
    MODEL_FEMALE_PATH = join(MODEL_DIR, 'basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    MODEL_MALE_PATH = join(MODEL_DIR, 'basicmodel_m_lbs_10_207_0_v1.0.0.pkl')

    model = load_model(MODEL_MALE_PATH)

    sv = []
    f_vec = []
    v_num = model.v_template.r.shape[0]
    rt = ch.zeros(3)
    t = ch.zeros(3)

    cx = im.shape[1] / 2
    cy = im.shape[0] / 2
    f = 2000

    for idx, c_data_old in enumerate(data_all):
        opt_param = normalize_cam_single(c_data_old, cx, cy, f)
        c_sv = verts_decorated(trans=ch.array(opt_param['trans']),
                               pose=ch.array(opt_param['pose']),
                               v_template=model.v_template,
                               J=model.J_regressor,
                               betas=ch.array(opt_param['betas']),
                               shapedirs=model.shapedirs[:, :, :10],
                               weights=model.weights,
                               kintree_table=model.kintree_table,
                               bs_style=model.bs_style,
                               f=model.f,
                               bs_type=model.bs_type,
                               posedirs=model.posedirs)
        sv.append(copy.deepcopy(c_sv))
        cf = model.f + idx * v_num
        f_vec.append(copy.deepcopy(cf))
        print(idx)
    # initialize the camera
    print('model loaded')
    cam = ProjectPoints(f=np.array([f, f]),
                        rt=rt,
                        t=t,
                        k=np.zeros(5),
                        c=[cx, cy])

    sv = np.concatenate(sv)
    f_vec = np.concatenate(f_vec)

    import scipy.io
    smpldpPath = '/home/xiul/workspace/PanopticDome/models/SMPL_DP.mat'
    smpldpFile = scipy.io.loadmat(smpldpPath)
    smpldp = smpldpFile['SMPL_IUV']
    dp_colors = smpldp[:, [2, 1, 3]]
    dp_colors[:, 2] = dp_colors[:, 2] / 255.0

    reg_colors = np.ones(dp_colors.shape, dp_colors.dtype)
    with open(
            '/home/xiul/workspace/PanopticDome/models/neutral_smpl_with_cocoplus_reg.pkl',
            'rb') as fio:
        cPkl = pickle.load(fio)

    J_reg = model.J_regressor
    #J_reg = cPkl['cocoplus_regressor']
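    # each row of the sparse regressor holds one joint's blend weights;
    # coo_matrix(row).col recovers the vertex indices with nonzero weight,
    # i.e. the vertices that influence that joint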
    J_reg_vts = []
    for reg_row in J_reg:
        J_reg_vts.append(scipy.sparse.coo_matrix(reg_row).col.tolist())

    c_map = plt.get_cmap('hsv')

    for J_id, J_vts in enumerate(J_reg_vts):
        for cj in J_vts:
            reg_colors[cj, :] = c_map(J_id * 1.0 / len(J_reg_vts))[0:3]

    supp_vc = np.zeros(sv.shape)
    all_num = len(data_all)
    for cidx in range(all_num):
        # colormaps expect floats in [0, 1]; an integer argument indexes the
        # LUT directly, giving nearly the same color for every instance
        c_color = hsv_map(cidx * 1.0 / all_num)
        supp_vc[cidx * v_num:cidx * v_num + v_num, :] = c_color[:3]

        #supp_vc[cidx*v_num:cidx*v_num+v_num,:] = (cidx+8)*1.0/(all_num+8)
        #supp_vc[cidx*v_num:cidx*v_num+v_num,:] = dp_colors
        #supp_vc[cidx*v_num:cidx*v_num+v_num,:] = reg_colors

    #f_vec = np.reshape(f_vec,(-1,3))

    #fullModel = render_model(sv, model.f, ori_img.shape[1], ori_img.shape[0],
    #                         cam_old, near=0.5, far=25, vc=None, img=None)

    print('start render')
    im_render = render_model(sv,
                             f_vec,
                             im.shape[1],
                             im.shape[0],
                             cam,
                             near=0.5,
                             far=25,
                             vc=supp_vc,
                             img=None,
                             lighting=True)
    im_render = im_render.r * 255
    im_render = im_render.astype(np.uint8)

    im_bg = np.zeros(im_render.shape, im_render.dtype)  # black background
    im_out = model_im_overlay(im_render, im_bg)

    cv2.imshow('im', im_out)
    cv2.waitKey(-1)
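
The per-instance coloring above fills an (N*V, 3) buffer with one colormap sample per mesh, repeated across that mesh's vertices. A compact sketch of the pattern, normalizing the instance index into [0, 1] before sampling the colormap:

import numpy as np
import matplotlib.pyplot as plt

def instance_colors(n_instances, v_num, cmap_name='hsv'):
    """One distinct colormap color per mesh instance, repeated per vertex."""
    cmap = plt.get_cmap(cmap_name)
    colors = np.zeros((n_instances * v_num, 3))
    for idx in range(n_instances):
        rgb = cmap(idx * 1.0 / n_instances)[:3]  # float input in [0, 1)
        colors[idx * v_num:(idx + 1) * v_num] = rgb
    return colors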
Ejemplo n.º 13
0
    def genMeshInstance(self, meshType):
        self.render_lock.lock()
        if len(self.smpl_Lists) > 0:
            cfile = self.smpl_Lists[self.frame_id]
            with open(cfile, 'rb') as f:
                SMPLParams = pickle.load(f)
            if not isinstance(SMPLParams, list):
                SMPLParams = [SMPLParams]

            self.vts_buffer = []
            self.vns_buffer = []
            self.inds_buffer = []
            self.face_num = []
            for smplParam in SMPLParams:
                if meshType == 0:  #SMPL
                    sv = verts_decorated(
                        trans=chumpy.array(smplParam['trans']),
                        pose=chumpy.array(smplParam['pose']),
                        v_template=self.smpl.v_template,
                        J=self.smpl.J_regressor,
                        betas=chumpy.array(smplParam['betas']),
                        shapedirs=self.smpl.shapedirs,
                        weights=self.smpl.weights,
                        kintree_table=self.smpl.kintree_table,
                        bs_style=self.smpl.bs_style,
                        f=self.smpl.f,
                        bs_type=self.smpl.bs_type,
                        posedirs=self.smpl.posedirs)

                    SMPL_vts = sv.r * 100.0
                    SMPL_inds = sv.f
                    vts_num = SMPL_vts.shape[0]
                    SMPL_vns = numpy.zeros((vts_num, 3))
                    U = SMPL_vts[SMPL_inds[:, 1]] - SMPL_vts[SMPL_inds[:, 0]]
                    V = SMPL_vts[SMPL_inds[:, 2]] - SMPL_vts[SMPL_inds[:, 0]]

                    Nor = numpy.cross(U, V)
                    Nor = sklearn.preprocessing.normalize(Nor)
                    # accumulate face normals onto shared vertices; np.add.at
                    # handles repeated indices, unlike fancy-indexed += which
                    # applies each duplicated vertex only once
                    numpy.add.at(SMPL_vns, SMPL_inds[:, 0], Nor)
                    numpy.add.at(SMPL_vns, SMPL_inds[:, 1], Nor)
                    numpy.add.at(SMPL_vns, SMPL_inds[:, 2], Nor)

                    SMPL_vns = sklearn.preprocessing.normalize(SMPL_vns)

                    vns = SMPL_vns.flatten()
                    vts = SMPL_vts.flatten()
                    inds = SMPL_inds.flatten()
                else:
                    pose = -smplParam['pose']
                    vts, vns, inds = self.meshlib.adam_vertsdecorate(
                        pose, smplParam['betas'], smplParam['trans'],
                        smplParam['faces'], 2)
                    vns = vns.flatten()
                    vts = vts.flatten()
                    inds = inds.flatten()

                vn_buffer = glGenBuffers(1)
                vts_buffer = glGenBuffers(1)
                inds_buffer = glGenBuffers(1)
                face_num = len(inds) // 3
                glBindBuffer(GL_ARRAY_BUFFER, vn_buffer)
                glBufferData(GL_ARRAY_BUFFER,
                             len(vns) * sizeof(ctypes.c_float),
                             (ctypes.c_float * len(vns))(*vns), GL_STATIC_DRAW)

                glBindBuffer(GL_ARRAY_BUFFER, vts_buffer)
                glBufferData(GL_ARRAY_BUFFER,
                             len(vts) * sizeof(ctypes.c_float),
                             (ctypes.c_float * len(vts))(*vts), GL_STATIC_DRAW)

                glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, inds_buffer)
                glBufferData(GL_ELEMENT_ARRAY_BUFFER,
                             sizeof(ctypes.c_uint) * len(inds),
                             (ctypes.c_uint * len(inds))(*inds),
                             GL_STATIC_DRAW)

                self.vns_buffer.append(vn_buffer)
                self.vts_buffer.append(vts_buffer)
                self.inds_buffer.append(inds_buffer)
                self.face_num.append(face_num)
        self.render_lock.unlock()
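
The normal computation above accumulates each face's cross-product normal (whose magnitude is proportional to face area) onto the face's three vertices, then renormalizes. As a standalone sketch, using np.add.at so that vertices shared by many faces accumulate correctly:

import numpy as np

def vertex_normals(verts, faces):
    """Area-weighted per-vertex normals for a triangle mesh.

    verts: (V, 3) float array; faces: (F, 3) int array.
    """
    u = verts[faces[:, 1]] - verts[faces[:, 0]]
    v = verts[faces[:, 2]] - verts[faces[:, 0]]
    face_n = np.cross(u, v)  # magnitude ~ twice the face area
    vert_n = np.zeros_like(verts)
    for col in range(3):  # scatter-add onto each corner of every face
        np.add.at(vert_n, faces[:, col], face_n)
    norms = np.linalg.norm(vert_n, axis=1, keepdims=True)
    return vert_n / np.maximum(norms, 1e-12)  # guard against zero-length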
Ejemplo n.º 14
0
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       n_betas=10,
                       regs=None,
                       conf=None,
                       viz=False):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are the joint ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array(
        [1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64)

    if try_both_orient:
        flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
            cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
        flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        # store here the final error for both orientations,
        # and pick the orientation resulting in the lowest error
        errors = []

    svs = []
    cams = []
    for o_id, orient in enumerate(orientations):
        # initialize the shape to the mean shape in the SMPL training set
        betas = ch.zeros(n_betas)

        # initialize the pose by using the optimized body orientation and the
        # pose prior
        init_pose = np.hstack((orient, prior.weights.dot(prior.means)))

        # instantiate the model:
        # verts_decorated allows us to define how many
        # shape coefficients (directions) we want to consider (here, n_betas)
        sv = verts_decorated(
            trans=ch.zeros(3),
            pose=ch.array(init_pose),
            v_template=model.v_template,
            J=model.J_regressor,
            betas=betas,
            shapedirs=model.shapedirs[:, :, :n_betas],
            weights=model.weights,
            kintree_table=model.kintree_table,
            bs_style=model.bs_style,
            f=model.f,
            bs_type=model.bs_type,
            posedirs=model.posedirs)

        # make the SMPL joints depend on betas
        Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i])
                           for i in range(len(betas))])
        J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = global_rigid_transformation(
            sv.pose, J_onbetas, model.kintree_table, xp=ch)
        Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        # add the head joint, corresponding to a vertex...
        Jtr = ch.vstack((Jtr, sv[head_id]))

        # ... and add the joint id to the list
        if o_id == 0:
            smpl_ids.append(len(Jtr) - 1)

        # update the weights using confidence values
        weights = base_weights * conf[
            cids] if conf is not None else base_weights

        # project SMPL joints on the image plane using the estimated camera
        cam.v = Jtr

        # data term: distance between observed and estimated joints in 2D
        obj_j2d = lambda w, sigma: (
            w * weights.reshape((-1, 1)) * GMOf((j2d[cids] - cam[smpl_ids]), sigma))

        # mixture of gaussians pose prior
        pprior = lambda w: w * prior(sv.pose)
        # joint angles pose prior, defined over a subset of pose parameters:
        # 55: left elbow,  90deg bend at -np.pi/2
        # 58: right elbow, 90deg bend at np.pi/2
        # 12: left knee,   90deg bend at np.pi/2
        # 15: right knee,  90deg bend at np.pi/2
        alpha = 10
        my_exp = lambda x: alpha * ch.exp(x)
        obj_angle = lambda w: w * ch.concatenate([
            my_exp(sv.pose[55]), my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]), my_exp(-sv.pose[15])])
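        # exp() is nearly flat for negative arguments and grows steeply for
        # positive ones, so each term stays cheap for natural bending and
        # explodes when a knee or elbow bends in the wrong direction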

        if viz:
            import matplotlib.pyplot as plt
            plt.ion()

            def on_step(_):
                """Create visualization."""
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        np.around(cam.r[smpl_ids]).astype(int),
                        np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0 and
                            coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1] and
                            target_coord[0] >= 0 and
                            target_coord[1] < tmp_img.shape[0] and
                            target_coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(target_coord), 3,
                                   [0, 255, 0])
                plt.imshow(tmp_img[:, :, ::-1])
                plt.draw()
                plt.show()
                plt.pause(1e-2)

            on_step(None)
        else:
            on_step = None

        if regs is not None:
            # interpenetration term
            sp = SphereCollisions(
                pose=sv.pose, betas=sv.betas, model=model, regs=regs)
            sp.no_hands = True
        # weight configuration used in the paper, with joints + confidence values from the CNN
        # (all the weights used in the code were obtained via grid search, see the paper for more details)
        # the first list contains the weights for the pose priors,
        # the second list contains the weights for the shape prior
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        # run the optimization in 4 stages, progressively decreasing the
        # weights for the priors
        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}

            objs['j2d'] = obj_j2d(1., 100)

            objs['pose'] = pprior(w)

            objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas

            if regs is not None:
                objs['sph_coll'] = 1e3 * sp

            ch.minimize(
                objs,
                x0=[sv.betas, sv.pose],
                method='dogleg',
                callback=on_step,
                options={'maxiter': 100,
                         'e_3': .0001,
                         'disp': 0})

        t1 = time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)

    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r)
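
A note on the GMOf robustifier used in the 2D data term above: it applies the Geman-McClure penalty, which behaves like a plain squared error for small residuals but saturates near sigma**2 for large ones, so a few badly detected joints cannot dominate the fit. A numpy sketch of the squared penalty follows; as I read the chumpy robustifier, it returns the signed square root of this quantity so that the least-squares solver recovers the penalty after squaring.

import numpy as np

def gmof_squared(x, sigma):
    """Geman-McClure penalty: ~x**2 near zero, -> sigma**2 as |x| grows."""
    x_sq = np.square(x)
    return (sigma ** 2) * x_sq / (sigma ** 2 + x_sq)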