Example #1
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       n_betas=10,
                       regs=None,
                       conf=None,
                       viz=False):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected onto the image plane, and the camera translation
    """
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joint ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    if try_both_orient:
        flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
            cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
        flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        # store here the final error for both orientations,
        # and pick the orientation resulting in the lowest error
        errors = []

    svs = []
    cams = []
    for o_id, orient in enumerate(orientations):
        # initialize the shape to the mean shape in the SMPL training set
        betas = ch.zeros(n_betas)

        # initialize the pose by using the optimized body orientation and the
        # pose prior
        init_pose = np.hstack((orient, prior.weights.dot(prior.means)))

        # instantiate the model:
        # verts_decorated allows us to define how many
        # shape coefficients (directions) we want to consider (here, n_betas)
        sv = verts_decorated(trans=ch.zeros(3),
                             pose=ch.array(init_pose),
                             v_template=model.v_template,
                             J=model.J_regressor,
                             betas=betas,
                             shapedirs=model.shapedirs[:, :, :n_betas],
                             weights=model.weights,
                             kintree_table=model.kintree_table,
                             bs_style=model.bs_style,
                             f=model.f,
                             bs_type=model.bs_type,
                             posedirs=model.posedirs)

        # make the SMPL joints depend on betas
        Jdirs = np.dstack([
            model.J_regressor.dot(model.shapedirs[:, :, i])
            for i in range(len(betas))
        ])
        J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = global_rigid_transformation(sv.pose,
                                                    J_onbetas,
                                                    model.kintree_table,
                                                    xp=ch)
        Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        # add the head joint, corresponding to a vertex...
        Jtr = ch.vstack((Jtr, sv[head_id]))

        # ... and add the joint id to the list
        if o_id == 0:
            smpl_ids.append(len(Jtr) - 1)

        # update the weights using confidence values
        if conf is not None:
            weights = base_weights * conf[cids]
        else:
            weights = base_weights

        # project SMPL joints on the image plane using the estimated camera
        cam.v = Jtr

        # data term: distance between observed and estimated joints in 2D
        obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
            (j2d[cids] - cam[smpl_ids]), sigma))

        # mixture of gaussians pose prior
        pprior = lambda w: w * prior(sv.pose)
        # joint angles pose prior, defined over a subset of pose parameters:
        # 55: left elbow,  90deg bend at -np.pi/2
        # 58: right elbow, 90deg bend at np.pi/2
        # 12: left knee,   90deg bend at np.pi/2
        # 15: right knee,  90deg bend at np.pi/2
        alpha = 10
        my_exp = lambda x: alpha * ch.exp(x)
        obj_angle = lambda w: w * ch.concatenate([
            my_exp(sv.pose[55]),
            my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]),
            my_exp(-sv.pose[15])
        ])

        if viz:
            import matplotlib.pyplot as plt
            plt.ion()

            def on_step(_):
                """Create visualization."""
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        np.around(cam.r[smpl_ids]).astype(int),
                        np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0
                            and coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1]
                            and target_coord[0] >= 0
                            and target_coord[1] < tmp_img.shape[0]
                            and target_coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(target_coord), 3,
                                   [0, 255, 0])
                plt.imshow(tmp_img[:, :, ::-1])
                plt.draw()
                plt.show()
                plt.pause(1e-2)

            on_step(None)  # draw the initial estimate; the argument is unused
        else:
            on_step = None

        if regs is not None:
            # interpenetration term
            sp = SphereCollisions(pose=sv.pose,
                                  betas=sv.betas,
                                  model=model,
                                  regs=regs)
            sp.no_hands = True
        # weight configuration used in the paper, with joints + confidence values from the CNN
        # (all the weights used in the code were obtained via grid search, see the paper for more details)
        # the first list contains the weights for the pose priors,
        # the second list contains the weights for the shape prior
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        # run the optimization in 4 stages, progressively decreasing the
        # weights for the priors
        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}

            objs['j2d'] = obj_j2d(1., 100)

            objs['pose'] = pprior(w)

            objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas

            if regs is not None:
                objs['sph_coll'] = 1e3 * sp

            ch.minimize(objs,
                        x0=[sv.betas, sv.pose],
                        method='dogleg',
                        callback=on_step,
                        options={
                            'maxiter': 100,
                            'e_3': .0001,
                            'disp': 0
                        })

        t1 = time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)

    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r)
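A minimal usage sketch (not from the original code; `smpl_model`, `est_cam`, `pose_prior`, `j2d_est`, `conf_est`, and `init_orient` are hypothetical placeholders for the objects the docstring describes):

# Hypothetical call: j2d_est is the 14x2 CNN joint array, conf_est the matching
# 14D confidence vector, init_orient a 3D axis-angle body orientation.
sv, proj_joints, cam_t = optimize_on_joints(
    j2d_est, smpl_model, est_cam, image, pose_prior,
    try_both_orient=True, body_orient=init_orient,
    n_betas=10, regs=None, conf=conf_est, viz=False)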
Example #2
def optimize_on_joints_and_silhouette(j2d,
                                      sil,
                                      model,
                                      cam,
                                      img,
                                      prior,
                                      init_pose,
                                      init_shape,
                                      n_betas=10,
                                      conf=None):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param sil: h x w silhouette with soft boundaries (np.float32, range(-1, 1))
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image
    :param prior: mixture of gaussians pose prior
    :param init_pose: 72D vector, pose prediction results provided by HMR
    :param init_shape: 10D vector, shape prediction results provided by HMR
    :param n_betas: number of shape coefficients considered during optimization
    :param conf: 14D vector storing the confidence values from the CNN
    :returns: a tuple containing the optimized model, its joints projected onto
              the image plane, and the camera translation
    """
    # define the mapping LSP joints -> SMPL joints
    cids = list(range(12)) + [13]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    betas = ch.array(init_shape)

    # instantiate the model:
    sv = verts_decorated(trans=ch.zeros(3),
                         pose=ch.array(init_pose),
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=model.posedirs)

    # make the SMPL joints depend on betas
    Jdirs = np.dstack([
        model.J_regressor.dot(model.shapedirs[:, :, i])
        for i in range(len(betas))
    ])
    J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
        model.v_template.r)

    # get joint positions as a function of model pose, betas and trans
    (_, A_global) = global_rigid_transformation(sv.pose,
                                                J_onbetas,
                                                model.kintree_table,
                                                xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

    # add the head joint
    Jtr = ch.vstack((Jtr, sv[head_id]))
    smpl_ids.append(len(Jtr) - 1)

    # update the weights using confidence values
    weights = base_weights * conf[cids] if conf is not None else base_weights

    # project SMPL joints and vertex on the image plane using the estimated camera
    cam.v = ch.vstack([Jtr, sv])

    # obtain a gradient map of the soft silhouette
    grad_x = cv2.Sobel(sil, cv2.CV_32FC1, 1, 0) * 0.125
    grad_y = cv2.Sobel(sil, cv2.CV_32FC1, 0, 1) * 0.125

    # data term #1: distance between observed and estimated joints in 2D
    obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
        (j2d[cids] - cam[smpl_ids]), sigma))

    # data term #2: distance between the observed and projected boundaries
    obj_s2d = lambda w, sigma, flag, target_pose: (w * flag * GMOf(
        (target_pose - cam[len(Jtr):(len(Jtr) + 6890)]), sigma))

    # mixture of gaussians pose prior
    pprior = lambda w: w * prior(sv.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10
    my_exp = lambda x: alpha * ch.exp(x)
    obj_angle = lambda w: w * ch.concatenate([
        my_exp(sv.pose[55]),
        my_exp(-sv.pose[58]),
        my_exp(-sv.pose[12]),
        my_exp(-sv.pose[15])
    ])

    # run the optimization in 4 stages, progressively decreasing the
    # weights for the priors
    print('****** Optimization on joints')
    curr_pose = sv.pose.r
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1])
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas
        objs['thetas'] = wbetas * (sv.pose - curr_pose)  # constrain theta changes

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .001,
                        'disp': 0
                    })
    curr_pose = sv.pose.r
    # cam.v = ch.vstack([Jtr, sv.r])

    # run the optimization in 2 stages, progressively decreasing the
    # weights for the priors
    print('****** Optimization on silhouette and joints')
    opt_weights = zip([57.4, 4.78], [2e2, 1e2])
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        # find the boundary vertices and estimate their expected location
        smpl_vs = cam.r[len(Jtr):, :]
        boundary_flag = np.zeros((smpl_vs.shape[0], 1))
        expected_pos = np.zeros((smpl_vs.shape[0], 2))
        for vi, v in enumerate(smpl_vs):
            r, c = int(v[1]), int(v[0])
            if r < 0 or r >= sil.shape[0] or c < 0 or c >= sil.shape[1]:
                continue
            sil_v = sil[r, c]
            grad = np.array([grad_x[r, c], grad_y[r, c]])
            grad_n = np.linalg.norm(grad)
            if grad_n > 1e-1 and sil_v < 0.4:  # vertex on or outside the boundary
                boundary_flag[vi] = 1.0
                step = (grad / grad_n) * (sil_v / grad_n)
                expected_pos[vi] = np.array([c - step[0], r - step[1]])

        # run optimization
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['s2d'] = obj_s2d(5., 100, boundary_flag, expected_pos)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas  # constrain beta changes
        objs['thetas'] = wbetas * (sv.pose - curr_pose)  # constrain theta changes
        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .001,
                        'disp': 0
                    })

    return sv, cam.r, cam.t.r
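A hedged usage sketch, assuming HMR-style initializations; `hmr_pose` (72D), `hmr_shape` (10D), `soft_sil`, and the remaining names are illustrative placeholders:

# Hypothetical call: soft_sil is an h x w float32 soft silhouette in (-1, 1),
# hmr_pose and hmr_shape come from an HMR-style regressor.
sv, proj_joints, cam_t = optimize_on_joints_and_silhouette(
    j2d_est, soft_sil, smpl_model, est_cam, image, pose_prior,
    init_pose=hmr_pose, init_shape=hmr_shape, n_betas=10, conf=conf_est)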
Example #3
def optimize_on_joints3D(model, joints3D, opt_shape=False, viz=True):
    """Fit the model to the given set of 3D joints
    :param model: initial SMPL model; modified in place by the optimization
    :param joints3D: 3D joint locations [16 x 3]
    :param opt_shape: boolean, if True optimizes for shape parameter betas
    :param viz: boolean, if True enables visualization during optimization
    """
    t0 = time()
    if joints3D.shape[0] == 16:
        obj_joints3D = lambda w, sigma: (w * GMOf(
            (joints3D - model.J_transformed[get_indices_16()]), sigma))
    elif joints3D.shape[0] == 24:
        obj_joints3D = lambda w, sigma: (w * GMOf(
            (joints3D - model.J_transformed), sigma))
    else:
        raise ValueError('Unexpected number of joints: %d' % joints3D.shape[0])

    # Create the pose prior (GMM over CMU)
    prior = MaxMixtureCompletePrior(n_gaussians=8).get_gmm_prior()
    pprior = lambda w: w * prior(model.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    my_exp = lambda x: 10 * chumpy.exp(x)
    obj_angle = lambda w: w * chumpy.concatenate([
        my_exp(model.pose[55]),
        my_exp(-model.pose[58]),
        my_exp(-model.pose[12]),
        my_exp(-model.pose[15])
    ])
    # Visualization at optimization step
    if viz:

        def on_step(_):
            """Draw a visualization."""
            plt.figure(1, figsize=(5, 5))
            renderBody(model)
            plt.draw()
            plt.pause(1e-3)
    else:
        on_step = None

    # weight configuration (pose and shape: original values as in SMPLify)
    # the first list contains the weights for the pose prior,
    # the second list contains the weights for the shape prior
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1])

    print('Initial: error(joints3D) = %.2f' %
          (obj_joints3D(100, 100).r**2).sum())

    # run the optimization in 4 stages, progressively decreasing the
    # weights for the priors
    for stage, (wpose, wbetas) in enumerate(opt_weights):
        objs = {}
        objs['joints3D'] = obj_joints3D(100., 100)
        objs['pose'] = pprior(wpose)
        objs['pose_exp'] = obj_angle(0.317 * wpose)
        if opt_shape:
            objs['betas'] = wbetas * model.betas

            chumpy.minimize(objs,
                            x0=[model.pose, model.betas],
                            method='dogleg',
                            callback=on_step,
                            options={
                                'maxiter': 1000,
                                'e_3': .0001,
                                'disp': 0
                            })
        else:
            chumpy.minimize(objs,
                            x0=[model.pose],
                            method='dogleg',
                            callback=on_step,
                            options={
                                'maxiter': 1000,
                                'e_3': .0001,
                                'disp': 0
                            })
        print('Stage %d: error(joints3D) = %.2f' %
              (stage, (objs['joints3D'].r**2).sum()))
    print('\nElapsed theta fitting (%d joints): %.2f sec.' %
          (joints3D.shape[0], (time() - t0)))
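A hedged usage sketch; `smpl_model` and `joints3D_target` (16x3 or 24x3) are placeholders. The function returns nothing, since the model is fitted in place:

# Hypothetical call: smpl_model is a chumpy SMPL model, joints3D_target the
# observed 3D joints; after the call smpl_model.pose (and betas) hold the fit.
optimize_on_joints3D(smpl_model, joints3D_target, opt_shape=True, viz=False)
fitted_verts = smpl_model.r  # posed vertices, assuming .r exposes them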
Example #4
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       init_pose,
                       init_shape,
                       n_betas=10,
                       conf=None):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image
    :param prior: mixture of gaussians pose prior
    :param init_pose: 72D vector, pose prediction results provided by HMR
    :param init_shape: 10D vector, shape prediction results provided by HMR
    :param n_betas: number of shape coefficients considered during optimization
    :param conf: 14D vector storing the confidence values from the CNN
    :returns: a tuple containing the optimized model, its joints projected onto
              the image plane, and the camera translation
    """
    # define the mapping LSP joints -> SMPL joints
    # cids are joint ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    # initialize the shape with the HMR shape prediction
    betas = ch.array(init_shape)

    # instantiate the model:
    # verts_decorated allows us to define how many
    # shape coefficients (directions) we want to consider (here, n_betas)
    sv = verts_decorated(trans=ch.zeros(3),
                         pose=ch.array(init_pose),
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=model.posedirs)

    # make the SMPL joints depend on betas
    Jdirs = np.dstack([
        model.J_regressor.dot(model.shapedirs[:, :, i])
        for i in range(len(betas))
    ])
    J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
        model.v_template.r)

    # get joint positions as a function of model pose, betas and trans
    (_, A_global) = global_rigid_transformation(sv.pose,
                                                J_onbetas,
                                                model.kintree_table,
                                                xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

    # add the head joint, corresponding to a vertex...
    Jtr = ch.vstack((Jtr, sv[head_id]))

    # ... and add the joint id to the list
    smpl_ids.append(len(Jtr) - 1)

    # update the weights using confidence values
    weights = base_weights * conf[cids] if conf is not None else base_weights

    # project SMPL joints on the image plane using the estimated camera
    cam.v = Jtr

    # data term: distance between observed and estimated joints in 2D
    obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
        (j2d[cids] - cam[smpl_ids]), sigma))

    # mixture of gaussians pose prior
    pprior = lambda w: w * prior(sv.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10
    my_exp = lambda x: alpha * ch.exp(x)
    obj_angle = lambda w: w * ch.concatenate([
        my_exp(sv.pose[55]),
        my_exp(-sv.pose[58]),
        my_exp(-sv.pose[12]),
        my_exp(-sv.pose[15])
    ])

    # weight configuration used in the paper, with joints + confidence values from the CNN
    # (all the weights used in the code were obtained via grid search, see the paper for more details)
    # the first list contains the weights for the pose priors,
    # the second list contains the weights for the shape prior
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1])

    # run the optimization in 4 stages, progressively decreasing the
    # weights for the priors
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .0001,
                        'disp': 0
                    })

    return sv, cam.r, cam.t.r
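A hedged usage sketch for this HMR-initialized variant; as before, every name is an illustrative placeholder:

# Hypothetical call: identical data to Example #1, but initialized with the
# 72D HMR pose and 10D HMR shape instead of an orientation search.
sv, proj_joints, cam_t = optimize_on_joints(
    j2d_est, smpl_model, est_cam, image, pose_prior,
    init_pose=hmr_pose, init_shape=hmr_shape, n_betas=10, conf=conf_est)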
Example #5
def optimize_on_vertices(model,
                         vertices,
                         joints3D=np.zeros(1),
                         weights_corr=np.zeros(1),
                         vertices_cross_corr=np.zeros(1),
                         indices_cross_corr=np.zeros(1),
                         weights_cross_corr=np.zeros(1),
                         opt_trans=False,
                         viz=True):
    """Fit the model to the given set of 3D vertices and 3D joints
    :param model: initial SMPL model; modified in place by the optimization
    :param vertices: 3D vertex locations to fit [num_vertices x 3]
    :param joints3D: 3D joint locations to fit [24 x 3]
    :param vertices_cross_corr, indices_cross_corr, weights_cross_corr:
        for each point in vertices_cross_corr, the index of its corresponding
        SMPL vertex and the weight of this correspondence
    :param opt_trans: boolean, if True optimizes only translation
    :param viz: boolean, if True enables visualization during optimization
    """
    t0 = time()
    # Optimization term on the joints3D distance
    if joints3D.shape[0] > 1:
        if joints3D.shape[0] == 16:
            obj_joints3d = lambda w, sigma: (w * GMOf(
                (joints3D - model.J_transformed[get_indices_16()]), sigma))
        elif joints3D.shape[0] == 24:
            obj_joints3d = lambda w, sigma: (w * GMOf(
                (joints3D - model.J_transformed), sigma))
        else:
            raise ValueError('Unexpected number of joints: %d' % joints3D.shape[0])

    # data term: distance between observed and estimated points in 3D
    if (weights_corr.shape[0] == 1):
        weights_corr = np.ones((vertices.shape[0]))

    obj_vertices = lambda w, sigma: (w * GMOf(
        ((vertices.T * weights_corr) - (model.T * weights_corr)).T, sigma))

    if (vertices_cross_corr.shape[0] > 1):
        smplV = model[indices_cross_corr.astype(int), :]
        obj_vertices_cross = lambda w, sigma: (w * GMOf(
            ((vertices_cross_corr.T * weights_cross_corr) -
             (smplV.T * weights_cross_corr)).T, sigma))
    # Create the pose prior (GMM over CMU)
    prior = MaxMixtureCompletePrior(n_gaussians=8).get_gmm_prior()
    pprior = lambda w: w * prior(model.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    my_exp = lambda x: 10 * chumpy.exp(x)
    obj_angle = lambda w: w * chumpy.concatenate([
        my_exp(model.pose[55]),
        my_exp(-model.pose[58]),
        my_exp(-model.pose[12]),
        my_exp(-model.pose[15])
    ])
    # Visualization at optimization step
    if viz:

        def on_step(_):
            """Draw a visualization."""
            plt.figure(1, figsize=(5, 5))
            renderBody(model)
            plt.draw()
            plt.pause(1e-3)
    else:
        on_step = None

    # weight configuration (pose and shape: original values as in SMPLify)
    # the first list contains the weights for the pose prior,
    # the second list contains the weights for the shape prior
    # the third list contains the weights for the joints3D loss
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1], [5, 5, 5, 5])
    print('Initial:')
    print('\terror(vertices) = %.2f' % (obj_vertices(100, 100).r**2).sum())
    if (joints3D.shape[0] > 1):
        print('\terror(joints3d) = %.2f' % (obj_joints3d(100, 100).r**2).sum())
    if (vertices_cross_corr.shape[0] > 1):
        print('\terror(vertices_cross) = %.2f' %
              (obj_vertices_cross(100, 100).r**2).sum())

    # run the optimization in 4 stages, progressively decreasing the
    # weights for the priors
    for stage, (wpose, wbetas, wjoints3D) in enumerate(opt_weights):
        print('Stage %d' % stage)
        objs = {}
        if (joints3D.shape[0] > 1):
            objs['joints3D'] = wjoints3D * obj_joints3d(100., 100)
        objs['vertices'] = obj_vertices(100., 100)
        if (vertices_cross_corr.shape[0] > 1):
            objs['vertices_cross'] = obj_vertices_cross(100., 100)
        objs['pose'] = pprior(wpose)
        objs['pose_exp'] = obj_angle(0.317 * wpose)
        objs['betas'] = wbetas * model.betas

        if opt_trans:
            chumpy.minimize(objs,
                            x0=[model.trans],
                            method='dogleg',
                            callback=on_step,
                            options={
                                'maxiter': 1000,
                                'e_3': .0001,
                                'disp': 0
                            })
        else:
            chumpy.minimize(objs,
                            x0=[model.pose, model.betas],
                            method='dogleg',
                            callback=on_step,
                            options={
                                'maxiter': 1000,
                                'e_3': .0001,
                                'disp': 0
                            })
        print('\terror(vertices) = %.2f' % (objs['vertices'].r**2).sum())
        if (joints3D.shape[0] > 1):
            print('\terror(joints3D) = %.2f' % (objs['joints3D'].r**2).sum())
        if (vertices_cross_corr.shape[0] > 1):
            print('\terror(vertices_cross) = %.2f' %
                  (objs['vertices_cross'].r**2).sum())
    print('Elapsed iteration %.2f sec.' % (time() - t0))
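A hedged usage sketch; `target_verts` (one 3D point per SMPL vertex, 6890 x 3) and `target_joints3D` are placeholders. With opt_trans=True only the global translation is solved for; otherwise pose and shape are optimized:

# Hypothetical call: fit pose and shape to scanned vertices plus 3D joints;
# smpl_model is modified in place, as in the examples above.
optimize_on_vertices(smpl_model, target_verts,
                     joints3D=target_joints3D, opt_trans=False, viz=False)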