Example #1
def optimize_on_DensePose(ori_img,
                          tar_img,
                          dp_iuv,
                          op_j2d,
                          w_j2d,
                          model,
                          cam,
                          cam_old,
                          prior,
                          init_trans,
                          init_pose,
                          init_betas,
                          n_betas=10,
                          viz=False,
                          imageid=1):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    outPath = '/home/xiul/databag/net_images/smplify/vid_{:06d}_puredense'.format(
        imageid)
    if not os.path.isdir(outPath):
        os.mkdir(outPath)

    dp_iuv_tar = dp_iuv.copy()
    width = dp_iuv_tar.shape[1]
    height = dp_iuv_tar.shape[0]
    # per-pixel, per-channel weights for the dense IUV term: base weight 10 everywhere;
    # where the third target channel is below 0.95, the first two channels keep weight 10
    # and the third channel is up-weighted to 15
    dp_iuv_weight = np.zeros(dp_iuv_tar.shape) + 10
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 0] = 10
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 1] = 10
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 2] = 15

    dp_iuv_weight = ch.array(dp_iuv_weight)
    t0 = time()
    # target DensePose IUV map as a differentiable (chumpy) array
    tarIUV = ch.array(dp_iuv)

    # initialize the shape coefficients from the provided estimate
    betas = ch.array(init_betas)

    # initialize the pose from the provided estimate
    pose = ch.array(init_pose)

    # instantiate the model:
    # verts_decorated allows us to define how many
    # shape coefficients (directions) we want to consider (here, n_betas)
    sv = verts_decorated(trans=ch.array(init_trans),
                         pose=pose,
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=None)

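    # Regress 3D joint locations column-by-column from the posed vertices
    # (J_reg is assumed to be a module-level joint regressor), then hook them to
    # the camera below so that cam.r yields the projected 2D joints.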
    J_tmpx = MatVecMult(J_reg, sv[:, 0])
    J_tmpy = MatVecMult(J_reg, sv[:, 1])
    J_tmpz = MatVecMult(J_reg, sv[:, 2])
    Jtr = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T

    #Jtr = J_reg.dot(sv)
    cam.v = Jtr

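    # Render three images from the current estimate: the IUV-colored body (reIUV,
    # compared against the DensePose target), a plain render at the crop resolution
    # (reModel), and a full-resolution render under the original camera (fullModel,
    # used only for visualization). dp_colors is assumed to be a module-level
    # per-vertex IUV color array.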
    reIUV = render_model(sv,
                         model.f,
                         width,
                         height,
                         cam,
                         near=0.5,
                         far=25,
                         vc=dp_colors,
                         img=None)

    reModel = render_model(sv,
                           model.f,
                           width,
                           height,
                           cam,
                           near=0.5,
                           far=25,
                           vc=None,
                           img=None)

    fullModel = render_model(sv,
                             model.f,
                             ori_img.shape[1],
                             ori_img.shape[0],
                             cam_old,
                             near=0.5,
                             far=25,
                             vc=None,
                             img=None)

    #gaussian_pyramid(input_objective, imshape=None, normalization='SSE', n_levels=3, as_list=False, label=None):
    def obj_j2d(w, sigma):
        return (w * w_j2d.reshape((-1, 1)) * GMOf((op_j2d - cam), sigma))

    #input_objective, imshape, normalization, n_levels, as_list

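    # dense data term: per-pixel difference between the target and rendered IUV maps,
    # weighted per channel and aggregated over a 4-level Gaussian pyramid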
    err = (tarIUV - reIUV) * dp_iuv_weight

    obj_dense = 100 * gaussian_pyramid(err, n_levels=4, normalization='SSE')

    #obj_dense = gaussian_pyramid(err, n_levels=4, normalization=SSE)
    #obj_dense = 10000*err

    # (alternative, disabled formulation of the dense data term)
    # obj_dense = lambda w, sigma: (
    #      w * GMOf((tarIUV - reIUV).reshape(-1,3), sigma))

    # mixture of gaussians pose prior
    def pprior(w):
        return w * prior(sv.pose)

    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10

    def my_exp(x):
        return alpha * ch.exp(x)

    def obj_angle(w):
        return w * ch.concatenate([
            my_exp(sv.pose[55]),
            my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]),
            my_exp(-sv.pose[15])
        ])

    def viz_func(stage_num):
        plt.figure(1, figsize=(10, 10))
        plt.clf()
        plt.subplot(2, 3, 1)
        # show optimized joints in 2D
        tmp_img = reIUV.r.copy()
        tmp_tar = tarIUV.r.copy()
        tmp_model = reModel.r.copy()
        full_model = fullModel.r.copy()
        # w = tmp_tar.shape[1]
        # h = tmp_tar.shape[0]
        plt.imshow(tar_img)
        plt.imshow(tmp_img, alpha=0.5)
        for j1, j2, w_ts in zip(cam.r, op_j2d, w_j2d):
            if (w_ts > 0):
                plt.plot([j1[0], j2[0]], [j1[1], j2[1]], 'r')
        plt.xlim([0, width])
        plt.ylim([height, 0])
        plt.subplot(2, 3, 2)
        plt.imshow(tar_img)
        plt.imshow(tmp_tar, alpha=0.5)
        plt.xlim([0, width])
        plt.ylim([height, 0])
        plt.subplot(2, 3, 3)
        plt.imshow(tar_img)
        tmp_model_alpha = np.ones((tmp_model.shape[0], tmp_model.shape[1]))
        tmp_model_alpha[tmp_model[:, :, 0] < 1e-2] = 0
        tmp_model = np.dstack((tmp_model, tmp_model_alpha))
        plt.imshow(tmp_model)
        plt.xlim([0, width])
        plt.ylim([height, 0])
        plt.subplot(2, 1, 2)
        plt.imshow(full_model)

        plt.draw()
        plt.savefig(os.path.join(outPath, 'stage-{}.png'.format(stage_num)),
                    bbox_inches='tight')
        full_model_int = full_model.copy()
        full_model_int *= 255
        full_model_int = full_model_int.astype(np.uint8)
        cv2.imwrite(os.path.join(outPath, 'model-{}.png'.format(stage_num)),
                    full_model_int)
        out_params = {
            'pose': sv.pose.r,
            'shape': sv.betas.r,
            'trans': sv.trans.r
        }
        with open(os.path.join(outPath, 'param-{}.pkl'.format(stage_num)),
                  'wb') as fio:
            pickle.dump(out_params, fio, pickle.HIGHEST_PROTOCOL)

    viz_func(0)
    # Per-iteration visualization is disabled here; viz_func() is called once per
    # stage instead, so no callback is passed to the optimizer.
    on_step = None

    # weight configuration used in the paper, with joints + confidence values from the CNN
    # (all the weights used in the code were obtained via grid search, see the paper for more details)
    # the first list contains the weights for the pose priors,
    # the second list contains the weights for the shape prior

    # opt_weights = zip([4.78, 3.78, 2.78, 1.78],
    #                   [5, 5, 5, 5],
    #                   [1, 0.25, 0.10, 0])
    opt_weights = zip([4.78, 4.78, 4.78, 4.78, 4.78], [50, 50, 50, 50, 50],
                      [0.1, 0.1, 0.5, 0.25, 0.1])

    # run the optimization in 5 stages; the pose and shape prior weights stay
    # fixed while the 2D-joint weight varies across stages
    for stage, (w, wbetas, w_joints) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        #if stage >= 1:
        objs['dense'] = obj_dense
        objs['j2d'] = obj_j2d(w_joints, 50)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose, sv.trans],
                    method='dogleg',
                    callback=on_step,
                    options={
                        'maxiter': 10000,
                        'e_3': .0001,
                        'disp': 1
                    })

        viz_func(stage + 1)

    t1 = time()
    _LOGGER.info('elapsed %.05f', (t1 - t0))

    # (interactive-mode cleanup is skipped; per-iteration visualization is disabled)
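
A minimal, hypothetical driver for Example #1. Every path, camera parameter, and initialization below is an illustrative assumption, not part of the original code; the function also relies on module-level globals (J_reg, dp_colors, render_model, GMOf, gaussian_pyramid, _LOGGER and the usual imports) being defined in the surrounding file.

import cv2
import numpy as np
from opendr.camera import ProjectPoints
from smpl_webuser.serialization import load_model  # SMPL loader (assumed available)

# placeholder inputs; real DensePose/OpenPose outputs would be loaded here
ori_img = cv2.imread('frame.png')             # original frame (hypothetical path)
tar_img = cv2.imread('frame_crop.png')        # cropped target image (hypothetical path)
dp_iuv = np.load('densepose_iuv.npy')         # H x W x 3 target IUV map (hypothetical path)
op_j2d = np.load('openpose_j2d.npy')          # N x 2 detected joints (hypothetical path)
w_j2d = np.load('openpose_conf.npy')          # per-joint confidences (hypothetical path)

model = load_model('basicModel_neutral_lbs_10_207_0_v1.0.0.pkl')  # hypothetical model path

h, w = dp_iuv.shape[:2]
# crude pinhole cameras; the focal lengths and translations are made-up values
cam = ProjectPoints(rt=np.zeros(3), t=np.array([0., 0., 3.]),
                    f=np.array([500., 500.]), c=np.array([w / 2., h / 2.]),
                    k=np.zeros(5))
cam_old = ProjectPoints(rt=np.zeros(3), t=np.array([0., 0., 3.]),
                        f=np.array([500., 500.]),
                        c=np.array([ori_img.shape[1] / 2., ori_img.shape[0] / 2.]),
                        k=np.zeros(5))

# a mixture-of-Gaussians pose prior, e.g. SMPLify's
# MaxMixtureCompletePrior(n_gaussians=8).get_gmm_prior() if that code is available
prior = ...

optimize_on_DensePose(ori_img, tar_img, dp_iuv, op_j2d, w_j2d,
                      model, cam, cam_old, prior,
                      init_trans=np.zeros(3),
                      init_pose=np.zeros(72),
                      init_betas=np.zeros(10),
                      n_betas=10, viz=False, imageid=1)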
Example #2
def optimize_on_DensePose(dp_iuv,
                          op_j2d,
                          w_j2d,
                          model,
                          cam,
                          prior,
                          body_orient,
                          body_trans,
                          body_init,
                          n_betas=10,
                          viz=False,
                          imageid=1):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    outPath = '/home/xiul/databag/dbfusion/record0/smplify/vid{}'.format(
        imageid)
    if not os.path.isdir(outPath):
        os.mkdir(outPath)

    t0 = time()
    # target DensePose IUV map as a differentiable (chumpy) array
    tarIUV = ch.array(dp_iuv)

    # initialize the shape to the mean shape in the SMPL training set
    betas = ch.zeros(n_betas)

    # initialize the pose by using the optimized body orientation and the
    # pose prior

    init_pose = np.hstack((body_orient, prior.weights.dot(prior.means)))
    #init_pose = np.hstack((body_orient, body_init))

    # instantiate the model:
    # verts_decorated allows us to define how many
    # shape coefficients (directions) we want to consider (here, n_betas)
    sv = verts_decorated(trans=ch.array(body_trans),
                         pose=ch.array(init_pose),
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=model.posedirs)

    J_tmpx = MatVecMult(J_reg, sv[:, 0])
    J_tmpy = MatVecMult(J_reg, sv[:, 1])
    J_tmpz = MatVecMult(J_reg, sv[:, 2])
    Jtr = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T

    #Jtr = J_reg.dot(sv)
    cam.v = Jtr

    # rescale the 2D joints from the (assumed) 1920-pixel-wide input frame to the
    # 1280 x 720 resolution used for rendering
    op_j2d = op_j2d * 1280 / 1920
    w = 1280
    h = 720

    reIUV = render_model(sv,
                         model.f,
                         w,
                         h,
                         cam,
                         near=0.5,
                         far=25,
                         vc=dp_colors,
                         img=None)

    #gaussian_pyramid(input_objective, imshape=None, normalization='SSE', n_levels=3, as_list=False, label=None):
    def obj_j2d(w, sigma):
        return (w * w_j2d.reshape((-1, 1)) * GMOf((op_j2d - cam), sigma))

    #input_objective, imshape, normalization, n_levels, as_list

    err = tarIUV - reIUV
    obj_dense = 100 * gaussian_pyramid(err, n_levels=6, normalization='SSE')

    # (alternative, disabled formulation of the dense data term)
    # obj_dense = lambda w, sigma: (
    #      w * GMOf((tarIUV - reIUV).reshape(-1,3), sigma))

    # mixture of gaussians pose prior
    def pprior(w):
        return w * prior(sv.pose)

    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10

    def my_exp(x):
        return alpha * ch.exp(x)

    def obj_angle(w):
        return w * ch.concatenate([
            my_exp(sv.pose[55]),
            my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]),
            my_exp(-sv.pose[15])
        ])

    if viz:
        import matplotlib.pyplot as plt
        from time import gmtime, strftime

        def on_step(cstep):
            """Create visualization."""
            plt.figure(1, figsize=(10, 10))
            plt.clf()
            plt.subplot(1, 2, 1)
            # show optimized joints in 2D
            tmp_img = reIUV.r.copy()
            tmp_tar = tarIUV.r.copy()
            plt.imshow(tmp_img)
            for j1, j2 in zip(cam.r, op_j2d):
                plt.plot([j1[0], j2[0]], [j1[1], j2[1]], 'r')
            plt.subplot(1, 2, 2)
            plt.imshow(tmp_tar)
            plt.draw()
            plt.savefig(os.path.join(
                outPath, '{}.png'.format(strftime("%d_%H_%M_%S", gmtime()))),
                        bbox_inches='tight')

        on_step(0)  # initial visualization before optimization (the argument is unused)

    else:
        on_step = None

    # weight configuration used in the paper, with joints + confidence values from the CNN
    # (all the weights used in the code were obtained via grid search, see the paper for more details)
    # the first list contains the weights for the pose priors,
    # the second list contains the weights for the shape prior
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78, 4.78, 4.78, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1, 5, 5, 5])

    # run the optimization in 7 stages, progressively decreasing the
    # weights for the priors
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        if stage >= 4:
            objs['dense'] = obj_dense
        if stage <= 4:
            objs['j2d'] = obj_j2d(1, 100)
        else:
            objs['j2d'] = obj_j2d(0.1, 100)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=on_step,
                    options={
                        'maxiter': 10000,
                        'e_3': .001,
                        'disp': 1
                    })

    t1 = time()
    _LOGGER.info('elapsed %.05f', (t1 - t0))

    if viz:
        plt.ioff()
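
Example #1's viz_func also dumps the per-stage SMPL parameters to param-<stage>.pkl alongside the rendered images. A minimal sketch for reading one of those files back; the path and stage number below are placeholders.

import pickle

# hypothetical output file written by Example #1 with imageid=1 after the final stage
param_file = '/home/xiul/databag/net_images/smplify/vid_000001_puredense/param-5.pkl'

with open(param_file, 'rb') as fio:
    # note: pickles written under Python 2 need encoding='latin1' when loaded with Python 3
    params = pickle.load(fio)

# 72-dim axis-angle pose, n_betas shape coefficients, 3-dim translation
print(params['pose'].shape, params['shape'].shape, params['trans'].shape)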