Code Example #1
    def _global_rigid_transformation(self):
        results = {}
        pose = self.pose.reshape((-1, 3))
        parent = {
            i: self.kintree_table[0, i]
            for i in range(1, self.kintree_table.shape[1])
        }

        with_zeros = lambda x: ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))
        pack = lambda x: ch.hstack([ch.zeros((4, 3)), x.reshape((4, 1))])

        results[0] = with_zeros(
            ch.hstack((Rodrigues(pose[0, :]), self.J[0, :].reshape((3, 1)))))

        for i in range(1, self.kintree_table.shape[1]):
            results[i] = results[parent[i]].dot(
                with_zeros(
                    ch.hstack((
                        Rodrigues(pose[i, :]),  # rotation around bone endpoint
                        (self.J[i, :] - self.J[parent[i], :]).reshape(
                            (3, 1))  # bone
                    ))))

        results = [results[i] for i in sorted(results.keys())]
        results_global = results

        # subtract rotated J position
        results2 = [
            results[i] -
            (pack(results[i].dot(ch.concatenate((self.J[i, :], [0])))))
            for i in range(len(results))
        ]
        result = ch.dstack(results2)

        return result, results_global
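
The method above walks the kinematic tree root-first, composing one 4x4 rigid transform per joint: `with_zeros` appends the homogeneous row [0, 0, 0, 1] to an [R | t] block, and `pack` turns the rotated rest-pose joint into a translation-only matrix that is subtracted so each transform becomes relative to the rest pose. As a point of reference, here is a minimal NumPy-only sketch of the same forward-kinematics composition (helper and variable names are illustrative; axis-angle conversion goes through SciPy instead of the chumpy Rodrigues op, and the transforms are stacked as (K, 4, 4) rather than dstacked to (4, 4, K)):

import numpy as np
from scipy.spatial.transform import Rotation


def forward_kinematics_np(pose, J, parent):
    """NumPy sketch of the global rigid transforms computed above.

    pose   : (K, 3) axis-angle rotation per joint
    J      : (K, 3) rest-pose joint locations
    parent : dict mapping joint index -> parent index (root joint 0 excluded)
    """
    K = pose.shape[0]
    G = np.zeros((K, 4, 4))
    # root: rotate about the root joint, translate to its rest position
    G[0, :3, :3] = Rotation.from_rotvec(pose[0]).as_matrix()
    G[0, :3, 3] = J[0]
    G[0, 3, 3] = 1.0
    for i in range(1, K):
        local = np.eye(4)
        local[:3, :3] = Rotation.from_rotvec(pose[i]).as_matrix()
        local[:3, 3] = J[i] - J[parent[i]]   # bone offset from the parent joint
        G[i] = G[parent[i]] @ local          # compose down the kinematic chain
    # make each transform relative to the rest pose by subtracting the rotated rest joint
    A = G.copy()
    for i in range(K):
        A[i, :3, 3] -= G[i, :3, :3] @ J[i]
    return A, G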
Code Example #2
def global_rigid_transformation(pose, J, kintree_table):
    def _rodrigues(x):
        return Rodrigues(x)
    results = {}
    pose = pose.reshape((-1, 3))
    id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])}
    parent = {i: id_to_col[kintree_table[0, i]]
              for i in range(1, kintree_table.shape[1])}

    results[0] = with_zeros(
        ch.hstack((_rodrigues(pose[0, :]), J[0, :].reshape((3, 1)))))

    for i in range(1, kintree_table.shape[1]):
        results[i] = results[parent[i]].dot(
            with_zeros(ch.hstack((_rodrigues(pose[i, :]),
                                  ((J[i, :] - J[parent[i], :]).reshape((3, 1)))
                                  ))))

    results = [results[i] for i in sorted(results.keys())]
    results_global = results

    # subtract the rotated rest-pose joint so each transform is relative to the rest pose
    results2 = [results[i] - pack(results[i].dot(ch.concatenate((J[i, :], [0]))))
                for i in range(len(results))]
    results = results2
    result = ch.dstack(results)
    return result, results_global
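
Code Example #2 calls `with_zeros` and `pack` without defining them; in its source module they are presumably top-level helpers equivalent to the lambdas of Code Example #1 (and the local functions of Code Example #5 below). A minimal sketch under that assumption:

import chumpy as ch


def with_zeros(x):
    # append the homogeneous row [0, 0, 0, 1] so a 3x4 [R | t] block becomes a 4x4 transform
    return ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))


def pack(x):
    # place a length-4 vector in the last column of an otherwise all-zero 4x4 matrix
    return ch.hstack([ch.zeros((4, 3)), x.reshape((4, 1))])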
Code Example #3
File: geometry.py  Project: AjayTalati/opendr
def volume(v, f):

    # Construct a 3D matrix of size (nfaces x 3 x 3): the first dimension
    # indexes the face, the second the x/y/z coordinate, and the third
    # which vertex of that face is being referred to
    vs = ch.dstack((v[f[:, 0], :], v[f[:, 1], :], v[f[:, 2], :]))

    v321 = vs[:, 0, 2] * vs[:, 1, 1] * vs[:, 2, 0]
    v231 = vs[:, 0, 1] * vs[:, 1, 2] * vs[:, 2, 0]
    v312 = vs[:, 0, 2] * vs[:, 1, 0] * vs[:, 2, 1]
    v132 = vs[:, 0, 0] * vs[:, 1, 2] * vs[:, 2, 1]
    v213 = vs[:, 0, 1] * vs[:, 1, 0] * vs[:, 2, 2]
    v123 = vs[:, 0, 0] * vs[:, 1, 1] * vs[:, 2, 2]

    volumes = (-v321 + v231 + v312 - v132 - v213 + v123) * (1.0 / 6.0)
    return ch.abs(ch.sum(volumes))
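
The six products above are the expansion of a 3x3 determinant: each triangle, together with the origin, spans a tetrahedron whose signed volume is det([p1; p2; p3]) / 6, and summing the signed volumes over a closed mesh yields the enclosed volume. A plain-NumPy equivalent of the same formula (not differentiable through chumpy; shown only to make the determinant explicit):

import numpy as np


def volume_np(v, f):
    # v: (nverts, 3) vertex positions, f: (nfaces, 3) triangle vertex indices
    tets = v[f]                          # (nfaces, 3 vertices, 3 coords)
    signed = np.linalg.det(tets) / 6.0   # signed tetrahedron volume per face
    return abs(signed.sum())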
Code Example #4
File: geometry.py  Project: Hutaimu1/cv-
def volume(v, f):

    # Construct a 3D matrix of size (nfaces x 3 x 3): the first dimension
    # indexes the face, the second the x/y/z coordinate, and the third
    # which vertex of that face is being referred to
    vs = ch.dstack((v[f[:, 0], :], v[f[:, 1], :], v[f[:, 2], :]))

    v321 = vs[:, 0, 2] * vs[:, 1, 1] * vs[:, 2, 0]
    v231 = vs[:, 0, 1] * vs[:, 1, 2] * vs[:, 2, 0]
    v312 = vs[:, 0, 2] * vs[:, 1, 0] * vs[:, 2, 1]
    v132 = vs[:, 0, 0] * vs[:, 1, 2] * vs[:, 2, 1]
    v213 = vs[:, 0, 1] * vs[:, 1, 0] * vs[:, 2, 2]
    v123 = vs[:, 0, 0] * vs[:, 1, 1] * vs[:, 2, 2]

    volumes = (-v321 + v231 + v312 - v132 - v213 + v123) * (1. / 6.)
    return ch.abs(ch.sum(volumes))
Code Example #5
def global_rigid_transformation(pose, J, kintree_table):
    results = {}
    pose = pose.reshape((-1, 3))
    id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])}
    parent = {
        i: id_to_col[kintree_table[0, i]]
        for i in range(1, kintree_table.shape[1])
    }

    def with_zeros(x):
        return ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))

    results[0] = with_zeros(
        ch.hstack((Rodrigues(pose[0, :]), J[0, :].reshape((3, 1)))))

    for i in range(1, kintree_table.shape[1]):
        results[i] = results[parent[i]].dot(
            with_zeros(
                ch.hstack((Rodrigues(pose[i, :]),
                           ((J[i, :] - J[parent[i], :]).reshape((3, 1)))))))

    def pack(x):
        return ch.hstack([np.zeros((4, 3)), x.reshape((4, 1))])

    results = [results[i] for i in sorted(results.keys())]
    results_global = results

    # subtract the rotated rest-pose joint so each transform is relative to the rest pose
    results2 = [
        results[i] - pack(results[i].dot(ch.concatenate((J[i, :], [0]))))
        for i in range(len(results))
    ]

    results = results2

    result = ch.dstack(results)
    return result, results_global
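
The first return value stacks the relative per-joint transforms to shape (4, 4, K). In SMPL-style linear blend skinning that stack is blended with the per-vertex skinning weights and applied to the rest-pose vertices; a hedged NumPy sketch of that consumer (names are illustrative, and `A` stands for the `result` above as a plain array):

import numpy as np


def blend_skinning_np(A, weights, v_rest):
    """Linear blend skinning sketch.

    A       : (4, 4, K) per-joint transforms relative to the rest pose
    weights : (N, K) blend weights, each row summing to 1
    v_rest  : (N, 3) rest-pose vertices
    """
    T = A.dot(weights.T)                                        # (4, 4, N) blended transform per vertex
    v_h = np.vstack((v_rest.T, np.ones((1, v_rest.shape[0]))))  # (4, N) homogeneous vertices
    v_posed = np.einsum('ijn,jn->in', T, v_h)                   # apply each vertex's transform
    return v_posed[:3].T                                        # back to (N, 3)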
Code Example #6
def optimize_on_DensePose(raw_img,
                          tar_img,
                          dp_iuv,
                          op_j2d,
                          w_j2d,
                          model,
                          cam,
                          cam_old,
                          prior,
                          init_trans,
                          init_pose,
                          init_betas,
                          n_betas=10,
                          viz=False,
                          imgname='tmp'):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    outPath = os.path.join(ROOT_PATH, 'smplify/{}_stage'.format(imgname))
    if not os.path.isdir(outPath):
        os.mkdir(outPath)

    print(outPath)

    dp_iuv_tar = dp_iuv.copy()
    width = dp_iuv_tar.shape[1]
    height = dp_iuv_tar.shape[0]

    dp_iuv_weight = np.zeros(dp_iuv_tar.shape) + 1
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 0] = 1
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 1] = 1
    dp_iuv_weight[dp_iuv_tar[:, :, 2] < 0.95, 2] = 255.0 / 24

    dp_iuv_weight = ch.array(dp_iuv_weight)
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joints ids for LSP:

    tarIUV = ch.array(dp_iuv)

    ori_betas = init_betas.copy()
    ori_betas_ch = ch.array(ori_betas)

    # initialize the shape to the mean shape in the SMPL training set
    betas = ch.array(init_betas)

    # initialize the pose by using the optimized body orientation and the
    # pose prior

    pose = ch.array(init_pose)
    #init_pose = np.hstack((body_orient, body_init))

    # instantiate the model:
    # verts_decorated allows us to define how many
    # shape coefficients (directions) we want to consider (here, n_betas)
    sv = verts_decorated(trans=ch.array(init_trans),
                         pose=pose,
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=None)

    # regress 3D joint locations from the posed vertices
    # (J_reg is assumed to be a joint-regressor matrix, e.g. model.J_regressor,
    # defined in the enclosing scope)
    J_tmpx = MatVecMult(J_reg, sv[:, 0])
    J_tmpy = MatVecMult(J_reg, sv[:, 1])
    J_tmpz = MatVecMult(J_reg, sv[:, 2])
    Jtr = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T

    #Jtr = J_reg.dot(sv)
    cam.v = Jtr

    reIUV = render_model(sv,
                         model.f,
                         width,
                         height,
                         cam,
                         near=0.5,
                         far=25,
                         vc=dp_colors,
                         img=None)

    reModel = render_model(sv,
                           model.f,
                           width,
                           height,
                           cam,
                           near=0.5,
                           far=25,
                           vc=None,
                           img=None)

    fullModel = render_model(sv,
                             model.f,
                             raw_img.shape[1],
                             raw_img.shape[0],
                             cam_old,
                             near=0.5,
                             far=25,
                             vc=None,
                             img=None)

    #gaussian_pyramid(input_objective, imshape=None, normalization='SSE', n_levels=3, as_list=False, label=None):
    def obj_j2d(w, sigma):
        return (w * w_j2d.reshape((-1, 1)) * GMOf((op_j2d - cam), sigma))

    #input_objective, imshape, normalization, n_levels, as_list

    err = (tarIUV - reIUV) * dp_iuv_weight

    err_uv = 1 - ch.exp(-err[:, :, 0:2]**2 / 1)

    err_inds = 1 - ch.exp(-err[:, :, 2]**2 / 0.001)
    err_total = ch.dstack((err_uv, err_inds))
    #obj_dense = gaussian_pyramid(err, n_levels=4, normalization='SSE')
    obj_dense = gaussian_pyramid(err_total, n_levels=4, normalization=None)

    # TODO
    # the render error can be further extended to geodistic error.

    # data term: distance between observed and estimated joints in 2D
    # obj_dense = lambda w, sigma: (
    #      w * GMOf((tarIUV - reIUV).reshape(-1,3), sigma))
    # mixture of gaussians pose prior
    def pprior(w):
        return w * prior(sv.pose)

    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10

    def my_exp(x):
        return alpha * ch.exp(x)

    def obj_angle(w):
        return w * ch.concatenate([
            my_exp(sv.pose[55]),
            my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]),
            my_exp(-sv.pose[15])
        ])

    def viz_func(stage_num):
        #plt.figure(1, figsize=(10, 10))

        # show optimized joints in 2D
        tmp_img = reIUV.r.copy()
        tmp_tar = tarIUV.copy()
        tmp_model = reModel.r.copy()
        full_model = fullModel.r.copy()
        # w = tmp_tar.shape[1]
        # h = tmp_tar.shape[0]
        for aa in ax:
            for cax in aa:
                cax.clear()

        ax[0][0].imshow(tar_img)
        ax[0][0].imshow(tmp_img, alpha=0.5)
        for j1, j2, w_ts in zip(cam.r, op_j2d, w_j2d):
            if (w_ts > 0):
                ax[0][0].plot([j1[0], j2[0]], [j1[1], j2[1]], 'r')
        ax[0][0].set_xlim([0, width])
        ax[0][0].set_ylim([height, 0])

        ax[0][1].imshow(tar_img)
        ax[0][1].imshow(tmp_tar, alpha=0.5)
        ax[0][1].set_xlim([0, width])
        ax[0][1].set_ylim([height, 0])

        ax[0][2].imshow(tar_img)
        tmp_model_alpha = np.ones((tmp_model.shape[0], tmp_model.shape[1]))
        tmp_model_alpha[tmp_model[:, :, 0] < 1e-2] = 0
        tmp_model = np.dstack((tmp_model, tmp_model_alpha))
        ax[0][2].imshow(tmp_model)
        ax[0][2].set_xlim([0, width])
        ax[0][2].set_ylim([height, 0])
        tmp_err = err_total.r.copy()

        #show error
        ax[1][0].imshow(tmp_err[:, :, 0])
        ax[1][0].set_xlim([0, width])
        ax[1][0].set_ylim([height, 0])

        ax[1][1].imshow(tmp_err[:, :, 1])
        ax[1][1].set_xlim([0, width])
        ax[1][1].set_ylim([height, 0])

        ax[1][2].imshow(tmp_err[:, :, 2])
        ax[1][2].set_xlim([0, width])
        ax[1][2].set_ylim([height, 0])

        #show target
        ax[2][0].imshow(tmp_tar[:, :, 0])
        ax[2][0].set_xlim([0, width])
        ax[2][0].set_ylim([height, 0])

        ax[2][1].imshow(tmp_tar[:, :, 1])
        ax[2][1].set_xlim([0, width])
        ax[2][1].set_ylim([height, 0])

        ax[2][2].imshow(tmp_tar[:, :, 2])
        ax[2][2].set_xlim([0, width])
        ax[2][2].set_ylim([height, 0])

        #show reproj
        ax[3][0].imshow(tmp_img[:, :, 0])
        ax[3][0].set_xlim([0, width])
        ax[3][0].set_ylim([height, 0])

        ax[3][1].imshow(tmp_img[:, :, 1])
        ax[3][1].set_xlim([0, width])
        ax[3][1].set_ylim([height, 0])

        ax[3][2].imshow(tmp_img[:, :, 2])
        ax[3][2].set_xlim([0, width])
        ax[3][2].set_ylim([height, 0])

        for aa in ax:
            aa[0].set_xticks(())
            aa[0].set_yticks(())
            aa[1].set_xticks(())
            aa[1].set_yticks(())
            aa[2].set_xticks(())
            aa[2].set_yticks(())

        #plt.tight_layout()
        plt.draw()
        plt.savefig(os.path.join(outPath, 'stage-{}.png'.format(stage_num)),
                    bbox_inches='tight')
        full_model_int = full_model.copy()
        full_model_int *= 255
        full_model_int = full_model_int.astype(np.uint8)
        cv2.imwrite(os.path.join(outPath, 'model-{}.png'.format(stage_num)),
                    full_model_int)
        out_params = {
            'pose': sv.pose.r,
            'shape': sv.betas.r,
            'trans': sv.trans.r
        }
        with open(os.path.join(outPath, 'param-{}.pkl'.format(stage_num)),
                  'wb') as fio:
            pickle.dump(out_params, fio, pickle.HIGHEST_PROTOCOL)

    # Optional per-step visualization callback for ch.minimize;
    # the `and False` guard keeps it disabled here.
    if viz and False:

        def on_step(cstep):
            """Create visualization."""
            # TODO this function is in vis_func
            #plt.savefig(os.path.join(outPath,'{}.png'.format(strftime("%d_%H_%M_%S", gmtime()))),bbox_inches='tight')

    else:
        on_step = None

    # weight configuration used in the paper, with joints + confidence values from the CNN
    # (all the weights used in the code were obtained via grid search, see the paper for more details)
    # the first list contains the weights for the pose priors,
    # the second the shape prior, and the third the 2D joint term

    # opt_weights = zip([4.78, 3.78, 2.78, 1.78],
    #                   [5, 5, 5, 5],
    #                   [1, 0.25, 0.10, 0])
    opt_weights = zip([4.78], [50], [10])

    # run the optimization, progressively decreasing the weights for the
    # priors (only a single weight configuration is active here)
    for stage, (w, wbetas, w_joints) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}

        #objs['dense'] = obj_dense
        objs['j2d'] = obj_j2d(w_joints, 50)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        #objs['betas'] = wbetas * (sv.betas - ori_betas_ch)

        ch.minimize(
            objs,
            #x0=[sv.betas, sv.pose, sv.trans],
            x0=[sv.pose, sv.trans],
            method='dogleg',
            callback=on_step,
            options={
                'maxiter': 10000,
                'e_3': .001,
                'disp': 1
            })

        viz_func(254)

    t1 = time()
    _LOGGER.info('elapsed %.05f', (t1 - t0))

    if viz and False:
        plt.ioff()
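
For reference, the dense term in Code Example #6 weights the difference between the target and rendered IUV maps per channel and passes it through the saturating robustifier 1 - exp(-e^2 / sigma) before building the Gaussian pyramid. A standalone NumPy restatement of that residual (function name and defaults are illustrative; the example uses sigma = 1 for the UV channels and 0.001 for the part-index channel):

import numpy as np


def robust_iuv_residual(rendered, target, weight, sigma_uv=1.0, sigma_i=0.001):
    # rendered, target: (h, w, 3) IUV images; weight: (h, w, 3) per-channel weights
    err = (target - rendered) * weight
    err_uv = 1.0 - np.exp(-err[:, :, 0:2] ** 2 / sigma_uv)  # U and V channels
    err_i = 1.0 - np.exp(-err[:, :, 2] ** 2 / sigma_i)      # part-index channel, sharper falloff
    return np.dstack((err_uv, err_i))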