Example #1
    def test_nested_concatenate(self):
        aa = ch.arange(3)
        bb = ch.arange(4)
        cc = ch.arange(5)

        result = ch.concatenate((ch.concatenate((aa, bb)), cc))
        self.assertTrue(result.m0 is aa)
        self.assertTrue(result.m1 is bb)
        self.assertTrue(result.m2 is cc)

        self.assertTrue(result.dr_wrt(aa).nnz > 0)
        self.assertTrue(result.dr_wrt(bb).nnz > 0)
        self.assertTrue(result.dr_wrt(cc).nnz > 0)
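The test relies on two chumpy behaviors worth noting: nested concatenations are flattened into one node that keeps its inputs as differentiable terms (m0, m1, m2), and dr_wrt returns a sparse Jacobian. A minimal standalone sketch of the same check (assuming only chumpy, imported as ch):

import chumpy as ch

a = ch.arange(3)
b = ch.arange(4)
c = ch.concatenate((a, b))
print(c.r)              # concatenated values of a and b, length 7
print(c.dr_wrt(a).nnz)  # 3: an identity block, one nonzero per entry of a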
Example #2
def laplacian_pyramid(input_objective, imshape, normalization, n_levels, as_list):

    if normalization is None:
        norm2 = lambda x : x
    elif normalization == 'SSE':
        norm2 = lambda x : x / np.sqrt(np.sum(x.r**2.))
    elif normalization == 'size':
        norm2 = lambda x : x / x.r.size
    else:
        norm2 = normalization

    
    output_objs = []
    for level in range(n_levels):
    
        blur_mtx = filter_for(imshape[0], imshape[1], imshape[2] if len(imshape)>2 else 1, kernel = GaussianKernel2D(3, 1))
        blurred = MatVecMult(blur_mtx, input_objective).reshape(imshape)
        output_objs.append(norm2(input_objective - blurred))


        halfsampler_mtx, imshape = halfsampler_for(imshape)
        input_objective = MatVecMult(halfsampler_mtx, blurred.ravel()).reshape(imshape)
        
    output_objs.append(norm2(input_objective).reshape(imshape)) 
        
    return output_objs if as_list else reduce(lambda x, y : ch.concatenate((x.ravel(), y.ravel())), output_objs)
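Note that on Python 3 the reduce used in the last line must be imported from functools. A standalone sketch of the flattening idiom, which folds a list of differentiable terms into one flat residual vector:

from functools import reduce
import chumpy as ch

terms = [ch.arange(4).reshape((2, 2)), ch.arange(3)]
flat = reduce(lambda x, y: ch.concatenate((x.ravel(), y.ravel())), terms)
print(flat.shape)  # (7,) -- 4 + 3 entries, still differentiable w.r.t. both terms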
Example #3
def chZonalHarmonics(a):
    zl0 = -ch.sqrt(ch.pi) * (-1.0 + ch.cos(a))
    zl1 = 0.5 * ch.sqrt(3.0 * ch.pi) * ch.sin(a)**2
    zl2 = -0.5 * ch.sqrt(
        5.0 * ch.pi) * ch.cos(a) * (-1.0 + ch.cos(a)) * (ch.cos(a) + 1.0)
    z = [zl0, zl1, zl2]
    return ch.concatenate(z)
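These formulas match the l = 0, 1, 2 zonal-harmonic coefficients of a spherical cap of half-angle a. A quick NumPy check of the limit case (a sketch, not part of the original code): a cap with a = pi covers the whole sphere, so only the constant l = 0 band should survive:

import numpy as np

a = np.pi
zl0 = -np.sqrt(np.pi) * (-1.0 + np.cos(a))
zl1 = 0.5 * np.sqrt(3.0 * np.pi) * np.sin(a) ** 2
zl2 = -0.5 * np.sqrt(5.0 * np.pi) * np.cos(a) * (-1.0 + np.cos(a)) * (np.cos(a) + 1.0)
print(np.round([zl0, zl1, zl2], 6))  # [3.544908 0. 0.] -- i.e. 2*sqrt(pi), 0, 0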
Example #4
 def obj_angle(w):
     return w * ch.concatenate([
         my_exp(sv.pose[55]),
         my_exp(-sv.pose[58]),
         my_exp(-sv.pose[12]),
         my_exp(-sv.pose[15])
     ])
Example #5
def gaussian_pyramid(input_objective, imshape=None, normalization='SSE', n_levels=3, as_list=False, label=None):
    
    if imshape is None:
        imshape = input_objective.shape

    if normalization is None:
        norm2 = lambda x : x
    elif normalization == 'SSE':
        norm2 = lambda x : x / np.sqrt(np.sum(x.r**2.))
    elif normalization == 'size':
        norm2 = lambda x : x / x.r.size
    else:
        norm2 = normalization

    cur_imshape = deepcopy(imshape)
    cur_obj = input_objective

    input_objective = norm2(input_objective)
    output_objectives = [input_objective]

    for ik in range(n_levels):    
        cur_obj = GaussPyrDownOne(px=cur_obj, im_shape = cur_imshape)
        cur_imshape = cur_obj.output_shape
        output_objectives.append(norm2(cur_obj) if label is None else norm2(cur_obj) >> '%s%d' % (label,ik))
        
    if not as_list:
        andit = lambda a : reduce(lambda x, y : ch.concatenate((x.ravel(), y.ravel())), a)
        output_objectives = andit(output_objectives)

    return output_objectives
Example #6
def global_rigid_transformation(pose, J, kintree_table):
    def _rodrigues(x):
        return Rodrigues(x)
    results = {}
    pose = pose.reshape((-1, 3))
    id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])}
    parent = {i: id_to_col[kintree_table[0, i]]
              for i in range(1, kintree_table.shape[1])}

    results[0] = with_zeros(
        ch.hstack((_rodrigues(pose[0, :]), J[0, :].reshape((3, 1)))))

    for i in range(1, kintree_table.shape[1]):
        results[i] = results[parent[i]].dot(
            with_zeros(ch.hstack((_rodrigues(pose[i, :]),
                                  ((J[i, :] - J[parent[i], :]).reshape((3, 1)))
                                  ))))

    results = [results[i] for i in sorted(results.keys())]
    results_global = results

    results2 = [results[i] - (pack(
        results[i].dot(ch.concatenate(((J[i, :]), 0))))
    ) for i in range(len(results))]
    results = results2
    result = ch.dstack(results)
    return result, results_global
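Here with_zeros and pack are assumed to come from the surrounding module; the next example defines them inline. For reference, a NumPy sketch of what they do:

import numpy as np

def with_zeros_np(x):
    # promote a 3x4 [R|t] block to a 4x4 homogeneous transform
    return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))

def pack_np(x):
    # embed a 4-vector as the translation column of an otherwise-zero 4x4,
    # used above to subtract the rotated rest-pose joint location
    return np.hstack([np.zeros((4, 3)), x.reshape((4, 1))])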
Example #7
    def _global_rigid_transformation(self):
        results = {}
        pose = self.pose.reshape((-1, 3))
        parent = {
            i: self.kintree_table[0, i]
            for i in range(1, self.kintree_table.shape[1])
        }

        with_zeros = lambda x: ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))
        pack = lambda x: ch.hstack([ch.zeros((4, 3)), x.reshape((4, 1))])

        results[0] = with_zeros(
            ch.hstack((Rodrigues(pose[0, :]), self.J[0, :].reshape((3, 1)))))

        for i in range(1, self.kintree_table.shape[1]):
            results[i] = results[parent[i]].dot(
                with_zeros(
                    ch.hstack((
                        Rodrigues(pose[i, :]),  # rotation around bone endpoint
                        (self.J[i, :] - self.J[parent[i], :]).reshape(
                            (3, 1))  # bone
                    ))))

        results = [results[i] for i in sorted(results.keys())]
        results_global = results

        # subtract rotated J position
        results2 = [
            results[i] -
            (pack(results[i].dot(ch.concatenate((self.J[i, :], [0])))))
            for i in range(len(results))
        ]
        result = ch.dstack(results2)

        return result, results_global
Example #8
def lrotmin(p):
    if isinstance(p, np.ndarray):
        p = p.ravel()[3:]
        return np.concatenate([(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() for pp in p.reshape((-1, 3))]).ravel()
    if p.ndim != 2 or p.shape[1] != 3:
        p = p.reshape((-1, 3))
    p = p[1:]
    return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() for pp in p]).ravel()
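A sanity sketch of the NumPy branch (assuming the SMPL-style 72-dim pose layout, whose first 3 entries are the global rotation that lrotmin drops): at the rest pose every Rodrigues rotation is the identity, so the flattened (R - I) feature vanishes:

import numpy as np
import cv2

p = np.zeros(72)  # 24 joints x 3 axis-angle parameters
feat = np.concatenate([(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel()
                       for pp in p.ravel()[3:].reshape((-1, 3))])
print(feat.shape, np.allclose(feat, 0))  # (207,) True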
Example #9
def verts_decorated_quat(trans,
                         pose,
                         v_template,
                         J_regressor,
                         weights,
                         kintree_table,
                         f,
                         posedirs=None,
                         betas=None,
                         add_shape=True,
                         shapedirs=None,
                         want_Jtr=False):

    for which in [
            trans, pose, v_template, weights, posedirs, betas, shapedirs
    ]:
        if which is not None:
            assert ischumpy(which)
    v = v_template

    v_shaped = v + shapedirs.dot(betas)  #Add Shape of the model.
    quaternion_angles = axis2quat(pose.reshape((-1, 3))).reshape(-1)[4:]
    shape_feat = betas[1]
    feat = ch.concatenate([quaternion_angles, shape_feat], axis=0)
    poseblends = posedirs.dot(feat)
    v_posed = v_shaped + poseblends

    J_tmpx = MatVecMult(J_regressor, v_shaped[:, 0])
    J_tmpy = MatVecMult(J_regressor, v_shaped[:, 1])
    J_tmpz = MatVecMult(J_regressor, v_shaped[:, 2])
    J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T

    result, meta = verts_core(pose,
                              v,
                              J,
                              weights,
                              kintree_table,
                              want_Jtr=True)
    Jtr = meta.Jtr if meta is not None else None
    tr = trans.reshape((1, 3))
    result = result + tr
    Jtr = Jtr + tr

    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights

    result.posedirs = posedirs
    result.v_posed = v_posed
    result.shapedirs = shapedirs
    result.betas = betas
    result.v_shaped = v_shaped
    result.J_transformed = Jtr

    return result
Example #10
def capsule_dist(capsule0, capsule1, alpha=.3, increase_hand=True):
    range0 = range(capsule0.center_id,
                   capsule0.center_id + len(capsule0.centers))
    range1 = range(capsule1.center_id,
                   capsule1.center_id + len(capsule1.centers))
    cnt0 = ch.concatenate([[cid] * len(range1) for cid in range0])
    cnt1 = ch.concatenate([range1] * len(range0))
    if increase_hand:
        if (capsule0.id == 18) or (capsule0.id == 19) or (
                capsule1.id == 18) or (capsule1.id == 19):
            dst = (alpha * 1.2 * capsule0.rad.r)**2 + (alpha * 1.2 *
                                                       capsule1.rad.r)**2
        else:
            dst = (alpha * capsule0.rad.r)**2 + (alpha * capsule1.rad.r)**2
    else:
        dst = (alpha * capsule0.rad.r)**2 + (alpha * capsule1.rad.r)**2
    radiuss = np.hstack([dst] * len(cnt0)).squeeze()
    return (cnt0, cnt1, radiuss)
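The two ch.concatenate calls enumerate every pair of capsule centers, i.e. the Cartesian product range0 x range1. The same pairing in plain NumPy:

import numpy as np

range0, range1 = np.arange(2, 5), np.arange(7, 9)
cnt0 = np.repeat(range0, len(range1))  # [2 2 3 3 4 4]
cnt1 = np.tile(range1, len(range0))    # [7 8 7 8 7 8]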
Example #11
def chZonalToSphericalHarmonics(z, theta, phi):
    sphCoeffs = []
    for l in np.arange(len(z)):
        for m in np.arange(int(-(l * 2 + 1) / 2),
                           int((l * 2 + 1) / 2) + 1):
            ylm_d = chSpherical_harmonics[(l, m)](theta, phi)
            sh = np.sqrt(4 * np.pi / (2 * l + 1)) * z[l] * ylm_d
            sphCoeffs = sphCoeffs + [sh]

    #Correct order in band l=1.
    sphCoeffs[1], sphCoeffs[3] = sphCoeffs[3], sphCoeffs[1]
    chSphCoeffs = ch.concatenate(sphCoeffs)
    return chSphCoeffs
Example #12
def axis2quat(p):
    angle = ch.sqrt(ch.clip(ch.sum(ch.square(p), 1), 1e-16, 1e16))
    norm_p = p / angle[:, np.newaxis]
    cos_angle = ch.cos(angle / 2)
    sin_angle = ch.sin(angle / 2)
    qx = norm_p[:, 0] * sin_angle
    qy = norm_p[:, 1] * sin_angle
    qz = norm_p[:, 2] * sin_angle
    qw = cos_angle - 1
    return ch.concatenate(
        [qx[:, np.newaxis], qy[:, np.newaxis], qz[:, np.newaxis],
         qw[:, np.newaxis]],
        axis=1)
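Note the -1 offset on qw: it makes the identity rotation map to the zero quaternion feature, so (like lrotmin) the pose-dependent term vanishes at the rest pose. A NumPy check of the same arithmetic for a single 90-degree rotation about x (a sketch):

import numpy as np

p = np.array([[np.pi / 2, 0.0, 0.0]])
angle = np.sqrt(np.clip(np.sum(p ** 2, 1), 1e-16, 1e16))
axis = p / angle[:, None]
quat = np.hstack([axis * np.sin(angle / 2)[:, None],
                  (np.cos(angle / 2) - 1)[:, None]])
print(np.round(quat, 4))  # [[ 0.7071  0.      0.     -0.2929]]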
Example #13
def getHandModel():
    globalJoints = ch.zeros((45, ))
    globalBeta = ch.zeros((10, ))
    chRot = ch.zeros((3, ))
    chTrans = ch.array([0., 0., 0.5])

    fullpose = ch.concatenate([chRot, globalJoints], axis=0)
    m = load_model_withInputs(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '../mano/models/MANO_RIGHT.pkl'),
                              fullpose,
                              chTrans,
                              globalBeta,
                              ncomps=15,
                              flat_hand_mean=True)

    return m, chRot, globalJoints, chTrans, globalBeta
Example #14
def global_rigid_transformation(pose, J, kintree_table):
    results = {}
    pose = pose.reshape((-1, 3))
    id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])}
    parent = {
        i: id_to_col[kintree_table[0, i]]
        for i in range(1, kintree_table.shape[1])
    }

    def with_zeros(x):
        return ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))

    results[0] = with_zeros(
        ch.hstack((Rodrigues(pose[0, :]), J[0, :].reshape((3, 1)))))

    for i in range(1, kintree_table.shape[1]):
        results[i] = results[parent[i]].dot(
            with_zeros(
                ch.hstack((Rodrigues(pose[i, :]),
                           ((J[i, :] - J[parent[i], :]).reshape((3, 1)))))))

    def pack(x):
        return ch.hstack([np.zeros((4, 3)), x.reshape((4, 1))])

    results = [results[i] for i in sorted(results.keys())]
    results_global = results

    results2 = [
        results[i] - pack(results[i].dot(ch.concatenate(((J[i, :]), 0))))
        for i in range(len(results))
    ]

    results = results2

    result = ch.dstack(results)
    return result, results_global
Example #15
 def compute_r(self):
     min_w = self.weights[self.min_component_idx]
     # Add the sqrt(-log(weights))
     return ch.concatenate((self.loglikelihoods[self.min_component_idx].r,
                            np.sqrt(-np.log(min_w))))
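The sqrt(-log(weights)) term deserves a note: a least-squares solver squares each residual it is given, so appending sqrt(-log w) contributes exactly -log w to the objective, which is the mixing-weight part of the chosen component's negative log-likelihood. Numerically:

import numpy as np

min_w = 0.25
r_extra = np.sqrt(-np.log(min_w))
print(r_extra ** 2, -np.log(min_w))  # both 1.3862943611... = -log(0.25)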
Example #16
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       exp_logistic,
                       n_betas=10,
                       inner_penetration=False,
                       silh=None,
                       conf=None,
                       viz=False):
    """Run the optimization."""
    if silh is not None:
        raise NotImplementedError("Silhouette fitting is not supported in "
                                  "this code release due to dependencies on "
                                  "proprietary code for the "
                                  "distance computation.")
    t0 = _time()
    # define the mapping LSP joints -> SMPL joints
    if j2d.shape[0] == 14:
        cids = list(range(12)) + [13]
    elif j2d.shape[0] == 91:
        cids = list(range(j2d.shape[0]))
    else:
        raise Exception("Unknown number of joints: %d! Mapping not defined!" %
                        j2d.shape[0])
    # joint ids for SMPL
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
    # weight given to each joint during optimization;
    if j2d.shape[0] == 14:
        weights = [1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    else:
        weights = [1] * (len(smpl_ids) + len(landmark_mesh_91))
    # The non-skeleton vertex ids are added later.

    if try_both_orient:
        flipped_orient = _cv2.Rodrigues(body_orient)[0].dot(
            _cv2.Rodrigues(_np.array([0., _np.pi, 0]))[0])
        flipped_orient = _cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        errors = []
    svs = []
    cams = []
    # rends = []
    for o_id, orient in enumerate(orientations):
        # initialize betas
        betas = _ch.zeros(n_betas)  # pylint: disable=no-member

        init_pose = _np.hstack((orient, prior.weights.dot(prior.means)))

        # 2D joint error term
        # make the SMPL joint depend on betas
        Jdirs = _np.dstack([
            model.J_regressor.dot(model.shapedirs[:, :, i])
            for i in range(len(betas))
        ])
        # pylint: disable=no-member
        J_onbetas = _ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # instantiate the model
        sv = verts_decorated(trans=_ch.zeros(3),
                             pose=_ch.array(init_pose),
                             v_template=model.v_template,
                             J=model.J_regressor,
                             betas=betas,
                             shapedirs=model.shapedirs[:, :, :n_betas],
                             weights=model.weights,
                             kintree_table=model.kintree_table,
                             bs_style=model.bs_style,
                             f=model.f,
                             bs_type=model.bs_type,
                             posedirs=model.posedirs)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = _global_rigid_transformation(sv.pose,
                                                     J_onbetas,
                                                     model.kintree_table,
                                                     xp=_ch)
        Jtr = _ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        if j2d.shape[0] == 14:
            # add the "fake" joint for the head
            head_id = _HEAD_REGR[0]
            Jtr = _ch.vstack((Jtr, sv[head_id]))
            if o_id == 0:
                smpl_ids.append(len(Jtr) - 1)
        else:
            # add the plain vertex IDs on the mesh surface.
            for vertex_id in landmark_mesh_91.values():
                Jtr = _ch.vstack((Jtr, sv[vertex_id]))
                # add the joint id
                # for SMPL it's the last one added
                if o_id == 0:
                    smpl_ids.append(len(Jtr) - 1)
        weights = _np.array(weights, dtype=_np.float64)
        if conf is not None:
            weights *= conf[cids]

        # we'll project the joints on the image plane
        cam.v = Jtr

        # data term: difference between observed and estimated joints
        obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * _GMOf(
            (j2d[cids] - cam[smpl_ids]), sigma))
        # pose prior
        pprior = lambda w: w * prior(sv.pose)  # pylint: disable=cell-var-from-loop
        # joint angles prior
        # 55: left elbow, should bend -np.pi/2
        # 58: right elbow, should bend np.pi/2
        # 12: left knee, should bend np.pi/2
        # 15: right knee, should bend np.pi/2
        if exp_logistic:
            _LOGGER.info('USING LOGISTIC')
            # Skinny logistic function: as the slope factor (here 100) goes to
            # infinity this becomes a step at 0.1 (a margin, since 0 is still ok).
            my_exp = lambda x: 1 / (1 + _ch.exp(100 * (0.1 + -x)))
        else:
            x_0 = 0  #10
            alpha = 10
            my_exp = lambda x: alpha * _ch.exp((x - x_0))  # pylint: disable=cell-var-from-loop

        obj_angle = lambda w: w * _ch.concatenate([
            my_exp(sv.pose[55]),  # pylint: disable=cell-var-from-loop
            my_exp(-sv.pose[58]),  # pylint: disable=cell-var-from-loop
            my_exp(-sv.pose[12]),  # pylint: disable=cell-var-from-loop
            my_exp(-sv.pose[15])
        ])  # pylint: disable=cell-var-from-loop

        if viz:
            from body.mesh.sphere import Sphere
            from body.mesh.meshviewer import MeshViewer
            import matplotlib.pyplot as plt

            # set up visualization
            # openGL window
            mv = MeshViewer(window_width=120, window_height=120)

            # and ids
            show_ids = _np.array(smpl_ids)[weights > 0]
            vc = _np.ones((len(Jtr), 3))
            vc[show_ids] = [0, 1, 0]

            plt.ion()

            def on_step(_):
                """Create visualization."""
                # show optimized joints in 3D
                # pylint: disable=cell-var-from-loop
                mv.set_dynamic_meshes([_Mesh(v=sv.r, f=[]),
                                       Sphere(center=cam.t.r,
                                              radius=.1).to_mesh()] \
                        + [Sphere(center=jc, radius=.01).to_mesh(vc[ijc])
                           for ijc, jc in enumerate(Jtr.r)])
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        _np.around(cam.r[smpl_ids]).astype(int),
                        _np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0
                            and coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        _cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1]
                            and target_coord[0] >= 0
                            and target_coord[1] < tmp_img.shape[0]
                            and target_coord[1] >= 0):
                        _cv2.circle(tmp_img, tuple(target_coord), 3,
                                    [0, 255, 0])
                plt.imshow(tmp_img)
                plt.draw()
                plt.show()

            on_step(None)
        else:
            on_step = None

        sp = _SphereCollisions(pose=sv.pose,
                               betas=sv.betas,
                               model=model,
                               regs=_REGRESSORS)
        sp.no_hands = True
        # configuration used with conf joints
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}
            #if stage < 2:
            objs['j2d'] = obj_j2d(1., 100)  # TODO: evaluate.

            objs['pose'] = pprior(w)

            # WEIGHT FOR ANGLE
            if exp_logistic:
                # Set to high weight always.
                objs['pose_exp'] = obj_angle(5 * 1e3)
            else:
                objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas
            if inner_penetration:
                objs['sph_coll'] = 1e3 * sp
            try:
                _ch.minimize(objs.values(),
                             x0=[sv.betas, sv.pose],
                             method='dogleg',
                             callback=on_step,
                             options={
                                 'maxiter': 100,
                                 'e_3': .0001,
                                 'disp': 0
                             })
            except AssertionError:
                # Divergence detected.
                _LOGGER.warn("Diverging optimization! Breaking!")
                break
        t1 = _time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)
        # rends.append(rend)
    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r,
            cams[choose_id].rt.r)
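The two my_exp variants above penalize elbows/knees that bend the wrong way: both stay near zero while the (sign-adjusted) pose parameter is negative and grow once it turns positive. A small NumPy sketch of their shapes (values approximate):

import numpy as np

x = np.array([-0.5, 0.0, 0.5])
logistic = 1 / (1 + np.exp(100 * (0.1 + -x)))  # saturates at 1: a soft step at 0.1
exponential = 10 * np.exp(x - 0)               # keeps growing with x
print(logistic)     # ~[0.0, 4.5e-05, 1.0]
print(exponential)  # ~[6.07, 10.0, 16.49]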
Example #17
def setupTexturedRenderer(renderer,
                          vstack,
                          vch,
                          f_list,
                          vc_list,
                          vnch,
                          uv,
                          haveTextures_list,
                          textures_list,
                          camera,
                          frustum,
                          sharedWin=None):
    f = []
    f_listflat = [item for sublist in f_list for item in sublist]
    lenMeshes = 0
    for mesh_i, mesh in enumerate(f_listflat):
        polygonLen = 0
        for polygons in mesh:

            f = f + [polygons + lenMeshes]
            polygonLen += len(polygons)
        lenMeshes += len(vch[mesh_i])

    fstack = np.vstack(f)

    if len(vnch) == 1:
        vnstack = vnch[0]
    else:
        vnstack = ch.vstack(vnch)

    if len(vc_list) == 1:
        vcstack = vc_list[0]
    else:
        vcstack = ch.vstack(vc_list)

    uvflat = [item for sublist in uv for item in sublist]
    ftstack = np.vstack(uvflat)

    texturesch = []
    textures_listflat = [item for sublist in textures_list for item in sublist]

    # import ipdb; ipdb.set_trace()

    for texture_list in textures_listflat:
        if texture_list is not None:
            for texture in texture_list:
                if isinstance(texture, np.ndarray):
                    texturesch = texturesch + [ch.array(texture)]
                elif texture is not None:
                    texturesch = texturesch + [ch.array(texture)]

    if len(texturesch) == 0:
        texture_stack = ch.Ch([])
    elif len(texturesch) == 1:
        texture_stack = texturesch[0].ravel()
    else:
        texture_stack = ch.concatenate([tex.ravel() for tex in texturesch])

    haveTextures_listflat = [
        item for sublist in haveTextures_list for item in sublist
    ]

    renderer.set(camera=camera,
                 frustum=frustum,
                 v=vstack,
                 f=fstack,
                 vn=vnstack,
                 vc=vcstack,
                 ft=ftstack,
                 texture_stack=texture_stack,
                 v_list=vch,
                 f_list=f_listflat,
                 vc_list=vc_list,
                 ft_list=uvflat,
                 textures_list=textures_listflat,
                 haveUVs_list=haveTextures_listflat,
                 bgcolor=ch.ones(3),
                 overdraw=True)
    renderer.msaa = True
    renderer.sharedWin = sharedWin
Example #18
# likelihoodsZ = [chGenComponentsProbs[comp]*ch.exp( - (chInput - chZ)**2 / (2 * covars))  * (1/(ch.sqrt(covars) * np.sqrt(2 * np.pi))) for comp in range(nComps)]
# chLikelihoodsZ = ch.concatenate(likelihoods)
# chGenMarginalZ = ch.exp(ch.sum(ch.log(chLikelihoodsZ)))

gmmRec = mixture.GMM(n_components=nRecComps, covariance_type='spherical')
gmmRec.covars_ = gmm.covars_.copy()

#Update the mean of the gaussians and update the mixing weights.
methods = ['dogleg', 'minimize', 'BFGS', 'L-BFGS-B', 'Nelder-Mead']
free_vars = [recMeans.ravel(), recSoftmaxW]

print("Beginning optimization.")
while True:

    gmmRec.weights_ = np.array(chRecSoftmax.r)
    gmmRec.means_ = np.array(ch.concatenate(recMeans))
    epsilon = np.random.randn(numVars)
    u = choice(nRecComps, size=1, p=chRecSoftmax.r)
    chZ[:] = chZRecComps[:, u].r.ravel() + recCovars * epsilon.ravel()
    pu = chRecSoftmax
    L = ch.log(pu[u]) + ch.sum(chLogJoint.ravel()) - ch.sum(
        chRecLogLikelihoods[:, :, u].ravel())
    drL = L.dr_wrt(recMeans) / numPixels
    alpha = 0.1

    recSoftmaxW[:] = recSoftmaxW.r[:] + alpha * L.dr_wrt(recSoftmaxW).reshape(
        recSoftmaxW.shape) / numPixels
    ipdb.set_trace()
    chZ[:] = chZ.r[:] + alpha * L.dr_wrt(chZ).reshape(chZ.r.shape) / numPixels
    chZRecComps[:, u] = chZ.r[:]
    # ch.minimize({'raw': -L}, bounds=None, method=methods[1], x0=free_vars, callback=None, options={'disp':False, 'maxiter':1})
Example #19
def get_capsules(model, wrt_betas=None, length_regs=None, rad_regs=None):
    from opendr.geometry import Rodrigues
    if length_regs is not None:
        n_shape_dofs = length_regs.shape[0] - 1
    else:
        n_shape_dofs = model.betas.r.size
    segm = np.argmax(model.weights_prior, axis=1)
    J_off = ch.zeros((len(joint2name), 3))
    rots = rots0.copy()
    mujoco_t_mid = [0, 3, 6, 9]
    if wrt_betas is not None:
        # if we want to differentiate wrt betas (shape), we must have the
        # regressors...
        assert (length_regs is not None and rad_regs is not None)
        # ... and betas must be a chumpy object
        assert (hasattr(wrt_betas, 'dterms'))
        pad = ch.concatenate(
            (wrt_betas, ch.zeros(n_shape_dofs - len(wrt_betas)), ch.ones(1)))
        lengths = pad.dot(length_regs)
        rads = pad.dot(rad_regs)
    else:
        lengths = ch.ones(len(joint2name))
        rads = ch.ones(len(joint2name))
    betas = wrt_betas if wrt_betas is not None else model.betas
    n_betas = len(betas)
    # the joint regressors are the original, pre-optimized ones
    # (middle of the part frontier)
    myJ_regressor = model.J_regressor_prior
    myJ0 = ch.vstack((ch.ch.MatVecMult(
        myJ_regressor, model.v_template[:, 0] +
        model.shapedirs[:, :, :n_betas].dot(betas)[:, 0]),
                      ch.ch.MatVecMult(
                          myJ_regressor, model.v_template[:, 1] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 1]),
                      ch.ch.MatVecMult(
                          myJ_regressor, model.v_template[:, 2] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 2]))).T
    # with small adjustments for hips, spine and feet
    myJ = ch.vstack([
        ch.concatenate([
            myJ0[0, 0], (.6 * myJ0[0, 1] + .2 * myJ0[1, 1] + .2 * myJ0[2, 1]),
            myJ0[9, 2]
        ]),
        ch.vstack([myJ0[i] for i in range(1, 7)]),
        ch.concatenate(
            [myJ0[7, 0], (1.1 * myJ0[7, 1] - .1 * myJ0[4, 1]), myJ0[7, 2]]),
        ch.concatenate(
            [myJ0[8, 0], (1.1 * myJ0[8, 1] - .1 * myJ0[5, 1]), myJ0[8, 2]]),
        ch.concatenate(
            [myJ0[9, 0], myJ0[9, 1], (.2 * myJ0[9, 2] + .8 * myJ0[12, 2])]),
        ch.vstack([myJ0[i] for i in range(10, 24)])
    ])
    capsules = []
    # create one capsule per mujoco joint
    for ijoint, segms in enumerate(mujoco2segm):
        if wrt_betas is None:
            vidxs = np.asarray([segm == k for k in segms]).any(axis=0)
            verts = model.v_template[vidxs].r
            dims = (verts.max(axis=0) - verts.min(axis=0))
            rads[ijoint] = .5 * ((dims[(np.argmax(dims) + 1) % 3] + dims[
                (np.argmax(dims) + 2) % 3]) / 4.)
            lengths[ijoint] = max(dims) - 2. * rads[ijoint].r
        # the core joints are different, since the capsule is not in the joint
        # but in the middle
        if ijoint in mujoco_t_mid:
            len_offset = ch.vstack(
                [ch.zeros(1),
                 ch.abs(lengths[ijoint]) / 2.,
                 ch.zeros(1)]).reshape(3, 1)
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1) -
                Rodrigues(rots[ijoint]).dot(len_offset), rots[ijoint],
                rads[ijoint], lengths[ijoint])
        else:
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        caps.id = ijoint
        capsules.append(caps)
    return capsules
Example #20
def diffHog(image, drconv=None, numOrient=9, cwidth=8, cheight=8):
    imagegray = 0.3 * image[:, :, 0] + 0.59 * image[:, :,
                                                    1] + 0.11 * image[:, :, 2]
    sy, sx = imagegray.shape

    # gx = ch.empty(imagegray.shape, dtype=np.double)
    gx = imagegray[:, 2:] - imagegray[:, :-2]
    gx = ch.hstack([np.zeros([sy, 1]), gx, np.zeros([sy, 1])])

    gy = imagegray[2:, :] - imagegray[:-2, :]
    # gy = imagegray[:, 2:] - imagegray[:, :-2]
    gy = ch.vstack([np.zeros([1, sx]), gy, np.zeros([1, sx])])

    gx += 1e-5
    # gy = imagegray[:-2,1:-1] - imagegray[2:,1:-1] + 0.00001
    # gx = imagegray[1:-1,:-2] - imagegray[1:-1, 2:] + 0.00001

    distFilter = np.ones([2 * cheight, 2 * cwidth], dtype=np.uint8)
    distFilter[int(2 * cheight / 2), int(2 * cwidth / 2)] = 0
    distFilter = (cv2.distanceTransform(distFilter, cv2.DIST_L2, 3) - np.max(
        cv2.distanceTransform(distFilter, cv2.DIST_L2, 3))) / (
            -np.max(cv2.distanceTransform(distFilter, cv2.DIST_L2, 3)))

    magn = ch.sqrt(gy**2 + gx**2) * 180 / np.sqrt(2)

    angles = ch.arctan(gy / gx) * 180 / np.pi + 90

    # meanOrient = np.linspace(0, 180, numOrient)

    orientations_arr = np.arange(numOrient)

    meanOrient = orientations_arr / numOrient * 180

    fb_resttmp = 1 - ch.abs(
        ch.expand_dims(angles[:, :], 2) -
        meanOrient[1:].reshape([1, 1, numOrient - 1])) * numOrient / 180
    zeros_rest = np.zeros([sy, sx, numOrient - 1, 1])
    fb_rest = ch.max(ch.concatenate([fb_resttmp[:, :, :, None], zeros_rest],
                                    axis=3),
                     axis=3)

    chMinOrient0 = ch.min(ch.concatenate([
        ch.abs(
            ch.expand_dims(angles[:, :], 2) -
            meanOrient[0].reshape([1, 1, 1]))[:, :, :, None],
        ch.abs(180 - ch.expand_dims(angles[:, :], 2) -
               meanOrient[0].reshape([1, 1, 1]))[:, :, :, None]
    ],
                                         axis=3),
                          axis=3)

    zeros_fb0 = np.zeros([sy, sx, 1])
    fb0_tmp = ch.concatenate(
        [1 - chMinOrient0[:, :] * numOrient / 180, zeros_fb0], axis=2)
    fb_0 = ch.max(fb0_tmp, axis=2)

    fb = ch.concatenate([fb_0[:, :, None], fb_rest], axis=2)

    # fb[:,:,0] = ch.max(1 - ch.abs(ch.expand_dims(angles,2) - meanOrient.reshape([1,1,numOrient]))*numOrient/180,0)

    # fb = 1./(1. + ch.exp(1 - ch.abs(ch.expand_dims(angles,2) - meanOrient.reshape([1,1,numOrient]))*numOrient/180))

    Fb = ch.expand_dims(magn, 2) * fb

    if drconv is None:
        drconv = dr_wrt_convolution(Fb[:, :, 0], distFilter)

    Fs_list = [
        convolve2D(x=Fb[:, :, Fbi], filter=distFilter,
                   convolve2DDr=drconv).reshape([Fb.shape[0], Fb.shape[1], 1])
        for Fbi in range(numOrient)
    ]

    # Fs_list = [scipy.signal.convolve2d(Fb[:,:,Fbi], distFilter).reshape([Fb.shape[0], Fb.shape[1],1]) for Fbi in range(numOrient)]
    Fs = ch.concatenate(Fs_list, axis=2)

    # cellCols = np.arange(start=cwidth/2, stop=Fs.shape[1]-cwidth/2 , step=cwidth)
    # cellRows = np.arange(start=cheight/2, stop=Fs.shape[0]-cheight/2 , step=cheight)

    Fcells = Fs[0:Fs.shape[0]:cheight, 0:Fs.shape[1]:cwidth, :]

    epsilon = 1e-5

    v = Fcells / ch.sqrt(ch.sum(Fcells**2) + epsilon)
    # v = Fcells

    # hog, hogim = skimage.feature.hog(imagegray,  orientations=numOrient, pixels_per_cell=(cheight, cwidth), visualise=True)
    hog_image = HogImage(image=image,
                         hog=Fcells,
                         numOrient=numOrient,
                         cwidth=cwidth,
                         cheight=cheight)

    # plt.imshow(hog_image)
    # plt.figure()
    # plt.imshow(hogim)
    # ipdb.set_trace()

    return v, hog_image, drconv
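The key differentiable trick above is the soft orientation binning: each pixel's angle gets a linear ("triangle") weight to its two neighboring orientation centers instead of a hard histogram vote. A sketch for a single angle and 9 bins spanning [0, 180):

import numpy as np

numOrient = 9
meanOrient = np.arange(numOrient) / numOrient * 180  # bin centers: 0, 20, ..., 160
angle = 50.0
w = np.maximum(1 - np.abs(angle - meanOrient) * numOrient / 180, 0)
print(np.round(w, 3))  # nonzero (0.5, 0.5) only for the 40- and 60-degree bins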
Example #21
def optimize_on_joints_and_silhouette(j2d,
                                      sil,
                                      model,
                                      cam,
                                      img,
                                      prior,
                                      init_pose,
                                      init_shape,
                                      n_betas=10,
                                      conf=None):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param sil: h x w silhouette with soft boundaries (np.float32, range(-1, 1))
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image
    :param prior: mixture of gaussians pose prior
    :param init_pose: 72D vector, pose prediction results provided by HMR
    :param init_shape: 10D vector, shape prediction results provided by HMR
    :param n_betas: number of shape coefficients considered during optimization
    :param conf: 14D vector storing the confidence values from the CNN
    :returns: a tuple containing the optimized model, its joints projected on image space, the
              camera translation
    """
    # define the mapping LSP joints -> SMPL joints
    cids = list(range(12)) + [13]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    betas = ch.array(init_shape)

    # instantiate the model:
    sv = verts_decorated(trans=ch.zeros(3),
                         pose=ch.array(init_pose),
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=model.posedirs)

    # make the SMPL joints depend on betas
    Jdirs = np.dstack([
        model.J_regressor.dot(model.shapedirs[:, :, i])
        for i in range(len(betas))
    ])
    J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
        model.v_template.r)

    # get joint positions as a function of model pose, betas and trans
    (_, A_global) = global_rigid_transformation(sv.pose,
                                                J_onbetas,
                                                model.kintree_table,
                                                xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

    # add the head joint
    Jtr = ch.vstack((Jtr, sv[head_id]))
    smpl_ids.append(len(Jtr) - 1)

    # update the weights using confidence values
    weights = base_weights * conf[cids] if conf is not None else base_weights

    # project SMPL joints and vertex on the image plane using the estimated camera
    cam.v = ch.vstack([Jtr, sv])

    # obtain a gradient map of the soft silhouette
    grad_x = cv2.Sobel(sil, cv2.CV_32FC1, 1, 0) * 0.125
    grad_y = cv2.Sobel(sil, cv2.CV_32FC1, 0, 1) * 0.125

    # data term #1: distance between observed and estimated joints in 2D
    obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
        (j2d[cids] - cam[smpl_ids]), sigma))

    # data term #2: distance between the observed and projected boundaries
    obj_s2d = lambda w, sigma, flag, target_pose: (w * flag * GMOf(
        (target_pose - cam[len(Jtr):(len(Jtr) + 6890)]), sigma))

    # mixture of gaussians pose prior
    pprior = lambda w: w * prior(sv.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10
    my_exp = lambda x: alpha * ch.exp(x)
    obj_angle = lambda w: w * ch.concatenate([
        my_exp(sv.pose[55]),
        my_exp(-sv.pose[58]),
        my_exp(-sv.pose[12]),
        my_exp(-sv.pose[15])
    ])

    # run the optimization in 4 stages, progressively decreasing the
    # weights for the priors
    print('****** Optimization on joints')
    curr_pose = sv.pose.r
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1])
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas
        objs['thetas'] = wbetas * (sv.pose - curr_pose)  # constrain theta changes

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .001,
                        'disp': 0
                    })
    curr_pose = sv.pose.r
    # cam.v = ch.vstack([Jtr, sv.r])

    # run the optimization in 2 stages, progressively decreasing the
    # weights for the priors
    print('****** Optimization on silhouette and joints')
    opt_weights = zip([57.4, 4.78], [2e2, 1e2])
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        # find the boundary vertices and estimate their expected location
        smpl_vs = cam.r[len(Jtr):, :]
        boundary_flag = np.zeros((smpl_vs.shape[0], 1))
        expected_pos = np.zeros((smpl_vs.shape[0], 2))
        for vi, v in enumerate(smpl_vs):
            r, c = int(v[1]), int(v[0])
            if r < 0 or r >= sil.shape[0] or c < 0 or c >= sil.shape[1]:
                continue
            sil_v = sil[r, c]
            grad = np.array([grad_x[r, c], grad_y[r, c]])
            grad_n = np.linalg.norm(grad)
            if grad_n > 1e-1 and sil_v < 0.4:  # vertex on or out of the boundaries
                boundary_flag[vi] = 1.0
                step = (grad / grad_n) * (sil_v / grad_n)
                expected_pos[vi] = np.array([c - step[0], r - step[1]])

        # run optimization
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['s2d'] = obj_s2d(5., 100, boundary_flag, expected_pos)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas  # constrain beta changes
        objs['thetas'] = wbetas * (sv.pose - curr_pose)  # constrain theta changes
        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .001,
                        'disp': 0
                    })

    return sv, cam.r, cam.t.r
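The expected-position update inside the loop is a first-order step toward the zero level set of the soft silhouette: under a locally linear model of the field, moving sil_v / |grad| along the unit gradient direction reaches zero. A NumPy sketch with made-up values:

import numpy as np

sil_v = 0.3                   # field value at the vertex
grad = np.array([0.1, 0.05])  # Sobel gradient at the vertex
grad_n = np.linalg.norm(grad)
step = (grad / grad_n) * (sil_v / grad_n)
print(step)  # pixel offset applied against the gradient to reach the boundary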
Example #22
def dump3DModel2DKpsHand(img,
                         m,
                         filename,
                         camMat,
                         est2DJoints=None,
                         gt2DJoints=None,
                         outDir=None,
                         camPose=np.eye(4, dtype=np.float32)):
    '''
    Saves the following in this order depending on availability:
    1. GT 2D joint locations
    2. 2D joint locations as estimated by the CPM
    3. 2D joint locations after fitting the MANO model to the estimated 2D joints
    4. 3D model of the hand in the estimated pose
    :param img:
    :param m:
    :param filename:
    :param camMat:
    :param est2DJoints:
    :param gt2DJoints:
    :param outDir:
    :return:
    '''
    if outDir is not None:
        plt.ioff()
    fig = plt.figure(figsize=(2, 2))
    figManager = plt.get_current_fig_manager()
    # figManager.window.showMaximized()

    ax = fig.add_subplot(2, 2, 4, projection="3d")
    plot3dVisualize(ax, m, flip_x=False, camPose=camPose)
    ax.title.set_text("3D mesh")

    J3DHomo = ch.concatenate([m.J_transformed, np.ones((21, 1))], axis=1)
    J3DTrans = J3DHomo.dot(camPose.T)[:, :3]
    projPts = utilsEval.chProjectPoints(J3DTrans, camMat, False)[jointsMap]

    axEst = fig.add_subplot(2, 2, 3)
    imgOutEst = utilsEval.showHandJoints(img.copy(),
                                         np.copy(projPts.r).astype(np.float32),
                                         estIn=None,
                                         filename=None,
                                         upscale=1,
                                         lineThickness=3)
    axEst.imshow(imgOutEst[:, :, [2, 1, 0]])
    axEst.title.set_text("After fitting")

    if est2DJoints is not None:
        axGT = fig.add_subplot(2, 2, 2)
        imgOutGt = utilsEval.showHandJoints(img.copy(),
                                            est2DJoints.astype(np.float32),
                                            estIn=None,
                                            filename=None,
                                            upscale=1,
                                            lineThickness=3)
        axGT.imshow(imgOutGt[:, :, [2, 1, 0]])
        axGT.title.set_text("Before fitting")

    if gt2DJoints is not None:
        ax1 = fig.add_subplot(2, 2, 1)
        ax1.imshow(gt2DJoints[:, :, [2, 1, 0]])
        ax1.title.set_text("Ground Truth")

    if outDir is not None:
        if len(filename.split('/')) > 1:
            if len(filename.split('/')) == 2:
                if not os.path.exists(
                        os.path.join(outDir,
                                     filename.split('/')[0])):
                    os.mkdir(os.path.join(outDir, filename.split('/')[0]))
            elif len(filename.split('/')) == 3:
                if not os.path.exists(
                        os.path.join(outDir,
                                     filename.split('/')[0])):
                    os.mkdir(os.path.join(outDir, filename.split('/')[0]))
                if not os.path.exists(
                        os.path.join(outDir,
                                     filename.split('/')[0],
                                     filename.split('/')[1])):
                    os.mkdir(
                        os.path.join(outDir,
                                     filename.split('/')[0],
                                     filename.split('/')[1]))
            else:
                raise NotImplementedError

        fig.set_size_inches((11, 8.5), forward=False)
        # plt.show()
        plt.savefig(os.path.join(outDir, filename + '.jpg'), dpi=300)
        plt.close(fig)
    else:
        plt.show()
Example #23
def load_model_1(fname_or_dict,
                 ncomps=6,
                 flat_hand_mean=False,
                 v_template=None,
                 shared_args=None,
                 optwrt='pose_coeff',
                 relRot=np.eye(3),
                 relTrans=np.array([0.0, 0.0, 0.0])):
    ''' This loads the fully articulable SMPL hand model (MANO)
    and replaces the pose DOFs with ncomps PCA coefficients.'''

    import numpy as np
    import chumpy as ch
    import pickle
    import scipy.sparse as sp
    np.random.seed(1)

    if not isinstance(fname_or_dict, dict):
        with open(fname_or_dict, 'rb') as f:
            smpl_data = pickle.load(f, encoding='latin1')
    else:
        smpl_data = fname_or_dict

    rot = 3  # for global orientation!!!
    dof = 20

    # smpl_data['hands_components'] = np.eye(45)
    from sklearn.preprocessing import normalize
    smpl_data['hands_components'] = normalize(smpl_data['hands_components'],
                                              axis=1)
    hands_components = smpl_data['hands_components']
    std = np.linalg.norm(hands_components, axis=1)
    hands_mean = np.zeros(hands_components.shape[1]
                          ) if flat_hand_mean else smpl_data['hands_mean']
    hands_coeffs = smpl_data['hands_coeffs'][:, :ncomps]

    selected_components = np.vstack((hands_components[:ncomps]))
    hands_mean = hands_mean.copy()

    if shared_args is not None and 'pose_coeffs' in shared_args:
        pose_coeffs = ch.zeros(rot + selected_components.shape[0])
        pose_coeffs[:len(shared_args['pose_coeffs']
                         )] = shared_args['pose_coeffs']
    else:
        pose_coeffs = ch.zeros(rot + selected_components.shape[0])
    full_hand_pose = pose_coeffs[rot:(rot + ncomps)].dot(selected_components)

    smpl_data['fullpose'] = ch.concatenate(
        (pose_coeffs[:rot], hands_mean + full_hand_pose))
    pose_dof = ch.zeros(rot + dof)

    smpl_data['pose'] = pose_coeffs
    smpl_data['pose_dof'] = pose_dof

    Jreg = smpl_data['J_regressor']
    if not sp.issparse(Jreg):
        smpl_data['J_regressor'] = (sp.csc_matrix(
            (Jreg.data, (Jreg.row, Jreg.col)), shape=Jreg.shape))

    # slightly modify ready_arguments to make sure that it uses the fullpose
    # (which will NOT be pose) for the computation of posedirs
    dd = ready_arguments(smpl_data, posekey4vposed='fullpose')

    # create the smpl formula with the fullpose,
    # but expose the PCA coefficients as smpl.pose for compatibility
    args = {
        'pose': dd['fullpose'],
        'v': dd['v_posed'],
        'J': dd['J'],
        'weights': dd['weights'],
        'kintree_table': dd['kintree_table'],
        'xp': ch,
        'want_Jtr': True,
        'bs_style': dd['bs_style'],
    }
    # print(dd['J'].r)

    result_previous, meta = verts_core(**args)
    result_noRel = result_previous + dd['trans'].reshape((1, 3))
    result = result_noRel.dot(relRot) + relTrans
    result.no_translation = result_previous

    if meta is not None:
        for field in ['Jtr', 'A', 'A_global', 'A_weighted']:
            if (hasattr(meta, field)):
                setattr(result, field, getattr(meta, field))

    if hasattr(result, 'Jtr'):
        result.J_transformed = (result.Jtr + dd['trans'].reshape(
            (1, 3))).dot(relRot) + relTrans

    for k, v in dd.items():
        setattr(result, k, v)

    if v_template is not None:
        result.v_template[:] = v_template

    return result
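The PCA reparameterization is the core of this loader: ncomps coefficients are decoded through the row-normalized hand-pose components, then offset by the mean pose. A NumPy sketch with stand-in arrays (the real ones come from the MANO pickle):

import numpy as np

ncomps, full_dim = 6, 45
components = np.random.randn(ncomps, full_dim)  # stands in for hands_components[:ncomps]
hands_mean = np.zeros(full_dim)                 # the flat_hand_mean=True case
coeffs = np.zeros(ncomps)                       # the pose_coeffs[rot:] slice above
full_hand_pose = hands_mean + coeffs.dot(components)
print(full_hand_pose.shape)  # (45,) -- zero coefficients give the flat hand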
Example #24
        dd[s] = ch.array(dd[s])
    else:
        print(type(dd[s]))

dd['v_shaped'] = dd['shapedirs'].dot(dd['betas'])+dd['v_template']
v_shaped = dd['v_shaped']

J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:,0])
J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:,1])
J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:,2])
dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T

if dd['pose'].ndim != 2 or dd['pose'].shape[1] != 3:
    p = dd['pose'].reshape((-1, 3))
    p = p[1:]
    c = ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() for pp in p]).ravel()

dd['v_posed'] = v_shaped + dd['posedirs'].dot(c)

args = {
        'pose': dd['pose'],
        'v': dd['v_posed'],
        'J': dd['J'],
        'weights': dd['weights'],
        'kintree_table': dd['kintree_table'],
        'xp': ch,
        'want_Jtr': True,
        'bs_style': dd['bs_style']
}

pose=args['pose']
Example #25
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       n_betas=10,
                       regs=None,
                       conf=None,
                       viz=False):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joints ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    if try_both_orient:
        flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
            cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
        flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        # store here the final error for both orientations,
        # and pick the orientation resulting in the lowest error
        errors = []

    svs = []
    cams = []
    for o_id, orient in enumerate(orientations):
        # initialize the shape to the mean shape in the SMPL training set
        betas = ch.zeros(n_betas)

        # initialize the pose by using the optimized body orientation and the
        # pose prior
        init_pose = np.hstack((orient, prior.weights.dot(prior.means)))

        # instantiate the model:
        # verts_decorated allows us to define how many
        # shape coefficients (directions) we want to consider (here, n_betas)
        sv = verts_decorated(trans=ch.zeros(3),
                             pose=ch.array(init_pose),
                             v_template=model.v_template,
                             J=model.J_regressor,
                             betas=betas,
                             shapedirs=model.shapedirs[:, :, :n_betas],
                             weights=model.weights,
                             kintree_table=model.kintree_table,
                             bs_style=model.bs_style,
                             f=model.f,
                             bs_type=model.bs_type,
                             posedirs=model.posedirs)

        # make the SMPL joints depend on betas
        Jdirs = np.dstack([
            model.J_regressor.dot(model.shapedirs[:, :, i])
            for i in range(len(betas))
        ])
        J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = global_rigid_transformation(sv.pose,
                                                    J_onbetas,
                                                    model.kintree_table,
                                                    xp=ch)
        Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        # add the head joint, corresponding to a vertex...
        Jtr = ch.vstack((Jtr, sv[head_id]))

        # ... and add the joint id to the list
        if o_id == 0:
            smpl_ids.append(len(Jtr) - 1)

        # update the weights using confidence values
        weights = (base_weights * conf[cids]
                   if conf is not None else base_weights)

        # project SMPL joints on the image plane using the estimated camera
        cam.v = Jtr

        # data term: distance between observed and estimated joints in 2D
        obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
            (j2d[cids] - cam[smpl_ids]), sigma))

        # mixture of gaussians pose prior
        pprior = lambda w: w * prior(sv.pose)
        # joint angles pose prior, defined over a subset of pose parameters:
        # 55: left elbow,  90deg bend at -np.pi/2
        # 58: right elbow, 90deg bend at np.pi/2
        # 12: left knee,   90deg bend at np.pi/2
        # 15: right knee,  90deg bend at np.pi/2
        alpha = 10
        my_exp = lambda x: alpha * ch.exp(x)
        obj_angle = lambda w: w * ch.concatenate([
            my_exp(sv.pose[55]),
            my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]),
            my_exp(-sv.pose[15])
        ])

        if viz:
            import matplotlib.pyplot as plt
            plt.ion()

            def on_step(_):
                """Create visualization."""
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        np.around(cam.r[smpl_ids]).astype(int),
                        np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0
                            and coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1]
                            and target_coord[0] >= 0
                            and target_coord[1] < tmp_img.shape[0]
                            and target_coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(target_coord), 3,
                                   [0, 255, 0])
                plt.imshow(tmp_img[:, :, ::-1])
                plt.draw()
                plt.show()
                plt.pause(1e-2)

            on_step(None)
        else:
            on_step = None

        if regs is not None:
            # interpenetration term
            sp = SphereCollisions(pose=sv.pose,
                                  betas=sv.betas,
                                  model=model,
                                  regs=regs)
            sp.no_hands = True
        # weight configuration used in the paper, with joints + confidence values from the CNN
        # (all the weights used in the code were obtained via grid search, see the paper for more details)
        # the first list contains the weights for the pose priors,
        # the second list contains the weights for the shape prior
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        # run the optimization in 4 stages, progressively decreasing the
        # weights for the priors
        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}

            objs['j2d'] = obj_j2d(1., 100)

            objs['pose'] = pprior(w)

            objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas

            if regs is not None:
                objs['sph_coll'] = 1e3 * sp

            ch.minimize(objs,
                        x0=[sv.betas, sv.pose],
                        method='dogleg',
                        callback=on_step,
                        options={
                            'maxiter': 100,
                            'e_3': .0001,
                            'disp': 0
                        })

        t1 = time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)

    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    # NOTE: Jtr belongs to the last orientation evaluated, which may differ
    # from the chosen one when try_both_orient is True
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r, Jtr)
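
Taken on its own, the exponential joint-angle prior above is a good illustration of how ch.concatenate turns scalar penalty terms into a single differentiable residual. Below is a minimal sketch assuming only chumpy; the 72-D vector is a stand-in for sv.pose, not a real SMPL pose.

import chumpy as ch

pose = ch.zeros(72)  # stand-in for sv.pose
alpha = 10
my_exp = lambda x: alpha * ch.exp(x)

# each term explodes when the joint bends the wrong way and fades otherwise;
# ch.concatenate stacks the four scalars into one differentiable residual
angle_prior = ch.concatenate([
    my_exp(pose[55]),   # left elbow
    my_exp(-pose[58]),  # right elbow
    my_exp(-pose[12]),  # left knee
    my_exp(-pose[15]),  # right knee
])

print(angle_prior.r)                 # four residual values (all equal to alpha at zero pose)
print(angle_prior.dr_wrt(pose).nnz)  # nonzero Jacobian entries w.r.t. the pose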

Example No. 29
def load_model(fname_or_dict,
               ncomps=6,
               flat_hand_mean=False,
               v_template=None,
               use_pca=True):
    '''Load the fully articulable hand SMPL model and replace the
    per-joint pose DOFs with ncomps PCA components.'''

    from verts import verts_core
    import numpy as np
    import chumpy as ch
    import pickle
    import scipy.sparse as sp
    np.random.seed(1)

    if not isinstance(fname_or_dict, dict):
        smpl_data = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1')
    else:
        smpl_data = fname_or_dict

    rot = 3  # 3 DOFs for the global orientation

    if use_pca:
        hands_components = smpl_data['hands_components']  # PCA components
    else:
        # identity basis: directly expose the 15x3 articulation angles
        hands_components = np.eye(45)
    hands_mean = (np.zeros(hands_components.shape[1])
                  if flat_hand_mean else smpl_data['hands_mean'])
    hands_coeffs = smpl_data['hands_coeffs'][:, :ncomps]

    selected_components = np.vstack((hands_components[:ncomps]))
    hands_mean = hands_mean.copy()

    pose_coeffs = ch.zeros(rot + selected_components.shape[0])
    full_hand_pose = pose_coeffs[rot:(rot + ncomps)].dot(selected_components)

    smpl_data['fullpose'] = ch.concatenate(
        (pose_coeffs[:rot], hands_mean + full_hand_pose))
    smpl_data['pose'] = pose_coeffs

    Jreg = smpl_data['J_regressor']
    if not sp.issparse(Jreg):
        smpl_data['J_regressor'] = (sp.csc_matrix(
            (Jreg.data, (Jreg.row, Jreg.col)), shape=Jreg.shape))

    # slightly modify ready_arguments to make sure that it uses the fullpose
    # (which will NOT be pose) for the computation of posedirs
    dd = ready_arguments(smpl_data, posekey4vposed='fullpose')

    # create the smpl formula with the fullpose,
    # but expose the PCA coefficients as smpl.pose for compatibility
    args = {
        'pose': dd['fullpose'],
        'v': dd['v_posed'],
        'J': dd['J'],
        'weights': dd['weights'],
        'kintree_table': dd['kintree_table'],
        'xp': ch,
        'want_Jtr': True,
        'bs_style': dd['bs_style'],
    }

    result_previous, meta = verts_core(**args)
    result = result_previous + dd['trans'].reshape((1, 3))
    result.no_translation = result_previous

    if meta is not None:
        for field in ['Jtr', 'A', 'A_global', 'A_weighted']:
            if (hasattr(meta, field)):
                setattr(result, field, getattr(meta, field))

    if hasattr(result, 'Jtr'):
        result.J_transformed = result.Jtr + dd['trans'].reshape((1, 3))

    for k, v in dd.items():
        setattr(result, k, v)

    if v_template is not None:
        result.v_template[:] = v_template
    result.dd = dd
    return result
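
The PCA re-parameterization at the heart of load_model can be reproduced standalone. The sketch below uses a random basis and mean in place of the real model assets, so only the shapes and the gradient flow are meaningful.

import numpy as np
import chumpy as ch

rot, ncomps = 3, 6
hands_components = np.random.randn(45, 45)  # synthetic PCA basis (rows = components)
hands_mean = np.random.randn(45)            # synthetic mean hand pose
selected_components = hands_components[:ncomps]

pose_coeffs = ch.zeros(rot + ncomps)        # [global rotation | PCA coefficients]
full_hand_pose = pose_coeffs[rot:rot + ncomps].dot(selected_components)

# three rotation DOFs followed by the reconstructed 45-D articulation;
# gradients w.r.t. the ncomps coefficients flow through the dot product
fullpose = ch.concatenate((pose_coeffs[:rot], hands_mean + full_hand_pose))
assert fullpose.r.shape == (48,)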

Example No. 30
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       n_betas=10,
                       regs=None,
                       conf=None,
                       viz=False):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joint ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array(
        [1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64)

    if try_both_orient:
        flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
            cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
        flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        # store here the final error for both orientations,
        # and pick the orientation resulting in the lowest error
        errors = []

    svs = []
    cams = []
    for o_id, orient in enumerate(orientations):
        # initialize the shape to the mean shape in the SMPL training set
        betas = ch.zeros(n_betas)

        # initialize the pose by using the optimized body orientation and the
        # pose prior
        init_pose = np.hstack((orient, prior.weights.dot(prior.means)))

        # instantiate the model:
        # verts_decorated allows us to define how many
        # shape coefficients (directions) we want to consider (here, n_betas)
        sv = verts_decorated(
            trans=ch.zeros(3),
            pose=ch.array(init_pose),
            v_template=model.v_template,
            J=model.J_regressor,
            betas=betas,
            shapedirs=model.shapedirs[:, :, :n_betas],
            weights=model.weights,
            kintree_table=model.kintree_table,
            bs_style=model.bs_style,
            f=model.f,
            bs_type=model.bs_type,
            posedirs=model.posedirs)

        # make the SMPL joints depend on betas
        Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i])
                           for i in range(len(betas))])
        J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = global_rigid_transformation(
            sv.pose, J_onbetas, model.kintree_table, xp=ch)
        Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        # add the head joint, corresponding to a vertex...
        Jtr = ch.vstack((Jtr, sv[head_id]))

        # ... and add the joint id to the list
        if o_id == 0:
            smpl_ids.append(len(Jtr) - 1)

        # update the weights using confidence values
        weights = (base_weights * conf[cids]
                   if conf is not None else base_weights)

        # project SMPL joints on the image plane using the estimated camera
        cam.v = Jtr

        # data term: distance between observed and estimated joints in 2D
        obj_j2d = lambda w, sigma: (
            w * weights.reshape((-1, 1)) * GMOf((j2d[cids] - cam[smpl_ids]), sigma))

        # mixture of gaussians pose prior
        pprior = lambda w: w * prior(sv.pose)
        # joint angles pose prior, defined over a subset of pose parameters:
        # 55: left elbow,  90deg bend at -np.pi/2
        # 58: right elbow, 90deg bend at np.pi/2
        # 12: left knee,   90deg bend at np.pi/2
        # 15: right knee,  90deg bend at np.pi/2
        alpha = 10
        my_exp = lambda x: alpha * ch.exp(x)
        obj_angle = lambda w: w * ch.concatenate([
            my_exp(sv.pose[55]),
            my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]),
            my_exp(-sv.pose[15])
        ])

        if viz:
            import matplotlib.pyplot as plt
            plt.ion()

            def on_step(_):
                """Create visualization."""
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        np.around(cam.r[smpl_ids]).astype(int),
                        np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0 and
                            coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1] and
                            target_coord[0] >= 0 and
                            target_coord[1] < tmp_img.shape[0] and
                            target_coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(target_coord), 3,
                                   [0, 255, 0])
                plt.imshow(tmp_img[:, :, ::-1])
                plt.draw()
                plt.show()
                plt.pause(1e-2)

            on_step(None)
        else:
            on_step = None

        if regs is not None:
            # interpenetration term
            sp = SphereCollisions(
                pose=sv.pose, betas=sv.betas, model=model, regs=regs)
            sp.no_hands = True
        # weight configuration used in the paper, with joints + confidence values from the CNN
        # (all the weights used in the code were obtained via grid search, see the paper for more details)
        # the first list contains the weights for the pose priors,
        # the second list contains the weights for the shape prior
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        # run the optimization in 4 stages, progressively decreasing the
        # weights for the priors
        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}

            objs['j2d'] = obj_j2d(1., 100)

            objs['pose'] = pprior(w)

            objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas

            if regs is not None:
                objs['sph_coll'] = 1e3 * sp

            ch.minimize(
                objs,
                x0=[sv.betas, sv.pose],
                method='dogleg',
                callback=on_step,
                options={'maxiter': 100,
                         'e_3': .0001,
                         'disp': 0})

        t1 = time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)

    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r)
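
The staged annealing loop is the part worth internalizing: the data term keeps weight 1 throughout, while the pose and shape prior weights decay over the four stages. Here is a toy sketch of the same pattern, with placeholder quadratic residuals instead of the SMPL terms.

import chumpy as ch

x = ch.zeros(2)
data_term = x - ch.array([1., 2.])  # toy "j2d" residual, target [1, 2]
prior_term = x                      # toy prior residual, pulls x toward 0

opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                  [1e2, 5 * 1e1, 1e1, .5 * 1e1])
for stage, (w, wbetas) in enumerate(opt_weights):
    objs = {'data': data_term,
            'pose': w * prior_term,
            'betas': wbetas * prior_term}
    ch.minimize(objs, x0=[x], method='dogleg',
                options={'maxiter': 100, 'e_3': .0001, 'disp': 0})
print(x.r)  # a compromise: pulled toward [1, 2], shrunk toward 0 by the priors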

Example No. 31
def compute_r(self):
    # pylint: disable=unsubscriptable-object
    min_w = self.weights[self.min_component_idx]
    # append sqrt(-log(weight)): a constant penalty for selecting an
    # unlikely mixture component
    return ch.concatenate((self.loglikelihoods[self.min_component_idx].r,
                           np.sqrt(-np.log(min_w))))
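
The trick in compute_r is appending a constant penalty to a differentiable residual; ch.concatenate mixes chumpy and plain numpy values without complaint. A toy sketch with made-up numbers:

import numpy as np
import chumpy as ch

loglik = ch.array([0.5, -1.2, 0.3])  # stand-in residuals from the chosen component
min_w = 0.25                         # weight of the chosen mixture component

# the appended term is constant: it penalizes selecting an unlikely
# component but contributes no gradient
r = ch.concatenate((loglik, np.sqrt(-np.log([min_w]))))
print(r.r)  # -> [0.5, -1.2, 0.3, ~1.177]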

Example No. 32
def load_model_withInputs_poseCoeffs(
    fname_or_dict,
    chRot,
    chPoseCoeff,
    chTrans,
    chBetas,
    ncomps=6,
    flat_hand_mean=False,
    v_template=None,
    shared_args=None,
):

    import numpy as np
    import chumpy as ch
    import pickle
    import scipy.sparse as sp
    np.random.seed(1)

    if not isinstance(fname_or_dict, dict):
        # smpl_data = pickle.load(open(fname_or_dict))
        smpl_data = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1')
    else:
        smpl_data = fname_or_dict

    rot = 3  # 3 DOFs for the global orientation
    dof = 20  # note: unused below

    # smpl_data['hands_components'] = np.eye(45)
    from sklearn.preprocessing import normalize
    # L2-normalize each PCA component so coefficient scales are comparable
    smpl_data['hands_components'] = normalize(smpl_data['hands_components'],
                                              axis=1)
    hands_components = smpl_data['hands_components']
    hands_mean = (np.zeros(hands_components.shape[1])
                  if flat_hand_mean else smpl_data['hands_mean'])
    hands_coeffs = smpl_data['hands_coeffs'][:, :ncomps]

    selected_components = np.vstack((hands_components[:ncomps]))
    hands_mean = hands_mean.copy()

    pose_coeffs = ch.concatenate([chRot, chPoseCoeff], axis=0)
    full_hand_pose = pose_coeffs[rot:(rot + ncomps)].dot(selected_components)

    smpl_data['fullpose'] = ch.concatenate(
        (pose_coeffs[:rot], hands_mean + full_hand_pose))

    smpl_data['pose'] = pose_coeffs

    Jreg = smpl_data['J_regressor']
    if not sp.issparse(Jreg):
        smpl_data['J_regressor'] = (sp.csc_matrix(
            (Jreg.data, (Jreg.row, Jreg.col)), shape=Jreg.shape))

    # slightly modify ready_arguments to make sure that it uses the fullpose
    # (which will NOT be pose) for the computation of posedirs
    dd = ready_arguments(smpl_data,
                         posekey4vposed='fullpose',
                         shared_args=shared_args,
                         chTrans=chTrans,
                         chBetas=chBetas)

    # create the smpl formula with the fullpose,
    # but expose the PCA coefficients as smpl.pose for compatibility
    args = {
        'pose': dd['fullpose'],
        'v': dd['v_posed'],
        'J': dd['J'],
        'weights': dd['weights'],
        'kintree_table': dd['kintree_table'],
        'xp': ch,
        'want_Jtr': True,
        'bs_style': dd['bs_style'],
    }
    # print(dd['J'].r)

    result_previous, meta = verts_core(**args)
    result_noRel = result_previous + dd['trans'].reshape((1, 3))
    result = result_noRel
    result.no_translation = result_previous

    if meta is not None:
        for field in ['Jtr', 'A', 'A_global', 'A_weighted']:
            if (hasattr(meta, field)):
                setattr(result, field, getattr(meta, field))

    if hasattr(result, 'Jtr'):
        result.J_transformed = (result.Jtr + dd['trans'].reshape((1, 3)))

    for k, v in dd.items():
        setattr(result, k, v)

    if v_template is not None:
        result.v_template[:] = v_template

    return result
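
What sets this loader apart from load_model is that the pose variables are supplied by the caller (chRot, chPoseCoeff), so several objectives or models can share them. A minimal sketch of that wiring, with toy sizes:

import chumpy as ch

chRot = ch.zeros(3)        # global orientation, owned by the caller
chPoseCoeff = ch.zeros(6)  # PCA coefficients, owned by the caller

pose_coeffs = ch.concatenate([chRot, chPoseCoeff], axis=0)

# anything built from pose_coeffs stays differentiable w.r.t. the caller's
# variables, so an optimizer can update them directly
assert pose_coeffs.dr_wrt(chRot).nnz > 0
assert pose_coeffs.dr_wrt(chPoseCoeff).nnz > 0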

Example No. 33
def verts_decorated_quat(trans,
                         pose,
                         v_template,
                         J,
                         weights,
                         kintree_table,
                         f,
                         posedirs=None,
                         betas=None,
                         add_shape=True,
                         shapedirs=None,
                         want_Jtr=False):

    for which in [
            trans, pose, v_template, weights, posedirs, betas, shapedirs
    ]:
        if which is not None:
            assert ischumpy(which)
    v = v_template

    if shapedirs is not None:
        if betas is None:
            betas = chumpy.zeros(shapedirs.shape[-1])
        if add_shape:
            v_shaped = v + shapedirs.dot(betas)  # add the shape blend offsets
        else:
            v_shaped = v
    else:
        v_shaped = v

    quaternion_angles = axis2quat(pose.reshape((-1, 3))).reshape(-1)

    # NOTE: the concatenated pose+shape feature below is dead code, feat is
    # immediately overwritten; only the quaternion angles drive the blendshapes
    shape_feat = betas[1]
    feat = ch.concatenate([quaternion_angles, shape_feat], axis=0)
    feat = quaternion_angles

    poseblends = posedirs.dot(feat)
    v_posed = v_shaped + poseblends

    v = v_posed
    regressor = J
    J_tmpx = MatVecMult(regressor, v_shaped[:, 0])
    J_tmpy = MatVecMult(regressor, v_shaped[:, 1])
    J_tmpz = MatVecMult(regressor, v_shaped[:, 2])
    J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T

    result, meta = verts_core(pose,
                              v,
                              J,
                              weights,
                              kintree_table,
                              want_Jtr=True)
    Jtr = meta.Jtr if meta is not None else None
    tr = trans.reshape((1, 3))
    result = result + tr
    Jtr = Jtr + tr

    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights
    result.kintree_table = kintree_table
    result.poseblends = poseblends
    result.quats = quaternion_angles

    if meta is not None:
        for field in ['Jtr', 'A', 'A_global', 'A_weighted']:
            if (hasattr(meta, field)):
                setattr(result, field, getattr(meta, field))

    if posedirs is not None:
        result.posedirs = posedirs
        result.v_posed = v_posed
    if shapedirs is not None:
        result.shapedirs = shapedirs
        result.betas = betas
        result.v_shaped = v_shaped
    if want_Jtr:
        result.J_transformed = Jtr
    result.poseblends = poseblends
    return result
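
The joint regression above (one sparse matrix-vector product per coordinate) can be exercised in isolation. A small sketch with a synthetic regressor; MatVecMult is imported from chumpy.ch, as in the SMPL reference code:

import numpy as np
import scipy.sparse as sp
import chumpy as ch
from chumpy.ch import MatVecMult

n_verts, n_joints = 20, 4
regressor = sp.csc_matrix(np.random.rand(n_joints, n_verts))  # synthetic J regressor
v_shaped = ch.array(np.random.randn(n_verts, 3))

# one sparse product per coordinate, then stack and transpose to (n_joints, 3)
J = ch.vstack([MatVecMult(regressor, v_shaped[:, i]) for i in range(3)]).T
assert J.r.shape == (n_joints, 3)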

Example No. 34
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       init_pose,
                       init_shape,
                       n_betas=10,
                       conf=None):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image
    :param prior: mixture of gaussians pose prior
    :param init_pose: 72D vector, pose prediction results provided by HMR
    :param init_shape: 10D vector, shape prediction results provided by HMR
    :param n_betas: number of shape coefficients considered during optimization
    :param conf: 14D vector storing the confidence values from the CNN
    :returns: a tuple containing the optimized model, its joints projected on image space, the
              camera translation
    """
    # define the mapping LSP joints -> SMPL joints
    # cids are joint ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array([1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                            dtype=np.float64)

    # initialize the shape with the shape prediction provided by HMR
    betas = ch.array(init_shape)

    # instantiate the model:
    # verts_decorated allows us to define how many
    # shape coefficients (directions) we want to consider (here, n_betas)
    sv = verts_decorated(trans=ch.zeros(3),
                         pose=ch.array(init_pose),
                         v_template=model.v_template,
                         J=model.J_regressor,
                         betas=betas,
                         shapedirs=model.shapedirs[:, :, :n_betas],
                         weights=model.weights,
                         kintree_table=model.kintree_table,
                         bs_style=model.bs_style,
                         f=model.f,
                         bs_type=model.bs_type,
                         posedirs=model.posedirs)

    # make the SMPL joints depend on betas
    Jdirs = np.dstack([
        model.J_regressor.dot(model.shapedirs[:, :, i])
        for i in range(len(betas))
    ])
    J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
        model.v_template.r)

    # get joint positions as a function of model pose, betas and trans
    (_, A_global) = global_rigid_transformation(sv.pose,
                                                J_onbetas,
                                                model.kintree_table,
                                                xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

    # add the head joint, corresponding to a vertex...
    Jtr = ch.vstack((Jtr, sv[head_id]))

    # ... and add the joint id to the list
    smpl_ids.append(len(Jtr) - 1)

    # update the weights using confidence values
    weights = base_weights * conf[cids] if conf is not None else base_weights

    # project SMPL joints on the image plane using the estimated camera
    cam.v = Jtr

    # data term: distance between observed and estimated joints in 2D
    obj_j2d = lambda w, sigma: (w * weights.reshape((-1, 1)) * GMOf(
        (j2d[cids] - cam[smpl_ids]), sigma))

    # mixture of gaussians pose prior
    pprior = lambda w: w * prior(sv.pose)
    # joint angles pose prior, defined over a subset of pose parameters:
    # 55: left elbow,  90deg bend at -np.pi/2
    # 58: right elbow, 90deg bend at np.pi/2
    # 12: left knee,   90deg bend at np.pi/2
    # 15: right knee,  90deg bend at np.pi/2
    alpha = 10
    my_exp = lambda x: alpha * ch.exp(x)
    obj_angle = lambda w: w * ch.concatenate([
        my_exp(sv.pose[55]),
        my_exp(-sv.pose[58]),
        my_exp(-sv.pose[12]),
        my_exp(-sv.pose[15])
    ])

    # weight configuration used in the paper, with joints + confidence values from the CNN
    # (all the weights used in the code were obtained via grid search, see the paper for more details)
    # the first list contains the weights for the pose priors,
    # the second list contains the weights for the shape prior
    opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                      [1e2, 5 * 1e1, 1e1, .5 * 1e1])

    # run the optimization in 4 stages, progressively decreasing the
    # weights for the priors
    for stage, (w, wbetas) in enumerate(opt_weights):
        _LOGGER.info('stage %01d', stage)
        objs = {}
        objs['j2d'] = obj_j2d(1., 100)
        objs['pose'] = pprior(w)
        objs['pose_exp'] = obj_angle(0.317 * w)
        objs['betas'] = wbetas * betas

        ch.minimize(objs,
                    x0=[sv.betas, sv.pose],
                    method='dogleg',
                    callback=None,
                    options={
                        'maxiter': 100,
                        'e_3': .0001,
                        'disp': 0
                    })

    return sv, cam.r, cam.t.r
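
The data term combines per-joint confidence weights with a robust penalty. GMOf itself lives in the surrounding SMPLify code; the sketch below substitutes a hand-written Geman-McClure robustifier in chumpy as a stand-in, so only the overall shape of the term is meant to match.

import numpy as np
import chumpy as ch

def gm_of(x, sigma):
    # Geman-McClure in residual form: rho(x) = sigma^2 * x^2 / (sigma^2 + x^2),
    # returned as sqrt(rho) so a least-squares objective recovers rho
    x_sq = x ** 2
    return (sigma ** 2 * x_sq / (sigma ** 2 + x_sq)) ** 0.5

j2d_obs = np.random.rand(14, 2) * 100            # stand-in CNN joints
j2d_est = ch.array(np.random.rand(14, 2) * 100)  # stand-in projected joints
weights = np.ones((14, 1))                       # per-joint confidences

residual = weights * gm_of(j2d_obs - j2d_est, 100)
print(residual.r.shape)  # (14, 2); large errors saturate near sigma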