Example 1
def verts_core(pose, v, J, weights, kintree_table, want_Jtr=False):
    A, A_global = global_rigid_transformation(pose, J, kintree_table)
    T = A.dot(weights.T)

    rest_shape_h = ch.vstack((v.T, np.ones((1, v.shape[0]))))

    v = (T[:, 0, :] * rest_shape_h[0, :].reshape((1, -1)) +
         T[:, 1, :] * rest_shape_h[1, :].reshape((1, -1)) +
         T[:, 2, :] * rest_shape_h[2, :].reshape((1, -1)) +
         T[:, 3, :] * rest_shape_h[3, :].reshape((1, -1))).T

    v = v[:, :3]

    class result_meta(object):
        pass

    if not want_Jtr:
        Jtr = None
    else:
        Jtr = ch.vstack([g[:3, 3] for g in A_global])

    meta = result_meta()
    meta.Jtr = Jtr
    meta.A = A
    meta.A_global = A_global
    meta.A_weighted = T

    return v, meta
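
A minimal usage sketch for the variant above (shapes are hypothetical SMPL-like placeholders; `verts_core` and `global_rigid_transformation` are assumed importable from the surrounding module):

import numpy as np
import chumpy as ch

N = 6890                                   # vertex count (SMPL-sized, for illustration)
pose = ch.zeros(72)                        # one axis-angle rotation per joint (24 * 3)
v_rest = np.zeros((N, 3))                  # rest-pose vertices
J = np.zeros((24, 3))                      # joint locations
weights = np.zeros((N, 24))                # linear-blend skinning weights
kintree_table = np.zeros((2, 24), int)     # row 0: parent ids, row 1: joint ids

v_posed, meta = verts_core(pose, v_rest, J, weights, kintree_table, want_Jtr=True)
# v_posed: (N, 3) posed vertices; meta.Jtr: (24, 3) posed joints; meta.A_weighted: per-vertex transforms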
Example 2
    def chumpy_compute_error(p1,
                             p2,
                             H,
                             Mcor,
                             error=0,
                             return_sigma=False,
                             sigma_precomputed=None):
        """Compute deviation of p1 and p2 from common epipole, given H.
        """
        # Warp p2 estimates with new homography
        p2_new_denom = H[2, 0] * p2[:, 0] + H[2, 1] * p2[:, 1] + H[2, 2]
        p2_new_x = (H[0, 0] * p2[:, 0] + H[0, 1] * p2[:, 1] +
                    H[0, 2]) / p2_new_denom
        p2_new_y = (H[1, 0] * p2[:, 0] + H[1, 1] * p2[:, 1] +
                    H[1, 2]) / p2_new_denom
        p2_new = ch.vstack((p2_new_x, p2_new_y)).T

        # Compute current best FoE
        u = p2_new[:, 0] - p1[:, 0]
        v = p2_new[:, 1] - p1[:, 1]
        T = ch.vstack((-v, u, v * p1[:, 0] - u * p1[:, 1])).T
        U, S, V = ch.linalg.svd(Mcor.dot(T.T.dot(T)).dot(Mcor))
        qv = Mcor.dot(V[-1, :])
        q = qv[:2] / qv[2]

        d = T.dot(qv)

        # Robust error norm
        sigma = None  # only set by the robust norms below; avoids a NameError if return_sigma is used with error == 0
        if error == 0:
            d = d**2
        elif error == 1:
            sigma = np.median(np.abs(d() - np.median(d())))
            d = ch.sqrt(d**2 + sigma**2)
        elif error == 2:
            # Geman-McClure
            sigma = np.median(np.abs(d() - np.median(d()))) * 1.5
            d = d**2 / (d**2 + sigma)
        elif error == 3:
            # Geman-McClure, corrected
            if sigma_precomputed is not None:
                sigma = sigma_precomputed
            else:
                sigma = 1.426 * np.median(
                    np.abs(d() - np.median(d()))) * np.sqrt(3.0)

            d = d**2 / (d**2 + sigma**2)
            # Correction
            d = d * sigma

        elif error == 4:
            # Inverse exponential norm
            sigma = np.median(np.abs(d() - np.median(d())))
            d = -ch.exp(-d**2 / (2 * sigma**2)) * sigma

        err = d.sum()

        if return_sigma:
            return sigma
        else:
            return err
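
For intuition about the robust norms above, a standalone NumPy sketch of the corrected Geman-McClure weighting (the `error == 3` branch) on made-up residuals:

import numpy as np

d = np.concatenate([0.1 * np.random.randn(95), np.full(5, 5.0)])    # inliers plus outliers
sigma = 1.426 * np.median(np.abs(d - np.median(d))) * np.sqrt(3.0)  # MAD-based robust scale
rho = d**2 / (d**2 + sigma**2)   # bounded in [0, 1): outlier influence saturates
print(rho.min(), rho.max())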
Example 3
def chumpy_get_H(p1, p2):
    """ Compute differentiable homography from p1 to p2.
    
    Parameters
    ----------
    p1,p2 : array_like, shape (4,2)
        Containing points to match.
    """
    # Normalize coordinates to [-1, 1] using the (hardcoded) 1024 x 576 frame size.
    xmax = 1024
    ymax = 576
    p1 = 2 * p1 / ch.array([[xmax, ymax]])
    p1 = p1 - 1.0
    p2 = 2 * p2 / ch.array([[xmax, ymax]])
    p2 = p2 - 1.0

    N = p1.shape[0]
    A1 = ch.vstack((ch.zeros((3, N)), -p1.T, -ch.ones(
        (1, N)), p2[:, 1] * p1[:, 0], p2[:, 1] * p1[:, 1], p2[:, 1])).T
    A2 = ch.vstack((p1.T, ch.ones((1, N)), ch.zeros(
        (3, N)), -p2[:, 0] * p1[:, 0], -p2[:, 0] * p1[:, 1], -p2[:, 0])).T
    A = ch.vstack((A1, A2))

    U, S, V = ch.linalg.svd(A.T.dot(A))
    H_new = V[-1, :].reshape((3, 3))

    # Re-normalize
    ML = ch.array([[xmax / 2.0, 0.0, xmax / 2.0], [0, ymax / 2.0, ymax / 2.0],
                   [0, 0, 1.0]])
    MR = ch.array([[2.0 / xmax, 0.0, -1.0], [0, 2.0 / ymax, -1.0], [0, 0,
                                                                    1.0]])
    H_new = ML.dot(H_new).dot(MR)

    return H_new / H_new[2, 2]
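
A usage sketch with four hand-picked correspondences (coordinates hypothetical, inside the hardcoded 1024 x 576 frame):

import chumpy as ch

p1 = ch.array([[0., 0.], [1024., 0.], [1024., 576.], [0., 576.]])
p2 = ch.array([[12., 7.], [1010., 15.], [1030., 560.], [4., 566.]])
H = chumpy_get_H(p1, p2)
# H is a chumpy expression: H.r evaluates it, H.dr_wrt(p2) gives the Jacobian.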
Example 4
 def __init__(self, t, rod, rad, length):
     self.t = t  # translation of the axis
     self.rod = rod  # rotation of the axis in Rodrigues form
     self.rad = rad  # radius of the capsule
     self.length = length # length of the axis
     axis0 = ch.vstack([0, ch.abs(self.length), 0])
     self.axis = ch.vstack((t.T, (t + Rodrigues(rod).dot(axis0)).T))
     v0 = ch.hstack([v[:26].T * rad, (v[26:].T * rad) + axis0])  # v: capsule template vertices from the enclosing scope
     self.v = ((t + Rodrigues(rod).dot(v0)).T)
     self.set_sphere_centers()
Example 5
    def chumpy_get_H(p1, p2):
        """ Compute differentiable homography from p1 to p2.
        """
        N = p1.shape[0]
        A1 = ch.vstack((ch.zeros((3, N)), -p1.T, -ch.ones(
            (1, N)), p2[:, 1] * p1[:, 0], p2[:, 1] * p1[:, 1], p2[:, 1])).T
        A2 = ch.vstack((p1.T, ch.ones((1, N)), ch.zeros(
            (3, N)), -p2[:, 0] * p1[:, 0], -p2[:, 0] * p1[:, 1], -p2[:, 0])).T
        A = ch.vstack((A1, A2))

        U, S, V = ch.linalg.svd(A.T.dot(A))
        H_new = V[-1, :].reshape((3, 3))
        return H_new
Example 6
def guess_init(model, focal_length, j2d, init_pose):
    """Initialize the camera translation via triangle similarity, by using the torso joints        .
    :param model: SMPL model
    :param focal_length: camera focal length (kept fixed)
    :param j2d: 14x2 array of CNN joints
    :param init_pose: 72D vector of pose parameters used for initialization (kept fixed)
    :returns: 3D vector corresponding to the estimated camera translation
    """
    cids = np.arange(0, 12)
    # map from LSP to SMPL joints
    j2d_here = j2d[cids]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    opt_pose = ch.array(init_pose)
    (_, A_global) = global_rigid_transformation(
        opt_pose, model.J, model.kintree_table, xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])
    Jtr = Jtr[smpl_ids].r

    # 9 is L shoulder, 3 is L hip
    # 8 is R shoulder, 2 is R hip
    diff3d = np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
    mean_height3d = np.mean(np.sqrt(np.sum(diff3d**2, axis=1)))

    diff2d = np.array([j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
    mean_height2d = np.mean(np.sqrt(np.sum(diff2d**2, axis=1)))

    est_d = focal_length * (mean_height3d / mean_height2d)
    # just set the z value
    init_t = np.array([0., 0., est_d])
    return init_t
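
The triangle-similarity step reduces to one line of arithmetic; with hypothetical numbers:

focal_length = 5000.0    # pixels
mean_height3d = 0.5      # model units (meters for SMPL)
mean_height2d = 150.0    # pixels
est_d = focal_length * (mean_height3d / mean_height2d)   # ~16.7 model units of depth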
Example 7
    def _global_rigid_transformation(self):
        results = {}
        pose = self.pose.reshape((-1, 3))
        parent = {
            i: self.kintree_table[0, i]
            for i in range(1, self.kintree_table.shape[1])
        }

        with_zeros = lambda x: ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))
        pack = lambda x: ch.hstack([ch.zeros((4, 3)), x.reshape((4, 1))])

        results[0] = with_zeros(
            ch.hstack((Rodrigues(pose[0, :]), self.J[0, :].reshape((3, 1)))))

        for i in range(1, self.kintree_table.shape[1]):
            results[i] = results[parent[i]].dot(
                with_zeros(
                    ch.hstack((
                        Rodrigues(pose[i, :]),  # rotation around bone endpoint
                        (self.J[i, :] - self.J[parent[i], :]).reshape(
                            (3, 1))  # bone
                    ))))

        results = [results[i] for i in sorted(results.keys())]
        results_global = results

        # subtract rotated J position
        results2 = [
            results[i] -
            (pack(results[i].dot(ch.concatenate((self.J[i, :], [0])))))
            for i in range(len(results))
        ]
        result = ch.dstack(results2)

        return result, results_global
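
The `with_zeros` and `pack` helpers only assemble 4 x 4 homogeneous matrices; a plain-NumPy equivalent for reference (values are arbitrary):

import numpy as np

Rt = np.hstack([np.eye(3), np.array([[1.0], [2.0], [3.0]])])    # 3x4 [R | t]
T = np.vstack([Rt, [[0.0, 0.0, 0.0, 1.0]]])                     # with_zeros: append the homogeneous row
vec4 = T.dot(np.array([1.0, 2.0, 3.0, 0.0]))                    # e.g. a rotated joint position
packed = np.hstack([np.zeros((4, 3)), vec4.reshape(4, 1)])      # pack: translation-only 4x4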
Example 8
def mesh_points_by_barycentric_coordinates( mesh_verts, mesh_faces, lmk_face_idx, lmk_b_coords ):
    """ function: evaluation 3d points given mesh and landmark embedding
    """
    dif1 = ch.vstack([(mesh_verts[mesh_faces[lmk_face_idx], 0] * lmk_b_coords).sum(axis=1),
                    (mesh_verts[mesh_faces[lmk_face_idx], 1] * lmk_b_coords).sum(axis=1),
                    (mesh_verts[mesh_faces[lmk_face_idx], 2] * lmk_b_coords).sum(axis=1)]).T
    return dif1
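
A tiny self-contained check of the barycentric evaluation (toy mesh; values arbitrary):

import numpy as np
import chumpy as ch

verts = ch.array(np.random.rand(4, 3))
faces = np.array([[0, 1, 2], [1, 2, 3]])
lmk_face_idx = np.array([0])                  # the landmark lies on face 0
lmk_b_coords = np.array([[0.2, 0.3, 0.5]])    # barycentric weights, summing to 1
pt = mesh_points_by_barycentric_coordinates(verts, faces, lmk_face_idx, lmk_b_coords)
# pt.r equals 0.2 * verts[0] + 0.3 * verts[1] + 0.5 * verts[2]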
Example 9
    def _set_up(self):
        self.v_shaped = self.shapedirs.dot(self.betas) + self.v_template

        self.v_shaped_personal = self.v_shaped + self.v_personal
        if sp.issparse(self.J_regressor):
            self.J = sp_dot(self.J_regressor, self.v_shaped)
        else:
            self.J = ch.sum(self.J_regressor.T.reshape(-1, 1, 24) *
                            self.v_shaped.reshape(-1, 3, 1),
                            axis=0).T
        self.v_posevariation = self.posedirs.dot(
            posemap(self.bs_type)(self.pose))
        self.v_poseshaped = self.v_shaped_personal + self.v_posevariation

        self.A, A_global = self._global_rigid_transformation()
        self.Jtr = ch.vstack([g[:3, 3] for g in A_global])
        self.J_transformed = self.Jtr + self.trans.reshape((1, 3))

        self.V = self.A.dot(self.weights.T)

        rest_shape_h = ch.hstack(
            (self.v_poseshaped, ch.ones((self.v_poseshaped.shape[0], 1))))
        self.v_posed = ch.sum(self.V.T * rest_shape_h.reshape(-1, 4, 1),
                              axis=1)[:, :3]
        self.v = self.v_posed + self.trans
Example 10
def createRendererGT(glMode, chAz, chEl, chDist, center, v, vc, f_list, vn,
                     light_color, chComponent, chVColors, targetPosition,
                     chDisplacement, width, height, uv, haveTextures_list,
                     textures_list, frustum, win):
    renderer = TexturedRenderer()
    renderer.set(glMode=glMode)

    vflat = [item for sublist in v for item in sublist]
    rangeMeshes = range(len(vflat))
    # vch = [ch.array(vflat[mesh]) for mesh in rangeMeshes]
    vch = vflat
    if len(vch) == 1:
        vstack = vch[0]
    else:
        vstack = ch.vstack(vch)

    camera, modelRotation, _ = setupCamera(
        vstack, chAz, chEl, chDist, center + targetPosition + chDisplacement,
        width, height)
    vnflat = [item for sublist in vn for item in sublist]
    # vnch = [ch.array(vnflat[mesh]) for mesh in rangeMeshes]
    # vnchnorm = [vnch[mesh]/ch.sqrt(vnch[mesh][:,0]**2 + vnch[mesh][:,1]**2 + vnch[mesh][:,2]**2).reshape([-1,1]) for mesh in rangeMeshes]
    vcflat = [item for sublist in vc for item in sublist]
    vcch = [vcflat[mesh] for mesh in rangeMeshes]

    # vc_list =  computeGlobalAndPointLighting(vch, vnch, vcch, lightPosGT, chGlobalConstantGT, light_colorGT)

    setupTexturedRenderer(renderer, vstack, vch, f_list, vcch, vnflat, uv,
                          haveTextures_list, textures_list, camera, frustum,
                          win)
    return renderer
Example 11
def get_keypoints_from_mesh_ch(mesh_vertices, keypoints_regressed):
    """ Assembles the full 21 keypoint set from the 16 Mano Keypoints and 5 mesh vertices for the fingers. """
    keypoints = [0.0 for _ in range(21)]  # init empty list

    # fill keypoints which are regressed
    mapping = {
        0: 0,  #Wrist
        1: 5,
        2: 6,
        3: 7,  #Index
        4: 9,
        5: 10,
        6: 11,  #Middle
        7: 17,
        8: 18,
        9: 19,  # Pinky
        10: 13,
        11: 14,
        12: 15,  # Ring
        13: 1,
        14: 2,
        15: 3
    }  # Thumb

    for manoId, myId in mapping.items():
        keypoints[myId] = keypoints_regressed[manoId, :]

    # get other keypoints from mesh
    for myId, meshId in kpId2vertices.items():
        keypoints[myId] = ch.mean(mesh_vertices[meshId, :], 0)

    keypoints = ch.vstack(keypoints)

    return keypoints
Example 12
def createNewRendererTarget(glMode, chAz, chEl, chDist, center, v, vc, f_list,
                            vn, light_color, chComponent, chVColors,
                            targetPosition, chDisplacement, width, height, uv,
                            haveTextures_list, textures_list, frustum, win):
    renderer = ResidualRendererOpenDR()
    renderer.set(glMode=glMode)

    vflat = [item for sublist in v for item in sublist]
    rangeMeshes = range(len(vflat))

    vnflat = [item for sublist in vn for item in sublist]

    vcflat = [item for sublist in vc for item in sublist]
    # vcch = [np.ones_like(vcflat[mesh])*chVColors.reshape([1,3]) for mesh in rangeMeshes]

    vc_list = computeSphericalHarmonics(vnflat, vcflat, light_color,
                                        chComponent)

    if len(vflat) == 1:
        vstack = vflat[0]
    else:
        vstack = ch.vstack(vflat)

    camera, modelRotation, _ = setupCamera(
        vstack, chAz, chEl, chDist, center + targetPosition + chDisplacement,
        width, height)

    setupTexturedRenderer(renderer, vstack, vflat, f_list, vc_list, vnflat, uv,
                          haveTextures_list, textures_list, camera, frustum,
                          win)
    return renderer
Example 13
def landmark_error_3d(scale, trans_2_3d, mesh_verts, mesh_faces, target_lmk_3d_face,
                      target_lmk_3d_body, lmk_face_idx, lmk_b_coords, lmk_facevtx_idx,
                      lmk_bodyvtx_idx, face_weight=1.0, body_weight=1.0, use_lunkuo=False):
    """ function: 3d landmark error objective
    """
    # select corresponding vertices
    v_selected = mesh_points_by_barycentric_coordinates( mesh_verts, mesh_faces, lmk_face_idx, lmk_b_coords )
    source_face_lmkvtx = mesh_verts[lmk_facevtx_idx[0:17]]
#    lmk_num  = lmk_face_idx.shape[0]
    source_body_lmkvtx = mesh_verts[lmk_bodyvtx_idx]
    # an index to select which landmark to use
#    lmk_selection = np.arange(0,lmk_num).ravel() # use all
    if use_lunkuo:
        frame_landmark_idx = (list(range(0, 17)) + list(range(17, 60)) +
                              list(range(61, 64)) + list(range(65, 68)))
    else:
        frame_landmark_idx = (list(range(17, 60)) + list(range(61, 64)) +
                              list(range(65, 68)))
    target_lmk_3d_face = target_lmk_3d_face[frame_landmark_idx,:]
    if use_lunkuo:
        v_selected_merge = ch.vstack([source_face_lmkvtx,v_selected])
    else:
        v_selected_merge = v_selected
    # residual vectors
    if target_lmk_3d_face.shape[1] == 2:
        cast_source_face_lmkvtx = scale*v_selected_merge[:,0:2]+trans_2_3d[0:2]
        cast_source_body_lmkvtx = scale*source_body_lmkvtx[:,0:2]+trans_2_3d[0:2]
        lmk3d_obj_face = face_weight * (target_lmk_3d_face[:,0:2] - cast_source_face_lmkvtx)
        lmk3d_obj_body = body_weight * (target_lmk_3d_body[:,0:2] - cast_source_body_lmkvtx)
    elif target_lmk_3d_face.shape[1] == 3:
        cast_source_face_lmkvtx = scale * v_selected_merge[:,:] + trans_2_3d[:]
        cast_source_body_lmkvtx = scale * source_body_lmkvtx[:, 0:2] + trans_2_3d[0:2]
        lmk3d_obj_face = face_weight * (target_lmk_3d_face - cast_source_face_lmkvtx)
        lmk3d_obj_body = body_weight * (target_lmk_3d_body[:,0:2] - cast_source_body_lmkvtx)
    else:
        raise ValueError('target_lmk_3d_face must have 2 or 3 columns')

    return lmk3d_obj_face, lmk3d_obj_body
Example 14
    def update_capsules_and_centers(self):
        centers = [set_sphere_centers(capsule) for capsule in self.capsules]
        count = 0
        for capsule in self.capsules:
            capsule.center_id = count
            count += len(capsule.centers)
        self.sph_vs = ch.vstack(centers)
        self.sph_weights = get_sphere_bweights(self.sph_vs, self.capsules)
        self.ids0 = []
        self.ids1 = []
        self.radiuss = []
        self.caps_pairs = []
        for collision in collisions:
            if hasattr(self, 'no_hands'):
                (id0, id1, rd) = capsule_dist(self.capsules[collision[0]],
                                              self.capsules[collision[1]],
                                              increase_hand=False)
            else:
                (id0, id1, rd) = capsule_dist(self.capsules[collision[0]],
                                              self.capsules[collision[1]])
            self.ids0.append(id0.r)
            self.ids1.append(id1.r)
            self.radiuss.append(rd)
            self.caps_pairs.append(
                ['%02d_%02d' % (collision[0], collision[1])] * len(id0))

        self.ids0 = np.concatenate(self.ids0).astype(int)
        self.ids1 = np.concatenate(self.ids1).astype(int)
        self.radiuss = np.concatenate(self.radiuss)
        self.caps_pairs = np.concatenate(self.caps_pairs)

        assert (self.caps_pairs.size == self.ids0.size)
        assert (self.radiuss.size == self.ids0.size)
Example 15
def ready_arguments(fname_or_dict, highRes):
    import numpy as np
    import pickle
    import chumpy as ch
    from chumpy.ch import MatVecMult
    from dataset.smpl_layer.posemapper import posemap
    import scipy.sparse as sp

    if not isinstance(fname_or_dict, dict):
        dd = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1')
        # dd = pickle.load(open(fname_or_dict, 'rb'))
    else:
        dd = fname_or_dict

    want_shapemodel = 'shapedirs' in dd
    nposeparms = dd['kintree_table'].shape[1] * 3

    if 'trans' not in dd:
        dd['trans'] = np.zeros(3)
    if 'pose' not in dd:
        dd['pose'] = np.zeros(nposeparms)
    if 'shapedirs' in dd and 'betas' not in dd:
        dd['betas'] = np.zeros(dd['shapedirs'].shape[-1])

    for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']:
        if (s in dd) and isinstance(dd[s], ch.ch.Ch):
            dd[s] = dd[s].r

    if want_shapemodel:
        dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template']
        v_shaped = dd['v_shaped']
        J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0])
        J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1])
        J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2])
        dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T
        dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
    else:
        dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))

    if highRes is not None:
        with open(highRes, 'rb') as f:
            mapping, hf = pickle.load(f, encoding='latin1')
        num_betas = dd['shapedirs'].shape[-1]
        hv = mapping.dot(dd['v_template'].ravel()).reshape(-1, 3)
        J_reg = dd['J_regressor'].asformat('csr')
        dd['f'] = hf
        dd['v_template'] = hv
        dd['weights'] = np.hstack([
                np.expand_dims(
                    np.mean(
                        mapping.dot(np.repeat(np.expand_dims(dd['weights'][:, i], -1), 3)).reshape(-1, 3)
                        , axis=1),
                    axis=-1)
                for i in range(24)
            ])
        dd['posedirs'] = mapping.dot(dd['posedirs'].reshape((-1, 207))).reshape(-1, 3, 207)
        dd['shapedirs'] = mapping.dot(dd['shapedirs'].reshape((-1, num_betas))).reshape(-1, 3, num_betas)
        dd['J_regressor'] = sp.csr_matrix((J_reg.data, J_reg.indices, J_reg.indptr), shape=(24, hv.shape[0]))

    return dd
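
Typical usage (the model path is hypothetical; `highRes=None` skips the upsampling branch):

dd = ready_arguments('models/basicModel_neutral.pkl', highRes=None)
# dd now also carries the derived entries 'v_shaped', 'J' and 'v_posed'.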
Example 16
 def __init__(self, t, rod, rad, length):
     assert (hasattr(t, 'dterms'))
     # the translation should be a chumpy object (differentiable wrt shape)
     self.t = t  # translation of the axis
     self.rod = rod  # rotation of the axis in Rodrigues form
     # the radius should be a chumpy object (differentiable wrt shape)
     assert (hasattr(rad, 'dterms'))
     self.rad = rad  # radius of the capsule
     # the length should be a chumpy object (differentiable wrt shape)
     assert (hasattr(length, 'dterms'))
     self.length = length  # length of the axis
     axis0 = ch.vstack([0, ch.abs(self.length), 0])
     self.axis = ch.vstack((t.T, (t + Rodrigues(rod).dot(axis0)).T))
     v0 = ch.hstack([v[:26].T * rad, (v[26:].T * rad) + axis0])
     self.v = ((t + Rodrigues(rod).dot(v0)).T)
     self.set_sphere_centers()
Example 17
def joints_coco(smpl):
    J = smpl.J_transformed
    nose = smpl[VERT_NOSE]
    ear_l = smpl[VERT_EAR_L]
    ear_r = smpl[VERT_EAR_R]
    eye_l = smpl[VERT_EYE_L]
    eye_r = smpl[VERT_EYE_R]

    shoulders_m = ch.sum(J[[14, 13]], axis=0) / 2.
    neck = J[12] - 0.55 * (J[12] - shoulders_m)

    return ch.vstack((
        nose,
        neck,
        2.1 * (J[14] - shoulders_m) + neck,
        J[[19, 21]],
        2.1 * (J[13] - shoulders_m) + neck,
        J[[18, 20]],
        J[2] + 0.38 * (J[2] - J[1]),
        J[[5, 8]],
        J[1] + 0.38 * (J[1] - J[2]),
        J[[4, 7]],
        eye_r,
        eye_l,
        ear_r,
        ear_l,
    ))
Example 18
def ready_arguments(fname_or_dict):

    if not isinstance(fname_or_dict, dict):
        dd = pickle.load(open(fname_or_dict, 'rb'))
    else:
        dd = fname_or_dict
        
    backwards_compatibility_replacements(dd)               # rename legacy keys for backwards compatibility
        
    want_shapemodel = 'shapedirs' in dd
    nposeparms = dd['kintree_table'].shape[1]*3            # kintree_table is 2 x 24: parent ids and joint ids;
                                                           # *3 because each joint has a 3-DoF axis-angle rotation
    if 'trans' not in dd:
        dd['trans'] = np.zeros(3)                          # default global translation
    if 'pose' not in dd:
        dd['pose'] = np.zeros(nposeparms)                      # dim of pose is 24*3 = 72
    if 'shapedirs' in dd and 'betas' not in dd:                # dim of shapedirs 6890 * 3 * 10
        dd['betas'] = np.zeros(dd['shapedirs'].shape[-1])      # dim of beta     10

    for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']:
        if (s in dd) and not hasattr(dd[s], 'dterms'):
            dd[s] = ch.array(dd[s])

    if want_shapemodel:
        dd['v_shaped'] = dd['shapedirs'].dot(dd['betas'])+dd['v_template']
        v_shaped = dd['v_shaped']
        J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:,0])        
        J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:,1])        
        J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:,2])        
        dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T    
        dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
    else:    
        dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
            
    return dd
Example 19
def ready_arguments(fname_or_dict):

    if not isinstance(fname_or_dict, dict):
        dd = pickle.load(open(fname_or_dict, 'rb'), encoding="latin1")
    else:
        dd = fname_or_dict
        
    backwards_compatibility_replacements(dd)
        
    want_shapemodel = 'shapedirs' in dd
    nposeparms = dd['kintree_table'].shape[1]*3

    if 'trans' not in dd:
        dd['trans'] = np.zeros(3)
    if 'pose' not in dd:
        dd['pose'] = np.zeros(nposeparms)
    if 'shapedirs' in dd and 'betas' not in dd:
        dd['betas'] = np.zeros(dd['shapedirs'].shape[-1])

    for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']:
        if (s in dd) and not hasattr(dd[s], 'dterms'):
            dd[s] = ch.array(dd[s])

    if want_shapemodel:
        dd['v_shaped'] = dd['shapedirs'].dot(dd['betas'])+dd['v_template']
        v_shaped = dd['v_shaped']
        J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:,0])        
        J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:,1])        
        J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:,2])        
        dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T    
        dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
    else:    
        dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
            
    return dd
Example 21
def SecondFundamentalForm(v, f):
    from chumpy import hstack, vstack
    from chumpy.linalg import Pinv
    nbrs = MatVecMult(FirstEdgesMtx(v, f, want_big=True), v.ravel()).reshape(
        (-1, 3))

    b0 = VertNormals(f=f, v=v)
    b1 = NormalizedNx3(CrossProduct(b0, nbrs - v)).reshape((-1, 3))
    b2 = NormalizedNx3(CrossProduct(b0, b1)).reshape((-1, 3))

    cnct = get_vert_connectivity(np.asarray(v), f)
    ffs = []
    for i in range(v.size // 3):  # integer division (works under Python 2 and 3)
        nbrs = v[np.nonzero(np.asarray(cnct[i].todense()).ravel())[0]] - row(
            v[i])
        us = nbrs.dot(b2[i])
        vs = nbrs.dot(b1[i])
        hs = nbrs.dot(b0[i])
        coeffs = Pinv(
            hstack((col((us * .5)**2), col(us * vs), col(
                (vs * .5)**2)))).dot(hs)
        ffs.append(row(coeffs))
        # if i == 3586:
        #     import pdb; pdb.set_trace()

    ffs = vstack(ffs)
    return ffs
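
The per-vertex loop fits the quadratic height field h ~ a*(u/2)^2 + b*u*v + c*(v/2)^2 by pseudoinverse; the same fit in plain NumPy on synthetic data (coefficients made up):

import numpy as np

u = np.random.randn(8)
w = np.random.randn(8)
h = 0.3 * (u * .5)**2 + 0.1 * u * w                      # synthetic heights with known coefficients
A = np.column_stack([(u * .5)**2, u * w, (w * .5)**2])
coeffs = np.linalg.pinv(A).dot(h)                        # recovers ~[0.3, 0.1, 0.0]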
Example 22
def guess_init(model, focal_length, j2d, init_pose):
    """Initialize the camera translation via triangle similarity, by using the torso joints        .
    :param model: SMPL model
    :param focal_length: camera focal length (kept fixed)
    :param j2d: 14x2 array of CNN joints
    :param init_pose: 72D vector of pose parameters used for initialization (kept fixed)
    :returns: 3D vector corresponding to the estimated camera translation
    """
    cids = np.arange(0, 12)
    # map from LSP to SMPL joints
    j2d_here = j2d[cids]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    opt_pose = ch.array(init_pose)
    (_, A_global) = global_rigid_transformation(opt_pose,
                                                model.J,
                                                model.kintree_table,
                                                xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])
    Jtr = Jtr[smpl_ids].r

    # 9 is L shoulder, 3 is L hip
    # 8 is R shoulder, 2 is R hip
    diff3d = np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
    mean_height3d = np.mean(np.sqrt(np.sum(diff3d**2, axis=1)))

    diff2d = np.array([j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
    mean_height2d = np.mean(np.sqrt(np.sum(diff2d**2, axis=1)))

    est_d = focal_length * (mean_height3d / mean_height2d)
    # just set the z value
    init_t = np.array([0., 0., est_d])
    return init_t
Example 23
    def _set_up(self):
        self.v_shaped = self.shapedirs.dot(self.betas) + self.v_template

        body_height = (self.v_shaped[2802, 1] + self.v_shaped[6262, 1]) - (
            self.v_shaped[2237, 1] + self.v_shaped[6728, 1])
        self.scale = 1.66 / np.array(body_height)[0]  # normalize to a 1.66 m body height

        self.v_shaped_personal = self.scale * self.v_shaped + self.v_personal

        if sp.issparse(self.J_regressor):
            self.J = self.scale * sp_dot(self.J_regressor, self.v_shaped)
        else:
            self.J = self.scale * ch.sum(self.J_regressor.T.reshape(-1, 1, 24)
                                         * self.v_shaped.reshape(-1, 3, 1),
                                         axis=0).T
        self.v_posevariation = self.posedirs.dot(
            posemap(self.bs_type)(self.pose))
        self.v_poseshaped = self.v_shaped_personal + self.v_posevariation

        self.A, A_global = self._global_rigid_transformation()
        self.Jtr = ch.vstack([g[:3, 3] for g in A_global])
        self.J_transformed = self.Jtr + self.trans.reshape((1, 3))

        self.V = self.A.dot(self.weights.T)

        rest_shape_h = ch.hstack(
            (self.v_poseshaped, ch.ones((self.v_poseshaped.shape[0], 1))))
        self.v_posed = ch.sum(self.V.T * rest_shape_h.reshape(-1, 4, 1),
                              axis=1)[:, :3]
        self.v = self.v_posed + self.trans
Example 24
def verts_decorated(trans, pose, 
    v_template, J, weights, kintree_table, bs_style, f,
    bs_type=None, posedirs=None, betas=None, shapedirs=None, want_Jtr=False):

    for which in [trans, pose, v_template, weights, posedirs, betas, shapedirs]:
        if which is not None:
            assert ischumpy(which)

    v = v_template

    if shapedirs is not None:
        if betas is None:
            betas = chumpy.zeros(shapedirs.shape[-1])
        v_shaped = v + shapedirs.dot(betas)
    else:
        v_shaped = v
        
    if posedirs is not None:
        v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose))
    else:
        v_posed = v_shaped
        
    v = v_posed
        
    if sp.issparse(J):
        regressor = J
        J_tmpx = MatVecMult(regressor, v_shaped[:,0])        
        J_tmpy = MatVecMult(regressor, v_shaped[:,1])        
        J_tmpz = MatVecMult(regressor, v_shaped[:,2])        
        J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T            
    else:    
        assert(ischumpy(J))
        
    assert(bs_style=='lbs')
    result, Jtr = lbs.verts_core(pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy)
     
    tr = trans.reshape((1,3))
    result = result + tr
    Jtr = Jtr + tr

    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights
    result.kintree_table = kintree_table
    result.bs_style = bs_style
    result.bs_type = bs_type
    if posedirs is not None:
        result.posedirs = posedirs
        result.v_posed = v_posed
    if shapedirs is not None:
        result.shapedirs = shapedirs
        result.betas = betas
        result.v_shaped = v_shaped
    if want_Jtr:
        result.J_transformed = Jtr
    return result
Example 25
def verts_decorated(trans, pose, 
    v_template, J, weights, kintree_table, bs_style, f,
    bs_type=None, posedirs=None, betas=None, shapedirs=None, want_Jtr=False):

    for which in [trans, pose, v_template, weights, posedirs, betas, shapedirs]:
        if which is not None:
            assert ischumpy(which)

    v = v_template

    if shapedirs is not None:
        if betas is None:
            betas = chumpy.zeros(shapedirs.shape[-1])
        v_shaped = v + shapedirs.dot(betas)
    else:
        v_shaped = v
        
    if posedirs is not None:
        v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose))
    else:
        v_posed = v_shaped
        
    v = v_posed
        
    if sp.issparse(J):
        regressor = J
        J_tmpx = MatVecMult(regressor, v_shaped[:,0])        
        J_tmpy = MatVecMult(regressor, v_shaped[:,1])        
        J_tmpz = MatVecMult(regressor, v_shaped[:,2])        
        J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T            
    else:    
        assert(ischumpy(J))
        
    assert(bs_style=='lbs')
    result, Jtr = verts_core(pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy)
     
    tr = trans.reshape((1,3))
    result = result + tr
    Jtr = Jtr + tr

    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights
    result.kintree_table = kintree_table
    result.bs_style = bs_style
    result.bs_type = bs_type
    if posedirs is not None:
        result.posedirs = posedirs
        result.v_posed = v_posed
    if shapedirs is not None:
        result.shapedirs = shapedirs
        result.betas = betas
        result.v_shaped = v_shaped
    if want_Jtr:
        result.J_transformed = Jtr
    return result
Example 26
 def _calc_coords(self):
     # calculate joint location and rotation
     A, A_global = global_rigid_transformation(self.model.fullpose, self.model.J, self.model.kintree_table, ch)
     Jtr = ch.vstack([g[:3, 3] for g in A_global])
     R = [g[:3, :3] for g in A_global]
     coords_kp_xyz = get_keypoints_from_mesh_ch(self.model, Jtr)
     coords_kp_xyz = coords_kp_xyz + self.global_trans
     return coords_kp_xyz
Example 27
def verts_core(pose, v, J, weights, kintree_table, want_Jtr=False):
    A, A_global = global_rigid_transformation(pose, J, kintree_table)
    T = A.dot(weights.T)

    rest_shape_h = ch.vstack((v.T, np.ones((1, v.shape[0]))))

    v = (T[:, 0, :] * rest_shape_h[0, :].reshape((1, -1)) +
         T[:, 1, :] * rest_shape_h[1, :].reshape((1, -1)) +
         T[:, 2, :] * rest_shape_h[2, :].reshape((1, -1)) +
         T[:, 3, :] * rest_shape_h[3, :].reshape((1, -1))).T

    v = v[:, :3]

    if not want_Jtr:
        return v
    Jtr = ch.vstack([g[:3, 3] for g in A_global])
    return (v, Jtr)
Example 28
def mstack(vs, fs):
    import chumpy as ch
    import numpy as np
    lengths = [v.shape[0] for v in vs]
    f = np.vstack([fs[i]+np.sum(lengths[:i]).astype(np.uint32) for i in range(len(fs))])
    v = ch.vstack(vs)

    return v, f
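
A quick check of the face-index offsetting (toy meshes, arbitrary values):

import numpy as np
import chumpy as ch

v1 = ch.array(np.random.rand(4, 3))
v2 = ch.array(np.random.rand(5, 3))
f1 = np.array([[0, 1, 2]], dtype=np.uint32)
f2 = np.array([[0, 1, 4]], dtype=np.uint32)
v, f = mstack([v1, v2], [f1, f2])
# f == [[0, 1, 2], [4, 5, 8]]: the second mesh's faces are shifted by len(v1).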
Example 30
def guess_init(
        model,  # pylint: disable=too-many-locals, too-many-arguments
        focal_length,
        j2d,
        weights,
        init_pose,
        use_gt_guess):
    """Initialize the camera depth using the torso points."""
    cids = _np.arange(0, 12)
    j2d_here = j2d[cids]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
    # The initial pose is a t-pose with all body parts planar to the camera
    # (i.e., they have their maximum length).
    opt_pose = _ch.array(init_pose)  # pylint: disable=no-member
    (_, A_global) = _global_rigid_transformation(opt_pose,
                                                 model.J,
                                                 model.kintree_table,
                                                 xp=_ch)
    Jtr = _ch.vstack([g[:3, 3] for g in A_global])  # pylint: disable=no-member
    Jtr = Jtr[smpl_ids].r

    if use_gt_guess:
        # More robust estimate:
        # Since there is no fore'lengthening', only foreshortening, take the longest
        # visible body part as a robust estimate in the case of noise-free data.
        longest_part = None
        longest_part_length = -1.
        for conn in connections_lsp:
            if (conn[0] < 12 and conn[1] < 12 and weights[conn[0]] > 0.
                    and weights[conn[1]] > 0.):
                part_length_proj_gt = _np.sqrt(
                    _np.sum(_np.array(j2d_here[conn[0]] -
                                      j2d_here[conn[1]])**2,
                            axis=0))
                if part_length_proj_gt > longest_part_length:
                    longest_part_length = part_length_proj_gt
                    longest_part = conn
        #length2d = _np.sqrt(_np.sum(
        #    _np.array(j2d_here[longest_part[0]] - j2d_here[longest_part[1]])**2, axis=0))
        part_length_3D = _np.sqrt(
            _np.sum(_np.array(Jtr[longest_part[0]] - Jtr[longest_part[1]])**2,
                    axis=0))
        est_d = focal_length * (part_length_3D / longest_part_length)
    else:
        diff3d = _np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
        mean_height3d = _np.mean(_np.sqrt(_np.sum(diff3d**2, axis=1)))
        diff2d = _np.array(
            [j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
        mean_height2d = _np.mean(_np.sqrt(_np.sum(diff2d**2, axis=1)))
        f = focal_length  # pylint: disable=redefined-outer-name
        if mean_height2d == 0.:
            _LOGGER.warn(
                "Depth can not be correctly estimated. Guessing wildly.")
            est_d = 60.
        else:
            est_d = f * (mean_height3d / mean_height2d)
    init_t = _np.array([0., 0., est_d])
    return init_t
Example 31
def verts_decorated_quat(trans,
                         pose,
                         v_template,
                         J_regressor,
                         weights,
                         kintree_table,
                         f,
                         posedirs=None,
                         betas=None,
                         add_shape=True,
                         shapedirs=None,
                         want_Jtr=False):

    for which in [
            trans, pose, v_template, weights, posedirs, betas, shapedirs
    ]:
        if which is not None:
            assert ischumpy(which)
    v = v_template

    v_shaped = v + shapedirs.dot(betas)  #Add Shape of the model.
    quaternion_angles = axis2quat(pose.reshape((-1, 3))).reshape(-1)[4:]
    shape_feat = betas[1]
    feat = ch.concatenate([quaternion_angles, shape_feat], axis=0)
    poseblends = posedirs.dot(feat)
    v_posed = v_shaped + poseblends

    J_tmpx = MatVecMult(J_regressor, v_shaped[:, 0])
    J_tmpy = MatVecMult(J_regressor, v_shaped[:, 1])
    J_tmpz = MatVecMult(J_regressor, v_shaped[:, 2])
    J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T

    result, meta = verts_core(pose,
                              v,
                              J,
                              weights,
                              kintree_table,
                              want_Jtr=True)
    Jtr = meta.Jtr if meta is not None else None
    tr = trans.reshape((1, 3))
    result = result + tr
    Jtr = Jtr + tr

    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights

    result.posedirs = posedirs
    result.v_posed = v_posed
    result.shapedirs = shapedirs
    result.betas = betas
    result.v_shaped = v_shaped
    result.J_transformed = Jtr

    return result
Example 32
def ready_arguments(fname_or_dict,
                    posekey4vposed='pose',
                    shared_args=None,
                    chTrans=None,
                    chBetas=None):
    import numpy as np
    import pickle
    import chumpy as ch
    from chumpy.ch import MatVecMult
    from mano.webuser.posemapper import posemap

    if not isinstance(fname_or_dict, dict):
        dd = pickle.load(open(fname_or_dict, 'rb'))
    else:
        dd = fname_or_dict

    want_shapemodel = 'shapedirs' in dd
    nposeparms = dd['kintree_table'].shape[1] * 3

    if 'trans' not in dd:
        dd['trans'] = np.zeros(3)
    if 'pose' not in dd:
        dd['pose'] = np.zeros(nposeparms)
    if 'shapedirs' in dd and 'betas' not in dd:
        dd['betas'] = np.zeros(dd['shapedirs'].shape[-1])

    if chTrans is not None:
        dd['trans'] = chTrans

    if chBetas is not None:
        dd['betas'] = chBetas

    for s in [
            'v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs',
            'betas', 'J', 'fullpose', 'pose_dof'
    ]:
        if (s in dd) and not hasattr(dd[s], 'dterms'):
            if shared_args is not None and s in shared_args:
                dd[s] = shared_args[s]
            else:
                dd[s] = ch.array(dd[s])

    assert (posekey4vposed in dd)
    if want_shapemodel:
        dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template']
        v_shaped = dd['v_shaped']
        J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0])
        J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1])
        J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2])
        dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T
        dd['v_posed'] = v_shaped + dd['posedirs'].dot(
            posemap(dd['bs_type'])(dd[posekey4vposed]))
    else:
        dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(
            posemap(dd['bs_type'])(dd[posekey4vposed]))

    return dd
Example 33
def smooth_rings(gar_rings, verts):
    """
    :param gar_rings:
    :param verts:
    :returns
    """
    error = ch.zeros([0, 3])

    for ring in gar_rings:
        N = len(ring)
        aring = np.array(ring)
        ring_0 = aring[np.arange(0, N)]
        ring_m1 = aring[np.array([N - 1] + list(range(0, N - 1)))]
        ring_p1 = aring[np.array(list(range(1, N)) + [0])]
        err_ring = verts[ring_m1] - 2 * verts[ring_0] + verts[ring_p1]
        error = ch.vstack([error, err_ring])

    return error
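
Each residual above is the discrete second difference v[i-1] - 2*v[i] + v[i+1] along a ring, which vanishes where the ring is locally straight; a NumPy sanity check on three collinear points:

import numpy as np

p = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
print(p[0] - 2 * p[1] + p[2])   # -> [0. 0. 0.] for collinear points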
Example 34
def _fit_rot_trans(
        model,  # pylint: disable=too-many-arguments, too-many-locals
        j2d,
        center,
        init_t,
        init_pose,
        conf,
        flength):
    """Find a rotation and translation to minimize the projection error of the pose."""
    opt_pose = _ch.array(init_pose)  # pylint: disable=no-member
    opt_trans = _ch.array(init_t)  # pylint: disable=no-member
    (_, A_global) = _global_rigid_transformation(opt_pose,
                                                 model.J,
                                                 model.kintree_table,
                                                 xp=_ch)
    Jtr = _ch.vstack([g[:3, 3] for g in A_global]) + opt_trans  # pylint: disable=no-member

    cam = _ProjectPoints(f=_np.array([flength, flength]),
                         rt=_np.zeros(3),
                         t=_np.zeros(3),
                         k=_np.zeros(5),
                         c=center)
    cam_3d = _ProjectPoints3D(f=_np.array([flength, flength]),
                              rt=_np.zeros(3),
                              t=_np.zeros(3),
                              k=_np.zeros(5),
                              c=center)
    cam.v = Jtr
    cam_3d.v = Jtr
    free_variables = [opt_pose[:3],
                      opt_trans]  # Optimize global rotation and translation.
    j2d_ids = range(12)
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
    #_CID_TORSO = [2, 3, 8, 9]
    #torso_smpl_ids = [2, 1, 17, 16]
    _ch.minimize(
        {
            'j2d': (j2d.T[j2d_ids] - cam[smpl_ids]).T * conf[j2d_ids],
            # 'dev^2': 1e2 * (opt_trans[2] - init_t[2])
        },
        x0=free_variables,
        method='dogleg',
        callback=None,  #on_step_vis,
        options={
            'maxiter': 100,
            'e_3': .0001,
            'disp': 0
        })
    _LOGGER.debug("Global rotation: %s, global translation: %s.",
                  str(opt_pose[:3].r), str(opt_trans.r))
    _LOGGER.debug("Points 3D: %s.", str(cam_3d.r[:10, :]))
    _LOGGER.debug("Points 2D: %s.", str(cam.r[:10, :]))
    return (opt_pose[:3].r, opt_trans.r,
            cam_3d.r[:, 2].min(), cam_3d.r[:, 2].max())
Example 35
def SecondFundamentalForm(v, f):    
    from chumpy import hstack, vstack
    from chumpy.linalg import Pinv
    nbrs = MatVecMult(FirstEdgesMtx(v, f, want_big=True), v.ravel()).reshape((-1,3))
    
    b0 = NormalizedNx3(VertNormalsScaled(f=f, v=v)).reshape((-1,3))
    b1 = NormalizedNx3(CrossProduct(b0, nbrs-v)).reshape((-1,3))
    b2 = NormalizedNx3(CrossProduct(b0, b1)).reshape((-1,3))
    
    cnct = get_vert_connectivity(v.r, f)
    ffs = []
    for i in range(v.size // 3):  # integer division (works under Python 2 and 3)
        nbrs = v[np.nonzero(np.asarray(cnct[i].todense()).ravel())[0]] - row(v[i])
        us = nbrs.dot(b2[i])
        vs = nbrs.dot(b1[i])
        hs = nbrs.dot(b0[i])
        coeffs = Pinv(hstack((col((us*.5)**2), col(us*vs), col((vs*.5)**2)))).dot(hs)
        ffs.append(row(coeffs))
        # if i == 3586:
        #     import pdb; pdb.set_trace()

    ffs = vstack(ffs)
    return ffs
Example 36
def get_capsules(model, wrt_betas=None, length_regs=None, rad_regs=None):
    from opendr.geometry import Rodrigues
    if length_regs is not None:
        n_shape_dofs = length_regs.shape[0] - 1
    else:
        n_shape_dofs = model.betas.r.size
    segm = np.argmax(model.weights_prior, axis=1)
    J_off = ch.zeros((len(joint2name), 3))
    rots = rots0.copy()
    mujoco_t_mid = [0, 3, 6, 9]
    if wrt_betas is not None:
        # if we want to differentiate wrt betas (shape), we must have the
        # regressors...
        assert (length_regs is not None and rad_regs is not None)
        # ... and betas must be a chumpy object
        assert (hasattr(wrt_betas, 'dterms'))
        pad = ch.concatenate(
            (wrt_betas, ch.zeros(n_shape_dofs - len(wrt_betas)), ch.ones(1)))
        lengths = pad.dot(length_regs)
        rads = pad.dot(rad_regs)
    else:
        lengths = ch.ones(len(joint2name))
        rads = ch.ones(len(joint2name))
    betas = wrt_betas if wrt_betas is not None else model.betas
    n_betas = len(betas)
    # the joint regressors are the original, pre-optimized ones
    # (middle of the part frontier)
    myJ_regressor = model.J_regressor_prior
    myJ0 = ch.vstack(
        (ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 0] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 0]),
         ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 1] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 1]),
         ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 2] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 2]))).T
    # with small adjustments for hips, spine and feet
    myJ = ch.vstack(
        [ch.concatenate([myJ0[0, 0], (
            .6 * myJ0[0, 1] + .2 * myJ0[1, 1] + .2 * myJ0[2, 1]), myJ0[9, 2]]),
         ch.vstack([myJ0[i] for i in range(1, 7)]), ch.concatenate(
             [myJ0[7, 0], (1.1 * myJ0[7, 1] - .1 * myJ0[4, 1]), myJ0[7, 2]]),
         ch.concatenate(
             [myJ0[8, 0], (1.1 * myJ0[8, 1] - .1 * myJ0[5, 1]), myJ0[8, 2]]),
         ch.concatenate(
             [myJ0[9, 0], myJ0[9, 1], (.2 * myJ0[9, 2] + .8 * myJ0[12, 2])]),
         ch.vstack([myJ0[i] for i in range(10, 24)])])
    capsules = []
    # create one capsule per mujoco joint
    for ijoint, segms in enumerate(mujoco2segm):
        if wrt_betas is None:
            vidxs = np.asarray([segm == k for k in segms]).any(axis=0)
            verts = model.v_template[vidxs].r
            dims = (verts.max(axis=0) - verts.min(axis=0))
            rads[ijoint] = .5 * ((dims[(np.argmax(dims) + 1) % 3] + dims[(
                np.argmax(dims) + 2) % 3]) / 4.)
            lengths[ijoint] = max(dims) - 2. * rads[ijoint].r
        # the core joints are different, since the capsule is not in the joint
        # but in the middle
        if ijoint in mujoco_t_mid:
            len_offset = ch.vstack([ch.zeros(1), ch.abs(lengths[ijoint]) / 2.,
                                    ch.zeros(1)]).reshape(3, 1)
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(
                    3, 1) - Rodrigues(rots[ijoint]).dot(len_offset),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        else:
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        caps.id = ijoint
        capsules.append(caps)
    return capsules
Example 37
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       n_betas=10,
                       regs=None,
                       conf=None,
                       viz=False):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joints ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array(
        [1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64)

    if try_both_orient:
        flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
            cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
        flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        # store here the final error for both orientations,
        # and pick the orientation resulting in the lowest error
        errors = []

    svs = []
    cams = []
    for o_id, orient in enumerate(orientations):
        # initialize the shape to the mean shape in the SMPL training set
        betas = ch.zeros(n_betas)

        # initialize the pose by using the optimized body orientation and the
        # pose prior
        init_pose = np.hstack((orient, prior.weights.dot(prior.means)))

        # instantiate the model:
        # verts_decorated allows us to define how many
        # shape coefficients (directions) we want to consider (here, n_betas)
        sv = verts_decorated(
            trans=ch.zeros(3),
            pose=ch.array(init_pose),
            v_template=model.v_template,
            J=model.J_regressor,
            betas=betas,
            shapedirs=model.shapedirs[:, :, :n_betas],
            weights=model.weights,
            kintree_table=model.kintree_table,
            bs_style=model.bs_style,
            f=model.f,
            bs_type=model.bs_type,
            posedirs=model.posedirs)

        # make the SMPL joints depend on betas
        Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i])
                           for i in range(len(betas))])
        J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = global_rigid_transformation(
            sv.pose, J_onbetas, model.kintree_table, xp=ch)
        Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        # add the head joint, corresponding to a vertex...
        Jtr = ch.vstack((Jtr, sv[head_id]))

        # ... and add the joint id to the list
        if o_id == 0:
            smpl_ids.append(len(Jtr) - 1)

        # update the weights using confidence values
        weights = base_weights * conf[
            cids] if conf is not None else base_weights

        # project SMPL joints on the image plane using the estimated camera
        cam.v = Jtr

        # data term: distance between observed and estimated joints in 2D
        obj_j2d = lambda w, sigma: (
            w * weights.reshape((-1, 1)) * GMOf((j2d[cids] - cam[smpl_ids]), sigma))

        # mixture of gaussians pose prior
        pprior = lambda w: w * prior(sv.pose)
        # joint angles pose prior, defined over a subset of pose parameters:
        # 55: left elbow,  90deg bend at -np.pi/2
        # 58: right elbow, 90deg bend at np.pi/2
        # 12: left knee,   90deg bend at np.pi/2
        # 15: right knee,  90deg bend at np.pi/2
        alpha = 10
        my_exp = lambda x: alpha * ch.exp(x)
        obj_angle = lambda w: w * ch.concatenate([
            my_exp(sv.pose[55]), my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]), my_exp(-sv.pose[15])])

        if viz:
            import matplotlib.pyplot as plt
            plt.ion()

            def on_step(_):
                """Create visualization."""
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        np.around(cam.r[smpl_ids]).astype(int),
                        np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0 and
                            coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1] and
                            target_coord[0] >= 0 and
                            target_coord[1] < tmp_img.shape[0] and
                            target_coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(target_coord), 3,
                                   [0, 255, 0])
                plt.imshow(tmp_img[:, :, ::-1])
                plt.draw()
                plt.show()
                plt.pause(1e-2)

            on_step(None)  # draw the initial state
        else:
            on_step = None

        if regs is not None:
            # interpenetration term
            sp = SphereCollisions(
                pose=sv.pose, betas=sv.betas, model=model, regs=regs)
            sp.no_hands = True
        # weight configuration used in the paper, with joints + confidence values from the CNN
        # (all the weights used in the code were obtained via grid search, see the paper for more details)
        # the first list contains the weights for the pose priors,
        # the second list contains the weights for the shape prior
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        # run the optimization in 4 stages, progressively decreasing the
        # weights for the priors
        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}

            objs['j2d'] = obj_j2d(1., 100)

            objs['pose'] = pprior(w)

            objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas

            if regs is not None:
                objs['sph_coll'] = 1e3 * sp

            ch.minimize(
                objs,
                x0=[sv.betas, sv.pose],
                method='dogleg',
                callback=on_step,
                options={'maxiter': 100,
                         'e_3': .0001,
                         'disp': 0})

        t1 = time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)

    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r)
Example 38
    def test_occlusion(self):
        if visualize:
            import matplotlib.pyplot as plt
            plt.ion()

        # Create renderer
        import chumpy as ch
        import numpy as np
        from opendr.renderer import TexturedRenderer, ColoredRenderer
        #rn = TexturedRenderer()
        rn = ColoredRenderer()

        # Assign attributes to renderer
        from util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
        rn.texture_image = m.texture_image
        rn.ft = m.ft
        rn.vt = m.vt
        m.v[:,2] = np.mean(m.v[:,2])

        # red is front and zero
        # green is back and 1
        t0 = ch.array([1,0,.1])
        t1 = ch.array([-1,0,.1])
        v0 = ch.array(m.v) + t0

        if False:
            v1 = ch.array(m.v*.4 + np.array([0,0,3.8])) + t1
        else:
            v1 = ch.array(m.v) + t1
        vc0 = v0*0 + np.array([[.4,0,0]])
        vc1 = v1*0 + np.array([[0,.4,0]])
        vc = ch.vstack((vc0, vc1))

        v = ch.vstack((v0, v1))
        f = np.vstack((m.f, m.f+len(v0)))

        w, h = (320, 240)
        rn.camera = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
        rn.camera.t = ch.array([0,0,-2.5])
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        m.vc = v.r*0 + np.array([[1,0,0]])
        rn.set(v=v, f=f, vc=vc)

        t0[:] = np.array([1.4, 0, .1-.02])
        t1[:] = np.array([-0.6, 0, .1+.02])

        target = rn.r

        if visualize:
            plt.figure()
            plt.imshow(target)
            plt.title('target')

            plt.figure()
            plt.show()

        im_orig = rn.r.copy()

        from cvwrap import cv2

        tr = t0
        eps_emp = .02
        eps_pred = .02

        #blur = lambda x : cv2.blur(x, ksize=(5,5))
        blur = lambda x : x
        for tr in [t0, t1]:
            if tr is t0:
                sum_limits = np.array([2.1e+2, 6.9e+1, 1.6e+2])
            else:
                sum_limits = [1., 5., 4.]

            if visualize:
                plt.figure()
            for i in range(3):
                dr_pred = np.array(rn.dr_wrt(tr[i]).todense()).reshape(rn.shape) * eps_pred
                dr_pred = blur(dr_pred)

                # central differences
                tr[i] = tr[i].r + eps_emp/2.
                rn_greater = rn.r.copy()
                tr[i] = tr[i].r - eps_emp/1.
                rn_lesser = rn.r.copy()
                tr[i] = tr[i].r + eps_emp/2.

                dr_emp = blur((rn_greater - rn_lesser) * eps_pred / eps_emp)

                dr_pred_shown = np.clip(dr_pred, -.5, .5) + .5
                dr_emp_shown = np.clip(dr_emp, -.5, .5) + .5

                if visualize:
                    plt.subplot(3,3,i+1)
                    plt.imshow(dr_pred_shown)
                    plt.title('pred')
                    plt.axis('off')

                    plt.subplot(3,3,3+i+1)
                    plt.imshow(dr_emp_shown)
                    plt.title('empirical')
                    plt.axis('off')

                    plt.subplot(3,3,6+i+1)

                diff = np.abs(dr_emp - dr_pred)
                if visualize:
                    plt.imshow(diff)
                diff = diff.ravel()
                if visualize:
                    plt.title('diff (sum: %.2e)'  % (np.sum(diff)))
                    plt.axis('off')

                # print 'dr pred sum: %.2e' % (np.sum(np.abs(dr_pred.ravel())),)
                # print 'dr emp sum: %.2e' % (np.sum(np.abs(dr_emp.ravel())),)

                #import pdb; pdb.set_trace()
                self.assertTrue(np.sum(diff) < sum_limits[i])
Example 39
def initialize_camera(model,
                      j2d,
                      img,
                      init_pose,
                      flength=5000.,
                      pix_thsh=25.,
                      viz=False):
    """Initialize camera translation and body orientation
    :param model: SMPL model
    :param j2d: 14x2 array of CNN joints
    :param img: h x w x 3 image 
    :param init_pose: 72D vector of pose parameters used for initialization
    :param flength: camera focal length (kept fixed)
    :param pix_thsh: threshold (in pixels); if the 2D distance between the shoulder joints
                     is lower than pix_thsh, the body orientation is considered ambiguous
                     (so a fit is run on both the estimated orientation and its flip)
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the estimated camera,
              a boolean deciding if both the optimized body orientation and its flip should be considered,
              3D vector for the body orientation
    """
    # optimize camera translation and body orientation based on torso joints
    # LSP torso ids:
    # 2=right hip, 3=left hip, 8=right shoulder, 9=left shoulder
    torso_cids = [2, 3, 8, 9]
    # corresponding SMPL torso ids
    torso_smpl_ids = [2, 1, 17, 16]

    center = np.array([img.shape[1] / 2, img.shape[0] / 2])

    # initialize camera rotation
    rt = ch.zeros(3)
    # initialize camera translation
    _LOGGER.info('initializing translation via similar triangles')
    init_t = guess_init(model, flength, j2d, init_pose)
    t = ch.array(init_t)

    # check how close the shoulder joints are
    try_both_orient = np.linalg.norm(j2d[8] - j2d[9]) < pix_thsh

    opt_pose = ch.array(init_pose)
    (_, A_global) = global_rigid_transformation(
        opt_pose, model.J, model.kintree_table, xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])

    # initialize the camera
    cam = ProjectPoints(
        f=np.array([flength, flength]), rt=rt, t=t, k=np.zeros(5), c=center)

    # we are going to project the SMPL joints
    cam.v = Jtr

    if viz:
        viz_img = img.copy()

        # draw the target (CNN) joints
        for coord in np.around(j2d).astype(int):
            if (coord[0] < img.shape[1] and coord[0] >= 0 and
                    coord[1] < img.shape[0] and coord[1] >= 0):
                cv2.circle(viz_img, tuple(coord), 3, [0, 255, 0])

        import matplotlib.pyplot as plt
        plt.ion()

        # draw optimized joints at each iteration
        def on_step(_):
            """Draw a visualization."""
            plt.figure(1, figsize=(5, 5))
            plt.subplot(1, 1, 1)
            viz_img = img.copy()
            for coord in np.around(cam.r[torso_smpl_ids]).astype(int):
                if (coord[0] < viz_img.shape[1] and coord[0] >= 0 and
                        coord[1] < viz_img.shape[0] and coord[1] >= 0):
                    cv2.circle(viz_img, tuple(coord), 3, [0, 0, 255])
            plt.imshow(viz_img[:, :, ::-1])
            plt.draw()
            plt.show()
            plt.pause(1e-3)
    else:
        on_step = None
    # optimize for camera translation and body orientation
    free_variables = [cam.t, opt_pose[:3]]
    ch.minimize(
        # data term defined over torso joints...
        {'cam': j2d[torso_cids] - cam[torso_smpl_ids],
         # ...plus a regularizer for the camera translation
         'cam_t': 1e2 * (cam.t[2] - init_t[2])},
        x0=free_variables,
        method='dogleg',
        callback=on_step,
        options={'maxiter': 100,
                 'e_3': .0001,
                 # disp set to 1 enables verbose output from the optimizer
                 'disp': 0})
    if viz:
        plt.ioff()
    return (cam, try_both_orient, opt_pose[:3].r)