Example #1
def setupCamera(v, chAz, chEl, chDist, objCenter, width, height):

    chCamModelWorld = computeHemisphereTransformation(chAz, chEl, chDist,
                                                      objCenter)

    chMVMat = ch.dot(chCamModelWorld,
                     np.array(mathutils.Matrix.Rotation(radians(270), 4, 'X')))

    chInvCam = ch.inv(chMVMat)

    modelRotation = chInvCam[0:3, 0:3]

    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = chInvCam[0:3, 3]

    translation, rotation = (chTranslation, chRod)
    camera = ProjectPoints(v=v,
                           rt=rotation,
                           t=translation,
                           f=1.12 * ch.array([width, width]),
                           c=ch.array([width, height]) / 2.0,
                           k=ch.zeros(5))
    camera.openglMat = np.array(mathutils.Matrix.Rotation(
        radians(180), 4, 'X'))
    return camera, modelRotation, chMVMat
Example #2
def main(mesh_list, out_list, scale=1.0, move_scale=True):
    assert len(mesh_list) == len(out_list)
    for mesh_file, out_file in zip(mesh_list, out_list):
        mesh = load_obj_data_binary(mesh_file)
        if move_scale:  # move to center and scale to unit bounding box
            mesh['v'] = (mesh['v'] - np.array([128, -192, 128]) +
                         0.5) * voxel_size

        if not ('vn' in mesh and mesh['vn'] is not None):
            mesh['vn'] = np.array(VertNormals(f=mesh['f'], v=mesh['v']))

        V = ch.array(mesh['v']) * scale
        V -= trans

        C = np.ones_like(mesh['v'])
        C *= np.array([186, 212, 255], dtype=np.float32) / 255.0
        # C *= np.array([158, 180, 216], dtype=np.float32) / 250.0
        C = np.minimum(C, 1.0)
        A = np.zeros_like(mesh['v'])
        A += LambertianPointLight(f=mesh['f'],
                                  v=V,
                                  vn=-mesh['vn'],
                                  num_verts=len(V),
                                  light_pos=np.array([0, -50, -50]),
                                  light_color=np.array([1.0, 1.0, 1.0]),
                                  vc=C)

        cam_t, cam_r = ch.array((0, 0, 0)), ch.array((3.14, 0, 0))
        U = ProjectPoints(v=V,
                          f=[flength, flength],
                          c=[w / 2., h / 2.],
                          k=ch.zeros(5),
                          t=cam_t,
                          rt=cam_r)
        rn = ColoredRenderer(camera=U,
                             v=V,
                             f=mesh['f'],
                             vc=A,
                             bgcolor=np.array([1.0, 0.0, 0.0]),
                             frustum={
                                 'width': w,
                                 'height': h,
                                 'near': 0.1,
                                 'far': 20
                             })

        img = np.asarray(rn)[:, :, (2, 1, 0)]
        msk = np.sum(np.abs(img - np.array([[[0, 0, 1.0]]], dtype=np.float32)),
                     axis=-1,
                     keepdims=True)
        msk[msk > 0] = 1
        img = cv.resize(img, (img.shape[1] // 2, img.shape[0] // 2))
        msk = cv.resize(msk, (msk.shape[1] // 2, msk.shape[0] // 2),
                        interpolation=cv.INTER_AREA)
        msk[msk < 1] = 0
        msk = msk[:, :, np.newaxis]
        img = np.concatenate([img, msk], axis=-1)
        cv.imshow('render3', img)
        cv.waitKey(3)
        cv.imwrite(out_file, np.uint8(img * 255))
Example #3
def load_results(pkl_path, model):
    from smpl_webuser.verts import verts_decorated as SmplModel
    with open(pkl_path, 'rb') as f:
        res = pkl.load(f, encoding='latin1')

    sv = SmplModel(trans=ch.array(res['trans']),
                   pose=ch.array(res['pose']),
                   v_template=model.v_template,
                   J=model.J,
                   weights=model.weights,
                   kintree_table=model.kintree_table,
                   bs_style='lbs',
                   f=model.f,
                   betas=ch.array(res['betas']),
                   shapedirs=model.shapedirs[:, :, :len(res['betas'])])

    E = res.get('E')
    params = res.get('params')
    return (sv, res['kp'][0], res['kp'][1], res['flength'], E,
            res['landmarks'], res['cam_t'], res['cam_rt'], res['cam_k'],
            params)
Example #4
    def test_depth_image(self):
        # Create renderer
        import chumpy as ch
        from renderer import DepthRenderer
        rn = DepthRenderer()

        # Assign attributes to renderer
        from util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0,0,0]), rotation=ch.zeros(3))
        m.v = m.v * .01
        m.v[:,2] += 4
        w, h = (320, 240)
        from camera import ProjectPoints
        rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=m.v, f=m.f, vc=m.vc*0+1, bgcolor=ch.zeros(3))
        
        # import time
        # tm = time.time()
        # rn.r
        # print 'took %es' % (time.time() - tm)

        # print np.min(rn.r.ravel())
        # print np.max(rn.r.ravel())
        self.assertLess(np.abs(np.min(rn.r.ravel()) - 3.98), 1e-5)
        self.assertLess(np.abs(np.min(m.v[:,2]) - np.min(rn.r.ravel())), 1e-5)
        self.assertLess(np.abs(rn.r[h // 2, w // 2] - 3.98), 1e-5)
Example #5
    def test_nandivide(self):
        foo = ch.array(np.random.randn(16).reshape((4, 4)))
        bar = ch.array(np.random.randn(16).reshape((4, 4)))
        bar[2, 2] = 0
        self.assertEqual(ch.NanDivide(foo, bar)[2, 2].r, 0.)
        foo[2, 2] = 0
        self.assertEqual(ch.NanDivide(foo, bar)[2, 2].r, 0.)
Example #6
def getHandModelPoseCoeffsMultiFrame(numComp, numFrames, isOpenGLCoord):
    chGlobalPoseCoeff = ch.zeros((numComp, ))
    chGlobalBeta = ch.zeros((10, ))

    chRotList = []
    chTransList = []
    mList = []

    for i in range(numFrames):
        chRot = ch.zeros((3, ))
        if isOpenGLCoord:
            chTrans = ch.array([0., 0., -0.5])
        else:
            chTrans = ch.array([0., 0., 0.5])
        chRotList.append(chRot)
        chTransList.append(chTrans)

        m = load_model_withInputs_poseCoeffs(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         '../mano/models/MANO_RIGHT.pkl'),
            chRot=chRot,
            chTrans=chTrans,
            chPoseCoeff=chGlobalPoseCoeff,
            chBetas=chGlobalBeta,
            ncomps=numComp)
        mList.append(m)

    return mList, chRotList, chGlobalPoseCoeff, chTransList, chGlobalBeta
Example #7
    def test_depth_image(self):
        # Create renderer
        import chumpy as ch
        from .renderer import DepthRenderer
        rn = DepthRenderer()

        # Assign attributes to renderer
        from .util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0, 0, 0]), rotation=ch.zeros(3))
        m.v = m.v * .01
        m.v[:, 2] += 4
        w, h = (320, 240)
        from .camera import ProjectPoints
        rn.camera = ProjectPoints(v=m.v,
                                  rt=ch.zeros(3),
                                  t=ch.zeros(3),
                                  f=ch.array([w, w]) / 2.,
                                  c=ch.array([w, h]) / 2.,
                                  k=ch.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=m.v, f=m.f, vc=m.vc * 0 + 1, bgcolor=ch.zeros(3))

        # import time
        # tm = time.time()
        # rn.r
        # print 'took %es' % (time.time() - tm)

        # print np.min(rn.r.ravel())
        # print np.max(rn.r.ravel())
        self.assertLess(np.abs(np.min(rn.r.ravel()) - 3.98), 1e-5)
        self.assertLess(np.abs(np.min(m.v[:, 2]) - np.min(rn.r.ravel())), 1e-5)
        self.assertLess(np.abs(rn.r[h // 2, w // 2] - 3.98), 1e-5)
Example #8
def get_renderer():
    import chumpy as ch
    from opendr.everything import *

    # Load mesh
    m = load_mesh('/Users/matt/geist/OpenDR/test_dr/nasa_earth.obj')
    m.v += ch.array([0, 0, 3.])
    w, h = (320, 240)
    trans = ch.array([[0, 0, 0]])

    # Construct renderer
    rn = TexturedRenderer()
    rn.camera = ProjectPoints(v=m.v,
                              rt=ch.zeros(3),
                              t=ch.zeros(3),
                              f=ch.array([w, w]) / 2.,
                              c=ch.array([w, h]) / 2.,
                              k=ch.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=trans + m.v,
           f=m.f,
           texture_image=m.texture_image[:, :, ::-1],
           ft=m.ft,
           vt=m.vt,
           bgcolor=ch.zeros(3))
    rn.vc = SphericalHarmonics(vn=VertNormals(v=rn.v, f=rn.f),
                               components=ch.array([4., 0., 0., 0.]),
                               light_color=ch.ones(3))

    return rn
Example #9
def chumpy_get_H(p1, p2):
    """ Compute differentiable homography from p1 to p2.
    
    Parameters
    ----------
    p1,p2 : array_like, shape (4,2)
        Containing points to match.
    """
    xmin = 0
    ymin = 0
    xmax = 1024
    ymax = 576
    p1 = 2 * p1 / ch.array([[xmax, ymax]])
    p1 = p1 - 1.0
    p2 = 2 * p2 / ch.array([[xmax, ymax]])
    p2 = p2 - 1.0

    N = p1.shape[0]
    A1 = ch.vstack((ch.zeros((3, N)), -p1.T, -ch.ones(
        (1, N)), p2[:, 1] * p1[:, 0], p2[:, 1] * p1[:, 1], p2[:, 1])).T
    A2 = ch.vstack((p1.T, ch.ones((1, N)), ch.zeros(
        (3, N)), -p2[:, 0] * p1[:, 0], -p2[:, 0] * p1[:, 1], -p2[:, 0])).T
    A = ch.vstack((A1, A2))

    U, S, V = ch.linalg.svd(A.T.dot(A))
    H_new = V[-1, :].reshape((3, 3))

    # Re-normalize
    ML = ch.array([[xmax / 2.0, 0.0, xmax / 2.0], [0, ymax / 2.0, ymax / 2.0],
                   [0, 0, 1.0]])
    MR = ch.array([[2.0 / xmax, 0.0, -1.0], [0, 2.0 / ymax, -1.0], [0, 0,
                                                                    1.0]])
    H_new = ML.dot(H_new).dot(MR)

    return H_new / H_new[2, 2]
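For intuition, here is a minimal usage sketch for the DLT homography above; the four correspondences are invented for illustration, and numpy/chumpy are assumed to be importable as in the other examples:

import numpy as np
import chumpy as ch

# four hypothetical correspondences (roughly a shifted quadrilateral)
p1 = ch.array([[100., 100.], [900., 120.], [880., 500.], [120., 480.]])
p2 = ch.array([[110., 105.], [905., 130.], [890., 510.], [125., 490.]])
H = chumpy_get_H(p1, p2)

# map the first source point and de-homogenize; it should land on p2[0]
src = np.append(p1[0].r, 1.0)
dst = H.r.dot(src)
print(dst[:2] / dst[2])   # ~ [110. 105.]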
Example #10
  def fit_joints(both_joints, n_pose_params=15, shape_sigma=10.0,
                 save_filename=None):
    """
    Fits the MANO model to hand joint 3D locations
    both_joints: tuple of length 2, 21 joints per hand, e.g. output of ContactPose.hand_joints()
    n_pose_params: number of pose parameters (excluding 3 global rotation params)
    shape_sigma: reciprocal of shape regularization strength
    save_filename: file where the fitting output will be saved in JSON format
    """
    mano_params = []
    for hand_idx, joints in enumerate(both_joints):
      if joints is None:  # hand is not present
        mano_params.append(mano_param_dict(n_pose_params))  # dummy
        continue
      cp_joints = openpose2mano(joints)

      # MANO model
      m = mutils.load_mano_model(MANOFitter._mano_dicts[hand_idx],
                                 ncomps=n_pose_params, flat_hand_mean=False)
      m.betas[:] = np.zeros(m.betas.size)
      m.pose[:]  = np.zeros(m.pose.size)
      mano_joints = mano_joints_with_fingertips(m)
      mano_joints_np = np.array([[float(mm) for mm in m] for m in mano_joints])

      # align palm
      cp_palm = get_palm_joints(np.asarray(cp_joints))
      mano_palm = get_palm_joints(np.asarray(mano_joints_np))
      mTc = register_pcs(cp_palm, mano_palm)
      cp_joints = np.dot(mTc, np.vstack((cp_joints.T, np.ones(len(cp_joints)))))
      cp_joints = cp_joints[:3].T
      cp_joints = ch.array(cp_joints)

      # set up objective
      objective = [m-c for m,c in zip(mano_joints, cp_joints)]
      mean_betas = ch.array(np.zeros(m.betas.size))
      objective.append((m.betas - mean_betas) / shape_sigma)
      # optimize
      ch.minimize(objective, x0=(m.pose, m.betas, m.trans), method='dogleg')

      p = mano_param_dict(n_pose_params)
      p['pose']  = np.array(m.pose).tolist()
      p['betas'] = np.array(m.betas).tolist()
      p['valid'] = True
      p['mTc']['translation'] = (mTc[:3, 3] - np.array(m.trans)).tolist()
      p['mTc']['rotation'] = txq.mat2quat(mTc[:3, :3]).tolist()
      mano_params.append(p)

      # # to access hand mesh vertices and faces
      # vertices = np.array(m.r)
      # vertices = mutils.tform_points(np.linalg.inv(mTc), vertices)
      # faces = np.array(m.f)

    if save_filename is not None:
      with open(save_filename, 'w') as f:
        json.dump(mano_params, f, indent=4, separators=(',', ':'))
      print('{:s} written'.format(save_filename))
    return mano_params
Example #11
def scalecam(cam, imres):
    if imres == 1:
        return cam
    # Returns a camera which shares camera parameters by reference,
    # and whose scale is resized according to imres
    return ProjectPoints(rt=cam.rt,
                         t=cam.t,
                         f=array(cam.f.r * imres),
                         c=array(cam.c.r * imres),
                         k=cam.k)
Example #12
def _fit_rot_trans(
        model,  # pylint: disable=too-many-arguments, too-many-locals
        j2d,
        center,
        init_t,
        init_pose,
        conf,
        flength):
    """Find a rotation and translation to minimize the projection error of the pose."""
    opt_pose = _ch.array(init_pose)  # pylint: disable=no-member
    opt_trans = _ch.array(init_t)  # pylint: disable=no-member
    (_, A_global) = _global_rigid_transformation(opt_pose,
                                                 model.J,
                                                 model.kintree_table,
                                                 xp=_ch)
    Jtr = _ch.vstack([g[:3, 3] for g in A_global]) + opt_trans  # pylint: disable=no-member

    cam = _ProjectPoints(f=_np.array([flength, flength]),
                         rt=_np.zeros(3),
                         t=_np.zeros(3),
                         k=_np.zeros(5),
                         c=center)
    cam_3d = _ProjectPoints3D(f=_np.array([flength, flength]),
                              rt=_np.zeros(3),
                              t=_np.zeros(3),
                              k=_np.zeros(5),
                              c=center)
    cam.v = Jtr
    cam_3d.v = Jtr
    free_variables = [opt_pose[:3],
                      opt_trans]  # Optimize global rotation and translation.
    j2d_ids = range(12)
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
    #_CID_TORSO = [2, 3, 8, 9]
    #torso_smpl_ids = [2, 1, 17, 16]
    _ch.minimize(
        {
            'j2d': (j2d.T[j2d_ids] - cam[smpl_ids]).T * conf[j2d_ids],
            # 'dev^2': 1e2 * (opt_trans[2] - init_t[2])
        },
        x0=free_variables,
        method='dogleg',
        callback=None,  #on_step_vis,
        options={
            'maxiter': 100,
            'e_3': .0001,
            'disp': 0
        })
    _LOGGER.debug("Global rotation: %s, global translation: %s.",
                  str(opt_pose[:3].r), str(opt_trans.r))
    _LOGGER.debug("Points 3D: %s.", str(cam_3d.r[:10, :]))
    _LOGGER.debug("Points 2D: %s.", str(cam.r[:10, :]))
    return opt_pose[:3].r, opt_trans.r, cam_3d.r[:, 2].min(), cam_3d.r[:, 2].max()
Example #13
    def __init__(self, obj_path, model_path, w=224, h=224):
        self.m = get_body_mesh(obj_path, trans=ch.array([0, 0, 4]),
                               rotation=ch.array([np.pi / 2, 0, 0]))
        # Load SMPL model (here we load the female model)
        self.body = load_model(model_path)
        self.w = w
        self.h = h
        self.img_size = min(self.w, self.h)

        self.num_cam = 3
        self.num_theta = 72
        self.num_beta = 10
Example #14
    def test_derivatives2(self):
        import chumpy as ch
        import numpy as np
        from .renderer import DepthRenderer

        rn = DepthRenderer()

        # Assign attributes to renderer
        from .util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0, 0, 4]), rotation=ch.zeros(3))
        w, h = (320, 240)
        from .camera import ProjectPoints
        rn.camera = ProjectPoints(v=m.v,
                                  rt=ch.zeros(3),
                                  t=ch.zeros(3),
                                  f=ch.array([w, w]) / 2.,
                                  c=ch.array([w, h]) / 2.,
                                  k=ch.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=m.v, f=m.f, bgcolor=ch.zeros(3))

        if visualize:
            import matplotlib.pyplot as plt
            plt.ion()
            plt.figure()

        for which in range(3):
            r1 = rn.r

            adder = np.random.rand(rn.v.r.size).reshape(rn.v.r.shape) * .01
            change = rn.v.r * 0 + adder
            dr_pred = rn.dr_wrt(rn.v).dot(change.ravel()).reshape(rn.shape)
            rn.v = rn.v.r + change

            r2 = rn.r
            dr_emp = r2 - r1

            #print np.mean(np.abs(dr_pred-dr_emp))

            self.assertLess(np.mean(np.abs(dr_pred - dr_emp)), .024)

            if visualize:
                plt.subplot(2, 3, which + 1)
                plt.imshow(dr_pred)
                plt.clim(-.01, .01)
                plt.title('pred')
                plt.subplot(2, 3, which + 4)
                plt.imshow(dr_emp)
                plt.clim(-.01, .01)
                plt.title('emp')
                plt.draw()
                plt.show()
Example #15
def test_earth():
    m = get_earthmesh(trans=ch.array([0, 0, 0]), rotation=ch.zeros(3))
    # Create V, A, U, f: geometry, brightness, camera, renderer
    V = ch.array(m.v)
    A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
                           components=[3., 2., 0., 0., 0., 0., 0., 0., 0.],
                           light_color=ch.ones(3))
    # camera
    U = ProjectPoints(v=V,
                      f=[w, w],
                      c=[w / 2., h / 2.],
                      k=ch.zeros(5),
                      t=ch.zeros(3),
                      rt=ch.zeros(3))
    f = TexturedRenderer(vc=A,
                         camera=U,
                         f=m.f,
                         bgcolor=[0., 0., 0.],
                         texture_image=m.texture_image,
                         vt=m.vt,
                         ft=m.ft,
                         frustum={
                             'width': w,
                             'height': h,
                             'near': 1,
                             'far': 20
                         })

    # Parameterize the vertices
    translation, rotation = ch.array([0, 0, 8]), ch.zeros(3)
    f.v = translation + V.dot(Rodrigues(rotation))

    observed = f.r
    np.random.seed(1)
    # this is reactive
    # in the sense that changes to values will affect function which depend on them.
    translation[:] = translation.r + np.random.rand(3)
    rotation[:] = rotation.r + np.random.rand(3) * .2
    # Create the energy
    E_raw = f - observed
    E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')

    Image.fromarray((observed * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "reference.png"))
    step = 0
    Image.fromarray((f.r * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "step_{:05d}.png".format(step)))

    print('OPTIMIZING TRANSLATION, ROTATION, AND LIGHT PARAMS')
    free_variables = [translation, rotation]
    ch.minimize({'pyr': E_pyr}, x0=free_variables, callback=create_callback(f))
    ch.minimize({'raw': E_raw}, x0=free_variables, callback=create_callback(f))
Example #16
    def get_cam_params(self):

        v_raw = np.sin(np.arange(900)).reshape((-1,3))
        v_raw[:, 2] -= 2
        
        rt = ch.zeros(3)
        t = ch.zeros(3)
        f = ch.array([500,500])
        c = ch.array([320,240])
        k = ch.zeros(5)

        cam_params = {'v': ch.Ch(v_raw), 'rt': rt, 't': t, 'f': f, 'c': c, 'k': k}
        return cam_params
Example #18
def prepare_network_input_semantic(img_dir, zip_intermediate_results=True):
    log_str = ''
    img_size = 384

    # create semantic map
    smpl = objio.load_obj_data_binary(img_dir[:-4] + '_smpl_2.obj')
    smpl_volume = voxel_util.voxelize_2(img_dir[:-4] + '_smpl_2.obj',
                                        dim_h, dim_w, VOXELIZER_PATH)
    smpl_volume = voxel_util.binary_fill_from_corner_3D(smpl_volume)
    smpl_v_volume = voxel_util.calc_vmap_volume(smpl_volume, smpl['v'],
                                                dim_h, dim_w, voxel_size)
    mesh_volume = np.zeros((dim_w, dim_h, dim_w), dtype=np.float32)
    sio.savemat(img_dir[:-4] + '_volume.mat',
                {'mesh_volume': mesh_volume, 'smpl_v_volume': smpl_v_volume},
                do_compression=True)

    # render semantic map
    smpl_v_ = renderers.compress_along_z_axis_single(smpl['v'])
    u = renderers.project_vertices(smpl_v_, img_size, img_size,
                                   cam_r=ch.array((3.14, 0, 0)),
                                   cam_t=ch.array((0, 0, 0)))
    smpl_vc = dutil.get_smpl_semantic_code()
    v_map = renderers.render_color_model_without_lighting(img_size, img_size,
                                                          smpl_v_, smpl_vc,
                                                          smpl['f'], u,
                                                          bg_img=None)
    v_map = np.float32(np.copy(v_map))
    cv.imwrite(img_dir[:-4] + '_vmap.png', np.uint8(v_map*255))

    # test orthogonal projection from SMPL's volume
    smpl_volume_proj = np.max(smpl_volume, axis=-1)
    smpl_volume_proj = np.flipud(np.transpose(smpl_volume_proj))
    smpl_volume_proj = cv.resize(np.uint8(smpl_volume_proj) * 255, (256, 384))
    cv.imwrite(img_dir[:-4] + '_test_proj.png', smpl_volume_proj)

    # save smpl semantic volume for visualization
    voxel_util.save_v_volume(smpl_v_volume, img_dir[:-4] + '_v_volume.obj',
                             dim_h, dim_w, voxel_size)

    suffixes = ['_final.txt', '_smpl.obj', '_smpl_2.obj', '_smpl_proj.png',
                '_test_proj.png', '_v_volume.obj']
    if zip_intermediate_results:
        z = zipfile.ZipFile(img_dir[:-4] + '_intermediate_prepare.zip', 'w')
        for suffix in suffixes:
            z.write(img_dir[:-4] + suffix)
            os.remove(img_dir[:-4] + suffix)
        z.close()

    print(log_str)
    return log_str
Example #19
def initialize_camera(model,
                      j2d,
                      img,
                      init_pose,
                      flength=flength):
    """Initialize camera translation and body orientation
    :param model: SMPL model
    :param j2d: 14x2 array of CNN joints
    :param img: h x w x 3 image
    :param init_pose: 72D vector of pose parameters used for initialization
    :param flength: camera focal length (kept fixed)
    :param pix_thsh: threshold (in pixel), if the distance between shoulder joints in 2D
                     is lower than pix_thsh, the body orientation as ambiguous (so a fit is run on
                     both the estimated one and its flip)
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the estimated camera,
              a boolean deciding if both the optimized body orientation and its flip should be
              considered, 3D vector for the body orientation
    """
    # optimize camera translation and body orientation based on torso joints
    # LSP torso ids:
    # 2=right hip, 3=left hip, 8=right shoulder, 9=left shoulder
    torso_cids = [2, 3, 8, 9]
    # corresponding SMPL torso ids
    torso_smpl_ids = [2, 1, 17, 16]

    center = np.array([img.shape[1] / 2, img.shape[0] / 2])
    rt = ch.zeros(3)    # initial camera rotation
    init_t = guess_init(model, flength, j2d, init_pose)
    t = ch.array(init_t)    # initial camera translation

    opt_pose = ch.array(init_pose)
    _, A_global = global_rigid_transformation(opt_pose, model.J, model.kintree_table, xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])

    # initialize the camera and project SMPL joints
    cam = ProjectPoints(f=np.array([flength, flength]), rt=rt, t=t, k=np.zeros(5), c=center)
    cam.v = Jtr     # project SMPL joints

    # optimize for camera translation and body orientation
    free_variables = [cam.t, opt_pose[:3]]
    ch.minimize(
        # data term defined over torso joints...
        {'cam': j2d[torso_cids] - cam[torso_smpl_ids],
         # ...plus a regularizer for the camera translation
         'cam_t': 1e2 * (cam.t[2] - init_t[2])},
        x0=free_variables, method='dogleg', callback=None,
        options={'maxiter': 100, 'e_3': .0001, 'disp': 0})

    return cam, opt_pose[:3].r
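Most fitting examples on this page follow the same chumpy pattern as the call above: build differentiable residual terms, then hand a dict of named energies and a list of free variables to ch.minimize. A self-contained toy sketch of that pattern (data made up, not from any example here):

import numpy as np
import chumpy as ch

target = np.array([1., 2., 3.])
x = ch.zeros(3)                     # free variable, updated in place
ch.minimize({'data': x - target},   # named residuals, squared and summed
            x0=[x],
            method='dogleg',
            options={'maxiter': 100, 'e_3': .0001, 'disp': 0})
print(x.r)                          # ~ [1. 2. 3.]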
Example #20
    def render(self, thetas, texture_bgr, rotate=np.array([0, 0, 0]), background_img=None):
        """
        get the rendered image and rendered silhouette
        :param thetas: model parameters, 3 * camera parameter + 72 * body pose + 10 * body shape
        :param texture_bgr: texture image in bgr format
        :return: the rendered image and deviation of rendered image to texture image
        (rendered image, deviation of rendered image, silhouette)
        """
        self.set_texture(texture_bgr)
        thetas = thetas.reshape(-1)
        cams = thetas[:self.num_cam]
        theta = thetas[self.num_cam: (self.num_cam + self.num_theta)]
        beta = thetas[(self.num_cam + self.num_theta):]

        self.body.pose[:] = theta
        self.body.betas[:] = beta

        #
        # size = cams[0] * min(self.w, self.h)
        # position = cams[1:3] * min(self.w, self.h) / 2 + min(self.w, self.h) / 2
        """
        ####################################################################
        ATTENTION!
        I do not know why the flength is 500.
        But it worked
        ####################################################################
        """

        texture_rn = TexturedRenderer()
        texture_rn.camera = ProjectPoints(v=self.body, rt=rotate, t=ch.array([0, 0, 2]),
                                          f=np.ones(2) * self.img_size * 0.62,
                                          c=np.array([self.w / 2, self.h / 2]),
                                          k=ch.zeros(5))
        texture_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
        texture_rn.set(v=self.body, f=self.m.f, vc=self.m.vc, texture_image=self.m.texture_image, ft=self.m.ft,
                       vt=self.m.vt)
        if background_img is not None:
            texture_rn.background_image = background_img / 255. if background_img.max() > 1 else background_img

        silhouette_rn = ColoredRenderer()
        silhouette_rn.camera = ProjectPoints(v=self.body, rt=rotate, t=ch.array([0, 0, 2]),
                                             f=np.ones(2) * self.img_size * 0.62,
                                             c=np.array([self.w / 2, self.h / 2]),
                                             k=ch.zeros(5))
        silhouette_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
        silhouette_rn.set(v=self.body, f=self.m.f, vc=np.ones_like(self.body), bgcolor=np.zeros(3))

        return texture_rn.r, texture_dr_wrt(texture_rn, silhouette_rn.r), silhouette_rn.r
Example #21
def ready_arguments(fname_or_dict):

    if not isinstance(fname_or_dict, dict):
        dd = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1')
    else:
        dd = fname_or_dict

    backwards_compatibility_replacements(dd)                   # rename legacy keys in the model dict

    want_shapemodel = 'shapedirs' in dd
    # kintree_table is 2 x 24: row 0 holds parent ids, row 1 joint ids;
    # each joint carries 3 axis-angle rotation parameters
    nposeparms = dd['kintree_table'].shape[1]*3
    if 'trans' not in dd:
        dd['trans'] = np.zeros(3)                              # global translation, defaults to the origin
    if 'pose' not in dd:
        dd['pose'] = np.zeros(nposeparms)                      # pose is 24*3 = 72 parameters
    if 'shapedirs' in dd and 'betas' not in dd:                # shapedirs is 6890 x 3 x 10
        dd['betas'] = np.zeros(dd['shapedirs'].shape[-1])      # one beta per shape direction (10)

    for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']:
        if (s in dd) and not hasattr(dd[s], 'dterms'):
            dd[s] = ch.array(dd[s])

    if want_shapemodel:
        dd['v_shaped'] = dd['shapedirs'].dot(dd['betas'])+dd['v_template']
        v_shaped = dd['v_shaped']
        J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:,0])        
        J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:,1])        
        J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:,2])        
        dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T    
        dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
    else:    
        dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
            
    return dd
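The three MatVecMult calls above assemble the joint regressor product column by column; for intuition, a dense numpy equivalent with hypothetical shapes (J_regressor 24x6890, v_shaped 6890x3, as in SMPL):

import numpy as np

J_regressor = np.random.rand(24, 6890)   # hypothetical dense regressor
v_shaped = np.random.rand(6890, 3)
J = np.vstack((J_regressor.dot(v_shaped[:, 0]),
               J_regressor.dot(v_shaped[:, 1]),
               J_regressor.dot(v_shaped[:, 2]))).T
assert J.shape == (24, 3)                # one 3D position per joint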
Example #22
    def sample_view(self, novel_view):
        if novel_view:
            cam_t = ch.array((0, 0, np.random.uniform(1.5, 2.5)))

            theta = np.random.uniform(0, np.pi)
            phi = np.random.uniform(0, 2 * np.pi)
            angle = np.random.uniform(0, 2 * np.pi)
        else:
            cam_t = ch.array((0, 0, 2.0))

            theta = np.random.uniform(0, 0.3)
            phi = np.random.uniform(0, 2 * np.pi)
            angle = np.random.uniform(0, 2 * np.pi)
        cam_r = np.array([np.sin(theta) * np.cos(phi), np.cos(theta), np.sin(theta) * np.sin(phi)])
        cam_r = ch.array(cam_r * angle)
        return cam_t, cam_r
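The returned cam_r is a Rodrigues (axis-angle) vector: a unit direction built from the spherical angles theta and phi, scaled by a rotation magnitude. To inspect the corresponding rotation matrix one could run it through OpenCV's Rodrigues (illustrative values):

import cv2
import numpy as np

theta, phi, angle = 0.2, 1.0, 0.7
axis = np.array([np.sin(theta) * np.cos(phi),
                 np.cos(theta),
                 np.sin(theta) * np.sin(phi)])
R, _ = cv2.Rodrigues(axis * angle)   # 3x3 rotation matrix
print(R)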
Example #23
def compute_imu_data(gender, betas, poses, frame_rate):
    if gender == 'male':
        Jdirs = Jdirs_male
        model = model_male
    else:
        Jdirs = Jdirs_female
        model = model_female

    betas[:] = 0
    J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
        model.v_template.r)

    A_global_list = []
    print('Length of poses: %d' % len(poses))
    for idx, p in enumerate(poses):
        (_, A_global) = global_rigid_transformation(p,
                                                    J_onbetas,
                                                    model.kintree_table,
                                                    xp=ch)
        A_global_list.append(A_global)

    vertex = []
    for idx, p in enumerate(poses):
        model.pose[:] = p
        model.betas[:] = 0
        model.betas[:10] = betas
        tmp = model.r[VERTEX_IDS]
        vertex.append(tmp)  # 6*3

    orientation, acceleration = get_ori_accel(A_global_list, vertex,
                                              frame_rate)

    return orientation, acceleration
Example #24
def guess_init(model, focal_length, j2d, init_pose):
    """Initialize the camera translation via triangle similarity, by using the torso joints        .
    :param model: SMPL model
    :param focal_length: camera focal length (kept fixed)
    :param j2d: 14x2 array of CNN joints
    :param init_pose: 72D vector of pose parameters used for initialization (kept fixed)
    :returns: 3D vector corresponding to the estimated camera translation
    """
    cids = np.arange(0, 12)
    # map from LSP to SMPL joints
    j2d_here = j2d[cids]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    opt_pose = ch.array(init_pose)
    (_, A_global) = global_rigid_transformation(
        opt_pose, model.J, model.kintree_table, xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])
    Jtr = Jtr[smpl_ids].r

    # 9 is L shoulder, 3 is L hip
    # 8 is R shoulder, 2 is R hip
    diff3d = np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
    mean_height3d = np.mean(np.sqrt(np.sum(diff3d**2, axis=1)))

    diff2d = np.array([j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
    mean_height2d = np.mean(np.sqrt(np.sum(diff2d**2, axis=1)))

    est_d = focal_length * (mean_height3d / mean_height2d)
    # just set the z value
    init_t = np.array([0., 0., est_d])
    return init_t
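The depth guess is plain triangle similarity: est_d = focal_length * (mean_height3d / mean_height2d). A quick numeric check with made-up values (flength=5000 is the default used elsewhere on this page):

focal_length = 5000.     # pixels
mean_height3d = 0.5      # mean torso height in model units
mean_height2d = 100.     # mean torso height in the image, in pixels
est_d = focal_length * (mean_height3d / mean_height2d)
print(est_d)             # 25.0 -> camera placed 25 units along +z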
Example #25
def render_color_model_with_lighting(w, h, v, vn, vc, f, u,
                                      sh_comps=None, light_c=ch.ones(3),
                                      vlight_pos=None, vlight_color=None,
                                      bg_img=None):
    """renders colored model with lighting effect"""
    assert(sh_comps is not None or vlight_pos is not None)
    V = ch.array(v)
    A = np.zeros_like(v)

    # SH lighting
    if sh_comps is not None:
        A += vc * SphericalHarmonics(vn=vn, components=sh_comps, light_color=light_c)

    # single point lighting (grey light)
    if vlight_color is not None and vlight_pos is not None \
            and len(vlight_pos.shape) == 1:
        A += LambertianPointLight(f=f, v=v, num_verts=len(v), light_pos=vlight_pos,
                                  light_color=vlight_color, vc=vc)

    # multiple point lighting (grey light)
    if vlight_color is not None and vlight_pos is not None \
            and len(vlight_pos.shape) == 2:
        for vlp in vlight_pos:
            A += LambertianPointLight(f=f, v=v, num_verts=len(v), light_pos=vlp,
                                      light_color=vlight_color, vc=vc)

    black_img = np.zeros((w, h, 3), dtype=np.float32)
    bg_img_ = bg_img if bg_img is not None else black_img

    rn = ColoredRenderer(camera=u, v=V, f=f, vc=A, background_image=bg_img_,
                         frustum={'width': w, 'height': h, 'near': 0.1, 'far': 20})
    return rn.r
Example #26
    def unproject_depth_image(self, depth_image, camera_space=False):
        us = np.arange(depth_image.size) % depth_image.shape[1]
        vs = np.arange(depth_image.size) // depth_image.shape[1]
        ds = depth_image.ravel()
        uvd = ch.array(np.vstack((us.ravel(), vs.ravel(), ds.ravel())).T)
        xyz = self.unproject_points(uvd, camera_space=camera_space)
        return xyz.reshape((depth_image.shape[0], depth_image.shape[1], -1))
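The us/vs arrays above enumerate pixel coordinates in row-major order. A numpy-only illustration on a tiny 2x3 depth image makes the (u, v, depth) stacking concrete:

import numpy as np

depth_image = np.arange(6, dtype=np.float64).reshape(2, 3)
us = np.arange(depth_image.size) % depth_image.shape[1]    # column index
vs = np.arange(depth_image.size) // depth_image.shape[1]   # row index
uvd = np.vstack((us, vs, depth_image.ravel())).T
print(uvd)   # rows (u, v, d): (0,0,0), (1,0,1), (2,0,2), (0,1,3), ...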
Example #27
    def _global_rigid_transformation(self):
        results = {}
        pose = self.pose.reshape((-1, 3))
        parent = {
            i: self.kintree_table[0, i]
            for i in range(1, self.kintree_table.shape[1])
        }

        with_zeros = lambda x: ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))
        pack = lambda x: ch.hstack([ch.zeros((4, 3)), x.reshape((4, 1))])

        results[0] = with_zeros(
            ch.hstack((Rodrigues(pose[0, :]), self.J[0, :].reshape((3, 1)))))

        for i in range(1, self.kintree_table.shape[1]):
            results[i] = results[parent[i]].dot(
                with_zeros(
                    ch.hstack((
                        Rodrigues(pose[i, :]),  # rotation around bone endpoint
                        (self.J[i, :] - self.J[parent[i], :]).reshape(
                            (3, 1))  # bone
                    ))))

        results = [results[i] for i in sorted(results.keys())]
        results_global = results

        # subtract rotated J position
        results2 = [
            results[i] -
            (pack(results[i].dot(ch.concatenate((self.J[i, :], [0])))))
            for i in range(len(results))
        ]
        result = ch.dstack(results2)

        return result, results_global
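The two lambdas above build 4x4 homogeneous matrices: with_zeros appends the row [0, 0, 0, 1] to a 3x4 [R|t] block, and pack embeds a 4-vector as the translation column of an otherwise-zero 4x4 matrix. A numpy-only illustration:

import numpy as np

Rt = np.hstack((np.eye(3), np.array([[1.], [2.], [3.]])))  # 3x4 [R|t]
T = np.vstack((Rt, np.array([[0., 0., 0., 1.]])))          # with_zeros
print(T.shape)   # (4, 4)

col = np.array([[1.], [2.], [3.], [0.]])
P = np.hstack((np.zeros((4, 3)), col))                     # pack
print(P[:, 3])   # [1. 2. 3. 0.] -- the translation column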
Example #28
def render_mask(w, h, v, f, u):
    """renders silhouette"""
    V = ch.array(v)
    A = np.ones(v.shape)
    rn = ColoredRenderer(camera=u, v=V, f=f, vc=A, bgcolor=ch.zeros(3),
                         frustum={'width': w, 'height': h, 'near': 0.1, 'far': 20})
    return rn.r
Example #29
def guess_init(model, focal_length, j2d, init_pose):
    """Initialize the camera translation via triangle similarity, by using the torso joints        .
    :param model: SMPL model
    :param focal_length: camera focal length (kept fixed)
    :param j2d: 14x2 array of CNN joints
    :param init_pose: 72D vector of pose parameters used for initialization (kept fixed)
    :returns: 3D vector corresponding to the estimated camera translation
    """
    cids = np.arange(0, 12)
    # map from LSP to SMPL joints
    j2d_here = j2d[cids]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    opt_pose = ch.array(init_pose)
    (_, A_global) = global_rigid_transformation(opt_pose,
                                                model.J,
                                                model.kintree_table,
                                                xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])
    Jtr = Jtr[smpl_ids].r

    # 9 is L shoulder, 3 is L hip
    # 8 is R shoulder, 2 is R hip
    diff3d = np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
    mean_height3d = np.mean(np.sqrt(np.sum(diff3d**2, axis=1)))

    diff2d = np.array([j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
    mean_height2d = np.mean(np.sqrt(np.sum(diff2d**2, axis=1)))

    est_d = focal_length * (mean_height3d / mean_height2d)
    # just set the z value
    init_t = np.array([0., 0., est_d])
    return init_t
Example #31
def ready_arguments(fname_or_dict):

    if not isinstance(fname_or_dict, dict):
        dd = pickle.load(open(fname_or_dict, 'rb'), encoding="latin1")
    else:
        dd = fname_or_dict
        
    backwards_compatibility_replacements(dd)
        
    want_shapemodel = 'shapedirs' in dd
    nposeparms = dd['kintree_table'].shape[1]*3

    if 'trans' not in dd:
        dd['trans'] = np.zeros(3)
    if 'pose' not in dd:
        dd['pose'] = np.zeros(nposeparms)
    if 'shapedirs' in dd and 'betas' not in dd:
        dd['betas'] = np.zeros(dd['shapedirs'].shape[-1])

    for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']:
        if (s in dd) and not hasattr(dd[s], 'dterms'):
            dd[s] = ch.array(dd[s])

    if want_shapemodel:
        dd['v_shaped'] = dd['shapedirs'].dot(dd['betas'])+dd['v_template']
        v_shaped = dd['v_shaped']
        J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:,0])        
        J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:,1])        
        J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:,2])        
        dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T    
        dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
    else:    
        dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))
            
    return dd
Example #32
    def test_derivatives2(self):
        import chumpy as ch
        import numpy as np
        from renderer import DepthRenderer

        rn = DepthRenderer()

        # Assign attributes to renderer
        from util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
        w, h = (320, 240)
        from camera import ProjectPoints
        rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=m.v, f=m.f, bgcolor=ch.zeros(3))

        if visualize:
            import matplotlib.pyplot as plt
            plt.ion()
            plt.figure()

        for which in range(3):
            r1 = rn.r

            adder = np.random.rand(rn.v.r.size).reshape(rn.v.r.shape)*.01
            change = rn.v.r * 0 + adder
            dr_pred = rn.dr_wrt(rn.v).dot(change.ravel()).reshape(rn.shape)
            rn.v = rn.v.r + change

            r2 = rn.r
            dr_emp = r2 - r1

            #print np.mean(np.abs(dr_pred-dr_emp))

            self.assertLess(np.mean(np.abs(dr_pred-dr_emp)), .024)

            if visualize:
                plt.subplot(2,3,which+1)
                plt.imshow(dr_pred)
                plt.clim(-.01,.01)
                plt.title('pred')
                plt.subplot(2,3,which+4)
                plt.imshow(dr_emp)
                plt.clim(-.01,.01)
                plt.title('emp')
                plt.draw()
                plt.show()
Example #33
def guess_init(
        model,  # pylint: disable=too-many-locals, too-many-arguments
        focal_length,
        j2d,
        weights,
        init_pose,
        use_gt_guess):
    """Initialize the camera depth using the torso points."""
    cids = _np.arange(0, 12)
    j2d_here = j2d[cids]
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
    # The initial pose is a t-pose with all body parts planar to the camera
    # (i.e., they have their maximum length).
    opt_pose = _ch.array(init_pose)  # pylint: disable=no-member
    (_, A_global) = _global_rigid_transformation(opt_pose,
                                                 model.J,
                                                 model.kintree_table,
                                                 xp=_ch)
    Jtr = _ch.vstack([g[:3, 3] for g in A_global])  # pylint: disable=no-member
    Jtr = Jtr[smpl_ids].r

    if use_gt_guess:
        # More robust estimate:
        # Since there is no fore'lengthening', only foreshortening, take the longest
        # visible body part as a robust estimate in the case of noise-free data.
        longest_part = None
        longest_part_length = -1.
        for conn in connections_lsp:
            if (conn[0] < 12 and conn[1] < 12 and weights[conn[0]] > 0.
                    and weights[conn[1]] > 0.):
                part_length_proj_gt = _np.sqrt(
                    _np.sum(_np.array(j2d_here[conn[0]] -
                                      j2d_here[conn[1]])**2,
                            axis=0))
                if part_length_proj_gt > longest_part_length:
                    longest_part_length = part_length_proj_gt
                    longest_part = conn
        #length2d = _np.sqrt(_np.sum(
        #    _np.array(j2d_here[longest_part[0]] - j2d_here[longest_part[1]])**2, axis=0))
        part_length_3D = _np.sqrt(
            _np.sum(_np.array(Jtr[longest_part[0]] - Jtr[longest_part[1]])**2,
                    axis=0))
        est_d = focal_length * (part_length_3D / longest_part_length)
    else:
        diff3d = _np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
        mean_height3d = _np.mean(_np.sqrt(_np.sum(diff3d**2, axis=1)))
        diff2d = _np.array(
            [j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
        mean_height2d = _np.mean(_np.sqrt(_np.sum(diff2d**2, axis=1)))
        f = focal_length  # pylint: disable=redefined-outer-name
        if mean_height2d == 0.:
            _LOGGER.warning(
                "Depth can not be correctly estimated. Guessing wildly.")
            est_d = 60.
        else:
            est_d = f * (mean_height3d / mean_height2d)
    init_t = _np.array([0., 0., est_d])
    return init_t
Example #34
    def on_changed(self, which):
        if 'model' in which:
            if not isinstance(self.model, dict):
                dd = pkl.load(open(self.model, "rb"), encoding="latin1")
            else:
                dd = self.model

            backwards_compatibility_replacements(dd)

            # for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']:
            for s in ['posedirs', 'shapedirs']:
                if (s in dd) and not hasattr(dd[s], 'dterms'):
                    dd[s] = ch.array(dd[s])

            self.f = dd['f']
            self.shapedirs = dd['shapedirs']
            self.J_regressor = dd['J_regressor']
            if 'J_regressor_prior' in dd:
                self.J_regressor_prior = dd['J_regressor_prior']
            self.bs_type = dd['bs_type']
            self.bs_style = dd['bs_style']
            self.weights = ch.array(dd['weights'])
            if 'vert_sym_idxs' in dd:
                self.vert_sym_idxs = dd['vert_sym_idxs']
            if 'weights_prior' in dd:
                self.weights_prior = dd['weights_prior']
            self.kintree_table = dd['kintree_table']
            self.posedirs = dd['posedirs']

            if not hasattr(self, 'betas'):
                self.betas = ch.zeros(self.shapedirs.shape[-1])

            if not hasattr(self, 'trans'):
                self.trans = ch.zeros(3)

            if not hasattr(self, 'pose'):
                self.pose = ch.zeros(72)

            if not hasattr(self, 'v_template'):
                self.v_template = ch.array(dd['v_template'])

            if not hasattr(self, 'v_personal'):
                self.v_personal = ch.zeros_like(self.v_template)

            self._set_up()
Example #35
def make_predicted_mesh_neutral(predicted_params_path, flame_model_path):
    params = np.load(predicted_params_path,
                     allow_pickle=True,
                     encoding='latin1')
    #print(params)
    params = params[()]
    pose = np.zeros(15)
    #expression = np.zeros(100)
    shape = np.hstack(
        (params['shape'], np.zeros(300 - params['shape'].shape[0])))
    #pose = np.hstack((params['pose'], np.zeros(15-params['pose'].shape[0])))
    expression = np.hstack(
        (params['expression'], np.zeros(100 - params['expression'].shape[0])))
    flame_general_model = load_model(flame_model_path)
    generated_neutral_mesh = verts_decorated(
        #ch.array([0.0,0.0,0.0]),
        ch.array(params['cam']),
        ch.array(pose),
        ch.array(flame_general_model.r),
        flame_general_model.J_regressor,
        ch.array(flame_general_model.weights),
        flame_general_model.kintree_table,
        flame_general_model.bs_style,
        flame_general_model.f,
        bs_type=flame_general_model.bs_type,
        posedirs=ch.array(flame_general_model.posedirs),
        betas=ch.array(
            np.hstack((shape, expression))
        ),  #betas=ch.array(np.concatenate((theta[0,75:85], np.zeros(390)))), #
        shapedirs=ch.array(flame_general_model.shapedirs),
        want_Jtr=True)
    # neutral_mesh = Mesh(v=generated_neutral_mesh.r, f=generated_neutral_mesh.f)
    neutral_mesh = trimesh.Trimesh(vertices=generated_neutral_mesh.r,
                                   faces=generated_neutral_mesh.f)
    return neutral_mesh
Example #36
def get_renderer():
    import chumpy as ch
    from opendr.everything import *

    # Load mesh
    m = load_mesh('/Users/matt/geist/OpenDR/test_dr/nasa_earth.obj')
    m.v += ch.array([0,0,3.])
    w, h = (320, 240)
    trans = ch.array([[0,0,0]])

    # Construct renderer
    rn = TexturedRenderer()
    rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=trans+m.v, f=m.f, texture_image=m.texture_image[:,:,::-1], ft=m.ft, vt=m.vt, bgcolor=ch.zeros(3))
    rn.vc = SphericalHarmonics(vn=VertNormals(v=rn.v, f=rn.f), components=ch.array([4.,0.,0.,0.]), light_color=ch.ones(3))

    return rn
Example #37
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       n_betas=10,
                       regs=None,
                       conf=None,
                       viz=False):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joint ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array(
        [1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64)

    if try_both_orient:
        flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
            cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
        flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        # store here the final error for both orientations,
        # and pick the orientation resulting in the lowest error
        errors = []

    svs = []
    cams = []
    for o_id, orient in enumerate(orientations):
        # initialize the shape to the mean shape in the SMPL training set
        betas = ch.zeros(n_betas)

        # initialize the pose by using the optimized body orientation and the
        # pose prior
        init_pose = np.hstack((orient, prior.weights.dot(prior.means)))

        # instantiate the model:
        # verts_decorated allows us to define how many
        # shape coefficients (directions) we want to consider (here, n_betas)
        sv = verts_decorated(
            trans=ch.zeros(3),
            pose=ch.array(init_pose),
            v_template=model.v_template,
            J=model.J_regressor,
            betas=betas,
            shapedirs=model.shapedirs[:, :, :n_betas],
            weights=model.weights,
            kintree_table=model.kintree_table,
            bs_style=model.bs_style,
            f=model.f,
            bs_type=model.bs_type,
            posedirs=model.posedirs)

        # make the SMPL joints depend on betas
        Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i])
                           for i in range(len(betas))])
        J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = global_rigid_transformation(
            sv.pose, J_onbetas, model.kintree_table, xp=ch)
        Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        # add the head joint, corresponding to a vertex...
        Jtr = ch.vstack((Jtr, sv[head_id]))

        # ... and add the joint id to the list
        if o_id == 0:
            smpl_ids.append(len(Jtr) - 1)

        # update the weights using confidence values
        weights = (base_weights * conf[cids]
                   if conf is not None else base_weights)

        # project SMPL joints on the image plane using the estimated camera
        cam.v = Jtr

        # data term: distance between observed and estimated joints in 2D
        obj_j2d = lambda w, sigma: (
            w * weights.reshape((-1, 1)) * GMOf((j2d[cids] - cam[smpl_ids]), sigma))

        # mixture of gaussians pose prior
        pprior = lambda w: w * prior(sv.pose)
        # joint angles pose prior, defined over a subset of pose parameters:
        # 55: left elbow,  90deg bend at -np.pi/2
        # 58: right elbow, 90deg bend at np.pi/2
        # 12: left knee,   90deg bend at np.pi/2
        # 15: right knee,  90deg bend at np.pi/2
        alpha = 10
        my_exp = lambda x: alpha * ch.exp(x)
        obj_angle = lambda w: w * ch.concatenate([
            my_exp(sv.pose[55]), my_exp(-sv.pose[58]),
            my_exp(-sv.pose[12]), my_exp(-sv.pose[15])])

        if viz:
            import matplotlib.pyplot as plt
            plt.ion()

            def on_step(_):
                """Create visualization."""
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        np.around(cam.r[smpl_ids]).astype(int),
                        np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0 and
                            coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1] and
                            target_coord[0] >= 0 and
                            target_coord[1] < tmp_img.shape[0] and
                            target_coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(target_coord), 3,
                                   [0, 255, 0])
                plt.imshow(tmp_img[:, :, ::-1])
                plt.draw()
                plt.show()
                plt.pause(1e-2)

            on_step(None)
        else:
            on_step = None

        if regs is not None:
            # interpenetration term
            sp = SphereCollisions(
                pose=sv.pose, betas=sv.betas, model=model, regs=regs)
            sp.no_hands = True
        # weight configuration used in the paper, with joints + confidence values from the CNN
        # (all the weights used in the code were obtained via grid search, see the paper for more details)
        # the first list contains the weights for the pose priors,
        # the second list contains the weights for the shape prior
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        # run the optimization in 4 stages, progressively decreasing the
        # weights for the priors
        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}

            objs['j2d'] = obj_j2d(1., 100)

            objs['pose'] = pprior(w)

            objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas

            if regs is not None:
                objs['sph_coll'] = 1e3 * sp

            ch.minimize(
                objs,
                x0=[sv.betas, sv.pose],
                method='dogleg',
                callback=on_step,
                options={'maxiter': 100,
                         'e_3': .0001,
                         'disp': 0})

        t1 = time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)

    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r)
Example #38
def initialize_camera(model,
                      j2d,
                      img,
                      init_pose,
                      flength=5000.,
                      pix_thsh=25.,
                      viz=False):
    """Initialize camera translation and body orientation
    :param model: SMPL model
    :param j2d: 14x2 array of CNN joints
    :param img: h x w x 3 image 
    :param init_pose: 72D vector of pose parameters used for initialization
    :param flength: camera focal length (kept fixed)
    :param pix_thsh: threshold (in pixel), if the distance between shoulder joints in 2D
                     is lower than pix_thsh, the body orientation as ambiguous (so a fit is run on both
                     the estimated one and its flip)
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the estimated camera,
              a boolean deciding if both the optimized body orientation and its flip should be considered,
              3D vector for the body orientation
    """
    # optimize camera translation and body orientation based on torso joints
    # LSP torso ids:
    # 2=right hip, 3=left hip, 8=right shoulder, 9=left shoulder
    torso_cids = [2, 3, 8, 9]
    # corresponding SMPL torso ids
    torso_smpl_ids = [2, 1, 17, 16]

    center = np.array([img.shape[1] / 2, img.shape[0] / 2])

    # initialize camera rotation
    rt = ch.zeros(3)
    # initialize camera translation
    _LOGGER.info('initializing translation via similar triangles')
    init_t = guess_init(model, flength, j2d, init_pose)
    t = ch.array(init_t)

    # check how close the shoulder joints are
    try_both_orient = np.linalg.norm(j2d[8] - j2d[9]) < pix_thsh

    opt_pose = ch.array(init_pose)
    (_, A_global) = global_rigid_transformation(
        opt_pose, model.J, model.kintree_table, xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])

    # initialize the camera
    cam = ProjectPoints(
        f=np.array([flength, flength]), rt=rt, t=t, k=np.zeros(5), c=center)

    # we are going to project the SMPL joints
    cam.v = Jtr

    if viz:
        viz_img = img.copy()

        # draw the target (CNN) joints
        for coord in np.around(j2d).astype(int):
            if (coord[0] < img.shape[1] and coord[0] >= 0 and
                    coord[1] < img.shape[0] and coord[1] >= 0):
                cv2.circle(viz_img, tuple(coord), 3, [0, 255, 0])

        import matplotlib.pyplot as plt
        plt.ion()

        # draw optimized joints at each iteration
        def on_step(_):
            """Draw a visualization."""
            plt.figure(1, figsize=(5, 5))
            plt.subplot(1, 1, 1)
            viz_img = img.copy()
            for coord in np.around(cam.r[torso_smpl_ids]).astype(int):
                if (coord[0] < viz_img.shape[1] and coord[0] >= 0 and
                        coord[1] < viz_img.shape[0] and coord[1] >= 0):
                    cv2.circle(viz_img, tuple(coord), 3, [0, 0, 255])
            plt.imshow(viz_img[:, :, ::-1])
            plt.draw()
            plt.show()
            plt.pause(1e-3)
    else:
        on_step = None
    # optimize for camera translation and body orientation
    free_variables = [cam.t, opt_pose[:3]]
    ch.minimize(
        # data term defined over torso joints...
        {'cam': j2d[torso_cids] - cam[torso_smpl_ids],
         # ...plus a regularizer for the camera translation
         'cam_t': 1e2 * (cam.t[2] - init_t[2])},
        x0=free_variables,
        method='dogleg',
        callback=on_step,
        options={'maxiter': 100,
                 'e_3': .0001,
                 # disp set to 1 enables verbose output from the optimizer
                 'disp': 0})
    if viz:
        plt.ioff()
    return (cam, try_both_orient, opt_pose[:3].r)
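
A hypothetical driver for `initialize_camera`, assuming a loaded SMPL `model`, a 14x2 CNN joint array `j2d`, and an image `img` are already in scope (none are defined here; this is a sketch, not part of the original example).

import numpy as np

init_pose = np.zeros(72)  # assumption: start from the zero pose
cam, try_both_orient, body_orient = initialize_camera(
    model, j2d, img, init_pose, flength=5000., pix_thsh=25., viz=False)

# cam.t.r now holds the estimated translation and body_orient the global
# rotation; when try_both_orient is True, the caller is expected to fit
# both body_orient and its flipped counterpart and keep the lower-error fit.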
Example #39
0
    def test_occlusion(self):
        if visualize:
            import matplotlib.pyplot as plt
            plt.ion()

        # Create renderer
        import chumpy as ch
        import numpy as np
        from opendr.renderer import TexturedRenderer, ColoredRenderer
        # a TexturedRenderer could be used instead; this test uses flat vertex colors
        rn = ColoredRenderer()

        # Assign attributes to renderer
        from util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
        rn.texture_image = m.texture_image
        rn.ft = m.ft
        rn.vt = m.vt
        m.v[:,2] = np.mean(m.v[:,2])

        # mesh 0 (red, offset t0) ends up in front; mesh 1 (green, offset t1) behind
        t0 = ch.array([1,0,.1])
        t1 = ch.array([-1,0,.1])
        v0 = ch.array(m.v) + t0

        # an alternative, scaled-down second mesh (disabled):
        #   v1 = ch.array(m.v*.4 + np.array([0,0,3.8])) + t1
        v1 = ch.array(m.v) + t1
        vc0 = v0*0 + np.array([[.4,0,0]])
        vc1 = v1*0 + np.array([[0,.4,0]])
        vc = ch.vstack((vc0, vc1))

        v = ch.vstack((v0, v1))
        f = np.vstack((m.f, m.f+len(v0)))

        w, h = (320, 240)
        rn.camera = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.zeros(3),
                                  f=ch.array([w, w]) / 2.,
                                  c=ch.array([w, h]) / 2., k=ch.zeros(5))
        rn.camera.t = ch.array([0,0,-2.5])
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        m.vc = v.r*0 + np.array([[1,0,0]])
        rn.set(v=v, f=f, vc=vc)

        t0[:] = np.array([1.4, 0, .1-.02])
        t1[:] = np.array([-0.6, 0, .1+.02])

        target = rn.r

        if visualize:
            plt.figure()
            plt.imshow(target)
            plt.title('target')

            plt.figure()
            plt.show()

        im_orig = rn.r.copy()

        from cvwrap import cv2

        eps_emp = .02   # finite-difference step for the empirical derivative
        eps_pred = .02  # scale applied to the predicted derivative for comparison

        # optional smoothing before comparison (e.g. cv2.blur(x, ksize=(5, 5)));
        # disabled here, so blur is the identity
        blur = lambda x: x
        for tr in [t0, t1]:
            if tr is t0:
                sum_limits = np.array([2.1e+2, 6.9e+1, 1.6e+2])
            else:
                sum_limits = [1., 5., 4.]

            if visualize:
                plt.figure()
            for i in range(3):
                dr_pred = np.array(rn.dr_wrt(tr[i]).todense()).reshape(rn.shape) * eps_pred
                dr_pred = blur(dr_pred)

                # central differences: render at +eps_emp/2 and -eps_emp/2
                # around the current value, then restore it
                tr[i] = tr[i].r + eps_emp / 2.
                rn_greater = rn.r.copy()
                tr[i] = tr[i].r - eps_emp
                rn_lesser = rn.r.copy()
                tr[i] = tr[i].r + eps_emp / 2.

                dr_emp = blur((rn_greater - rn_lesser) * eps_pred / eps_emp)

                dr_pred_shown = np.clip(dr_pred, -.5, .5) + .5
                dr_emp_shown = np.clip(dr_emp, -.5, .5) + .5

                if visualize:
                    plt.subplot(3,3,i+1)
                    plt.imshow(dr_pred_shown)
                    plt.title('pred')
                    plt.axis('off')

                    plt.subplot(3,3,3+i+1)
                    plt.imshow(dr_emp_shown)
                    plt.title('empirical')
                    plt.axis('off')

                    plt.subplot(3,3,6+i+1)

                diff = np.abs(dr_emp - dr_pred)
                if visualize:
                    plt.imshow(diff)
                diff = diff.ravel()
                if visualize:
                    plt.title('diff (sum: %.2e)'  % (np.sum(diff)))
                    plt.axis('off')

                self.assertTrue(np.sum(diff) < sum_limits[i])
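
The test above validates a renderer Jacobian against finite differences. The same check, reduced to a scalar chumpy expression, reads as the minimal sketch below; `x ** 2` stands in for the renderer, and the sparse/dense handling mirrors the `.todense()` call above.

import chumpy as ch
import numpy as np

x = ch.array([2.0])
y = x ** 2                      # analytic derivative at x=2 is 4

dr = y.dr_wrt(x)                # may come back dense or scipy-sparse
dr_pred = np.array(dr.todense() if hasattr(dr, 'todense') else dr).ravel()[0]

eps = 1e-4                      # central differences around the current value
x[:] = x.r + eps / 2.
y_hi = y.r.copy()
x[:] = x.r - eps
y_lo = y.r.copy()
x[:] = x.r + eps / 2.           # restore the original value
dr_emp = ((y_hi - y_lo) / eps).ravel()[0]

assert abs(dr_pred - dr_emp) < 1e-6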