Example #1
def getHandModelPoseCoeffsMultiFrame(numComp, numFrames, isOpenGLCoord):
    chGlobalPoseCoeff = ch.zeros((numComp, ))
    chGlobalBeta = ch.zeros((10, ))

    chRotList = []
    chTransList = []
    mList = []

    for i in range(numFrames):
        chRot = ch.zeros((3, ))
        if isOpenGLCoord:
            chTrans = ch.array([0., 0., -0.5])
        else:
            chTrans = ch.array([0., 0., 0.5])
        chRotList.append(chRot)
        chTransList.append(chTrans)

        m = load_model_withInputs_poseCoeffs(os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            '../mano/models/MANO_RIGHT.pkl'),
                                             chRot=chRot,
                                             chTrans=chTrans,
                                             chPoseCoeff=chGlobalPoseCoeff,
                                             chBetas=chGlobalBeta,
                                             ncomps=numComp)
        mList.append(m)

    return mList, chRotList, chGlobalPoseCoeff, chTransList, chGlobalBeta
Example #2
def chumpy_get_H(p1, p2):
    """ Compute differentiable homography from p1 to p2.
    
    Parameters
    ----------
    p1,p2 : array_like, shape (4,2)
        Containing points to match.
    """
    xmin = 0
    ymin = 0
    xmax = 1024
    ymax = 576
    p1 = 2 * p1 / ch.array([[xmax, ymax]])
    p1 = p1 - 1.0
    p2 = 2 * p2 / ch.array([[xmax, ymax]])
    p2 = p2 - 1.0

    N = p1.shape[0]
    A1 = ch.vstack((ch.zeros((3, N)), -p1.T, -ch.ones(
        (1, N)), p2[:, 1] * p1[:, 0], p2[:, 1] * p1[:, 1], p2[:, 1])).T
    A2 = ch.vstack((p1.T, ch.ones((1, N)), ch.zeros(
        (3, N)), -p2[:, 0] * p1[:, 0], -p2[:, 0] * p1[:, 1], -p2[:, 0])).T
    A = ch.vstack((A1, A2))

    U, S, V = ch.linalg.svd(A.T.dot(A))
    H_new = V[-1, :].reshape((3, 3))

    # Re-normalize
    ML = ch.array([[xmax / 2.0, 0.0, xmax / 2.0], [0, ymax / 2.0, ymax / 2.0],
                   [0, 0, 1.0]])
    MR = ch.array([[2.0 / xmax, 0.0, -1.0], [0, 2.0 / ymax, -1.0], [0, 0,
                                                                    1.0]])
    H_new = ML.dot(H_new).dot(MR)

    return H_new / H_new[2, 2]
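
A minimal usage sketch for chumpy_get_H (the four correspondences below are hypothetical; a 1024x576 image is assumed, matching the hard-coded normalization above):

import numpy as np
import chumpy as ch

# Four point correspondences in a 1024x576 image (hypothetical values).
p1 = ch.array([[0., 0.], [1024., 0.], [1024., 576.], [0., 576.]])
p2 = ch.array([[12., 20.], [1000., 8.], [1015., 560.], [5., 570.]])

H = chumpy_get_H(p1, p2)

# Check that H maps p1 onto p2 up to the homogeneous scale.
pts = np.hstack((p1.r, np.ones((4, 1))))
proj = pts.dot(H.r.T)
proj = proj[:, :2] / proj[:, 2:3]
print(np.abs(proj - p2.r).max())  # should be near zero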
Example #3
    def test_depth_image(self):
        # Create renderer
        import chumpy as ch
        from .renderer import DepthRenderer
        rn = DepthRenderer()

        # Assign attributes to renderer
        from .util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0, 0, 0]), rotation=ch.zeros(3))
        m.v = m.v * .01
        m.v[:, 2] += 4
        w, h = (320, 240)
        from .camera import ProjectPoints
        rn.camera = ProjectPoints(v=m.v,
                                  rt=ch.zeros(3),
                                  t=ch.zeros(3),
                                  f=ch.array([w, w]) / 2.,
                                  c=ch.array([w, h]) / 2.,
                                  k=ch.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=m.v, f=m.f, vc=m.vc * 0 + 1, bgcolor=ch.zeros(3))

        # import time
        # tm = time.time()
        # rn.r
        # print 'took %es' % (time.time() - tm)

        # print np.min(rn.r.ravel())
        # print np.max(rn.r.ravel())
        self.assertLess(np.abs(np.min(rn.r.ravel()) - 3.98), 1e-5)
        self.assertLess(np.abs(np.min(m.v[:, 2]) - np.min(rn.r.ravel())), 1e-5)
        self.assertLess(np.abs(rn.r[h // 2, w // 2] - 3.98), 1e-5)
Example #4
def rigid_scan_2_mesh_alignment(scan, mesh, visualize=False):
    options = {'sparse_solver': lambda A, x: cg(A, x, maxiter=2000)[0]}
    options['disp'] = 1.0
    options['delta_0'] = 0.1
    options['e_3'] = 1e-4

    s = ch.ones(1)
    r = ch.zeros(3)
    R = Rodrigues(r)
    t = ch.zeros(3)
    trafo_mesh = s*(R.dot(mesh.v.T)).T + t

    sampler = sample_from_mesh(scan, sample_type='vertices')
    s2m = ScanToMesh(scan, trafo_mesh, mesh.f, scan_sampler=sampler, signed=False, normalize=False)

    if visualize:       
        #Visualization code
        mv = MeshViewer()
        mv.set_static_meshes([scan])
        tmp_mesh = Mesh(trafo_mesh.r, mesh.f)
        tmp_mesh.set_vertex_colors('light sky blue')
        mv.set_dynamic_meshes([tmp_mesh])
        def on_show(_):
            tmp_mesh = Mesh(trafo_mesh.r, mesh.f)
            tmp_mesh.set_vertex_colors('light sky blue')
            mv.set_dynamic_meshes([tmp_mesh])
    else:
        def on_show(_):
            pass

    ch.minimize(fun={'dist': s2m, 's_reg': 100*(ch.abs(s)-s)}, x0=[s, r, t], callback=on_show, options=options)
    return s, Rodrigues(r), t
Example #5
def initialize_camera(iuv, j2d, model, fx=None, fy=None, cx=None, cy=None):
    """Initialize camera translation and body orientation
    :param model: SMPL model
    :param j2d: 14x2 array of CNN joints
    :param img: h x w x 3 image 
    :param init_pose: 72D vector of pose parameters used for initialization
    :param flength: camera focal length (kept fixed)
    :param pix_thsh: threshold (in pixel), if the distance between shoulder joints in 2D
                     is lower than pix_thsh, the body orientation as ambiguous (so a fit is run on both
                     the estimated one and its flip)
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the estimated camera,
              a boolean deciding if both the optimized body orientation and its flip should be considered,
              3D vector for the body orientation
    """

    rt = ch.zeros(3)

    t = ch.zeros(3)

    # initialize the camera
    cam = ProjectPoints(f=np.array([fx, fy]),
                        rt=rt,
                        t=t,
                        k=np.zeros(5),
                        c=[cx, cy])
    return cam
Example #6
    def test_depth_image(self):
        # Create renderer
        import chumpy as ch
        from renderer import DepthRenderer
        rn = DepthRenderer()

        # Assign attributes to renderer
        from util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0,0,0]), rotation=ch.zeros(3))
        m.v = m.v * .01
        m.v[:,2] += 4
        w, h = (320, 240)
        from camera import ProjectPoints
        rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=m.v, f=m.f, vc=m.vc*0+1, bgcolor=ch.zeros(3))
        
        # import time
        # tm = time.time()
        # rn.r
        # print 'took %es' % (time.time() - tm)

        # print np.min(rn.r.ravel())
        # print np.max(rn.r.ravel())
        self.assertLess(np.abs(np.min(rn.r.ravel()) - 3.98), 1e-5)
        self.assertLess(np.abs(np.min(m.v[:,2]) - np.min(rn.r.ravel())), 1e-5)
        self.assertLess(np.abs(rn.r[h // 2, w // 2] - 3.98), 1e-5)
Example #7
File: slider_demo.py  Project: Hutaimu1/cv-
def get_renderer():
    import chumpy as ch
    from opendr.everything import *

    # Load mesh
    m = load_mesh('/Users/matt/geist/OpenDR/test_dr/nasa_earth.obj')
    m.v += ch.array([0, 0, 3.])
    w, h = (320, 240)
    trans = ch.array([[0, 0, 0]])

    # Construct renderer
    rn = TexturedRenderer()
    rn.camera = ProjectPoints(v=m.v,
                              rt=ch.zeros(3),
                              t=ch.zeros(3),
                              f=ch.array([w, w]) / 2.,
                              c=ch.array([w, h]) / 2.,
                              k=ch.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=trans + m.v,
           f=m.f,
           texture_image=m.texture_image[:, :, ::-1],
           ft=m.ft,
           vt=m.vt,
           bgcolor=ch.zeros(3))
    rn.vc = SphericalHarmonics(vn=VertNormals(v=rn.v, f=rn.f),
                               components=ch.array([4., 0., 0., 0.]),
                               light_color=ch.ones(3))

    return rn
Example #8
def setup_silhouette_obj(silhs, rends, f):
    n_model = [ch.sum(rend[:, :, 0] > 0) for rend in rends]
    dist_tsf = [cv2.distanceTransform(np.uint8(1 - silh), cv2.DIST_L2, cv2.DIST_MASK_PRECISE) for silh in silhs]

    # Make sigma proportional to image area.
    # This gives radius 400 for 960 x 1920 image.
    sigma_ratio = 0.2727
    sigma = [np.sqrt(sigma_ratio * (silh.shape[0] * silh.shape[1]) / np.pi) for silh in silhs]
    # Model2silhouette ==> Consistency (contraction)

    def obj_m2s(w, i):
        return w * (GMOf(rends[i][:, :, 0] * dist_tsf[i], sigma[i]) / np.sqrt(n_model[i]))

    # silhouette error term (scan-to-model) ==> Coverage (expansion)
    coords = [np.fliplr(np.array(np.where(silh > 0)).T) + 0.5 for silh in silhs]  # is this offset needed?
    scan_flat_v = [np.hstack((coord, ch.zeros(len(coord)).reshape((-1, 1)))) for coord in coords]
    scan_flat = [Mesh(v=sflat_v, f=[]) for sflat_v in scan_flat_v]
    # 2d + all 0.
    sv_flat = [ch.hstack((rend.camera, ch.zeros(len(rend.v)).reshape((-1, 1)))) for rend in rends]
    for i in range(len(rends)):
        sv_flat[i].f = f

    def obj_s2m(w, i):
        from sbody.mesh_distance import ScanToMesh
        return w * ch.sqrt(GMOf(ScanToMesh(scan_flat[i], sv_flat[i], f), sigma[i]))
       
    # For vis
    for i in range(len(rends)):
        scan_flat[i].vc = np.tile(np.array([0, 0, 1]), (len(scan_flat[i].v), 1))

    return obj_m2s, obj_s2m, dist_tsf
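
A toy sketch of the distance-transform image the consistency term above relies on (hypothetical 8x8 silhouette; only OpenCV and numpy needed):

import cv2
import numpy as np

silh = np.zeros((8, 8), dtype=np.uint8)
silh[2:6, 2:6] = 1  # square silhouette

# Distance to the nearest silhouette pixel: zero inside, growing outside,
# so rendered model pixels falling off the silhouette are penalized.
dist = cv2.distanceTransform(np.uint8(1 - silh), cv2.DIST_L2,
                             cv2.DIST_MASK_PRECISE)
print(dist[3, 3], dist[0, 0])  # 0.0 inside vs. ~2.83 at the corner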
Example #9
File: test_opendr.py  Project: zuru/DSS
def test_earth():
    m = get_earthmesh(trans=ch.array([0, 0, 0]), rotation=ch.zeros(3))
    # Create V, A, U, f: geometry, brightness, camera, renderer
    V = ch.array(m.v)
    A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
                           components=[3., 2., 0., 0., 0., 0., 0., 0., 0.],
                           light_color=ch.ones(3))
    # camera
    U = ProjectPoints(v=V,
                      f=[w, w],
                      c=[w / 2., h / 2.],
                      k=ch.zeros(5),
                      t=ch.zeros(3),
                      rt=ch.zeros(3))
    f = TexturedRenderer(vc=A,
                         camera=U,
                         f=m.f,
                         bgcolor=[0., 0., 0.],
                         texture_image=m.texture_image,
                         vt=m.vt,
                         ft=m.ft,
                         frustum={
                             'width': w,
                             'height': h,
                             'near': 1,
                             'far': 20
                         })

    # Parameterize the vertices
    translation, rotation = ch.array([0, 0, 8]), ch.zeros(3)
    f.v = translation + V.dot(Rodrigues(rotation))

    observed = f.r
    np.random.seed(1)
    # this is reactive
    # in the sense that changes to values will affect function which depend on them.
    translation[:] = translation.r + np.random.rand(3)
    rotation[:] = rotation.r + np.random.rand(3) * .2
    # Create the energy
    E_raw = f - observed
    E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')

    Image.fromarray((observed * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "reference.png"))
    step = 0
    Image.fromarray((f.r * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "step_{:05d}.png".format(step)))

    print('OPTIMIZING TRANSLATION, ROTATION, AND LIGHT PARMS')
    free_variables = [translation, rotation]
    ch.minimize({'pyr': E_pyr}, x0=free_variables, callback=create_callback(f))
    ch.minimize({'raw': E_raw}, x0=free_variables, callback=create_callback(f))
Example #10
    def test_derivatives2(self):
        import chumpy as ch
        import numpy as np
        from .renderer import DepthRenderer

        rn = DepthRenderer()

        # Assign attributes to renderer
        from .util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0, 0, 4]), rotation=ch.zeros(3))
        w, h = (320, 240)
        from .camera import ProjectPoints
        rn.camera = ProjectPoints(v=m.v,
                                  rt=ch.zeros(3),
                                  t=ch.zeros(3),
                                  f=ch.array([w, w]) / 2.,
                                  c=ch.array([w, h]) / 2.,
                                  k=ch.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=m.v, f=m.f, bgcolor=ch.zeros(3))

        if visualize:
            import matplotlib.pyplot as plt
            plt.ion()
            plt.figure()

        for which in range(3):
            r1 = rn.r

            adder = np.random.rand(rn.v.r.size).reshape(rn.v.r.shape) * .01
            change = rn.v.r * 0 + adder
            dr_pred = rn.dr_wrt(rn.v).dot(change.ravel()).reshape(rn.shape)
            rn.v = rn.v.r + change

            r2 = rn.r
            dr_emp = r2 - r1

            #print np.mean(np.abs(dr_pred-dr_emp))

            self.assertLess(np.mean(np.abs(dr_pred - dr_emp)), .024)

            if visualize:
                plt.subplot(2, 3, which + 1)
                plt.imshow(dr_pred)
                plt.clim(-.01, .01)
                plt.title('emp')
                plt.subplot(2, 3, which + 4)
                plt.imshow(dr_emp)
                plt.clim(-.01, .01)
                plt.title('pred')
                plt.draw()
                plt.show()
Example #11
    def chumpy_get_H(p1, p2):
        """ Compute differentiable homography from p1 to p2.
        """
        N = p1.shape[0]
        A1 = ch.vstack((ch.zeros((3, N)), -p1.T, -ch.ones(
            (1, N)), p2[:, 1] * p1[:, 0], p2[:, 1] * p1[:, 1], p2[:, 1])).T
        A2 = ch.vstack((p1.T, ch.ones((1, N)), ch.zeros(
            (3, N)), -p2[:, 0] * p1[:, 0], -p2[:, 0] * p1[:, 1], -p2[:, 0])).T
        A = ch.vstack((A1, A2))

        U, S, V = ch.linalg.svd(A.T.dot(A))
        H_new = V[-1, :].reshape((3, 3))
        return H_new
Example #12
    def get_cam_params(self):

        v_raw = np.sin(np.arange(900)).reshape((-1,3))
        v_raw[:, 2] -= 2
        
        rt = ch.zeros(3)
        t = ch.zeros(3)
        f = ch.array([500,500])
        c = ch.array([320,240])
        k = ch.zeros(5)

        cam_params = {'v': ch.Ch(v_raw), 'rt': rt, 't': t, 'f': f, 'c': c, 'k': k}
        return cam_params
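
A sketch of feeding the dict above into an opendr camera (assumes ProjectPoints from opendr.camera; values mirror the test fixture):

import numpy as np
import chumpy as ch
from opendr.camera import ProjectPoints

v_raw = np.sin(np.arange(900)).reshape((-1, 3))
v_raw[:, 2] -= 2

cam = ProjectPoints(v=ch.Ch(v_raw), rt=ch.zeros(3), t=ch.zeros(3),
                    f=ch.array([500, 500]), c=ch.array([320, 240]),
                    k=ch.zeros(5))
print(cam.r.shape)  # (300, 2): 2D projections, differentiable w.r.t. all inputs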
Example #13
    def get_cam_params(self):

        v_raw = np.sin(np.arange(900)).reshape((-1,3))
        v_raw[:, 2] -= 2
        
        rt = ch.zeros(3)
        t = ch.zeros(3)
        f = ch.array([500,500])
        c = ch.array([320,240])
        k = ch.zeros(5)

        cam_params = {'v': ch.Ch(v_raw), 'rt': rt, 't': t, 'f': f, 'c': c, 'k': k}
        return cam_params
Example #14
    def render(self, thetas, texture_bgr, rotate=np.array([0, 0, 0]), background_img=None):
        """
        get the rendered image and rendered silhouette
        :param thetas: model parameters, 3 * camera parameter + 72 * body pose + 10 * body shape
        :param texture_bgr: texture image in bgr format
        :return: the rendered image and deviation of rendered image to texture image
        (rendered image, deviation of rendered image, silhouette)
        """
        self.set_texture(texture_bgr)
        thetas = thetas.reshape(-1)
        cams = thetas[:self.num_cam]
        theta = thetas[self.num_cam: (self.num_cam + self.num_theta)]
        beta = thetas[(self.num_cam + self.num_theta):]

        self.body.pose[:] = theta
        self.body.betas[:] = beta

        #
        # size = cams[0] * min(self.w, self.h)
        # position = cams[1:3] * min(self.w, self.h) / 2 + min(self.w, self.h) / 2
        """
        ####################################################################
        ATTENTION!
        I do not know why the flength is 500.
        But it worked
        ####################################################################
        """

        texture_rn = TexturedRenderer()
        texture_rn.camera = ProjectPoints(v=self.body, rt=rotate, t=ch.array([0, 0, 2]),
                                          f=np.ones(2) * self.img_size * 0.62,
                                          c=np.array([self.w / 2, self.h / 2]),
                                          k=ch.zeros(5))
        texture_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
        texture_rn.set(v=self.body, f=self.m.f, vc=self.m.vc, texture_image=self.m.texture_image, ft=self.m.ft,
                       vt=self.m.vt)
        if background_img is not None:
            texture_rn.background_image = background_img / 255. if background_img.max() > 1 else background_img

        silhouette_rn = ColoredRenderer()
        silhouette_rn.camera = ProjectPoints(v=self.body, rt=rotate, t=ch.array([0, 0, 2]),
                                             f=np.ones(2) * self.img_size * 0.62,
                                             c=np.array([self.w / 2, self.h / 2]),
                                             k=ch.zeros(5))
        silhouette_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
        silhouette_rn.set(v=self.body, f=self.m.f, vc=np.ones_like(self.body), bgcolor=np.zeros(3))

        return texture_rn.r, texture_dr_wrt(texture_rn, silhouette_rn.r), silhouette_rn.r
Example #15
def render_mask(w, h, v, f, u):
    """renders silhouette"""
    V = ch.array(v)
    A = np.ones(v.shape)
    rn = ColoredRenderer(camera=u, v=V, f=f, vc=A, bgcolor=ch.zeros(3),
                         frustum={'width': w, 'height': h, 'near': 0.1, 'far': 20})
    return rn.r
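
A minimal call sketch for render_mask on a single hypothetical triangle:

import numpy as np
import chumpy as ch
from opendr.camera import ProjectPoints

w, h = 320, 240
v = np.array([[0., 0., 2.], [0.5, 0., 2.], [0., 0.5, 2.]])  # one triangle
f = np.array([[0, 1, 2]], dtype=np.uint32)
u = ProjectPoints(v=ch.array(v), rt=ch.zeros(3), t=ch.zeros(3),
                  f=ch.array([w, w]) / 2., c=ch.array([w, h]) / 2.,
                  k=ch.zeros(5))
mask = render_mask(w, h, v, f, u)
print(mask.shape)  # (240, 320, 3), white where the triangle projects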
Example #16
def setupCamera(v, chAz, chEl, chDist, objCenter, width, height):

    chCamModelWorld = computeHemisphereTransformation(chAz, chEl, chDist,
                                                      objCenter)

    chMVMat = ch.dot(chCamModelWorld,
                     np.array(mathutils.Matrix.Rotation(radians(270), 4, 'X')))

    chInvCam = ch.inv(chMVMat)

    modelRotation = chInvCam[0:3, 0:3]

    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = chInvCam[0:3, 3]

    translation, rotation = (chTranslation, chRod)
    camera = ProjectPoints(v=v,
                           rt=rotation,
                           t=translation,
                           f=1.12 * ch.array([width, width]),
                           c=ch.array([width, height]) / 2.0,
                           k=ch.zeros(5))
    camera.openglMat = np.array(mathutils.Matrix.Rotation(
        radians(180), 4, 'X'))
    return camera, modelRotation, chMVMat
Example #17
def main(mesh_list, out_list, scale=1.0, move_scale=True):
    assert len(mesh_list) == len(out_list)
    for mesh_file, out_file in zip(mesh_list, out_list):
        mesh = load_obj_data_binary(mesh_file)
        if move_scale:  # move to center and scale to unit bounding box
            mesh['v'] = (mesh['v'] - np.array([128, -192, 128]) +
                         0.5) * voxel_size

        if not ('vn' in mesh and mesh['vn'] is not None):
            mesh['vn'] = np.array(VertNormals(f=mesh['f'], v=mesh['v']))

        V = ch.array(mesh['v']) * scale
        V -= trans

        C = np.ones_like(mesh['v'])
        C *= np.array([186, 212, 255], dtype=np.float32) / 255.0
        # C *= np.array([158, 180, 216], dtype=np.float32) / 250.0
        C = np.minimum(C, 1.0)
        A = np.zeros_like(mesh['v'])
        A += LambertianPointLight(f=mesh['f'],
                                  v=V,
                                  vn=-mesh['vn'],
                                  num_verts=len(V),
                                  light_pos=np.array([0, -50, -50]),
                                  light_color=np.array([1.0, 1.0, 1.0]),
                                  vc=C)

        cam_t, cam_r = ch.array((0, 0, 0)), ch.array((3.14, 0, 0))
        U = ProjectPoints(v=V,
                          f=[flength, flength],
                          c=[w / 2., h / 2.],
                          k=ch.zeros(5),
                          t=cam_t,
                          rt=cam_r)
        rn = ColoredRenderer(camera=U,
                             v=V,
                             f=mesh['f'],
                             vc=A,
                             bgcolor=np.array([1.0, 0.0, 0.0]),
                             frustum={
                                 'width': w,
                                 'height': h,
                                 'near': 0.1,
                                 'far': 20
                             })

        img = np.asarray(rn)[:, :, (2, 1, 0)]
        msk = np.sum(np.abs(img - np.array([[[0, 0, 1.0]]], dtype=np.float32)),
                     axis=-1,
                     keepdims=True)
        msk[msk > 0] = 1
        img = cv.resize(img, (img.shape[1] // 2, img.shape[0] // 2))
        msk = cv.resize(msk, (msk.shape[1] // 2, msk.shape[0] // 2),
                        interpolation=cv.INTER_AREA)
        msk[msk < 1] = 0
        msk = msk[:, :, np.newaxis]
        img = np.concatenate([img, msk], axis=-1)
        cv.imshow('render3', img)
        cv.waitKey(3)
        cv.imwrite(out_file, np.uint8(img * 255))
Example #18
def setupCamera_centauro(v, cameraParams, tf_world_2_camera):
    '''
    Simplified setup for the centauro toy example or
    most of cases where we don't need to regress for camera parameters
    in : tf_world_2_camera  (Preferably use Matrix44.look_at() to create this)
    '''
    modelRotation = tf_world_2_camera[0:3, 0:3]

    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = tf_world_2_camera[0:3, 3]

    translation, rotation = (chTranslation, chRod)
    camera = ProjectPoints(v=v,
                           rt=rotation,
                           t=translation,
                           f=1000 * cameraParams['chCamFocalLength'] *
                           cameraParams['a'],
                           c=cameraParams['c'],
                           k=ch.zeros(5))

    #print ('camera shape', camera.shape)
    #print ('camera   ', camera)

    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0., 0.0],
                              [0.0, 0., -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])

    camera.openglMat = flipXRotation  #Needed to match OpenGL flipped axis.

    # chMVMat construct might be wrong
    # From line 308 chInvCam = ch.inv(chMVMat)
    # It seems, chMVMat is  tf_camera_2_world
    # In any case, the create_renderer function ignores this returned variable
    chMVMat = ch.inv(tf_world_2_camera)

    return camera, modelRotation, chMVMat
Example #19
def initialize_camera(fx, fy, cx, cy):
    """
    @param: fx,fy,cx,cy as pinhole camera model parameter
    @return: a differentiable camera instance
    """

    rt = ch.zeros(3)

    t = ch.zeros(3)

    cam = ProjectPoints(f=np.array([fx, fy]),
                        rt=rt,
                        t=t,
                        k=np.zeros(5),
                        c=[cx, cy])
    return cam
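
Usage sketch (intrinsics and points below are hypothetical):

import numpy as np
import chumpy as ch

cam = initialize_camera(fx=500., fy=500., cx=320., cy=240.)
cam.v = ch.array(np.random.rand(10, 3) + np.array([0., 0., 2.]))  # points in front of the camera
print(cam.r.shape)  # (10, 2) pixel coordinates
# cam.r is differentiable w.r.t. cam.rt, cam.t and cam.v, so the camera pose
# can be refined with ch.minimize against 2D observations.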
Example #20
    def _global_rigid_transformation(self):
        results = {}
        pose = self.pose.reshape((-1, 3))
        parent = {
            i: self.kintree_table[0, i]
            for i in range(1, self.kintree_table.shape[1])
        }

        with_zeros = lambda x: ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))
        pack = lambda x: ch.hstack([ch.zeros((4, 3)), x.reshape((4, 1))])

        results[0] = with_zeros(
            ch.hstack((Rodrigues(pose[0, :]), self.J[0, :].reshape((3, 1)))))

        for i in range(1, self.kintree_table.shape[1]):
            results[i] = results[parent[i]].dot(
                with_zeros(
                    ch.hstack((
                        Rodrigues(pose[i, :]),  # rotation around bone endpoint
                        (self.J[i, :] - self.J[parent[i], :]).reshape(
                            (3, 1))  # bone
                    ))))

        results = [results[i] for i in sorted(results.keys())]
        results_global = results

        # subtract rotated J position
        results2 = [
            results[i] -
            (pack(results[i].dot(ch.concatenate((self.J[i, :], [0])))))
            for i in range(len(results))
        ]
        result = ch.dstack(results2)

        return result, results_global
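
A standalone sketch of the same chain composition on a toy two-joint skeleton (values hypothetical; Rodrigues is the differentiable rotation from opendr.geometry):

import numpy as np
import chumpy as ch
from opendr.geometry import Rodrigues

with_zeros = lambda x: ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))

pose = ch.zeros(6).reshape((2, 3))           # axis-angle for two joints
J = np.array([[0., 0., 0.], [0., 1., 0.]])   # rest-pose joint locations

# Root transform, then the child composed relative to its parent:
# rotate about the parent joint, translate along the bone.
T0 = with_zeros(ch.hstack((Rodrigues(pose[0, :]), J[0].reshape((3, 1)))))
T1 = T0.dot(with_zeros(ch.hstack((Rodrigues(pose[1, :]),
                                  (J[1] - J[0]).reshape((3, 1))))))
print(T1.r[:3, 3])  # world position of the second joint: [0, 1, 0] at rest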
Example #21
    def test_derivatives2(self):
        import chumpy as ch
        import numpy as np
        from renderer import DepthRenderer

        rn = DepthRenderer()

        # Assign attributes to renderer
        from util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
        w, h = (320, 240)
        from camera import ProjectPoints
        rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=m.v, f=m.f, bgcolor=ch.zeros(3))

        if visualize:
            import matplotlib.pyplot as plt
            plt.ion()
            plt.figure()

        for which in range(3):
            r1 = rn.r

            adder = np.random.rand(rn.v.r.size).reshape(rn.v.r.shape)*.01
            change = rn.v.r * 0 + adder
            dr_pred = rn.dr_wrt(rn.v).dot(change.ravel()).reshape(rn.shape)
            rn.v = rn.v.r + change

            r2 = rn.r
            dr_emp = r2 - r1

            #print np.mean(np.abs(dr_pred-dr_emp))

            self.assertLess(np.mean(np.abs(dr_pred-dr_emp)), .024)

            if visualize:
                plt.subplot(2,3,which+1)
                plt.imshow(dr_pred)
                plt.clim(-.01,.01)
                plt.title('emp')
                plt.subplot(2,3,which+4)
                plt.imshow(dr_emp)
                plt.clim(-.01,.01)
                plt.title('pred')
                plt.draw()
                plt.show()
Example #22
File: verts.py  Project: caotin/pix2surf
def verts_decorated(trans, pose, 
    v_template, J, weights, kintree_table, bs_style, f,
    bs_type=None, posedirs=None, betas=None, shapedirs=None, want_Jtr=False):

    for which in [trans, pose, v_template, weights, posedirs, betas, shapedirs]:
        if which is not None:
            assert ischumpy(which)

    v = v_template

    if shapedirs is not None:
        if betas is None:
            betas = chumpy.zeros(shapedirs.shape[-1])
        v_shaped = v + shapedirs.dot(betas)
    else:
        v_shaped = v
        
    if posedirs is not None:
        v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose))
    else:
        v_posed = v_shaped
        
    v = v_posed
        
    if sp.issparse(J):
        regressor = J
        J_tmpx = MatVecMult(regressor, v_shaped[:,0])        
        J_tmpy = MatVecMult(regressor, v_shaped[:,1])        
        J_tmpz = MatVecMult(regressor, v_shaped[:,2])        
        J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T            
    else:    
        assert(ischumpy(J))
        
    assert(bs_style=='lbs')
    result, Jtr = verts_core(pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy)
     
    tr = trans.reshape((1,3))
    result = result + tr
    Jtr = Jtr + tr

    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights
    result.kintree_table = kintree_table
    result.bs_style = bs_style
    result.bs_type = bs_type
    if posedirs is not None:
        result.posedirs = posedirs
        result.v_posed = v_posed
    if shapedirs is not None:
        result.shapedirs = shapedirs
        result.betas = betas
        result.v_shaped = v_shaped
    if want_Jtr:
        result.J_transformed = Jtr
    return result
Example #23
def verts_decorated(trans, pose, 
    v_template, J, weights, kintree_table, bs_style, f,
    bs_type=None, posedirs=None, betas=None, shapedirs=None, want_Jtr=False):

    for which in [trans, pose, v_template, weights, posedirs, betas, shapedirs]:
        if which is not None:
            assert ischumpy(which)

    v = v_template

    if shapedirs is not None:
        if betas is None:
            betas = chumpy.zeros(shapedirs.shape[-1])
        v_shaped = v + shapedirs.dot(betas)
    else:
        v_shaped = v
        
    if posedirs is not None:
        v_posed = v_shaped + posedirs.dot(posemap(bs_type)(pose))
    else:
        v_posed = v_shaped
        
    v = v_posed
        
    if sp.issparse(J):
        regressor = J
        J_tmpx = MatVecMult(regressor, v_shaped[:,0])        
        J_tmpy = MatVecMult(regressor, v_shaped[:,1])        
        J_tmpz = MatVecMult(regressor, v_shaped[:,2])        
        J = chumpy.vstack((J_tmpx, J_tmpy, J_tmpz)).T            
    else:    
        assert(ischumpy(J))
        
    assert(bs_style=='lbs')
    result, Jtr = lbs.verts_core(pose, v, J, weights, kintree_table, want_Jtr=True, xp=chumpy)
     
    tr = trans.reshape((1,3))
    result = result + tr
    Jtr = Jtr + tr

    result.trans = trans
    result.f = f
    result.pose = pose
    result.v_template = v_template
    result.J = J
    result.weights = weights
    result.kintree_table = kintree_table
    result.bs_style = bs_style
    result.bs_type = bs_type
    if posedirs is not None:
        result.posedirs = posedirs
        result.v_posed = v_posed
    if shapedirs is not None:
        result.shapedirs = shapedirs
        result.betas = betas
        result.v_shaped = v_shaped
    if want_Jtr:
        result.J_transformed = Jtr
    return result
Example #24
def getHandModel():
    globalJoints = ch.zeros((45, ))
    globalBeta = ch.zeros((10, ))
    chRot = ch.zeros((3, ))
    chTrans = ch.array([0., 0., 0.5])

    fullpose = ch.concatenate([chRot, globalJoints], axis=0)
    m = load_model_withInputs(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '../mano/models/MANO_RIGHT.pkl'),
                              fullpose,
                              chTrans,
                              globalBeta,
                              ncomps=15,
                              flat_hand_mean=True)

    return m, chRot, globalJoints, chTrans, globalBeta
Example #25
    def on_changed(self, which):
        if 'model' in which:
            if not isinstance(self.model, dict):
                dd = pkl.load(open(self.model, "rb"), encoding="latin1")
            else:
                dd = self.model

            backwards_compatibility_replacements(dd)

            # for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']:
            for s in ['posedirs', 'shapedirs']:
                if (s in dd) and not hasattr(dd[s], 'dterms'):
                    dd[s] = ch.array(dd[s])

            self.f = dd['f']
            self.shapedirs = dd['shapedirs']
            self.J_regressor = dd['J_regressor']
            if 'J_regressor_prior' in dd:
                self.J_regressor_prior = dd['J_regressor_prior']
            self.bs_type = dd['bs_type']
            self.bs_style = dd['bs_style']
            self.weights = ch.array(dd['weights'])
            if 'vert_sym_idxs' in dd:
                self.vert_sym_idxs = dd['vert_sym_idxs']
            if 'weights_prior' in dd:
                self.weights_prior = dd['weights_prior']
            self.kintree_table = dd['kintree_table']
            self.posedirs = dd['posedirs']

            if not hasattr(self, 'betas'):
                self.betas = ch.zeros(self.shapedirs.shape[-1])

            if not hasattr(self, 'trans'):
                self.trans = ch.zeros(3)

            if not hasattr(self, 'pose'):
                self.pose = ch.zeros(72)

            if not hasattr(self, 'v_template'):
                self.v_template = ch.array(dd['v_template'])

            if not hasattr(self, 'v_personal'):
                self.v_personal = ch.zeros_like(self.v_template)

            self._set_up()
Example #26
File: slider_demo.py  Project: cadik/opendr
def get_renderer():
    import chumpy as ch
    from opendr.everything import *

    # Load mesh
    m = load_mesh('/Users/matt/geist/OpenDR/test_dr/nasa_earth.obj')
    m.v += ch.array([0,0,3.])
    w, h = (320, 240)
    trans = ch.array([[0,0,0]])

    # Construct renderer
    rn = TexturedRenderer()
    rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=trans+m.v, f=m.f, texture_image=m.texture_image[:,:,::-1], ft=m.ft, vt=m.vt, bgcolor=ch.zeros(3))
    rn.vc = SphericalHarmonics(vn=VertNormals(v=rn.v, f=rn.f), components=ch.array([4.,0.,0.,0.]), light_color=ch.ones(3))

    return rn
Example #27
File: renderers.py  Project: zoq/DeepHuman
def _project_vertices(v, w, h, cam_r, cam_t):
    """projects vertices onto image plane"""
    V = ch.array(v)
    U = ProjectPointsOrthogonal(v=V,
                                f=[w, w],
                                c=[w / 2., h / 2.],
                                k=ch.zeros(5),
                                t=cam_t,
                                rt=cam_r)
    return U
Example #28
def _project_vertices(v, w, h, cam_r, cam_t):
    """projects vertices onto image plane"""
    V = ch.array(v)
    U = ProjectPoints(
        v=V,
        f=[w * 2, w * 2],
        c=[w / 2., h / 2.],  # camera intrinsics
        k=ch.zeros(5),
        t=cam_t,
        rt=cam_r)
    return U
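
Usage sketch for the projection helper above (hypothetical point cloud, camera at the origin):

import numpy as np
import chumpy as ch

w, h = 320, 240
v = np.random.rand(8, 3) + np.array([0., 0., 3.])  # points in front of the camera
U = _project_vertices(v, w, h, cam_r=ch.zeros(3), cam_t=ch.zeros(3))
print(U.r.shape)  # (8, 2) pixel coordinates, differentiable w.r.t. cam_r and cam_t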
Example #29
def initialize_camera(model,
                      j2d,
                      img,
                      init_pose,
                      flength=flength):
    """Initialize camera translation and body orientation
    :param model: SMPL model
    :param j2d: 14x2 array of CNN joints
    :param img: h x w x 3 image
    :param init_pose: 72D vector of pose parameters used for initialization
    :param flength: camera focal length (kept fixed)
    :returns: a tuple containing the estimated camera and the optimized 3D body orientation
    """
    # optimize camera translation and body orientation based on torso joints
    # LSP torso ids:
    # 2=right hip, 3=left hip, 8=right shoulder, 9=left shoulder
    torso_cids = [2, 3, 8, 9]
    # corresponding SMPL torso ids
    torso_smpl_ids = [2, 1, 17, 16]

    center = np.array([img.shape[1] / 2, img.shape[0] / 2])
    rt = ch.zeros(3)    # initial camera rotation
    init_t = guess_init(model, flength, j2d, init_pose)
    t = ch.array(init_t)    # initial camera translation

    opt_pose = ch.array(init_pose)
    _, A_global = global_rigid_transformation(opt_pose, model.J, model.kintree_table, xp=ch)
    Jtr = ch.vstack([g[:3, 3] for g in A_global])

    # initialize the camera and project SMPL joints
    cam = ProjectPoints(f=np.array([flength, flength]), rt=rt, t=t, k=np.zeros(5), c=center)
    cam.v = Jtr     # project SMPL joints

    # optimize for camera translation and body orientation
    free_variables = [cam.t, opt_pose[:3]]
    ch.minimize(
        # data term defined over torso joints...
        {'cam': j2d[torso_cids] - cam[torso_smpl_ids],
         # ...plus a regularizer for the camera translation
         'cam_t': 1e2 * (cam.t[2] - init_t[2])},
        x0=free_variables, method='dogleg', callback=None,
        options={'maxiter': 100, 'e_3': .0001, 'disp': 0})

    return cam, opt_pose[:3].r
Example #30
def load_BFM_2017(fname):
    '''
    Loads BFM 2017 in h5 file format and returns a chumpy object and all model parameters
    '''

    with h5py.File(fname, 'r') as f:
        shape_mean = f['shape']['model']['mean'][:]
        shape_pcaBasis = f['shape']['model']['pcaBasis'][:]
        shape_pcaVariance = f['shape']['model']['pcaVariance'][:]

        expression_mean = f['expression']['model']['mean'][:]
        expression_pcaBasis = f['expression']['model']['pcaBasis'][:]
        expression_pcaVariance = f['expression']['model']['pcaVariance'][:]

        color_mean = f['color']['model']['mean'][:]
        color_pcaBasis = f['color']['model']['pcaBasis'][:]
        color_pcaVariance = f['color']['model']['pcaVariance'][:]

        shape_coeffs = ch.zeros(shape_pcaBasis.shape[1])
        exp_coeffs = ch.zeros(expression_pcaBasis.shape[1])
        color_coeffs = ch.zeros(color_pcaBasis.shape[1])

        sc = ch.diag(np.sqrt(shape_pcaVariance)).dot(shape_coeffs)
        ec = ch.diag(np.sqrt(expression_pcaVariance)).dot(exp_coeffs)
        cc = ch.diag(np.sqrt(color_pcaVariance)).dot(color_coeffs)
        v_bfm = ch.array(shape_mean).reshape(-1,3) + ch.array(shape_pcaBasis).dot(sc).reshape(-1,3) + \
                ch.array(expression_mean).reshape(-1, 3) + ch.array(expression_pcaBasis).dot(ec).reshape(-1, 3)
        c_bfm = ch.array(color_mean).reshape(
            -1, 3) + ch.array(color_pcaBasis).dot(cc).reshape(-1, 3)
        return {
            'verts': v_bfm,
            'color': c_bfm,
            'shape_coeffs': shape_coeffs,
            'exp_coeffs': exp_coeffs,
            'color_coeffs': color_coeffs
        }
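
A hedged usage sketch (the h5 path is a placeholder for the BFM 2017 model file):

import numpy as np

bfm = load_BFM_2017('/path/to/model2017-1_bfm_nomouth.h5')
# Sample random shape coefficients; verts and color update reactively.
bfm['shape_coeffs'][:] = np.random.randn(bfm['shape_coeffs'].r.size)
verts = bfm['verts'].r   # Nx3 vertices of the sampled face
colors = bfm['color'].r  # Nx3 per-vertex albedo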
Example #31
def proj_smpl_onto_img(img, smpl, pose, shape, cam_f, cam_t):
    if isinstance(cam_f, float):
        cam_f = np.array([cam_f, cam_f])

    smpl.pose[:] = pose
    smpl.betas[:] = shape
    center = np.array([img.shape[1] / 2, img.shape[0] / 2])
    cam = ProjectPoints(
        f=cam_f, rt=ch.zeros(3), t=cam_t, k=np.zeros(5), c=center)

    cam.v = smpl.r
    for v in cam.r:
        r = int(round(v[1]))
        c = int(round(v[0]))
        if 0 <= r < img.shape[0] and 0 <= c < img.shape[1]:
            img[r, c, :] = np.asarray([255, 255, 255])

    return img
Example #32
def smooth_rings(gar_rings, verts):
    """
    :param gar_rings:
    :param verts:
    :returns
    """
    error = ch.zeros([0, 3])

    for ring in gar_rings:
        N = len(ring)
        aring = np.array(ring)
        ring_0 = aring[np.arange(0, N)]
        ring_m1 = aring[np.array([N - 1] + list(range(0, N - 1)))]
        ring_p1 = aring[np.array(list(range(1, N)) + [0])]
        # second difference along the closed ring (discrete Laplacian)
        err_ring = verts[ring_m1] - 2 * verts[ring_0] + verts[ring_p1]
        error = ch.vstack([error, err_ring])

    return error
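
A toy check of the ring term (hypothetical four-vertex ring):

import numpy as np
import chumpy as ch

verts = ch.array(np.random.rand(4, 3))
err = smooth_rings([[0, 1, 2, 3]], verts)
print(err.shape)  # (4, 3): one second-difference row per ring vertex
# A degenerate ring with all vertices equal gives exactly zero error.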
Example #33
def setupCamera(v, cameraParams):

    chDistMat = geometry.Translate(x=0,
                                   y=cameraParams['Zshift'],
                                   z=cameraParams['chCamHeight'])

    chRotElMat = geometry.RotateX(a=-cameraParams['chCamEl'])

    chCamModelWorld = ch.dot(chDistMat, chRotElMat)

    flipZYRotation = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0, 1.0, 0.0],
                               [0.0, -1.0, 0, 0.0], [0.0, 0.0, 0.0, 1.0]])

    chMVMat = ch.dot(chCamModelWorld, flipZYRotation)

    chInvCam = ch.inv(chMVMat)

    modelRotation = chInvCam[0:3, 0:3]

    chRod = opendr.geometry.Rodrigues(rt=modelRotation).reshape(3)
    chTranslation = chInvCam[0:3, 3]

    translation, rotation = (chTranslation, chRod)

    camera = ProjectPoints(v=v,
                           rt=rotation,
                           t=translation,
                           f=1000 * cameraParams['chCamFocalLength'] *
                           cameraParams['a'],
                           c=cameraParams['c'],
                           k=ch.zeros(5))

    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0., 0.0],
                              [0.0, 0., -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])

    camera.openglMat = flipXRotation  #Needed to match OpenGL flipped axis.

    return camera, modelRotation, chMVMat
Example #34
def get_capsules(model, wrt_betas=None, length_regs=None, rad_regs=None):
    from opendr.geometry import Rodrigues
    if length_regs is not None:
        n_shape_dofs = length_regs.shape[0] - 1
    else:
        n_shape_dofs = model.betas.r.size
    segm = np.argmax(model.weights_prior, axis=1)
    J_off = ch.zeros((len(joint2name), 3))
    rots = rots0.copy()
    mujoco_t_mid = [0, 3, 6, 9]
    if wrt_betas is not None:
        # if we want to differentiate wrt betas (shape), we must have the
        # regressors...
        assert (length_regs is not None and rad_regs is not None)
        # ... and betas must be a chumpy object
        assert (hasattr(wrt_betas, 'dterms'))
        pad = ch.concatenate(
            (wrt_betas, ch.zeros(n_shape_dofs - len(wrt_betas)), ch.ones(1)))
        lengths = pad.dot(length_regs)
        rads = pad.dot(rad_regs)
    else:
        lengths = ch.ones(len(joint2name))
        rads = ch.ones(len(joint2name))
    betas = wrt_betas if wrt_betas is not None else model.betas
    n_betas = len(betas)
    # the joint regressors are the original, pre-optimized ones
    # (middle of the part frontier)
    myJ_regressor = model.J_regressor_prior
    myJ0 = ch.vstack(
        (ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 0] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 0]),
         ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 1] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 1]),
         ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 2] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 2]))).T
    # with small adjustments for hips, spine and feet
    myJ = ch.vstack(
        [ch.concatenate([myJ0[0, 0], (
            .6 * myJ0[0, 1] + .2 * myJ0[1, 1] + .2 * myJ0[2, 1]), myJ0[9, 2]]),
         ch.vstack([myJ0[i] for i in range(1, 7)]), ch.concatenate(
             [myJ0[7, 0], (1.1 * myJ0[7, 1] - .1 * myJ0[4, 1]), myJ0[7, 2]]),
         ch.concatenate(
             [myJ0[8, 0], (1.1 * myJ0[8, 1] - .1 * myJ0[5, 1]), myJ0[8, 2]]),
         ch.concatenate(
             [myJ0[9, 0], myJ0[9, 1], (.2 * myJ0[9, 2] + .8 * myJ0[12, 2])]),
         ch.vstack([myJ0[i] for i in range(10, 24)])])
    capsules = []
    # create one capsule per mujoco joint
    for ijoint, segms in enumerate(mujoco2segm):
        if wrt_betas is None:
            vidxs = np.asarray([segm == k for k in segms]).any(axis=0)
            verts = model.v_template[vidxs].r
            dims = (verts.max(axis=0) - verts.min(axis=0))
            rads[ijoint] = .5 * ((dims[(np.argmax(dims) + 1) % 3] + dims[(
                np.argmax(dims) + 2) % 3]) / 4.)
            lengths[ijoint] = max(dims) - 2. * rads[ijoint].r
        # the core joints are different, since the capsule is not in the joint
        # but in the middle
        if ijoint in mujoco_t_mid:
            len_offset = ch.vstack([ch.zeros(1), ch.abs(lengths[ijoint]) / 2.,
                                    ch.zeros(1)]).reshape(3, 1)
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(
                    3, 1) - Rodrigues(rots[ijoint]).dot(len_offset),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        else:
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        caps.id = ijoint
        capsules.append(caps)
    return capsules
Example #35
def get_capsules(model, wrt_betas=None, length_regs=None, rad_regs=None):
    from opendr.geometry import Rodrigues
    if length_regs is not None:
        n_shape_dofs = length_regs.shape[0] - 1
    else:
        n_shape_dofs = model.betas.r.size
    segm = np.argmax(model.weights_prior, axis=1)
    J_off = ch.zeros((len(joint2name), 3))
    rots = rots0.copy()
    mujoco_t_mid = [0, 3, 6, 9]
    if wrt_betas is not None:
        # if we want to differentiate wrt betas (shape), we must have the
        # regressors...
        assert (length_regs is not None and rad_regs is not None)
        # ... and betas must be a chumpy object
        assert (hasattr(wrt_betas, 'dterms'))
        pad = ch.concatenate(
            (wrt_betas, ch.zeros(n_shape_dofs - len(wrt_betas)), ch.ones(1)))
        lengths = pad.dot(length_regs)
        rads = pad.dot(rad_regs)
    else:
        lengths = ch.ones(len(joint2name))
        rads = ch.ones(len(joint2name))
    betas = wrt_betas if wrt_betas is not None else model.betas
    n_betas = len(betas)
    # the joint regressors are the original, pre-optimized ones
    # (middle of the part frontier)
    myJ_regressor = model.J_regressor_prior
    myJ0 = ch.vstack((ch.ch.MatVecMult(
        myJ_regressor, model.v_template[:, 0] +
        model.shapedirs[:, :, :n_betas].dot(betas)[:, 0]),
                      ch.ch.MatVecMult(
                          myJ_regressor, model.v_template[:, 1] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 1]),
                      ch.ch.MatVecMult(
                          myJ_regressor, model.v_template[:, 2] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 2]))).T
    # with small adjustments for hips, spine and feet
    myJ = ch.vstack([
        ch.concatenate([
            myJ0[0, 0], (.6 * myJ0[0, 1] + .2 * myJ0[1, 1] + .2 * myJ0[2, 1]),
            myJ0[9, 2]
        ]),
        ch.vstack([myJ0[i] for i in range(1, 7)]),
        ch.concatenate(
            [myJ0[7, 0], (1.1 * myJ0[7, 1] - .1 * myJ0[4, 1]), myJ0[7, 2]]),
        ch.concatenate(
            [myJ0[8, 0], (1.1 * myJ0[8, 1] - .1 * myJ0[5, 1]), myJ0[8, 2]]),
        ch.concatenate(
            [myJ0[9, 0], myJ0[9, 1], (.2 * myJ0[9, 2] + .8 * myJ0[12, 2])]),
        ch.vstack([myJ0[i] for i in range(10, 24)])
    ])
    capsules = []
    # create one capsule per mujoco joint
    for ijoint, segms in enumerate(mujoco2segm):
        if wrt_betas is None:
            vidxs = np.asarray([segm == k for k in segms]).any(axis=0)
            verts = model.v_template[vidxs].r
            dims = (verts.max(axis=0) - verts.min(axis=0))
            rads[ijoint] = .5 * ((dims[(np.argmax(dims) + 1) % 3] + dims[
                (np.argmax(dims) + 2) % 3]) / 4.)
            lengths[ijoint] = max(dims) - 2. * rads[ijoint].r
        # the core joints are different, since the capsule is not in the joint
        # but in the middle
        if ijoint in mujoco_t_mid:
            len_offset = ch.vstack(
                [ch.zeros(1),
                 ch.abs(lengths[ijoint]) / 2.,
                 ch.zeros(1)]).reshape(3, 1)
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1) -
                Rodrigues(rots[ijoint]).dot(len_offset), rots[ijoint],
                rads[ijoint], lengths[ijoint])
        else:
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        caps.id = ijoint
        capsules.append(caps)
    return capsules
Example #36
def optimize_on_joints(j2d,
                       model,
                       cam,
                       img,
                       prior,
                       try_both_orient,
                       body_orient,
                       n_betas=10,
                       regs=None,
                       conf=None,
                       viz=False):
    """Fit the model to the given set of joints, given the estimated camera
    :param j2d: 14x2 array of CNN joints
    :param model: SMPL model
    :param cam: estimated camera
    :param img: h x w x 3 image 
    :param prior: mixture of gaussians pose prior
    :param try_both_orient: boolean, if True both body_orient and its flip are considered for the fit
    :param body_orient: 3D vector, initialization for the body orientation
    :param n_betas: number of shape coefficients considered during optimization
    :param regs: regressors for capsules' axis and radius, if not None enables the interpenetration error term
    :param conf: 14D vector storing the confidence values from the CNN
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the optimized model, its joints projected on image space, the camera translation
    """
    t0 = time()
    # define the mapping LSP joints -> SMPL joints
    # cids are joint ids for LSP:
    cids = list(range(12)) + [13]
    # joint ids for SMPL
    # SMPL does not have a joint for head, instead we use a vertex for the head
    # and append it later.
    smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]

    # the vertex id for the joint corresponding to the head
    head_id = 411

    # weights assigned to each joint during optimization;
    # the definition of hips in SMPL and LSP is significantly different so set
    # their weights to zero
    base_weights = np.array(
        [1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64)

    if try_both_orient:
        flipped_orient = cv2.Rodrigues(body_orient)[0].dot(
            cv2.Rodrigues(np.array([0., np.pi, 0]))[0])
        flipped_orient = cv2.Rodrigues(flipped_orient)[0].ravel()
        orientations = [body_orient, flipped_orient]
    else:
        orientations = [body_orient]

    if try_both_orient:
        # store here the final error for both orientations,
        # and pick the orientation resulting in the lowest error
        errors = []

    svs = []
    cams = []
    for o_id, orient in enumerate(orientations):
        # initialize the shape to the mean shape in the SMPL training set
        betas = ch.zeros(n_betas)

        # initialize the pose by using the optimized body orientation and the
        # pose prior
        init_pose = np.hstack((orient, prior.weights.dot(prior.means)))

        # instantiate the model:
        # verts_decorated allows us to define how many
        # shape coefficients (directions) we want to consider (here, n_betas)
        sv = verts_decorated(
            trans=ch.zeros(3),
            pose=ch.array(init_pose),
            v_template=model.v_template,
            J=model.J_regressor,
            betas=betas,
            shapedirs=model.shapedirs[:, :, :n_betas],
            weights=model.weights,
            kintree_table=model.kintree_table,
            bs_style=model.bs_style,
            f=model.f,
            bs_type=model.bs_type,
            posedirs=model.posedirs)

        # make the SMPL joints depend on betas
        Jdirs = np.dstack([model.J_regressor.dot(model.shapedirs[:, :, i])
                           for i in range(len(betas))])
        J_onbetas = ch.array(Jdirs).dot(betas) + model.J_regressor.dot(
            model.v_template.r)

        # get joint positions as a function of model pose, betas and trans
        (_, A_global) = global_rigid_transformation(
            sv.pose, J_onbetas, model.kintree_table, xp=ch)
        Jtr = ch.vstack([g[:3, 3] for g in A_global]) + sv.trans

        # add the head joint, corresponding to a vertex...
        Jtr = ch.vstack((Jtr, sv[head_id]))

        # ... and add the joint id to the list
        if o_id == 0:
            smpl_ids.append(len(Jtr) - 1)

        # update the weights using confidence values
        weights = (base_weights * conf[cids]
                   if conf is not None else base_weights)

        # project SMPL joints on the image plane using the estimated camera
        cam.v = Jtr

        # data term: distance between observed and estimated joints in 2D
        obj_j2d = lambda w, sigma: (
            w * weights.reshape((-1, 1)) * GMOf((j2d[cids] - cam[smpl_ids]), sigma))

        # mixture of gaussians pose prior
        pprior = lambda w: w * prior(sv.pose)
        # joint angles pose prior, defined over a subset of pose parameters:
        # 55: left elbow,  90deg bend at -np.pi/2
        # 58: right elbow, 90deg bend at np.pi/2
        # 12: left knee,   90deg bend at np.pi/2
        # 15: right knee,  90deg bend at np.pi/2
        alpha = 10
        my_exp = lambda x: alpha * ch.exp(x)
        obj_angle = lambda w: w * ch.concatenate([my_exp(sv.pose[55]), my_exp(-sv.pose[58]),
                                                  my_exp(-sv.pose[12]), my_exp(-sv.pose[15])])

        if viz:
            import matplotlib.pyplot as plt
            plt.ion()

            def on_step(_):
                """Create visualization."""
                plt.figure(1, figsize=(10, 10))
                plt.subplot(1, 2, 1)
                # show optimized joints in 2D
                tmp_img = img.copy()
                for coord, target_coord in zip(
                        np.around(cam.r[smpl_ids]).astype(int),
                        np.around(j2d[cids]).astype(int)):
                    if (coord[0] < tmp_img.shape[1] and coord[0] >= 0 and
                            coord[1] < tmp_img.shape[0] and coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(coord), 3, [0, 0, 255])
                    if (target_coord[0] < tmp_img.shape[1] and
                            target_coord[0] >= 0 and
                            target_coord[1] < tmp_img.shape[0] and
                            target_coord[1] >= 0):
                        cv2.circle(tmp_img, tuple(target_coord), 3,
                                   [0, 255, 0])
                plt.imshow(tmp_img[:, :, ::-1])
                plt.draw()
                plt.show()
                plt.pause(1e-2)

            on_step(None)
        else:
            on_step = None

        if regs is not None:
            # interpenetration term
            sp = SphereCollisions(
                pose=sv.pose, betas=sv.betas, model=model, regs=regs)
            sp.no_hands = True
        # weight configuration used in the paper, with joints + confidence values from the CNN
        # (all the weights used in the code were obtained via grid search, see the paper for more details)
        # the first list contains the weights for the pose priors,
        # the second list contains the weights for the shape prior
        opt_weights = zip([4.04 * 1e2, 4.04 * 1e2, 57.4, 4.78],
                          [1e2, 5 * 1e1, 1e1, .5 * 1e1])

        # run the optimization in 4 stages, progressively decreasing the
        # weights for the priors
        for stage, (w, wbetas) in enumerate(opt_weights):
            _LOGGER.info('stage %01d', stage)
            objs = {}

            objs['j2d'] = obj_j2d(1., 100)

            objs['pose'] = pprior(w)

            objs['pose_exp'] = obj_angle(0.317 * w)

            objs['betas'] = wbetas * betas

            if regs is not None:
                objs['sph_coll'] = 1e3 * sp

            ch.minimize(
                objs,
                x0=[sv.betas, sv.pose],
                method='dogleg',
                callback=on_step,
                options={'maxiter': 100,
                         'e_3': .0001,
                         'disp': 0})

        t1 = time()
        _LOGGER.info('elapsed %.05f', (t1 - t0))
        if try_both_orient:
            errors.append((objs['j2d'].r**2).sum())
        svs.append(sv)
        cams.append(cam)

    if try_both_orient and errors[0] > errors[1]:
        choose_id = 1
    else:
        choose_id = 0
    if viz:
        plt.ioff()
    return (svs[choose_id], cams[choose_id].r, cams[choose_id].t.r)
Example #37
def inverseKinematicCh(pts3D, fullposeInit = np.zeros((48,), dtype=np.float32),
                       transInit = np.array([0., 0., -0.45]), betaInit = np.zeros((10,), dtype=np.float32)):
    '''
    Performs inverse kinematic optimization when given the 3d points
    3D points --> MANO params
    :param pts3D:
    :param fullposeInit:
    :param transInit:
    :param betaInit:
    :return: fullpose, trans and beta vector
    '''
    import chumpy as ch
    from mano.chumpy.smpl_handpca_wrapper_HAND_only import load_model, load_model_withInputs
    from ghope.constraints import Constraints

    assert pts3D.shape == (21,3)

    constraints = Constraints()
    validTheta = ch.zeros(len(constraints.validThetaIDs))
    fullposeCh = constraints.fullThetaMat.dot(ch.expand_dims(validTheta,1))[:,0]
    transCh = ch.array(undo_chumpy(transInit))
    betaCh = ch.array(undo_chumpy(betaInit))

    pts3DGT = pts3D

    m = load_model_withInputs(MANO_MODEL_PATH, fullposeCh, transCh, betaCh, ncomps=6, flat_hand_mean=True)

    if fullposeInit.shape == (16,3,3):
        fpList = []
        for i in range(16):
            fpList.append(cv2.Rodrigues(fullposeInit[i])[0][:, 0])
        validTheta[:] = np.concatenate(fpList, 0)[constraints.validThetaIDs]
    else:
        validTheta[:] = undo_chumpy(fullposeInit[constraints.validThetaIDs])

    loss = {}

    projPts = m.J_transformed
    projPtsGT = pts3DGT
    loss['joints'] = (projPts - projPtsGT)

    # m.fullpose[Constraints().invalidThetaIDs] = 0.
    thetaConstMin, thetaConstMax = Constraints().getHandJointConstraintsCh(fullposeCh[3:])
    # loss['constMin'] = 1e4 * thetaConstMin
    # loss['constMax'] = 1e4 * thetaConstMax
    freeVars = [validTheta, transCh]

    def cbPass(_):
        # log the current worst-case joint error at each optimizer step
        print(np.max(np.abs(m.J_transformed.r - pts3DGT)))

    ch.minimize(loss, x0=freeVars, callback=cbPass, method='dogleg', options={'maxiter': 10})

    if True:  # debug visualization: fitted joints (red) vs. targets (blue)
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        jts = undo_chumpy(m.J_transformed)
        ax.scatter(jts[:, 0], jts[:, 1], jts[:, 2], c='r')
        ax.scatter(pts3DGT[:, 0], pts3DGT[:, 1], pts3DGT[:, 2], c='b')
        plt.show()

    return undo_chumpy(m.fullpose), undo_chumpy(m.trans), undo_chumpy(m.betas)
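
A hypothetical call to the function above, assuming the module-level names it relies on (MANO_MODEL_PATH, undo_chumpy) are defined as in the source repository; the input points here are random placeholders.

import numpy as np

pts3D = np.random.randn(21, 3).astype(np.float32) * 0.05  # 21 hand joints, in meters
fullpose, trans, beta = inverseKinematicCh(pts3D)
print(fullpose.shape, trans.shape, beta.shape)  # expected: (48,), (3,), (10,)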
Example #38
def initialize_camera(model,
                      j2d,
                      img,
                      init_pose,
                      flength=5000.,
                      pix_thsh=25.,
                      viz=False):
    """Initialize camera translation and body orientation
    :param model: SMPL model
    :param j2d: 14x2 array of CNN joints
    :param img: h x w x 3 image 
    :param init_pose: 72D vector of pose parameters used for initialization
    :param flength: camera focal length (kept fixed)
    :param pix_thsh: threshold (in pixels); if the 2D distance between the shoulder
                     joints is lower than pix_thsh, the body orientation is considered
                     ambiguous (so a fit is run on both the estimate and its flip)
    :param viz: boolean, if True enables visualization during optimization
    :returns: a tuple containing the estimated camera,
              a boolean deciding if both the optimized body orientation and its flip should be considered,
              3D vector for the body orientation
    """
    # optimize camera translation and body orientation based on torso joints
    # LSP torso ids:
    # 2=right hip, 3=left hip, 8=right shoulder, 9=left shoulder
    torso_cids = [2, 3, 8, 9]
    # corresponding SMPL torso ids
    torso_smpl_ids = [2, 1, 17, 16]

    center = np.array([img.shape[1] / 2, img.shape[0] / 2])

    # initialize camera rotation
    rt = ch.zeros(3)
    # initialize camera translation
    _LOGGER.info('initializing translation via similar triangles')
    init_t = guess_init(model, flength, j2d, init_pose)
    t = ch.array(init_t)

    # check how close the shoulder joints are
    try_both_orient = np.linalg.norm(j2d[8] - j2d[9]) < pix_thsh

    opt_pose = ch.array(init_pose)
    (_, A_global) = global_rigid_transformation(
        opt_pose, model.J, model.kintree_table, xp=ch)
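    # each A_global[i] is a 4x4 rigid transform; its last column holds that joint's 3D position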
    Jtr = ch.vstack([g[:3, 3] for g in A_global])

    # initialize the camera
    cam = ProjectPoints(
        f=np.array([flength, flength]), rt=rt, t=t, k=np.zeros(5), c=center)

    # we are going to project the SMPL joints
    cam.v = Jtr

    if viz:
        viz_img = img.copy()

        # draw the target (CNN) joints
        for coord in np.around(j2d).astype(int):
            if (coord[0] < img.shape[1] and coord[0] >= 0 and
                    coord[1] < img.shape[0] and coord[1] >= 0):
                cv2.circle(viz_img, tuple(coord), 3, [0, 255, 0])

        import matplotlib.pyplot as plt
        plt.ion()

        # draw optimized joints at each iteration
        def on_step(_):
            """Draw a visualization."""
            plt.figure(1, figsize=(5, 5))
            plt.subplot(1, 1, 1)
            viz_img = img.copy()
            for coord in np.around(cam.r[torso_smpl_ids]).astype(int):
                if (coord[0] < viz_img.shape[1] and coord[0] >= 0 and
                        coord[1] < viz_img.shape[0] and coord[1] >= 0):
                    cv2.circle(viz_img, tuple(coord), 3, [0, 0, 255])
            plt.imshow(viz_img[:, :, ::-1])
            plt.draw()
            plt.show()
            plt.pause(1e-3)
    else:
        on_step = None
    # optimize for camera translation and body orientation
    free_variables = [cam.t, opt_pose[:3]]
    ch.minimize(
        # data term defined over torso joints...
        {'cam': j2d[torso_cids] - cam[torso_smpl_ids],
         # ...plus a regularizer for the camera translation
         'cam_t': 1e2 * (cam.t[2] - init_t[2])},
        x0=free_variables,
        method='dogleg',
        callback=on_step,
        options={'maxiter': 100,
                 'e_3': .0001,
                 # set 'disp' to 1 for verbose optimizer output
                 'disp': 0})
    if viz:
        plt.ioff()
    return (cam, try_both_orient, opt_pose[:3].r)
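
A hypothetical call, assuming a loaded SMPL model, an h x w x 3 image, a 14x2 array of CNN joints, and a 72-D initial pose (e.g. the mean pose) are already in scope.

cam, try_both_orient, body_orient = initialize_camera(
    model, j2d, img, init_pose, flength=5000., pix_thsh=25., viz=False)
# cam.t.r now holds the estimated camera translation; body_orient is a 3-vector
# (axis-angle); try_both_orient flags whether the flipped orientation should also be fit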
Example #39
    def test_occlusion(self):
        if visualize:
            import matplotlib.pyplot as plt
            plt.ion()

        # Create renderer
        import chumpy as ch
        import numpy as np
        from opendr.renderer import TexturedRenderer, ColoredRenderer
        #rn = TexturedRenderer()
        rn = ColoredRenderer()

        # Assign attributes to renderer
        from util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
        rn.texture_image = m.texture_image
        rn.ft = m.ft
        rn.vt = m.vt
        m.v[:,2] = np.mean(m.v[:,2])

        # the red mesh (offset by t0) renders in front; the green mesh (offset by t1) behind it
        t0 = ch.array([1,0,.1])
        t1 = ch.array([-1,0,.1])
        v0 = ch.array(m.v) + t0

        if False:
            v1 = ch.array(m.v*.4 + np.array([0,0,3.8])) + t1
        else:
            v1 = ch.array(m.v) + t1
        vc0 = v0*0 + np.array([[.4,0,0]])
        vc1 = v1*0 + np.array([[0,.4,0]])
        vc = ch.vstack((vc0, vc1))

        v = ch.vstack((v0, v1))
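        # duplicate the face topology for the second mesh, offsetting vertex indices past the first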
        f = np.vstack((m.f, m.f+len(v0)))

        w, h = (320, 240)
        rn.camera = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
        rn.camera.t = ch.array([0,0,-2.5])
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        m.vc = v.r*0 + np.array([[1,0,0]])
        rn.set(v=v, f=f, vc=vc)

        # shift the meshes so they overlap, with t0 slightly nearer the camera than t1
        t0[:] = np.array([1.4, 0, .1-.02])
        t1[:] = np.array([-0.6, 0, .1+.02])

        target = rn.r

        if visualize:
            plt.figure()
            plt.imshow(target)
            plt.title('target')

            plt.figure()
            plt.show()

        im_orig = rn.r.copy()

        from cvwrap import cv2

        eps_emp = .02   # finite-difference step for the empirical derivative
        eps_pred = .02  # scale applied to the predicted derivative for comparison

        # blur = lambda x: cv2.blur(x, ksize=(5, 5))
        blur = lambda x: x  # identity; swap in the blur above to compare smoothed derivatives
        for tr in [t0, t1]:
            # empirically determined upper bounds on the summed derivative mismatch
            if tr is t0:
                sum_limits = np.array([2.1e+2, 6.9e+1, 1.6e+2])
            else:
                sum_limits = np.array([1., 5., 4.])

            if visualize:
                plt.figure()
            for i in range(3):
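                # predicted derivative: rn.dr_wrt returns a sparse Jacobian of the rendered pixels w.r.t. tr[i]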
                dr_pred = np.array(rn.dr_wrt(tr[i]).todense()).reshape(rn.shape) * eps_pred
                dr_pred = blur(dr_pred)

                # central differences: render at tr[i] +/- eps_emp/2, then restore the original value
                tr[i] = tr[i].r + eps_emp/2.
                rn_greater = rn.r.copy()
                tr[i] = tr[i].r - eps_emp
                rn_lesser = rn.r.copy()
                tr[i] = tr[i].r + eps_emp/2.

                dr_emp = blur((rn_greater - rn_lesser) * eps_pred / eps_emp)

                dr_pred_shown = np.clip(dr_pred, -.5, .5) + .5
                dr_emp_shown = np.clip(dr_emp, -.5, .5) + .5

                if visualize:
                    plt.subplot(3,3,i+1)
                    plt.imshow(dr_pred_shown)
                    plt.title('pred')
                    plt.axis('off')

                    plt.subplot(3,3,3+i+1)
                    plt.imshow(dr_emp_shown)
                    plt.title('empirical')
                    plt.axis('off')

                    plt.subplot(3,3,6+i+1)

                diff = np.abs(dr_emp - dr_pred)
                if visualize:
                    plt.imshow(diff)
                diff = diff.ravel()
                if visualize:
                    plt.title('diff (sum: %.2e)'  % (np.sum(diff)))
                    plt.axis('off')

                self.assertLess(np.sum(diff), sum_limits[i])
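
The assertion above closes a standard derivative test: the renderer's analytic Jacobian (rn.dr_wrt) is compared against a central-difference estimate of the same derivative. The same idea in its simplest numpy form, on a made-up scalar function with an illustrative tolerance:

import numpy as np

f = lambda x: np.sin(x)
df_analytic = lambda x: np.cos(x)
x0, h = 0.3, 1e-4
# central differences: evaluate at x0 +/- h/2 and divide by the step h
df_empirical = (f(x0 + h / 2.) - f(x0 - h / 2.)) / h
assert abs(df_analytic(x0) - df_empirical) < 1e-8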