Example #1
def chumpy_get_H(p1, p2):
    """ Compute differentiable homography from p1 to p2.
    
    Parameters
    ----------
    p1, p2 : array_like, shape (4,2)
        Point correspondences to match, as (x, y) rows.
    """
    # Normalize pixel coordinates to [-1, 1] for numerical stability.
    xmax = 1024
    ymax = 576
    p1 = 2 * p1 / ch.array([[xmax, ymax]]) - 1.0
    p2 = 2 * p2 / ch.array([[xmax, ymax]]) - 1.0

    N = p1.shape[0]
    A1 = ch.vstack((ch.zeros((3, N)), -p1.T, -ch.ones(
        (1, N)), p2[:, 1] * p1[:, 0], p2[:, 1] * p1[:, 1], p2[:, 1])).T
    A2 = ch.vstack((p1.T, ch.ones((1, N)), ch.zeros(
        (3, N)), -p2[:, 0] * p1[:, 0], -p2[:, 0] * p1[:, 1], -p2[:, 0])).T
    A = ch.vstack((A1, A2))

    # The homography is the null-space direction of A: the right singular
    # vector of A^T A with the smallest singular value (last row of V).
    U, S, V = ch.linalg.svd(A.T.dot(A))
    H_new = V[-1, :].reshape((3, 3))

    # Re-normalize
    ML = ch.array([[xmax / 2.0, 0.0, xmax / 2.0],
                   [0.0, ymax / 2.0, ymax / 2.0],
                   [0.0, 0.0, 1.0]])
    MR = ch.array([[2.0 / xmax, 0.0, -1.0],
                   [0.0, 2.0 / ymax, -1.0],
                   [0.0, 0.0, 1.0]])
    H_new = ML.dot(H_new).dot(MR)

    return H_new / H_new[2, 2]
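A minimal usage sketch (the point values are made up for illustration; chumpy exposes Jacobians through dr_wrt):

import chumpy as ch

p1 = ch.array([[0., 0.], [1024., 0.], [1024., 576.], [0., 576.]])
p2 = ch.array([[12., 10.], [1000., 8.], [1015., 566.], [4., 570.]])
H = chumpy_get_H(p1, p2)   # 3x3 differentiable homography
J = H.dr_wrt(p1)           # 9x8 Jacobian of H w.r.t. the source points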
Example #2
    def chumpy_get_H(p1, p2):
        """ Compute differentiable homography from p1 to p2.
        """
        N = p1.shape[0]
        A1 = ch.vstack((ch.zeros((3, N)), -p1.T, -ch.ones(
            (1, N)), p2[:, 1] * p1[:, 0], p2[:, 1] * p1[:, 1], p2[:, 1])).T
        A2 = ch.vstack((p1.T, ch.ones((1, N)), ch.zeros(
            (3, N)), -p2[:, 0] * p1[:, 0], -p2[:, 0] * p1[:, 1], -p2[:, 0])).T
        A = ch.vstack((A1, A2))

        U, S, V = ch.linalg.svd(A.T.dot(A))
        H_new = V[-1, :].reshape((3, 3))
        return H_new
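This variant omits the coordinate normalization of Example #1, so A.T.dot(A) can become poorly conditioned for pixel-scale inputs. One option is to normalize the points before calling (a sketch, reusing the image-size constants of Example #1):

p1n = 2 * p1 / ch.array([[1024., 576.]]) - 1.0
p2n = 2 * p2 / ch.array([[1024., 576.]]) - 1.0
H = chumpy_get_H(p1n, p2n)  # then de-normalize as in Example #1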
Example #3
    def _set_up(self):
        self.v_shaped = self.shapedirs.dot(self.betas) + self.v_template

        self.v_shaped_personal = self.v_shaped + self.v_personal
        if sp.issparse(self.J_regressor):
            self.J = sp_dot(self.J_regressor, self.v_shaped)
        else:
            self.J = ch.sum(self.J_regressor.T.reshape(-1, 1, 24) *
                            self.v_shaped.reshape(-1, 3, 1),
                            axis=0).T
        self.v_posevariation = self.posedirs.dot(
            posemap(self.bs_type)(self.pose))
        self.v_poseshaped = self.v_shaped_personal + self.v_posevariation

        self.A, A_global = self._global_rigid_transformation()
        self.Jtr = ch.vstack([g[:3, 3] for g in A_global])
        self.J_transformed = self.Jtr + self.trans.reshape((1, 3))

        self.V = self.A.dot(self.weights.T)

        rest_shape_h = ch.hstack(
            (self.v_poseshaped, ch.ones((self.v_poseshaped.shape[0], 1))))
        self.v_posed = ch.sum(self.V.T * rest_shape_h.reshape(-1, 4, 1),
                              axis=1)[:, :3]
        self.v = self.v_posed + self.trans
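The last three lines are standard linear blend skinning: each rest vertex in homogeneous coordinates is transformed by a per-vertex blend of the bone transforms, v_i' = (sum_k w_ik A_k) [v_i; 1]. A NumPy equivalent of the chumpy expression (a sketch; T, W and v are stand-ins for the stacked 4x4 bone transforms, the skinning weights and the rest vertices):

import numpy as np

# T: (24, 4, 4) bone transforms, W: (V, 24) weights, v: (V, 3) rest vertices
blended = np.einsum('vk,kij->vij', W, T)          # per-vertex 4x4 transform
v_h = np.hstack([v, np.ones((v.shape[0], 1))])    # homogeneous coordinates
v_posed = np.einsum('vij,vj->vi', blended, v_h)[:, :3]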
Example #4
File: slider_demo.py  Project: Hutaimu1/cv-
def get_renderer():
    import chumpy as ch
    from opendr.everything import *

    # Load mesh
    m = load_mesh('/Users/matt/geist/OpenDR/test_dr/nasa_earth.obj')
    m.v += ch.array([0, 0, 3.])
    w, h = (320, 240)
    trans = ch.array([[0, 0, 0]])

    # Construct renderer
    rn = TexturedRenderer()
    rn.camera = ProjectPoints(v=m.v,
                              rt=ch.zeros(3),
                              t=ch.zeros(3),
                              f=ch.array([w, w]) / 2.,
                              c=ch.array([w, h]) / 2.,
                              k=ch.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=trans + m.v,
           f=m.f,
           texture_image=m.texture_image[:, :, ::-1],
           ft=m.ft,
           vt=m.vt,
           bgcolor=ch.zeros(3))
    rn.vc = SphericalHarmonics(vn=VertNormals(v=rn.v, f=rn.f),
                               components=ch.array([4., 0., 0., 0.]),
                               light_color=ch.ones(3))

    return rn
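A usage sketch (evaluating rn.r renders the image; the derivative call is the usual chumpy pattern):

rn = get_renderer()
img = rn.r                      # (240, 320, 3) float image in [0, 1]
dimg = rn.dr_wrt(rn.camera.t)   # Jacobian of the image w.r.t. camera translation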
Example #5
    def run(self, beta=None, theta=None, garment_d=None, garment_class=None):
        """Outputs body and garment of specified garment class given theta, beta and displacements."""
        if beta is not None:
            self.smpl_base.betas[:beta.shape[0]] = beta
        else:
            self.smpl_base.betas[:] = 0
        if theta is not None:
            self.smpl_base.pose[:] = theta
        else:
            self.smpl_base.pose[:] = 0
        self.smpl_base.v_personal[:] = 0
        if garment_d is not None and garment_class is not None:
            if 'skirt' not in garment_class:
                vert_indices = self.class_info[garment_class]['vert_indices']
                f = self.class_info[garment_class]['f']
                self.smpl_base.v_personal[vert_indices] = garment_d
                garment_m = Mesh(v=self.smpl_base.r[vert_indices], f=f)
            else:
                # vert_indices = self.class_info[garment_class]['vert_indices']
                f = self.class_info[garment_class]['f']

                # Blend the 24 per-bone 4x4 transforms (A reshaped to 24x16)
                # into per-vertex 4x4 transforms for the skirt.
                A = self.smpl_base.A.reshape((16, 24)).T
                skirt_V = self.skirt_skinning.dot(A).reshape((-1, 4, 4))

                verts = self.skirt_weight.dot(self.smpl_base.v_poseshaped)
                verts = verts + garment_d
                verts_h = ch.hstack((verts, ch.ones((verts.shape[0], 1))))
                verts = ch.sum(skirt_V * verts_h.reshape(-1, 1, 4),
                               axis=-1)[:, :3]
                garment_m = Mesh(v=verts, f=f)
        else:
            garment_m = None
        self.smpl_base.v_personal[:] = 0
        body_m = Mesh(v=self.smpl_base.r, f=self.smpl_base.f)
        return body_m, garment_m
Example #6
def rigid_scan_2_mesh_alignment(scan, mesh, visualize=False):
    options = {'sparse_solver': lambda A, x: cg(A, x, maxiter=2000)[0]}
    options['disp'] = 1.0
    options['delta_0'] = 0.1
    options['e_3'] = 1e-4

    s = ch.ones(1)
    r = ch.zeros(3)
    R = Rodrigues(r)
    t = ch.zeros(3)
    trafo_mesh = s*(R.dot(mesh.v.T)).T + t

    sampler = sample_from_mesh(scan, sample_type='vertices')
    s2m = ScanToMesh(scan, trafo_mesh, mesh.f, scan_sampler=sampler, signed=False, normalize=False)

    if visualize:
        # Visualization code
        mv = MeshViewer()
        mv.set_static_meshes([scan])
        tmp_mesh = Mesh(trafo_mesh.r, mesh.f)
        tmp_mesh.set_vertex_colors('light sky blue')
        mv.set_dynamic_meshes([tmp_mesh])
        def on_show(_):
            tmp_mesh = Mesh(trafo_mesh.r, mesh.f)
            tmp_mesh.set_vertex_colors('light sky blue')
            mv.set_dynamic_meshes([tmp_mesh])
    else:
        def on_show(_):
            pass

    # 's_reg' penalizes negative scale: ch.abs(s) - s is 0 for s >= 0 and -2s otherwise.
    ch.minimize(fun={'dist': s2m, 's_reg': 100 * (ch.abs(s) - s)}, x0=[s, r, t], callback=on_show, options=options)
    return s, Rodrigues(r), t
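A usage sketch (file names are placeholders; Mesh is the psbody.mesh class used throughout these examples):

scan = Mesh(filename='scan.ply')
mesh = Mesh(filename='template.ply')
s, R, t = rigid_scan_2_mesh_alignment(scan, mesh)
aligned_v = s.r * mesh.v.dot(R.r.T) + t.r   # apply the recovered similarity transform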
Example #7
    def _set_up(self):
        self.v_shaped = self.shapedirs.dot(self.betas) + self.v_template

        # Estimate a body-height measure from landmark vertices and rescale
        # the shaped template so the body is roughly 1.66 m tall.
        body_height = (self.v_shaped[2802, 1] + self.v_shaped[6262, 1]) - (
            self.v_shaped[2237, 1] + self.v_shaped[6728, 1])
        self.scale = 1.66 / np.array(body_height)[0]

        self.v_shaped_personal = self.scale * self.v_shaped + self.v_personal

        if sp.issparse(self.J_regressor):
            self.J = self.scale * sp_dot(self.J_regressor, self.v_shaped)
        else:
            self.J = self.scale * ch.sum(self.J_regressor.T.reshape(-1, 1, 24)
                                         * self.v_shaped.reshape(-1, 3, 1),
                                         axis=0).T
        self.v_posevariation = self.posedirs.dot(
            posemap(self.bs_type)(self.pose))
        self.v_poseshaped = self.v_shaped_personal + self.v_posevariation

        self.A, A_global = self._global_rigid_transformation()
        self.Jtr = ch.vstack([g[:3, 3] for g in A_global])
        self.J_transformed = self.Jtr + self.trans.reshape((1, 3))

        self.V = self.A.dot(self.weights.T)

        rest_shape_h = ch.hstack(
            (self.v_poseshaped, ch.ones((self.v_poseshaped.shape[0], 1))))
        self.v_posed = ch.sum(self.V.T * rest_shape_h.reshape(-1, 4, 1),
                              axis=1)[:, :3]
        self.v = self.v_posed + self.trans
Example #8
def render_color_model_with_lighting(w, h, v, vn, vc, f, u,
                                     sh_comps=None, light_c=ch.ones(3),
                                     vlight_pos=None, vlight_color=None,
                                     bg_img=None):
    """renders colored model with lighting effect"""
    assert(sh_comps is not None or vlight_pos is not None)
    V = ch.array(v)
    A = np.zeros_like(v)

    # SH lighting
    if sh_comps is not None:
        A += vc * SphericalHarmonics(vn=vn, components=sh_comps, light_color=light_c)

    # single point lighting (grey light)
    if vlight_color is not None and vlight_pos is not None \
            and len(vlight_pos.shape) == 1:
        A += LambertianPointLight(f=f, v=v, num_verts=len(v), light_pos=vlight_pos,
                                  light_color=vlight_color, vc=vc)

    # multiple point lighting (grey light)
    if vlight_color is not None and vlight_pos is not None \
            and len(vlight_pos.shape) == 2:
        for vlp in vlight_pos:
            A += LambertianPointLight(f=f, v=v, num_verts=len(v), light_pos=vlp,
                                      light_color=vlight_color, vc=vc)

    # The background buffer must match the rendered image shape, i.e. (h, w, 3).
    black_img = np.zeros((h, w, 3), dtype=np.float32)
    bg_img_ = bg_img if bg_img is not None else black_img

    rn = ColoredRenderer(camera=u, v=V, f=f, vc=A, background_image=bg_img_,
                         frustum={'width': w, 'height': h, 'near': 0.1, 'far': 20})
    return rn.r
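A usage sketch (v, vn, vc, faces stand for NumPy vertex, normal, color and triangle arrays; the camera follows the ProjectPoints pattern of Examples #4 and #10):

import numpy as np
import chumpy as ch
from opendr.camera import ProjectPoints

w, h = 512, 512
u = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.array([0., 0., 2.5]),
                  f=ch.array([w, w]), c=ch.array([w / 2., h / 2.]), k=ch.zeros(5))
sh = np.array([3., 0., 0., 0., 0., 0., 0., 0., 0.])  # 9 spherical-harmonic components
img = render_color_model_with_lighting(w, h, v, vn, vc, faces, u, sh_comps=sh)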
Example #9
def render_training_pairs(
        mesh,
        bone_pose,
        img_w,
        img_h,
        camera_r,
        camera_t,  #color_bg,
        sh_comps=None,
        light_c=ch.ones(3),
        vlight_pos=None,
        vlight_color=None):
    """generates training image pairs
    Will generate color image, mask, semantic map, normal map
    """
    v_ = mesh['v']

    # render color image
    # To avoid aliasing, I render the image with 2x resolution and then resize it
    # See: https://stackoverflow.com/questions/22069167/opencv-how-to-smoothen-boundary
    u = _project_vertices(v_, img_w * 2, img_h * 2, camera_r, camera_t)

    # pose in camera coordinate, pose on image
    pose_to_camera, pose_on_image = _project_pose(bone_pose, img_w, img_h,
                                                  camera_r, camera_t)

    # color_bg = cv.resize(color_bg, (img_w * 2, img_h * 2))
    img = _render_color_model_with_lighting(
        img_w * 2,
        img_h * 2,
        v_,
        mesh['vn'],
        mesh['vc'],
        mesh['f'],
        u,
        sh_comps=sh_comps,
        light_c=light_c,
        vlight_pos=vlight_pos,
        vlight_color=vlight_color,
    )
    # bg_img=color_bg)
    img = cv.resize(img, (img_w, img_h))
    img = np.float32(np.copy(img))

    # # render silhouette
    # u = _project_vertices(v_, img_w, img_h, camera_r, camera_t)
    # msk = _render_mask(img_w, img_h, v_, mesh['f'], u)
    # msk = np.float32(np.copy(msk))

    # # render normal maps
    # if 'n' in mesh:
    #     n_ = mesh['n'] * 0.5 + 0.5
    # else:
    #     vn = util.calc_normal(mesh)
    #     n_ = vn * 0.5 + 0.5
    # nml = _render_color_model_without_lighting(img_w, img_h, v_, n_, mesh['f'],
    #                                            u, bg_img=None)
    # nml = np.float32(np.copy(nml))

    return img, pose_to_camera, pose_on_image
Example #10
File: test_opendr.py  Project: zuru/DSS
def test_earth():
    m = get_earthmesh(trans=ch.array([0, 0, 0]), rotation=ch.zeros(3))
    # Create V, A, U, f: geometry, brightness, camera, renderer
    V = ch.array(m.v)
    A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
                           components=[3., 2., 0., 0., 0., 0., 0., 0., 0.],
                           light_color=ch.ones(3))
    # camera
    U = ProjectPoints(v=V,
                      f=[w, w],
                      c=[w / 2., h / 2.],
                      k=ch.zeros(5),
                      t=ch.zeros(3),
                      rt=ch.zeros(3))
    f = TexturedRenderer(vc=A,
                         camera=U,
                         f=m.f,
                         bgcolor=[0., 0., 0.],
                         texture_image=m.texture_image,
                         vt=m.vt,
                         ft=m.ft,
                         frustum={
                             'width': w,
                             'height': h,
                             'near': 1,
                             'far': 20
                         })

    # Parameterize the vertices
    translation, rotation = ch.array([0, 0, 8]), ch.zeros(3)
    f.v = translation + V.dot(Rodrigues(rotation))

    observed = f.r
    np.random.seed(1)
    # chumpy is reactive: changing these values updates every expression that depends on them.
    translation[:] = translation.r + np.random.rand(3)
    rotation[:] = rotation.r + np.random.rand(3) * .2
    # Create the energy
    E_raw = f - observed
    E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')

    Image.fromarray((observed * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "reference.png"))
    step = 0
    Image.fromarray((f.r * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "step_{:05d}.png".format(step)))

    print('OPTIMIZING TRANSLATION, ROTATION, AND LIGHT PARAMS')
    free_variables = [translation, rotation]
    ch.minimize({'pyr': E_pyr}, x0=free_variables, callback=create_callback(f))
    ch.minimize({'raw': E_raw}, x0=free_variables, callback=create_callback(f))
Example #11
def compute_approx_scale(lmk_3d,
                         model,
                         lmk_face_idx,
                         lmk_b_coords,
                         opt_options=None):
    """ function: compute approximate scale to align scan and model

    input: 
        lmk_3d: input landmark 3d, in shape (N,3)
        model: FLAME face model
        lmk_face_idx, lmk_b_coords: landmark embedding, in face indices and barycentric coordinates
        opt_options: optimization options

    output:
        scale.r: the approximate scale that aligns the scan landmarks to the model

    """

    scale = ch.ones(1)
    scan_lmks = scale * ch.array(lmk_3d)
    model_lmks = mesh_points_by_barycentric_coordinates(
        model, model.f, lmk_face_idx, lmk_b_coords)
    lmk_err = scan_lmks - model_lmks

    # options
    if opt_options is None:
        print("fit_lmk3d(): no 'opt_options' provided, use default settings.")
        import scipy.sparse as sp
        opt_options = {}
        opt_options['disp'] = 1
        opt_options['delta_0'] = 0.1
        opt_options['e_3'] = 1e-4
        opt_options['maxiter'] = 2000
        sparse_solver = lambda A, x: sp.linalg.cg(
            A, x, maxiter=opt_options['maxiter'])[0]
        opt_options['sparse_solver'] = sparse_solver

    # on_step callback
    def on_step(_):
        pass

    ch.minimize(fun=lmk_err,
                x0=[scale, model.trans, model.pose[:3]],
                method='dogleg',
                callback=on_step,
                options=opt_options)
    return scale.r
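A usage sketch (the model path is a placeholder; lmk_3d and the landmark embedding are assumed to be loaded as in the FLAME fitting code):

model = load_model('./models/generic_model.pkl')  # placeholder path
# lmk_3d: (N, 3) scan landmarks; lmk_face_idx, lmk_b_coords: landmark embedding
scale = compute_approx_scale(lmk_3d, model, lmk_face_idx, lmk_b_coords)
print('approximate scan-to-model scale:', scale)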
Example #12
    def test_sum_mean_std_var(self):
        for fn in [ch.sum, ch.mean, ch.var, ch.std]:

            # Create fake input and differences in input space
            data1 = ch.ones((3, 4, 7, 2))
            data2 = ch.array(data1.r + .1 *
                             np.random.rand(data1.size).reshape(data1.shape))
            diff = data2.r - data1.r

            # Compute outputs
            result1 = fn(data1, axis=2)
            result2 = fn(data2, axis=2)

            # Empirical and predicted derivatives
            gt = result2.r - result1.r
            pred = result1.dr_wrt(data1).dot(diff.ravel()).reshape(gt.shape)

            #print(np.max(np.abs(gt - pred)))

            if fn in [ch.std, ch.var]:
                self.assertTrue(1e-2 > np.max(np.abs(gt - pred)))
            else:
                self.assertTrue(1e-14 > np.max(np.abs(gt - pred)))
                # test caching
                dr0 = result1.dr_wrt(data1)
                data1[:] = np.random.randn(data1.size).reshape(data1.shape)
                self.assertTrue(
                    result1.dr_wrt(data1) is
                    dr0)  # changing values shouldn't force recompute
                result1.axis = 1
                self.assertTrue(result1.dr_wrt(data1) is not dr0)

        self.assertEqual(
            ch.mean(ch.eye(3), axis=1).ndim,
            np.mean(np.eye(3), axis=1).ndim)
        self.assertEqual(
            ch.mean(ch.eye(3), axis=0).ndim,
            np.mean(np.eye(3), axis=0).ndim)
        self.assertEqual(
            ch.sum(ch.eye(3), axis=1).ndim,
            np.sum(np.eye(3), axis=1).ndim)
        self.assertEqual(
            ch.sum(ch.eye(3), axis=0).ndim,
            np.sum(np.eye(3), axis=0).ndim)
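The gt-versus-pred comparison above is the standard sanity check for chumpy derivatives: a finite difference of the outputs should match dr_wrt applied to the input difference. A minimal standalone version (a sketch):

import numpy as np
import chumpy as ch

x = ch.array(np.random.rand(5))
y = ch.sum(x ** 2)
J = y.dr_wrt(x)                                    # 1x5 Jacobian; analytically 2x
J = J.toarray() if hasattr(J, 'toarray') else np.asarray(J)
print(np.allclose(J.ravel(), 2 * x.r))             # expected: True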
Example #13
    def run(self, beta=None, theta=None, garment_d=None, garment_class=None):
        """Outputs body and garment of the specified garment class given theta, beta and displacements."""
        if beta is not None:
            self.smpl_base.betas[:beta.shape[0]] = beta
        else:
            self.smpl_base.betas[:] = 0
        if theta is not None:
            self.smpl_base.pose[:] = theta
        else:
            self.smpl_base.pose[:] = 0
        self.smpl_base.v_personal[:] = 0
        if garment_d is not None and garment_class is not None:
            if 'skirt' not in garment_class:
                vert_indices = self.class_info[garment_class]['vert_indices']
                f = self.class_info[garment_class]['f']
                self.smpl_base.v_personal[vert_indices] = garment_d
                garment_m = Mesh(v=self.smpl_base.r[vert_indices], f=f)
            else:
                vert_indices = self.class_info[garment_class]['vert_indices']
                f = self.class_info[garment_class]['f']
                verts = self.smpl_base.v_poseshaped[vert_indices] + garment_d
                verts_h = ch.hstack((verts, ch.ones((verts.shape[0], 1))))
                verts = ch.sum(self.smpl_base.V.T[vert_indices] *
                               verts_h.reshape(-1, 4, 1),
                               axis=1)[:, :3]
                # if theta is not None:
                #     rotmat = self.smpl_base.A.r[:, :, 0]
                #     verts_homo = np.hstack(
                #         (verts, np.ones((verts.shape[0], 1))))
                #     verts = verts_homo.dot(rotmat.T)[:, :3]
                garment_m = Mesh(v=verts, f=f)
        else:
            garment_m = None
        self.smpl_base.v_personal[:] = 0
        body_m = Mesh(v=self.smpl_base.r, f=self.smpl_base.f)
        return body_m, garment_m
Example #14
def _simple_renderer(rn, meshes, yrot=0, texture=None):
    mesh = meshes[0]
    if texture is not None:
        if not hasattr(mesh, 'ft'):
            """mesh.ft = copy(mesh.f)
            vt = copy(mesh.v[:, :2])
            vt -= np.min(vt, axis=0).reshape((1, -1))
            vt /= np.max(vt, axis=0).reshape((1, -1))
            mesh.vt = vt"""
            mesh.vt, mesh.ft = Template_tex()

        mesh.texture_filepath = rn.texture_image

    # Set camera parameters
    if texture is not None:
        rn.set(v=mesh.v,
               f=mesh.f,
               vc=mesh.vc,
               ft=mesh.ft,
               vt=mesh.vt,
               bgcolor=np.ones(3))
    else:
        rn.set(v=mesh.v, f=mesh.f, vc=mesh.vc, bgcolor=np.ones(3))

    for next_mesh in meshes[1:]:
        _stack_with(rn, next_mesh, texture)

    # Construct Back Light (on back right corner)

    if texture is not None:
        rn.vc = ch.ones(rn.v.shape)
    else:
        albedo = rn.vc
        # Construct Back Light (on back right corner)
        rn.vc = LambertianPointLight(f=rn.f,
                                     v=rn.v,
                                     num_verts=len(rn.v),
                                     light_pos=rotateY(
                                         np.array([-200, -100, -100]), yrot),
                                     vc=albedo,
                                     light_color=np.array([1, 1, 1]))

        # Construct Left Light
        rn.vc += LambertianPointLight(f=rn.f,
                                      v=rn.v,
                                      num_verts=len(rn.v),
                                      light_pos=rotateY(
                                          np.array([800, 10, 300]), yrot),
                                      vc=albedo,
                                      light_color=np.array([1, 1, 1]))

        # Construct Right Light
        rn.vc += LambertianPointLight(f=rn.f,
                                      v=rn.v,
                                      num_verts=len(rn.v),
                                      light_pos=rotateY(
                                          np.array([-500, 500, 1000]), yrot),
                                      vc=albedo,
                                      light_color=np.array([.7, .7, .7]))

    return rn.r
Example #15
def setupTexturedRenderer(renderer,
                          vstack,
                          vch,
                          f_list,
                          vc_list,
                          vnch,
                          uv,
                          haveTextures_list,
                          textures_list,
                          camera,
                          frustum,
                          sharedWin=None):
    f = []
    f_listflat = [item for sublist in f_list for item in sublist]
    lenMeshes = 0
    for mesh_i, mesh in enumerate(f_listflat):
        polygonLen = 0
        for polygons in mesh:

            f = f + [polygons + lenMeshes]
            polygonLen += len(polygons)
        lenMeshes += len(vch[mesh_i])

    fstack = np.vstack(f)

    if len(vnch) == 1:
        vnstack = vnch[0]
    else:
        vnstack = ch.vstack(vnch)

    if len(vc_list) == 1:
        vcstack = vc_list[0]
    else:
        vcstack = ch.vstack(vc_list)

    uvflat = [item for sublist in uv for item in sublist]
    ftstack = np.vstack(uvflat)

    texturesch = []
    textures_listflat = [item for sublist in textures_list for item in sublist]

    for texture_list in textures_listflat:
        if texture_list is not None:
            for texture in texture_list:
                if texture is not None:
                    texturesch.append(ch.array(texture))

    if len(texturesch) == 0:
        texture_stack = ch.Ch([])
    elif len(texturesch) == 1:
        texture_stack = texturesch[0].ravel()
    else:
        texture_stack = ch.concatenate([tex.ravel() for tex in texturesch])

    haveTextures_listflat = [
        item for sublist in haveTextures_list for item in sublist
    ]

    renderer.set(camera=camera,
                 frustum=frustum,
                 v=vstack,
                 f=fstack,
                 vn=vnstack,
                 vc=vcstack,
                 ft=ftstack,
                 texture_stack=texture_stack,
                 v_list=vch,
                 f_list=f_listflat,
                 vc_list=vc_list,
                 ft_list=uvflat,
                 textures_list=textures_listflat,
                 haveUVs_list=haveTextures_listflat,
                 bgcolor=ch.ones(3),
                 overdraw=True)
    renderer.msaa = True
    renderer.sharedWin = sharedWin
Example #16
chLightEl = ch.Ch([np.pi/2])
chLightDist = ch.Ch([0.5])
chLightDistGT = ch.Ch([0.5])
chLightAzGT = ch.Ch([0.0])
chLightElGT = ch.Ch([np.pi/4])

ligthTransf = computeHemisphereTransformation(chLightAz, chLightEl, chLightDist, targetPosition)
ligthTransfGT = computeHemisphereTransformation(chLightAzGT, chLightElGT, chLightDistGT, targetPosition)

lightPos = ch.dot(ligthTransf, ch.Ch([0., 0., 0., 1.]))[0:3]
# Note: the transform-based position above is immediately overridden with a fixed offset.
lightPos = ch.Ch([targetPosition[0] + 0.5, targetPosition[1], targetPosition[2] + 0.5])
lightPosGT = ch.dot(ligthTransfGT, ch.Ch([0.,0.,0.,1.]))[0:3]

chGlobalConstant = ch.Ch([0.5])
chGlobalConstantGT = ch.Ch([0.5])
light_color = ch.ones(3)*chPointLightIntensity
light_colorGT = ch.ones(3)*chPointLightIntensityGT
chVColors = ch.Ch([0.8,0.8,0.8])
chVColorsGT = ch.Ch([0.8,0.8,0.8])

shCoefficientsFile = 'data/sceneSH' + str(sceneIdx) + '.pickle'

chAmbientIntensityGT = ch.Ch([0.025])
clampedCosCoeffs = clampedCosineCoefficients()
chAmbientSHGT = ch.zeros([9])

envMapDic = {}
SHFilename = 'data/LightSHCoefficients.pickle'

with open(SHFilename, 'rb') as pfile:
    envMapDic = pickle.load(pfile)
Example #17
def convert_BFM_mesh_to_FLAME(FLAME_model_fname, BFM_mesh_fname,
                              FLAME_out_fname):
    '''
    Convert Basel Face Model mesh to a FLAME mesh
    \param FLAME_model_fname        path of the FLAME model
    \param BFM_mesh_fname           path of the BFM mesh to be converted
    \param FLAME_out_fname          path of the output file
    '''

    # Regularizer weights for jaw pose (i.e. opening of mouth), shape, and facial expression.
    # Increase regularization in case of implausible output meshes.
    w_pose = 1e-4
    w_shape = 1e-3
    w_exp = 1e-4

    if not os.path.exists(os.path.dirname(FLAME_out_fname)):
        os.makedirs(os.path.dirname(FLAME_out_fname))

    if not os.path.exists(BFM_mesh_fname):
        print('BFM mesh not found %s' % BFM_mesh_fname)
        return
    BFM_mesh = Mesh(filename=BFM_mesh_fname)

    if not os.path.exists(FLAME_model_fname):
        print('FLAME model not found %s' % FLAME_model_fname)
        return
    model = load_model(FLAME_model_fname)

    if not os.path.exists('./data/BFM_to_FLAME_corr.npz'):
        print('Cached mapping not found')
        return
    cached_data = np.load('./data/BFM_to_FLAME_corr.npz',
                          allow_pickle=True,
                          encoding="latin1")

    BFM2017_corr = cached_data['BFM2017_corr'].item()
    BFM2009_corr = cached_data['BFM2009_corr'].item()
    BFM2009_cropped_corr = cached_data['BFM2009_cropped_corr'].item()

    if (2 * BFM_mesh.v.shape[0] == BFM2017_corr['mtx'].shape[1]) and (
            BFM_mesh.f.shape[0] == BFM2017_corr['f_in'].shape[0]):
        conv_mesh = convert_mesh(BFM_mesh, BFM2017_corr)
    elif (2 * BFM_mesh.v.shape[0] == BFM2009_corr['mtx'].shape[1]) and (
            BFM_mesh.f.shape[0] == BFM2009_corr['f_in'].shape[0]):
        conv_mesh = convert_mesh(BFM_mesh, BFM2009_corr)
    elif (2 * BFM_mesh.v.shape[0] == BFM2009_cropped_corr['mtx'].shape[1]
          ) and (BFM_mesh.f.shape[0] == BFM2009_cropped_corr['f_in'].shape[0]):
        conv_mesh = convert_mesh(BFM_mesh, BFM2009_cropped_corr)
    else:
        print('Conversion failed - input mesh does not match any setup')
        return

    FLAME_mask_ids = cached_data['FLAME_mask_ids']

    scale = ch.ones(1)
    v_target = scale * ch.array(conv_mesh.v)
    dist = v_target[FLAME_mask_ids] - model[FLAME_mask_ids]
    pose_reg = model.pose[3:]
    shape_reg = model.betas[:300]
    exp_reg = model.betas[300:]
    obj = {
        'dist': dist,
        'pose_reg': w_pose * pose_reg,
        'shape_reg': w_shape * shape_reg,
        'exp_reg': w_exp * exp_reg
    }
    ch.minimize(obj, x0=[scale, model.trans, model.pose[:3]])
    ch.minimize(obj,
                x0=[
                    scale, model.trans, model.pose[np.hstack(
                        (np.arange(3), np.arange(6, 9)))], model.betas
                ])

    v_out = model.r / scale.r
    Mesh(v_out, model.f).write_obj(FLAME_out_fname)
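A usage sketch (all paths are placeholders):

convert_BFM_mesh_to_FLAME('./models/FLAME_model.pkl', './input/bfm_head.obj', './output/flame_head.obj')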
Example #18
File: slider_demo.py  Project: cadik/opendr
def get_renderer():
    import chumpy as ch
    from opendr.everything import *

    # Load mesh
    m = load_mesh('/Users/matt/geist/OpenDR/test_dr/nasa_earth.obj')
    m.v += ch.array([0,0,3.])
    w, h = (320, 240)
    trans = ch.array([[0,0,0]])

    # Construct renderer
    rn = TexturedRenderer()
    rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=trans+m.v, f=m.f, texture_image=m.texture_image[:,:,::-1], ft=m.ft, vt=m.vt, bgcolor=ch.zeros(3))
    rn.vc = SphericalHarmonics(vn=VertNormals(v=rn.v, f=rn.f), components=ch.array([4.,0.,0.,0.]), light_color=ch.ones(3))

    return rn
Example #19
File: renderers.py  Project: zoq/DeepHuman
def render_training_pairs(mesh,
                          smpl,
                          img_w,
                          img_h,
                          camera_r,
                          camera_t,
                          color_bg,
                          sh_comps=None,
                          light_c=ch.ones(3),
                          vlight_pos=None,
                          vlight_color=None):
    """generates training image pairs
    Will generate color image, mask, semantic map, normal map
    """
    v_, v_smpl_ = mesh['v'], smpl['v']

    # render color image
    # To avoid aliasing, I render the image with 2x resolution and then resize it
    # See: https://stackoverflow.com/questions/22069167/opencv-how-to-smoothen-boundary
    u = _project_vertices(v_, img_w * 2, img_h * 2, camera_r, camera_t)
    color_bg = cv.resize(color_bg, (img_w * 2, img_h * 2))
    img = _render_color_model_with_lighting(img_w * 2,
                                            img_h * 2,
                                            v_,
                                            mesh['vn'],
                                            mesh['vc'],
                                            mesh['f'],
                                            u,
                                            sh_comps=sh_comps,
                                            light_c=light_c,
                                            vlight_pos=vlight_pos,
                                            vlight_color=vlight_color,
                                            bg_img=color_bg)
    img = cv.resize(img, (img_w, img_h))
    img = np.float32(np.copy(img))

    # render silhouette
    u = _project_vertices(v_, img_w, img_h, camera_r, camera_t)
    msk = _render_mask(img_w, img_h, v_, mesh['f'], u)
    msk = np.float32(np.copy(msk))

    # render normal maps
    if 'n' in mesh:
        n_ = mesh['n'] * 0.5 + 0.5
    else:
        vn = util.calc_normal(mesh)
        n_ = vn * 0.5 + 0.5
    nml = _render_color_model_without_lighting(img_w,
                                               img_h,
                                               v_,
                                               n_,
                                               mesh['f'],
                                               u,
                                               bg_img=None)
    nml = np.float32(np.copy(nml))

    # render semantic map
    u = _project_vertices(v_smpl_, img_w, img_h, camera_r, camera_t)
    vc_smpl = util.get_smpl_semantic_code()
    smap = _render_color_model_without_lighting(img_w,
                                                img_h,
                                                v_smpl_,
                                                vc_smpl,
                                                smpl['f'],
                                                u,
                                                bg_img=None)
    smap = np.float32(np.copy(smap))

    return img, msk, nml, smap
Example #20
def get_capsules(model, wrt_betas=None, length_regs=None, rad_regs=None):
    from opendr.geometry import Rodrigues
    if length_regs is not None:
        n_shape_dofs = length_regs.shape[0] - 1
    else:
        n_shape_dofs = model.betas.r.size
    segm = np.argmax(model.weights_prior, axis=1)
    J_off = ch.zeros((len(joint2name), 3))
    rots = rots0.copy()
    mujoco_t_mid = [0, 3, 6, 9]
    if wrt_betas is not None:
        # if we want to differentiate wrt betas (shape), we must have the
        # regressors...
        assert (length_regs is not None and rad_regs is not None)
        # ... and betas must be a chumpy object
        assert (hasattr(wrt_betas, 'dterms'))
        pad = ch.concatenate(
            (wrt_betas, ch.zeros(n_shape_dofs - len(wrt_betas)), ch.ones(1)))
        lengths = pad.dot(length_regs)
        rads = pad.dot(rad_regs)
    else:
        lengths = ch.ones(len(joint2name))
        rads = ch.ones(len(joint2name))
    betas = wrt_betas if wrt_betas is not None else model.betas
    n_betas = len(betas)
    # the joint regressors are the original, pre-optimized ones
    # (middle of the part frontier)
    myJ_regressor = model.J_regressor_prior
    myJ0 = ch.vstack((ch.ch.MatVecMult(
        myJ_regressor, model.v_template[:, 0] +
        model.shapedirs[:, :, :n_betas].dot(betas)[:, 0]),
                      ch.ch.MatVecMult(
                          myJ_regressor, model.v_template[:, 1] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 1]),
                      ch.ch.MatVecMult(
                          myJ_regressor, model.v_template[:, 2] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 2]))).T
    # with small adjustments for hips, spine and feet
    myJ = ch.vstack([
        ch.concatenate([
            myJ0[0, 0], (.6 * myJ0[0, 1] + .2 * myJ0[1, 1] + .2 * myJ0[2, 1]),
            myJ0[9, 2]
        ]),
        ch.vstack([myJ0[i] for i in range(1, 7)]),
        ch.concatenate(
            [myJ0[7, 0], (1.1 * myJ0[7, 1] - .1 * myJ0[4, 1]), myJ0[7, 2]]),
        ch.concatenate(
            [myJ0[8, 0], (1.1 * myJ0[8, 1] - .1 * myJ0[5, 1]), myJ0[8, 2]]),
        ch.concatenate(
            [myJ0[9, 0], myJ0[9, 1], (.2 * myJ0[9, 2] + .8 * myJ0[12, 2])]),
        ch.vstack([myJ0[i] for i in range(10, 24)])
    ])
    capsules = []
    # create one capsule per mujoco joint
    for ijoint, segms in enumerate(mujoco2segm):
        if wrt_betas is None:
            vidxs = np.asarray([segm == k for k in segms]).any(axis=0)
            verts = model.v_template[vidxs].r
            dims = (verts.max(axis=0) - verts.min(axis=0))
            rads[ijoint] = .5 * ((dims[(np.argmax(dims) + 1) % 3] + dims[
                (np.argmax(dims) + 2) % 3]) / 4.)
            lengths[ijoint] = max(dims) - 2. * rads[ijoint].r
        # the core joints are different, since the capsule is not in the joint
        # but in the middle
        if ijoint in mujoco_t_mid:
            len_offset = ch.vstack(
                [ch.zeros(1),
                 ch.abs(lengths[ijoint]) / 2.,
                 ch.zeros(1)]).reshape(3, 1)
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1) -
                Rodrigues(rots[ijoint]).dot(len_offset), rots[ijoint],
                rads[ijoint], lengths[ijoint])
        else:
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        caps.id = ijoint
        capsules.append(caps)
    return capsules
Example #21
def _simple_renderer(rn,
                     meshes,
                     yrot=0,
                     texture=None,
                     out_mesh_name=None,
                     out_texture_name=None):
    mesh = meshes[0]
    if texture is not None:
        if not hasattr(mesh, 'ft'):
            """mesh.ft = copy(mesh.f)
            vt = copy(mesh.v[:, :2])
            vt -= np.min(vt, axis=0).reshape((1, -1))
            vt /= np.max(vt, axis=0).reshape((1, -1))
            mesh.vt = vt"""
            mesh.vt, mesh.ft = Template_tex()

        mesh.texture_filepath = rn.texture_image

    # Set camera parameters
    if texture is not None:
        rn.set(v=mesh.v,
               f=mesh.f,
               vc=mesh.vc,
               ft=mesh.ft,
               vt=mesh.vt,
               bgcolor=np.ones(3))
    else:
        rn.set(v=mesh.v, f=mesh.f, vc=mesh.vc, bgcolor=np.ones(3))

    for next_mesh in meshes[1:]:
        _stack_with(rn, next_mesh, texture)

    if out_mesh_name is not None:
        write_obj(mesh, out_mesh_name)

    if (texture is not None) and (out_texture_name is not None):
        cv2.imwrite(out_texture_name, texture)
        with open(out_mesh_name[:-3] + "mtl", 'w') as my_file:
            my_file.write("newmtl Material.001" + "\n" + "Ns 96.078431" +
                          "\n" + "Ka 1.000000 1.000000 1.000000" + "\n" +
                          "Kd 0.640000 0.640000 0.640000" + "\n" +
                          "Ks 0.000000 0.000000 0.000000" + "\n" +
                          "Ke 0.412673 0.432000 0.226290" + "\n" +
                          "Ni 1.000000" + "\n" + "d 1.000000" + "\n" +
                          "illum 1" + "\n" + "map_Kd " + out_texture_name)

    # Construct Back Light (on back right corner)

    if texture is not None:
        rn.vc = ch.ones(rn.v.shape)
    else:
        albedo = rn.vc
        # Construct Back Light (on back right corner)
        rn.vc = LambertianPointLight(f=rn.f,
                                     v=rn.v,
                                     num_verts=len(rn.v),
                                     light_pos=rotateY(
                                         np.array([-200, -100, -100]), yrot),
                                     vc=albedo,
                                     light_color=np.array([1, 1, 1]))

        # Construct Left Light
        rn.vc += LambertianPointLight(f=rn.f,
                                      v=rn.v,
                                      num_verts=len(rn.v),
                                      light_pos=rotateY(
                                          np.array([800, 10, 300]), yrot),
                                      vc=albedo,
                                      light_color=np.array([1, 1, 1]))

        # Construct Right Light
        rn.vc += LambertianPointLight(f=rn.f,
                                      v=rn.v,
                                      num_verts=len(rn.v),
                                      light_pos=rotateY(
                                          np.array([-500, 500, 1000]), yrot),
                                      vc=albedo,
                                      light_color=np.array([.7, .7, .7]))

    return rn.r
Example #22
def get_capsules(model, wrt_betas=None, length_regs=None, rad_regs=None):
    from opendr.geometry import Rodrigues
    if length_regs is not None:
        n_shape_dofs = length_regs.shape[0] - 1
    else:
        n_shape_dofs = model.betas.r.size
    segm = np.argmax(model.weights_prior, axis=1)
    J_off = ch.zeros((len(joint2name), 3))
    rots = rots0.copy()
    mujoco_t_mid = [0, 3, 6, 9]
    if wrt_betas is not None:
        # if we want to differentiate wrt betas (shape), we must have the
        # regressors...
        assert (length_regs is not None and rad_regs is not None)
        # ... and betas must be a chumpy object
        assert (hasattr(wrt_betas, 'dterms'))
        pad = ch.concatenate(
            (wrt_betas, ch.zeros(n_shape_dofs - len(wrt_betas)), ch.ones(1)))
        lengths = pad.dot(length_regs)
        rads = pad.dot(rad_regs)
    else:
        lengths = ch.ones(len(joint2name))
        rads = ch.ones(len(joint2name))
    betas = wrt_betas if wrt_betas is not None else model.betas
    n_betas = len(betas)
    # the joint regressors are the original, pre-optimized ones
    # (middle of the part frontier)
    myJ_regressor = model.J_regressor_prior
    myJ0 = ch.vstack(
        (ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 0] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 0]),
         ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 1] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 1]),
         ch.ch.MatVecMult(myJ_regressor, model.v_template[:, 2] +
                          model.shapedirs[:, :, :n_betas].dot(betas)[:, 2]))).T
    # with small adjustments for hips, spine and feet
    myJ = ch.vstack(
        [ch.concatenate([myJ0[0, 0], (
            .6 * myJ0[0, 1] + .2 * myJ0[1, 1] + .2 * myJ0[2, 1]), myJ0[9, 2]]),
         ch.vstack([myJ0[i] for i in range(1, 7)]), ch.concatenate(
             [myJ0[7, 0], (1.1 * myJ0[7, 1] - .1 * myJ0[4, 1]), myJ0[7, 2]]),
         ch.concatenate(
             [myJ0[8, 0], (1.1 * myJ0[8, 1] - .1 * myJ0[5, 1]), myJ0[8, 2]]),
         ch.concatenate(
             [myJ0[9, 0], myJ0[9, 1], (.2 * myJ0[9, 2] + .8 * myJ0[12, 2])]),
         ch.vstack([myJ0[i] for i in range(10, 24)])])
    capsules = []
    # create one capsule per mujoco joint
    for ijoint, segms in enumerate(mujoco2segm):
        if wrt_betas is None:
            vidxs = np.asarray([segm == k for k in segms]).any(axis=0)
            verts = model.v_template[vidxs].r
            dims = (verts.max(axis=0) - verts.min(axis=0))
            rads[ijoint] = .5 * ((dims[(np.argmax(dims) + 1) % 3] + dims[(
                np.argmax(dims) + 2) % 3]) / 4.)
            lengths[ijoint] = max(dims) - 2. * rads[ijoint].r
        # the core joints are different, since the capsule is not in the joint
        # but in the middle
        if ijoint in mujoco_t_mid:
            len_offset = ch.vstack([ch.zeros(1), ch.abs(lengths[ijoint]) / 2.,
                                    ch.zeros(1)]).reshape(3, 1)
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(
                    3, 1) - Rodrigues(rots[ijoint]).dot(len_offset),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        else:
            caps = Capsule(
                (J_off[ijoint] + myJ[mujoco2segm[ijoint][0]]).reshape(3, 1),
                rots[ijoint], rads[ijoint], lengths[ijoint])
        caps.id = ijoint
        capsules.append(caps)
    return capsules
Example #23
    def __call__(self, args):
        np.random.seed()
        di, data_item = args
        print("Processing data #", di)
        render_img_w = conf.render_img_w
        render_img_h = conf.render_img_h
        dataset_dir = conf.dataset_dir
        num_render_per_obj = self.render_setting["num_render_per_obj"]
        output_dir = self.render_setting["output_dir"]
        novel_view = self.render_setting["novel_view"]

        # preprocess 3D models
        mesh = load_models(dataset_dir, data_item)

        # align to smpl bone
        bone_pose, root_rot, root_trans = load_smpl_params(dataset_dir, data_item)
        mesh = util.translate_model_inplace(mesh, root_rot, root_trans)

        mesh, bone_pose = axis_transformation(mesh, bone_pose, conf.axis_transformation)

        # ---------- preprocess is done
        # save bone params
        np.save('%s/bone_params/bone_params_%08d.npy' % (output_dir, di), bone_pose)

        img_indices = np.arange(num_render_per_obj * di, num_render_per_obj * (di + 1))

        if not self.random_lighting:  # use one lighting condition
            vl_pos, vl_clr, sh = self.vl_pos, self.vl_clr, self.sh

        for vi in range(num_render_per_obj):
            mesh_ = copy.deepcopy(mesh)
            bone_pose_ = copy.deepcopy(bone_pose)
            mesh_, bone_pose_ = move_to_origin(mesh_, bone_pose_)
            mesh_, bone_pose_, param_0 = transform_model_randomly(mesh_, bone_pose_)
            img_ind = img_indices[vi]

            if self.random_lighting:  # random lighting condition
                vl_pos, vl_clr = util.sample_verticle_lighting(3)
                sh = util.sample_sh_component()

            cam_t, cam_r = self.sample_view(novel_view)
            img, pose_to_camera, pose_on_image = rd.render_training_pairs(mesh_, bone_pose_,
                                                                          render_img_w, render_img_h,
                                                                          cam_r, cam_t,  # bg,
                                                                          sh_comps=sh,
                                                                          light_c=ch.ones(3),
                                                                          vlight_pos=vl_pos,
                                                                          vlight_color=vl_clr)
            save_rendered_data(img, pose_on_image, output_dir, img_ind, pose_to_camera, bone_pose_)

            if hasattr(conf, "autoencoder") and conf.autoencoder:
                dir, base = os.path.split(output_dir)
                output_dir_view2 = os.path.join(dir + "_view2", base)
                cam_t, cam_r = self.sample_view(novel_view=False)
                img, pose_to_camera, pose_on_image = rd.render_training_pairs(mesh_, bone_pose_,
                                                                              render_img_w, render_img_h,
                                                                              cam_r, cam_t,  # bg,
                                                                              sh_comps=sh,
                                                                              light_c=ch.ones(3),
                                                                              vlight_pos=vl_pos,
                                                                              vlight_color=vl_clr)
                save_rendered_data(img, pose_on_image, output_dir_view2, img_ind, pose_to_camera, bone_pose_)