def test_empty_meshes(self):
        device = torch.device("cuda:0")
        target_length = 0
        N = 10
        V = 32
        verts_list = []
        faces_list = []
        for _ in range(N):
            vn = torch.randint(3, high=V, size=(1, ))[0].item()
            verts = torch.rand((vn, 3), dtype=torch.float32, device=device)
            faces = torch.tensor([], dtype=torch.int64, device=device)
            verts_list.append(verts)
            faces_list.append(faces)
        mesh = Meshes(verts=verts_list, faces=faces_list)
        loss = mesh_edge_loss(mesh, target_length=target_length)

        self.assertTrue(
            torch.allclose(
                loss, torch.tensor([0.0], dtype=torch.float32, device=device)))
        self.assertTrue(loss.requires_grad)
Example #2
    def test_getitem(self):
        N = 5
        V = 20
        F = 10
        source = {"atlas": torch.randn(size=(N, F, 4, 4, 3))}
        tex = TexturesAtlas(atlas=source["atlas"])

        verts = torch.rand(size=(N, V, 3))
        faces = torch.randint(size=(N, F, 3), high=V)
        meshes = Meshes(verts=verts, faces=faces, textures=tex)

        tryindex(self, 2, tex, meshes, source)
        tryindex(self, slice(0, 2, 1), tex, meshes, source)
        index = torch.tensor([1, 0, 1, 0, 0], dtype=torch.bool)
        tryindex(self, index, tex, meshes, source)
        index = torch.tensor([0, 0, 0, 0, 0], dtype=torch.bool)
        tryindex(self, index, tex, meshes, source)
        index = torch.tensor([1, 2], dtype=torch.int64)
        tryindex(self, index, tex, meshes, source)
        tryindex(self, [2, 4], tex, meshes, source)
    def test_taubin(self):
        N = 3
        device = get_random_cuda_device()

        mesh = ico_sphere(4, device).extend(N)
        ico_verts = mesh.verts_padded()
        ico_faces = mesh.faces_padded()

        rand_noise = torch.rand_like(ico_verts) * 0.2 - 0.1
        z_mask = (ico_verts[:, :, -1] > 0).view(N, -1, 1)
        rand_noise = rand_noise * z_mask
        verts = ico_verts + rand_noise
        mesh = Meshes(verts=verts, faces=ico_faces)

        smooth_mesh = taubin_smoothing(mesh, num_iter=50)
        smooth_verts = smooth_mesh.verts_padded()

        smooth_dist = (smooth_verts - ico_verts).norm(dim=-1).mean()
        dist = (verts - ico_verts).norm(dim=-1).mean()
        self.assertTrue(smooth_dist < dist)
    def __getitem__(self, index):
        path = self.paths[index][0]
        label = self.paths[index][1]

        # Load the obj and ignore the textures and materials.
        verts, faces_idx, _ = load_obj(path)
        faces = faces_idx.verts_idx

        # center = verts.mean(0)
        # verts = verts - center
        # scale = max(verts.abs().max(0)[0])
        # verts = verts / scale

        # Initialize each vertex to be white in color.
        verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
        textures = Textures(verts_rgb=verts_rgb)
        # Create a Meshes object for this sample. Here we have only one mesh in the batch.
        mesh = Meshes(verts=[verts], faces=[faces], textures=textures)

        return mesh, label
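
A minimal usage sketch for a dataset built around this __getitem__: the default DataLoader collate cannot stack Meshes objects, so a small custom collate_fn merges them with join_meshes_as_batch. The DataLoader line is left commented out because the dataset class itself is not shown above.

from torch.utils.data import DataLoader
from pytorch3d.structures import join_meshes_as_batch

def collate_meshes(batch):
    # Each dataset item is a (mesh, label) pair; merge the single-mesh Meshes
    # objects into one batched Meshes and keep the labels as a plain list.
    meshes, labels = zip(*batch)
    return join_meshes_as_batch(list(meshes)), list(labels)

# loader = DataLoader(obj_dataset, batch_size=4, collate_fn=collate_meshes)
# for meshes, labels in loader:
#     ...
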
    def _get_cube_mesh(self):
        # NOTE(ycho): duplicated from _get_cube_cloud()
        vertices = list(
            itertools.product(*zip([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5])))
        vertices = np.insert(vertices, 0, [0, 0, 0], axis=0)
        vertices = th.as_tensor(vertices, dtype=th.float32, device=self.device)

        # FIXME(ycho): Hardcoded face indices.
        # (We can't use Box.FACE since we need triangulated faces)
        faces = [[7, 3, 5], [5, 3, 1], [5, 1, 6], [6, 1, 2], [6, 2, 8],
                 [8, 2, 4], [8, 4, 7], [7, 4, 3], [3, 4, 1], [1, 4, 2],
                 [8, 7, 6], [6, 7, 5]]
        face_indices = th.as_tensor(faces, dtype=th.int64,
                                    device=self.device).reshape(-1, 3)
        textures = TexturesVertex(verts_features=(0.5 + 0.5 * vertices)[None])
        mesh = Meshes(verts=vertices[None],
                      faces=face_indices[None],
                      textures=textures)

        return mesh
    def forward(self, vertices, faces, attributes=None):
        """
        Args:
            vertices: (N, V, 3) vertex coordinates, already projected to the
                      rasterizer's screen/NDC space; x and y are negated below
                      to match its convention.
            faces: (N, F, 3) face indices.
            attributes: (N, F, 3, D) per-face vertex attributes to interpolate.
        Returns:
            pixel_vals: (N, D + 1, H, W) interpolated attributes, with a
                        visibility mask appended as the last channel.
        """
        fixed_vertices = vertices.clone()
        fixed_vertices[..., :2] = -fixed_vertices[..., :2]
        meshes_screen = Meshes(verts=fixed_vertices.float(), faces=faces.long())
        raster_settings = self.raster_settings

        pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
            meshes_screen,
            image_size=raster_settings.image_size,
            blur_radius=raster_settings.blur_radius,
            faces_per_pixel=raster_settings.faces_per_pixel,
            bin_size=raster_settings.bin_size,
            max_faces_per_bin=raster_settings.max_faces_per_bin,
            perspective_correct=raster_settings.perspective_correct,
        )

        vismask = (pix_to_face > -1).float()
        D = attributes.shape[-1]
        attributes = attributes.clone()
        attributes = attributes.view(attributes.shape[0] * attributes.shape[1], 3, attributes.shape[-1])
        N, H, W, K, _ = bary_coords.shape
        #plt.imshow(zbuf[0,:,:,0])
        #plt.show()
        mask = pix_to_face == -1  # []
        pix_to_face = pix_to_face.clone()
        pix_to_face[mask] = 0
        idx = pix_to_face.view(N * H * W * K, 1, 1).expand(N * H * W * K, 3, D)
        pixel_face_vals = attributes.gather(0, idx).view(N, H, W, K, 3, D)
        pixel_vals = (bary_coords[..., None] * pixel_face_vals).sum(dim=-2)
        pixel_vals[mask] = 0  # Replace masked values in output.
        pixel_vals = pixel_vals[:, :, :, 0].permute(0, 3, 1, 2)
        pixel_vals = torch.cat([pixel_vals, vismask[:, :, :, 0][:, None, :, :]], dim=1)
        # import ipdb; ipdb.set_trace()
        return pixel_vals
    def test_interpolate_texture_map(self):
        barycentric_coords = torch.tensor([[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]],
                                          dtype=torch.float32).view(
                                              1, 1, 1, 2, -1)
        dummy_verts = torch.zeros(4, 3)
        vert_uvs = torch.tensor([[1, 0], [0, 1], [1, 1], [0, 0]],
                                dtype=torch.float32)
        face_uvs = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64)
        interpolated_uvs = torch.tensor(
            [[0.5 + 0.2, 0.3 + 0.2], [0.6, 0.3 + 0.6]], dtype=torch.float32)

        # Create a dummy texture map
        H = 2
        W = 2
        x = torch.linspace(0, 1, W).view(1, W).expand(H, W)
        y = torch.linspace(0, 1, H).view(H, 1).expand(H, W)
        tex_map = torch.stack([x, y], dim=2).view(1, H, W, 2)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=pix_to_face,
            dists=pix_to_face,
        )
        tex = Textures(
            maps=tex_map,
            faces_uvs=face_uvs[None, ...],
            verts_uvs=vert_uvs[None, ...],
        )
        meshes = Meshes(verts=[dummy_verts], faces=[face_uvs], textures=tex)
        texels = interpolate_texture_map(fragments, meshes)

        # Expected output
        pixel_uvs = interpolated_uvs * 2.0 - 1.0
        pixel_uvs = pixel_uvs.view(2, 1, 1, 2)
        tex_map = torch.flip(tex_map, [1])
        tex_map = tex_map.permute(0, 3, 1, 2)
        tex_map = torch.cat([tex_map, tex_map], dim=0)
        expected_out = F.grid_sample(tex_map, pixel_uvs, align_corners=False)
        self.assertTrue(
            torch.allclose(texels.squeeze(), expected_out.squeeze()))
Example #8
    def loss(self, data, epoch):

        pred = self.forward(data)
        # embed()

        CE_Loss = nn.CrossEntropyLoss()
        ce_loss = CE_Loss(pred[0][-1][3], data['y_voxels'])

        chamfer_loss = torch.tensor(0).float().cuda()
        edge_loss = torch.tensor(0).float().cuda()
        laplacian_loss = torch.tensor(0).float().cuda()
        normal_consistency_loss = torch.tensor(0).float().cuda()

        for c in range(self.config.num_classes - 1):
            target = data['surface_points'][c].cuda()
            for k, (vertices, faces, _, _, _) in enumerate(pred[c][1:]):

                pred_mesh = Meshes(verts=list(vertices), faces=list(faces))
                pred_points = sample_points_from_meshes(pred_mesh, 3000)

                chamfer_loss += chamfer_distance(pred_points, target)[0]
                laplacian_loss += mesh_laplacian_smoothing(pred_mesh, method="uniform")
                normal_consistency_loss += mesh_normal_consistency(pred_mesh)
                edge_loss += mesh_edge_loss(pred_mesh)

        loss = 1 * chamfer_loss + 1 * ce_loss + 0.1 * laplacian_loss + 1 * edge_loss + 0.1 * normal_consistency_loss

        log = {"loss": loss.detach(),
               "chamfer_loss": chamfer_loss.detach(),
               "ce_loss": ce_loss.detach(),
               "normal_consistency_loss": normal_consistency_loss.detach(),
               "edge_loss": edge_loss.detach(),
               "laplacian_loss": laplacian_loss.detach()}
        return loss, log
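
For reference, a standalone sketch of the same PyTorch3D mesh regularizers combined with the weights used above (1, 0.1, 1, 0.1); the ico-sphere meshes merely stand in for the network predictions and the target surface points.

import torch
from pytorch3d.loss import (chamfer_distance, mesh_edge_loss,
                            mesh_laplacian_smoothing, mesh_normal_consistency)
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.utils import ico_sphere

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
pred_mesh = ico_sphere(3, device)                                 # stand-in prediction
target = sample_points_from_meshes(ico_sphere(4, device), 3000)   # stand-in surface points
pred_points = sample_points_from_meshes(pred_mesh, 3000)

chamfer = chamfer_distance(pred_points, target)[0]
loss = (1 * chamfer
        + 0.1 * mesh_laplacian_smoothing(pred_mesh, method="uniform")
        + 1 * mesh_edge_loss(pred_mesh, target_length=0.0)
        + 0.1 * mesh_normal_consistency(pred_mesh))
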
Example #9
def forward_step(th_scan_meshes, smplx, init_smplx_meshes, search_tree, pen_distance, tri_filtering_module):
    """
    Performs a forward step, given smplx and scan meshes.
    Then computes the losses.
    """

    # forward
    # verts, _, _, _ = smplx()
    verts = smplx()
    th_SMPLX_meshes = [tm.from_tensors(vertices=v,
                                      faces=smplx.faces) for v in verts]
    p3d_meshes = Meshes(verts=verts, faces=smplx.faces.expand(1,-1,-1))
    # losses
    loss = dict()
    loss['s2m'] = batch_point_to_surface([sm.vertices for sm in th_scan_meshes], th_SMPLX_meshes)
    loss['m2s'] = batch_point_to_surface([sm.vertices for sm in th_SMPLX_meshes], th_scan_meshes)
    loss['lap'] = torch.stack([laplacian_loss(sc, sm) for sc, sm in zip(init_smplx_meshes, th_SMPLX_meshes)])
    # loss['offsets'] = torch.mean(torch.mean(smplx.offsets**2, axis=1), axis=1)
    # loss['normal'] = mesh_normal_consistency(p3d_meshes).unsqueeze(0)
    # loss['interpenetration'] = interpenetration_loss(verts, smplx.faces, search_tree, pen_distance, tri_filtering_module, 1.0)
    return loss
    def test_verts_nan(self):
        num_verts = 30
        num_faces = 50
        for device in ["cpu", "cuda:0"]:
            for invalid in ["nan", "inf"]:
                verts = torch.rand((num_verts, 3),
                                   dtype=torch.float32,
                                   device=device)
                # randomly assign an invalid type
                verts[torch.randperm(num_verts)[:10]] = float(invalid)
                faces = torch.randint(num_verts,
                                      size=(num_faces, 3),
                                      dtype=torch.int64,
                                      device=device)
                meshes = Meshes(verts=[verts], faces=[faces])

                with self.assertRaisesRegex(ValueError,
                                            "Meshes contain nan or inf."):
                    sample_points_from_meshes(meshes,
                                              num_samples=100,
                                              return_normals=True)
Example #11
    def forward(self, images):
        '''
        Input: N images, as a list of N tensors of shape C*H*W
        Output: images from the corresponding N viewpoints, as a list of N tensors of shape C*H*W
        '''
        # Extract features for each image
        # embeddings = []
        # for i in range(len(images)):
        #     embeddings.append(self.img_embedding_model(images[i]))
        # features_cat = torch.zeros([embeddings[0].shape[0],self.f_dim*self.N]).cuda()
        # for i in range(len(embeddings)):
        #     x = self.GAP(embeddings[i])
        #     x = x.view(x.size(0), -1)
        #     features_cat[:,i*self.f_dim:(i+1)*self.f_dim] = x

        # Concatenate the images along the channel dimension, then apply the UNet directly
        features_cat = images[0]
        for i in range(len(images) - 1):
            features_cat = torch.cat((features_cat, images[i + 1]), dim=1)
        # Feature extraction
        # features_cat = self.img_embedding_model(features_cat)
        # features_cat = self.GAP(features_cat)
        # features_cat = features_cat.view(features_cat.size(0), -1)
        # Feed the features into the decoders to obtain the displacement map and the UV map
        pred_tex, _ = self.displace_net(features_cat)
        _, mesh_map = self.uvmap_net(features_cat)

        raw_vtx = self.meshtemp.get_vertex_positions(mesh_map)

        vertex_positions, mesh_faces, input_uvs, input_texture, mesh_face_textures = self.meshtemp.forward_renderer(
            raw_vtx, pred_tex)

        repro_imgs, img_probs = self.renderer(vertex_positions, mesh_faces,
                                              input_uvs, input_texture,
                                              mesh_face_textures)

        new_mesh = Meshes(verts=raw_vtx,
                          faces=self.meshtemp.mesh.faces.unsqueeze(0).repeat(
                              raw_vtx.shape[0], 1, 1))
        return repro_imgs, raw_vtx, img_probs, self.meshtemp.mesh.faces, new_mesh, input_texture
def deform_mesh_by_closest_vertices(mesh,
                                    src_mesh,
                                    tar_mesh,
                                    device=torch.device("cpu")):
    """
    Deform `mesh` into the shape of `tar_mesh`, based on the indices of the
    `src_mesh` vertices closest to each vertex of `mesh`.
    [ToDo] Support batched (multi-mesh) processing.
    """
    if (len(mesh.verts_packed().shape) == 2):
        batch_size = 1
    else:
        batch_size = mesh.verts_packed().shape[0]

    # Convert from pytorch3d to psbody.mesh
    mesh_face_pytorch3d = mesh.faces_packed()
    mesh = Mesh(mesh.verts_packed().detach().cpu().numpy(),
                mesh.faces_packed().detach().cpu().numpy())
    src_mesh = Mesh(src_mesh.verts_packed().detach().cpu().numpy(),
                    src_mesh.faces_packed().detach().cpu().numpy())
    tar_mesh = Mesh(tar_mesh.verts_packed().detach().cpu().numpy(),
                    tar_mesh.faces_packed().detach().cpu().numpy())

    # verts_idx : indices of the closest vertices
    verts_idx, _ = src_mesh.closest_vertices(mesh.v)
    verts_idx = np.array(verts_idx)
    new_mesh_verts = mesh.v - src_mesh.v[verts_idx] + tar_mesh.v[verts_idx]
    #print( "verts_idx : ", verts_idx )
    #print( "new_mesh_verts.shape : ", new_mesh_verts.shape )    # (7702, 3)
    #print( "mesh.f.shape : ", mesh.f.shape )                    # (15180, 3)

    # Convert from psbody.mesh back to pytorch3d
    if (batch_size == 1):
        new_mesh = Meshes(
            torch.from_numpy(
                new_mesh_verts).requires_grad_(False).float().unsqueeze(0),
            mesh_face_pytorch3d.unsqueeze(0)).to(device)
    else:
        raise NotImplementedError()

    return new_mesh
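
A usage sketch, assuming psbody-mesh is installed (the function converts to it internally); the three ico-spheres are illustrative inputs with matching topology.

from pytorch3d.utils import ico_sphere

mesh = ico_sphere(2)
src_mesh = ico_sphere(2)
tar_mesh = ico_sphere(2).scale_verts(1.5)
deformed = deform_mesh_by_closest_vertices(mesh, src_mesh, tar_mesh)
print(deformed.verts_packed().shape)
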
Example #13
def transform_meshes(meshes, camera_info):
    """
    input:
    @meshes: mesh in local frame
    @camera_info: plane params from camera info, type = dict, must contain 'position' and 'rotation' as keys
    output:
    mesh in global frame.
    """
    tran = camera_info["position"]
    rot = camera_info["rotation"]
    verts_packed = meshes.verts_packed()
    verts_packed = verts_packed * torch.tensor(
        [1.0, -1.0, -1.0], dtype=torch.float32)  # suncg2habitat
    faces_list = meshes.faces_list()
    tex = meshes.textures
    rot_matrix = torch.tensor(quaternion.as_rotation_matrix(rot),
                              dtype=torch.float32)
    verts_packed = torch.mm(rot_matrix, verts_packed.T).T + torch.tensor(
        tran, dtype=torch.float32)
    verts_list = list(
        verts_packed.split(meshes.num_verts_per_mesh().tolist(), dim=0))
    return Meshes(verts=verts_list, faces=faces_list, textures=tex)
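
A usage sketch, assuming the numpy-quaternion package that transform_meshes relies on; the pose values and the ico-sphere are purely illustrative.

import numpy as np
import quaternion  # numpy-quaternion, used inside transform_meshes
from pytorch3d.utils import ico_sphere

mesh_local = ico_sphere(2)
camera_info = {
    "position": np.array([0.5, 1.0, -2.0], dtype=np.float32),
    "rotation": quaternion.from_rotation_vector([0.0, np.pi / 2, 0.0]),
}
mesh_global = transform_meshes(mesh_local, camera_info)
print(mesh_global.verts_packed().shape)
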
    def test_texture_sampling(self):
        device = torch.device("cuda:0")
        batch_size = 6
        # verts
        verts = torch.rand((batch_size, 6, 3), device=device, dtype=torch.float32)
        verts[:, :3, 2] = 1.0
        verts[:, 3:, 2] = -1.0
        # textures
        texts = torch.rand((batch_size, 6, 3), device=device, dtype=torch.float32)
        # faces
        faces = torch.tensor([[0, 1, 2], [3, 4, 5]], device=device, dtype=torch.int64)
        faces = faces.view(1, 2, 3).expand(batch_size, -1, -1)

        meshes = Meshes(verts=verts, faces=faces, textures=TexturesVertex(texts))

        num_samples = 24
        samples, normals, textures = sample_points_from_meshes(
            meshes, num_samples=num_samples, return_normals=True, return_textures=True
        )

        textures_naive = torch.zeros(
            (batch_size, num_samples, 3), dtype=torch.float32, device=device
        )
        for n in range(batch_size):
            for i in range(num_samples):
                p = samples[n, i]
                if p[2] > 0.0:  # sampled from 1st face
                    v0, v1, v2 = verts[n, 0, :2], verts[n, 1, :2], verts[n, 2, :2]
                    w0, w1, w2 = barycentric_coordinates(p[:2], v0, v1, v2)
                    t0, t1, t2 = texts[n, 0], texts[n, 1], texts[n, 2]
                else:  # sampled from 2nd face
                    v0, v1, v2 = verts[n, 3, :2], verts[n, 4, :2], verts[n, 5, :2]
                    w0, w1, w2 = barycentric_coordinates(p[:2], v0, v1, v2)
                    t0, t1, t2 = texts[n, 3], texts[n, 4], texts[n, 5]

                tt = w0 * t0 + w1 * t1 + w2 * t2
                textures_naive[n, i] = tt

        self.assertClose(textures, textures_naive)
Example #15
def load_objs_as_meshes(files: list, device=None, load_textures: bool = True):
    """
    Load meshes from a list of .obj files using the load_obj function, and
    return them as a Meshes object. This only works for meshes which have a
    single texture image for the whole mesh. See the load_obj function for more
    details. material_colors and normals are not stored.

    Args:
        files: A list of file-like objects (with methods read, readline, tell,
            and seek), pathlib paths or strings containing file names.
        device: Desired device of returned Meshes. Default:
            uses the current device for the default tensor type.
        load_textures: Boolean indicating whether material files are loaded

    Returns:
        New Meshes object.
    """
    mesh_list = []
    for f_obj in files:
        # TODO: update this function to support the two texturing options.
        verts, faces, aux = load_obj(f_obj, load_textures=load_textures)
        verts = verts.to(device)
        tex = None
        tex_maps = aux.texture_images
        if tex_maps is not None and len(tex_maps) > 0:
            verts_uvs = aux.verts_uvs[None, ...].to(device)  # (1, V, 2)
            faces_uvs = faces.textures_idx[None, ...].to(device)  # (1, F, 3)
            image = list(tex_maps.values())[0].to(device)[None]
            tex = Textures(verts_uvs=verts_uvs,
                           faces_uvs=faces_uvs,
                           maps=image)

        mesh = Meshes(verts=[verts],
                      faces=[faces.verts_idx.to(device)],
                      textures=tex)
        mesh_list.append(mesh)
    if len(mesh_list) == 1:
        return mesh_list[0]
    return join_meshes_as_batch(mesh_list)
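
A minimal usage sketch; the .obj paths are placeholders.

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Placeholder paths: any .obj files with at most one texture image each will do.
meshes = load_objs_as_meshes(["chair.obj", "table.obj"], device=device)
print(len(meshes), meshes.verts_padded().shape)
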
Example #16
def generate_video_from_obj(obj_path, image_path, video_path, renderer):
    input_image = cv2.imread(image_path)
    input_image = input_image[:, :input_image.shape[1] // 3]
    input_image = cv2.resize(input_image, (512, 512))

    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Load obj file
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = TexturesVertex(verts_features=verts_rgb_colors)
    # wo_textures = TexturesVertex(verts_features=torch.ones_like(verts_rgb_colors)*0.75)

    # Load obj
    mesh = load_objs_as_meshes([obj_path], device=device)

    # Set mesh
    vers = mesh.verts_list()
    faces = mesh.faces_list()
    mesh_w_tex = Meshes(vers, faces, textures)
    # mesh_wo_tex = Meshes(vers, faces, wo_textures)

    # create VideoWriter
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter(video_path, fourcc, 20.0, (1024, 512))

    for i in tqdm(range(90)):
        R, T = look_at_view_transform(1.8, 0, i * 4, device=device)
        images_w_tex = renderer(mesh_w_tex, R=R, T=T)
        images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0,
                               1.0)[:, :, ::-1] * 255
        # images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
        # images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255
        image = np.concatenate([input_image, images_w_tex], axis=1)
        out.write(image.astype('uint8'))
    out.release()
    def forward(self, coeffs, render=True):
        batch_num = coeffs.shape[0]

        id_coeff, exp_coeff, tex_coeff, angles, gamma, translation = self.split_coeffs(
            coeffs)

        vs = self.get_vs(id_coeff, exp_coeff)

        rotation = self.compute_rotation_matrix(angles)

        vs_t = self.rigid_transform(
            vs, rotation, translation)

        lms_t = self.get_lms(vs_t)
        lms_proj = self.project_vs(lms_t)
        lms_proj = torch.stack(
            [lms_proj[:, :, 0], self.img_size-lms_proj[:, :, 1]], dim=2)
        if render:
            face_texture = self.get_color(tex_coeff)
            face_norm = self.compute_norm(vs, self.tri, self.point_buf)
            face_norm_r = face_norm.bmm(rotation)
            face_color = self.add_illumination(
                face_texture, face_norm_r, gamma)
            face_color_tv = TexturesVertex(face_color)

            mesh = Meshes(vs_t, self.tri.repeat(
                batch_num, 1, 1), face_color_tv)
            rendered_img = self.renderer(mesh)
            rendered_img = torch.clamp(rendered_img, 0, 255)

            return {'rendered_img': rendered_img,
                    'lms_proj': lms_proj,
                    'face_texture': face_texture,
                    'vs': vs_t,
                    'tri': self.tri,
                    'color': face_color}
        else:
            return {'lms_proj': lms_proj}
Example #18
def validate_training_AE(validation_generator, model):
    '''
    This function is used to calculate validation loss during training NMF AE
    '''
    print("Validating model......")
    with torch.no_grad():
        total_loss = 0
        items = 0
        for input, _, _ in validation_generator:
            input = input.cuda()
            _, _, pred2, face = model(
                input
            )  # Point prediction after each deform block and face information (refer figure 4 in paper)
            mesh_p2 = Meshes(verts=pred2,
                             faces=face)  # Construct Differentiable mesh M_p2
            pts2 = sample_points_from_meshes(
                mesh_p2, num_samples=2562
            )  # Differentiably sample random points from mesh surfaces
            loss, _ = chamfer_distance(pts2, input)
            total_loss += loss.item()
            items += 1

    return total_loss / items  # Return average validation loss
    def __call__(self,
                 points,
                 faces,
                 colors=None,
                 mean=None,
                 std=None,
                 grayscale=True):
        assert len(points.shape) == 4 and points.shape[1] == 3
        colors = colors if colors is not None else torch.ones_like(points)
        points, colors = grid_to_list(points), grid_to_list(colors)
        if self.renderer is None:
            self.setup(points.device)
        textures = TexturesVertex(verts_features=colors)

        mesh = Meshes(verts=points, faces=faces, textures=textures)
        r_images = self.renderer(mesh)
        r_images = r_images.permute(0, 3, 1, 2).contiguous()
        r_images = r_images[:, :3, :, :]
        if grayscale:
            r_images = r_images.mean(dim=1, keepdim=True)
        if mean and std:
            r_images = (r_images - mean) / std
        return r_images
Example #20
def collate_fn(batch):
    imgs, verts, faces, points, normals, voxels, Ps, id_strs = zip(*batch)
    imgs = torch.stack(imgs, dim=0)
    if verts[0] is not None and faces[0] is not None:
        # TODO(gkioxari) Meshes should accept tuples
        meshes = Meshes(verts=list(verts), faces=list(faces))
    else:
        meshes = None
    if points[0] is not None and normals[0] is not None:
        points = torch.stack(points, dim=0)
        normals = torch.stack(normals, dim=0)
    else:
        points, normals = None, None
    if voxels[0] is None:
        voxels = None
        Ps = None
    elif voxels[0].dim() == 2:
        # They are voxel coords
        Ps = torch.stack(Ps, dim=0)
    elif voxels[0].dim() == 3:
        # They are actual voxels
        voxels = torch.stack(voxels, dim=0)
    return imgs, meshes, points, normals, voxels, Ps, id_strs
Example #21
def generate_video_from_obj(obj_path, video_path, renderer):
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Load obj file
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = TexturesVertex(verts_features=verts_rgb_colors)
    wo_textures = TexturesVertex(
        verts_features=torch.ones_like(verts_rgb_colors) * 0.75)

    # Load obj
    #mesh = load_objs_as_meshes([obj_path], device=device)
    mesh = trimesh.load(obj_path)
    mesh.vertices -= mesh.center_mass

    # Set mesh (verts/faces as lists so Meshes gets a batch dimension; vers is reused below)
    vers = [torch.FloatTensor(mesh.vertices).to(device)]
    faces = [torch.LongTensor(mesh.faces).to(device)]
    mesh_wo_tex = Meshes(verts=vers, faces=faces, textures=wo_textures)

    # create VideoWriter
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter(video_path, fourcc, 20.0, (512, 512))

    for i in tqdm(range(90)):
        R, T = look_at_view_transform(vers[0][1].mean(),
                                      0,
                                      i * 4,
                                      device=device)
        images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
        images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0,
                                1.0)[:, :, ::-1] * 255
        #image = np.concatenate([images_w_tex, images_wo_tex], axis=1)
        image = images_wo_tex
        out.write(image.astype('uint8'))
    out.release()
Example #22
def test_my_renderer():
    vert, normal, st, color, face = load_blender_ply_mesh(
        '../data/meshes/background.ply')
    # V, _ = vert.shape
    # meshes = Meshes(
    #     verts=[vert.to(device)],
    #     faces=[face.to(device)],
    #     textures=TexturesVertex([torch.cat((color.to(device), torch.ones(V, 1, device=device)), dim=1)])
    # )

    # vert = torch.tensor([
    #     [-1, -1, 0],
    #     [1, -1, 0],
    #     [1, 1, 0],
    #     [-1, 1, 0]
    # ], dtype=torch.float) * 30

    x = 80
    y = 93

    vert = torch.tensor([[-x, -y, 0], [x, -y, 0], [x, y, 0], [-x, y, 0]],
                        dtype=torch.float)

    face = torch.LongTensor([[0, 1, 2], [0, 2, 3]])

    meshes = Meshes(verts=[vert.to(device)],
                    faces=[face.to(device)],
                    textures=TexturesVertex([color.to(device)]))

    scene = join_meshes_as_scene(meshes)
    zbuffer_renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras,
        raster_settings=raster_settings,
    ),
                                    shader=IdentityShader())

    plot_channels(render(zbuffer_renderer, scene))
Example #23
def compute_mesh_visibility(vert, tri, bfm_torch, H_img, W_img):
    N, nver, _ = vert.shape
    ntri = tri.shape[0]
    tri = torch.from_numpy(tri).to(vert.device).unsqueeze(0).expand(N, ntri, 3)
    # Transform to NDC
    vert_t = vert + torch.tensor((0.5, 0.5, 0), dtype=torch.float, device=vert.device).view(1, 1, 3)
    vert_t = vert_t * torch.tensor((-1, 1, -1), dtype=torch.float, device=vert.device).view(1, 1, 3)
    vert_t = vert_t * torch.tensor((2. / float(W_img), 2. / float(H_img), 1),
                                   dtype=torch.float, device=vert.device).view(1, 1, 3)
    # vert_t = vert * torch.tensor((-1, 1, -1), dtype=torch.float, device=vert.device).view(1, 1, 3)
    # vert_t = vert_t * torch.tensor((2. / (float(W_img) - 1.), 2. / (float(H_img) - 1.), 1),
    #                                dtype=torch.float, device=vert.device).view(1, 1, 3)
    z_offset = -vert_t[:, :, 2].min() + 10.
    vert_t = vert_t + torch.tensor((0, 0, z_offset.item()), dtype=torch.float, device=vert.device).view(1, 1, 3)
    # Render
    mesh_torch = Meshes(verts=vert_t, faces=tri)
    pix_to_face, zbuf, _, _ = rasterize_meshes(mesh_torch, image_size=H_img, faces_per_pixel=1, cull_backfaces=True)
    assert H_img == W_img
    # Compute visibility
    zbuf = (zbuf.view(N, 1, H_img, W_img) - z_offset) * -1 - 1.
    zbuf = torch.where(torch.eq(pix_to_face.view(N, 1, H_img, W_img), -1), torch.zeros_like(zbuf) - 2e2, zbuf)
    vert_zbuf = sample_per_vert_feat(zbuf, vert, bfm_torch, H_img, W_img).squeeze(1)
    mask = torch.gt(vert[:, :, 2], vert_zbuf)
    return mask, zbuf
Example #24
def transform_meshes_to_camera_coord_system(meshes, boxes, zranges, Ks,
                                            imsize):
    device = meshes.device
    new_verts, new_faces = [], []
    h, w = imsize
    im_size = torch.tensor([w, h], device=device).view(1, 2)
    assert len(meshes) == len(zranges)
    for i in range(len(meshes)):
        verts, faces = meshes.get_mesh_verts_faces(i)
        if verts.numel() == 0:
            verts, faces = ico_sphere(level=3,
                                      device=device).get_mesh_verts_faces(0)
        assert not torch.isnan(verts).any()
        assert not torch.isnan(faces).any()
        roi = boxes[i].view(1, 4)
        zrange = zranges[i].view(1, 2)
        K = Ks[i].view(1, 3)
        cub3D = shape_utils.box2D_to_cuboid3D(zrange, K, roi, im_size)
        txz, tyz = shape_utils.cuboid3D_to_unitbox3D(cub3D)

        # image to camera coords
        verts[:, 0] = -verts[:, 0]
        verts[:, 1] = -verts[:, 1]

        # transform to destination size
        xz = verts[:, [0, 2]]
        yz = verts[:, [1, 2]]
        pxz = txz.inverse(xz.view(1, -1, 2)).squeeze(0)
        pyz = tyz.inverse(yz.view(1, -1, 2)).squeeze(0)
        verts = torch.stack([pxz[:, 0], pyz[:, 0], pxz[:, 1]],
                            dim=1).to(device, dtype=torch.float32)

        new_verts.append(verts)
        new_faces.append(faces)

    return Meshes(verts=new_verts, faces=new_faces)
def load_mesh(obj_path):
    device = torch.device('cuda')

    vertices, faces, aux = load_obj(obj_path)

    vertices_uvs = aux.verts_uvs[None, ...].to(device)
    faces_uvs = faces.textures_idx[None, ...].to(device)

    texture_maps = aux.texture_images
    texture_maps = list(texture_maps.values())[0]
    texture_maps = texture_maps[None, ...].to(device)

    textures = Textures(
        verts_uvs=vertices_uvs,
        faces_uvs=faces_uvs,
        maps=texture_maps,
    )

    vertices = vertices.to(device)
    faces = faces.verts_idx.to(device)

    mesh = Meshes(verts=[vertices], faces=[faces], textures=textures)

    return mesh
Example #26
    def loss_rnet(self, dorig, drec, ds_name='train'):

        out_put = self.rhm_train(**drec)
        verts_rhand = out_put.vertices

        rh_mesh = Meshes(verts=verts_rhand, faces=self.rh_f).to(
            self.device).verts_normals_packed().view(-1, 778, 3)
        h2o_gt = dorig['h2o_gt']
        o2h_signed, h2o, _ = point2point_signed(verts_rhand,
                                                dorig['obj_sampled_verts_gt'],
                                                rh_mesh)
        ######### dist loss
        loss_dist_h = 35 * (1. - self.cfg.kl_coef) * torch.mean(
            torch.einsum('ij,j->ij', torch.abs(h2o.abs() - h2o_gt.abs()),
                         self.v_weights2))
        ########## verts loss
        loss_mesh_rec_w = 20 * (1. - self.cfg.kl_coef) * torch.mean(
            torch.einsum(
                'ijk,j->ijk', torch.abs(
                    (dorig['hand_verts_gt'] - verts_rhand)), self.v_weights2))
        ########## edge loss
        loss_edge = 10 * (1. - self.cfg.kl_coef) * self.LossL1(
            self.edges_for(verts_rhand, self.vpe),
            self.edges_for(dorig['hand_verts_gt'], self.vpe))
        ##########

        loss_dict = {
            'loss_edge_r': loss_edge,
            'loss_mesh_rec_r': loss_mesh_rec_w,
            'loss_dist_h_r': loss_dist_h,
        }

        loss_total = torch.stack(list(loss_dict.values())).sum()
        loss_dict['loss_total'] = loss_total

        return loss_total, loss_dict
def make_mesh(verts: torch.Tensor,
              faces: np.ndarray,
              detach: bool,
              textures=None) -> Meshes:
    device = torch.device("cuda:0")
    if detach:
        verts = verts.detach()
    # Initialize each vertex to be white in color.
    if textures is None:
        verts_rgb = torch.ones_like(verts)  # (N, V, 3), matching the batched verts

        textures = Textures(verts_rgb=verts_rgb.to(device))

    faces = torch.tensor(np.int32(faces), dtype=torch.long).cuda()

    # return Meshes(
    #     verts=[verts.to(device)],
    #     faces=[faces.to(device)],
    #     textures=textures
    # )

    return Meshes(verts=verts.to(device),
                  faces=faces.to(device).repeat(verts.shape[0], 1, 1),
                  textures=textures.to(device))
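
A usage sketch for make_mesh, feeding it an ico-sphere's vertices and faces; a CUDA device is assumed because the function hard-codes cuda:0.

from pytorch3d.utils import ico_sphere

src = ico_sphere(2)
verts = src.verts_packed()[None]            # (1, V, 3) float tensor
faces = src.faces_packed().cpu().numpy()    # (F, 3) integer array
mesh = make_mesh(verts, faces, detach=True)
print(mesh.verts_padded().shape, mesh.faces_padded().shape)
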
Example #28
def collate_batched_meshes(batch: List[Dict]):  # pragma: no cover
    """
    Take a list of objects in the form of dictionaries and merge them
    into a single dictionary. This function can be used with a Dataset
    object to create a torch.utils.data.Dataloader which directly
    returns Meshes objects.
    TODO: Add support for textures.

    Args:
        batch: List of dictionaries containing information about objects
            in the dataset.

    Returns:
        collated_dict: Dictionary of collated lists. If batch contains both
            verts and faces, a collated mesh batch is also returned.
    """
    if batch is None or len(batch) == 0:
        return None
    collated_dict = {}
    for k in batch[0].keys():
        collated_dict[k] = [d[k] for d in batch]

    collated_dict["mesh"] = None
    if {"verts", "faces"}.issubset(collated_dict.keys()):

        textures = None
        if "textures" in collated_dict:
            textures = TexturesAtlas(atlas=collated_dict["textures"])

        collated_dict["mesh"] = Meshes(
            verts=collated_dict["verts"],
            faces=collated_dict["faces"],
            textures=textures,
        )

    return collated_dict
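
As the docstring notes, this is typically passed as the collate_fn of a DataLoader; a small self-contained sketch with an in-memory toy dataset:

from torch.utils.data import DataLoader
from pytorch3d.utils import ico_sphere

sphere = ico_sphere(1)
# Toy map-style dataset: a plain list of dicts with "verts" and "faces" entries.
items = [{"verts": sphere.verts_packed(), "faces": sphere.faces_packed(), "label": i}
         for i in range(4)]

loader = DataLoader(items, batch_size=2, collate_fn=collate_batched_meshes)
for batch in loader:
    print(len(batch["mesh"]), batch["label"])  # 2 meshes per batch, labels as a list
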
Example #29
def render_images(vertices, faces, texture, faces_uvs, verts_uvs, crop_size,
                  device):
    textures = Textures(maps=texture, faces_uvs=faces_uvs, verts_uvs=verts_uvs)
    meshes = Meshes(vertices, faces, textures)

    R, T = look_at_view_transform(1.0, 0.5, 0, device=device)
    camera = OpenGLPerspectiveCameras(R=R, T=T, fov=20, device=device)

    raster_settings = RasterizationSettings(image_size=crop_size * 2,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=None,
                                            max_faces_per_bin=None)

    lights = PointLights(location=[[0.0, 0.0, -3.0]], device=device)

    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=camera, raster_settings=raster_settings),
                            shader=TexturedSoftPhongShader(cameras=camera,
                                                           device=device,
                                                           lights=lights))

    images = renderer(meshes)
    return images
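
A hedged usage sketch: rendering a single textured quad with render_images; every tensor value below is illustrative.

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
verts = torch.tensor([[[-1.0, -1.0, -1.0], [1.0, -1.0, -1.0],
                       [1.0, 1.0, -1.0], [-1.0, 1.0, -1.0]]], device=device)
faces = torch.tensor([[[0, 1, 2], [0, 2, 3]]], dtype=torch.int64, device=device)
verts_uvs = torch.tensor([[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]], device=device)
faces_uvs = faces.clone()
texture = torch.rand(1, 64, 64, 3, device=device)  # dummy texture map

images = render_images(verts, faces, texture, faces_uvs, verts_uvs,
                       crop_size=128, device=device)
print(images.shape)  # (1, 256, 256, 4)
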
Example #30
    def test_sample_textures_error(self):
        N = 5
        V = 20
        verts = torch.rand(size=(N, V, 3))
        faces = torch.randint(size=(N, 10, 3), high=V)
        tex = TexturesVertex(verts_features=torch.randn(size=(N, 10, 128)))

        # Verts features have the wrong number of verts
        with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
            Meshes(verts=verts, faces=faces, textures=tex)

        # Verts features have the wrong batch dim
        tex = TexturesVertex(verts_features=torch.randn(size=(1, V, 128)))
        with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
            Meshes(verts=verts, faces=faces, textures=tex)

        meshes = Meshes(verts=verts, faces=faces)
        meshes.textures = tex

        # Cannot use the texture attribute set on meshes for sampling
        # textures if the dimensions don't match
        with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
            meshes.sample_textures(None)