Example #1
# Imports reconstructed for context; the pytorch3d paths are standard, while
# logger_py, eps_denom and denoise_normals are DSS-specific helpers whose
# exact import paths are assumed from the yifita/DSS project layout:
from typing import Union

import torch
import torch.nn.functional as F
from pytorch3d.ops import knn_points
from pytorch3d.structures import list_to_padded, padded_to_list

# from DSS import logger_py
# from DSS.utils.mathHelper import eps_denom


def upsample_ear(points,
                 normals,
                 n_points: Union[int, torch.Tensor],
                 num_points=None,
                 neighborhood_size=16,
                 repulsion_mu=0.4,
                 edge_sensitivity=1.0):
    """
    Args:
        points (N, P, 3)
        n_points (tensor of [N] or integer): target number of points per cloud

    """
    batch_size = points.shape[0]
    knn_k = neighborhood_size
    if num_points is None:
        num_points = torch.tensor([points.shape[1]] * points.shape[0],
                                  device=points.device,
                                  dtype=torch.long)
    if not ((num_points - num_points[0]) == 0).all():
        logger_py.warning(
            "May encounter unexpected behavior for heterogeneous batches")
    if num_points.sum() == 0:
        return points, num_points

    # bandwidth heuristic: the average point spacing scales roughly like
    # diag / num_points; its inverse serves as the Gaussian precision
    point_cloud_diag = (points.max(dim=-2)[0] -
                        points.min(dim=-2)[0]).norm(dim=-1)
    inv_sigma_spatial = num_points / point_cloud_diag
    spatial_dist = 16 / inv_sigma_spatial

    # K + 1 neighbors, since each point's nearest neighbor is itself
    knn_result = knn_points(points,
                            points,
                            num_points,
                            num_points,
                            K=knn_k + 1,
                            return_nn=True,
                            return_sorted=True)
    # dists, idxs, nn, grid = frnn.frnn_grid_points(points_proj, points_proj, num_points, num_points, K=self.knn_k + 1,
    #                                               r=torch.sqrt(spatial_dist), return_nn=True)
    # knn_result = _KNN(dists=dists, idx=idxs, knn=nn)
    # drop the self-neighbor at index 0
    _knn_idx = knn_result.idx[..., 1:]
    _knn_dists = knn_result.dists[..., 1:]
    _knn_nn = knn_result.knn[..., 1:, :]
    # cap per-point movement by the mean nearest-neighbor distance
    move_clip = knn_result.dists[..., 1].mean().sqrt()

    # 2. LOP projection
    # NOTE: the original guard `if denoise_normals:` tested the helper
    # function itself (always truthy), so the denoising step effectively
    # always ran; call it unconditionally instead
    normals_denoised, weights_p, weights_n = denoise_normals(
        points, normals, num_points, knn_result=knn_result)
    normals = normals_denoised

    # (optional) search knn in the original points
    # e(-(<n, p-pi>)^2/sigma_p)
    weight_lop = torch.exp(-torch.sum(normals[:, :, None, :] *
                                      (points[:, :, None, :] - _knn_nn),
                                      dim=-1)**2 * inv_sigma_spatial)
    weight_lop[_knn_dists > spatial_dist] = 0
    # weight_lop[self._knn_idx < 0] = 0

    # spatial weight (knn_points returns squared distances)
    deltap = _knn_dists
    spatial_w = torch.exp(-deltap * inv_sigma_spatial)
    spatial_w[deltap > spatial_dist] = 0
    # spatial_w[self._knn_idx[..., 1:] < 0] = 0
    density_w = torch.sum(spatial_w, dim=-1) + 1.0
    move_data = torch.sum(
        weight_lop[..., None] * (points[:, :, None, :] - _knn_nn), dim=-2) / \
        eps_denom(torch.sum(weight_lop, dim=-1, keepdim=True))
    move_repul = repulsion_mu * density_w[..., None] * torch.sum(spatial_w[..., None] * (
        knn_result.knn[:, :, 1:, :] - points[:, :, None, :]), dim=-2) / \
        eps_denom(torch.sum(spatial_w, dim=-1, keepdim=True))
    # F.normalize defaults to dim=1, which here would normalize across the
    # point dimension; normalize each 3-D displacement vector instead
    move_repul = F.normalize(move_repul, dim=-1) * move_repul.norm(
        dim=-1, keepdim=True).clamp_max(move_clip)
    move_data = F.normalize(move_data, dim=-1) * move_data.norm(
        dim=-1, keepdim=True).clamp_max(move_clip)
    move = move_data + move_repul
    points = points - move

    # clamp to avoid an endless loop when a cloud already exceeds the target
    n_remaining = (n_points - num_points).clamp(min=0)
    while not (n_remaining == 0).all():
        sparse_pts = points
        sparse_dists = _knn_dists
        sparse_knn = _knn_nn
        batch_size, P, _ = sparse_pts.shape
        # insert at most P // 10 new points per iteration
        max_P = (P // 10)
        # sparse_knn_normals = frnn.frnn_gather(
        #     normals_init, knn_result.idx, num_points)[:, 1:]
        # candidate insertions: weighted midpoints between each point and its
        # K neighbors, biased 2:1 toward the source point
        mid_points = (sparse_knn + 2 * sparse_pts[..., None, :]) / 3
        # difference between each candidate and all K neighbors: N,P,K,K,3
        mid_nn_diff = mid_points.unsqueeze(-2) - sparse_knn.unsqueeze(-3)
        # for each candidate, the distance to its closest neighbor
        min_dist2 = torch.norm(mid_nn_diff, dim=-1)  # N,P,K,K
        min_dist2 = min_dist2.min(dim=-1)[0]  # N,P,K
        father_sparsity, father_nb = min_dist2.max(dim=-1)  # N,P
        # neighborhood to insert
        sparsity_sorted = father_sparsity.sort(dim=1).indices
        n_new_points = n_remaining.clone()
        n_new_points[n_new_points > max_P] = max_P
        sparsity_sorted = sparsity_sorted[:, -max_P:]
        # per point, the midpoint towards its sparsest neighbor (N, P, 3);
        # the original two-arange advanced indexing only broadcasts for a
        # batch size of 1, so gather along the neighbor dimension instead
        father_mid = torch.gather(
            mid_points, 2,
            father_nb[:, :, None, None].expand(-1, -1, 1, 3)).squeeze(2)
        # N, max_P, 3, sparsest at the end
        new_pts = torch.gather(
            father_mid, 1, sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))
        total_pts_list = []
        for b, pts_batch in enumerate(
                padded_to_list(points, num_points.tolist())):
            # explicit start index: `new_pts[b][-n:]` returns *all* rows when
            # n == 0, which would add points to an already finished cloud
            start = new_pts.shape[1] - int(n_new_points[b])
            total_pts_list.append(
                torch.cat([new_pts[b][start:], pts_batch], dim=0))

        # write back to `points` so the next iteration (and the final return)
        # sees the upsampled cloud; the original `points_proj` name was never
        # fed back into the loop and was undefined if the loop body never ran
        points = list_to_padded(total_pts_list)
        n_remaining = n_remaining - n_new_points
        num_points = n_new_points + num_points
        knn_result = knn_points(points,
                                points,
                                num_points,
                                num_points,
                                K=knn_k + 1,
                                return_nn=True)
        _knn_idx = knn_result.idx[..., 1:]
        _knn_dists = knn_result.dists[..., 1:]
        _knn_nn = knn_result.knn[..., 1:, :]

    return points, num_points
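
A minimal usage sketch (synthetic inputs; assumes the DSS helpers referenced above, e.g. denoise_normals, are importable):

points = torch.rand(1, 512, 3)
normals = F.normalize(torch.rand(1, 512, 3) - 0.5, dim=-1)
dense_pts, dense_num = upsample_ear(points, normals, n_points=2048)
# dense_pts: (1, 2048, 3), dense_num: tensor([2048])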
Example #2
# Nested helper extracted from `upsample` (see Example #4); `pcl` is captured
# from the enclosing function's scope.
def _return_value(points, num_points, return_pcl):
    if return_pcl:
        points_list = padded_to_list(points, num_points.tolist())
        return pcl.__class__(points_list)
    else:
        return points, num_points
Example #3
File: cloud.py, Project: yifita/DSS
# Shares the imports from Example #1, plus:
# from pytorch3d.ops.knn import _KNN
def upsample(points,
             n_points: Union[int, torch.Tensor],
             num_points=None,
             neighborhood_size=16,
             knn_result=None):
    """
    Args:
        points (N, P, 3)
        n_points (tensor of [N] or integer): target number of points per cloud

    """
    batch_size = points.shape[0]
    knn_k = neighborhood_size
    if num_points is None:
        num_points = torch.tensor([points.shape[1]] * points.shape[0],
                                  device=points.device,
                                  dtype=torch.long)
    if not ((num_points - num_points[0]) == 0).all():
        logger_py.warning(
            "May encounter unexpected behavior for heterogeneous batches")
    if num_points.sum() == 0:
        return points, num_points
    # clamp to avoid an endless loop when a cloud already exceeds the target
    n_remaining = (n_points - num_points).clamp(min=0)
    if (n_remaining == 0).all():
        return points, num_points

    point_cloud_diag = (points.max(dim=-2)[0] -
                        points.min(dim=-2)[0]).norm(dim=-1)
    inv_sigma_spatial = num_points / point_cloud_diag
    spatial_dist = 16 / inv_sigma_spatial

    if knn_result is None:
        knn_result = knn_points(points,
                                points,
                                num_points,
                                num_points,
                                K=knn_k + 1,
                                return_nn=True,
                                return_sorted=True)

        # drop the self-neighbor at index 0
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])

    while not (n_remaining == 0).all():
        sparse_pts = points
        sparse_dists = knn_result.dists
        sparse_knn = knn_result.knn
        batch_size, P, _ = sparse_pts.shape
        # insert at most P // 10 new points per iteration
        max_P = (P // 10)
        # sparse_knn_normals = frnn.frnn_gather(
        #     normals_init, knn_result.idx, num_points)[:, 1:]
        # candidate insertions: weighted midpoints between each point and its
        # K neighbors, biased 2:1 toward the source point
        mid_points = (sparse_knn + 2 * sparse_pts[..., None, :]) / 3
        # difference between each candidate and all K neighbors: N,P,K,K,3
        mid_nn_diff = mid_points.unsqueeze(-2) - sparse_knn.unsqueeze(-3)
        # for each candidate, the distance to its closest neighbor
        min_dist2 = torch.norm(mid_nn_diff, dim=-1)  # N,P,K,K
        min_dist2 = min_dist2.min(dim=-1)[0]  # N,P,K
        father_sparsity, father_nb = min_dist2.max(dim=-1)  # N,P
        # neighborhood to insert
        sparsity_sorted = father_sparsity.sort(dim=1).indices
        n_new_points = n_remaining.clone()
        n_new_points[n_new_points > max_P] = max_P
        sparsity_sorted = sparsity_sorted[:, -max_P:]
        # per point, the midpoint towards its sparsest neighbor (N, P, 3);
        # the original two-arange advanced indexing only broadcasts for a
        # batch size of 1, so gather along the neighbor dimension instead
        father_mid = torch.gather(
            mid_points, 2,
            father_nb[:, :, None, None].expand(-1, -1, 1, 3)).squeeze(2)
        # N, max_P, 3, sparsest at the end
        new_pts = torch.gather(
            father_mid, 1, sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))

        # debugging helpers, only needed if the save_ply calls are enabled:
        # from DSS.utils.io import save_ply
        # sparse_selected = torch.gather(
        #     sparse_pts, 1,
        #     sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))
        # save_ply('tests/outputs/test_uniform_projection/init.ply', sparse_pts.view(-1,3).cpu())
        # save_ply('tests/outputs/test_uniform_projection/sparse.ply', sparse_selected[0].cpu())
        # save_ply('tests/outputs/test_uniform_projection/new_pts.ply', new_pts.view(-1,3).cpu().detach())
        total_pts_list = []
        for b, pts_batch in enumerate(
                padded_to_list(points, num_points.tolist())):
            # explicit start index: `new_pts[b][-n:]` returns all rows when n == 0
            start = new_pts.shape[1] - int(n_new_points[b])
            total_pts_list.append(
                torch.cat([new_pts[b][start:], pts_batch], dim=0))

        points = list_to_padded(total_pts_list)
        n_remaining = n_remaining - n_new_points
        num_points = n_new_points + num_points
        knn_result = knn_points(points,
                                points,
                                num_points,
                                num_points,
                                K=knn_k + 1,
                                return_nn=True)
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])

    return points, num_points
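
A small usage sketch of this tensor-only variant (synthetic input):

pts = torch.rand(2, 256, 3)
dense_pts, counts = upsample(pts, n_points=1024)
# dense_pts: (2, 1024, 3), counts: tensor([1024, 1024])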
Example #4
# Variant of Example #3 that also accepts Pointclouds. `is_pointclouds` and
# `convert_pointclouds_to_tensor` are assumed to be DSS utility helpers.
def upsample(
        pcl: Union[Pointclouds, torch.Tensor],
        n_points: Union[int, torch.Tensor],
        num_points=None,
        neighborhood_size=16,
        knn_result=None
) -> Union[Pointclouds, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Iteratively add points to the sparsest region
    Args:
        points (tensor of [N, P, 3] or Pointclouds)
        n_points (tensor of [N] or integer): target number of points per cloud
    Returns:
        Pointclouds or (padded_points, num_points)
    """
    def _return_value(points, num_points, return_pcl):
        if return_pcl:
            points_list = padded_to_list(points, num_points.tolist())
            return pcl.__class__(points_list)
        else:
            return points, num_points

    return_pcl = is_pointclouds(pcl)
    points, num_points = convert_pointclouds_to_tensor(pcl)

    knn_k = neighborhood_size

    if not ((num_points - num_points[0]) == 0).all():
        logger_py.warning(
            "Upsampling operation may encounter unexpected behavior for heterogeneous batches"
        )

    if num_points.sum() == 0:
        return _return_value(points, num_points, return_pcl)

    # clamp so that clouds already at or above the target add no new points
    # and cannot keep the loop below from terminating
    n_remaining = (n_points - num_points).to(dtype=torch.long).clamp(min=0)
    if (n_remaining == 0).all():
        return _return_value(points, num_points, return_pcl)

    if knn_result is None:
        knn_result = knn_points(points,
                                points,
                                num_points,
                                num_points,
                                K=knn_k + 1,
                                return_nn=True,
                                return_sorted=True)

        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])

    while not (n_remaining == 0).all():
        sparse_pts = points
        sparse_dists = knn_result.dists
        sparse_knn = knn_result.knn
        batch_size, P, _ = sparse_pts.shape
        # insert at most P // 8 new points per iteration
        max_P = (P // 8)
        # sparse_knn_normals = frnn.frnn_gather(
        #     normals_init, knn_result.idx, num_points)[:, 1:]
        # candidate insertions: weighted midpoints between each point and its
        # K neighbors, biased 2:1 toward the source point
        mid_points = (sparse_knn + 2 * sparse_pts[..., None, :]) / 3
        # difference between each candidate and all K neighbors: N,P,K,K,3
        mid_nn_diff = mid_points.unsqueeze(-2) - sparse_knn.unsqueeze(-3)
        # for each candidate, the distance to its closest neighbor
        min_dist2 = torch.norm(mid_nn_diff, dim=-1)  # N,P,K,K
        min_dist2 = min_dist2.min(dim=-1)[0]  # N,P,K
        father_sparsity, father_nb = min_dist2.max(dim=-1)  # N,P
        # neighborhood to insert
        sparsity_sorted = father_sparsity.sort(dim=1).indices
        n_new_points = n_remaining.clone()
        n_new_points[n_new_points > max_P] = max_P
        sparsity_sorted = sparsity_sorted[:, -max_P:]
        new_pts = torch.gather(
            mid_points[torch.arange(mid_points.shape[0]).view(-1, 1, 1),
                       torch.arange(mid_points.shape[1]).view(1, -1, 1),
                       father_nb.unsqueeze(-1)].squeeze(-2), 1,
            sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))

        # debugging helper, unused in the main path:
        # sparse_selected = torch.gather(
        #     sparse_pts, 1,
        #     sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))

        total_pts_list = []
        for b, pts_batch in enumerate(
                padded_to_list(points, num_points.tolist())):
            # explicit start index: `new_pts[b][-n:]` returns all rows when n == 0
            start = new_pts.shape[1] - int(n_new_points[b])
            total_pts_list.append(
                torch.cat([new_pts[b][start:], pts_batch], dim=0))

        points = list_to_padded(total_pts_list)
        n_remaining = n_remaining - n_new_points
        num_points = n_new_points + num_points
        knn_result = knn_points(points,
                                points,
                                num_points,
                                num_points,
                                K=knn_k + 1,
                                return_nn=True)
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])

    return _return_value(points, num_points, return_pcl)
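
This variant accepts and returns a pytorch3d Pointclouds; a sketch (the heterogeneous batch below triggers the warning about unexpected behavior):

from pytorch3d.structures import Pointclouds

pcl_in = Pointclouds([torch.rand(300, 3), torch.rand(200, 3)])
pcl_out = upsample(pcl_in, n_points=1000)                # returns a Pointclouds
padded, counts = upsample(torch.rand(2, 256, 3), 1000)   # tensor path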
Example #5
    # Test method from a unittest.TestCase in the DSS test suite; it relies on
    # pytorch3d, imageio, trimesh and DSS utilities (MVRDataset, camera helpers)
    def test_dataset(self):
        # 1. rerender input point clouds / meshes using the saved camera_mat
        #    compare mask image with saved mask image
        # 2. backproject masked points to space with dense depth map,
        #    fuse all views and save
        batch_size = 1
        device = torch.device('cuda:0')

        data_dir = 'data/synthetic/cube_mesh'
        output_dir = os.path.join('tests', 'outputs', 'test_data')
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)

        # dataset
        dataset = MVRDataset(data_dir=data_dir,
                             load_dense_depth=True,
                             mode="train")
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=batch_size,
                                                  num_workers=0,
                                                  shuffle=False)
        meshes = load_objs_as_meshes([os.path.join(data_dir,
                                                   'mesh.obj')]).to(device)
        cams = dataset.get_cameras().to(device)
        image_size = imageio.imread(dataset.image_files[0]).shape[0]

        # initialize rasterizer; we only check the mask PNGs, so there is no
        # need to create lights, shaders, etc.
        raster_settings = RasterizationSettings(
            image_size=image_size,
            blur_radius=0.0,
            faces_per_pixel=5,
            # None lets pytorch3d choose between naive and coarse-to-fine rasterization
            bin_size=None,
            max_faces_per_bin=None  # this setting is for coarse rasterization
        )
        rasterizer = MeshRasterizer(cameras=None,
                                    raster_settings=raster_settings)

        # render with loaded camera positions and training transformation functions
        pixel_world_all = []
        for idx, data in enumerate(data_loader):
            # get data
            img = data.get('img.rgb').to(device)
            assert (img.min() >= 0 and img.max() <= 1
                    ), "Image values must be floats in [0, 1]."
            mask_gt = data.get('img.mask').to(device).permute(0, 2, 3, 1)

            camera_mat = data['camera_mat'].to(device)

            cams.R, cams.T = decompose_to_R_and_t(camera_mat)
            cams._N = cams.R.shape[0]
            cams.to(device)
            self.assertTrue(
                torch.equal(cams.get_world_to_view_transform().get_matrix(),
                            camera_mat))

            # transform to view and rerender with non-rotated camera
            verts_padded = transform_to_camera_space(meshes.verts_padded(),
                                                     cams)
            meshes_in_view = meshes.offset_verts(
                -meshes.verts_packed() + padded_to_packed(
                    verts_padded, meshes.mesh_to_verts_packed_first_idx(),
                    meshes.verts_packed().shape[0]))

            fragments = rasterizer(meshes_in_view,
                                   cameras=dataset.get_cameras().to(device))

            # compare mask
            mask = fragments.pix_to_face[..., :1] >= 0
            imageio.imwrite(os.path.join(output_dir, "mask_%06d.png" % idx),
                            mask[0, ...].cpu().to(dtype=torch.uint8) * 255)
            # allow up to 5 pixels of difference
            self.assertTrue(torch.sum(mask_gt != mask) < 5)

            # check dense maps
            # sample pixel coordinates in the NDC range (-1, 1) for backprojection
            pixels = arange_pixels((image_size, image_size),
                                   batch_size)[1].to(device)

            depth_img = data.get('img.depth').to(device)
            # get the depth and mask at the sampled pixel position
            depth_gt = get_tensor_values(depth_img,
                                         pixels,
                                         squeeze_channel_dim=True)
            mask_gt = get_tensor_values(mask.permute(0, 3, 1, 2).float(),
                                        pixels,
                                        squeeze_channel_dim=True).bool()
            # get pixels and depth inside the masked area
            pixels_packed = pixels[mask_gt]
            depth_gt_packed = depth_gt[mask_gt]
            first_idx = torch.zeros((pixels.shape[0], ),
                                    device=device,
                                    dtype=torch.long)
            num_pts_in_mask = mask_gt.sum(dim=1)
            first_idx[1:] = num_pts_in_mask.cumsum(dim=0)[:-1]
            pixels_padded = packed_to_padded(pixels_packed, first_idx,
                                             num_pts_in_mask.max().item())
            depth_gt_padded = packed_to_padded(depth_gt_packed, first_idx,
                                               num_pts_in_mask.max().item())
            # backproject to world coordinates; the result contains nan and
            # inf values where depth_gt_padded is 0.0
            pixel_world_padded = transform_to_world(pixels_padded,
                                                    depth_gt_padded[..., None],
                                                    cams)
            # transform back to list, containing no padded values
            split_size = num_pts_in_mask[..., None].repeat(1, 2)
            split_size[:, 1] = 3
            pixel_world_list = padded_to_list(pixel_world_padded, split_size)
            pixel_world_all.extend(pixel_world_list)

            # only process the first 10 views
            if idx >= 9:
                break

        pixel_world_all = torch.cat(pixel_world_all, dim=0)
        mesh = trimesh.Trimesh(vertices=pixel_world_all.cpu(),
                               faces=None,
                               process=False)
        mesh.export(os.path.join(output_dir, 'pixel_to_world.ply'))
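
The packed-to-padded bookkeeping above (building first_idx from cumulative mask counts) is a generally useful pattern; a self-contained sketch, assuming only torch and pytorch3d:

import torch
from pytorch3d.ops import packed_to_padded

# toy stand-ins: 2 images, 5 sampled pixels each, and boolean masks
pixels = torch.rand(2, 5, 2)
mask = torch.tensor([[1, 0, 1, 1, 0],
                     [0, 1, 0, 0, 0]], dtype=torch.bool)

pixels_packed = pixels[mask]                    # (mask.sum(), 2)
num_in_mask = mask.sum(dim=1)                   # points per image
first_idx = torch.zeros(pixels.shape[0], dtype=torch.long)
first_idx[1:] = num_in_mask.cumsum(dim=0)[:-1]  # start of each image's chunk
pixels_padded = packed_to_padded(pixels_packed, first_idx,
                                 num_in_mask.max().item())
print(pixels_padded.shape)                      # torch.Size([2, 3, 2])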