def filter_with(self, point_clouds: PointClouds3D, filter_names: Tuple[str, ...]):
    """
    Filter point clouds with all the specified boolean filters and
    return the reduced point clouds.
    """
    if point_clouds.isempty():
        return point_clouds

    filters = [
        getattr(self, k) for k in filter_names
        if torch.is_tensor(getattr(self, k))
    ]
    points_padded, num_points = convert_pointclouds_to_tensor(point_clouds)
    # point_clouds has batch size N, the filters batch size 1:
    # broadcast them to a common batch size
    matched_tensors = convert_to_tensors_and_broadcast(
        *filters, points_padded, num_points, device=self.device)
    filters = matched_tensors[:-2]
    points = matched_tensors[-2]
    num_points_per_cloud = matched_tensors[-1]

    assert all(x.ndim == 2 for x in filters)
    size1 = max(x.shape[1] for x in matched_tensors[:-1])
    filters = [x.expand(-1, size1) for x in filters]

    # a point is kept only if every filter keeps it;
    # make sure that filters at the padded positions are False
    filters = torch.stack(filters, dim=-1).all(dim=-1)
    for i, N in enumerate(num_points_per_cloud.cpu().tolist()):
        filters[i, N:] = False

    points_list = [
        points[b][filters[b]] if points[b].shape[0] > 0 else points[b]
        for b in range(points.shape[0])
    ]
    if not is_pointclouds(point_clouds):
        return PointClouds3D(points_list)

    normals = point_clouds.normals_padded()
    if normals is not None:
        normals = normals.expand(points.shape[0], -1, -1)
        normals = [
            normals[b][filters[b]] if normals[b].shape[0] > 0 else normals[b]
            for b in range(normals.shape[0])
        ]
    features = point_clouds.features_padded()
    if features is not None:
        features = features.expand(points.shape[0], -1, -1)
        features = [
            features[b][filters[b]] if features[b].shape[0] > 0 else features[b]
            for b in range(features.shape[0])
        ]
    return PointClouds3D(points_list, normals=normals, features=features)
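# Usage sketch for `filter_with` (illustrative, not part of the API): the
# owning object is assumed to store boolean per-point masks of shape (1, P)
# or (N, P) as tensor attributes; the attribute name `visibility` below is
# a hypothetical example.
#
#   self.visibility = mask                           # (N, P) bool tensor
#   kept = self.filter_with(point_clouds, ("visibility",))
#   # `kept` contains only the points that every named mask marks True,
#   # with normals and features reduced consistently.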
def resample_uniformly_wlop(
        pointclouds: Union[Pointclouds, torch.Tensor],
        neighborhood_size: int = 8,
        knn=None,
        normals=None,
        shrink_ratio: float = 0.5,
        repulsion_mu: float = 1.0
) -> Union[Pointclouds, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Resample by first consolidating the point clouds to a smaller point
    cloud with wlop (halving the points by default), then upsampling back
    to the original density with EAR-style insertion in the sparsest regions.
    Returns:
        Pointclouds, or (padded points, number of points per batch)
    """
    import math
    import frnn
    points_init, num_points = convert_pointclouds_to_tensor(pointclouds)
    batch_size = num_points.shape[0]
    diag = (points_init.view(-1, 3).max(dim=0).values -
            points_init.view(-1, 3).min(0).values).norm().item()
    avg_spacing = math.sqrt(diag / points_init.shape[1])
    search_radius = min(4 * avg_spacing * neighborhood_size, 0.2)
    if knn is None:
        dists, idxs, _, grid = frnn.frnn_grid_points(
            points_init, points_init, num_points, num_points,
            K=neighborhood_size + 1, r=search_radius,
            grid=None, return_nn=False)
        knn = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)

    # estimate normals: use the provided ones for tensor input,
    # otherwise take them from the point clouds
    if not isinstance(pointclouds, torch.Tensor):
        normals = pointclouds.normals_padded()
    if normals is None:
        normals = estimate_pointcloud_normals(
            points_init, neighborhood_size=neighborhood_size,
            disambiguate_directions=False)
    else:
        normals = F.normalize(normals, dim=-1)

    points = points_init
    # NOTE: the knn, normals and points computed above are currently not
    # used by the consolidation pipeline below, which operates on the
    # input clouds directly
    wlop_result = wlop(pointclouds, ratio=shrink_ratio,
                       repulsion_mu=repulsion_mu)
    up_result = upsample(wlop_result, num_points)
    if is_pointclouds(pointclouds):
        return up_result
    return up_result.points_padded(), up_result.num_points_per_cloud()
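# Minimal round-trip sketch for `resample_uniformly_wlop` (illustrative):
# consolidate a random cloud to half its size, then upsample back to the
# original point count. Assumes `PointClouds3D` from this package and a
# CUDA device, since the underlying frnn queries run on the GPU.
def _example_resample_uniformly_wlop():
    pcl = PointClouds3D([torch.rand(1024, 3, device="cuda")])
    out = resample_uniformly_wlop(pcl, shrink_ratio=0.5)
    # `out` has the original number of points, redistributed more uniformly
    return out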
def upsample(
        pcl: Union[Pointclouds, torch.Tensor],
        n_points: Union[int, torch.Tensor],
        num_points=None,
        neighborhood_size=16,
        knn_result=None
) -> Union[Pointclouds, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Iteratively add points to the sparsest regions.
    Args:
        pcl: tensor of shape (N, P, 3) or Pointclouds
        n_points: target number of points per cloud, tensor of shape (N,) or integer
    Returns:
        Pointclouds or (padded_points, num_points)
    """
    def _return_value(points, num_points, return_pcl):
        if return_pcl:
            points_list = padded_to_list(points, num_points.tolist())
            return pcl.__class__(points_list)
        return points, num_points

    return_pcl = is_pointclouds(pcl)
    points, num_points = convert_pointclouds_to_tensor(pcl)
    knn_k = neighborhood_size
    if not ((num_points - num_points[0]) == 0).all():
        logger_py.warning(
            "Upsampling operation may encounter unexpected behavior "
            "for heterogeneous batches")
    if num_points.sum() == 0:
        return _return_value(points, num_points, return_pcl)

    n_remaining = (n_points - num_points).to(dtype=torch.long)
    if (n_remaining <= 0).all():
        return _return_value(points, num_points, return_pcl)

    if knn_result is None:
        knn_result = knn_points(points, points, num_points, num_points,
                                K=knn_k + 1, return_nn=True, return_sorted=True)
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])

    while (n_remaining > 0).any():
        sparse_pts = points
        sparse_dists = knn_result.dists
        sparse_knn = knn_result.knn
        batch_size, P, _ = sparse_pts.shape
        # insert at most 1/8 of the current points per iteration
        max_P = max(P // 8, 1)
        # sparse_knn_normals = frnn.frnn_gather(
        #     normals_init, knn_result.idx, num_points)[:, 1:]
        # candidate insertion points: one third of the way from each point
        # towards each of its neighbors
        mid_points = (sparse_knn + 2 * sparse_pts[..., None, :]) / 3
        # (N, P, K, K, 3) difference between each candidate and all neighbors
        mid_nn_diff = mid_points.unsqueeze(-2) - sparse_knn.unsqueeze(-3)
        # distance from each candidate to its closest neighbor (N, P, K)
        min_dist2 = torch.norm(mid_nn_diff, dim=-1)
        min_dist2 = min_dist2.min(dim=-1)[0]
        # sparsity score per point and the neighbor realizing it (N, P)
        father_sparsity, father_nb = min_dist2.max(dim=-1)
        # neighborhoods to insert into, sorted by ascending sparsity
        sparsity_sorted = father_sparsity.sort(dim=1).indices
        # clamp to [0, max_P]: negative remainders occur for clouds that
        # already reached their target size
        n_new_points = n_remaining.clamp(min=0, max=max_P)
        sparsity_sorted = sparsity_sorted[:, -max_P:]
        new_pts = torch.gather(
            mid_points[torch.arange(mid_points.shape[0]).view(-1, 1, 1),
                       torch.arange(mid_points.shape[1]).view(1, -1, 1),
                       father_nb.unsqueeze(-1)].squeeze(-2), 1,
            sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))
        # original points at the selected locations; currently unused
        sparse_selected = torch.gather(
            sparse_pts, 1, sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))
        total_pts_list = []
        for b, pts_batch in enumerate(
                padded_to_list(points, num_points.tolist())):
            k = int(n_new_points[b])
            # guard k == 0: new_pts[b][-0:] would select *all* candidates
            if k > 0:
                pts_batch = torch.cat([new_pts[b][-k:], pts_batch], dim=0)
            total_pts_list.append(pts_batch)
        points = list_to_padded(total_pts_list)

        n_remaining = n_remaining - n_new_points
        num_points = n_new_points + num_points
        knn_result = knn_points(points, points, num_points, num_points,
                                K=knn_k + 1, return_nn=True)
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])
    return _return_value(points, num_points, return_pcl)
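# Minimal usage sketch for `upsample` (illustrative): grow a random padded
# cloud from 128 to 512 points per batch element. An integer target is
# applied to every cloud; Pointclouds input would return a Pointclouds.
def _example_upsample():
    pts = torch.rand(2, 128, 3)       # (N, P, 3) padded points
    dense, n = upsample(pts, 512)     # insert points until each cloud has 512
    return dense, n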
def wlop(pointclouds: PointClouds3D,
         ratio: float = 0.5,
         neighborhood_size=16,
         iters=3,
         repulsion_mu=0.5) -> PointClouds3D:
    """
    Consolidation of Unorganized Point Clouds for Surface Reconstruction
    (WLOP, Huang et al., SIGGRAPH Asia 2009)
    Args:
        pointclouds: point clouds containing at most J points per cloud
        ratio: downsampling ratio in (0, 1]
    """
    P, num_points_P = convert_pointclouds_to_tensor(pointclouds)
    # (N, 3, 2)
    bbox = pointclouds.get_bounding_boxes()
    # (N,) bounding box diagonal per cloud
    diag = torch.norm(bbox[..., 0] - bbox[..., 1], dim=-1)
    h = 4 * torch.sqrt(diag / num_points_P.float())
    # clamp the search radius; reduce the per-cloud support to a scalar so
    # that min() is well defined for batched input
    search_radius = min((h * neighborhood_size).max().item(), 0.2)
    # reshape to (N, 1, 1) so it broadcasts against (N, P, K) distances
    theta_sigma_inv = (16 / (h * h)).view(-1, 1, 1)

    if ratio < 1.0:
        X0 = farthest_sampling(pointclouds, ratio=ratio)
    elif ratio == 1.0:
        X0 = pointclouds.clone()
    else:
        raise ValueError('ratio must be less than or equal to 1.0')
    # slightly perturb so that we don't find the same point when searching NN XtoP;
    # gather the per-cloud support h to the packed points
    offset = torch.randn_like(X0.points_packed()) * \
        h[X0.packed_to_cloud_idx()].unsqueeze(-1) * 0.1
    X0.offset_(offset)
    X, num_points_X = convert_pointclouds_to_tensor(X0)

    def theta(r2):
        return torch.exp(-r2 * theta_sigma_inv)

    def eta(r):
        return -r

    def deta(r):
        # |d eta / d r| for eta(r) = -r
        return torch.ones_like(r)

    grid = None
    dists, idxs, _, grid = frnn.frnn_grid_points(
        P, P, num_points_P, num_points_P,
        K=neighborhood_size + 1, r=search_radius, grid=grid, return_nn=False)
    knn_PtoP = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)

    deltapp = torch.norm(
        P.unsqueeze(-2) - frnn.frnn_gather(P, knn_PtoP.idx, num_points_P),
        dim=-1)
    # (B, P, K)
    theta_pp_nn = theta(deltapp**2)
    theta_pp_nn[knn_PtoP.idx < 0] = 0
    density_P = torch.sum(theta_pp_nn, dim=-1) + 1

    for it in range(iters):
        # from each x find the closest neighbors in pointclouds
        dists, idxs, _, grid = frnn.frnn_grid_points(
            X, P, num_points_X, num_points_P,
            K=neighborhood_size, r=search_radius, grid=grid, return_nn=False)
        knn_XtoP = _KNN(dists=dists, idx=idxs, knn=None)

        dists, idxs, _, _ = frnn.frnn_grid_points(
            X, X, num_points_X, num_points_X,
            K=neighborhood_size + 1, r=search_radius, grid=None,
            return_nn=False)
        knn_XtoX = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)

        # LOP local optimal projection
        nn_XtoP = frnn.frnn_gather(P, knn_XtoP.idx, num_points_P)
        epsilon = X.unsqueeze(-2) - nn_XtoP
        delta = X.unsqueeze(-2) - frnn.frnn_gather(X, knn_XtoX.idx,
                                                   num_points_X)

        # (B, I, K)
        deltaxx2 = (delta**2).sum(dim=-1)
        # (B, I, K)
        deltaxp2 = (epsilon**2).sum(dim=-1)

        # (B, I, K) attraction and repulsion weights
        alpha = theta(deltaxp2) / eps_denom(epsilon.norm(dim=-1))
        beta = theta(deltaxx2) * deta(delta.norm(dim=-1)) / eps_denom(
            delta.norm(dim=-1))

        density_X = torch.sum(theta(deltaxx2), dim=-1) + 1
        new_alpha = alpha / frnn.frnn_gather(
            density_P.unsqueeze(-1), knn_XtoP.idx, num_points_P).squeeze(-1)
        new_alpha[knn_XtoP.idx < 0] = 0
        new_beta = density_X.unsqueeze(-1) * beta
        new_beta[knn_XtoX.idx < 0] = 0

        term_data = torch.sum(new_alpha[..., None] * nn_XtoP, dim=-2) / \
            eps_denom(torch.sum(new_alpha, dim=-1, keepdim=True))
        term_repul = repulsion_mu * torch.sum(new_beta[..., None] * delta, dim=-2) / \
            eps_denom(torch.sum(new_beta, dim=-1, keepdim=True))

        X = term_data + term_repul

    if is_pointclouds(X0):
        return X0.update_padded(X)
    return X
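# Minimal usage sketch for `wlop` (illustrative): consolidate a noisy cloud
# while halving the point count. Assumes `PointClouds3D` from this package
# and a CUDA device, since the frnn neighbor queries run on the GPU.
def _example_wlop():
    pcl = PointClouds3D([torch.rand(2048, 3, device="cuda")])
    consolidated = wlop(pcl, ratio=0.5, iters=3)
    # roughly half the points, pulled towards the input surface and
    # spread apart by the repulsion term
    return consolidated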
def resample_uniformly(pointclouds,
                       neighborhood_size=8,
                       iters=1,
                       knn=None,
                       normals=None,
                       reproject=False,
                       repulsion_mu=1.0):
    """ Resample by applying `iters` repulsion steps """
    import math
    import frnn
    points_init, num_points = convert_pointclouds_to_tensor(pointclouds)
    batch_size = num_points.shape[0]
    # knn_result = knn_points(
    #     points_init, points_init, num_points, num_points,
    #     K=neighborhood_size + 1, return_nn=True)
    diag = (points_init.view(-1, 3).max(dim=0).values -
            points_init.view(-1, 3).min(0).values).norm().item()
    avg_spacing = math.sqrt(diag / points_init.shape[1])
    search_radius = min(4 * avg_spacing * neighborhood_size, 0.2)
    if knn is None:
        dists, idxs, _, grid = frnn.frnn_grid_points(
            points_init, points_init, num_points, num_points,
            K=neighborhood_size + 1, r=search_radius,
            grid=None, return_nn=False)
        knn = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)

    # estimate normals: use the provided ones for tensor input,
    # otherwise take them from the point clouds
    if not isinstance(pointclouds, torch.Tensor):
        normals = pointclouds.normals_padded()
    if normals is None:
        normals = estimate_pointcloud_normals(
            points_init, neighborhood_size=neighborhood_size,
            disambiguate_directions=False)
    else:
        normals = F.normalize(normals, dim=-1)

    points = points_init
    for i in range(iters):
        if reproject:
            normals = denoise_normals(points, normals, num_points,
                                      knn_result=knn)
            points = project_to_latent_surface(points, normals,
                                               max_proj_iters=2,
                                               max_est_iter=3)
        if i > 0 and i % 3 == 0:
            # periodically rebuild the neighborhood graph on the moved points
            dists, idxs, _, grid = frnn.frnn_grid_points(
                points, points, num_points, num_points,
                K=neighborhood_size + 1, r=search_radius,
                grid=None, return_nn=False)
            knn = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)
        nn = frnn.frnn_gather(points, knn.idx, num_points)
        pts_diff = points.unsqueeze(-2) - nn
        dists = torch.sum(pts_diff**2, dim=-1)
        knn_result = _KNN(dists=dists, idx=knn.idx, knn=nn)
        deltap = knn_result.dists
        # reshape to (N, 1, 1) so it broadcasts against (N, P, K) distances
        inv_sigma_spatial = (num_points / 2.0 / 16).view(-1, 1, 1)
        spatial_w = torch.exp(-deltap * inv_sigma_spatial)
        spatial_w[knn_result.idx < 0] = 0
        # density_w = torch.sum(spatial_w, dim=-1) + 1.0
        # 0.5 * derivative of (-r)exp(-r^2*inv)
        density = frnn.frnn_gather(
            spatial_w.sum(-1, keepdim=True) + 1.0, knn.idx, num_points)
        nn_normals = frnn.frnn_gather(normals, knn_result.idx, num_points)
        # project the displacement onto the tangent plane of each neighbor
        pts_diff_proj = pts_diff - (pts_diff * nn_normals).sum(
            dim=-1, keepdim=True) * nn_normals
        # move = 0.5 * torch.sum(density * spatial_w[..., None] * pts_diff_proj, dim=-2) / \
        #     torch.sum(density.view_as(spatial_w) * spatial_w, dim=-1).unsqueeze(-1)
        # move = F.normalize(move, dim=-1) * move.norm(dim=-1, keepdim=True).clamp_max(2 * avg_spacing)
        move = repulsion_mu * avg_spacing * torch.mean(
            density * spatial_w[..., None] * F.normalize(pts_diff_proj, dim=-1),
            dim=-2)
        points = points + move
        # then project to latent surface again

    if is_pointclouds(pointclouds):
        return pointclouds.update_padded(points)
    return points
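# Minimal usage sketch for `resample_uniformly` (illustrative): spread the
# points of a cloud more evenly with a few repulsion iterations. Assumes
# `PointClouds3D` from this package and a CUDA device for the frnn queries.
def _example_resample_uniformly():
    pcl = PointClouds3D([torch.rand(1024, 3, device="cuda")])
    resampled = resample_uniformly(pcl, neighborhood_size=8, iters=4,
                                   repulsion_mu=1.0)
    # same number of points, each moved tangentially away from its neighbors
    return resampled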