import math

import frnn
import numpy as np
import torch
import torch.nn.functional as F
from torch import autograd, pinverse
from pytorch3d.ops import estimate_pointcloud_normals
from pytorch3d.ops.knn import _KNN

# convert_pointclouds_to_tensor, eps_denom, farthest_sampling, is_pointclouds,
# PointClouds3D and valid_value_mask are repo-internal helpers and are assumed
# to be imported elsewhere in this module.


def denoise_normals(points, normals, num_points, sharpness_sigma=30,
                    knn_result=None, neighborhood_size=16):
    """
    Bilateral normal denoising: replaces every normal with a weighted average
    of the normals in its local neighborhood, using weights
    exp(-((1 - <n, n_i>) / (1 - cos(sharpness_sigma)))^2).
    """
    # num_points is re-derived here; the argument is kept for API compatibility
    points, num_points = convert_pointclouds_to_tensor(points)
    normals = F.normalize(normals, dim=-1)
    # convert the angle (degrees) to the (1 - cos) scale used in the weights,
    # as project_to_latent_surface does for its sharpness_angle
    sharpness_sigma = 1 - math.cos(sharpness_sigma / 180.0 * math.pi)

    if knn_result is None:
        # crude average point spacing estimated from the bounding-box diagonal
        diag = (points.max(dim=-2)[0] - points.min(dim=-2)[0]).norm(dim=-1).max().item()
        avg_spacing = math.sqrt(diag / points.shape[1])
        search_radius = min(4 * avg_spacing * neighborhood_size, 0.2)
        dists, idxs, _, grid = frnn.frnn_grid_points(
            points, points, num_points, num_points, K=neighborhood_size + 1,
            r=search_radius, grid=None, return_nn=True)
        # drop the first neighbor, which is the query point itself
        knn_result = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)
    if knn_result.knn is None:
        knn = frnn.frnn_gather(points, knn_result.idx, num_points)
        knn_result = _KNN(idx=knn_result.idx, knn=knn, dists=knn_result.dists)

    # normal-similarity (range) weights
    knn_normals = frnn.frnn_gather(normals, knn_result.idx, num_points)
    weights_n = ((1 - torch.sum(knn_normals * normals[:, :, None, :], dim=-1)) /
                 sharpness_sigma) ** 2
    weights_n = torch.exp(-weights_n)

    # spatial weights with a hard cut-off
    inv_sigma_spatial = (num_points / 2.0).view(-1, 1, 1)
    spatial_dist = 16 / inv_sigma_spatial
    deltap = knn_result.knn - points[:, :, None, :]
    deltap = torch.sum(deltap * deltap, dim=-1)
    weights_p = torch.exp(-deltap * inv_sigma_spatial)
    weights_p[deltap > spatial_dist] = 0

    weights = weights_p * weights_n
    # mask out padded neighbors, which frnn marks with index -1
    weights[knn_result.idx < 0] = 0
    normals_denoised = torch.sum(knn_normals * weights[:, :, :, None], dim=-2) / \
        eps_denom(torch.sum(weights, dim=-1, keepdim=True))
    normals_denoised = F.normalize(normals_denoised, dim=-1)
    return normals_denoised.view_as(normals)
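
# Usage sketch (hedged): frnn only ships CUDA kernels, so the tensors live on
# the GPU; `pts`/`nrms` below are hypothetical stand-ins for one padded cloud:
#
#     pts = torch.rand(1, 2048, 3, device="cuda")
#     nrms = F.normalize(torch.randn(1, 2048, 3, device="cuda"), dim=-1)
#     nrms_smooth = denoise_normals(pts, nrms, None, sharpness_sigma=30)
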
def get_iso_bilateral_weights(points, normals, iso_points, iso_normals):
    """
    Find the closest iso point for each input point and compute a bilateral
    weight from the point-to-plane distance and the normal similarity.
    """
    search_radius = 0.1
    dim = iso_points.view(-1, 3).norm(dim=-1).max() * 2
    avg_spacing = iso_points.shape[1] / dim / 16  # effectively an inverse spacing

    dists, idxs, nn, _ = frnn.frnn_grid_points(points, iso_points, K=1,
                                               return_nn=True, grid=None,
                                               r=search_radius)
    iso_normals = F.normalize(iso_normals, dim=-1)
    iso_normals = frnn.frnn_gather(iso_normals, idxs).view_as(points)
    # squared point-to-plane distance to the closest iso point
    dists = torch.sum((nn.view_as(points) - points) * iso_normals, dim=-1) ** 2
    spatial_w = torch.exp(-dists * avg_spacing)
    normals = F.normalize(normals, dim=-1)
    normal_w = torch.exp(-((1 - torch.sum(normals * iso_normals, dim=-1)) /
                           (1 - np.cos(np.deg2rad(60)))) ** 2)
    weight = spatial_w * normal_w
    # zero out points with no iso neighbor inside the search radius
    weight[idxs.view_as(weight) < 0] = 0
    if not valid_value_mask(weight).all():
        print("Illegal weights")
        breakpoint()
    return weight
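
# Worked example of the normal falloff above (hedged): with the hard-coded
# 60 degree sharpness, the denominator is 1 - cos(60°) = 0.5, so two normals
# 60° apart get normal_w = exp(-((1 - 0.5) / 0.5)**2) = exp(-1) ≈ 0.37, while
# perfectly aligned normals get weight 1:
#
#     n1 = torch.tensor([0.0, 0.0, 1.0])
#     n2 = torch.tensor([0.0, math.sin(math.pi / 3), math.cos(math.pi / 3)])
#     w = math.exp(-((1 - torch.dot(n1, n2).item()) /
#                    (1 - math.cos(math.pi / 3))) ** 2)   # w ≈ 0.3679
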
def get_laplacian_weights(points, normals, iso_points, iso_normals,
                          neighborhood_size=8):
    """
    Compute per-point weights from the distance to the iso surface, measured
    along the combined normals of each point and its closest iso point.
    """
    with autograd.no_grad():
        search_radius = 0.15
        dim = iso_points.view(-1, 3).norm(dim=-1).max() * 2
        avg_spacing = iso_points.shape[1] / dim / 16  # effectively an inverse spacing
        dists, idxs, nn, _ = frnn.frnn_grid_points(points, iso_points, K=1,
                                                   return_nn=True, grid=None,
                                                   r=search_radius)
        nn_normals = frnn.frnn_gather(iso_normals, idxs)
        # symmetric point-to-plane distance using both normals
        dists = torch.sum((points - nn.view_as(points)) *
                          (normals + nn_normals.view_as(normals)), dim=-1)
        dists = dists * dists
        spatial_w = torch.exp(-dists * avg_spacing)
        # zero out points with no iso neighbor inside the search radius
        spatial_w[idxs.view_as(spatial_w) < 0] = 0
        return spatial_w.view(points.shape[:-1])
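
# Usage sketch (hedged): all four inputs are padded (1, P, 3) CUDA tensors; the
# returned weights have shape (1, P) and vanish for points farther than
# search_radius from every iso point:
#
#     w = get_laplacian_weights(pts, nrms, iso_pts, iso_nrms)
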
def get_heat_kernel_weights(points, normals, iso_points, iso_normals,
                            neighborhood_size=8, sigma_p=0.4, sigma_n=0.7):
    """
    Find the closest K iso points and compute a kernel-regression weight from
    the point-to-point and normal distances: w = k^T K^{-1} k, where k holds
    the kernels between the query and its neighbors and K the kernels among
    the neighbors themselves.
    """
    search_radius = 0.15
    dists, idxs, nn, _ = frnn.frnn_grid_points(points, iso_points,
                                               K=neighborhood_size,
                                               return_nn=True, grid=None,
                                               r=search_radius)
    with autograd.no_grad():
        # normalize just to be sure
        iso_normals = F.normalize(iso_normals, dim=-1, eps=1e-15)
        normals = F.normalize(normals, dim=-1, eps=1e-15)

    # features are a composite of positions and normals, each with its own bandwidth
    features = torch.cat([points / sigma_p, normals / sigma_n], dim=-1)
    features_iso = torch.cat([iso_points / sigma_p, iso_normals / sigma_n], dim=-1)

    # kernels k(x, x_i) between the query and its neighbors, (N, P, K)
    knn_idx = idxs
    features_nb = frnn.frnn_gather(features_iso, knn_idx)  # (N, P, K, D)
    features_diff = features.unsqueeze(2) - features_nb
    features_dist = torch.sum(features_diff ** 2, dim=-1)
    kernels = torch.exp(-features_dist)
    kernels[knn_idx < 0] = 0

    # kernel matrix k(x_i, x_j) among the neighbors, (N, P, K, K)
    features_diff_ij = features_nb[:, :, :, None, :] - features_nb[:, :, None, :, :]
    features_dist_ij = torch.sum(features_diff_ij ** 2, dim=-1)
    kernel_matrices = torch.exp(-features_dist_ij)
    # zero out both rows and columns belonging to invalid (padded) neighbors
    kernel_matrices[knn_idx < 0] = 0
    kernel_matrices[knn_idx.unsqueeze(-2).expand_as(kernel_matrices) < 0] = 0
    kernel_matrices_inv = pinverse(kernel_matrices)

    weight = kernels.unsqueeze(-2) @ kernel_matrices_inv @ kernels.unsqueeze(-1)
    weight.clamp_max_(1.0)
    return weight.view(points.shape[:-1])
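
# Sanity check for the weight formula above (hedged): if the query coincides
# with one neighbor and the K neighbors are mutually far apart in feature
# space, `kernels` approaches a one-hot vector and `kernel_matrices` the
# identity, so w = k^T K^{-1} k ≈ 1; the clamp_max_(1.0) keeps numerically
# ill-conditioned pseudo-inverses from pushing the weight above 1.
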
def wlop(pointclouds: PointClouds3D, ratio: float = 0.5, neighborhood_size=16,
         iters=3, repulsion_mu=0.5) -> PointClouds3D:
    """
    Weighted locally optimal projection (WLOP), after
    "Consolidation of Unorganized Point Clouds for Surface Reconstruction".

    Args:
        pointclouds: input clouds with at most P points per cloud
        ratio: downsampling ratio in (0, 1]
    """
    P, num_points_P = convert_pointclouds_to_tensor(pointclouds)

    # support radius h derived from the bounding-box diagonal
    bbox = pointclouds.get_bounding_boxes()  # (N, 3, 2)
    diag = torch.norm(bbox[..., 0] - bbox[..., 1], dim=-1)  # (N,)
    h = 4 * torch.sqrt(diag / num_points_P.float())
    search_radius = min((h * neighborhood_size).max().item(), 0.2)
    theta_sigma_inv = (16 / (h * h)).view(-1, 1, 1)

    if ratio < 1.0:
        X0 = farthest_sampling(pointclouds, ratio=ratio)
    elif ratio == 1.0:
        X0 = pointclouds.clone()
    else:
        raise ValueError('ratio must be less or equal to 1.0')
    # slightly perturb so that we don't find the same point when searching NN XtoP
    offset = torch.randn_like(X0.points_packed()) * h.mean() * 0.1
    X0.offset_(offset)
    X, num_points_X = convert_pointclouds_to_tensor(X0)

    def theta(r2):
        return torch.exp(-r2 * theta_sigma_inv)

    def eta(r):
        return -r

    def deta(r):
        return torch.ones_like(r)

    # precompute the density weights of the input cloud
    grid = None
    dists, idxs, _, grid = frnn.frnn_grid_points(
        P, P, num_points_P, num_points_P, K=neighborhood_size + 1,
        r=search_radius, grid=grid, return_nn=False)
    knn_PtoP = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)

    deltapp = torch.norm(P.unsqueeze(-2) -
                         frnn.frnn_gather(P, knn_PtoP.idx, num_points_P), dim=-1)
    theta_pp_nn = theta(deltapp ** 2)  # (B, P, K)
    theta_pp_nn[knn_PtoP.idx < 0] = 0
    density_P = torch.sum(theta_pp_nn, dim=-1) + 1

    for it in range(iters):
        # from each x find the closest neighbors in the input cloud
        dists, idxs, _, grid = frnn.frnn_grid_points(
            X, P, num_points_X, num_points_P, K=neighborhood_size,
            r=search_radius, grid=grid, return_nn=False)
        knn_XtoP = _KNN(dists=dists, idx=idxs, knn=None)

        dists, idxs, _, _ = frnn.frnn_grid_points(
            X, X, num_points_X, num_points_X, K=neighborhood_size + 1,
            r=search_radius, grid=None, return_nn=False)
        knn_XtoX = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)

        # LOP local optimal projection
        nn_XtoP = frnn.frnn_gather(P, knn_XtoP.idx, num_points_P)
        epsilon = X.unsqueeze(-2) - nn_XtoP
        delta = X.unsqueeze(-2) - frnn.frnn_gather(X, knn_XtoX.idx, num_points_X)

        deltaxx2 = (delta ** 2).sum(dim=-1)    # (B, I, K)
        deltaxp2 = (epsilon ** 2).sum(dim=-1)  # (B, I, K)

        alpha = theta(deltaxp2) / eps_denom(epsilon.norm(dim=-1))  # (B, I, K)
        beta = theta(deltaxx2) * deta(delta.norm(dim=-1)) / eps_denom(
            delta.norm(dim=-1))

        density_X = torch.sum(theta(deltaxx2), dim=-1) + 1

        new_alpha = alpha / frnn.frnn_gather(
            density_P.unsqueeze(-1), knn_XtoP.idx, num_points_P).squeeze(-1)
        new_alpha[knn_XtoP.idx < 0] = 0
        new_beta = density_X.unsqueeze(-1) * beta
        new_beta[knn_XtoX.idx < 0] = 0

        # attraction towards the data plus density-weighted repulsion
        term_data = torch.sum(new_alpha[..., None] * nn_XtoP, dim=-2) / \
            eps_denom(torch.sum(new_alpha, dim=-1, keepdim=True))
        term_repul = repulsion_mu * torch.sum(new_beta[..., None] * delta, dim=-2) / \
            eps_denom(torch.sum(new_beta, dim=-1, keepdim=True))
        X = term_data + term_repul

    if is_pointclouds(X0):
        return X0.update_padded(X)
    return X
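
# Usage sketch (hedged): consolidate a noisy cloud to half its size with three
# WLOP iterations; `noisy_pc` is a hypothetical PointClouds3D on the GPU:
#
#     consolidated = wlop(noisy_pc, ratio=0.5, iters=3, repulsion_mu=0.5)
#     # with ratio=1.0 the cloud is only regularized, not downsampled
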
def project_to_latent_surface(points, normals, sharpness_angle=60,
                              neighborhood_size=31, max_proj_iters=10,
                              max_est_iter=5):
    """
    Project points onto the latent surface defined by the point set using
    robust implicit moving least squares (RIMLS).
    """
    points, num_points = convert_pointclouds_to_tensor(points)
    normals = F.normalize(normals, dim=-1)
    sharpness_sigma = 1 - math.cos(sharpness_angle / 180 * math.pi)

    diag = (points.max(dim=-2)[0] - points.min(dim=-2)[0]).norm(dim=-1).max().item()
    avg_spacing = math.sqrt(diag / points.shape[1])
    search_radius = min(16 * avg_spacing * neighborhood_size, 0.2)

    dists, idxs, _, grid = frnn.frnn_grid_points(
        points, points, num_points, num_points, K=neighborhood_size + 1,
        r=search_radius, grid=None, return_nn=False)
    # drop the query point itself from its neighborhood
    knn_result = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)
    knn_normals = frnn.frnn_gather(normals, knn_result.idx, num_points)
    # per-point spatial bandwidth from the distance to the nearest neighbor
    inv_sigma_spatial = 1 / knn_result.dists[..., 0] / 16

    not_converged = torch.full(points.shape[:-1], True,
                               device=points.device, dtype=torch.bool)
    it = 0
    while True:
        knn_pts = frnn.frnn_gather(points, knn_result.idx, num_points)
        pts_diff = points[not_converged].unsqueeze(-2) - knn_pts[not_converged]
        fx = torch.sum(pts_diff * knn_normals[not_converged], dim=-1)

        not_converged_1 = torch.full(fx.shape[:-1], True,
                                     dtype=torch.bool, device=fx.device)
        knn_normals_1 = knn_normals[not_converged]
        inv_sigma_spatial_1 = inv_sigma_spatial[not_converged]
        f = points.new_zeros(points[not_converged].shape[:-1])
        grad_f = points.new_zeros(points[not_converged].shape)
        alpha = torch.ones_like(fx)
        # inner loop: iteratively re-estimate the robust weights alpha
        for itt in range(max_est_iter):
            if itt > 0:
                # clone, otherwise the in-place update below would also change alpha_old
                alpha_old = alpha.clone()
                # robust re-weighting from the current value/gradient estimates;
                # sharpness_sigma replaces the hard-coded 0.5 here, which equals
                # 1 - cos(60 deg) for the default sharpness_angle
                weights_n = ((knn_normals_1[not_converged_1] -
                              grad_f[not_converged_1].unsqueeze(-2)).norm(dim=-1) /
                             sharpness_sigma) ** 2
                weights_n = torch.exp(-weights_n)
                weights_p = torch.exp(-(
                    (fx[not_converged_1] - f[not_converged_1].unsqueeze(-1)) ** 2 *
                    inv_sigma_spatial_1[not_converged_1].unsqueeze(-1) / 4))
                alpha[not_converged_1] = weights_n * weights_p
                # a point keeps iterating while its weights still change noticeably
                not_converged_1[not_converged_1] = (
                    alpha[not_converged_1] -
                    alpha_old[not_converged_1]).abs().max(dim=-1)[0] > 1e-4
                if not not_converged_1.any():
                    break
            # weighted MLS fit of the implicit value f and its gradient grad_f
            deltap = torch.sum(pts_diff[not_converged_1] *
                               pts_diff[not_converged_1], dim=-1)
            phi = torch.exp(-deltap *
                            inv_sigma_spatial_1[not_converged_1].unsqueeze(-1))
            dphi = inv_sigma_spatial_1[not_converged_1].unsqueeze(-1) * phi
            weights = phi * alpha[not_converged_1]
            grad_weights = 2 * pts_diff[not_converged_1] * (dphi * weights).unsqueeze(-1)
            sum_grad_weights = torch.sum(grad_weights, dim=-2)
            sum_weight = torch.sum(weights, dim=-1)
            sum_f = torch.sum(fx[not_converged_1] * weights, dim=-1)
            sum_Gf = torch.sum(grad_weights * fx[not_converged_1].unsqueeze(-1), dim=-2)
            sum_N = torch.sum(weights.unsqueeze(-1) * knn_normals_1[not_converged_1],
                              dim=-2)
            tmp_f = sum_f / eps_denom(sum_weight)
            tmp_grad_f = (sum_Gf - tmp_f.unsqueeze(-1) * sum_grad_weights +
                          sum_N) / eps_denom(sum_weight).unsqueeze(-1)
            grad_f[not_converged_1] = tmp_grad_f
            f[not_converged_1] = tmp_f

        # Newton-like projection step along the estimated gradient
        move = f.unsqueeze(-1) * grad_f
        points[not_converged] = points[not_converged] - move
        # a point is converged once its projection step is small enough
        mask = move.norm(dim=-1) > 5e-4
        not_converged[not_converged] = mask
        it = it + 1
        if not not_converged.any() or it >= max_proj_iters:
            break
    return points
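
# Usage sketch (hedged): snap slightly off-surface points back onto the RIMLS
# surface defined by the cloud itself; inputs are padded (1, P, 3) CUDA
# tensors as in the sketches above:
#
#     projected = project_to_latent_surface(pts, nrms, sharpness_angle=60,
#                                           max_proj_iters=10, max_est_iter=5)
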
def resample_uniformly(pointclouds, neighborhood_size=8, iters=1, knn=None,
                       normals=None, reproject=False, repulsion_mu=1.0):
    """
    Resample the cloud `iters` times, pushing points apart along the local
    tangent plane so that they cover the surface more uniformly.
    """
    points_init, num_points = convert_pointclouds_to_tensor(pointclouds)

    diag = (points_init.view(-1, 3).max(dim=0).values -
            points_init.view(-1, 3).min(0).values).norm().item()
    avg_spacing = math.sqrt(diag / points_init.shape[1])
    search_radius = min(4 * avg_spacing * neighborhood_size, 0.2)

    if knn is None:
        dists, idxs, _, grid = frnn.frnn_grid_points(
            points_init, points_init, num_points, num_points,
            K=neighborhood_size + 1, r=search_radius, grid=None, return_nn=False)
        knn = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)

    # get normals: the cloud's own, the given ones, or a fresh PCA estimate
    if not isinstance(pointclouds, torch.Tensor):
        normals = pointclouds.normals_padded()
    if normals is None:
        normals = estimate_pointcloud_normals(points_init,
                                              neighborhood_size=neighborhood_size,
                                              disambiguate_directions=False)
    else:
        normals = F.normalize(normals, dim=-1)

    points = points_init
    for i in range(iters):
        if reproject:
            normals = denoise_normals(points, normals, num_points, knn_result=knn)
            points = project_to_latent_surface(points, normals,
                                               max_proj_iters=2, max_est_iter=3)
        # refresh the neighborhood every few iterations as the points move
        if i > 0 and i % 3 == 0:
            dists, idxs, _, grid = frnn.frnn_grid_points(
                points, points, num_points, num_points,
                K=neighborhood_size + 1, r=search_radius, grid=None,
                return_nn=False)
            knn = _KNN(dists=dists[..., 1:], idx=idxs[..., 1:], knn=None)

        nn = frnn.frnn_gather(points, knn.idx, num_points)
        pts_diff = points.unsqueeze(-2) - nn
        dists = torch.sum(pts_diff ** 2, dim=-1)
        knn_result = _KNN(dists=dists, idx=knn.idx, knn=nn)

        deltap = knn_result.dists
        inv_sigma_spatial = (num_points / 2.0 / 16).view(-1, 1, 1)
        spatial_w = torch.exp(-deltap * inv_sigma_spatial)
        spatial_w[knn_result.idx < 0] = 0
        # per-neighbor densities, (N, P, K, 1)
        density = frnn.frnn_gather(spatial_w.sum(-1, keepdim=True) + 1.0,
                                   knn.idx, num_points)
        # project the repulsion direction onto the local tangent plane
        nn_normals = frnn.frnn_gather(normals, knn_result.idx, num_points)
        pts_diff_proj = pts_diff - (pts_diff * nn_normals).sum(
            dim=-1, keepdim=True) * nn_normals
        move = repulsion_mu * avg_spacing * torch.mean(
            density * spatial_w[..., None] * F.normalize(pts_diff_proj, dim=-1),
            dim=-2)
        points = points + move
        # the next iteration re-projects to the latent surface when reproject=True

    if is_pointclouds(pointclouds):
        return pointclouds.update_padded(points)
    return points
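
# Usage sketch (hedged): a few repulsion-only iterations on a raw tensor cloud;
# passing a tensor with normals=None triggers PCA normal estimation via
# pytorch3d's estimate_pointcloud_normals, and frnn again requires CUDA:
#
#     pts = torch.rand(1, 4096, 3, device="cuda")
#     resampled = resample_uniformly(pts, neighborhood_size=8, iters=4,
#                                    reproject=False, repulsion_mu=1.0)
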