def update_normals_(self, others_packed):
    """
    Update the point clouds normals. In place operation.

    Args:
        others_packed: A Tensor of shape (all_p, 3), the same shape as
            self.normals_packed, giving the new normals for all points.

    Returns:
        self.
    """
    if self.isempty():
        assert others_packed.nelement() == 0, \
            "Cannot update empty pointclouds with non-empty normals"
        return self
    normals_packed = self.normals_packed()
    if normals_packed is not None:
        if others_packed.shape != normals_packed.shape:
            raise ValueError("update normals must have dimension (all_p, 3).")
    if normals_packed is None:
        self._normals_packed = others_packed
    else:
        # overwrite the packed normals in place
        normals_packed += (-normals_packed + others_packed)

    new_normals_list = list(
        self._normals_packed.split(self.num_points_per_cloud().tolist(), 0))
    # Note that since _compute_packed() has been executed, points_list
    # cannot be None even if not provided during construction.
    self._normals_list = new_normals_list
    self._normals_padded = list_to_padded(new_normals_list)

    return self
def update_features_(self, others_packed):
    """
    Update the point clouds features. In place operation.

    Args:
        others_packed: A Tensor of shape (all_p, C) giving the new features
            for all points.

    Returns:
        self.
    """
    if self.isempty():
        assert others_packed.nelement() == 0, \
            "Cannot update empty pointclouds with non-empty features"
        return self
    features_packed = self.features_packed()
    if features_packed is None or features_packed.shape != others_packed.shape:
        self._features_packed = others_packed
        self._C = others_packed.shape[-1]
    else:
        # overwrite the packed features in place
        features_packed += (-features_packed + others_packed)

    new_features_list = list(
        self._features_packed.split(self.num_points_per_cloud().tolist(), 0))
    # Note that since _compute_packed() has been executed, points_list
    # cannot be None even if not provided during construction.
    self._features_list = new_features_list
    self._features_padded = list_to_padded(new_features_list)

    return self
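# Illustrative sketch (not part of the original module) of the packed -> list
# -> padded bookkeeping that update_normals_ / update_features_ above perform
# after overwriting the packed tensor. The sizes and the helper name
# _example_packed_to_padded are made up; list_to_padded is the pytorch3d
# utility already used above.
def _example_packed_to_padded():
    import torch
    from pytorch3d.structures.utils import list_to_padded

    # two clouds with 4 and 2 points, features packed into one (all_p, C) tensor
    num_points_per_cloud = [4, 2]
    new_features_packed = torch.randn(sum(num_points_per_cloud), 3)
    # split the packed tensor into a per-cloud list ...
    features_list = list(new_features_packed.split(num_points_per_cloud, 0))
    # ... and re-pad it to (N, max_P, C) so all representations stay in sync
    features_padded = list_to_padded(features_list)
    return features_list, features_padded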
def renderBatch(self, Rs, ts, ids=[]):
    if isinstance(Rs, list):
        batch_R = torch.tensor(np.stack(Rs), device=self.device,
                               dtype=torch.float32)  # Bx3x3
    else:
        batch_R = Rs
    if isinstance(ts, list):
        batch_T = torch.tensor(np.stack(ts), device=self.device,
                               dtype=torch.float32)  # Bx3
    else:
        batch_T = ts

    if len(ids) == 0:
        # No ids specified, assuming one object only
        ids = [0 for r in Rs]

    # Load meshes based on object ids
    batch_verts_rgb = list_to_padded([self.textures[i] for i in ids])
    batch_textures = TexturesVertex(
        verts_features=batch_verts_rgb.to(self.device))
    batch_verts = [self.vertices[i].to(self.device) for i in ids]
    batch_faces = [self.faces[i].to(self.device) for i in ids]

    mesh = Meshes(verts=batch_verts,
                  faces=batch_faces,
                  textures=batch_textures)

    images = self.renderer(meshes_world=mesh, R=batch_R, T=batch_T)

    if self.method in ("soft-silhouette", "hard-silhouette"):
        # alpha channel only
        images = images[..., 3]
    elif self.method in ("hard-phong", "soft-phong"):
        # RGB channels only
        images = images[..., :3]
    elif self.method in ("soft-depth", "hard-depth"):
        # depth output is returned unchanged
        images = images
    elif self.method == "blurry-depth":
        images = torch.mean(images, dim=3)
    return images
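# Minimal sketch (illustrative only) of the pose-batching step renderBatch
# performs above: lists of per-view rotations and translations are stacked
# into (B, 3, 3) and (B, 3) float32 tensors on the target device. The sample
# poses and the helper name are made up.
def _example_pose_batching(device="cpu"):
    import numpy as np
    import torch

    Rs = [np.eye(3, dtype=np.float32) for _ in range(2)]
    ts = [np.array([0.0, 0.0, 0.5], dtype=np.float32) for _ in range(2)]
    batch_R = torch.tensor(np.stack(Rs), device=device, dtype=torch.float32)  # Bx3x3
    batch_T = torch.tensor(np.stack(ts), device=device, dtype=torch.float32)  # Bx3
    return batch_R, batch_T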
def upsample_ear(points, normals, n_points: Union[int, torch.Tensor],
                 num_points=None, neighborhood_size=16, repulsion_mu=0.4,
                 edge_sensitivity=1.0):
    """
    Args:
        points (N, P, 3)
        normals (N, P, 3)
        n_points (tensor of [N] or integer): target number of points per cloud
    Returns:
        (padded_points, num_points)
    """
    batch_size = points.shape[0]
    knn_k = neighborhood_size
    if num_points is None:
        num_points = torch.tensor([points.shape[1]] * points.shape[0],
                                  device=points.device, dtype=torch.long)
    if not ((num_points - num_points[0]) == 0).all():
        logger_py.warn(
            "May encounter unexpected behavior for heterogeneous batches")
    if num_points.sum() == 0:
        return points, num_points

    point_cloud_diag = (points.max(dim=-2)[0] -
                        points.min(dim=-2)[0]).norm(dim=-1)
    # per-cloud spatial bandwidth, reshaped to broadcast over (N, P, K)
    inv_sigma_spatial = (num_points / point_cloud_diag).view(-1, 1, 1)
    spatial_dist = 16 / inv_sigma_spatial

    knn_result = knn_points(points, points, num_points, num_points,
                            K=knn_k + 1, return_nn=True, return_sorted=True)
    # dists, idxs, nn, grid = frnn.frnn_grid_points(points_proj, points_proj, num_points, num_points, K=self.knn_k + 1,
    #                                               r=torch.sqrt(spatial_dist), return_nn=True)
    # knn_result = _KNN(dists=dists, idx=idxs, knn=nn)
    _knn_idx = knn_result.idx[..., 1:]
    _knn_dists = knn_result.dists[..., 1:]
    _knn_nn = knn_result.knn[..., 1:, :]
    move_clip = knn_result.dists[..., 1].mean().sqrt()

    # 2. LOP projection
    # (assumes a module-level denoise_normals helper; the truthiness test keeps
    # the original structure and is always True when the helper is defined)
    if denoise_normals:
        normals_denoised, weights_p, weights_n = denoise_normals(
            points, normals, num_points, knn_result=knn_result)
        normals = normals_denoised

    # (optional) search knn in the original points
    # e(-(<n, p-pi>)^2/sigma_p)
    weight_lop = torch.exp(
        -torch.sum(normals[:, :, None, :] *
                   (points[:, :, None, :] - _knn_nn), dim=-1) ** 2 *
        inv_sigma_spatial)
    weight_lop[_knn_dists > spatial_dist] = 0
    # weight_lop[self._knn_idx < 0] = 0

    # spatial weight
    deltap = _knn_dists
    spatial_w = torch.exp(-deltap * inv_sigma_spatial)
    spatial_w[deltap > spatial_dist] = 0
    # spatial_w[self._knn_idx[..., 1:] < 0] = 0
    density_w = torch.sum(spatial_w, dim=-1) + 1.0

    move_data = torch.sum(
        weight_lop[..., None] * (points[:, :, None, :] - _knn_nn), dim=-2) / \
        eps_denom(torch.sum(weight_lop, dim=-1, keepdim=True))
    move_repul = repulsion_mu * density_w[..., None] * torch.sum(
        spatial_w[..., None] *
        (knn_result.knn[:, :, 1:, :] - points[:, :, None, :]), dim=-2) / \
        eps_denom(torch.sum(spatial_w, dim=-1, keepdim=True))
    move_repul = F.normalize(move_repul) * move_repul.norm(
        dim=-1, keepdim=True).clamp_max(move_clip)
    move_data = F.normalize(move_data) * move_data.norm(
        dim=-1, keepdim=True).clamp_max(move_clip)
    move = move_data + move_repul
    points = points - move

    n_remaining = n_points - num_points
    while True:
        if (n_remaining == 0).all():
            break
        # insert at most a fraction of the points per iteration
        sparse_pts = points
        sparse_dists = _knn_dists
        sparse_knn = _knn_nn
        batch_size, P, _ = sparse_pts.shape
        max_P = (P // 10)
        # sparse_knn_normals = frnn.frnn_gather(
        #     normals_init, knn_result.idx, num_points)[:, 1:]
        # candidate insertion points, 1/3 of the way from each point to its neighbors
        mid_points = (sparse_knn + 2 * sparse_pts[..., None, :]) / 3
        mid_nn_diff = mid_points.unsqueeze(-2) - sparse_knn.unsqueeze(-3)  # N,P,K,K,3
        # minimize among all the neighbors
        min_dist2 = torch.norm(mid_nn_diff, dim=-1)  # N,P,K,K
        min_dist2 = min_dist2.min(dim=-1)[0]  # N,P,K
        father_sparsity, father_nb = min_dist2.max(dim=-1)  # N,P
        # neighborhood to insert
        sparsity_sorted = father_sparsity.sort(dim=1).indices
        n_new_points = n_remaining.clone()
        n_new_points[n_new_points > max_P] = max_P
        sparsity_sorted = sparsity_sorted[:, -max_P:]  # N, max_P, sparsest at the end

        # pick the chosen candidate per point (broadcasted advanced indexing),
        # then gather the sparsest neighborhoods
        new_pts = torch.gather(
            mid_points[torch.arange(batch_size).view(-1, 1),
                       torch.arange(P).view(1, -1), father_nb], 1,
            sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))

        total_pts_list = []
        for b, pts_batch in enumerate(
                padded_to_list(points, num_points.tolist())):
            total_pts_list.append(
                torch.cat([new_pts[b][-n_new_points[b]:], pts_batch], dim=0))

        points = list_to_padded(total_pts_list)
        n_remaining = n_remaining - n_new_points
        num_points = n_new_points + num_points
        knn_result = knn_points(points, points, num_points, num_points,
                                K=knn_k + 1, return_nn=True)
        _knn_idx = knn_result.idx[..., 1:]
        _knn_dists = knn_result.dists[..., 1:]
        _knn_nn = knn_result.knn[..., 1:, :]
    return points, num_points
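# Self-contained sketch (illustrative only) of the insertion step shared by
# upsample_ear above and the upsample variants below: for every point,
# candidates one third of the way towards each of its K neighbours are scored
# by the distance to their closest existing neighbour, and the candidate with
# the largest such distance marks the sparsest gap. Shapes, K and the helper
# name are assumptions.
def _example_insertion_candidates(points, K=4):
    import torch
    from pytorch3d.ops import knn_points

    # points: (N, P, 3)
    knn = knn_points(points, points, K=K + 1, return_nn=True)
    nn = knn.knn[..., 1:, :]                            # (N, P, K, 3), drop the self match
    candidates = (nn + 2 * points[..., None, :]) / 3    # (N, P, K, 3)
    diff = candidates.unsqueeze(-2) - nn.unsqueeze(-3)  # (N, P, K, K, 3)
    dist_to_nn = diff.norm(dim=-1).min(dim=-1)[0]       # (N, P, K)
    sparsity, best_nb = dist_to_nn.max(dim=-1)          # (N, P)
    return sparsity, best_nb
# e.g. sparsity, best_nb = _example_insertion_candidates(torch.rand(1, 64, 3))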
def upsample(
    pcl: Union[Pointclouds, torch.Tensor],
    n_points: Union[int, torch.Tensor],
    num_points=None,
    neighborhood_size=16,
    knn_result=None
) -> Union[Pointclouds, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Iteratively add points to the sparsest region
    Args:
        points (tensor of [N, P, 3] or Pointclouds)
        n_points (tensor of [N] or integer): target number of points per cloud
    Returns:
        Pointclouds or (padded_points, num_points)
    """
    def _return_value(points, num_points, return_pcl):
        if return_pcl:
            points_list = padded_to_list(points, num_points.tolist())
            return pcl.__class__(points_list)
        else:
            return points, num_points

    return_pcl = is_pointclouds(pcl)
    points, num_points = convert_pointclouds_to_tensor(pcl)
    knn_k = neighborhood_size

    if not ((num_points - num_points[0]) == 0).all():
        logger_py.warn(
            "Upsampling operation may encounter unexpected behavior for heterogeneous batches"
        )
    if num_points.sum() == 0:
        return _return_value(points, num_points, return_pcl)

    n_remaining = (n_points - num_points).to(dtype=torch.long)
    if (n_remaining <= 0).all():
        return _return_value(points, num_points, return_pcl)

    if knn_result is None:
        knn_result = knn_points(points, points, num_points, num_points,
                                K=knn_k + 1, return_nn=True, return_sorted=True)
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])

    while True:
        if (n_remaining == 0).all():
            break
        # insert at most a fraction of the points per iteration
        sparse_pts = points
        sparse_dists = knn_result.dists
        sparse_knn = knn_result.knn
        batch_size, P, _ = sparse_pts.shape
        max_P = (P // 8)
        # sparse_knn_normals = frnn.frnn_gather(
        #     normals_init, knn_result.idx, num_points)[:, 1:]
        # candidate insertion points, 1/3 of the way from each point to its neighbors
        mid_points = (sparse_knn + 2 * sparse_pts[..., None, :]) / 3
        mid_nn_diff = mid_points.unsqueeze(-2) - sparse_knn.unsqueeze(-3)  # N,P,K,K,3
        # minimize among all the neighbors
        min_dist2 = torch.norm(mid_nn_diff, dim=-1)  # N,P,K,K
        min_dist2 = min_dist2.min(dim=-1)[0]  # N,P,K
        father_sparsity, father_nb = min_dist2.max(dim=-1)  # N,P
        # neighborhood to insert
        sparsity_sorted = father_sparsity.sort(dim=1).indices
        n_new_points = n_remaining.clone()
        n_new_points[n_new_points > max_P] = max_P
        sparsity_sorted = sparsity_sorted[:, -max_P:]  # N, max_P, sparsest at the end

        new_pts = torch.gather(
            mid_points[torch.arange(mid_points.shape[0]).view(-1, 1, 1),
                       torch.arange(mid_points.shape[1]).view(1, -1, 1),
                       father_nb.unsqueeze(-1)].squeeze(-2), 1,
            sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))
        sparse_selected = torch.gather(
            sparse_pts, 1, sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))

        total_pts_list = []
        for b, pts_batch in enumerate(
                padded_to_list(points, num_points.tolist())):
            total_pts_list.append(
                torch.cat([new_pts[b][-n_new_points[b]:], pts_batch], dim=0))

        points = list_to_padded(total_pts_list)
        n_remaining = n_remaining - n_new_points
        num_points = n_new_points + num_points
        knn_result = knn_points(points, points, num_points, num_points,
                                K=knn_k + 1, return_nn=True)
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])
    return _return_value(points, num_points, return_pcl)
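# Toy usage sketch (illustrative only) of upsample above: a single random
# cloud of 128 points is grown to 256 points and returned as a Pointclouds.
# The sizes are arbitrary and this assumes the surrounding module's imports
# (Pointclouds, knn_points, list_to_padded, ...) are available.
def _example_upsample_usage():
    import torch
    from pytorch3d.structures import Pointclouds

    pcl = Pointclouds([torch.rand(128, 3)])
    denser = upsample(pcl, 256, neighborhood_size=16)
    # denser.num_points_per_cloud() -> tensor([256])
    return denser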
def upsample(points, n_points: Union[int, torch.Tensor], num_points=None,
             neighborhood_size=16, knn_result=None):
    """
    Args:
        points (N, P, 3)
        n_points (tensor of [N] or integer): target number of points per cloud
    Returns:
        (padded_points, num_points)
    """
    batch_size = points.shape[0]
    knn_k = neighborhood_size
    if num_points is None:
        num_points = torch.tensor([points.shape[1]] * points.shape[0],
                                  device=points.device, dtype=torch.long)
    if not ((num_points - num_points[0]) == 0).all():
        logger_py.warn(
            "May encounter unexpected behavior for heterogeneous batches")
    if num_points.sum() == 0:
        return points, num_points

    n_remaining = n_points - num_points
    if (n_remaining == 0).all():
        return points, num_points

    point_cloud_diag = (points.max(dim=-2)[0] -
                        points.min(dim=-2)[0]).norm(dim=-1)
    inv_sigma_spatial = num_points / point_cloud_diag
    spatial_dist = 16 / inv_sigma_spatial

    if knn_result is None:
        knn_result = knn_points(points, points, num_points, num_points,
                                K=knn_k + 1, return_nn=True, return_sorted=True)
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])

    while True:
        if (n_remaining == 0).all():
            break
        # insert at most a fraction of the points per iteration
        sparse_pts = points
        sparse_dists = knn_result.dists
        sparse_knn = knn_result.knn
        batch_size, P, _ = sparse_pts.shape
        max_P = (P // 10)
        # sparse_knn_normals = frnn.frnn_gather(
        #     normals_init, knn_result.idx, num_points)[:, 1:]
        # candidate insertion points, 1/3 of the way from each point to its neighbors
        mid_points = (sparse_knn + 2 * sparse_pts[..., None, :]) / 3
        mid_nn_diff = mid_points.unsqueeze(-2) - sparse_knn.unsqueeze(-3)  # N,P,K,K,3
        # minimize among all the neighbors
        min_dist2 = torch.norm(mid_nn_diff, dim=-1)  # N,P,K,K
        min_dist2 = min_dist2.min(dim=-1)[0]  # N,P,K
        father_sparsity, father_nb = min_dist2.max(dim=-1)  # N,P
        # neighborhood to insert
        sparsity_sorted = father_sparsity.sort(dim=1).indices
        n_new_points = n_remaining.clone()
        n_new_points[n_new_points > max_P] = max_P
        sparsity_sorted = sparsity_sorted[:, -max_P:]  # N, max_P, sparsest at the end

        # pick the chosen candidate per point (broadcasted advanced indexing),
        # then gather the sparsest neighborhoods
        new_pts = torch.gather(
            mid_points[torch.arange(batch_size).view(-1, 1),
                       torch.arange(P).view(1, -1), father_nb], 1,
            sparsity_sorted.unsqueeze(-1).expand(-1, -1, 3))

        total_pts_list = []
        for b, pts_batch in enumerate(
                padded_to_list(points, num_points.tolist())):
            total_pts_list.append(
                torch.cat([new_pts[b][-n_new_points[b]:], pts_batch], dim=0))

        points = list_to_padded(total_pts_list)
        n_remaining = n_remaining - n_new_points
        num_points = n_new_points + num_points
        knn_result = knn_points(points, points, num_points, num_points,
                                K=knn_k + 1, return_nn=True)
        knn_result = _KNN(dists=knn_result.dists[..., 1:],
                          idx=knn_result.idx[..., 1:],
                          knn=knn_result.knn[..., 1:, :])
    return points, num_points