def get_normal_w(self, point_clouds: PointClouds3D, normals: Optional[torch.Tensor] = None, **kwargs):
    """
    Weights exp(-\|n - ni\|^2 / sharpness_sigma^2) for i in a local neighborhood.
    Args:
        point_clouds: point clouds whose normals are used as ni
        normals (tensor): (N, maxP, 3) padded normals as n; if not provided,
            use the normals from point_clouds
    Returns:
        (N, maxP, K) weight per point per neighbor
    """
    self.sharpness_sigma = kwargs.get('sharpness_sigma', self.sharpness_sigma)
    inv_sigma_normal = 1 / (self.sharpness_sigma * self.sharpness_sigma)
    lengths = point_clouds.num_points_per_cloud()

    if normals is None:
        normals = point_clouds.normals_padded()
    knn_normals = ops.knn_gather(normals, self.knn_tree.idx, lengths)
    normals = torch.nn.functional.normalize(normals, dim=-1)
    knn_normals = torch.nn.functional.normalize(knn_normals, dim=-1)
    w = knn_normals - normals[:, :, None, :]
    w = torch.exp(-torch.sum(w * w, dim=-1) * inv_sigma_normal)
    return w
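# A minimal standalone sketch of the weighting above (pure torch; the toy
# tensors and the sigma value are illustrative, not from the repo): a neighbor
# whose normal agrees with the center normal gets a weight near 1, while a
# neighbor across a sharp crease gets a weight near 0.
import torch

sharpness_sigma = 0.3
n = torch.tensor([0.0, 0.0, 1.0])                # center normal
ni = torch.nn.functional.normalize(
    torch.tensor([[0.0, 0.1, 1.0],               # nearly parallel normal
                  [1.0, 0.0, 0.0]]), dim=-1)     # normal across an edge
d = ni - n
w = torch.exp(-torch.sum(d * d, dim=-1) / sharpness_sigma ** 2)
print(w)  # ~[0.90, 2e-10]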
def compute(self, pointclouds: PointClouds3D, neighborhood_size=None):
    if neighborhood_size is None:
        neighborhood_size = self.neighborhood_size
    num_points = pointclouds.num_points_per_cloud()
    normals_packed = pointclouds.normals_packed()
    assert normals_packed is not None
    normals_ref = estimate_pointcloud_normals(
        pointclouds, neighborhood_size=neighborhood_size)
    normals_ref = padded_to_packed(normals_ref,
                                   pointclouds.cloud_to_packed_first_idx(),
                                   num_points.sum().item())
    return 1 - F.cosine_similarity(normals_packed, normals_ref).abs()
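# A hedged usage sketch of the consistency score above on toy normals (pure
# torch, no point cloud needed): the .abs() makes the score orientation-
# agnostic, so it penalizes directional disagreement but not flipped normals.
import torch
import torch.nn.functional as F

normals_packed = torch.tensor([[0.0, 0.0, 1.0],
                               [0.0, 0.0, 1.0],
                               [0.0, 0.0, 1.0]])
normals_ref = torch.tensor([[0.0, 0.0, 1.0],     # identical   -> score 0
                            [0.0, 0.0, -1.0],    # flipped     -> score 0
                            [1.0, 0.0, 0.0]])    # orthogonal  -> score 1
print(1 - F.cosine_similarity(normals_packed, normals_ref).abs())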
def get_density_w(self, point_clouds: PointClouds3D, points: Optional[torch.Tensor], **kwargs):
    """
    Density weight 1 + sum_i exp(-\|x - xi\|^2 / (sigma * h)^2)
    """
    inv_sigma_spatial = point_clouds.num_points_per_cloud() / 2.0
    if points is None:
        with torch.autograd.enable_grad():
            points = point_clouds.points_padded()
    deltap = self.knn_tree.knn - points[:, :, None, :]
    w = 1 + torch.sum(
        torch.exp(-torch.sum(deltap * deltap, dim=-1) * inv_sigma_spatial),
        dim=-1)
    return w
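# A minimal sketch of the density weight above (toy numbers, illustrative): a
# point with nearby neighbors gets a weight well above 1, while an isolated
# point stays near the lower bound of 1.
import torch

inv_sigma_spatial = 100 / 2.0   # the num_points / 2 heuristic from above
x = torch.zeros(3)

def density_w(x, xi):
    d = xi - x
    return 1 + torch.sum(torch.exp(-torch.sum(d * d, dim=-1) * inv_sigma_spatial))

print(density_w(x, torch.tensor([[0.05, 0.0, 0.0], [0.0, 0.05, 0.0]])))  # ~2.77
print(density_w(x, torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])))    # ~1.0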
def get_spatial_w(self, point_clouds: PointClouds3D, points: Optional[torch.Tensor] = None):
    """
    Weights exp(-\|p - pi\|^2 / sigma^2), with h = 0.5
    """
    point_spacing_sq = 2.0 / point_clouds.num_points_per_cloud()
    inv_sigma_spatial = 1.0 / point_spacing_sq
    if points is None:
        points = point_clouds.points_padded()
    deltap = self.knn_tree.knn - points[:, :, None, :]
    w = torch.exp(-torch.sum(deltap * deltap, dim=-1) * inv_sigma_spatial)
    return w
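# Sketch of the bandwidth heuristic above (toy numbers, illustrative): the
# squared bandwidth is taken as 2/N, roughly the squared local point spacing
# for N points, so the weight decays quickly past one point spacing.
import torch

num_points = torch.tensor([1000.0])
inv_sigma_spatial = 1.0 / (2.0 / num_points)      # = 500
d = torch.tensor([0.02, 0.05, 0.2])               # neighbor distances
print(torch.exp(-d ** 2 * inv_sigma_spatial))     # ~[0.82, 0.29, 0.00]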
def __init__(self, points, n_points, normals=None):
    super().__init__()
    B, P, _ = points.shape
    assert B == 1
    self.projection = UniformProjection(proj_max_iters=10,
                                        proj_tolerance=1e-5,
                                        total_iters=1,
                                        sample_iters=5,
                                        knn_k=16)
    self.ear_projection = EdgeAwareProjection(proj_max_iters=10,
                                              knn_k=16,
                                              proj_tolerance=1e-5,
                                              total_iters=1,
                                              resampling_clip=0.02,
                                              sample_iters=2,
                                              repulsion_mu=0.4,
                                              sharpness_angle=20,
                                              edge_sensitivity=1.0)
    rnd_idx = torch.randperm(P)[:n_points]
    points = points.view(-1, 3)[rnd_idx].view(1, -1, 3)
    if normals is not None:
        normals = normals.view(-1, 3)[rnd_idx].view(1, -1, 3)
    self.points = resample_uniformly(PointClouds3D(points, normals=normals),
                                     shrink_ratio=0.25,
                                     repulsion_mu=0.65,
                                     neighborhood_size=31).points_padded()
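# A minimal sketch of the random subsampling step above (toy shapes,
# illustrative): a single randperm index keeps points and normals paired.
import torch

P, n_points = 1000, 200
points = torch.randn(1, P, 3)
normals = torch.randn(1, P, 3)
rnd_idx = torch.randperm(P)[:n_points]
points = points.view(-1, 3)[rnd_idx].view(1, -1, 3)
normals = normals.view(-1, 3)[rnd_idx].view(1, -1, 3)
print(points.shape, normals.shape)  # (1, 200, 3) for both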
def get_spatial_w(self, point_clouds: PointClouds3D, points: Optional[torch.Tensor] = None, **kwargs):
    """
    Weights exp(-\|p - pi\|^2 / sigma^2)
    """
    bbox = point_clouds.get_bounding_boxes()
    diag2 = torch.sum((bbox[..., 1] - bbox[..., 0])**2, dim=-1)
    inv_sigma_spatial = point_clouds.num_points_per_cloud().float() / diag2
    self.filter_scale = kwargs.get('filter_scale', self.filter_scale)
    if points is None:
        points = point_clouds.points_padded()
    deltap = self.knn_tree.knn - points[:, :, None, :]
    w = torch.exp(-torch.sum(deltap * deltap, dim=-1) * inv_sigma_spatial *
                  self.filter_scale)
    return w
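# Sketch of the scale-adaptive bandwidth above (pure torch; toy cloud,
# illustrative): dividing the point count by the squared bounding-box diagonal
# makes the weights invariant to a global rescaling of the cloud.
import torch

points = torch.rand(1, 500, 3)
for scale in (1.0, 10.0):
    p = points * scale
    diag2 = torch.sum((p.max(dim=1).values - p.min(dim=1).values) ** 2, dim=-1)
    inv_sigma_spatial = 500.0 / diag2
    print(inv_sigma_spatial)  # shrinks by scale^2, compensating the distances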
import torch

import config  # NOTE: module path assumed; must provide create_splatting_renderer()
from DSS.core.cloud import PointClouds3D

if __name__ == '__main__':
    # note: this only works on CUDA
    # is_cuda = torch.cuda.is_available()
    device = torch.device("cuda")

    # testing the splatting renderer
    splatting_renderer = config.create_splatting_renderer().to(device)
    print('Splatting renderer type: {}'.format(type(splatting_renderer)))

    # generating 1 batch of 1000 points with 10 features per point
    batch_size = 1
    rand_pts_pos = torch.randn((batch_size, 1000, 3), device=device)
    rand_pts_normals = torch.randn((batch_size, 1000, 3), device=device)
    rand_pts_feat = torch.randn((batch_size, 1000, 10), device=device)
    print('DEBUG: rand_pts_pos shape: {}'.format(rand_pts_pos.shape))
    print('DEBUG: rand_pts_feat shape: {}'.format(rand_pts_feat.shape))

    pt_cloud = PointClouds3D(points=rand_pts_pos,
                             normals=rand_pts_normals,
                             features=rand_pts_feat)
    if batch_size > 1:
        # extend() returns a new object; it does not modify in place
        pt_cloud = pt_cloud.extend(batch_size)
    print('Point cloud: {}'.format(pt_cloud))

    # checking output images shape and channel size
    images = splatting_renderer(pt_cloud)
    print('Images type: {}'.format(type(images)))
    print('Images shape: {}'.format(images.shape))
def compute(self, points: torch.Tensor, sdf: torch.Tensor, mesh_gt: Meshes):
    """
    Rasterize the mesh faces from a far camera facing the origin, transform
    the predicted point positions to camera view and project them to get
    normalized image coordinates.
    The number of zbuf values at a point's image coordinate that are larger
    than the point's depth determines the sign of its sdf.
    """
    assert points.ndim == 2 and points.shape[-1] == 3
    device = points.device
    faces_per_pixel = 4
    with torch.autograd.no_grad():
        # a point that is definitely outside the mesh as camera center
        ray0 = torch.tensor([2, 2, 2], device=device,
                            dtype=points.dtype).view(1, 3)
        R, T = look_at_view_transform(eye=ray0,
                                      at=((0, 0, 0), ),
                                      up=((0, 0, 1), ))
        cameras = PerspectiveCameras(R=R, T=T, device=device)
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=RasterizationSettings(
                                        faces_per_pixel=faces_per_pixel, ))
        fragments = rasterizer(mesh_gt)
        z_predicted = cameras.get_world_to_view_transform().transform_points(
            points=points.unsqueeze(0))[..., -1:]
        # normalized pixel coordinates (top-left has the smallest values)
        screen_xy = -cameras.transform_points(points.unsqueeze(0))[..., :2]
        outside_screen = (screen_xy.abs() > 1.0).any(dim=-1)
        # fragments hold pix_to_face, zbuf, bary_coords, dists
        assert fragments.zbuf.shape[-1] == faces_per_pixel
        zbuf = torch.nn.functional.grid_sample(
            fragments.zbuf.permute(0, 3, 1, 2),
            screen_xy.clamp(-1.0, 1.0).view(1, -1, 1, 2),
            align_corners=False,
            mode='nearest')
        # points projected outside the image are treated as having no hits
        zbuf[outside_screen.unsqueeze(1).expand(-1, zbuf.shape[1], -1)] = -1.0
        # even number of surfaces behind the point -> outside (+1), odd -> inside (-1)
        sign = (((zbuf > z_predicted).sum(dim=1) %
                 2) == 0).type_as(points).view(screen_xy.shape[1])
        sign = sign * 2 - 1

    pcls = PointClouds3D(points.unsqueeze(0)).to(device=device)
    points_first_idx = pcls.cloud_to_packed_first_idx()
    max_points = pcls.num_points_per_cloud().max().item()

    # packed representation for faces
    verts_packed = mesh_gt.verts_packed()
    faces_packed = mesh_gt.faces_packed()
    tris = verts_packed[faces_packed]  # (T, 3, 3)
    tris_first_idx = mesh_gt.mesh_to_faces_packed_first_idx()
    max_tris = mesh_gt.num_faces_per_mesh().max().item()

    # point-to-face distance: shape (P,)
    point_to_face = point_face_distance(points, points_first_idx, tris,
                                        tris_first_idx, max_points)
    point_to_face = sign * torch.sqrt(eps_sqrt(point_to_face))
    loss = (point_to_face - sdf)**2
    return loss
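# A pure-torch sketch of the parity test above (toy depths, illustrative):
# an even number of surfaces behind the query point along the ray means the
# point is outside the mesh (+1), an odd number means inside (-1). Padding
# missed pixels with -1 (as done for off-screen points above) never counts as
# a surface behind the point, so those points default to "outside".
import torch

zbuf = torch.tensor([1.0, 3.0, -1.0, -1.0])  # 2 hits, faces_per_pixel=4
for z_pred, expected in [(0.5, 'outside'), (2.0, 'inside'), (3.5, 'outside')]:
    behind = (zbuf > z_pred).sum()
    sign = 1.0 if behind % 2 == 0 else -1.0
    print(f'z={z_pred}: sign={sign:+.0f} ({expected})')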
def compute(self, point_clouds: PointClouds3D, points_filters=None, rebuild_knn=True, **kwargs):
    self.knn_tree = kwargs.get('knn_tree', self.knn_tree)
    self.knn_mask = kwargs.get('knn_mask', self.knn_mask)

    lengths = point_clouds.num_points_per_cloud()
    P_total = lengths.sum().item()
    points_padded = point_clouds.points_padded()

    # Compute the necessary weights to project points to the local plane.
    # TODO(yifan): This part is the same as ProjectionLoss.
    # How can we best avoid repeating the computation?
    with torch.autograd.no_grad():
        if rebuild_knn or self.knn_tree is None or \
                points_padded.shape[:2] != self.knn_tree.idx.shape[:2]:
            self._build_knn(point_clouds)
        phi = self.get_phi(point_clouds, **kwargs)
        point_clouds = self._denoise_normals(point_clouds, phi, points_filters)
        # compute wn and wr
        # TODO(yifan): visibility weight?
        normal_w = self.get_normal_w(point_clouds, **kwargs)
        # update normals for a second iteration (?) Eq.(10)
        point_clouds = self._denoise_normals(point_clouds, phi * normal_w,
                                             points_filters)
        # compose weights
        weights = phi * normal_w
        weights[~self.knn_mask] = 0
        # zero out neighbors beyond filter_scale * local_point_spacing
        mask_ball_query = self.knn_tree.dists > (
            self.filter_scale * self.knn_tree.dists[:, :, :1] * 2.0)
        weights[mask_ball_query] = 0.0

        # project each point to a local surface
        knn_normals = ops.knn_gather(point_clouds.normals_padded(),
                                     self.knn_tree.idx, lengths)
        dist_to_surface = torch.sum(
            (self.knn_tree.knn.detach() - points_padded.unsqueeze(-2)) *
            knn_normals, dim=-1)
        deltap = torch.sum(
            dist_to_surface[..., None] * weights[..., None] * knn_normals,
            dim=-2) / eps_denom(torch.sum(weights, dim=-1, keepdim=True))
        points_projected = points_padded + deltap

    if get_debugging_mode():
        # points_padded.requires_grad_(True)
        def save_grad():
            lengths = point_clouds.num_points_per_cloud()

            def _save_grad(grad):
                dbg_tensor = get_debugging_tensor()
                if dbg_tensor is None:
                    logger_py.error("dbg_tensor is None")
                if grad is None:
                    logger_py.error('grad is None')
                # a dict of lists of tensors
                dbg_tensor.pts_world_grad['repel'] = [
                    grad[b, :lengths[b]].detach().cpu()
                    for b in range(grad.shape[0])
                ]

            return _save_grad

        dbg_tensor = get_debugging_tensor()
        dbg_tensor.pts_world['repel'] = [
            points_padded[b, :lengths[b]].detach().cpu()
            for b in range(points_padded.shape[0])
        ]
        handle = points_padded.register_hook(save_grad())
        self.hooks.append(handle)

    with torch.autograd.no_grad():
        spatial_w = self.get_spatial_w(point_clouds, points_projected)
        # density_w = self.get_density_w(point_clouds)
        # density weight is spatial_w summed over the neighbors, plus 1
        density_w = torch.sum(spatial_w, dim=-1, keepdim=True) + 1.0
        weights = normal_w * spatial_w * density_w
        weights[~self.knn_mask] = 0
        weights[mask_ball_query] = 0

    deltap = points_projected[:, :, None, :] - self.knn_tree.knn.detach()
    point_to_point_dist = torch.sum(deltap * deltap, dim=-1)

    # convert everything to packed
    weights = ops.padded_to_packed(weights,
                                   point_clouds.cloud_to_packed_first_idx(),
                                   P_total)
    point_to_point_dist = ops.padded_to_packed(
        point_to_point_dist, point_clouds.cloud_to_packed_first_idx(), P_total)

    # we want to maximize this, hence the negative sign
    point_to_point_dist = -torch.sum(point_to_point_dist * weights,
                                     dim=1) / eps_denom(
                                         torch.sum(weights, dim=1))
    return point_to_point_dist
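# A hedged sketch of the "convert everything to packed" step used above,
# assuming pytorch3d's padded_to_packed with the signature used in the repo
# (toy clouds, illustrative): padding entries are dropped and the remaining
# rows are concatenated.
import torch
from pytorch3d.ops import padded_to_packed

padded = torch.tensor([[1.0, 2.0, 0.0],    # cloud 0 has 2 points (1 pad)
                       [3.0, 4.0, 5.0]])   # cloud 1 has 3 points
first_idx = torch.tensor([0, 2])           # cloud_to_packed_first_idx
print(padded_to_packed(padded, first_idx, 5))  # tensor([1., 2., 3., 4., 5.])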
def compute(self, point_clouds: PointClouds3D, points_filters=None, rebuild_knn=False, **kwargs):
    """
    Args:
        point_clouds
        (optional) knn_tree: output of ops.knn_points, excluding the query point itself
        (optional) knn_mask: mask of valid knn results
    Returns:
        (P_total,) packed per-point loss
    """
    self.sharpness_sigma = kwargs.get('sharpness_sigma', self.sharpness_sigma)
    self.filter_scale = kwargs.get('filter_scale', self.filter_scale)
    self.knn_tree = kwargs.get('knn_tree', self.knn_tree)
    self.knn_mask = kwargs.get('knn_mask', self.knn_mask)

    lengths = point_clouds.num_points_per_cloud()
    P_total = lengths.sum().item()
    points = point_clouds.points_padded()

    # - determine spatial phi using the local point spacing (i.e. 2*dist_to_nn)
    # - denoise normals
    # - determine w_normal
    # - mask out values outside the ball neighborhood, i.e. d > filter_scale * local_point_spacing
    # - projected distance dot(ni, x - xi)
    # - multiply and normalize the weights
    with torch.autograd.no_grad():
        if rebuild_knn or self.knn_tree is None or \
                self.knn_tree.idx.shape[:2] != points.shape[:2]:
            self._build_knn(point_clouds)
        phi = self.get_phi(point_clouds, **kwargs)
        # robust normal mollification (Sec 4.4), i.e. replace normals with a
        # weighted average of neighboring normals Eq.(11)
        point_clouds = self._denoise_normals(point_clouds, phi, points_filters)
        # compute wn and wr
        # TODO(yifan): visibility weight?
        normal_w = self.get_normal_w(point_clouds, **kwargs)
        spatial_w = self.get_spatial_w(point_clouds, **kwargs)
        # update normals for a second iteration (?) Eq.(10)
        point_clouds = self._denoise_normals(point_clouds, phi * normal_w,
                                             points_filters)
        # compose weights
        weights = phi * spatial_w * normal_w
        weights[~self.knn_mask] = 0
        # zero out neighbors beyond filter_scale * local_point_spacing
        mask_ball_query = self.knn_tree.dists > (
            self.filter_scale * self.knn_tree.dists[:, :, :1] * 2.0)
        weights[mask_ball_query] = 0.0

    # (B, P, k) dot-product distance to the surface
    # (we need to gather again because the normals were changed in the denoising step)
    knn_normals = ops.knn_gather(point_clouds.normals_padded(),
                                 self.knn_tree.idx, lengths)
    # if points.requires_grad:
    #     from DSS.core.rasterizer import _dbg_tensor
    #     def save_grad(name):
    #         def _save_grad(grad):
    #             _dbg_tensor[name] = grad.detach().cpu()
    #         return _save_grad
    #     points.register_hook(save_grad('proj_grad'))
    dist_to_surface = torch.sum(
        (self.knn_tree.knn.detach() - points.unsqueeze(-2)) * knn_normals,
        dim=-1)

    if get_debugging_mode():
        # points.requires_grad_(True)
        def save_grad():
            lengths = point_clouds.num_points_per_cloud()

            def _save_grad(grad):
                dbg_tensor = get_debugging_tensor()
                if dbg_tensor is None:
                    logger_py.error("dbg_tensor is None")
                if grad is None:
                    logger_py.error('grad is None')
                # a dict of lists of tensors
                dbg_tensor.pts_world_grad['proj'] = [
                    grad[b, :lengths[b]].detach().cpu()
                    for b in range(grad.shape[0])
                ]

            return _save_grad

        dbg_tensor = get_debugging_tensor()
        dbg_tensor.pts_world['proj'] = [
            points[b, :lengths[b]].detach().cpu()
            for b in range(points.shape[0])
        ]
        handle = points.register_hook(save_grad())
        self.hooks.append(handle)

    # convert everything to packed
    weights = ops.padded_to_packed(weights,
                                   point_clouds.cloud_to_packed_first_idx(),
                                   P_total)
    dist_to_surface = ops.padded_to_packed(
        dist_to_surface, point_clouds.cloud_to_packed_first_idx(), P_total)

    # compute the weighted signed distance to the surface
    dist_to_surface = torch.sum(weights * dist_to_surface,
                                dim=-1) / eps_denom(
                                    torch.sum(weights, dim=-1))
    loss = dist_to_surface * dist_to_surface
    return loss
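# Sketch of the weighted point-to-plane distance above (toy data,
# illustrative): each neighbor (xi, ni) votes dot(ni, xi - x) for the signed
# distance of x to its tangent plane, and the loss is the square of the
# weighted average.
import torch

x = torch.tensor([0.0, 0.0, 0.1])                       # query point above z=0
xi = torch.tensor([[0.1, 0.0, 0.0], [0.0, 0.1, 0.0]])   # neighbors on the plane
ni = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]])   # neighbor normals
w = torch.tensor([0.7, 0.3])
dist_to_surface = torch.sum((xi - x) * ni, dim=-1)      # both -0.1 here
loss = (torch.sum(w * dist_to_surface) / w.sum()) ** 2
print(loss)  # 0.01 = squared distance of x to the local surface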
def compute(self, point_clouds: PointClouds3D, points_filter=None, rebuild_knn=True, **kwargs):
    self.knn_tree = kwargs.get('knn_tree', self.knn_tree)
    self.knn_mask = kwargs.get('knn_mask', self.knn_mask)

    lengths = point_clouds.num_points_per_cloud()
    P_total = lengths.sum().item()
    points_padded = point_clouds.points_padded()

    if not points_padded.requires_grad:
        logger_py.warn(
            'Computing repulsion loss, but points_padded is not differentiable.')

    # Compute the necessary weights to project points to the local plane.
    # TODO(yifan): This part is the same as ProjectionLoss.
    # How can we best avoid repeating the computation?
    with torch.autograd.no_grad():
        if rebuild_knn or self.knn_tree is None or \
                points_padded.shape[:2] != self.knn_tree.idx.shape[:2]:
            self._build_knn(point_clouds)
        phi = self.get_phi(point_clouds, **kwargs)
        point_clouds = self._denoise_normals(point_clouds,
                                             phi,
                                             points_filter,
                                             inplace=False)

    # project the point-to-neighbor differences into the local tangent plane
    knn_diff = points_padded.unsqueeze(-2) - self.knn_tree.knn.detach()
    knn_normals = ops.knn_gather(point_clouds.normals_padded(),
                                 self.knn_tree.idx, lengths)
    pts_diff_proj = knn_diff - \
        (knn_diff * knn_normals).sum(dim=-1, keepdim=True) * knn_normals

    if get_debugging_mode():
        # points_padded.requires_grad_(True)
        def save_grad():
            lengths = point_clouds.num_points_per_cloud()

            def _save_grad(grad):
                dbg_tensor = get_debugging_tensor()
                if dbg_tensor is None:
                    logger_py.error("dbg_tensor is None")
                if grad is None:
                    logger_py.error('grad is None')
                # a dict of lists of tensors
                dbg_tensor.pts_world_grad['repel'] = [
                    grad[b, :lengths[b]].detach().cpu()
                    for b in range(grad.shape[0])
                ]

            return _save_grad

        if points_padded.requires_grad:
            dbg_tensor = get_debugging_tensor()
            dbg_tensor.pts_world['repel'] = [
                points_padded[b, :lengths[b]].detach().cpu()
                for b in range(points_padded.shape[0])
            ]
            handle = points_padded.register_hook(save_grad())
            self.hooks.append(handle)

    with torch.autograd.no_grad():
        spatial_w = self.get_spatial_w(point_clouds, **kwargs)
        # set far neighbors' spatial_w to 0
        normal_w = self.get_normal_w(point_clouds, **kwargs)
        density_w = torch.sum(spatial_w, dim=-1, keepdim=True) + 1.0
        weights = spatial_w * normal_w

    # convert everything to packed
    weights = ops.padded_to_packed(weights,
                                   point_clouds.cloud_to_packed_first_idx(),
                                   P_total)
    pts_diff_proj = ops.padded_to_packed(
        pts_diff_proj.contiguous().view(pts_diff_proj.shape[0],
                                        pts_diff_proj.shape[1], -1),
        point_clouds.cloud_to_packed_first_idx(), P_total).view(P_total, -1, 3)
    density_w = ops.padded_to_packed(
        density_w, point_clouds.cloud_to_packed_first_idx(), P_total)

    # we want to maximize the repulsion, hence the negative sign in the exponent
    repel_vec = torch.sum(pts_diff_proj * weights.unsqueeze(-1),
                          dim=1) / eps_denom(
                              torch.sum(weights, dim=1).unsqueeze(-1))
    repel_vec = repel_vec * density_w

    loss = torch.exp(-repel_vec.abs())

    # if get_debugging_mode():
    #     # save to dbg folder as normal
    #     from ..utils.io import save_ply
    #     save_ply('./dbg_repel_diff.ply', point_clouds.points_packed().cpu().detach(),
    #              normals=repel_vec.cpu().detach())
    return loss
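# Sketch of the tangent-plane projection above (toy vectors, illustrative):
# removing the normal component from each point-to-neighbor difference makes
# the repulsion push points along the surface rather than off it.
import torch

n = torch.tensor([0.0, 0.0, 1.0])         # neighbor normal
knn_diff = torch.tensor([0.3, 0.0, 0.5])  # point minus neighbor
diff_proj = knn_diff - torch.sum(knn_diff * n) * n
print(diff_proj)  # tensor([0.3, 0.0, 0.0]) -- only the in-plane part remains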
def compute(self, point_clouds: PointClouds3D, points_filter=None, rebuild_knn=False, **kwargs):
    """
    Args:
        point_clouds
        (optional) knn_tree: output of ops.knn_points, excluding the query point itself
        (optional) knn_mask: mask of valid knn results
    Returns:
        (P_total,) packed per-point loss
    """
    self.sharpness_sigma = kwargs.get('sharpness_sigma', self.sharpness_sigma)
    self.filter_scale = kwargs.get('filter_scale', self.filter_scale)
    self.knn_tree = kwargs.get('knn_tree', self.knn_tree)
    self.knn_mask = kwargs.get('knn_mask', self.knn_mask)

    lengths = point_clouds.num_points_per_cloud()
    P_total = lengths.sum().item()
    points = point_clouds.points_padded()

    # - determine spatial phi using the local point spacing (i.e. 2*dist_to_nn)
    # - denoise normals
    # - determine w_normal
    # - mask out values outside the ball neighborhood, i.e. d > filter_scale * local_point_spacing
    # - projected distance dot(ni, x - xi)
    # - multiply and normalize the weights
    with torch.autograd.no_grad():
        if rebuild_knn or self.knn_tree is None or \
                self.knn_tree.idx.shape[:2] != points.shape[:2]:
            self._build_knn(point_clouds)
        phi = self.get_phi(point_clouds, **kwargs)
        # robust normal mollification (Sec 4.4), i.e. replace normals with a
        # weighted average of neighboring normals Eq.(11)
        point_clouds = self._denoise_normals(point_clouds,
                                             phi,
                                             points_filter,
                                             inplace=False)
        # compute wn and wr
        normal_w = self.get_normal_w(point_clouds, **kwargs)
        # visibility weight: occluded neighbors are down-weighted, not dropped
        visibility_nb = ops.knn_gather(points_filter.visibility.unsqueeze(-1),
                                       self.knn_tree.idx, lengths)
        visibility_w = visibility_nb.float()
        visibility_w[~visibility_nb] = 0.1
        # compose weights
        weights = phi * normal_w * visibility_w.squeeze(-1)

    # (B, P, k) dot-product distance to the surface
    knn_normals = ops.knn_gather(point_clouds.normals_padded(),
                                 self.knn_tree.idx, lengths)

    if get_debugging_mode():
        # points.requires_grad_(True)
        def save_grad():
            lengths = point_clouds.num_points_per_cloud()

            def _save_grad(grad):
                dbg_tensor = get_debugging_tensor()
                if dbg_tensor is None:
                    logger_py.error("dbg_tensor is None")
                if grad is None:
                    logger_py.error('grad is None')
                # a dict of lists of tensors
                dbg_tensor.pts_world_grad['proj'] = [
                    grad[b, :lengths[b]].detach().cpu()
                    for b in range(grad.shape[0])
                ]

            return _save_grad

        if points.requires_grad:
            dbg_tensor = get_debugging_tensor()
            dbg_tensor.pts_world['proj'] = [
                points[b, :lengths[b]].detach().cpu()
                for b in range(points.shape[0])
            ]
            handle = points.register_hook(save_grad())
            self.hooks.append(handle)

    sdf = torch.sum(
        (self.knn_tree.knn.detach() - points.unsqueeze(-2)) * knn_normals,
        dim=-1)

    # convert everything to packed
    weights = ops.padded_to_packed(weights,
                                   point_clouds.cloud_to_packed_first_idx(),
                                   P_total)
    sdf = ops.padded_to_packed(sdf, point_clouds.cloud_to_packed_first_idx(),
                               P_total)

    # if get_debugging_mode():
    #     # save to dbg folder as normal
    #     from ..utils.io import save_ply
    #     save_ply('./dbg_repel_diff.ply', point_clouds.points_packed().cpu().detach(),
    #              normals=repel_vec.cpu().detach())

    distance_to_face = sdf * sdf
    # compute the weighted squared distance to the surface
    loss = torch.sum(weights * distance_to_face, dim=-1) / eps_denom(
        torch.sum(weights, dim=-1))
    return loss
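# A hedged sketch of the per-neighbor visibility gathering above, assuming
# pytorch3d's knn_points/knn_gather as used in the repo (toy cloud,
# illustrative; unlike the repo's knn tree, this includes each point as its
# own neighbor, which is fine for the demo): occluded neighbors keep a small
# weight of 0.1 instead of being dropped entirely.
import torch
from pytorch3d.ops import knn_gather, knn_points

pts = torch.rand(1, 6, 3)
visibility = torch.tensor([[1, 0, 1, 1, 0, 1]], dtype=torch.bool)
knn = knn_points(pts, pts, K=3)
visibility_nb = knn_gather(visibility.unsqueeze(-1), knn.idx)  # (1, 6, 3, 1)
visibility_w = visibility_nb.float()
visibility_w[~visibility_nb] = 0.1
print(visibility_w.squeeze(-1))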