Example #1
    def compute(self, predict, target):
        """
        Compute 1 - intersection/union (IoU loss); predict and target are
        tensors of the same shape [N, ...].
        """
        dims = tuple(range(predict.ndimension())[1:])
        intersect = (predict * target).sum(dims)
        union = (predict + target - predict * target).sum(dims)
        result = 1.0 - intersect / eps_denom(union)
        return result
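
The snippet relies on an eps_denom helper that is not shown on this page; presumably it just keeps the denominator away from zero. A minimal sketch under that assumption (the epsilon value and clamping behaviour are guesses, not the repository's definition), followed by a toy call of the term above:

import torch

def eps_denom(denom, eps=1e-17):
    # Keep a non-negative denominator away from zero before dividing.
    # Epsilon and clamping are assumptions; the repository's eps_denom may differ.
    return denom.clamp(min=eps)

# Toy usage of the IoU-style term from Example #1:
predict = torch.rand(2, 4, 4)                    # (N, H, W) soft predictions in [0, 1]
target = (torch.rand(2, 4, 4) > 0.5).float()     # (N, H, W) binary targets
dims = tuple(range(predict.ndimension())[1:])
intersect = (predict * target).sum(dims)
union = (predict + target - predict * target).sum(dims)
loss = 1.0 - intersect / eps_denom(union)        # shape (N,)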
Example #2
    def _denoise_normals(self,
                         point_clouds,
                         weights,
                         point_clouds_filter=None):
        """
        robust normal mollification (Sec 4.4), i.e. replace normals with a weighted average
        from neighboring normals
        do this only for invisible points (?)
        Args:
            weights (tensors): (N,max_P,K)
        """
        lengths = point_clouds.num_points_per_cloud()
        P_total = lengths.sum().item()
        normals = point_clouds.normals_padded()
        batch_size, max_P, _ = normals.shape

        knn_normals = ops.knn_gather(normals, self.knn_tree.idx, lengths)
        normals_denoised = torch.sum(knn_normals * weights[:, :, :, None], dim=-2) / \
            eps_denom(torch.sum(weights, dim=-1, keepdim=True))

        # get point visibility so that we update only the non-visible or out-of-mask normals
        if point_clouds_filter is not None:
            try:
                reliable_normals_mask = point_clouds_filter.visibility & point_clouds_filter.inmask
                if len(point_clouds) != reliable_normals_mask.shape[0]:
                    if len(point_clouds) == 1 and reliable_normals_mask.shape[0] > 1:
                        reliable_normals_mask = reliable_normals_mask.any(
                            dim=0, keepdim=True)
                    else:
                        raise ValueError(
                            "Incompatible point clouds {} and mask {}".format(
                                len(point_clouds),
                                reliable_normals_mask.shape))

                # visibility is stored as 0/1 in the last dimension of the features;
                # reset visible points' normals to their original values
                normals_denoised[reliable_normals_mask == 1] = normals[
                    reliable_normals_mask == 1]
            except KeyError:
                pass

        normals_packed = point_clouds.normals_packed()
        normals_denoised_packed = ops.padded_to_packed(
            normals_denoised, point_clouds.cloud_to_packed_first_idx(),
            P_total)
        point_clouds.update_normals_(normals_denoised_packed)
        return point_clouds
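
For intuition, the weighted average at the heart of _denoise_normals can be written as a standalone function on plain tensors. A minimal sketch, assuming knn_normals of shape (N, P, K, 3) and weights of shape (N, P, K); it deliberately skips the PointClouds3D bookkeeping and the visibility-mask handling shown above:

import torch

def mollify_normals(knn_normals, weights, eps=1e-17):
    # knn_normals: (N, P, K, 3) normals of the K nearest neighbors of each point
    # weights:     (N, P, K)    per-neighbor weights, e.g. phi or phi * normal_w
    weighted = torch.sum(knn_normals * weights[..., None], dim=-2)    # (N, P, 3)
    denom = torch.sum(weights, dim=-1, keepdim=True).clamp(min=eps)   # (N, P, 1)
    # Like the method above, this does not re-normalize the averaged normals.
    return weighted / denom

knn_normals = torch.nn.functional.normalize(torch.randn(1, 5, 8, 3), dim=-1)
weights = torch.rand(1, 5, 8)
print(mollify_normals(knn_normals, weights).shape)  # torch.Size([1, 5, 3])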
Example #3
    def compute(self,
                point_clouds: PointClouds3D,
                points_filters=None,
                rebuild_knn=True,
                **kwargs):

        self.knn_tree = kwargs.get('knn_tree', self.knn_tree)
        self.knn_mask = kwargs.get('knn_mask', self.knn_mask)

        lengths = point_clouds.num_points_per_cloud()
        P_total = lengths.sum().item()
        points_padded = point_clouds.points_padded()

        # Compute necessary weights to project points to local plane
        # TODO(yifan): This part is same as ProjectionLoss
        # how can we at best save repetitive computation
        with torch.autograd.no_grad():
            if (rebuild_knn or self.knn_tree is None
                    or points_padded.shape[:2] != self.knn_tree.shape[:2]):
                self._build_knn(point_clouds)

            phi = self.get_phi(point_clouds, **kwargs)

            self._denoise_normals(point_clouds, phi, points_filters)

            # compute wn and wr
            # TODO(yifan): visibility weight?
            normal_w = self.get_normal_w(point_clouds, **kwargs)

            # update normals for a second iteration (?) Eq.(10)
            point_clouds = self._denoise_normals(point_clouds, phi * normal_w,
                                                 points_filters)

            # compose weights
            weights = phi * normal_w
            weights[~self.knn_mask] = 0

            # outside filter_scale*local_point_spacing weights
            mask_ball_query = self.knn_tree.dists > (
                self.filter_scale * self.knn_tree.dists[:, :, :1] * 2.0)
            weights[mask_ball_query] = 0.0

        # project the point to a local surface
        knn_normals = ops.knn_gather(point_clouds.normals_padded(),
                                     self.knn_tree.idx, lengths)
        dist_to_surface = torch.sum(
            (self.knn_tree.knn.detach() - points_padded.unsqueeze(-2)) *
            knn_normals,
            dim=-1)
        deltap = torch.sum(
            dist_to_surface[..., None] * weights[..., None] * knn_normals,
            dim=-2) / eps_denom(torch.sum(weights, dim=-1, keepdim=True))
        points_projected = points_padded + deltap

        if get_debugging_mode():
            # points_padded.requires_grad_(True)

            def save_grad():
                lengths = point_clouds.num_points_per_cloud()

                def _save_grad(grad):
                    dbg_tensor = get_debugging_tensor()
                    if dbg_tensor is None:
                        logger_py.error("dbg_tensor is None")
                    if grad is None:
                        logger_py.error('grad is None')
                    # a dict of list of tensors
                    dbg_tensor.pts_world_grad['repel'] = [
                        grad[b, :lengths[b]].detach().cpu()
                        for b in range(grad.shape[0])
                    ]

                return _save_grad

            dbg_tensor = get_debugging_tensor()
            dbg_tensor.pts_world['repel'] = [
                points_padded[b, :lengths[b]].detach().cpu()
                for b in range(points_padded.shape[0])
            ]
            handle = points_padded.register_hook(save_grad())
            self.hooks.append(handle)

        with torch.autograd.no_grad():
            spatial_w = self.get_spatial_w(point_clouds, points_projected)
            # density_w = self.get_density_w(point_clouds)
            # the density weight is the sum of spatial_w over the neighbors, plus 1
            density_w = torch.sum(spatial_w, dim=-1, keepdim=True) + 1.0
            weights = normal_w * spatial_w * density_w
            weights[~self.knn_mask] = 0
            weights[mask_ball_query] = 0

        deltap = points_projected[:, :, None, :] - self.knn_tree.knn.detach()
        point_to_point_dist = torch.sum(deltap * deltap, dim=-1)

        # convert everything to packed
        weights = ops.padded_to_packed(
            weights, point_clouds.cloud_to_packed_first_idx(), P_total)
        point_to_point_dist = ops.padded_to_packed(
            point_to_point_dist, point_clouds.cloud_to_packed_first_idx(),
            P_total)

        # we want to maximize this, so negative sign
        point_to_point_dist = -torch.sum(point_to_point_dist * weights, dim=1) / \
            eps_denom(torch.sum(weights, dim=1))
        return point_to_point_dist
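
The _build_knn call used above is not part of these snippets. A rough sketch of what it could look like with pytorch3d.ops.knn_points, assuming the goal is the knn_tree described in the later docstrings (neighbors of each point excluding the query point itself); the helper name, K, and the K+1 self-exclusion trick are illustrative assumptions, not the repository's implementation:

import torch
from pytorch3d.ops import knn_points

def build_knn(points_padded, lengths, K=16):
    # Query each cloud against itself; ask for K + 1 neighbors so that the
    # first neighbor (the query point itself, at distance 0) can be dropped.
    knn = knn_points(points_padded, points_padded,
                     lengths1=lengths, lengths2=lengths,
                     K=K + 1, return_nn=True)
    return knn._replace(dists=knn.dists[..., 1:],
                        idx=knn.idx[..., 1:],
                        knn=knn.knn[..., 1:, :])

points = torch.randn(2, 100, 3)
lengths = torch.tensor([100, 80])
knn_tree = build_knn(points, lengths, K=8)
print(knn_tree.idx.shape)  # torch.Size([2, 100, 8])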
Example #4
    def compute(self,
                point_clouds: PointClouds3D,
                points_filters=None,
                rebuild_knn=False,
                **kwargs):
        """
        Args:
            point_clouds
            (optional) knn_tree: output from ops.knn_points excluding the query point itself
            (optional) knn_mask: mask valid knn results
        Returns:
            (P, N)
        """
        self.sharpness_sigma = kwargs.get('sharpness_sigma',
                                          self.sharpness_sigma)
        self.filter_scale = kwargs.get('filter_scale', self.filter_scale)
        self.knn_tree = kwargs.get('knn_tree', self.knn_tree)
        self.knn_mask = kwargs.get('knn_mask', self.knn_mask)

        lengths = point_clouds.num_points_per_cloud()
        P_total = lengths.sum().item()
        points = point_clouds.points_padded()
        # - determine spatial phi using local point spacing (i.e. 2 * dist_to_nn)
        # - denoise normals
        # - determine w_normal
        # - mask out values outside the ball neighborhood, i.e. d > filter_scale * local_point_spacing
        # - projected distance dot(ni, x - xi)
        # - multiply and normalize the weights
        with torch.autograd.no_grad():
            if (rebuild_knn or self.knn_tree is None
                    or self.knn_tree.idx.shape[:2] != points.shape[:2]):
                self._build_knn(point_clouds)

            phi = self.get_phi(point_clouds, **kwargs)

            # robust normal mollification (Sec 4.4), i.e. replace normals with a weighted average
            # from neighboring normals Eq.(11)
            point_clouds = self._denoise_normals(point_clouds, phi,
                                                 points_filters)

            # compute wn and wr
            # TODO(yifan): visibility weight?
            normal_w = self.get_normal_w(point_clouds, **kwargs)
            spatial_w = self.get_spatial_w(point_clouds, **kwargs)

            # update normals for a second iteration (?) Eq.(10)
            point_clouds = self._denoise_normals(point_clouds, phi * normal_w,
                                                 points_filters)

            # compose weights
            weights = phi * spatial_w * normal_w
            weights[~self.knn_mask] = 0

            # outside filter_scale*local_point_spacing weights
            mask_ball_query = self.knn_tree.dists > (
                self.filter_scale * self.knn_tree.dists[:, :, :1] * 2.0)
            weights[mask_ball_query] = 0.0

            # (B, P, k), dot product distance to surface
            # (we need to gather again because the normals have been changed in the denoising step)
            knn_normals = ops.knn_gather(point_clouds.normals_padded(),
                                         self.knn_tree.idx, lengths)

        # if points.requires_grad:
        #     from DSS.core.rasterizer import _dbg_tensor

        #     def save_grad(name):
        #         def _save_grad(grad):
        #             _dbg_tensor[name] = grad.detach().cpu()
        #         return _save_grad
        #     points.register_hook(save_grad('proj_grad'))

        dist_to_surface = torch.sum(
            (self.knn_tree.knn.detach() - points.unsqueeze(-2)) * knn_normals,
            dim=-1)

        if get_debugging_mode():
            # points.requires_grad_(True)

            def save_grad():
                lengths = point_clouds.num_points_per_cloud()

                def _save_grad(grad):
                    dbg_tensor = get_debugging_tensor()
                    if dbg_tensor is None:
                        logger_py.error("dbg_tensor is None")
                    if grad is None:
                        logger_py.error('grad is None')
                    # a dict of list of tensors
                    dbg_tensor.pts_world_grad['proj'] = [
                        grad[b, :lengths[b]].detach().cpu()
                        for b in range(grad.shape[0])
                    ]

                return _save_grad

            dbg_tensor = get_debugging_tensor()
            dbg_tensor.pts_world['proj'] = [
                points[b, :lengths[b]].detach().cpu()
                for b in range(points.shape[0])
            ]
            handle = points.register_hook(save_grad())
            self.hooks.append(handle)

        # convert everything to packed
        weights = ops.padded_to_packed(
            weights, point_clouds.cloud_to_packed_first_idx(), P_total)
        dist_to_surface = ops.padded_to_packed(
            dist_to_surface, point_clouds.cloud_to_packed_first_idx(), P_total)

        # compute weighted signed distance to surface
        dist_to_surface = torch.sum(weights * dist_to_surface, dim=-1) / \
            eps_denom(torch.sum(weights, dim=-1))
        loss = dist_to_surface * dist_to_surface
        return loss
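
In formula form, the per-point loss this example computes can be read off the code as follows (my notation, not from the repository; phi, theta and psi stand for the code's phi, spatial_w and normal_w, and the sum runs over the k nearest neighbors that survive the ball-query mask):

\[
\ell_{\mathrm{proj}}(x_i) \;=\; \left( \frac{\sum_{j \in \mathcal{N}(i)} w_{ij}\, n_j^\top (x_j - x_i)}{\sum_{j \in \mathcal{N}(i)} w_{ij}} \right)^{2},
\qquad
w_{ij} \;=\; \phi_{ij}\, \theta_{ij}\, \psi_{ij}.
\]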
Example #5
    def compute(self,
                point_clouds: PointClouds3D,
                points_filter=None,
                rebuild_knn=True,
                **kwargs):

        self.knn_tree = kwargs.get('knn_tree', self.knn_tree)
        self.knn_mask = kwargs.get('knn_mask', self.knn_mask)

        lengths = point_clouds.num_points_per_cloud()
        P_total = lengths.sum().item()
        points_padded = point_clouds.points_padded()

        if not points_padded.requires_grad:
            logger_py.warn(
                'Computing repulsion loss, but points_padded is not differentiable.'
            )

        # Compute necessary weights to project points to local plane
        # TODO(yifan): This part is same as ProjectionLoss
        # how can we at best save repetitive computation
        with torch.autograd.no_grad():
            if (rebuild_knn or self.knn_tree is None
                    or points_padded.shape[:2] != self.knn_tree.shape[:2]):
                self._build_knn(point_clouds)
            phi = self.get_phi(point_clouds, **kwargs)
            point_clouds = self._denoise_normals(point_clouds,
                                                 phi,
                                                 points_filter,
                                                 inplace=False)

        # project the point to a local surface
        knn_diff = points_padded.unsqueeze(-2) - self.knn_tree.knn.detach()

        knn_normals = ops.knn_gather(point_clouds.normals_padded(),
                                     self.knn_tree.idx, lengths)
        pts_diff_proj = knn_diff - \
            (knn_diff * knn_normals).sum(dim=-1, keepdim=True) * knn_normals

        if get_debugging_mode():
            # points_padded.requires_grad_(True)

            def save_grad():
                lengths = point_clouds.num_points_per_cloud()

                def _save_grad(grad):
                    dbg_tensor = get_debugging_tensor()
                    if dbg_tensor is None:
                        logger_py.error("dbg_tensor is None")
                    if grad is None:
                        logger_py.error('grad is None')
                    # a dict of list of tensors
                    dbg_tensor.pts_world_grad['repel'] = [
                        grad[b, :lengths[b]].detach().cpu()
                        for b in range(grad.shape[0])
                    ]

                return _save_grad

            if points_padded.requires_grad:
                dbg_tensor = get_debugging_tensor()
                dbg_tensor.pts_world['repel'] = [
                    points_padded[b, :lengths[b]].detach().cpu()
                    for b in range(points_padded.shape[0])
                ]
                handle = points_padded.register_hook(save_grad())
                self.hooks.append(handle)

        with torch.autograd.no_grad():
            spatial_w = self.get_spatial_w(point_clouds, **kwargs)
            # set far neighbors' spatial_w to 0
            normal_w = self.get_normal_w(point_clouds, **kwargs)
            density_w = torch.sum(spatial_w, dim=-1, keepdim=True) + 1.0
            weights = spatial_w * normal_w

        # convert everything to packed
        weights = ops.padded_to_packed(
            weights, point_clouds.cloud_to_packed_first_idx(), P_total)
        pts_diff_proj = ops.padded_to_packed(
            pts_diff_proj.contiguous().view(pts_diff_proj.shape[0],
                                            pts_diff_proj.shape[1], -1),
            point_clouds.cloud_to_packed_first_idx(),
            P_total).view(P_total, -1, 3)
        density_w = ops.padded_to_packed(
            density_w, point_clouds.cloud_to_packed_first_idx(), P_total)

        # we want to maximize this, so negative sign
        repel_vec = torch.sum(pts_diff_proj * weights.unsqueeze(-1), dim=1) / \
            eps_denom(torch.sum(weights, dim=1).unsqueeze(-1))
        repel_vec = repel_vec * density_w

        loss = torch.exp(-repel_vec.abs())

        # if get_debugging_mode():
        #     # save to dbg folder as normal
        #     from ..utils.io import save_ply
        #     save_ply('./dbg_repel_diff.ply', point_clouds.points_packed().cpu().detach(), normals=repel_vec.cpu().detach())

        return loss
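
As a formula, the repulsion term of this example reads as follows (again my notation derived from the code; w_ij is spatial_w * normal_w, d_i = 1 + sum_j spatial_w_ij is the density weight, and the exponential is applied elementwise to the coordinates of r_i):

\[
r_i \;=\; d_i \cdot \frac{\sum_{j \in \mathcal{N}(i)} w_{ij}\,\bigl[(x_i - x_j) - \bigl(n_j^\top (x_i - x_j)\bigr)\, n_j\bigr]}{\sum_{j \in \mathcal{N}(i)} w_{ij}},
\qquad
\ell_{\mathrm{repel}}(x_i) \;=\; \exp\!\bigl(-\lvert r_i \rvert\bigr).
\]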
Example #6
    def compute(self,
                point_clouds: PointClouds3D,
                points_filter=None,
                rebuild_knn=False,
                **kwargs):
        """
        Args:
            point_clouds
            (optional) knn_tree: output from ops.knn_points excluding the query point itself
            (optional) knn_mask: mask valid knn results
        Returns:
            (P, N)
        """
        self.sharpness_sigma = kwargs.get('sharpness_sigma',
                                          self.sharpness_sigma)
        self.filter_scale = kwargs.get('filter_scale', self.filter_scale)
        self.knn_tree = kwargs.get('knn_tree', self.knn_tree)
        self.knn_mask = kwargs.get('knn_mask', self.knn_mask)

        lengths = point_clouds.num_points_per_cloud()
        P_total = lengths.sum().item()
        points = point_clouds.points_padded()
        # - determine spatial phi using local point spacing (i.e. 2 * dist_to_nn)
        # - denoise normals
        # - determine w_normal
        # - mask out values outside the ball neighborhood, i.e. d > filter_scale * local_point_spacing
        # - projected distance dot(ni, x - xi)
        # - multiply and normalize the weights
        with torch.autograd.no_grad():
            if (rebuild_knn or self.knn_tree is None
                    or self.knn_tree.idx.shape[:2] != points.shape[:2]):
                self._build_knn(point_clouds)

            phi = self.get_phi(point_clouds, **kwargs)

            # robust normal mollification (Sec 4.4), i.e. replace normals with a weighted average
            # from neighboring normals Eq.(11)
            point_clouds = self._denoise_normals(point_clouds,
                                                 phi,
                                                 points_filter,
                                                 inplace=False)

            # compute wn and wr
            normal_w = self.get_normal_w(point_clouds, **kwargs)
            # visibility weight
            visibility_nb = ops.knn_gather(
                points_filter.visibility.unsqueeze(-1), self.knn_tree.idx,
                lengths)
            visibility_w = visibility_nb.float()
            visibility_w[~visibility_nb] = 0.1
            # compose weights
            weights = phi * normal_w * visibility_w.squeeze(-1)

            # (B, P, k), dot product distance to surface
            knn_normals = ops.knn_gather(point_clouds.normals_padded(),
                                         self.knn_tree.idx, lengths)

        if get_debugging_mode():
            # points.requires_grad_(True)

            def save_grad():
                lengths = point_clouds.num_points_per_cloud()

                def _save_grad(grad):
                    dbg_tensor = get_debugging_tensor()
                    if dbg_tensor is None:
                        logger_py.error("dbg_tensor is None")
                    if grad is None:
                        logger_py.error('grad is None')
                    # a dict of list of tensors
                    dbg_tensor.pts_world_grad['proj'] = [
                        grad[b, :lengths[b]].detach().cpu()
                        for b in range(grad.shape[0])
                    ]

                return _save_grad

            if points.requires_grad:
                dbg_tensor = get_debugging_tensor()
                dbg_tensor.pts_world['proj'] = [
                    points[b, :lengths[b]].detach().cpu()
                    for b in range(points.shape[0])
                ]
                handle = points.register_hook(save_grad())
                self.hooks.append(handle)

        sdf = torch.sum(
            (self.knn_tree.knn.detach() - points.unsqueeze(-2)) * knn_normals,
            dim=-1)

        # convert everything to packed
        weights = ops.padded_to_packed(
            weights, point_clouds.cloud_to_packed_first_idx(), P_total)
        sdf = ops.padded_to_packed(sdf,
                                   point_clouds.cloud_to_packed_first_idx(),
                                   P_total)

        # if get_debugging_mode():
        #     # save to dbg folder as normal
        #     from ..utils.io import save_ply
        #     save_ply('./dbg_repel_diff.ply', point_clouds.points_packed().cpu().detach(), normals=repel_vec.cpu().detach())

        distance_to_face = sdf * sdf
        # compute weighted signed distance to surface
        loss = torch.sum(weights * distance_to_face, dim=-1) / eps_denom(
            torch.sum(weights, dim=-1))

        return loss
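
For comparison with Example #4, this variant averages the squared point-to-plane distances instead of squaring the averaged distance (my notation from the code; here w_ij is phi * normal_w * visibility_w, where visible neighbors get weight 1 and invisible ones 0.1):

\[
\ell(x_i) \;=\; \frac{\sum_{j \in \mathcal{N}(i)} w_{ij}\,\bigl(n_j^\top (x_j - x_i)\bigr)^{2}}{\sum_{j \in \mathcal{N}(i)} w_{ij}}.
\]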