Example #1
    def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, None, None, None, None]
        r"""
        Parameters
        ----------
        grad_out : torch.FloatTensor
            (N, c_out) tensor with gradients of the outputs.

        Returns
        -------
        grad_features : torch.FloatTensor
            (N, c_in) tensor with gradients of features.

        grad_kernel : torch.FloatTensor
            (K, c_in, c_out) tensor with gradients of the kernel.

        None
            Placeholders for the remaining forward inputs, which are not
            differentiable.
        """

        features, kernel, neighbor_map, neighbor_offset, transpose = ctx.for_backwards
        K, c_in, c_out = kernel.size()
        N_in = features.size(0)
        N_out = grad_out.size(0)
        grad_features = torch.zeros(N_in, c_in, device=features.device)
        grad_kernel = torch.zeros(K, c_in, c_out, device=kernel.device)

        if 'cuda' in str(features.device):
            torchsparse_cuda.sparseconv_backward(features, grad_features,
                                                 grad_out.contiguous(), kernel,
                                                 grad_kernel, neighbor_map,
                                                 neighbor_offset, transpose)
        else:
            raise NotImplementedError
        return grad_features, grad_kernel, None, None, None, None
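
The six values returned above follow the torch.autograd.Function contract: backward must return one gradient (or None) per argument of the paired forward, so the forward this example belongs to evidently takes six inputs even though only five of them are stashed in ctx.for_backwards. Below is a minimal, self-contained sketch of that same pattern using a dense matmul as a stand-in for the sparse convolution; the ToyConvLike class and its tensors are hypothetical, and nothing here calls torchsparse_cuda.

import torch
from torch.autograd import Function


class ToyConvLike(Function):
    @staticmethod
    def forward(ctx, features, kernel, neighbor_map, neighbor_offset, transpose):
        # Stash inputs for the backward pass, mirroring ctx.for_backwards above.
        ctx.for_backwards = (features, kernel, neighbor_map, neighbor_offset, transpose)
        # Stand-in math: a dense matmul instead of the sparse gather/scatter.
        return features @ kernel

    @staticmethod
    def backward(ctx, grad_out):
        features, kernel, neighbor_map, neighbor_offset, transpose = ctx.for_backwards
        grad_features = grad_out @ kernel.t()
        grad_kernel = features.t() @ grad_out
        # One slot per forward input: the last three inputs are not
        # differentiable, so their gradients are None, just like the
        # trailing Nones in the examples on this page.
        return grad_features, grad_kernel, None, None, None


features = torch.randn(8, 4, requires_grad=True)
kernel = torch.randn(4, 16, requires_grad=True)
out = ToyConvLike.apply(features, kernel, None, None, False)
out.sum().backward()
print(features.grad.shape, kernel.grad.shape)  # torch.Size([8, 4]) torch.Size([4, 16])

In the real examples the trailing None slots cover neighbor_map, neighbor_offset, transpose and whatever other non-tensor arguments the forward takes; PyTorch simply discards them.
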
Example #2
    def backward(ctx, grad_out):
        features, kernel, neighbor_map, neighbor_offset, transpose = ctx.for_backwards
        K, c_in, c_out = kernel.size()
        N_in = features.size(0)
        N_out = grad_out.size(0)
        # Zero-initialized buffers that the CUDA kernel fills in place.
        grad_features = torch.zeros(N_in, c_in, device=features.device)
        grad_kernel = torch.zeros(K, c_in, c_out, device=kernel.device)

        if 'cuda' in str(features.device):
            torchsparse_cuda.sparseconv_backward(features, grad_features,
                                                 grad_out.contiguous(), kernel,
                                                 grad_kernel, neighbor_map,
                                                 neighbor_offset, transpose)
        else:
            # Only the CUDA path is implemented; there is no CPU fallback.
            raise NotImplementedError
        # One gradient slot per forward input; non-differentiable inputs get None.
        return grad_features, grad_kernel, None, None, None, None