Example 1
    def backward(ctx,
                 grad_out: torch.Tensor) -> Tuple[torch.Tensor, None]:
        r"""

        Parameters
        ----------
        grad_out : torch.Tensor
            (B, C, npoint, nsample) tensor of the gradients of the output from forward

        Returns
        -------
        torch.Tensor
            (B, C, N) gradient of the features
        None
        """
        idx, N = ctx.for_backwards

        # The CUDA kernel scatter-adds grad_out[b, c, p, s] into
        # grad_features[b, c, idx[b, p, s]], producing a (B, C, N) tensor.
        grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
        return grad_features, None
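The gradient itself is computed by _ext.group_points_grad, a compiled CUDA kernel whose body is not shown here. As a minimal pure-PyTorch sketch of the scatter-add it performs (assuming the usual PointNet++ grouping semantics, where idx holds the indices of the nsample neighbours of each of the npoint group centers; group_points_grad_ref is a hypothetical name, not part of the extension):

import torch

def group_points_grad_ref(grad_out: torch.Tensor, idx: torch.Tensor,
                          N: int) -> torch.Tensor:
    # grad_out: (B, C, npoint, nsample), idx: (B, npoint, nsample)
    B, C, npoint, nsample = grad_out.shape
    # Flatten the two grouping dimensions into a single axis.
    grad_flat = grad_out.reshape(B, C, npoint * nsample)
    # Broadcast idx over the channel dimension so it matches grad_flat.
    idx_flat = idx.reshape(B, 1, npoint * nsample).expand(-1, C, -1).long()
    grad_features = torch.zeros(B, C, N, dtype=grad_out.dtype,
                                device=grad_out.device)
    # scatter_add_ accumulates repeated indices, mirroring the kernel's
    # atomicAdd: grad_features[b, c, idx[b, p, s]] += grad_out[b, c, p, s]
    grad_features.scatter_add_(2, idx_flat, grad_flat)
    return grad_features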
Example 2
    def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, None]
        r"""

        Parameters
        ----------
        grad_out : torch.Tensor
            (B, C, npoint, nsample) tensor of the gradients of the output from forward

        Returns
        -------
        torch.Tensor
            (B, C, N) gradient of the features
        None
        """
        idx, N = ctx.for_backwards

        grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)

        return grad_features, None
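Both examples unpack (idx, N) from ctx.for_backwards, which the matching forward must have stored. A minimal sketch of that pairing, with the real _ext.group_points gather replaced by torch.gather and reusing the group_points_grad_ref helper from above (the class name is illustrative, not the repo's exact code):

import torch
from torch.autograd import Function

class GroupingOperationSketch(Function):
    @staticmethod
    def forward(ctx, features, idx):
        # features: (B, C, N), idx: (B, npoint, nsample)
        B, C, N = features.shape
        _, npoint, nsample = idx.shape
        ctx.for_backwards = (idx, N)  # exactly what backward unpacks above
        idx_flat = idx.reshape(B, 1, npoint * nsample).expand(-1, C, -1).long()
        # Gather each neighbour's feature vector: (B, C, npoint, nsample).
        return features.gather(2, idx_flat).reshape(B, C, npoint, nsample)

    @staticmethod
    def backward(ctx, grad_out):
        idx, N = ctx.for_backwards
        return group_points_grad_ref(grad_out.contiguous(), idx, N), None

A quick shape check with made-up sizes (B=2, C=4, N=64, npoint=8, nsample=16):

features = torch.randn(2, 4, 64, requires_grad=True)
idx = torch.randint(0, 64, (2, 8, 16))
out = GroupingOperationSketch.apply(features, idx)  # (2, 4, 8, 16)
out.sum().backward()
print(features.grad.shape)  # torch.Size([2, 4, 64])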