def backward_pass(m, in_tensor, weight, bias=None):
        """Run the ConvolutionTranspose backward kernel and return the input-feature gradient.

        Args:
            m: module-like object carrying the kernel, kernel_generator,
               coordinate map keys and coordinate manager used by the forward pass.
            in_tensor: gradient w.r.t. the output features (made contiguous here).
            weight: unused; kept for a uniform backward-hook signature.
            bias: unused; kept for a uniform backward-hook signature.

        Returns:
            The detached gradient w.r.t. the input features.
        """
        grad_out_feat = in_tensor
        with torch.no_grad():
            # The C++/CUDA backend requires contiguous memory.
            if not grad_out_feat.is_contiguous():
                grad_out_feat = grad_out_feat.contiguous()

            bw_fn = get_minkowski_function('ConvolutionTransposeBackward',
                                           grad_out_feat)
            # grad_kernel is computed by the backend but intentionally unused here.
            grad_in_feat, grad_kernel = bw_fn(
                m.in_tensor, grad_out_feat, m.kernel,
                m.kernel_generator.kernel_size,
                m.kernel_generator.kernel_stride,
                m.kernel_generator.kernel_dilation,
                m.kernel_generator.region_type,
                m.kernel_generator.region_offsets, m.convolution_mode,
                m.in_coordinate_map_key, m.out_coordinate_map_key,
                m.coordinate_manager._manager)

            # BUG FIX: Tensor.detach() is not in-place; the original discarded
            # its result, so the returned tensor was never detached.
            grad_in_feat = grad_in_feat.detach()

            # Sanity check: the gradient must match the input-feature shape.
            # BUG FIX: the failure-message tuple previously called m.in_shape(),
            # yet the comparison treats in_shape as a plain value — the call
            # would have raised TypeError exactly when the assert fired.
            assert grad_in_feat.size() == m.in_shape, (grad_in_feat.size(),
                                                       m.in_shape,
                                                       m.out_shape,
                                                       m.in_coords.size())

            return grad_in_feat
    def backward_pass(m, tensor, weight):
        """Run the GlobalPooling backward kernel and return the detached input gradient.

        Args:
            m: module-like object holding num_nonzero, pooling_mode, the
               coordinate map keys and the coordinate manager.
            tensor: gradient w.r.t. the pooled output features.
            weight: unused; kept for a uniform backward-hook signature.

        Returns:
            The detached gradient w.r.t. the input features.
        """
        with torch.no_grad():
            # The backend expects contiguous memory (contiguous() is a no-op
            # when the tensor already is).
            out_grad = tensor.contiguous()

            backward_fn = get_minkowski_function('GlobalPoolingBackward',
                                                 out_grad)
            in_grad = backward_fn(m.in_tensor, out_grad, m.num_nonzero,
                                  m.pooling_mode, m.in_coordinate_map_key,
                                  m.out_coordinate_map_key,
                                  m.coordinate_manager._manager)
            return in_grad.detach()
    def forward_pass(m, in_tensor, weight, bias=None):
        """Run the GlobalPooling forward kernel and return the detached output.

        Caches the backend's num_nonzero on *m* for use by the matching
        backward pass.

        Args:
            m: module-like object holding pooling_mode, the coordinate map keys
               and the coordinate manager.
            in_tensor: input features (made contiguous here).
            weight: unused; kept for a uniform forward-hook signature.
            bias: unused; kept for a uniform forward-hook signature.

        Returns:
            The detached pooled output features.
        """
        with torch.no_grad():
            # The backend expects contiguous memory.
            features = in_tensor.contiguous()

            forward_fn = get_minkowski_function('GlobalPoolingForward',
                                                features)
            pooled, nonzero_counts = forward_fn(features, m.pooling_mode,
                                                m.in_coordinate_map_key,
                                                m.out_coordinate_map_key,
                                                m.coordinate_manager._manager)

            # Stash the per-output counts for the backward pass.
            setattr(m, "num_nonzero", nonzero_counts)

        return pooled.detach()
    def forward_pass(m, in_tensor, weight, bias=None, scale_groups=1):
        """Run the ConvolutionTranspose forward kernel and return the detached output.

        Args:
            m: module-like object carrying the kernel, kernel_generator,
               convolution_mode, coordinate map keys and coordinate manager.
            in_tensor: input features (made contiguous here).
            weight: unused; kept for a uniform forward-hook signature.
            bias: unused; kept for a uniform forward-hook signature.
            scale_groups: unused; kept for a uniform forward-hook signature.

        Returns:
            The detached output features.
        """
        input_features = in_tensor

        # The C++/CUDA backend requires contiguous memory.
        if not input_features.is_contiguous():
            input_features = input_features.contiguous()

        fw_fn = get_minkowski_function('ConvolutionTransposeForward',
                                       input_features)
        out_feat = fw_fn(
            input_features, m.kernel, m.kernel_generator.kernel_size,
            m.kernel_generator.kernel_stride,
            m.kernel_generator.kernel_dilation, m.kernel_generator.region_type,
            m.kernel_generator.region_offsets,
            m.kernel_generator.expand_coordinates, m.convolution_mode,
            m.in_coordinate_map_key, m.out_coordinate_map_key,
            m.coordinate_manager._manager)

        # BUG FIX: Tensor.detach() is not in-place; the original called it and
        # discarded the result, returning a tensor that was never detached.
        # Return the detached view, matching the sibling forward/backward passes.
        return out_feat.detach()