Example #1
import numpy as np
import scipy.sparse
import torch
from falkon.sparse.sparse_tensor import SparseTensor  # import paths may vary by falkon version
from falkon.utils.helpers import check_sparse

def test_check_sparse():
    smat = scipy.sparse.csr_matrix(
        np.array([[0, 1], [0, 1]]).astype(np.float32))
    st = SparseTensor.from_scipy(smat)

    assert [False, True] == check_sparse(torch.tensor(0), st)
    assert [] == check_sparse()
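
For reference, `check_sparse` returns one boolean per positional argument, True only when that argument is a falkon `SparseTensor`, which is exactly what the two asserts above exercise. A minimal sketch of that contract (a hypothetical reimplementation for illustration, not falkon's code; the import path is an assumption):

from falkon.sparse.sparse_tensor import SparseTensor  # assumed path; adjust to your falkon version

def check_sparse_sketch(*tensors):
    # One boolean per positional argument: True only for SparseTensor inputs.
    return [isinstance(t, SparseTensor) for t in tensors]

# check_sparse_sketch(torch.tensor(0), st)  -> [False, True]
# check_sparse_sketch()                     -> []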
Example #2
    def _decide_dmmv_impl(self, X1, X2, v, w, opt: FalkonOptions):
        """Choose which `dmmv` function to use for this data.

        Note that `dmmv` functions compute double kernel-vector products (see :meth:`dmmv` for
        an explanation of what they are).

        Parameters
        ----------
        X1 : torch.Tensor
            First data matrix, of shape (N x D)
        X2 : torch.Tensor
            Second data matrix, of shape (M x D)
        v : torch.Tensor or None
            Vector for the matrix-vector multiplication (M x T)
        w : torch.Tensor or None
            Vector for the matrix-vector multiplication (N x T)
        opt : FalkonOptions
            Falkon options. Options may be specified to force GPU or CPU usage.

        Returns
        -------
        dmmv_fn
            A function which performs the `dmmv` operation.

        Notes
        -----
        This function decides based on the inputs: if the inputs are sparse, it will choose
        the sparse implementations; if CUDA is detected, it will choose the CUDA implementation;
        otherwise it will simply choose the basic CPU implementation.
        """
        use_cuda = decide_cuda(opt)
        sparsity = check_sparse(X1, X2)
        if not all(sparsity) and any(sparsity):
            raise ValueError(
                "Either all or none of 'X1', 'X2' must be sparse.")
        if (X1.device.type == 'cuda') and (not use_cuda):
            warnings.warn(
                "kernel-vector double product backend was chosen to be CPU, but GPU "
                "input tensors found. Defaulting to use the GPU (note this may "
                "cause issues later). To force usage of the CPU backend, "
                "please pass CPU tensors; to avoid this warning if the GPU backend is "
                "desired, check your options (i.e. set 'use_cpu=False').")
            use_cuda = True
        sparsity = all(sparsity)
        if use_cuda:
            from falkon.mmv_ops.fmmv_cuda import fdmmv_cuda, fdmmv_cuda_sparse
            if sparsity:
                return fdmmv_cuda_sparse
            else:
                return fdmmv_cuda
        else:
            if sparsity:
                return fdmmv_cpu_sparse
            else:
                return fdmmv_cpu
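
The branching at the end of this method is a four-way choice keyed on two flags. Purely as an illustration of that selection logic (the parameter names below are placeholders, not falkon's API), it can be restated as a lookup table:

# Illustrative restatement of the dispatch above; the *_impl arguments stand in
# for the fdmmv_cpu / fdmmv_cpu_sparse / fdmmv_cuda / fdmmv_cuda_sparse functions.
def pick_impl(use_cuda, sparse, cpu_impl, cpu_sparse_impl, cuda_impl, cuda_sparse_impl):
    table = {
        (False, False): cpu_impl,
        (False, True): cpu_sparse_impl,
        (True, False): cuda_impl,
        (True, True): cuda_sparse_impl,
    }
    return table[(use_cuda, sparse)]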
Example #3
    def _decide_mm_impl(self, X1, X2, opt: FalkonOptions):
        """Choose which `mm` function to use for this data.

        Note that `mm` functions compute the kernel matrix itself, so **KeOps cannot be used**.

        Parameters
        ----------
        X1 : torch.Tensor
            First data matrix, of shape (N x D)
        X2 : torch.Tensor
            Second data matrix, of shape (M x D)
        opt : FalkonOptions
            Falkon options. Options may be specified to force GPU or CPU usage.

        Returns
        -------
        mm_fn
            A function which performs the `mm` operation.

        Notes
        -----
        This function decides based on the inputs: if the inputs are sparse, it will choose
        the sparse implementations; if CUDA is detected, it will choose the CUDA implementation;
        otherwise it will simply choose the basic CPU implementation.
        """
        use_cuda = decide_cuda(opt)
        sparsity = check_sparse(X1, X2)
        if not all(sparsity) and any(sparsity):
            raise ValueError(
                "Either all or none of 'X1', 'X2' must be sparse.")
        sparsity = all(sparsity)
        if (X1.device.type == 'cuda') and (not use_cuda):
            warnings.warn(
                "kernel backend was chosen to be CPU, but GPU input tensors found. "
                "Defaulting to use the GPU (note this may cause issues later). "
                "To force usage of the CPU backend, please pass CPU tensors; "
                "to avoid this warning if the GPU backend is "
                "desired, check your options (i.e. set 'use_cpu=False').")
            use_cuda = True
        if use_cuda:
            from falkon.mmv_ops.fmm_cuda import fmm_cuda, fmm_cuda_sparse
            if sparsity:
                return fmm_cuda_sparse
            else:
                return fmm_cuda
        else:
            if sparsity:
                return fmm_cpu_sparse
            else:
                return fmm_cpu
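
Both methods share the same device-mismatch rule: when the options request the CPU backend but the inputs are already CUDA tensors, the GPU backend wins and a warning is emitted. A standalone sketch of that pattern (a hypothetical helper, not part of falkon):

import warnings
import torch

def resolve_use_cuda(X1: torch.Tensor, use_cuda: bool) -> bool:
    # GPU inputs override a CPU backend request, mirroring the warning branch above.
    if X1.device.type == 'cuda' and not use_cuda:
        warnings.warn("GPU input tensors found but the CPU backend was requested; "
                      "defaulting to the GPU backend.")
        return True
    return use_cuda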
Example #4
    def _decide_dmmv_impl(self, X1, X2, v, w, opt: FalkonOptions):
        use_cuda = decide_cuda(opt)
        sparsity = check_sparse(X1, X2)
        if not all(sparsity) and any(sparsity):
            raise ValueError(
                "Either all or none of 'X1', 'X2' must be sparse.")
        sparsity = all(sparsity)
        if use_cuda:
            from falkon.mmv_ops.fmmv_cuda import fdmmv_cuda, fdmmv_cuda_sparse
            if sparsity:
                return fdmmv_cuda_sparse
            else:
                return fdmmv_cuda
        else:
            if sparsity:
                return fdmmv_cpu_sparse
            else:
                return fdmmv_cpu
Example #5
    def _decide_dmmv_impl(self, X1, X2, v, w, opt: FalkonOptions):
        use_cuda = decide_cuda(opt)
        sparsity = check_sparse(X1, X2)
        if not all(sparsity) and any(sparsity):
            raise ValueError("Either all or none of 'X1', 'X2' must be sparse.")
        if (X1.device.type == 'cuda') and (not use_cuda):
            warnings.warn("kernel-vector double product backend was chosen to be CPU, but GPU "
                          "input tensors found. Defaulting to use the GPU (note this may "
                          "cause issues later). To force usage of the CPU backend, "
                          "please pass CPU tensors; to avoid this warning if the GPU backend is "
                          "desired, check your options (i.e. set 'use_cpu=False').")
            use_cuda = True
        sparsity = all(sparsity)
        if use_cuda:
            from falkon.mmv_ops.fmmv_cuda import fdmmv_cuda, fdmmv_cuda_sparse
            if sparsity:
                return fdmmv_cuda_sparse
            else:
                return fdmmv_cuda
        else:
            if sparsity:
                return fdmmv_cpu_sparse
            else:
                return fdmmv_cpu
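
All five examples rely on the same all-or-none sparsity rule: a mixed sparse/dense input pair is rejected before any backend is chosen. A small sketch of just that check (plain Python, no falkon calls):

# Sketch of the all-or-none rule used by every _decide_*_impl variant above.
def validate_sparsity(sparsity):
    if not all(sparsity) and any(sparsity):
        raise ValueError("Either all or none of 'X1', 'X2' must be sparse.")
    return all(sparsity)

# validate_sparsity([False, False]) -> False  (both dense: dense kernels used)
# validate_sparsity([True, True])   -> True   (both sparse: sparse kernels used)
# validate_sparsity([True, False])  -> raises ValueError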