Example #1
def eye(N, dtype=np.float32):
    """
    Construct a 2D matrix with ones on the diagonal and zeros elsewhere.

    Constructs a matrix in device memory whose diagonal elements
    are set to 1 and non-diagonal elements are set to 0.

    Parameters
    ----------
    N : int
        Number of rows or columns in the output matrix.
    dtype : numpy data type
        Type of the output matrix elements (float32, float64, complex64,
        or complex128). Default: float32.

    Returns
    -------
    e_gpu : pycuda.gpuarray.GPUArray
        Diagonal matrix of dimensions `[N, N]` with diagonal values
        set to 1.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> N = 5
    >>> e_gpu = linalg.eye(N)
    >>> np.all(e_gpu.get() == np.eye(N))
    True
    >>> e_gpu = linalg.eye(N, np.complex64)
    >>> np.all(e_gpu.get() == np.eye(N, dtype=np.complex64))
    True

    """

    if dtype not in [np.float32, np.float64, np.complex64, np.complex128]:
        raise ValueError("unrecognized type")
    if N <= 0:
        raise ValueError("N must be greater than 0")

    use_double = int(dtype in [np.float64, np.complex128])
    use_complex = int(dtype in [np.complex64, np.complex128])

    # Initialize output matrix:
    e_gpu = misc.zeros((N, N), dtype)

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, e_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    eye_mod = SourceModule(eye_template.substitute(use_double=use_double,
                                                   use_complex=use_complex),
                           cache_dir=cache_dir)

    eye = eye_mod.get_function("eye")
    eye(e_gpu, np.uint32(N), block=block_dim, grid=grid_dim)

    return e_gpu
Example #2
def e1z(z_gpu):
    """
    Exponential integral with `n = 1` of complex arguments.

    Parameters
    ----------
    z_gpu : GPUArray
        Input matrix of shape `(m, n)`.
        
    Returns
    -------
    e_gpu : GPUArray
        GPUArray containing the exponential integrals of
        the entries of `z_gpu`.

    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
    >>> z_gpu = gpuarray.to_gpu(z)
    >>> e_gpu = special.e1z(z_gpu)
    >>> e_sp = scipy.special.exp1(z)
    >>> np.allclose(e_sp, e_gpu.get())
    True

    """

    if z_gpu.dtype == np.complex64:
        use_double = 0
    elif z_gpu.dtype == np.complex128:
        use_double = 1
    else:
        raise ValueError('unsupported type')

    
    # Get block/grid sizes; the number of threads per block is limited
    # to 256 because the e1z kernel defined above uses too many
    # registers to be invoked with more threads per block:
    dev = get_current_device()
    max_threads_per_block = 256
    block_dim, grid_dim = select_block_grid_sizes(dev, z_gpu.shape, max_threads_per_block)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    e1z_mod = SourceModule(e1z_mod_template.substitute(use_double=use_double),
                           cache_dir=cache_dir)
    e1z_func = e1z_mod.get_function("e1z")

    e_gpu = gpuarray.empty_like(z_gpu)
    e1z_func(z_gpu, e_gpu,
             np.uint32(z_gpu.size),
             block=block_dim,
             grid=grid_dim)
    return e_gpu
Example #3
    def __init__(self, shape, in_dtype, out_dtype, batch=1, stream=None,
                 mode=0x01):
        """
        Set up a CUFFT plan for the given transform shape, input/output
        types, and batch size, optionally tied to a CUDA stream and an
        FFTW compatibility mode.
        """

        if np.isscalar(shape):
            self.shape = (shape, )
        else:
            self.shape = shape

        self.in_dtype = in_dtype
        self.out_dtype = out_dtype

        if batch <= 0:
            raise ValueError('batch size must be greater than 0')
        self.batch = batch

        # Determine type of transformation:
        if in_dtype == np.float32 and out_dtype == np.complex64:
            self.fft_type = cufft.CUFFT_R2C
            self.fft_func = cufft.cufftExecR2C
        elif in_dtype == np.complex64 and out_dtype == np.float32:
            self.fft_type = cufft.CUFFT_C2R
            self.fft_func = cufft.cufftExecC2R
        elif in_dtype == np.complex64 and out_dtype == np.complex64:
            self.fft_type = cufft.CUFFT_C2C
            self.fft_func = cufft.cufftExecC2C
        elif in_dtype == np.float64 and out_dtype == np.complex128:
            self.fft_type = cufft.CUFFT_D2Z
            self.fft_func = cufft.cufftExecD2Z
        elif in_dtype == np.complex128 and out_dtype == np.float64:
            self.fft_type = cufft.CUFFT_Z2D
            self.fft_func = cufft.cufftExecZ2D
        elif in_dtype == np.complex128 and out_dtype == np.complex128:
            self.fft_type = cufft.CUFFT_Z2Z
            self.fft_func = cufft.cufftExecZ2Z
        else:
            raise ValueError('unsupported input/output type combination')

        # Check for double precision support:
        capability = misc.get_compute_capability(misc.get_current_device())
        if capability < 1.3 and \
           (misc.isdoubletype(in_dtype) or misc.isdoubletype(out_dtype)):
            raise RuntimeError('double precision requires compute capability '
                               '>= 1.3 (you have %g)' % capability)

        # Set up plan:
        if len(self.shape) > 0:
            n = np.asarray(self.shape, np.int32)
            self.handle = cufft.cufftPlanMany(len(self.shape), n.ctypes.data,
                                              None, 1, 0, None, 1, 0,
                                              self.fft_type, self.batch)
        else:
            raise ValueError('invalid transform size')

        # Set FFTW compatibility mode:
        cufft.cufftSetCompatibilityMode(self.handle, mode)

        # Associate stream with plan:
        if stream is not None:
            cufft.cufftSetStream(self.handle, stream.handle)
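
The snippet above ships without a usage example, so here is a minimal sketch. It assumes this `__init__` belongs to a cuFFT `Plan` wrapper exposed as `scikits.cuda.fft.Plan`, together with an `fft()` helper that executes a plan; both names are assumptions inferred from the `cufft` calls above:

import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from scikits.cuda.fft import Plan, fft

x = np.random.rand(8, 8).astype(np.float32)
x_gpu = gpuarray.to_gpu(x)

# A real-to-complex transform stores only the non-redundant half spectrum:
xf_gpu = gpuarray.empty((8, 8 // 2 + 1), np.complex64)

plan = Plan(x.shape, np.float32, np.complex64)  # selects CUFFT_R2C above
fft(x_gpu, xf_gpu, plan)
np.allclose(np.fft.rfftn(x), xf_gpu.get(), atol=1e-4)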
Example #4
def gen_trapz2d_mult(mat_shape, mult_type):
    """
    Generate multiplication matrix for 2D trapezoidal integration.

    Generates a matrix whose dot product with some other matrix of
    equal length (when flattened) is equivalent to the definite double
    integral of the latter computed using trapezoidal integration.

    Parameters
    ----------
    mat_shape : tuple
        Shape of matrix.
    mult_type : numpy data type
        Floating point or complex type to use when generating the array.

    Returns
    -------
    result : pycuda.gpuarray.GPUArray
        Generated matrix.

    """

    if mult_type not in [np.float32, np.float64, np.complex64,
                         np.complex128]:
        raise ValueError('unrecognized type')
    
    use_double = int(mult_type in [np.float64, np.complex128])
    use_complex = int(mult_type in [np.complex64, np.complex128])

    # Allocate output matrix:
    Ny, Nx = mat_shape
    mult_gpu = gpuarray.empty(mat_shape, mult_type)

    # Get block/grid sizes:
    dev = get_current_device()
    block_dim, grid_dim = select_block_grid_sizes(dev, mat_shape)
    
    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    gen_trapz2d_mult_mod = SourceModule(
        gen_trapz2d_mult_template.substitute(use_double=use_double,
                                             use_complex=use_complex),
        cache_dir=cache_dir)

    gen_trapz2d_mult = gen_trapz2d_mult_mod.get_function("gen_trapz2d_mult")
    gen_trapz2d_mult(mult_gpu, np.uint32(Ny), np.uint32(Nx),
                     block=block_dim,
                     grid=grid_dim)
    
    return mult_gpu
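
The generated matrix holds the 2D trapezoidal weights (1 in the interior, 1/2 along the edges, 1/4 at the corners), so integration reduces to a dot product of flattened arrays. A minimal usage sketch with unit grid spacing, checked on the host against numpy's trapezoidal rule:

import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray

x = np.asarray(np.random.rand(5, 8), np.float32)
x_gpu = gpuarray.to_gpu(x)

# Weight matrix matching the shape of the data:
w_gpu = gen_trapz2d_mult(x.shape, np.float32)

# Flattened inner product of data and weights = 2D trapezoidal integral
# (computed on the host here; a CUBLAS dot would keep it on the device):
integral = np.dot(x_gpu.get().ravel(), w_gpu.get().ravel())
expected = np.trapz(np.trapz(x))  # trapezoidal rule along both axes
np.allclose(integral, expected)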
Example #5
def gen_trapz2d_mult(mat_shape, dtype):
    """
    Generate multiplication matrix for 2D trapezoidal integration.

    Generates a matrix whose dot product with some other matrix of
    equal length (when flattened) is equivalent to the definite double
    integral of the latter computed using trapezoidal integration.

    Parameters
    ----------
    mat_shape : tuple
        Shape of matrix.
    dtype : numpy data type
        Floating point or complex type to use when generating the array.

    Returns
    -------
    result : pycuda.gpuarray.GPUArray
        Generated matrix.
    """

    if dtype not in [np.float32, np.float64, np.complex64, np.complex128]:
        raise ValueError('unrecognized type')

    use_double = int(dtype in [np.float64, np.complex128])
    use_complex = int(dtype in [np.complex64, np.complex128])

    # Allocate output matrix:
    Ny, Nx = mat_shape
    mult_gpu = gpuarray.empty(mat_shape, dtype)

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, mat_shape)
    gen_trapz2d_mult = _get_trapz2d_mult_kernel(use_double, use_complex)
    gen_trapz2d_mult(mult_gpu,
                     np.uint32(Ny),
                     np.uint32(Nx),
                     block=block_dim,
                     grid=grid_dim)

    return mult_gpu
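
Unlike the previous version, this one obtains the kernel from a `_get_trapz2d_mult_kernel` helper that is not shown in the snippet. A hedged sketch of what such a caching helper might look like; only the name and signature come from the code above, and the memoization strategy is an assumption:

from functools import lru_cache

@lru_cache(maxsize=None)
def _get_trapz2d_mult_kernel(use_double, use_complex):
    # Compile once per (use_double, use_complex) combination and reuse
    # the compiled kernel on later calls:
    mod = SourceModule(
        gen_trapz2d_mult_template.substitute(use_double=use_double,
                                             use_complex=use_complex))
    return mod.get_function("gen_trapz2d_mult")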
Example #6
def gen_trapz2d_mult(mat_shape, dtype):
    """
    Generate multiplication matrix for 2D trapezoidal integration.

    Generates a matrix whose dot product with some other matrix of
    equal length (when flattened) is equivalent to the definite double
    integral of the latter computed using trapezoidal integration.

    Parameters
    ----------
    mat_shape : tuple
        Shape of matrix.
    dtype : numpy data type
        Floating point or complex type to use when generating the array.

    Returns
    -------
    result : pycuda.gpuarray.GPUArray
        Generated matrix.
    """

    if dtype not in [np.float32, np.float64, np.complex64,
                     np.complex128]:
        raise ValueError('unrecognized type')

    use_double = int(dtype in [np.float64, np.complex128])
    use_complex = int(dtype in [np.complex64, np.complex128])

    # Allocate output matrix:
    Ny, Nx = mat_shape
    mult_gpu = gpuarray.empty(mat_shape, dtype)

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, mat_shape)
    gen_trapz2d_mult = _get_trapz2d_mult_kernel(use_double, use_complex)
    gen_trapz2d_mult(mult_gpu, np.uint32(Ny), np.uint32(Nx),
                     block=block_dim,
                     grid=grid_dim)

    return mult_gpu
Example #7
def hermitian(a_gpu):
    """
    Hermitian (conjugate) matrix transpose.

    Conjugate transpose a matrix in device memory and return an object
    representing the transposed matrix.

    Parameters
    ----------
    a_gpu : pycuda.gpuarray.GPUArray
        Input matrix of shape `(m, n)`.

    Returns
    -------
    at_gpu : pycuda.gpuarray.GPUArray
        Transposed matrix of shape `(n, m)`.

    Examples
    --------
    >>> import pycuda.autoinit
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> a = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], np.float32)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> at_gpu = linalg.hermitian(a_gpu)
    >>> np.all(a.T == at_gpu.get())
    True
    >>> b = np.array([[1j, 2j, 3j, 4j, 5j, 6j], [7j, 8j, 9j, 10j, 11j, 12j]], np.complex64)
    >>> b_gpu = gpuarray.to_gpu(b)
    >>> bt_gpu = linalg.hermitian(b_gpu)
    >>> np.all(np.conj(b.T) == bt_gpu.get())
    True

    """

    if a_gpu.dtype not in [np.float32, np.float64, np.complex64,
                           np.complex128]:
        raise ValueError('unrecognized type')

    use_double = int(a_gpu.dtype in [np.float64, np.complex128])
    use_complex = int(a_gpu.dtype in [np.complex64, np.complex128])

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    transpose_mod = SourceModule(
        transpose_template.substitute(use_double=use_double,
                                      use_complex=use_complex,
                                      hermitian=1,
                                      cols=a_gpu.shape[1],
                                      rows=a_gpu.shape[0]),
        cache_dir=cache_dir)

    transpose = transpose_mod.get_function("transpose")
    at_gpu = gpuarray.empty(a_gpu.shape[::-1], a_gpu.dtype)
    transpose(at_gpu, a_gpu, np.uint32(a_gpu.size),
              block=block_dim,
              grid=grid_dim)

    return at_gpu
Example #8
def conj(a_gpu, overwrite=True):
    """
    Complex conjugate.

    Compute the complex conjugate of the array in device memory.

    Parameters
    ----------
    a_gpu : pycuda.gpuarray.GPUArray
        Input array of shape `(m, n)`.
    overwrite : bool
        If true (default), save the result in the specified array.
        If false, return the result in a newly allocated array.

    Returns
    -------
    ac_gpu : pycuda.gpuarray.GPUArray
        Conjugate of the input array. If `overwrite` is true, the
        returned matrix is the same as the input array.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> a = np.array([[1+1j, 2-2j, 3+3j, 4-4j], [5+5j, 6-6j, 7+7j, 8-8j]], np.complex64)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> a_gpu = linalg.conj(a_gpu)
    >>> np.all(a == np.conj(a_gpu.get()))
    True

    """

    # Real arrays are their own conjugates; return the input unchanged:
    if a_gpu.dtype in [np.float32, np.float64]:
        return a_gpu

    if a_gpu.dtype == np.complex64:
        use_double = 0
    elif a_gpu.dtype == np.complex128:
        use_double = 1
    else:
        raise ValueError('unsupported type')

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    conj_mod = SourceModule(conj_template.substitute(use_double=use_double),
                            cache_dir=cache_dir)

    if overwrite:
        conj_inplace = conj_mod.get_function("conj_inplace")
        conj_inplace(a_gpu, np.uint32(a_gpu.size),
                     block=block_dim,
                     grid=grid_dim)
        return a_gpu
    else:
        conj = conj_mod.get_function("conj")
        ac_gpu = gpuarray.empty_like(a_gpu)
        conj(a_gpu, ac_gpu, np.uint32(a_gpu.size),
             block=block_dim,
             grid=grid_dim)
        return ac_gpu
Example #9
def tril(a_gpu, overwrite=True, handle=None):
    """
    Lower triangle of a matrix.

    Return the lower triangle of a square matrix.

    Parameters
    ----------
    a_gpu : pycuda.gpuarray.GPUArray
        Input matrix of shape `(m, m)`
    overwrite : boolean
        If true (default), zero out the upper triangle of the matrix.
        If false, return the result in a newly allocated matrix.
    handle : int
        CUBLAS context. If no context is specified, the default handle from
        `scikits.cuda.misc._global_cublas_handle` is used.

    Returns
    -------
    l_gpu : pycuda.gpuarray.GPUArray
        The lower triangle of the original matrix.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> a = np.asarray(np.random.rand(4, 4), np.float32)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> l_gpu = linalg.tril(a_gpu, False)
    >>> np.allclose(np.tril(a), l_gpu.get())
    True

    """

    if handle is None:
        handle = misc._global_cublas_handle
        
    if len(a_gpu.shape) != 2 or a_gpu.shape[0] != a_gpu.shape[1]:
        raise ValueError('matrix must be square')

    if a_gpu.dtype == np.float32:
        swap_func = cublas.cublasSswap
        copy_func = cublas.cublasScopy
        use_double = 0
        use_complex = 0
    elif a_gpu.dtype == np.float64:
        swap_func = cublas.cublasDswap
        copy_func = cublas.cublasDcopy
        use_double = 1
        use_complex = 0
    elif a_gpu.dtype == np.complex64:
        swap_func = cublas.cublasCswap
        copy_func = cublas.cublasCcopy
        use_double = 0
        use_complex = 1
    elif a_gpu.dtype == np.complex128:
        swap_func = cublas.cublasZswap
        copy_func = cublas.cublasZcopy
        use_double = 1
        use_complex = 1
    else:
        raise ValueError('unrecognized type')

    N = a_gpu.shape[0]

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    tril_mod = SourceModule(tril_template.substitute(use_double=use_double,
                                                     use_complex=use_complex,
                                                     cols=N),
                            cache_dir=cache_dir)
    tril = tril_mod.get_function("tril")

    if not overwrite:
        a_orig_gpu = gpuarray.empty(a_gpu.shape, a_gpu.dtype)
        copy_func(handle, a_gpu.size, int(a_gpu.gpudata), 1, int(a_orig_gpu.gpudata), 1)

    tril(a_gpu, np.uint32(a_gpu.size),
         block=block_dim,
         grid=grid_dim)

    if overwrite:
        return a_gpu
    else:

        # Restore original contents of a_gpu:
        swap_func(handle, a_gpu.size, int(a_gpu.gpudata), 1, int(a_orig_gpu.gpudata), 1)
        return a_orig_gpu
Example #10
def multiply(x_gpu, y_gpu, overwrite=True):
    """
    Multiply arguments element-wise.

    Parameters
    ----------
    x_gpu, y_gpu : pycuda.gpuarray.GPUArray
        Input arrays to be multiplied.
    overwrite : bool
        If true (default), return the result in `y_gpu`.
        If false, return the result in a newly allocated array.

    Returns
    -------
    z_gpu : pycuda.gpuarray.GPUArray
        The element-wise product of the input arrays.

    Examples
    --------
    >>> import pycuda.autoinit
    >>> import pycuda.gpuarray as gpuarray
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> x = np.asarray(np.random.rand(4, 4), np.float32)
    >>> y = np.asarray(np.random.rand(4, 4), np.float32)
    >>> x_gpu = gpuarray.to_gpu(x)
    >>> y_gpu = gpuarray.to_gpu(y)
    >>> z_gpu = linalg.multiply(x_gpu, y_gpu)
    >>> np.allclose(x*y, z_gpu.get())
    True

    """

    if x_gpu.shape != y_gpu.shape:
        raise ValueError('input arrays must have the same shape')

    if x_gpu.dtype not in [np.float32, np.float64, np.complex64,
                           np.complex128]:
        raise ValueError('unrecognized type')

    use_double = int(x_gpu.dtype in [np.float64, np.complex128])
    use_complex = int(x_gpu.dtype in [np.complex64, np.complex128])

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, x_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    multiply_mod = SourceModule(
        multiply_template.substitute(use_double=use_double,
                                     use_complex=use_complex),
        cache_dir=cache_dir)
    if overwrite:
        multiply = multiply_mod.get_function("multiply_inplace")
        multiply(x_gpu, y_gpu, np.uint32(x_gpu.size),
                 block=block_dim,
                 grid=grid_dim)
        return y_gpu
    else:
        multiply = multiply_mod.get_function("multiply")
        z_gpu = gpuarray.empty(x_gpu.shape, x_gpu.dtype)
        multiply(x_gpu, y_gpu, z_gpu, np.uint32(x_gpu.size),
                 block=block_dim,
                 grid=grid_dim)
        return z_gpu
Example #11
def pinv(a_gpu, rcond=1e-15):
    """
    Moore-Penrose pseudoinverse.

    Compute the Moore-Penrose pseudoinverse of the specified matrix.

    Parameters
    ----------
    a_gpu : pycuda.gpuarray.GPUArray
        Input matrix of shape `(m, n)`.
    rcond : float
        Singular values smaller than `rcond * max(singular_values)`
        are set to zero.

    Returns
    -------
    a_inv_gpu : pycuda.gpuarray.GPUArray
        Pseudoinverse of input matrix.

    Notes
    -----
    Double precision is only supported if the standard version of the
    CULA Dense toolkit is installed.

    This function destroys the contents of the input matrix.

    If the input matrix is square, the pseudoinverse uses less memory.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> a = np.asarray(np.random.rand(8, 4), np.float32)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> a_inv_gpu = linalg.pinv(a_gpu)
    >>> np.allclose(np.linalg.pinv(a), a_inv_gpu.get(), 1e-4)
    True
    >>> b = np.asarray(np.random.rand(8, 4)+1j*np.random.rand(8, 4), np.complex64)
    >>> b_gpu = gpuarray.to_gpu(b)
    >>> b_inv_gpu = linalg.pinv(b_gpu)
    >>> np.allclose(np.linalg.pinv(b), b_inv_gpu.get(), 1e-4)
    True

    """

    if not _has_cula:
        raise NotImplementedError('CULA not installed')

    # Perform in-place SVD if the matrix is square to save memory:
    if a_gpu.shape[0] == a_gpu.shape[1]:
        u_gpu, s_gpu, vh_gpu = svd(a_gpu, 's', 'o')
    else:
        u_gpu, s_gpu, vh_gpu = svd(a_gpu, 's', 's')

    # Get block/grid sizes; the number of threads per block is limited
    # to 512 because the cutoff_invert_s kernel defined above uses too
    # many registers to be invoked in 1024 threads per block (i.e., on
    # GPUs with compute capability >= 2.x):
    dev = misc.get_current_device()
    max_threads_per_block = 512
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, s_gpu.shape,
                                                       max_threads_per_block)

    # Suppress very small singular values:
    use_double = 1 if s_gpu.dtype == np.float64 else 0
    cutoff_invert_s_mod = \
        SourceModule(cutoff_invert_s_template.substitute(use_double=use_double))
    cutoff_invert_s = cutoff_invert_s_mod.get_function('cutoff_invert_s')
    cutoff_gpu = gpuarray.max(s_gpu) * rcond
    cutoff_invert_s(s_gpu,
                    cutoff_gpu,
                    np.uint32(s_gpu.size),
                    block=block_dim,
                    grid=grid_dim)

    # Compute the pseudoinverse without allocating a new diagonal matrix:
    return dot(vh_gpu, dot_diag(s_gpu, u_gpu, 't'), 'c', 'c')
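
The last line assembles the pseudoinverse from the SVD factors without materializing a diagonal matrix. In standard notation this is the textbook identity (stated here for reference; only the cutoff rule comes from the code above):

    A = U \Sigma V^H \quad\Longrightarrow\quad A^+ = V \Sigma^+ U^H,
    \qquad
    \Sigma^+_{ii} =
    \begin{cases}
    1/\sigma_i, & \sigma_i > \mathrm{rcond} \cdot \max_j \sigma_j \\
    0, & \text{otherwise,}
    \end{cases}

where the `cutoff_invert_s` kernel computes \Sigma^+ in place on `s_gpu` and the `'c'` flags passed to `dot` supply the two conjugate transposes.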
Example #12
def pinv(a_gpu, rcond=1e-15):
    """
    Moore-Penrose pseudoinverse.

    Compute the Moore-Penrose pseudoinverse of the specified matrix.

    Parameters
    ----------
    a_gpu : pycuda.gpuarray.GPUArray
        Input matrix of shape `(m, n)`.
    rcond : float
        Singular values smaller than `rcond * max(singular_values)`
        are set to zero.

    Returns
    -------
    a_inv_gpu : pycuda.gpuarray.GPUArray
        Pseudoinverse of input matrix.

    Notes
    -----
    Double precision is only supported if the standard version of the
    CULA Dense toolkit is installed.

    This function destroys the contents of the input matrix.

    If the input matrix is square, the pseudoinverse uses less memory.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> a = np.asarray(np.random.rand(8, 4), np.float32)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> a_inv_gpu = linalg.pinv(a_gpu)
    >>> np.allclose(np.linalg.pinv(a), a_inv_gpu.get(), 1e-4)
    True
    >>> b = np.asarray(np.random.rand(8, 4)+1j*np.random.rand(8, 4), np.complex64)
    >>> b_gpu = gpuarray.to_gpu(b)
    >>> b_inv_gpu = linalg.pinv(b_gpu)
    >>> np.allclose(np.linalg.pinv(b), b_inv_gpu.get(), 1e-4)
    True

    """

    if not _has_cula:
        raise NotImplementedError('CULA not installed')

    # Perform in-place SVD if the matrix is square to save memory:
    if a_gpu.shape[0] == a_gpu.shape[1]:
        u_gpu, s_gpu, vh_gpu = svd(a_gpu, 's', 'o')
    else:
        u_gpu, s_gpu, vh_gpu = svd(a_gpu, 's', 's')

    # Get block/grid sizes; the number of threads per block is limited
    # to 512 because the cutoff_invert_s kernel defined above uses too
    # many registers to be invoked in 1024 threads per block (i.e., on
    # GPUs with compute capability >= 2.x):
    dev = misc.get_current_device()
    max_threads_per_block = 512
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, s_gpu.shape, max_threads_per_block)

    # Suppress very small singular values:
    use_double = 1 if s_gpu.dtype == np.float64 else 0
    cutoff_invert_s_mod = \
        SourceModule(cutoff_invert_s_template.substitute(use_double=use_double))
    cutoff_invert_s = cutoff_invert_s_mod.get_function('cutoff_invert_s')
    cutoff_gpu = gpuarray.max(s_gpu)*rcond
    cutoff_invert_s(s_gpu, cutoff_gpu,
                    np.uint32(s_gpu.size),
                    block=block_dim, grid=grid_dim)

    # Compute the pseudoinverse without allocating a new diagonal matrix:
    return dot(vh_gpu, dot_diag(s_gpu, u_gpu, 't'), 'c', 'c')
Example #13
def e1z(z_gpu):
    """
    Exponential integral with `n = 1` of complex arguments.

    Parameters
    ----------
    z_gpu : GPUArray
        Input matrix of shape `(m, n)`.
        
    Returns
    -------
    e_gpu : GPUArray
        GPUArray containing the exponential integrals of
        the entries of `z_gpu`.

    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
    >>> z_gpu = gpuarray.to_gpu(z)
    >>> e_gpu = special.e1z(z_gpu)
    >>> e_sp = scipy.special.exp1(z)
    >>> np.allclose(e_sp, e_gpu.get())
    True

    """

    if z_gpu.dtype == np.complex64:
        use_double = 0
    elif z_gpu.dtype == np.complex128:
        use_double = 1
    else:
        raise ValueError('unsupported type')

    # Get block/grid sizes; the number of threads per block is limited
    # to 256 because the e1z kernel defined above uses too many
    # registers to be invoked with more threads per block:
    dev = get_current_device()
    max_threads_per_block = 256
    block_dim, grid_dim = select_block_grid_sizes(dev, z_gpu.shape,
                                                  max_threads_per_block)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    e1z_mod = SourceModule(e1z_mod_template.substitute(use_double=use_double),
                           cache_dir=cache_dir)
    e1z_func = e1z_mod.get_function("e1z")

    e_gpu = gpuarray.empty_like(z_gpu)
    e1z_func(z_gpu,
             e_gpu,
             np.uint32(z_gpu.size),
             block=block_dim,
             grid=grid_dim)
    return e_gpu
Example #14
def diag(v_gpu):
    """
    Construct a diagonal matrix.

    Constructs a matrix in device memory whose diagonal elements
    correspond to the elements in the specified array; all
    non-diagonal elements are set to 0.

    Parameters
    ----------
    v_gpu : pycuda.gpuarray.GPUArray
        Input array of length `n`.

    Returns
    -------
    d_gpu : pycuda.gpuarray.GPUArray
        Diagonal matrix of dimensions `[n, n]`.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> v = np.array([1, 2, 3, 4, 5, 6], np.float32)
    >>> v_gpu = gpuarray.to_gpu(v)
    >>> d_gpu = linalg.diag(v_gpu)
    >>> np.all(d_gpu.get() == np.diag(v))
    True
    >>> v = np.array([1j, 2j, 3j, 4j, 5j, 6j], np.complex64)
    >>> v_gpu = gpuarray.to_gpu(v)
    >>> d_gpu = linalg.diag(v_gpu)
    >>> np.all(d_gpu.get() == np.diag(v))
    True

    """

    if v_gpu.dtype not in [
            np.float32, np.float64, np.complex64, np.complex128
    ]:
        raise ValueError('unrecognized type')

    if len(v_gpu.shape) > 1:
        raise ValueError('input array cannot be multidimensional')

    use_double = int(v_gpu.dtype in [np.float64, np.complex128])
    use_complex = int(v_gpu.dtype in [np.complex64, np.complex128])

    # Initialize output matrix:
    d_gpu = misc.zeros((v_gpu.size, v_gpu.size), v_gpu.dtype)

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, d_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    diag_mod = SourceModule(diag_template.substitute(use_double=use_double,
                                                     use_complex=use_complex),
                            cache_dir=cache_dir)

    diag = diag_mod.get_function("diag")
    diag(v_gpu, d_gpu, np.uint32(v_gpu.size), block=block_dim, grid=grid_dim)

    return d_gpu
Example #15
def multiply(x_gpu, y_gpu, overwrite=True):
    """
    Multiply arguments element-wise.

    Parameters
    ----------
    x_gpu, y_gpu : pycuda.gpuarray.GPUArray
        Input arrays to be multiplied.
    overwrite : bool
        If true (default), return the result in `y_gpu`.
        If false, return the result in a newly allocated array.

    Returns
    -------
    z_gpu : pycuda.gpuarray.GPUArray
        The element-wise product of the input arrays.

    Examples
    --------
    >>> import pycuda.autoinit
    >>> import pycuda.gpuarray as gpuarray
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> x = np.asarray(np.random.rand(4, 4), np.float32)
    >>> y = np.asarray(np.random.rand(4, 4), np.float32)
    >>> x_gpu = gpuarray.to_gpu(x)
    >>> y_gpu = gpuarray.to_gpu(y)
    >>> z_gpu = linalg.multiply(x_gpu, y_gpu)
    >>> np.allclose(x*y, z_gpu.get())
    True

    """

    if x_gpu.shape != y_gpu.shape:
        raise ValueError('input arrays must have the same shape')

    if x_gpu.dtype not in [
            np.float32, np.float64, np.complex64, np.complex128
    ]:
        raise ValueError('unrecognized type')

    use_double = int(x_gpu.dtype in [np.float64, np.complex128])
    use_complex = int(x_gpu.dtype in [np.complex64, np.complex128])

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, x_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    multiply_mod = SourceModule(
        multiply_template.substitute(use_double=use_double,
                                     use_complex=use_complex),
        cache_dir=cache_dir)
    if overwrite:
        multiply = multiply_mod.get_function("multiply_inplace")
        multiply(x_gpu,
                 y_gpu,
                 np.uint32(x_gpu.size),
                 block=block_dim,
                 grid=grid_dim)
        return y_gpu
    else:
        multiply = multiply_mod.get_function("multiply")
        z_gpu = gpuarray.empty(x_gpu.shape, x_gpu.dtype)
        multiply(x_gpu,
                 y_gpu,
                 z_gpu,
                 np.uint32(x_gpu.size),
                 block=block_dim,
                 grid=grid_dim)
        return z_gpu
Example #16
def tril(a_gpu, overwrite=True, handle=None):
    """
    Lower triangle of a matrix.

    Return the lower triangle of a square matrix.

    Parameters
    ----------
    a_gpu : pycuda.gpuarray.GPUArray
        Input matrix of shape `(m, m)`
    overwrite : boolean
        If true (default), zero out the upper triangle of the matrix.
        If false, return the result in a newly allocated matrix.
    handle : int
        CUBLAS context. If no context is specified, the default handle from
        `scikits.cuda.misc._global_cublas_handle` is used.

    Returns
    -------
    l_gpu : pycuda.gpuarray.GPUArray
        The lower triangle of the original matrix.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> a = np.asarray(np.random.rand(4, 4), np.float32)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> l_gpu = linalg.tril(a_gpu, False)
    >>> np.allclose(np.tril(a), l_gpu.get())
    True

    """

    if handle is None:
        handle = misc._global_cublas_handle

    if len(a_gpu.shape) != 2 or a_gpu.shape[0] != a_gpu.shape[1]:
        raise ValueError('matrix must be square')

    if a_gpu.dtype == np.float32:
        swap_func = cublas.cublasSswap
        copy_func = cublas.cublasScopy
        use_double = 0
        use_complex = 0
    elif a_gpu.dtype == np.float64:
        swap_func = cublas.cublasDswap
        copy_func = cublas.cublasDcopy
        use_double = 1
        use_complex = 0
    elif a_gpu.dtype == np.complex64:
        swap_func = cublas.cublasCswap
        copy_func = cublas.cublasCcopy
        use_double = 0
        use_complex = 1
    elif a_gpu.dtype == np.complex128:
        swap_func = cublas.cublasZswap
        copy_func = cublas.cublasZcopy
        use_double = 1
        use_complex = 1
    else:
        raise ValueError('unrecognized type')

    N = a_gpu.shape[0]

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    tril_mod = SourceModule(tril_template.substitute(use_double=use_double,
                                                     use_complex=use_complex,
                                                     cols=N),
                            cache_dir=cache_dir)
    tril = tril_mod.get_function("tril")

    if not overwrite:
        a_orig_gpu = gpuarray.empty(a_gpu.shape, a_gpu.dtype)
        copy_func(handle, a_gpu.size, int(a_gpu.gpudata), 1,
                  int(a_orig_gpu.gpudata), 1)

    tril(a_gpu, np.uint32(a_gpu.size), block=block_dim, grid=grid_dim)

    if overwrite:
        return a_gpu
    else:

        # Restore original contents of a_gpu:
        swap_func(handle, a_gpu.size, int(a_gpu.gpudata), 1,
                  int(a_orig_gpu.gpudata), 1)
        return a_orig_gpu
Example #17
def sici(x_gpu):
    """
    Sine/Cosine integral.

    Computes the sine and cosine integral of every element in the
    input matrix.

    Parameters
    ----------
    x_gpu : GPUArray
        Input matrix of shape `(m, n)`.
        
    Returns
    -------
    (si_gpu, ci_gpu) : tuple of GPUArrays
        Tuple of GPUArrays containing the sine integrals and cosine
        integrals of the entries of `x_gpu`.
        
    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> x = np.array([[1, 2], [3, 4]], np.float32)
    >>> x_gpu = gpuarray.to_gpu(x)
    >>> (si_gpu, ci_gpu) = special.sici(x_gpu)
    >>> (si, ci) = scipy.special.sici(x)
    >>> np.allclose(si, si_gpu.get())
    True
    >>> np.allclose(ci, ci_gpu.get())
    True
    
    """

    if x_gpu.dtype == np.float32:
        use_double = 0
    elif x_gpu.dtype == np.float64:
        use_double = 1
    else:
        raise ValueError('unsupported type')
    
    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, x_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    sici_mod = SourceModule(sici_template.substitute(use_double=use_double),
                            cache_dir=cache_dir,
                            options=["-I", install_headers])
    sici_func = sici_mod.get_function("sici_array")

    si_gpu = gpuarray.empty_like(x_gpu)
    ci_gpu = gpuarray.empty_like(x_gpu)
    sici_func(x_gpu, si_gpu, ci_gpu,
              np.uint32(x_gpu.size),
              block=block_dim,
              grid=grid_dim)
    return (si_gpu, ci_gpu)
Example #18
def hermitian(a_gpu):
    """
    Hermitian (conjugate) matrix transpose.

    Conjugate transpose a matrix in device memory and return an object
    representing the transposed matrix.

    Parameters
    ----------
    a_gpu : pycuda.gpuarray.GPUArray
        Input matrix of shape `(m, n)`.

    Returns
    -------
    at_gpu : pycuda.gpuarray.GPUArray
        Transposed matrix of shape `(n, m)`.

    Examples
    --------
    >>> import pycuda.autoinit
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> a = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], np.float32)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> at_gpu = linalg.hermitian(a_gpu)
    >>> np.all(a.T == at_gpu.get())
    True
    >>> b = np.array([[1j, 2j, 3j, 4j, 5j, 6j], [7j, 8j, 9j, 10j, 11j, 12j]], np.complex64)
    >>> b_gpu = gpuarray.to_gpu(b)
    >>> bt_gpu = linalg.hermitian(b_gpu)
    >>> np.all(np.conj(b.T) == bt_gpu.get())
    True

    """

    if a_gpu.dtype not in [
            np.float32, np.float64, np.complex64, np.complex128
    ]:
        raise ValueError('unrecognized type')

    use_double = int(a_gpu.dtype in [np.float64, np.complex128])
    use_complex = int(a_gpu.dtype in [np.complex64, np.complex128])

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    transpose_mod = SourceModule(
        transpose_template.substitute(use_double=use_double,
                                      use_complex=use_complex,
                                      hermitian=1,
                                      cols=a_gpu.shape[1],
                                      rows=a_gpu.shape[0]),
        cache_dir=cache_dir)

    transpose = transpose_mod.get_function("transpose")
    at_gpu = gpuarray.empty(a_gpu.shape[::-1], a_gpu.dtype)
    transpose(at_gpu,
              a_gpu,
              np.uint32(a_gpu.size),
              block=block_dim,
              grid=grid_dim)

    return at_gpu
Example #19
def eye(N, dtype=np.float32):
    """
    Construct a 2D matrix with ones on the diagonal and zeros elsewhere.

    Constructs a matrix in device memory whose diagonal elements
    are set to 1 and non-diagonal elements are set to 0.

    Parameters
    ----------
    N : int
        Number of rows or columns in the output matrix.
    dtype : numpy data type
        Type of the output matrix elements (float32, float64, complex64,
        or complex128). Default: float32.

    Returns
    -------
    e_gpu : pycuda.gpuarray.GPUArray
        Diagonal matrix of dimensions `[N, N]` with diagonal values
        set to 1.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> N = 5
    >>> e_gpu = linalg.eye(N)
    >>> np.all(e_gpu.get() == np.eye(N))
    True
    >>> e_gpu = linalg.eye(N, np.complex64)
    >>> np.all(e_gpu.get() == np.eye(N, dtype=np.complex64))
    True

    """

    if dtype not in [np.float32, np.float64, np.complex64, np.complex128]:
        raise ValueError('unrecognized type')
    if N <= 0:
        raise ValueError('N must be greater than 0')

    use_double = int(dtype in [np.float64, np.complex128])
    use_complex = int(dtype in [np.complex64, np.complex128])

    # Initialize output matrix:
    e_gpu = misc.zeros((N, N), dtype)

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, e_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    eye_mod = SourceModule(eye_template.substitute(use_double=use_double,
                                                   use_complex=use_complex),
                           cache_dir=cache_dir)

    eye = eye_mod.get_function("eye")
    eye(e_gpu, np.uint32(N), block=block_dim, grid=grid_dim)

    return e_gpu
Example #20
def diag(v_gpu):
    """
    Construct a diagonal matrix.

    Constructs a matrix in device memory whose diagonal elements
    correspond to the elements in the specified array; all
    non-diagonal elements are set to 0.

    Parameters
    ----------
    v_gpu : pycuda.gpuarray.GPUArray
        Input array of length `n`.

    Returns
    -------
    d_gpu : pycuda.gpuarray.GPUArray
        Diagonal matrix of dimensions `[n, n]`.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> v = np.array([1, 2, 3, 4, 5, 6], np.float32)
    >>> v_gpu = gpuarray.to_gpu(v)
    >>> d_gpu = linalg.diag(v_gpu)
    >>> np.all(d_gpu.get() == np.diag(v))
    True
    >>> v = np.array([1j, 2j, 3j, 4j, 5j, 6j], np.complex64)
    >>> v_gpu = gpuarray.to_gpu(v)
    >>> d_gpu = linalg.diag(v_gpu)
    >>> np.all(d_gpu.get() == np.diag(v))
    True

    """

    if v_gpu.dtype not in [np.float32, np.float64, np.complex64,
                           np.complex128]:
        raise ValueError('unrecognized type')

    if len(v_gpu.shape) > 1:
        raise ValueError('input array cannot be multidimensional')

    use_double = int(v_gpu.dtype in [np.float64, np.complex128])
    use_complex = int(v_gpu.dtype in [np.complex64, np.complex128])

    # Initialize output matrix:
    d_gpu = misc.zeros((v_gpu.size, v_gpu.size), v_gpu.dtype)

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, d_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    diag_mod = SourceModule(diag_template.substitute(use_double=use_double,
                                                     use_complex=use_complex),
                            cache_dir=cache_dir)

    diag = diag_mod.get_function("diag")
    diag(v_gpu, d_gpu, np.uint32(v_gpu.size),
         block=block_dim,
         grid=grid_dim)

    return d_gpu
Example #21
def sici(x_gpu):
    """
    Sine/Cosine integral.

    Computes the sine and cosine integral of every element in the
    input matrix.

    Parameters
    ----------
    x_gpu : GPUArray
        Input matrix of shape `(m, n)`.
        
    Returns
    -------
    (si_gpu, ci_gpu) : tuple of GPUArrays
        Tuple of GPUArrays containing the sine integrals and cosine
        integrals of the entries of `x_gpu`.
        
    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import scipy.special
    >>> import special
    >>> x = np.array([[1, 2], [3, 4]], np.float32)
    >>> x_gpu = gpuarray.to_gpu(x)
    >>> (si_gpu, ci_gpu) = special.sici(x_gpu)
    >>> (si, ci) = scipy.special.sici(x)
    >>> np.allclose(si, si_gpu.get())
    True
    >>> np.allclose(ci, ci_gpu.get())
    True
    
    """

    if x_gpu.dtype == np.float32:
        use_double = 0
    elif x_gpu.dtype == np.float64:
        use_double = 1
    else:
        raise ValueError('unsupported type')

    # Get block/grid sizes:
    dev = get_current_device()
    block_dim, grid_dim = select_block_grid_sizes(dev, x_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    sici_mod = SourceModule(sici_mod_template.substitute(use_double=use_double),
                            cache_dir=cache_dir,
                            options=["-I", install_headers])
    sici_func = sici_mod.get_function("sici_array")

    si_gpu = gpuarray.empty_like(x_gpu)
    ci_gpu = gpuarray.empty_like(x_gpu)
    sici_func(x_gpu,
              si_gpu,
              ci_gpu,
              np.uint32(x_gpu.size),
              block=block_dim,
              grid=grid_dim)
    return (si_gpu, ci_gpu)
Example #22
def diag(v_gpu):
    """
    Construct a diagonal matrix if input array is one-dimensional,
    or extracts diagonal entries of a two-dimensional array.

    --- If input-array is one-dimensional: 
    Constructs a matrix in device memory whose diagonal elements
    correspond to the elements in the specified array; all
    non-diagonal elements are set to 0.
    
    --- If input-array is two-dimensional: 
    Constructs an array in device memory whose elements
    correspond to the elements along the main-diagonal of the specified 
    array.

    Parameters
    ----------
    v_gpu : pycuda.gpuarray.GPUArray
            Input array of length `n` or of shape `(n, m)`.

    Returns
    -------
    d_gpu : pycuda.gpuarray.GPUArray
            If `v_gpu` is one-dimensional with length `n`, a diagonal
            matrix of dimensions `[n, n]`; if `v_gpu` is two-dimensional
            with shape `(n, m)`, an array of length `min(n, m)`.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> v = np.array([1, 2, 3, 4, 5, 6], np.float32)
    >>> v_gpu = gpuarray.to_gpu(v)
    >>> d_gpu = linalg.diag(v_gpu)
    >>> np.all(d_gpu.get() == np.diag(v))
    True
    >>> v = np.array([1j, 2j, 3j, 4j, 5j, 6j], np.complex64)
    >>> v_gpu = gpuarray.to_gpu(v)
    >>> d_gpu = linalg.diag(v_gpu)
    >>> np.all(d_gpu.get() == np.diag(v))
    True
    >>> v = np.array([[1., 2., 3.],[4., 5., 6.]], np.float64)
    >>> v_gpu = gpuarray.to_gpu(v)
    >>> d_gpu = linalg.diag(v_gpu)
    >>> d_gpu.get()
    array([ 1.,  5.])

    """

    if v_gpu.dtype not in [np.float32, np.float64, np.complex64,
                           np.complex128]:
        raise ValueError('unrecognized type')

    if len(v_gpu.shape) == 2:
        # Extract the main diagonal. For a C-ordered (row-major) n x m
        # array, element (i, i) lies at flat index i*(m + 1):
        n, m = v_gpu.shape

        # Allocate the output array:
        d_gpu = gpuarray.empty(min(m, n), v_gpu.dtype.type)

        # Match the kernel's C types to the input dtype instead of
        # hardcoding double (the original only worked for float64):
        from pycuda.tools import dtype_to_ctype
        ctype = dtype_to_ctype(v_gpu.dtype)
        diag_kernel = el.ElementwiseKernel(
            "%s *x, %s *y, int z" % (ctype, ctype),
            "y[i] = x[(z+1)*i]", "diakernel")
        diag_kernel(v_gpu, d_gpu, m)

        return d_gpu
    elif len(v_gpu.shape) >= 3:
        raise ValueError('input array cannot have more than 2 dimensions')

    use_double = int(v_gpu.dtype in [np.float64, np.complex128])
    use_complex = int(v_gpu.dtype in [np.complex64, np.complex128])

    # Initialize output matrix:
    d_gpu = misc.zeros((v_gpu.size, v_gpu.size), v_gpu.dtype)

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, d_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    diag_mod = SourceModule(diag_template.substitute(use_double=use_double,
                                                     use_complex=use_complex),
                            cache_dir=cache_dir)

    diag = diag_mod.get_function("diag")
    diag(v_gpu, d_gpu, np.uint32(v_gpu.size),
         block=block_dim,
         grid=grid_dim)

    return d_gpu
Example #23
def conj(a_gpu, overwrite=True):
    """
    Complex conjugate.

    Compute the complex conjugate of the array in device memory.

    Parameters
    ----------
    a_gpu : pycuda.gpuarray.GPUArray
        Input array of shape `(m, n)`.
    overwrite : bool
        If true (default), save the result in the specified array.
        If false, return the result in a newly allocated array.

    Returns
    -------
    ac_gpu : pycuda.gpuarray.GPUArray
        Conjugate of the input array. If `overwrite` is true, the
        returned matrix is the same as the input array.

    Examples
    --------
    >>> import pycuda.driver as drv
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> linalg.init()
    >>> a = np.array([[1+1j, 2-2j, 3+3j, 4-4j], [5+5j, 6-6j, 7+7j, 8-8j]], np.complex64)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> a_gpu = linalg.conj(a_gpu)
    >>> np.all(a == np.conj(a_gpu.get()))
    True

    """

    # Real arrays are their own conjugates; return the input unchanged:
    if a_gpu.dtype in [np.float32, np.float64]:
        return a_gpu

    if a_gpu.dtype == np.complex64:
        use_double = 0
    elif a_gpu.dtype == np.complex128:
        use_double = 1
    else:
        raise ValueError('unsupported type')

    # Get block/grid sizes:
    dev = misc.get_current_device()
    block_dim, grid_dim = misc.select_block_grid_sizes(dev, a_gpu.shape)

    # Set this to False when debugging to make sure the compiled kernel is
    # not cached:
    cache_dir = None
    conj_mod = SourceModule(conj_template.substitute(use_double=use_double),
                            cache_dir=cache_dir)

    if overwrite:
        conj_inplace = conj_mod.get_function("conj_inplace")
        conj_inplace(a_gpu,
                     np.uint32(a_gpu.size),
                     block=block_dim,
                     grid=grid_dim)
        return a_gpu
    else:
        conj = conj_mod.get_function("conj")
        ac_gpu = gpuarray.empty_like(a_gpu)
        conj(a_gpu,
             ac_gpu,
             np.uint32(a_gpu.size),
             block=block_dim,
             grid=grid_dim)
        return ac_gpu