Example #1
    def setUp(self):
        if self.memory == 'managed':
            if cuda.runtime.is_hip:
                pytest.skip('HIP does not support managed memory')
            self.old_pool = cupy.get_default_memory_pool()
            self.new_pool = cuda.MemoryPool(cuda.malloc_managed)
            cuda.set_allocator(self.new_pool.malloc)
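The setUp above replaces the process-wide allocator with a managed-memory pool, so the test class also has to hand the saved pool back afterwards. A minimal sketch of the matching tearDown, assuming the same attribute names as the setUp (the cleanup in the original test suite may differ):

    def tearDown(self):
        if self.memory == 'managed' and not cuda.runtime.is_hip:
            # Restore the default allocator saved in setUp and release the
            # blocks held by the temporary managed pool.
            cuda.set_allocator(self.old_pool.malloc)
            self.new_pool.free_all_blocks()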
Example #2
        pass

    def __eq__(self, other):
        return isinstance(other, DummyDevice)

    def __ne__(self, other):
        return not (self == other)


DummyDevice = DummyDeviceType()

# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
if available:
    memory_pool = cuda.MemoryPool()
    cuda.set_allocator(memory_pool.malloc)


# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
def get_device(*args):
    """Gets the device from an ID integer or an array object.

    This is a convenient utility to select a correct device if the type of
    ``arg`` is unknown (i.e., one can use this function on arrays that may be
    on CPU or GPU). The returned device object supports the context management
    protocol of Python for the *with* statement.

    Args:
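The docstring above points out that the returned device object supports Python's context management protocol for the with statement. A short usage sketch of that pattern with cupy.cuda.Device (array contents are illustrative only):

import cupy
from cupy import cuda

x = cupy.arange(10)              # allocated on the current GPU
with cuda.Device(x.device.id):   # switch to the device that holds x
    y = x * 2                    # work inside the block runs on that device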
Example #3
File: __init__.py  Project: fujiisoup/cupy
    """
    for arg in args:
        if isinstance(
                arg,
                (ndarray, sparse.spmatrix,
                 cupy.core.fusion.FusionVarPython)):
            return _cupy
    return numpy


fuse = cupy.core.fusion.fuse

disable_experimental_feature_warning = False

# set default allocator
_default_memory_pool = cuda.MemoryPool()
_default_pinned_memory_pool = cuda.PinnedMemoryPool()

cuda.set_allocator(_default_memory_pool.malloc)
cuda.set_pinned_memory_allocator(_default_pinned_memory_pool.malloc)


def get_default_memory_pool():
    """Returns CuPy default memory pool for GPU memory.

    Returns:
        cupy.cuda.MemoryPool: The memory pool object.

    .. note::
       If you want to disable memory pool, please use the following code.
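The note is truncated before the code it refers to. In CuPy, passing None to set_allocator falls back to the raw device allocator, which effectively disables the pool; a sketch of what the missing snippet most likely shows:

import cupy

# Disable the memory pool: None selects the raw allocator, so allocations
# go straight to the device driver instead of being cached in the pool.
cupy.cuda.set_allocator(None)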
Example #4
    """
    for arg in args:
        if isinstance(
                arg,
                (ndarray, sparse.spmatrix, cupy.core.fusion._FusionVarScalar,
                 cupy.core.fusion._FusionVarArray)):
            return _cupy
    return numpy


fuse = cupy.core.fusion.fuse

disable_experimental_feature_warning = False

# set default allocator
_default_memory_pool = cuda.MemoryPool(cupy.cuda.memory.malloc_managed)
_default_pinned_memory_pool = cuda.PinnedMemoryPool()  # cupy.cuda.memory.malloc_managed
_default_device_memory_pool = cuda.MemoryPool()
#print("it is OC-cupy")
cuda.set_allocator(_default_memory_pool.malloc)
cuda.set_pinned_memory_allocator(_default_pinned_memory_pool.malloc)


def get_default_memory_pool():
    """Returns CuPy default memory pool for GPU memory.

    Returns:
        cupy.cuda.MemoryPool: The memory pool object.

    .. note::
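Examples #3 and #4 both contain the numpy/cupy dispatch helper shown near the top of their snippets: it returns the cupy module when any argument is a GPU-backed type and numpy otherwise. A usage sketch of that pattern through cupy.get_array_module, the name under which CuPy exposes this utility (the norm function here is only an illustration):

import numpy
import cupy

def l2_norm(x):
    # xp is bound to whichever module owns x, so the same code path
    # handles numpy.ndarray on the CPU and cupy.ndarray on the GPU.
    xp = cupy.get_array_module(x)
    return xp.sqrt((x * x).sum())

print(l2_norm(numpy.arange(4.0)))  # computed with numpy
print(l2_norm(cupy.arange(4.0)))   # computed with cupy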
Example #5
        pass

    def __eq__(self, other):
        return isinstance(other, DummyDevice)

    def __ne__(self, other):
        return not (self == other)


DummyDevice = DummyDeviceType()

# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
if available:
    cuda.set_allocator(cuda.MemoryPool().malloc)


# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
def get_device(*args):
    """Gets the device from an ID integer or an array object.

    This is a convenient utility to select a correct device if the type of
    ``arg`` is unknown (i.e., one can use this function on arrays that may be
    on CPU or GPU). The returned device object supports the context management
    protocol of Python for the *with* statement.

    Args:
        args: Values to specify a GPU device. :class:`numpy.ndarray` objects