def test_percentile_memory_access(self, dtype):
    # Install an allocator that poisons every freshly allocated buffer
    # with NaN, so that any out-of-bounds read inside cupy.percentile
    # would surface as a NaN in the result.
    saved_allocator = cuda.get_allocator()

    def nan_poisoning_allocator(size):
        # Delegate the real allocation, then overwrite the whole
        # rounded-up backing buffer with NaN.
        memptr = saved_allocator(size)
        rounded_size = memptr.mem.size
        assert rounded_size % 512 == 0
        n_items = rounded_size // dtype().itemsize
        poison = cupy.ndarray(memptr=memptr, shape=(n_items, ), dtype=dtype)
        poison.fill(cupy.nan)
        return memptr

    # Check that percentile still returns non-NaN results
    sample = testing.shaped_random((5, ), cupy, dtype)
    quantiles = cupy.array((0, 100), dtype=dtype)
    cuda.set_allocator(nan_poisoning_allocator)
    try:
        result = cupy.percentile(sample, quantiles, axis=None,
                                 method='linear')
    finally:
        # Always restore the original allocator, even if percentile raises.
        cuda.set_allocator(saved_allocator)
    assert not cupy.any(cupy.isnan(result))
def setUp(self):
    # Only the 'managed' memory variant needs special setup: replace the
    # default device pool with one backed by cuda.malloc_managed.
    # NOTE(review): nesting reconstructed — the matching tearDown guards on
    # ``self.old_pool is None``, which suggests ``old_pool`` is only
    # assigned on this branch; confirm against the original file.
    if self.memory == 'managed':
        if cuda.runtime.is_hip:
            # Managed (unified) memory is a CUDA-only feature.
            pytest.skip('HIP does not support managed memory')
        # Remember the current default pool so tearDown can restore it.
        self.old_pool = cupy.get_default_memory_pool()
        self.new_pool = cuda.MemoryPool(cuda.malloc_managed)
        cuda.set_allocator(self.new_pool.malloc)
def __init__(self, memory_pool):
    """Install ``memory_pool`` as the global GPU allocator.

    Args:
        memory_pool: Pool object exposing a ``malloc`` method compatible
            with ``set_allocator`` (e.g. a CuPy ``MemoryPool``).
    """
    # The following line is essential to make sure that our memory allocator
    # setup is performed AFTER the default memory allocator setup, which runs
    # when ``chainer.cuda`` module is imported for the first time.
    import chainer.cuda
    self.memory_pool = memory_pool
    # Route all subsequent GPU allocations through the supplied pool.
    set_allocator(self.memory_pool.malloc)
def __eq__(self, other): return isinstance(other, DummyDevice) def __ne__(self, other): return not (self == other) DummyDevice = DummyDeviceType() # ------------------------------------------------------------------------------ # Global states # ------------------------------------------------------------------------------ if available: memory_pool = cuda.MemoryPool() cuda.set_allocator(memory_pool.malloc) # ------------------------------------------------------------------------------ # Global states # ------------------------------------------------------------------------------ def get_device(*args): """Gets the device from an ID integer or an array object. This is a convenient utility to select a correct device if the type of ``arg`` is unknown (i.e., one can use this function on arrays that may be on CPU or GPU). The returned device object supports the context management protocol of Python for the *with* statement. Args: args: Values to specify a GPU device. :class:`numpy.ndarray` objects
    # ``arg`` is GPU-resident (dense, sparse, or a variable inside a fused
    # kernel), so the CuPy namespace is the matching array module.
    if isinstance(
            arg,
            (ndarray, sparse.spmatrix, cupy.core.fusion.FusionVarPython)):
        return _cupy
    return numpy


# Re-export the kernel-fusion decorator at this level.
fuse = cupy.core.fusion.fuse

disable_experimental_feature_warning = False

# set default allocator: pool both device and pinned-host memory so repeated
# allocations do not hit the (slow) CUDA driver each time.
_default_memory_pool = cuda.MemoryPool()
_default_pinned_memory_pool = cuda.PinnedMemoryPool()
cuda.set_allocator(_default_memory_pool.malloc)
cuda.set_pinned_memory_allocator(_default_pinned_memory_pool.malloc)


def get_default_memory_pool():
    """Returns CuPy default memory pool for GPU memory.

    Returns:
        cupy.cuda.MemoryPool: The memory pool object.

    .. note::
       If you want to disable memory pool, please use the following code.

       >>> cupy.cuda.set_allocator(None)

    """
def __eq__(self, other): return isinstance(other, DummyDeviceType) def __ne__(self, other): return not (self == other) DummyDevice = DummyDeviceType() # ------------------------------------------------------------------------------ # Global states # ------------------------------------------------------------------------------ if available: memory_pool = cuda.MemoryPool() cuda.set_allocator(memory_pool.malloc) pinned_memory_pool = cuda.PinnedMemoryPool() cuda.set_pinned_memory_allocator(pinned_memory_pool.malloc) if six.PY2: try: from future.types.newint import newint as _newint _integer_types = six.integer_types + (_newint,) except ImportError: _integer_types = six.integer_types else: _integer_types = six.integer_types # ------------------------------------------------------------------------------
def __eq__(self, other): return isinstance(other, DummyDevice) def __ne__(self, other): return not (self == other) DummyDevice = DummyDeviceType() # ------------------------------------------------------------------------------ # Global states # ------------------------------------------------------------------------------ if available: cuda.set_allocator(cuda.MemoryPool().malloc) # ------------------------------------------------------------------------------ # Global states # ------------------------------------------------------------------------------ def get_device(*args): """Gets the device from an ID integer or an array object. This is a convenient utility to select a correct device if the type of ``arg`` is unknown (i.e., one can use this function on arrays that may be on CPU or GPU). The returned device object supports the context management protocol of Python for the *with* statement. Args: args: Values to specify a GPU device. :class:`numpy.ndarray` objects
def tearDown(self):
    """Restore the memory pool saved by ``setUp``, if one was swapped in."""
    if self.old_pool is None:
        # setUp did not replace the allocator; nothing to undo.
        return
    cuda.set_allocator(self.old_pool.malloc)
pass def __eq__(self, other): return isinstance(other, DummyDevice) def __ne__(self, other): return not (self == other) DummyDevice = DummyDeviceType() # ------------------------------------------------------------------------------ # Global states # ------------------------------------------------------------------------------ if available: cuda.set_allocator(cuda.MemoryPool().malloc) # ------------------------------------------------------------------------------ # Global states # ------------------------------------------------------------------------------ def get_device(*args): """Gets the device from an ID integer or an array object. This is a convenient utility to select a correct device if the type of ``arg`` is unknown (i.e., one can use this function on arrays that may be on CPU or GPU). The returned device object supports the context management protocol of Python for the *with* statement. Args: args: Values to specify a GPU device. :class:`numpy.ndarray` objects