def test_allocator_nested_context_manager(self):
    """Nested ``using_allocator`` scopes restore each outer allocator on exit."""
    outer_pool = memory.MemoryPool()
    with memory.using_allocator(outer_pool.malloc):
        inner_pool = memory.MemoryPool()
        assert memory.get_allocator() == outer_pool.malloc
        with memory.using_allocator(inner_pool.malloc):
            # Innermost scope: the inner pool's allocator is active.
            assert memory.get_allocator() == inner_pool.malloc
        # Leaving the inner scope restores the outer pool's allocator.
        assert memory.get_allocator() == outer_pool.malloc
    # Leaving both scopes restores the allocator installed by setUp().
    assert memory.get_allocator() == self.pool.malloc
def _reuse_between_thread(self, stream_main, stream_sub):
    """Allocate through one shared pool on two streams -- once on the
    calling thread and once on a worker thread -- and return both device
    pointers so the caller can compare whether memory was reused.
    """
    shared_pool = memory.MemoryPool()

    def work(stream):
        cupy.cuda.Device().use()
        with cupy.cuda.using_allocator(shared_pool.malloc):
            with stream:
                data = cupy.arange(16)
                self._ptr = data.data.ptr
            # Free the allocation so the pool may hand the block out again.
            del data
        # Reaching this line means the job ran without raising.
        self._error = False

    # First pass: run directly on the calling (main) thread.
    self._ptr = -1
    self._error = True
    work(stream_main)
    assert not self._error
    main_ptr = self._ptr

    # Second pass: run the same job on a separate daemon thread.
    self._ptr = -1
    self._error = True
    with cupy.cuda.Device():
        worker = threading.Thread(target=work, args=(stream_sub,))
        worker.daemon = True
        worker.start()
        worker.join()
    assert not self._error
    return main_ptr, self._ptr
def setUp(self):
    """Build the pool under test; skip managed-memory cases on old ROCm."""
    if cupy.cuda.runtime.is_hip:
        # Only query the build version on HIP (preserves the original
        # short-circuit: the driver is not queried on CUDA builds).
        rocm_too_old = cupy.cuda.driver.get_build_version() < 40300000
        if rocm_too_old and self.allocator is memory.malloc_managed:
            raise unittest.SkipTest('Managed memory requires ROCm 4.3+')
    self.pool = memory.MemoryPool(self.allocator)
def test_thread_local_valid(self):
    """An array allocated inside ``using_allocator`` stays valid, and its
    memory stays accounted to that pool, after the scope ends.
    """
    scoped_pool = memory.MemoryPool()
    arr = None
    with memory.using_allocator(scoped_pool.malloc):
        arr = cupy.zeros(128, dtype=cupy.int64)
        arr += 1
    # Check that arr and the pool have not been released.
    self.assertEqual(arr.data.mem.size, scoped_pool.used_bytes())
    assert arr.sum() == 128
def thread_body(self):
    """Worker body run on a separate thread: checks that ``using_allocator``
    is thread-local and that this thread falls back to the allocator from
    ``setUp`` once the scope exits.
    """
    new_pool = memory.MemoryPool()
    with memory.using_allocator(new_pool.malloc):
        assert memory.get_allocator() == new_pool.malloc
        # NOTE(review): ``threading.Barrier(2)`` constructs a brand-new
        # barrier and discards it without calling ``.wait()`` -- as written
        # these three calls are no-ops and synchronize nothing. Presumably
        # a shared barrier's ``.wait()`` was intended; verify against the
        # harness that starts this thread.
        threading.Barrier(2)
        arr = cupy.zeros(128, dtype=cupy.int64)
        threading.Barrier(2)
        # The allocation above must be accounted to the thread-local pool.
        self.assertEqual(arr.data.mem.size, new_pool.used_bytes())
        threading.Barrier(2)
    # Outside the scope, this thread sees the allocator set up in setUp().
    assert memory.get_allocator() == self.pool.malloc
def thread_body(self):
    """Worker body run on a separate thread: binds the current device,
    then checks that ``cupy.cuda.using_allocator`` is thread-local and
    that the ``setUp`` allocator is restored after the scope exits.
    """
    cupy.cuda.Device().use()
    new_pool = memory.MemoryPool()
    with cupy.cuda.using_allocator(new_pool.malloc):
        assert memory.get_allocator() == new_pool.malloc
        # NOTE(review): ``threading.Barrier(2)`` builds a fresh barrier and
        # never calls ``.wait()`` on it -- these calls are no-ops and do not
        # synchronize with the peer thread. Presumably a shared barrier's
        # ``.wait()`` was intended; verify against the test harness.
        threading.Barrier(2)
        arr = cupy.zeros(128, dtype=cupy.int64)
        threading.Barrier(2)
        # The allocation above must be accounted to the thread-local pool.
        assert arr.data.mem.size == new_pool.used_bytes()
        threading.Barrier(2)
    # Outside the scope, this thread sees the allocator set up in setUp().
    assert memory.get_allocator() == self.pool.malloc
def setUp(self):
    """Create a fresh memory pool for each test case."""
    self.pool = memory.MemoryPool()
def test_set_allocator_cm(self):
    """``set_allocator`` is rejected while a ``using_allocator`` scope is active."""
    pool_a = memory.MemoryPool()
    pool_b = memory.MemoryPool()
    with memory.using_allocator(pool_a.malloc):
        # Swapping the process-wide allocator inside a context-managed
        # scope must raise.
        with self.assertRaises(ValueError):
            memory.set_allocator(pool_b.malloc)
def setUp(self):
    """Remember the default pool, then install a fresh pool as the allocator."""
    # Keep a handle on the previous default so tearDown-style code can
    # restore it.
    self.old_pool = cupy.get_default_memory_pool()
    self.pool = memory.MemoryPool()
    memory.set_allocator(self.pool.malloc)
def setUp(self):
    """Create the pool under test from the parameterized allocator."""
    self.pool = memory.MemoryPool(self.allocator)
def setUp(self):
    """Create a fresh pool and install it as the active allocator."""
    self.pool = memory.MemoryPool()
    memory.set_allocator(self.pool.malloc)
def test_allocator_context_manager(self):
    """``using_allocator`` swaps the allocator in and restores it on exit."""
    temp_pool = memory.MemoryPool()
    with cupy.cuda.using_allocator(temp_pool.malloc):
        # Inside the scope the temporary pool's allocator is active.
        assert memory.get_allocator() == temp_pool.malloc
    # On exit the allocator installed by setUp() is active again.
    assert memory.get_allocator() == self.pool.malloc
def setUp(self):
    """Create the pool under test; skip managed-memory cases on HIP.

    The skip check runs *before* the pool is constructed — consistent
    with the other ``setUp`` variant that guards on the platform first —
    so no pool is allocated for a test that is going to be skipped.
    """
    if (cupy.cuda.runtime.is_hip
            and self.allocator is memory.malloc_managed):
        raise unittest.SkipTest('HIP does not support managed memory')
    self.pool = memory.MemoryPool(self.allocator)
def setUp(self):
    """Attach a DebugPrintHook that writes into an in-memory text buffer."""
    # Capture hook output in a StringIO so tests can inspect it.
    self.io = io.StringIO()
    self.hook = memory_hooks.DebugPrintHook(file=self.io)
    self.pool = memory.MemoryPool()
def setUp(self):
    """Create a fresh pool and record the allocation unit size."""
    self.pool = memory.MemoryPool()
    # presumably 512 is the pool's allocation rounding granularity in
    # bytes -- verify against the tests that read self.unit
    self.unit = 512