def start_transfer_test_pool(**kwargs):
    """Spin up a single-process gevent actor pool populated with the worker
    actors that transfer tests need, yield the pool, and make sure the shared
    memory holder is destroyed on exit.

    Required kwargs: ``address`` (pool endpoint) and ``plasma_size`` (bytes
    for the shared holder); remaining kwargs go to ``create_actor_pool``.
    """
    pool_address = kwargs.pop('address')
    store_size = kwargs.pop('plasma_size')
    with create_actor_pool(n_process=1, backend='gevent',
                           address=pool_address, **kwargs) as actor_pool:
        # cluster topology actors point at this very pool as the scheduler
        actor_pool.create_actor(SchedulerClusterInfoActor, [pool_address],
                                uid=SchedulerClusterInfoActor.default_uid())
        actor_pool.create_actor(WorkerClusterInfoActor, [pool_address],
                                uid=WorkerClusterInfoActor.default_uid())

        # storage bookkeeping actors
        actor_pool.create_actor(PlasmaKeyMapActor,
                                uid=PlasmaKeyMapActor.default_uid())
        actor_pool.create_actor(StorageManagerActor,
                                uid=StorageManagerActor.default_uid())
        actor_pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
        actor_pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())

        # 20 MB memory quota, registered under the MemQuotaActor uid
        actor_pool.create_actor(QuotaActor, 1024 * 1024 * 20,
                                uid=MemQuotaActor.default_uid())
        holder_ref = actor_pool.create_actor(SharedHolderActor, store_size,
                                             uid=SharedHolderActor.default_uid())
        actor_pool.create_actor(StatusActor, pool_address,
                                uid=StatusActor.default_uid())
        actor_pool.create_actor(IORunnerActor)
        actor_pool.create_actor(StorageClientActor,
                                uid=StorageClientActor.default_uid())
        actor_pool.create_actor(InProcHolderActor)
        actor_pool.create_actor(ReceiverManagerActor,
                                uid=ReceiverManagerActor.default_uid())
        try:
            yield actor_pool
        finally:
            # release plasma-backed storage even if the test body raised
            holder_ref.destroy()
def _start_calc_pool(self):
    """Create a one-process gevent pool with the actors a CPU-calc test
    needs, then yield ``(pool, test_actor)``; the shared holder is destroyed
    on exit regardless of test outcome."""
    pool_addr = '127.0.0.1:%d' % get_next_port()
    with self.create_pool(n_process=1, backend='gevent',
                          address=pool_addr) as calc_pool:
        # cluster info actors treat this pool as its own scheduler
        calc_pool.create_actor(SchedulerClusterInfoActor, [pool_addr],
                               uid=SchedulerClusterInfoActor.default_uid())
        calc_pool.create_actor(WorkerClusterInfoActor, [pool_addr],
                               uid=WorkerClusterInfoActor.default_uid())

        calc_pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
        calc_pool.create_actor(StatusActor, pool_addr,
                               uid=StatusActor.default_uid())
        calc_pool.create_actor(PlasmaKeyMapActor,
                               uid=PlasmaKeyMapActor.default_uid())
        calc_pool.create_actor(WorkerDaemonActor,
                               uid=WorkerDaemonActor.default_uid())
        calc_pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
        calc_pool.create_actor(StorageManagerActor,
                               uid=StorageManagerActor.default_uid())
        calc_pool.create_actor(IORunnerActor)

        # 1 MB quota registered under the MemQuotaActor uid
        calc_pool.create_actor(QuotaActor, 1024 ** 2,
                               uid=MemQuotaActor.default_uid())
        holder_ref = calc_pool.create_actor(
            SharedHolderActor, uid=SharedHolderActor.default_uid())
        calc_pool.create_actor(InProcHolderActor)
        calc_pool.create_actor(CpuCalcActor, uid=CpuCalcActor.default_uid())

        with self.run_actor_test(calc_pool) as probe_actor:
            try:
                yield calc_pool, probe_actor
            finally:
                # always free shared storage, even on test failure
                holder_ref.destroy()
def start_transfer_test_pool(**kwargs):
    """Start a single-process gevent actor pool with the actors transfer
    tests rely on, yield the pool, and destroy the chunk holder afterwards.

    Required kwargs: ``address`` (pool endpoint) and ``plasma_size`` (bytes
    for the chunk holder); remaining kwargs are forwarded to
    ``create_actor_pool``.
    """
    address = kwargs.pop('address')
    plasma_size = kwargs.pop('plasma_size')
    with create_actor_pool(n_process=1, backend='gevent',
                           address=address, **kwargs) as pool:
        pool.create_actor(SchedulerClusterInfoActor, schedulers=[address],
                          uid=SchedulerClusterInfoActor.default_uid())
        pool.create_actor(WorkerClusterInfoActor, schedulers=[address],
                          uid=WorkerClusterInfoActor.default_uid())

        pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
        pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
        # 20 MB quota registered under the MemQuotaActor uid
        pool.create_actor(QuotaActor, 1024 * 1024 * 20,
                          uid=MemQuotaActor.default_uid())
        chunk_holder_ref = pool.create_actor(
            ChunkHolderActor, plasma_size, uid=ChunkHolderActor.default_uid())
        pool.create_actor(SpillActor)
        pool.create_actor(StatusActor, address, uid=StatusActor.default_uid())

        # BUGFIX: the original called chunk_holder_ref.destroy() as a plain
        # statement after the yield, so an exception raised in the consumer
        # skipped the cleanup and leaked plasma storage; wrap in try/finally
        # (matching the sibling start_transfer_test_pool fixture).
        try:
            yield pool
        finally:
            chunk_holder_ref.destroy()
def testSharedLoad(self, *_):
    """Exercise loading data into shared memory from both a bytes-io source
    (disk) and an object-io source (process memory), checking the resulting
    location records and that objects are released after deletion."""
    test_addr = '127.0.0.1:%d' % get_next_port()
    with self.create_pool(n_process=1, address=test_addr) as pool, \
            self.run_actor_test(pool) as test_actor:
        pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
        storage_manager_ref = pool.create_actor(
            StorageManagerActor, uid=StorageManagerActor.default_uid())
        pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())
        pool.create_actor(InProcHolderActor)
        pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(SharedHolderActor, uid=SharedHolderActor.default_uid())

        data1 = np.random.random((10, 10))
        data2 = np.random.random((10, 10))
        ser_data1 = dataserializer.serialize(data1)

        session_id = str(uuid.uuid4())
        data_key1 = str(uuid.uuid4())
        data_key2 = str(uuid.uuid4())

        storage_client = test_actor.storage_client
        handler = storage_client.get_storage_handler(
            (0, DataStorageDevice.SHARED_MEMORY))

        # load from bytes io: write serialized data to disk first, then
        # ask the shared-memory handler to load it from the disk handler
        disk_handler = storage_client.get_storage_handler(
            (0, DataStorageDevice.DISK))
        with disk_handler.create_bytes_writer(
                session_id, data_key1, ser_data1.total_bytes) as writer:
            ser_data1.write_to(writer)

        handler.load_from_bytes_io(session_id, data_key1, disk_handler) \
            .then(lambda *_: test_actor.set_result(None),
                  lambda *exc: test_actor.set_result(exc, accept=False))
        self.get_result(5)
        # after loading, the data is registered in both locations
        self.assertEqual(
            sorted(storage_manager_ref.get_data_locations(session_id, data_key1)),
            [(0, DataStorageDevice.SHARED_MEMORY), (0, DataStorageDevice.DISK)])

        disk_handler.delete(session_id, data_key1)
        handler.delete(session_id, data_key1)

        # track data2 with a weakref so we can verify it is actually freed
        ref_data2 = weakref.ref(data2)

        # load from object io: put the object in process memory, drop the
        # local reference, then load it into shared memory
        proc_handler = storage_client.get_storage_handler(
            (0, DataStorageDevice.PROC_MEMORY))
        proc_handler.put_object(session_id, data_key2, data2)
        del data2

        handler.load_from_object_io(session_id, data_key2, proc_handler) \
            .then(lambda *_: test_actor.set_result(None),
                  lambda *exc: test_actor.set_result(exc, accept=False))
        self.get_result(5)
        self.assertEqual(
            sorted(storage_manager_ref.get_data_locations(session_id, data_key2)),
            [(0, DataStorageDevice.PROC_MEMORY),
             (0, DataStorageDevice.SHARED_MEMORY)])

        proc_handler.delete(session_id, data_key2)
        # deleting the proc-memory copy must release the last reference
        self.assertIsNone(ref_data2())
        handler.delete(session_id, data_key2)
def testProcMemPutAndGet(self):
    """Round-trip objects through the process-memory storage handler:
    plain puts, deletion, and puts from pre-serialized inputs."""
    test_addr = '127.0.0.1:%d' % get_next_port()
    with self.create_pool(n_process=1, address=test_addr) as pool, \
            self.run_actor_test(pool) as test_actor:
        pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
        manager_ref = pool.create_actor(
            StorageManagerActor, uid=StorageManagerActor.default_uid())
        pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())
        pool.create_actor(InProcHolderActor)

        arr_plain = np.random.random((10, 10))
        arr_serialized = np.random.random((10, 10))
        serialized = dataserializer.serialize(arr_serialized)
        serialized_buf = serialized.to_buffer()

        session_id = str(uuid.uuid4())
        key_plain = str(uuid.uuid4())
        key_ser = str(uuid.uuid4())

        handler = test_actor.storage_client.get_storage_handler(
            (0, DataStorageDevice.PROC_MEMORY))

        # plain object round-trip: put, check location record, get back
        handler.put_objects(session_id, [key_plain], [arr_plain])
        self.assertEqual(
            sorted(manager_ref.get_data_locations(session_id, [key_plain])[0]),
            [(0, DataStorageDevice.PROC_MEMORY)])
        assert_allclose(arr_plain,
                        handler.get_objects(session_id, [key_plain])[0])

        # deletion clears both the location record and the object itself
        handler.delete(session_id, [key_plain])
        self.assertEqual(
            list(manager_ref.get_data_locations(session_id, [key_plain])[0]),
            [])
        with self.assertRaises(KeyError):
            handler.get_objects(session_id, [key_plain])

        # serialize=True accepts a serialized wrapper ...
        handler.put_objects(session_id, [key_ser], [serialized], serialize=True)
        assert_allclose(arr_serialized,
                        handler.get_objects(session_id, [key_ser])[0])
        handler.delete(session_id, [key_ser])

        # ... as well as a raw serialized buffer
        handler.put_objects(session_id, [key_ser], [serialized_buf],
                            serialize=True)
        assert_allclose(arr_serialized,
                        handler.get_objects(session_id, [key_ser])[0])
        handler.delete(session_id, [key_ser])
def testLoadStoreInOtherProcess(self):
    """Run copy tests across processes of a 3-process pool: shared memory to
    another process's memory, and process memory to process memory, driven by
    an OtherProcessTestActor living in process 0."""
    test_addr = '127.0.0.1:%d' % get_next_port()
    with self.create_pool(n_process=3, address=test_addr,
                          distributor=MarsDistributor(3)) as pool:
        pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
        pool.create_actor(StorageManagerActor,
                          uid=StorageManagerActor.default_uid())

        pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())

        pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())

        pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(SharedHolderActor, self.plasma_storage_size,
                          uid=SharedHolderActor.default_uid())

        # one in-process holder per worker process (uids route them to
        # processes 1 and 2 via the distributor prefix)
        pool.create_actor(InProcHolderActor, uid='w:1:InProcHolderActor1')
        pool.create_actor(InProcHolderActor, uid='w:2:InProcHolderActor2')
        pool.create_actor(IORunnerActor, lock_free=True, dispatched=False,
                          uid=IORunnerActor.gen_uid(1))

        test_ref = pool.create_actor(OtherProcessTestActor,
                                     uid='w:0:OtherProcTest')

        def _get_result():
            # poll the asynchronous test actor until it posts a result,
            # failing after a 10-second deadline
            start_time = time.time()
            while test_ref.get_result() is None:
                pool.sleep(0.5)
                if time.time() - start_time > 10:
                    raise TimeoutError

        # shared memory (proc 0) -> proc memory of process 1
        test_ref.run_copy_test((0, DataStorageDevice.SHARED_MEMORY),
                               (1, DataStorageDevice.PROC_MEMORY), _tell=True)
        _get_result()

        # proc memory of process 1 -> shared memory
        test_ref.run_copy_test((1, DataStorageDevice.PROC_MEMORY),
                               (0, DataStorageDevice.SHARED_MEMORY), _tell=True)
        _get_result()

        # proc memory of process 1 -> proc memory of process 2
        test_ref.run_copy_test((1, DataStorageDevice.PROC_MEMORY),
                               (2, DataStorageDevice.PROC_MEMORY), _tell=True)
        _get_result()
def testSharedLoadFromObjects(self, *_):
    """Load an object from process memory into shared memory via object io
    and verify location records plus release of the original object."""
    test_addr = '127.0.0.1:%d' % get_next_port()
    with self.create_pool(n_process=1, address=test_addr) as pool, \
            self.run_actor_test(pool) as test_actor:
        pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
        storage_manager_ref = pool.create_actor(
            StorageManagerActor, uid=StorageManagerActor.default_uid())
        pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())
        pool.create_actor(InProcHolderActor)
        pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(SharedHolderActor, uid=SharedHolderActor.default_uid())

        data1 = np.random.random((10, 10))

        session_id = str(uuid.uuid4())
        data_key1 = str(uuid.uuid4())

        storage_client = test_actor.storage_client
        handler = storage_client.get_storage_handler(
            (0, DataStorageDevice.SHARED_MEMORY))

        # load from object io: keep only a weak reference so we can check
        # the object is truly freed once its holders drop it
        ref_data1 = weakref.ref(data1)
        proc_handler = storage_client.get_storage_handler(
            (0, DataStorageDevice.PROC_MEMORY))
        proc_handler.put_objects(session_id, [data_key1], [data1])
        del data1

        handler.load_from_object_io(session_id, [data_key1], proc_handler) \
            .then(lambda *_: test_actor.set_result(None),
                  lambda *exc: test_actor.set_result(exc, accept=False))
        self.get_result(5)
        # data is now registered in both proc memory and shared memory
        self.assertEqual(
            sorted(storage_manager_ref.get_data_locations(
                session_id, [data_key1])[0]),
            [(0, DataStorageDevice.PROC_MEMORY),
             (0, DataStorageDevice.SHARED_MEMORY)])

        proc_handler.delete(session_id, [data_key1])
        # deleting the proc-memory copy must release the last strong ref
        self.assertIsNone(ref_data1())
        handler.delete(session_id, [data_key1])
def testCudaMemPutAndGet(self):
    """Round-trip ndarray / Series / DataFrame objects through the CUDA
    storage handler, checking cuda-side types, equality after moving back
    to host memory, and behavior of delete and serialized puts."""
    test_addr = f'127.0.0.1:{get_next_port()}'
    with self.create_pool(n_process=1, address=test_addr) as pool, \
            self.run_actor_test(pool) as test_actor:
        pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
        storage_manager_ref = pool.create_actor(
            StorageManagerActor, uid=StorageManagerActor.default_uid())
        pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())
        pool.create_actor(CudaHolderActor)

        test_data = np.random.random((10, 10))
        # each suite: (host object, expected cuda type,
        #              converter back to host, equality assertion)
        test_suites = [
            (test_data, cp.ndarray, cp.asnumpy, assert_allclose),
            (pd.Series(test_data.flatten()), cudf.Series,
             lambda o: o.to_pandas(), pd.testing.assert_series_equal),
            (pd.DataFrame(dict(col=test_data.flatten())), cudf.DataFrame,
             lambda o: o.to_pandas(), pd.testing.assert_frame_equal),
        ]

        for data, cuda_type, move_to_mem, assert_obj_equal in test_suites:
            ser_data = dataserializer.serialize(data)

            session_id = str(uuid.uuid4())
            data_key1 = str(uuid.uuid4())
            data_key2 = str(uuid.uuid4())

            storage_client = test_actor.storage_client
            handler = storage_client.get_storage_handler(
                (0, DataStorageDevice.CUDA))

            # plain put: object should land on the CUDA device and be
            # returned as the expected cuda-side type
            handler.put_objects(session_id, [data_key1], [data])
            self.assertEqual(
                sorted(storage_manager_ref.get_data_locations(
                    session_id, [data_key1])[0]),
                [(0, DataStorageDevice.CUDA)])
            self.assertIsInstance(
                handler.get_objects(session_id, [data_key1])[0], cuda_type)
            assert_obj_equal(
                data,
                move_to_mem(handler.get_objects(session_id, [data_key1])[0]))

            # delete clears the location record and subsequent gets raise
            handler.delete(session_id, [data_key1])
            self.assertEqual(
                sorted(storage_manager_ref.get_data_locations(
                    session_id, [data_key1])[0]),
                [])
            with self.assertRaises(KeyError):
                handler.get_objects(session_id, [data_key1])

            # serialized put (serialize=True) must deserialize onto CUDA
            handler.put_objects(session_id, [data_key2], [ser_data],
                                serialize=True)
            self.assertIsInstance(
                handler.get_objects(session_id, [data_key2])[0], cuda_type)
            assert_obj_equal(
                data,
                move_to_mem(handler.get_objects(session_id, [data_key2])[0]))
            handler.delete(session_id, [data_key2])
def testMemQuotaAllocation(self):
    """Check that a quota request blocks while system memory is scarce and
    is granted once the (mocked) available memory grows, by measuring the
    delay between request and fulfillment."""
    from mars import resource
    from mars.utils import AttributeDict

    # fake memory stats: only 50 of 300 bytes available at first
    mock_mem_stat = AttributeDict(
        dict(total=300, available=50, used=0, free=50))
    local_pool_addr = 'localhost:%d' % get_next_port()
    # patch resource.virtual_memory so MemQuotaActor sees the mocked stats
    with create_actor_pool(n_process=1, backend='gevent',
                           address=local_pool_addr) as pool, \
            patch_method(resource.virtual_memory, new=lambda: mock_mem_stat):
        pool.create_actor(WorkerClusterInfoActor,
                          schedulers=[local_pool_addr],
                          uid=WorkerClusterInfoActor.default_name())
        pool.create_actor(StatusActor, local_pool_addr,
                          uid=StatusActor.default_name())

        pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
        pool.create_actor(ProcessHelperActor,
                          uid=ProcessHelperActor.default_name())
        # hard limit 300, refreshing the (mocked) memory stats every 0.1 s
        quota_ref = pool.create_actor(MemQuotaActor, 300, refresh_time=0.1,
                                      uid=MemQuotaActor.default_name())

        time_recs = []
        with self.run_actor_test(pool) as test_actor:
            ref = test_actor.promise_ref(quota_ref)
            time_recs.append(time.time())

            def actual_exec(x):
                # fires only once the quota request is granted
                ref.release_quota(x)
                time_recs.append(time.time())
                test_actor.set_result(None)

            # request more than currently available; should stay pending
            ref.request_quota('req', 100, _promise=True) \
                .then(functools.partial(actual_exec, 'req'))

            pool.sleep(0.5)
            # raise available memory so the pending request can be granted
            mock_mem_stat['available'] = 150
            mock_mem_stat['free'] = 150
            self.get_result(2)
            # grant must have waited for the memory bump (>0.4 s of the
            # 0.5 s sleep)
            self.assertGreater(abs(time_recs[0] - time_recs[1]), 0.4)
def testTaskQueueActor(self):
    """Verify TaskQueueActor's parallelism cap, priority updates and task
    release behavior using a mock execution actor and timed callbacks."""
    with create_actor_pool(n_process=1, backend='gevent') as pool:
        pool.create_actor(MockExecutionActor, 10,
                          uid=ExecutionActor.default_name())
        quota_ref = pool.create_actor(QuotaActor, 30,
                                      uid=MemQuotaActor.default_name())
        # queue allows at most 4 tasks to be allocated at once
        pool.create_actor(TaskQueueActor, 4, uid=TaskQueueActor.default_name())

        session_id = str(uuid.uuid4())
        chunk_keys = [str(uuid.uuid4()).replace('-', '') for _ in range(6)]

        with self.run_actor_test(pool) as test_actor:
            queue_ref = test_actor.promise_ref(TaskQueueActor.default_name())

            # records the time at which each task's promise resolved
            res_times = dict()

            def callback_fun(key):
                res_times[key] = time.time()

            # enqueue with strictly decreasing depth (priority)
            for idx, k in enumerate(chunk_keys):
                depth = len(chunk_keys) - idx
                queue_ref.enqueue_task(session_id, k, dict(depth=depth),
                                       _promise=True) \
                    .then(functools.partial(callback_fun, k))

            gevent.sleep(1)
            # only the configured parallelism (4) may be allocated
            self.assertEqual(queue_ref.get_allocated_count(), 4)

            # boost the last (lowest-priority) task to the highest depth
            queue_ref.update_priority(session_id, chunk_keys[-1],
                                      dict(depth=len(chunk_keys)))

            # free two slots; the boosted task should get scheduled next
            quota_ref.release_quota(chunk_keys[0])
            queue_ref.release_task(session_id, chunk_keys[0])
            quota_ref.release_quota(chunk_keys[1])
            queue_ref.release_task(session_id, chunk_keys[1])
            gevent.sleep(0.5)
            self.assertIn(chunk_keys[-1], res_times)
            # the first tasks resolved earlier than the boosted one
            for k in chunk_keys[:3]:
                self.assertLessEqual(res_times[k],
                                     res_times[chunk_keys[-1]] - 0.5)
                self.assertIn(k, res_times)
def create_standard_actors(cls, pool, address, quota_size=None,
                           with_daemon=True, with_status=True,
                           with_resource=False):
    """Populate *pool* with the standard set of worker actors used by the
    execution tests.

    Optional actors (resource, daemon, status) are toggled by the
    ``with_*`` flags; ``quota_size`` defaults to 1 MB when falsy.
    """
    quota_size = quota_size or (1 << 20)  # default memory quota: 1 MB

    # cluster topology: this pool acts as its own scheduler
    pool.create_actor(SchedulerClusterInfoActor, [address],
                      uid=SchedulerClusterInfoActor.default_uid())
    pool.create_actor(WorkerClusterInfoActor, [address],
                      uid=WorkerClusterInfoActor.default_uid())

    # storage bookkeeping
    pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
    pool.create_actor(StorageManagerActor,
                      uid=StorageManagerActor.default_uid())

    # optional actors, in the same order as before
    if with_resource:
        pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
    if with_daemon:
        pool.create_actor(WorkerDaemonActor,
                          uid=WorkerDaemonActor.default_uid())
    if with_status:
        pool.create_actor(StatusActor, address,
                          uid=StatusActor.default_uid())

    pool.create_actor(SharedHolderActor, cls.plasma_storage_size,
                      uid=SharedHolderActor.default_uid())
    pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
    pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
    pool.create_actor(QuotaActor, quota_size,
                      uid=MemQuotaActor.default_uid())
    pool.create_actor(ExecutionActor, uid=ExecutionActor.default_uid())
def testClientSpill(self, *_):
    """Exercise StorageClient.copy_to in spill scenarios: missing keys,
    already-present targets, forced load failures, multi-object copies and a
    copy that must spill existing shared-memory data to make room."""
    test_addr = '127.0.0.1:%d' % get_next_port()
    with self.create_pool(n_process=1, address=test_addr) as pool:
        pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
        storage_manager_ref = pool.create_actor(
            StorageManagerActor, uid=StorageManagerActor.default_uid())

        pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
        pool.create_actor(IORunnerActor)

        pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())
        pool.create_actor(InProcHolderActor)

        pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(SharedHolderActor, self.plasma_storage_size,
                          uid=SharedHolderActor.default_uid())

        session_id = str(uuid.uuid4())
        # ~1.25 MB per array, 20 of them: enough to exhaust plasma storage
        data_list = [np.random.randint(0, 32767, (655360,), np.int16)
                     for _ in range(20)]
        data_keys = [str(uuid.uuid4()) for _ in range(20)]

        with self.run_actor_test(pool) as test_actor:
            storage_client = test_actor.storage_client
            idx = 0

            shared_handler = storage_client.get_storage_handler(
                (0, DataStorageDevice.SHARED_MEMORY))
            proc_handler = storage_client.get_storage_handler(
                (0, DataStorageDevice.PROC_MEMORY))

            def _fill_data():
                # put objects into shared memory until it is full,
                # returning the index of the first key NOT stored
                i = 0
                for i, (key, data) in enumerate(
                        zip(data_keys[idx:], data_list)):
                    try:
                        shared_handler.put_objects(session_id, [key], [data])
                    except StorageFull:
                        break
                return i + idx

            idx = _fill_data()

            # test copying non-existing keys
            storage_client.copy_to(session_id, ['non-exist-key'],
                                   [DataStorageDevice.SHARED_MEMORY]) \
                .then(lambda *_: test_actor.set_result(None),
                      lambda *exc: test_actor.set_result(exc, accept=False))
            with self.assertRaises(KeyError):
                self.get_result(5)

            # test copying into containing locations
            storage_client.copy_to(session_id, [data_keys[0]],
                                   [DataStorageDevice.SHARED_MEMORY]) \
                .then(lambda *_: test_actor.set_result(None),
                      lambda *exc: test_actor.set_result(exc, accept=False))
            self.get_result(5)

            self.assertEqual(
                sorted(storage_manager_ref.get_data_locations(
                    session_id, [data_keys[0]])[0]),
                [(0, DataStorageDevice.SHARED_MEMORY)])

            # test unsuccessful copy when no data at target
            def _mock_load_from(*_, **__):
                # make every load fail with SystemError
                return promise.finished(*build_exc_info(SystemError),
                                        _accept=False)

            with patch_method(StorageHandler.load_from, _mock_load_from), \
                    self.assertRaises(SystemError):
                storage_client.copy_to(session_id, [data_keys[0]],
                                       [DataStorageDevice.DISK]) \
                    .then(lambda *_: test_actor.set_result(None),
                          lambda *exc: test_actor.set_result(exc, accept=False))
                self.get_result(5)

            # test successful copy for multiple objects
            storage_client.delete(session_id, [data_keys[idx - 1]])
            # weakrefs verify the source objects get freed after the copy
            ref_data = weakref.ref(data_list[idx])
            ref_data2 = weakref.ref(data_list[idx + 1])
            proc_handler.put_objects(session_id, data_keys[idx:idx + 2],
                                     data_list[idx:idx + 2])
            data_list[idx:idx + 2] = [None, None]

            storage_client.copy_to(session_id, data_keys[idx:idx + 2],
                                   [DataStorageDevice.SHARED_MEMORY,
                                    DataStorageDevice.DISK]) \
                .then(lambda *_: test_actor.set_result(None),
                      lambda *exc: test_actor.set_result(exc, accept=False))
            self.get_result(5)

            proc_handler.delete(session_id, data_keys[idx:idx + 2])

            # first object fit in shared memory, second spilled to disk
            self.assertEqual(
                storage_manager_ref.get_data_locations(
                    session_id, data_keys[idx:idx + 2]),
                [{(0, DataStorageDevice.SHARED_MEMORY)},
                 {(0, DataStorageDevice.DISK)}])
            self.assertIsNone(ref_data())
            self.assertIsNone(ref_data2())

            # test copy with spill
            idx += 2
            proc_handler.put_objects(session_id, [data_keys[idx]],
                                     [data_list[idx]])

            storage_client.copy_to(session_id, [data_keys[idx]],
                                   [DataStorageDevice.SHARED_MEMORY]) \
                .then(lambda *_: test_actor.set_result(None),
                      lambda *exc: test_actor.set_result(exc, accept=False))
            self.get_result(5)

            self.assertEqual(
                sorted(storage_manager_ref.get_data_locations(
                    session_id, [data_keys[idx]])[0]),
                [(0, DataStorageDevice.PROC_MEMORY),
                 (0, DataStorageDevice.SHARED_MEMORY)])
def testCpuCalcSingleFetches(self):
    """Run a small add-graph through CpuCalcActor with inputs split between
    shared memory and disk, force one plasma allocation to fail, and verify
    quota cleanup, result placement and absence of leaked result objects."""
    import gc
    with self._start_calc_pool() as (_pool, test_actor):
        quota_ref = test_actor.promise_ref(MemQuotaActor.default_uid())
        calc_ref = test_actor.promise_ref(CpuCalcActor.default_uid())

        session_id = str(uuid.uuid4())
        data_list = [np.random.random((10, 10)) for _ in range(3)]
        exec_graph, fetch_chunks, add_chunk = self._build_test_graph(data_list)

        storage_client = test_actor.storage_client

        # stage all inputs into shared memory first
        for fetch_chunk, d in zip(fetch_chunks, data_list):
            self.waitp(
                storage_client.put_objects(
                    session_id, [fetch_chunk.key], [d],
                    [DataStorageDevice.SHARED_MEMORY]),
            )
        self.assertEqual(
            list(storage_client.get_data_locations(
                session_id, [fetch_chunks[0].key])[0]),
            [(0, DataStorageDevice.SHARED_MEMORY)])

        # quota needed for the calc result itself
        quota_batch = {
            build_quota_key(session_id, add_chunk.key, add_chunk.op.key):
                data_list[0].nbytes,
        }

        # move inputs 1 and 2 to disk only, and reserve quota for
        # re-loading them into memory during the calculation
        for idx in [1, 2]:
            quota_batch[build_quota_key(
                session_id, fetch_chunks[idx].key, add_chunk.op.key)] \
                = data_list[idx].nbytes

            self.waitp(
                storage_client.copy_to(
                    session_id, [fetch_chunks[idx].key],
                    [DataStorageDevice.DISK])
                .then(lambda *_: storage_client.delete(
                    session_id, [fetch_chunks[idx].key],
                    [DataStorageDevice.SHARED_MEMORY])))

            self.assertEqual(
                list(storage_client.get_data_locations(
                    session_id, [fetch_chunks[idx].key])[0]),
                [(0, DataStorageDevice.DISK)])

        self.waitp(
            quota_ref.request_batch_quota(quota_batch, _promise=True),
        )

        o_create = PlasmaSharedStore.create

        def _mock_plasma_create(store, session_id, data_key, size):
            # simulate plasma running out of space for one specific input
            if data_key == fetch_chunks[2].key:
                raise StorageFull
            return o_create(store, session_id, data_key, size)

        # ids/types of intermediate result objects, to detect leaks later
        id_type_set = set()

        def _extract_value_ref(*_):
            # record identity of the in-process result object, then drop it
            inproc_handler = storage_client.get_storage_handler(
                (0, DataStorageDevice.PROC_MEMORY))
            obj = inproc_handler.get_objects(session_id, [add_chunk.key])[0]
            id_type_set.add((id(obj), type(obj)))
            del obj

        with patch_method(PlasmaSharedStore.create, _mock_plasma_create):
            self.waitp(
                calc_ref.calc(session_id, add_chunk.op.key,
                              serialize_graph(exec_graph),
                              [add_chunk.key], _promise=True)
                .then(_extract_value_ref)
                .then(lambda *_: calc_ref.store_results(
                    session_id, add_chunk.op.key, [add_chunk.key], None,
                    _promise=True)))

        # the intermediate result object must no longer be alive anywhere
        self.assertTrue(all((id(obj), type(obj)) not in id_type_set
                            for obj in gc.get_objects()))

        # all quota bookkeeping must be fully released after the calc
        quota_dump = quota_ref.dump_data()
        self.assertEqual(len(quota_dump.allocations), 0)
        self.assertEqual(len(quota_dump.requests), 0)
        self.assertEqual(len(quota_dump.proc_sizes), 0)
        self.assertEqual(len(quota_dump.hold_sizes), 0)

        # input 0 stayed in shared memory; 1 and 2 remain on disk
        # (2 could not be promoted because of the mocked StorageFull)
        self.assertEqual(
            sorted(storage_client.get_data_locations(
                session_id, [fetch_chunks[0].key])[0]),
            [(0, DataStorageDevice.SHARED_MEMORY)])
        self.assertEqual(
            sorted(storage_client.get_data_locations(
                session_id, [fetch_chunks[1].key])[0]),
            [(0, DataStorageDevice.DISK)])
        self.assertEqual(
            sorted(storage_client.get_data_locations(
                session_id, [fetch_chunks[2].key])[0]),
            [(0, DataStorageDevice.DISK)])
        # the result landed in shared memory
        self.assertEqual(
            sorted(storage_client.get_data_locations(
                session_id, [add_chunk.key])[0]),
            [(0, DataStorageDevice.SHARED_MEMORY)])
def testSharedLoadFromBytes(self, *_):
    """Load serialized data from disk into shared memory via bytes io,
    both for a single key and for a batch large enough to exhaust shared
    storage, verifying StorageFull reports the affected keys."""
    # NOTE: removed a leftover debugging statement
    # (`import logging; logging.basicConfig(level=logging.DEBUG)`) — it
    # mutated global logging configuration for the entire test run.
    test_addr = '127.0.0.1:%d' % get_next_port()
    with self.create_pool(n_process=1, address=test_addr) as pool, \
            self.run_actor_test(pool) as test_actor:
        pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
        storage_manager_ref = pool.create_actor(
            StorageManagerActor, uid=StorageManagerActor.default_uid())
        pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())
        pool.create_actor(InProcHolderActor)
        pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(SharedHolderActor, uid=SharedHolderActor.default_uid())

        data1 = np.random.random((10, 10))
        ser_data1 = dataserializer.serialize(data1)

        session_id = str(uuid.uuid4())
        data_key1 = str(uuid.uuid4())

        storage_client = test_actor.storage_client
        handler = storage_client.get_storage_handler(
            (0, DataStorageDevice.SHARED_MEMORY))

        # load from bytes io: write serialized data to disk, then load it
        # into shared memory through the disk handler
        disk_handler = storage_client.get_storage_handler(
            (0, DataStorageDevice.DISK))
        with disk_handler.create_bytes_writer(
                session_id, data_key1, ser_data1.total_bytes) as writer:
            ser_data1.write_to(writer)

        handler.load_from_bytes_io(session_id, [data_key1], disk_handler) \
            .then(lambda *_: test_actor.set_result(None),
                  lambda *exc: test_actor.set_result(exc, accept=False))
        self.get_result(5)
        self.assertEqual(
            sorted(storage_manager_ref.get_data_locations(
                session_id, [data_key1])[0]),
            [(0, DataStorageDevice.SHARED_MEMORY),
             (0, DataStorageDevice.DISK)])

        disk_handler.delete(session_id, [data_key1])
        handler.delete(session_id, [data_key1])

        # load from bytes io till no capacity: stage 20 large arrays on
        # disk, then try to load them all into shared memory at once
        data_list = [np.random.randint(0, 32767, (655360,), np.int16)
                     for _ in range(20)]
        data_keys = [str(uuid.uuid4()) for _ in range(20)]
        for key, data in zip(data_keys, data_list):
            ser_data = dataserializer.serialize(data)
            with disk_handler.create_bytes_writer(
                    session_id, key, ser_data.total_bytes) as writer:
                ser_data.write_to(writer)

        handler.load_from_bytes_io(session_id, data_keys, disk_handler) \
            .then(lambda *_: test_actor.set_result(None),
                  lambda *exc: test_actor.set_result(exc, accept=False))

        # the batch should partially fail; collect the keys that did not fit
        affected_keys = set()
        try:
            self.get_result(5)
        except StorageFull as ex:
            affected_keys.update(ex.affected_keys)

        storage_client.delete(session_id, data_keys,
                              [DataStorageDevice.DISK])
        # some but not all keys must have been rejected
        self.assertLess(len(affected_keys), len(data_keys))
        self.assertGreater(len(affected_keys), 1)

        # rejected keys have no recorded size; loaded keys do
        for k, size in zip(data_keys,
                           storage_client.get_data_sizes(session_id,
                                                         data_keys)):
            if k in affected_keys:
                self.assertIsNone(size)
            else:
                self.assertIsNotNone(size)