def execute_case():
    pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],
                      uid=ClusterInfoActor.default_name())
    resource_ref = pool.create_actor(
        ResourceActor, uid=ResourceActor.default_name())
    pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())
    pool.create_actor(AssignerActor, uid=AssignerActor.gen_name(session_id))
    graph_ref = pool.create_actor(
        GraphActor, session_id, graph_key, serialize_graph(graph),
        uid=GraphActor.gen_name(session_id, graph_key))
    execution_ref = pool.create_actor(FakeExecutionActor, sleep=1)

    # handle mock objects: route operand execution to the fake execution actor
    OperandActor._get_raw_execution_ref.side_effect = lambda: execution_ref

    mock_resource = dict(hardware=dict(cpu=4, cpu_total=4, memory=512))

    def write_mock_meta():
        # register two mock workers so initial chunks can be assigned
        resource_ref.set_worker_meta('localhost:12345', mock_resource)
        resource_ref.set_worker_meta('localhost:23456', mock_resource)

    v = gevent.spawn(write_mock_meta)
    v.join()

    graph_ref.prepare_graph()
    fetched_graph = graph_ref.get_chunk_graph()

    graph_ref.scan_node()
    graph_ref.place_initial_chunks()

    # collect the keys of terminal operands (chunks without successors)
    final_keys = set()
    for c in fetched_graph:
        if fetched_graph.count_successors(c) == 0:
            final_keys.add(c.op.key)

    graph_ref.create_operand_actors()

    graph_meta_ref = pool.actor_ref(
        GraphMetaActor.gen_name(session_id, graph_key))

    # poll the graph state; request cancellation after 0.8s, then wait for a
    # terminal state or fail after 30s
    start_time = time.time()
    cancel_called = False
    while True:
        gevent.sleep(0.1)
        if not cancel_called and time.time() > start_time + 0.8:
            cancel_called = True
            graph_ref.stop_graph(_tell=True)
        if time.time() - start_time > 30:
            raise SystemError('Wait for execution finish timeout')
        if graph_meta_ref.get_state() in (GraphState.SUCCEEDED, GraphState.FAILED,
                                          GraphState.CANCELLED):
            break
def execute_case():
    pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],
                      uid=ClusterInfoActor.default_name())
    resource_ref = pool.create_actor(
        ResourceActor, uid=ResourceActor.default_name())
    kv_store_ref = pool.create_actor(
        KVStoreActor, uid=KVStoreActor.default_name())
    pool.create_actor(AssignerActor, uid=AssignerActor.gen_name(session_id))
    graph_ref = pool.create_actor(
        GraphActor, session_id, graph_key, serialize_graph(graph),
        uid=GraphActor.gen_name(session_id, graph_key))
    execution_ref = execution_creator(pool)

    # handle mock objects: route operand execution to the created execution actor
    OperandActor._get_raw_execution_ref.side_effect = lambda: execution_ref

    mock_resource = dict(hardware=dict(cpu=4, cpu_total=4, memory=512))

    def write_mock_meta():
        # register two mock workers so initial chunks can be assigned
        resource_ref.set_worker_meta('localhost:12345', mock_resource)
        resource_ref.set_worker_meta('localhost:23456', mock_resource)

    v = gevent.spawn(write_mock_meta)
    v.join()

    graph_ref.prepare_graph()
    graph_data = kv_store_ref.read(
        '/sessions/%s/graphs/%s/chunk_graph' % (session_id, graph_key)).value
    fetched_graph = deserialize_graph(graph_data)

    graph_ref.scan_node()
    graph_ref.place_initial_chunks()

    # collect the keys of terminal operands (chunks without successors)
    final_keys = set()
    for c in fetched_graph:
        if fetched_graph.count_successors(c) == 0:
            final_keys.add(c.op.key)

    graph_ref.create_operand_actors()

    # poll the graph state recorded in the kv-store until it becomes terminal
    # or the 30s timeout elapses
    start_time = time.time()
    while True:
        gevent.sleep(0.1)
        if time.time() - start_time > 30:
            raise SystemError('Wait for execution finish timeout')
        if kv_store_ref.read('/sessions/%s/graph/%s/state' % (session_id, graph_key)).value.lower() \
                in ('succeeded', 'failed', 'cancelled'):
            break
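# Both execute_case helpers above assign a side_effect directly on
# OperandActor._get_raw_execution_ref, which only works once that method has
# been replaced by a mock (typically via a patch decorator on the enclosing
# test).  Below is a minimal, self-contained sketch of that pattern using
# plain unittest.mock; DummyOperand and fake_ref are hypothetical stand-ins,
# not Mars code.
def _mocked_execution_ref_sketch():
    from unittest import mock

    class DummyOperand(object):
        def _get_raw_execution_ref(self):
            raise NotImplementedError

    with mock.patch.object(DummyOperand, '_get_raw_execution_ref') as mocked:
        fake_ref = object()
        # the patched method now returns whatever side_effect produces
        mocked.side_effect = lambda: fake_ref
        assert DummyOperand()._get_raw_execution_ref() is fake_ref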
def testGraphActor(self):
    session_id = str(uuid.uuid4())
    graph_key = str(uuid.uuid4())

    arr = mt.random.randint(10, size=(10, 8), chunk_size=4)
    arr_add = mt.random.randint(10, size=(10, 8), chunk_size=4)
    arr2 = arr + arr_add
    graph = arr2.build_graph(compose=False)
    serialized_graph = serialize_graph(graph)
    chunked_graph = arr2.build_graph(compose=False, tiled=True)

    addr = '127.0.0.1:%d' % get_next_port()
    with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
        pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],
                          uid=ClusterInfoActor.default_name())
        resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_name())
        pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())
        pool.create_actor(AssignerActor, uid=AssignerActor.gen_name(session_id))
        graph_ref = pool.create_actor(
            GraphActor, session_id, graph_key, serialized_graph,
            uid=GraphActor.gen_name(session_id, graph_key))

        # the prepared chunk graph should match the locally tiled graph in size
        graph_ref.prepare_graph(compose=False)
        fetched_graph = graph_ref.get_chunk_graph()
        self.assertIsNotNone(fetched_graph)
        self.assertEqual(len(chunked_graph), len(fetched_graph))

        # after scanning, every operand should carry optimizer statistics
        graph_ref.scan_node()
        op_infos = graph_ref.get_operand_info()
        for n in fetched_graph:
            depth = op_infos[n.op.key]['optimize']['depth']
            self.assertIsNotNone(depth)
            successor_size = op_infos[n.op.key]['optimize']['successor_size']
            self.assertIsNotNone(successor_size)
            descendant_size = op_infos[n.op.key]['optimize']['descendant_size']
            self.assertIsNotNone(descendant_size)

        resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))
        resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))

        # initial (predecessor-free) chunks should be assigned a target worker
        graph_ref.place_initial_chunks()
        op_infos = graph_ref.get_operand_info()
        for n in fetched_graph:
            if fetched_graph.count_predecessors(n) != 0:
                continue
            target_worker = op_infos[n.op.key]['target_worker']
            self.assertIsNotNone(target_worker)

        # io_meta recorded for every operand should agree with the chunk graph
        graph_ref.create_operand_actors(_clean_io_meta=False)
        op_infos = graph_ref.get_operand_info()
        for n in fetched_graph:
            self.assertEqual(op_infos[n.op.key]['op_name'], type(n.op).__name__)

            io_meta = op_infos[n.op.key]['io_meta']
            orig_io_meta = dict(
                predecessors=list(set(pn.op.key for pn in fetched_graph.iter_predecessors(n))),
                successors=list(set(sn.op.key for sn in fetched_graph.iter_successors(n))),
                input_chunks=list(set(pn.key for pn in fetched_graph.iter_predecessors(n))),
                chunks=list(c.key for c in n.op.outputs),
            )
            self.assertSetEqual(set(io_meta['predecessors']), set(orig_io_meta['predecessors']))
            self.assertSetEqual(set(io_meta['successors']), set(orig_io_meta['successors']))
            self.assertSetEqual(set(io_meta['input_chunks']), set(orig_io_meta['input_chunks']))
            self.assertSetEqual(set(io_meta['chunks']), set(orig_io_meta['chunks']))

            self.assertEqual(op_infos[n.op.key]['output_size'],
                             sum(ch.nbytes for ch in n.op.outputs))
def testSameKey(self, *_):
    session_id = str(uuid.uuid4())
    graph_key = str(uuid.uuid4())

    # concatenate a tensor with itself so the graph contains chunks sharing identical keys
    arr = mt.ones((5, 5), chunks=3)
    arr2 = mt.concatenate((arr, arr))
    graph = arr2.build_graph(compose=False)
    serialized_graph = serialize_graph(graph)
    chunked_graph = arr2.build_graph(compose=False, tiled=True)

    with create_actor_pool(n_process=1, backend='gevent') as pool:
        pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],
                          uid=ClusterInfoActor.default_name())
        resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_name())
        kv_store_ref = pool.create_actor(KVStoreActor, uid=KVStoreActor.default_name())
        pool.create_actor(AssignerActor, uid=AssignerActor.gen_name(session_id))
        graph_ref = pool.create_actor(
            GraphActor, session_id, graph_key, serialized_graph,
            uid=GraphActor.gen_name(session_id, graph_key))

        # the chunk graph stored in the kv-store should match the locally tiled graph in size
        graph_ref.prepare_graph(compose=False)
        graph_data = kv_store_ref.read(
            '/sessions/%s/graphs/%s/chunk_graph' % (session_id, graph_key)).value
        self.assertIsNotNone(graph_data)
        fetched_graph = deserialize_graph(graph_data)
        self.assertEqual(len(chunked_graph), len(fetched_graph))

        # after scanning, every operand should carry optimizer statistics
        graph_ref.scan_node()
        op_infos = graph_ref.get_operand_info()
        for n in fetched_graph:
            depth = op_infos[n.op.key]['optimize']['depth']
            self.assertIsNotNone(depth)
            successor_size = op_infos[n.op.key]['optimize']['successor_size']
            self.assertIsNotNone(successor_size)
            descendant_size = op_infos[n.op.key]['optimize']['descendant_size']
            self.assertIsNotNone(descendant_size)

        def write_mock_meta():
            # register two mock workers so initial chunks can be assigned
            resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))
            resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))

        v = gevent.spawn(write_mock_meta)
        v.join()

        # initial (predecessor-free) chunks should be assigned a target worker
        graph_ref.place_initial_chunks()
        op_infos = graph_ref.get_operand_info()
        for n in fetched_graph:
            if fetched_graph.count_predecessors(n) != 0:
                continue
            target_worker = op_infos[n.op.key]['target_worker']
            self.assertIsNotNone(target_worker)

        # io_meta recorded for every operand should agree with the chunk graph
        graph_ref.create_operand_actors()
        op_infos = graph_ref.get_operand_info()
        for n in fetched_graph:
            self.assertEqual(op_infos[n.op.key]['op_name'], type(n.op).__name__)

            io_meta = op_infos[n.op.key]['io_meta']
            orig_io_meta = dict(
                predecessors=list(set(pn.op.key for pn in fetched_graph.iter_predecessors(n))),
                successors=list(set(sn.op.key for sn in fetched_graph.iter_successors(n))),
                input_chunks=list(set(pn.key for pn in fetched_graph.iter_predecessors(n))),
                chunks=list(c.key for c in n.op.outputs),
            )
            self.assertSetEqual(set(io_meta['predecessors']), set(orig_io_meta['predecessors']))
            self.assertSetEqual(set(io_meta['successors']), set(orig_io_meta['successors']))
            self.assertSetEqual(set(io_meta['input_chunks']), set(orig_io_meta['input_chunks']))
            self.assertSetEqual(set(io_meta['chunks']), set(orig_io_meta['chunks']))

            self.assertEqual(op_infos[n.op.key]['output_size'],
                             sum(ch.nbytes for ch in n.op.outputs))
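# Standalone illustration (not Mars code) of the relationship the io_meta
# assertions in the two tests above verify: for every node, the recorded
# predecessor and successor operand keys must agree with what the chunk graph
# itself reports.  The dict-based graph and helper functions below are
# hypothetical stand-ins for the iter_predecessors / iter_successors API used
# by the tests.
def _io_meta_sketch():
    # successors maps each operand key to the keys of operands it feeds into
    successors = {'a': ['c'], 'b': ['c'], 'c': []}

    def iter_successors(key):
        return successors[key]

    def iter_predecessors(key):
        return [k for k, outs in successors.items() if key in outs]

    io_meta = {k: dict(predecessors=set(iter_predecessors(k)),
                       successors=set(iter_successors(k)))
               for k in successors}

    assert io_meta['c']['predecessors'] == {'a', 'b'}
    assert io_meta['a']['successors'] == {'c'}
    assert io_meta['a']['predecessors'] == set()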