Example #1
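Brings up a one-process gevent actor pool with scheduler- and worker-side cluster info actors, enables status upload on a StatusActor, and verifies that the reported slots and stats become visible through ResourceActor.get_workers_meta(); a temporary spill directory is swapped in and cleaned up around the test.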
    def testStatus(self):
        pool_address = '127.0.0.1:%d' % get_next_port()
        old_spill_dir = options.worker.spill_directory
        dir_name = options.worker.spill_directory = tempfile.mkdtemp(prefix='temp-mars-spill-')
        try:
            with create_actor_pool(n_process=1, backend='gevent', address=pool_address) as pool:
                pool.create_actor(SchedulerClusterInfoActor, schedulers=[pool_address],
                                  uid=SchedulerClusterInfoActor.default_uid())
                pool.create_actor(WorkerClusterInfoActor, schedulers=[pool_address],
                                  uid=WorkerClusterInfoActor.default_uid())

                resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
                pool.create_actor(ChunkHolderActor, self.plasma_storage_size,
                                  uid=ChunkHolderActor.default_uid())
                status_ref = pool.create_actor(StatusActor, pool_address,
                                               uid=StatusActor.default_uid())
                status_ref.enable_status_upload()

                status_ref.update_slots(dict(cpu=4))
                status_ref.update_stats(dict(min_est_finish_time=10))

                def delay_read():
                    gevent.sleep(1.5)
                    return resource_ref.get_workers_meta()

                gl = gevent.spawn(delay_read)
                gl.join()
                v = gl.value
                self.assertIsNotNone(v)

                pool.destroy_actor(status_ref)
        finally:
            options.worker.spill_directory = old_spill_dir
            shutil.rmtree(dir_name)
Example #2
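A test fixture that builds a small tileable graph (a split of the sum of two random vectors), registers scheduler actors and mock worker metadata, prepares and analyzes the graph, and yields the pool with the GraphActor reference; Example #13 consumes it as a context manager.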
    def _prepare_test_graph(self, session_id, graph_key, mock_workers):
        addr = f'127.0.0.1:{get_next_port()}'
        a1 = mt.random.random((100,))
        a2 = mt.random.random((100,))
        s = a1 + a2
        v1, v2 = mt.split(s, 2)

        graph = TileableGraph([v1.data, v2.data])
        builder = TileableGraphBuilder(graph)
        next(iter(builder.build()))

        with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
            pool.create_actor(SchedulerClusterInfoActor, [pool.cluster_info.address],
                              uid=SchedulerClusterInfoActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
            pool.create_actor(AssignerActor, uid=AssignerActor.gen_uid(session_id))
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialize_graph(graph),
                                          uid=GraphActor.gen_uid(session_id, graph_key))

            for w in mock_workers:
                resource_ref.set_worker_meta(w, dict(hardware=dict(cpu=4, cpu_total=4, memory=1600)))

            graph_ref.prepare_graph()
            graph_ref.analyze_graph()
            graph_ref.create_operand_actors(_start=False)

            yield pool, graph_ref
Example #3
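Builds a pool populated with the worker-side actors needed for transfer tests (storage manager, dispatch, quota, shared and in-process holders, receiver manager) and yields it, destroying the SharedHolderActor on exit.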
def start_transfer_test_pool(**kwargs):
    address = kwargs.pop('address')
    plasma_size = kwargs.pop('plasma_size')
    with create_actor_pool(n_process=1, backend='gevent', address=address, **kwargs) as pool:
        pool.create_actor(SchedulerClusterInfoActor, [address],
                          uid=SchedulerClusterInfoActor.default_uid())
        pool.create_actor(WorkerClusterInfoActor, [address],
                          uid=WorkerClusterInfoActor.default_uid())

        pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(StorageManagerActor, uid=StorageManagerActor.default_uid())
        pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
        pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
        pool.create_actor(QuotaActor, 1024 * 1024 * 20, uid=MemQuotaActor.default_uid())
        shared_holder_ref = pool.create_actor(SharedHolderActor,
                                              plasma_size, uid=SharedHolderActor.default_uid())
        pool.create_actor(StatusActor, address, uid=StatusActor.default_uid())
        pool.create_actor(IORunnerActor)
        pool.create_actor(StorageClientActor, uid=StorageClientActor.default_uid())
        pool.create_actor(InProcHolderActor)
        pool.create_actor(ReceiverManagerActor, uid=ReceiverManagerActor.default_uid())

        try:
            yield pool
        finally:
            shared_holder_ref.destroy()
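
The yield in the middle of this helper suggests it is wrapped with contextlib.contextmanager at its definition site (the decorator itself is not shown above). A minimal usage sketch under that assumption, with a made-up address and plasma size:

from contextlib import contextmanager

# Assumption: the original definition carries this decorator; the yield-based
# body above only makes sense when the helper is used as a context manager.
start_transfer_pool_cm = contextmanager(start_transfer_test_pool)

with start_transfer_pool_cm(address='127.0.0.1:12345',
                            plasma_size=10 * 1024 ** 2) as pool:
    # Transfer-side actors are registered here; SharedHolderActor is
    # destroyed automatically when the block exits.
    receiver_ref = pool.actor_ref(ReceiverManagerActor.default_uid())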
Example #4
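Another version of start_transfer_test_pool, built around ChunkHolderActor and SpillActor instead of the storage-manager stack; the chunk holder is destroyed after the yield.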
def start_transfer_test_pool(**kwargs):
    address = kwargs.pop('address')
    plasma_size = kwargs.pop('plasma_size')
    with create_actor_pool(n_process=1,
                           backend='gevent',
                           address=address,
                           **kwargs) as pool:
        pool.create_actor(SchedulerClusterInfoActor,
                          schedulers=[address],
                          uid=SchedulerClusterInfoActor.default_uid())
        pool.create_actor(WorkerClusterInfoActor,
                          schedulers=[address],
                          uid=WorkerClusterInfoActor.default_uid())

        pool.create_actor(PlasmaKeyMapActor,
                          uid=PlasmaKeyMapActor.default_uid())
        pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
        pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
        pool.create_actor(QuotaActor,
                          1024 * 1024 * 20,
                          uid=MemQuotaActor.default_uid())
        chunk_holder_ref = pool.create_actor(
            ChunkHolderActor, plasma_size, uid=ChunkHolderActor.default_uid())
        pool.create_actor(SpillActor)
        pool.create_actor(StatusActor, address, uid=StatusActor.default_uid())

        yield pool

        chunk_holder_ref.destroy()
Example #5
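Fixture that wires up everything CpuCalcActor needs (quota, dispatch, storage manager, shared and in-process holders) and yields the pool together with a test actor, destroying the shared holder afterwards.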
    def _start_calc_pool(self):
        mock_addr = f'127.0.0.1:{get_next_port()}'
        with self.create_pool(n_process=1, backend='gevent',
                              address=mock_addr) as pool:
            pool.create_actor(SchedulerClusterInfoActor, [mock_addr],
                              uid=SchedulerClusterInfoActor.default_uid())
            pool.create_actor(WorkerClusterInfoActor, [mock_addr],
                              uid=WorkerClusterInfoActor.default_uid())

            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
            pool.create_actor(StatusActor,
                              mock_addr,
                              uid=StatusActor.default_uid())

            pool.create_actor(PlasmaKeyMapActor,
                              uid=PlasmaKeyMapActor.default_uid())
            pool.create_actor(WorkerDaemonActor,
                              uid=WorkerDaemonActor.default_uid())
            pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
            pool.create_actor(StorageManagerActor,
                              uid=StorageManagerActor.default_uid())
            pool.create_actor(IORunnerActor)
            pool.create_actor(QuotaActor,
                              1024**2,
                              uid=MemQuotaActor.default_uid())
            shared_holder_ref = pool.create_actor(
                SharedHolderActor, uid=SharedHolderActor.default_uid())
            pool.create_actor(InProcHolderActor)
            pool.create_actor(CpuCalcActor, uid=CpuCalcActor.default_uid())

            with self.run_actor_test(pool) as test_actor:
                try:
                    yield pool, test_actor
                finally:
                    shared_holder_ref.destroy()
Example #6
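Submits a graph serialized from an empty DAG and asserts that execution finishes in the SUCCEEDED state.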
    def testEmptyGraph(self, *_):
        session_id = str(uuid.uuid4())

        addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent',
                               address=addr) as pool:
            pool.create_actor(SchedulerClusterInfoActor,
                              [pool.cluster_info.address],
                              uid=SchedulerClusterInfoActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor,
                                             uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
            pool.create_actor(AssignerActor,
                              uid=AssignerActor.gen_uid(session_id))

            resource_ref.set_worker_meta('localhost:12345',
                                         dict(hardware=dict(cpu_total=4)))
            resource_ref.set_worker_meta('localhost:23456',
                                         dict(hardware=dict(cpu_total=4)))

            graph_key = str(uuid.uuid4())
            serialized_graph = serialize_graph(DAG())

            graph_ref = pool.create_actor(GraphActor,
                                          session_id,
                                          graph_key,
                                          serialized_graph,
                                          uid=GraphActor.gen_uid(
                                              session_id, graph_key))
            graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.SUCCEEDED)
Example #7
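Boots a LocalDistributedCluster, checks the core scheduler actors exist, runs a small tensor computation through a new session, and verifies the session disappears from the session manager once closed.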
    def testLocalCluster(self, *_):
        endpoint = gen_endpoint('0.0.0.0')
        with LocalDistributedCluster(endpoint,
                                     scheduler_n_process=2,
                                     worker_n_process=3,
                                     shared_memory='20M') as cluster:
            pool = cluster.pool

            self.assertTrue(
                pool.has_actor(
                    pool.actor_ref(SchedulerClusterInfoActor.default_uid())))
            self.assertTrue(
                pool.has_actor(
                    pool.actor_ref(SessionManagerActor.default_uid())))
            self.assertTrue(
                pool.has_actor(pool.actor_ref(DispatchActor.default_uid())))

            with new_session(endpoint) as session:
                api = session._api

                t = mt.ones((3, 3), chunk_size=2)
                result = session.run(t, timeout=_exec_timeout)

                np.testing.assert_array_equal(result, np.ones((3, 3)))

            self.assertNotIn(session._session_id,
                             api.session_manager.get_sessions())
Example #8
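Simulates worker failover: back-dates a worker's metadata so it is detected as dead, checks that GraphActor learns about the removed worker and lost chunk, and that the worker is only re-admitted after the blacklist time (0.5 s) has passed.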
    def testFailoverMessage(self):
        mock_session_id = str(uuid.uuid4())
        mock_graph_key = str(uuid.uuid4())
        mock_chunk_key = str(uuid.uuid4())
        addr = '127.0.0.1:%d' % get_next_port()
        mock_worker_addr = '127.0.0.1:54132'

        options.scheduler.worker_blacklist_time = 0.5

        with create_actor_pool(n_process=1, backend='gevent',
                               address=addr) as pool:
            cluster_info_ref = pool.create_actor(
                SchedulerClusterInfoActor, [pool.cluster_info.address],
                uid=SchedulerClusterInfoActor.default_uid())
            session_manager_ref = pool.create_actor(
                SessionManagerActor, uid=SessionManagerActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor,
                                             uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())

            session_ref = pool.actor_ref(
                session_manager_ref.create_session(mock_session_id))
            chunk_meta_client = ChunkMetaClient(pool, cluster_info_ref)
            chunk_meta_client.set_chunk_meta(mock_session_id,
                                             mock_chunk_key,
                                             size=80,
                                             shape=(10, ),
                                             workers=(mock_worker_addr, ))

            with mock.patch(GraphActor.__module__ + '.' + GraphActor.__name__,
                            new=MockGraphActor):
                session_ref.submit_tileable_graph(None, mock_graph_key)
                graph_ref = pool.actor_ref(
                    GraphActor.gen_uid(mock_session_id, mock_graph_key))

                expire_time = time.time() - options.scheduler.status_timeout - 1
                resource_ref.set_worker_meta(mock_worker_addr,
                                             dict(update_time=expire_time))

                resource_ref.detect_dead_workers(_tell=True)
                pool.sleep(0.2)

                _, removes, lost_chunks = graph_ref.get_worker_change_args()
                self.assertListEqual(removes, [mock_worker_addr])
                self.assertListEqual(lost_chunks, [mock_chunk_key])

                self.assertNotIn(mock_worker_addr,
                                 resource_ref.get_workers_meta())
                resource_ref.set_worker_meta(mock_worker_addr,
                                             dict(update_time=time.time()))
                self.assertNotIn(mock_worker_addr,
                                 resource_ref.get_workers_meta())

                pool.sleep(0.4)
                resource_ref.set_worker_meta(mock_worker_addr,
                                             dict(update_time=time.time()))
                self.assertIn(mock_worker_addr,
                              resource_ref.get_workers_meta())
Example #9
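Runs a graph against mocked, artificially delayed execution actors, calls stop_graph() mid-run, and asserts the final state is CANCELLED.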
    def testOperandActorWithCancel(self, *_):
        arr = mt.random.randint(10, size=(10, 8), chunk_size=4)
        arr_add = mt.random.randint(10, size=(10, 8), chunk_size=4)
        arr2 = arr + arr_add

        session_id = str(uuid.uuid4())
        graph_key = str(uuid.uuid4())

        graph = arr2.build_graph(compose=False)

        with create_actor_pool(n_process=1, backend='gevent') as pool:
            pool.create_actor(SchedulerClusterInfoActor, [pool.cluster_info.address],
                              uid=SchedulerClusterInfoActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
            pool.create_actor(AssignerActor, uid=AssignerActor.gen_uid(session_id))
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialize_graph(graph),
                                          uid=GraphActor.gen_uid(session_id, graph_key))

            def _build_mock_ref(uid=None, address=None):
                try:
                    return pool.create_actor(
                        FakeExecutionActor, exec_delay=0.2, uid=FakeExecutionActor.gen_uid(address))
                except ActorAlreadyExist:
                    return pool.actor_ref(FakeExecutionActor.gen_uid(address))

            # handle mock objects
            OperandActor._get_raw_execution_ref.side_effect = _build_mock_ref

            mock_resource = dict(hardware=dict(cpu=4, cpu_total=4, memory=512))

            for idx in range(20):
                resource_ref.set_worker_meta('localhost:%d' % (idx + 12345), mock_resource)

            graph_ref.prepare_graph(compose=False)
            fetched_graph = graph_ref.get_chunk_graph()

            graph_ref.analyze_graph()

            final_keys = set()
            for c in fetched_graph:
                if fetched_graph.count_successors(c) == 0:
                    final_keys.add(c.op.key)

            graph_ref.create_operand_actors()
            graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_uid(session_id, graph_key))
            start_time = time.time()
            cancel_called = False
            while True:
                pool.sleep(0.05)
                if not cancel_called and time.time() > start_time + 0.3:
                    cancel_called = True
                    graph_ref.stop_graph(_tell=True)
                if time.time() - start_time > 30:
                    raise SystemError('Wait for execution finish timeout')
                if graph_meta_ref.get_state() in (GraphState.SUCCEEDED, GraphState.FAILED, GraphState.CANCELLED):
                    break
            self.assertEqual(graph_meta_ref.get_state(), GraphState.CANCELLED)
Example #10
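Patches graph-preparation steps to raise or to flip the graph into CANCELLING, verifying that execute_graph() ends in FAILED or CANCELLED accordingly.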
    def testErrorOnPrepare(self, *_):
        session_id = str(uuid.uuid4())

        addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
            pool.create_actor(SchedulerClusterInfoActor, [pool.cluster_info.address],
                              uid=SchedulerClusterInfoActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
            pool.create_actor(AssignerActor, uid=AssignerActor.default_uid())

            resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))
            resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))

            # error occurred in create_operand_actors
            graph_key = str(uuid.uuid4())
            expr = mt.random.random((8, 2), chunk_size=2) + 1
            graph = expr.build_graph(compose=False)
            serialized_graph = serialize_graph(graph)

            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_uid(session_id, graph_key))

            def _mock_raises(*_, **__):
                raise RuntimeError

            with patch_method(GraphActor.create_operand_actors, new=_mock_raises):
                with self.assertRaises(RuntimeError):
                    graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.FAILED)
            graph_ref.destroy()

            # interrupted during create_operand_actors
            graph_key = str(uuid.uuid4())
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_uid(session_id, graph_key))

            def _mock_cancels(*_, **__):
                graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_uid(session_id, graph_key))
                graph_meta_ref.set_state(GraphState.CANCELLING)

            with patch_method(GraphActor.create_operand_actors, new=_mock_cancels):
                graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.CANCELLED)

            # interrupted during previous steps
            graph_key = str(uuid.uuid4())
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_uid(session_id, graph_key))

            def _mock_cancels(*_, **__):
                graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_uid(session_id, graph_key))
                graph_meta_ref.set_state(GraphState.CANCELLING)
                return dict()

            with patch_method(GraphAnalyzer.calc_operand_assignments, new=_mock_cancels):
                graph_ref.execute_graph()
            self.assertEqual(graph_ref.get_state(), GraphState.CANCELLED)
Example #11
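Shared driver for operand tests: builds the graph, installs a mocked execution-actor factory, creates operand actors, and polls GraphMetaActor until a terminal state is reached, with a 30-second timeout.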
    def _run_operand_case(session_id, graph_key, tensor, execution_creator):
        graph = tensor.build_graph(compose=False)

        with create_actor_pool(n_process=1, backend='gevent') as pool:
            pool.create_actor(SchedulerClusterInfoActor,
                              [pool.cluster_info.address],
                              uid=SchedulerClusterInfoActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor,
                                             uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
            pool.create_actor(AssignerActor,
                              uid=AssignerActor.gen_uid(session_id))
            graph_ref = pool.create_actor(GraphActor,
                                          session_id,
                                          graph_key,
                                          serialize_graph(graph),
                                          uid=GraphActor.gen_uid(
                                              session_id, graph_key))

            def _build_mock_ref(uid=None, address=None):
                try:
                    return execution_creator(
                        pool, FakeExecutionActor.gen_uid(address))
                except ActorAlreadyExist:
                    return pool.actor_ref(FakeExecutionActor.gen_uid(address))

            # handle mock objects
            OperandActor._get_raw_execution_ref.side_effect = _build_mock_ref

            mock_resource = dict(hardware=dict(cpu=4, cpu_total=4, memory=512))

            resource_ref.set_worker_meta('localhost:12345', mock_resource)
            resource_ref.set_worker_meta('localhost:23456', mock_resource)

            graph_ref.prepare_graph()
            fetched_graph = graph_ref.get_chunk_graph()

            graph_ref.analyze_graph()

            final_keys = set()
            for c in fetched_graph:
                if fetched_graph.count_successors(c) == 0:
                    final_keys.add(c.op.key)

            graph_ref.create_operand_actors()

            graph_meta_ref = pool.actor_ref(
                GraphMetaActor.gen_uid(session_id, graph_key))
            start_time = time.time()
            while True:
                pool.sleep(0.1)
                if time.time() - start_time > 30:
                    raise SystemError('Wait for execution finish timeout')
                if graph_meta_ref.get_state() in (GraphState.SUCCEEDED,
                                                  GraphState.FAILED,
                                                  GraphState.CANCELLED):
                    break
Example #12
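setUp for a scheduler API test: creates the pool, registers cluster info, session manager and resource actors, and opens a MarsAPI client against the endpoint.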
    def setUp(self):
        endpoint = '127.0.0.1:%d' % get_next_port()
        self.endpoint = endpoint
        self.pool = create_actor_pool(n_process=1, backend='gevent', address=endpoint)
        self.pool.create_actor(SchedulerClusterInfoActor, [endpoint],
                               uid=SchedulerClusterInfoActor.default_uid())
        self.pool.create_actor(SessionManagerActor, uid=SessionManagerActor.default_uid())
        self.pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())

        self.api = MarsAPI(endpoint)
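
The pool here is created without a with block, so the test class presumably releases it in tearDown. A plausible counterpart, assuming the pool object exposes a stop() method (its use as a context manager in the other examples implies such a shutdown hook exists):

    def tearDown(self):
        # Assumed counterpart to setUp above: shut the actor pool down.
        self.pool.stop()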
Example #13
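Drives the middle operand of the fixture graph from Example #2 through its states: without input chunk metadata it stays UNSCHEDULED; once metadata is filled in, it reaches READY.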
    def testReadyState(self, *_):
        session_id = str(uuid.uuid4())
        graph_key = str(uuid.uuid4())
        mock_workers = ['localhost:12345', 'localhost:23456']

        def _mock_get_workers_meta(*_, **__):
            return dict((w, dict(hardware=dict(cpu_total=1, memory=1024**3)))
                        for w in mock_workers)

        with patch_method(ResourceActor.get_workers_meta, new=_mock_get_workers_meta) as _, \
                self._prepare_test_graph(session_id, graph_key, mock_workers) as (pool, graph_ref):
            input_op_keys, mid_op_key, output_op_keys = self._filter_graph_level_op_keys(
                graph_ref)
            meta_client = ChunkMetaClient(
                pool, pool.actor_ref(SchedulerClusterInfoActor.default_uid()))
            op_ref = pool.actor_ref(
                OperandActor.gen_uid(session_id, mid_op_key))
            resource_ref = pool.actor_ref(ResourceActor.default_uid())

            input_refs = [
                pool.actor_ref(OperandActor.gen_uid(session_id, k))
                for k in input_op_keys
            ]

            def test_entering_state(target):
                for key in input_op_keys:
                    op_ref.remove_finished_predecessor(key)

                op_ref.start_operand(OperandState.UNSCHEDULED)
                for ref in input_refs:
                    ref.start_operand(OperandState.UNSCHEDULED)

                for ref in input_refs:
                    self.assertEqual(op_ref.get_state(),
                                     OperandState.UNSCHEDULED)
                    ref.start_operand(OperandState.FINISHED)
                pool.sleep(1)
                self.assertEqual(target, op_ref.get_state())
                for w in mock_workers:
                    resource_ref.deallocate_resource(session_id, mid_op_key, w)

            # test entering state with no input meta
            test_entering_state(OperandState.UNSCHEDULED)

            # fill meta
            input_chunk_keys, _, _ = self._filter_graph_level_chunk_keys(
                graph_ref)
            for ck in input_chunk_keys:
                meta_client.set_chunk_meta(session_id,
                                           ck,
                                           workers=('localhost:12345', ),
                                           size=800)

            # test successful entering state
            test_entering_state(OperandState.READY)
Example #14
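Fixture that launches a real worker subprocess (optionally with CUDA, extra environment and modules) against a mock scheduler pool, waits for it to register, yields the pool and worker endpoint, and terminates the process on exit.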
    def _start_worker_process(self,
                              cuda=False,
                              cuda_device=None,
                              extra_env=None,
                              modules=None,
                              check_timeout=None):
        mock_scheduler_addr = f'127.0.0.1:{get_next_port()}'
        proc = None
        try:
            with create_actor_pool(n_process=1,
                                   backend='gevent',
                                   address=mock_scheduler_addr) as pool:
                pool.create_actor(SchedulerClusterInfoActor,
                                  [mock_scheduler_addr],
                                  uid=SchedulerClusterInfoActor.default_uid())
                pool.create_actor(SessionManagerActor,
                                  uid=SessionManagerActor.default_uid())

                pool.create_actor(ChunkMetaActor,
                                  uid=ChunkMetaActor.default_uid())
                resource_ref = pool.create_actor(
                    ResourceActor, uid=ResourceActor.default_uid())

                args = [
                    sys.executable, '-m', 'mars.worker', '-a', '127.0.0.1',
                    '--schedulers', mock_scheduler_addr, '--cpu-procs', '1',
                    '--cache-mem', '10m', '--spill-dir', self._spill_dir,
                    '--log-level', 'debug', '--log-format',
                    '%(asctime)-15s %(message)s', '--ignore-avail-mem'
                ]
                if modules:
                    args.extend(['--load-modules', ','.join(modules)])
                env = os.environ.copy()
                env.update(extra_env or dict())
                if cuda:
                    env['CUDA_VISIBLE_DEVICES'] = cuda_device
                proc = subprocess.Popen(args, env=env)
                worker_endpoint = self._wait_worker_ready(
                    proc, resource_ref, timeout=check_timeout)

                yield pool, worker_endpoint
        finally:
            if proc is not None and proc.poll() is None:
                proc.send_signal(signal.SIGINT)
                check_time = time.time()
                while True:
                    time.sleep(0.1)
                    if proc.poll() is not None or time.time() - check_time >= 5:
                        break
                if proc.poll() is None:
                    proc.kill()
            if os.path.exists(options.worker.plasma_socket):
                os.unlink(options.worker.plasma_socket)
Example #15
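Registers two workers, places most input chunks on endpoint1, requests resources from AssignerActor through a promise-reply actor, and asserts the operand is assigned to endpoint1.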
    def testAssignerActor(self):
        mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent', address=mock_scheduler_addr) as pool:
            cluster_info_ref = pool.create_actor(SchedulerClusterInfoActor, [pool.cluster_info.address],
                                                 uid=SchedulerClusterInfoActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())

            endpoint1 = 'localhost:12345'
            endpoint2 = 'localhost:23456'
            res = dict(hardware=dict(cpu=4, memory=4096))

            def write_mock_meta():
                resource_ref.set_worker_meta(endpoint1, res)
                resource_ref.set_worker_meta(endpoint2, res)

            g = gevent.spawn(write_mock_meta)
            g.join()

            assigner_ref = pool.create_actor(AssignerActor, uid=AssignerActor.default_uid())

            session_id = str(uuid.uuid4())
            op_key = str(uuid.uuid4())
            chunk_key1 = str(uuid.uuid4())
            chunk_key2 = str(uuid.uuid4())
            chunk_key3 = str(uuid.uuid4())

            op_info = {
                'op_name': 'test_op',
                'io_meta': dict(input_chunks=[chunk_key1, chunk_key2, chunk_key3]),
                'retries': 0,
                'optimize': {
                    'depth': 0,
                    'demand_depths': (),
                    'successor_size': 1,
                    'descendant_size': 0
                }
            }

            chunk_meta_client = ChunkMetaClient(pool, cluster_info_ref)
            chunk_meta_client.set_chunk_meta(session_id, chunk_key1, size=512, workers=(endpoint1,))
            chunk_meta_client.set_chunk_meta(session_id, chunk_key2, size=512, workers=(endpoint1,))
            chunk_meta_client.set_chunk_meta(session_id, chunk_key3, size=512, workers=(endpoint2,))

            reply_ref = pool.create_actor(PromiseReplyTestActor)
            reply_callback = ((reply_ref.uid, reply_ref.address), 'reply')
            assigner_ref.apply_for_resource(session_id, op_key, op_info, callback=reply_callback)

            while not reply_ref.get_reply():
                gevent.sleep(0.1)
            _, ret_value = reply_ref.get_reply()
            self.assertEqual(ret_value[0], endpoint1)
Example #16
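Starts a worker subprocess, kills one of its CPU-slot actor processes via WorkerDaemonActor, and waits up to 10 seconds for the daemon to restart it.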
    def testWorkerProcessRestart(self):
        mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
        proc = None
        try:
            with create_actor_pool(n_process=1,
                                   backend='gevent',
                                   address=mock_scheduler_addr) as pool:
                pool.create_actor(SchedulerClusterInfoActor,
                                  [mock_scheduler_addr],
                                  uid=SchedulerClusterInfoActor.default_uid())

                pool.create_actor(ChunkMetaActor,
                                  uid=ChunkMetaActor.default_uid())
                resource_ref = pool.create_actor(
                    ResourceActor, uid=ResourceActor.default_uid())

                proc = subprocess.Popen([
                    sys.executable, '-m', 'mars.worker', '-a', '127.0.0.1',
                    '--schedulers', mock_scheduler_addr, '--cpu-procs', '1',
                    '--cache-mem', '10m', '--spill-dir', self._spill_dir,
                    '--ignore-avail-mem'
                ])
                worker_endpoint = self._wait_worker_ready(proc, resource_ref)

                daemon_ref = pool.actor_ref(WorkerDaemonActor.default_uid(),
                                            address=worker_endpoint)
                dispatch_ref = pool.actor_ref(DispatchActor.default_uid(),
                                              address=worker_endpoint)
                cpu_slots = dispatch_ref.get_slots('cpu')
                calc_ref = pool.actor_ref(cpu_slots[0],
                                          address=worker_endpoint)
                daemon_ref.kill_actor_process(calc_ref)

                check_start = time.time()
                while not daemon_ref.is_actor_process_alive(calc_ref):
                    gevent.sleep(0.1)
                    if time.time() - check_start > 10:
                        raise TimeoutError('Check process restart timeout')
        finally:
            if proc is not None and proc.poll() is None:
                proc.send_signal(signal.SIGINT)
                check_time = time.time()
                while True:
                    time.sleep(0.1)
                    if proc.poll() is not None or time.time() - check_time >= 5:
                        break
                if proc.poll() is None:
                    proc.kill()
            if os.path.exists(options.worker.plasma_socket):
                os.unlink(options.worker.plasma_socket)
Example #17
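Starts a worker subprocess and drives WorkerProcessTestActor against it, polling for a reply with a 20-second timeout.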
    def testExecuteWorker(self):
        mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
        proc = None
        try:
            with create_actor_pool(n_process=1,
                                   backend='gevent',
                                   address=mock_scheduler_addr) as pool:
                pool.create_actor(SchedulerClusterInfoActor,
                                  [mock_scheduler_addr],
                                  uid=SchedulerClusterInfoActor.default_uid())

                pool.create_actor(ChunkMetaActor,
                                  uid=ChunkMetaActor.default_uid())
                resource_ref = pool.create_actor(
                    ResourceActor, uid=ResourceActor.default_uid())

                proc = subprocess.Popen([
                    sys.executable, '-m', 'mars.worker', '-a', '127.0.0.1',
                    '--schedulers', mock_scheduler_addr, '--cpu-procs', '1',
                    '--cache-mem', '10m', '--spill-dir', self._spill_dir,
                    '--ignore-avail-mem'
                ])
                worker_endpoint = self._wait_worker_ready(proc, resource_ref)

                test_ref = pool.create_actor(WorkerProcessTestActor)
                test_ref.run_test(worker_endpoint, _tell=True)

                check_time = time.time()
                while not test_ref.get_reply():
                    gevent.sleep(0.1)
                    if time.time() - check_time > 20:
                        raise TimeoutError('Check reply timeout')
        finally:
            if proc is not None and proc.poll() is None:
                proc.send_signal(signal.SIGINT)
                check_time = time.time()
                while True:
                    time.sleep(0.1)
                    if proc.poll() is not None or time.time() - check_time >= 5:
                        break
                if proc.poll() is None:
                    proc.kill()
            if os.path.exists(options.worker.plasma_socket):
                os.unlink(options.worker.plasma_socket)
Example #18
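A simpler variant of the worker-process fixture from Example #14, with only a no_cuda switch and no extra environment or module options.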
    def _start_worker_process(self, no_cuda=True, cuda_device=None):
        mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
        proc = None
        try:
            with create_actor_pool(n_process=1,
                                   backend='gevent',
                                   address=mock_scheduler_addr) as pool:
                pool.create_actor(SchedulerClusterInfoActor,
                                  [mock_scheduler_addr],
                                  uid=SchedulerClusterInfoActor.default_uid())

                pool.create_actor(ChunkMetaActor,
                                  uid=ChunkMetaActor.default_uid())
                resource_ref = pool.create_actor(
                    ResourceActor, uid=ResourceActor.default_uid())

                args = [
                    sys.executable, '-m', 'mars.worker', '-a', '127.0.0.1',
                    '--schedulers', mock_scheduler_addr, '--cpu-procs', '1',
                    '--cache-mem', '10m', '--spill-dir', self._spill_dir,
                    '--ignore-avail-mem'
                ]
                env = os.environ.copy()
                if no_cuda:
                    args.append('--no-cuda')
                else:
                    env['CUDA_VISIBLE_DEVICES'] = cuda_device
                proc = subprocess.Popen(args, env=env)
                worker_endpoint = self._wait_worker_ready(proc, resource_ref)

                yield pool, worker_endpoint
        finally:
            if proc is not None and proc.poll() is None:
                proc.send_signal(signal.SIGINT)
                check_time = time.time()
                while True:
                    time.sleep(0.1)
                    if proc.poll() is not None or time.time() - check_time >= 5:
                        break
                if proc.poll() is None:
                    proc.kill()
            if os.path.exists(options.worker.plasma_socket):
                os.unlink(options.worker.plasma_socket)
Example #19
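Class-level helper that registers the standard worker actor set (cluster info, plasma key map, chunk holder and meta, dispatch, quota, execution) on an existing pool, with optional resource, daemon and status actors.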
    def create_standard_actors(cls, pool, address, quota_size=None, with_daemon=True,
                               with_status=True, with_resource=False):
        quota_size = quota_size or (1024 * 1024)

        pool.create_actor(SchedulerClusterInfoActor, [address],
                          uid=SchedulerClusterInfoActor.default_uid())
        pool.create_actor(WorkerClusterInfoActor, [address],
                          uid=WorkerClusterInfoActor.default_uid())

        pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
        if with_resource:
            pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
        if with_daemon:
            pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
        if with_status:
            pool.create_actor(StatusActor, address, uid=StatusActor.default_uid())

        pool.create_actor(
            ChunkHolderActor, cls.plasma_storage_size, uid=ChunkHolderActor.default_uid())
        pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
        pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
        pool.create_actor(QuotaActor, quota_size, uid=MemQuotaActor.default_uid())
        pool.create_actor(ExecutionActor, uid=ExecutionActor.default_uid())
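
A hypothetical call site for the helper, to show how it slots into a test; the test name and address are illustrative, and create_standard_actors is assumed to be a classmethod given its cls parameter:

    def testExecuteSimpleGraph(self):  # hypothetical test using the helper above
        pool_address = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent',
                               address=pool_address) as pool:
            # Register the standard worker actor set, then look one of them up.
            self.create_standard_actors(pool, pool_address, with_status=False)
            execution_ref = pool.actor_ref(ExecutionActor.default_uid())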
Example #20
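Launches real scheduler and worker subprocesses (optionally etcd-backed and CUDA-aware) and polls SchedulerClusterInfoActor until the requested numbers of schedulers and workers have registered, with a 20-second deadline.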
    def _start_processes(self, n_schedulers=2, n_workers=2, etcd=False, cuda=False, modules=None,
                         log_scheduler=True, log_worker=True, env=None, scheduler_args=None,
                         worker_args=None, worker_cpu=1):
        old_not_errors = gevent.hub.Hub.NOT_ERROR
        gevent.hub.Hub.NOT_ERROR = (Exception,)

        scheduler_ports = [str(get_next_port()) for _ in range(n_schedulers)]
        self.scheduler_endpoints = ['127.0.0.1:' + p for p in scheduler_ports]

        append_args = []
        append_args_scheduler = scheduler_args or []
        append_args_worker = worker_args or []
        if modules:
            append_args.extend(['--load-modules', ','.join(modules)])

        if etcd:
            etcd_port = get_next_port()
            self.etcd_helper = EtcdProcessHelper(port_range_start=etcd_port)
            self.etcd_helper.run()
            options.kv_store = f'etcd://127.0.0.1:{etcd_port}'
            append_args.extend(['--kv-store', options.kv_store])
        else:
            append_args.extend(['--schedulers', ','.join(self.scheduler_endpoints)])

        if 'DUMP_GRAPH_DATA' in os.environ:
            append_args_scheduler += ['-Dscheduler.dump_graph_data=true']

        proc_env = os.environ.copy()
        if env:
            proc_env.update(env)

        self.proc_schedulers = [
            subprocess.Popen([sys.executable, '-m', 'mars.scheduler',
                              '-H', '127.0.0.1',
                              '-p', p,
                              '--log-level', 'debug' if log_scheduler else 'warning',
                              '--log-format', f'SCH{idx} %(asctime)-15s %(message)s',
                              '-Dscheduler.retry_delay=5',
                              '-Dscheduler.default_cpu_usage=0',
                              '-Dscheduler.status_timeout=10']
                             + append_args + append_args_scheduler, env=proc_env)
            for idx, p in enumerate(scheduler_ports)]
        cuda_count = resource.cuda_count()
        cuda_devices = [int(d) for d in os.environ['CUDA_VISIBLE_DEVICES'].split(',')] \
            if os.environ.get('CUDA_VISIBLE_DEVICES') else list(range(cuda_count))
        self.proc_workers = [
            subprocess.Popen([sys.executable, '-m', 'mars.worker',
                              '-a', '127.0.0.1',
                              '--cpu-procs', str(worker_cpu),
                              '--log-level', 'debug' if log_worker else 'warning',
                              '--log-format', f'WOR{idx} %(asctime)-15s %(message)s',
                              '--cache-mem', '16m',
                              '--ignore-avail-mem',
                              '--cuda-device', str(cuda_devices[idx % cuda_count]) if cuda_count else '',
                              '-Dworker.prepare_data_timeout=30']
                             + append_args + append_args_worker, env=proc_env)
            for idx in range(n_workers)
        ]

        actor_client = new_client()
        self.cluster_info = actor_client.actor_ref(
            SchedulerClusterInfoActor.default_uid(), address=self.scheduler_endpoints[0])

        check_time = time.time()
        while True:
            try:
                try:
                    started_schedulers = self.cluster_info.get_schedulers()
                except Exception as e:
                    raise ProcessRequirementUnmetError(f'Failed to get scheduler count: {e}')
                if len(started_schedulers) < n_schedulers:
                    raise ProcessRequirementUnmetError(
                        f'Schedulers do not meet requirement: {len(started_schedulers)} < {n_schedulers}.')
                actor_address = self.cluster_info.get_scheduler(SessionManagerActor.default_uid())
                self.session_manager_ref = actor_client.actor_ref(
                    SessionManagerActor.default_uid(), address=actor_address)

                actor_address = self.cluster_info.get_scheduler(ResourceActor.default_uid())
                resource_ref = actor_client.actor_ref(ResourceActor.default_uid(), address=actor_address)

                if not actor_client.has_actor(self.session_manager_ref) \
                        or resource_ref.get_worker_count() < n_workers:
                    raise ProcessRequirementUnmetError(
                        f'Workers do not meet requirement: {resource_ref.get_worker_count()} < {n_workers}')
                break
            except:  # noqa: E722
                if time.time() - check_time > 20:
                    raise
                time.sleep(0.1)

        gevent.hub.Hub.NOT_ERROR = old_not_errors
Example #21
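Like Example #15, but the assignment lands in a MockOperandActor; it also sets both workers' mem_quota to zero and verifies that apply_for_resource then fails with TimeoutError under a shortened assign_timeout.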
    def testAssignerActor(self, *_):
        mock_scheduler_addr = f'127.0.0.1:{get_next_port()}'
        with create_actor_pool(n_process=1,
                               backend='gevent',
                               address=mock_scheduler_addr) as pool:
            cluster_info_ref = pool.create_actor(
                SchedulerClusterInfoActor, [pool.cluster_info.address],
                uid=SchedulerClusterInfoActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor,
                                             uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())

            endpoint1 = 'localhost:12345'
            endpoint2 = 'localhost:23456'
            res = dict(hardware=dict(cpu=4, mem_quota=4096))

            resource_ref.set_worker_meta(endpoint1, res)
            resource_ref.set_worker_meta(endpoint2, res)

            assigner_ref = pool.create_actor(AssignerActor,
                                             uid=AssignerActor.default_uid())

            session_id = str(uuid.uuid4())
            op_key = str(uuid.uuid4())
            chunk_key1 = str(uuid.uuid4())
            chunk_key2 = str(uuid.uuid4())
            chunk_key3 = str(uuid.uuid4())

            op_info = {
                'op_name': 'test_op',
                'io_meta':
                dict(input_chunks=[chunk_key1, chunk_key2, chunk_key3]),
                'retries': 0,
                'optimize': {
                    'depth': 0,
                    'demand_depths': (),
                    'successor_size': 1,
                    'descendant_size': 0
                }
            }

            chunk_meta_client = ChunkMetaClient(pool, cluster_info_ref)
            chunk_meta_client.set_chunk_meta(session_id,
                                             chunk_key1,
                                             size=512,
                                             workers=(endpoint1, ))
            chunk_meta_client.set_chunk_meta(session_id,
                                             chunk_key2,
                                             size=512,
                                             workers=(endpoint1, ))
            chunk_meta_client.set_chunk_meta(session_id,
                                             chunk_key3,
                                             size=512,
                                             workers=(endpoint2, ))

            uid = OperandActor.gen_uid(session_id, op_key)
            reply_ref = pool.create_actor(MockOperandActor, uid=uid)
            assigner_ref.apply_for_resource(session_id, op_key, op_info)

            while not reply_ref.get_worker_ep():
                gevent.sleep(0.1)
            self.assertEqual(reply_ref.get_worker_ep(), endpoint1)

            with self.run_actor_test(pool) as test_actor, self.assertRaises(
                    TimeoutError):
                assigner_p_ref = test_actor.promise_ref(assigner_ref)

                try:
                    options.scheduler.assign_timeout = 1
                    res = dict(hardware=dict(cpu=4, mem_quota=0))
                    resource_ref.set_worker_meta(endpoint1, res)
                    resource_ref.set_worker_meta(endpoint2, res)

                    self.waitp(
                        assigner_p_ref.apply_for_resource(session_id,
                                                          op_key,
                                                          op_info,
                                                          _promise=True))
                finally:
                    options.scheduler.assign_timeout = 600
Example #22
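Another version of the cluster launcher from Example #20, with a fixed one-CPU worker configuration and no configurable scheduler or worker arguments.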
    def start_processes(self,
                        n_schedulers=2,
                        n_workers=2,
                        etcd=False,
                        cuda=False,
                        modules=None,
                        log_scheduler=True,
                        log_worker=True,
                        env=None):
        old_not_errors = gevent.hub.Hub.NOT_ERROR
        gevent.hub.Hub.NOT_ERROR = (Exception, )

        scheduler_ports = [str(get_next_port()) for _ in range(n_schedulers)]
        self.scheduler_endpoints = ['127.0.0.1:' + p for p in scheduler_ports]

        append_args = []
        append_args_scheduler = []
        append_args_worker = []
        if modules:
            append_args.extend(['--load-modules', ','.join(modules)])

        if etcd:
            etcd_port = get_next_port()
            self.etcd_helper = EtcdProcessHelper(port_range_start=etcd_port)
            self.etcd_helper.run()
            options.kv_store = 'etcd://127.0.0.1:%s' % etcd_port
            append_args.extend(['--kv-store', options.kv_store])
        else:
            append_args.extend(
                ['--schedulers', ','.join(self.scheduler_endpoints)])

        if 'DUMP_GRAPH_DATA' in os.environ:
            append_args_scheduler += ['-Dscheduler.dump_graph_data=true']
        if not cuda:
            append_args_worker += ['--no-cuda']

        proc_env = os.environ.copy()
        if env:
            proc_env.update(env)

        self.proc_schedulers = [
            subprocess.Popen([
                sys.executable, '-m', 'mars.scheduler', '-H', '127.0.0.1',
                '-p', p, '--log-level',
                'debug' if log_scheduler else 'warning', '--log-format',
                'SCH%d %%(asctime)-15s %%(message)s' % idx,
                '-Dscheduler.retry_delay=5', '-Dscheduler.default_cpu_usage=0',
                '-Dscheduler.status_timeout=10'
            ] + append_args + append_args_scheduler,
                             env=proc_env)
            for idx, p in enumerate(scheduler_ports)
        ]
        cuda_count = resource.cuda_count()
        self.proc_workers = [
            subprocess.Popen([
                sys.executable, '-m', 'mars.worker', '-a', '127.0.0.1',
                '--cpu-procs', '1', '--log-level',
                'debug' if log_worker else 'warning', '--log-format',
                'WOR%d %%(asctime)-15s %%(message)s' % idx, '--cache-mem',
                '16m', '--ignore-avail-mem', '--cuda-device',
                str(idx % cuda_count) if cuda_count else '0',
                '-Dworker.prepare_data_timeout=30'
            ] + append_args + append_args_worker,
                             env=proc_env) for idx in range(n_workers)
        ]

        actor_client = new_client()
        self.cluster_info = actor_client.actor_ref(
            SchedulerClusterInfoActor.default_uid(),
            address=self.scheduler_endpoints[0])

        check_time = time.time()
        while True:
            try:
                started_schedulers = self.cluster_info.get_schedulers()
                if len(started_schedulers) < n_schedulers:
                    raise ProcessRequirementUnmetError(
                        'Schedulers do not meet requirement: %d < %d.' %
                        (len(started_schedulers), n_schedulers))
                actor_address = self.cluster_info.get_scheduler(
                    SessionManagerActor.default_uid())
                self.session_manager_ref = actor_client.actor_ref(
                    SessionManagerActor.default_uid(), address=actor_address)

                actor_address = self.cluster_info.get_scheduler(
                    ResourceActor.default_uid())
                resource_ref = actor_client.actor_ref(
                    ResourceActor.default_uid(), address=actor_address)

                if resource_ref.get_worker_count() < n_workers:
                    raise ProcessRequirementUnmetError(
                        'Workers do not meet requirement: %d < %d.' %
                        (resource_ref.get_worker_count(), n_workers))
                break
            except:  # noqa: E722
                if time.time() - check_time > 20:
                    raise
                time.sleep(0.1)

        gevent.hub.Hub.NOT_ERROR = old_not_errors
Example #23
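Runs two scheduler pools and verifies that chunk metadata written through one ChunkMetaClient is broadcast to the other pool, and that deletions and per-session worker removals propagate too.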
    def testChunkBroadcast(self, *_):
        proc_count = 2
        endpoints = ['127.0.0.1:%d' % get_next_port() for _ in range(proc_count)]
        keys = []

        def _mock_get_scheduler(key):
            return endpoints[keys.index(key[1]) % len(endpoints)]

        ChunkMetaClient.get_scheduler.side_effect = _mock_get_scheduler

        session_id = str(uuid.uuid4())
        with create_actor_pool(n_process=1, backend='gevent', address=endpoints[0]) as pool1:
            cluster_info1 = pool1.create_actor(SchedulerClusterInfoActor, endpoints,
                                               uid=SchedulerClusterInfoActor.default_uid())
            pool1.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())

            with create_actor_pool(n_process=1, backend='gevent', address=endpoints[1]) as pool2:
                cluster_info2 = pool2.create_actor(SchedulerClusterInfoActor, endpoints,
                                                   uid=SchedulerClusterInfoActor.default_uid())
                pool2.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())

                actor_client = new_client()
                client1 = ChunkMetaClient(actor_client, actor_client.actor_ref(cluster_info1))
                client2 = ChunkMetaClient(actor_client, actor_client.actor_ref(cluster_info2))
                local_ref1 = actor_client.actor_ref(ChunkMetaActor.default_uid(), address=endpoints[0])
                local_ref2 = actor_client.actor_ref(ChunkMetaActor.default_uid(), address=endpoints[1])

                key1 = str(uuid.uuid4())
                key2 = str(uuid.uuid4())
                key3 = str(uuid.uuid4())
                keys = [key1, key2, key3]

                client1.set_chunk_broadcasts(session_id, key1, [endpoints[1]])
                client1.set_chunk_size(session_id, key1, 512)
                client1.set_chunk_shape(session_id, key1, (10,) * 2)
                client1.add_worker(session_id, key1, 'abc')
                client2.set_chunk_broadcasts(session_id, key2, [endpoints[0]])
                client2.set_chunk_size(session_id, key2, 512)
                client1.set_chunk_shape(session_id, key2, (10,) * 2)
                client2.add_worker(session_id, key2, 'def')
                pool2.sleep(0.1)

                self.assertEqual(local_ref1.get_chunk_meta(session_id, key1).chunk_size, 512)
                self.assertEqual(local_ref1.get_chunk_meta(session_id, key1).chunk_shape, (10,) * 2)
                self.assertEqual(local_ref1.get_chunk_broadcasts(session_id, key1), [endpoints[1]])
                self.assertEqual(local_ref2.get_chunk_meta(session_id, key1).chunk_size, 512)
                self.assertEqual(local_ref2.get_chunk_meta(session_id, key1).chunk_shape, (10,) * 2)
                self.assertEqual(local_ref2.get_chunk_broadcasts(session_id, key2), [endpoints[0]])

                client1.batch_set_chunk_broadcasts(session_id, [key3], [[endpoints[1]]])
                meta3 = WorkerMeta(chunk_size=512, chunk_shape=(10,) * 2, workers=(endpoints[0],))
                local_ref1.batch_set_chunk_meta(session_id, [key3], [meta3])
                self.assertEqual(local_ref2.get_chunk_meta(session_id, key3).chunk_size, 512)
                self.assertEqual(local_ref2.get_chunk_meta(session_id, key3).chunk_shape, (10,) * 2)

                client1.delete_meta(session_id, key1)
                pool2.sleep(0.1)

                self.assertIsNone(local_ref1.get_chunk_meta(session_id, key1))
                self.assertIsNone(local_ref2.get_chunk_meta(session_id, key1))
                self.assertIsNone(local_ref1.get_chunk_broadcasts(session_id, key1))

                local_ref1.remove_workers_in_session(session_id, ['def'])
                local_ref2.remove_workers_in_session(session_id, ['def'])
                pool2.sleep(0.1)

                self.assertIsNone(local_ref1.get_chunk_meta(session_id, key2))
                self.assertIsNone(local_ref2.get_chunk_meta(session_id, key2))
                self.assertIsNone(local_ref2.get_chunk_broadcasts(session_id, key2))
Example #24
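Exercises ChunkMetaClient across two pools: chunk sizes, shapes and worker lists are readable from either side, batch getters and setters work, and deleting metadata or removing a session's workers clears it everywhere.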
    def testChunkMetaActors(self, *_):
        proc_count = 2
        endpoints = ['127.0.0.1:%d' % get_next_port() for _ in range(proc_count)]
        keys = []

        def _mock_get_scheduler(key):
            return endpoints[keys.index(key[1]) % len(endpoints)]

        ChunkMetaClient.get_scheduler.side_effect = _mock_get_scheduler

        session1 = str(uuid.uuid4())
        session2 = str(uuid.uuid4())
        with create_actor_pool(n_process=1, backend='gevent', address=endpoints[0]) as pool1:
            cluster_info1 = pool1.create_actor(SchedulerClusterInfoActor, endpoints,
                                               uid=SchedulerClusterInfoActor.default_uid())
            pool1.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())

            with create_actor_pool(n_process=1, backend='gevent', address=endpoints[1]) as pool2:
                cluster_info2 = pool2.create_actor(SchedulerClusterInfoActor, endpoints,
                                                   uid=SchedulerClusterInfoActor.default_uid())
                pool2.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())

                actor_client = new_client()
                client1 = ChunkMetaClient(actor_client, actor_client.actor_ref(cluster_info1))
                client2 = ChunkMetaClient(actor_client, actor_client.actor_ref(cluster_info2))

                loc_ref1 = actor_client.actor_ref(ChunkMetaActor.default_uid(), address=endpoints[0])
                loc_ref2 = actor_client.actor_ref(ChunkMetaActor.default_uid(), address=endpoints[1])

                key1 = (str(uuid.uuid4()), str(uuid.uuid4()))
                key2 = str(uuid.uuid4())
                key3 = str(uuid.uuid4())
                key4 = (str(uuid.uuid4()), str(uuid.uuid4()))
                key5 = str(uuid.uuid4())
                key6 = str(uuid.uuid4())
                keys = [key1, key2, key3, key4, key5, key6]
                client1.set_chunk_size(session1, key1, 512)
                client2.set_chunk_size(session1, key2, 1024)
                client2.set_chunk_size(session2, key3, 1024)

                self.assertEqual(client1.get_chunk_size(session1, key1), 512)
                self.assertEqual(client2.get_chunk_size(session1, key2), 1024)
                self.assertEqual(client1.get_chunk_size(session1, key2), 1024)
                self.assertEqual(client2.get_chunk_size(session1, key1), 512)

                self.assertListEqual(client1.batch_get_chunk_size(session1, [key1, key2]), [512, 1024])
                self.assertListEqual(client2.batch_get_chunk_size(session1, [key1, key2]), [512, 1024])

                client1.set_chunk_shape(session1, key1, (10,))
                client2.set_chunk_shape(session1, key2, (10,) * 2)
                client2.set_chunk_shape(session2, key3, (10,) * 2)

                self.assertEqual(client1.get_chunk_shape(session1, key1), (10,))
                self.assertEqual(client2.get_chunk_shape(session1, key2), (10,) * 2)
                self.assertEqual(client1.get_chunk_shape(session1, key2), (10,) * 2)
                self.assertEqual(client2.get_chunk_shape(session1, key1), (10,))

                self.assertListEqual(client1.batch_get_chunk_shape(session1, [key1, key2]), [(10,), (10,) * 2])
                self.assertListEqual(client2.batch_get_chunk_shape(session1, [key1, key2]), [(10,), (10,) * 2])

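                # a client in a third pool that hosts no ChunkMetaActor of its
                # own can still resolve meta through the scheduler endpoints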
                mock_endpoint = '127.0.0.1:%d' % get_next_port()
                with create_actor_pool(n_process=1, backend='gevent', address=mock_endpoint) as pool3:
                    cluster_info3 = pool3.create_actor(SchedulerClusterInfoActor, endpoints,
                                                       uid=SchedulerClusterInfoActor.default_uid())
                    client3 = ChunkMetaClient(actor_client, actor_client.actor_ref(cluster_info3))
                    self.assertListEqual(client3.batch_get_chunk_shape(session1, [key1, key2]), [(10,), (10,) * 2])

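                # register the workers that hold each chunk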
                client1.add_worker(session1, key1, 'abc')
                client1.add_worker(session1, key1, 'def')
                client2.add_worker(session1, key2, 'ghi')

                client1.add_worker(session2, key3, 'ghi')

                self.assertEqual(sorted(client1.get_workers(session1, key1)), sorted(('abc', 'def')))
                self.assertEqual(sorted(client2.get_workers(session1, key2)), sorted(('ghi',)))

                batch_result = client1.batch_get_workers(session1, [key1, key2])
                self.assertEqual(sorted(batch_result[0]), sorted(('abc', 'def')))
                self.assertEqual(sorted(batch_result[1]), sorted(('ghi',)))

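                # removing a worker from one session only drops that session's
                # chunks; the same worker's chunks in other sessions survive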
                affected = []
                for loc_ref in (loc_ref1, loc_ref2):
                    affected.extend(loc_ref.remove_workers_in_session(session2, ['ghi']))
                self.assertEqual(affected, [key3])
                self.assertEqual(sorted(client1.get_workers(session1, key2)), sorted(('ghi',)))
                self.assertIsNone(client1.get_workers(session2, key3))

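                # once meta is deleted, both point and batch lookups return None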
                client1.delete_meta(session1, key1)
                self.assertIsNone(client1.get_workers(session1, key1))
                self.assertIsNone(client1.batch_get_chunk_size(session1, [key1, key2])[0])
                self.assertIsNone(client1.batch_get_workers(session1, [key1, key2])[0])

                client2.batch_delete_meta(session1, [key1, key2])
                self.assertIsNone(client1.get_workers(session1, key2))
                self.assertIsNone(client1.batch_get_chunk_size(session1, [key1, key2])[1])
                self.assertIsNone(client1.batch_get_workers(session1, [key1, key2])[1])

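                # WorkerMeta records can be batch-set either directly on a local
                # actor or through the client, which routes each key by scheduler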
                meta4 = WorkerMeta(chunk_size=512, chunk_shape=(10,) * 2, workers=(endpoints[0],))
                loc_ref2.batch_set_chunk_meta(session1, [key4], [meta4])
                self.assertEqual(loc_ref2.get_chunk_meta(session1, key4).chunk_size, 512)
                self.assertEqual(loc_ref2.get_chunk_meta(session1, key4).chunk_shape, (10,) * 2)

                meta5 = WorkerMeta(chunk_size=512, chunk_shape=(10,) * 2, workers=(endpoints[0],))
                meta6 = WorkerMeta(chunk_size=512, chunk_shape=(10,) * 2, workers=(endpoints[0],))
                client1.batch_set_chunk_meta(session1, [key5, key6], [meta5, meta6])
                self.assertEqual(loc_ref1.get_chunk_meta(session1, key5).chunk_size, 512)
                self.assertEqual(loc_ref2.get_chunk_meta(session1, key6).chunk_size, 512)
Example #25
    def prepare_graph_in_pool(self, expr, clean_io_meta=True, compose=False):
        session_id = str(uuid.uuid4())
        graph_key = str(uuid.uuid4())

        graph = expr.build_graph(compose=compose)
        serialized_graph = serialize_graph(graph)
        chunked_graph = expr.build_graph(compose=compose, tiled=True)

        addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:
            pool.create_actor(SchedulerClusterInfoActor, [pool.cluster_info.address],
                              uid=SchedulerClusterInfoActor.default_uid())
            resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_uid())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
            pool.create_actor(AssignerActor, uid=AssignerActor.gen_uid(session_id))
            graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,
                                          uid=GraphActor.gen_uid(session_id, graph_key))

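            # tile the graph inside the actor and check that the result matches
            # the locally tiled chunk graph in size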
            graph_ref.prepare_graph(compose=compose)
            fetched_graph = graph_ref.get_chunk_graph()
            self.assertIsNotNone(fetched_graph)
            self.assertEqual(len(chunked_graph), len(fetched_graph))

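            # an analysis pass without placement fills in per-operand
            # optimization stats (depth, successor and descendant counts)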
            graph_ref.analyze_graph(do_placement=False)
            op_infos = graph_ref.get_operand_info()
            for n in fetched_graph:
                depth = op_infos[n.op.key]['optimize']['depth']
                self.assertIsNotNone(depth)
                successor_size = op_infos[n.op.key]['optimize']['successor_size']
                self.assertIsNotNone(successor_size)
                descendant_size = op_infos[n.op.key]['optimize']['descendant_size']
                self.assertIsNotNone(descendant_size)

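            # once worker resources are registered, a full analysis pass can
            # assign a target worker to every initial operand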
            resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))
            resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))

            graph_ref.analyze_graph()
            op_infos = graph_ref.get_operand_info()

            for n in fetched_graph:
                if fetched_graph.count_predecessors(n) != 0:
                    continue
                target_worker = op_infos[n.op.key]['target_worker']
                self.assertIsNotNone(target_worker)

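            # create the operand actors; io_meta in the operand info is kept
            # only when clean_io_meta is False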
            graph_ref.create_operand_actors(_clean_info=clean_io_meta)
            op_infos = graph_ref.get_operand_info()

            if not clean_io_meta:
                orig_metas = dict()
                for n in fetched_graph:
                    try:
                        meta = orig_metas[n.op.key]
                    except KeyError:
                        meta = orig_metas[n.op.key] = dict(
                            predecessors=set(), successors=set(),
                            input_chunks=set(), chunks=set())
                    meta['predecessors'].update(pn.op.key for pn in fetched_graph.iter_predecessors(n))
                    meta['successors'].update(sn.op.key for sn in fetched_graph.iter_successors(n))
                    meta['input_chunks'].update(pn.key for pn in fetched_graph.iter_predecessors(n))
                    meta['chunks'].update(c.key for c in n.op.outputs)

                for n in fetched_graph:
                    self.assertEqual(op_infos[n.op.key]['op_name'], type(n.op).__name__)

                    io_meta = op_infos[n.op.key]['io_meta']
                    orig_io_meta = orig_metas[n.op.key]

                    self.assertSetEqual(set(io_meta['predecessors']), set(orig_io_meta['predecessors']))
                    self.assertSetEqual(set(io_meta['successors']), set(orig_io_meta['successors']))
                    self.assertSetEqual(set(io_meta['input_chunks']), set(orig_io_meta['input_chunks']))
                    self.assertSetEqual(set(io_meta['chunks']), set(orig_io_meta['chunks']))

            yield pool, graph_ref
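
Since the helper ends with a yield, the surrounding test presumably enters it as a context manager. Below is a minimal usage sketch under that assumption; the contextlib wrapping and the mt expression are illustrative, not taken from the snippet above:

    import contextlib

    # hypothetical: wrap the generator helper so it can drive a `with` block
    prepare_cm = contextlib.contextmanager(prepare_graph_in_pool)

    # inside a test method of the same class:
    expr = mt.random.random((100,)) + 1
    with prepare_cm(self, expr, clean_io_meta=False) as (pool, graph_ref):
        # operand actors exist; the io_meta checks above have already run
        op_infos = graph_ref.get_operand_info()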