Example #1
    def QuitWorker(self, request, context):
        self._quit_event.set()
        return control_pb2.Void()
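
Not shown in this example is how `_quit_event` gets created and waited on. A minimal sketch of that wiring, assuming `_quit_event` is a `threading.Event`, the servicer subclasses the generated `WorkerServiceServicer`, and the proto import paths follow the gRPC benchmark layout (`wait_for_quit` is an illustrative helper name, not necessarily the real one):

    import threading

    from src.proto.grpc.testing import control_pb2              # assumed import path
    from src.proto.grpc.testing import worker_service_pb2_grpc  # assumed import path


    class WorkerServer(worker_service_pb2_grpc.WorkerServiceServicer):
        """Sketch of a worker servicer that exits when QuitWorker is called."""

        def __init__(self):
            self._quit_event = threading.Event()

        def QuitWorker(self, request, context):
            # Signal the thread running the server that the driver asked us
            # to shut down.
            self._quit_event.set()
            return control_pb2.Void()

        def wait_for_quit(self):
            # Called by the main thread after the server starts; blocks until
            # QuitWorker has been invoked over RPC.
            self._quit_event.wait()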
Example #2
    async def RunClient(self, request_iterator, context):
        config_request = await context.read()
        config = config_request.setup
        _LOGGER.info('Received ClientConfig: %s', config)

        if config.client_processes <= 0:
            _LOGGER.info('client_processes can\'t be [%d]',
                         config.client_processes)
            _LOGGER.info('Using client_processes == [%d]', _NUM_CORES)
            config.client_processes = _NUM_CORES

        if config.client_processes == 1:
            # If client_processes == 1, run the benchmark in this process.
            await self._run_single_client(config, request_iterator, context)
        else:
            # If client_processes > 1, offload the work to other processes.
            sub_workers = await asyncio.gather(
                *(_create_sub_worker()
                  for _ in range(config.client_processes)))

            calls = [worker.stub.RunClient() for worker in sub_workers]

            config_request.setup.client_processes = 1

            for call in calls:
                await call.write(config_request)
                # An empty status indicates the peer is ready
                await call.read()

            start_time = time.monotonic()
            result = histogram.Histogram(config.histogram_params.resolution,
                                         config.histogram_params.max_possible)
            end_time = time.monotonic()
            await context.write(
                _get_client_status(start_time, end_time, result))

            async for request in request_iterator:
                end_time = time.monotonic()

                for call in calls:
                    _LOGGER.debug('Fetching status...')
                    await call.write(request)
                    sub_status = await call.read()
                    result.merge(sub_status.stats.latencies)
                    _LOGGER.debug('Update from sub worker count=[%d]',
                                  sub_status.stats.latencies.count)

                status = _get_client_status(start_time, end_time, result)
                if request.mark.reset:
                    result.reset()
                    start_time = time.monotonic()
                _LOGGER.debug('Reporting count=[%d]',
                              status.stats.latencies.count)
                await context.write(status)

            for call in calls:
                await call.done_writing()

            for worker in sub_workers:
                await worker.stub.QuitWorker(control_pb2.Void())
                await worker.channel.close()
                _LOGGER.info('Waiting for sub worker [%s] to quit...', worker)
                await worker.process.wait()
                _LOGGER.info('Sub worker [%s] quit', worker)
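
RunClient leans on module-level helpers that the example does not include. A hedged sketch of `_get_client_status`, assuming `histogram.Histogram.get_data()` returns a `stats_pb2.HistogramData` and that `stats_pb2`/`control_pb2` are the generated benchmark proto modules (the real helper may compute user and system time differently):

    def _get_client_status(start_time, end_time, qps_data):
        # Package the merged latency histogram into a ClientStatus message.
        latencies = qps_data.get_data()
        elapsed = end_time - start_time
        stats = stats_pb2.ClientStats(latencies=latencies,
                                      time_elapsed=elapsed,
                                      time_user=elapsed,
                                      time_system=elapsed)
        return control_pb2.ClientStatus(stats=stats)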
Example #3
    async def QuitWorker(self, unused_request, unused_context):
        _LOGGER.info('QuitWorker command received.')
        self._quit_event.set()
        return control_pb2.Void()
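
In the asyncio variant, `_quit_event` would typically be an `asyncio.Event` awaited by the coroutine that started the server. A sketch of that wiring, assuming `aio` is `grpc.aio`, `WorkerServicer` is the class holding the method above, and `wait_for_quit`/`run_worker_server` are illustrative names:

    async def run_worker_server(port):
        # Serve until QuitWorker sets the quit event, then stop the server.
        server = aio.server()
        servicer = WorkerServicer()
        worker_service_pb2_grpc.add_WorkerServiceServicer_to_server(
            servicer, server)
        server.add_insecure_port('localhost:%s' % port)
        await server.start()
        await servicer.wait_for_quit()  # e.g. await self._quit_event.wait()
        await server.stop(None)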
Example #4
    async def RunServer(self, request_iterator, context):
        config_request = await context.read()
        config = config_request.setup
        _LOGGER.info('Received ServerConfig: %s', config)

        if config.server_processes <= 0:
            _LOGGER.info('Using server_processes == [%d]', _NUM_CORES)
            config.server_processes = _NUM_CORES

        if config.port == 0:
            config.port = _pick_an_unused_port()
        _LOGGER.info('Port picked [%d]', config.port)

        if config.server_processes == 1:
            # If server_processes == 1, start the server in this process.
            await self._run_single_server(config, request_iterator, context)
        else:
            # If server_processes > 1, offload to other processes.
            sub_workers = await asyncio.gather(
                *(_create_sub_worker()
                  for _ in range(config.server_processes)))

            calls = [worker.stub.RunServer() for worker in sub_workers]

            config_request.setup.server_processes = 1

            for call in calls:
                await call.write(config_request)
                # An empty status indicates the peer is ready
                await call.read()

            start_time = time.monotonic()
            await context.write(
                _get_server_status(
                    start_time,
                    start_time,
                    config.port,
                ))

            _LOGGER.info('Servers are ready to serve.')

            async for request in request_iterator:
                end_time = time.monotonic()

                for call in calls:
                    await call.write(request)
                    # Reports from sub workers don't matter here; just drain them
                    await call.read()

                status = _get_server_status(
                    start_time,
                    end_time,
                    config.port,
                )
                if request.mark.reset:
                    start_time = end_time
                await context.write(status)

            for call in calls:
                await call.done_writing()

            for worker in sub_workers:
                await worker.stub.QuitWorker(control_pb2.Void())
                await worker.channel.close()
                _LOGGER.info('Waiting for [%s] to quit...', worker)
                await worker.process.wait()
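
Both RunClient and RunServer fan work out through `_create_sub_worker`, which these examples omit. A hedged sketch of how it might spawn the extra processes, assuming each sub worker is just another copy of this worker started as a child process and reached over an insecure asyncio channel (`_WORKER_ENTRY_FILE` and the `_SubWorker` record are assumed names):

    import collections
    import sys

    _SubWorker = collections.namedtuple('_SubWorker',
                                        ['process', 'port', 'channel', 'stub'])


    async def _create_sub_worker():
        # Spawn another worker process on a free port, then connect to it.
        port = _pick_an_unused_port()
        process = await asyncio.create_subprocess_exec(sys.executable,
                                                       _WORKER_ENTRY_FILE,
                                                       '--driver_port',
                                                       str(port))
        channel = aio.insecure_channel('localhost:%d' % port)
        await channel.channel_ready()
        stub = worker_service_pb2_grpc.WorkerServiceStub(channel)
        return _SubWorker(process=process,
                          port=port,
                          channel=channel,
                          stub=stub)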