Example #1
    def RunClient(self, request_iterator, context):
        config = next(request_iterator).setup
        client_runners = []
        qps_data = histogram.Histogram(config.histogram_params.resolution,
                                       config.histogram_params.max_possible)
        start_time = time.time()

        # Create a client for each channel
        for i in xrange(config.client_channels):
            server = config.server_targets[i % len(config.server_targets)]
            runner = self._create_client_runner(server, config, qps_data)
            client_runners.append(runner)
            runner.start()

        end_time = time.time()
        yield self._get_client_status(start_time, end_time, qps_data)

        # Respond to stat requests
        for request in request_iterator:
            end_time = time.time()
            status = self._get_client_status(start_time, end_time, qps_data)
            if request.mark.reset:
                qps_data.reset()
                start_time = time.time()
            yield status

        # Cleanup the clients
        for runner in client_runners:
            runner.stop()
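
The method above leans on a _get_client_status helper to fold the elapsed time and histogram contents into a ClientStatus reply. A minimal sketch of what such a helper might look like, assuming the gRPC benchmark stats_pb2/control_pb2 messages and a Histogram.get_data() accessor (none of this is shown in the example, so treat it as an assumption):

    # Sketch only: assumes the benchmark protos are imported as control_pb2 and
    # stats_pb2 (module paths may differ) and qps_data is the Histogram above.
    def _get_client_status(self, start_time, end_time, qps_data):
        latencies = qps_data.get_data()  # assumed accessor returning HistogramData
        elapsed = end_time - start_time
        stats = stats_pb2.ClientStats(latencies=latencies,
                                      time_elapsed=elapsed,
                                      time_user=elapsed,
                                      time_system=elapsed)
        return control_pb2.ClientStatus(stats=stats)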
Example #2
def run_test(args):
    test_cases = _parse_weighted_test_cases(args.test_cases)
    test_server_targets = args.server_addresses.split(',')
    # Propagate any client exceptions with a queue
    exception_queue = queue.Queue()
    stop_event = threading.Event()
    hist = histogram.Histogram(1, 1)
    runners = []

    # Start a metrics server backed by the shared histogram
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=25))
    metrics_pb2.add_MetricsServiceServicer_to_server(
        metrics_server.MetricsServer(hist), server)
    server.add_insecure_port('[::]:{}'.format(args.metrics_port))
    server.start()

    # Fan out: several channels per server, several stubs per channel, one runner per stub
    for test_server_target in test_server_targets:
        for _ in xrange(args.num_channels_per_server):
            channel = grpc.insecure_channel(test_server_target)
            for _ in xrange(args.num_stubs_per_channel):
                stub = test_pb2.TestServiceStub(channel)
                runner = test_runner.TestRunner(stub, test_cases, hist,
                                                exception_queue, stop_event)
                runners.append(runner)

    for runner in runners:
        runner.start()
    try:
        timeout_secs = args.test_duration_secs
        if timeout_secs < 0:
            timeout_secs = None
        # Block for up to the test duration; re-raise the first runner exception if one arrives
        raise exception_queue.get(block=True, timeout=timeout_secs)
    except queue.Empty:
        # No exceptions thrown, success
        pass
    finally:
        stop_event.set()
        for runner in runners:
            runner.join()
        runner = None
        server.stop(None)
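
test_runner.TestRunner is defined elsewhere; the example only shows how instances are wired together. A rough sketch of the contract the loop above relies on, with hypothetical internals (the real class differs in detail):

import threading
import time


class TestRunner(threading.Thread):
    """Hypothetical sketch: a thread that keeps exercising weighted test cases
    against its stub until stop_event is set, recording latencies in the shared
    histogram and forwarding any failure through the exception queue."""

    def __init__(self, stub, test_cases, hist, exception_queue, stop_event):
        super(TestRunner, self).__init__()
        self._stub = stub
        self._test_cases = test_cases
        self._histogram = hist
        self._exception_queue = exception_queue
        self._stop_event = stop_event

    def run(self):
        while not self._stop_event.is_set():
            try:
                start = time.time()
                self._run_one_weighted_case()  # hypothetical helper: pick and run one case
                self._histogram.add((time.time() - start) * 1e9)  # latency, assumed in ns
            except Exception as e:  # pylint: disable=broad-except
                self._exception_queue.put(e)
                return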
Example #3
def run_test(args):
  test_cases = _parse_weighted_test_cases(args.test_cases)
  test_servers = args.server_addresses.split(',')
  # Propagate any client exceptions with a queue
  exception_queue = queue.Queue()
  stop_event = threading.Event()
  hist = histogram.Histogram(1, 1)
  runners = []

  server = metrics_pb2.beta_create_MetricsService_server(
      metrics_server.MetricsServer(hist))
  server.add_insecure_port('[::]:{}'.format(args.metrics_port))
  server.start()

  for test_server in test_servers:
    host, port = test_server.split(':', 1)
    for _ in xrange(args.num_channels_per_server):
      channel = implementations.insecure_channel(host, int(port))
      for _ in xrange(args.num_stubs_per_channel):
        stub = test_pb2.beta_create_TestService_stub(channel)
        runner = test_runner.TestRunner(stub, test_cases, hist,
                                        exception_queue, stop_event)
        runners.append(runner)

  for runner in runners:
    runner.start()
  try:
    timeout_secs = args.test_duration_secs
    if timeout_secs < 0:
      timeout_secs = None
    raise exception_queue.get(block=True, timeout=timeout_secs)
  except queue.Empty:
    # No exceptions thrown, success
    pass
  finally:
    stop_event.set()
    for runner in runners:
      runner.join()
      runner = None
    server.stop(0)
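
Both run_test variants start from _parse_weighted_test_cases(args.test_cases). A minimal sketch, assuming the usual stress-client format of comma-separated name:weight pairs (for example 'empty_unary:20,large_unary:20'); the real parser may map names to test-case objects rather than plain strings:

def _parse_weighted_test_cases(test_case_args):
    # Sketch only: 'empty_unary:20,large_unary:20' -> {'empty_unary': 20, 'large_unary': 20}
    weighted_test_cases = {}
    for test_case_arg in test_case_args.split(','):
        name, weight = test_case_arg.split(':', 1)
        weighted_test_cases[name] = int(weight)
    return weighted_test_cases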
Example #4
    async def RunClient(self, request_iterator, context):
        config_request = await context.read()
        config = config_request.setup
        _LOGGER.info('Received ClientConfig: %s', config)

        if config.client_processes <= 0:
            _LOGGER.info('client_processes can\'t be [%d]',
                         config.client_processes)
            _LOGGER.info('Using client_processes == [%d]', _NUM_CORES)
            config.client_processes = _NUM_CORES

        if config.client_processes == 1:
            # If client_processes == 1, run the benchmark in this process.
            await self._run_single_client(config, request_iterator, context)
        else:
            # If client_processes > 1, offload the work to other processes.
            sub_workers = await asyncio.gather(
                *(_create_sub_worker()
                  for _ in range(config.client_processes)))

            # Open one RunClient stream to each sub worker
            calls = [worker.stub.RunClient() for worker in sub_workers]

            config_request.setup.client_processes = 1

            for call in calls:
                await call.write(config_request)
                # An empty status indicates the peer is ready
                await call.read()

            start_time = time.monotonic()
            result = histogram.Histogram(config.histogram_params.resolution,
                                         config.histogram_params.max_possible)
            end_time = time.monotonic()
            await context.write(
                _get_client_status(start_time, end_time, result))

            async for request in request_iterator:
                end_time = time.monotonic()

                for call in calls:
                    _LOGGER.debug('Fetching status...')
                    await call.write(request)
                    sub_status = await call.read()
                    result.merge(sub_status.stats.latencies)
                    _LOGGER.debug('Update from sub worker count=[%d]',
                                  sub_status.stats.latencies.count)

                status = _get_client_status(start_time, end_time, result)
                if request.mark.reset:
                    result.reset()
                    start_time = time.monotonic()
                _LOGGER.debug('Reporting count=[%d]',
                              status.stats.latencies.count)
                await context.write(status)

            for call in calls:
                await call.done_writing()

            for worker in sub_workers:
                await worker.stub.QuitWorker(control_pb2.Void())
                await worker.channel.close()
                _LOGGER.info('Waiting for sub worker [%s] to quit...', worker)
                await worker.process.wait()
                _LOGGER.info('Sub worker [%s] quit', worker)
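
_create_sub_worker is not shown here; conceptually it launches another single-process copy of the worker and returns its process handle, channel, and WorkerService stub. A rough, illustrative sketch under those assumptions (the port-picking helper, command-line flag, and generated-stub module name are guesses, not taken from the example):

import asyncio
import collections
import sys

import grpc

_SubWorker = collections.namedtuple('_SubWorker',
                                    ['process', 'port', 'channel', 'stub'])


async def _create_sub_worker():
    # Illustrative only: start a child worker on a free port and connect to it.
    port = _pick_unused_port()  # hypothetical helper returning a free TCP port
    process = await asyncio.create_subprocess_exec(sys.executable, __file__,
                                                   '--driver_port', str(port))
    channel = grpc.aio.insecure_channel('localhost:%d' % port)
    await channel.channel_ready()  # wait until the child worker is serving
    stub = worker_service_pb2_grpc.WorkerServiceStub(channel)  # assumed generated stub
    return _SubWorker(process=process, port=port, channel=channel, stub=stub)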