Example #1
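These snippets use helpers such as asserts, utility, IpVersion, math, Thread and the http_test_server_fixture without showing their imports. A minimal sketch of the imports a standalone copy would need; the module paths are assumptions modeled on the Nighthawk integration test layout and may differ from the actual package names.

import json
import logging
import math
import subprocess
import time
from threading import Thread

# Assumed module paths; adjust to the real test package layout.
from test.integration import asserts, utility
from test.integration.common import IpVersion
from test.integration.integration_test_fixtures import http_test_server_fixture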
def test_remote_execution_basics(http_test_server_fixture):
    """
  Verify remote execution via gRPC works as intended. We do that by running
  nighthawk_service and configuring nighthawk_client to request execution via that.
  """
    http_test_server_fixture.startNighthawkGrpcService()
    args = [
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--rps", "100", "--termination-predicate", "benchmark.http_2xx:24",
        "--nighthawk-service",
        "%s:%s" % (http_test_server_fixture.grpc_service.server_ip,
                   http_test_server_fixture.grpc_service.server_port)
    ]
    repeats = 3
    for i in range(repeats):
        parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
        counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
            parsed_json)
        assertCounterGreaterEqual(counters, "benchmark.http_2xx", 25)

    http_test_server_fixture.grpc_service.stop()
    # Ensure the gRPC service logs look right. Specifically, these logs ought to contain sentinels
    # indicative of the right number of executions. (Avoids regression of #289).
    assertEqual(
        repeats,
        sum("Starting 1 threads / event loops" in line
            for line in http_test_server_fixture.grpc_service.log_lines))

    # As a control step, prove we are actually performing remote execution: re-run the command without an
    # operational gRPC service. That ought to fail.
    http_test_server_fixture.runNighthawkClient(args, expect_failure=True)
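getNighthawkCounterMapFromJson is supplied by the fixture and not shown here. Roughly, and assuming the client's JSON output nests counters as name/value pairs under a per-result "counters" list, it boils down to something like this illustrative sketch:

def _counter_map_from_json(parsed_json):
  # Illustrative sketch only: index the counters of the first (global) result
  # by name so tests can assert on individual values.
  return {
      counter["name"]: int(counter["value"])
      for counter in parsed_json["results"][0]["counters"]
  }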
def test_http_h2(http_test_server_fixture):
    """Test h2 over plain http.

  Runs the CLI configured to use h2c against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--h2",
        http_test_server_fixture.getTestServerRootUri(),
        "--max-active-requests", "1", "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24", "--rps", "100"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total",
                                      1030)
    asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      403)
    asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)
    asserts.assertEqual(len(counters), 12)
def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture,
                                     duration_parameterization_fixture):
  """Test latency-sample-, query- and reply- counts in various configurations."""
  for concurrency in [1, 2]:
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration",
        str(duration_parameterization_fixture), "--rps",
        str(qps_parameterization_fixture), "--concurrency",
        str(concurrency)
    ])

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

    global_result = http_test_server_fixture.getGlobalResults(parsed_json)
    actual_duration = utility.get_execution_duration_from_global_result_json(global_result)
    # Ensure Nighthawk managed to execute for at least some time.
    assert actual_duration >= 1

    # The actual duration is a float; flooring it here allows us to use
    # the GreaterEqual matchers below.
    total_requests = qps_parameterization_fixture * concurrency * math.floor(actual_duration)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.request_to_response"]["count"]),
        total_requests)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]), total_requests)
    asserts.assertGreaterEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]),
                               total_requests)

    asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", (total_requests))
    # Give system resources some time to recover after the last execution.
    time.sleep(2)
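To make the flooring argument concrete, a tiny worked example with purely illustrative numbers (not the fixture's actual parameterization):

import math

# With e.g. 25 rps, concurrency 2 and an actual duration of 3.4 seconds,
# flooring the duration gives 25 * 2 * 3 == 150, a safe lower bound for the
# GreaterEqual assertions above.
assert 25 * 2 * math.floor(3.4) == 150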
def test_http_request_release_timing(http_test_server_fixture,
                                     qps_parameterization_fixture,
                                     duration_parameterization_fixture):
    """Verify latency-sample-, query- and reply-counts in various configurations."""

    for concurrency in [1, 2]:
        parsed_json, _ = http_test_server_fixture.runNighthawkClient([
            http_test_server_fixture.getTestServerRootUri(), "--duration",
            str(duration_parameterization_fixture), "--rps",
            str(qps_parameterization_fixture), "--concurrency",
            str(concurrency)
        ])

        total_requests = qps_parameterization_fixture * concurrency * duration_parameterization_fixture
        global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
            parsed_json)
        counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
            parsed_json)
        assertEqual(
            int(global_histograms["benchmark_http_client.request_to_response"]
                ["count"]), total_requests)
        assertEqual(
            int(global_histograms["benchmark_http_client.queue_to_connect"]
                ["count"]), total_requests)

        assertCounterEqual(counters, "benchmark.http_2xx", (total_requests))
def test_http_h1(http_test_server_fixture):
    """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
    # It is possible that the # of upstream_cx > # of backend connections for H1
    # as new connections will spawn if the existing clients cannot keep up with the RPS.
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      500)
    asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_mean"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_mean"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_min"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_min"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_max"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_max"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_pstdev"]), 0)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_pstdev"]), 0)

    asserts.assertGreaterEqual(len(counters), 12)
def test_http_h1(http_test_server_fixture):
    """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1)
    asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
    asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterEqual(
        counters, "upstream_cx_tx_bytes_total", 1375
        if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450)
    asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_mean"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_mean"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_min"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_min"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_max"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_max"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_pstdev"]), 0)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_pstdev"]), 0)

    asserts.assertEqual(len(counters), 12)
def test_nighthawk_client_v2_api_explicitly_set(http_test_server_fixture):
  """Test that the v2 api works when requested to."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
      "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate",
      "foo:1", "--allow-envoy-deprecated-v2-api", "--transport-socket",
      "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}"
  ])

  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.pool_connection_failure", 1)
def test_http_h1_failure_predicate(http_test_server_fixture):
  """Test with a failure predicate.

  Should result in failing execution after a single successful request.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
      "--connections", "1", "--failure-predicate", "benchmark.http_2xx:0"
  ],
                                                               expect_failure=True)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 1)
def test_http_h1_termination_predicate(http_test_server_fixture):
  """Test with a termination predicate.

  Should result in successful execution, with 10 successful requests.
  Without the predicate we would expect 2500 requests based on rps and duration.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
      "--connections", "1", "--termination-predicate", "benchmark.http_2xx:9"
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 10)
Example #10
def test_http_h1_termination_predicate(http_test_server_fixture):
    """
  Put in a termination predicate. Should result in failing execution, with 10 successfull requests.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "5",
        "--rps", "5", "--termination-predicate", "benchmark.http_2xx:0"
    ], True)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    assertCounterEqual(counters, "benchmark.http_2xx", 1)
    assertEqual(len(counters), 12)
Example #11
def test_grpc_service_happy_flow(http_test_server_fixture):
    """Test the happy flow against a dummy request source gRPC service."""
    http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--termination-predicate", "benchmark.http_2xx:5", "--rps 10",
        "--request-source %s:%s" %
        (http_test_server_fixture.grpc_service.server_ip,
         http_test_server_fixture.grpc_service.server_port),
        http_test_server_fixture.getTestServerRootUri()
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    utility.assertGreaterEqual(counters["benchmark.http_2xx"], 5)
    utility.assertEqual(counters["requestsource.internal.upstream_rq_200"], 1)
Example #12
def test_grpc_service_down(http_test_server_fixture):
    """Test that execution fails when the request source gRPC service is unreachable."""
    parsed_json, _ = http_test_server_fixture.runNighthawkClient(
        [
            "--rps 100",
            "--request-source %s:%s" %
            (http_test_server_fixture.server_ip, "34589"),
            http_test_server_fixture.getTestServerRootUri()
        ],
        expect_failure=True)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    utility.assertEqual(
        counters["requestsource.upstream_rq_pending_failure_eject"], 1)
def test_http_concurrency(http_test_server_fixture):
  """Test that concurrency acts like a multiplier."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:24",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

  # Each of the 4 workers runs until its termination predicate fires after 25 2xx responses
  # (benchmark.http_2xx:24), so we expect exactly 4 * 25 = 100 requests in total.
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 100)
  asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 4)
Example #14
def test_grpc_service_stress(http_test_server_fixture):
    """Test high load."""
    http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--duration 100", "--rps 10000", "--concurrency 4",
        "--termination-predicate", "benchmark.http_2xx:5000",
        "--request-source %s:%s" %
        (http_test_server_fixture.grpc_service.server_ip,
         http_test_server_fixture.grpc_service.server_port),
        http_test_server_fixture.getTestServerRootUri()
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5000)
    asserts.assertEqual(counters["requestsource.internal.upstream_rq_200"], 4)
Example #15
def test_http_concurrency(http_test_server_fixture):
  """
  Concurrency should act like a multiplier.
  """

  parsed_json, _ = http_test_server_fixture.runNighthawkClient(
      ["--concurrency 4 --rps 5 --connections 1",
       http_test_server_fixture.getTestServerRootUri()])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

  # Quite a loose expectation, but this may fluctuate depending on server load.
  # Ideally we'd see 4 workers * 5 rps * 5s = 100 requests total
  assertCounterGreater(counters, "benchmark.http_2xx", 25)
  assertCounterLessEqual(counters, "benchmark.http_2xx", 100)
  assertCounterEqual(counters, "upstream_cx_http1_total", 4)
def test_cancellation_with_infinite_duration(http_test_server_fixture):
  """Test that we can use signals to cancel execution."""
  args = [
      http_test_server_fixture.nighthawk_client_path, "--concurrency", "2",
      http_test_server_fixture.getTestServerRootUri(), "--no-duration", "--output-format", "json"
  ]
  client_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  Thread(target=(lambda: _send_sigterm(client_process))).start()
  stdout, stderr = client_process.communicate()
  client_process.wait()
  output = stdout.decode('utf-8')
  asserts.assertEqual(client_process.returncode, 0)
  parsed_json = json.loads(output)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "graceful_stop_requested", 2)
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
def test_http_concurrency(http_test_server_fixture):
  """Test that concurrency acts like a multiplier."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:24",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

  # Each of the 4 workers runs until its termination predicate fires after 25 2xx responses
  # (benchmark.http_2xx:24), so we expect exactly 4 * 25 = 100 requests in total.
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 100)
  # Assert that we at least have 1 connection for each event loop (1*4). It is possible that the # of
  # upstream_cx > # of backend connections for H1 as new connections will spawn if the existing clients
  # cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 4)
Example #18
def test_http_h2(http_test_server_fixture):
  """
  Runs the CLI configured to use h2c against our test server, and sanity
  checks statistics from both client and server.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(
      ["--h2", http_test_server_fixture.getTestServerRootUri()])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  assertCounterEqual(counters, "benchmark.http_2xx", 25)
  assertCounterEqual(counters, "upstream_cx_http2_total", 1)
  assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1145)
  assertCounterEqual(counters, "upstream_cx_total", 1)
  assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403)
  assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  assertCounterEqual(counters, "upstream_rq_total", 25)
  assertCounterEqual(counters, "default.total_match_count", 1)
  assertEqual(len(counters), 12)
Example #19
def test_tracing_zipkin(http_test_server_fixture):
  """Test zipkin tracing.

  Test that we send spans when our zipkin tracing feature
  is enabled. Note there's no actual zipkin server started, so
  traffic will (hopefully) get sent into the void.
  """
  # TODO(https://github.com/envoyproxy/nighthawk/issues/141):
  # Boot up an actual zipkin server to accept spans we send here & validate based on that.
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--duration 5", "--termination-predicate", "benchmark.http_2xx:49", "--rps 100",
      "--trace zipkin://localhost:79/api/v1/spans",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 50)
  asserts.assertGreaterEqual(counters["tracing.zipkin.reports_dropped"], 9)
  asserts.assertGreaterEqual(counters["tracing.zipkin.spans_sent"], 45)
Example #20
def test_http_h1(http_test_server_fixture):
  """
  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(
      [http_test_server_fixture.getTestServerRootUri()])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  assertCounterEqual(counters, "benchmark.http_2xx", 25)
  assertCounterEqual(counters, "upstream_cx_http1_total", 1)
  assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
  assertCounterEqual(counters, "upstream_cx_total", 1)
  assertCounterEqual(counters, "upstream_cx_tx_bytes_total",
                     1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1500)
  assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  assertCounterEqual(counters, "upstream_rq_total", 25)
  assertCounterEqual(counters, "default.total_match_count", 1)
  assertEqual(len(counters), 12)
Example #21
def test_remote_execution_basics(http_test_server_fixture):
  """
  Verify remote execution via gRPC works as intended. We do that by running
  nighthawk_service and configuring nighthawk_client to request execution via that service.
  """
  http_test_server_fixture.startNighthawkGrpcService()
  args = [
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100", "--rps", "100",
      "--termination-predicate", "benchmark.http_2xx:24", "--nighthawk-service",
      "%s:%s" % (http_test_server_fixture.grpc_service.server_ip,
                 http_test_server_fixture.grpc_service.server_port)
  ]
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  assertCounterEqual(counters, "benchmark.http_2xx", 25)

  # As a control step, prove we are actually performing remote execution: re-run the command without an
  # operational gRPC service. That ought to fail.
  http_test_server_fixture.grpc_service.stop()
  http_test_server_fixture.runNighthawkClient(args, expect_failure=True)
def test_request_source_plugin_happy_flow_parametrized(
        http_test_server_fixture, request_source_config, expected_min,
        expected_max):
    """Test that the nighthawkClient can run with request-source-plugin option."""
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--termination-predicate", "benchmark.http_2xx:5", "--rps 10",
        "--request-source-plugin-config %s" % request_source_config,
        http_test_server_fixture.getTestServerRootUri(), "--request-header",
        "host: sni.com"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_max"]), expected_max)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_min"]), expected_min)
Example #23
def test_http_h1_maxrps_no_client_side_queueing(http_test_server_fixture):
    """Test H1 at the maximum attainable request rate without client-side queueing.

    The sequencer is expected to block, and all traffic should flow over a
    single connection.
    """
    assert (http_test_server_fixture.test_server.enableCpuProfiler())
    MIN_EXPECTED_REQUESTS = 100
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--rps", "999999",
        "--duration", "30"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    # We expect to have executed a certain number of requests.
    assertCounterGreater(counters, "benchmark.http_2xx", MIN_EXPECTED_REQUESTS)
    # We expect to have created only a single connection
    assertCounterEqual(counters, "upstream_cx_http1_total", 1)
    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    assertGreater(int(global_histograms["sequencer.blocking"]["count"]),
                  MIN_EXPECTED_REQUESTS)
    assertGreater(
        int(global_histograms["benchmark_http_client.request_to_response"]
            ["count"]), MIN_EXPECTED_REQUESTS)
    # dump output
    logging.info(str(parsed_json))