Example #1
def test_h1_pool_strategy(http_test_server_fixture):
    """
  Test that with the "mru" strategy only the first created connection gets to send requests.
  Then, with the "lru" strategy, we expect the other connection to be used as well.
  """
    def countLogLinesWithSubstring(logs, substring):
        return len(
            [line for line in logs.split(os.linesep) if substring in line])

    _, logs = http_test_server_fixture.runNighthawkClient([
        "--rps 5", "-v", "trace", "--connections", "2",
        "--prefetch-connections",
        "--experimental-h1-connection-reuse-strategy", "mru",
        "--termination-predicate", "benchmark.http_2xx:4",
        http_test_server_fixture.getTestServerRootUri()
    ])

    # With "mru", all traffic should stick to the first created connection (C0).
    # Five requests (termination at benchmark.http_2xx:4) times two
    # "message complete" trace lines per request yields 10 matching lines.
    assertNotIn("[C1] message complete", logs)
    assertEqual(countLogLinesWithSubstring(logs, "[C0] message complete"), 10)

    requests = 12
    connections = 3
    _, logs = http_test_server_fixture.runNighthawkClient([
        "--rps", "5", "-v trace", "--connections",
        str(connections), "--prefetch-connections",
        "--experimental-h1-connection-reuse-strategy", "lru",
        "--termination-predicate",
        "benchmark.http_2xx:%d" % (requests - 1),
        http_test_server_fixture.getTestServerRootUri()
    ])
    for i in range(1, connections):
        line_count = countLogLinesWithSubstring(logs,
                                                "[C%d] message complete" % i)
        # With "lru", requests should be spread evenly over the connections;
        # each request again produces two "message complete" trace lines.
        strict_count = (requests // connections) * 2
        assertBetweenInclusive(line_count, strict_count, strict_count)
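The snippets in this collection lean on small assertion helpers (assertEqual, assertNotIn, assertCounterEqual and friends, sometimes qualified as asserts.*) that come from Nighthawk's integration test support code and are not shown here. The following is a minimal sketch of plausible stand-in definitions, assuming the counter map is a plain dict from counter name to integer value; the names are taken from the call sites, but the bodies are guesses, not the real implementations.

# Hypothetical stand-ins for Nighthawk's assertion helpers, inferred from
# how the examples call them.
def assertEqual(a, b):
  assert a == b, "%r != %r" % (a, b)

def assertGreaterEqual(a, b):
  assert a >= b, "%r < %r" % (a, b)

def assertBetweenInclusive(value, low, high):
  assert low <= value <= high, "%r not in [%r, %r]" % (value, low, high)

def assertCounterEqual(counters, name, value):
  # counters: dict of counter name -> int, as returned by
  # getNighthawkCounterMapFromJson() in these examples.
  assert name in counters, "counter %s not found" % name
  assertEqual(counters[name], value)

def assertCounterGreaterEqual(counters, name, value):
  assert name in counters, "counter %s not found" % name
  assertGreaterEqual(counters[name], value)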
def test_cli_output_format(http_test_server_fixture):
  """Test that we observe latency percentiles with CLI output."""
  output, _ = http_test_server_fixture.runNighthawkClient(
      ["--duration 1", "--rps 10",
       http_test_server_fixture.getTestServerRootUri()], as_json=False)
  asserts.assertIn("Initiation to completion", output)
  asserts.assertIn("Percentile", output)
def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture,
                                     duration_parameterization_fixture):
  """Test latency-sample-, query- and reply-counts in various configurations."""
  for concurrency in [1, 2]:
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration",
        str(duration_parameterization_fixture), "--rps",
        str(qps_parameterization_fixture), "--concurrency",
        str(concurrency)
    ])

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

    global_result = http_test_server_fixture.getGlobalResults(parsed_json)
    actual_duration = utility.get_execution_duration_from_global_result_json(global_result)
    # Ensure Nighthawk managed to execute for at least some time.
    assert actual_duration >= 1

    # The actual duration is a float; flooring it here allows us to use
    # the GreaterEqual matchers below.
    total_requests = qps_parameterization_fixture * concurrency * math.floor(actual_duration)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.request_to_response"]["count"]),
        total_requests)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]), total_requests)
    asserts.assertGreaterEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]),
                               total_requests)

    asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", (total_requests))
    # Give system resources some time to recover after the last execution.
    time.sleep(2)
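The test above takes qps_parameterization_fixture and duration_parameterization_fixture, whose definitions are not part of these snippets. A plausible minimal version, assuming they are ordinary pytest parametrized fixtures (the parameter values below are illustrative, not Nighthawk's actual ones):

import pytest

# Hypothetical parametrized fixtures: any test that requests one of these
# runs once per value in params.
@pytest.fixture(params=[10, 100])
def qps_parameterization_fixture(request):
  return request.param

@pytest.fixture(params=[3, 5])
def duration_parameterization_fixture(request):
  return request.param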
def test_bad_arg_error_messages(http_test_server_fixture):
  """Test that arguments which pass proto validation but are nonetheless invalid produce reasonable error messages."""
  _, err = http_test_server_fixture.runNighthawkClient(
      [http_test_server_fixture.getTestServerRootUri(), "--termination-predicate", "a:a"],
      expect_failure=True,
      as_json=False)
  assert "Bad argument: Termination predicate 'a:a' has an out of range threshold." in err
Example #5
def test_remote_execution_basics(http_test_server_fixture):
    """
  Verify remote execution via gRPC works as intended. We do that by running
  nighthawk_service and configuring nighthawk_client to request execution through it.
  """
    http_test_server_fixture.startNighthawkGrpcService()
    args = [
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--rps", "100", "--termination-predicate", "benchmark.http_2xx:24",
        "--nighthawk-service",
        "%s:%s" % (http_test_server_fixture.grpc_service.server_ip,
                   http_test_server_fixture.grpc_service.server_port)
    ]
    repeats = 3
    for i in range(repeats):
        parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
        counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
            parsed_json)
        assertCounterGreaterEqual(counters, "benchmark.http_2xx", 25)

    http_test_server_fixture.grpc_service.stop()
    # Ensure the gRPC service logs look right. Specifically, these logs ought to have sentinels
    # indicative of the right number of executions. (Avoids regression of #289.)
    assertEqual(
        repeats,
        sum("Starting 1 threads / event loops" in line
            for line in http_test_server_fixture.grpc_service.log_lines))

    # As a control step, prove we are actually performing remote execution: re-run the command without an
    # operational gRPC service. That ought to fail.
    http_test_server_fixture.runNighthawkClient(args, expect_failure=True)
def test_http_h2(http_test_server_fixture):
    """Test h2 over plain http.

  Runs the CLI configured to use h2c against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--h2",
        http_test_server_fixture.getTestServerRootUri(),
        "--max-active-requests", "1", "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24", "--rps", "100"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total",
                                      1030)
    asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      403)
    asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)
    asserts.assertEqual(len(counters), 12)
def test_bad_service_uri(http_test_server_fixture):
  """Test configuring a bad service uri."""
  args = [http_test_server_fixture.getTestServerRootUri(), "--nighthawk-service", "a:-1"]
  parsed_json, err = http_test_server_fixture.runNighthawkClient(args,
                                                                 expect_failure=True,
                                                                 as_json=False)
  assert "Bad service uri" in err
def test_http_request_release_timing(http_test_server_fixture,
                                     qps_parameterization_fixture,
                                     duration_parameterization_fixture):
    """
  Verify latency-sample-, query- and reply-counts in various configurations.
  """

    for concurrency in [1, 2]:
        parsed_json, _ = http_test_server_fixture.runNighthawkClient([
            http_test_server_fixture.getTestServerRootUri(), "--duration",
            str(duration_parameterization_fixture), "--rps",
            str(qps_parameterization_fixture), "--concurrency",
            str(concurrency)
        ])

        total_requests = qps_parameterization_fixture * concurrency * duration_parameterization_fixture
        global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
            parsed_json)
        counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
            parsed_json)
        assertEqual(
            int(global_histograms["benchmark_http_client.request_to_response"]
                ["count"]), total_requests)
        assertEqual(
            int(global_histograms["benchmark_http_client.queue_to_connect"]
                ["count"]), total_requests)

        assertCounterEqual(counters, "benchmark.http_2xx", (total_requests))
def test_http_h1_mini_stress_test_without_client_side_queueing(http_test_server_fixture):
  """Run a max rps test with the h1 pool against our test server, with no client-side queueing."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--connections", "1",
      "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99"
  ])
  asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertNotIn("upstream_cx_overflow", counters)
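Several stress-test examples delegate to a shared helper (spelled _mini_stress_test, mini_stress_test or mini_stress_test_h1 depending on the snippet's vintage) that is not shown. Below is a minimal sketch inferred purely from the call sites; the real helper presumably does more, such as verifying that the sequencer reports blocking, as some docstrings in this collection suggest.

def _mini_stress_test(fixture, args):
  # Hypothetical sketch: run the client with the supplied arguments and hand
  # back the counter map so the caller can assert on connection-pool behavior.
  parsed_json, _ = fixture.runNighthawkClient(args)
  counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
  # Every caller drives the client far beyond the attainable request rate, so
  # at least one successful response should always have been observed.
  asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1)
  return counters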
def test_http_h1(http_test_server_fixture):
    """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
    # It is possible that the # of upstream_cx > # of backend connections for H1
    # as new connections will spawn if the existing clients cannot keep up with the RPS.
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      500)
    asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_mean"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_mean"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_min"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_min"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_max"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_max"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_pstdev"]), 0)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_pstdev"]), 0)

    asserts.assertGreaterEqual(len(counters), 12)
def test_dotted_output_format(http_test_server_fixture):
  """Test that we get the dotted string output format when requested, and ensure we get latency percentiles."""
  output, _ = http_test_server_fixture.runNighthawkClient([
      "--duration 1", "--rps 10", "--output-format dotted",
      http_test_server_fixture.getTestServerRootUri()
  ],
                                                          as_json=False)
  asserts.assertIn("global.benchmark_http_client.request_to_response.permilles-500.microseconds",
                   output)
def test_http_h1(http_test_server_fixture):
    """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1)
    asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
    asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterEqual(
        counters, "upstream_cx_tx_bytes_total", 1375
        if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450)
    asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_mean"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_mean"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_min"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_min"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_max"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_max"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_pstdev"]), 0)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_pstdev"]), 0)

    asserts.assertEqual(len(counters), 12)
def test_http_h2_mini_stress_test_open_loop(http_test_server_fixture):
  """Run an H2 open loop stress test. We expect higher overflow counts."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "10000", "--max-pending-requests",
      "1", "--h2", "--open-loop", "--max-active-requests", "1", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup"
  ])
  # we expect pool overflows
  asserts.assertCounterGreater(counters, "benchmark.pool_overflow", 10)
def test_http_h2_mini_stress_test_with_client_side_queueing(http_test_server_fixture):
  """Run a max rps test with the h2 pool against our test server, using a small client-side queue."""
  counters = _mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests",
      "10", "--h2", "--max-active-requests", "1", "--connections", "1", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:99", "--simple-warmup"
  ])
  asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_overflow", 10)
Example #15
def test_http_h1_mini_stress_test_with_client_side_queueing(http_test_server_fixture):
  """
  Run a max rps test with the h1 pool against our test server, using a small client-side
  queue.
  """
  counters = mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--max-pending-requests",
      "10", "--duration 10", "--connections", "1"
  ])
  assertCounterEqual(counters, "upstream_rq_pending_total", 11)
  assertCounterEqual(counters, "upstream_cx_overflow", 10)
def DISABLED_test_nighthawk_client_v2_api_breaks_by_default(http_test_server_fixture):
  """Test that passing a v2 API config fails when the v2 API is not explicitly allowed."""
  _, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
      "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate",
      "foo:1", "--transport-socket",
      "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}"
  ],
                                                     expect_failure=True,
                                                     as_json=False)
Example #17
def test_http_h2_mini_stress_test_open_loop(http_test_server_fixture):
    """
  H2 open loop stress test. We expect higher overflow counts.
  """
    counters = mini_stress_test(http_test_server_fixture, [
        http_test_server_fixture.getTestServerRootUri(), "--rps", "2500",
        "--max-pending-requests", "1", "--duration", "10", "--h2",
        "--open-loop", "--max-active-requests", "1"
    ])
    # we expect pool overflows
    assertCounterGreater(counters, "benchmark.pool_overflow", 10)
Example #18
def test_http_h2_mini_stress_test_without_client_side_queueing(http_test_server_fixture):
  """
  Run a max rps test with the h2 pool against our test server, with no client-side
  queueing.
  """
  counters = mini_stress_test(http_test_server_fixture, [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--duration", "2", "--h2",
      "--max-active-requests", "1", "--connections", "1"
  ])
  assertCounterEqual(counters, "upstream_rq_pending_total", 1)
  assertNotIn("upstream_rq_pending_overflow", counters)
def test_nighthawk_client_v2_api_explicitly_set(http_test_server_fixture):
  """Test that the v2 API works when explicitly requested."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
      "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate",
      "foo:1", "--allow-envoy-deprecated-v2-api", "--transport-socket",
      "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}"
  ])

  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.pool_connection_failure", 1)
def test_request_body_gets_transmitted(http_test_server_fixture):
    """
  Test that the number of bytes we request for the request body gets reflected in the upstream
  connection transmitted bytes counter for h1 and h2.
  """
    def check_upload_expectations(fixture, parsed_json,
                                  expected_transmitted_bytes,
                                  expected_received_bytes):
        counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
        assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                  expected_transmitted_bytes)
        server_stats = fixture.getTestServerStatisticsJson()
        assertGreaterEqual(
            fixture.getServerStatFromJson(
                server_stats,
                "http.ingress_http.downstream_cx_rx_bytes_total"),
            expected_received_bytes)

    upload_bytes = 10000

    # test h1
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "1",
        "--rps", "2", "--request-body-size",
        str(upload_bytes)
    ])

    # We expect rps * upload_bytes to be transferred/received.
    check_upload_expectations(http_test_server_fixture, parsed_json,
                              upload_bytes * 2, upload_bytes * 2)

    # test h2
    # Again, we expect rps * upload_bytes to be transferred/received. However, we didn't reset
    # the server in between, so our expectation for received bytes on the server side is raised.
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "1",
        "--h2", "--rps", "2", "--request-body-size",
        str(upload_bytes)
    ])
    check_upload_expectations(http_test_server_fixture, parsed_json,
                              upload_bytes * 2, upload_bytes * 4)
Example #21
def test_http_h1_mini_stress_test_open_loop(http_test_server_fixture):
    """
  H1 open loop stress test. We expect higher pending and overflow counts.
  """
    counters = mini_stress_test(http_test_server_fixture, [
        http_test_server_fixture.getTestServerRootUri(), "--rps", "10000",
        "--max-pending-requests", "1", "--open-loop", "--max-active-requests",
        "1", "--connections", "1", "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:99"
    ])
    # we expect pool overflows
    assertCounterGreater(counters, "benchmark.pool_overflow", 10)
def test_http_h1_failure_predicate(http_test_server_fixture):
  """Test with a failure predicate.

  Should result in failing execution, after a single successful request.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
      "--connections", "1", "--failure-predicate", "benchmark.http_2xx:0"
  ],
                                                               expect_failure=True)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 1)
def test_http_h1_termination_predicate(http_test_server_fixture):
  """Test with a termination predicate.

  Should result in successful execution, with 10 successful requests; based
  on the rps and duration we would otherwise expect far more.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
      "--connections", "1", "--termination-predicate", "benchmark.http_2xx:9"
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 10)
Example #24
def test_http_h1_termination_predicate(http_test_server_fixture):
    """
  Put in a termination predicate. Should result in failing execution, with a
  single successful request.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "5",
        "--rps", "5", "--termination-predicate", "benchmark.http_2xx:0"
    ], expect_failure=True)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    assertCounterEqual(counters, "benchmark.http_2xx", 1)
    assertEqual(len(counters), 12)
def test_http_concurrency(http_test_server_fixture):
  """Test that concurrency acts like a multiplier."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:24",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

  # With the termination predicate at benchmark.http_2xx:24, each of the 4
  # workers stops after 25 successful requests: 4 * 25 = 100 requests total.
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 100)
  asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 4)
Example #26
def test_grpc_service_down(http_test_server_fixture):
    """Test that the client fails when the configured request-source gRPC service is unreachable."""
    parsed_json, _ = http_test_server_fixture.runNighthawkClient(
        [
            "--rps 100",
            "--request-source %s:%s" %
            (http_test_server_fixture.server_ip, "34589"),
            http_test_server_fixture.getTestServerRootUri()
        ],
        expect_failure=True)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    utility.assertEqual(
        counters["requestsource.upstream_rq_pending_failure_eject"], 1)
Example #27
def test_grpc_service_happy_flow(http_test_server_fixture):
    """Test a happy flow with the dummy request-source gRPC service supplying the requests."""
    http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--termination-predicate", "benchmark.http_2xx:5", "--rps 10",
        "--request-source %s:%s" %
        (http_test_server_fixture.grpc_service.server_ip,
         http_test_server_fixture.grpc_service.server_port),
        http_test_server_fixture.getTestServerRootUri()
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    utility.assertGreaterEqual(counters["benchmark.http_2xx"], 5)
    utility.assertEqual(counters["requestsource.internal.upstream_rq_200"], 1)
Example #28
def test_http_concurrency(http_test_server_fixture):
  """
  Concurrency should act like a multiplier.
  """

  parsed_json, _ = http_test_server_fixture.runNighthawkClient(
      ["--concurrency 4 --rps 5 --connections 1",
       http_test_server_fixture.getTestServerRootUri()])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

  # Quite a loose expectation, but this may fluctuate depending on server load.
  # Ideally we'd see 4 workers * 5 rps * 5s = 100 requests total
  assertCounterGreater(counters, "benchmark.http_2xx", 25)
  assertCounterLessEqual(counters, "benchmark.http_2xx", 100)
  assertCounterEqual(counters, "upstream_cx_http1_total", 4)
def test_http_h1_mini_stress_test_without_client_side_queueing(
        http_test_server_fixture):
    """
  Run a max rps test with the h1 pool against our test server, with no client-side
  queueing. We expect to observe:
  - upstream_rq_pending_total to be equal to 1
  - blocking to be reported by the sequencer
  - no upstream_cx_overflows
  """
    counters = mini_stress_test_h1(http_test_server_fixture, [
        http_test_server_fixture.getTestServerRootUri(), "--rps", "999999",
        "--duration 2"
    ])
    assertCounterEqual(counters, "upstream_rq_pending_total", 1)
    assertNotIn("upstream_cx_overflow", counters)
def test_http_h1_mini_stress_test_with_client_side_queueing(
        http_test_server_fixture):
    """
  Run a max rps test with the h1 pool against our test server, using a small client-side
  queue. We expect to observe:
  - upstream_rq_pending_total increasing
  - upstream_cx_overflow overflows
  - blocking to be reported by the sequencer
  """
    counters = mini_stress_test_h1(http_test_server_fixture, [
        http_test_server_fixture.getTestServerRootUri(), "--rps", "999999",
        "--max-pending-requests", "10", "--duration 10"
    ])
    assertCounterGreater(counters, "upstream_rq_pending_total", 100)
    assertCounterGreater(counters, "upstream_cx_overflow", 0)