# Example 1
def test_h1_pool_strategy(http_test_server_fixture):
    """Exercise the HTTP/1 connection re-use strategies.

  With the "mru" strategy only the first created connection should carry
  requests; with the "lru" strategy every connection should be used.
  """

    def count_lines_containing(logs, needle):
        # Count log lines that contain the given substring.
        total = 0
        for log_line in logs.split(os.linesep):
            if needle in log_line:
                total += 1
        return total

    _, logs = http_test_server_fixture.runNighthawkClient([
        "--rps 5", "-v", "trace", "--connections", "2",
        "--prefetch-connections",
        "--experimental-h1-connection-reuse-strategy", "mru",
        "--termination-predicate", "benchmark.http_2xx:4",
        http_test_server_fixture.getTestServerRootUri()
    ])

    # Under "mru" the second connection must stay completely idle.
    assertNotIn("[C1] message complete", logs)
    assertEqual(count_lines_containing(logs, "[C0] message complete"), 10)

    requests = 12
    connections = 3
    _, logs = http_test_server_fixture.runNighthawkClient([
        "--rps", "5", "-v trace", "--connections",
        str(connections), "--prefetch-connections",
        "--experimental-h1-connection-reuse-strategy", "lru",
        "--termination-predicate",
        "benchmark.http_2xx:%d" % (requests - 1),
        http_test_server_fixture.getTestServerRootUri()
    ])
    # Under "lru" traffic should be spread evenly over all connections.
    expected = (requests / connections) * 2
    for connection_id in range(1, connections):
        observed = count_lines_containing(
            logs, "[C%d] message complete" % connection_id)
        assertBetweenInclusive(observed, expected, expected)
# Example 2
def test_remote_execution_basics(http_test_server_fixture):
    """Verify remote execution via gRPC works as intended.

  We run nighthawk_service and configure nighthawk_client to request
  execution through it, repeatedly, then prove execution was remote.
  """
    http_test_server_fixture.startNighthawkGrpcService()
    grpc_service = http_test_server_fixture.grpc_service
    args = [
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--rps", "100", "--termination-predicate", "benchmark.http_2xx:24",
        "--nighthawk-service",
        "%s:%s" % (grpc_service.server_ip, grpc_service.server_port)
    ]
    repeats = 3
    for _ in range(repeats):
        parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
        counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
            parsed_json)
        assertCounterGreaterEqual(counters, "benchmark.http_2xx", 25)

    grpc_service.stop()
    # Ensure the gRPC service logs look right: they ought to contain one
    # start-up sentinel per execution. (Avoids regression of #289.)
    sentinel_count = sum("Starting 1 threads / event loops" in line
                         for line in grpc_service.log_lines)
    assertEqual(repeats, sentinel_count)

    # As a control step, prove we are actually performing remote execution:
    # re-run the command without an operational gRPC service. That ought to fail.
    http_test_server_fixture.runNighthawkClient(args, expect_failure=True)
def test_cli_output_format(http_test_server_fixture):
  """Verify that latency percentiles show up in the plain CLI output."""
  args = ["--duration 1", "--rps 10", http_test_server_fixture.getTestServerRootUri()]
  output, _ = http_test_server_fixture.runNighthawkClient(args, as_json=False)
  # Both the latency section header and the percentile table must be present.
  for expected_fragment in ("Initiation to completion", "Percentile"):
    asserts.assertIn(expected_fragment, output)
def test_http_request_release_timing(http_test_server_fixture,
                                     qps_parameterization_fixture,
                                     duration_parameterization_fixture):
    '''
  Verify latency-sample-, query- and reply- counts in various configurations.
  '''
    for concurrency in (1, 2):
        parsed_json, _ = http_test_server_fixture.runNighthawkClient([
            http_test_server_fixture.getTestServerRootUri(), "--duration",
            str(duration_parameterization_fixture), "--rps",
            str(qps_parameterization_fixture), "--concurrency",
            str(concurrency)
        ])

        # Expected total is rps * workers * seconds.
        expected_requests = (qps_parameterization_fixture * concurrency *
                             duration_parameterization_fixture)
        global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
            parsed_json)
        counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
            parsed_json)
        # Both timing histograms must have recorded one sample per request.
        for histogram_id in ("benchmark_http_client.request_to_response",
                             "benchmark_http_client.queue_to_connect"):
            assertEqual(int(global_histograms[histogram_id]["count"]),
                        expected_requests)

        assertCounterEqual(counters, "benchmark.http_2xx", expected_requests)
def test_bad_arg_error_messages(http_test_server_fixture):
  """Test arguments that pass proto validation, but are found to be no good nonetheless, result in reasonable error messages."""
  args = [
      http_test_server_fixture.getTestServerRootUri(), "--termination-predicate ", "a:a"
  ]
  _, err = http_test_server_fixture.runNighthawkClient(args, expect_failure=True, as_json=False)
  expected_message = "Bad argument: Termination predicate 'a:a' has an out of range threshold."
  assert expected_message in err
def test_bad_service_uri(http_test_server_fixture):
  """Test configuring a bad service uri.

  A malformed --nighthawk-service address must make the client fail with a
  clear error message.
  """
  args = [http_test_server_fixture.getTestServerRootUri(), "--nighthawk-service", "a:-1"]
  # Only the error text matters here; the parsed output was previously bound
  # to an unused local, so discard it explicitly.
  _, err = http_test_server_fixture.runNighthawkClient(args,
                                                       expect_failure=True,
                                                       as_json=False)
  assert "Bad service uri" in err
def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture,
                                     duration_parameterization_fixture):
  """Test latency-sample-, query- and reply- counts in various configurations."""
  for concurrency in (1, 2):
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration",
        str(duration_parameterization_fixture), "--rps", str(qps_parameterization_fixture),
        "--concurrency", str(concurrency)
    ])

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

    global_result = http_test_server_fixture.getGlobalResults(parsed_json)
    actual_duration = utility.get_execution_duration_from_global_result_json(global_result)
    # Ensure Nighthawk managed to execute for at least some time.
    assert actual_duration >= 1

    # The reported duration is fractional; flooring it allows us to use the
    # GreaterEqual matchers below.
    expected_requests = qps_parameterization_fixture * concurrency * math.floor(actual_duration)
    # Each timing histogram must have collected at least one sample per request.
    for histogram_id in ("benchmark_http_client.request_to_response",
                         "benchmark_http_client.queue_to_connect",
                         "benchmark_http_client.latency_2xx"):
      asserts.assertGreaterEqual(int(global_histograms[histogram_id]["count"]), expected_requests)

    asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", expected_requests)
    # Give system resources some time to recover after the last execution.
    time.sleep(2)
def test_http_h2(http_test_server_fixture):
    """Test h2 over plain http.

  Runs the CLI configured to use h2c against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--h2",
        http_test_server_fixture.getTestServerRootUri(),
        "--max-active-requests", "1", "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24", "--rps", "100"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    # Deterministic counters get exact expectations.
    for counter_name, expected_value in (("benchmark.http_2xx", 25),
                                         ("upstream_cx_http2_total", 1),
                                         ("upstream_cx_total", 1),
                                         ("upstream_rq_pending_total", 1),
                                         ("upstream_rq_total", 25),
                                         ("default.total_match_count", 1)):
        asserts.assertCounterEqual(counters, counter_name, expected_value)
    # Byte counters vary per run, so only check lower bounds.
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total",
                                      1030)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      403)
    asserts.assertEqual(len(counters), 12)
def test_request_body_gets_transmitted(http_test_server_fixture,
                                       filter_configs):
    """Test request body transmission handling code for our extensions.

  Ensure that the number of bytes we request for the request body gets reflected in the upstream
  connection transmitted bytes counter for h1 and h2.
  """

    def verify_upload(fixture, parsed_json, expected_tx_bytes,
                      expected_rx_bytes):
        # Client-side view: bytes written onto the upstream connection.
        counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
        asserts.assertCounterGreaterEqual(counters,
                                          "upstream_cx_tx_bytes_total",
                                          expected_tx_bytes)
        # Server side expectations start failing with larger upload sizes
        server_stats = fixture.getTestServerStatisticsJson()
        asserts.assertGreaterEqual(
            fixture.getServerStatFromJson(
                server_stats,
                "http.ingress_http.downstream_cx_rx_bytes_total"),
            expected_rx_bytes)

    # TODO(#531): The dynamic-delay extension hangs unless we lower the request entity body size.
    if "static_delay" in filter_configs:
        upload_bytes = 1024 * 1024
    else:
        upload_bytes = 1024 * 1024 * 3
    requests = 10
    args = [
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--rps", "100", "--request-body-size",
        str(upload_bytes), "--termination-predicate",
        "benchmark.http_2xx:%s" % str(requests), "--connections", "1",
        "--request-method", "POST", "--max-active-requests", "1",
        "--request-header",
        "x-nighthawk-test-server-config:%s" % filter_configs
    ]
    # Test we transmit the expected amount of bytes with H1
    parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
    verify_upload(http_test_server_fixture, parsed_json,
                  upload_bytes * requests, upload_bytes * requests)

    # Test we transmit the expected amount of bytes with H2. The server was not
    # reset in between, so the received-bytes expectation doubles.
    args.append("--h2")
    parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
    verify_upload(http_test_server_fixture, parsed_json,
                  upload_bytes * requests, upload_bytes * requests * 2)
def test_http_h1(http_test_server_fixture):
    """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
    # It is possible that the # of upstream_cx > # of backend connections for H1
    # as new connections will spawn if the existing clients cannot keep up with the RPS.
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      500)
    asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    # The test server always replies with 10-byte bodies and 97-byte headers,
    # and the run terminates after 25 responses, so each size histogram has 25
    # identical samples: mean == min == max == the fixed size, pstdev == 0.
    # Table-drive the identical checks instead of repeating them ten times.
    for histogram_id, expected_size in (
        ("benchmark_http_client.response_body_size", 10),
        ("benchmark_http_client.response_header_size", 97)):
        histogram = global_histograms[histogram_id]
        asserts.assertEqual(int(histogram["count"]), 25)
        for field in ("raw_mean", "raw_min", "raw_max"):
            asserts.assertEqual(int(histogram[field]), expected_size)
        asserts.assertEqual(int(histogram["raw_pstdev"]), 0)

    asserts.assertGreaterEqual(len(counters), 12)
def test_dotted_output_format(http_test_server_fixture):
  """Test that we get the dotted string output format when requested, and ensure we get latency percentiles."""
  args = [
      "--duration 1", "--rps 10", "--output-format dotted",
      http_test_server_fixture.getTestServerRootUri()
  ]
  output, _ = http_test_server_fixture.runNighthawkClient(args, as_json=False)
  # The dotted format flattens histogram percentiles into dotted keys.
  expected_key = "global.benchmark_http_client.request_to_response.permilles-500.microseconds"
  asserts.assertIn(expected_key, output)
# Example 12
def test_http_h1(http_test_server_fixture):
    """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    # The tx byte expectation differs per IP version — presumably because the
    # address literal in the request line has a different length. TODO confirm.
    expected_tx_bytes = 1375 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450
    for counter_name, expected_value in (
        ("benchmark.http_2xx", 25),
        ("upstream_cx_http1_total", 1),
        ("upstream_cx_rx_bytes_total", 3400),
        ("upstream_cx_total", 1),
        ("upstream_cx_tx_bytes_total", expected_tx_bytes),
        ("upstream_rq_pending_total", 1),
        ("upstream_rq_total", 25),
        ("default.total_match_count", 1)):
        asserts.assertCounterEqual(counters, counter_name, expected_value)

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    # The test server always replies with 10-byte bodies and 97-byte headers,
    # and the run terminates after 25 responses, so each size histogram has 25
    # identical samples: mean == min == max == the fixed size, pstdev == 0.
    # Table-drive the identical checks instead of repeating them ten times.
    for histogram_id, expected_size in (
        ("benchmark_http_client.response_body_size", 10),
        ("benchmark_http_client.response_header_size", 97)):
        histogram = global_histograms[histogram_id]
        asserts.assertEqual(int(histogram["count"]), 25)
        for field in ("raw_mean", "raw_min", "raw_max"):
            asserts.assertEqual(int(histogram[field]), expected_size)
        asserts.assertEqual(int(histogram["raw_pstdev"]), 0)

    asserts.assertEqual(len(counters), 12)
# Example 13
def DISABLED_test_nighthawk_client_v2_api_breaks_by_default(http_test_server_fixture):
  """Test that the v2 api breaks us when it's not explicitly requested."""
  # A v2-typed transport socket config; without the opt-in flag this must fail.
  transport_socket_config = "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}"
  http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
      "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate",
      "foo:1", "--transport-socket", transport_socket_config
  ],
                                              expect_failure=True,
                                              as_json=False)
# Example 14
def test_nighthawk_client_v2_api_explicitly_set(http_test_server_fixture):
  """Test that the v2 api works when requested to."""
  # A v2-typed transport socket config, accepted because of the opt-in flag.
  transport_socket_config = "{name:\"envoy.transport_sockets.tls\",typed_config:{\"@type\":\"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext\",\"common_tls_context\":{}}}"
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
      "--termination-predicate", "benchmark.pool_connection_failure:0", "--failure-predicate",
      "foo:1", "--allow-envoy-deprecated-v2-api", "--transport-socket", transport_socket_config
  ])

  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.pool_connection_failure", 1)
# Example 15
def test_remote_execution_basics(http_test_server_fixture):
  """
  Verify remote execution via gRPC works as intended. We do that by running
  nighthawk_service and configuring nighthawk_client to request execution via that.
  """
  http_test_server_fixture.startNighthawkGrpcService()
  grpc_service = http_test_server_fixture.grpc_service
  service_address = "%s:%s" % (grpc_service.server_ip, grpc_service.server_port)
  args = [
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100", "--rps", "100",
      "--termination-predicate", "benchmark.http_2xx:24", "--nighthawk-service", service_address
  ]
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  assertCounterEqual(counters, "benchmark.http_2xx", 25)

  # As a control step, prove we are actually performing remote execution: re-run
  # the same command with the gRPC service stopped. That ought to fail.
  grpc_service.stop()
  http_test_server_fixture.runNighthawkClient(args, expect_failure=True)
# Example 16
def test_request_body_gets_transmitted(http_test_server_fixture):
    """
  Test that the number of bytes we request for the request body gets reflected in the upstream
  connection transmitted bytes counter for h1 and h2.
  """
    upload_bytes = 10000

    def verify_upload(fixture, parsed_json, expected_tx_bytes,
                      expected_rx_bytes):
        # Client-side view: bytes written onto the upstream connection.
        counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
        assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                  expected_tx_bytes)
        # Server-side view: bytes received on the downstream connection.
        server_stats = fixture.getTestServerStatisticsJson()
        assertGreaterEqual(
            fixture.getServerStatFromJson(
                server_stats,
                "http.ingress_http.downstream_cx_rx_bytes_total"),
            expected_rx_bytes)

    # test h1: we expect rps * upload_bytes to be transferred/received.
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "1",
        "--rps", "2", "--request-body-size",
        str(upload_bytes)
    ])
    verify_upload(http_test_server_fixture, parsed_json, upload_bytes * 2,
                  upload_bytes * 2)

    # test h2: again rps * upload_bytes transferred. However, we didn't reset
    # the server in between, so the received-bytes expectation accumulates.
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "1",
        "--h2", "--rps", "2", "--request-body-size",
        str(upload_bytes)
    ])
    verify_upload(http_test_server_fixture, parsed_json, upload_bytes * 2,
                  upload_bytes * 4)
def test_http_h1_failure_predicate(http_test_server_fixture):
  """Test with a failure predicate.

  The `benchmark.http_2xx:0` failure predicate aborts the run as soon as more
  than zero 2xx responses are seen, so execution fails with exactly one
  successful request despite the configured rps and duration.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
      "--connections", "1", "--failure-predicate", "benchmark.http_2xx:0"
  ],
                                                               expect_failure=True)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 1)
# Example 18
def test_http_h1_termination_predicate(http_test_server_fixture):
    """
  Put in a termination predicate. The `benchmark.http_2xx:0` predicate stops
  execution after a single successful request, despite rps and duration
  allowing more.
  """
    # NOTE(review): the positional `True` presumably maps to expect_failure,
    # matching keyword usage elsewhere in this file — confirm against the
    # runNighthawkClient signature.
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "5",
        "--rps", "5", "--termination-predicate", "benchmark.http_2xx:0"
    ], True)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    assertCounterEqual(counters, "benchmark.http_2xx", 1)
    assertEqual(len(counters), 12)
def test_http_h1_termination_predicate(http_test_server_fixture):
  """Test with a termination predicate.

  The run should terminate successfully after 10 successful requests, even
  though the configured rps and duration would allow far more.
  """
  args = [
      http_test_server_fixture.getTestServerRootUri(), "--duration", "5", "--rps", "500",
      "--connections", "1", "--termination-predicate", "benchmark.http_2xx:9"
  ]
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 10)
# Example 20
def test_grpc_service_happy_flow(http_test_server_fixture):
    """Test the happy flow of the request-source gRPC service.

    Starts the dummy request source, points the client at it, and checks that
    the run produced 2xx responses and that the request source itself was
    queried successfully exactly once.
    """
    http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--termination-predicate", "benchmark.http_2xx:5", "--rps 10",
        "--request-source %s:%s" %
        (http_test_server_fixture.grpc_service.server_ip,
         http_test_server_fixture.grpc_service.server_port),
        http_test_server_fixture.getTestServerRootUri()
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    utility.assertGreaterEqual(counters["benchmark.http_2xx"], 5)
    # A single successful internal request to the request source is expected.
    utility.assertEqual(counters["requestsource.internal.upstream_rq_200"], 1)
# Example 21
def test_grpc_service_down(http_test_server_fixture):
    """Test client behavior when the configured request source is unreachable.

    Points --request-source at a hard-coded port (presumably with nothing
    listening — TODO confirm); the run must fail and record a pending-failure
    ejection on the request source cluster.
    """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient(
        [
            "--rps 100",
            "--request-source %s:%s" %
            (http_test_server_fixture.server_ip, "34589"),
            http_test_server_fixture.getTestServerRootUri()
        ],
        expect_failure=True)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    utility.assertEqual(
        counters["requestsource.upstream_rq_pending_failure_eject"], 1)
# Example 22
def test_http_concurrency(http_test_server_fixture):
  """Test that concurrency acts like a multiplier."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:24",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

  # Each worker terminates after 25 2xx responses (termination predicate
  # benchmark.http_2xx:24), so 4 workers yield 4 * 25 = 100 requests total.
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 100)
  asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 4)
# Example 23
def test_http_concurrency(http_test_server_fixture):
  """
  Concurrency should act like a multiplier.
  """
  args = [
      "--concurrency 4 --rps 5 --connections 1",
      http_test_server_fixture.getTestServerRootUri()
  ]
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

  # Quite a loose expectation, but this may fluctuate depending on server load.
  # Ideally we'd see 4 workers * 5 rps * 5s = 100 requests total.
  assertCounterGreater(counters, "benchmark.http_2xx", 25)
  assertCounterLessEqual(counters, "benchmark.http_2xx", 100)
  # One HTTP/1 connection per worker.
  assertCounterEqual(counters, "upstream_cx_http1_total", 4)
# Example 24
def test_grpc_service_stress(http_test_server_fixture):
    """Test high load."""
    http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
    grpc_service = http_test_server_fixture.grpc_service
    request_source_arg = "--request-source %s:%s" % (grpc_service.server_ip,
                                                     grpc_service.server_port)
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--duration 100", "--rps 10000", "--concurrency 4",
        "--termination-predicate", "benchmark.http_2xx:5000",
        request_source_arg,
        http_test_server_fixture.getTestServerRootUri()
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5000)
    # One 200 from the request source per worker (concurrency is 4).
    asserts.assertEqual(counters["requestsource.internal.upstream_rq_200"], 4)
def test_http_concurrency(http_test_server_fixture):
  """Test that concurrency acts like a multiplier."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--concurrency 4 --rps 100 --connections 1", "--duration", "100", "--termination-predicate",
      "benchmark.http_2xx:24",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

  # Each worker terminates after 25 2xx responses (termination predicate
  # benchmark.http_2xx:24), so 4 workers yield 4 * 25 = 100 requests total.
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 100)
  # Assert that we at least have 1 connection for each event loop (1*4). It is possible that the # of
  # upstream_cx > # of backend connections for H1 as new connections will spawn if the existing clients
  # cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 4)
# Example 26
def test_http_h2(http_test_server_fixture):
  """
  Runs the CLI configured to use h2c against our test server, and sanity
  checks statistics from both client and server.
  """
  args = ["--h2", http_test_server_fixture.getTestServerRootUri()]
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(args)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # Deterministic counters get exact expectations.
  for counter_name, expected_value in (("benchmark.http_2xx", 25),
                                       ("upstream_cx_http2_total", 1), ("upstream_cx_total", 1),
                                       ("upstream_rq_pending_total", 1), ("upstream_rq_total", 25),
                                       ("default.total_match_count", 1)):
    assertCounterEqual(counters, counter_name, expected_value)
  # Byte counters vary per run, so only check lower bounds.
  assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total", 1145)
  assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 403)
  assertEqual(len(counters), 12)
def test_h1_pool_strategy_mru(http_test_server_fixture):
    """Test connection re-use strategies of the http 1 connection pool.

  With the "most recently used" (mru) strategy only the first created
  connection should get to send any requests.
  """
    client_args = [
        "--rps 5", "-v", "trace", "--duration", "20", "--connections", "2",
        "--prefetch-connections",
        "--experimental-h1-connection-reuse-strategy", "mru",
        "--termination-predicate", "benchmark.http_2xx:4",
        http_test_server_fixture.getTestServerRootUri()
    ]
    _, logs = http_test_server_fixture.runNighthawkClient(client_args)

    # The second connection (C1) must stay idle ...
    asserts.assertNotIn("[C1] message complete", logs)
    # ... while the first connection (C0) carries the traffic.
    asserts.assertIn("[C0] message complete", logs)
# Example 28
def test_http_h1(http_test_server_fixture):
  """
  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(
      [http_test_server_fixture.getTestServerRootUri()])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # The tx byte expectation differs per IP version — presumably because the
  # address literal in the request line has a different length. TODO confirm.
  expected_tx_bytes = 1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1500
  for counter_name, expected_value in (("benchmark.http_2xx", 25),
                                       ("upstream_cx_http1_total", 1),
                                       ("upstream_cx_rx_bytes_total", 3400),
                                       ("upstream_cx_total", 1),
                                       ("upstream_cx_tx_bytes_total", expected_tx_bytes),
                                       ("upstream_rq_pending_total", 1),
                                       ("upstream_rq_total", 25),
                                       ("default.total_match_count", 1)):
    assertCounterEqual(counters, counter_name, expected_value)
  assertEqual(len(counters), 12)
# Example 29
def test_tracing_zipkin(http_test_server_fixture):
  """Test zipkin tracing.

  Test that we send spans when our zipkin tracing feature is enabled. Note
  there's no actual zipkin server started, so the traffic will (hopefully)
  get sent into the void.
  """
  # TODO(https://github.com/envoyproxy/nighthawk/issues/141):
  # Boot up an actual zipkin server to accept spans we send here & validate based on that.
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--duration 5", "--termination-predicate", "benchmark.http_2xx:49", "--rps 100",
      "--trace zipkin://localhost:79/api/v1/spans",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 50)
  # With no collector listening, spans end up dropped; we still expect the
  # tracer to have attempted sending most of them.
  asserts.assertGreaterEqual(counters["tracing.zipkin.reports_dropped"], 9)
  asserts.assertGreaterEqual(counters["tracing.zipkin.spans_sent"], 45)
def test_http_h1_response_header_latency_tracking(http_test_server_fixture, server_config):
  """Test emission and tracking of response header latencies.

  Run the CLI configured to track latencies delivered by response header from the test-server.
  Ensure that the origin_latency_statistic histogram receives the correct number of inputs.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--connections", "1", "--rps", "100",
      "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99",
      "--latency-response-header-name", "x-origin-request-receipt-delta"
  ])
  global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json)
  asserts.assertEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]), 100)
  # Verify behavior is correct both with and without the timing filter enabled.
  timing_filter_enabled = "nighthawk_track_timings.yaml" in server_config
  expected_histogram_count = 99 if timing_filter_enabled else 0
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.origin_latency_statistic"]["count"]),
      expected_histogram_count)