Code example #1
def test_http_h2(http_test_server_fixture):
    """Test h2 over plain http.

  Runs the CLI configured to use h2c against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--h2",
        http_test_server_fixture.getTestServerRootUri(),
        "--max-active-requests", "1", "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24", "--rps", "100"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
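    # The termination predicate benchmark.http_2xx:24 stops execution once that counter
    # exceeds 24, which is why exactly 25 2xx responses are expected below.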
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_rx_bytes_total",
                                      1030)
    asserts.assertCounterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      403)
    asserts.assertCounterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)
    asserts.assertGreaterEqual(len(counters), 12)
Code example #2
def test_multiple_backends_https_h1(multi_https_test_server_fixture):
  """Test that we can load-test multiple backends on https.

  Runs the CLI configured to use HTTP/1 with TLS against multiple test servers, and sanity
  checks statistics from both client and server.
  """
  nighthawk_client_args = [
      "--multi-target-use-https", "--multi-target-path", "/", "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:24"
  ]
  for uri in multi_https_test_server_fixture.getAllTestServerRootUris():
    nighthawk_client_args.append("--multi-target-endpoint")
    nighthawk_client_args.append(uri.replace("https://", "").replace("/", ""))

  parsed_json, stderr = multi_https_test_server_fixture.runNighthawkClient(nighthawk_client_args)

  counters = multi_https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  asserts.assertCounterGreater(counters, "upstream_cx_rx_bytes_total", 0)
  # Assert that we have at least one connection per backend. It is possible that
  # the # of upstream_cx > # of backend connections for H1 as new connections
  # will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 3)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 3)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 3)
  asserts.assertCounterGreater(counters, "upstream_cx_tx_bytes_total", 0)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  asserts.assertCounterEqual(counters, "default.total_match_count", 3)
  for parsed_server_json in multi_https_test_server_fixture.getAllTestServerStatisticsJsons():
    single_2xx = multi_https_test_server_fixture.getServerStatFromJson(
        parsed_server_json, "http.ingress_http.downstream_rq_2xx")
    # Confirm that each backend receives some traffic
    asserts.assertGreaterEqual(single_2xx, 1)
Code example #3
def test_http_h1(http_test_server_fixture):
    """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:24"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
    asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
    # It is possible that the # of upstream_cx > # of backend connections for H1
    # as new connections will spawn if the existing clients cannot keep up with the RPS.
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 1)
    asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                      500)
    asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 1)
    asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
    asserts.assertCounterEqual(counters, "default.total_match_count", 1)

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["count"]), 25)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_mean"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_mean"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_min"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_min"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_max"]), 10)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_max"]), 97)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_pstdev"]), 0)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_header_size"]
            ["raw_pstdev"]), 0)

    asserts.assertGreaterEqual(len(counters), 12)
Code example #4
def check_upload_expectations(fixture, parsed_json, expected_transmitted_bytes,
                              expected_received_bytes):
  counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total",
                                    expected_transmitted_bytes)
  server_stats = fixture.getTestServerStatisticsJson()
  # Server side expectations start failing with larger upload sizes.
  asserts.assertGreaterEqual(
      fixture.getServerStatFromJson(server_stats,
                                    "http.ingress_http.downstream_cx_rx_bytes_total"),
      expected_received_bytes)
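
A hypothetical caller of this helper is sketched below, assuming the same test fixture. The --request-method and --request-body-size flags exist in Nighthawk, but the parametrized body size and byte expectations are illustrative placeholders, not values taken from the example above.

import pytest

@pytest.mark.parametrize(
    "request_body_size,expected_transmitted_bytes,expected_received_bytes",
    [
        # Placeholder values for illustration only; real expectations depend on header overhead.
        (1024, 25000, 25000),
    ])
def test_request_body_upload(http_test_server_fixture, request_body_size,
                             expected_transmitted_bytes, expected_received_bytes):
  """Hypothetical test: upload a fixed-size request body and verify byte counters."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:24", "--request-method", "POST",
      "--request-body-size",
      str(request_body_size)
  ])
  check_upload_expectations(http_test_server_fixture, parsed_json, expected_transmitted_bytes,
                            expected_received_bytes)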
Code example #5
def test_grpc_service_happy_flow(http_test_server_fixture):
    """Test that the gRPC service is able to execute a load test against the test server."""
    http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--termination-predicate", "benchmark.http_2xx:5", "--rps 10",
        "--request-source %s:%s" %
        (http_test_server_fixture.grpc_service.server_ip,
         http_test_server_fixture.grpc_service.server_port),
        http_test_server_fixture.getTestServerRootUri()
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5)
    asserts.assertEqual(counters["requestsource.internal.upstream_rq_200"], 1)
Code example #6
def test_grpc_service_stress(http_test_server_fixture):
    """Test high load."""
    http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--duration 100", "--rps 10000", "--concurrency 4",
        "--termination-predicate", "benchmark.http_2xx:5000",
        "--request-source %s:%s" %
        (http_test_server_fixture.grpc_service.server_ip,
         http_test_server_fixture.grpc_service.server_port),
        http_test_server_fixture.getTestServerRootUri()
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5000)
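    # Matches --concurrency 4: each worker appears to open its own request-source connection.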
    asserts.assertEqual(counters["requestsource.internal.upstream_rq_200"], 4)
Code example #7
def test_request_source_plugin_happy_flow_parametrized(
        http_test_server_fixture, request_source_config, expected_min,
        expected_max):
    """Test that the nighthawkClient can run with request-source-plugin option."""
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        "--termination-predicate", "benchmark.http_2xx:5", "--rps 10",
        "--request-source-plugin-config %s" % request_source_config,
        http_test_server_fixture.getTestServerRootUri(), "--request-header",
        "host: sni.com"
    ])
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(
        parsed_json)
    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_max"]), expected_max)
    asserts.assertEqual(
        int(global_histograms["benchmark_http_client.response_body_size"]
            ["raw_min"]), expected_min)
Code example #8
def _mini_stress_test(fixture, args):
    # Run a test with more rps than we can handle, and a very small client-side queue.
    # We should observe both lots of successful requests as well as time spent in blocking mode.
    parsed_json, _ = fixture.runNighthawkClient(args)
    counters = fixture.getNighthawkCounterMapFromJson(parsed_json)
    # We set a reasonably low expectation of 100 requests. We set it low because we want this
    # test to succeed on a reasonable share of setups (hopefully practically all).
    MIN_EXPECTED_REQUESTS = 100
    asserts.assertCounterEqual(counters, "benchmark.http_2xx",
                               MIN_EXPECTED_REQUESTS)
    if "--h2" in args:
        asserts.assertCounterEqual(counters, "upstream_cx_http2_total", 1)
    else:
        asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1)
    global_histograms = fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)

    if "--open-loop" in args:
        asserts.assertEqual(
            int(global_histograms["sequencer.blocking"]["count"]), 0)
    else:
        asserts.assertGreaterEqual(
            int(global_histograms["sequencer.blocking"]["count"]), 1)

    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.request_to_response"]
            ["count"]), 1)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.latency_2xx"]["count"]),
        1)
    return counters
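
_mini_stress_test is a shared helper; a hypothetical closed-loop caller is sketched below. The flags used (--rps, --connections, --max-active-requests, --max-pending-requests, --termination-predicate) exist in Nighthawk, but the exact combination and values are illustrative rather than the original invocation; an open-loop variant would additionally pass --open-loop.

def test_h1_mini_stress_test(http_test_server_fixture):
    """Hypothetical caller: closed-loop H1 run with a deliberately tiny client-side queue."""
    # The RPS is set far above what a single connection is expected to sustain, so the
    # sequencer should spend time blocking; the termination predicate ends the run once
    # 100 2xx responses have been observed.
    _mini_stress_test(http_test_server_fixture, [
        http_test_server_fixture.getTestServerRootUri(), "--rps", "9999", "--connections", "1",
        "--max-active-requests", "1", "--max-pending-requests", "1", "--duration", "100",
        "--termination-predicate", "benchmark.http_2xx:99"
    ])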
Code example #9
def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture,
                                     duration_parameterization_fixture):
  """Test latency-sample-, query- and reply- counts in various configurations."""
  for concurrency in [1, 2]:
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration",
        str(duration_parameterization_fixture), "--rps",
        str(qps_parameterization_fixture), "--concurrency",
        str(concurrency)
    ])

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

    global_result = http_test_server_fixture.getGlobalResults(parsed_json)
    actual_duration = utility.get_execution_duration_from_global_result_json(global_result)
    # Ensure Nighthawk managed to execute for at least some time.
    assert actual_duration >= 1

    # The actual duration is a float; flooring it here allows us to use
    # the GreaterEqual matchers below.
    total_requests = qps_parameterization_fixture * concurrency * math.floor(actual_duration)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.request_to_response"]["count"]),
        total_requests)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]), total_requests)
    asserts.assertGreaterEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]),
                               total_requests)

    asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", (total_requests))
    # Give system resources some time to recover after the last execution.
    time.sleep(2)
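
The qps_parameterization_fixture and duration_parameterization_fixture arguments suggest parametrized pytest fixtures, so the test above runs once per parameter combination. A minimal sketch, with placeholder parameter values, could look like this:

import pytest

# Placeholder parameter values for illustration only.
@pytest.fixture(params=[25, 100])
def qps_parameterization_fixture(request):
  return request.param

@pytest.fixture(params=[3, 7])
def duration_parameterization_fixture(request):
  return request.param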
Code example #10
def test_tracing_zipkin(http_test_server_fixture):
  """Test zipkin tracing.

  Test that we send spans when our zipkin tracing feature
  is enabled. Note there's no actual zipkin server started, so
  traffic will (hopefully) get sent into the void.
  """
  # TODO(https://github.com/envoyproxy/nighthawk/issues/141):
  # Boot up an actual zipkin server to accept spans we send here & validate based on that.
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--duration 5", "--termination-predicate", "benchmark.http_2xx:49", "--rps 100",
      "--trace zipkin://localhost:79/api/v1/spans",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 50)
  asserts.assertGreaterEqual(counters["tracing.zipkin.reports_dropped"], 9)
  asserts.assertGreaterEqual(counters["tracing.zipkin.spans_sent"], 45)