def test_grpc_service_down(http_test_server_fixture):
  """Ensure the client fails cleanly when the remote request source is unreachable.

  The client is pointed at a port where no gRPC request-source service listens,
  so we expect the run to fail and a single pending-failure eject to be counted.
  """
  client_args = [
      "--rps 100",
      "--request-source %s:%s" % (http_test_server_fixture.server_ip, "34589"),
      http_test_server_fixture.getTestServerRootUri()
  ]
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(client_args, expect_failure=True)
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  assertEqual(counters["requestsource.upstream_rq_pending_failure_eject"], 1)
def test_grpc_service_happy_flow(http_test_server_fixture):
  """Run against a live gRPC request-source service and sanity-check counters.

  Starts the dummy request-source service, runs the client until at least ten
  2xx responses are observed, and verifies the request source was consulted.
  """
  http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
  grpc_service = http_test_server_fixture.grpc_service
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--termination-predicate", "benchmark.http_2xx:10", "--rps 100",
      "--request-source %s:%s" % (grpc_service.server_ip, grpc_service.server_port),
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  assertGreaterEqual(counters["benchmark.http_2xx"], 10)
  assertEqual(counters["requestsource.internal.upstream_rq_200"], 1)
def test_http_h1_mini_stress_test_without_client_side_queueing(http_test_server_fixture):
  """Run a max-rps test with the h1 pool and no client-side queueing.

  We expect to observe:
  - upstream_rq_pending_total to be equal to 1
  - blocking to be reported by the sequencer
  - no upstream_cx_overflows
  """
  stress_args = [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--duration 2"
  ]
  counters = mini_stress_test_h1(http_test_server_fixture, stress_args)
  assertEqual(counters["upstream_rq_pending_total"], 1)
  assertNotIn("upstream_cx_overflow", counters)
def test_http_h1_mini_stress_test_with_client_side_queueing(http_test_server_fixture):
  """Run a max-rps test with the h1 pool and a small client-side queue.

  We expect to observe:
  - upstream_rq_pending_total increasing
  - upstream_cx_overflow overflows
  - blocking to be reported by the sequencer
  """
  stress_args = [
      http_test_server_fixture.getTestServerRootUri(), "--rps", "999999",
      "--max-pending-requests", "10", "--duration 2"
  ]
  counters = mini_stress_test_h1(http_test_server_fixture, stress_args)
  assertGreater(counters["upstream_rq_pending_total"], 100)
  assertGreater(counters["upstream_cx_overflow"], 0)
def test_http_concurrency(http_test_server_fixture):
  """Verify that concurrency acts as a multiplier on the generated load."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--concurrency 4 --rps 5 --connections 1",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # Quite a loose expectation, but this may fluctuate depending on server load.
  # Ideally we'd see 4 workers * 5 rps * 5s = 100 requests total
  assertGreater(counters["benchmark.http_2xx"], 25)
  assertLessEqual(counters["benchmark.http_2xx"], 100)
  # Each of the four workers should open exactly one HTTP/1 connection.
  assertEqual(counters["upstream_cx_http1_total"], 4)
def test_tracing_zipkin(http_test_server_fixture):
  """Check that spans are emitted when the zipkin tracing feature is enabled.

  Note that no actual zipkin server is started, so the trace traffic will
  (hopefully) be sent into the void.
  """
  # TODO(https://github.com/envoyproxy/nighthawk/issues/141):
  # Boot up an actual zipkin server to accept spans we send here & validate based on that.
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--duration 5", "--termination-predicate", "benchmark.http_2xx:49", "--rps 100",
      "--trace zipkin://localhost:79/api/v1/spans",
      http_test_server_fixture.getTestServerRootUri()
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  assertGreaterEqual(counters["benchmark.http_2xx"], 50)
  # Nothing is listening on port 79, so most reports should be dropped.
  assertGreaterEqual(counters["tracing.zipkin.reports_dropped"], 9)
  assertGreaterEqual(counters["tracing.zipkin.spans_sent"], 45)
def test_http_h2(http_test_server_fixture):
  """Run the CLI configured to use h2c against our test server.

  Sanity-checks statistics from both client and server.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(
      ["--h2", http_test_server_fixture.getTestServerRootUri()])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # All requests should succeed over a single h2 connection.
  assertEqual(counters["benchmark.http_2xx"], 25)
  assertEqual(counters["upstream_cx_destroy"], 1)
  assertEqual(counters["upstream_cx_destroy_local"], 1)
  assertEqual(counters["upstream_cx_http2_total"], 1)
  assertGreaterEqual(counters["upstream_cx_rx_bytes_total"], 1145)
  assertEqual(counters["upstream_cx_total"], 1)
  assertGreaterEqual(counters["upstream_cx_tx_bytes_total"], 403)
  assertEqual(counters["upstream_rq_pending_total"], 1)
  assertEqual(counters["upstream_rq_total"], 25)
  # Guard against unexpected extra counters showing up.
  assertEqual(len(counters), 13)
def test_http_h1(http_test_server_fixture):
  """Run the CLI configured to use plain HTTP/1 against our test server.

  Sanity-checks statistics from both client and server.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(
      [http_test_server_fixture.getTestServerRootUri()])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # All requests should succeed over a single HTTP/1 connection.
  assertEqual(counters["benchmark.http_2xx"], 25)
  assertEqual(counters["upstream_cx_destroy"], 1)
  assertEqual(counters["upstream_cx_destroy_local"], 1)
  assertEqual(counters["upstream_cx_http1_total"], 1)
  assertEqual(counters["upstream_cx_rx_bytes_total"], 3400)
  assertEqual(counters["upstream_cx_total"], 1)
  # IPv6 URIs are shorter on the wire, so fewer bytes are transmitted.
  expected_tx_bytes = 1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1500
  assertEqual(counters["upstream_cx_tx_bytes_total"], expected_tx_bytes)
  assertEqual(counters["upstream_rq_pending_total"], 1)
  assertEqual(counters["upstream_rq_total"], 25)
  # Guard against unexpected extra counters showing up.
  assertEqual(len(counters), 13)