def test_http_request_release_timing(http_test_server_fixture, qps_parameterization_fixture,
                                     duration_parameterization_fixture):
  """Test latency-sample-, query- and reply- counts in various configurations."""
  for concurrency in [1, 2]:
    parsed_json, _ = http_test_server_fixture.runNighthawkClient([
        http_test_server_fixture.getTestServerRootUri(), "--duration",
        str(duration_parameterization_fixture), "--rps",
        str(qps_parameterization_fixture), "--concurrency",
        str(concurrency)
    ])

    global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
        parsed_json)
    counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)

    global_result = http_test_server_fixture.getGlobalResults(parsed_json)
    actual_duration = utility.get_execution_duration_from_global_result_json(global_result)
    # Ensure Nighthawk managed to execute for at least some time.
    assert actual_duration >= 1

    # The actual duration is a float; flooring it here allows us to use
    # the GreaterEqual matchers below.
    total_requests = qps_parameterization_fixture * concurrency * math.floor(actual_duration)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.request_to_response"]["count"]),
        total_requests)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]),
        total_requests)
    asserts.assertGreaterEqual(
        int(global_histograms["benchmark_http_client.latency_2xx"]["count"]), total_requests)
    asserts.assertCounterGreaterEqual(counters, "benchmark.http_2xx", total_requests)
    # Give system resources some time to recover after the last execution.
    time.sleep(2)

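# The qps and duration parameterization fixtures consumed above are supplied by the test
# harness (e.g. a conftest.py) rather than defined in this file. A minimal, hypothetical
# sketch of how such parameterized pytest fixtures could look; the parameter values below
# are illustrative assumptions, not the project's actual settings:
#
#   @pytest.fixture(params=[100, 1000])
#   def qps_parameterization_fixture(request):
#     return request.param
#
#   @pytest.fixture(params=[3, 7])
#   def duration_parameterization_fixture(request):
#     return request.param
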
def test_http_h1(http_test_server_fixture):
  """Test http1 over plain http.

  Runs the CLI configured to use plain HTTP/1 against our test server, and sanity
  checks statistics from both client and server.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--duration", "100",
      "--termination-predicate", "benchmark.http_2xx:24"
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  asserts.assertCounterEqual(counters, "benchmark.http_2xx", 25)
  asserts.assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400)
  # It is possible that the # of upstream_cx > # of backend connections for H1,
  # as new connections will spawn if the existing clients cannot keep up with the RPS.
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_total", 1)
  asserts.assertCounterGreaterEqual(counters, "upstream_cx_tx_bytes_total", 500)
  asserts.assertCounterGreaterEqual(counters, "upstream_rq_pending_total", 1)
  asserts.assertCounterEqual(counters, "upstream_rq_total", 25)
  asserts.assertCounterEqual(counters, "default.total_match_count", 1)
  global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
      parsed_json)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["count"]), 25)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["count"]), 25)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["raw_mean"]), 10)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["raw_mean"]), 97)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]), 10)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["raw_min"]), 97)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]), 10)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["raw_max"]), 97)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["raw_pstdev"]), 0)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_header_size"]["raw_pstdev"]), 0)

  asserts.assertGreaterEqual(len(counters), 12)

def test_http_h1_response_header_latency_tracking(http_test_server_fixture, server_config):
  """Test emission and tracking of response header latencies.

  Run the CLI configured to track latencies delivered by response header from the test-server.
  Ensure that the origin_latency_statistic histogram receives the correct number of inputs.
  """
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      http_test_server_fixture.getTestServerRootUri(), "--connections", "1", "--rps", "100",
      "--duration", "100", "--termination-predicate", "benchmark.http_2xx:99",
      "--latency-response-header-name", "x-origin-request-receipt-delta"
  ])
  global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
      parsed_json)
  asserts.assertEqual(int(global_histograms["benchmark_http_client.latency_2xx"]["count"]), 100)
  # Verify behavior is correct both with and without the timing filter enabled.
  expected_histogram_count = 99 if "nighthawk_track_timings.yaml" in server_config else 0
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.origin_latency_statistic"]["count"]),
      expected_histogram_count)

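# The server_config argument above is expected to come from a pytest parameterization that
# runs this test both with and without the timing filter installed; the check against
# "nighthawk_track_timings.yaml" relies on that. A hypothetical sketch (the configuration
# paths are illustrative assumptions):
#
#   @pytest.mark.parametrize("server_config", [
#       "configurations/nighthawk_http_origin.yaml",
#       "configurations/nighthawk_track_timings.yaml",
#   ])
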
def test_request_source_plugin_happy_flow_parametrized(http_test_server_fixture,
                                                        request_source_config, expected_min,
                                                        expected_max):
  """Test that the nighthawkClient can run with request-source-plugin option."""
  parsed_json, _ = http_test_server_fixture.runNighthawkClient([
      "--termination-predicate", "benchmark.http_2xx:5", "--rps 10",
      "--request-source-plugin-config %s" % request_source_config,
      http_test_server_fixture.getTestServerRootUri(), "--request-header", "host: sni.com"
  ])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
      parsed_json)
  asserts.assertGreaterEqual(counters["benchmark.http_2xx"], 5)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["raw_max"]),
      expected_max)
  asserts.assertEqual(
      int(global_histograms["benchmark_http_client.response_body_size"]["raw_min"]),
      expected_min)

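# request_source_config, expected_min and expected_max are test arguments, so this test is
# presumably driven by a pytest parameterization that pairs a request-source plugin
# configuration with the response body sizes it should yield. A hypothetical sketch (the
# config path and sizes are illustrative assumptions):
#
#   @pytest.mark.parametrize("request_source_config, expected_min, expected_max", [
#       ("test_data/request_source_plugin_config.yaml", 10, 10),
#   ])
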
def test_http_h1_maxrps_no_client_side_queueing(http_test_server_fixture):
  """Run a max-rps test with the h1 pool against our test server, with no client-side queueing."""
  assert (http_test_server_fixture.test_server.enableCpuProfiler())
  MIN_EXPECTED_REQUESTS = 100
  parsed_json, _ = http_test_server_fixture.runNighthawkClient(
      [http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--duration", "30"])
  counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
  # We expect to have executed a certain amount of requests.
  asserts.assertCounterGreater(counters, "benchmark.http_2xx", MIN_EXPECTED_REQUESTS)
  # We expect to have created only a single connection.
  asserts.assertCounterEqual(counters, "upstream_cx_http1_total", 1)
  global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(
      parsed_json)
  asserts.assertGreater(int(global_histograms["sequencer.blocking"]["count"]),
                        MIN_EXPECTED_REQUESTS)
  asserts.assertGreater(
      int(global_histograms["benchmark_http_client.request_to_response"]["count"]),
      MIN_EXPECTED_REQUESTS)
  # Dump the parsed output for debugging.
  logging.info(str(parsed_json))