def test_request_latency_calculates_result(self):
    """request_latency correctly calculates the percentage of scenario
    requests that complete within the specified time limit.
    """
    # Both results share the same duration histogram; only the ok/err
    # counts differ between them.
    call_durations = {"1.0": 10, "0.8": 10, "0.9": 10, "1.1": 10, "0.7": 10}
    results = []
    for ok_count, err_count in ((50, 0), (40, 10)):
        results.append(
            {
                "scenario": {
                    "name": "test-scenario",
                    "type": "test-scenario-type",
                    "metrics": {
                        "call_durations": dict(call_durations),
                        "ok_count": ok_count,
                        "err_count": err_count,
                    },
                }
            }
        )

    latency_result = request_latency(results, 1)

    # 20/100 requests took more than 1 second.
    self.assertEqual(latency_result, 0.8)
def test_request_latency_no_matching_results(self):
    """request_latency only considers results which have the
    'scenario.metrics.call_durations' property. None is returned if no
    results match.
    """
    results = [
        # No 'metrics' key at all.
        {"scenario": {"name": "test-scenario", "type": "test-scenario-type"}},
        # 'metrics' present but without 'call_durations'.
        {"scenario": {"name": "test-scenario", "type": "test-scenario-type", "metrics": {}}},
    ]

    latency_result = request_latency(results, 10)

    # assertIsNone checks identity (x is None), which is stricter than
    # assertEqual(x, None) and produces a clearer failure message.
    self.assertIsNone(latency_result)
def test_request_latency_calculates_result(self):
    """request_latency correctly calculates the percentage of scenario
    requests that complete within the specified time limit.
    """
    def scenario_result(ok_count, err_count):
        # Build one scenario result with the shared duration histogram.
        return {
            'scenario': {
                'name': 'test-scenario',
                'type': 'test-scenario-type',
                'metrics': {
                    'call_durations': {
                        '1.0': 10, '0.8': 10, '0.9': 10, '1.1': 10, '0.7': 10
                    },
                    'ok_count': ok_count,
                    'err_count': err_count,
                },
            },
        }

    results = [scenario_result(50, 0), scenario_result(40, 10)]

    latency_result = request_latency(results, 1)

    # 20/100 requests took more than 1 second.
    self.assertEqual(latency_result, 0.8)
def test_request_latency_no_matching_results(self):
    """request_latency only considers results which have the
    'scenario.metrics.call_durations' property. None is returned if no
    results match.
    """
    results = [{
        # Scenario with no 'metrics' key.
        'scenario': {
            'name': 'test-scenario',
            'type': 'test-scenario-type'
        },
    }, {
        # Scenario whose 'metrics' lacks 'call_durations'.
        'scenario': {
            'name': 'test-scenario',
            'type': 'test-scenario-type',
            'metrics': {}
        },
    }]

    latency_result = request_latency(results, 10)

    # assertIsNone checks identity (x is None), which is stricter than
    # assertEqual(x, None) and produces a clearer failure message.
    self.assertIsNone(latency_result)
def test_request_latency_calculates_result(self):
    """request_latency correctly calculates the percentage of scenario
    requests that complete within the specified time limit.
    """
    # Every duration bucket holds 10 calls; fromkeys preserves this order.
    durations = dict.fromkeys(('1.0', '0.8', '0.9', '1.1', '0.7'), 10)
    results = [
        {
            'scenario': {
                'name': 'test-scenario',
                'type': 'test-scenario-type',
                'metrics': {
                    'call_durations': dict(durations),
                    'ok_count': ok_count,
                    'err_count': err_count,
                },
            },
        }
        for ok_count, err_count in [(50, 0), (40, 10)]
    ]

    latency_result = request_latency(results, 1)

    # 20/100 requests took more than 1 second.
    self.assertEqual(latency_result, 0.8)
def test_request_latency_no_matching_results(self):
    """request_latency only considers results which have the
    'scenario.metrics.call_durations' property. None is returned if no
    results match.
    """
    missing_metrics = {
        'scenario': {
            'name': 'test-scenario',
            'type': 'test-scenario-type'
        },
    }
    empty_metrics = {
        'scenario': {
            'name': 'test-scenario',
            'type': 'test-scenario-type',
            'metrics': {}
        },
    }
    results = [missing_metrics, empty_metrics]

    latency_result = request_latency(results, 10)

    # assertIsNone checks identity (x is None), which is stricter than
    # assertEqual(x, None) and produces a clearer failure message.
    self.assertIsNone(latency_result)