Example #1
class TestStatsPrinting(LocustTestCase):
    def setUp(self):
        super().setUp()

        self.stats = RequestStats()
        for i in range(100):
            for method, name, freq in [
                (
                    "GET",
                    "test_entry",
                    5,
                ),
                (
                    "DELETE",
                    "test" * int((STATS_NAME_WIDTH - STATS_TYPE_WIDTH + 4) / len("test")),
                    3,
                ),
            ]:
                self.stats.log_request(method, name, i, 2000 + i)
                if i % freq == 0:
                    self.stats.log_error(method, name, RuntimeError(f"{method} error"))

    def test_print_percentile_stats(self):
        locust.stats.print_percentile_stats(self.stats)
        info = self.mocked_log.info
        self.assertEqual(8, len(info))
        self.assertEqual("Response time percentiles (approximated)", info[0])
        # check that the headline contains the same number of columns as the value rows
        headlines = info[1].replace("# reqs", "#reqs").split()
        self.assertEqual(len(headlines), len(info[3].split()))
        self.assertEqual(len(headlines) - 1, len(info[-2].split()))  # Aggregated, no "Type"
        self.assertEqual(info[2], info[-3])  # table ascii separators

    def test_print_stats(self):
        locust.stats.print_stats(self.stats)
        info = self.mocked_log.info
        self.assertEqual(7, len(info))

        headlines = info[0].replace("# ", "#").split()

        # check number of columns in separator
        self.assertEqual(len(headlines), len(info[1].split("|")) + 2)
        # check entry row
        self.assertEqual(len(headlines), len(info[2].split()))
        # check aggregated row, which is missing value in "type"-column
        self.assertEqual(len(headlines) - 1, len(info[-2].split()))
        # table ascii separators
        self.assertEqual(info[1], info[-3])

    def test_print_error_report(self):
        locust.stats.print_error_report(self.stats)
        info = self.mocked_log.info
        self.assertEqual(7, len(info))
        self.assertEqual("Error report", info[0])

        headlines = info[1].replace("# ", "#").split()
        # check number of columns in headlines vs table ascii separator
        self.assertEqual(len(headlines), len(info[2].split("|")))
        # table ascii separators
        self.assertEqual(info[2], info[-2])
Example #2
File: web.py Project: whitmo/locust
def request_stats():
    global _request_stats_context_cache

    if not _request_stats_context_cache or _request_stats_context_cache[
        "last_time"
    ] < time() - _request_stats_context_cache.get("cache_time", DEFAULT_CACHE_TIME):
        cache_time = _request_stats_context_cache.get("cache_time", DEFAULT_CACHE_TIME)
        now = time()

        stats = []
        for s in chain(_sort_stats(runners.locust_runner.request_stats), [RequestStats.sum_stats("Total")]):
            stats.append(
                {
                    "method": s.method,
                    "name": s.name,
                    "num_reqs": s.num_reqs,
                    "num_failures": s.num_failures,
                    "avg_response_time": s.avg_response_time,
                    "min_response_time": s.min_response_time,
                    "max_response_time": s.max_response_time,
                    "current_rps": s.current_rps,
                    "median_response_time": s.median_response_time,
                    "avg_content_length": s.avg_content_length,
                }
            )

        report = {"stats": stats, "errors": list(runners.locust_runner.errors.iteritems())}
        if stats:
            report["total_rps"] = stats[len(stats) - 1]["current_rps"]
            report["fail_ratio"] = RequestStats.sum_stats("Total").fail_ratio

            # since generating a total response times dict with all response times from all
            # urls is slow, we make a new total response time dict which will consist of one
            # entry per url with the median response time as key and the number of requests as
            # value
            response_times = defaultdict(int)  # used for calculating total median
            for i in xrange(len(stats) - 1):
                response_times[stats[i]["median_response_time"]] += stats[i]["num_reqs"]

            # calculate total median
            stats[len(stats) - 1]["median_response_time"] = median_from_dict(
                stats[len(stats) - 1]["num_reqs"], response_times
            )

        is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
        if is_distributed:
            report["slave_count"] = runners.locust_runner.slave_count

        report["state"] = runners.locust_runner.state
        report["user_count"] = runners.locust_runner.user_count

        elapsed = time() - now
        cache_time = max(
            cache_time, elapsed * 2.0
        )  # Increase cache_time when report generating starts to take longer time
        _request_stats_context_cache = {"last_time": now, "report": report, "cache_time": cache_time}
    else:
        report = _request_stats_context_cache["report"]
    return json.dumps(report)
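
The caching function above leans on median_from_dict to get a total median from a {median_response_time: num_reqs} mapping instead of from every individual sample. A minimal sketch of that idea (not Locust's actual implementation; the helper name and signature are only inferred from the call site above):

def median_from_dict(total, count):
    """Return the median of `total` samples summarized as a {value: occurrences} dict."""
    # Walk the values in ascending order until half of the samples have been passed.
    pos = (total - 1) / 2.0
    for value in sorted(count):
        if count[value] > pos:
            return value
        pos -= count[value]
    return 0

For example, median_from_dict(3, {10: 1, 20: 1, 40: 1}) returns 20, which matches the serialization tests further down that expect a median of 20 for the samples 10, 20, 40.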
Example #3
 def test_error_grouping_errors_with_memory_addresses(self):
     # reset stats
     self.stats = RequestStats()
     class Dummy(object):
         pass
     
     self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
     self.assertEqual(1, len(self.stats.errors))
Example #4
    def test_percentile(self):
        s = RequestStats("GET", "percentile_test")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)
Example #5
    def test_request_connection_error(self):
        class MyLocust(Locust):
            host = "http://localhost:1"

        locust = MyLocust()
        response = locust.client.get("/", timeout=0.1)
        self.assertFalse(response)
        self.assertEqual(1, RequestStats.get("GET", "/").num_failures)
        self.assertEqual(0, RequestStats.get("GET", "/").num_reqs)
Example #6
 def test_request_connection_error(self):
     class MyLocust(Locust):
         host = "http://localhost:1"
     
     locust = MyLocust()
     response = locust.client.get("/", timeout=0.1)
     self.assertFalse(response)
     self.assertEqual(1, RequestStats.get("GET", "/").num_failures)
     self.assertEqual(0, RequestStats.get("GET", "/").num_reqs)
Example #7
 def test_request_stats_content_length(self):
     class MyLocust(Locust):
         host = "http://127.0.0.1:%i" % self.port
 
     locust = MyLocust()
     locust.client.get("/ultra_fast")
     self.assertEqual(RequestStats.get("GET", "/ultra_fast").avg_content_length, len("This is an ultra fast response"))
     locust.client.get("/ultra_fast")
     self.assertEqual(RequestStats.get("GET", "/ultra_fast").avg_content_length, len("This is an ultra fast response"))
Example #8
 def test_request_stats_content_length(self):
     class MyLocust(Locust):
         host = "http://127.0.0.1:%i" % self.port
 
     locust = MyLocust()
     locust.client.get("/ultra_fast")
     self.assertEqual(RequestStats.get("/ultra_fast").avg_content_length, len("This is an ultra fast response"))
     locust.client.get("/ultra_fast")
     self.assertEqual(RequestStats.get("/ultra_fast").avg_content_length, len("This is an ultra fast response"))
Example #9
 def test_print_percentile_stats(self):
     stats = RequestStats()
     for i in range(100):
         stats.log_request("GET", "test_entry", i, 2000+i)
     locust.stats.print_percentile_stats(stats)
     info = self.mocked_log.info
     self.assertEqual(7, len(info))
     # check that the headline contains the same number of columns as the value rows
     headlines = info[1].replace("# reqs", "#reqs").split()
     self.assertEqual(len(headlines), len(info[3].split()))
     self.assertEqual(len(headlines), len(info[5].split()))
Example #10
 def setUp(self):
     RequestStats.global_start_time = time.time()
     self.s = RequestStats("GET", "test_entry")
     self.s.log(45, 0)
     self.s.log(135, 0)
     self.s.log(44, 0)
     self.s.log_error(Exception("dummy fail"))
     self.s.log_error(Exception("dummy fail"))
     self.s.log(375, 0)
     self.s.log(601, 0)
     self.s.log(35, 0)
     self.s.log(79, 0)
     self.s.log_error(Exception("dummy fail"))
Example #11
def request_stats():
    global _request_stats_context_cache
    
    if not _request_stats_context_cache or _request_stats_context_cache["last_time"] < time() - _request_stats_context_cache.get("cache_time", DEFAULT_CACHE_TIME):
        cache_time = _request_stats_context_cache.get("cache_time", DEFAULT_CACHE_TIME)
        now = time()
        
        stats = []
        for s in chain(_sort_stats(runners.locust_runner.request_stats), [RequestStats.sum_stats("Total")]):
            stats.append({
                "method": s.method,
                "name": s.name,
                "num_reqs": s.num_reqs,
                "num_failures": s.num_failures,
                "avg_response_time": s.avg_response_time,
                "min_response_time": s.min_response_time,
                "max_response_time": s.max_response_time,
                "current_rps": s.current_rps,
                "median_response_time": s.median_response_time,
                "avg_content_length": s.avg_content_length,
            })
        
        report = {"stats":stats, "errors":list(runners.locust_runner.errors.iteritems())}
        if stats:
            report["total_rps"] = stats[len(stats)-1]["current_rps"]
            report["fail_ratio"] = RequestStats.sum_stats("Total").fail_ratio
            
            # since generating a total response times dict with all response times from all
            # urls is slow, we make a new total response time dict which will consist of one
            # entry per url with the median response time as key and the number of requests as
            # value
            response_times = defaultdict(int) # used for calculating total median
            for i in xrange(len(stats)-1):
                response_times[stats[i]["median_response_time"]] += stats[i]["num_reqs"]
            
            # calculate total median
            stats[len(stats)-1]["median_response_time"] = median_from_dict(stats[len(stats)-1]["num_reqs"], response_times)
        
        is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
        if is_distributed:
            report["slave_count"] = runners.locust_runner.slave_count
        
        report["state"] = runners.locust_runner.state
        report["user_count"] = runners.locust_runner.user_count

        elapsed = time() - now
        cache_time = max(cache_time, elapsed * 2.0) # Increase cache_time when report generating starts to take longer time
        _request_stats_context_cache = {"last_time": now, "report": report, "cache_time": cache_time}
    else:
        report = _request_stats_context_cache["report"]
    return json.dumps(report)
Example #12
 def setUp(self):
     self.stats = RequestStats()
     self.stats.start_time = time.time()
     self.s = StatsEntry(self.stats, "test_entry", "GET")
     self.s.log(45, 0)
     self.s.log(135, 0)
     self.s.log(44, 0)
     self.s.log_error(Exception("dummy fail"))
     self.s.log_error(Exception("dummy fail"))
     self.s.log(375, 0)
     self.s.log(601, 0)
     self.s.log(35, 0)
     self.s.log(79, 0)
     self.s.log_error(Exception("dummy fail"))
Example #13
 def test_error_grouping(self):
     # reset stats
     self.stats = RequestStats()
     
     self.stats.log_error("GET", "/some-path", Exception("Exception!"))
     self.stats.log_error("GET", "/some-path", Exception("Exception!"))
         
     self.assertEqual(1, len(self.stats.errors))
     self.assertEqual(2, list(self.stats.errors.values())[0].occurrences)
     
     self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
     self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
     self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
     self.assertEqual(3, len(self.stats.errors))
Example #14
 def test_request_stats_no_content_length_no_prefetch(self):
     class MyLocust(Locust):
         host = "http://127.0.0.1:%i" % self.port
     l = MyLocust()
     path = "/no_content_length"
     r = l.client.get(path, prefetch=False)
     self.assertEqual(0, RequestStats.get("GET", path).avg_content_length)
Example #15
def distribution_stats_csv():
    rows = [
        ",".join((
            '"Name"',
            '"# requests"',
            '"50%"',
            '"66%"',
            '"75%"',
            '"80%"',
            '"90%"',
            '"95%"',
            '"98%"',
            '"99%"',
            '"100%"',
        ))
    ]
    for s in chain(
            _sort_stats(runners.locust_runner.request_stats),
        [RequestStats.sum_stats("Total", full_request_history=True)]):
        if s.num_reqs:
            rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
        else:
            rows.append(
                '"%s",0,"N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A"'
                % s.name)

    response = make_response("\n".join(rows))
    file_name = "distribution_{0}.csv".format(time())
    disposition = "attachment;filename={0}".format(file_name)
    response.headers["Content-type"] = "text/csv"
    response.headers["Content-disposition"] = disposition
    return response
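
For context, views like the one above are plain Flask view functions; in Locust's web UI they are registered as routes on the Flask app. A rough, self-contained sketch of that wiring (the route path and app object are assumptions, not taken from the snippet above):

from flask import Flask, make_response

app = Flask(__name__)

@app.route("/stats/distribution/csv")
def distribution_csv_sketch():
    # Placeholder rows standing in for the percentile rows built above.
    rows = ['"Name","# requests","50%","100%"', '"Total",0,"N/A","N/A"']
    response = make_response("\n".join(rows))
    response.headers["Content-type"] = "text/csv"
    response.headers["Content-disposition"] = "attachment;filename=distribution.csv"
    return response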
Example #16
    def __init__(
            self, *,
            user_classes=[],
            shape_class=None,
            tags=None,
            exclude_tags=None,
            events=None,
            host=None,
            reset_stats=False,
            step_load=False,
            stop_timeout=None,
            catch_exceptions=True,
            parsed_options=None,
    ):
        if events:
            self.events = events
        else:
            self.events = Events()

        self.user_classes = user_classes
        self.shape_class = shape_class
        self.tags = tags
        self.exclude_tags = exclude_tags
        self.stats = RequestStats()
        self.host = host
        self.reset_stats = reset_stats
        self.step_load = step_load
        self.stop_timeout = stop_timeout
        self.catch_exceptions = catch_exceptions
        self.parsed_options = parsed_options

        self._filter_tasks_by_tags()
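
For reference, a minimal sketch of how an Environment like the one above is typically created and attached to a runner in the Locust 1.x API (the user class, host value, and create_local_runner() call are assumptions here, not part of the constructor shown):

from locust import HttpUser, task, between
from locust.env import Environment

class WebsiteUser(HttpUser):
    wait_time = between(1, 2)

    @task
    def index(self):
        self.client.get("/")

# Hypothetical wiring: the Environment owns the shared RequestStats instance.
env = Environment(user_classes=[WebsiteUser], host="http://127.0.0.1:8080")
runner = env.create_local_runner()
print(env.stats.total.num_requests)  # aggregated StatsEntry across all requests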
Example #17
def distribution_stats_csv():
    rows = [
        ",".join((
            '"Name"',
            '"# requests"',
            '"50%"',
            '"66%"',
            '"75%"',
            '"80%"',
            '"90%"',
            '"95%"',
            '"98%"',
            '"99%"',
            '"100%"',
        ))
    ]
    for s in chain(
            _sort_stats(runners.locust_runner.request_stats),
        [RequestStats.sum_stats("Total", full_request_history=True)]):
        try:
            rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
        except Exception as e:
            logger.error(
                "Failed to calculate percentile for url stats {0}".format(
                    s.name))
            logger.exception(e)
Example #18
def distribution_stats_csv():
    rows = [",".join((
        '"Name"',
        '"# requests"',
        '"50%"',
        '"66%"',
        '"75%"',
        '"80%"',
        '"90%"',
        '"95%"',
        '"98%"',
        '"99%"',
        '"100%"',
    ))]
    for s in chain(_sort_stats(runners.locust_runner.request_stats), [RequestStats.sum_stats("Total", full_request_history=True)]):
        if s.num_reqs:
            rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
        else:
            rows.append('"%s",0,"N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A"' % s.name)

    response = make_response("\n".join(rows))
    file_name = "distribution_{0}.csv".format(time())
    disposition = "attachment;filename={0}".format(file_name)
    response.headers["Content-type"] = "text/csv"
    response.headers["Content-disposition"] = disposition
    return response
Example #19
File: web.py Project: d1on/locust
def request_stats_csv():
    from core import locust_runner
    
    rows = [
        ",".join([
            '"Name"',
            '"# requests"',
            '"# failures"',
            '"Median response time"',
            '"Average response time"',
            '"Min response time"', 
            '"Max response time"',
            '"Average Content-Length"',
            '"Requests/s"',
        ])
    ]
    
    for s in chain(_sort_stats(locust_runner.request_stats), [RequestStats.sum_stats("Total", full_request_history=True)]):
        rows.append('"%s",%i,%i,%i,%i,%i,%i,%i,%.2f' % (
            s.name,
            s.num_reqs,
            s.num_failures,
            s.median_response_time,
            s.avg_response_time,
            s.min_response_time or 0,
            s.max_response_time,
            s.avg_content_length,
            s.total_rps,
        ))
    
    response = make_response("\n".join(rows))
    response.headers["Content-type"] = "text/csv"
    return response
Example #20
    def test_request_stats_named_endpoint(self):
        class MyLocust(Locust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        locust.client.get("/ultra_fast", name="my_custom_name")
        self.assertEqual(1, RequestStats.get("GET", "my_custom_name").num_reqs)
Example #21
 def test_request_stats_no_content_length(self):
     class MyLocust(Locust):
         host = "http://127.0.0.1:%i" % self.port
     l = MyLocust()
     path = "/no_content_length"
     r = l.client.get(path)
     self.assertEqual(RequestStats.get("GET", path).avg_content_length, len("This response does not have content-length in the header"))
Example #22
 def test_request_stats_query_variables(self):
     class MyLocust(Locust):
         host = "http://127.0.0.1:%i" % self.port
 
     locust = MyLocust()
     locust.client.get("/ultra_fast?query=1")
     self.assertEqual(1, RequestStats.get("GET", "/ultra_fast?query=1").num_reqs)
Example #23
def distribution_stats_csv():
    from core import locust_runner

    rows = [
        ",".join((
            '"Name"',
            '"# requests"',
            '"50%"',
            '"66%"',
            '"75%"',
            '"80%"',
            '"90%"',
            '"95%"',
            '"98%"',
            '"99%"',
            '"100%"',
        ))
    ]
    for s in chain(
            _sort_stats(locust_runner.request_stats),
        [RequestStats.sum_stats("Total", full_request_history=True)]):
        rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))

    response = make_response("\n".join(rows))
    response.headers["Content-type"] = "text/csv"
    return response
Example #24
 def test_request_stats_named_endpoint(self):
     class MyLocust(Locust):
         host = "http://127.0.0.1:%i" % self.port
 
     locust = MyLocust()
     locust.client.get("/ultra_fast", name="my_custom_name")
     self.assertEqual(1, RequestStats.get("GET", "my_custom_name").num_reqs)
Example #25
def request_stats_csv():
    from core import locust_runner

    rows = [
        ",".join([
            '"Name"',
            '"# requests"',
            '"# failures"',
            '"Median response time"',
            '"Average response time"',
            '"Min response time"',
            '"Max response time"',
            '"Average Content-Length"',
            '"Requests/s"',
        ])
    ]

    for s in chain(
            _sort_stats(locust_runner.request_stats),
        [RequestStats.sum_stats("Total", full_request_history=True)]):
        rows.append('"%s",%i,%i,%i,%i,%i,%i,%i,%.2f' % (
            s.name,
            s.num_reqs,
            s.num_failures,
            s.median_response_time,
            s.avg_response_time,
            s.min_response_time or 0,
            s.max_response_time,
            s.avg_content_length,
            s.total_rps,
        ))

    response = make_response("\n".join(rows))
    response.headers["Content-type"] = "text/csv"
    return response
Example #26
 def test_error_grouping_errors_with_memory_addresses(self):
     # reset stats
     self.stats = RequestStats()
     class Dummy(object):
         pass
     
     self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
     assert 1 == len(self.stats.errors)
Example #27
    def test_log_request_name_argument(self):
        from locust.stats import RequestStats
        self.response = ""

        class MyLocust(Locust):
            tasks = []
            host = "http://127.0.0.1:%i" % self.port

            @task()
            def t1(l):
                self.response = l.client.get("/ultra_fast", name="new name!")

        my_locust = MyLocust()
        my_locust.t1()

        self.assertEqual(1, RequestStats.get("new name!").num_reqs)
        self.assertEqual(0, RequestStats.get("/ultra_fast").num_reqs)
Example #28
    def test_log_request_name_argument(self):
        from locust.stats import RequestStats
        self.response = ""
        
        class MyLocust(Locust):
            tasks = []
            host = "http://127.0.0.1:%i" % self.port
            
            @task()
            def t1(l):
                self.response = l.client.get("/ultra_fast", name="new name!")

        my_locust = MyLocust()
        my_locust.t1()
        
        self.assertEqual(1, RequestStats.get("GET", "new name!").num_reqs)
        self.assertEqual(0, RequestStats.get("GET", "/ultra_fast").num_reqs)
Example #29
    def test_request_stats_no_content_length_no_prefetch(self):
        class MyLocust(Locust):
            host = "http://127.0.0.1:%i" % self.port

        l = MyLocust()
        path = "/no_content_length"
        r = l.client.get(path, prefetch=False)
        self.assertEqual(0, RequestStats.get("GET", path).avg_content_length)
Example #30
    def test_request_stats_query_variables(self):
        class MyLocust(Locust):
            host = "http://127.0.0.1:%i" % self.port

        locust = MyLocust()
        locust.client.get("/ultra_fast?query=1")
        self.assertEqual(
            1,
            RequestStats.get("GET", "/ultra_fast?query=1").num_reqs)
Example #31
 def setUp(self):
     self.stats = RequestStats()
     def log(response_time, size):
         self.stats.log_request("GET", "test_entry", response_time, size)
     def log_error(exc):
         self.stats.log_error("GET", "test_entry", exc)
     log(45, 1)
     log(135, 1)
     log(44, 1)
     log(None, 1)        
     log_error(Exception("dummy fail"))
     log_error(Exception("dummy fail"))
     log(375, 1)
     log(601, 1)
     log(35, 1)
     log(79, 1)
     log(None, 1)        
     log_error(Exception("dummy fail"))
     self.s = self.stats.get("test_entry", "GET")
Example #32
    def test_request_stats_no_content_length(self):
        class MyLocust(Locust):
            host = "http://127.0.0.1:%i" % self.port

        l = MyLocust()
        path = "/no_content_length"
        r = l.client.get(path)
        self.assertEqual(
            RequestStats.get("GET", path).avg_content_length,
            len("This response does not have content-length in the header"))
Example #33
 def test_serialize_through_message(self):
     """
     Serialize a RequestStats instance, then serialize it through a Message, 
     and unserialize the whole thing again. This is done "IRL" when stats are sent 
     from slaves to master.
     """
     s1 = RequestStats("GET", "test")
     s1.log(10, 0)
     s1.log(20, 0)
     s1.log(40, 0)
     u1 = RequestStats.unserialize(s1.serialize())
     
     data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
     u1 = RequestStats.unserialize(data)
     
     self.assertEqual(20, u1.median_response_time)
Example #34
    def setUp(self):
        super().setUp()

        self.stats = RequestStats()
        for i in range(100):
            for method, name, freq in [
                (
                    "GET",
                    "test_entry",
                    5,
                ),
                (
                    "DELETE",
                    "test" * int((STATS_NAME_WIDTH - STATS_TYPE_WIDTH + 4) / len("test")),
                    3,
                ),
            ]:
                self.stats.log_request(method, name, i, 2000 + i)
                if i % freq == 0:
                    self.stats.log_error(method, name, RuntimeError(f"{method} error"))
Example #35
 def test_max_requests(self):
     class MyTaskSet(TaskSet):
         @task
         def my_task(self):
             self.client.get("/ultra_fast")
     class MyLocust(Locust):
         host = "http://127.0.0.1:%i" % self.port
         task_set = MyTaskSet
         min_wait = 1
         max_wait = 1
         
     try:
         from locust.exception import StopLocust
         RequestStats.clear_all()
         RequestStats.global_max_requests = 2
         
         l = MyLocust()
         self.assertRaises(StopLocust, lambda: l.task_set(l).run())
         self.assertEqual(2, RequestStats.total_num_requests)
         
         RequestStats.clear_all()
         RequestStats.global_max_requests = 2
         self.assertEqual(0, RequestStats.total_num_requests)
         
         l.run()
         self.assertEqual(2, RequestStats.total_num_requests)
     finally:
         RequestStats.clear_all()
         RequestStats.global_max_requests = None
Example #36
    def test_max_requests(self):
        class MyTaskSet(TaskSet):
            @task
            def my_task(self):
                self.client.get("/ultra_fast")

        class MyLocust(Locust):
            host = "http://127.0.0.1:%i" % self.port
            task_set = MyTaskSet
            min_wait = 1
            max_wait = 1

        try:
            from locust.exception import StopLocust
            RequestStats.clear_all()
            RequestStats.global_max_requests = 2

            l = MyLocust()
            self.assertRaises(StopLocust, lambda: l.task_set(l).run())
            self.assertEqual(2, RequestStats.total_num_requests)

            RequestStats.clear_all()
            RequestStats.global_max_requests = 2
            self.assertEqual(0, RequestStats.total_num_requests)

            l.run()
            self.assertEqual(2, RequestStats.total_num_requests)
        finally:
            RequestStats.clear_all()
            RequestStats.global_max_requests = None
Example #37
 def setUp(self):
     RequestStats.global_start_time = time.time()
     self.s = RequestStats("GET", "test_entry")
     self.s.log(45, 0)
     self.s.log(135, 0)
     self.s.log(44, 0)
     self.s.log_error(Exception("dummy fail"))
     self.s.log_error(Exception("dummy fail"))
     self.s.log(375, 0)
     self.s.log(601, 0)
     self.s.log(35, 0)
     self.s.log(79, 0)
     self.s.log_error(Exception("dummy fail"))
Example #38
    def test_percentile(self):
        s = RequestStats("GET", "percentile_test")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)
Example #39
 def setUp(self):
     self.stats = RequestStats()
     self.stats.start_time = time.time()
     self.s = StatsEntry(self.stats, "test_entry", "GET")
     self.s.log(45, 0)
     self.s.log(135, 0)
     self.s.log(44, 0)
     self.s.log_error(Exception("dummy fail"))
     self.s.log_error(Exception("dummy fail"))
     self.s.log(375, 0)
     self.s.log(601, 0)
     self.s.log(35, 0)
     self.s.log(79, 0)
     self.s.log_error(Exception("dummy fail"))
Example #40
 def test_error_grouping(self):
     # reset stats
     self.stats = RequestStats()
     
     self.stats.log_error("GET", "/some-path", Exception("Exception!"))
     self.stats.log_error("GET", "/some-path", Exception("Exception!"))
         
     assert 1 == len(self.stats.errors)
     assert 2 == list(self.stats.errors.values())[0].occurences
     
     self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
     self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
     self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
     assert 3 == len(self.stats.errors)
Example #41
    def create_worker_runner(self, master_host, master_port):
        """
        Create a :class:`WorkerRunner <locust.runners.WorkerRunner>` instance for this Environment

        :param master_host: Host/IP of a running master node
        :param master_port: Port on master node to connect to
        """
        # Create a new RequestStats with use_response_times_cache set to False to save some memory
        # and CPU cycles, since the response_times_cache is not needed for Worker nodes
        self.stats = RequestStats(use_response_times_cache=False)
        return self._create_runner(
            WorkerRunner,
            master_host=master_host,
            master_port=master_port,
        )
Example #42
 def test_slave_stats_report_median(self):
     import mock
     
     class MyTestLocust(Locust):
         pass
     
     with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
         master = MasterLocustRunner(MyTestLocust, 10, 10, None)
         server.mocked_send(Message("client_ready", None, "fake_client"))
         sleep(0)
         
         RequestStats.get("GET", "/").log(100, 23455)
         RequestStats.get("GET", "/").log(800, 23455)
         RequestStats.get("GET", "/").log(700, 23455)
         
         data = {"user_count":1}
         events.report_to_master.fire("fake_client", data)
         RequestStats.reset_all()
         
         server.mocked_send(Message("stats", data, "fake_client"))
         sleep(0)
         s = RequestStats.get("GET", "/")
         self.assertEqual(700, s.median_response_time)
Example #43
File: web.py Project: whitmo/locust
def request_stats_csv():
    rows = [
        ",".join(
            [
                '"Method"',
                '"Name"',
                '"# requests"',
                '"# failures"',
                '"Median response time"',
                '"Average response time"',
                '"Min response time"',
                '"Max response time"',
                '"Average Content-Length"',
                '"Requests/s"',
            ]
        )
    ]

    for s in chain(
        _sort_stats(runners.locust_runner.request_stats), [RequestStats.sum_stats("Total", full_request_history=True)]
    ):
        rows.append(
            '"%s","%s",%i,%i,%i,%i,%i,%i,%i,%.2f'
            % (
                s.method,
                s.name,
                s.num_reqs,
                s.num_failures,
                s.median_response_time,
                s.avg_response_time,
                s.min_response_time or 0,
                s.max_response_time,
                s.avg_content_length,
                s.total_rps,
            )
        )

    response = make_response("\n".join(rows))
    file_name = "requests_{0}.csv".format(time())
    disposition = "attachment;filename={0}".format(file_name)
    response.headers["Content-type"] = "text/csv"
    response.headers["Content-disposition"] = disposition
    return response
Example #44
File: web.py Project: heyman/locust
def distribution_stats_csv():
    rows = [",".join((
        '"Name"',
        '"# requests"',
        '"50%"',
        '"66%"',
        '"75%"',
        '"80%"',
        '"90%"',
        '"95%"',
        '"98%"',
        '"99%"',
        '"100%"',
    ))]
    for s in chain(_sort_stats(runners.locust_runner.request_stats), [RequestStats.sum_stats("Total", full_request_history=True)]):
        rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
    
    response = make_response("\n".join(rows))
    response.headers["Content-type"] = "text/csv"
    return response
Example #45
File: web.py Project: hstack/locust
def distribution_stats_csv():
    rows = [",".join((
        '"Name"',
        '"# requests"',
        '"50%"',
        '"66%"',
        '"75%"',
        '"80%"',
        '"90%"',
        '"95%"',
        '"98%"',
        '"99%"',
        '"100%"',
    ))]
    for s in chain(_sort_stats(runners.locust_runner.request_stats), [RequestStats.sum_stats("Total", full_request_history=True)]):
        try:
            rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
        except Exception as e:
            logger.error("Failed to calculate percentile for url stats {0}".format(s.name))
            logger.exception(e)
Example #46
def request_stats_csv():
    rows = [
        ",".join([
            '"Method"',
            '"Name"',
            '"# requests"',
            '"# failures"',
            '"Median response time"',
            '"Average response time"',
            '"Min response time"',
            '"Max response time"',
            '"Average Content Size"',
            '"Requests/s"',
        ])
    ]

    for s in chain(
            _sort_stats(runners.locust_runner.request_stats),
        [RequestStats.sum_stats("Total", full_request_history=True)]):
        rows.append('"%s","%s",%i,%i,%i,%i,%i,%i,%i,%.2f' % (
            s.method,
            s.name,
            s.num_reqs,
            s.num_failures,
            s.median_response_time,
            s.avg_response_time,
            s.min_response_time or 0,
            s.max_response_time,
            s.avg_content_length,
            s.total_rps,
        ))

    response = make_response("\n".join(rows))
    file_name = "requests_{0}.csv".format(time())
    disposition = "attachment;filename={0}".format(file_name)
    response.headers["Content-type"] = "text/csv"
    response.headers["Content-disposition"] = disposition
    return response
Example #47
 def test_master_current_response_times(self):
     class MyTestLocust(Locust):
         pass
     
     start_time = 1
     with mock.patch("time.time") as mocked_time:
         mocked_time.return_value = start_time
         global_stats.reset_all()
         with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
             master = MasterLocustRunner(MyTestLocust, self.options)
             mocked_time.return_value += 1.0234
             server.mocked_send(Message("client_ready", None, "fake_client"))
             stats = RequestStats()
             stats.log_request("GET", "/1", 100, 3546)
             stats.log_request("GET", "/1", 800, 56743)
             server.mocked_send(Message("stats", {
                 "stats":stats.serialize_stats(),
                 "stats_total": stats.total.get_stripped_report(),
                 "errors":stats.serialize_errors(),
                 "user_count": 1,
             }, "fake_client"))
             mocked_time.return_value += 1
             stats2 = RequestStats()
             stats2.log_request("GET", "/2", 400, 2201)
             server.mocked_send(Message("stats", {
                 "stats":stats2.serialize_stats(),
                 "stats_total": stats2.total.get_stripped_report(),
                 "errors":stats2.serialize_errors(),
                 "user_count": 2,
             }, "fake_client"))
             mocked_time.return_value += 4
             self.assertEqual(400, master.stats.total.get_current_response_time_percentile(0.5))
             self.assertEqual(800, master.stats.total.get_current_response_time_percentile(0.95))
             
             # let 10 seconds pass, do some more requests, send them to the master and make
             # sure the current response time percentiles only account for these new requests
             mocked_time.return_value += 10.10023
             stats.log_request("GET", "/1", 20, 1)
             stats.log_request("GET", "/1", 30, 1)
             stats.log_request("GET", "/1", 3000, 1)
             server.mocked_send(Message("stats", {
                 "stats":stats.serialize_stats(),
                 "stats_total": stats.total.get_stripped_report(),
                 "errors":stats.serialize_errors(),
                 "user_count": 2,
             }, "fake_client"))
             self.assertEqual(30, master.stats.total.get_current_response_time_percentile(0.5))
             self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95))
Example #48
 def test_master_total_stats_with_none_response_times(self):
     class MyTestLocust(Locust):
         pass
     
     with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server:
         master = MasterLocustRunner(MyTestLocust, self.options)
         server.mocked_send(Message("client_ready", None, "fake_client"))
         stats = RequestStats()
         stats.log_request("GET", "/1", 100, 3546)
         stats.log_request("GET", "/1", 800, 56743)
         stats.log_request("GET", "/1", None, 56743)
         stats2 = RequestStats()
         stats2.log_request("GET", "/2", 700, 2201)
         stats2.log_request("GET", "/2", None, 2201)
         stats3 = RequestStats()
         stats3.log_request("GET", "/3", None, 2201)
         server.mocked_send(Message("stats", {
             "stats":stats.serialize_stats(), 
             "stats_total": stats.total.serialize(),
             "errors":stats.serialize_errors(),
             "user_count": 1,
         }, "fake_client"))
         server.mocked_send(Message("stats", {
             "stats":stats2.serialize_stats(), 
             "stats_total": stats2.total.serialize(),
             "errors":stats2.serialize_errors(),
             "user_count": 2,
         }, "fake_client"))
         server.mocked_send(Message("stats", {
             "stats":stats3.serialize_stats(), 
             "stats_total": stats3.total.serialize(),
             "errors":stats3.serialize_errors(),
             "user_count": 2,
         }, "fake_client"))
         self.assertEqual(700, master.stats.total.median_response_time)
Example #49
 def setUp(self, *args, **kwargs):
     super(TestStatsEntry, self).setUp(*args, **kwargs)
     self.stats = RequestStats()
Example #50
 def setUp(self, *args, **kwargs):
     super(TestStatsEntryResponseTimesCache, self).setUp(*args, **kwargs)
     self.stats = RequestStats()
Example #51
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        self.stats = RequestStats()

        def log(response_time, size):
            self.stats.log_request("GET", "test_entry", response_time, size)

        def log_error(exc):
            self.stats.log_error("GET", "test_entry", exc)

        log(45, 1)
        log(135, 1)
        log(44, 1)
        log(None, 1)
        log_error(Exception("dummy fail"))
        log_error(Exception("dummy fail"))
        log(375, 1)
        log(601, 1)
        log(35, 1)
        log(79, 1)
        log(None, 1)
        log_error(Exception("dummy fail"))
        self.s = self.stats.get("test_entry", "GET")

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_median_out_of_min_max_bounds(self):
        s = StatsEntry(self.stats, "median_test", "GET")
        s.log(6034, 0)
        self.assertEqual(s.median_response_time, 6034)
        s.reset()
        s.log(6099, 0)
        self.assertEqual(s.median_response_time, 6099)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 9)

    def test_current_rps(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 4.5)

        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_current_fail_per_sec(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_fail_per_sec, 1.5)

        self.stats.total.last_request_timestamp = int(time.time()) + 12
        self.assertEqual(self.s.current_fail_per_sec, 0.3)

        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_fail_per_sec, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 9)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428572)

    def test_total_content_length(self):
        self.assertEqual(self.s.total_content_length, 9)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)
        self.assertNotEqual(None, self.s.last_request_timestamp)
        self.s.reset()
        self.assertEqual(None, self.s.last_request_timestamp)

    def test_avg_only_none(self):
        self.s.reset()
        self.s.log(None, 123)
        self.assertEqual(self.s.avg_response_time, 0)
        self.assertEqual(self.s.median_response_time, 0)
        self.assertEqual(self.s.get_response_time_percentile(0.5), 0)

    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        self.assertEqual(756, self.s.min_response_time)

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1)
        s.extend(s2)

        self.assertEqual(s.num_requests, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)

    def test_aggregation_with_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        s1.log(992, 0)  # (rounded 990) max
        s1.log(142, 0)  # (rounded 140)
        s1.log(552, 0)  # (rounded 550)
        s1.log(557, 0)  # (rounded 560)
        s1.log(387, 0)  # (rounded 390)
        s1.log(557, 0)  # (rounded 560)
        s1.log(977, 0)  # (rounded 980)

        self.assertEqual(s1.num_requests, 8)
        self.assertEqual(s1.median_response_time, 550)
        self.assertEqual(s1.avg_response_time, 535.75)
        self.assertEqual(s1.min_response_time, 122)
        self.assertEqual(s1.max_response_time, 992)

    def test_aggregation_min_response_time(self):
        s1 = StatsEntry(self.stats, "min", "GET")
        s1.log(10, 0)
        self.assertEqual(10, s1.min_response_time)
        s2 = StatsEntry(self.stats, "min", "GET")
        s1.extend(s2)
        self.assertEqual(10, s1.min_response_time)

    def test_aggregation_last_request_timestamp(self):
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.extend(s2)
        self.assertEqual(None, s1.last_request_timestamp)
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.last_request_timestamp = 666
        s1.extend(s2)
        self.assertEqual(666, s1.last_request_timestamp)
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s2.last_request_timestamp = 666
        s1.extend(s2)
        self.assertEqual(666, s1.last_request_timestamp)
        s1 = StatsEntry(self.stats, "r", "GET")
        s2 = StatsEntry(self.stats, "r", "GET")
        s1.last_request_timestamp = 666
        s1.last_request_timestamp = 700
        s1.extend(s2)
        self.assertEqual(700, s1.last_request_timestamp)

    def test_percentile_rounded_down(self):
        s1 = StatsEntry(self.stats, "rounding down!", "GET")
        s1.log(122, 0)  # (rounded 120) min
        actual_percentile = s1.percentile()
        self.assertEqual(
            actual_percentile,
            " GET rounding down!                                                  1    120    120    120    120    120    120    120    120    120    120    120"
        )

    def test_percentile_rounded_up(self):
        s2 = StatsEntry(self.stats, "rounding up!", "GET")
        s2.log(127, 0)  # (rounded 130) min
        actual_percentile = s2.percentile()
        self.assertEqual(
            actual_percentile,
            " GET rounding up!                                                    1    130    130    130    130    130    130    130    130    130    130    130"
        )

    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()

        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))

        self.assertEqual(1, len(self.stats.errors))
        self.assertEqual(2, list(self.stats.errors.values())[0].occurrences)

        self.stats.log_error("GET", "/some-path",
                             Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path",
                             Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path",
                             Exception("Third exception!"))
        self.assertEqual(3, len(self.stats.errors))

    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()

        class Dummy(object):
            pass

        self.stats.log_error("GET", "/",
                             Exception("Error caused by %r" % Dummy()))
        self.assertEqual(1, len(self.stats.errors))

    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message, 
        and unserialize the whole thing again. This is done "IRL" when stats are sent 
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = StatsEntry.unserialize(s1.serialize())

        data = Message.unserialize(
            Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)

        self.assertEqual(20, u1.median_response_time)
Example #52
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        RequestStats.global_start_time = time.time()
        self.s = RequestStats("GET", "test_entry")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = RequestStats("GET", "percentile_test")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 7)

    def test_current_rps(self):
        self.s.global_last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 3.5)

        self.s.global_last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_reqs, 7)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428571428571428571429)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_reqs, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)

    def test_aggregation(self):
        s1 = RequestStats("GET", "aggregate me!")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = RequestStats("GET", "aggregate me!")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = RequestStats("GET", "")
        s.iadd_stats(s1, full_request_history=True)
        s.iadd_stats(s2, full_request_history=True)

        self.assertEqual(s.num_reqs, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)
    
    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message, 
        and unserialize the whole thing again. This is done "IRL" when stats are sent 
        from slaves to master.
        """
        s1 = RequestStats("GET", "test")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = RequestStats.unserialize(s1.serialize())
        
        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = RequestStats.unserialize(data)
        
        self.assertEqual(20, u1.median_response_time)
Example #53
    def test_aggregation(self):
        s1 = RequestStats("GET", "aggregate me!")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = RequestStats("GET", "aggregate me!")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = RequestStats("GET", "")
        s.iadd_stats(s1, full_request_history=True)
        s.iadd_stats(s2, full_request_history=True)

        self.assertEqual(s.num_reqs, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)
Example #54
def reset_stats():
    RequestStats.reset_all()
    return "ok"
Example #55
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        RequestStats.global_start_time = time.time()
        self.s = RequestStats("GET", "test_entry")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = RequestStats("GET", "percentile_test")
        for x in xrange(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 7)

    def test_current_rps(self):
        self.s.global_last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 3.5)

        self.s.global_last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_reqs, 7)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428571428571428571429)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_reqs, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)

    def test_aggregation(self):
        s1 = RequestStats("GET", "aggregate me!")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = RequestStats("GET", "aggregate me!")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = RequestStats("GET", "")
        s.iadd_stats(s1, full_request_history=True)
        s.iadd_stats(s2, full_request_history=True)

        self.assertEqual(s.num_reqs, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)
Example #56
 def test_master_current_response_times(self):
     import mock
     
     class MyTestLocust(Locust):
         pass
     
     start_time = 1
     with mock.patch("time.time") as mocked_time:
         mocked_time.return_value = start_time
         global_stats.reset_all()
         with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
             master = MasterLocustRunner(MyTestLocust, self.options)
             mocked_time.return_value += 1
             server.mocked_send(Message("client_ready", None, "fake_client"))
             stats = RequestStats()
             stats.log_request("GET", "/1", 100, 3546)
             stats.log_request("GET", "/1", 800, 56743)
             server.mocked_send(Message("stats", {
                 "stats":stats.serialize_stats(),
                 "stats_total": stats.total.get_stripped_report(),
                 "errors":stats.serialize_errors(),
                 "user_count": 1,
             }, "fake_client"))
             mocked_time.return_value += 1
             stats2 = RequestStats()
             stats2.log_request("GET", "/2", 400, 2201)
             server.mocked_send(Message("stats", {
                 "stats":stats2.serialize_stats(),
                 "stats_total": stats2.total.get_stripped_report(),
                 "errors":stats2.serialize_errors(),
                 "user_count": 2,
             }, "fake_client"))
             mocked_time.return_value += 4
             self.assertEqual(400, master.stats.total.get_current_response_time_percentile(0.5))
             self.assertEqual(800, master.stats.total.get_current_response_time_percentile(0.95))
             
             # let 10 seconds pass, do some more requests, send them to the master and make
             # sure the current response time percentiles only account for these new requests
             mocked_time.return_value += 10
             stats.log_request("GET", "/1", 20, 1)
             stats.log_request("GET", "/1", 30, 1)
             stats.log_request("GET", "/1", 3000, 1)
             server.mocked_send(Message("stats", {
                 "stats":stats.serialize_stats(),
                 "stats_total": stats.total.get_stripped_report(),
                 "errors":stats.serialize_errors(),
                 "user_count": 2,
             }, "fake_client"))
             self.assertEqual(30, master.stats.total.get_current_response_time_percentile(0.5))
             self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95))
Example #57
0
 def test_master_total_stats(self):
     import mock
     
     class MyTestLocust(Locust):
         pass
     
     with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
         master = MasterLocustRunner(MyTestLocust, self.options)
         server.mocked_send(Message("client_ready", None, "fake_client"))
         stats = RequestStats()
         stats.log_request("GET", "/1", 100, 3546)
         stats.log_request("GET", "/1", 800, 56743)
         stats2 = RequestStats()
         stats2.log_request("GET", "/2", 700, 2201)
         server.mocked_send(Message("stats", {
             "stats":stats.serialize_stats(), 
             "stats_total": stats.total.serialize(),
             "errors":stats.serialize_errors(),
             "user_count": 1,
         }, "fake_client"))
         server.mocked_send(Message("stats", {
             "stats":stats2.serialize_stats(), 
             "stats_total": stats2.total.serialize(),
             "errors":stats2.serialize_errors(),
             "user_count": 2,
         }, "fake_client"))
         self.assertEqual(700, master.stats.total.median_response_time)
Example #58
0
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        self.stats = RequestStats()
        self.stats.start_time = time.time()
        self.s = StatsEntry(self.stats, "test_entry", "GET")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
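        # log one request per response time 0-99 ms, so the p quantile comes out as p * 100 ms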
        for x in range(100):
            s.log(x, 0)

        self.assertEqual(s.get_response_time_percentile(0.5), 50)
        self.assertEqual(s.get_response_time_percentile(0.6), 60)
        self.assertEqual(s.get_response_time_percentile(0.95), 95)

    def test_median(self):
        self.assertEqual(self.s.median_response_time, 79)

    def test_total_rps(self):
        self.assertEqual(self.s.total_rps, 7)

    def test_current_rps(self):
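        # current_rps appears to be computed over a short sliding window anchored at the last
        # request timestamp; pushing that timestamp far past the logged requests empties the
        # window and the rate drops to zero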
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        self.assertEqual(self.s.current_rps, 3.5)

        self.stats.total.last_request_timestamp = int(time.time()) + 25
        self.assertEqual(self.s.current_rps, 0)

    def test_num_reqs_fails(self):
        self.assertEqual(self.s.num_requests, 7)
        self.assertEqual(self.s.num_failures, 3)

    def test_avg(self):
        self.assertEqual(self.s.avg_response_time, 187.71428571428572)

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        self.assertEqual(self.s.total_rps, 2)
        self.assertEqual(self.s.num_requests, 2)
        self.assertEqual(self.s.num_failures, 1)
        self.assertEqual(self.s.avg_response_time, 420.5)
        self.assertEqual(self.s.median_response_time, 85)
    
    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        self.assertEqual(756, self.s.min_response_time)

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

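        # extend() merges the two entries; the combined entry should report 10 requests,
        # 3 failures and the pooled response-time statistics asserted below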
        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1)
        s.extend(s2)

        self.assertEqual(s.num_requests, 10)
        self.assertEqual(s.num_failures, 3)
        self.assertEqual(s.median_response_time, 38)
        self.assertEqual(s.avg_response_time, 43.2)

    def test_aggregation_with_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
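        # response times of this magnitude appear to be bucketed to the nearest 10 ms for the
        # percentile/median bookkeeping, while min/max keep the exact logged values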
        s1.log(122, 0)    # (rounded 120) min
        s1.log(992, 0)    # (rounded 990) max
        s1.log(142, 0)    # (rounded 140)
        s1.log(552, 0)    # (rounded 550)
        s1.log(557, 0)    # (rounded 560)
        s1.log(387, 0)    # (rounded 390)
        s1.log(557, 0)    # (rounded 560)
        s1.log(977, 0)    # (rounded 980)

        self.assertEqual(s1.num_requests, 8)
        self.assertEqual(s1.median_response_time, 550)
        self.assertEqual(s1.avg_response_time, 535.75)
        self.assertEqual(s1.min_response_time, 122)
        self.assertEqual(s1.max_response_time, 992)

    def test_percentile_rounded_down(self):
        s1 = StatsEntry(self.stats, "rounding down!", "GET")
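        # percentile() renders a fixed-width report row; the single 122 ms sample shows up
        # as 120 in every percentile column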
        s1.log(122, 0)    # (rounded 120) min
        actual_percentile = s1.percentile()
        self.assertEqual(actual_percentile, " GET rounding down!                                                  1    120    120    120    120    120    120    120    120    120")

    def test_percentile_rounded_up(self):
        s2 = StatsEntry(self.stats, "rounding up!", "GET")
        s2.log(127, 0)    # (rounded 130) min
        actual_percentile = s2.percentile()
        self.assertEqual(actual_percentile, " GET rounding up!                                                    1    130    130    130    130    130    130    130    130    130")
    
    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()
        
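        # identical errors logged for the same method and name should collapse into a single
        # entry whose occurrence counter is incremented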
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
            
        self.assertEqual(1, len(self.stats.errors))
        self.assertEqual(2, list(self.stats.errors.values())[0].occurences)
        
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
        self.assertEqual(3, len(self.stats.errors))
    
    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()
        class Dummy(object):
            pass
        
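        # repr() of Dummy() embeds a memory address; the error is expected to register as a
        # single grouped entry (the address is presumably normalised away when grouping)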
        self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
        self.assertEqual(1, len(self.stats.errors))
    
    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message, 
        and unserialize the whole thing again. This is done "IRL" when stats are sent 
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
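        # the median of 10, 20 and 40 ms is 20 and must survive the double round trip
        # (StatsEntry -> serialized dict -> Message -> StatsEntry)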
        u1 = StatsEntry.unserialize(s1.serialize())
        
        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        
        self.assertEqual(20, u1.median_response_time)
Example #59
0
class TestRequestStats(unittest.TestCase):
    def setUp(self):
        self.stats = RequestStats()
        self.stats.start_time = time.time()
        self.s = StatsEntry(self.stats, "test_entry", "GET")
        self.s.log(45, 0)
        self.s.log(135, 0)
        self.s.log(44, 0)
        self.s.log_error(Exception("dummy fail"))
        self.s.log_error(Exception("dummy fail"))
        self.s.log(375, 0)
        self.s.log(601, 0)
        self.s.log(35, 0)
        self.s.log(79, 0)
        self.s.log_error(Exception("dummy fail"))

    def test_percentile(self):
        s = StatsEntry(self.stats, "percentile_test", "GET")
        for x in range(100):
            s.log(x, 0)

        assert s.get_response_time_percentile(0.5) == 50
        assert s.get_response_time_percentile(0.6) == 60
        assert s.get_response_time_percentile(0.95) == 95

    def test_median(self):
        assert self.s.median_response_time == 79

    def test_total_rps(self):
        assert self.s.total_rps == 7

    def test_current_rps(self):
        self.stats.total.last_request_timestamp = int(time.time()) + 4
        assert self.s.current_rps == 3.5

        self.stats.total.last_request_timestamp = int(time.time()) + 25
        assert self.s.current_rps == 0

    def test_num_reqs_fails(self):
        assert self.s.num_requests == 7
        assert self.s.num_failures == 3

    def test_avg(self):
        assert self.s.avg_response_time == 187.71428571428572

    def test_reset(self):
        self.s.reset()
        self.s.log(756, 0)
        self.s.log_error(Exception("dummy fail after reset"))
        self.s.log(85, 0)

        assert self.s.total_rps == 2
        assert self.s.num_requests == 2
        assert self.s.num_failures == 1
        assert self.s.avg_response_time == 420.5
        assert self.s.median_response_time == 85
    
    def test_reset_min_response_time(self):
        self.s.reset()
        self.s.log(756, 0)
        assert 756 == self.s.min_response_time

    def test_aggregation(self):
        s1 = StatsEntry(self.stats, "aggregate me!", "GET")
        s1.log(12, 0)
        s1.log(12, 0)
        s1.log(38, 0)
        s1.log_error("Dummy exzeption")

        s2 = StatsEntry(self.stats, "aggregate me!", "GET")
        s2.log_error("Dummy exzeption")
        s2.log_error("Dummy exzeption")
        s2.log(12, 0)
        s2.log(99, 0)
        s2.log(14, 0)
        s2.log(55, 0)
        s2.log(38, 0)
        s2.log(55, 0)
        s2.log(97, 0)

        s = StatsEntry(self.stats, "GET", "")
        s.extend(s1)
        s.extend(s2)

        assert s.num_requests == 10
        assert s.num_failures == 3
        assert s.median_response_time == 38
        assert s.avg_response_time == 43.2

    def test_aggregation_with_rounding(self):
        s1 = StatsEntry(self.stats, "round me!", "GET")
        s1.log(122, 0)    # (rounded 120) min
        s1.log(992, 0)    # (rounded 990) max
        s1.log(142, 0)    # (rounded 140)
        s1.log(552, 0)    # (rounded 550)
        s1.log(557, 0)    # (rounded 560)
        s1.log(387, 0)    # (rounded 390)
        s1.log(557, 0)    # (rounded 560)
        s1.log(977, 0)    # (rounded 980)

        assert s1.num_requests == 8
        assert s1.median_response_time == 550
        assert s1.avg_response_time == 535.75
        assert s1.min_response_time == 122
        assert s1.max_response_time == 992

    def test_percentile_rounded_down(self):
        s1 = StatsEntry(self.stats, "rounding down!", "GET")
        s1.log(122, 0)    # (rounded 120) min
        actual_percentile = s1.percentile()
        assert actual_percentile == " GET rounding down!                                                  1    120    120    120    120    120    120    120    120    120"

    def test_percentile_rounded_up(self):
        s2 = StatsEntry(self.stats, "rounding up!", "GET")
        s2.log(127, 0)    # (rounded 130) min
        actual_percentile = s2.percentile()
        assert actual_percentile == " GET rounding up!                                                    1    130    130    130    130    130    130    130    130    130"
    
    def test_error_grouping(self):
        # reset stats
        self.stats = RequestStats()
        
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Exception!"))
            
        assert 1 == len(self.stats.errors)
        assert 2 == list(self.stats.errors.values())[0].occurences
        
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Another exception!"))
        self.stats.log_error("GET", "/some-path", Exception("Third exception!"))
        assert 3 == len(self.stats.errors)
    
    def test_error_grouping_errors_with_memory_addresses(self):
        # reset stats
        self.stats = RequestStats()
        class Dummy(object):
            pass
        
        self.stats.log_error("GET", "/", Exception("Error caused by %r" % Dummy()))
        assert 1 == len(self.stats.errors)
    
    def test_serialize_through_message(self):
        """
        Serialize a RequestStats instance, then serialize it through a Message, 
        and unserialize the whole thing again. This is done "IRL" when stats are sent 
        from slaves to master.
        """
        s1 = StatsEntry(self.stats, "test", "GET")
        s1.log(10, 0)
        s1.log(20, 0)
        s1.log(40, 0)
        u1 = StatsEntry.unserialize(s1.serialize())
        
        data = Message.unserialize(Message("dummy", s1.serialize(), "none").serialize()).data
        u1 = StatsEntry.unserialize(data)
        
        assert 20 == u1.median_response_time