Example #1
File: web.py Project: whitmo/locust
def request_stats():
    global _request_stats_context_cache

    if not _request_stats_context_cache or _request_stats_context_cache[
        "last_time"
    ] < time() - _request_stats_context_cache.get("cache_time", DEFAULT_CACHE_TIME):
        cache_time = _request_stats_context_cache.get("cache_time", DEFAULT_CACHE_TIME)
        now = time()

        stats = []
        for s in chain(_sort_stats(runners.locust_runner.request_stats), [RequestStats.sum_stats("Total")]):
            stats.append(
                {
                    "method": s.method,
                    "name": s.name,
                    "num_reqs": s.num_reqs,
                    "num_failures": s.num_failures,
                    "avg_response_time": s.avg_response_time,
                    "min_response_time": s.min_response_time,
                    "max_response_time": s.max_response_time,
                    "current_rps": s.current_rps,
                    "median_response_time": s.median_response_time,
                    "avg_content_length": s.avg_content_length,
                }
            )

        report = {"stats": stats, "errors": list(runners.locust_runner.errors.iteritems())}
        if stats:
            report["total_rps"] = stats[len(stats) - 1]["current_rps"]
            report["fail_ratio"] = RequestStats.sum_stats("Total").fail_ratio

            # since generating a total response times dict with all response times from all
            # urls is slow, we make a new total response time dict which will consist of one
            # entry per url with the median response time as key and the number of requests as
            # value
            response_times = defaultdict(int)  # used for calculating total median
            for i in xrange(len(stats) - 1):
                response_times[stats[i]["median_response_time"]] += stats[i]["num_reqs"]

            # calculate total median
            stats[len(stats) - 1]["median_response_time"] = median_from_dict(
                stats[len(stats) - 1]["num_reqs"], response_times
            )

        is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
        if is_distributed:
            report["slave_count"] = runners.locust_runner.slave_count

        report["state"] = runners.locust_runner.state
        report["user_count"] = runners.locust_runner.user_count

        elapsed = time() - now
        cache_time = max(
            cache_time, elapsed * 2.0
        )  # Increase cache_time when report generating starts to take longer time
        _request_stats_context_cache = {"last_time": elapsed - now, "report": report, "cache_time": cache_time}
    else:
        report = _request_stats_context_cache["report"]
    return json.dumps(report)
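The total-median computation commented in the example above relies on median_from_dict, which finds a median by walking a {response_time: request_count} histogram instead of a flat list of samples. A minimal sketch of that behavior (an illustration of the helper's semantics, not necessarily Locust's exact implementation):

def median_from_dict(total, count):
    # total: total number of requests
    # count: {response_time: number_of_requests_with_that_time}
    # Walk the response times in ascending order until the middle
    # position falls inside a bucket; that bucket's key is the median.
    pos = (total - 1) // 2
    for response_time in sorted(count):
        if pos < count[response_time]:
            return response_time
        pos -= count[response_time]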
Example #2
File: web.py Project: getodacu/locust
def request_stats():
    global _request_stats_context_cache
    
    if not _request_stats_context_cache or _request_stats_context_cache["last_time"] < time() - _request_stats_context_cache.get("cache_time", DEFAULT_CACHE_TIME):
        cache_time = _request_stats_context_cache.get("cache_time", DEFAULT_CACHE_TIME)
        now = time()
        
        stats = []
        for s in chain(_sort_stats(runners.locust_runner.request_stats), [RequestStats.sum_stats("Total")]):
            stats.append({
                "method": s.method,
                "name": s.name,
                "num_reqs": s.num_reqs,
                "num_failures": s.num_failures,
                "avg_response_time": s.avg_response_time,
                "min_response_time": s.min_response_time,
                "max_response_time": s.max_response_time,
                "current_rps": s.current_rps,
                "median_response_time": s.median_response_time,
                "avg_content_length": s.avg_content_length,
            })
        
        report = {"stats":stats, "errors":list(runners.locust_runner.errors.iteritems())}
        if stats:
            report["total_rps"] = stats[len(stats)-1]["current_rps"]
            report["fail_ratio"] = RequestStats.sum_stats("Total").fail_ratio
            
            # since generating a total response times dict with all response times from all
            # urls is slow, we make a new total response time dict which will consist of one
            # entry per url with the median response time as key and the number of requests as
            # value
            response_times = defaultdict(int) # used for calculating total median
            for i in xrange(len(stats)-1):
                response_times[stats[i]["median_response_time"]] += stats[i]["num_reqs"]
            
            # calculate total median
            stats[len(stats)-1]["median_response_time"] = median_from_dict(stats[len(stats)-1]["num_reqs"], response_times)
        
        is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
        if is_distributed:
            report["slave_count"] = runners.locust_runner.slave_count
        
        report["state"] = runners.locust_runner.state
        report["user_count"] = runners.locust_runner.user_count

        elapsed = time() - now
        cache_time = max(cache_time, elapsed * 2.0) # Increase cache_time when report generating starts to take longer time
        _request_stats_context_cache = {"last_time": elapsed - now, "report": report, "cache_time": cache_time}
    else:
        report = _request_stats_context_cache["report"]
    return json.dumps(report)
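To make the adaptive TTL in examples #1 and #2 concrete: assuming DEFAULT_CACHE_TIME is 2.0 seconds (the real constant lives elsewhere in web.py), a report that takes 1.5 seconds to build raises the TTL to 3.0 seconds, so requests arriving within the next 3 seconds are served from the cache:

# Worked example of the cache_time adaptation above
cache_time = 2.0                              # assumed DEFAULT_CACHE_TIME
elapsed = 1.5                                 # this report took 1.5 s to build
cache_time = max(cache_time, elapsed * 2.0)   # -> 3.0 s
# Any request within 3.0 s of "last_time" now reuses the cached report.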
Example #3
def request_stats():
    stats = []
    for s in chain(_sort_stats(runners.locust_runner.request_stats),
                   [runners.locust_runner.stats.aggregated_stats("Total")]):
        stats.append({
            "method": s.method,
            "name": s.name,
            "num_requests": s.num_requests,
            "num_failures": s.num_failures,
            "avg_response_time": s.avg_response_time,
            "min_response_time": s.min_response_time or 0,
            "max_response_time": s.max_response_time,
            "current_rps": s.current_rps,
            "median_response_time": s.median_response_time,
            "avg_content_length": s.avg_content_length,
        })

    errors = [
        e.to_dict() for e in six.itervalues(runners.locust_runner.errors)
    ]

    # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
    # to render extremely slowly. Aggregate stats should be preserved.
    report = {"stats": stats[:500], "errors": errors[:500]}

    if stats:
        report["total_rps"] = stats[len(stats) - 1]["current_rps"]
        report["fail_ratio"] = runners.locust_runner.stats.aggregated_stats(
            "Total").fail_ratio

        # since generating a total response times dict with all response times from all
        # urls is slow, we make a new total response time dict which will consist of one
        # entry per url with the median response time as key and the number of requests as
        # value
        response_times = defaultdict(int)  # used for calculating total median
        for i in xrange(len(stats) - 1):
            response_times[
                stats[i]["median_response_time"]] += stats[i]["num_requests"]

        # calculate total median
        stats[len(stats) - 1]["median_response_time"] = median_from_dict(
            stats[len(stats) - 1]["num_requests"], response_times)

    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
    if is_distributed:
        report["slave_count"] = runners.locust_runner.slave_count

    report["state"] = runners.locust_runner.state
    report["user_count"] = runners.locust_runner.user_count
    return json.dumps(report)
Example #4
File: web.py Project: kaeawc/locust
def request_stats():
    stats = []
    for s in chain(_sort_stats(runners.locust_runner.request_stats),
                   [runners.locust_runner.stats.aggregated_stats("Total")]):
        stats.append({
            'method': s.method,
            'name': s.name,
            'num_requests': s.num_requests,
            'num_failures': s.num_failures,
            'avg_response_time': s.avg_response_time,
            'min_response_time': s.min_response_time or 0,
            'max_response_time': s.max_response_time,
            'current_rps': s.current_rps,
            'median_response_time': s.median_response_time,
            'avg_content_length': s.avg_content_length,
        })

    report = {
        'stats': stats,
        'errors': [e.to_dict() for e in six.itervalues(runners.locust_runner.errors)]
    }
    if stats:
        report['total_rps'] = stats[len(stats) - 1]['current_rps']
        report['fail_ratio'] = runners.locust_runner.stats.aggregated_stats(
            'Total').fail_ratio

        # since generating a total response times dict with all response times from all
        # urls is slow, we make a new total response time dict which will consist of one
        # entry per url with the median response time as key and the number of requests as
        # value
        response_times = defaultdict(int)  # used for calculating total median
        for i in xrange(len(stats) - 1):
            response_times[
                stats[i]['median_response_time']] += stats[i]['num_requests']

        # calculate total median
        stats[len(stats) - 1]['median_response_time'] = median_from_dict(
            stats[len(stats) - 1]['num_requests'], response_times)

    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
    if is_distributed:
        report['slave_count'] = runners.locust_runner.slave_count

    report['state'] = runners.locust_runner.state
    report['user_count'] = runners.locust_runner.user_count
    return json.dumps(report)
Example #5
    def request_stats_dict(self):
        stats = []
        for s in chain(self._sort_stats(self.request_stats),
                       [self.stats.aggregated_stats("Total")]):
            stats.append({
                "method": s.method,
                "name": s.name,
                "num_requests": s.num_requests,
                "num_failures": s.num_failures,
                "avg_response_time": s.avg_response_time,
                "min_response_time": s.min_response_time or 0,
                "max_response_time": s.max_response_time,
                "current_rps": s.current_rps,
                "median_response_time": s.median_response_time,
                "avg_content_length": s.avg_content_length,
            })

        report = {
            "stats": stats,
            "errors": [e.to_dict() for e in six.itervalues(self.errors)]
        }
        if stats:
            report["total_rps"] = stats[len(stats) - 1]["current_rps"]
            report["fail_ratio"] = self.stats.aggregated_stats(
                "Total").fail_ratio

            # since generating a total response times dict with all response times from all
            # urls is slow, we make a new total response time dict which will consist of one
            # entry per url with the median response time as key and the number of requests as
            # value
            response_times = defaultdict(int)  # used for calculating total median
            for i in xrange(len(stats) - 1):
                response_times[stats[i]["median_response_time"]] += stats[i][
                    "num_requests"]

            # calculate total median
            stats[len(stats) - 1]["median_response_time"] = median_from_dict(
                stats[len(stats) - 1]["num_requests"], response_times)

        is_distributed = isinstance(self, MasterLocustRunner)
        if is_distributed:
            report["slave_count"] = self.slave_count

        report["state"] = self.state
        report["user_count"] = self.user_count
        return report
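Unlike the web-handler variants, this version lives on the runner and returns a plain dict, leaving serialization to the caller. A hypothetical usage sketch from a web handler (names assumed to match the other examples):

def request_stats():
    # Hypothetical web handler delegating to the runner method above
    report = runners.locust_runner.request_stats_dict()
    return json.dumps(report)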
Example #6
def request_stats():

    stats = []
    for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total")]):
        stats.append({
            "method": s.method,
            "name": s.name,
            "num_requests": s.num_requests,
            "num_failures": s.num_failures,
            "avg_response_time": s.avg_response_time,
            "min_response_time": s.min_response_time or 0,
            "max_response_time": s.max_response_time,
            "current_rps": s.current_rps,
            "median_response_time": s.median_response_time,
            "avg_content_length": s.avg_content_length,
        })
    
    report = {"stats":stats, "errors":[e.to_dict() for e in runners.locust_runner.errors.itervalues()]}
    if stats:
        report["total_rps"] = stats[len(stats)-1]["current_rps"]
        report["fail_ratio"] = runners.locust_runner.stats.aggregated_stats("Total").fail_ratio
        
        if runners.locust_runner.state not in ("stopped", "ready"):
            # update run time
            runners.locust_runner.stats.total_run_time()
        report["total_run_time"] = runners.locust_runner.stats.run_time
        report["start_run_time"] = runners.locust_runner.stats.start_run_time
        report["end_run_time"] = runners.locust_runner.stats.end_run_time
        
        # since generating a total response times dict with all response times from all
        # urls is slow, we make a new total response time dict which will consist of one
        # entry per url with the median response time as key and the number of requests as
        # value
        response_times = defaultdict(int) # used for calculating total median
        for i in xrange(len(stats)-1):
            response_times[stats[i]["median_response_time"]] += stats[i]["num_requests"]
        
        # calculate total median
        stats[len(stats)-1]["median_response_time"] = median_from_dict(stats[len(stats)-1]["num_requests"], response_times)
    
    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
    if is_distributed:
        report["slave_count"] = runners.locust_runner.slave_count
    
    report["state"] = runners.locust_runner.state
    report["user_count"] = runners.locust_runner.user_count
    return json.dumps(report)
Example #7
File: web.py Project: cgoldberg/locust
def request_stats():
    stats = []
    for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total")]):
        stats.append({
            "method": s.method,
            "name": s.name,
            "num_requests": s.num_requests,
            "num_failures": s.num_failures,
            "avg_response_time": s.avg_response_time,
            "min_response_time": s.min_response_time or 0,
            "max_response_time": s.max_response_time,
            "current_rps": s.current_rps,
            "median_response_time": s.median_response_time,
            "avg_content_length": s.avg_content_length,
        })

    errors = [e.to_dict() for e in six.itervalues(runners.locust_runner.errors)]

    # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
    # to render extremely slowly. Aggregate stats should be preserved.
    report = {"stats": stats[:500], "errors": errors[:500]}

    if stats:
        report["total_rps"] = stats[len(stats)-1]["current_rps"]
        report["fail_ratio"] = runners.locust_runner.stats.aggregated_stats("Total").fail_ratio
        
        # since generating a total response times dict with all response times from all
        # urls is slow, we make a new total response time dict which will consist of one
        # entry per url with the median response time as key and the number of requests as
        # value
        response_times = defaultdict(int) # used for calculating total median
        for i in xrange(len(stats)-1):
            response_times[stats[i]["median_response_time"]] += stats[i]["num_requests"]
        
        # calculate total median
        stats[len(stats)-1]["median_response_time"] = median_from_dict(stats[len(stats)-1]["num_requests"], response_times)
    
    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
    if is_distributed:
        report["slave_count"] = runners.locust_runner.slave_count
    
    report["state"] = runners.locust_runner.state
    report["user_count"] = runners.locust_runner.user_count
    return json.dumps(report)
Example #8
File: web.py Project: kaeawc/locust
def request_stats():
    stats = []
    for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total")]):
        stats.append({
            'method': s.method,
            'name': s.name,
            'num_requests': s.num_requests,
            'num_failures': s.num_failures,
            'avg_response_time': s.avg_response_time,
            'min_response_time': s.min_response_time or 0,
            'max_response_time': s.max_response_time,
            'current_rps': s.current_rps,
            'median_response_time': s.median_response_time,
            'avg_content_length': s.avg_content_length,
        })
    
    report = {
        'stats': stats,
        'errors': [e.to_dict() for e in six.itervalues(runners.locust_runner.errors)]
    }
    if stats:
        report['total_rps'] = stats[len(stats)-1]['current_rps']
        report['fail_ratio'] = runners.locust_runner.stats.aggregated_stats('Total').fail_ratio
        
        # since generating a total response times dict with all response times from all
        # urls is slow, we make a new total response time dict which will consist of one
        # entry per url with the median response time as key and the number of requests as
        # value
        response_times = defaultdict(int) # used for calculating total median
        for i in xrange(len(stats)-1):
            response_times[stats[i]['median_response_time']] += stats[i]['num_requests']
        
        # calculate total median
        stats[len(stats)-1]['median_response_time'] = median_from_dict(stats[len(stats)-1]['num_requests'], response_times)
    
    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
    if is_distributed:
        report['slave_count'] = runners.locust_runner.slave_count
    
    report['state'] = runners.locust_runner.state
    report['user_count'] = runners.locust_runner.user_count
    return json.dumps(report)
Example #9
File: web.py Project: obtkhadija/dossier
def generate_report(runner):
    stats = get_stats(runner)
    report = {
        "stats": stats,
        "errors": [e.to_dict() for e in runner.errors.itervalues()]
    }
    if stats:
        report["total_rps"] = stats[len(stats) - 1]["current_rps"]
        report["fail_ratio"] = runner.stats.aggregated_stats(
            "Total").fail_ratio

        # since generating a total response times dict with all response times from all
        # urls is slow, we make a new total response time dict which will consist of one
        # entry per url with the median response time as key and the number of requests as
        # value
        response_times = defaultdict(int)  # used for calculating total median
        for i in xrange(len(stats) - 1):
            response_times[
                stats[i]["median_response_time"]] += stats[i]["num_requests"]

        # calculate total median
        stats[len(stats) - 1]["median_response_time"] = median_from_dict(
            stats[len(stats) - 1]["num_requests"], response_times)

    is_distributed = isinstance(runner, MasterLocustRunner)
    if is_distributed:
        report["slave_count"] = runner.slave_count

    num_requests = get_total_stats(stats)["num_requests"]
    #logger.info("Checking if num_requests above max_requests {} => {}".format(num_requests, runner.get_num_requests()))
    if runner.get_num_requests() is not None\
            and runner.state == STATE_RUNNING \
            and num_requests >= runner.get_num_requests():
        logger.info("Stopping tests")
        stop(runner)

    report["state"] = runner.state
    report["user_count"] = runner.user_count
    return report
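get_stats, get_total_stats, and stop are helpers local to this fork and are not shown here. Given that every other example appends the aggregated "Total" row last, a plausible reconstruction of get_total_stats (hypothetical, not the fork's actual code):

def get_total_stats(stats):
    # Assumes the aggregated "Total" entry is the last element of the
    # stats list, as in the other examples; stats must be non-empty.
    return stats[-1]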
Example #10
def request_stats():
    stats = []
    for s in chain(_sort_stats(runners.locust_runner.request_stats),
                   [runners.locust_runner.stats.aggregated_stats("Total")]):
        stats.append({
            "method": s.method,
            "name": s.name,
            "num_requests": s.num_requests,
            "num_failures": s.num_failures,
            "avg_response_time": s.avg_response_time,
            "min_response_time": s.min_response_time or 0,
            "max_response_time": s.max_response_time,
            "current_rps": s.current_rps,
            "median_response_time": s.median_response_time,
            "avg_content_length": s.avg_content_length,
        })

    errors = [
        e.to_dict() for e in six.itervalues(runners.locust_runner.errors)
    ]

    # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
    # to render extremely slowly. Aggregate stats should be preserved.
    report = {"stats": stats[:500], "errors": errors[:500]}

    if stats:
        report["total_rps"] = stats[len(stats) - 1]["current_rps"]
        report["fail_ratio"] = runners.locust_runner.stats.aggregated_stats(
            "Total").fail_ratio

        # since generating a total response times dict with all response times from all
        # urls is slow, we make a new total response time dict which will consist of one
        # entry per url with the median response time as key and the number of requests as
        # value
        response_times = defaultdict(int)  # used for calculating total median
        for i in xrange(len(stats) - 1):
            response_times[
                stats[i]["median_response_time"]] += stats[i]["num_requests"]

        # calculate total median
        stats[len(stats) - 1]["median_response_time"] = median_from_dict(
            stats[len(stats) - 1]["num_requests"], response_times)

    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
    if is_distributed:
        report["slave_count"] = runners.locust_runner.slave_count

    # add brief per-entry throughput information for the last 2 seconds
    now_time = int(time())
    entry_infos = {}
    for stat_entry in runners.locust_runner.stats.entries.itervalues():
        assert isinstance(stat_entry, StatsEntry)
        entry_name = stat_entry.name
        entry_meth = stat_entry.method or 'Unknown'
        if entry_meth not in entry_infos:
            entry_infos[entry_meth] = {}

        entry_infos[entry_meth][entry_name] = dict()
        for collect_time in xrange(now_time - 2, now_time):
            entry_infos[entry_meth][entry_name][collect_time] = \
                {'rps': stat_entry.num_reqs_per_sec.get(collect_time, 0),
                 'total_content_length': stat_entry.total_content_length_per_sec.get(collect_time, 0),
                 'total_response_time': stat_entry.total_response_time_per_sec.get(collect_time, 0),
                 }

    report['entry_infos'] = entry_infos

    report["state"] = runners.locust_runner.state
    report["user_count"] = runners.locust_runner.user_count
    return json.dumps(report)
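For illustration, the entry_infos block above yields a nested mapping keyed by method, then entry name, then epoch second. With invented values, report['entry_infos'] looks like:

# Hypothetical shape (all names and numbers invented for illustration):
{
    "GET": {
        "/api/items": {
            1609459198: {"rps": 12, "total_content_length": 40960, "total_response_time": 1530},
            1609459199: {"rps": 15, "total_content_length": 51200, "total_response_time": 1710},
        }
    }
}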
Example #11
def request_stats(g_state=[''], last_user_count=[None], count_list=[None]):
    # Note: the mutable default arguments deliberately persist across calls,
    # holding the previous run state, the last user count, and a rolling
    # window of recent user counts.
    stats = []
    for s in chain(_sort_stats(runners.locust_runner.request_stats),
                   [runners.locust_runner.stats.aggregated_stats("Total")]):
        stats.append({
            "method": s.method,
            "name": s.name,
            "num_requests": s.num_requests,
            "num_failures": s.num_failures,
            "avg_response_time": s.avg_response_time,
            "min_response_time": s.min_response_time or 0,
            "max_response_time": s.max_response_time,
            "current_rps": s.current_rps,
            "median_response_time": s.median_response_time,
            "avg_content_length": s.avg_content_length,
        })

    errors = [
        e.to_dict() for e in six.itervalues(runners.locust_runner.errors)
    ]

    # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
    # to render extremely slowly. Aggregate stats should be preserved.
    report = {"stats": stats[:500], "errors": errors[:500]}

    if stats:
        report["total_rps"] = stats[len(stats) - 1]["current_rps"]
        report["fail_ratio"] = runners.locust_runner.stats.aggregated_stats(
            "Total").fail_ratio

        # since generating a total response times dict with all response times from all
        # urls is slow, we make a new total response time dict which will consist of one
        # entry per url with the median response time as key and the number of requests as
        # value
        response_times = defaultdict(int)  # used for calculating total median
        for i in xrange(len(stats) - 1):
            response_times[
                stats[i]["median_response_time"]] += stats[i]["num_requests"]

        # calculate total median
        stats[len(stats) - 1]["median_response_time"] = median_from_dict(
            stats[len(stats) - 1]["num_requests"], response_times)

    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
    if is_distributed:
        report["slave_count"] = runners.locust_runner.slave_count

    report["state"] = runners.locust_runner.state
    report["user_count"] = runners.locust_runner.user_count
    # start writing test data to InfluxDB and MySQL
    # (report_id, used below, is presumably a module-level global in the
    # original file; it is not defined in this function)
    task_id = os.environ.get('TASK_ID')
    # user_id = os.environ.get('USER_ID')
    # area_id = os.environ.get('AREA_ID')
    client = connect.connect_influx()
    write_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
    if report["state"] == "running" or report["state"] == "hatching" or (
            report["state"] == "stopped" and report['state'] != g_state[0]):
        for i in report['stats']:
            json_body = [{
                "measurement": "stats_1",
                "tags": {
                    "name": "%s" % i['name'],
                    "method": "%s" % i['method'],
                    "report_id": "%s" % report_id,
                    "task_id": "%s" % task_id
                },
                "time": "%s" % write_time,
                "fields": {
                    "median_response_time": "%s" % i['median_response_time'],
                    "min_response_time": "%s" % i['min_response_time'],
                    "current_rps": "%s" % i['current_rps'],
                    "num_failures": "%d" % i['num_failures'],
                    "max_response_time": "%s" % i['max_response_time'],
                    "avg_content_length": "%d" % i['avg_content_length'],
                    "avg_response_time": "%s" % i['avg_response_time'],
                    "num_requests": "%d" % i['num_requests']
                }
            }]
            client.write_points(json_body)

        json_body_stated = [{
            "measurement": "stated_1",
            "tags": {
                "report_id": "%s" % report_id,
                "task_id": "%s" % task_id
            },
            "time": "%s" % write_time,
            "fields": {
                "state": "%s" % report['state'],
                "total_rps": "%s" % report['total_rps'],
                "fail_ratio": "%s" % report['fail_ratio'],
                "user_count": "%s" % (report['user_count']
                                      if report["state"] in ("running", "hatching")
                                      else last_user_count[0])
            }
        }]

        client.write_points(json_body_stated)
    if report['state'] == 'stopped' and report['state'] != g_state[0]:
        total_rps = round(report['total_rps'], 2)
        fail_ratio_pct = round(report['fail_ratio'], 4) * 100
        max_user_count = max(count_list)
        conn = connect.connect_mysql()
        try:
            with conn.cursor() as cursor:
                sql_report = 'UPDATE dtp_report set total_rps=%s,total_fail_ratio=%s,simulate_users=%s WHERE task_id=%s and id=%s'
                sql_result = 'INSERT INTO dtp_result(url,total_average_rt,total_requests,total_failed,report_id) values(%s,%s,%s,%s,%s)'
                cursor.execute(sql_report, (total_rps, fail_ratio_pct, max_user_count, task_id, report_id))

                for i in report['stats']:
                    cursor.execute(
                        sql_result,
                        (i['name'], i['avg_response_time'], i['num_requests'],
                         i['num_failures'], report_id))
            conn.commit()
        finally:
            conn.close()

    g_state[0] = report["state"]
    last_user_count[0] = report["user_count"]
    count_list.append(int(report["user_count"]))
    if len(count_list) > 10:
        count_list.pop(0)
    return json.dumps(report)
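connect.connect_influx() and connect.connect_mysql() are project-local helpers that are not shown. A minimal sketch of what they might look like, assuming the influxdb and PyMySQL client libraries (all connection parameters are placeholders):

# connect.py -- hypothetical sketch of the helpers used above
from influxdb import InfluxDBClient
import pymysql

def connect_influx():
    # host, port, credentials and database name are assumptions
    return InfluxDBClient("localhost", 8086, "root", "root", "locust")

def connect_mysql():
    # connection parameters are assumptions
    return pymysql.connect(host="localhost", user="root",
                           password="root", db="dtp", charset="utf8")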