Example #1
        def stats_report():
            stats = self.environment.runner.stats
            if not stats or not stats.start_time or not stats.last_request_timestamp or not stats.entries:
                return render_template("report.html")

            start_ts = stats.start_time
            start_time = datetime.datetime.fromtimestamp(start_ts)
            start_time = start_time.strftime("%Y-%m-%d %H:%M:%S")

            end_ts = stats.last_request_timestamp
            end_time = datetime.datetime.fromtimestamp(end_ts)
            end_time = end_time.strftime("%Y-%m-%d %H:%M:%S")

            host = None
            if environment.host:
                host = environment.host
            elif environment.runner.user_classes:
                all_hosts = {user_class.host for user_class in environment.runner.user_classes}
                if len(all_hosts) == 1:
                    host = list(all_hosts)[0]

            requests_statistics = list(chain(sort_stats(stats.entries), [stats.total]))
            failures_statistics = sort_stats(stats.errors)
            exceptions_statistics = []
            for exc in environment.runner.exceptions.values():
                exc["nodes"] = ", ".join(exc["nodes"])
                exceptions_statistics.append(exc)

            history = stats.history

            static_js = ""
            js_files = ["jquery-1.11.3.min.js","echarts.common.min.js","vintage.js","chart.js"]
            for js_file in js_files:
                path = os.path.join(os.path.dirname(__file__),"static",js_file)
                with open(path,encoding="utf8") as f:
                    content = f.read()
                static_js += "// " + js_file + "\n"
                static_js += content
                static_js += "\n\n\n"

            res = render_template(
                "report.html",
                int=int,
                round=round,
                requests_statistics=requests_statistics,
                failures_statistics=failures_statistics,
                exceptions_statistics=exceptions_statistics,
                start_time=start_time,
                end_time=end_time,
                host=host,
                history=history,
                static_js=static_js,
            )
            if request.args.get("download"):
                res = app.make_response(res)
                res.headers["Content-Disposition"] = "attachment;filename=report_%s.html" % time()
            return res
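If this handler is bound to the web UI's /stats/report route, as it is in Locust's own web UI, the rendered HTML report can be fetched over HTTP. A minimal sketch, with host and port assumed:

import requests

# Assumes a Locust web UI listening on localhost:8089; download=1 triggers the
# Content-Disposition header added at the end of the handler above.
html = requests.get("http://localhost:8089/stats/report", params={"download": "1"}).text
with open("locust_report.html", "w", encoding="utf8") as f:
    f.write(html)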
Example #2
    def collect(self):
        # A non-None locust_runner indicates that a test has started.
        if runners.locust_runner:

            stats = []

            for s in chain(locust_stats.sort_stats(runners.locust_runner.request_stats),
                           [runners.locust_runner.stats.total]):
                stats.append({
                    "method": s.method,
                    "name": s.name,
                    "num_requests": s.num_requests,
                    "num_failures": s.num_failures,
                    "avg_response_time": s.avg_response_time,
                    "min_response_time": s.min_response_time or 0,
                    "max_response_time": s.max_response_time,
                    "current_rps": s.current_rps,
                    "median_response_time": s.median_response_time,
                    "avg_content_length": s.avg_content_length,
                })

            errors = [e.to_dict() for e in six.itervalues(runners.locust_runner.errors)]

            metric = Metric('locust_user_count', 'Swarmed users', 'gauge')
            metric.add_sample('locust_user_count', value=runners.locust_runner.user_count, labels={})
            yield metric

            metric = Metric('locust_errors', 'Locust requests errors', 'gauge')
            for err in errors:
                metric.add_sample('locust_errors', value=err['occurences'],
                                  labels={'path': err['name'], 'method': err['method']})
            yield metric

            is_distributed = isinstance(runners.locust_runner, runners.MasterLocustRunner)
            if is_distributed:
                metric = Metric('locust_slave_count', 'Locust number of slaves', 'gauge')
                metric.add_sample('locust_slave_count', value=len(runners.locust_runner.clients.values()), labels={})
                yield metric

            metric = Metric('locust_fail_ratio', 'Locust failure ratio', 'gauge')
            metric.add_sample('locust_fail_ratio', value=runners.locust_runner.stats.total.fail_ratio, labels={})
            yield metric

            metric = Metric('locust_state', 'State of the locust swarm', 'gauge')
            metric.add_sample('locust_state', value=1, labels={'state': runners.locust_runner.state})
            yield metric

            stats_metrics = ['avg_content_length', 'avg_response_time', 'current_rps', 'max_response_time',
                             'median_response_time', 'min_response_time', 'num_failures', 'num_requests']

            for mtr in stats_metrics:
                mtype = 'gauge'
                if mtr in ['num_requests', 'num_failures']:
                    mtype = 'counter'
                metric = Metric('locust_requests_' + mtr, 'Locust requests ' + mtr, mtype)
                for stat in stats:
                    if 'Total' not in stat['name']:
                        metric.add_sample('locust_requests_' + mtr, value=stat[mtr],
                                          labels={'path': stat['name'], 'method': stat['method']})
                yield metric
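A collector like this only produces metrics once it is registered with a prometheus_client registry and exposed over HTTP. A minimal sketch, assuming the enclosing class is called LocustCollector and that prometheus_client is installed (the port is arbitrary):

from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY

# Register the custom collector so collect() runs on every scrape, then expose
# the default registry on its own metrics port.
REGISTRY.register(LocustCollector())
start_http_server(9646)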
Example #3
    def _get_stats_entries_list(self) -> List[Dict[str, Any]]:
        """Returns a list of dictionaries representing the performance statistics of _all_ Locust tasks that ran.

        Returns:
            List[Dict[str, Any]]: A list of dicts that represent the performance statistics of all Locust tasks.
        """
        stats = self.locust_env.stats
        return [
            self._get_stats_entry_dict(stats_entry)
            for stats_entry in sort_stats(stats.entries)
        ]
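The _get_stats_entry_dict helper is not shown in this example; a hypothetical sketch of what it might look like, using only standard locust.stats.StatsEntry attributes:

    def _get_stats_entry_dict(self, stats_entry) -> Dict[str, Any]:
        # Hypothetical helper: flatten a locust.stats.StatsEntry into a plain dict.
        return {
            "method": stats_entry.method,
            "name": stats_entry.name,
            "num_requests": stats_entry.num_requests,
            "num_failures": stats_entry.num_failures,
            "median_response_time": stats_entry.median_response_time,
            "avg_response_time": stats_entry.avg_response_time,
            "total_rps": stats_entry.total_rps,
        }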
Example #4
        def request_stats():
            stats = []

            for s in chain(sort_stats(self.environment.runner.stats.entries), [environment.runner.stats.total]):
                stats.append({
                    "method": s.method,
                    "name": s.name,
                    "safe_name": escape(s.name, quote=False),
                    "num_requests": s.num_requests,
                    "num_failures": s.num_failures,
                    "avg_response_time": s.avg_response_time,
                    "min_response_time": 0 if s.min_response_time is None else proper_round(s.min_response_time),
                    "max_response_time": proper_round(s.max_response_time),
                    "current_rps": s.current_rps,
                    "current_fail_per_sec": s.current_fail_per_sec,
                    "median_response_time": s.median_response_time,
                    "ninetieth_response_time": s.get_response_time_percentile(0.9),
                    "avg_content_length": s.avg_content_length,
                })

            errors = []
            for e in environment.runner.errors.values():
                err_dict = e.to_dict()
                err_dict["name"] = escape(err_dict["name"])
                err_dict["error"] = escape(err_dict["error"])
                errors.append(err_dict)

            # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
            # to render extremely slowly. Aggregate stats should be preserved.
            report = {"stats": stats[:500], "errors": errors[:500]}
            if len(stats) > 500:
                report["stats"] += [stats[-1]]

            if stats:
                report["total_rps"] = stats[len(stats) - 1]["current_rps"]
                report["fail_ratio"] = environment.runner.stats.total.fail_ratio
                report["current_response_time_percentile_95"] = environment.runner.stats.total.get_current_response_time_percentile(0.95)
                report["current_response_time_percentile_50"] = environment.runner.stats.total.get_current_response_time_percentile(0.5)
                report["current_response_time_percentile_90"] = environment.runner.stats.total.get_current_response_time_percentile(0.9)

            is_distributed = isinstance(environment.runner, MasterRunner)
            if is_distributed:
                workers = []
                for worker in environment.runner.clients.values():
                    workers.append({"id": worker.id, "state": worker.state, "user_count": worker.user_count, "cpu_usage": worker.cpu_usage})

                report["workers"] = workers

            report["state"] = environment.runner.state
            report["user_count"] = environment.runner.user_count

            return jsonify(report)
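In Locust's web UI this handler backs the /stats/requests JSON endpoint that the dashboard polls. A minimal sketch of consuming it from a script (host, port, and the failure threshold are assumptions for illustration):

import requests

# Assumes a Locust web UI on localhost:8089; the keys below match the report
# dict built in the handler above.
report = requests.get("http://localhost:8089/stats/requests").json()
print("state:", report["state"], "users:", report["user_count"])
if report.get("fail_ratio", 0) > 0.05:  # arbitrary 5% threshold
    print("failure ratio too high:", report["fail_ratio"])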
Example #5
    def collect(self):
        # collect metrics only when locust runner is hatching or running.
        runner = self.runner

        if runner and runner.state in (locust_runners.STATE_HATCHING, locust_runners.STATE_RUNNING):
            stats = []
            for s in chain(locust_stats.sort_stats(runner.stats.entries), [runner.stats.total]):
                stats.append({
                    "method": s.method,
                    "name": s.name,
                    "num_requests": s.num_requests,
                    "num_failures": s.num_failures,
                    "avg_response_time": s.avg_response_time,
                    "min_response_time": s.min_response_time or 0,
                    "max_response_time": s.max_response_time,
                    "current_rps": s.current_rps,
                    "median_response_time": s.median_response_time,
                    "ninetieth_response_time": s.get_response_time_percentile(0.9),
                    # only the aggregated/total stats support current response time percentiles:
                    # "current_response_time_percentile_95": s.get_current_response_time_percentile(0.95),
                    "avg_content_length": s.avg_content_length,
                    "current_fail_per_sec": s.current_fail_per_sec
                })

            # Note: StatsError.parse_error in e.to_dict() may only work on a Python slave.
            errors = [e.to_dict() for e in six.itervalues(runner.stats.errors)]

            metric = Metric('locust_user_count', 'Swarmed users', 'gauge')
            metric.add_sample('locust_user_count', value=runner.user_count, labels={})
            yield metric
            
            metric = Metric('locust_errors', 'Locust requests errors', 'gauge')
            for err in errors:
                metric.add_sample('locust_errors', value=err['occurrences'],
                                  labels={'path': err['name'], 'method': err['method'],
                                          'error': err['error']})
            yield metric

            is_distributed = isinstance(runner, locust_runners.MasterRunner)
            if is_distributed:
                metric = Metric('locust_slave_count', 'Locust number of slaves', 'gauge')
                metric.add_sample('locust_slave_count', value=len(runner.clients.values()), labels={})
                yield metric

            metric = Metric('locust_fail_ratio', 'Locust failure ratio', 'gauge')
            metric.add_sample('locust_fail_ratio', value=runner.stats.total.fail_ratio, labels={})
            yield metric

            metric = Metric('locust_state', 'State of the locust swarm', 'gauge')
            metric.add_sample('locust_state', value=1, labels={'state': runner.state})
            yield metric

            stats_metrics = ['avg_content_length', 'avg_response_time', 'current_rps', 'current_fail_per_sec',
                             'max_response_time', 'ninetieth_response_time', 'median_response_time', 'min_response_time',
                             'num_failures', 'num_requests']

            for mtr in stats_metrics:
                mtype = 'gauge'
                if mtr in ['num_requests', 'num_failures']:
                    mtype = 'counter'
                metric = Metric('locust_stats_' + mtr, 'Locust stats ' + mtr, mtype)
                for stat in stats:
                    # The aggregated entry's method is None, so label it 'Aggregated'.
                    # Locust renamed 'Total' to 'Aggregated' in 0.12.1.
                    if 'Aggregated' != stat['name']:
                        metric.add_sample('locust_stats_' + mtr, value=stat[mtr],
                                          labels={'path': stat['name'], 'method': stat['method']})
                    else:
                        metric.add_sample('locust_stats_' + mtr, value=stat[mtr],
                                          labels={'path': stat['name'], 'method': 'Aggregated'})
                yield metric
Example #6
    def collect(self):
        # A non-None locust_runner indicates that a test has started.
        if runners.locust_runner:

            stats = []

            for s in locust_stats.sort_stats(runners.locust_runner.request_stats):
                stats.append(
                    {
                        "method": s.method,
                        "name": s.name,
                        "num_requests": s.num_requests,
                        "num_failures": s.num_failures,
                        "avg_response_time": s.avg_response_time,
                        "min_response_time": s.min_response_time or 0,
                        "max_response_time": s.max_response_time,
                        "current_rps": s.current_rps,
                        "median_response_time": s.median_response_time,
                        "avg_content_length": s.avg_content_length,
                    }
                )

            metric = Metric("locust_user_count", "Swarmed users", "gauge")
            metric.add_sample(
                "locust_user_count", value=runners.locust_runner.user_count, labels={}
            )

            yield metric

            errors = [e.to_dict() for e in runners.locust_runner.errors.values()]

            metric = Metric("locust_errors", "Locust requests errors", "gauge")
            for err in errors:
                metric.add_sample(
                    "locust_errors",
                    value=err["occurrences"],
                    labels={"path": err["name"], "method": err["method"]},
                )
            yield metric

            is_distributed = isinstance(
                runners.locust_runner, runners.MasterLocustRunner
            )
            if is_distributed:
                metric = Metric(
                    "locust_slave_count", "Locust number of slaves", "gauge"
                )
                metric.add_sample(
                    "locust_slave_count",
                    value=len(runners.locust_runner.clients.values()),
                    labels={},
                )
                yield metric

            metric = Metric("locust_fail_ratio", "Locust failure ratio", "gauge")
            metric.add_sample(
                "locust_fail_ratio",
                value=runners.locust_runner.stats.total.fail_ratio,
                labels={},
            )
            yield metric

            metric = Metric("locust_state", "State of the locust swarm", "gauge")
            metric.add_sample(
                "locust_state", value=1, labels={"state": runners.locust_runner.state}
            )
            yield metric

            stats_metrics = [
                "avg_content_length",
                "avg_response_time",
                "current_rps",
                "max_response_time",
                "median_response_time",
                "min_response_time",
                "num_failures",
                "num_requests",
            ]

            for mtr in stats_metrics:
                mtype = "gauge"
                if mtr in ["num_requests", "num_failures"]:
                    mtype = "counter"
                metric = Metric(
                    "locust_requests_" + mtr, "Locust requests " + mtr, mtype
                )
                for stat in stats:
                    if "Total" not in stat["name"]:
                        metric.add_sample(
                            "locust_requests_" + mtr,
                            value=stat[mtr],
                            labels={"path": stat["name"], "method": stat["method"]},
                        )
                yield metric
Example #7
def request_stats(test_id):
    if locust_runner_id == test_id:
        stats = []

        for s in chain(sort_stats(runners.locust_runner.request_stats),
                       [runners.locust_runner.stats.total]):
            stats.append({
                "method": s.method,
                "name": s.name,
                "num_requests": s.num_requests,
                "num_failures": s.num_failures,
                "avg_response_time": s.avg_response_time,
                "min_response_time": round(s.min_response_time) if s.min_response_time else 0,
                "max_response_time": round(s.max_response_time),
                "current_rps": s.current_rps,
                "median_response_time": s.median_response_time,
                "avg_content_length": s.avg_content_length,
            })

        errors = [
            e.to_dict() for e in six.itervalues(runners.locust_runner.errors)
        ]

        # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
        # to render extremely slowly. Aggregate stats should be preserved.
        report = {"stats": stats[:500], "errors": errors[:500]}

        if stats:
            report["total_rps"] = stats[len(stats) - 1]["current_rps"]
            report["fail_ratio"] = runners.locust_runner.stats.total.fail_ratio
            report["current_response_time_percentile_95"] = \
                runners.locust_runner.stats.total.get_current_response_time_percentile(0.95)
            report["current_response_time_percentile_50"] = \
                runners.locust_runner.stats.total.get_current_response_time_percentile(0.5)

        is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
        if is_distributed:
            slaves = []
            for slave in runners.locust_runner.clients.values():
                slaves.append({
                    "id": slave.id,
                    "state": slave.state,
                    "user_count": slave.user_count
                })

            report["slaves"] = slaves

        report["state"] = runners.locust_runner.state
        report["user_count"] = runners.locust_runner.user_count
    else:
        report = json.loads(
            get_data_from_django(DJANGO_GET_STATISTICS_DATA_URL % test_id))

    return jsonify(report)
Example #8
        def request_stats():
            stats = []

            for s in chain(sort_stats(self.environment.runner.stats.entries), [environment.runner.stats.total]):
                stats.append(
                    {
                        "method":                  s.method,
                        "name":                    s.name,
                        "safe_name":               escape(s.name,quote=False),
                        "num_requests":            s.num_requests,
                        "num_failures":            s.num_failures,
                        "avg_response_time":       s.avg_response_time,
                        "min_response_time":       0 if s.min_response_time is None else proper_round(s.min_response_time),
                        "max_response_time":       proper_round(s.max_response_time),
                        "current_rps":             s.current_rps,
                        "current_fail_per_sec":    s.current_fail_per_sec,
                        "median_response_time":    s.median_response_time,
                        "ninetieth_response_time": s.get_response_time_percentile(0.9),
                        "avg_content_length":      s.avg_content_length,
                    }
                )

            errors = []
            for e in environment.runner.errors.values():
                err_dict = e.to_dict()
                err_dict["name"] = escape(err_dict["name"])
                err_dict["error"] = escape(err_dict["error"])
                errors.append(err_dict)

            # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
            # to render extremely slowly. Aggregate stats should be preserved.
            report = {"stats": stats[:500],"errors": errors[:500]}
            if len(stats) > 500:
                report["stats"] += [stats[-1]]

            if stats:
                report["total_rps"] = stats[len(stats) - 1]["current_rps"]
                report["fail_ratio"] = environment.runner.stats.total.fail_ratio
                report["current_response_time_percentile_95"] = \
                    environment.runner.stats.total.get_current_response_time_percentile(0.95)
                report["current_response_time_percentile_50"] = \
                    environment.runner.stats.total.get_current_response_time_percentile(0.5)

            is_distributed = isinstance(environment.runner, MasterRunner)
            if is_distributed:
                workers = []
                missing_client_ids = []
                for key, worker in environment.runner.clients.items():
                    if worker.state == runners.STATE_MISSING:
                        missing_client_ids.append(key)
                        continue
                    workers.append({
                        "id": worker.id,
                        "state": worker.state,
                        "user_count": worker.user_count,
                        "cpu_usage": worker.cpu_usage,
                    })
                # Remove workers that have gone missing from the runner's client list.
                for missing_client_id in missing_client_ids:
                    del environment.runner.clients[missing_client_id]
                report["workers"] = workers
                worker_ids = [w.get("id") for w in workers]
                report["slaves"] = [
                    {
                        "slave": address,
                        "clientId": self.workedServser.get(address) if self.workedServser.get(address) in worker_ids else "-",
                        "rectMsg": self.recvMesg.get(address) or "",
                    }
                    for address in self.etcdt.servAddressList
                ]
            # print("environment.runner.state",environment.runner.state)
            report["state"] = environment.runner.state
            report["user_count"] = environment.runner.user_count

            return jsonify(report)
Example #9
def prometheus_metrics():
    is_distributed = isinstance(runners.locust_runner,
                                runners.MasterLocustRunner)
    if is_distributed:
        slave_count = runners.locust_runner.slave_count
    else:
        slave_count = 0

    if runners.locust_runner.host:
        host = runners.locust_runner.host
    elif len(runners.locust_runner.locust_classes) > 0:
        host = runners.locust_runner.locust_classes[0].host
    else:
        host = None

    state = 1
    if runners.locust_runner.state != "running":
        state = 0

    rows = []
    for s in stats.sort_stats(runners.locust_runner.request_stats):
        rows.append(
            "locust_request_count{{endpoint=\"{}\", method=\"{}\"}} {}\n"
            "locust_request_per_second{{endpoint=\"{}\"}} {}\n"
            "locust_failed_requests{{endpoint=\"{}\", method=\"{}\"}} {}\n"
            "locust_average_response{{endpoint=\"{}\", method=\"{}\"}} {}\n"
            "locust_average_content_length{{endpoint=\"{}\", method=\"{}\"}} {}\n"
            "locust_max_response_time{{endpoint=\"{}\", method=\"{}\"}} {}\n"
            "locust_running{{site=\"{}\"}} {}\n"
            "locust_workers{{site=\"{}\"}} {}\n"
            "locust_users{{site=\"{}\"}} {}\n".format(
                s.name,
                s.method,
                s.num_requests,
                s.name,
                s.total_rps,
                s.name,
                s.method,
                s.num_failures,
                s.name,
                s.method,
                s.avg_response_time,
                s.name,
                s.method,
                s.avg_content_length,
                s.name,
                s.method,
                s.max_response_time,
                host,
                state,
                host,
                slave_count,
                host,
                runners.locust_runner.user_count,
            ))

    response = make_response("".join(rows))
    # Setting content_type is sufficient; it drives the Content-Type header.
    response.content_type = "text/plain; charset=utf-8"
    return response
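The hand-built exposition format above works, but similar output could also be produced with prometheus_client, which handles label escaping and formatting. A rough sketch for the request-count metric only, reusing the stats and runners names from the example (not a drop-in replacement):

from prometheus_client import CollectorRegistry, Gauge, generate_latest

# Build a throwaway registry per scrape and let the client library render the
# Prometheus text exposition format.
registry = CollectorRegistry()
request_count = Gauge("locust_request_count", "Locust request count",
                      ["endpoint", "method"], registry=registry)
for s in stats.sort_stats(runners.locust_runner.request_stats):
    request_count.labels(endpoint=s.name, method=s.method).set(s.num_requests)
body = generate_latest(registry)  # bytes in the Prometheus text format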