Code example #1
    def test_add_iteration_and_render(self, info, data, expected):

        table = charts.MainStatsTable(info)
        for el in data:
            table.add_iteration(el)

        self.assertEqual(expected, table.render())
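
Note: examples #1 and #3 are parameterized test bodies; their (info, data, expected) fixtures are supplied elsewhere in test_charts.py (likely via a decorator such as ddt.data) and are not shown here. Below is a minimal, self-contained sketch of the same add_iteration()/render() cycle. It assumes rally is installed, that MainStatsTable is importable from rally.task.processing.charts, and that iteration dicts use the newer list-based atomic_actions shape seen in examples #4 and #7; exact keys vary across rally versions, as the snippets on this page show.

from rally.task.processing import charts

def render_small_table():
    # Two successful iterations, each with one atomic action "foo".
    table = charts.MainStatsTable({"total_iteration_count": 2})
    iterations = [
        {"duration": 1.0, "idle_duration": 0.0, "error": [],
         "atomic_actions": [{"name": "foo", "started_at": 0.0,
                             "finished_at": 0.5, "children": []}]},
        {"duration": 2.0, "idle_duration": 0.0, "error": [],
         "atomic_actions": [{"name": "foo", "started_at": 10.0,
                             "finished_at": 11.5, "children": []}]},
    ]
    for itr in iterations:
        table.add_iteration(itr)
    # render() returns {"cols": [...], "rows": [...]}, as example #3 asserts.
    return table.render()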
Code example #2
File: test_charts.py  Project: ePlusPS/gbp-rally
    def test_add_iteration_and_render(self):
        table = charts.MainStatsTable({
            "iterations_count": 42,
            "atomic": {"foo": {}, "bar": {}}
        })
        for i in range(1, 43):
            table.add_iteration({
                "atomic_actions": costilius.OrderedDict(
                    [("foo", i), ("bar", 43 - 1)]),
                "duration": i,
                "error": i % 40
            })
        expected_rows = [
            ["foo", 1.0, 21.5, 38.5, 40.5, 42.0, 21.5, "100.0%", 42.0],
            ["bar", 42.0, 42.0, 42.0, 42.0, 42.0, 42.0, "100.0%", 42.0],
            ["total", 0.0, 0.0, 0.0, 0.0, 40.0, 0.952, "100.0%", 42.0]]
        self.assertEqual({"cols": self.columns, "rows": expected_rows},
                         table.render())
Code example #3
    def test_add_iteration_and_render(self, info, data, expected_rows):

        table = charts.MainStatsTable(info)
        for el in data:
            table.add_iteration(el)
        expected = {"cols": ["Action", "Min (sec)", "Median (sec)",
                             "90%ile (sec)", "95%ile (sec)", "Max (sec)",
                             "Avg (sec)", "Success", "Count"],
                    "rows": expected_rows}
        self.assertEqual(expected, table.render())
Code example #4
File: test_charts.py  Project: robertgitlab/rally
    def test_to_dict(self):
        table = charts.MainStatsTable(
            {"total_iteration_count": 4, "statistics": {
                "atomics": collections.OrderedDict([("foo", {}),
                                                    ("bar", {})])}})
        data = [generate_iteration(1.6, True, ("foo", 1.2)),
                generate_iteration(5.2, False, ("foo", 1.2)),
                generate_iteration(5.0, True, ("bar", 4.8)),
                generate_iteration(12.3, False, ("foo", 4.2), ("bar", 5.6))]
        for el in data:
            table.add_iteration(el)

        self.assertEqual(
            {"atomics": [{"90%ile": 3.9,
                          "95%ile": 4.05,
                          "avg": 2.7,
                          "count": 3,
                          "max": 4.2,
                          "median": 2.7,
                          "min": 1.2,
                          "name": "foo",
                          "success": "66.7%"},
                         {"90%ile": 5.6,
                          "95%ile": 5.6,
                          "avg": 5.6,
                          "count": 2,
                          "max": 5.6,
                          "median": 5.6,
                          "min": 5.6,
                          "name": "bar",
                          "success": "50.0%"}],
             "total": {"90%ile": 11.59,
                       "95%ile": 11.945,
                       "avg": 8.75,
                       "count": 4,
                       "max": 12.3,
                       "median": 8.75,
                       "min": 5.2,
                       "name": "total",
                       "success": "50.0%"}}, table.to_dict())
Code example #5
File: api.py  Project: ktibi/rally
    def workload_set_results(self, workload_uuid, subtask_uuid, task_uuid,
                             load_duration, full_duration, start_time,
                             sla_results, hooks_results, contexts_results):
        session = get_session()
        with session.begin():
            workload_results = self._task_workload_data_get_all(workload_uuid)

            iter_count = len(workload_results)

            failed_iter_count = 0
            max_duration = None
            min_duration = None

            for d in workload_results:
                if d.get("error"):
                    failed_iter_count += 1

                duration = d.get("duration", 0)

                if max_duration is None or duration > max_duration:
                    max_duration = duration

                if min_duration is None or min_duration > duration:
                    min_duration = duration

            durations_stat = charts.MainStatsTable(
                {"total_iteration_count": iter_count})

            for itr in workload_results:
                durations_stat.add_iteration(itr)

            sla = sla_results or []
            # NOTE(ikhudoshyn): we call it 'pass_sla'
            # for the sake of consistency with other models
            # so if no SLAs were specified, then we assume pass_sla == True
            success = all([s.get("success") for s in sla])

            session.query(
                models.Workload).filter_by(uuid=workload_uuid).update({
                    "sla_results": {"sla": sla},
                    "contexts_results": contexts_results,
                    "hooks": hooks_results or [],
                    "load_duration": load_duration,
                    "full_duration": full_duration,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "start_time": start_time,
                    "statistics": {"durations": durations_stat.to_dict()},
                    "pass_sla": success
                })
            task_values = {
                "task_duration": models.Task.task_duration + load_duration
            }
            if not success:
                task_values["pass_sla"] = False

            subtask_values = {
                "duration": models.Subtask.duration + load_duration
            }
            if not success:
                subtask_values["pass_sla"] = False
            session.query(
                models.Task).filter_by(uuid=task_uuid).update(task_values)

            session.query(models.Subtask).filter_by(
                uuid=subtask_uuid).update(subtask_values)
Code example #6
File: test_charts.py  Project: tuniu1985/rally-1
    def test_to_dict(self):
        table = charts.MainStatsTable({"total_iteration_count": 4})
        data = [generate_iteration(1.6, True, ("foo", 1.2)),
                generate_iteration(5.2, False, ("foo", 1.2)),
                generate_iteration(5.0, True, ("bar", 4.8)),
                generate_iteration(12.3, False, ("foo", 4.2), ("bar", 5.6))]
        for el in data:
            table.add_iteration(el)

        self.assertEqual({
            "atomics": [{"children": [],
                         "data": {"90%ile": 3.6,
                                  "95%ile": 3.9,
                                  "avg": 2.2,
                                  "iteration_count": 3,
                                  "max": 4.2,
                                  "median": 1.2,
                                  "min": 1.2,
                                  "success": "66.7%"},
                         "display_name": "foo",
                         "count_per_iteration": 1,
                         "name": "foo"},
                        {"children": [],
                         "data": {"90%ile": 5.52,
                                  "95%ile": 5.56,
                                  "avg": 5.2,
                                  "iteration_count": 2,
                                  "max": 5.6,
                                  "median": 5.2,
                                  "min": 4.8,
                                  "success": "50.0%"},
                         "display_name": "bar",
                         "count_per_iteration": 1,
                         "name": "bar"}],
            "total": {"data": {"90%ile": 10.17,
                               "95%ile": 11.235,
                               "avg": 6.025,
                               "iteration_count": 4,
                               "max": 12.3,
                               "median": 5.1,
                               "min": 1.6,
                               "success": "50.0%"},
                      "display_name": "total",
                      "count_per_iteration": 1,
                      "name": "total",
                      "children": [
                          {"children": [],
                           "count_per_iteration": 1,
                           "data": {"90%ile": 10.17,
                                    "95%ile": 11.235,
                                    "avg": 6.025,
                                    "iteration_count": 4,
                                    "max": 12.3,
                                    "median": 5.1,
                                    "min": 1.6,
                                    "success": "50.0%"},
                           "display_name": "duration",
                           "name": "duration"},
                          {"children": [],
                           "count_per_iteration": 1,
                           "data": {"90%ile": 0.0,
                                    "95%ile": 0.0,
                                    "avg": 0.0,
                                    "iteration_count": 4,
                                    "max": 0.0,
                                    "median": 0.0,
                                    "min": 0.0,
                                    "success": "50.0%"},
                           "display_name": "idle_duration",
                           "name": "idle_duration"}]
                      }
        }, table.to_dict())
Code example #7
    def _load_task_results_file(self, api, task_id):
        """Load the json file which is created by `rally task results`"""

        with open(os.path.expanduser(task_id)) as inp_js:
            tasks_results = json.loads(inp_js.read())

        if isinstance(tasks_results, list):
            # it is an old format:

            task = {
                "version": 2,
                "title": "Task loaded from a file.",
                "description": "Auto-ported from task format V1.",
                "uuid": "n/a",
                "env_name": "n/a",
                "env_uuid": "n/a",
                "tags": [],
                "status": consts.TaskStatus.FINISHED,
                "subtasks": []
            }

            start_time = None

            for result in tasks_results:
                try:
                    jsonschema.validate(result, OLD_TASK_RESULT_SCHEMA)
                except jsonschema.ValidationError as e:
                    raise FailedToLoadResults(source=task_id,
                                              msg=six.text_type(e))

                iter_count = 0
                failed_iter_count = 0
                min_duration = None
                max_duration = None

                for itr in result["result"]:
                    if start_time is None or itr["timestamp"] < start_time:
                        start_time = itr["timestamp"]
                    # NOTE(chenhb): back compatible for atomic_actions
                    itr["atomic_actions"] = list(
                        tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                       itr["timestamp"]))

                    iter_count += 1
                    if itr.get("error"):
                        failed_iter_count += 1

                    duration = itr.get("duration", 0)

                    if max_duration is None or duration > max_duration:
                        max_duration = duration

                    if min_duration is None or min_duration > duration:
                        min_duration = duration

                durations_stat = charts.MainStatsTable(
                    {"total_iteration_count": iter_count})

                for itr in result["result"]:
                    durations_stat.add_iteration(itr)

                created_at = dt.datetime.strptime(result["created_at"],
                                                  "%Y-%d-%mT%H:%M:%S")
                updated_at = created_at + dt.timedelta(
                    seconds=result["full_duration"])
                created_at = created_at.strftime(consts.TimeFormat.ISO8601)
                updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
                pass_sla = all(s.get("success") for s in result["sla"])
                runner_type = result["key"]["kw"]["runner"].pop("type")
                for h in result["hooks"]:
                    trigger = h["config"]["trigger"]
                    h["config"] = {
                        "description": h["config"].get("description"),
                        "action": (h["config"]["name"], h["config"]["args"]),
                        "trigger": (trigger["name"], trigger["args"])
                    }
                workload = {
                    "uuid": "n/a",
                    "name": result["key"]["name"],
                    "position": result["key"]["pos"],
                    "description": result["key"].get("description", ""),
                    "full_duration": result["full_duration"],
                    "load_duration": result["load_duration"],
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "start_time": start_time,
                    "created_at": created_at,
                    "updated_at": updated_at,
                    "args": result["key"]["kw"]["args"],
                    "runner_type": runner_type,
                    "runner": result["key"]["kw"]["runner"],
                    "hooks": result["hooks"],
                    "sla": result["key"]["kw"]["sla"],
                    "sla_results": {
                        "sla": result["sla"]
                    },
                    "pass_sla": pass_sla,
                    "contexts": result["key"]["kw"]["context"],
                    "contexts_results": [],
                    "data": sorted(result["result"],
                                   key=lambda x: x["timestamp"]),
                    "statistics": {
                        "durations": durations_stat.to_dict()
                    },
                }
                task["subtasks"].append({
                    "title": "A SubTask",
                    "description": "",
                    "workloads": [workload]
                })
            return [task]
        elif isinstance(tasks_results, dict) and "tasks" in tasks_results:
            for task_result in tasks_results["tasks"]:
                try:
                    jsonschema.validate(task_result, api.task.TASK_SCHEMA)
                except jsonschema.ValidationError as e:
                    msg = six.text_type(e)
                    raise exceptions.RallyException(
                        "ERROR: Invalid task result format\n\n\t%s" % msg)
                task_result.setdefault("env_name", "n/a")
                task_result.setdefault("env_uuid", "n/a")
                for subtask in task_result["subtasks"]:
                    for workload in subtask["workloads"]:
                        workload.setdefault("contexts_results", [])
                        workload["runner_type"], workload["runner"] = list(
                            workload["runner"].items())[0]
                        workload["name"], workload["args"] = list(
                            workload.pop("scenario").items())[0]

            return tasks_results["tasks"]
        else:
            raise FailedToLoadResults(source=task_id, msg="Wrong format")
Code example #8
def _process_scenario(data, pos):
    main_area = charts.MainStackedAreaChart(data["info"])
    main_hist = charts.MainHistogramChart(data["info"])
    main_stat = charts.MainStatsTable(data["info"])
    load_profile = charts.LoadProfileChart(data["info"])
    atomic_pie = charts.AtomicAvgChart(data["info"])
    atomic_area = charts.AtomicStackedAreaChart(data["info"])
    atomic_hist = charts.AtomicHistogramChart(data["info"])

    errors = []
    output_errors = []
    additive_output_charts = []
    complete_output = []
    for idx, itr in enumerate(data["iterations"], 1):
        if itr["error"]:
            typ, msg, trace = itr["error"]
            errors.append({"iteration": idx,
                           "type": typ, "message": msg, "traceback": trace})

        for i, additive in enumerate(itr["output"]["additive"]):
            try:
                additive_output_charts[i].add_iteration(additive["data"])
            except IndexError:
                chart_cls = plugin.Plugin.get(additive["chart_plugin"])
                chart = chart_cls(
                    data["info"], title=additive["title"],
                    description=additive.get("description", ""),
                    label=additive.get("label", ""),
                    axis_label=additive.get("axis_label",
                                            "Iteration sequence number"))
                chart.add_iteration(additive["data"])
                additive_output_charts.append(chart)

        complete_charts = []
        for complete in itr["output"]["complete"]:
            complete_chart = dict(complete)
            chart_cls = plugin.Plugin.get(complete_chart.pop("chart_plugin"))
            complete_chart["widget"] = chart_cls.widget
            complete_charts.append(complete_chart)
        complete_output.append(complete_charts)

        for chart in (main_area, main_hist, main_stat, load_profile,
                      atomic_pie, atomic_area, atomic_hist):
            chart.add_iteration(itr)

    kw = data["key"]["kw"]
    cls, method = data["key"]["name"].split(".")
    additive_output = [chart.render() for chart in additive_output_charts]
    iterations_count = data["info"]["iterations_count"]
    return {
        "cls": cls,
        "met": method,
        "pos": str(pos),
        "name": method + (pos and " [%d]" % (pos + 1) or ""),
        "runner": kw["runner"]["type"],
        "config": json.dumps({data["key"]["name"]: [kw]}, indent=2),
        "iterations": {
            "iter": main_area.render(),
            "pie": [("success", (data["info"]["iterations_count"]
                                 - len(errors))),
                    ("errors", len(errors))],
            "histogram": main_hist.render()},
        "load_profile": load_profile.render(),
        "atomic": {"histogram": atomic_hist.render(),
                   "iter": atomic_area.render(),
                   "pie": atomic_pie.render()},
        "table": main_stat.render(),
        "additive_output": additive_output,
        "complete_output": complete_output,
        "output_errors": output_errors,
        "errors": errors,
        "load_duration": data["info"]["load_duration"],
        "full_duration": data["info"]["full_duration"],
        "sla": data["sla"],
        "sla_success": all([s["success"] for s in data["sla"]]),
        "iterations_count": iterations_count,
    }
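
The chart objects driven in this loop share a small duck-typed interface: construct with the scenario info, feed every iteration through add_iteration(), then call render() once at the end. A hypothetical minimal chart honoring that contract (not a real rally plugin; rally's charts also differ in what render() returns):

class MinimalAvgDurationChart(object):
    """Hypothetical chart: average iteration duration.

    Illustrates the add_iteration()/render() contract used above.
    """

    def __init__(self, info, title="Average duration"):
        self.title = title
        self._total = 0.0
        self._count = 0

    def add_iteration(self, iteration):
        # Keep running totals rather than all iterations, so memory
        # stays flat however long the workload ran.
        self._total += iteration.get("duration", 0)
        self._count += 1

    def render(self):
        avg = self._total / self._count if self._count else 0.0
        return {"title": self.title, "avg": avg}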
Code example #9
File: task.py  Project: KoutaCS/rally
    def _load_task_results_file(self, api, task_id):
        """Load the json file which is created by `rally task results` """
        with open(os.path.expanduser(task_id)) as inp_js:
            tasks_results = yaml.safe_load(inp_js)

        if isinstance(tasks_results, list):
            # it is an old format:

            task = {
                "version": 2,
                "title": "Task loaded from a file.",
                "description": "Auto-ported from task format V1.",
                "uuid": "n/a",
                "tags": [],
                "subtasks": []
            }

            start_time = None

            for result in tasks_results:
                try:
                    jsonschema.validate(result, OLD_TASK_RESULT_SCHEMA)
                except jsonschema.ValidationError as e:
                    raise FailedToLoadResults(source=task_id,
                                              msg=six.text_type(e))

                iter_count = 0
                failed_iter_count = 0
                min_duration = None
                max_duration = None

                for itr in result["result"]:
                    if start_time is None or itr["timestamp"] < start_time:
                        start_time = itr["timestamp"]
                    # NOTE(chenhb): back compatible for atomic_actions
                    itr["atomic_actions"] = list(
                        tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                       itr["timestamp"]))

                    iter_count += 1
                    if itr.get("error"):
                        failed_iter_count += 1

                    duration = itr.get("duration", 0)

                    if max_duration is None or duration > max_duration:
                        max_duration = duration

                    if min_duration is None or min_duration > duration:
                        min_duration = duration

                durations_stat = charts.MainStatsTable(
                    {"total_iteration_count": iter_count})

                for itr in result["result"]:
                    durations_stat.add_iteration(itr)

                updated_at = dt.datetime.strptime(result["created_at"],
                                                  "%Y-%m-%dT%H:%M:%S")
                updated_at += dt.timedelta(seconds=result["full_duration"])
                updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
                pass_sla = all(s.get("success") for s in result["sla"])
                workload = {
                    "uuid": "n/a",
                    "name": result["key"]["name"],
                    "position": result["key"]["pos"],
                    "description": result["key"].get("description", ""),
                    "full_duration": result["full_duration"],
                    "load_duration": result["load_duration"],
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "start_time": start_time,
                    "created_at": result["created_at"],
                    "updated_at": updated_at,
                    "args": result["key"]["kw"]["args"],
                    "runner": result["key"]["kw"]["runner"],
                    "hooks": [{
                        "config": h
                    } for h in result["key"]["kw"]["hooks"]],
                    "sla": result["key"]["kw"]["sla"],
                    "sla_results": {
                        "sla": result["sla"]
                    },
                    "pass_sla": pass_sla,
                    "context": result["key"]["kw"]["context"],
                    "data": sorted(result["result"],
                                   key=lambda x: x["timestamp"]),
                    "statistics": {
                        "durations": durations_stat.to_dict()
                    },
                }
                task["subtasks"].append({
                    "title": "A SubTask",
                    "description": "",
                    "workloads": [workload]
                })
            return task
        else:
            raise FailedToLoadResults(source=task_id, msg="Wrong format")
Code example #10
File: task.py  Project: stefan-stojkovski/rally
    def extend_results(cls, results, serializable=False):
        """Modify and extend results with aggregated data.

        This is a workaround method that adapts task results to the
        schema of the planned DB refactoring; it is expected to be
        simplified after that refactoring, since all the data should
        then be taken as-is directly from the database.

        Each scenario's results carry an extra `info' section with
        aggregated data, and the iterations data is represented by an
        iterator - this simplifies a future implementation as a
        generator and makes it possible to process an arbitrary number
        of iterations with low memory usage.

        :param results: list of db.sqlalchemy.models.TaskResult
        :param serializable: bool, whether to convert json non-serializable
                             types (like datetime) to serializable ones
        :returns: list of dicts, each dict represents scenario results:
                  key - dict, scenario input data
                  sla - list, SLA results
                  iterations - if serializable, then iterator with
                               iterations data, otherwise a list
                  created_at - str datetime,
                  updated_at - str datetime,
                  info:
                      atomic - dict where key is one of atomic action names
                               and value is dict {min_duration: number,
                                                  max_duration: number}
                      iterations_count - int number of iterations
                      iterations_failed - int number of iterations with errors
                      min_duration - float minimum iteration duration
                      max_duration - float maximum iteration duration
                      tstamp_start - float timestamp of the first iteration
                      full_duration - float full scenario duration
                      load_duration - float load scenario duration
        """
        def _merge_atomic(atomic_actions):
            merged_atomic = collections.OrderedDict()
            for action in atomic_actions:
                name = action["name"]
                duration = action["finished_at"] - action["started_at"]
                if name not in merged_atomic:
                    merged_atomic[name] = {"duration": duration, "count": 1}
                else:
                    merged_atomic[name]["duration"] += duration
                    merged_atomic[name]["count"] += 1
            return merged_atomic

        extended = []
        for scenario_result in results:
            scenario = dict(scenario_result)
            tstamp_start = 0
            min_duration = 0
            max_duration = 0
            iterations_failed = 0
            atomic = collections.OrderedDict()

            for itr in scenario["data"]["raw"]:
                merged_atomic = _merge_atomic(itr["atomic_actions"])
                for name, value in merged_atomic.items():
                    duration = value["duration"]
                    count = value["count"]
                    if name not in atomic or count > atomic[name]["count"]:
                        atomic[name] = {
                            "min_duration": duration,
                            "max_duration": duration,
                            "count": count
                        }
                    elif count == atomic[name]["count"]:
                        if duration < atomic[name]["min_duration"]:
                            atomic[name]["min_duration"] = duration
                        if duration > atomic[name]["max_duration"]:
                            atomic[name]["max_duration"] = duration

                if not tstamp_start or itr["timestamp"] < tstamp_start:
                    tstamp_start = itr["timestamp"]

                if "output" not in itr:
                    itr["output"] = {"additive": [], "complete": []}

                    # NOTE(amaretskiy): Deprecated "scenario_output"
                    #     is supported for backward compatibility
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        itr["output"]["additive"].append({
                            "items":
                            itr["scenario_output"]["data"].items(),
                            "title":
                            "Scenario output",
                            "description":
                            "",
                            "chart":
                            "OutputStackedAreaChart"
                        })
                        del itr["scenario_output"]

                if itr["error"]:
                    iterations_failed += 1
                else:
                    duration = itr["duration"] or 0
                    if not min_duration or duration < min_duration:
                        min_duration = duration
                    if not max_duration or duration > max_duration:
                        max_duration = duration

            for k in "created_at", "updated_at":
                if scenario[k] and isinstance(scenario[k], dt.datetime):
                    scenario[k] = scenario[k].strftime("%Y-%d-%m %H:%M:%S")

            durations_stat = charts.MainStatsTable({
                "iterations_count": len(scenario["data"]["raw"]),
                "atomic": atomic
            })

            for itr in scenario["data"]["raw"]:
                durations_stat.add_iteration(itr)

            scenario["info"] = {
                "stat": durations_stat.render(),
                "atomic": atomic,
                "iterations_count": len(scenario["data"]["raw"]),
                "iterations_failed": iterations_failed,
                "min_duration": min_duration,
                "max_duration": max_duration,
                "tstamp_start": tstamp_start,
                "full_duration": scenario["data"]["full_duration"],
                "load_duration": scenario["data"]["load_duration"]
            }
            iterations = sorted(scenario["data"]["raw"],
                                key=lambda itr: itr["timestamp"])
            if serializable:
                scenario["iterations"] = list(iterations)
            else:
                scenario["iterations"] = iter(iterations)
            scenario["sla"] = scenario["data"]["sla"]
            scenario["hooks"] = scenario["data"].get("hooks", [])
            del scenario["data"]
            del scenario["task_uuid"]
            del scenario["id"]
            extended.append(scenario)
        return extended
Code example #11
def _process_scenario(data, pos):
    main_area = charts.MainStackedAreaChart(data["info"])
    main_hist = charts.MainHistogramChart(data["info"])
    main_stat = charts.MainStatsTable(data["info"])
    load_profile = charts.LoadProfileChart(data["info"])
    atomic_pie = charts.AtomicAvgChart(data["info"])
    atomic_area = charts.AtomicStackedAreaChart(data["info"])
    atomic_hist = charts.AtomicHistogramChart(data["info"])
    output_area = charts.OutputStackedAreaChart(data["info"])

    errors = []
    output_errors = []
    for idx, itr in enumerate(data["iterations"]):
        if itr["error"]:
            typ, msg, trace = itr["error"]
            errors.append({
                "iteration": idx,
                "type": typ,
                "message": msg,
                "traceback": trace
            })

        if itr["scenario_output"]["errors"]:
            output_errors.append((idx, itr["scenario_output"]["errors"]))

        for chart in (main_area, main_hist, main_stat, load_profile,
                      atomic_pie, atomic_area, atomic_hist, output_area):
            chart.add_iteration(itr)

    kw = data["key"]["kw"]
    cls, method = data["key"]["name"].split(".")

    return {
        "cls": cls,
        "met": method,
        "pos": str(pos),
        "name": method + (pos and " [%d]" % (pos + 1) or ""),
        "runner": kw["runner"]["type"],
        "config": json.dumps({data["key"]["name"]: [kw]}, indent=2),
        "iterations": {
            "iter":
            main_area.render(),
            "pie":
            [("success", (data["info"]["iterations_count"] - len(errors))),
             ("errors", len(errors))],
            "histogram":
            main_hist.render()[0]
        },
        "load_profile": load_profile.render(),
        "atomic": {
            "histogram": atomic_hist.render(),
            "iter": atomic_area.render(),
            "pie": atomic_pie.render()
        },
        "table": main_stat.render(),
        "output": output_area.render(),
        "output_errors": output_errors,
        "errors": errors,
        "load_duration": data["info"]["load_duration"],
        "full_duration": data["info"]["full_duration"],
        "sla": data["sla"],
        "sla_success": all([s["success"] for s in data["sla"]]),
        "iterations_count": data["info"]["iterations_count"],
    }
Code example #12
def _update_old_results(tasks_results, path):
    """Converts tasks results in old format to latest one."""
    task = {"version": 2,
            "title": "Task loaded from a file.",
            "description": "Auto-ported from task format V1.",
            "uuid": "n/a",
            "env_name": "n/a",
            "env_uuid": "n/a",
            "tags": [],
            "status": consts.TaskStatus.FINISHED,
            "subtasks": []}

    start_time = None

    for result in tasks_results:
        try:
            jsonschema.validate(
                result, OLD_TASK_RESULT_SCHEMA)
        except jsonschema.ValidationError as e:
            raise FailedToLoadResults(source=path,
                                      msg=str(e))

        iter_count = 0
        failed_iter_count = 0
        min_duration = None
        max_duration = None

        for itr in result["result"]:
            if start_time is None or itr["timestamp"] < start_time:
                start_time = itr["timestamp"]
            # NOTE(chenhb): back compatible for atomic_actions
            itr["atomic_actions"] = _update_atomic_actions(
                itr["atomic_actions"], started_at=itr["timestamp"])

            iter_count += 1
            if itr.get("error"):
                failed_iter_count += 1

            duration = itr.get("duration", 0)

            if max_duration is None or duration > max_duration:
                max_duration = duration

            if min_duration is None or min_duration > duration:
                min_duration = duration

        durations_stat = charts.MainStatsTable(
            {"total_iteration_count": iter_count})

        for itr in result["result"]:
            durations_stat.add_iteration(itr)

        created_at = dt.datetime.strptime(result["created_at"],
                                          "%Y-%d-%mT%H:%M:%S")
        updated_at = created_at + dt.timedelta(
            seconds=result["full_duration"])
        created_at = created_at.strftime(consts.TimeFormat.ISO8601)
        updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
        pass_sla = all(s.get("success") for s in result["sla"])
        runner_type = result["key"]["kw"]["runner"].pop("type")
        for h in result["hooks"]:
            trigger = h["config"]["trigger"]
            h["config"] = {
                "description": h["config"].get("description"),
                "action": (h["config"]["name"], h["config"]["args"]),
                "trigger": (trigger["name"], trigger["args"])}
        workload = {"uuid": "n/a",
                    "name": result["key"]["name"],
                    "position": result["key"]["pos"],
                    "description": result["key"].get("description",
                                                     ""),
                    "full_duration": result["full_duration"],
                    "load_duration": result["load_duration"],
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "start_time": start_time,
                    "created_at": created_at,
                    "updated_at": updated_at,
                    "args": result["key"]["kw"]["args"],
                    "runner_type": runner_type,
                    "runner": result["key"]["kw"]["runner"],
                    "hooks": result["hooks"],
                    "sla": result["key"]["kw"]["sla"],
                    "sla_results": {"sla": result["sla"]},
                    "pass_sla": pass_sla,
                    "contexts": result["key"]["kw"]["context"],
                    "contexts_results": [],
                    "data": sorted(result["result"],
                                   key=lambda x: x["timestamp"]),
                    "statistics": {
                        "durations": durations_stat.to_dict()},
                    }
        task["subtasks"].append(
            {"title": "A SubTask",
             "description": "",
             "workloads": [workload]})
    return [task]
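
_update_atomic_actions() is referenced above but not defined in this snippet. A plausible reconstruction, based on the equivalent inline conversion in examples #14 and #18, which lay old-style {name: duration} atomics end to end starting from the iteration's timestamp:

def _update_atomic_actions(atomic_actions, started_at):
    # Plausible reconstruction; mirrors the inline dict-to-list
    # conversion in examples #14 and #18.
    if not isinstance(atomic_actions, dict):
        return atomic_actions  # already in the new list format
    new_atomic_actions = []
    for name, duration in atomic_actions.items():
        finished_at = started_at + duration
        new_atomic_actions.append({"name": name,
                                   "children": [],
                                   "started_at": started_at,
                                   "finished_at": finished_at})
        started_at = finished_at
    return new_atomic_actions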
Code example #13
    def _load_task_results_file(self, api, task_id):
        """Load the json file which is created by `rally task results` """
        with open(os.path.expanduser(task_id)) as inp_js:
            tasks_results = yaml.safe_load(inp_js)

        if isinstance(tasks_results, list):
            # it is an old format:

            task = {"subtasks": []}

            start_time = float("inf")

            for result in tasks_results:
                try:
                    jsonschema.validate(result, api.task.TASK_RESULT_SCHEMA)
                except jsonschema.ValidationError as e:
                    raise FailedToLoadResults(source=task_id,
                                              msg=six.text_type(e))

                iter_count = 0
                failed_iter_count = 0
                min_duration = float("inf")
                max_duration = 0

                atomics = collections.OrderedDict()

                for itr in result["result"]:
                    if itr["timestamp"] < start_time:
                        start_time = itr["timestamp"]
                    # NOTE(chenhb): back compatible for atomic_actions
                    itr["atomic_actions"] = list(
                        tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                       itr["timestamp"]))

                    iter_count += 1
                    if itr.get("error"):
                        failed_iter_count += 1

                    duration = itr.get("duration", 0)

                    if duration > max_duration:
                        max_duration = duration

                    if min_duration and min_duration > duration:
                        min_duration = duration

                    merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
                    for key, value in merged_atomic.items():
                        duration = value["duration"]
                        count = value["count"]
                        if key not in atomics or count > atomics[key]["count"]:
                            atomics[key] = {
                                "min_duration": duration,
                                "max_duration": duration,
                                "count": count
                            }
                        elif count == atomics[key]["count"]:
                            if duration < atomics[key]["min_duration"]:
                                atomics[key]["min_duration"] = duration
                            if duration > atomics[key]["max_duration"]:
                                atomics[key]["max_duration"] = duration

                durations_stat = charts.MainStatsTable({
                    "total_iteration_count": iter_count,
                    "statistics": {"atomics": atomics}
                })

                for itr in result["result"]:
                    durations_stat.add_iteration(itr)

                updated_at = dt.datetime.strptime(result["created_at"],
                                                  "%Y-%m-%dT%H:%M:%S")
                updated_at += dt.timedelta(seconds=result["full_duration"])
                updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
                pass_sla = all(s.get("success") for s in result["sla"])
                workload = {
                    "name": result["key"]["name"],
                    "position": result["key"]["pos"],
                    "description": result["key"].get("description", ""),
                    "full_duration": result["full_duration"],
                    "load_duration": result["load_duration"],
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "start_time": start_time,
                    "created_at": result["created_at"],
                    "updated_at": updated_at,
                    "args": result["key"]["kw"]["args"],
                    "runner": result["key"]["kw"]["runner"],
                    "hooks": [{
                        "config": h
                    } for h in result["key"]["kw"]["hooks"]],
                    "sla": result["key"]["kw"]["sla"],
                    "sla_results": {
                        "sla": result["sla"]
                    },
                    "pass_sla": pass_sla,
                    "context": result["key"]["kw"]["context"],
                    "data": sorted(result["result"],
                                   key=lambda x: x["timestamp"]),
                    "statistics": {
                        "durations": durations_stat.to_dict(),
                        "atomics": atomics
                    },
                }
                task["subtasks"].append({"workloads": [workload]})
            return task
        else:
            raise FailedToLoadResults(source=task_id, msg="Wrong format")
Code example #14
def upgrade():
    connection = op.get_bind()

    for workload in connection.execute(workload_helper.select()):
        full_data = []
        for wdata in connection.execute(
                workload_data_helper.select(
                    workload_data_helper.c.workload_uuid == workload.uuid)):
            chunk_data = wdata.chunk_data["raw"]

            require_updating = False
            for itr in chunk_data:
                if "output" not in itr:
                    itr["output"] = {"additive": [], "complete": []}
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        items = list(itr["scenario_output"]["data"].items())
                        itr["output"]["additive"].append({
                            "items":
                            items,
                            "title":
                            "Scenario output",
                            "description":
                            "",
                            "chart":
                            "OutputStackedAreaChart"
                        })
                        del itr["scenario_output"]
                    require_updating = True
                if isinstance(itr["atomic_actions"], dict):
                    new_atomic_actions = []
                    started_at = itr["timestamp"]
                    for name, d in itr["atomic_actions"].items():
                        finished_at = started_at + d
                        new_atomic_actions.append({
                            "name": name,
                            "children": [],
                            "started_at": started_at,
                            "finished_at": finished_at
                        })
                        started_at = finished_at
                    itr["atomic_actions"] = new_atomic_actions
                    require_updating = True

            if require_updating:
                connection.execute(workload_data_helper.update().where(
                    workload_data_helper.c.uuid == wdata.uuid).values(
                        chunk_data={"raw": chunk_data}))

            full_data.extend(chunk_data)

        if full_data:
            full_data.sort(key=lambda itr: itr["timestamp"])

            start_time = full_data[0]["timestamp"]

            atomics = collections.OrderedDict()

            for itr in full_data:
                merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
                for name, value in merged_atomic.items():
                    duration = value["duration"]
                    count = value["count"]
                    if name not in atomics or count > atomics[name]["count"]:
                        atomics[name] = {
                            "min_duration": duration,
                            "max_duration": duration,
                            "count": count
                        }
                    elif count == atomics[name]["count"]:
                        if duration < atomics[name]["min_duration"]:
                            atomics[name]["min_duration"] = duration
                        if duration > atomics[name]["max_duration"]:
                            atomics[name]["max_duration"] = duration

            durations_stat = charts.MainStatsTable({
                "total_iteration_count": len(full_data),
                "statistics": {"atomics": atomics}
            })

            for itr in full_data:
                durations_stat.add_iteration(itr)

            connection.execute(workload_helper.update().where(
                workload_helper.c.uuid == workload.uuid).values(
                    start_time=start_time,
                    statistics={
                        "durations": durations_stat.render(),
                        "atomics": atomics
                    }))
Code example #15
def _process_workload(workload, workload_cfg, pos):
    main_area = charts.MainStackedAreaChart(workload)
    main_hist = charts.MainHistogramChart(workload)
    main_stat = charts.MainStatsTable(workload)
    load_profile = charts.LoadProfileChart(workload)
    atomic_pie = charts.AtomicAvgChart(workload)
    atomic_area = charts.AtomicStackedAreaChart(workload)
    atomic_hist = charts.AtomicHistogramChart(workload)

    errors = []
    output_errors = []
    additive_output_charts = []
    complete_output = []
    for idx, itr in enumerate(workload["data"], 1):
        if itr["error"]:
            typ, msg, trace = itr["error"]
            timestamp = dt.datetime.fromtimestamp(
                itr["timestamp"]).isoformat(sep="\n")
            errors.append({"iteration": idx, "timestamp": timestamp,
                           "type": typ, "message": msg, "traceback": trace})

        for i, additive in enumerate(itr["output"]["additive"]):
            try:
                additive_output_charts[i].add_iteration(additive["data"])
            except IndexError:
                chart_cls = plugin.Plugin.get(additive["chart_plugin"])
                chart = chart_cls(
                    workload, title=additive["title"],
                    description=additive.get("description", ""),
                    label=additive.get("label", ""),
                    axis_label=additive.get("axis_label",
                                            "Iteration sequence number"))
                chart.add_iteration(additive["data"])
                additive_output_charts.append(chart)

        complete_charts = []
        for complete in itr["output"]["complete"]:
            chart_cls = plugin.Plugin.get(complete["chart_plugin"])
            complete["widget"] = chart_cls.widget
            complete_charts.append(chart_cls.render_complete_data(complete))
        complete_output.append(complete_charts)

        for chart in (main_area, main_hist, main_stat, load_profile,
                      atomic_pie, atomic_area, atomic_hist):
            chart.add_iteration(itr)

    cls, method = workload["name"].split(".")
    additive_output = [chart.render() for chart in additive_output_charts]

    return {
        "cls": cls,
        "met": method,
        "pos": str(pos),
        "name": method + (pos and " [%d]" % (pos + 1) or ""),
        "runner": workload["runner_type"],
        "config": json.dumps(workload_cfg, indent=2),
        "hooks": _process_hooks(workload["hooks"]),
        "description": workload.get("description", ""),
        "iterations": {
            "iter": main_area.render(),
            "pie": [("success", (workload["total_iteration_count"]
                                 - len(errors))),
                    ("errors", len(errors))],
            "histogram": main_hist.render()},
        "load_profile": load_profile.render(),
        "atomic": {"histogram": atomic_hist.render(),
                   "iter": atomic_area.render(),
                   "pie": atomic_pie.render()},
        "table": main_stat.render(),
        "additive_output": additive_output,
        "complete_output": complete_output,
        "has_output": any(additive_output) or any(complete_output),
        "output_errors": output_errors,
        "errors": errors,
        "load_duration": workload["load_duration"],
        "full_duration": workload["full_duration"],
        "created_at": workload["created_at"],
        "sla": workload["sla_results"].get("sla"),
        "sla_success": workload["pass_sla"],
        "iterations_count": workload["total_iteration_count"],
    }
Code example #16
    def workload_set_results(self, workload_uuid, subtask_uuid, task_uuid,
                             load_duration, full_duration, start_time,
                             sla_results, hooks_results):
        session = get_session()
        with session.begin():
            workload_results = self._task_workload_data_get_all(workload_uuid)

            iter_count = len(workload_results)

            failed_iter_count = 0
            max_duration = 0
            min_duration = None

            for d in workload_results:
                if d.get("error"):
                    failed_iter_count += 1

                duration = d.get("duration", 0)

                if duration > max_duration:
                    max_duration = duration

                if min_duration is None or min_duration > duration:
                    min_duration = duration

            atomics = collections.OrderedDict()

            for itr in workload_results:
                merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
                for name, value in merged_atomic.items():
                    duration = value["duration"]
                    count = value["count"]
                    if name not in atomics or count > atomics[name]["count"]:
                        atomics[name] = {
                            "min_duration": duration,
                            "max_duration": duration,
                            "count": count
                        }
                    elif count == atomics[name]["count"]:
                        if duration < atomics[name]["min_duration"]:
                            atomics[name]["min_duration"] = duration
                        if duration > atomics[name]["max_duration"]:
                            atomics[name]["max_duration"] = duration

            durations_stat = charts.MainStatsTable({
                "total_iteration_count": iter_count,
                "statistics": {
                    "atomics": atomics
                }
            })

            for itr in workload_results:
                durations_stat.add_iteration(itr)

            sla = sla_results or []
            # NOTE(ikhudoshyn): we call it 'pass_sla'
            # for the sake of consistency with other models
            # so if no SLAs were specified, then we assume pass_sla == True
            success = all([s.get("success") for s in sla])

            session.query(
                models.Workload).filter_by(uuid=workload_uuid).update({
                    "sla_results": {"sla": sla},
                    "context_execution": {},
                    "hooks": hooks_results or [],
                    "load_duration": load_duration,
                    "full_duration": full_duration,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "start_time": start_time,
                    "statistics": {
                        "durations": durations_stat.to_dict(),
                        "atomics": atomics
                    },
                    "pass_sla": success
                })
            task_values = {
                "task_duration": models.Task.task_duration + load_duration
            }
            if not success:
                task_values["pass_sla"] = False

            subtask_values = {
                "duration": models.Subtask.duration + load_duration
            }
            if not success:
                subtask_values["pass_sla"] = False
            session.query(
                models.Task).filter_by(uuid=task_uuid).update(task_values)

            session.query(models.Subtask).filter_by(
                uuid=subtask_uuid).update(subtask_values)
Code example #17
def upgrade():
    connection = op.get_bind()
    workloads = {}
    for wdata in connection.execute(workload_data_helper.select()):
        workloads.setdefault(wdata.workload_uuid, [])

        chunk_data = wdata.chunk_data["raw"]

        require_updating = False
        for itr in chunk_data:
            if "output" not in itr:
                itr["output"] = {"additive": [], "complete": []}
                if "scenario_output" in itr and itr["scenario_output"]["data"]:
                    itr["output"]["additive"].append({
                        "items":
                        list(itr["scenario_output"]["data"].items()),
                        "title":
                        "Scenario output",
                        "description":
                        "",
                        "chart":
                        "OutputStackedAreaChart"
                    })
                    del itr["scenario_output"]
                require_updating = True

        if require_updating:
            connection.execute(workload_data_helper.update().where(
                workload_data_helper.c.uuid == wdata.uuid).values(
                    chunk_data={"raw": chunk_data}))

        workloads[wdata.workload_uuid].extend(chunk_data)

    for workload in connection.execute(workload_helper.select()):
        if workload.uuid not in workloads or not workloads[workload.uuid]:
            continue
        data = sorted(workloads[workload.uuid],
                      key=lambda itr: itr["timestamp"])

        start_time = data[0]["timestamp"]

        atomics = collections.OrderedDict()

        for itr in workloads[workload.uuid]:
            merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
            for name, value in merged_atomic.items():
                duration = value["duration"]
                count = value["count"]
                if name not in atomics or count > atomics[name]["count"]:
                    atomics[name] = {
                        "min_duration": duration,
                        "max_duration": duration,
                        "count": count
                    }
                elif count == atomics[name]["count"]:
                    if duration < atomics[name]["min_duration"]:
                        atomics[name]["min_duration"] = duration
                    if duration > atomics[name]["max_duration"]:
                        atomics[name]["max_duration"] = duration

        durations_stat = charts.MainStatsTable({
            "total_iteration_count": len(workloads[workload.uuid]),
            "statistics": {"atomics": atomics}
        })

        for itr in workloads[workload.uuid]:
            durations_stat.add_iteration(itr)

        connection.execute(workload_helper.update().where(
            workload_helper.c.uuid == workload.uuid).values(
                start_time=start_time,
                statistics={
                    "durations": durations_stat.render(),
                    "atomics": atomics
                }))
Code example #18
def upgrade():
    connection = op.get_bind()

    for workload in connection.execute(workload_helper.select()):
        full_data = []
        for wdata in connection.execute(
                workload_data_helper.select(
                    workload_data_helper.c.workload_uuid == workload.uuid)):
            chunk_data = wdata.chunk_data["raw"]

            require_updating = False
            for itr in chunk_data:
                if "output" not in itr:
                    itr["output"] = {"additive": [], "complete": []}
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        items = list(itr["scenario_output"]["data"].items())
                        itr["output"]["additive"].append({
                            "items":
                            items,
                            "title":
                            "Scenario output",
                            "description":
                            "",
                            "chart":
                            "OutputStackedAreaChart"
                        })
                        del itr["scenario_output"]
                    require_updating = True
                if isinstance(itr["atomic_actions"], dict):
                    new_atomic_actions = []
                    started_at = itr["timestamp"]
                    for name, d in itr["atomic_actions"].items():
                        finished_at = started_at + d
                        new_atomic_actions.append({
                            "name": name,
                            "children": [],
                            "started_at": started_at,
                            "finished_at": finished_at
                        })
                        started_at = finished_at
                    itr["atomic_actions"] = new_atomic_actions
                    require_updating = True

                if itr.get("error"):
                    _mark_the_last_as_an_error(itr["atomic_actions"])
                    require_updating = True

            if require_updating:
                connection.execute(workload_data_helper.update().where(
                    workload_data_helper.c.uuid == wdata.uuid).values(
                        chunk_data={"raw": chunk_data}))

            full_data.extend(chunk_data)

        if full_data:
            full_data.sort(key=lambda itr: itr["timestamp"])

            start_time = full_data[0]["timestamp"]

            durations_stat = charts.MainStatsTable(
                {"total_iteration_count": len(full_data)})

            for itr in full_data:
                durations_stat.add_iteration(itr)

            connection.execute(workload_helper.update().where(
                workload_helper.c.uuid == workload.uuid).values(
                    start_time=start_time,
                    statistics={"durations": durations_stat.to_dict()}))