Пример #1
0
 def test_merge_atomic(self):
     """merge_atomic folds raw action records into per-name totals.

     Two "bar" records spanning 2s and 3s merge into a single entry with
     duration 5 and count 2; ordering of first appearance is preserved.
     """
     raw_actions = [
         {"name": "foo", "started_at": 4, "finished_at": 6},
         {"name": "bar", "started_at": 6, "finished_at": 8},
         {"name": "bar", "started_at": 8, "finished_at": 11},
     ]
     merged = atomic.merge_atomic(raw_actions)
     self.assertEqual(
         [("foo", {"duration": 2, "count": 1}),
          ("bar", {"duration": 5, "count": 2})],
         list(merged.items()))
Пример #2
0
def upgrade():
    """Migrate stored workload data to the unified "output" format.

    Two passes over the database:

    1. For every workload_data row, convert iterations that still carry the
       legacy "scenario_output" key into the newer additive "output" chart
       structure and write the chunk back (only if something changed).
    2. For every workload row, recompute start_time and per-atomic-action
       statistics from all of its (converted) iterations and store them.
    """
    connection = op.get_bind()
    # Maps workload_uuid -> flat list of all its iterations, accumulated
    # across every chunk so pass 2 can aggregate per workload.
    workloads = {}
    for wdata in connection.execute(workload_data_helper.select()):
        workloads.setdefault(wdata.workload_uuid, [])

        chunk_data = wdata.chunk_data["raw"]

        # Only issue an UPDATE for this chunk if at least one iteration
        # was actually rewritten.
        require_updating = False
        for itr in chunk_data:
            if "output" not in itr:
                itr["output"] = {"additive": [], "complete": []}
                if "scenario_output" in itr and itr["scenario_output"]["data"]:
                    # Legacy scenario_output becomes one additive
                    # stacked-area chart in the new format.
                    itr["output"]["additive"].append({
                        "items":
                        list(itr["scenario_output"]["data"].items()),
                        "title":
                        "Scenario output",
                        "description":
                        "",
                        "chart":
                        "OutputStackedAreaChart"
                    })
                    del itr["scenario_output"]
                require_updating = True

        if require_updating:
            connection.execute(workload_data_helper.update().where(
                workload_data_helper.c.uuid == wdata.uuid).values(
                    chunk_data={"raw": chunk_data}))

        workloads[wdata.workload_uuid].extend(chunk_data)

    for workload in connection.execute(workload_helper.select()):
        # Skip workloads that have no iteration data at all.
        if workload.uuid not in workloads or not workloads[workload.uuid]:
            continue
        data = sorted(workloads[workload.uuid],
                      key=lambda itr: itr["timestamp"])

        # Earliest iteration timestamp is the workload's start time.
        start_time = data[0]["timestamp"]

        atomics = collections.OrderedDict()

        # For each atomic action name, keep the stats of the iteration(s)
        # with the highest occurrence count; among iterations with that
        # same count, track the min and max merged duration.
        for itr in workloads[workload.uuid]:
            merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
            for name, value in merged_atomic.items():
                duration = value["duration"]
                count = value["count"]
                if name not in atomics or count > atomics[name]["count"]:
                    atomics[name] = {
                        "min_duration": duration,
                        "max_duration": duration,
                        "count": count
                    }
                elif count == atomics[name]["count"]:
                    if duration < atomics[name]["min_duration"]:
                        atomics[name]["min_duration"] = duration
                    if duration > atomics[name]["max_duration"]:
                        atomics[name]["max_duration"] = duration

        durations_stat = charts.MainStatsTable({
            "total_iteration_count":
            len(workloads[workload.uuid]),
            "statistics": {
                "atomics": atomics
            }
        })

        for itr in workloads[workload.uuid]:
            durations_stat.add_iteration(itr)

        connection.execute(workload_helper.update().where(
            workload_helper.c.uuid == workload.uuid).values(
                start_time=start_time,
                statistics={
                    "durations": durations_stat.render(),
                    "atomics": atomics
                }))
Пример #3
0
    def _load_task_results_file(self, api, task_id):
        """Load the json file which is created by `rally task results`.

        :param api: API instance; ``api.task.TASK_RESULT_SCHEMA`` is used
            to validate each result of the old list-based format
        :param task_id: path to the results file (``~`` is expanded)
        :returns: a task dict (``{"subtasks": [...]}``) rebuilt from the
            old per-scenario results list
        :raises FailedToLoadResults: if the file is not a list or a result
            does not match the schema
        """
        with open(os.path.expanduser(task_id)) as inp_js:
            # yaml.safe_load also parses plain JSON.
            tasks_results = yaml.safe_load(inp_js)

        # Guard clause instead of a deep if/else: only the old list-based
        # format is supported here. Use isinstance, not type() ==.
        if not isinstance(tasks_results, list):
            raise FailedToLoadResults(source=task_id, msg="Wrong format")

        task = {"subtasks": []}

        # Earliest iteration timestamp across *all* results in the file.
        start_time = float("inf")

        for result in tasks_results:
            try:
                jsonschema.validate(result, api.task.TASK_RESULT_SCHEMA)
            except jsonschema.ValidationError as e:
                raise FailedToLoadResults(source=task_id,
                                          msg=six.text_type(e))

            iter_count = 0
            failed_iter_count = 0
            # NOTE(review): if a result has no iterations, min_duration
            # stays inf in the workload dict (same as before this change).
            min_duration = float("inf")
            max_duration = 0

            atomics = collections.OrderedDict()

            for itr in result["result"]:
                if itr["timestamp"] < start_time:
                    start_time = itr["timestamp"]
                # NOTE(chenhb): back compatible for atomic_actions
                itr["atomic_actions"] = list(
                    tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                   itr["timestamp"]))

                iter_count += 1
                if itr.get("error"):
                    failed_iter_count += 1

                duration = itr.get("duration", 0)

                if duration > max_duration:
                    max_duration = duration

                # BUG FIX: the old guard "if min_duration and ..." stopped
                # tracking as soon as min_duration reached 0; a plain
                # comparison is both correct and simpler.
                if duration < min_duration:
                    min_duration = duration

                merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
                for key, value in merged_atomic.items():
                    duration = value["duration"]
                    count = value["count"]
                    # Keep stats for the highest occurrence count of each
                    # action; among equal counts track min/max duration.
                    if key not in atomics or count > atomics[key]["count"]:
                        atomics[key] = {
                            "min_duration": duration,
                            "max_duration": duration,
                            "count": count
                        }
                    elif count == atomics[key]["count"]:
                        atomics[key]["min_duration"] = min(
                            duration, atomics[key]["min_duration"])
                        atomics[key]["max_duration"] = max(
                            duration, atomics[key]["max_duration"])

            durations_stat = charts.MainStatsTable({
                "total_iteration_count": iter_count,
                "statistics": {
                    "atomics": atomics
                }
            })

            for itr in result["result"]:
                durations_stat.add_iteration(itr)

            updated_at = dt.datetime.strptime(result["created_at"],
                                              "%Y-%m-%dT%H:%M:%S")
            updated_at += dt.timedelta(seconds=result["full_duration"])
            updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
            # No SLAs specified means pass_sla is True (all() of empty).
            pass_sla = all(s.get("success") for s in result["sla"])
            workload = {
                "name": result["key"]["name"],
                "position": result["key"]["pos"],
                "description": result["key"].get("description", ""),
                "full_duration": result["full_duration"],
                "load_duration": result["load_duration"],
                "total_iteration_count": iter_count,
                "failed_iteration_count": failed_iter_count,
                "min_duration": min_duration,
                "max_duration": max_duration,
                "start_time": start_time,
                "created_at": result["created_at"],
                "updated_at": updated_at,
                "args": result["key"]["kw"]["args"],
                "runner": result["key"]["kw"]["runner"],
                "hooks": [{
                    "config": h
                } for h in result["key"]["kw"]["hooks"]],
                "sla": result["key"]["kw"]["sla"],
                "sla_results": {
                    "sla": result["sla"]
                },
                "pass_sla": pass_sla,
                "context": result["key"]["kw"]["context"],
                "data": sorted(result["result"],
                               key=lambda x: x["timestamp"]),
                "statistics": {
                    "durations": durations_stat.to_dict(),
                    "atomics": atomics
                },
            }
            task["subtasks"].append({"workloads": [workload]})
        return task
Пример #4
0
    def workload_set_results(self, workload_uuid, subtask_uuid, task_uuid,
                             load_duration, full_duration, start_time,
                             sla_results, hooks_results):
        """Aggregate iteration data and persist results for a workload.

        Computes iteration counters, min/max durations and per-atomic-action
        statistics from the stored workload data, updates the Workload row,
        and rolls durations / SLA status up into the parent Subtask and Task
        rows — all inside a single transaction.
        """
        session = get_session()
        with session.begin():
            workload_results = self._task_workload_data_get_all(workload_uuid)

            iter_count = len(workload_results)

            failed_iter_count = 0
            max_duration = 0
            # BUG FIX: this used to be initialized to 0 and guarded with
            # "if min_duration and ...", so the minimum could never move
            # away from 0. Seed with None so the first iteration sets it.
            min_duration = None

            for d in workload_results:
                if d.get("error"):
                    failed_iter_count += 1

                duration = d.get("duration", 0)

                if duration > max_duration:
                    max_duration = duration

                if min_duration is None or duration < min_duration:
                    min_duration = duration

            if min_duration is None:
                # No iterations at all: keep the historical default of 0.
                min_duration = 0

            atomics = collections.OrderedDict()

            # For each atomic action name, keep the stats of the
            # iteration(s) with the highest occurrence count; among equal
            # counts, track the min and max merged duration.
            for itr in workload_results:
                merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
                for name, value in merged_atomic.items():
                    duration = value["duration"]
                    count = value["count"]
                    if name not in atomics or count > atomics[name]["count"]:
                        atomics[name] = {
                            "min_duration": duration,
                            "max_duration": duration,
                            "count": count
                        }
                    elif count == atomics[name]["count"]:
                        if duration < atomics[name]["min_duration"]:
                            atomics[name]["min_duration"] = duration
                        if duration > atomics[name]["max_duration"]:
                            atomics[name]["max_duration"] = duration

            durations_stat = charts.MainStatsTable({
                "total_iteration_count": iter_count,
                "statistics": {
                    "atomics": atomics
                }
            })

            for itr in workload_results:
                durations_stat.add_iteration(itr)

            sla = sla_results or []
            # NOTE(ikhudoshyn): we call it 'pass_sla'
            # for the sake of consistency with other models
            # so if no SLAs were specified, then we assume pass_sla == True
            success = all(s.get("success") for s in sla)

            session.query(
                models.Workload).filter_by(uuid=workload_uuid).update({
                    "sla_results": {
                        "sla": sla
                    },
                    "context_execution": {},
                    "hooks": hooks_results or [],
                    "load_duration": load_duration,
                    "full_duration": full_duration,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "start_time": start_time,
                    "statistics": {
                        "durations": durations_stat.to_dict(),
                        "atomics": atomics
                    },
                    "pass_sla": success
                })

            # Roll load_duration and SLA status up to the parent rows.
            task_values = {
                "task_duration": models.Task.task_duration + load_duration
            }
            subtask_values = {
                "duration": models.Subtask.duration + load_duration
            }
            if not success:
                task_values["pass_sla"] = False
                subtask_values["pass_sla"] = False

            session.query(
                models.Task).filter_by(uuid=task_uuid).update(task_values)

            session.query(models.Subtask).filter_by(
                uuid=subtask_uuid).update(subtask_values)
Пример #5
0
def upgrade():
    """Migrate workload data: new "output" format and list atomic_actions.

    For each workload, walks all of its data chunks and performs two
    in-place conversions on every iteration:

    * legacy "scenario_output" is rewritten into the additive "output"
      chart structure;
    * dict-shaped "atomic_actions" ({name: duration}) is converted into
      the newer list-of-records form with started_at/finished_at stamps
      laid out consecutively from the iteration timestamp.

    Modified chunks are written back, then per-workload start_time and
    atomic-action statistics are recomputed and stored.
    """
    connection = op.get_bind()

    for workload in connection.execute(workload_helper.select()):
        # All iterations of this workload, accumulated across its chunks.
        full_data = []
        for wdata in connection.execute(
                workload_data_helper.select(
                    workload_data_helper.c.workload_uuid == workload.uuid)):
            chunk_data = wdata.chunk_data["raw"]

            # Only issue an UPDATE if at least one iteration was rewritten.
            require_updating = False
            for itr in chunk_data:
                if "output" not in itr:
                    itr["output"] = {"additive": [], "complete": []}
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        # Legacy scenario_output becomes one additive
                        # stacked-area chart in the new format.
                        items = list(itr["scenario_output"]["data"].items())
                        itr["output"]["additive"].append({
                            "items":
                            items,
                            "title":
                            "Scenario output",
                            "description":
                            "",
                            "chart":
                            "OutputStackedAreaChart"
                        })
                        del itr["scenario_output"]
                    require_updating = True
                if isinstance(itr["atomic_actions"], dict):
                    # Old dict form {name: duration}: synthesize
                    # started_at/finished_at by laying the actions out
                    # back-to-back from the iteration timestamp.
                    new_atomic_actions = []
                    started_at = itr["timestamp"]
                    for name, d in itr["atomic_actions"].items():
                        finished_at = started_at + d
                        new_atomic_actions.append({
                            "name": name,
                            "children": [],
                            "started_at": started_at,
                            "finished_at": finished_at
                        })
                        started_at = finished_at
                    itr["atomic_actions"] = new_atomic_actions
                    require_updating = True

            if require_updating:
                connection.execute(workload_data_helper.update().where(
                    workload_data_helper.c.uuid == wdata.uuid).values(
                        chunk_data={"raw": chunk_data}))

            full_data.extend(chunk_data)

        if full_data:
            full_data.sort(key=lambda itr: itr["timestamp"])

            # Earliest iteration timestamp is the workload's start time.
            start_time = full_data[0]["timestamp"]

            atomics = collections.OrderedDict()

            # For each atomic action name, keep the stats of the
            # iteration(s) with the highest occurrence count; among equal
            # counts, track the min and max merged duration.
            for itr in full_data:
                merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
                for name, value in merged_atomic.items():
                    duration = value["duration"]
                    count = value["count"]
                    if name not in atomics or count > atomics[name]["count"]:
                        atomics[name] = {
                            "min_duration": duration,
                            "max_duration": duration,
                            "count": count
                        }
                    elif count == atomics[name]["count"]:
                        if duration < atomics[name]["min_duration"]:
                            atomics[name]["min_duration"] = duration
                        if duration > atomics[name]["max_duration"]:
                            atomics[name]["max_duration"] = duration

            durations_stat = charts.MainStatsTable({
                "total_iteration_count":
                len(full_data),
                "statistics": {
                    "atomics": atomics
                }
            })

            for itr in full_data:
                durations_stat.add_iteration(itr)

            connection.execute(workload_helper.update().where(
                workload_helper.c.uuid == workload.uuid).values(
                    start_time=start_time,
                    statistics={
                        "durations": durations_stat.render(),
                        "atomics": atomics
                    }))