def test_list_atomic(self):
    """Wrapper over list-formatted atomic actions exposes mapping access."""
    atomic_actions = [
        {"name": "action_1", "started_at": 1, "finished_at": 2,
         "children": []},
        {"name": "action_2", "started_at": 2, "finished_at": 4,
         "children": []},
    ]
    wrapper = utils.WrapperForAtomicActions(atomic_actions)

    # Mapping-style access by name (values match finished_at - started_at).
    self.assertEqual(1, wrapper["action_1"])
    self.assertEqual(2, wrapper["action_2"])
    expected_items = collections.OrderedDict(
        [("action_1", 1), ("action_2", 2)]).items()
    self.assertEqual(expected_items, wrapper.items())

    # Sequence-style access by position returns the original dicts.
    self.assertEqual(atomic_actions[0], wrapper[0])
    self.assertEqual(atomic_actions[1], wrapper[1])

    # get() with a missing key returns None instead of raising.
    self.assertEqual(1, wrapper.get("action_1"))
    self.assertIsNone(wrapper.get("action_3"))

    self.assertEqual(2, len(wrapper))
    # Iteration yields the raw action dicts, first one first.
    self.assertEqual(atomic_actions[0], six.next(iter(wrapper)))
def results(self, api, task_id=None):
    """Display raw task results.

    This will produce a lot of output data about every iteration.

    :param task_id: Task uuid
    """
    task = api.task.get(task_id)
    finished_statuses = (consts.TaskStatus.FINISHED,
                         consts.TaskStatus.ABORTED)
    if task["status"] not in finished_statuses:
        # Raw results only exist once the task has stopped running.
        print(_("Task status is %s. Results available when it is one "
                "of %s.") % (task["status"],
                             ", ".join(finished_statuses)))
        return 1

    # TODO(chenhb): Ensure `rally task results` puts out old format.
    for result in task["results"]:
        for itr in result["data"]["raw"]:
            if "atomic_actions" in itr:
                # Normalize atomic actions through the compatibility
                # wrapper into an ordered mapping before printing.
                wrapped = tutils.WrapperForAtomicActions(
                    itr["atomic_actions"])
                itr["atomic_actions"] = collections.OrderedDict(
                    wrapped.items())

    results = []
    for x in task["results"]:
        results.append({
            "key": x["key"],
            "result": x["data"]["raw"],
            "sla": x["data"]["sla"],
            "hooks": x["data"].get("hooks", []),
            "load_duration": x["data"]["load_duration"],
            "full_duration": x["data"]["full_duration"],
            "created_at": x["created_at"],
        })
    print(json.dumps(results, sort_keys=False, indent=4))
def _test_atomic_action_timer(self, atomic_actions, name):
    """Assert the named atomic action recorded a float duration."""
    duration = tutils.WrapperForAtomicActions(atomic_actions).get(name)
    if duration is None:
        self.fail("The duration of atomic action '%s' should not be None. "
                  "None duration means that it had not called before the "
                  "check is executed." % name)
    self.assertIsInstance(duration, float)
def test__convert_new_atomic_actions(self):
    """Old-format mapping converts to the list-of-dicts representation."""
    # NOTE(review): despite this test's name, the method exercised here is
    # _convert_old_atomic_actions; the name is kept to avoid disturbing
    # test discovery/history.
    atomic_actions = collections.OrderedDict(
        [("action_1", 1), ("action_2", 2)])
    wrapper = utils.WrapperForAtomicActions(atomic_actions)
    expected = [
        {"name": "action_1", "started_at": 0, "finished_at": 1,
         "children": []},
        {"name": "action_2", "started_at": 1, "finished_at": 3,
         "children": []},
    ]
    self.assertEqual(
        expected, wrapper._convert_old_atomic_actions(atomic_actions))
def add_iteration(self, iteration):
    """Process the result of a single iteration.

    The call to add_iteration() will return True if all the SLA checks
    passed, and False otherwise.

    :param iteration: iteration result object
    """
    if isinstance(iteration, dict):
        # Replace the raw atomic actions with the compatibility wrapper
        # so SLA criteria see a uniform interface.
        raw_atomics = iteration.get("atomic_actions", None)
        iteration["atomic_actions"] = utils.WrapperForAtomicActions(
            raw_atomics)
    # NOTE(review): a materialized list (rather than a generator) is
    # preserved here — presumably so every criterion processes the
    # iteration even after one fails (no short-circuiting).
    checks = [sla.add_iteration(iteration) for sla in self.sla_criteria]
    return all(checks)
def results(self, api, task_id=None):
    """Display raw task results.

    This will produce a lot of output data about every iteration.

    :param task_id: Task uuid
    """
    task = api.task.get(task_id=task_id, detailed=True)
    finished_statuses = (consts.TaskStatus.FINISHED,
                         consts.TaskStatus.ABORTED)
    if task["status"] not in finished_statuses:
        # Raw results only exist once the task has stopped running.
        print(_("Task status is %s. Results available when it is one "
                "of %s.") % (task["status"], ", ".join(finished_statuses)))
        return 1

    # TODO(chenhb): Ensure `rally task results` puts out old format.
    # Normalize each iteration's atomic actions through the compatibility
    # wrapper into an OrderedDict before serializing.
    for workload in itertools.chain(
            *[s["workloads"] for s in task["subtasks"]]):
        for itr in workload["data"]:
            itr["atomic_actions"] = collections.OrderedDict(
                tutils.WrapperForAtomicActions(
                    itr["atomic_actions"]).items()
            )

    # Re-shape the detailed (v2) task structure into the old "results"
    # list format: one entry per workload across all subtasks.
    results = [
        {
            "key": {
                "name": w["name"],
                "description": w["description"],
                "pos": w["position"],
                "kw": {
                    "args": w["args"],
                    "runner": w["runner"],
                    "context": w["context"],
                    "sla": w["sla"],
                    "hooks": [r["config"] for r in w["hooks"]],
                }
            },
            "result": w["data"],
            "sla": w["sla_results"].get("sla", []),
            "hooks": w["hooks"],
            "load_duration": w["load_duration"],
            "full_duration": w["full_duration"],
            "created_at": w["created_at"]}
        for w in itertools.chain(
            *[s["workloads"] for s in task["subtasks"]])]
    print(json.dumps(results, sort_keys=False, indent=4))
def test_dict_atomic(self):
    """Wrapper over dict-formatted atomic actions keeps mapping behaviour."""
    atomic_actions = collections.OrderedDict(
        [("action_1", 1), ("action_2", 2)])
    wrapper = utils.WrapperForAtomicActions(atomic_actions)

    # Mapping-style lookups delegate to the underlying dict.
    self.assertEqual(1, wrapper["action_1"])
    self.assertEqual(2, wrapper["action_2"])
    self.assertEqual(atomic_actions.items(), wrapper.items())
    self.assertEqual(1, wrapper.get("action_1"))
    self.assertIsNone(wrapper.get("action_3"))
    self.assertEqual(2, len(wrapper))

    # The wrapper also compares equal to the list-of-dicts representation.
    expected_list = [
        {"name": "action_1", "started_at": 0, "finished_at": 1,
         "children": []},
        {"name": "action_2", "started_at": 0, "finished_at": 2,
         "children": []},
    ]
    self.assertEqual(expected_list, wrapper)
def _load_task_results_file(self, api, task_id):
    """Load the json file which is created by `rally task results` """
    path = os.path.expanduser(task_id)
    with open(path, "r") as inp_js:
        tasks_results = json.load(inp_js)
    for result in tasks_results:
        try:
            jsonschema.validate(result, api.task.TASK_RESULT_SCHEMA)
            # TODO(chenhb): back compatible for atomic_actions
            for r in result["result"]:
                r["atomic_actions"] = list(
                    tutils.WrapperForAtomicActions(
                        r["atomic_actions"], r["timestamp"]))
        except jsonschema.ValidationError as e:
            # Surface schema problems as a load failure tied to the file.
            raise FailedToLoadResults(source=task_id,
                                      msg=six.text_type(e))
    return tasks_results
def _load_task_results_file(self, api, task_id):
    """Load the json file which is created by `rally task results`.

    Supports two on-disk layouts: the old V1 "list of results" format,
    which is auto-ported into a V2 task dict, and the V2 "{'tasks': ...}"
    format, which is validated and lightly normalized in place.

    :param task_id: path to the results file (``~`` is expanded)
    :raises FailedToLoadResults: on schema violations or unknown format
    """
    with open(os.path.expanduser(task_id)) as inp_js:
        tasks_results = json.loads(inp_js.read())

    if isinstance(tasks_results, list):
        # it is an old format: build a synthetic V2 task envelope.
        task = {
            "version": 2,
            "title": "Task loaded from a file.",
            "description": "Auto-ported from task format V1.",
            "uuid": "n/a",
            "env_name": "n/a",
            "env_uuid": "n/a",
            "tags": [],
            "status": consts.TaskStatus.FINISHED,
            "subtasks": []
        }
        start_time = None

        for result in tasks_results:
            try:
                jsonschema.validate(result, OLD_TASK_RESULT_SCHEMA)
            except jsonschema.ValidationError as e:
                raise FailedToLoadResults(source=task_id,
                                          msg=six.text_type(e))

            # Per-workload aggregates, recomputed from the iterations.
            iter_count = 0
            failed_iter_count = 0
            min_duration = None
            max_duration = None

            for itr in result["result"]:
                # Overall start time = earliest iteration timestamp.
                if start_time is None or itr["timestamp"] < start_time:
                    start_time = itr["timestamp"]
                # NOTE(chenhb): back compatible for atomic_actions
                itr["atomic_actions"] = list(
                    tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                   itr["timestamp"]))

                iter_count += 1
                if itr.get("error"):
                    failed_iter_count += 1

                duration = itr.get("duration", 0)
                if max_duration is None or duration > max_duration:
                    max_duration = duration
                if min_duration is None or min_duration > duration:
                    min_duration = duration

            durations_stat = charts.MainStatsTable(
                {"total_iteration_count": iter_count})

            for itr in result["result"]:
                durations_stat.add_iteration(itr)

            # NOTE(review): "%Y-%d-%mT%H:%M:%S" (day before month) looks
            # deliberate — the old results format appears to store
            # created_at this way; confirm before "fixing" the format.
            created_at = dt.datetime.strptime(result["created_at"],
                                              "%Y-%d-%mT%H:%M:%S")
            updated_at = created_at + dt.timedelta(
                seconds=result["full_duration"])
            created_at = created_at.strftime(consts.TimeFormat.ISO8601)
            updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
            pass_sla = all(s.get("success") for s in result["sla"])
            # V2 keeps the runner type separately from the runner config.
            runner_type = result["key"]["kw"]["runner"].pop("type")
            # Re-pack each hook config into the V2 (action, trigger) shape.
            for h in result["hooks"]:
                trigger = h["config"]["trigger"]
                h["config"] = {
                    "description": h["config"].get("description"),
                    "action": (h["config"]["name"], h["config"]["args"]),
                    "trigger": (trigger["name"], trigger["args"])
                }
            workload = {
                "uuid": "n/a",
                "name": result["key"]["name"],
                "position": result["key"]["pos"],
                "description": result["key"].get("description", ""),
                "full_duration": result["full_duration"],
                "load_duration": result["load_duration"],
                "total_iteration_count": iter_count,
                "failed_iteration_count": failed_iter_count,
                "min_duration": min_duration,
                "max_duration": max_duration,
                "start_time": start_time,
                "created_at": created_at,
                "updated_at": updated_at,
                "args": result["key"]["kw"]["args"],
                "runner_type": runner_type,
                "runner": result["key"]["kw"]["runner"],
                "hooks": result["hooks"],
                "sla": result["key"]["kw"]["sla"],
                "sla_results": {
                    "sla": result["sla"]
                },
                "pass_sla": pass_sla,
                "contexts": result["key"]["kw"]["context"],
                "contexts_results": [],
                "data": sorted(result["result"],
                               key=lambda x: x["timestamp"]),
                "statistics": {
                    "durations": durations_stat.to_dict()
                },
            }
            task["subtasks"].append({
                "title": "A SubTask",
                "description": "",
                "workloads": [workload]
            })
        return [task]
    elif isinstance(tasks_results, dict) and "tasks" in tasks_results:
        # Already V2: validate and normalize each task in place.
        for task_result in tasks_results["tasks"]:
            try:
                jsonschema.validate(task_result, api.task.TASK_SCHEMA)
            except jsonschema.ValidationError as e:
                msg = six.text_type(e)
                raise exceptions.RallyException(
                    "ERROR: Invalid task result format\n\n\t%s" % msg)
            task_result.setdefault("env_name", "n/a")
            task_result.setdefault("env_uuid", "n/a")
            for subtask in task_result["subtasks"]:
                for workload in subtask["workloads"]:
                    workload.setdefault("contexts_results", [])
                    # Unpack single-entry {type: config} mappings into
                    # separate fields.
                    workload["runner_type"], workload["runner"] = list(
                        workload["runner"].items())[0]
                    workload["name"], workload["args"] = list(
                        workload.pop("scenario").items())[0]
        return tasks_results["tasks"]
    else:
        raise FailedToLoadResults(source=task_id, msg="Wrong format")
def results(self, api, task_id=None):
    """Display raw task results.

    This will produce a lot of output data about every iteration.

    :param task_id: Task uuid
    """
    task = api.task.get(task_id=task_id, detailed=True)
    finished_statuses = (consts.TaskStatus.FINISHED,
                         consts.TaskStatus.ABORTED)
    if task["status"] not in finished_statuses:
        # Raw results only exist once the task has stopped running.
        print(
            "Task status is %s. Results available when it is one of %s."
            % (task["status"], ", ".join(finished_statuses)))
        return 1

    # TODO(chenhb): Ensure `rally task results` puts out old format.
    # Normalize atomic actions into an OrderedDict via the compatibility
    # wrapper before serializing.
    for workload in itertools.chain(
            *[s["workloads"] for s in task["subtasks"]]):
        for itr in workload["data"]:
            itr["atomic_actions"] = collections.OrderedDict(
                tutils.WrapperForAtomicActions(
                    itr["atomic_actions"]).items())

    results = []
    for w in itertools.chain(*[s["workloads"] for s in task["subtasks"]]):
        # Old format carries the runner type inside the runner config.
        w["runner"]["type"] = w["runner_type"]

        def port_hook_cfg(h):
            # Flatten the V2 (action, trigger) tuples back into the old
            # name/args hook config layout. Mutates and returns h.
            h["config"] = {
                "name": h["config"]["action"][0],
                "args": h["config"]["action"][1],
                "description": h["config"].get("description", ""),
                "trigger": {
                    "name": h["config"]["trigger"][0],
                    "args": h["config"]["trigger"][1]
                }
            }
            return h

        hooks = [port_hook_cfg(h) for h in w["hooks"]]

        # NOTE(review): converting back to "%Y-%d-%mT%H:%M:%S" (day before
        # month) looks deliberate — the old results format appears to use
        # that ordering; confirm before "fixing" the format string.
        created_at = dt.datetime.strptime(w["created_at"],
                                          "%Y-%m-%dT%H:%M:%S")
        created_at = created_at.strftime("%Y-%d-%mT%H:%M:%S")

        results.append({
            "key": {
                "name": w["name"],
                "description": w["description"],
                "pos": w["position"],
                "kw": {
                    "args": w["args"],
                    "runner": w["runner"],
                    "context": w["contexts"],
                    "sla": w["sla"],
                    "hooks": [h["config"] for h in w["hooks"]],
                }
            },
            "result": w["data"],
            "sla": w["sla_results"].get("sla", []),
            "hooks": hooks,
            "load_duration": w["load_duration"],
            "full_duration": w["full_duration"],
            "created_at": created_at
        })
    print(json.dumps(results, sort_keys=False, indent=4))
def test_convert_new_atomic_actions(self, atomic_actions, expected):
    """Conversion of new-format atomic actions matches the expectation.

    The (atomic_actions, expected) pairs presumably come from a
    data-driven decorator applied outside this method.
    """
    wrapper = utils.WrapperForAtomicActions(atomic_actions)
    converted = wrapper._convert_new_atomic_actions(atomic_actions)
    self.assertEqual(expected, converted)
def _load_task_results_file(self, api, task_id):
    """Load the json file which is created by `rally task results`.

    Only the old V1 "list of results" layout is accepted here; it is
    ported into a V2-style task dict with recomputed per-workload
    statistics (including merged atomic-action aggregates).

    :param task_id: path to the results file (``~`` is expanded)
    :raises FailedToLoadResults: on schema violations or unknown format
    """
    with open(os.path.expanduser(task_id)) as inp_js:
        tasks_results = yaml.safe_load(inp_js)

    if type(tasks_results) == list:
        # it is an old format:
        task = {"subtasks": []}
        start_time = float("inf")

        for result in tasks_results:
            try:
                jsonschema.validate(result, api.task.TASK_RESULT_SCHEMA)
            except jsonschema.ValidationError as e:
                raise FailedToLoadResults(source=task_id,
                                          msg=six.text_type(e))

            # Per-workload aggregates, recomputed from the iterations.
            iter_count = 0
            failed_iter_count = 0
            min_duration = float("inf")
            max_duration = 0
            # name -> {min_duration, max_duration, count} across iterations
            atomics = collections.OrderedDict()

            for itr in result["result"]:
                # Overall start time = earliest iteration timestamp.
                if itr["timestamp"] < start_time:
                    start_time = itr["timestamp"]
                # NOTE(chenhb): back compatible for atomic_actions
                itr["atomic_actions"] = list(
                    tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                   itr["timestamp"]))

                iter_count += 1
                if itr.get("error"):
                    failed_iter_count += 1

                duration = itr.get("duration", 0)
                if duration > max_duration:
                    max_duration = duration
                # Truthiness guard: once min_duration reaches 0 it stays 0
                # (nothing can be smaller than 0 here anyway).
                if min_duration and min_duration > duration:
                    min_duration = duration

                merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
                for key, value in merged_atomic.items():
                    duration = value["duration"]
                    count = value["count"]
                    # Keep stats for the highest per-iteration count seen;
                    # only iterations with that same count widen min/max.
                    if key not in atomics or count > atomics[key]["count"]:
                        atomics[key] = {
                            "min_duration": duration,
                            "max_duration": duration,
                            "count": count
                        }
                    elif count == atomics[key]["count"]:
                        if duration < atomics[key]["min_duration"]:
                            atomics[key]["min_duration"] = duration
                        if duration > atomics[key]["max_duration"]:
                            atomics[key]["max_duration"] = duration

            durations_stat = charts.MainStatsTable({
                "total_iteration_count": iter_count,
                "statistics": {
                    "atomics": atomics
                }
            })

            for itr in result["result"]:
                durations_stat.add_iteration(itr)

            # updated_at = created_at + full_duration, rendered as ISO8601.
            updated_at = dt.datetime.strptime(result["created_at"],
                                              "%Y-%m-%dT%H:%M:%S")
            updated_at += dt.timedelta(seconds=result["full_duration"])
            updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)

            pass_sla = all(s.get("success") for s in result["sla"])

            workload = {
                "name": result["key"]["name"],
                "position": result["key"]["pos"],
                "description": result["key"].get("description", ""),
                "full_duration": result["full_duration"],
                "load_duration": result["load_duration"],
                "total_iteration_count": iter_count,
                "failed_iteration_count": failed_iter_count,
                "min_duration": min_duration,
                "max_duration": max_duration,
                "start_time": start_time,
                "created_at": result["created_at"],
                "updated_at": updated_at,
                "args": result["key"]["kw"]["args"],
                "runner": result["key"]["kw"]["runner"],
                "hooks": [{
                    "config": h
                } for h in result["key"]["kw"]["hooks"]],
                "sla": result["key"]["kw"]["sla"],
                "sla_results": {
                    "sla": result["sla"]
                },
                "pass_sla": pass_sla,
                "context": result["key"]["kw"]["context"],
                "data": sorted(result["result"],
                               key=lambda x: x["timestamp"]),
                "statistics": {
                    "durations": durations_stat.to_dict(),
                    "atomics": atomics
                },
            }
            task["subtasks"].append({"workloads": [workload]})
        return task
    else:
        raise FailedToLoadResults(source=task_id, msg="Wrong format")
def _load_task_results_file(self, api, task_id):
    """Load the json file which is created by `rally task results`.

    Only the old V1 "list of results" layout is accepted here; it is
    auto-ported into a V2 task dict with recomputed per-workload
    statistics.

    :param task_id: path to the results file (``~`` is expanded)
    :raises FailedToLoadResults: on schema violations or unknown format
    """
    with open(os.path.expanduser(task_id)) as inp_js:
        tasks_results = yaml.safe_load(inp_js)

    if type(tasks_results) == list:
        # it is an old format: build a synthetic V2 task envelope.
        task = {
            "version": 2,
            "title": "Task loaded from a file.",
            "description": "Auto-ported from task format V1.",
            "uuid": "n/a",
            "tags": [],
            "subtasks": []
        }
        start_time = None

        for result in tasks_results:
            try:
                jsonschema.validate(result, OLD_TASK_RESULT_SCHEMA)
            except jsonschema.ValidationError as e:
                raise FailedToLoadResults(source=task_id,
                                          msg=six.text_type(e))

            # Per-workload aggregates, recomputed from the iterations.
            iter_count = 0
            failed_iter_count = 0
            min_duration = None
            max_duration = None

            for itr in result["result"]:
                # Overall start time = earliest iteration timestamp.
                if start_time is None or itr["timestamp"] < start_time:
                    start_time = itr["timestamp"]
                # NOTE(chenhb): back compatible for atomic_actions
                itr["atomic_actions"] = list(
                    tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                   itr["timestamp"]))

                iter_count += 1
                if itr.get("error"):
                    failed_iter_count += 1

                duration = itr.get("duration", 0)
                if max_duration is None or duration > max_duration:
                    max_duration = duration
                if min_duration is None or min_duration > duration:
                    min_duration = duration

            durations_stat = charts.MainStatsTable(
                {"total_iteration_count": iter_count})

            for itr in result["result"]:
                durations_stat.add_iteration(itr)

            # updated_at = created_at + full_duration, rendered as ISO8601.
            updated_at = dt.datetime.strptime(result["created_at"],
                                              "%Y-%m-%dT%H:%M:%S")
            updated_at += dt.timedelta(seconds=result["full_duration"])
            updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)

            pass_sla = all(s.get("success") for s in result["sla"])

            workload = {
                "uuid": "n/a",
                "name": result["key"]["name"],
                "position": result["key"]["pos"],
                "description": result["key"].get("description", ""),
                "full_duration": result["full_duration"],
                "load_duration": result["load_duration"],
                "total_iteration_count": iter_count,
                "failed_iteration_count": failed_iter_count,
                "min_duration": min_duration,
                "max_duration": max_duration,
                "start_time": start_time,
                "created_at": result["created_at"],
                "updated_at": updated_at,
                "args": result["key"]["kw"]["args"],
                "runner": result["key"]["kw"]["runner"],
                "hooks": [{
                    "config": h
                } for h in result["key"]["kw"]["hooks"]],
                "sla": result["key"]["kw"]["sla"],
                "sla_results": {
                    "sla": result["sla"]
                },
                "pass_sla": pass_sla,
                "context": result["key"]["kw"]["context"],
                "data": sorted(result["result"],
                               key=lambda x: x["timestamp"]),
                "statistics": {
                    "durations": durations_stat.to_dict()
                },
            }
            task["subtasks"].append({
                "title": "A SubTask",
                "description": "",
                "workloads": [workload]
            })
        return task
    else:
        raise FailedToLoadResults(source=task_id, msg="Wrong format")
def _test_atomic_action_timer(self, atomic_actions, name):
    """Check that the named atomic action recorded a float duration."""
    wrapper = tutils.WrapperForAtomicActions(atomic_actions)
    duration = wrapper.get(name)
    self.assertIsNotNone(duration)
    self.assertIsInstance(duration, float)