def __exit__(self, exc_type, exc_value, exc_traceback):
    self.finish = time.time()
    self.is_done.set()
    self.aborting_checker.join()
    self.thread.join()

    if exc_type:
        self.sla_checker.set_unexpected_failure(exc_value)

    if objects.Task.get_status(
            self.task["uuid"]) == consts.TaskStatus.ABORTED:
        self.sla_checker.set_aborted_manually()

    load_duration = max(self.load_finished_at - self.load_started_at, 0)

    LOG.info("Load duration is: %s"
             % strutils.format_float_to_str(load_duration))
    LOG.info("Full runner duration is: %s"
             % strutils.format_float_to_str(self.runner.run_duration))
    LOG.info("Full duration is: %s"
             % strutils.format_float_to_str(self.finish - self.start))

    results = {}
    if self.workload_cfg["hooks"]:
        self.event_thread.join()
        results["hooks_results"] = self.hook_executor.results()

    if self.results:
        # NOTE(boris-42): Sort in order of starting
        #                 instead of order of ending
        self.results.sort(key=lambda x: x["timestamp"])
        self.workload.add_workload_data(self.workload_data_count,
                                        {"raw": self.results})

    start_time = (self.load_started_at
                  if self.load_started_at != float("inf") else None)
    self.workload.set_results(
        load_duration=load_duration,
        full_duration=(self.finish - self.start),
        sla_results=self.sla_checker.results(),
        start_time=start_time,
        contexts_results=self._cm.contexts_results(),
        **results)
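# Hedged, self-contained sketch (not part of the original class) of the
# load_duration clamping above: load_started_at uses float("inf") as its
# sentinel value (see the start_time computation), so if no iteration ever
# started, the raw difference would be -inf and max(..., 0) pins the
# reported load phase to zero. The helper name is hypothetical.
def _example_load_duration(load_started_at, load_finished_at):
    # Clamp to zero so a run with no iterations reports a 0-second load phase
    return max(load_finished_at - load_started_at, 0)

# _example_load_duration(float("inf"), 0.0) -> 0 (nothing ran)
# _example_load_duration(10.0, 12.5)        -> 2.5 seconds of load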
def _detailed(self, api, task_id=None, iterations_data=False,
              filters=None):
    """Print detailed information about given task."""
    scenarios_filter = []
    only_sla_failures = False
    for f in filters or []:
        if f.startswith("scenario="):
            filter_value = f.split("=")[1]
            scenarios_filter = filter_value.split(",")
        if f == "sla-failures":
            only_sla_failures = True

    task = api.task.get(task_id=task_id, detailed=True)

    print()
    print("-" * 80)
    print("Task %(task_id)s: %(status)s"
          % {"task_id": task_id, "status": task["status"]})

    if task["status"] in (consts.TaskStatus.CRASHED,
                          consts.TaskStatus.VALIDATION_FAILED):
        print("-" * 80)
        validation = task["validation_result"]
        if logging.is_debug():
            print(yaml.safe_load(validation["trace"]))
        else:
            print(validation["etype"])
            print(validation["msg"])
            print("\nFor more details run:\nrally -d task detailed %s"
                  % task["uuid"])
        return 0
    elif task["status"] not in [consts.TaskStatus.FINISHED,
                                consts.TaskStatus.ABORTED]:
        print("-" * 80)
        print("\nThe task %s is marked as '%s'. Results are "
              "available when it is '%s'."
              % (task_id, task["status"], consts.TaskStatus.FINISHED))
        return 0

    for workload in itertools.chain(
            *[s["workloads"] for s in task["subtasks"]]):
        if scenarios_filter and workload["name"] not in scenarios_filter:
            continue
        if only_sla_failures and workload["pass_sla"]:
            continue

        print("-" * 80)
        print()
        print("test scenario %s" % workload["name"])
        print("args position %s" % workload["position"])
        print("args values:")
        print(json.dumps(
            {"args": workload["args"],
             "runner": workload["runner"],
             "contexts": workload["contexts"],
             "sla": workload["sla"],
             "hooks": [r["config"] for r in workload["hooks"]]},
            indent=2))
        print()

        duration_stats = workload["statistics"]["durations"]

        iterations = []
        iterations_headers = ["iteration", "duration"]
        iterations_actions = []
        output = []
        task_errors = []
        if iterations_data:
            atomic_names = [a["display_name"]
                            for a in duration_stats["atomics"]]
            for i, atomic_name in enumerate(atomic_names, 1):
                action = "%i. %s" % (i, atomic_name)
                iterations_headers.append(action)
                iterations_actions.append((atomic_name, action))

        for idx, itr in enumerate(workload["data"], 1):
            if iterations_data:
                row = {"iteration": idx, "duration": itr["duration"]}
                for name, action in iterations_actions:
                    atomic_actions = atomic.merge_atomic_actions(
                        itr["atomic_actions"])
                    row[action] = atomic_actions.get(name, {}).get(
                        "duration", 0)
                iterations.append(row)

            if "output" in itr:
                iteration_output = itr["output"]
            else:
                iteration_output = {"additive": [], "complete": []}

            for out_idx, additive in enumerate(
                    iteration_output["additive"]):
                if len(output) <= out_idx + 1:
                    output_table = charts.OutputStatsTable(
                        workload, title=additive["title"])
                    output.append(output_table)
                output[out_idx].add_iteration(additive["data"])

            if itr.get("error"):
                task_errors.append(TaskCommands._format_task_error(itr))

        self._print_task_errors(task_id, task_errors)

        cols = charts.MainStatsTable.columns
        formatters = {
            "Action": lambda x: x["display_name"],
            "Min (sec)": lambda x: x["data"]["min"],
            "Median (sec)": lambda x: x["data"]["median"],
            "90%ile (sec)": lambda x: x["data"]["90%ile"],
            "95%ile (sec)": lambda x: x["data"]["95%ile"],
            "Max (sec)": lambda x: x["data"]["max"],
            "Avg (sec)": lambda x: x["data"]["avg"],
            "Success": lambda x: x["data"]["success"],
            "Count": lambda x: x["data"]["iteration_count"]
        }

        rows = []

        def make_flat(r, depth=0):
            if depth > 0:
                r["display_name"] = (" %s> %s"
                                     % ("-" * depth, r["display_name"]))
            rows.append(r)
            for child in r["children"]:
                make_flat(child, depth + 1)

        for row in itertools.chain(duration_stats["atomics"],
                                   [duration_stats["total"]]):
            make_flat(row)

        cliutils.print_list(rows,
                            fields=cols,
                            formatters=formatters,
                            normalize_field_names=True,
                            table_label="Response Times (sec)",
                            sortby_index=None)
        print()

        if iterations_data:
            formatters = dict(zip(
                iterations_headers[1:],
                [cliutils.pretty_float_formatter(col, 3)
                 for col in iterations_headers[1:]]))
            cliutils.print_list(iterations,
                                fields=iterations_headers,
                                table_label="Atomics per iteration",
                                formatters=formatters)
            print()

        if output:
            cols = charts.OutputStatsTable.columns
            float_cols = cols[1:7]
            formatters = dict(zip(
                float_cols,
                [cliutils.pretty_float_formatter(col, 3)
                 for col in float_cols]))
            for out in output:
                data = out.render()
                rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                if rows:
                    # NOTE(amaretskiy): print title explicitly because
                    #                   prettytable fails if title length
                    #                   is too long
                    print(data["title"])
                    cliutils.print_list(rows, fields=cols,
                                        formatters=formatters)
                    print()

        print("Load duration: %s"
              % strutils.format_float_to_str(workload["load_duration"]))
        print("Full duration: %s"
              % strutils.format_float_to_str(workload["full_duration"]))

    print("\nHINTS:")
    print("* To plot HTML graphics with this data, run:")
    print("\trally task report %s --out output.html\n" % task["uuid"])
    print("* To generate a JUnit report, run:")
    print("\trally task export %s --type junit --to output.xml\n"
          % task["uuid"])
    print("* To get raw JSON output of task results, run:")
    print("\trally task report %s --json --out output.json\n"
          % task["uuid"])

    if not task["pass_sla"]:
        print("At least one workload did not pass SLA criteria.\n")
        return 1
def test_format_float_to_str(self, num_float, num_str):
    self.assertEqual(num_str, strutils.format_float_to_str(num_float))
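# Hedged sketch of how a parametrized test like the one above is typically
# wired up with the ddt library (which rally's test suite uses); the data
# pairs here are illustrative, not copied from the real test module, and
# assume the helper renders 1.0 as "1.0".
#
#   import ddt
#
#   @ddt.ddt
#   class FormatFloatToStrTestCase(test.TestCase):
#
#       @ddt.data((1.0, "1.0"), (0.0, "0.0"))
#       @ddt.unpack
#       def test_format_float_to_str(self, num_float, num_str):
#           self.assertEqual(num_str,
#                            strutils.format_float_to_str(num_float))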
def details(self):
    res = strutils.format_float_to_str(self.degradation.result() or 0.0)
    return "Current degradation: %s%% - %s" % (res, self.status())
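# For illustration (assumed values): with a measured degradation of 50.0,
# details() above would return a string like:
#
#   "Current degradation: 50.0% - <status>"
#
# where <status> is whatever self.status() reports for the pass/fail
# outcome. Note the doubled %% in the format string, which is what emits
# the literal percent sign after the formatted number.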
def format_float_to_str(num):
    """DEPRECATED. Use rally.utils.strutils.format_float_to_str instead."""
    return strutils.format_float_to_str(num)
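# Hedged note: this is a thin backward-compatibility shim. The docstring
# names the new home (rally.utils.strutils); the module holding this
# wrapper is the legacy import location, so existing call sites keep
# working while new code imports strutils directly:
#
#   from rally.utils import strutils
#   strutils.format_float_to_str(1.5)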