def __exit__(self, exc_type, exc_value, exc_traceback):
    self.finish = time.time()
    self.is_done.set()
    self.aborting_checker.join()
    self.thread.join()
    self.event_thread.join()
    if exc_type:
        self.sla_checker.set_unexpected_failure(exc_value)

    if objects.Task.get_status(
            self.task["uuid"]) == consts.TaskStatus.ABORTED:
        self.sla_checker.set_aborted_manually()

    # NOTE(boris-42): Sort in order of starting instead of order of ending
    self.results.sort(key=lambda x: x["timestamp"])

    load_duration = max(self.load_finished_at - self.load_started_at, 0)
    LOG.info("Load duration is: %s"
             % utils.format_float_to_str(load_duration))
    LOG.info("Full runner duration is: %s"
             % utils.format_float_to_str(self.runner.run_duration))
    LOG.info("Full duration is %s"
             % utils.format_float_to_str(self.finish - self.start))

    self.task.append_results(self.key, {
        "raw": self.results,
        "load_duration": load_duration,
        "full_duration": self.finish - self.start,
        "sla": self.sla_checker.results(),
        "hooks": self.hook_executor.results(),
    })
def __exit__(self, exc_type, exc_value, exc_traceback):
    self.finish = time.time()
    self.is_done.set()
    self.aborting_checker.join()
    self.thread.join()
    if exc_type:
        self.sla_checker.set_unexpected_failure(exc_value)

    if objects.Task.get_status(
            self.task["uuid"]) == consts.TaskStatus.ABORTED:
        self.sla_checker.set_aborted_manually()

    # NOTE(boris-42): Sort in order of starting instead of order of ending
    self.results.sort(key=lambda x: x["timestamp"])

    load_duration = max(self.load_finished_at - self.load_started_at, 0)
    LOG.info("Load duration is: %s"
             % utils.format_float_to_str(load_duration))
    LOG.info("Full runner duration is: %s"
             % utils.format_float_to_str(self.runner.run_duration))
    LOG.info("Full duration is %s"
             % utils.format_float_to_str(self.finish - self.start))

    self.task.append_results(self.key, {
        "raw": self.results,
        "load_duration": load_duration,
        "full_duration": self.finish - self.start,
        "sla": self.sla_checker.results()
    })
def __exit__(self, exc_type, exc_value, exc_traceback): self.finish = time.time() self.is_done.set() self.aborting_checker.join() self.thread.join() if exc_type: self.sla_checker.set_unexpected_failure(exc_value) if objects.Task.get_status( self.task["uuid"]) == consts.TaskStatus.ABORTED: self.sla_checker.set_aborted_manually() load_duration = max(self.load_finished_at - self.load_started_at, 0) LOG.info("Load duration is: %s" % utils.format_float_to_str(load_duration)) LOG.info("Full runner duration is: %s" % utils.format_float_to_str(self.runner.run_duration)) LOG.info("Full duration is: %s" % utils.format_float_to_str(self.finish - self.start)) results = {} if self.workload_cfg["hooks"]: self.event_thread.join() results["hooks_results"] = self.hook_executor.results() if self.results: # NOTE(boris-42): Sort in order of starting # instead of order of ending self.results.sort(key=lambda x: x["timestamp"]) self.workload.add_workload_data(self.workload_data_count, {"raw": self.results}) start_time = (self.load_started_at if self.load_started_at != float("inf") else None) self.workload.set_results(load_duration=load_duration, full_duration=(self.finish - self.start), sla_results=self.sla_checker.results(), start_time=start_time, contexts_results=self._cm.contexts_results(), **results)
def __exit__(self, exc_type, exc_value, exc_traceback): self.finish = time.time() self.is_done.set() self.aborting_checker.join() self.thread.join() if exc_type: self.sla_checker.set_unexpected_failure(exc_value) if objects.Task.get_status( self.task["uuid"]) == consts.TaskStatus.ABORTED: self.sla_checker.set_aborted_manually() # NOTE(boris-42): Sort in order of starting instead of order of ending self.results.sort(key=lambda x: x["timestamp"]) load_duration = max(self.load_finished_at - self.load_started_at, 0) LOG.info("Load duration is: %s" % utils.format_float_to_str( load_duration)) LOG.info("Full runner duration is: %s" % utils.format_float_to_str(self.runner.run_duration)) LOG.info("Full duration is %s" % utils.format_float_to_str( self.finish - self.start)) results = { "load_duration": load_duration, "full_duration": self.finish - self.start, "sla": self.sla_checker.results(), } self.runner.send_event(type="load_finished", value=results) if "hooks" in self.key["kw"]: self.event_thread.join() results["hooks"] = self.hook_executor.results() set_trace('stop_trace', self.task["uuid"]) write_trace_to_file(self.task["uuid"]) self.workload.add_workload_data({"raw": self.results}) self.workload.set_results(results)
def detailed(self, api, task_id=None, iterations_data=False):
    """Print detailed information about given task.

    :param task_id: str, task uuid
    :param iterations_data: bool, include results for each iteration
    """
    task = api.task.get_detailed(task_id, extended_results=True)

    if not task:
        print("The task %s can not be found" % task_id)
        return 1

    print()
    print("-" * 80)
    print(_("Task %(task_id)s: %(status)s")
          % {"task_id": task_id, "status": task["status"]})

    if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
            consts.TaskStatus.VALIDATION_FAILED):
        print("-" * 80)
        verification = yaml.safe_load(task["verification_log"])
        if logging.is_debug():
            print(yaml.safe_load(verification["trace"]))
        else:
            print(verification["etype"])
            print(verification["msg"])
            print(_("\nFor more details run:\nrally -d task detailed %s")
                  % task["uuid"])
        return 0
    elif task["status"] not in [consts.TaskStatus.FINISHED,
                                consts.TaskStatus.ABORTED]:
        print("-" * 80)
        print(_("\nThe task %s marked as '%s'. Results "
                "available when it is '%s'.")
              % (task_id, task["status"], consts.TaskStatus.FINISHED))
        return 0

    for result in task["results"]:
        key = result["key"]
        print("-" * 80)
        print()
        print("test scenario %s" % key["name"])
        print("args position %s" % key["pos"])
        print("args values:")
        print(json.dumps(key["kw"], indent=2))
        print()

        iterations = []
        iterations_headers = ["iteration", "duration"]
        iterations_actions = []
        output = []
        task_errors = []
        if iterations_data:
            for i, atomic_name in enumerate(result["info"]["atomic"], 1):
                action = "%i. %s" % (i, atomic_name)
                iterations_headers.append(action)
                iterations_actions.append((atomic_name, action))

        for idx, itr in enumerate(result["iterations"], 1):
            if iterations_data:
                row = {"iteration": idx, "duration": itr["duration"]}
                for name, action in iterations_actions:
                    row[action] = itr["atomic_actions"].get(name, 0)
                iterations.append(row)

            if "output" in itr:
                iteration_output = itr["output"]
            else:
                iteration_output = {"additive": [], "complete": []}

                # NOTE(amaretskiy): "scenario_output" is supported
                #                   for backward compatibility
                if ("scenario_output" in itr
                        and itr["scenario_output"]["data"]):
                    iteration_output["additive"].append(
                        {"data": itr["scenario_output"]["data"].items(),
                         "title": "Scenario output",
                         "description": "",
                         "chart_plugin": "StackedArea"})

            for idx, additive in enumerate(iteration_output["additive"]):
                if len(output) <= idx + 1:
                    output_table = plot.charts.OutputStatsTable(
                        result["info"], title=additive["title"])
                    output.append(output_table)
                output[idx].add_iteration(additive["data"])

            if itr.get("error"):
                task_errors.append(TaskCommands._format_task_error(itr))

        self._print_task_errors(task_id, task_errors)

        cols = plot.charts.MainStatsTable.columns
        float_cols = result["info"]["stat"]["cols"][1:7]
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))
        rows = [dict(zip(cols, r)) for r in result["info"]["stat"]["rows"]]
        cliutils.print_list(rows,
                            fields=cols,
                            formatters=formatters,
                            table_label="Response Times (sec)",
                            sortby_index=None)
        print()

        if iterations_data:
            formatters = dict(zip(iterations_headers[1:],
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in iterations_headers[1:]]))
            cliutils.print_list(iterations,
                                fields=iterations_headers,
                                table_label="Atomics per iteration",
                                formatters=formatters)
            print()

        if output:
            cols = plot.charts.OutputStatsTable.columns
            float_cols = cols[1:7]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))

            for out in output:
                data = out.render()
                rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                if rows:
                    # NOTE(amaretskiy): print title explicitly because
                    #                   prettytable fails if title length
                    #                   is too long
                    print(data["title"])
                    cliutils.print_list(rows, fields=cols,
                                        formatters=formatters)
                    print()

        print(_("Load duration: %s")
              % rutils.format_float_to_str(result["info"]["load_duration"]))
        print(_("Full duration: %s")
              % rutils.format_float_to_str(result["info"]["full_duration"]))

    print("\nHINTS:")
    print(_("* To plot HTML graphics with this data, run:"))
    print("\trally task report %s --out output.html\n" % task["uuid"])
    print(_("* To generate a JUnit report, run:"))
    print("\trally task report %s --junit --out output.xml\n" % task["uuid"])
    print(_("* To get raw JSON output of task results, run:"))
    print("\trally task results %s\n" % task["uuid"])
def details(self):
    return (_("Current degradation: %s%% - %s") %
            (utils.format_float_to_str(self.degradation.result() or 0.0),
             self.status()))
def test_format_float_to_str(self, num_float, num_str):
    # assertEqual is the non-deprecated spelling (assertEquals was removed
    # in Python 3.12).
    self.assertEqual(num_str, utils.format_float_to_str(num_float))
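# A minimal sketch (an assumption, not taken from the source) of how the
# parameterized test above could be wired up with the ddt library, which
# Rally's test suite commonly uses: each (input, expected) pair is unpacked
# into the num_float/num_str arguments. The class name, import path and the
# sample pairs below are hypothetical placeholders; the expected strings must
# match whatever format_float_to_str actually returns.
import unittest

import ddt

from rally.common import utils


@ddt.ddt
class FormatFloatToStrTestCase(unittest.TestCase):

    @ddt.data((1.0, "1.0"), (2.5, "2.5"))  # placeholder (input, expected)
    @ddt.unpack
    def test_format_float_to_str(self, num_float, num_str):
        self.assertEqual(num_str, utils.format_float_to_str(num_float))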
def _detailed(self, api, task_id=None, iterations_data=False):
    """Print detailed information about given task."""
    task = api.task.get(task_id=task_id, detailed=True)

    print()
    print("-" * 80)
    print("Task %(task_id)s: %(status)s"
          % {"task_id": task_id, "status": task["status"]})

    if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
            consts.TaskStatus.VALIDATION_FAILED):
        print("-" * 80)
        validation = task["validation_result"]
        if logging.is_debug():
            print(yaml.safe_load(validation["trace"]))
        else:
            print(validation["etype"])
            print(validation["msg"])
            print("\nFor more details run:\nrally -d task detailed %s"
                  % task["uuid"])
        return 0
    elif task["status"] not in [consts.TaskStatus.FINISHED,
                                consts.TaskStatus.ABORTED]:
        print("-" * 80)
        print("\nThe task %s marked as '%s'. Results "
              "available when it is '%s'."
              % (task_id, task["status"], consts.TaskStatus.FINISHED))
        return 0

    for workload in itertools.chain(
            *[s["workloads"] for s in task["subtasks"]]):
        print("-" * 80)
        print()
        print("test scenario %s" % workload["name"])
        print("args position %s" % workload["position"])
        print("args values:")
        print(json.dumps(
            {"args": workload["args"],
             "runner": workload["runner"],
             "context": workload["context"],
             "sla": workload["sla"],
             "hooks": [r["config"] for r in workload["hooks"]]},
            indent=2))
        print()

        duration_stats = workload["statistics"]["durations"]

        iterations = []
        iterations_headers = ["iteration", "duration"]
        iterations_actions = []
        output = []
        task_errors = []
        if iterations_data:
            atomic_names = [a["display_name"]
                            for a in duration_stats["atomics"]]
            for i, atomic_name in enumerate(atomic_names, 1):
                action = "%i. %s" % (i, atomic_name)
                iterations_headers.append(action)
                iterations_actions.append((atomic_name, action))

        for idx, itr in enumerate(workload["data"], 1):
            if iterations_data:
                row = {"iteration": idx, "duration": itr["duration"]}
                for name, action in iterations_actions:
                    atomic_actions = atomic.merge_atomic_actions(
                        itr["atomic_actions"])
                    row[action] = atomic_actions.get(
                        name, {}).get("duration", 0)
                iterations.append(row)

            if "output" in itr:
                iteration_output = itr["output"]
            else:
                iteration_output = {"additive": [], "complete": []}

            for idx, additive in enumerate(iteration_output["additive"]):
                if len(output) <= idx + 1:
                    output_table = charts.OutputStatsTable(
                        workload, title=additive["title"])
                    output.append(output_table)
                output[idx].add_iteration(additive["data"])

            if itr.get("error"):
                task_errors.append(TaskCommands._format_task_error(itr))

        self._print_task_errors(task_id, task_errors)

        cols = charts.MainStatsTable.columns
        formatters = {
            "Action": lambda x: x["display_name"],
            "Min (sec)": lambda x: x["data"]["min"],
            "Median (sec)": lambda x: x["data"]["median"],
            "90%ile (sec)": lambda x: x["data"]["90%ile"],
            "95%ile (sec)": lambda x: x["data"]["95%ile"],
            "Max (sec)": lambda x: x["data"]["max"],
            "Avg (sec)": lambda x: x["data"]["avg"],
            "Success": lambda x: x["data"]["success"],
            "Count": lambda x: x["data"]["iteration_count"]
        }

        rows = []

        def make_flat(r, depth=0):
            if depth > 0:
                r["display_name"] = (" %s> %s"
                                     % ("-" * depth, r["display_name"]))
            rows.append(r)
            for children in r["children"]:
                make_flat(children, depth + 1)

        for row in itertools.chain(duration_stats["atomics"],
                                   [duration_stats["total"]]):
            make_flat(row)

        cliutils.print_list(rows,
                            fields=cols,
                            formatters=formatters,
                            normalize_field_names=True,
                            table_label="Response Times (sec)",
                            sortby_index=None)
        print()

        if iterations_data:
            formatters = dict(zip(iterations_headers[1:],
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in iterations_headers[1:]]))
            cliutils.print_list(iterations,
                                fields=iterations_headers,
                                table_label="Atomics per iteration",
                                formatters=formatters)
            print()

        if output:
            cols = charts.OutputStatsTable.columns
            float_cols = cols[1:7]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))

            for out in output:
                data = out.render()
                rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                if rows:
                    # NOTE(amaretskiy): print title explicitly because
                    #                   prettytable fails if title length
                    #                   is too long
                    print(data["title"])
                    cliutils.print_list(rows, fields=cols,
                                        formatters=formatters)
                    print()

        print("Load duration: %s"
              % rutils.format_float_to_str(workload["load_duration"]))
        print("Full duration: %s"
              % rutils.format_float_to_str(workload["full_duration"]))

    print("\nHINTS:")
    print("* To plot HTML graphics with this data, run:")
    print("\trally task report %s --out output.html\n" % task["uuid"])
    print("* To generate a JUnit report, run:")
    print("\trally task export %s --type junit --to output.xml\n"
          % task["uuid"])
    print("* To get raw JSON output of task results, run:")
    print("\trally task report %s --json --out output.json\n" % task["uuid"])

    if not task["pass_sla"]:
        print("At least one workload did not pass SLA criteria.\n")
        return 1
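# Standalone illustration (not part of the source) of the make_flat() helper
# used in the function above: it walks the nested "children" tree of
# per-action duration statistics depth-first and prefixes each display_name
# with a marker proportional to its depth, so nested atomic actions render
# as one flat, visually indented table. The sample dict below is made up
# purely for demonstration.
rows = []


def make_flat(r, depth=0):
    if depth > 0:
        r["display_name"] = " %s> %s" % ("-" * depth, r["display_name"])
    rows.append(r)
    for child in r["children"]:
        make_flat(child, depth + 1)


sample = {"display_name": "total",
          "children": [{"display_name": "nova.boot_server", "children": []},
                       {"display_name": "nova.delete_server", "children": []}]}
make_flat(sample)
print([r["display_name"] for r in rows])
# -> ['total', ' -> nova.boot_server', ' -> nova.delete_server']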
def detailed(self, task_id=None, iterations_data=False):
    """Print detailed information about given task.

    :param task_id: str, task uuid
    :param iterations_data: bool, include results for each iteration
    """
    task = api.Task.get_detailed(task_id, extended_results=True)

    if not task:
        print("The task %s can not be found" % task_id)
        return 1

    print()
    print("-" * 80)
    print(_("Task %(task_id)s: %(status)s")
          % {"task_id": task_id, "status": task["status"]})

    if task["status"] == consts.TaskStatus.FAILED:
        print("-" * 80)
        verification = yaml.safe_load(task["verification_log"])
        if logging.is_debug():
            print(yaml.safe_load(verification["trace"]))
        else:
            print(verification["etype"])
            print(verification["msg"])
            print(_("\nFor more details run:\nrally -d task detailed %s")
                  % task["uuid"])
        return 0
    elif task["status"] not in [consts.TaskStatus.FINISHED,
                                consts.TaskStatus.ABORTED]:
        print("-" * 80)
        print(_("\nThe task %s marked as '%s'. Results "
                "available when it is '%s'.")
              % (task_id, task["status"], consts.TaskStatus.FINISHED))
        return 0

    for result in task["results"]:
        key = result["key"]
        print("-" * 80)
        print()
        print("test scenario %s" % key["name"])
        print("args position %s" % key["pos"])
        print("args values:")
        print(json.dumps(key["kw"], indent=2))
        print()

        iterations = []
        iterations_headers = ["iteration", "duration"]
        iterations_actions = []
        output = []
        task_errors = []
        if iterations_data:
            for i, atomic_name in enumerate(result["info"]["atomic"], 1):
                action = "%i. %s" % (i, atomic_name)
                iterations_headers.append(action)
                iterations_actions.append((atomic_name, action))

        for idx, itr in enumerate(result["iterations"], 1):
            if iterations_data:
                row = {"iteration": idx, "duration": itr["duration"]}
                for name, action in iterations_actions:
                    row[action] = itr["atomic_actions"].get(name, 0)
                iterations.append(row)

            if "output" in itr:
                iteration_output = itr["output"]
            else:
                iteration_output = {"additive": [], "complete": []}

                # NOTE(amaretskiy): "scenario_output" is supported
                #                   for backward compatibility
                if ("scenario_output" in itr
                        and itr["scenario_output"]["data"]):
                    iteration_output["additive"].append(
                        {"data": itr["scenario_output"]["data"].items(),
                         "title": "Scenario output",
                         "description": "",
                         "chart_plugin": "StackedArea"})

            for idx, additive in enumerate(iteration_output["additive"]):
                if len(output) <= idx + 1:
                    output_table = plot.charts.OutputStatsTable(
                        result["info"], title=additive["title"])
                    output.append(output_table)
                output[idx].add_iteration(additive["data"])

            if itr.get("error"):
                task_errors.append(TaskCommands._format_task_error(itr))

        self._print_task_errors(task_id, task_errors)

        cols = plot.charts.MainStatsTable.columns
        float_cols = result["info"]["stat"]["cols"][1:7]
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))
        rows = [dict(zip(cols, r)) for r in result["info"]["stat"]["rows"]]
        cliutils.print_list(rows,
                            fields=cols,
                            formatters=formatters,
                            table_label="Response Times (sec)",
                            sortby_index=None)
        print()

        if iterations_data:
            formatters = dict(zip(iterations_headers[1:],
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in iterations_headers[1:]]))
            cliutils.print_list(iterations,
                                fields=iterations_headers,
                                table_label="Atomics per iteration",
                                formatters=formatters)
            print()

        if output:
            cols = plot.charts.OutputStatsTable.columns
            float_cols = cols[1:7]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))

            for out in output:
                data = out.render()
                rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                if rows:
                    # NOTE(amaretskiy): print title explicitly because
                    #                   prettytable fails if title length
                    #                   is too long
                    print(data["title"])
                    cliutils.print_list(rows, fields=cols,
                                        formatters=formatters)
                    print()

        print(_("Load duration: %s")
              % rutils.format_float_to_str(result["info"]["load_duration"]))
        print(_("Full duration: %s")
              % rutils.format_float_to_str(result["info"]["full_duration"]))

    print("\nHINTS:")
    print(_("* To plot HTML graphics with this data, run:"))
    print("\trally task report %s --out output.html\n" % task["uuid"])
    print(_("* To generate a JUnit report, run:"))
    print("\trally task report %s --junit --out output.xml\n" % task["uuid"])
    print(_("* To get raw JSON output of task results, run:"))
    print("\trally task results %s\n" % task["uuid"])
def detailed(self, api, task_id=None, iterations_data=False):
    """Print detailed information about given task."""
    task = api.task.get(task_id=task_id, detailed=True)

    if not task:
        print("The task %s can not be found" % task_id)
        return 1

    print()
    print("-" * 80)
    print("Task %(task_id)s: %(status)s"
          % {"task_id": task_id, "status": task["status"]})

    if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
            consts.TaskStatus.VALIDATION_FAILED):
        print("-" * 80)
        validation = task["validation_result"]
        if logging.is_debug():
            print(yaml.safe_load(validation["trace"]))
        else:
            print(validation["etype"])
            print(validation["msg"])
            print("\nFor more details run:\nrally -d task detailed %s"
                  % task["uuid"])
        return 0
    elif task["status"] not in [consts.TaskStatus.FINISHED,
                                consts.TaskStatus.ABORTED]:
        print("-" * 80)
        print("\nThe task %s marked as '%s'. Results "
              "available when it is '%s'."
              % (task_id, task["status"], consts.TaskStatus.FINISHED))
        return 0

    for workload in itertools.chain(
            *[s["workloads"] for s in task["subtasks"]]):
        print("-" * 80)
        print()
        print("test scenario %s" % workload["name"])
        print("args position %s" % workload["position"])
        print("args values:")
        print(json.dumps(
            {"args": workload["args"],
             "runner": workload["runner"],
             "context": workload["context"],
             "sla": workload["sla"],
             "hooks": [r["config"] for r in workload["hooks"]]},
            indent=2))
        print()

        duration_stats = workload["statistics"]["durations"]

        iterations = []
        iterations_headers = ["iteration", "duration"]
        iterations_actions = []
        output = []
        task_errors = []
        if iterations_data:
            atomic_names = [a["display_name"]
                            for a in duration_stats["atomics"]]
            for i, atomic_name in enumerate(atomic_names, 1):
                action = "%i. %s" % (i, atomic_name)
                iterations_headers.append(action)
                iterations_actions.append((atomic_name, action))

        for idx, itr in enumerate(workload["data"], 1):
            if iterations_data:
                row = {"iteration": idx, "duration": itr["duration"]}
                for name, action in iterations_actions:
                    atomic_actions = atomic.merge_atomic_actions(
                        itr["atomic_actions"])
                    row[action] = atomic_actions.get(
                        name, {}).get("duration", 0)
                iterations.append(row)

            if "output" in itr:
                iteration_output = itr["output"]
            else:
                iteration_output = {"additive": [], "complete": []}

            for idx, additive in enumerate(iteration_output["additive"]):
                if len(output) <= idx + 1:
                    output_table = charts.OutputStatsTable(
                        workload, title=additive["title"])
                    output.append(output_table)
                output[idx].add_iteration(additive["data"])

            if itr.get("error"):
                task_errors.append(TaskCommands._format_task_error(itr))

        self._print_task_errors(task_id, task_errors)

        cols = charts.MainStatsTable.columns
        formatters = {
            "Action": lambda x: x["display_name"],
            "Min (sec)": lambda x: x["data"]["min"],
            "Median (sec)": lambda x: x["data"]["median"],
            "90%ile (sec)": lambda x: x["data"]["90%ile"],
            "95%ile (sec)": lambda x: x["data"]["95%ile"],
            "Max (sec)": lambda x: x["data"]["max"],
            "Avg (sec)": lambda x: x["data"]["avg"],
            "Success": lambda x: x["data"]["success"],
            "Count": lambda x: x["data"]["iteration_count"]
        }

        rows = []

        def make_flat(r, depth=0):
            if depth > 0:
                r["display_name"] = (" %s> %s"
                                     % ("-" * depth, r["display_name"]))
            rows.append(r)
            for children in r["children"]:
                make_flat(children, depth + 1)

        for row in itertools.chain(duration_stats["atomics"],
                                   [duration_stats["total"]]):
            make_flat(row)

        cliutils.print_list(rows,
                            fields=cols,
                            formatters=formatters,
                            normalize_field_names=True,
                            table_label="Response Times (sec)",
                            sortby_index=None)
        print()

        if iterations_data:
            formatters = dict(zip(iterations_headers[1:],
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in iterations_headers[1:]]))
            cliutils.print_list(iterations,
                                fields=iterations_headers,
                                table_label="Atomics per iteration",
                                formatters=formatters)
            print()

        if output:
            cols = charts.OutputStatsTable.columns
            float_cols = cols[1:7]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))

            for out in output:
                data = out.render()
                rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                if rows:
                    # NOTE(amaretskiy): print title explicitly because
                    #                   prettytable fails if title length
                    #                   is too long
                    print(data["title"])
                    cliutils.print_list(rows, fields=cols,
                                        formatters=formatters)
                    print()

        print("Load duration: %s"
              % rutils.format_float_to_str(workload["load_duration"]))
        print("Full duration: %s"
              % rutils.format_float_to_str(workload["full_duration"]))

    print("\nHINTS:")
    print("* To plot HTML graphics with this data, run:")
    print("\trally task report %s --out output.html\n" % task["uuid"])
    print("* To generate a JUnit report, run:")
    print("\trally task export %s --type junit --to output.xml\n"
          % task["uuid"])
    print("* To get raw JSON output of task results, run:")
    print("\trally task report %s --json --out output.json\n" % task["uuid"])