def test_task_get_detailed(self):
    """Detailed fetch exposes tag, validation log and stored results."""
    expected_validation = {"etype": "FooError",
                           "msg": "foo message",
                           "trace": "foo t/b"}
    task = self._create_task({"validation_result": expected_validation,
                              "tag": "bar"})
    result_key = {
        "name": "atata",
        "pos": 0,
        "kw": {"args": {"a": "A"},
               "context": {"c": "C"},
               "sla": {"s": "S"},
               "runner": {"r": "R", "type": "T"}},
    }
    result_data = {
        "raw": [],
        "sla": [{"s": "S", "success": True},
                {"1": "2", "success": True},
                {"a": "A", "success": True}],
        "load_duration": 13,
        "full_duration": 42,
        "hooks": [],
    }
    db.task_result_create(task["uuid"], result_key, result_data)

    detailed = db.task_get_detailed(task["uuid"])

    self.assertEqual(expected_validation,
                     json.loads(detailed["verification_log"]))
    self.assertEqual("bar", detailed["tag"])
    stored = detailed["results"]
    self.assertEqual(1, len(stored))
    self.assertEqual(result_key, stored[0]["key"])
    self.assertEqual(result_data, stored[0]["data"])
def test_task_get_detailed(self):
    """Workload data is merged into the legacy "results" representation."""
    expected_validation = {"etype": "FooError",
                           "msg": "foo message",
                           "trace": "foo t/b"}
    task = self._create_task({"validation_result": expected_validation,
                              "tags": ["bar"]})
    workload_key = {
        "name": "atata",
        "description": "tatata",
        "pos": 0,
        "kw": {"args": {"a": "A"},
               "context": {"c": "C"},
               "sla": {"s": "S"},
               "runner": {"r": "R", "type": "T"},
               "hooks": []},
    }
    sla_records = [{"s": "S", "success": True},
                   {"1": "2", "success": True},
                   {"a": "A", "success": True}]
    results_payload = {"sla": sla_records,
                       "load_duration": 13,
                       "full_duration": 42,
                       "hooks": []}

    # Populate the task through the subtask/workload API.
    subtask = db.subtask_create(task["uuid"], title="foo")
    workload = db.workload_create(task["uuid"], subtask["uuid"],
                                  workload_key)
    db.workload_data_create(task["uuid"], workload["uuid"], 0, {"raw": []})
    db.workload_set_results(workload["uuid"], results_payload)

    detailed = db.task_get_detailed(task["uuid"])

    self.assertEqual(expected_validation,
                     json.loads(detailed["verification_log"]))
    self.assertEqual(["bar"], detailed["tags"])
    stored = detailed["results"]
    self.assertEqual(1, len(stored))
    self.assertEqual(workload_key, stored[0]["key"])
    self.assertEqual({"raw": [],
                      "sla": [{"s": "S", "success": True},
                              {"1": "2", "success": True},
                              {"a": "A", "success": True}],
                      "load_duration": 13,
                      "full_duration": 42,
                      "hooks": []},
                     stored[0]["data"])
def test_task_get_detailed(self):
    """A stored key/data pair round-trips through task_get_detailed."""
    task = self._create_task()
    result_key = {"name": "atata"}
    result_data = {"a": "b", "c": "d"}
    db.task_result_create(task["uuid"], result_key, result_data)

    detailed = db.task_get_detailed(task["uuid"])

    stored = detailed["results"]
    self.assertEqual(len(stored), 1)
    self.assertEqual(stored[0]["key"], result_key)
    self.assertEqual(stored[0]["data"], result_data)
def detailed(self, task_id=None, iterations_data=False):
    """Display results table.

    :param task_id: Task uuid
    :param iterations_data: print detailed results for each iteration

    Prints detailed information of task.
    """

    def _print_iterations_data(raw_data):
        # Print one table row per iteration: iteration number, full
        # duration, then one numbered column per atomic action.
        headers = ["iteration", "full duration"]
        float_cols = ["full duration"]
        atomic_actions = []
        for row in raw_data:
            # find first non-error result to get atomic actions names
            # NOTE(review): there is no break here, so this actually keeps
            # the atomic actions of the LAST non-error row, not the first
            # — confirm whether that is intended.
            if not row["error"] and "atomic_actions" in row:
                atomic_actions = row["atomic_actions"].keys()
        for row in raw_data:
            if row["atomic_actions"]:
                # Build numbered column headers such as "1. <action>".
                for (c, a) in enumerate(atomic_actions, 1):
                    action = "%(no)i. %(action)s" % {"no": c, "action": a}
                    headers.append(action)
                    float_cols.append(action)
                break
        table_rows = []
        # Render every duration column with three decimal places.
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))
        for (c, r) in enumerate(raw_data, 1):
            dlist = [c]
            dlist.append(r["duration"])
            if r["atomic_actions"]:
                for action in atomic_actions:
                    # A missing or falsy atomic duration is shown as 0.
                    dlist.append(r["atomic_actions"].get(action) or 0)
            table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
        cliutils.print_list(table_rows,
                            fields=headers,
                            formatters=formatters)
        print()

    task = db.task_get_detailed(task_id)

    if task is None:
        print("The task %s can not be found" % task_id)
        return(1)

    print()
    print("-" * 80)
    print(_("Task %(task_id)s: %(status)s")
          % {"task_id": task_id, "status": task["status"]})

    if task["status"] == consts.TaskStatus.FAILED:
        # Failed task: show the stored verification log instead of results.
        print("-" * 80)
        verification = yaml.safe_load(task["verification_log"])
        if not logging.is_debug():
            # Non-debug mode prints only the first two log entries
            # (presumably error type and message — verify against writer).
            print(verification[0])
            print(verification[1])
            print()
            print(_("For more details run:\nrally -vd task detailed %s")
                  % task["uuid"])
        else:
            print(yaml.safe_load(verification[2]))
        return

    for result in task["results"]:
        key = result["key"]
        print("-" * 80)
        print()
        print("test scenario %s" % key["name"])
        print("args position %s" % key["pos"])
        print("args values:")
        print(json.dumps(key["kw"], indent=2))

        raw = result["data"]["raw"]
        table_cols = ["action", "min", "median",
                      "90%ile", "95%ile", "max",
                      "avg", "success", "count"]
        float_cols = ["min", "median", "90%ile", "95%ile", "max", "avg"]
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))
        table_rows = []

        # Per-atomic-action duration statistics table.
        actions_data = utils.get_atomic_actions_data(raw)
        for action in actions_data:
            durations = actions_data[action]
            if durations:
                data = [action,
                        round(min(durations), 3),
                        round(utils.median(durations), 3),
                        round(utils.percentile(durations, 0.90), 3),
                        round(utils.percentile(durations, 0.95), 3),
                        round(max(durations), 3),
                        round(utils.mean(durations), 3),
                        # Success rate: share of iterations with a sample.
                        "%.1f%%" % (len(durations) * 100.0 / len(raw)),
                        len(raw)]
            else:
                # No successful samples recorded for this action.
                data = [action, None, None, None, None, None, None,
                        "0.0%", len(raw)]
            table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))

        cliutils.print_list(table_rows, fields=table_cols,
                            formatters=formatters,
                            table_label="Response Times (sec)",
                            sortby_index=None)

        if iterations_data:
            _print_iterations_data(raw)

        print(_("Load duration: %s") % result["data"]["load_duration"])
        print(_("Full duration: %s") % result["data"]["full_duration"])

        # NOTE(hughsaunders): ssrs=scenario specific results
        ssrs = []
        # NOTE(review): the loop below shadows the outer "result" variable;
        # harmless because the outer loop reassigns it each pass, but worth
        # renaming.
        for result in raw:
            data = result["scenario_output"].get("data")
            if data:
                ssrs.append(data)
        if ssrs:
            # Union of keys across all scenario outputs — some keys may be
            # absent for some iterations.
            keys = set()
            for ssr in ssrs:
                keys.update(ssr.keys())
            headers = ["key", "min", "median",
                       "90%ile", "95%ile", "max", "avg"]
            float_cols = ["min", "median", "90%ile", "95%ile", "max", "avg"]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
            table_rows = []
            for key in keys:
                values = [float(ssr[key]) for ssr in ssrs if key in ssr]
                if values:
                    row = [str(key),
                           round(min(values), 3),
                           round(utils.median(values), 3),
                           round(utils.percentile(values, 0.90), 3),
                           round(utils.percentile(values, 0.95), 3),
                           round(max(values), 3),
                           round(utils.mean(values), 3)]
                else:
                    row = [str(key)] + ["n/a"] * 6
                table_rows.append(rutils.Struct(**dict(zip(headers, row))))
            print("\nScenario Specific Results\n")
            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters,
                                table_label="Response Times (sec)")

            # Print any per-iteration scenario errors after the table.
            for result in raw:
                errors = result["scenario_output"].get("errors")
                if errors:
                    print(errors)

    print()
    print("HINTS:")
    print(_("* To plot HTML graphics with this data, run:"))
    print("\trally task report %s --out output.html" % task["uuid"])
    print()
    print(_("* To generate a JUnit report, run:"))
    print("\trally task report %s --junit --out output.xml" % task["uuid"])
    print()
    print(_("* To get raw JSON output of task results, run:"))
    print("\trally task results %s\n" % task["uuid"])
def detailed(self, task_id=None, iterations_data=False):
    """Display results table.

    :param task_id: Task uuid
    :param iterations_data: print detailed results for each iteration

    Prints detailed information of task.
    """

    def _print_iterations_data(raw_data):
        # One table row per iteration: number, full duration, and a
        # numbered column for every atomic action.
        headers = ["iteration", "full duration"]
        float_cols = ["full duration"]
        atomic_actions = []
        for row in raw_data:
            # find first non-error result to get atomic actions names
            # NOTE(review): no break — this keeps the atomic actions of the
            # LAST non-error row despite the comment; confirm intent.
            if not row["error"] and "atomic_actions" in row:
                atomic_actions = row["atomic_actions"].keys()
        for row in raw_data:
            if row["atomic_actions"]:
                # Column headers are rendered as "1. <action>", "2. ...".
                for (c, a) in enumerate(atomic_actions, 1):
                    action = "%(no)i. %(action)s" % {"no": c, "action": a}
                    headers.append(action)
                    float_cols.append(action)
                break
        table_rows = []
        # All duration columns are printed with three decimal places.
        formatters = dict(
            zip(float_cols, [
                cliutils.pretty_float_formatter(col, 3)
                for col in float_cols
            ]))
        for (c, r) in enumerate(raw_data, 1):
            dlist = [c]
            dlist.append(r["duration"])
            if r["atomic_actions"]:
                for action in atomic_actions:
                    # Falsy/missing durations become 0 in the table.
                    dlist.append(r["atomic_actions"].get(action) or 0)
            table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
        cliutils.print_list(table_rows, fields=headers,
                            formatters=formatters)
        print()

    task = db.task_get_detailed(task_id)

    if task is None:
        print("The task %s can not be found" % task_id)
        return (1)

    print()
    print("-" * 80)
    print(
        _("Task %(task_id)s: %(status)s") % {
            "task_id": task_id,
            "status": task["status"]
        })

    if task["status"] == consts.TaskStatus.FAILED:
        # Failed task: dump the stored verification log and stop.
        print("-" * 80)
        verification = yaml.safe_load(task["verification_log"])
        if not logging.is_debug():
            # Without -vd only the first two entries are shown
            # (presumably error type and message — verify against writer).
            print(verification[0])
            print(verification[1])
            print()
            print(
                _("For more details run:\nrally -vd task detailed %s") %
                task["uuid"])
        else:
            print(yaml.safe_load(verification[2]))
        return

    for result in task["results"]:
        key = result["key"]
        print("-" * 80)
        print()
        print("test scenario %s" % key["name"])
        print("args position %s" % key["pos"])
        print("args values:")
        print(json.dumps(key["kw"], indent=2))

        raw = result["data"]["raw"]
        table_cols = [
            "action", "min", "median", "90%ile", "95%ile", "max", "avg",
            "success", "count"
        ]
        float_cols = ["min", "median", "90%ile", "95%ile", "max", "avg"]
        formatters = dict(
            zip(float_cols, [
                cliutils.pretty_float_formatter(col, 3)
                for col in float_cols
            ]))
        table_rows = []

        # Duration statistics aggregated per atomic action.
        actions_data = utils.get_atomic_actions_data(raw)
        for action in actions_data:
            durations = actions_data[action]
            if durations:
                data = [
                    action,
                    round(min(durations), 3),
                    round(utils.median(durations), 3),
                    round(utils.percentile(durations, 0.90), 3),
                    round(utils.percentile(durations, 0.95), 3),
                    round(max(durations), 3),
                    round(utils.mean(durations), 3),
                    # Share of iterations that produced a sample.
                    "%.1f%%" % (len(durations) * 100.0 / len(raw)),
                    len(raw)
                ]
            else:
                # Action never completed successfully.
                data = [
                    action, None, None, None, None, None, None, "0.0%",
                    len(raw)
                ]
            table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))

        cliutils.print_list(table_rows, fields=table_cols,
                            formatters=formatters,
                            table_label="Response Times (sec)",
                            sortby_index=None)

        if iterations_data:
            _print_iterations_data(raw)

        print(_("Load duration: %s") % result["data"]["load_duration"])
        print(_("Full duration: %s") % result["data"]["full_duration"])

        # NOTE(hughsaunders): ssrs=scenario specific results
        ssrs = []
        # NOTE(review): this loop shadows the outer "result" variable;
        # harmless (the outer loop rebinds it) but worth renaming.
        for result in raw:
            data = result["scenario_output"].get("data")
            if data:
                ssrs.append(data)
        if ssrs:
            # Keys may differ per iteration; take the union of them all.
            keys = set()
            for ssr in ssrs:
                keys.update(ssr.keys())
            headers = [
                "key", "min", "median", "90%ile", "95%ile", "max", "avg"
            ]
            float_cols = [
                "min", "median", "90%ile", "95%ile", "max", "avg"
            ]
            formatters = dict(
                zip(float_cols, [
                    cliutils.pretty_float_formatter(col, 3)
                    for col in float_cols
                ]))
            table_rows = []
            for key in keys:
                values = [float(ssr[key]) for ssr in ssrs if key in ssr]
                if values:
                    row = [
                        str(key),
                        round(min(values), 3),
                        round(utils.median(values), 3),
                        round(utils.percentile(values, 0.90), 3),
                        round(utils.percentile(values, 0.95), 3),
                        round(max(values), 3),
                        round(utils.mean(values), 3)
                    ]
                else:
                    row = [str(key)] + ["n/a"] * 6
                table_rows.append(rutils.Struct(**dict(zip(headers, row))))
            print("\nScenario Specific Results\n")
            cliutils.print_list(table_rows, fields=headers,
                                formatters=formatters,
                                table_label="Response Times (sec)")

            # Emit any recorded per-iteration errors after the table.
            for result in raw:
                errors = result["scenario_output"].get("errors")
                if errors:
                    print(errors)

    print()
    print("HINTS:")
    print(_("* To plot HTML graphics with this data, run:"))
    print("\trally task report %s --out output.html" % task["uuid"])
    print()
    print(_("* To generate a JUnit report, run:"))
    print("\trally task report %s --junit --out output.xml" % task["uuid"])
    print()
    print(_("* To get raw JSON output of task results, run:"))
    print("\trally task results %s\n" % task["uuid"])
def detailed(self, uuid):
    """Fetch the task identified by *uuid* together with its results."""
    task = db.task_get_detailed(uuid)
    return task