def test_add_result_once_and_get_data(self):
    """A single failed-SLA workload is reflected verbatim in get_data().

    Adds one result (timestamp 123498789) and checks the full trends
    structure: per-action durations, top-level durations, aggregate
    stat and a single SLA failure.
    """
    trends = plot.Trends()
    trends.add_result(self._make_result(42, sla_success=False))
    # NOTE(review): "config" is the JSON-dumped string "\"kw_42\"", which
    # suggests the workload-to-task conversion is patched somewhere outside
    # this view — TODO confirm the enclosing decorators.
    expected = [
        {"actions": [{"durations": [("90%ile", [(123498789, 0.9)]),
                                    ("95%ile", [(123498789, 0.87)]),
                                    ("avg", [(123498789, 0.67)]),
                                    ("max", [(123498789, 1.25)]),
                                    ("median", [(123498789, 0.85)]),
                                    ("min", [(123498789, 0.7)])],
                      "name": "a",
                      "success": [("success", [(123498789, 100.0)])]},
                     {"durations": [("90%ile", [(123498789, 0.85)]),
                                    ("95%ile", [(123498789, 0.9)]),
                                    ("avg", [(123498789, 0.58)]),
                                    ("max", [(123498789, 1.1)]),
                                    ("median", [(123498789, 0.75)]),
                                    ("min", [(123498789, 0.5)])],
                      "name": "b",
                      "success": [("success", [(123498789, 100.0)])]}],
         "cls": "Scenario",
         "config": "\"kw_42\"",
         "durations": [("90%ile", [(123498789, 1.7)]),
                       ("95%ile", [(123498789, 1.8)]),
                       ("avg", [(123498789, 0.8)]),
                       ("max", [(123498789, 1.5)]),
                       ("median", [(123498789, 1.55)]),
                       ("min", [(123498789, 1.2)])],
         "length": 1,
         "met": "name_42",
         "name": "Scenario.name_42",
         "sla_failures": 1,
         "stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
         "success": [("success", [(123498789, 100.0)])]}]
    self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_obtaining_workload_description(self):
    """Description handling when merging workloads with the same name.

    First result keeps its own description ("foo!!!").  After a second
    workload with the same name but a different description ("bar!!!")
    is merged in, the config description falls back to a generic text
    ("Do nothing and sleep ...") instead of either custom one.
    """
    trends = plot.Trends()
    workload_1 = self._make_result(42)
    workload_1["name"] = "Dummy.dummy"
    workload_1["description"] = "foo!!!"
    trends.add_result("task_uuid", workload_1)
    data = trends.get_data()
    self.assertEqual(1, len(data))
    # "config" is serialized JSON; parse it to inspect the description.
    cfg = json.loads(data[0]["config"])
    self.assertEqual("foo!!!", cfg["subtasks"][0]["description"])

    # Second workload: same name, conflicting description.
    workload_2 = self._make_result(42)
    workload_2["name"] = "Dummy.dummy"
    workload_2["description"] = "bar!!!"
    trends.add_result("task_uuid", workload_2)
    data = trends.get_data()
    # Both results are merged into a single trends entry.
    self.assertEqual(1, len(data))
    cfg = json.loads(data[0]["config"])
    self.assertEqual("Do nothing and sleep for the given number of "
                     "seconds (0 by default).",
                     cfg["subtasks"][0]["description"])
def test__make_hash(self, mock_hashlib):
    """_make_hash() stringifies the object, UTF8-encodes it and MD5s it."""
    trends = plot.Trends()
    # Stub out the stringification step so only the hashing path is tested.
    to_str = mock.Mock()
    to_str.return_value.encode.return_value = "foo_str"
    trends._to_str = to_str
    mock_hashlib.md5.return_value.hexdigest.return_value = "md5_digest"

    self.assertEqual("md5_digest", trends._make_hash("foo_obj"))

    # The pipeline must be: _to_str(obj) -> .encode("utf8") -> md5(...).
    mock_hashlib.md5.assert_called_once_with("foo_str")
    to_str.assert_called_once_with("foo_obj")
    to_str.return_value.encode.assert_called_once_with("utf8")
def test_add_result_with_na_and_get_data(self, mock_workload_to_task):
    """Workload with unavailable values yields "n/a" datapoints.

    With with_na=True every duration value becomes the string "n/a",
    success drops to 0 and the aggregate stat is all None.
    """
    mock_workload_to_task.return_value = "kw_42"
    trends = plot.Trends()
    trends.add_result(
        self._make_result(42, sla_success=False, with_na=True))
    expected = [{
        "actions": [{
            "durations": [("90%ile", [(123498789, "n/a")]),
                          ("95%ile", [(123498789, "n/a")]),
                          ("avg", [(123498789, "n/a")]),
                          ("max", [(123498789, "n/a")]),
                          ("median", [(123498789, "n/a")]),
                          ("min", [(123498789, "n/a")])],
            "name": "a",
            "success": [("success", [(123498789, 0)])]
        }, {
            "durations": [("90%ile", [(123498789, "n/a")]),
                          ("95%ile", [(123498789, "n/a")]),
                          ("avg", [(123498789, "n/a")]),
                          ("max", [(123498789, "n/a")]),
                          ("median", [(123498789, "n/a")]),
                          ("min", [(123498789, "n/a")])],
            "name": "b",
            "success": [("success", [(123498789, 0)])]
        }],
        "cls": "Scenario",
        "config": "\"kw_42\"",
        "durations": [("90%ile", [(123498789, "n/a")]),
                      ("95%ile", [(123498789, "n/a")]),
                      ("avg", [(123498789, "n/a")]),
                      ("max", [(123498789, "n/a")]),
                      ("median", [(123498789, "n/a")]),
                      ("min", [(123498789, "n/a")])],
        "length": 1,
        "met": "name_42",
        "name": "Scenario.name_42",
        "sla_failures": 1,
        "stat": {
            "avg": None,
            "max": None,
            "min": None
        },
        "success": [("success", [(123498789, 0)])]
    }]
    self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_add_result_with_na_and_get_data(self):
    """Workload with unavailable values yields "n/a" datapoints.

    This variant uses the atomic/total output format: all values are
    the string "n/a", success is 0 and the aggregate stat is all None.
    """
    trends = plot.Trends()
    trends.add_result(
        self._make_result("foo", sla_success=False, with_na=True))
    expected = [{
        "atomic": [{
            "name": "a",
            "success": [("success", [(1, 0)])],
            "values": [("90%ile", [(1, "n/a")]),
                       ("95%ile", [(1, "n/a")]),
                       ("avg", [(1, "n/a")]),
                       ("max", [(1, "n/a")]),
                       ("median", [(1, "n/a")]),
                       ("min", [(1, "n/a")])]
        }, {
            "name": "b",
            "success": [("success", [(1, 0)])],
            "values": [("90%ile", [(1, "n/a")]),
                       ("95%ile", [(1, "n/a")]),
                       ("avg", [(1, "n/a")]),
                       ("max", [(1, "n/a")]),
                       ("median", [(1, "n/a")]),
                       ("min", [(1, "n/a")])]
        }],
        "cls": "Scenario",
        "config": "\"foo_kw\"",
        "met": "name_foo",
        "name": "Scenario.name_foo",
        "seq": 1,
        "single": True,
        "sla_failures": 1,
        "stat": {
            "avg": None,
            "max": None,
            "min": None
        },
        "total": {
            "success": [("success", [(1, 0)])],
            "values": [("90%ile", [(1, "n/a")]),
                       ("95%ile", [(1, "n/a")]),
                       ("avg", [(1, "n/a")]),
                       ("max", [(1, "n/a")]),
                       ("median", [(1, "n/a")]),
                       ("min", [(1, "n/a")])]
        }
    }]
    self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_add_result_with_na_and_get_data(self, mock_workload_to_task,
                                         mock_dumps):
    """N/A workload with a real (dict) config passed through get_data().

    json.dumps is mocked to identity so the config stays a dict and the
    description rewrite performed by get_data() can be inspected.
    """
    mock_dumps.side_effect = lambda x, **j: x
    workload_cfg = {"description": "foo",
                    "subtasks": [{"description": "descr"}]}
    mock_workload_to_task.return_value = workload_cfg
    trends = plot.Trends()
    trends.add_result(
        "task_uuid",
        self._make_result(42, sla_success=False, with_na=True))
    actual = self._sort_trends(trends.get_data())
    # NOTE(review): because mock_dumps is identity, `workload_cfg` is
    # presumably the very same dict object embedded in `actual`; mutating
    # it here mutates both sides of the comparison, so the description
    # text itself may not be independently verified — confirm intent.
    workload_cfg["description"] = "Task(s) with the workload: task_uuid"
    expected = [
        {"actions": [{"durations": [("90%ile", [(123498789, "n/a")]),
                                    ("95%ile", [(123498789, "n/a")]),
                                    ("avg", [(123498789, "n/a")]),
                                    ("max", [(123498789, "n/a")]),
                                    ("median", [(123498789, "n/a")]),
                                    ("min", [(123498789, "n/a")])],
                      "name": "a",
                      "success": [("success", [(123498789, 0)])]},
                     {"durations": [("90%ile", [(123498789, "n/a")]),
                                    ("95%ile", [(123498789, "n/a")]),
                                    ("avg", [(123498789, "n/a")]),
                                    ("max", [(123498789, "n/a")]),
                                    ("median", [(123498789, "n/a")]),
                                    ("min", [(123498789, "n/a")])],
                      "name": "b",
                      "success": [("success", [(123498789, 0)])]}],
         "cls": "Scenario",
         "config": workload_cfg,
         "durations": [("90%ile", [(123498789, "n/a")]),
                       ("95%ile", [(123498789, "n/a")]),
                       ("avg", [(123498789, "n/a")]),
                       ("max", [(123498789, "n/a")]),
                       ("median", [(123498789, "n/a")]),
                       ("min", [(123498789, "n/a")])],
         "length": 1,
         "met": "name_42",
         "name": "Scenario.name_42",
         "sla_failures": 1,
         "stat": {"avg": None, "max": None, "min": None},
         "success": [("success", [(123498789, 0)])]}]
    self.assertEqual(expected, actual)
def test_add_result_once_and_get_data(self):
    """A single failed-SLA workload in the atomic/total output format.

    Checks per-atomic values, the total series, the aggregate stat and
    the single SLA failure for one added result.
    """
    trends = plot.Trends()
    trends.add_result(self._make_result("foo", sla_success=False))
    expected = [{
        "atomic": [{
            "name": "a",
            "success": [("success", [(1, 100.0)])],
            "values": [("90%ile", [(1, 0.9)]),
                       ("95%ile", [(1, 0.87)]),
                       ("avg", [(1, 0.67)]),
                       ("max", [(1, 1.25)]),
                       ("median", [(1, 0.85)]),
                       ("min", [(1, 0.7)])]
        }, {
            "name": "b",
            "success": [("success", [(1, 100.0)])],
            "values": [("90%ile", [(1, 0.85)]),
                       ("95%ile", [(1, 0.9)]),
                       ("avg", [(1, 0.58)]),
                       ("max", [(1, 1.1)]),
                       ("median", [(1, 0.75)]),
                       ("min", [(1, 0.5)])]
        }],
        "cls": "Scenario",
        "config": "\"foo_kw\"",
        "met": "name_foo",
        "name": "Scenario.name_foo",
        "seq": 1,
        "single": True,
        "sla_failures": 1,
        "stat": {
            "avg": 1.6,
            "max": 1.5,
            "min": 1.2
        },
        "total": {
            "success": [("success", [(1, 100.0)])],
            "values": [("90%ile", [(1, 1.7)]),
                       ("95%ile", [(1, 1.9)]),
                       ("avg", [(1, 1.6)]),
                       ("max", [(1, 1.5)]),
                       ("median", [(1, 1.55)]),
                       ("min", [(1, 1.2)])]
        }
    }]
    self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_get_data_no_results_added(self):
    """A fresh Trends instance reports an empty data set."""
    self.assertEqual([], plot.Trends().get_data())
def test__to_str(self, args, result=None, raises=None):
    """Check _to_str() output, or that it raises the expected error.

    Parametrized: either `result` (expected return value) or `raises`
    (expected exception type) is provided per scenario.
    """
    trends = plot.Trends()
    if not raises:
        self.assertEqual(result, trends._to_str(*args))
    else:
        self.assertRaises(raises, trends._to_str, *args)
def test___init__(self):
    """Trends() accepts no arguments and starts with an empty task map."""
    # The constructor must reject positional arguments.
    self.assertRaises(TypeError, plot.Trends, 42)
    trends = plot.Trends()
    self.assertEqual({}, trends._tasks)
def test_add_result_and_get_data(self):
    """Two workloads with different names produce two trends entries.

    Each entry keeps its own config ("0_kw"/"1_kw") and name while the
    measured values are identical fixtures; no SLA failures expected.
    """
    trends = plot.Trends()
    for i in 0, 1:
        trends.add_result(self._make_result(str(i)))
    expected = [{
        "atomic": [{
            "name": "a",
            "values": [("90%ile", [(1, 0.9)]),
                       ("95%ile", [(1, 0.87)]),
                       ("avg", [(1, 0.67)]),
                       ("max", [(1, 1.25)]),
                       ("median", [(1, 0.85)]),
                       ("min", [(1, 0.7)])]
        }, {
            "name": "b",
            "values": [("90%ile", [(1, 0.85)]),
                       ("95%ile", [(1, 0.9)]),
                       ("avg", [(1, 0.58)]),
                       ("max", [(1, 1.1)]),
                       ("median", [(1, 0.75)]),
                       ("min", [(1, 0.5)])]
        }],
        "cls": "Scenario",
        "config": "\"0_kw\"",
        "met": "name_0",
        "name": "Scenario.name_0",
        "seq": 1,
        "single": True,
        "sla_failures": 0,
        "stat": {
            "avg": 1.6,
            "max": 1.5,
            "min": 1.2
        },
        "total": [("90%ile", [(1, 1.7)]),
                  ("95%ile", [(1, 1.9)]),
                  ("avg", [(1, 1.6)]),
                  ("max", [(1, 1.5)]),
                  ("median", [(1, 1.55)]),
                  ("min", [(1, 1.2)])]
    }, {
        "atomic": [{
            "name": "a",
            "values": [("90%ile", [(1, 0.9)]),
                       ("95%ile", [(1, 0.87)]),
                       ("avg", [(1, 0.67)]),
                       ("max", [(1, 1.25)]),
                       ("median", [(1, 0.85)]),
                       ("min", [(1, 0.7)])]
        }, {
            "name": "b",
            "values": [("90%ile", [(1, 0.85)]),
                       ("95%ile", [(1, 0.9)]),
                       ("avg", [(1, 0.58)]),
                       ("max", [(1, 1.1)]),
                       ("median", [(1, 0.75)]),
                       ("min", [(1, 0.5)])]
        }],
        "cls": "Scenario",
        "config": "\"1_kw\"",
        "met": "name_1",
        "name": "Scenario.name_1",
        "seq": 1,
        "single": True,
        "sla_failures": 0,
        "stat": {
            "avg": 1.6,
            "max": 1.5,
            "min": 1.2
        },
        "total": [("90%ile", [(1, 1.7)]),
                  ("95%ile", [(1, 1.9)]),
                  ("avg", [(1, 1.6)]),
                  ("max", [(1, 1.5)]),
                  ("median", [(1, 1.55)]),
                  ("min", [(1, 1.2)])]
    }]
    self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_add_result_and_get_data(self, mock_workload_to_task):
    """Two workloads with different names produce two trends entries.

    Each entry keeps its own config ("kw_0"/"kw_1"), timestamp
    (123456789 / 123457789) and name; no SLA failures expected.
    """
    mock_workload_to_task.side_effect = ("kw_0", "kw_1")
    trends = plot.Trends()
    for i in 0, 1:
        trends.add_result(self._make_result(i))
    expected = [{
        "actions": [{
            "durations": [("90%ile", [(123456789, 0.9)]),
                          ("95%ile", [(123456789, 0.87)]),
                          ("avg", [(123456789, 0.67)]),
                          ("max", [(123456789, 1.25)]),
                          ("median", [(123456789, 0.85)]),
                          ("min", [(123456789, 0.7)])],
            "name": "a",
            "success": [("success", [(123456789, 100.0)])]
        }, {
            "durations": [("90%ile", [(123456789, 0.85)]),
                          ("95%ile", [(123456789, 0.9)]),
                          ("avg", [(123456789, 0.58)]),
                          ("max", [(123456789, 1.1)]),
                          ("median", [(123456789, 0.75)]),
                          ("min", [(123456789, 0.5)])],
            "name": "b",
            "success": [("success", [(123456789, 100.0)])]
        }],
        "cls": "Scenario",
        "config": "\"kw_0\"",
        "durations": [("90%ile", [(123456789, 1.7)]),
                      ("95%ile", [(123456789, 1.8)]),
                      ("avg", [(123456789, 0.8)]),
                      ("max", [(123456789, 1.5)]),
                      ("median", [(123456789, 1.55)]),
                      ("min", [(123456789, 1.2)])],
        "length": 1,
        "met": "name_0",
        "name": "Scenario.name_0",
        "sla_failures": 0,
        "stat": {
            "avg": 1.425,
            "max": 1.8,
            "min": 0.8
        },
        "success": [("success", [(123456789, 100.0)])]
    }, {
        "actions": [{
            "durations": [("90%ile", [(123457789, 0.9)]),
                          ("95%ile", [(123457789, 0.87)]),
                          ("avg", [(123457789, 0.67)]),
                          ("max", [(123457789, 1.25)]),
                          ("median", [(123457789, 0.85)]),
                          ("min", [(123457789, 0.7)])],
            "name": "a",
            "success": [("success", [(123457789, 100.0)])]
        }, {
            "durations": [("90%ile", [(123457789, 0.85)]),
                          ("95%ile", [(123457789, 0.9)]),
                          ("avg", [(123457789, 0.58)]),
                          ("max", [(123457789, 1.1)]),
                          ("median", [(123457789, 0.75)]),
                          ("min", [(123457789, 0.5)])],
            "name": "b",
            "success": [("success", [(123457789, 100.0)])]
        }],
        "cls": "Scenario",
        "config": "\"kw_1\"",
        "durations": [("90%ile", [(123457789, 1.7)]),
                      ("95%ile", [(123457789, 1.8)]),
                      ("avg", [(123457789, 0.8)]),
                      ("max", [(123457789, 1.5)]),
                      ("median", [(123457789, 1.55)]),
                      ("min", [(123457789, 1.2)])],
        "length": 1,
        "met": "name_1",
        "name": "Scenario.name_1",
        "sla_failures": 0,
        "stat": {
            "avg": 1.425,
            "max": 1.8,
            "min": 0.8
        },
        "success": [("success", [(123457789, 100.0)])]
    }]
    self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_add_result_and_get_data(self, mock_workload_to_task, mock_dumps):
    """Two workloads with dict configs passed through get_data().

    json.dumps is mocked to identity so configs stay dicts; each trends
    entry must carry its own config and description rewrite.
    """
    mock_dumps.side_effect = lambda x, **j: x
    workload_cfg = [
        {
            "description": "foo",
            "name": "Name1",
            "subtasks": [{"description": "descr"}]},
        {
            "description": "foo",
            "name": "Name2",
            "subtasks": [{"description": "descr"}]}]
    mock_workload_to_task.side_effect = workload_cfg
    trends = plot.Trends()
    for i in 0, 1:
        trends.add_result(
            "task_uuid_%s" % i, self._make_result(i))
    actual = self._sort_trends(trends.get_data())
    # NOTE(review): the uuids added above are task_uuid_0/task_uuid_1, yet
    # the texts below say task_uuid_1/task_uuid_2. Because mock_dumps is
    # identity, these dicts are presumably the same objects embedded in
    # `actual`, so mutating them rewrites both sides of the comparison and
    # the description text is never independently checked — confirm intent.
    workload_cfg[0]["description"] = (
        "Task(s) with the workload: task_uuid_1")
    workload_cfg[1]["description"] = (
        "Task(s) with the workload: task_uuid_2")
    expected = [
        {"actions": [{"durations": [("90%ile", [(123456789, 0.9)]),
                                    ("95%ile", [(123456789, 0.87)]),
                                    ("avg", [(123456789, 0.67)]),
                                    ("max", [(123456789, 1.25)]),
                                    ("median", [(123456789, 0.85)]),
                                    ("min", [(123456789, 0.7)])],
                      "name": "a",
                      "success": [("success", [(123456789, 100.0)])]},
                     {"durations": [("90%ile", [(123456789, 0.85)]),
                                    ("95%ile", [(123456789, 0.9)]),
                                    ("avg", [(123456789, 0.58)]),
                                    ("max", [(123456789, 1.1)]),
                                    ("median", [(123456789, 0.75)]),
                                    ("min", [(123456789, 0.5)])],
                      "name": "b",
                      "success": [("success", [(123456789, 100.0)])]}],
         "cls": "Scenario",
         "config": workload_cfg[0],
         "durations": [("90%ile", [(123456789, 1.7)]),
                       ("95%ile", [(123456789, 1.8)]),
                       ("avg", [(123456789, 0.8)]),
                       ("max", [(123456789, 1.5)]),
                       ("median", [(123456789, 1.55)]),
                       ("min", [(123456789, 1.2)])],
         "length": 1,
         "met": "name_0",
         "name": "Scenario.name_0",
         "sla_failures": 0,
         "stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
         "success": [("success", [(123456789, 100.0)])]},
        {"actions": [{"durations": [("90%ile", [(123457789, 0.9)]),
                                    ("95%ile", [(123457789, 0.87)]),
                                    ("avg", [(123457789, 0.67)]),
                                    ("max", [(123457789, 1.25)]),
                                    ("median", [(123457789, 0.85)]),
                                    ("min", [(123457789, 0.7)])],
                      "name": "a",
                      "success": [("success", [(123457789, 100.0)])]},
                     {"durations": [("90%ile", [(123457789, 0.85)]),
                                    ("95%ile", [(123457789, 0.9)]),
                                    ("avg", [(123457789, 0.58)]),
                                    ("max", [(123457789, 1.1)]),
                                    ("median", [(123457789, 0.75)]),
                                    ("min", [(123457789, 0.5)])],
                      "name": "b",
                      "success": [("success", [(123457789, 100.0)])]}],
         "cls": "Scenario",
         "config": workload_cfg[1],
         "durations": [("90%ile", [(123457789, 1.7)]),
                       ("95%ile", [(123457789, 1.8)]),
                       ("avg", [(123457789, 0.8)]),
                       ("max", [(123457789, 1.5)]),
                       ("median", [(123457789, 1.55)]),
                       ("min", [(123457789, 1.2)])],
         "length": 1,
         "met": "name_1",
         "name": "Scenario.name_1",
         "sla_failures": 0,
         "stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
         "success": [("success", [(123457789, 100.0)])]}]
    self.assertEqual(expected, actual)