def test_delete(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    rally("task list")
    self.assertIn("finished", rally("task status"))
    rally("task delete")
    self.assertNotIn("finished", rally("task list"))

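# Several tests here rely on a _get_sample_task_config() helper defined
# on the test class but not shown in this section. A minimal sketch of
# the kind of config it plausibly returns, judging by the scenario name
# asserted in test_import_results below; the runner values are
# illustrative assumptions, and it is kept under a distinct name so it
# does not shadow the real helper. Other test classes in this file
# define their own variants (e.g. one taking cmd/description/runner
# kwargs for hook tests, one taking max_seconds_per_iteration for SLA
# tests).
def _get_sample_task_config_sketch(self):
    return {
        "Dummy.dummy_random_fail_in_atomic": [{
            "runner": {
                "type": "constant",
                "times": 10,
                "concurrency": 2
            }
        }]
    }
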
def test_validate_is_invalid(self):
    rally = utils.Rally()
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
    cfg = {"invalid": "config"}
    config = utils.TaskConfig(cfg)
    self.assertRaises(utils.RallyCliError,
                      rally,
                      ("task validate --task %(task_file)s "
                       "--deployment %(deployment_id)s")
                      % {"task_file": config.filename,
                         "deployment_id": deployment_id})

def test_start_v2(self):
    rally = utils.Rally()
    deployment_id = utils.get_global("RALLY_DEPLOYMENT", rally.env)
    cfg = self._get_sample_task_config_v2()
    config = utils.TaskConfig(cfg)
    output = rally(("task start --task %(task_file)s "
                    "--deployment %(deployment_id)s")
                   % {"task_file": config.filename,
                      "deployment_id": deployment_id})
    result = re.search(r"(?P<task_id>[0-9a-f\-]{36}): started", output)
    self.assertIsNotNone(result)

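# test_start_v2 relies on a _get_sample_task_config_v2() helper not
# shown here. A hedged sketch of a task in the v2 format it presumably
# returns; the "version"/"title"/"subtasks"/"workloads" layout follows
# the Rally v2 task schema, but the concrete scenario and runner values
# are assumptions:
def _get_sample_task_config_v2_sketch(self):
    return {
        "version": 2,
        "title": "Sample task",
        "subtasks": [{
            "title": "dummy subtask",
            "workloads": [{
                "scenario": {"Dummy.dummy": {}},
                "runner": {"constant": {"times": 10, "concurrency": 2}}
            }]
        }]
    }
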
def _test_start_abort_on_sla_failure_success(self, cfg, times):
    rally = utils.Rally()
    deployment_id = utils.get_global("RALLY_DEPLOYMENT", rally.env)
    config = utils.TaskConfig(cfg)
    rally(("task start --task %(task_file)s "
           "--deployment %(deployment_id)s --abort-on-sla-failure")
          % {"task_file": config.filename,
             "deployment_id": deployment_id})
    results = json.loads(rally("task results"))
    iterations_completed = len(results[0]["result"])
    self.assertEqual(times, iterations_completed)

def _test_start_abort_on_sla_failure(self, cfg, times):
    rally = utils.Rally()
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
    config = utils.TaskConfig(cfg)
    rally(("task start --task %(task_file)s "
           "--deployment %(deployment_id)s --abort-on-sla-failure")
          % {"task_file": config.filename,
             "deployment_id": deployment_id})
    results = json.loads(rally("task results"))
    iterations_completed = len(results[0]["result"])
    # the SLA failure should abort the run before all iterations finish
    self.assertLess(iterations_completed, times)

def test_import_results(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)

    json_report = rally.gen_report_path(extension="json")
    with open(json_report, "w+") as f:
        f.write(rally("task results"))
    import_print = rally("task import --file %s" % json_report)
    self.assertIn("successfully", import_print)
    task_uuid = re.search(r"UUID:\s([a-z0-9\-]+)", import_print).group(1)
    self.assertIn("Dummy.dummy_random_fail_in_atomic",
                  rally("task results --uuid %s" % task_uuid))

def test_start(self):
    rally = utils.Rally()
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    output = rally(("task start --task %(task_file)s "
                    "--deployment %(deployment_id)s")
                   % {"task_file": config.filename,
                      "deployment_id": deployment_id})
    result = re.search(r"(?P<task_id>[0-9a-f\-]{36}): started", output)
    self.assertIsNotNone(result)

def test_sla_success(self):
    rally = utils.Rally()
    config = utils.TaskConfig(self._get_sample_task_config())
    rally("task start --task %s" % config.filename)
    rally("task sla-check")
    expected = [
        {"benchmark": "Dummy.dummy_random_action",
         "criterion": "performance_degradation",
         "detail": mock.ANY,
         "pos": 0,
         "status": "PASS"},
    ]
    data = rally("task sla-check --json", getjson=True)
    self.assertEqual(expected, data)

def test_export_bunch_uuids(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    task_uuids = []
    for i in range(3):
        res = rally("task start --task %s" % config.filename)
        for line in res.splitlines():
            # "Task <uuid>: finished" -> extract the UUID, dropping the
            # trailing colon
            if "finished" in line:
                task_uuids.append(line.split(" ")[1][:-1])
    html_report = rally.gen_report_path(extension="html")
    rally("task export --uuid %s --type html --to %s"
          % (" ".join(task_uuids), html_report))
    self.assertTrue(os.path.exists(html_report))

def test_hook_result_error(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config(
        cmd="/bin/false",
        description="event_hook",
        runner={"type": "constant", "times": 20, "concurrency": 3})
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    results = json.loads(rally("task results"))
    hook_results = results[0]["hooks"]

    hooks_cfg = cfg["Dummy.dummy"][0]["hooks"]
    expected = [self._get_result(hooks_cfg[0], iterations=[5], error=True)]
    self.assertEqual(expected, hook_results)
    self._assert_results_time(hook_results)

def test_use(self):
    rally = utils.Rally()
    deployment_id = utils.get_global("RALLY_DEPLOYMENT", rally.env)
    config = utils.TaskConfig(self._get_sample_task_config())
    output = rally(("task start --task %(task_file)s "
                    "--deployment %(deployment_id)s")
                   % {"task_file": config.filename,
                      "deployment_id": deployment_id})
    result = re.search(r"(?P<uuid>[0-9a-f\-]{36}): started", output)
    uuid = result.group("uuid")
    rally("task use --task %s" % uuid)
    current_task = utils.get_global("RALLY_TASK", rally.env)
    self.assertEqual(uuid, current_task)

def test_hook_result_with_serial_runner(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config(
        cmd="/bin/true",
        description="event_hook",
        runner={"type": "serial", "times": 10})
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    results = json.loads(rally("task results"))
    hook_results = results[0]["hooks"]

    hooks_cfg = cfg["Dummy.dummy"][0]["hooks"]
    expected = [self._get_result(hooks_cfg[0], iterations=[5])]
    self.assertEqual(expected, hook_results)
    self._assert_results_time(hook_results)

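# The hook tests compare against _get_result(), a helper defined on the
# test class but not shown in this section. A hedged sketch of what it
# plausibly builds from a hook config plus the iterations/seconds the
# hook was expected to fire at; the exact result-dict layout ("config",
# per-trigger "results", a status "summary") is an assumption based on
# the fields these tests read back:
def _get_result_sketch(self, config, iterations=None, seconds=None,
                       error=False):
    status = "failed" if error else "success"
    events = [("iteration", i) for i in iterations or []]
    events += [("time", s) for s in seconds or []]
    return {
        "config": config,
        "results": [{
            "triggered_by": {"event_type": event_type, "value": value},
            "started_at": mock.ANY,
            "finished_at": mock.ANY,
            "status": status
        } for event_type, value in events],
        "summary": {status: len(events)}
    }
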
def test_report_one_uuid(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    rally("task report --out %s" % rally.gen_report_path(extension="html"))
    self.assertTrue(os.path.exists(
        rally.gen_report_path(extension="html")))
    self.assertRaises(utils.RallyCliError,
                      rally, "task report --report %s" % FAKE_TASK_UUID)
    rally("task report --junit --out %s"
          % rally.gen_report_path(extension="junit"))
    self.assertTrue(os.path.exists(
        rally.gen_report_path(extension="junit")))
    self.assertRaises(utils.RallyCliError,
                      rally, "task report --report %s" % FAKE_TASK_UUID)

def test_list(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    self.assertIn("finished", rally("task list --deployment MAIN"))
    self.assertIn("There are no tasks",
                  rally("task list --status failed"))
    self.assertIn("finished", rally("task list --status finished"))
    self.assertIn("deployment_name",
                  rally("task list --all-deployments"))
    self.assertRaises(utils.RallyCliError,
                      rally,
                      "task list --status not_existing_status")

def test_report_bunch_files(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    files = []
    for i in range(3):
        rally("task start --task %s" % config.filename)
        path = "/tmp/task_%d.json" % i
        files.append(path)
        if os.path.exists(path):
            os.remove(path)
        # dump the raw `task results` JSON to a file for the report step
        rally("task results", report_path=path, raw=True)

    rally("task report --tasks %s --out %s"
          % (" ".join(files), rally.gen_report_path(extension="html")))
    self.assertTrue(os.path.exists(
        rally.gen_report_path(extension="html")))

def test_detailed_with_errors(self):
    rally = utils.Rally()
    cfg = {
        "Dummy.dummy_exception": [{
            "runner": {
                "type": "constant",
                "times": 1,
                "concurrency": 1
            }
        }]
    }
    config = utils.TaskConfig(cfg)
    output = rally("task start --task %s" % config.filename)
    uuid = re.search(r"(?P<uuid>[0-9a-f\-]{36}): started",
                     output).group("uuid")
    output = rally("task detailed")
    self.assertIn("Task %s has 1 error(s)" % uuid, output)

def test_detailed_no_atomic_actions(self):
    rally = utils.Rally()
    cfg = {
        "Dummy.dummy": [{
            "runner": {
                "type": "constant",
                "times": 100,
                "concurrency": 5
            }
        }]
    }
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    detailed = rally("task detailed")
    self.assertIn("Dummy.dummy", detailed)
    detailed_iterations_data = rally("task detailed --iterations-data")
    self.assertNotIn("n/a", detailed_iterations_data)

def test_sla_success(self):
    rally = utils.Rally()
    config = utils.TaskConfig(self._get_sample_task_config())
    rally("task start --task %s" % config.filename)
    rally("task sla-check")
    expected = [
        {"benchmark": "KeystoneBasic.create_and_list_users",
         "criterion": "failure_rate",
         "detail": mock.ANY,
         "pos": 0,
         "status": "PASS"},
        {"benchmark": "KeystoneBasic.create_and_list_users",
         "criterion": "max_seconds_per_iteration",
         "detail": mock.ANY,
         "pos": 0,
         "status": "PASS"}
    ]
    data = rally("task sla-check --json", getjson=True)
    self.assertEqual(expected, data)

def test_export_one_uuid(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)

    html_report = rally.gen_report_path(extension="html")
    rally("task export --type html --to %s" % html_report)
    self.assertTrue(os.path.exists(html_report))
    self._assert_html_report_libs_are_embedded(html_report, False)

    rally("task export --type html-static --to %s" % html_report)
    self.assertTrue(os.path.exists(html_report))
    self._assert_html_report_libs_are_embedded(html_report)

    junit_report = rally.gen_report_path(extension="junit")
    rally("task export --type junit-xml --to %s" % junit_report)
    self.assertTrue(os.path.exists(junit_report))

def test_list(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    output = rally("task start --task %s --tag foo" % config.filename)
    task_uuid = self._get_task_uuid(output)
    # obtain the task object from the database to check that the CLI
    # prints everything correctly
    rapi = api.API(config_file=rally.config_filename)
    task = rapi.task.get(task_id=task_uuid)

    actual = rally("task list --deployment MAIN")
    duration = "%s" % round(task["task_duration"], 3)
    duration += " " * (13 - len(duration))
    expected = (
        "+--------------------------------------+-----------------+"
        "---------------------+---------------+----------+--------+\n"
        "| UUID                                 | Deployment name "
        "| Created at          | Load duration | Status   | Tag(s) |\n"
        "+--------------------------------------+-----------------+"
        "---------------------+---------------+----------+--------+\n"
        "| %(uuid)s | MAIN            | %(created_at)s "
        "| %(duration)s | finished | 'foo'  |\n"
        "+--------------------------------------+-----------------+"
        "---------------------+---------------+----------+--------+\n"
        % {"uuid": task_uuid,
           "created_at": task["created_at"].replace("T", " "),
           "duration": duration})
    # self.assertEqual is not used here, since it does not show a big
    # diff and its error message would be useless
    if expected != actual:
        self.fail("AssertionError: Expected output is not equal to "
                  "actual.\nExpected:\"\"\"\n%s\n\"\"\""
                  "\nActual:\"\"\"\n%s\n\"\"\"" % (expected, actual))

    self.assertIn("There are no tasks",
                  rally("task list --status crashed"))
    self.assertIn("finished", rally("task list --status finished"))
    self.assertIn("Deployment name",
                  rally("task list --all-deployments"))

    self.assertRaises(utils.RallyCliError,
                      rally,
                      "task list --status not_existing_status")

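# test_list above calls _get_task_uuid(), which is defined elsewhere on
# the class. A sketch is easy to infer, since other tests in this file
# extract the UUID from the "task start" output with the same inline
# regex; kept under a distinct name so it does not shadow the real one:
def _get_task_uuid_sketch(self, output):
    result = re.search(r"(?P<uuid>[0-9a-f\-]{36}): started", output)
    return result.group("uuid")
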
def test_export_with_wrong_connection(self):
    rally = utils.Rally()
    cfg = {
        "Dummy.dummy": [{
            "runner": {
                "type": "constant",
                "times": 100,
                "concurrency": 5
            }
        }]
    }
    config = utils.TaskConfig(cfg)
    output = rally("task start --task %s" % config.filename)
    uuid = re.search(r"(?P<uuid>[0-9a-f\-]{36}): started",
                     output).group("uuid")
    connection = "fake:///" + rally.gen_report_path(extension="json")
    self.assertRaises(
        utils.RallyCliError,
        rally,
        "task export --uuid %s --connection %s" % (uuid, connection))

def _start_task_in_new_thread(self, rally, cfg, report_file):
    deployment_id = utils.get_global("RALLY_DEPLOYMENT", rally.env)
    config = utils.TaskConfig(cfg)
    cmd = (("task start --task %(task_file)s "
            "--deployment %(deployment_id)s")
           % {"task_file": config.filename,
              "deployment_id": deployment_id})
    report_path = os.path.join(
        os.environ.get("REPORTS_ROOT", "rally-cli-output-files"),
        "TaskTestCase", report_file)
    task = threading.Thread(target=rally, args=(cmd,),
                            kwargs={"report_path": report_path})
    task.start()
    # poll until the started task registers its UUID in the environment
    uuid = None
    while not uuid:
        uuid = utils.get_global("RALLY_TASK", rally.env)
        time.sleep(0.5)
    return task, uuid

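# A hypothetical caller of _start_task_in_new_thread(), sketching how an
# abort test would presumably use the returned thread and UUID. "task
# abort" is a real Rally command; the report file name and the final
# assertion are illustrative assumptions, not a test from this file:
def test_abort_sketch(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    task, uuid = self._start_task_in_new_thread(
        rally, cfg, "test_abort-output.txt")
    rally("task abort --uuid %s" % uuid)
    # wait for the background "task start" thread to finish
    task.join()
    self.assertIn("aborted", rally("task status --uuid %s" % uuid))
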
def test_abort_on_sla_fail(self):
    rally = utils.Rally()
    cfg = {
        "Dummy.dummy_exception": [{
            "args": {},
            "runner": {
                "type": "constant",
                "times": 5,
                "concurrency": 5
            },
            "sla": {
                "failure_rate": {"max": 0}
            }
        }]
    }
    config = utils.TaskConfig(cfg)
    rally("task start --task %s --abort-on-sla-failure" % config.filename)
    expected = [
        {"benchmark": "Dummy.dummy_exception",
         "criterion": "aborted_on_sla",
         "detail": "Task was aborted due to SLA failure(s).",
         "pos": 0,
         "status": "FAIL"},
        {"benchmark": "Dummy.dummy_exception",
         "criterion": "failure_rate",
         "detail": mock.ANY,
         "pos": 0,
         "status": "FAIL"}
    ]
    try:
        rally("task sla-check --json", getjson=True)
    except utils.RallyCliError as expected_error:
        self.assertEqual(expected, json.loads(expected_error.output))
    else:
        self.fail("`rally task sla-check` command should return non-zero "
                  "exit code")

def test_task(self):
    cfg = {
        "Dummy.dummy_random_fail_in_atomic": [{
            "runner": {
                "type": "constant",
                "times": 100,
                "concurrency": 5
            }
        }]
    }
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
        config = utils.TaskConfig(cfg)
        output = self.rally(("task start --task %(task_file)s "
                             "--deployment %(deployment_id)s")
                            % {"task_file": config.filename,
                               "deployment_id": deployment_id})
        result = re.search(r"(?P<uuid>[0-9a-f\-]{36}): started", output)
        uuid = result.group("uuid")
        self.rally("use task --uuid %s" % uuid)
        current_task = envutils.get_global("RALLY_TASK")
        self.assertEqual(uuid, current_task)

def test_time_hook(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config(
        cmd="/bin/true",
        description="event_hook",
        runner={
            "type": "constant_for_duration",
            "concurrency": 3,
            "duration": 10
        })
    cfg["Dummy.dummy"][0]["hooks"].append({
        "name": "sys_call",
        "description": "time_hook",
        "args": "/bin/true",
        "trigger": {
            "name": "event",
            "args": {
                "unit": "time",
                "at": [3, 6, 9],
            }
        }
    })
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    results = json.loads(rally("task results"))
    hook_results = results[0]["hooks"]

    hooks_cfg = cfg["Dummy.dummy"][0]["hooks"]
    expected = [
        self._get_result(hooks_cfg[0], iterations=[5]),
        self._get_result(hooks_cfg[1], seconds=[3, 6, 9])
    ]
    # sort by trigger unit ("iteration" < "time") for a stable comparison
    self.assertEqual(
        expected,
        sorted(hook_results,
               key=lambda i: i["config"]["trigger"]["args"]["unit"]))
    self._assert_results_time(hook_results)

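# _assert_results_time() is defined elsewhere on the class; a hedged
# sketch of the check it presumably performs on each hook result. The
# field names follow the result layout the hook tests above read back;
# the concrete assertions are assumptions:
def _assert_results_time_sketch(self, results):
    for result in results:
        for r in result["results"]:
            started_at = r["started_at"]
            finished_at = r["finished_at"]
            self.assertIsInstance(started_at, float)
            self.assertIsInstance(finished_at, float)
            # a hook can only finish after it has started
            self.assertLessEqual(started_at, finished_at)
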
def test_sla_fail(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config(max_seconds_per_iteration=0.001)
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    self.assertRaises(utils.RallyCliError, rally, "task sla-check")

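# A hedged sketch of the SLA-flavored _get_sample_task_config() this
# class appears to use, inferred from the scenario name and the two SLA
# criteria asserted in test_sla_success above; the args and runner
# values are illustrative assumptions:
def _get_sample_task_config_sla_sketch(self, max_seconds_per_iteration=4):
    return {
        "KeystoneBasic.create_and_list_users": [{
            "runner": {"type": "constant", "times": 5, "concurrency": 5},
            "sla": {
                "failure_rate": {"max": 0},
                "max_seconds_per_iteration": max_seconds_per_iteration
            }
        }]
    }
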
def test_validate_is_valid(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    output = rally("task validate --task %s" % config.filename)
    self.assertIn("Task config is valid", output)

def test_results(self):
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    rally("task start --task %s" % config.filename)
    self.assertIn("result", rally("task results"))

def test_start_with_empty_config(self):
    rally = utils.Rally()
    config = utils.TaskConfig(None)
    with self.assertRaises(utils.RallyCliError) as err:
        rally("task start --task %s" % config.filename)
    self.assertIn("Input task is empty", err.exception.output)