def test_task(self):
    cfg = {
        "Dummy.dummy_random_fail_in_atomic": [
            {
                "runner": {
                    "type": "constant",
                    "times": 100,
                    "concurrency": 5
                }
            }
        ]
    }
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
        config = utils.TaskConfig(cfg)
        output = self.rally(("task start --task %(task_file)s "
                             "--deployment %(deployment_id)s")
                            % {"task_file": config.filename,
                               "deployment_id": deployment_id})
        result = re.search(
            r"(?P<uuid>[0-9a-f\-]{36}): started", output)
        uuid = result.group("uuid")
        self.rally("use task --uuid %s" % uuid)
        current_task = envutils.get_global("RALLY_TASK")
        self.assertEqual(uuid, current_task)
def test_use(self):
    rally = utils.Rally()
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
        config = utils.TaskConfig(self._get_sample_task_config())
        output = rally(("task start --task %(task_file)s "
                        "--deployment %(deployment_id)s")
                       % {"task_file": config.filename,
                          "deployment_id": deployment_id})
        result = re.search(
            r"(?P<uuid>[0-9a-f\-]{36}): started", output)
        uuid = result.group("uuid")
        rally("task use --task %s" % uuid)
        current_task = envutils.get_global("RALLY_TASK")
        self.assertEqual(uuid, current_task)
def list_verifiers(self, api, status=None):
    """List all verifiers."""
    verifiers = api.verifier.list(status)
    if verifiers:
        fields = ["UUID", "Name", "Type", "Namespace", "Created at",
                  "Updated at", "Status", "Version", "System-wide",
                  "Active"]
        cv = envutils.get_global(envutils.ENV_VERIFIER)
        formatters = {
            "Created at": lambda v: v.created_at.replace(microsecond=0),
            "Updated at": lambda v: v.updated_at.replace(microsecond=0),
            "Active": lambda v: u"\u2714" if v.uuid == cv else "",
        }
        cliutils.print_list(verifiers, fields, formatters=formatters,
                            normalize_field_names=True, sortby_index=4)
    elif status:
        print(_("There are no verifiers with status '%s'.") % status)
    else:
        print(_("There are no verifiers. You can create a verifier using "
                "the command `rally verify create-verifier`."))
def show_verifier(self, api, verifier_id=None):
    """Show detailed information about a verifier."""
    verifier = api.verifier.get(verifier_id=verifier_id)
    fields = ["UUID", "Status", "Created at", "Updated at", "Active",
              "Name", "Description", "Type", "Platform", "Source",
              "Version", "System-wide", "Extra settings", "Location",
              "Venv location"]
    used_verifier = envutils.get_global(envutils.ENV_VERIFIER)
    formatters = {
        "Created at": lambda v: v["created_at"].replace("T", " "),
        "Updated at": lambda v: v["updated_at"].replace("T", " "),
        "Active": lambda v: (ACTIVE if v["uuid"] == used_verifier
                             else None),
        "Extra settings": lambda v: (json.dumps(v["extra_settings"],
                                                indent=4)
                                     if v["extra_settings"] else None),
        "Location": lambda v: self._get_location(v["uuid"], "repo")
    }
    if not verifier["system_wide"]:
        formatters["Venv location"] = lambda v: self._get_location(
            v["uuid"], ".venv")
    cliutils.print_dict(verifier, fields=fields, formatters=formatters,
                        normalize_field_names=True, print_header=False,
                        table_label="Verifier")
    print("Attention! Everything you do in the verifier repository or "
          "the verifier virtual environment, you do at your own risk!")
def show_verifier(self, api, verifier_id=None):
    """Show detailed information about a verifier."""
    verifier = api.verifier.get(verifier_id=verifier_id)
    fields = ["UUID", "Status", "Created at", "Updated at", "Active",
              "Name", "Description", "Type", "Platform", "Source",
              "Version", "System-wide", "Extra settings", "Location",
              "Venv location"]
    used_verifier = envutils.get_global(envutils.ENV_VERIFIER)
    formatters = {
        "Created at": lambda v: v["created_at"].replace("T", " "),
        "Updated at": lambda v: v["updated_at"].replace("T", " "),
        "Active": lambda v: (u"\u2714" if v["uuid"] == used_verifier
                             else None),
        "Extra settings": lambda v: (json.dumps(v["extra_settings"],
                                                indent=4)
                                     if v["extra_settings"] else None),
        "Location": lambda v: self._get_location(v["uuid"], "repo")
    }
    if not verifier["system_wide"]:
        formatters["Venv location"] = lambda v: self._get_location(
            v["uuid"], ".venv")
    cliutils.print_dict(verifier, fields=fields, formatters=formatters,
                        normalize_field_names=True, print_header=False,
                        table_label="Verifier")
    print("Attention! Everything you do in the verifier repository or "
          "the verifier virtual environment, you do at your own risk!")
def list_verifiers(self, api, status=None):
    """List all verifiers."""
    verifiers = api.verifier.list(status=status)
    if verifiers:
        fields = ["UUID", "Name", "Type", "Platform", "Created at",
                  "Updated at", "Status", "Version", "System-wide",
                  "Active"]
        cv = envutils.get_global(envutils.ENV_VERIFIER)
        formatters = {
            "Created at": lambda v: v["created_at"],
            "Updated at": lambda v: v["updated_at"],
            "Active": lambda v: ACTIVE if v["uuid"] == cv else "",
        }
        cliutils.print_list(verifiers, fields, formatters=formatters,
                            normalize_field_names=True, sortby_index=4)
    elif status:
        print("There are no verifiers with status '%s'." % status)
    else:
        print("There are no verifiers. You can create a verifier using "
              "the command `rally verify create-verifier`.")
def show_verifier(self, api, verifier_id=None):
    """Show detailed information about a verifier."""
    verifier = api.verifier.get(verifier_id)
    fields = ["UUID", "Status", "Created at", "Updated at", "Active",
              "Name", "Description", "Type", "Namespace", "Source",
              "Version", "System-wide", "Extra settings", "Location"]
    used_verifier = envutils.get_global(envutils.ENV_VERIFIER)
    formatters = {
        "Created at": lambda v: v.created_at.replace(microsecond=0),
        "Updated at": lambda v: v.updated_at.replace(microsecond=0),
        "Active": lambda v: (u"\u2714" if v.uuid == used_verifier
                             else None),
        "Extra settings": lambda v: (json.dumps(v.extra_settings, indent=4)
                                     if v.extra_settings else None),
        "Location": lambda v: v.manager.repo_dir
    }
    if not verifier.system_wide:
        fields.append("Venv location")
        formatters["Venv location"] = lambda v: v.manager.venv_dir
    cliutils.print_dict(verifier, fields=fields, formatters=formatters,
                        normalize_field_names=True, print_header=False,
                        table_label="Verifier")
    print(_("Attention! Everything you do in the verifier repository or "
            "the verifier virtual environment, you do at your own risk!"))
def test_deployment(self):
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        output = self.rally(
            "deployment create --name t_create_env1 --fromenv")
        uuid = self._get_deployment_uuid(output)
        self.rally("deployment create --name t_create_env2 --fromenv")
        self.rally("use deployment --deployment %s" % uuid)
        current_deployment = envutils.get_global("RALLY_DEPLOYMENT")
        self.assertEqual(uuid, current_deployment)
def test_use(self):
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        output = self.rally(
            "deployment create --name t_create_env1 --fromenv")
        uuid = re.search(r"Using deployment: (?P<uuid>[0-9a-f\-]{36})",
                         output).group("uuid")
        self.rally("deployment create --name t_create_env2 --fromenv")
        self.rally("deployment use --deployment %s" % uuid)
        current_deployment = envutils.get_global("RALLY_DEPLOYMENT")
        self.assertEqual(uuid, current_deployment)
def test_validate_is_invalid(self):
    rally = utils.Rally()
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
        cfg = {"invalid": "config"}
        config = utils.TaskConfig(cfg)
        self.assertRaises(utils.RallyCliError, rally,
                          ("task validate --task %(task_file)s "
                           "--deployment %(deployment_id)s")
                          % {"task_file": config.filename,
                             "deployment_id": deployment_id})
def _test_start_abort_on_sla_failure(self, cfg, times):
    rally = utils.Rally()
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
        config = utils.TaskConfig(cfg)
        rally(("task start --task %(task_file)s "
               "--deployment %(deployment_id)s --abort-on-sla-failure")
              % {"task_file": config.filename,
                 "deployment_id": deployment_id})
        results = json.loads(rally("task results"))
        iterations_completed = len(results[0]["result"])
        self.assertLess(iterations_completed, times)
def test_start(self):
    rally = utils.Rally()
    with mock.patch.dict("os.environ", utils.TEST_ENV):
        deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
        cfg = self._get_sample_task_config()
        config = utils.TaskConfig(cfg)
        output = rally(("task start --task %(task_file)s "
                        "--deployment %(deployment_id)s")
                       % {"task_file": config.filename,
                          "deployment_id": deployment_id})
        result = re.search(
            r"(?P<task_id>[0-9a-f\-]{36}): started", output)
        self.assertIsNotNone(result)
def setup():
    if CONF.profile:
        profile = CONF.profile
    else:
        profile = envutils.get_global("RALLY_PROFILE")
    if profile is None:
        profile = PROFILE_OPENSTACK
    if profile not in PROFILE_ALL_LIST:
        raise InvalidArgumentsException("Unknown profile %s" % profile)
    fileutils.update_globals_file("RALLY_PROFILE", profile)
    print("Using profile: %s" % profile)
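# ---------------------------------------------------------------------------
# setup() above persists the chosen profile via fileutils.update_globals_file
# so that a later envutils.get_global("RALLY_PROFILE") can read it back. A
# minimal stand-in sketch, assuming the globals file at ~/.rally/globals
# stores one KEY=VALUE pair per line; the helper name mirrors rally's, but
# this is an illustration, not the real implementation.
import os


def update_globals_file(key, value,
                        path=os.path.expanduser("~/.rally/globals")):
    lines = []
    if os.path.exists(path):
        with open(path) as f:
            # Drop any previous value stored for this key.
            lines = [ln for ln in f if not ln.startswith("%s=" % key)]
    lines.append("%s=%s\n" % (key, value))
    with open(path, "w") as f:
        f.writelines(lines)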
def launch_verification_once(launch_parameters):
    """Launch verification and show results in different formats."""
    results = call_rally("verify start %s" % launch_parameters)
    results["uuid"] = envutils.get_global(envutils.ENV_VERIFICATION)
    results["result_in_html"] = call_rally("verify results --html",
                                           output_type="html")
    results["result_in_json"] = call_rally("verify results --json",
                                           output_type="json")
    results["show"] = call_rally("verify show")
    results["show_detailed"] = call_rally("verify show --detailed")
    # NOTE(andreykurilin): we need to clear the verification UUID from the
    # global environment to be able to load it next time (for another
    # verification).
    envutils.clear_global(envutils.ENV_VERIFICATION)
    return results
def start_verification(args):
    """Start a verification, show results and generate reports."""
    results = call_rally("verify start %s" % args)
    results["uuid"] = envutils.get_global(envutils.ENV_VERIFICATION)
    results["show"] = call_rally("verify show")
    results["show_detailed"] = call_rally("verify show --detailed")
    for output_type in ("json", "html", "junit-xml"):
        results[output_type.replace("-", "_")] = call_rally(
            "verify report", output_type=output_type)
    # NOTE(andreykurilin): we need to clear the verification UUID from the
    # global environment to be able to load it next time (for another
    # verification).
    envutils.clear_global(envutils.ENV_VERIFICATION)
    return results
def launch_verification_once(launch_parameters):
    """Launch verification and show results in different formats."""
    results = call_rally("verify start %s" % launch_parameters)
    results["uuid"] = envutils.get_global(envutils.ENV_VERIFICATION)
    results["result_in_html"] = call_rally("verify results",
                                           output_type="html")
    results["result_in_json"] = call_rally("verify results",
                                           output_type="json")
    results["show"] = call_rally("verify show")
    results["show_detailed"] = call_rally("verify show --detailed")
    # NOTE(andreykurilin): we need to clear the verification UUID from the
    # global environment to be able to load it next time (for another
    # verification).
    envutils.clear_global(envutils.ENV_VERIFICATION)
    return results
def list(self, api, deployment_list=None):
    """List existing deployments."""
    headers = ["uuid", "created_at", "name", "status", "active"]
    current_deployment = envutils.get_global("RALLY_DEPLOYMENT")
    deployment_list = deployment_list or api.deployment.list()
    table_rows = []
    if deployment_list:
        for t in deployment_list:
            r = [str(t[column]) for column in headers[:-1]]
            r.append("" if t["uuid"] != current_deployment else "*")
            table_rows.append(utils.Struct(**dict(zip(headers, r))))
        cliutils.print_list(table_rows, headers,
                            sortby_index=headers.index("created_at"))
    else:
        print("There are no deployments. To create a new deployment, use:"
              "\nrally deployment create")
def list(self, api, to_json=False):
    """List existing environments."""
    envs = env_mgr.EnvManager.list()
    if to_json:
        print(json.dumps([env.cached_data for env in envs], indent=2))
    elif not envs:
        print(self.MSG_NO_ENVS)
    else:
        cur_env = envutils.get_global(envutils.ENV_ENV)
        table = prettytable.PrettyTable()
        fields = ["uuid", "name", "status", "created_at", "description"]
        table.field_names = fields + ["default"]
        for env in envs:
            row = [env.cached_data[f] for f in fields]
            row.append("*" if cur_env == env.cached_data["uuid"] else "")
            table.add_row(row)
        table.sortby = "created_at"
        table.reversesort = True
        table.align = "l"
        print(table.get_string())
def list_verifiers(self, api, status=None):
    """List all verifiers."""
    verifiers = api.verifier.list(status=status)
    if verifiers:
        fields = ["UUID", "Name", "Type", "Platform", "Created at",
                  "Updated at", "Status", "Version", "System-wide",
                  "Active"]
        cv = envutils.get_global(envutils.ENV_VERIFIER)
        formatters = {
            "Created at": lambda v: v["created_at"],
            "Updated at": lambda v: v["updated_at"],
            "Active": lambda v: u"\u2714" if v["uuid"] == cv else "",
        }
        cliutils.print_list(verifiers, fields, formatters=formatters,
                            normalize_field_names=True, sortby_index=4)
    elif status:
        print("There are no verifiers with status '%s'." % status)
    else:
        print("There are no verifiers. You can create a verifier using "
              "the command `rally verify create-verifier`.")
def main():
    parser = argparse.ArgumentParser(description="Launch rally-verify job.")
    parser.add_argument(
        "--mode", type=str, default="light",
        help="Mode of job. The 'full' mode corresponds to the full set of "
             "Tempest tests. The 'light' mode corresponds to the smoke set "
             "of Tempest tests.",
        choices=MODES_PARAMETERS.keys())
    parser.add_argument(
        "--compare", action="store_true",
        help="Launch 2 verifications and compare them.")
    parser.add_argument(
        "--ctx-create-resources", action="store_true",
        help="Make Tempest context create needed resources for the tests.")
    args = parser.parse_args()

    if not os.path.exists("%s/extra" % BASE_DIR):
        os.makedirs("%s/extra" % BASE_DIR)

    # Check deployment
    call_rally("deployment use --deployment devstack", print_output=True)
    call_rally("deployment check", print_output=True)

    config = json.loads(
        subprocess.check_output(["rally", "deployment", "config"]))
    config.update(config.pop("admin"))
    del config["type"]
    clients = osclients.Clients(objects.Credential(**config))

    if args.ctx_create_resources:
        # If the 'ctx-create-resources' arg is provided, delete images and
        # flavors, and also create a shared network to make Tempest context
        # create needed resources.
        LOG.info("The 'ctx-create-resources' arg is provided. Deleting "
                 "images and flavors, and also creating a shared network "
                 "to make Tempest context create needed resources.")
        LOG.info("Deleting images.")
        for image in clients.glance().images.list():
            clients.glance().images.delete(image.id)
        LOG.info("Deleting flavors.")
        for flavor in clients.nova().flavors.list():
            clients.nova().flavors.delete(flavor.id)
        LOG.info("Creating a shared network.")
        net_body = {
            "network": {
                "name": "shared-net-%s" % str(uuid.uuid4()),
                "tenant_id": clients.keystone.auth_ref.project_id,
                "shared": True
            }
        }
        clients.neutron().create_network(net_body)
    else:
        # Otherwise, just in case create only flavors with the following
        # properties: RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB to make
        # Tempest context discover them.
        LOG.info("The 'ctx-create-resources' arg is not provided. "
                 "Creating flavors to make Tempest context discover them.")
        for flv_ram in [64, 128]:
            params = {
                "name": "flavor-%s" % str(uuid.uuid4()),
                "ram": flv_ram,
                "vcpus": 1,
                "disk": 0
            }
            LOG.info(
                "Creating flavor '%s' with the following properties: RAM "
                "= %dMB, VCPUs = 1, disk = 0GB" % (params["name"], flv_ram))
            clients.nova().flavors.create(**params)

    render_vars = {"verifications": []}

    # Install the latest Tempest version
    render_vars["install"] = call_rally("verify install")
    # Get Rally deployment ID
    rally_deployment_id = envutils.get_global(envutils.ENV_DEPLOYMENT)
    # Get the penultimate Tempest commit ID
    tempest_dir = (
        "/home/jenkins/.rally/tempest/for-deployment-%s"
        % rally_deployment_id)
    tempest_commit_id = subprocess.check_output(
        ["git", "log", "-n", "1", "--pretty=format:'%H'"],
        cwd=tempest_dir).strip()
    # Install the penultimate Tempest version
    render_vars["reinstall"] = call_rally(
        "verify reinstall --version %s" % tempest_commit_id)
    # Install a Tempest plugin
    render_vars["installplugin"] = call_rally(
        "verify installplugin --source %s" % TEMPEST_PLUGIN)
    # List installed Tempest plugins
    render_vars["listplugins"] = call_rally("verify listplugins")

    # Discover tests depending on the Tempest suite
    discover_cmd = "verify discover"
    if args.mode == "light":
        discover_cmd += " --pattern smoke"
    render_vars["discover"] = call_rally(discover_cmd)

    # Generate and show the Tempest config file
    render_vars["genconfig"] = call_rally("verify genconfig")
    render_vars["showconfig"] = call_rally("verify showconfig")

    # Create a file with a list of tests that are expected to fail
    xfails_file_path = create_file_with_xfails()

    # Launch verification
    launch_params = "%s --xfails-file %s" % (
        MODES_PARAMETERS[args.mode], xfails_file_path)
    render_vars["verifications"].append(
        launch_verification_once(launch_params))

    if args.compare:
        render_vars["verifications"].append(
            launch_verification_once(launch_params))
        render_vars["compare"] = do_compare(
            render_vars["verifications"][-2]["uuid"],
            render_vars["verifications"][-1]["uuid"])

    render_vars["list"] = call_rally("verify list")

    render_page(**render_vars)

    return _return_status
def test_get_deployment_id_in_env(self):
    deployment_id = envutils.get_global(envutils.ENV_DEPLOYMENT)
    self.assertEqual("my_deployment_id", deployment_id)
def main():
    parser = argparse.ArgumentParser(description="Launch rally-verify job.")
    parser.add_argument("--mode", type=str, default="light",
                        help="Mode of job. The 'full' mode corresponds to "
                             "the full set of verifier tests. The 'light' "
                             "mode corresponds to the smoke set of "
                             "verifier tests.",
                        choices=MODES.keys())
    parser.add_argument("--compare", action="store_true",
                        help="Start the second verification to generate a "
                             "trends report for two verifications.")
    # TODO(ylobankov): Remove hard-coded Tempest related things and make it
    # configurable.
    parser.add_argument("--ctx-create-resources", action="store_true",
                        help="Make Tempest context create needed resources "
                             "for the tests.")
    args = parser.parse_args()

    if not os.path.exists("%s/extra" % BASE_DIR):
        os.makedirs("%s/extra" % BASE_DIR)

    # Choose and check the deployment
    call_rally("deployment use --deployment %s" % DEPLOYMENT_NAME)
    call_rally("deployment check")

    config = json.loads(
        subprocess.check_output(["rally", "deployment", "config"]))
    config = config["creds"]["openstack"]
    config.update(config.pop("admin"))
    clients = credential.OpenStackCredential(**config).clients()

    if args.ctx_create_resources:
        # If the 'ctx-create-resources' arg is provided, delete images and
        # flavors, and also create a shared network to make Tempest context
        # create needed resources.
        LOG.info("The 'ctx-create-resources' arg is provided. Deleting "
                 "images and flavors, and also creating a shared network "
                 "to make Tempest context create needed resources.")
        LOG.info("Deleting images.")
        for image in clients.glance().images.list():
            clients.glance().images.delete(image.id)
        LOG.info("Deleting flavors.")
        for flavor in clients.nova().flavors.list():
            clients.nova().flavors.delete(flavor.id)
        LOG.info("Creating a shared network.")
        net_body = {
            "network": {
                "name": "shared-net-%s" % str(uuid.uuid4()),
                "tenant_id": clients.keystone.auth_ref.project_id,
                "shared": True
            }
        }
        clients.neutron().create_network(net_body)
    else:
        # Otherwise, just in case create only flavors with the following
        # properties: RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB to make
        # Tempest context discover them.
        LOG.info("The 'ctx-create-resources' arg is not provided. "
                 "Creating flavors to make Tempest context discover them.")
        for flv_ram in [64, 128]:
            params = {
                "name": "flavor-%s" % str(uuid.uuid4()),
                "ram": flv_ram,
                "vcpus": 1,
                "disk": 0
            }
            LOG.info("Creating flavor '%s' with the following properties: "
                     "RAM = %dMB, VCPUs = 1, disk = 0GB"
                     % (params["name"], flv_ram))
            clients.nova().flavors.create(**params)

    render_vars = dict(verifications=[])

    # List plugins for verifiers management
    render_vars["list_plugins"] = call_rally("verify list-plugins")

    # Create a verifier
    render_vars["create_verifier"] = call_rally(
        "verify create-verifier --type %s --name my-verifier --source %s"
        % (VERIFIER_TYPE, VERIFIER_SOURCE))
    # Show the verifier
    render_vars["show_verifier"] = call_rally("verify show-verifier")
    # List verifiers
    render_vars["list_verifiers"] = call_rally("verify list-verifiers")

    # Get the verifier ID
    verifier_id = envutils.get_global(envutils.ENV_VERIFIER)
    # Get the penultimate verifier commit ID
    repo_dir = os.path.join(
        os.path.expanduser("~"),
        ".rally/verification/verifier-%s/repo" % verifier_id)
    p_commit_id = subprocess.check_output(
        ["git", "log", "-n", "1", "--pretty=format:%H"],
        cwd=repo_dir).strip()
    # Switch the verifier to the penultimate version
    render_vars["update_verifier"] = call_rally(
        "verify update-verifier --version %s --update-venv" % p_commit_id)

    # Generate and show the verifier config file
    render_vars["configure_verifier"] = call_rally(
        "verify configure-verifier --show")

    # Add a verifier extension
    render_vars["add_verifier_ext"] = call_rally(
        "verify add-verifier-ext --source %s" % VERIFIER_EXT_REPO)
    # List verifier extensions
    render_vars["list_verifier_exts"] = call_rally(
        "verify list-verifier-exts")

    # List verifier tests
    render_vars["list_verifier_tests"] = call_rally(
        "verify list-verifier-tests %s" % MODES[args.mode])

    # Start a verification, show results and generate reports
    skip_list_path = write_file("skip-list.yaml", SKIP_TESTS)
    xfail_list_path = write_file("xfail-list.yaml", XFAIL_TESTS)
    run_args = ("%s --skip-list %s --xfail-list %s --tag first-run %s-set "
                "--detailed" % (MODES[args.mode], skip_list_path,
                                xfail_list_path, args.mode))
    render_vars["verifications"].append(start_verification(run_args))

    if args.compare:
        # Start another verification, show results and generate reports
        with gzip.open(
                render_vars["list_verifier_tests"]["stdout_file"]) as f:
            tests = [t for t in f.read().split("\n")
                     if TEST_NAME_RE.match(t)]
        load_list_path = write_file("load-list.txt", "\n".join(tests))
        run_args = "--load-list %s --tag second-run %s-set --detailed" % (
            load_list_path, args.mode)
        render_vars["verifications"].append(start_verification(run_args))

        # Generate trends reports for two verifications
        render_vars["compare"] = generate_trends_reports(
            render_vars["verifications"][-2]["uuid"],
            render_vars["verifications"][-1]["uuid"])

    # List verifications
    render_vars["list"] = call_rally("verify list")

    # Delete the verifier extension
    render_vars["delete_verifier_ext"] = call_rally(
        "verify delete-verifier-ext --name %s" % VERIFIER_EXT_NAME)
    # Delete the verifier and all verifications
    render_vars["delete_verifier"] = call_rally(
        "verify delete-verifier --id %s --force" % verifier_id)

    render_page(**render_vars)

    return _return_status
def test_get_task_id_with_none(self, mock_load_env_file):
    self.assertIsNone(envutils.get_global("RALLY_TASK"))
    mock_load_env_file.assert_called_once_with(
        os.path.expanduser("~/.rally/globals"))
def test_get_task_id_in_env(self):
    self.assertEqual("my_task_id", envutils.get_global(envutils.ENV_TASK))
def test_get_deployment_id_with_none(self, mock_load_env_file):
    self.assertIsNone(envutils.get_global(envutils.ENV_DEPLOYMENT))
    mock_load_env_file.assert_called_once_with(
        os.path.expanduser("~/.rally/globals"))
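# ---------------------------------------------------------------------------
# A minimal sketch of the behaviour the tests above pin down for
# envutils.get_global: values are resolved from os.environ after merging in
# the persisted globals file at ~/.rally/globals (which is why the tests can
# seed values through mock.patch.dict and why the mocked load_env_file is
# asserted against that path). The KEY=VALUE parsing and the `default`
# parameter are assumptions for illustration.
import os


def load_env_file(path):
    # Assumed format: one KEY=VALUE pair per line; values already present
    # in the process environment take precedence over the file.
    if os.path.exists(path):
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith("#") and "=" in line:
                    key, _, value = line.partition("=")
                    os.environ.setdefault(key, value)


def get_global(global_key, default=None):
    load_env_file(os.path.expanduser("~/.rally/globals"))
    return os.environ.get(global_key, default)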
def results(self, uuids=None, output_file=None, output_html=False,
            output_json=False, output_csv=False):
    """Display results of verifications.

    :param uuids: UUIDs of verifications
    :param output_file: Path to a file to save results
    :param output_html: Display results in HTML format
    :param output_json: Display results in JSON format (Default)
    :param output_csv: Display results in CSV format
    """
    if not uuids:
        uuid = envutils.get_global(envutils.ENV_VERIFICATION)
        if not uuid:
            raise exceptions.InvalidArgumentsException(
                "Verification UUID is missing")
        uuids = [uuid]

    data = []
    for uuid in uuids:
        try:
            verification = api.Verification.get(uuid)
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1
        data.append(verification)

    if output_json + output_html + output_csv > 1:
        print(_("Please specify only one format option from %s.")
              % "--json, --html, --csv")
        return 1

    verifications = {}
    for ver in data:
        uuid = ver.db_object["uuid"]
        res = ver.get_results() or {}
        tests = {}
        for test in list(res.get("test_cases", {}).values()):
            name = test["name"]
            if name in tests:
                mesg = ("Duplicated test in verification "
                        "%(uuid)s: %(test)s" % {"uuid": uuid,
                                                "test": name})
                raise exceptions.RallyException(mesg)
            tests[name] = {"tags": test["tags"],
                           "status": test["status"],
                           "duration": test["time"],
                           "details": (test.get("traceback", "").strip()
                                       or test.get("reason"))}
        verifications[uuid] = {
            "tests": tests,
            "duration": res.get("time", 0),
            "total": res.get("tests", 0),
            "skipped": res.get("skipped", 0),
            "success": res.get("success", 0),
            "expected_failures": res.get("expected_failures", 0),
            "unexpected_success": res.get("unexpected_success", 0),
            "failures": res.get("failures", 0),
            "started_at": ver.db_object[
                "created_at"].strftime("%Y-%m-%d %H:%M:%S"),
            "finished_at": ver.db_object[
                "updated_at"].strftime("%Y-%m-%d %H:%M:%S"),
            "status": ver.db_object["status"],
            "set_name": ver.db_object["set_name"]
        }

    if output_html:
        result = report.VerificationReport(verifications).to_html()
    elif output_csv:
        result = report.VerificationReport(verifications).to_csv()
    else:
        result = report.VerificationReport(verifications).to_json()

    if output_file:
        output_file = os.path.expanduser(output_file)
        with open(output_file, "wb") as f:
            f.write(result)
    else:
        print(result)
def results(self, uuids=None, output_file=None, output_html=False,
            output_json=False, output_csv=False):
    """Display results of verifications.

    :param uuids: UUIDs of verifications
    :param output_file: Path to a file to save results
    :param output_html: Display results in HTML format
    :param output_json: Display results in JSON format (Default)
    :param output_csv: Display results in CSV format
    """
    if not uuids:
        uuid = envutils.get_global(envutils.ENV_VERIFICATION)
        if not uuid:
            raise exceptions.InvalidArgumentsException(
                "Verification UUID is missing")
        uuids = [uuid]

    data = []
    for uuid in uuids:
        try:
            verification = api.Verification.get(uuid)
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1
        data.append(verification)

    if output_json + output_html + output_csv > 1:
        print(_("Please specify only one format option from %s.")
              % "--json, --html, --csv")
        return 1

    verifications = {}
    for ver in data:
        uuid = ver.db_object["uuid"]
        res = ver.get_results() or {}
        tests = {}
        for test in list(res.get("test_cases", {}).values()):
            name = test["name"]
            if name in tests:
                mesg = ("Duplicated test in verification "
                        "%(uuid)s: %(test)s" % {"uuid": uuid,
                                                "test": name})
                raise exceptions.RallyException(mesg)
            reason = test.get("reason", "")
            traceback = test.get("traceback", "")
            sep = "\n\n" if reason and traceback else ""
            tests[name] = {"tags": test["tags"],
                           "status": test["status"],
                           "duration": test["time"],
                           "details": (reason + sep
                                       + traceback.strip()) or None}
        verifications[uuid] = {
            "tests": tests,
            "duration": res.get("time", 0),
            "total": res.get("tests", 0),
            "skipped": res.get("skipped", 0),
            "success": res.get("success", 0),
            "expected_failures": res.get("expected_failures", 0),
            "unexpected_success": res.get("unexpected_success", 0),
            "failures": res.get("failures", 0),
            "started_at": ver.db_object[
                "created_at"].strftime("%Y-%m-%d %H:%M:%S"),
            "finished_at": ver.db_object[
                "updated_at"].strftime("%Y-%m-%d %H:%M:%S"),
            "status": ver.db_object["status"],
            "set_name": ver.db_object["set_name"]
        }

    if output_html:
        result = report.VerificationReport(verifications).to_html()
    elif output_csv:
        result = report.VerificationReport(verifications).to_csv()
    else:
        result = report.VerificationReport(verifications).to_json()

    if output_file:
        output_file = os.path.expanduser(output_file)
        with open(output_file, "wb") as f:
            f.write(result)
    else:
        print(result)