Example 1
    def test_generate(self, mock_dt, mock_json_dumps):
        mock_dt.datetime.utcnow.return_value = dt.datetime.utcnow()
        tasks_results = test_html.get_tasks_results()

        # print mode (no output destination)
        reporter = json_exporter.JSONExporter(tasks_results, None)
        reporter._generate_tasks = mock.MagicMock()
        self.assertEqual({"print": "json"}, reporter.generate())
        results = {
            "info": {"rally_version": rally_version.version_string(),
                     "generated_at": mock_dt.datetime.strftime.return_value,
                     "format_version": "1.1"},
            "tasks": reporter._generate_tasks.return_value
        }
        mock_dt.datetime.strftime.assert_called_once_with(
            mock_dt.datetime.utcnow.return_value,
            json_exporter.TIMEFORMAT)
        reporter._generate_tasks.assert_called_once_with()
        mock_json_dumps.assert_called_once_with(results,
                                                sort_keys=False,
                                                indent=4)

        # export to file
        reporter = json_exporter.JSONExporter(tasks_results,
                                              output_destination="path")
        self.assertEqual({"files": {"path": "json"},
                          "open": "file://path"}, reporter.generate())
Example 2
def trends(tasks_results):
    trends = Trends()
    for i, scenario in enumerate(_extend_results(tasks_results), 1):
        trends.add_result(scenario)
    template = ui_utils.get_template("task/trends.html")
    return template.render(version=version.version_string(),
                           data=json.dumps(trends.get_data()))
Example 3
    def start(
        self,
        task,
        deployment=None,
        task_args=None,
        task_args_file=None,
        tag=None,
        do_use=False,
        abort_on_sla_failure=False,
    ):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """

        task_instance = api.Task.create(deployment, tag)

        try:
            print("Running Rally version", version.version_string())
            input_task = self._load_and_validate_task(
                task, task_args, task_args_file, deployment, task_instance=task_instance
            )

            print(
                cliutils.make_header(
                    _("Task %(tag)s %(uuid)s: started") % {"uuid": task_instance["uuid"], "tag": task_instance["tag"]}
                )
            )
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(task_instance["uuid"])

            api.Task.start(deployment, input_task, task=task_instance, abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(task_id=task_instance["uuid"])

        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_failed(type(e).__name__, str(e), json.dumps(traceback.format_exc()))
            print(e, file=sys.stderr)
            return 1
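
The docstring above describes how task_args and task_args_file are merged. Below is a minimal sketch of those merge semantics, assuming both sources end up as plain dicts; merge_task_args is an illustrative helper, not Rally's actual loader.

import json

import yaml  # PyYAML; the args file may be JSON or YAML (JSON is a YAML subset)


def merge_task_args(task_args=None, task_args_file=None):
    """Illustrative only: task_args overrides values from task_args_file."""
    args = {}
    if task_args_file:
        with open(task_args_file) as f:
            args.update(yaml.safe_load(f) or {})
    if task_args:
        args.update(json.loads(task_args)
                    if isinstance(task_args, str) else task_args)
    return args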
Example 4
    def generate(self):
        root = ET.Element("testsuites")
        root.append(ET.Comment("Report is generated by Rally %s at %s" % (
            version.version_string(),
            dt.datetime.utcnow().strftime(consts.TimeFormat.ISO8601))))

        for t in self.tasks_results:
            created_at = dt.datetime.strptime(t["created_at"],
                                              "%Y-%m-%dT%H:%M:%S")
            updated_at = dt.datetime.strptime(t["updated_at"],
                                              "%Y-%m-%dT%H:%M:%S")
            task = {
                "id": t["uuid"],
                "tests": 0,
                "errors": "0",
                "skipped": "0",
                "failures": 0,
                "time": "%.2f" % (updated_at - created_at).total_seconds(),
                "timestamp": t["created_at"],
            }
            test_cases = []
            for workload in itertools.chain(
                    *[s["workloads"] for s in t["subtasks"]]):
                class_name, name = workload["name"].split(".", 1)
                test_case = {
                    "id": workload["uuid"],
                    "time": "%.2f" % workload["full_duration"],
                    "name": name,
                    "classname": class_name,
                    "timestamp": workload["created_at"]
                }
                if not workload["pass_sla"]:
                    task["failures"] += 1
                    test_case["failure"] = "\n".join(
                        [s["detail"]
                         for s in workload["sla_results"]["sla"]
                         if not s["success"]])
                test_cases.append(test_case)

            task["tests"] = str(len(test_cases))
            task["failures"] = str(task["failures"])

            testsuite = ET.SubElement(root, "testsuite", task)
            for test_case in test_cases:
                failure = test_case.pop("failure", None)
                test_case = ET.SubElement(testsuite, "testcase", test_case)
                if failure:
                    ET.SubElement(test_case, "failure").text = failure

        utils.prettify_xml(root)

        raw_report = ET.tostring(root, encoding="utf-8").decode("utf-8")

        if self.output_destination:
            return {"files": {self.output_destination: raw_report},
                    "open": "file://" + os.path.abspath(
                        self.output_destination)}
        else:
            return {"print": raw_report}
Example 5
def plot(tasks_results, include_libs=False):
    extended_results = _extend_results(tasks_results)
    template = ui_utils.get_template("task/report.html")
    source, data = _process_tasks(extended_results)
    return template.render(version=version.version_string(),
                           source=json.dumps(source),
                           data=json.dumps(data),
                           include_libs=include_libs)
Example 6
def _print_version():
    from rally.common import version

    print("Rally version: %s" % version.version_string())
    packages = version.plugins_versions()
    if packages:
        print("\nInstalled Plugins:")
        print("\n".join("\t%s: %s" % p for p in sorted(packages.items())))
Example 7
def trends(tasks):
    trends = Trends()
    for task in tasks:
        for workload in itertools.chain(
                *[s["workloads"] for s in task["subtasks"]]):
            trends.add_result(task["uuid"], workload)
    template = ui_utils.get_template("task/trends.html")
    return template.render(version=version.version_string(),
                           data=json.dumps(trends.get_data()))
Example 8
    def generate(self):
        results = {"info": {"rally_version": rally_version.version_string(),
                            "generated_at": dt.datetime.strftime(
                                timeutils.utcnow(), TIMEFORMAT),
                            "format_version": "1"},
                   "tasks": self._generate_tasks()}

        results = json.dumps(results, sort_keys=False, indent=4)

        if self.output_destination:
            return {"files": {self.output_destination: results},
                    "open": "file://" + self.output_destination}
        else:
            return {"print": results}
Example 9
def plot(tasks_results, include_libs=False):
    source = _make_source(tasks_results)
    tasks = []
    subtasks = []
    workloads = []
    for task in tasks_results:
        tasks.append(task)
        for subtask in tasks[-1]["subtasks"]:
            workloads.extend(subtask.pop("workloads"))
        subtasks.extend(tasks[-1].pop("subtasks"))

    template = ui_utils.get_template("task/report.html")
    data = _process_workloads(workloads)
    return template.render(version=version.version_string(),
                           source=json.dumps(source),
                           data=json.dumps(data),
                           include_libs=include_libs)
Example 10
    def start(self, api, task_file, deployment=None, task_args=None,
              task_args_file=None, tags=None, do_use=False,
              abort_on_sla_failure=False):
        """Run task.

        If both task_args and task_args_file are specified, they are going to
        be merged. task_args has a higher priority so it overrides
        values from task_args_file.
        There are three return codes: 0 means no error, 1 means a
        running error, and 2 means an SLA check failed.
        """

        input_task = self._load_and_validate_task(api, task_file,
                                                  raw_args=task_args,
                                                  args_file=task_args_file)
        print("Running Rally version", version.version_string())

        try:
            task_instance = api.task.create(deployment=deployment, tags=tags)
            tags = "[tags: '%s']" % "', '".join(tags) if tags else ""

            print(cliutils.make_header(
                "Task %(tags)s %(uuid)s: started"
                % {"uuid": task_instance["uuid"], "tags": tags}))
            print("Running Task... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment=deployment, config=input_task,
                           task=task_instance["uuid"],
                           abort_on_sla_failure=abort_on_sla_failure)

        except exceptions.DeploymentNotFinishedStatus as e:
            print("Cannot start a task on unfinished deployment: %s" % e)
            return 1

        if self._detailed(api, task_id=task_instance["uuid"]):
            return 2
        return 0
Example 11
def run(argv, categories):
    parser = lambda subparsers: _add_command_parsers(categories, subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)

    CONF.register_cli_opt(category_opt)
    help_msg = ("Additional custom plugin locations. Multiple files or "
                "directories may be specified. All plugins in the specified"
                " directories and subdirectories will be imported. Plugins in"
                " /opt/rally/plugins and ~/.rally/plugins will always be "
                "imported.")

    CONF.register_cli_opt(cfg.ListOpt("plugin-paths",
                                      default=os.environ.get(
                                          "RALLY_PLUGIN_PATHS"),
                                      help=help_msg))

    try:
        CONF(argv[1:], project="rally", version=version.version_string(),
             default_config_files=find_config_files(CONFIG_SEARCH_PATHS))
        logging.setup("rally")
        if not CONF.get("log_config_append"):
            # The calls below silence noisy INFO logging from the requests
            # and urllib3 modules. Ideally this would be configured through
            # oslo.log on the root logger, but the current oslo code does
            # not expose such an interface, so the loggers are adjusted
            # directly whenever the user has not supplied an explicit log
            # configuration. This workaround can be removed once oslo.log
            # grows such an interface.
            LOG.debug("INFO logs from the urllib3 and requests modules "
                      "are hidden.")
            requests_log = logging.getLogger("requests").logger
            requests_log.setLevel(logging.WARNING)
            urllib3_log = logging.getLogger("urllib3").logger
            urllib3_log.setLevel(logging.WARNING)

            # NOTE(wtakase): This is for suppressing boto error logging.
            LOG.debug("ERROR log from boto module is hide.")
            boto_log = logging.getLogger("boto").logger
            boto_log.setLevel(logging.CRITICAL)

    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run %s as root.") % argv[0])
        return 2

    if CONF.category.name == "version":
        print(version.version_string())
        return 0

    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return 0

    fn = CONF.category.action_fn
    fn_args = [encodeutils.safe_decode(arg)
               for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = encodeutils.safe_decode(v)
        fn_kwargs[k] = v

    # validate the arguments before calling the action
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return 1

    try:
        for path in CONF.plugin_paths or []:
            discover.load_plugins(path)

        validate_deprecated_args(argv, fn)

        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return ret

    except (IOError, TypeError, ValueError, exceptions.DeploymentNotFound,
            exceptions.TaskNotFound, jsonschema.ValidationError) as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        return 1
    except sqlalchemy.exc.OperationalError as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        print("Looks like Rally can't connect to its DB.")
        print("Make a sure that connection string in rally.conf is proper:")
        print(CONF.database.connection)
        return 1
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
Example 12
    def test_version_string(self, mock_version_info):
        mock_sv = mock.Mock()
        mock_sv.debian_string.return_value = "foo_version"
        mock_version_info.semantic_version.return_value = mock_sv
        self.assertEqual("foo_version", version.version_string())
Example 13
    def init_rally_config(self):
        CONF([], project="rally", version=version.version_string())
Example 14
    def generate(self):
        root = ET.Element("testsuites")

        root.append(ET.Comment("Report is generated by Rally %s at %s" % (
            version.version_string(),
            dt.datetime.utcnow().strftime(TIME_FORMAT))))

        for v in self.verifications:
            verification = ET.SubElement(root, "testsuite", {
                "id": v.uuid,
                "time": str(v.tests_duration),
                "tests": str(v.tests_count),
                "errors": "0",
                "skipped": str(v.skipped),
                "failures": str(v.failures + v.unexpected_success),
                "timestamp": v.created_at.strftime(TIME_FORMAT)
            })
            tests = sorted(v.tests.values(),
                           key=lambda t: (t.get("timestamp", ""), t["name"]))
            for result in tests:
                class_name, name = result["name"].rsplit(".", 1)
                test_case = {
                    "time": result["duration"],
                    "name": name, "classname": class_name
                }

                test_id = [tag[3:] for tag in result.get("tags", [])
                           if tag.startswith("id-")]
                if test_id:
                    test_case["id"] = test_id[0]
                if "timestamp" in result:
                    test_case["timestamp"] = result["timestamp"]

                test_case_element = ET.SubElement(verification, "testcase",
                                                  test_case)
                if result["status"] == "success":
                    # nothing to add
                    pass
                elif result["status"] == "uxsuccess":
                    # NOTE(andreykurilin): junit doesn't support uxsuccess
                    #   status, so let's display it like "fail" with proper
                    # comment.
                    failure = ET.SubElement(test_case_element, "failure")
                    failure.text = ("It is an unexpected success. The test "
                                    "should fail due to: %s" %
                                    result.get("reason", "Unknown reason"))
                elif result["status"] == "fail":
                    failure = ET.SubElement(test_case_element, "failure")
                    failure.text = result.get("traceback", None)
                elif result["status"] == "xfail":
                    # NOTE(andreykurilin): junit doesn't support xfail status,
                    # so let's display it like "success" with proper comment
                    test_case_element.append(ET.Comment(
                        "It is an expected failure due to: %s" %
                        result.get("reason", "Unknown reason")))
                    trace = result.get("traceback", None)
                    if trace:
                        test_case_element.append(ET.Comment(
                            "Traceback:\n%s" % trace))
                elif result["status"] == "skip":
                    skipped = ET.SubElement(test_case_element, "skipped")
                    skipped.text = result.get("reason", "Unknown reason")
                else:
                    # Unknown status: ignore it for now. Proper result
                    # validation should catch this case.
                    pass

            utils.prettify_xml(root)

        raw_report = ET.tostring(root, encoding="utf-8").decode("utf-8")
        if self.output_destination:
            return {"files": {self.output_destination: raw_report},
                    "open": self.output_destination}
        else:
            return {"print": raw_report}