Example #1
    def start(
        self,
        task,
        deployment=None,
        task_args=None,
        task_args_file=None,
        tag=None,
        do_use=False,
        abort_on_sla_failure=False,
    ):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """

        task_instance = api.Task.create(deployment, tag)

        try:
            print("Running Rally version", version.version_string())
            input_task = self._load_and_validate_task(
                task, task_args, task_args_file, deployment,
                task_instance=task_instance)

            print(cliutils.make_header(
                _("Task %(tag)s %(uuid)s: started")
                % {"uuid": task_instance["uuid"],
                   "tag": task_instance["tag"]}))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(task_instance["uuid"])

            api.Task.start(deployment, input_task, task=task_instance,
                           abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(task_id=task_instance["uuid"])

        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_failed(type(e).__name__, str(e),
                                     json.dumps(traceback.format_exc()))
            print(e, file=sys.stderr)
            return 1
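The docstring above says task_args overrides task_args_file when both are supplied. A minimal sketch of that merge, using a hypothetical helper (this is not Rally's actual _load_and_validate_task) and assuming PyYAML is available for the JSON/YAML args file:

import json

import yaml  # assumed dependency; args files may be JSON or YAML


def merge_task_args(task_args=None, task_args_file=None):
    """Illustrative merge: file args are loaded first, task_args wins."""
    merged = {}
    if task_args_file:
        with open(task_args_file) as f:
            # JSON is a subset of YAML, so safe_load covers both formats
            merged.update(yaml.safe_load(f) or {})
    if task_args:
        # task_args is a JSON dict string; its keys override the file's
        merged.update(json.loads(task_args))
    return merged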
Example #2
    def test_generate(self, mock_dt, mock_json_dumps):
        mock_dt.datetime.utcnow.return_value = dt.datetime.utcnow()
        tasks_results = dummy_data.get_tasks_results()

        # print
        reporter = json_exporter.JSONExporter(tasks_results, None)
        reporter._generate_tasks = mock.MagicMock()
        self.assertEqual({"print": "json"}, reporter.generate())
        results = {
            "info": {"rally_version": rally_version.version_string(),
                     "generated_at": mock_dt.datetime.strftime.return_value,
                     "format_version": "1.2"},
            "tasks": reporter._generate_tasks.return_value
        }
        mock_dt.datetime.strftime.assert_called_once_with(
            mock_dt.datetime.utcnow.return_value,
            json_exporter.TIMEFORMAT)
        reporter._generate_tasks.assert_called_once_with()
        mock_json_dumps.assert_called_once_with(results,
                                                sort_keys=False,
                                                indent=4)

        # export to file
        reporter = json_exporter.JSONExporter(tasks_results,
                                              output_destination="path")
        self.assertEqual({"files": {"path": "json"},
                          "open": "file://path"}, reporter.generate())
Example #3
def trends(tasks_results):
    trends = Trends()
    for i, scenario in enumerate(_extend_results(tasks_results), 1):
        trends.add_result(scenario)
    template = ui_utils.get_template("task/trends.html")
    return template.render(version=version.version_string(),
                           data=json.dumps(trends.get_data()))
Example #4
def trends(tasks_results):
    trends = Trends()
    for i, scenario in enumerate(_extend_results(tasks_results), 1):
        trends.add_result(scenario)
    template = ui_utils.get_template("task/trends.html")
    return template.render(version=version.version_string(),
                           data=json.dumps(trends.get_data()))
Example #5
    def test_generate(self, mock_dt, mock_json_dumps):
        mock_dt.datetime.utcnow.return_value = dt.datetime.utcnow()
        tasks_results = test_html.get_tasks_results()

        # print
        reporter = json_exporter.JSONExporter(tasks_results, None)
        reporter._generate_tasks = mock.MagicMock()
        self.assertEqual({"print": "json"}, reporter.generate())
        results = {
            "info": {"rally_version": rally_version.version_string(),
                     "generated_at": mock_dt.datetime.strftime.return_value,
                     "format_version": "1.1"},
            "tasks": reporter._generate_tasks.return_value
        }
        mock_dt.datetime.strftime.assert_called_once_with(
            mock_dt.datetime.utcnow.return_value,
            json_exporter.TIMEFORMAT)
        reporter._generate_tasks.assert_called_once_with()
        mock_json_dumps.assert_called_once_with(results,
                                                sort_keys=False,
                                                indent=4)

        # export to file
        reporter = json_exporter.JSONExporter(tasks_results,
                                              output_destination="path")
        self.assertEqual({"files": {"path": "json"},
                          "open": "file://path"}, reporter.generate())
Example #6
File: task.py Project: sapcc/rally
    def start(self,
              api,
              task_file,
              deployment=None,
              task_args=None,
              task_args_file=None,
              tags=None,
              do_use=False,
              abort_on_sla_failure=False):
        """Run task.

        If both task_args and task_args_file are specified, they are going to
        be merged. task_args has a higher priority so it overrides
        values from task_args_file.
        There are 3 kinds of return codes: 0 - no error, 1 - running error,
        2 - SLA check failed.
        """

        input_task = self._load_and_validate_task(api,
                                                  task_file,
                                                  raw_args=task_args,
                                                  args_file=task_args_file)
        print("Running Rally version", version.version_string())

        return self._start_task(api,
                                deployment,
                                task_config=input_task,
                                tags=tags,
                                do_use=do_use,
                                abort_on_sla_failure=abort_on_sla_failure)
Example #7
    def generate(self):
        test_cases = []
        t = self.tasks_results[0]
        created_at = dt.datetime.strptime(t["created_at"], "%Y-%m-%dT%H:%M:%S")
        updated_at = dt.datetime.strptime(t["updated_at"], "%Y-%m-%dT%H:%M:%S")
        testsuite_data = {
            "id": t["uuid"],
            "name": "heketi-rally-cases",
            "tests": 0,
            "errors": "0",
            "skipped": "0",
            "failures": 0,
            "time": "%.2f" % (updated_at - created_at).total_seconds(),
            "timestamp": t["created_at"],
        }
        for test_suite in self.tasks_results:
            for subtask in test_suite["subtasks"]:
                for workload in subtask["workloads"]:
                    test_case = {
                        "time": "%.2f" % workload["full_duration"],
                        "name": subtask["title"],
                        "classname": workload["name"],
                        "timestamp": workload["created_at"],
                    }
                    if not workload["pass_sla"]:
                        testsuite_data["failures"] += 1
                        test_case["failure"] = "\n".join([
                            s["detail"] for s in workload["sla_results"]["sla"]
                            if not s["success"]
                        ])
                    test_cases.append(test_case)

        testsuite_data["tests"] = str(len(test_cases))
        testsuite_data["failures"] = str(testsuite_data["failures"])

        testsuite = ET.Element("testsuite", testsuite_data)
        testsuite.append(
            ET.Comment(
                "Report is generated by Rally %s at %s" %
                (version.version_string(), dt.datetime.utcnow().strftime(
                    consts.TimeFormat.ISO8601))))
        for test_case in test_cases:
            failure = test_case.pop("failure", None)
            test_case = ET.SubElement(testsuite, "testcase", test_case)
            if failure:
                ET.SubElement(test_case, "failure").text = failure

        utils.prettify_xml(testsuite)
        raw_report = ET.tostring(testsuite, encoding="utf-8").decode("utf-8")

        if self.output_destination:
            return {
                "files": {
                    self.output_destination: raw_report
                },
                "open": "file://" + os.path.abspath(self.output_destination),
            }
        else:
            return {"print": raw_report}
Example #8
    def generate(self):
        root = ET.Element("testsuites")
        root.append(ET.Comment("Report is generated by Rally %s at %s" % (
            version.version_string(),
            dt.datetime.utcnow().strftime(consts.TimeFormat.ISO8601))))

        for t in self.tasks_results:
            created_at = dt.datetime.strptime(t["created_at"],
                                              "%Y-%m-%dT%H:%M:%S")
            updated_at = dt.datetime.strptime(t["updated_at"],
                                              "%Y-%m-%dT%H:%M:%S")
            task = {
                "id": t["uuid"],
                "tests": 0,
                "errors": "0",
                "skipped": "0",
                "failures": 0,
                "time": "%.2f" % (updated_at - created_at).total_seconds(),
                "timestamp": t["created_at"],
            }
            test_cases = []
            for workload in itertools.chain(
                    *[s["workloads"] for s in t["subtasks"]]):
                class_name, name = workload["name"].split(".", 1)
                test_case = {
                    "id": workload["uuid"],
                    "time": "%.2f" % workload["full_duration"],
                    "name": name,
                    "classname": class_name,
                    "timestamp": workload["created_at"]
                }
                if not workload["pass_sla"]:
                    task["failures"] += 1
                    test_case["failure"] = "\n".join(
                        [s["detail"]
                         for s in workload["sla_results"]["sla"]
                         if not s["success"]])
                test_cases.append(test_case)

            task["tests"] = str(len(test_cases))
            task["failures"] = str(task["failures"])

            testsuite = ET.SubElement(root, "testsuite", task)
            for test_case in test_cases:
                failure = test_case.pop("failure", None)
                test_case = ET.SubElement(testsuite, "testcase", test_case)
                if failure:
                    ET.SubElement(test_case, "failure").text = failure

        utils.prettify_xml(root)

        raw_report = ET.tostring(root, encoding="utf-8").decode("utf-8")

        if self.output_destination:
            return {"files": {self.output_destination: raw_report},
                    "open": "file://" + os.path.abspath(
                        self.output_destination)}
        else:
            return {"print": raw_report}
Example #9
def _print_version():
    from rally.common import version

    print("Rally version: %s" % version.version_string())
    packages = version.plugins_versions()
    if packages:
        print("\nInstalled Plugins:")
        print("\n".join("\t%s: %s" % p for p in sorted(packages.items())))
Example #10
File: junit.py Project: sapcc/rally
    def __init__(self):
        self._root = ET.Element("testsuites")
        self._test_suites = []

        self._root.append(
            ET.Comment(
                "Report is generated by Rally %s at %s" %
                (version.version_string(), dt.datetime.utcnow().isoformat())))
Example #11
def plot(tasks_results, include_libs=False):
    extended_results = _extend_results(tasks_results)
    template = ui_utils.get_template("task/report.html")
    source, data = _process_tasks(extended_results)
    return template.render(version=version.version_string(),
                           source=json.dumps(source),
                           data=json.dumps(data),
                           include_libs=include_libs)
Example #12
def _print_version():
    from rally.common import version

    print("Rally version: %s" % version.version_string())
    packages = version.plugins_versions()
    if packages:
        print("\nInstalled Plugins:")
        print("\n".join("\t%s: %s" % p for p in sorted(packages.items())))
Example #13
def plot(tasks_results, include_libs=False):
    extended_results = _extend_results(tasks_results)
    template = ui_utils.get_template("task/report.html")
    source, data = _process_tasks(extended_results)
    return template.render(version=version.version_string(),
                           source=json.dumps(source),
                           data=json.dumps(data),
                           include_libs=include_libs)
Example #14
def trends(tasks):
    trends = Trends()
    for task in tasks:
        for workload in itertools.chain(
                *[s["workloads"] for s in task["subtasks"]]):
            trends.add_result(task["uuid"], workload)
    template = ui_utils.get_template("task/trends.html")
    return template.render(version=version.version_string(),
                           data=json.dumps(trends.get_data()))
Example #15
def trends(tasks):
    trends = Trends()
    for task in tasks:
        for workload in itertools.chain(
                *[s["workloads"] for s in task["subtasks"]]):
            trends.add_result(workload)
    template = ui_utils.get_template("task/trends.html")
    return template.render(version=version.version_string(),
                           data=json.dumps(trends.get_data()))
Example #16
    def start(self, api, task_file, deployment=None, task_args=None,
              task_args_file=None, tags=None, do_use=False,
              abort_on_sla_failure=False):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task_file: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tags: optional list of tags for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """
        input_task = self._load_and_validate_task(api, task_file,
                                                  raw_args=task_args,
                                                  args_file=task_args_file)
        print("Running Rally version", version.version_string())

        try:
            task_instance = api.task.create(deployment=deployment, tags=tags)
            tags = "[tags: '%s']" % "', '".join(tags) if tags else ""

            print(cliutils.make_header(
                _("Task %(tags)s %(uuid)s: started")
                % {"uuid": task_instance["uuid"], "tags": tags}))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment=deployment, config=input_task,
                           task=task_instance["uuid"],
                           abort_on_sla_failure=abort_on_sla_failure)

        except exceptions.DeploymentNotFinishedStatus as e:
            print(_("Cannot start a task on unfinished deployment: %s") % e)
            return 1

        self.detailed(api, task_id=task_instance["uuid"])
Example #17
    def start(self,
              api,
              task_file,
              deployment=None,
              task_args=None,
              task_args_file=None,
              tags=None,
              do_use=False,
              abort_on_sla_failure=False):
        """Run task.

        If both task_args and task_args_file are specified, they are going to
        be merged. task_args has a higher priority so it overrides
        values from task_args_file.
        There are 3 kinds of return codes: 0 - no error, 1 - running error,
        2 - SLA check failed.
        """

        input_task = self._load_and_validate_task(api,
                                                  task_file,
                                                  raw_args=task_args,
                                                  args_file=task_args_file)
        print("Running Rally version", version.version_string())

        try:
            task_instance = api.task.create(deployment=deployment, tags=tags)
            tags = "[tags: '%s']" % "', '".join(tags) if tags else ""

            print(
                cliutils.make_header("Task %(tags)s %(uuid)s: started" % {
                    "uuid": task_instance["uuid"],
                    "tags": tags
                }))
            print("Running Task... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment=deployment,
                           config=input_task,
                           task=task_instance["uuid"],
                           abort_on_sla_failure=abort_on_sla_failure)

        except exceptions.DeploymentNotFinishedStatus as e:
            print("Cannot start a task on unfinished deployment: %s" % e)
            return 1

        if self._detailed(api, task_id=task_instance["uuid"]):
            return 2
        return 0
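The docstring above documents three return codes. A hypothetical caller mapping them to messages; task_commands stands in for an instance of the class that defines start() (TaskCommands in Rally's CLI), and the call itself is illustrative:

RETURN_CODES = {0: "no error", 1: "running error", 2: "SLA check failed"}

rc = task_commands.start(api, "my-task.yaml", deployment="my-deployment")
print("rally task start finished with: %s" % RETURN_CODES.get(rc, "unknown"))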
Example #18
    def generate(self):
        results = {"info": {"rally_version": rally_version.version_string(),
                            "generated_at": dt.datetime.strftime(
                                timeutils.utcnow(), TIMEFORMAT),
                            "format_version": "1"},
                   "tasks": self._generate_tasks()}

        results = json.dumps(results, sort_keys=False, indent=4)

        if self.output_destination:
            return {"files": {self.output_destination: results},
                    "open": "file://" + self.output_destination}
        else:
            return {"print": results}
Example #19
def plot(tasks_results, include_libs=False):
    source = _make_source(tasks_results)
    tasks = []
    subtasks = []
    workloads = []
    for task in tasks_results:
        tasks.append(task)
        for subtask in tasks[-1]["subtasks"]:
            workloads.extend(subtask.pop("workloads"))
        subtasks.extend(tasks[-1].pop("subtasks"))

    template = ui_utils.get_template("task/report.html")
    data = _process_workloads(workloads)
    return template.render(version=version.version_string(),
                           source=json.dumps(source),
                           data=json.dumps(data),
                           include_libs=include_libs)
Example #20
def plot(tasks_results, include_libs=False):
    source = _make_source(tasks_results)
    tasks = []
    subtasks = []
    workloads = []
    for task in tasks_results:
        tasks.append(task)
        for subtask in tasks[-1]["subtasks"]:
            workloads.extend(subtask.pop("workloads"))
        subtasks.extend(tasks[-1].pop("subtasks"))

    template = ui_utils.get_template("task/report.html")
    data = _process_workloads(workloads)
    return template.render(version=version.version_string(),
                           source=json.dumps(source),
                           data=json.dumps(data),
                           include_libs=include_libs)
Example #21
    def start(self, api, task_file, deployment=None, task_args=None,
              task_args_file=None, tags=None, do_use=False,
              abort_on_sla_failure=False):
        """Run task.

        If both task_args and task_args_file are specified, they are going to
        be merged. task_args has a higher priority so it overrides
        values from task_args_file.
        There are 3 kinds of return codes: 0 - no error, 1 - running error,
        2 - SLA check failed.
        """

        input_task = self._load_and_validate_task(api, task_file,
                                                  raw_args=task_args,
                                                  args_file=task_args_file)
        print("Running Rally version", version.version_string())

        try:
            task_instance = api.task.create(deployment=deployment, tags=tags)
            tags = "[tags: '%s']" % "', '".join(tags) if tags else ""

            print(cliutils.make_header(
                "Task %(tags)s %(uuid)s: started"
                % {"uuid": task_instance["uuid"], "tags": tags}))
            print("Running Task... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment=deployment, config=input_task,
                           task=task_instance["uuid"],
                           abort_on_sla_failure=abort_on_sla_failure)

        except exceptions.DeploymentNotFinishedStatus as e:
            print("Cannot start a task on unfinished deployment: %s" % e)
            return 1

        if self._detailed(api, task_id=task_instance["uuid"]):
            return 2
        return 0
Example #22
def main():
    if len(sys.argv) == 1:
        with open(xrally_docs_tools.PLUGINS_FILE) as f:
            plugins = json.loads(f.read())
        requested_version = utils.Tag.parse(
            [p["versions"][1] for p in plugins if p["name"] == "rally"][0])
        actual_version = utils.Tag.parse(version.version_string())
        if requested_version != actual_version:
            print("Requested Rally version (%s) from plugins.json is not "
                  "equal to the release which you have installed (%s)."
                  "Try to recreate virtual environment with "
                  "`tox -r -e update_cli`." %
                  (requested_version, actual_version))
            return 1
        collected_data = discover_cli()
    else:
        if os.path.exists(CLI_SOURCE):
            with open(CLI_SOURCE) as f:
                collected_data = json.loads(f.read())
        else:
            collected_data = discover_cli()

    generate_page(collected_data["categories"])
Example #23
    def generate(self):
        results = {
            "info": {
                "rally_version": rally_version.version_string(),
                "generated_at": dt.datetime.strftime(timeutils.utcnow(),
                                                     TIMEFORMAT),
                "format_version": self.REVISION
            },
            "tasks": self._generate_tasks()
        }

        results = json.dumps(results, sort_keys=False, indent=4)

        if self.output_destination:
            return {
                "files": {
                    self.output_destination: results
                },
                "open": "file://" + self.output_destination
            }
        else:
            return {"print": results}
Example #24
def discover_cli():
    categories = []

    parser = Parser()

    raw_categories = copy.copy(rally_cli_main.categories)
    cliutils._add_command_parsers(raw_categories, parser)

    for cg in sorted(raw_categories.keys()):
        if cg == "deployment":
            # oops. let's skip it
            continue
        cparser = parser.parsers[cg]["parser"]
        # NOTE(andreykurilin): we are re-using the `_add_command_parsers`
        #   method from `rally.cli.cliutils`, but since it was designed
        #   to print a help message, the generated description for a
        #   category contains the specification of all its sub-commands.
        #   We don't need that information at this point, so skip the
        #   generated description and take it directly from the category
        #   class.
        description = cparser.defaults["command_object"].__doc__

        commands = []

        for command in sorted(cparser.subparser.parsers.keys()):
            subparser = cparser.subparser.parsers[command]

            arguments = []
            defaults = utils.get_defaults(
                subparser["parser"].defaults["action_fn"])
            for args, kwargs in subparser["parser"].arguments:
                # for future changes...
                # :param args: a single command argument, which can be
                #    represented by several names (for example, --uuid and
                #    --task-id) in the CLI
                # :type args: tuple
                # :param kwargs: description of the argument. It has the
                #    following format:
                #   {"dest": "action_kwarg_<name of keyword argument in code>",
                #    "help": "just a description of the argument",
                #    "metavar": "[optional] metavar of the argument. "
                #               "Example: argument '--file'; metavar 'path'",
                #    "type": "[optional] class object of the argument's type",
                #    "required": "[optional] boolean value"}
                # :type kwargs: dict

                argument = {
                    "dest": kwargs.get("dest").replace("action_kwarg_", ""),
                    "args": args,
                    "metavar": kwargs.get("metavar"),
                    "description": kwargs.get("help", ""),
                }
                action = kwargs.get("action")
                if not action:
                    arg_type = kwargs.get("type")
                    if arg_type:
                        argument["type"] = arg_type.__name__

                    skip_default = argument["dest"] in ("deployment", "env",
                                                        "task_id",
                                                        "verification")
                    if not skip_default and argument["dest"] in defaults:
                        argument["defaults"] = defaults[argument["dest"]]

                arguments.append(argument)
            commands.append({
                "name": "rally %s %s" % (cg, command),
                "command": command,
                "description": subparser["description"],
                "arguments": arguments
            })

        categories.append({
            "name": cg,
            "description": description,
            "commands": commands
        })
    data = {"categories": categories, "rally": version.version_string()}
    with open(CLI_SOURCE, "w") as f:
        f.write(json.dumps(data, indent=4))
    return data
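An illustrative fragment of the structure that discover_cli() writes to CLI_SOURCE; the field names follow the code above, while the concrete values are made up:

example_cli_data = {
    "rally": "1.6.0",  # version.version_string() of the installed release
    "categories": [{
        "name": "task",
        "description": "Set of commands that allow you to manage tasks.",
        "commands": [{
            "name": "rally task start",
            "command": "start",
            "description": "Run task.",
            "arguments": [{
                "dest": "task_file",
                "args": ["--task", "--filename"],
                "metavar": "<path>",
                "description": "Path to the input task file."
            }]
        }]
    }]
}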
Example #25
def run(argv, categories):
    parser = lambda subparsers: _add_command_parsers(categories, subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)

    CONF.register_cli_opt(category_opt)
    help_msg = ("Additional custom plugin locations. Multiple files or "
                "directories may be specified. All plugins in the specified"
                " directories and subdirectories will be imported. Plugins in"
                " /opt/rally/plugins and ~/.rally/plugins will always be "
                "imported.")

    CONF.register_cli_opt(cfg.ListOpt("plugin-paths",
                                      default=os.environ.get(
                                          "RALLY_PLUGIN_PATHS"),
                                      help=help_msg))

    try:
        CONF(argv[1:], project="rally", version=version.version_string(),
             default_config_files=find_config_files(CONFIG_SEARCH_PATHS))
        logging.setup("rally")
        if not CONF.get("log_config_append"):
            # The two lines below disable noise from the requests module.
            # Ideally these settings would be made on the root Rally logger,
            # but the current oslo code does not provide such an interface,
            # so we use this workaround to suppress INFO logs from the
            # requests module when the user did not give a specific log
            # configuration. This hack can be removed once oslo.log supports
            # such an interface.
            LOG.debug("INFO logs from urllib3 and requests modules are "
                      "hidden.")
            requests_log = logging.getLogger("requests").logger
            requests_log.setLevel(logging.WARNING)
            urllib3_log = logging.getLogger("urllib3").logger
            urllib3_log.setLevel(logging.WARNING)

            # NOTE(wtakase): This is for suppressing boto error logging.
            LOG.debug("ERROR log from boto module is hide.")
            boto_log = logging.getLogger("boto").logger
            boto_log.setLevel(logging.CRITICAL)

    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run %s as root.") % argv[0])
        return(2)

    if CONF.category.name == "version":
        print(version.version_string())
        return(0)

    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return(0)

    fn = CONF.category.action_fn
    fn_args = [encodeutils.safe_decode(arg)
               for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = encodeutils.safe_decode(v)
        fn_kwargs[k] = v

    # call the action with the remaining arguments
    # check arguments
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return(1)

    try:
        for path in CONF.plugin_paths or []:
            discover.load_plugins(path)

        validate_deprecated_args(argv, fn)

        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return(ret)

    except (IOError, TypeError, ValueError, exceptions.DeploymentNotFound,
            exceptions.TaskNotFound, jsonschema.ValidationError) as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        return 1
    except sqlalchemy.exc.OperationalError as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        print("Looks like Rally can't connect to its DB.")
        print("Make a sure that connection string in rally.conf is proper:")
        print(CONF.database.connection)
        return 1
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
Example #26
    def generate(self):
        root = ET.Element("testsuites")

        root.append(
            ET.Comment("Report is generated by Rally %s at %s" %
                       (version.version_string(),
                        dt.datetime.utcnow().strftime(TIME_FORMAT))))

        for v in self.verifications:
            verification = ET.SubElement(
                root, "testsuite", {
                    "id": v.uuid,
                    "time": str(v.tests_duration),
                    "tests": str(v.tests_count),
                    "errors": "0",
                    "skipped": str(v.skipped),
                    "failures": str(v.failures + v.unexpected_success),
                    "timestamp": v.created_at.strftime(TIME_FORMAT)
                })
            tests = sorted(v.tests.values(),
                           key=lambda t: (t.get("timestamp", ""), t["name"]))
            for result in tests:
                class_name, name = result["name"].rsplit(".", 1)
                test_case = {
                    "time": result["duration"],
                    "name": name,
                    "classname": class_name
                }

                test_id = [
                    tag[3:] for tag in result.get("tags", [])
                    if tag.startswith("id-")
                ]
                if test_id:
                    test_case["id"] = test_id[0]
                if "timestamp" in result:
                    test_case["timestamp"] = result["timestamp"]

                test_case_element = ET.SubElement(verification, "testcase",
                                                  test_case)
                if result["status"] == "success":
                    # nothing to add
                    pass
                elif result["status"] == "uxsuccess":
                    # NOTE(andreykurilin): junit doesn't support uxsuccess
                    #   status, so let's display it like "fail" with proper
                    # comment.
                    failure = ET.SubElement(test_case_element, "failure")
                    failure.text = ("It is an unexpected success. The test "
                                    "should fail due to: %s" %
                                    result.get("reason", "Unknown reason"))
                elif result["status"] == "fail":
                    failure = ET.SubElement(test_case_element, "failure")
                    failure.text = result.get("traceback", None)
                elif result["status"] == "xfail":
                    # NOTE(andreykurilin): junit doesn't support xfail status,
                    # so let's display it like "success" with proper comment
                    test_case_element.append(
                        ET.Comment("It is an expected failure due to: %s" %
                                   result.get("reason", "Unknown reason")))
                    trace = result.get("traceback", None)
                    if trace:
                        test_case_element.append(
                            ET.Comment("Traceback:\n%s" % trace))
                elif result["status"] == "skip":
                    skipped = ET.SubElement(test_case_element, "skipped")
                    skipped.text = result.get("reason", "Unknown reason")
                else:
                    # unknown status; results validation should catch this
                    pass

            self._prettify_xml(root)

        raw_report = ET.tostring(root, encoding="utf-8").decode("utf-8")
        if self.output_destination:
            return {
                "files": {
                    self.output_destination: raw_report
                },
                "open": self.output_destination
            }
        else:
            return {"print": raw_report}
Example #27
    def generate(self):
        root = ET.Element("testsuites")

        root.append(ET.Comment("Report is generated by Rally %s at %s" % (
            version.version_string(),
            dt.datetime.utcnow().strftime(TIME_FORMAT))))

        for v in self.verifications:
            verification = ET.SubElement(root, "testsuite", {
                "id": v.uuid,
                "time": str(v.tests_duration),
                "tests": str(v.tests_count),
                "errors": "0",
                "skipped": str(v.skipped),
                "failures": str(v.failures + v.unexpected_success),
                "timestamp": v.created_at.strftime(TIME_FORMAT)
            })
            tests = sorted(v.tests.values(),
                           key=lambda t: (t.get("timestamp", ""), t["name"]))
            for result in tests:
                class_name, name = result["name"].rsplit(".", 1)
                test_case = {
                    "time": result["duration"],
                    "name": name, "classname": class_name
                }

                test_id = [tag[3:] for tag in result.get("tags", [])
                           if tag.startswith("id-")]
                if test_id:
                    test_case["id"] = test_id[0]
                if "timestamp" in result:
                    test_case["timestamp"] = result["timestamp"]

                test_case_element = ET.SubElement(verification, "testcase",
                                                  test_case)
                if result["status"] == "success":
                    # nothing to add
                    pass
                elif result["status"] == "uxsuccess":
                    # NOTE(andreykurilin): junit doesn't support uxsuccess
                    #   status, so let's display it like "fail" with proper
                    # comment.
                    failure = ET.SubElement(test_case_element, "failure")
                    failure.text = ("It is an unexpected success. The test "
                                    "should fail due to: %s" %
                                    result.get("reason", "Unknown reason"))
                elif result["status"] == "fail":
                    failure = ET.SubElement(test_case_element, "failure")
                    failure.text = result.get("traceback", None)
                elif result["status"] == "xfail":
                    # NOTE(andreykurilin): junit doesn't support xfail status,
                    # so let's display it like "success" with proper comment
                    test_case_element.append(ET.Comment(
                        "It is an expected failure due to: %s" %
                        result.get("reason", "Unknown reason")))
                    trace = result.get("traceback", None)
                    if trace:
                        test_case_element.append(ET.Comment(
                            "Traceback:\n%s" % trace))
                elif result["status"] == "skip":
                    skipped = ET.SubElement(test_case_element, "skipped")
                    skipped.text = result.get("reason", "Unknown reason")
                else:
                    # unknown status; results validation should catch this
                    pass

            utils.prettify_xml(root)

        raw_report = ET.tostring(root, encoding="utf-8").decode("utf-8")
        if self.output_destination:
            return {"files": {self.output_destination: raw_report},
                    "open": self.output_destination}
        else:
            return {"print": raw_report}
Example #28
 def init_rally_config(self):
     CONF([], project="rally", version=version.version_string())
Example #29
 def init_rally_config(self):
     CONF([], project='rally', version=version.version_string())
Example #30
    def start(self,
              api,
              task,
              deployment=None,
              task_args=None,
              task_args_file=None,
              tag=None,
              do_use=False,
              abort_on_sla_failure=False,
              os_profile=None):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        :param os_profile: use a secret key to sign trace information
        """

        try:
            if os_profile is not None:
                osprofiler_profiler.init(os_profile)

            task_instance = api.task.create(deployment, tag)

            print("Running Rally version", version.version_string())
            input_task = self._load_and_validate_task(
                api,
                task,
                task_args,
                task_args_file,
                deployment,
                task_instance=task_instance)

            print(
                cliutils.make_header(
                    _("Task %(tag)s %(uuid)s: started") % {
                        "uuid": task_instance["uuid"],
                        "tag": task_instance["tag"]
                    }))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment,
                           input_task,
                           task=task_instance,
                           abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(api, task_id=task_instance["uuid"])

            if os_profile is not None:
                print(
                    "Display trace with command:\n"
                    "osprofiler trace show --html",
                    osprofiler_profiler.get().get_base_id())

        except exceptions.DeploymentNotFinishedStatus as e:
            print(_("Cannot start a task on unfinished deployment: %s") % e)
            return 1
        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_validation_failed({
                "etype": type(e).__name__,
                "msg": str(e),
                "trace": json.dumps(traceback.format_exc())
            })
            print(e, file=sys.stderr)
            return (1)
Example #31
 def test_version_string(self, mock_version_info):
     mock_sv = mock.Mock()
     mock_sv.debian_string.return_value = "foo_version"
     mock_version_info.semantic_version.return_value = mock_sv
     self.assertEqual("foo_version", version.version_string())
Example #32
 def test_version_string(self, mock_version_info):
     mock_sv = mock.Mock()
     mock_sv.debian_string.return_value = "foo_version"
     mock_version_info.semantic_version.return_value = mock_sv
     self.assertEqual("foo_version", version.version_string())
Example #33
    def __init__(self,
                 config_file=None,
                 config_args=None,
                 rally_endpoint=None,
                 plugin_paths=None,
                 skip_db_check=False):
        """Initialize Rally API instance

        :param config_file: Path to the Rally configuration file. If None,
                            the default path will be selected
        :type config_file: str
        :param config_args: Arguments for initializing the current
                            configuration
        :type config_args: list
        :param rally_endpoint: [Restricted] Rally endpoint connection string.
        :type rally_endpoint: str
        :param plugin_paths: Additional custom plugin locations
        :type plugin_paths: list
        :param skip_db_check: Allows skipping the DB revision check
        :type skip_db_check: bool
        """
        if rally_endpoint:
            raise NotImplementedError(
                _LE("Sorry, but Rally-as-a-Service is "
                    "not ready yet."))
        try:
            config_files = ([config_file]
                            if config_file else self._default_config_file())
            CONF(config_args or [],
                 project="rally",
                 version=rally_version.version_string(),
                 default_config_files=config_files)
            logging.setup("rally")
            if not CONF.get("log_config_append"):
                # The two lines below disable noise from the requests module.
                # Ideally these settings would be made on the root Rally
                # logger, but the current oslo code does not provide such an
                # interface, so we use this workaround to suppress INFO logs
                # from the requests module when the user did not give a
                # specific log configuration. This hack can be removed once
                # oslo.log supports such an interface.
                LOG.debug(
                    "INFO logs from urllib3 and requests modules are hidden.")
                requests_log = logging.getLogger("requests").logger
                requests_log.setLevel(logging.WARNING)
                urllib3_log = logging.getLogger("urllib3").logger
                urllib3_log.setLevel(logging.WARNING)

                LOG.debug("urllib3 insecure warnings are hidden.")
                for warning in ("InsecurePlatformWarning", "SNIMissingWarning",
                                "InsecureRequestWarning"):
                    warning_cls = getattr(urllib3.exceptions, warning, None)
                    if warning_cls is not None:
                        urllib3.disable_warnings(warning_cls)

            # NOTE(wtakase): This is for suppressing boto error logging.
            LOG.debug("ERROR log from boto module is hide.")
            boto_log = logging.getLogger("boto").logger
            boto_log.setLevel(logging.CRITICAL)

            # Set alembic log level to ERROR
            alembic_log = logging.getLogger("alembic").logger
            alembic_log.setLevel(logging.ERROR)

        except cfg.ConfigFilesNotFoundError as e:
            cfg_files = e.config_files
            raise exceptions.RallyException(
                _LE("Failed to read configuration file(s): %s") % cfg_files)

        # Check that db is upgraded to the latest revision
        if not skip_db_check:
            self.check_db_revision()

        # Load plugins
        plugin_paths = plugin_paths or []
        if "plugin_paths" in CONF:
            plugin_paths.extend(CONF.get("plugin_paths") or [])
        for path in plugin_paths:
            discover.load_plugins(path)

        # NOTE(andreykurilin): There is no reason to auto-discover API's. We
        # have only 4 classes, so let's do it in good old way - hardcode them:)
        self._deployment = _Deployment
        self._task = _Task
        self._verifier = _Verifier
        self._verification = _Verification
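A minimal usage sketch, assuming the __init__ above belongs to Rally's rally.api.API class; the configuration path and plugin directory below are illustrative:

from rally import api

rally_api = api.API(config_file="/etc/rally/rally.conf",
                    plugin_paths=["/opt/rally/custom-plugins"],
                    skip_db_check=True)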
Example #34
    def start(self,
              task,
              deployment=None,
              task_args=None,
              task_args_file=None,
              tag=None,
              do_use=False,
              abort_on_sla_failure=False):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """

        task_instance = api.Task.create(deployment, tag)

        try:
            print("Running Rally version", version.version_string())
            input_task = self._load_and_validate_task(
                task,
                task_args,
                task_args_file,
                deployment,
                task_instance=task_instance)

            print(
                cliutils.make_header(
                    _("Task %(tag)s %(uuid)s: started") % {
                        "uuid": task_instance["uuid"],
                        "tag": task_instance["tag"]
                    }))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(task_instance["uuid"])

            api.Task.start(deployment,
                           input_task,
                           task=task_instance,
                           abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(task_id=task_instance["uuid"])

        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_failed(
                type(e).__name__, str(e), json.dumps(traceback.format_exc()))
            print(e, file=sys.stderr)
            return (1)
Example #35
def run(argv, categories):
    parser = lambda subparsers: _add_command_parsers(categories, subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)

    CONF.register_cli_opt(category_opt)

    try:
        CONF(argv[1:], project="rally", version=version.version_string())
        logging.setup("rally")
        if not CONF.get("log_config_append"):
            # The two lines below disable noise from the requests module.
            # Ideally these settings would be made on the root Rally logger,
            # but the current oslo code does not provide such an interface,
            # so we use this workaround to suppress INFO logs from the
            # requests module when the user did not give a specific log
            # configuration. This hack can be removed once oslo.log supports
            # such an interface.
            LOG.debug("INFO logs from urllib3 and requests modules are "
                      "hidden.")
            requests_log = logging.getLogger("requests").logger
            requests_log.setLevel(logging.WARNING)
            urllib3_log = logging.getLogger("urllib3").logger
            urllib3_log.setLevel(logging.WARNING)

    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run %s as root.") % argv[0])
        return (2)

    if CONF.category.name == "version":
        print(version.version_string())
        return (0)

    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return (0)

    fn = CONF.category.action_fn
    fn_args = [arg.decode("utf-8") for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = v.decode("utf-8")
        fn_kwargs[k] = v

    # call the action with the remaining arguments
    # check arguments
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return (1)

    try:
        utils.load_plugins("/opt/rally/plugins/")
        utils.load_plugins(os.path.expanduser("~/.rally/plugins/"))

        validate_deprecated_args(argv, fn)

        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return (ret)

    except (IOError, TypeError, ValueError, exceptions.DeploymentNotFound,
            exceptions.TaskNotFound, jsonschema.ValidationError) as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        return 1
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise