Example #1
File: task.py Project: sapcc/rally
    def _start_task(self,
                    api,
                    deployment,
                    task_config,
                    tags=None,
                    do_use=False,
                    abort_on_sla_failure=False):
        try:
            task_instance = api.task.create(deployment=deployment, tags=tags)
            tags = "[tags: '%s']" % "', '".join(tags) if tags else ""

            print(
                cliutils.make_header("Task %(tags)s %(uuid)s: started" % {
                    "uuid": task_instance["uuid"],
                    "tags": tags
                }))
            print("Running Task... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment=deployment,
                           config=task_config,
                           task=task_instance["uuid"],
                           abort_on_sla_failure=abort_on_sla_failure)

        except exceptions.DeploymentNotFinishedStatus as e:
            print("Cannot start a task on unfinished deployment: %s" % e)
            return 1

        if self._detailed(api, task_id=task_instance["uuid"]):
            return 2
        return 0
Example #2
def _print_failures(h_text, failures, symbol="-"):
    print("\n%s" % cliutils.make_header(
        h_text, size=len(h_text), symbol=symbol).strip())
    for f in failures:
        header = "%s\n%s\n" % (f["name"], "-" * len(f["name"]))
        failure = "\n%s%s\n" % (header, f["traceback"].strip())
        print(failure)
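
For context, _print_failures expects each failure to be a dict carrying "name" and "traceback" keys, as produced by Rally's verification layer. A minimal usage sketch (the failure data below is invented for illustration):

    failures = [
        {"name": "tempest.api.compute.servers.test_create_server",
         "traceback": "Traceback (most recent call last):\n"
                      "  ...\nAssertionError: server never became ACTIVE"},
    ]
    _print_failures("Failures", failures)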
Example #3
    def report(self, api, verification_uuid=None, output_type=None,
               output_dest=None, open_it=None):
        """Generate a report for a verification or a few verifications."""

        if not isinstance(verification_uuid, list):
            verification_uuid = [verification_uuid]

        result = api.verification.report(uuids=verification_uuid,
                                         output_type=output_type,
                                         output_dest=output_dest)
        if "files" in result:
            print("Saving the report to '%s' file. It may take some time."
                  % output_dest)
            for path in result["files"]:
                full_path = os.path.abspath(os.path.expanduser(path))
                if not os.path.exists(os.path.dirname(full_path)):
                    os.makedirs(os.path.dirname(full_path))
                with open(full_path, "w") as f:
                    f.write(result["files"][path])
            print("The report has been successfully saved.")

            if open_it:
                if "open" not in result:
                    print("Cannot open '%s' report in the browser because "
                          "report type doesn't support it." % output_type)
                    return 1
                webbrowser.open_new_tab(
                    "file://" + os.path.abspath(result["open"]))

        if "print" in result:
            # NOTE(andreykurilin): we need a separation between logs and
            #   printed information to be able to parse output
            h = "Verification Report"
            print("\n%s\n%s" % (cliutils.make_header(h, len(h)),
                                result["print"]))
Example #5
    def show(self, verification=None, sort_by="name", detailed=False):
        """Display results table of a verification.

        :param verification: UUID of a verification
        :param sort_by: Sort results by 'name' or 'duration'
        :param detailed: Display detailed errors of failed tests
        """
        try:
            verification = api.Verification.get(verification)
            tests = verification.get_results()
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1

        print(_("Total results of verification:\n"))
        total_fields = ["UUID", "Deployment UUID", "Set name", "Tests", "Failures", "Created at", "Status"]
        cliutils.print_list([verification], fields=total_fields)

        print(_("\nTests:\n"))
        fields = ["name", "time", "status"]

        results = tests["test_cases"]
        values = [utils.Struct(**results[test_name]) for test_name in results]
        sortby_index = ("name", "duration").index(sort_by)
        cliutils.print_list(values, fields, sortby_index=sortby_index)

        if detailed:
            for test in six.itervalues(tests["test_cases"]):
                if test["status"] == "fail":
                    header = cliutils.make_header(
                        "FAIL: %(name)s\n" "Time: %(time)s" % {"name": test["name"], "time": test["time"]}
                    )
                    formatted_test = "%(header)s%(log)s\n" % {"header": header, "log": test["traceback"]}
                    print(formatted_test)
Example #6
    def check(self, api, deployment=None):
        """Check all credentials and list all available services.

        :param deployment: UUID or name of the deployment
        """
        def is_field_there(lst, field):
            return bool([item for item in lst if field in item])

        def print_error(user_type, error):
            print(_("Error while checking %s credentials:") % user_type)
            if logging.is_debug():
                print(error["trace"])
            else:
                print("\t%s: %s" % (error["etype"], error["msg"]))

        exit_code = 0

        info = api.deployment.check(deployment)
        for platform in info:
            for i, credentials in enumerate(info[platform]):
                failed = False

                n = "" if len(info[platform]) == 1 else " #%s" % (i + 1)
                header = "Platform %s%s:" % (platform, n)
                print(cliutils.make_header(header))
                if "admin_error" in credentials:
                    print_error("admin", credentials["admin_error"])
                    failed = True
                if "user_error" in credentials:
                    print_error("users", credentials["user_error"])
                    failed = True

                if not failed:
                    print("Available services:")
                    formatters = {
                        "Service": lambda x: x.get("name"),
                        "Service Type": lambda x: x.get("type"),
                        "Status": lambda x: x.get("status", "Available")
                    }
                    if (is_field_there(credentials["services"], "type") and
                            is_field_there(credentials["services"], "name")):
                        headers = ["Service", "Service Type", "Status"]
                    else:
                        headers = ["Service", "Status"]

                    if is_field_there(credentials["services"], "version"):
                        headers.append("Version")

                    if is_field_there(credentials["services"], "description"):
                        headers.append("Description")

                    cliutils.print_list(credentials["services"],
                                        headers,
                                        normalize_field_names=True,
                                        formatters=formatters)
                else:
                    exit_code = 1
                print("\n")

        return exit_code
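
The nested helper is_field_there drives which columns get printed: a column is added only when at least one service dict actually carries the corresponding key. A sketch of that behaviour with invented data:

    services = [{"name": "nova", "type": "compute", "status": "Available"},
                {"name": "glance"}]
    is_field_there(services, "type")     # True  -> "Service Type" column shown
    is_field_there(services, "version")  # False -> "Version" column omitted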
Example #7
def _print_task_errors(task_id, task_errors):
    print(
        cliutils.make_header("Task %s has %d error(s)" %
                             (task_id, len(task_errors))))
    for err_data in task_errors:
        print(*err_data, sep="\n")
        print("-" * 80)
Example #8
    def show(self, name, namespace=None):
        """Show detailed information about a Rally plugin."""
        name_lw = name.lower()
        all_plugins = plugin.Plugin.get_all(namespace=namespace)
        found = [p for p in all_plugins if name_lw in p.get_name().lower()]
        exact_match = [p for p in found if name_lw == p.get_name().lower()]

        if not found:
            if namespace:
                print(
                    "There is no plugin: %(name)s in %(namespace)s namespace" % {"name": name, "namespace": namespace}
                )
            else:
                print("There is no plugin: %s" % name)

        elif len(found) == 1 or exact_match:
            plugin_ = found[0] if len(found) == 1 else exact_match[0]
            plugin_info = plugin_.get_info()
            print(cliutils.make_header(plugin_info["title"]))
            print("NAME\n\t%s" % plugin_info["name"])
            print("NAMESPACE\n\t%s" % plugin_info["namespace"])
            print("MODULE\n\t%s" % plugin_info["module"])
            if plugin_info["description"]:
                print("DESCRIPTION\n\t", end="")
                print(textwrap.fill(plugin_info["description"], subsequent_indent="\t"))
            if plugin_info["parameters"]:
                print("PARAMETERS")
                rows = [utils.Struct(name=p["name"], description="%s\n" % p["doc"]) for p in plugin_info["parameters"]]
                cliutils.print_list(rows, fields=["name", "description"])
        else:
            print("Multiple plugins found:")
            self._print_plugins_list(found)
Example #9
    def start(
        self,
        task,
        deployment=None,
        task_args=None,
        task_args_file=None,
        tag=None,
        do_use=False,
        abort_on_sla_failure=False,
    ):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """

        task_instance = api.Task.create(deployment, tag)

        try:
            print("Running Rally version", version.version_string())
            input_task = self._load_and_validate_task(
                task, task_args, task_args_file, deployment, task_instance=task_instance
            )

            print(
                cliutils.make_header(
                    _("Task %(tag)s %(uuid)s: started") % {"uuid": task_instance["uuid"], "tag": task_instance["tag"]}
                )
            )
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(task_instance["uuid"])

            api.Task.start(deployment, input_task, task=task_instance, abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(task_id=task_instance["uuid"])

        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_failed(type(e).__name__, str(e), json.dumps(traceback.format_exc()))
            print(e, file=sys.stderr)
            return 1
Example #10
    def start(self,
              task,
              deployment=None,
              task_args=None,
              task_args_file=None,
              tag=None,
              do_use=False,
              abort_on_sla_failure=False):
        """Start benchmark task.

        :param task: a file with yaml/json task
        :param task_args: Input task args (dict in json/yaml). These args are
                          used to render input task that is jinja2 template.
        :param task_args_file: File with input task args (dict in json/yaml).
                               These args are used to render input task that
                               is jinja2 template.
        :param deployment: UUID or name of a deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """

        task_instance = api.Task.create(deployment, tag)

        try:
            input_task = self._load_and_validate_task(
                task,
                task_args,
                task_args_file,
                deployment,
                task_instance=task_instance)

            print(
                cliutils.make_header(
                    _("Task %(tag)s %(uuid)s: started") % {
                        "uuid": task_instance["uuid"],
                        "tag": task_instance["tag"]
                    }))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(task_instance["uuid"])

            api.Task.start(deployment,
                           input_task,
                           task=task_instance,
                           abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(task_id=task_instance["uuid"])

        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_failed(log=e.format_message())
            print(e, file=sys.stderr)
            return 1
Example #11
    def check(self, api, deployment=None):
        """Check all credentials and list all available services."""

        def is_field_there(lst, field):
            return bool([item for item in lst if field in item])

        def print_error(user_type, error):
            print("Error while checking %s credentials:" % user_type)
            if logging.is_debug():
                print(error["trace"])
            else:
                print("\t%s: %s" % (error["etype"], error["msg"]))

        exit_code = 0

        info = api.deployment.check(deployment=deployment)
        for platform in info:
            for i, credentials in enumerate(info[platform]):
                failed = False

                n = "" if len(info[platform]) == 1 else " #%s" % (i + 1)
                header = "Platform %s%s:" % (platform, n)
                print(cliutils.make_header(header))
                if "admin_error" in credentials:
                    print_error("admin", credentials["admin_error"])
                    failed = True
                if "user_error" in credentials:
                    print_error("users", credentials["user_error"])
                    failed = True

                if not failed:
                    print("Available services:")
                    formatters = {
                        "Service": lambda x: x.get("name"),
                        "Service Type": lambda x: x.get("type"),
                        "Status": lambda x: x.get("status", "Available")}
                    if (is_field_there(credentials["services"], "type") and
                            is_field_there(credentials["services"], "name")):
                        headers = ["Service", "Service Type", "Status"]
                    else:
                        headers = ["Service", "Status"]

                    if is_field_there(credentials["services"], "version"):
                        headers.append("Version")

                    if is_field_there(credentials["services"], "description"):
                        headers.append("Description")

                    cliutils.print_list(credentials["services"], headers,
                                        normalize_field_names=True,
                                        formatters=formatters)
                else:
                    exit_code = 1
                print("\n")

        return exit_code
Example #12
    def start(self, api, task_file, deployment=None, task_args=None,
              task_args_file=None, tags=None, do_use=False,
              abort_on_sla_failure=False):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task_file: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tags: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """
        input_task = self._load_and_validate_task(api, task_file,
                                                  raw_args=task_args,
                                                  args_file=task_args_file)
        print("Running Rally version", version.version_string())

        try:
            task_instance = api.task.create(deployment=deployment, tags=tags)
            tags = "[tags: '%s']" % "', '".join(tags) if tags else ""

            print(cliutils.make_header(
                _("Task %(tags)s %(uuid)s: started")
                % {"uuid": task_instance["uuid"], "tags": tags}))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment=deployment, config=input_task,
                           task=task_instance["uuid"],
                           abort_on_sla_failure=abort_on_sla_failure)

        except exceptions.DeploymentNotFinishedStatus as e:
            print(_("Cannot start a task on unfinished deployment: %s") % e)
            return 1

        self.detailed(api, task_id=task_instance["uuid"])
Example #13
    def start(self, task, deployment=None, task_args=None, task_args_file=None,
              tag=None, do_use=False, abort_on_sla_failure=False):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """

        task_instance = api.Task.create(deployment, tag)

        try:
            input_task = self._load_and_validate_task(
                task, task_args, task_args_file, deployment,
                task_instance=task_instance)

            print(cliutils.make_header(
                  _("Task %(tag)s %(uuid)s: started")
                  % {"uuid": task_instance["uuid"],
                     "tag": task_instance["tag"]}))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(task_instance["uuid"])

            api.Task.start(deployment, input_task, task=task_instance,
                           abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(task_id=task_instance["uuid"])

        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_failed(type(e).__name__,
                                     str(e),
                                     json.dumps(traceback.format_exc()))
            print(e, file=sys.stderr)
            return 1
Example #14
    def start(self,
              api,
              task_file,
              deployment=None,
              task_args=None,
              task_args_file=None,
              tags=None,
              do_use=False,
              abort_on_sla_failure=False):
        """Run task.

        If both task_args and task_args_file are specified, they are going to
        be merged. task_args has a higher priority so it overrides
        values from task_args_file.
        There are 3 kinds of return codes, 0: no error, 1: running error,
        2: sla check failed.
        """

        input_task = self._load_and_validate_task(api,
                                                  task_file,
                                                  raw_args=task_args,
                                                  args_file=task_args_file)
        print("Running Rally version", version.version_string())

        try:
            task_instance = api.task.create(deployment=deployment, tags=tags)
            tags = "[tags: '%s']" % "', '".join(tags) if tags else ""

            print(
                cliutils.make_header("Task %(tags)s %(uuid)s: started" % {
                    "uuid": task_instance["uuid"],
                    "tags": tags
                }))
            print("Running Task... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment=deployment,
                           config=input_task,
                           task=task_instance["uuid"],
                           abort_on_sla_failure=abort_on_sla_failure)

        except exceptions.DeploymentNotFinishedStatus as e:
            print("Cannot start a task on unfinished deployment: %s" % e)
            return 1

        if self._detailed(api, task_id=task_instance["uuid"]):
            return 2
        return 0
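
The docstring above spells out three return codes. A caller-side sketch of how they might be interpreted (task_commands is an assumed instance name, not part of the example):

    rc = task_commands.start(api, "task.yaml")
    if rc == 1:
        print("task could not be started or failed while running")
    elif rc == 2:
        print("task finished, but at least one SLA check failed")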
Example #15
    def show(self, verification_uuid=None, sort_by="name", detailed=False):
        """Display results table of the verification."""

        try:
            sortby_index = ("name", "duration").index(sort_by)
        except ValueError:
            print("Sorry, but verification results can't be sorted "
                  "by '%s'." % sort_by)
            return 1

        try:
            verification = db.verification_get(verification_uuid)
            tests = db.verification_result_get(verification_uuid)
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1

        print("Total results of verification:\n")
        total_fields = [
            "UUID", "Deployment UUID", "Set name", "Tests", "Failures",
            "Created at", "Status"
        ]
        cliutils.print_list([verification], fields=total_fields)

        print("\nTests:\n")
        fields = ["name", "time", "status"]

        values = [
            objects.Verification(test)
            for test in six.itervalues(tests.data["test_cases"])
        ]
        cliutils.print_list(values, fields, sortby_index=sortby_index)

        if detailed:
            for test in six.itervalues(tests.data["test_cases"]):
                if test["status"] == "FAIL":
                    header = cliutils.make_header(
                        "FAIL: %(name)s\n"
                        "Time: %(time)s\n"
                        "Type: %(type)s" % {
                            "name": test["name"],
                            "time": test["time"],
                            "type": test["failure"]["type"]
                        })
                    formatted_test = "%(header)s%(log)s\n" % {
                        "header": header,
                        "log": test["failure"]["log"]
                    }
                    print(formatted_test)
Example #16
    def start(self, task, deployment=None, task_args=None, task_args_file=None,
              tag=None, do_use=False, abort_on_sla_failure=False):
        """Start benchmark task.

        :param task: a file with yaml/json task
        :param task_args: Input task args (dict in json/yaml). These args are
                          used to render input task that is jinja2 template.
        :param task_args_file: File with input task args (dict in json/yaml).
                               These args are used to render input task that
                               is jinja2 template.
        :param deployment: UUID or name of a deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """

        task_instance = api.Task.create(deployment, tag)

        try:
            input_task = self._load_and_validate_task(
                task, task_args, task_args_file, deployment,
                task_instance=task_instance)

            print(cliutils.make_header(
                  _("Task %(tag)s %(uuid)s: started")
                  % {"uuid": task_instance["uuid"],
                     "tag": task_instance["tag"]}))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(task_instance["uuid"])

            api.Task.start(deployment, input_task, task=task_instance,
                           abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(task_id=task_instance["uuid"])

        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_failed(log=e.format_message())
            print(e, file=sys.stderr)
            return 1
Example #17
    def show(self, verification=None, sort_by="name", detailed=False):
        """Display results table of a verification.

        :param verification: UUID of a verification
        :param sort_by: Sort results by 'name' or 'duration'
        :param detailed: Display detailed errors of failed tests
        """
        try:
            verification = api.Verification.get(verification)
            tests = verification.get_results()
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1

        print(_("Total results of verification:\n"))
        total_fields = [
            "UUID", "Deployment UUID", "Set name", "Tests", "Failures",
            "Created at", "Status"
        ]
        cliutils.print_list([verification],
                            fields=total_fields,
                            normalize_field_names=True)

        print(_("\nTests:\n"))
        fields = ["name", "time", "status"]

        results = tests["test_cases"]
        values = [utils.Struct(**results[test_name]) for test_name in results]
        sortby_index = ("name", "duration").index(sort_by)
        cliutils.print_list(values, fields, sortby_index=sortby_index)

        if detailed:
            for test in six.itervalues(tests["test_cases"]):
                if test["status"] == "fail":
                    header = cliutils.make_header("FAIL: %(name)s\n"
                                                  "Time: %(time)s" % {
                                                      "name": test["name"],
                                                      "time": test["time"]
                                                  })
                    formatted_test = "%(header)s%(log)s\n" % {
                        "header": header,
                        "log": test["traceback"]
                    }
                    print(formatted_test)
Example #18
    def show(self, verification_uuid=None, sort_by="name", detailed=False):
        """Display results table of the verification."""

        try:
            sortby_index = ("name", "duration").index(sort_by)
        except ValueError:
            print("Sorry, but verification results can't be sorted "
                  "by '%s'." % sort_by)
            return 1

        try:
            verification = db.verification_get(verification_uuid)
            tests = db.verification_result_get(verification_uuid)
        except exceptions.NotFoundException as e:
            print(six.text_type(e))
            return 1

        print ("Total results of verification:\n")
        total_fields = ["UUID", "Deployment UUID", "Set name", "Tests",
                        "Failures", "Created at", "Status"]
        cliutils.print_list([verification], fields=total_fields)

        print ("\nTests:\n")
        fields = ["name", "time", "status"]

        values = [objects.Verification(test)
                  for test in six.itervalues(tests.data["test_cases"])]
        cliutils.print_list(values, fields, sortby_index=sortby_index)

        if detailed:
            for test in six.itervalues(tests.data["test_cases"]):
                if test["status"] == "FAIL":
                    header = cliutils.make_header(
                        "FAIL: %(name)s\n"
                        "Time: %(time)s\n"
                        "Type: %(type)s" % {"name": test["name"],
                                            "time": test["time"],
                                            "type": test["failure"]["type"]})
                    formatted_test = "%(header)s%(log)s\n" % {
                        "header": header,
                        "log": test["failure"]["log"]}
                    print(formatted_test)
Example #19
    def start(self, api, task_file, deployment=None, task_args=None,
              task_args_file=None, tags=None, do_use=False,
              abort_on_sla_failure=False):
        """Run task.

        If both task_args and task_args_file are specified, they are going to
        be merged. task_args has a higher priority so it overrides
        values from task_args_file.
        There are 3 kinds of return codes, 0: no error, 1: running error,
        2: sla check failed.
        """

        input_task = self._load_and_validate_task(api, task_file,
                                                  raw_args=task_args,
                                                  args_file=task_args_file)
        print("Running Rally version", version.version_string())

        try:
            task_instance = api.task.create(deployment=deployment, tags=tags)
            tags = "[tags: '%s']" % "', '".join(tags) if tags else ""

            print(cliutils.make_header(
                "Task %(tags)s %(uuid)s: started"
                % {"uuid": task_instance["uuid"], "tags": tags}))
            print("Running Task... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment=deployment, config=input_task,
                           task=task_instance["uuid"],
                           abort_on_sla_failure=abort_on_sla_failure)

        except exceptions.DeploymentNotFinishedStatus as e:
            print("Cannot start a task on unfinished deployment: %s" % e)
            return 1

        if self._detailed(api, task_id=task_instance["uuid"]):
            return 2
        return 0
Example #20
    def show(self, name, namespace=None):
        """Show detailed information about a Rally plugin."""
        name_lw = name.lower()
        all_plugins = plugin.Plugin.get_all(namespace=namespace)
        found = [p for p in all_plugins if name_lw in p.get_name().lower()]
        exact_match = [p for p in found if name_lw == p.get_name().lower()]

        if not found:
            if namespace:
                print(
                    "There is no plugin: %(name)s in %(namespace)s namespace" %
                    {
                        "name": name,
                        "namespace": namespace
                    })
            else:
                print("There is no plugin: %s" % name)

        elif len(found) == 1 or exact_match:
            plugin_ = found[0] if len(found) == 1 else exact_match[0]
            plugin_info = plugin_.get_info()
            print(cliutils.make_header(plugin_info["title"]))
            print("NAME\n\t%s" % plugin_info["name"])
            print("NAMESPACE\n\t%s" % plugin_info["namespace"])
            print("MODULE\n\t%s" % plugin_info["module"])
            if plugin_info["description"]:
                print("DESCRIPTION\n\t", end="")
                print(
                    textwrap.fill(plugin_info["description"],
                                  subsequent_indent="\t"))
            if plugin_info["parameters"]:
                print("PARAMETERS")
                rows = [
                    utils.Struct(name=p["name"],
                                 description="g%s\n" % p["doc"])
                    for p in plugin_info["parameters"]
                ]
                cliutils.print_list(rows, fields=["name", "description"])
        else:
            print("Multiple plugins found:")
            self._print_plugins_list(found)
Example #21
    def show(self, api, name, platform=None):
        """Show detailed information about a Rally plugin."""
        name_lw = name.lower()
        all_plugins = plugin.Plugin.get_all(platform=platform)
        found = [p for p in all_plugins if name_lw in p.get_name().lower()]
        exact_match = [p for p in found if name_lw == p.get_name().lower()]

        if not found:
            if platform:
                print("Plugin %(name)s@%(platform)s not found" % {
                    "name": name,
                    "platform": platform
                })
            else:
                print("Plugin %s not found at any platform" % name)
            return exceptions.PluginNotFound.error_code

        elif len(found) == 1 or exact_match:
            plugin_ = found[0] if len(found) == 1 else exact_match[0]
            plugin_info = plugin_.get_info()
            print(cliutils.make_header(plugin_info["title"]))
            print("NAME\n\t%s" % plugin_info["name"])
            print("PLATFORM\n\t%s" % plugin_info["platform"])
            print("MODULE\n\t%s" % plugin_info["module"])
            if plugin_info["description"]:
                print("DESCRIPTION\n\t", end="")
                print("\n\t".join(plugin_info["description"].split("\n")))
            if plugin_info["parameters"]:
                print("PARAMETERS")
                rows = [
                    utils.Struct(name=p["name"], description=p["doc"])
                    for p in plugin_info["parameters"]
                ]
                cliutils.print_list(rows,
                                    fields=["name", "description"],
                                    sortby_index=None)
        else:
            print("Multiple plugins found:")
            self._print_plugins_list(found)
            return exceptions.MultiplePluginsFound.error_code
Example #22
    def show(self, api, name, platform=None):
        """Show detailed information about a Rally plugin."""
        name_lw = name.lower()
        all_plugins = plugin.Plugin.get_all(platform=platform)
        found = [p for p in all_plugins if name_lw in p.get_name().lower()]
        exact_match = [p for p in found if name_lw == p.get_name().lower()]

        if not found:
            if platform:
                print(
                    "Plugin %(name)s@%(platform)s not found"
                    % {"name": name, "platform": platform}
                )
            else:
                print("Plugin %s not found at any platform" % name)

        elif len(found) == 1 or exact_match:
            plugin_ = found[0] if len(found) == 1 else exact_match[0]
            plugin_info = plugin_.get_info()
            print(cliutils.make_header(plugin_info["title"]))
            print("NAME\n\t%s" % plugin_info["name"])
            print("PLATFORM\n\t%s" % plugin_info["platform"])
            print("MODULE\n\t%s" % plugin_info["module"])
            if plugin_info["description"]:
                print("DESCRIPTION\n\t", end="")
                print("\n\t".join(plugin_info["description"].split("\n")))
            if plugin_info["parameters"]:
                print("PARAMETERS")
                rows = [utils.Struct(name=p["name"],
                                     description=p["doc"])
                        for p in plugin_info["parameters"]]
                cliutils.print_list(rows, fields=["name", "description"],
                                    sortby_index=None)
        else:
            print("Multiple plugins found:")
            self._print_plugins_list(found)
Example #23
    def test_make_header(self):
        h1 = cliutils.make_header("msg", size=4, symbol="=")
        self.assertEqual("====\nmsg\n====\n", h1)
Example #24
    def _load_task(self, task_file, task_args=None, task_args_file=None):
        """Load tasks template from file and render it with passed args.

        :param task_file: Path to file with input task
        :param task_args: JSON or YAML representation of dict with args that
                          will be used to render input task with jinja2
        :param task_args_file: Path to file with JSON or YAML representation
                               of dict, that will be used to render input
                               with jinja2. If both specified task_args and
                               task_args_file they will be merged. task_args
                               has bigger priority so it will update values
                               from task_args_file.
        :returns: Loaded, rendered and parsed task (a dict)
        """
        print(cliutils.make_header("Preparing input task"))

        def print_invalid_header(source_name, args):
            print(_("Invalid %(source)s passed: \n\n %(args)s \n")
                  % {"source": source_name, "args": args},
                  file=sys.stderr)

        def parse_task_args(src_name, args):
            try:
                kw = args and yaml.safe_load(args)
                kw = {} if kw is None else kw
            except yaml.parser.ParserError as e:
                print_invalid_header(src_name, args)
                print(_("%(source)s has to be YAML or JSON. Details:"
                        "\n\n%(err)s\n")
                      % {"source": src_name, "err": e},
                      file=sys.stderr)
                raise TypeError()

            if not isinstance(kw, dict):
                print_invalid_header(src_name, args)
                print(_("%(src)s has to be dict, actually %(src_type)s\n")
                      % {"src": src_name, "src_type": type(kw)},
                      file=sys.stderr)
                raise TypeError()
            return kw

        try:
            kw = {}
            if task_args_file:
                with open(task_args_file) as f:
                    kw.update(parse_task_args("task_args_file", f.read()))
            kw.update(parse_task_args("task_args", task_args))
        except TypeError:
            raise FailedToLoadTask()

        with open(task_file) as f:
            try:
                input_task = f.read()
                task_dir = os.path.expanduser(
                    os.path.dirname(task_file)) or "./"
                rendered_task = api.Task.render_template(input_task,
                                                         task_dir, **kw)
            except Exception as e:
                print(_("Failed to render task template:\n%(task)s\n%(err)s\n")
                      % {"task": input_task, "err": e},
                      file=sys.stderr)
                raise FailedToLoadTask()

            print(_("Input task is:\n%s\n") % rendered_task)
            try:
                parsed_task = yaml.safe_load(rendered_task)

            except Exception as e:
                print(_("Wrong format of rendered input task. It should be "
                        "YAML or JSON.\n%s") % e,
                      file=sys.stderr)
                raise FailedToLoadTask()

            print(_("Task syntax is correct :)"))
            return parsed_task
Example #25
    def show(self, api, verification_uuid=None, sort_by="name",
             detailed=False):
        """Show detailed information about a verification."""

        verification = api.verification.get(
            verification_uuid=verification_uuid)
        verifier = api.verifier.get(verifier_id=verification["verifier_uuid"])
        deployment = api.deployment.get(
            deployment=verification["deployment_uuid"])

        def run_args_formatter(v):
            run_args = []
            for k in sorted(v["run_args"]):
                if k in ("load_list", "skip_list", "xfail_list"):
                    value = "(value is too long, %s)"
                    if detailed:
                        value %= "will be displayed separately"
                    else:
                        value %= "use 'detailed' flag to display it"
                else:
                    value = v["run_args"][k]
                run_args.append("%s: %s" % (k, value))
            return "\n".join(run_args)

        # Main table
        fields = ["UUID", "Status", "Started at", "Finished at", "Duration",
                  "Run arguments", "Tags", "Verifier name", "Verifier type",
                  "Deployment name", "Tests count", "Tests duration, sec",
                  "Success", "Skipped", "Expected failures",
                  "Unexpected success", "Failures"]
        formatters = {
            "Started at": lambda v: v["created_at"].replace("T", " "),
            "Finished at": lambda v: v["updated_at"].replace("T", " "),
            "Duration": lambda v: (dt.datetime.strptime(v["updated_at"],
                                                        TIME_FORMAT) -
                                   dt.datetime.strptime(v["created_at"],
                                                        TIME_FORMAT)),
            "Run arguments": run_args_formatter,
            "Tags": lambda v: ", ".join(v["tags"]) or None,
            "Verifier name": lambda v: "%s (UUID: %s)" % (verifier["name"],
                                                          verifier["uuid"]),
            "Verifier type": (
                lambda v: "%s (platform: %s)" % (verifier["type"],
                                                 verifier["platform"])),
            "Deployment name": (
                lambda v: "%s (UUID: %s)" % (deployment["name"],
                                             deployment["uuid"])),
            "Tests duration, sec": lambda v: v["tests_duration"]
        }
        cliutils.print_dict(verification, fields, formatters=formatters,
                            normalize_field_names=True, print_header=False,
                            table_label="Verification")

        if detailed:
            h = "Run arguments"
            print("\n%s" % cliutils.make_header(h, len(h)).strip())
            print("\n%s\n" % json.dumps(verification["run_args"], indent=4))

        # Tests table
        tests = verification["tests"]
        values = [tests[test_id] for test_id in tests]
        fields = ["Name", "Duration, sec", "Status"]
        formatters = {"Duration, sec": lambda v: v["duration"]}
        index = ("name", "duration", "status").index(sort_by)
        cliutils.print_list(values, fields, formatters=formatters,
                            table_label="Tests", normalize_field_names=True,
                            sortby_index=index)

        if detailed:
            failures = [t for t in tests.values() if t["status"] == "fail"]
            if failures:
                self._print_failures("Failures", failures)
            else:
                print("\nCongratulations! Verification passed all tests ;)")
Example #26
    def start(self,
              api,
              task,
              deployment=None,
              task_args=None,
              task_args_file=None,
              tag=None,
              do_use=False,
              abort_on_sla_failure=False,
              os_profile=None):
        """Start benchmark task.

        If both task_args and task_args_file are specified, they will
        be merged. task_args has a higher priority so it will override
        values from task_args_file.

        :param task: Path to the input task file.
        :param task_args: Input task args (JSON dict). These args are
                          used to render the Jinja2 template in the
                          input task.
        :param task_args_file: Path to the file with input task args
                               (dict in JSON/YAML). These args are
                               used to render the Jinja2 template in
                               the input task.
        :param deployment: UUID or name of the deployment
        :param tag: optional tag for this task
        :param do_use: if True, the new task will be stored as the default one
                       for future operations
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        :param os_profile: use a secret key to sign trace information
        """

        try:
            if os_profile is not None:
                osprofiler_profiler.init(os_profile)

            task_instance = api.task.create(deployment, tag)

            print("Running Rally version", version.version_string())
            input_task = self._load_and_validate_task(
                api,
                task,
                task_args,
                task_args_file,
                deployment,
                task_instance=task_instance)

            print(
                cliutils.make_header(
                    _("Task %(tag)s %(uuid)s: started") % {
                        "uuid": task_instance["uuid"],
                        "tag": task_instance["tag"]
                    }))
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")

            if do_use:
                self.use(api, task_instance["uuid"])

            api.task.start(deployment,
                           input_task,
                           task=task_instance,
                           abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(api, task_id=task_instance["uuid"])

            if os_profile is not None:
                print(
                    "Display trace with command:\n"
                    "osprofiler trace show --html",
                    osprofiler_profiler.get().get_base_id())

        except exceptions.DeploymentNotFinishedStatus as e:
            print(_("Cannot start a task on unfinished deployment: %s") % e)
            return 1
        except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
            task_instance.set_validation_failed({
                "etype": type(e).__name__,
                "msg": str(e),
                "trace": json.dumps(traceback.format_exc())
            })
            print(e, file=sys.stderr)
            return 1
Example #27
    def _load_and_validate_task(self,
                                api,
                                task_file,
                                args_file=None,
                                raw_args=None):
        """Load, render and validate tasks template from file with passed args.

        :param task_file: Path to file with input task
        :param raw_args: JSON or YAML representation of dict with args that
            will be used to render input task with jinja2
        :param args_file: Path to file with JSON or YAML representation
            of dict, that will be used to render input with jinja2. If both
            specified task_args and task_args_file they will be merged.
            raw_args has bigger priority so it will update values
            from args_file.
        :returns: Loaded, rendered and parsed task (a dict)
        """

        print(cliutils.make_header("Preparing input task"))

        try:
            input_task = open(task_file).read()
        except IOError as err:
            raise FailedToLoadTask(source="--task",
                                   msg="Error reading %s: %s" %
                                   (task_file, err))

        task_dir = os.path.expanduser(os.path.dirname(task_file)) or "./"

        task_args = {}
        if args_file:
            try:
                task_args.update(yaml.safe_load(open(args_file).read()))
            except yaml.ParserError as e:
                raise FailedToLoadTask(
                    source="--task-args-file",
                    msg="File '%s' has to be YAML or JSON. Details:\n\n%s" %
                    (args_file, e))
            except IOError as err:
                raise FailedToLoadTask(source="--task-args-file",
                                       msg="Error reading %s: %s" %
                                       (args_file, err))

        if raw_args:
            try:
                data = yaml.safe_load(raw_args)
                if isinstance(data, (six.text_type, six.string_types)):
                    raise yaml.ParserError("String '%s' doesn't look like a "
                                           "dictionary." % raw_args)
                task_args.update(data)
            except yaml.ParserError as e:
                args = [
                    keypair.split("=", 1) for keypair in raw_args.split(",")
                ]
                if len([a for a in args if len(a) != 1]) != len(args):
                    raise FailedToLoadTask(
                        source="--task-args",
                        msg="Value has to be YAML or JSON. Details:\n\n%s" % e)
                else:
                    task_args.update(dict(args))

        try:
            rendered_task = api.task.render_template(task_template=input_task,
                                                     template_dir=task_dir,
                                                     **task_args)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Failed to render task template.\n\n%s" % e)

        print("Task is:\n%s\n" % rendered_task.strip())
        try:
            parsed_task = yaml.safe_load(rendered_task)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Wrong format of rendered input task. It should be YAML or"
                " JSON. Details:\n\n%s" % e)

        print("Task syntax is correct :)")
        return parsed_task
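
The raw_args handling above accepts either a YAML/JSON dict or a comma-separated key=value string: yaml.safe_load parses the former directly, while the latter comes back as a plain string and falls through to the key=value splitting branch. A sketch of that fallback (inputs invented):

    raw_args = "flavor=m1.small,image=cirros"
    pairs = [kv.split("=", 1) for kv in raw_args.split(",")]
    dict(pairs)  # {'flavor': 'm1.small', 'image': 'cirros'}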
Example #28
    def test_show_task_errors_no_trace(self, mock_task, mock_stdout,
                                       error_type, error_message,
                                       error_traceback=None):

        test_uuid = "test_task_id"
        error_data = [error_type, error_message]
        if error_traceback:
            error_data.append(error_traceback)
        mock_task.get_detailed.return_value = {
            "id": "task",
            "uuid": test_uuid,
            "status": "status",
            "results": [{
                "key": {
                    "name": "fake_name",
                    "pos": "fake_pos",
                    "kw": "fake_kw"
                },
                "info": {
                    "load_duration": 3.2,
                    "full_duration": 3.5,
                    "iterations_count": 1,
                    "iterations_failed": 1,
                    "atomic": {"foo": {}, "bar": {}}},

                "iterations": [
                    {"duration": 0.9,
                     "idle_duration": 0.1,
                     "output": {"additive": [], "complete": []},
                     "atomic_actions": {"foo": 0.6, "bar": 0.7},
                     "error": error_data
                     },
                ]}
            ]}
        self.task.detailed(test_uuid)
        mock_task.get_detailed.assert_called_once_with(test_uuid,
                                                       extended_results=True)
        err_report = "%(error_type)s: %(error_message)s\n" % (
            {"error_type": error_type, "error_message": error_message})
        header = cliutils.make_header("Task %s has %d error(s)" %
                                      (test_uuid, 1))

        mock_stdout.write.assert_has_calls([
            mock.call("\n"), mock.call("-" * 80), mock.call("\n"),
            mock.call("Task test_task_id: status"),
            mock.call("\n"), mock.call("-" * 80), mock.call("\n"),
            mock.call("\n"), mock.call("test scenario fake_name"),
            mock.call("\n"), mock.call("args position fake_pos"),
            mock.call("\n"), mock.call("args values:"),
            mock.call("\n"), mock.call("\"fake_kw\""),
            mock.call("\n"), mock.call("\n"),
            mock.call(header), mock.call("\n"),
            mock.call(err_report), mock.call("\n"),
            mock.call(error_traceback or "No traceback available."),
            mock.call("\n"), mock.call("-" * 80), mock.call("\n"),
            mock.call("\n"), mock.call("Load duration: 3.2"),
            mock.call("\n"), mock.call("Full duration: 3.5"),
            mock.call("\n"), mock.call("\nHINTS:"),
            mock.call("\n"),
            mock.call("* To plot HTML graphics with this data, run:"),
            mock.call("\n"),
            mock.call("\trally task report test_task_id --out output.html\n"),
            mock.call("\n"), mock.call("* To generate a JUnit report, run:"),
            mock.call("\n"),
            mock.call("\trally task report test_task_id "
                      "--junit --out output.xml\n"),
            mock.call("\n"),
            mock.call("* To get raw JSON output of task results, run:"),
            mock.call("\n"),
            mock.call("\trally task results test_task_id\n"),
            mock.call("\n")
        ])
Example #29
def _print_task_errors(task_id, task_errors):
    print(cliutils.make_header("Task %s has %d error(s)" %
                               (task_id, len(task_errors))))
    for err_data in task_errors:
        print(*err_data, sep="\n")
        print("-" * 80)
Example #30
0
 def test_make_header(self):
     h1 = cliutils.make_header("msg", size=4, symbol="=")
     self.assertEqual(h1, "====\n msg\n====\n")
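
The test pins the expected layout down exactly. A minimal sketch of a
compatible make_header (an assumption; the real rally.cli.cliutils
implementation may pad or size differently for other inputs):

def make_header(text, size=80, symbol="-"):
    # Assumed-compatible sketch: a rule of `symbol`, the text with one
    # leading space, and a closing rule, each newline-terminated.
    rule = symbol * size
    return "%s\n %s\n%s\n" % (rule, text, rule)

assert make_header("msg", size=4, symbol="=") == "====\n msg\n====\n"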
Example #31
0
    def _load_task(self, task_file, task_args=None, task_args_file=None):
        """Load tasks template from file and render it with passed args.

        :param task_file: Path to file with input task
        :param task_args: JSON or YAML representation of dict with args that
                          will be used to render input task with jinja2
        :param task_args_file: Path to file with JSON or YAML representation
                               of dict, that will be used to render input
                               with jinja2. If both specified task_args and
                               task_args_file they will be merged. task_args
                               has bigger priority so it will update values
                               from task_args_file.
        :returns: Str with loaded and rendered task
        """
        print(cliutils.make_header("Preparing input task"))

        def print_invalid_header(source_name, args):
            print(_("Invalid %(source)s passed: \n\n %(args)s \n") % {
                "source": source_name,
                "args": args
            },
                  file=sys.stderr)

        def parse_task_args(src_name, args):
            try:
                kw = args and yaml.safe_load(args)
                kw = {} if kw is None else kw
            except yaml.parser.ParserError as e:
                print_invalid_header(src_name, args)
                print(_("%(source)s has to be YAML or JSON. Details:"
                        "\n\n%(err)s\n") % {
                            "source": src_name,
                            "err": e
                        },
                      file=sys.stderr)
                raise TypeError()

            if not isinstance(kw, dict):
                print_invalid_header(src_name, args)
                print(_("%(src)s has to be dict, actually %(src_type)s\n") % {
                    "src": src_name,
                    "src_type": type(kw)
                },
                      file=sys.stderr)
                raise TypeError()
            return kw

        try:
            kw = {}
            if task_args_file:
                with open(task_args_file) as f:
                    kw.update(parse_task_args("task_args_file", f.read()))
            kw.update(parse_task_args("task_args", task_args))
        except TypeError:
            raise FailedToLoadTask()

        with open(task_file) as f:
            try:
                input_task = f.read()
                task_dir = os.path.expanduser(
                    os.path.dirname(task_file)) or "./"
                rendered_task = api.Task.render_template(
                    input_task, task_dir, **kw)
            except Exception as e:
                print(
                    _("Failed to render task template:\n%(task)s\n%(err)s\n") %
                    {
                        "task": input_task,
                        "err": e
                    },
                    file=sys.stderr)
                raise FailedToLoadTask()

            print(_("Input task is:\n%s\n") % rendered_task)
            try:
                parsed_task = yaml.safe_load(rendered_task)

            except Exception as e:
                print(_("Wrong format of rendered input task. It should be "
                        "YAML or JSON.\n%s") % e,
                      file=sys.stderr)
                raise FailedToLoadTask()

            print(_("Task syntax is correct :)"))
            return parsed_task
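
Because kw is filled from task_args_file first and from task_args second,
values from task_args win on key collisions. A standalone sketch of that
precedence (the argument contents here are illustrative):

import yaml

kw = {}
# Simulates the contents of --task-args-file ...
kw.update(yaml.safe_load('{"flavor": "small", "times": 1}'))
# ... then --task-args, whose values override on key collisions.
kw.update(yaml.safe_load('{"times": 10}'))
assert kw == {"flavor": "small", "times": 10}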
Example #32
0
    def _load_and_validate_task(self, api, task_file, args_file=None,
                                raw_args=None):
        """Load, render and validate tasks template from file with passed args.

        :param task_file: Path to file with input task
        :param raw_args: JSON or YAML representation of dict with args that
            will be used to render input task with jinja2
        :param args_file: Path to file with JSON or YAML representation
            of dict, that will be used to render input with jinja2. If both
            specified task_args and task_args_file they will be merged.
            raw_args has bigger priority so it will update values
            from args_file.
        :returns: Str with loaded and rendered task
        """

        print(cliutils.make_header("Preparing input task"))

        try:
            with open(task_file) as f:
                input_task = f.read()
        except IOError as err:
            raise FailedToLoadTask(
                source="--task",
                msg="Error reading %s: %s" % (task_file, err))

        task_dir = os.path.expanduser(os.path.dirname(task_file)) or "./"

        task_args = {}
        if args_file:
            try:
                with open(args_file) as f:
                    task_args.update(yaml.safe_load(f.read()))
            except yaml.parser.ParserError as e:
                raise FailedToLoadTask(
                    source="--task-args-file",
                    msg="File '%s' has to be YAML or JSON. Details:\n\n%s"
                    % (args_file, e))
            except IOError as err:
                raise FailedToLoadTask(
                    source="--task-args-file",
                    msg="Error reading %s: %s" % (args_file, err))

        if raw_args:
            try:
                data = yaml.safe_load(raw_args)
                if isinstance(data, six.string_types):
                    raise yaml.parser.ParserError(
                        "String '%s' doesn't look like a dictionary."
                        % raw_args)
                task_args.update(data)
            except yaml.parser.ParserError as e:
                # Fall back to parsing "key1=value1,key2=value2": every
                # item must split into exactly two parts on "=".
                args = [keypair.split("=", 1)
                        for keypair in raw_args.split(",")]
                if not all(len(a) == 2 for a in args):
                    raise FailedToLoadTask(
                        source="--task-args",
                        msg="Value has to be YAML or JSON. Details:\n\n%s" % e)
                task_args.update(dict(args))

        try:
            rendered_task = api.task.render_template(task_template=input_task,
                                                     template_dir=task_dir,
                                                     **task_args)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Failed to render task template.\n\n%s" % e)

        print("Task is:\n%s\n" % rendered_task.strip())
        try:
            parsed_task = yaml.safe_load(rendered_task)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Wrong format of rendered input task. It should be YAML or"
                    " JSON. Details:\n\n%s" % e)

        print("Task syntax is correct :)")
        return parsed_task
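
When raw_args is not valid YAML/JSON, the handler above falls back to
parsing a comma-separated key=value list. A standalone sketch of that
fallback path (the input values are illustrative):

raw_args = "image=cirros,flavor=m1.tiny"
pairs = [keypair.split("=", 1) for keypair in raw_args.split(",")]
# Every item must have split into exactly two parts, i.e. contained "=".
assert all(len(p) == 2 for p in pairs)
task_args = dict(pairs)
assert task_args == {"image": "cirros", "flavor": "m1.tiny"}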
Example #33
0
    def show(self,
             api,
             verification_uuid=None,
             sort_by="name",
             detailed=False):
        """Show detailed information about a verification."""

        verification = api.verification.get(
            verification_uuid=verification_uuid)
        verifier = api.verifier.get(verifier_id=verification["verifier_uuid"])
        deployment = api.deployment.get(
            deployment=verification["deployment_uuid"])

        def run_args_formatter(v):
            run_args = []
            for k in sorted(v["run_args"]):
                if k in ("load_list", "skip_list", "xfail_list"):
                    value = "(value is too long, %s)"
                    if detailed:
                        value %= "will be displayed separately"
                    else:
                        value %= "use 'detailed' flag to display it"
                else:
                    value = v["run_args"][k]
                run_args.append("%s: %s" % (k, value))
            return "\n".join(run_args)

        # Main table
        fields = [
            "UUID", "Status", "Started at", "Finished at", "Duration",
            "Run arguments", "Tags", "Verifier name", "Verifier type",
            "Deployment name", "Tests count", "Tests duration, sec", "Success",
            "Skipped", "Expected failures", "Unexpected success", "Failures"
        ]
        formatters = {
            "Started at":
            lambda v: v["created_at"].replace("T", " "),
            "Finished at":
            lambda v: v["updated_at"].replace("T", " "),
            "Duration":
            lambda v: (dt.datetime.strptime(v["updated_at"], TIME_FORMAT) - dt.
                       datetime.strptime(v["created_at"], TIME_FORMAT)),
            "Run arguments":
            run_args_formatter,
            "Tags":
            lambda v: ", ".join(v["tags"]) or None,
            "Verifier name":
            lambda v: "%s (UUID: %s)" % (verifier["name"], verifier["uuid"]),
            "Verifier type": (lambda v: "%s (platform: %s)" %
                              (verifier["type"], verifier["platform"])),
            "Deployment name": (lambda v: "%s (UUID: %s)" %
                                (deployment["name"], deployment["uuid"])),
            "Tests duration, sec":
            lambda v: v["tests_duration"]
        }
        cliutils.print_dict(verification,
                            fields,
                            formatters=formatters,
                            normalize_field_names=True,
                            print_header=False,
                            table_label="Verification")

        if detailed:
            h = "Run arguments"
            print("\n%s" % cliutils.make_header(h, len(h)).strip())
            print("\n%s\n" % json.dumps(verification["run_args"], indent=4))

        # Tests table
        tests = verification["tests"]
        values = [tests[test_id] for test_id in tests]
        fields = ["Name", "Duration, sec", "Status"]
        formatters = {"Duration, sec": lambda v: v["duration"]}
        index = ("name", "duration", "status").index(sort_by)
        cliutils.print_list(values,
                            fields,
                            formatters=formatters,
                            table_label="Tests",
                            normalize_field_names=True,
                            sortby_index=index)

        if detailed:
            failures = [t for t in tests.values() if t["status"] == "fail"]
            if failures:
                self._print_failures("Failures", failures)
            else:
                print("\nCongratulations! Verification passed all tests ;)")