Example #1
    def images(self, deployment=None):
        """Display available images.

        :param deployment: UUID or name of a deployment
        """

        headers = ["UUID", "Name", "Size (B)"]
        mixed_case_fields = ["UUID", "Name"]
        float_cols = ["Size (B)"]
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col)
                               for col in float_cols]))

        for endpoint_dict in self._get_endpoints(deployment):
            self._print_header("Images", endpoint_dict)
            table_rows = []

            clients = osclients.Clients(objects.Endpoint(**endpoint_dict))
            glance_client = clients.glance()
            for image in glance_client.images.list():
                data = [image.id, image.name, image.size]
                table_rows.append(utils.Struct(**dict(zip(headers, data))))

            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters,
                                mixed_case_fields=mixed_case_fields)
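The pattern in this example recurs throughout the listing: build a formatters dict mapping column names to pretty_float_formatter callables, then hand it to cliutils.print_list together with the field list. A stripped-down sketch of the same pattern outside a Rally command class could look like this (assuming the rally package is installed; the rows are invented sample data, and print_list accepts plain dicts as rows, as the tests in Example #20 show):

from rally.cli import cliutils

# Invented sample rows, shaped like the image data Example #1 collects from
# glance. print_list also accepts plain dicts instead of Struct objects.
rows = [
    {"name": "cirros", "size": 13267968.0},
    {"name": "fedora", "size": 214106112.0},
]
formatters = {"size": cliutils.pretty_float_formatter("size")}
cliutils.print_list(rows, fields=["name", "size"], formatters=formatters)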
Example #2
    def flavors(self, deployment=None):
        """Display available flavors.

        :param deployment: UUID or name of a deployment
        """
        headers = ["ID", "Name", "vCPUs", "RAM (MB)", "Swap (MB)", "Disk (GB)"]
        mixed_case_fields = ["ID", "Name", "vCPUs"]
        float_cols = ["RAM (MB)", "Swap (MB)", "Disk (GB)"]
        formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col)
                               for col in float_cols]))

        for endpoint_dict in self._get_endpoints(deployment):
            self._print_header("Flavors", endpoint_dict)
            table_rows = []
            clients = osclients.Clients(objects.Endpoint(**endpoint_dict))
            nova_client = clients.nova()
            for flavor in nova_client.flavors.list():
                data = [flavor.id, flavor.name, flavor.vcpus,
                        flavor.ram, flavor.swap, flavor.disk]
                table_rows.append(utils.Struct(**dict(zip(headers, data))))

            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters,
                                mixed_case_fields=mixed_case_fields)
Example #3
    def images(self, deployment=None):
        """Display available images.

        :param deployment: UUID or name of a deployment
        """

        headers = ["UUID", "Name", "Size (B)"]
        mixed_case_fields = ["UUID", "Name"]
        float_cols = ["Size (B)"]
        formatters = dict(
            zip(float_cols,
                [cliutils.pretty_float_formatter(col) for col in float_cols]))

        for endpoint_dict in self._get_endpoints(deployment):
            self._print_header("Images", endpoint_dict)
            table_rows = []

            clients = osclients.Clients(objects.Endpoint(**endpoint_dict))
            glance_client = clients.glance()
            for image in glance_client.images.list():
                data = [image.id, image.name, image.size]
                table_rows.append(utils.Struct(**dict(zip(headers, data))))

            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters,
                                mixed_case_fields=mixed_case_fields)
Example #4
 def _print_iterations_data(result):
     raw_data = result["data"]["raw"]
     headers = ["iteration", "full duration"]
     float_cols = ["full duration"]
     atomic_actions = []
     for row in raw_data:
         # find first non-error result to get atomic actions names
         if not row["error"] and "atomic_actions" in row:
             atomic_actions = row["atomic_actions"].keys()
     for row in raw_data:
         if row["atomic_actions"]:
             for (c, a) in enumerate(atomic_actions, 1):
                 action = "%(no)i. %(action)s" % {"no": c, "action": a}
                 headers.append(action)
                 float_cols.append(action)
             break
     table_rows = []
     formatters = dict(
         zip(float_cols, [
             cliutils.pretty_float_formatter(col, 3)
             for col in float_cols
         ]))
     for (c, r) in enumerate(raw_data, 1):
         dlist = [c]
         dlist.append(r["duration"])
         if r["atomic_actions"]:
             for action in atomic_actions:
                 dlist.append(r["atomic_actions"].get(action) or 0)
         table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
     cliutils.print_list(table_rows,
                         fields=headers,
                         formatters=formatters)
     print()
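This helper (and its differently wrapped twin in Example #6) expects result["data"]["raw"] to be a list with one dict per iteration. Judging only from the fields the code reads, each entry looks roughly like the hypothetical sample below; fields the helper never touches are omitted:

# Hypothetical input shape, inferred solely from the fields that
# _print_iterations_data reads ("error", "duration", "atomic_actions");
# the atomic action names are made up.
result = {
    "data": {
        "raw": [
            {
                "error": [],                # falsey when the iteration succeeded
                "duration": 1.274,          # full iteration duration
                "atomic_actions": {         # duration per atomic action
                    "nova.boot_server": 0.981,
                    "nova.delete_server": 0.287,
                },
            },
            # ... one such dict per iteration
        ],
    },
}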
Example #5
    def flavors(self, deployment=None):
        """Display available flavors.

        :param deployment: UUID or name of a deployment
        """
        headers = ["ID", "Name", "vCPUs", "RAM (MB)", "Swap (MB)", "Disk (GB)"]
        mixed_case_fields = ["ID", "Name", "vCPUs"]
        float_cols = ["RAM (MB)", "Swap (MB)", "Disk (GB)"]
        formatters = dict(
            zip(float_cols,
                [cliutils.pretty_float_formatter(col) for col in float_cols]))

        for endpoint_dict in self._get_endpoints(deployment):
            self._print_header("Flavors", endpoint_dict)
            table_rows = []
            clients = osclients.Clients(objects.Endpoint(**endpoint_dict))
            nova_client = clients.nova()
            for flavor in nova_client.flavors.list():
                data = [
                    flavor.id, flavor.name, flavor.vcpus, flavor.ram,
                    flavor.swap, flavor.disk
                ]
                table_rows.append(utils.Struct(**dict(zip(headers, data))))

            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters,
                                mixed_case_fields=mixed_case_fields)
Example #6
 def _print_iterations_data(result):
     raw_data = result["data"]["raw"]
     headers = ["iteration", "full duration"]
     float_cols = ["full duration"]
     atomic_actions = []
     for row in raw_data:
         # find first non-error result to get atomic actions names
         if not row["error"] and "atomic_actions" in row:
             atomic_actions = row["atomic_actions"].keys()
     for row in raw_data:
         if row["atomic_actions"]:
             for (c, a) in enumerate(atomic_actions, 1):
                 action = "%(no)i. %(action)s" % {"no": c, "action": a}
                 headers.append(action)
                 float_cols.append(action)
             break
     table_rows = []
     formatters = dict(zip(float_cols,
                           [cliutils.pretty_float_formatter(col, 3)
                            for col in float_cols]))
     for (c, r) in enumerate(raw_data, 1):
         dlist = [c]
         dlist.append(r["duration"])
         if r["atomic_actions"]:
             for action in atomic_actions:
                 dlist.append(r["atomic_actions"].get(action) or 0)
         table_rows.append(rutils.Struct(**dict(zip(headers,
                                                    dlist))))
     cliutils.print_list(table_rows,
                         fields=headers,
                         formatters=formatters)
     print()
Example #7
    def test_pretty_float_formatter_none_value(self):
        test_table_rows = {"test_header": None}
        self.__dict__.update(**test_table_rows)

        formatter = cliutils.pretty_float_formatter("test_header")
        return_value = formatter(self)

        self.assertEqual(return_value, "n/a")
Example #8
    def test_pretty_float_formatter_none_value(self):
        test_table_rows = {"test_header": None}
        self.__dict__.update(**test_table_rows)

        formatter = cliutils.pretty_float_formatter("test_header")
        return_value = formatter(self)

        self.assertEqual(return_value, "n/a")
Example #9
    def test_pretty_float_formatter_nonrounding(self):
        test_table_rows = {"test_header": 6.56565}
        self.__dict__.update(**test_table_rows)

        formatter = cliutils.pretty_float_formatter("test_header")
        return_value = formatter(self)

        self.assertEqual(return_value, 6.56565)
Example #10
    def test_pretty_float_formatter_rounding(self):
        test_table_rows = {"test_header": 6.56565}
        self.__dict__.update(**test_table_rows)

        formatter = cliutils.pretty_float_formatter("test_header", 3)
        return_value = formatter(self)

        self.assertEqual(return_value, 6.566)
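Examples #7 through #10, together with Example #17 and the parameterized test in Example #20, pin down the formatter's observable contract: pretty_float_formatter is a factory that takes a field name and an optional precision, and the returned callable reads that field from a row (attribute access on objects, key lookup on dicts), returns "n/a" for None or "n/a" values, rounds when a precision was given, and otherwise passes the value through unchanged; missing fields raise AttributeError or KeyError. The snippet below is a minimal stand-in that reproduces just that behavior; it is inferred from these tests, not the actual rally.cli.cliutils implementation:

def pretty_float_formatter(field, ndigits=None):
    # Minimal stand-in inferred from the tests above, not the real rally code.
    # Returns a callable that formats a single column of a table row.
    def formatter(row):
        # Dict rows use key lookup (missing key raises KeyError),
        # everything else uses attribute access (raises AttributeError).
        value = row[field] if isinstance(row, dict) else getattr(row, field)
        if value is None or value == "n/a":
            return "n/a"
        return round(value, ndigits) if ndigits is not None else value
    return formatter

fmt = pretty_float_formatter("duration", 3)
print(fmt({"duration": 6.56565}))   # -> 6.566
print(fmt({"duration": None}))      # -> n/a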
Example #11
    def list(self, api, deployment=None, all_deployments=False, status=None,
             tags=None, uuids_only=False):
        """List tasks, started and finished.

        Displayed tasks can be filtered by status or deployment.  By
        default 'rally task list' will display tasks from the active
        deployment without filtering by status.
        """

        filters = {}
        headers = ["UUID", "Deployment name", "Created at", "Load duration",
                   "Status", "Tag(s)"]

        if status in consts.TaskStatus:
            filters["status"] = status
        elif status:
            print("Error: Invalid task status '%s'.\nAvailable statuses: %s"
                  % (status, ", ".join(consts.TaskStatus)),
                  file=sys.stderr)
            return(1)

        if not all_deployments:
            filters["deployment"] = deployment

        if tags:
            filters["tags"] = tags

        task_list = api.task.list(**filters)

        if uuids_only:
            if task_list:
                print("\n".join([t["uuid"] for t in task_list]))
        elif task_list:
            def tags_formatter(t):
                if not t["tags"]:
                    return ""
                return "'%s'" % "', '".join(t["tags"])

            formatters = {
                "Tag(s)": tags_formatter,
                "Load duration": cliutils.pretty_float_formatter(
                    "task_duration", 3),
                "Created at": lambda t: t["created_at"].replace("T", " ")
            }

            cliutils.print_list(
                task_list, fields=headers, normalize_field_names=True,
                sortby_index=headers.index("Created at"),
                formatters=formatters)
        else:
            if status:
                print("There are no tasks in '%s' status. "
                      "To run a new task, use:\n\trally task start"
                      % status)
            else:
                print("There are no tasks. To run a new task, use:\n"
                      "\trally task start")
Example #12
    def list(self, api, deployment=None, all_deployments=False, status=None,
             tags=None, uuids_only=False):
        """List tasks, started and finished.

        Displayed tasks can be filtered by status or deployment.  By
        default 'rally task list' will display tasks from the active
        deployment without filtering by status.
        """

        filters = {}
        headers = ["UUID", "Deployment name", "Created at", "Load duration",
                   "Status", "Tag(s)"]

        if status in consts.TaskStatus:
            filters["status"] = status
        elif status:
            print("Error: Invalid task status '%s'.\nAvailable statuses: %s"
                  % (status, ", ".join(consts.TaskStatus)),
                  file=sys.stderr)
            return(1)

        if not all_deployments:
            filters["deployment"] = deployment

        if tags:
            filters["tags"] = tags

        task_list = api.task.list(**filters)

        if uuids_only:
            if task_list:
                print("\n".join([t["uuid"] for t in task_list]))
        elif task_list:
            def tags_formatter(t):
                if not t["tags"]:
                    return ""
                return "'%s'" % "', '".join(t["tags"])

            formatters = {
                "Tag(s)": tags_formatter,
                "Load duration": cliutils.pretty_float_formatter(
                    "task_duration", 3),
                "Created at": lambda t: t["created_at"].replace("T", " ")
            }

            cliutils.print_list(
                task_list, fields=headers, normalize_field_names=True,
                sortby_index=headers.index("Created at"),
                formatters=formatters)
        else:
            if status:
                print("There are no tasks in '%s' status. "
                      "To run a new task, use:\n\trally task start"
                      % status)
            else:
                print("There are no tasks. To run a new task, use:\n"
                      "\trally task start")
Example #13
        def _print_ssrs_result(result):
            raw = result["data"]["raw"]
            # NOTE(hughsaunders): ssrs=scenario specific results
            ssrs = []
            for result in raw:
                data = result["scenario_output"].get("data")
                if data:
                    ssrs.append(data)
            if ssrs:
                keys = set()
                for ssr in ssrs:
                    keys.update(ssr.keys())
                headers = [
                    "key", "min", "median", "90%ile", "95%ile", "max", "avg"
                ]
                float_cols = [
                    "min", "median", "90%ile", "95%ile", "max", "avg"
                ]
                formatters = dict(
                    zip(float_cols, [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in float_cols
                    ]))
                table_rows = []
                for key in keys:
                    values = [float(ssr[key]) for ssr in ssrs if key in ssr]

                    if values:
                        row = [
                            str(key),
                            round(min(values), 3),
                            round(utils.median(values), 3),
                            round(utils.percentile(values, 0.90), 3),
                            round(utils.percentile(values, 0.95), 3),
                            round(max(values), 3),
                            round(utils.mean(values), 3)
                        ]
                    else:
                        row = [str(key)] + ["n/a"] * 6
                    table_rows.append(rutils.Struct(**dict(zip(headers, row))))
                print("\nScenario Specific Results\n")
                cliutils.print_list(table_rows,
                                    fields=headers,
                                    formatters=formatters,
                                    table_label="Response Times (sec)")

                for result in raw:
                    errors = result["scenario_output"].get("errors")
                    if errors:
                        print(errors)
Example #14
        def _print_ssrs_result(result):
            raw = result["data"]["raw"]
            # NOTE(hughsaunders): ssrs=scenario specific results
            ssrs = []
            for result in raw:
                data = result["scenario_output"].get("data")
                if data:
                    ssrs.append(data)
            if ssrs:
                keys = set()
                for ssr in ssrs:
                    keys.update(ssr.keys())
                headers = ["key", "min", "median",
                           "90%ile", "95%ile", "max",
                           "avg"]
                float_cols = ["min", "median", "90%ile",
                              "95%ile", "max", "avg"]
                formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
                table_rows = []
                for key in keys:
                    values = [float(ssr[key]) for ssr in ssrs if key in ssr]

                    if values:
                        row = [str(key),
                               round(min(values), 3),
                               round(utils.median(values), 3),
                               round(utils.percentile(values, 0.90), 3),
                               round(utils.percentile(values, 0.95), 3),
                               round(max(values), 3),
                               round(utils.mean(values), 3)]
                    else:
                        row = [str(key)] + ["n/a"] * 6
                    table_rows.append(rutils.Struct(**dict(zip(headers,
                                                               row))))
                print("\nScenario Specific Results\n")
                cliutils.print_list(table_rows,
                                    fields=headers,
                                    formatters=formatters,
                                    table_label="Response Times (sec)")

                for result in raw:
                    errors = result["scenario_output"].get("errors")
                    if errors:
                        print(errors)
Example #15
        def _print_summrized_result(result):
            raw = result["data"]["raw"]
            table_cols = [
                "action", "min", "median", "90%ile", "95%ile", "max", "avg",
                "success", "count"
            ]
            float_cols = ["min", "median", "90%ile", "95%ile", "max", "avg"]
            formatters = dict(
                zip(float_cols, [
                    cliutils.pretty_float_formatter(col, 3)
                    for col in float_cols
                ]))
            table_rows = []

            actions_data = utils.get_atomic_actions_data(raw)
            for action in actions_data:
                durations = actions_data[action]
                if durations:
                    data = [
                        action,
                        round(min(durations), 3),
                        round(utils.median(durations), 3),
                        round(utils.percentile(durations, 0.90), 3),
                        round(utils.percentile(durations, 0.95), 3),
                        round(max(durations), 3),
                        round(utils.mean(durations), 3),
                        "%.1f%%" % (len(durations) * 100.0 / len(raw)),
                        len(raw)
                    ]
                else:
                    data = [
                        action, None, None, None, None, None, None, "0.0%",
                        len(raw)
                    ]
                table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))

            cliutils.print_list(table_rows,
                                fields=table_cols,
                                formatters=formatters,
                                table_label="Response Times (sec)",
                                sortby_index=None)
Example #16
        def _print_summrized_result(result):
            raw = result["data"]["raw"]
            table_cols = ["action", "min", "median",
                          "90%ile", "95%ile", "max",
                          "avg", "success", "count"]
            float_cols = ["min", "median",
                          "90%ile", "95%ile", "max",
                          "avg"]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
            table_rows = []

            actions_data = utils.get_atomic_actions_data(raw)
            for action in actions_data:
                durations = actions_data[action]
                if durations:
                    data = [action,
                            round(min(durations), 3),
                            round(utils.median(durations), 3),
                            round(utils.percentile(durations, 0.90), 3),
                            round(utils.percentile(durations, 0.95), 3),
                            round(max(durations), 3),
                            round(utils.mean(durations), 3),
                            "%.1f%%" % (len(durations) * 100.0 / len(raw)),
                            len(raw)]
                else:
                    data = [action, None, None, None, None, None, None,
                            "0.0%", len(raw)]
                table_rows.append(rutils.Struct(**dict(zip(table_cols,
                                                           data))))

            cliutils.print_list(table_rows, fields=table_cols,
                                formatters=formatters,
                                table_label="Response Times (sec)",
                                sortby_index=None)
Example #17
 def test_pretty_float_formatter_raises(self):
     self.__dict__.update({"foo": 123})
     formatter = cliutils.pretty_float_formatter("not_foo")
     self.assertRaises(AttributeError, formatter, self)
Example #18
    def detailed(self, task_id=None, iterations_data=False):
        """Display results table.

        :param task_id: Task uuid
        :param iterations_data: print detailed results for each iteration
        Prints detailed information of task.
        """

        def _print_iterations_data(raw_data):
            headers = ["iteration", "full duration"]
            float_cols = ["full duration"]
            atomic_actions = []
            for row in raw_data:
                # find first non-error result to get atomic actions names
                if not row["error"] and "atomic_actions" in row:
                    atomic_actions = row["atomic_actions"].keys()
            for row in raw_data:
                if row["atomic_actions"]:
                    for (c, a) in enumerate(atomic_actions, 1):
                        action = "%(no)i. %(action)s" % {"no": c, "action": a}
                        headers.append(action)
                        float_cols.append(action)
                    break
            table_rows = []
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
            for (c, r) in enumerate(raw_data, 1):
                dlist = [c]
                dlist.append(r["duration"])
                if r["atomic_actions"]:
                    for action in atomic_actions:
                        dlist.append(r["atomic_actions"].get(action) or 0)
                table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters)
            print()

        task = db.task_get_detailed(task_id)

        if task is None:
            print("The task %s can not be found" % task_id)
            return(1)

        print()
        print("-" * 80)
        print(_("Task %(task_id)s: %(status)s")
              % {"task_id": task_id, "status": task["status"]})

        if task["status"] == consts.TaskStatus.FAILED:
            print("-" * 80)
            verification = yaml.safe_load(task["verification_log"])

            if not logging.is_debug():
                print(verification[0])
                print(verification[1])
                print()
                print(_("For more details run:\nrally -vd task detailed %s")
                      % task["uuid"])
            else:
                print(yaml.safe_load(verification[2]))
            return

        for result in task["results"]:
            key = result["key"]
            print("-" * 80)
            print()
            print("test scenario %s" % key["name"])
            print("args position %s" % key["pos"])
            print("args values:")
            print(json.dumps(key["kw"], indent=2))

            raw = result["data"]["raw"]
            table_cols = ["action", "min", "median",
                          "90%ile", "95%ile", "max",
                          "avg", "success", "count"]
            float_cols = ["min", "median",
                          "90%ile", "95%ile", "max",
                          "avg"]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
            table_rows = []

            actions_data = utils.get_atomic_actions_data(raw)
            for action in actions_data:
                durations = actions_data[action]
                if durations:
                    data = [action,
                            round(min(durations), 3),
                            round(utils.median(durations), 3),
                            round(utils.percentile(durations, 0.90), 3),
                            round(utils.percentile(durations, 0.95), 3),
                            round(max(durations), 3),
                            round(utils.mean(durations), 3),
                            "%.1f%%" % (len(durations) * 100.0 / len(raw)),
                            len(raw)]
                else:
                    data = [action, None, None, None, None, None, None,
                            "0.0%", len(raw)]
                table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))

            cliutils.print_list(table_rows, fields=table_cols,
                                formatters=formatters,
                                table_label="Response Times (sec)",
                                sortby_index=None)

            if iterations_data:
                _print_iterations_data(raw)

            print(_("Load duration: %s") % result["data"]["load_duration"])
            print(_("Full duration: %s") % result["data"]["full_duration"])

            # NOTE(hughsaunders): ssrs=scenario specific results
            ssrs = []
            for result in raw:
                data = result["scenario_output"].get("data")
                if data:
                    ssrs.append(data)
            if ssrs:
                keys = set()
                for ssr in ssrs:
                    keys.update(ssr.keys())
                headers = ["key", "min", "median",
                           "90%ile", "95%ile", "max",
                           "avg"]
                float_cols = ["min", "median", "90%ile",
                              "95%ile", "max", "avg"]
                formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
                table_rows = []
                for key in keys:
                    values = [float(ssr[key]) for ssr in ssrs if key in ssr]

                    if values:
                        row = [str(key),
                               round(min(values), 3),
                               round(utils.median(values), 3),
                               round(utils.percentile(values, 0.90), 3),
                               round(utils.percentile(values, 0.95), 3),
                               round(max(values), 3),
                               round(utils.mean(values), 3)]
                    else:
                        row = [str(key)] + ["n/a"] * 6
                    table_rows.append(rutils.Struct(**dict(zip(headers, row))))
                print("\nScenario Specific Results\n")
                cliutils.print_list(table_rows,
                                    fields=headers,
                                    formatters=formatters,
                                    table_label="Response Times (sec)")

                for result in raw:
                    errors = result["scenario_output"].get("errors")
                    if errors:
                        print(errors)

        print()
        print("HINTS:")
        print(_("* To plot HTML graphics with this data, run:"))
        print("\trally task report %s --out output.html" % task["uuid"])
        print()
        print(_("* To generate a JUnit report, run:"))
        print("\trally task report %s --junit --out output.xml" %
              task["uuid"])
        print()
        print(_("* To get raw JSON output of task results, run:"))
        print("\trally task results %s\n" % task["uuid"])
Example #19
    def detailed(self, task_id=None, iterations_data=False):
        """Print detailed information about given task.

        :param task_id: str, task uuid
        :param iterations_data: bool, include results for each iteration
        """
        task = api.Task.get_detailed(task_id, extended_results=True)

        if not task:
            print("The task %s can not be found" % task_id)
            return 1

        print()
        print("-" * 80)
        print(_("Task %(task_id)s: %(status)s")
              % {"task_id": task_id, "status": task["status"]})

        if task["status"] == consts.TaskStatus.FAILED:
            print("-" * 80)
            verification = yaml.safe_load(task["verification_log"])
            if logging.is_debug():
                print(yaml.safe_load(verification[2]))
            else:
                print(verification[0])
                print(verification[1])
                print(_("\nFor more details run:\nrally -vd task detailed %s")
                      % task["uuid"])
            return 0
        elif task["status"] not in [consts.TaskStatus.FINISHED,
                                    consts.TaskStatus.ABORTED]:
            print("-" * 80)
            print(_("\nThe task %s marked as '%s'. Results "
                    "available when it is '%s'.") % (
                task_id, task["status"], consts.TaskStatus.FINISHED))
            return 0
        for result in task["results"]:
            key = result["key"]
            print("-" * 80)
            print()
            print("test scenario %s" % key["name"])
            print("args position %s" % key["pos"])
            print("args values:")
            print(json.dumps(key["kw"], indent=2))
            print()

            iterations = []
            iterations_headers = ["iteration", "full duration"]
            iterations_actions = []
            output = []
            task_errors = []
            if iterations_data:
                for i, atomic_name in enumerate(result["info"]["atomic"], 1):
                    action = "%i. %s" % (i, atomic_name)
                    iterations_headers.append(action)
                    iterations_actions.append((atomic_name, action))

            for idx, itr in enumerate(result["iterations"], 1):

                if iterations_data:
                    row = {"iteration": idx,
                           "full duration": itr["duration"]}
                    for name, action in iterations_actions:
                        row[action] = itr["atomic_actions"].get(name, 0)
                    iterations.append(row)

                if "output" in itr:
                    iteration_output = itr["output"]
                else:
                    iteration_output = {"additive": [], "complete": []}

                    # NOTE(amaretskiy): "scenario_output" is supported
                    #   for backward compatibility
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        iteration_output["additive"].append(
                            {"data": itr["scenario_output"]["data"].items(),
                             "title": "Scenario output",
                             "description": "",
                             "chart_plugin": "StackedArea"})

                for idx, additive in enumerate(iteration_output["additive"]):
                    if len(output) <= idx + 1:
                        output_table = plot.charts.OutputStatsTable(
                            result["info"], title=additive["title"])
                        output.append(output_table)
                    output[idx].add_iteration(additive["data"])

                if itr.get("error"):
                    task_errors.append(TaskCommands._format_task_error(itr))

            self._print_task_errors(task_id, task_errors)

            cols = plot.charts.MainStatsTable.columns
            float_cols = result["info"]["stat"]["cols"][1:7]
            formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))
            rows = [dict(zip(cols, r)) for r in result["info"]["stat"]["rows"]]
            cliutils.print_list(rows,
                                fields=cols,
                                formatters=formatters,
                                table_label="Response Times (sec)",
                                sortby_index=None)
            print()

            if iterations_data:
                formatters = dict(zip(iterations_headers[1:],
                                      [cliutils.pretty_float_formatter(col, 3)
                                       for col in iterations_headers[1:]]))
                cliutils.print_list(iterations,
                                    fields=iterations_headers,
                                    table_label="Atomics per iteration",
                                    formatters=formatters)
                print()

            if output:
                cols = plot.charts.OutputStatsTable.columns
                float_cols = cols[1:7]
                formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))

                for out in output:
                    data = out.render()
                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                    if rows:
                        # NOTE(amaretskiy): print title explicitly because
                        #     prettytable fails if title length is too long
                        print(data["title"])
                        cliutils.print_list(rows, fields=cols,
                                            formatters=formatters)
                        print()

            print(_("Load duration: %s") %
                  result["info"]["load_duration"])
            print(_("Full duration: %s") %
                  result["info"]["full_duration"])

            print("\nHINTS:")
            print(_("* To plot HTML graphics with this data, run:"))
            print("\trally task report %s --out output.html\n" % task["uuid"])
            print(_("* To generate a JUnit report, run:"))
            print("\trally task report %s --junit --out output.xml\n" %
                  task["uuid"])
            print(_("* To get raw JSON output of task results, run:"))
            print("\trally task results %s\n" % task["uuid"])
Example #20
class CliUtilsTestCase(test.TestCase):
    def setUp(self):
        super(CliUtilsTestCase, self).setUp()
        self.categories = {
            "deployment": deployment.DeploymentCommands,
            "show": show.ShowCommands,
            "task": task.TaskCommands,
            "verify": verify.VerifyCommands
        }

    def tearDown(self):
        self._unregister_opts()
        super(CliUtilsTestCase, self).tearDown()

    @mock.patch("rally.cli.cliutils.os.path")
    def test_find_config_files(self, mock_os_path):

        mock_os_path.expanduser.return_value = "expanduser"
        mock_os_path.abspath.return_value = "abspath"
        mock_os_path.isfile.return_value = True

        result = cliutils.find_config_files(["path1", "path2"])
        mock_os_path.expanduser.assert_called_once_with("path1")
        mock_os_path.abspath.assert_called_once_with(
            mock_os_path.expanduser.return_value)
        mock_os_path.isfile.assert_called_once_with(
            mock_os_path.abspath.return_value + "/rally.conf")
        self.assertEqual([mock_os_path.abspath.return_value + "/rally.conf"],
                         result)

        mock_os_path.isfile.return_value = False

        result = cliutils.find_config_files(["path1", "path2"])
        self.assertIsNone(result)

    def test_make_header(self):
        h1 = cliutils.make_header("msg", size=4, symbol="=")
        self.assertEqual(h1, "====\n msg\n====\n")

    def test_make_table_header(self):
        actual = cliutils.make_table_header("Response Times (sec)", 40)
        expected = "\n".join((
            "+--------------------------------------+",
            "|         Response Times (sec)         |",
        ))
        self.assertEqual(expected, actual)

        actual = cliutils.make_table_header("Response Times (sec)", 39)
        expected = "\n".join((
            "+-------------------------------------+",
            "|        Response Times (sec)         |",
        ))
        self.assertEqual(expected, actual)

        self.assertRaises(ValueError, cliutils.make_table_header,
                          "Response Times (sec)", len("Response Times (sec)"))

    @ddt.data(
        {
            "obj": mock.Mock(foo=6.56565),
            "args": ["foo", 3],
            "expected": 6.566
        }, {
            "obj": mock.Mock(foo=6.56565),
            "args": ["foo"],
            "expected": 6.56565
        }, {
            "obj": mock.Mock(foo=None),
            "args": ["foo"],
            "expected": "n/a"
        }, {
            "obj": mock.Mock(foo="n/a"),
            "args": ["foo"],
            "expected": "n/a"
        }, {
            "obj": mock.Mock(foo="n/a"),
            "args": ["foo", 3],
            "expected": "n/a"
        }, {
            "obj": {
                "foo": 6.56565
            },
            "args": ["foo", 3],
            "expected": 6.566
        }, {
            "obj": {
                "foo": 6.56565
            },
            "args": ["foo"],
            "expected": 6.56565
        }, {
            "obj": {
                "foo": None
            },
            "args": ["foo"],
            "expected": "n/a"
        }, {
            "obj": {
                "foo": "n/a"
            },
            "args": ["foo"],
            "expected": "n/a"
        }, {
            "obj": {
                "foo": "n/a"
            },
            "args": ["foo", 3],
            "expected": "n/a"
        }, {
            "obj": object,
            "args": ["unexpected_field", 3],
            "expected": AttributeError
        }, {
            "obj": {
                "foo": 42
            },
            "args": ["unexpected_field", 3],
            "expected": KeyError
        })
    @ddt.unpack
    def test_pretty_float_formatter(self, obj, args, expected=None):
        formatter = cliutils.pretty_float_formatter(*args)
        if type(expected) == type and issubclass(expected, Exception):
            self.assertRaises(expected, formatter, obj)
        else:
            self.assertEqual(expected, formatter(obj))

    def test_process_keyestone_exc(self):
        @cliutils.process_keystone_exc
        def a(a):
            if a == 1:
                raise keystone_exc.Unauthorized()

            if a == 2:
                raise keystone_exc.AuthorizationFailure()

            if a == 3:
                raise keystone_exc.ConnectionRefused()

            return a

        self.assertEqual(1, a(1))
        self.assertEqual(1, a(2))
        self.assertEqual(1, a(3))
        self.assertEqual(4, a(4))

    def test__methods_of_with_class(self):
        class fake_class(object):
            def public(self):
                pass

            def _private(self):
                pass

        result = cliutils._methods_of(fake_class)
        self.assertEqual(1, len(result))
        self.assertEqual("public", result[0][0])

    def test__methods_of_with_object(self):
        class fake_class(object):
            def public(self):
                pass

            def _private(self):
                pass

        mock_obj = fake_class()
        result = cliutils._methods_of(mock_obj)
        self.assertEqual(1, len(result))
        self.assertEqual("public", result[0][0])

    def test__methods_of_empty_result(self):
        class fake_class(object):
            def _private(self):
                pass

            def _private2(self):
                pass

        mock_obj = fake_class()
        result = cliutils._methods_of(mock_obj)
        self.assertEqual(result, [])

    def _unregister_opts(self):
        CONF.reset()
        category_opt = cfg.SubCommandOpt("category",
                                         title="Command categories",
                                         help="Available categories")
        CONF.unregister_opt(category_opt)

    @mock.patch("rally.cli.cliutils.CONF",
                config_file=None,
                side_effect=cfg.ConfigFilesNotFoundError("config_file"))
    def test_run_fails(self, mock_cliutils_conf):
        ret = cliutils.run(["rally", "show", "flavors"], self.categories)
        self.assertEqual(ret, 2)

    def test_run_version(self):
        ret = cliutils.run(["rally", "version"], self.categories)
        self.assertEqual(ret, 0)

    def test_run_bash_completion(self):
        ret = cliutils.run(["rally", "bash-completion"], self.categories)
        self.assertEqual(ret, 0)

    def test_run_show(self):
        ret = cliutils.run(["rally", "show", "keypairs"], self.categories)
        self.assertEqual(ret, 1)

    @mock.patch("rally.common.db.task_get",
                side_effect=exceptions.TaskNotFound(uuid=FAKE_TASK_UUID))
    def test_run_task_not_found(self, mock_task_get):
        ret = cliutils.run(["rally", "task", "status",
                            "%s" % FAKE_TASK_UUID], self.categories)
        self.assertTrue(mock_task_get.called)
        self.assertEqual(ret, 1)

    @mock.patch("rally.cli.cliutils.validate_args",
                side_effect=cliutils.MissingArgs("missing"))
    def test_run_show_fails(self, mock_validate_args):
        ret = cliutils.run(["rally", "show", "keypairs"], self.categories)
        self.assertTrue(mock_validate_args.called)
        self.assertEqual(ret, 1)

    def test_run_failed_to_open_file(self):
        class FailuresCommands(object):
            def failed_to_open_file(self):
                raise IOError("No such file")

        ret = cliutils.run(["rally", "failure", "failed_to_open_file"],
                           {"failure": FailuresCommands})
        self.assertEqual(1, ret)

    def test_run_sqlalchmey_operational_failure(self):
        class SQLAlchemyCommands(object):
            def operational_failure(self):
                raise sqlalchemy.exc.OperationalError("Can't open DB file")

        ret = cliutils.run(["rally", "failure", "operational_failure"],
                           {"failure": SQLAlchemyCommands})
        self.assertEqual(1, ret)

    class TestObj(object):
        x = 1
        y = 2
        z = 3.142857142857143
        aOrB = 3  # mixed case field

    @ddt.data(
        {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "sortby_index": None
            },
            "expected": ("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+")
        }, {
            "args": [[TestObj()], ["z"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "sortby_index": None,
                "formatters": {
                    "z": cliutils.pretty_float_formatter("z", 5)
                }
            },
            "expected": ("+---------+\n"
                         "| z       |\n"
                         "+---------+\n"
                         "| 3.14286 |\n"
                         "+---------+")
        }, {
            "args": [[TestObj()], ["x"]],
            "kwargs": {
                "print_header": True,
                "print_border": True
            },
            "expected": ("+---+\n"
                         "| x |\n"
                         "+---+\n"
                         "| 1 |\n"
                         "+---+")
        }, {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": True
            },
            "expected": ("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+")
        }, {
            "args": [[TestObj()], ["x"]],
            "kwargs": {
                "print_header": False,
                "print_border": False
            },
            "expected": "1"
        }, {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": False,
                "print_border": False
            },
            "expected": "1 2"
        }, {
            "args": [[TestObj()], ["x"]],
            "kwargs": {
                "print_header": True,
                "print_border": False
            },
            "expected": "x \n1"
        }, {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": False
            },
            "expected": "x y \n1 2"
        }, {
            "args": [[TestObj()], ["x"]],
            "kwargs": {
                "print_header": False,
                "print_border": True
            },
            "expected": ("+--+\n"
                         "|1 |\n"
                         "+--+")
        }, {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": False,
                "print_border": True
            },
            "expected": ("+--+--+\n"
                         "|1 |2 |\n"
                         "+--+--+")
        }, {
            "args": [[TestObj()], ["aOrB"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "mixed_case_fields": ["aOrB"]
            },
            "expected": ("+------+\n"
                         "| aOrB |\n"
                         "+------+\n"
                         "| 3    |\n"
                         "+------+")
        }, {
            "args": [[TestObj()], ["aOrB"]],
            "kwargs": {
                "print_header": False,
                "print_border": True,
                "mixed_case_fields": ["aOrB"]
            },
            "expected": ("+--+\n"
                         "|3 |\n"
                         "+--+")
        }, {
            "args": [[TestObj()], ["aOrB"]],
            "kwargs": {
                "print_header": True,
                "print_border": False,
                "mixed_case_fields": ["aOrB"]
            },
            "expected": "aOrB \n3"
        }, {
            "args": [[TestObj()], ["aOrB"]],
            "kwargs": {
                "print_header": False,
                "print_border": False,
                "mixed_case_fields": ["aOrB"]
            },
            "expected": "3"
        }, {
            "args": [[{
                "x": 1,
                "y": 2
            }], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "sortby_index": None
            },
            "expected": ("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+")
        }, {
            "args": [[{
                "z": 3.142857142857143
            }], ["z"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "sortby_index": None,
                "formatters": {
                    "z": cliutils.pretty_float_formatter("z", 5)
                }
            },
            "expected": ("+---------+\n"
                         "| z       |\n"
                         "+---------+\n"
                         "| 3.14286 |\n"
                         "+---------+")
        }, {
            "args": [[{
                "x": 1
            }], ["x"]],
            "kwargs": {
                "print_header": True,
                "print_border": True
            },
            "expected": ("+---+\n"
                         "| x |\n"
                         "+---+\n"
                         "| 1 |\n"
                         "+---+")
        }, {
            "args": [[{
                "x": 1,
                "y": 2
            }], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": True
            },
            "expected": ("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+")
        })
    @ddt.unpack
    def test_print_list(self, args, kwargs, expected):
        out = moves.StringIO()
        kwargs["out"] = out
        cliutils.print_list(*args, **kwargs)
        self.assertEqual(expected, out.getvalue().strip())

    def test_print_list_raises(self):
        out = moves.StringIO()
        self.assertRaisesRegexp(ValueError,
                                "Field labels list.*has different number "
                                "of elements than fields list",
                                cliutils.print_list, [self.TestObj()], ["x"],
                                field_labels=["x", "y"],
                                sortby_index=None,
                                out=out)
Example #21
 def test_pretty_float_formatter(self, obj, args, expected=None):
     formatter = cliutils.pretty_float_formatter(*args)
     if type(expected) == type and issubclass(expected, Exception):
         self.assertRaises(expected, formatter, obj)
     else:
         self.assertEqual(expected, formatter(obj))
Example #22
 def test_pretty_float_formatter_nonrounding_with_dict(self):
     formatter = cliutils.pretty_float_formatter("foo")
     self.assertEqual(6.56565, formatter({"foo": 6.56565}))
Example #23
    def detailed(self, api, task_id=None, iterations_data=False):
        """Print detailed information about given task.

        :param task_id: str, task uuid
        :param iterations_data: bool, include results for each iteration
        """
        task = api.task.get_detailed(task_id, extended_results=True)

        if not task:
            print("The task %s can not be found" % task_id)
            return 1

        print()
        print("-" * 80)
        print(
            _("Task %(task_id)s: %(status)s") % {
                "task_id": task_id,
                "status": task["status"]
            })

        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
                consts.TaskStatus.VALIDATION_FAILED):
            print("-" * 80)
            verification = yaml.safe_load(task["verification_log"])
            if logging.is_debug():
                print(yaml.safe_load(verification["trace"]))
            else:
                print(verification["etype"])
                print(verification["msg"])
                print(
                    _("\nFor more details run:\nrally -d task detailed %s") %
                    task["uuid"])
            return 0
        elif task["status"] not in [
                consts.TaskStatus.FINISHED, consts.TaskStatus.ABORTED
        ]:
            print("-" * 80)
            print(
                _("\nThe task %s marked as '%s'. Results "
                  "available when it is '%s'.") %
                (task_id, task["status"], consts.TaskStatus.FINISHED))
            return 0
        for result in task["results"]:
            key = result["key"]
            print("-" * 80)
            print()
            print("test scenario %s" % key["name"])
            print("args position %s" % key["pos"])
            print("args values:")
            print(json.dumps(key["kw"], indent=2))
            print()

            iterations = []
            iterations_headers = ["iteration", "duration"]
            iterations_actions = []
            output = []
            task_errors = []
            if iterations_data:
                for i, atomic_name in enumerate(result["info"]["atomic"], 1):
                    action = "%i. %s" % (i, atomic_name)
                    iterations_headers.append(action)
                    iterations_actions.append((atomic_name, action))

            for idx, itr in enumerate(result["iterations"], 1):

                if iterations_data:
                    row = {"iteration": idx, "duration": itr["duration"]}
                    for name, action in iterations_actions:
                        row[action] = itr["atomic_actions"].get(name, 0)
                    iterations.append(row)

                if "output" in itr:
                    iteration_output = itr["output"]
                else:
                    iteration_output = {"additive": [], "complete": []}

                    # NOTE(amaretskiy): "scenario_output" is supported
                    #   for backward compatibility
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        iteration_output["additive"].append({
                            "data":
                            itr["scenario_output"]["data"].items(),
                            "title":
                            "Scenario output",
                            "description":
                            "",
                            "chart_plugin":
                            "StackedArea"
                        })

                for idx, additive in enumerate(iteration_output["additive"]):
                    if len(output) <= idx + 1:
                        output_table = plot.charts.OutputStatsTable(
                            result["info"], title=additive["title"])
                        output.append(output_table)
                    output[idx].add_iteration(additive["data"])

                if itr.get("error"):
                    task_errors.append(TaskCommands._format_task_error(itr))

            self._print_task_errors(task_id, task_errors)

            cols = plot.charts.MainStatsTable.columns
            float_cols = result["info"]["stat"]["cols"][1:7]
            formatters = dict(
                zip(float_cols, [
                    cliutils.pretty_float_formatter(col, 3)
                    for col in float_cols
                ]))
            rows = [dict(zip(cols, r)) for r in result["info"]["stat"]["rows"]]
            cliutils.print_list(rows,
                                fields=cols,
                                formatters=formatters,
                                table_label="Response Times (sec)",
                                sortby_index=None)
            print()

            if iterations_data:
                formatters = dict(
                    zip(iterations_headers[1:], [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in iterations_headers[1:]
                    ]))
                cliutils.print_list(iterations,
                                    fields=iterations_headers,
                                    table_label="Atomics per iteration",
                                    formatters=formatters)
                print()

            if output:
                cols = plot.charts.OutputStatsTable.columns
                float_cols = cols[1:7]
                formatters = dict(
                    zip(float_cols, [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in float_cols
                    ]))

                for out in output:
                    data = out.render()
                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                    if rows:
                        # NOTE(amaretskiy): print title explicitly because
                        #     prettytable fails if title length is too long
                        print(data["title"])
                        cliutils.print_list(rows,
                                            fields=cols,
                                            formatters=formatters)
                        print()

            print(
                _("Load duration: %s") %
                rutils.format_float_to_str(result["info"]["load_duration"]))
            print(
                _("Full duration: %s") %
                rutils.format_float_to_str(result["info"]["full_duration"]))

            print("\nHINTS:")
            print(_("* To plot HTML graphics with this data, run:"))
            print("\trally task report %s --out output.html\n" % task["uuid"])
            print(_("* To generate a JUnit report, run:"))
            print("\trally task report %s --junit --out output.xml\n" %
                  task["uuid"])
            print(_("* To get raw JSON output of task results, run:"))
            print("\trally task results %s\n" % task["uuid"])
Example #24
0
    def detailed(self, task_id=None, iterations_data=False):
        """Display results table.

        :param task_id: Task uuid
        :param iterations_data: print detailed results for each iteration
        Prints detailed information of task.
        """
        def _print_iterations_data(raw_data):
            headers = ["iteration", "full duration"]
            float_cols = ["full duration"]
            atomic_actions = []
            for row in raw_data:
                # find first non-error result to get atomic actions names
                if not row["error"] and "atomic_actions" in row:
                    atomic_actions = row["atomic_actions"].keys()
                    break
            for row in raw_data:
                if row["atomic_actions"]:
                    for (c, a) in enumerate(atomic_actions, 1):
                        action = "%(no)i. %(action)s" % {"no": c, "action": a}
                        headers.append(action)
                        float_cols.append(action)
                    break
            table_rows = []
            formatters = dict(
                zip(float_cols, [
                    cliutils.pretty_float_formatter(col, 3)
                    for col in float_cols
                ]))
            for (c, r) in enumerate(raw_data, 1):
                dlist = [c]
                dlist.append(r["duration"])
                if r["atomic_actions"]:
                    for action in atomic_actions:
                        dlist.append(r["atomic_actions"].get(action) or 0)
                table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
            cliutils.print_list(table_rows,
                                fields=headers,
                                formatters=formatters)
            print()

        task = db.task_get_detailed(task_id)

        if task is None:
            print("The task %s cannot be found" % task_id)
            return 1

        print()
        print("-" * 80)
        print(
            _("Task %(task_id)s: %(status)s") % {
                "task_id": task_id,
                "status": task["status"]
            })

        if task["status"] == consts.TaskStatus.FAILED:
            print("-" * 80)
            verification = yaml.safe_load(task["verification_log"])

            if not logging.is_debug():
                print(verification[0])
                print(verification[1])
                print()
                print(
                    _("For more details run:\nrally -vd task detailed %s") %
                    task["uuid"])
            else:
                print(yaml.safe_load(verification[2]))
            return

        for result in task["results"]:
            key = result["key"]
            print("-" * 80)
            print()
            print("test scenario %s" % key["name"])
            print("args position %s" % key["pos"])
            print("args values:")
            print(json.dumps(key["kw"], indent=2))

            raw = result["data"]["raw"]
            table_cols = [
                "action", "min", "median", "90%ile", "95%ile", "max", "avg",
                "success", "count"
            ]
            float_cols = ["min", "median", "90%ile", "95%ile", "max", "avg"]
            formatters = dict(
                zip(float_cols, [
                    cliutils.pretty_float_formatter(col, 3)
                    for col in float_cols
                ]))
            table_rows = []

            actions_data = utils.get_atomic_actions_data(raw)
            for action in actions_data:
                durations = actions_data[action]
                if durations:
                    data = [
                        action,
                        round(min(durations), 3),
                        round(utils.median(durations), 3),
                        round(utils.percentile(durations, 0.90), 3),
                        round(utils.percentile(durations, 0.95), 3),
                        round(max(durations), 3),
                        round(utils.mean(durations), 3),
                        "%.1f%%" % (len(durations) * 100.0 / len(raw)),
                        len(raw)
                    ]
                else:
                    data = [
                        action, None, None, None, None, None, None, "0.0%",
                        len(raw)
                    ]
                table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))

            cliutils.print_list(table_rows,
                                fields=table_cols,
                                formatters=formatters,
                                table_label="Response Times (sec)",
                                sortby_index=None)

            if iterations_data:
                _print_iterations_data(raw)

            print(_("Load duration: %s") % result["data"]["load_duration"])
            print(_("Full duration: %s") % result["data"]["full_duration"])

            # NOTE(hughsaunders): ssrs=scenario specific results
            ssrs = []
            for itr in raw:
                data = itr["scenario_output"].get("data")
                if data:
                    ssrs.append(data)
            if ssrs:
                keys = set()
                for ssr in ssrs:
                    keys.update(ssr.keys())
                headers = [
                    "key", "min", "median", "90%ile", "95%ile", "max", "avg"
                ]
                float_cols = [
                    "min", "median", "90%ile", "95%ile", "max", "avg"
                ]
                formatters = dict(
                    zip(float_cols, [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in float_cols
                    ]))
                table_rows = []
                for key in keys:
                    values = [float(ssr[key]) for ssr in ssrs if key in ssr]

                    if values:
                        row = [
                            str(key),
                            round(min(values), 3),
                            round(utils.median(values), 3),
                            round(utils.percentile(values, 0.90), 3),
                            round(utils.percentile(values, 0.95), 3),
                            round(max(values), 3),
                            round(utils.mean(values), 3)
                        ]
                    else:
                        row = [str(key)] + ["n/a"] * 6
                    table_rows.append(rutils.Struct(**dict(zip(headers, row))))
                print("\nScenario Specific Results\n")
                cliutils.print_list(table_rows,
                                    fields=headers,
                                    formatters=formatters,
                                    table_label="Response Times (sec)")

                for itr in raw:
                    errors = itr["scenario_output"].get("errors")
                    if errors:
                        print(errors)

        print()
        print("HINTS:")
        print(_("* To plot HTML graphics with this data, run:"))
        print("\trally task report %s --out output.html" % task["uuid"])
        print()
        print(_("* To generate a JUnit report, run:"))
        print("\trally task report %s --junit --out output.xml" % task["uuid"])
        print()
        print(_("* To get raw JSON output of task results, run:"))
        print("\trally task results %s\n" % task["uuid"])
Example #25
0
    def test_print_list(self):
        class TestObj(object):
            x = 1
            y = 2
            z = 3.142857142857143
            aOrB = 3  # mixed case field

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=True,
                            print_border=True,
                            sortby_index=None,
                            out=out)
        self.assertEqual(
            "+---+---+\n"
            "| x | y |\n"
            "+---+---+\n"
            "| 1 | 2 |\n"
            "+---+---+",
            out.getvalue().strip())

        out = moves.StringIO()
        formatter = cliutils.pretty_float_formatter("z", 5)
        cliutils.print_list([TestObj()], ["z"],
                            print_header=True,
                            print_border=True,
                            sortby_index=None,
                            formatters={"z": formatter},
                            out=out)
        self.assertEqual(
            "+---------+\n"
            "| z       |\n"
            "+---------+\n"
            "| 3.14286 |\n"
            "+---------+",
            out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x"],
                            print_header=True,
                            print_border=True,
                            out=out)
        self.assertEqual("+---+\n"
                         "| x |\n"
                         "+---+\n"
                         "| 1 |\n"
                         "+---+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=True,
                            print_border=True,
                            out=out)
        self.assertEqual(
            "+---+---+\n"
            "| x | y |\n"
            "+---+---+\n"
            "| 1 | 2 |\n"
            "+---+---+",
            out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x"],
                            print_header=False,
                            print_border=False,
                            out=out)
        self.assertEqual("1", out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=False,
                            print_border=False,
                            out=out)
        self.assertEqual("1 2", out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x"],
                            print_header=True,
                            print_border=False,
                            out=out)
        self.assertEqual("x \n1", out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=True,
                            print_border=False,
                            out=out)
        self.assertEqual("x y \n1 2", out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x"],
                            print_header=False,
                            print_border=True,
                            out=out)
        self.assertEqual("+--+\n" "|1 |\n" "+--+", out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=False,
                            print_border=True,
                            out=out)
        self.assertEqual("+--+--+\n"
                         "|1 |2 |\n"
                         "+--+--+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["aOrB"],
                            mixed_case_fields=["aOrB"],
                            print_header=True,
                            print_border=True,
                            out=out)
        self.assertEqual(
            "+------+\n"
            "| aOrB |\n"
            "+------+\n"
            "| 3    |\n"
            "+------+",
            out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["aOrB"],
                            mixed_case_fields=["aOrB"],
                            print_header=False,
                            print_border=True,
                            out=out)
        self.assertEqual("+--+\n" "|3 |\n" "+--+", out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["aOrB"],
                            mixed_case_fields=["aOrB"],
                            print_header=True,
                            print_border=False,
                            out=out)
        self.assertEqual("aOrB \n" "3", out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["aOrB"],
                            mixed_case_fields=["aOrB"],
                            print_header=False,
                            print_border=False,
                            out=out)
        self.assertEqual("3", out.getvalue().strip())

        out = moves.StringIO()
        self.assertRaisesRegexp(ValueError,
                                "Field labels list.*has different number "
                                "of elements than fields list",
                                cliutils.print_list, [TestObj()], ["x"],
                                field_labels=["x", "y"],
                                sortby_index=None,
                                out=out)
Example #26
0
    def test_pretty_float_formatter_none_value_with_dict(self):
        formatter = cliutils.pretty_float_formatter("foo")
        self.assertEqual("n/a", formatter({"foo": None}))
Example #27
0
    def test_pretty_float_formatter_raises_with_dict(self):
        formatter = cliutils.pretty_float_formatter("foo")
        self.assertRaises(KeyError, formatter, {"not_foo": 123})
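
# --- Editor's note: illustrative sketch, not part of the original example ---
# A rough approximation of the contract the two tests above (and the ddt
# cases further below) exercise: the returned formatter reads the field from
# a dict or from an attribute, optionally rounds the value, and maps None
# (or "n/a") to "n/a".  This is not Rally's implementation, only a sketch.
def pretty_float_formatter_sketch(field, ndigits=None):
    def formatter(obj):
        value = obj[field] if isinstance(obj, dict) else getattr(obj, field)
        if value is None or value == "n/a":
            return "n/a"
        return round(value, ndigits) if ndigits is not None else value
    return formatter


fmt = pretty_float_formatter_sketch("foo", 3)
print(fmt({"foo": 6.56565}))    # -> 6.566
print(fmt({"foo": None}))       # -> n/a
# fmt({"not_foo": 123}) raises KeyError, matching the second test above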
Example #28
0
    def _detailed(self,
                  api,
                  task_id=None,
                  iterations_data=False,
                  filters=None):
        """Print detailed information about given task."""
        scenarios_filter = []
        only_sla_failures = False
        for flt in filters or []:
            if flt.startswith("scenario="):
                filter_value = flt.split("=")[1]
                scenarios_filter = filter_value.split(",")
            if flt == "sla-failures":
                only_sla_failures = True

        task = api.task.get(task_id=task_id, detailed=True)

        print()
        print("-" * 80)
        print("Task %(task_id)s: %(status)s" % {
            "task_id": task_id,
            "status": task["status"]
        })

        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
                consts.TaskStatus.VALIDATION_FAILED):
            print("-" * 80)
            validation = task["validation_result"]
            if logging.is_debug():
                print(yaml.safe_load(validation["trace"]))
            else:
                print(validation["etype"])
                print(validation["msg"])
                print("\nFor more details run:\nrally -d task detailed %s" %
                      task["uuid"])
            return 0
        elif task["status"] not in [
                consts.TaskStatus.FINISHED, consts.TaskStatus.ABORTED
        ]:
            print("-" * 80)
            print("\nThe task %s marked as '%s'. Results "
                  "available when it is '%s'." %
                  (task_id, task["status"], consts.TaskStatus.FINISHED))
            return 0

        for workload in itertools.chain(
                *[s["workloads"] for s in task["subtasks"]]):
            if scenarios_filter and workload["name"] not in scenarios_filter:
                continue
            if only_sla_failures and workload["pass_sla"]:
                continue

            print("-" * 80)
            print()
            print("test scenario %s" % workload["name"])
            print("args position %s" % workload["position"])
            print("args values:")
            print(
                json.dumps(
                    {
                        "args": workload["args"],
                        "runner": workload["runner"],
                        "contexts": workload["contexts"],
                        "sla": workload["sla"],
                        "hooks": [r["config"] for r in workload["hooks"]]
                    },
                    indent=2))
            print()

            duration_stats = workload["statistics"]["durations"]

            iterations = []
            iterations_headers = ["iteration", "duration"]
            iterations_actions = []
            output = []
            task_errors = []
            if iterations_data:
                atomic_names = [
                    a["display_name"] for a in duration_stats["atomics"]
                ]
                for i, atomic_name in enumerate(atomic_names, 1):
                    action = "%i. %s" % (i, atomic_name)
                    iterations_headers.append(action)
                    iterations_actions.append((atomic_name, action))

            for idx, itr in enumerate(workload["data"], 1):

                if iterations_data:
                    row = {"iteration": idx, "duration": itr["duration"]}
                    for name, action in iterations_actions:
                        atomic_actions = atomic.merge_atomic_actions(
                            itr["atomic_actions"])
                        row[action] = atomic_actions.get(name, {}).get(
                            "duration", 0)
                    iterations.append(row)

                if "output" in itr:
                    iteration_output = itr["output"]
                else:
                    iteration_output = {"additive": [], "complete": []}

                for idx, additive in enumerate(iteration_output["additive"]):
                    # ensure a stats table exists for this output index
                    if len(output) <= idx:
                        output_table = charts.OutputStatsTable(
                            workload, title=additive["title"])
                        output.append(output_table)
                    output[idx].add_iteration(additive["data"])

                if itr.get("error"):
                    task_errors.append(TaskCommands._format_task_error(itr))

            self._print_task_errors(task_id, task_errors)

            cols = charts.MainStatsTable.columns
            formatters = {
                "Action": lambda x: x["display_name"],
                "Min (sec)": lambda x: x["data"]["min"],
                "Median (sec)": lambda x: x["data"]["median"],
                "90%ile (sec)": lambda x: x["data"]["90%ile"],
                "95%ile (sec)": lambda x: x["data"]["95%ile"],
                "Max (sec)": lambda x: x["data"]["max"],
                "Avg (sec)": lambda x: x["data"]["avg"],
                "Success": lambda x: x["data"]["success"],
                "Count": lambda x: x["data"]["iteration_count"]
            }

            rows = []

            def make_flat(r, depth=0):
                if depth > 0:
                    r["display_name"] = (" %s> %s" %
                                         ("-" * depth, r["display_name"]))

                rows.append(r)
                for children in r["children"]:
                    make_flat(children, depth + 1)

            for row in itertools.chain(duration_stats["atomics"],
                                       [duration_stats["total"]]):
                make_flat(row)
            cliutils.print_list(rows,
                                fields=cols,
                                formatters=formatters,
                                normalize_field_names=True,
                                table_label="Response Times (sec)",
                                sortby_index=None)
            print()

            if iterations_data:
                formatters = dict(
                    zip(iterations_headers[1:], [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in iterations_headers[1:]
                    ]))
                cliutils.print_list(iterations,
                                    fields=iterations_headers,
                                    table_label="Atomics per iteration",
                                    formatters=formatters)
                print()

            if output:
                cols = charts.OutputStatsTable.columns
                float_cols = cols[1:7]
                formatters = dict(
                    zip(float_cols, [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in float_cols
                    ]))

                for out in output:
                    data = out.render()
                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                    if rows:
                        # NOTE(amaretskiy): print title explicitly because
                        #     prettytable fails if title length is too long
                        print(data["title"])
                        cliutils.print_list(rows,
                                            fields=cols,
                                            formatters=formatters)
                        print()

            print("Load duration: %s" %
                  strutils.format_float_to_str(workload["load_duration"]))
            print("Full duration: %s" %
                  strutils.format_float_to_str(workload["full_duration"]))

        print("\nHINTS:")
        print("* To plot HTML graphics with this data, run:")
        print("\trally task report %s --out output.html\n" % task["uuid"])
        print("* To generate a JUnit report, run:")
        print("\trally task export %s --type junit --to output.xml\n" %
              task["uuid"])
        print("* To get raw JSON output of task results, run:")
        print("\trally task report %s --json --out output.json\n" %
              task["uuid"])

        if not task["pass_sla"]:
            print("At least one workload did not pass SLA criteria.\n")
            return 1
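
# --- Editor's note: illustrative sketch, not part of the original example ---
# make_flat() above walks the nested per-action duration statistics and
# prefixes child actions with an increasing "-...> " marker before handing
# the flattened rows to print_list().  A tiny standalone rerun of the same
# idea on hypothetical data:
flat_rows = []


def make_flat_sketch(r, depth=0):
    if depth > 0:
        r["display_name"] = " %s> %s" % ("-" * depth, r["display_name"])
    flat_rows.append(r)
    for child in r["children"]:
        make_flat_sketch(child, depth + 1)


sample = {"display_name": "boot_and_delete_server",
          "children": [
              {"display_name": "nova.boot_server", "children": []},
              {"display_name": "nova.delete_server",
               "children": [{"display_name": "wait_for_delete",
                             "children": []}]}]}
make_flat_sketch(sample)
print([r["display_name"] for r in flat_rows])
# ['boot_and_delete_server', ' -> nova.boot_server',
#  ' -> nova.delete_server', ' --> wait_for_delete']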
Example #29
0
    def _detailed(self, api, task_id=None, iterations_data=False):
        """Print detailed information about given task."""

        task = api.task.get(task_id=task_id, detailed=True)

        print()
        print("-" * 80)
        print("Task %(task_id)s: %(status)s"
              % {"task_id": task_id, "status": task["status"]})

        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
                consts.TaskStatus.VALIDATION_FAILED):
            print("-" * 80)
            validation = task["validation_result"]
            if logging.is_debug():
                print(yaml.safe_load(validation["trace"]))
            else:
                print(validation["etype"])
                print(validation["msg"])
                print("\nFor more details run:\nrally -d task detailed %s"
                      % task["uuid"])
            return 0
        elif task["status"] not in [consts.TaskStatus.FINISHED,
                                    consts.TaskStatus.ABORTED]:
            print("-" * 80)
            print("\nThe task %s marked as '%s'. Results "
                  "available when it is '%s'."
                  % (task_id, task["status"], consts.TaskStatus.FINISHED))
            return 0

        for workload in itertools.chain(
                *[s["workloads"] for s in task["subtasks"]]):
            print("-" * 80)
            print()
            print("test scenario %s" % workload["name"])
            print("args position %s" % workload["position"])
            print("args values:")
            print(json.dumps(
                {"args": workload["args"],
                 "runner": workload["runner"],
                 "contexts": workload["contexts"],
                 "sla": workload["sla"],
                 "hooks": [r["config"] for r in workload["hooks"]]},
                indent=2))
            print()

            duration_stats = workload["statistics"]["durations"]

            iterations = []
            iterations_headers = ["iteration", "duration"]
            iterations_actions = []
            output = []
            task_errors = []
            if iterations_data:
                atomic_names = [a["display_name"]
                                for a in duration_stats["atomics"]]
                for i, atomic_name in enumerate(atomic_names, 1):
                    action = "%i. %s" % (i, atomic_name)
                    iterations_headers.append(action)
                    iterations_actions.append((atomic_name, action))

            for idx, itr in enumerate(workload["data"], 1):

                if iterations_data:
                    row = {"iteration": idx, "duration": itr["duration"]}
                    for name, action in iterations_actions:
                        atomic_actions = atomic.merge_atomic_actions(
                            itr["atomic_actions"])
                        row[action] = atomic_actions.get(name, {}).get(
                            "duration", 0)
                    iterations.append(row)

                if "output" in itr:
                    iteration_output = itr["output"]
                else:
                    iteration_output = {"additive": [], "complete": []}

                for idx, additive in enumerate(iteration_output["additive"]):
                    # ensure a stats table exists for this output index
                    if len(output) <= idx:
                        output_table = charts.OutputStatsTable(
                            workload, title=additive["title"])
                        output.append(output_table)
                    output[idx].add_iteration(additive["data"])

                if itr.get("error"):
                    task_errors.append(TaskCommands._format_task_error(itr))

            self._print_task_errors(task_id, task_errors)

            cols = charts.MainStatsTable.columns
            formatters = {
                "Action": lambda x: x["display_name"],
                "Min (sec)": lambda x: x["data"]["min"],
                "Median (sec)": lambda x: x["data"]["median"],
                "90%ile (sec)": lambda x: x["data"]["90%ile"],
                "95%ile (sec)": lambda x: x["data"]["95%ile"],
                "Max (sec)": lambda x: x["data"]["max"],
                "Avg (sec)": lambda x: x["data"]["avg"],
                "Success": lambda x: x["data"]["success"],
                "Count": lambda x: x["data"]["iteration_count"]
            }

            rows = []

            def make_flat(r, depth=0):
                if depth > 0:
                    r["display_name"] = (" %s> %s" % ("-" * depth,
                                                      r["display_name"]))

                rows.append(r)
                for children in r["children"]:
                    make_flat(children, depth + 1)

            for row in itertools.chain(duration_stats["atomics"],
                                       [duration_stats["total"]]):
                make_flat(row)
            cliutils.print_list(rows,
                                fields=cols,
                                formatters=formatters,
                                normalize_field_names=True,
                                table_label="Response Times (sec)",
                                sortby_index=None)
            print()

            if iterations_data:
                formatters = dict(zip(iterations_headers[1:],
                                      [cliutils.pretty_float_formatter(col, 3)
                                       for col in iterations_headers[1:]]))
                cliutils.print_list(iterations,
                                    fields=iterations_headers,
                                    table_label="Atomics per iteration",
                                    formatters=formatters)
                print()

            if output:
                cols = charts.OutputStatsTable.columns
                float_cols = cols[1:7]
                formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))

                for out in output:
                    data = out.render()
                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                    if rows:
                        # NOTE(amaretskiy): print title explicitly because
                        #     prettytable fails if title length is too long
                        print(data["title"])
                        cliutils.print_list(rows, fields=cols,
                                            formatters=formatters)
                        print()

            print("Load duration: %s"
                  % strutils.format_float_to_str(workload["load_duration"]))
            print("Full duration: %s"
                  % strutils.format_float_to_str(workload["full_duration"]))

        print("\nHINTS:")
        print("* To plot HTML graphics with this data, run:")
        print("\trally task report %s --out output.html\n" % task["uuid"])
        print("* To generate a JUnit report, run:")
        print("\trally task export %s --type junit --to output.xml\n" %
              task["uuid"])
        print("* To get raw JSON output of task results, run:")
        print("\trally task report %s --json --out output.json\n" %
              task["uuid"])

        if not task["pass_sla"]:
            print("At least one workload did not pass SLA criteria.\n")
            return 1
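
# --- Editor's note: illustrative sketch, not part of the original example ---
# How the "Atomics per iteration" table above is assembled: the header list
# grows one numbered column per atomic action, each iteration row maps those
# columns to durations, and pretty_float_formatter rounds them on output.
# The atomic names and duration values below are hypothetical.
from rally.cli import cliutils

atomic_names = ["nova.boot_server", "nova.delete_server"]
iterations_headers = ["iteration", "duration"]
iterations_actions = []
for i, atomic_name in enumerate(atomic_names, 1):
    action = "%i. %s" % (i, atomic_name)
    iterations_headers.append(action)
    iterations_actions.append((atomic_name, action))

sample_data = [{"nova.boot_server": 2.345678, "nova.delete_server": 1.234567},
               {"nova.boot_server": 2.111111, "nova.delete_server": 1.555555}]
iterations = []
for idx, atomics in enumerate(sample_data, 1):
    row = {"iteration": idx, "duration": sum(atomics.values())}
    for name, action in iterations_actions:
        row[action] = atomics.get(name, 0)
    iterations.append(row)

formatters = dict(zip(iterations_headers[1:],
                      [cliutils.pretty_float_formatter(col, 3)
                       for col in iterations_headers[1:]]))
cliutils.print_list(iterations,
                    fields=iterations_headers,
                    table_label="Atomics per iteration",
                    formatters=formatters)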
Example #30
0
        def _print_ssrs_result(result):
            raw = result["data"]["raw"]
            # NOTE(hughsaunders): ssrs=scenario specific results
            ssrs = []
            for itr in raw:
                if "output" not in itr:
                    itr["output"] = {"additive": [], "complete": []}

                    # NOTE(amaretskiy): "scenario_output" is supported
                    #   for backward compatibility
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        itr["output"]["additive"].append({
                            "data":
                            itr["scenario_output"]["data"].items(),
                            "title":
                            "Scenario output",
                            "description":
                            "",
                            "chart_plugin":
                            "StackedArea"
                        })
                        del itr["scenario_output"]

                for idx, additive in enumerate(itr["output"]["additive"]):
                    try:
                        for key, value in additive["data"]:
                            ssrs[idx]["data"][key].append(value)
                    except IndexError:
                        data = {}
                        keys = []
                        for key, value in additive["data"]:
                            if key not in data:
                                data[key] = []
                                keys.append(key)
                            data[key].append(value)
                        ssrs.append({
                            "title": additive["title"],
                            "keys": keys,
                            "data": data
                        })
            if not ssrs:
                return

            print("\nScenario Specific Results\n")

            headers = [
                "key", "min", "median", "90%ile", "95%ile", "max", "avg"
            ]
            float_cols = ["min", "median", "90%ile", "95%ile", "max", "avg"]
            formatters = dict(
                zip(float_cols, [
                    cliutils.pretty_float_formatter(col, 3)
                    for col in float_cols
                ]))

            for ssr in ssrs:
                rows = []
                for key in ssr["keys"]:
                    values = ssr["data"][key]

                    if values:
                        row = [
                            str(key),
                            round(min(values), 3),
                            round(utils.median(values), 3),
                            round(utils.percentile(values, 0.90), 3),
                            round(utils.percentile(values, 0.95), 3),
                            round(max(values), 3),
                            round(utils.mean(values), 3)
                        ]
                    else:
                        row = [str(key)] + ["n/a"] * 6
                    rows.append(rutils.Struct(**dict(zip(headers, row))))

                cliutils.print_list(rows,
                                    fields=headers,
                                    formatters=formatters,
                                    table_label=ssr["title"])
                print()
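
# --- Editor's note: illustrative sketch, not part of the original example ---
# What the try/except IndexError block above accomplishes: additive output,
# delivered per iteration as (key, value) pairs, is regrouped into one list
# of values per key so that min/median/percentile statistics can be computed
# per key.  The iteration payloads below are hypothetical.
iterations_additive = [
    [("requests_per_sec", 105.0), ("errors", 0)],
    [("requests_per_sec", 98.5), ("errors", 1)],
    [("requests_per_sec", 110.2), ("errors", 0)],
]

keys = []
data = {}
for pairs in iterations_additive:
    for key, value in pairs:
        if key not in data:
            data[key] = []
            keys.append(key)
        data[key].append(value)

print(keys)  # -> ['requests_per_sec', 'errors']
print(data)  # -> {'requests_per_sec': [105.0, 98.5, 110.2],
             #     'errors': [0, 1, 0]}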
Example #31
0
        def _print_ssrs_result(result):
            raw = result["data"]["raw"]
            # NOTE(hughsaunders): ssrs=scenario specific results
            ssrs = []
            for itr in raw:
                if "output" not in itr:
                    itr["output"] = {"additive": [], "complete": []}

                    # NOTE(amaretskiy): "scenario_output" is supported
                    #   for backward compatibility
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        itr["output"]["additive"].append(
                            {"data": itr["scenario_output"]["data"].items(),
                             "title": "Scenario output",
                             "description": "",
                             "chart_plugin": "StackedArea"})
                        del itr["scenario_output"]

                for idx, additive in enumerate(itr["output"]["additive"]):
                    try:
                        for key, value in additive["data"]:
                            ssrs[idx]["data"][key].append(value)
                    except IndexError:
                        data = {}
                        keys = []
                        for key, value in additive["data"]:
                            if key not in data:
                                data[key] = []
                                keys.append(key)
                            data[key].append(value)
                        ssrs.append({"title": additive["title"],
                                     "keys": keys,
                                     "data": data})
            if not ssrs:
                return

            print("\nScenario Specific Results\n")

            headers = ["key", "min", "median", "90%ile", "95%ile",
                       "max", "avg"]
            float_cols = ["min", "median", "90%ile", "95%ile", "max", "avg"]
            formatters = dict(zip(float_cols,
                              [cliutils.pretty_float_formatter(col, 3)
                               for col in float_cols]))

            for ssr in ssrs:
                rows = []
                for key in ssr["keys"]:
                    values = ssr["data"][key]

                    if values:
                        row = [str(key),
                               round(min(values), 3),
                               round(utils.median(values), 3),
                               round(utils.percentile(values, 0.90), 3),
                               round(utils.percentile(values, 0.95), 3),
                               round(max(values), 3),
                               round(utils.mean(values), 3)]
                    else:
                        row = [str(key)] + ["n/a"] * 6
                    rows.append(rutils.Struct(**dict(zip(headers, row))))

                cliutils.print_list(rows,
                                    fields=headers,
                                    formatters=formatters,
                                    table_label=ssr["title"])
                print()
Example #32
0
    def test_pretty_float_formatter(self, obj, args, expected=None):
        formatter = cliutils.pretty_float_formatter(*args)
        if isinstance(expected, type) and issubclass(expected, Exception):
            self.assertRaises(expected, formatter, obj)
        else:
            self.assertEqual(expected, formatter(obj))
Example #33
0
@ddt.ddt
class CliUtilsTestCase(test.TestCase):
    def setUp(self):
        super(CliUtilsTestCase, self).setUp()
        self.categories = {
            "deployment": deployment.DeploymentCommands,
            "task": task.TaskCommands,
            "verify": verify.VerifyCommands
        }

    def tearDown(self):
        self._unregister_opts()
        super(CliUtilsTestCase, self).tearDown()

    def test_print_dict(self):
        out = six.StringIO()
        dict = {"key": "value"}
        cliutils.print_dict(dict, out=out)
        self.assertEqual(
            "+----------+-------+\n"
            "| Property | Value |\n"
            "+----------+-------+\n"
            "| key      | value |\n"
            "+----------+-------+\n", out.getvalue())

    def test_print_dict_wrap(self):
        out = six.StringIO()
        dict = {"key1": "not wrapped", "key2": "this will be wrapped"}
        cliutils.print_dict(dict, wrap=16, out=out)
        self.assertEqual(
            "+----------+--------------+\n"
            "| Property | Value        |\n"
            "+----------+--------------+\n"
            "| key1     | not wrapped  |\n"
            "| key2     | this will be |\n"
            "|          | wrapped      |\n"
            "+----------+--------------+\n", out.getvalue())

    def test_print_dict_formatters_and_fields(self):
        out = six.StringIO()
        dict = {"key1": "value", "key2": "Value", "key3": "vvv"}
        formatters = {"foo": lambda x: x["key1"], "bar": lambda x: x["key2"]}
        fields = ["foo", "bar"]
        cliutils.print_dict(dict,
                            formatters=formatters,
                            fields=fields,
                            out=out)
        self.assertEqual(
            "+----------+-------+\n"
            "| Property | Value |\n"
            "+----------+-------+\n"
            "| foo      | value |\n"
            "| bar      | Value |\n"
            "+----------+-------+\n", out.getvalue())

    def test_print_dict_header(self):
        out = six.StringIO()
        dict = {"key": "value"}
        cliutils.print_dict(dict,
                            table_label="Some Table",
                            print_header=False,
                            out=out)
        self.assertEqual(
            "+-------------+\n"
            "| Some Table  |\n"
            "+-----+-------+\n"
            "| key | value |\n"
            "+-----+-------+\n", out.getvalue())

    def test_print_dict_objects(self):
        class SomeStruct(object):
            def __init__(self, a, b):
                self.a = a
                self.b = b

            @property
            def c(self):
                return self.a + self.b

            def foo(self):
                pass

            @classmethod
            def bar(cls):
                pass

            @staticmethod
            def foobar():
                pass

        out = six.StringIO()
        formatters = {"c": lambda x: "a + b = %s" % x.c}
        cliutils.print_dict(SomeStruct(1, 2), formatters=formatters, out=out)
        self.assertEqual(
            "+----------+-----------+\n"
            "| Property | Value     |\n"
            "+----------+-----------+\n"
            "| a        | 1         |\n"
            "| b        | 2         |\n"
            "| c        | a + b = 3 |\n"
            "+----------+-----------+\n", out.getvalue())

    def test_print_dict_with_spec_chars(self):
        out = six.StringIO()
        dict = {"key": "line1\r\nline2"}
        cliutils.print_dict(dict, out=out)
        self.assertEqual(
            "+----------+-------+\n"
            "| Property | Value |\n"
            "+----------+-------+\n"
            "| key      | line1 |\n"
            "|          | line2 |\n"
            "+----------+-------+\n", out.getvalue())

    def test_make_header(self):
        h1 = cliutils.make_header("msg", size=4, symbol="=")
        self.assertEqual("====\nmsg\n====\n", h1)

    def test_make_table_header(self):
        actual = cliutils.make_table_header("Response Times (sec)", 40)
        expected = "\n".join((
            "+--------------------------------------+",
            "|         Response Times (sec)         |",
        ))
        self.assertEqual(expected, actual)

        actual = cliutils.make_table_header("Response Times (sec)", 39)
        expected = "\n".join((
            "+-------------------------------------+",
            "|        Response Times (sec)         |",
        ))
        self.assertEqual(expected, actual)

        self.assertRaises(ValueError, cliutils.make_table_header,
                          "Response Times (sec)", len("Response Times (sec)"))

    @ddt.data(
        {
            "obj": mock.Mock(foo=6.56565),
            "args": ["foo", 3],
            "expected": 6.566
        }, {
            "obj": mock.Mock(foo=6.56565),
            "args": ["foo"],
            "expected": 6.56565
        }, {
            "obj": mock.Mock(foo=None),
            "args": ["foo"],
            "expected": "n/a"
        }, {
            "obj": mock.Mock(foo="n/a"),
            "args": ["foo"],
            "expected": "n/a"
        }, {
            "obj": mock.Mock(foo="n/a"),
            "args": ["foo", 3],
            "expected": "n/a"
        }, {
            "obj": {
                "foo": 6.56565
            },
            "args": ["foo", 3],
            "expected": 6.566
        }, {
            "obj": {
                "foo": 6.56565
            },
            "args": ["foo"],
            "expected": 6.56565
        }, {
            "obj": {
                "foo": None
            },
            "args": ["foo"],
            "expected": "n/a"
        }, {
            "obj": {
                "foo": "n/a"
            },
            "args": ["foo"],
            "expected": "n/a"
        }, {
            "obj": {
                "foo": "n/a"
            },
            "args": ["foo", 3],
            "expected": "n/a"
        }, {
            "obj": object,
            "args": ["unexpected_field", 3],
            "expected": AttributeError
        }, {
            "obj": {
                "foo": 42
            },
            "args": ["unexpected_field", 3],
            "expected": KeyError
        })
    @ddt.unpack
    def test_pretty_float_formatter(self, obj, args, expected=None):
        formatter = cliutils.pretty_float_formatter(*args)
        if isinstance(expected, type) and issubclass(expected, Exception):
            self.assertRaises(expected, formatter, obj)
        else:
            self.assertEqual(expected, formatter(obj))

    def test__methods_of_with_class(self):
        class fake_class(object):
            def public(self):
                pass

            def _private(self):
                pass

        result = cliutils._methods_of(fake_class)
        self.assertEqual(1, len(result))
        self.assertEqual("public", result[0][0])

    def test__methods_of_with_object(self):
        class fake_class(object):
            def public(self):
                pass

            def _private(self):
                pass

        mock_obj = fake_class()
        result = cliutils._methods_of(mock_obj)
        self.assertEqual(1, len(result))
        self.assertEqual("public", result[0][0])

    def test__methods_of_empty_result(self):
        class fake_class(object):
            def _private(self):
                pass

            def _private2(self):
                pass

        mock_obj = fake_class()
        result = cliutils._methods_of(mock_obj)
        self.assertEqual([], result)

    def _unregister_opts(self):
        CONF.reset()
        category_opt = cfg.SubCommandOpt("category",
                                         title="Command categories",
                                         help="Available categories")
        CONF.unregister_opt(category_opt)

    @mock.patch("rally.api.API",
                side_effect=exceptions.RallyException("config_file"))
    def test_run_fails(self, mock_rally_api_api):
        ret = cliutils.run(["rally", "task list"], self.categories)
        self.assertEqual(2, ret)
        mock_rally_api_api.assert_called_once_with(config_args=["task list"],
                                                   skip_db_check=True)

    @mock.patch("rally.api.API.check_db_revision")
    def test_run_version(self, mock_api_check_db_revision):
        ret = cliutils.run(["rally", "version"], self.categories)
        self.assertEqual(0, ret)

    @mock.patch("rally.api.API.check_db_revision")
    def test_run_bash_completion(self, mock_api_check_db_revision):
        ret = cliutils.run(["rally", "bash-completion"], self.categories)
        self.assertEqual(0, ret)

    @mock.patch("rally.api.API.check_db_revision")
    @mock.patch("rally.common.db.api.task_get",
                side_effect=exceptions.DBRecordNotFound(criteria="uuid: %s" %
                                                        FAKE_TASK_UUID,
                                                        table="tasks"))
    def test_run_task_not_found(self, mock_task_get,
                                mock_api_check_db_revision):
        ret = cliutils.run(["rally", "task", "status",
                            "%s" % FAKE_TASK_UUID], self.categories)
        self.assertTrue(mock_task_get.called)
        self.assertEqual(203, ret)

    @mock.patch("rally.api.API.check_db_revision")
    @mock.patch("rally.cli.cliutils.validate_args",
                side_effect=cliutils.MissingArgs("missing"))
    def test_run_task_failed(self, mock_validate_args,
                             mock_api_check_db_revision):
        ret = cliutils.run(["rally", "task", "status",
                            "%s" % FAKE_TASK_UUID], self.categories)
        self.assertTrue(mock_validate_args.called)
        self.assertEqual(1, ret)

    @mock.patch("rally.api.API.check_db_revision")
    def test_run_failed_to_open_file(self, mock_api_check_db_revision):
        class FailuresCommands(object):
            def failed_to_open_file(self):
                raise IOError("No such file")

        ret = cliutils.run(["rally", "failure", "failed-to-open-file"],
                           {"failure": FailuresCommands})
        self.assertEqual(1, ret)

    @mock.patch("rally.api.API.check_db_revision")
    def test_run_sqlalchmey_operational_failure(self,
                                                mock_api_check_db_revision):
        class SQLAlchemyCommands(object):
            def operational_failure(self):
                raise sqlalchemy.exc.OperationalError("Can't open DB file")

        ret = cliutils.run(["rally", "failure", "operational-failure"],
                           {"failure": SQLAlchemyCommands})
        self.assertEqual(1, ret)

    class TestObj(object):
        x = 1
        y = 2
        z = 3.142857142857143
        aOrB = 3  # mixed case field

    @ddt.data(
        {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "sortby_index": None
            },
            "expected": ("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+")
        }, {
            "args": [[TestObj()], ["z"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "sortby_index": None,
                "formatters": {
                    "z": cliutils.pretty_float_formatter("z", 5)
                }
            },
            "expected": ("+---------+\n"
                         "| z       |\n"
                         "+---------+\n"
                         "| 3.14286 |\n"
                         "+---------+")
        }, {
            "args": [[TestObj()], ["x"]],
            "kwargs": {
                "print_header": True,
                "print_border": True
            },
            "expected": ("+---+\n"
                         "| x |\n"
                         "+---+\n"
                         "| 1 |\n"
                         "+---+")
        }, {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": True
            },
            "expected": ("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+")
        }, {
            "args": [[TestObj()], ["x"]],
            "kwargs": {
                "print_header": False,
                "print_border": False
            },
            "expected": "1"
        }, {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": False,
                "print_border": False
            },
            "expected": "1 2"
        }, {
            "args": [[TestObj()], ["x"]],
            "kwargs": {
                "print_header": True,
                "print_border": False
            },
            "expected": "x \n1"
        }, {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": False
            },
            "expected": "x y \n1 2"
        }, {
            "args": [[TestObj()], ["x"]],
            "kwargs": {
                "print_header": False,
                "print_border": True
            },
            "expected": ("+--+\n"
                         "|1 |\n"
                         "+--+")
        }, {
            "args": [[TestObj()], ["x", "y"]],
            "kwargs": {
                "print_header": False,
                "print_border": True
            },
            "expected": ("+--+--+\n"
                         "|1 |2 |\n"
                         "+--+--+")
        }, {
            "args": [[TestObj()], ["aOrB"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "mixed_case_fields": ["aOrB"]
            },
            "expected": ("+------+\n"
                         "| aOrB |\n"
                         "+------+\n"
                         "| 3    |\n"
                         "+------+")
        }, {
            "args": [[TestObj()], ["aOrB"]],
            "kwargs": {
                "print_header": False,
                "print_border": True,
                "mixed_case_fields": ["aOrB"]
            },
            "expected": ("+--+\n"
                         "|3 |\n"
                         "+--+")
        }, {
            "args": [[TestObj()], ["aOrB"]],
            "kwargs": {
                "print_header": True,
                "print_border": False,
                "mixed_case_fields": ["aOrB"]
            },
            "expected": "aOrB \n3"
        }, {
            "args": [[TestObj()], ["aOrB"]],
            "kwargs": {
                "print_header": False,
                "print_border": False,
                "mixed_case_fields": ["aOrB"]
            },
            "expected": "3"
        }, {
            "args": [[{
                "x": 1,
                "y": 2
            }], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "sortby_index": None
            },
            "expected": ("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+")
        }, {
            "args": [[{
                "z": 3.142857142857143
            }], ["z"]],
            "kwargs": {
                "print_header": True,
                "print_border": True,
                "sortby_index": None,
                "formatters": {
                    "z": cliutils.pretty_float_formatter("z", 5)
                }
            },
            "expected": ("+---------+\n"
                         "| z       |\n"
                         "+---------+\n"
                         "| 3.14286 |\n"
                         "+---------+")
        }, {
            "args": [[{
                "x": 1
            }], ["x"]],
            "kwargs": {
                "print_header": True,
                "print_border": True
            },
            "expected": ("+---+\n"
                         "| x |\n"
                         "+---+\n"
                         "| 1 |\n"
                         "+---+")
        }, {
            "args": [[{
                "x": 1,
                "y": 2
            }], ["x", "y"]],
            "kwargs": {
                "print_header": True,
                "print_border": True
            },
            "expected": ("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+")
        })
    @ddt.unpack
    def test_print_list(self, args, kwargs, expected):
        out = six.moves.StringIO()
        kwargs["out"] = out
        cliutils.print_list(*args, **kwargs)
        self.assertEqual(expected, out.getvalue().strip())

    def test_print_list_raises(self):
        out = six.moves.StringIO()
        self.assertRaisesRegex(ValueError,
                               "Field labels list.*has different number "
                               "of elements than fields list",
                               cliutils.print_list, [self.TestObj()], ["x"],
                               field_labels=["x", "y"],
                               sortby_index=None,
                               out=out)

    def test_help_for_grouped_methods(self):
        class SomeCommand(object):
            @cliutils.help_group("1_manage")
            def install(self):
                pass

            @cliutils.help_group("1_manage")
            def uninstall(self):
                pass

            @cliutils.help_group("1_manage")
            def reinstall(self):
                pass

            @cliutils.help_group("2_launch")
            def run(self):
                pass

            @cliutils.help_group("2_launch")
            def rerun(self):
                pass

            @cliutils.help_group("3_results")
            def show(self):
                pass

            @cliutils.help_group("3_results")
            def list(self):
                pass

            def do_do_has_do_has_mesh(self):
                pass

        self.assertEqual(
            "\n\nCommands:\n"
            "   do-do-has-do-has-mesh   \n"
            "\n"
            "   install                 \n"
            "   reinstall               \n"
            "   uninstall               \n"
            "\n"
            "   rerun                   \n"
            "   run                     \n"
            "\n"
            "   list                    \n"
            "   show                    \n",
            cliutils._compose_category_description(SomeCommand))
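
# --- Editor's note: illustrative sketch, not part of the original example ---
# The @ddt.data/@ddt.unpack pairs used in the test case above only work when
# the class itself is decorated with @ddt.ddt, which expands every data entry
# into its own generated test method.  A minimal standalone example (the
# class name and values are hypothetical):
import unittest

import ddt


@ddt.ddt
class FormatterBehaviourTestCase(unittest.TestCase):

    @ddt.data({"value": 6.56565, "ndigits": 3, "expected": 6.566},
              {"value": None, "ndigits": 3, "expected": "n/a"})
    @ddt.unpack
    def test_rounding(self, value, ndigits, expected):
        result = "n/a" if value is None else round(value, ndigits)
        self.assertEqual(expected, result)


if __name__ == "__main__":
    unittest.main()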
Example #34
0
    def test_print_list(self):
        class TestObj(object):
            x = 1
            y = 2
            z = 3.142857142857143
            aOrB = 3            # mixed case field

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=True,
                            print_border=True,
                            sortby_index=None,
                            out=out)
        self.assertEqual("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+",
                         out.getvalue().strip())

        out = moves.StringIO()
        formatter = cliutils.pretty_float_formatter("z", 5)
        cliutils.print_list([TestObj()], ["z"],
                            print_header=True,
                            print_border=True,
                            sortby_index=None,
                            formatters={"z": formatter},
                            out=out)
        self.assertEqual("+---------+\n"
                         "| z       |\n"
                         "+---------+\n"
                         "| 3.14286 |\n"
                         "+---------+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x"],
                            print_header=True,
                            print_border=True,
                            out=out)
        self.assertEqual("+---+\n"
                         "| x |\n"
                         "+---+\n"
                         "| 1 |\n"
                         "+---+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=True,
                            print_border=True,
                            out=out)
        self.assertEqual("+---+---+\n"
                         "| x | y |\n"
                         "+---+---+\n"
                         "| 1 | 2 |\n"
                         "+---+---+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x"],
                            print_header=False,
                            print_border=False,
                            out=out)
        self.assertEqual("1",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=False,
                            print_border=False,
                            out=out)
        self.assertEqual("1 2",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x"],
                            print_header=True,
                            print_border=False,
                            out=out)
        self.assertEqual("x \n1",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=True,
                            print_border=False,
                            out=out)
        self.assertEqual("x y \n1 2",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x"],
                            print_header=False,
                            print_border=True,
                            out=out)
        self.assertEqual("+--+\n"
                         "|1 |\n"
                         "+--+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["x", "y"],
                            print_header=False,
                            print_border=True,
                            out=out)
        self.assertEqual("+--+--+\n"
                         "|1 |2 |\n"
                         "+--+--+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["aOrB"],
                            mixed_case_fields=["aOrB"],
                            print_header=True,
                            print_border=True,
                            out=out)
        self.assertEqual("+------+\n"
                         "| aOrB |\n"
                         "+------+\n"
                         "| 3    |\n"
                         "+------+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["aOrB"],
                            mixed_case_fields=["aOrB"],
                            print_header=False,
                            print_border=True,
                            out=out)
        self.assertEqual("+--+\n"
                         "|3 |\n"
                         "+--+",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["aOrB"],
                            mixed_case_fields=["aOrB"],
                            print_header=True,
                            print_border=False,
                            out=out)
        self.assertEqual("aOrB \n"
                         "3",
                         out.getvalue().strip())

        out = moves.StringIO()
        cliutils.print_list([TestObj()], ["aOrB"],
                            mixed_case_fields=["aOrB"],
                            print_header=False,
                            print_border=False,
                            out=out)
        self.assertEqual("3",
                         out.getvalue().strip())

        out = moves.StringIO()
        self.assertRaisesRegex(ValueError,
                               "Field labels list.*has different number "
                               "of elements than fields list",
                               cliutils.print_list,
                               [TestObj()],
                               ["x"],
                               field_labels=["x", "y"],
                               sortby_index=None,
                               out=out)
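
The 3.14286 value expected in the formatter assertion above can be reproduced with a minimal sketch of a pretty_float_formatter-style helper. This is an assumption-laden illustration, not Rally's actual implementation; in particular the handling of None values is guessed.

def pretty_float_formatter(field, ndigits=None):
    # Return a callable that print_list can apply per row: read the named
    # attribute and, when ndigits is given, round the float to that many
    # decimal places. The "n/a" fallback for None is an assumption.
    def formatter(obj):
        value = getattr(obj, field)
        if value is None:
            return "n/a"
        return round(value, ndigits) if ndigits is not None else value
    return formatter


class Row(object):
    z = 3.142857142857143


print(pretty_float_formatter("z", 5)(Row()))  # -> 3.14286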