Example #1
    def import_results(self,
                       api,
                       verifier_id=None,
                       deployment=None,
                       file_to_parse=None,
                       run_args=None,
                       do_use=True):
        """Import results of a test run into the Rally database."""

        if not os.path.exists(file_to_parse):
            print("File '%s' not found." % file_to_parse)
            return 1
        with open(file_to_parse, "r") as f:
            data = f.read()

        run_args = yaml.safe_load(run_args) if run_args else {}
        verification, results = api.verification.import_results(
            verifier_id=verifier_id,
            deployment_id=deployment,
            data=data,
            **run_args)
        self._print_totals(results["totals"])

        verification_uuid = verification["uuid"]
        if do_use:
            self.use(api, verification_uuid)
        else:
            print("Verification UUID: %s." % verification_uuid)
Example #2
    def create(self, api, name, description=None, extras=None,
               spec=None, from_sysenv=False, to_json=False, do_use=True):
        """Create new environment."""

        if spec is not None and from_sysenv:
            print("Arguments '--spec' and '--from-sysenv' cannot be used "
                  "together, use only one of them.")
            return 1
        spec = spec or {}
        if spec:
            with open(os.path.expanduser(spec), "rb") as f:
                spec = yaml.safe_load(f.read())
        if extras:
            extras = yaml.safe_load(extras)

        if from_sysenv:
            result = env_mgr.EnvManager.create_spec_from_sys_environ()
            spec = result["spec"]
            _print("Your system environment includes specifications of "
                   "%s platform(s)." % len(spec), to_json)
            _print("Discovery information:", to_json)
            for p_name, p_result in result["discovery_details"].items():
                _print("\t - %s : %s." % (p_name, p_result["message"]),
                       to_json)

                if "traceback" in p_result:
                    _print("".join(p_result["traceback"]), to_json)
        try:
            env = env_mgr.EnvManager.create(
                name, spec, description=description, extras=extras)
        except exceptions.ManagerInvalidSpec as e:
            _print("Env spec has wrong format:", to_json)
            _print(json.dumps(e.kwargs["spec"], indent=2), to_json)
            for err in e.kwargs["errors"]:
                _print(err, to_json)
            return 1
        except Exception:
            _print("Something went wrong during env creation:", to_json)
            _print(traceback.format_exc(), to_json)
            return 1

        if do_use:
            self._use(env.uuid, to_json)
        self._show(env.data, to_json=to_json, only_spec=False)
        return 0
Example #3
 def consumer(_cache, sample):
     """Validate one sample"""
     full_path, rendered_task = sample
     task_config = yaml.safe_load(rendered_task)
     try:
         rapi.task.validate(deployment="MAIN", config=task_config)
     except Exception as e:
         if not self._skip(str(e)):
             failed_samples[full_path] = traceback.format_exc()
Example #4
    def test_schema_is_valid(self):
        discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))

        files = {
            f
            for f in os.listdir(self.rally_jobs_path)
            if (os.path.isfile(os.path.join(self.rally_jobs_path, f))
                and f.endswith(".yaml") and not f.endswith("_args.yaml"))
        }

        # TODO(andreykurilin): figure out why it fails
        files -= {"rally-mos.yaml", "sahara-clusters.yaml"}

        for filename in files:
            full_path = os.path.join(self.rally_jobs_path, filename)

            with open(full_path) as task_file:
                try:
                    args_file = os.path.join(
                        self.rally_jobs_path,
                        filename.rsplit(".", 1)[0] + "_args.yaml")

                    args = {}
                    if os.path.exists(args_file):
                        with open(args_file) as af:
                            args = yaml.safe_load(af.read())
                        if not isinstance(args, dict):
                            raise TypeError(
                                "args file %s must be dict in yaml or json "
                                "presentation" % args_file)

                    task_inst = api._Task(api.API(skip_db_check=True))
                    task = task_inst.render_template(
                        task_template=task_file.read(), **args)
                    task = task_cfg.TaskConfig(yaml.safe_load(task))
                    task_obj = fakes.FakeTask({"uuid": full_path})

                    eng = engine.TaskEngine(task, task_obj, mock.Mock())
                    eng.validate(only_syntax=True)
                except Exception:
                    print(traceback.format_exc())
                    self.fail("Wrong task input file: %s" % full_path)
Example #5
    def recreate(self, api, deployment=None, filename=None):
        """Destroy and create an existing deployment.

        Unlike 'deployment destroy', the deployment database record
        will not be deleted, so the deployment UUID stays the same.
        """
        config = None
        if filename:
            with open(filename, "rb") as deploy_file:
                config = yaml.safe_load(deploy_file.read())

        api.deployment.recreate(deployment=deployment, config=config)
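A side note on the "rb" mode used above: yaml.safe_load accepts bytes as well as str, and the YAML spec requires loaders to detect the encoding (UTF-8/16/32), so reading config files in binary mode is safe. A minimal check:

    import yaml

    # safe_load handles bytes input and detects the encoding itself:
    assert yaml.safe_load(b"uuid: 1234") == {"uuid": 1234}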
Example #6
    def configure_verifier(self,
                           api,
                           verifier_id=None,
                           deployment=None,
                           reconfigure=False,
                           extra_options=None,
                           new_configuration=None,
                           show=False):
        """Configure a verifier for a specific deployment."""

        # TODO(ylobankov): Add an ability to read extra options from
        #                  a json or yaml file.
        if new_configuration and (extra_options or reconfigure):
            print("Argument '--override' cannot be used with arguments "
                  "'--reconfigure' and '--extend'.")
            return 1

        if new_configuration:
            if not os.path.exists(new_configuration):
                print("File '%s' not found." % new_configuration)
                return 1

            with open(new_configuration) as f:
                config = f.read()
            api.verifier.override_configuration(verifier_id=verifier_id,
                                                deployment_id=deployment,
                                                new_configuration=config)
        else:
            if extra_options:
                if os.path.isfile(extra_options):
                    conf = configparser.ConfigParser()
                    conf.optionxform = str
                    conf.read(extra_options)
                    extra_options = dict(conf._sections)
                    for s in extra_options:
                        extra_options[s] = dict(extra_options[s])
                        extra_options[s].pop("__name__", None)

                    defaults = dict(conf.defaults())
                    if defaults:
                        extra_options["DEFAULT"] = dict(conf.defaults())
                else:
                    extra_options = yaml.safe_load(extra_options)

            config = api.verifier.configure(verifier=verifier_id,
                                            deployment_id=deployment,
                                            extra_options=extra_options,
                                            reconfigure=reconfigure)

        if show:
            print("\n%s\n" % config.strip())
Example #7
 def parse(filename):
     with open(filename, "r") as f:
         return yaml.safe_load(f.read())
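A usage sketch for this helper; the file name and keys below are assumptions for illustration, not part of the original:

    # parse() returns whatever the document contains,
    # e.g. a dict for a mapping document:
    settings = parse("config.yaml")        # hypothetical file
    timeout = settings.get("timeout", 60)  # hypothetical key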
Example #8
    def _load_and_validate_task(self,
                                api,
                                task_file,
                                args_file=None,
                                raw_args=None):
        """Load, render and validate tasks template from file with passed args.

        :param task_file: Path to file with input task
        :param raw_args: JSON or YAML representation of a dict with args
            that will be used to render the input task with jinja2
        :param args_file: Path to a file with a JSON or YAML representation
            of a dict that will be used to render the input task with jinja2.
            If both raw_args and args_file are specified, they are merged;
            raw_args has higher priority, so it overrides values
            from args_file.
        :returns: Parsed (rendered and loaded) task as a dict
        """

        print(cliutils.make_header("Preparing input task"))

        try:
            with open(task_file) as f:
                input_task = f.read()
        except IOError as err:
            raise FailedToLoadTask(source="--task",
                                   msg="Error reading %s: %s" %
                                   (task_file, err))

        task_dir = os.path.expanduser(os.path.dirname(task_file)) or "./"

        task_args = {}
        if args_file:
            try:
                with open(args_file) as af:
                    task_args.update(yaml.safe_load(af.read()))
            except yaml.ParserError as e:
                raise FailedToLoadTask(
                    source="--task-args-file",
                    msg="File '%s' has to be YAML or JSON. Details:\n\n%s" %
                    (args_file, e))
            except IOError as err:
                raise FailedToLoadTask(source="--task-args-file",
                                       msg="Error reading %s: %s" %
                                       (args_file, err))

        if raw_args:
            try:
                data = yaml.safe_load(raw_args)
                if isinstance(data, str):
                    raise yaml.ParserError("String '%s' doesn't look like a "
                                           "dictionary." % raw_args)
                task_args.update(data)
            except yaml.ParserError as e:
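                # Fall back to the "key1=value1,key2=value2" notation;
                # every pair must contain "=" for the value to be accepted.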
                args = [
                    keypair.split("=", 1) for keypair in raw_args.split(",")
                ]
                if len([a for a in args if len(a) != 1]) != len(args):
                    raise FailedToLoadTask(
                        source="--task-args",
                        msg="Value has to be YAML or JSON. Details:\n\n%s" % e)
                else:
                    task_args.update(dict(args))

        try:
            rendered_task = api.task.render_template(task_template=input_task,
                                                     template_dir=task_dir,
                                                     **task_args)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Failed to render task template.\n\n%s" % e)

        print("Task is:\n%s\n" % rendered_task.strip())
        try:
            parsed_task = yaml.safe_load(rendered_task)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Wrong format of rendered input task. It should be YAML or"
                " JSON. Details:\n\n%s" % e)

        print("Task syntax is correct :)")
        return parsed_task
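This method accepts args as either JSON or YAML because YAML is, for practical purposes, a superset of JSON, so a single safe_load call covers both notations. A minimal illustration:

    import yaml

    # one parser, two notations: safe_load reads JSON and YAML alike
    json_style = yaml.safe_load('{"image": "cirros", "flavor": "m1.tiny"}')
    yaml_style = yaml.safe_load("image: cirros\nflavor: m1.tiny")
    assert json_style == yaml_style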
Example #9
    def _detailed(self,
                  api,
                  task_id=None,
                  iterations_data=False,
                  filters=None):
        """Print detailed information about given task."""
        scenarios_filter = []
        only_sla_failures = False
        for filter in filters or []:
            if filter.startswith("scenario="):
                filter_value = filter.split("=")[1]
                scenarios_filter = filter_value.split(",")
            if filter == "sla-failures":
                only_sla_failures = True

        task = api.task.get(task_id=task_id, detailed=True)

        print()
        print("-" * 80)
        print("Task %(task_id)s: %(status)s" % {
            "task_id": task_id,
            "status": task["status"]
        })

        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
                consts.TaskStatus.VALIDATION_FAILED):
            print("-" * 80)
            validation = task["validation_result"]
            if logging.is_debug():
                print(yaml.safe_load(validation["trace"]))
            else:
                print(validation["etype"])
                print(validation["msg"])
                print("\nFor more details run:\nrally -d task detailed %s" %
                      task["uuid"])
            return 0
        elif task["status"] not in [
                consts.TaskStatus.FINISHED, consts.TaskStatus.ABORTED
        ]:
            print("-" * 80)
            print("\nThe task %s marked as '%s'. Results "
                  "available when it is '%s'." %
                  (task_id, task["status"], consts.TaskStatus.FINISHED))
            return 0

        for workload in itertools.chain(
                *[s["workloads"] for s in task["subtasks"]]):
            if scenarios_filter and workload["name"] not in scenarios_filter:
                continue
            if only_sla_failures and workload["pass_sla"]:
                continue

            print("-" * 80)
            print()
            print("test scenario %s" % workload["name"])
            print("args position %s" % workload["position"])
            print("args values:")
            print(
                json.dumps(
                    {
                        "args": workload["args"],
                        "runner": workload["runner"],
                        "contexts": workload["contexts"],
                        "sla": workload["sla"],
                        "hooks": [r["config"] for r in workload["hooks"]]
                    },
                    indent=2))
            print()

            duration_stats = workload["statistics"]["durations"]

            iterations = []
            iterations_headers = ["iteration", "duration"]
            iterations_actions = []
            output = []
            task_errors = []
            if iterations_data:
                atomic_names = [
                    a["display_name"] for a in duration_stats["atomics"]
                ]
                for i, atomic_name in enumerate(atomic_names, 1):
                    action = "%i. %s" % (i, atomic_name)
                    iterations_headers.append(action)
                    iterations_actions.append((atomic_name, action))

            for idx, itr in enumerate(workload["data"], 1):

                if iterations_data:
                    row = {"iteration": idx, "duration": itr["duration"]}
                    for name, action in iterations_actions:
                        atomic_actions = atomic.merge_atomic_actions(
                            itr["atomic_actions"])
                        row[action] = atomic_actions.get(name, {}).get(
                            "duration", 0)
                    iterations.append(row)

                if "output" in itr:
                    iteration_output = itr["output"]
                else:
                    iteration_output = {"additive": [], "complete": []}

                # a_idx avoids shadowing the iteration counter above; a new
                # table is appended only when none exists for this index yet.
                for a_idx, additive in enumerate(iteration_output["additive"]):
                    if len(output) <= a_idx:
                        output_table = charts.OutputStatsTable(
                            workload, title=additive["title"])
                        output.append(output_table)
                    output[a_idx].add_iteration(additive["data"])

                if itr.get("error"):
                    task_errors.append(TaskCommands._format_task_error(itr))

            self._print_task_errors(task_id, task_errors)

            cols = charts.MainStatsTable.columns
            formatters = {
                "Action": lambda x: x["display_name"],
                "Min (sec)": lambda x: x["data"]["min"],
                "Median (sec)": lambda x: x["data"]["median"],
                "90%ile (sec)": lambda x: x["data"]["90%ile"],
                "95%ile (sec)": lambda x: x["data"]["95%ile"],
                "Max (sec)": lambda x: x["data"]["max"],
                "Avg (sec)": lambda x: x["data"]["avg"],
                "Success": lambda x: x["data"]["success"],
                "Count": lambda x: x["data"]["iteration_count"]
            }

            rows = []

            def make_flat(r, depth=0):
                if depth > 0:
                    r["display_name"] = (" %s> %s" %
                                         ("-" * depth, r["display_name"]))

                rows.append(r)
                for children in r["children"]:
                    make_flat(children, depth + 1)

            for row in itertools.chain(duration_stats["atomics"],
                                       [duration_stats["total"]]):
                make_flat(row)
            cliutils.print_list(rows,
                                fields=cols,
                                formatters=formatters,
                                normalize_field_names=True,
                                table_label="Response Times (sec)",
                                sortby_index=None)
            print()

            if iterations_data:
                formatters = dict(
                    zip(iterations_headers[1:], [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in iterations_headers[1:]
                    ]))
                cliutils.print_list(iterations,
                                    fields=iterations_headers,
                                    table_label="Atomics per iteration",
                                    formatters=formatters)
                print()

            if output:
                cols = charts.OutputStatsTable.columns
                float_cols = cols[1:7]
                formatters = dict(
                    zip(float_cols, [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in float_cols
                    ]))

                for out in output:
                    data = out.render()
                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                    if rows:
                        # NOTE(amaretskiy): print title explicitly because
                        #     prettytable fails if title length is too long
                        print(data["title"])
                        cliutils.print_list(rows,
                                            fields=cols,
                                            formatters=formatters)
                        print()

            print("Load duration: %s" %
                  strutils.format_float_to_str(workload["load_duration"]))
            print("Full duration: %s" %
                  strutils.format_float_to_str(workload["full_duration"]))

        print("\nHINTS:")
        print("* To plot HTML graphics with this data, run:")
        print("\trally task report %s --out output.html\n" % task["uuid"])
        print("* To generate a JUnit report, run:")
        print("\trally task export %s --type junit-xml --to output.xml\n" %
              task["uuid"])
        print("* To get raw JSON output of task results, run:")
        print("\trally task report %s --json --out output.json\n" %
              task["uuid"])

        if not task["pass_sla"]:
            print("At least one workload did not pass SLA criteria.\n")
            return 1
Example #10
    def restart(self,
                api,
                deployment=None,
                task_id=None,
                scenarios=None,
                tags=None,
                do_use=False,
                abort_on_sla_failure=False):
        """Restart a task or some scenarios in workloads of task."""
        if scenarios is not None and not isinstance(scenarios, list):
            scenarios = [scenarios]
        task = api.task.get(task_id=task_id, detailed=True)
        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
                consts.TaskStatus.VALIDATION_FAILED):
            print("-" * 80)
            print("\nUnable to restart task.")
            validation = task["validation_result"]
            if logging.is_debug():
                print(yaml.safe_load(validation["trace"]))
            else:
                print(validation["etype"])
                print(validation["msg"])
                print("\nFor more details run:\nrally -d task detailed %s" %
                      task["uuid"])
            return 1
        retask = {
            "version": 2,
            "title": task["title"],
            "description": task["description"],
            "tags": task["tags"],
            "subtasks": []
        }
        for subtask in task["subtasks"]:
            workloads = []
            for workload in subtask["workloads"]:
                if scenarios is None or workload["name"] in scenarios:
                    workloads.append({
                        "scenario": {
                            workload["name"]: workload["args"]
                        },
                        "contexts": workload["contexts"],
                        "runner": {
                            workload["runner_type"]: workload["runner"]
                        },
                        "hooks": workload["hooks"],
                        "sla": workload["sla"]
                    })
            if workloads:
                retask["subtasks"].append({
                    "title": subtask["title"],
                    "description": subtask["description"],
                    "workloads": workloads
                })

        if retask["subtasks"]:
            return self._start_task(api,
                                    deployment,
                                    retask,
                                    tags=tags,
                                    do_use=do_use,
                                    abort_on_sla_failure=abort_on_sla_failure)
        else:
            print("Not Found matched scenario.")
            return 1
Example #11
 def test_safe_load_order_key(self):
     stream = "{'b': 1, 'a': 1, 'c': 1}"
     stream_obj = yamlutils.safe_load(stream)
     self.assertEqual({"a": 1, "b": 1, "c": 1}, stream_obj)
     self.assertEqual(["b", "a", "c"], list(stream_obj))
Example #12
 def test_safe_load(self):
     stream = "{'a': 1, 'b': {'a': 2}}"
     stream_obj = yamlutils.safe_load(stream)
     self.assertEqual({"a": 1, "b": {"a": 2}}, stream_obj)
Example #13
    def create(self, api, name, fromenv=False, filename=None, do_use=False):
        """Create new deployment.

        This command will create a new deployment record in the Rally
        database. In the case of the ExistingCloud deployment engine, it
        will use the cloud represented in the configuration. If the
        cloud doesn't exist, Rally can deploy a new one for you with
        Devstack or Fuel. Different deployment engines exist for these
        cases (see `rally plugin list --plugin-base Engine` for
        more details).

        If you use the ExistingCloud deployment engine, you can pass
        the deployment config by environment variables with ``--fromenv``:

            OS_USERNAME
            OS_PASSWORD
            OS_AUTH_URL
            OS_TENANT_NAME or OS_PROJECT_NAME
            OS_ENDPOINT_TYPE or OS_INTERFACE
            OS_ENDPOINT
            OS_REGION_NAME
            OS_CACERT
            OS_INSECURE
            OS_IDENTITY_API_VERSION

        All other deployment engines need more complex configuration
        data, so it should be stored in a configuration file.

        You can use physical servers, LXC containers, KVM virtual
        machines or virtual machines in OpenStack for deploying the
        cloud. Except for physical servers, Rally can create cluster nodes
        for you. Interaction with virtualization software, OpenStack
        cloud or physical servers is provided by server providers.
        """

        if fromenv:
            result = env_mgr.EnvManager.create_spec_from_sys_environ()
            config = result["spec"]
            if "existing@openstack" in config:
                # NOTE(andreykurilin): if we are here it means that
                #   rally-openstack package is installed
                import rally_openstack
                if rally_openstack.__version_tuple__ <= (1, 4, 0):
                    if ("https_key" in config["existing@openstack"]
                            and config["existing@openstack"]["https_key"]):
                        print("WARNING: OS_KEY is ignored due to old version "
                              "of rally-openstack package.")
                        # NOTE(andreykurilin): To support
                        #    rally-openstack<=1.4.0 we need to remove
                        #    https_key, since OpenStackCredentials object
                        #    doesn't support it.
                        #    Latest rally-openstack fixed this issue with
                        #    https://github.com/openstack/rally-openstack/commit/c7483386e6b59474c83e3ecd0c7ee0e77ff50c02
                        config["existing@openstack"].pop("https_key")
        else:
            if not filename:
                config = {}
            else:
                with open(os.path.expanduser(filename), "rb") as deploy_file:
                    config = yaml.safe_load(deploy_file.read())

        try:
            deployment = api.deployment.create(config=config, name=name)
        except jsonschema.ValidationError:
            print("Config schema validation error: %s." % sys.exc_info()[1])
            return 1
        except exceptions.DBRecordExists:
            print("Error: %s" % sys.exc_info()[1])
            return 1

        self.list(api, deployment_list=[deployment])
        if do_use:
            self.use(api, deployment)
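One pitfall worth noting for the filename branch above: safe_load returns None, not {}, for an empty document, so an empty config file would pass config=None to the API. A defensive variant:

    import yaml

    # safe_load("") and safe_load(b"") both return None, so normalize:
    with open("deploy.yaml", "rb") as deploy_file:  # hypothetical file
        config = yaml.safe_load(deploy_file.read()) or {}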