Example #1
    def create(self,
               api,
               name,
               description=None,
               extras=None,
               spec=None,
               to_json=False,
               do_use=True):
        """Create new environment."""
        spec = spec or {}
        if spec:
            with open(os.path.expanduser(spec), "rb") as f:
                spec = yaml.safe_load(f.read())
        if extras:
            extras = yaml.safe_load(extras)
        try:
            env = env_mgr.EnvManager.create(name,
                                            spec,
                                            description=description,
                                            extras=extras)
        except exceptions.ManagerInvalidSpec as e:
            _print("Env spec has wrong format:", to_json)
            _print(json.dumps(e.kwargs["spec"], indent=2), to_json)
            for err in e.kwargs["errors"]:
                _print(err, to_json)
            return 1
        except Exception:
            _print("Something went wrong during env creation:", to_json)
            _print(traceback.format_exc(), to_json)
            return 1

        if do_use:
            self._use(env.uuid, to_json)
        self._show(env.data, to_json)
        return 0
Example #2
    def create(self,
               api,
               name,
               description=None,
               extras=None,
               spec=None,
               from_sysenv=False,
               to_json=False,
               do_use=True):
        """Create new environment."""

        if spec is not None and from_sysenv:
            print("Arguments '--spec' and '--from-sysenv' cannot be used "
                  "together, use only one of them.")
            return 1
        spec = spec or {}
        if spec:
            with open(os.path.expanduser(spec), "rb") as f:
                spec = yaml.safe_load(f.read())
        if extras:
            extras = yaml.safe_load(extras)

        if from_sysenv:
            result = env_mgr.EnvManager.create_spec_from_sys_environ()
            spec = result["spec"]
            _print(
                "Your system environment includes specifications of "
                "%s platform(s)." % len(spec), to_json)
            _print("Discovery information:", to_json)
            for p_name, p_result in result["discovery_details"].items():
                _print("\t - %s : %s." % (p_name, p_result["message"]),
                       to_json)

                if "traceback" in p_result:
                    _print("".join(p_result["traceback"]), to_json)
        try:
            env = env_mgr.EnvManager.create(name,
                                            spec,
                                            description=description,
                                            extras=extras)
        except exceptions.ManagerInvalidSpec as e:
            _print("Env spec has wrong format:", to_json)
            _print(json.dumps(e.kwargs["spec"], indent=2), to_json)
            for err in e.kwargs["errors"]:
                _print(err, to_json)
            return 1
        except Exception:
            _print("Something went wrong during env creation:", to_json)
            _print(traceback.format_exc(), to_json)
            return 1

        if do_use:
            self._use(env.uuid, to_json)
        self._show(env.data, to_json=to_json, only_spec=False)
        return 0
Example #3
    def import_results(self,
                       api,
                       verifier_id=None,
                       deployment=None,
                       file_to_parse=None,
                       run_args=None,
                       do_use=True):
        """Import results of a test run into the Rally database."""

        if not os.path.exists(file_to_parse):
            print("File '%s' not found." % file_to_parse)
            return 1
        with open(file_to_parse, "r") as f:
            data = f.read()

        run_args = yaml.safe_load(run_args) if run_args else {}
        verification, results = api.verification.import_results(
            verifier_id=verifier_id,
            deployment_id=deployment,
            data=data,
            **run_args)
        self._print_totals(results["totals"])

        verification_uuid = verification["uuid"]
        if do_use:
            self.use(api, verification_uuid)
        else:
            print("Verification UUID: %s." % verification_uuid)
Example #4
    def test_detailed_task_failed(self, mock_logging, mock_stdout, debug):
        test_uuid = "test_task_id"
        value = {
            "id": "task",
            "uuid": test_uuid,
            "status": consts.TaskStatus.CRASHED,
            "results": [],
            "verification_log": json.dumps({"etype": "error_type",
                                            "msg": "error_message",
                                            "trace": "error_traceback"})
        }
        self.fake_api.task.get_detailed.return_value = value

        mock_logging.is_debug.return_value = debug
        self.task.detailed(self.fake_api, test_uuid)
        verification = yaml.safe_load(value["verification_log"])
        if debug:
            expected_calls = [mock.call("Task test_task_id: crashed"),
                              mock.call("%s" % verification["trace"])]
            mock_stdout.write.assert_has_calls(expected_calls, any_order=True)
        else:
            expected_calls = [mock.call("Task test_task_id: crashed"),
                              mock.call("%s" % verification["etype"]),
                              mock.call("%s" % verification["msg"]),
                              mock.call("\nFor more details run:\nrally "
                                        "-d task detailed %s" % test_uuid)]
            mock_stdout.write.assert_has_calls(expected_calls, any_order=True)
Example #5
    def create(self, api, name, fromenv=False, filename=None, do_use=False):
        """Create new deployment.

        This command will create a new deployment record in rally
        database. In the case of ExistingCloud deployment engine, it
        will use the cloud represented in the configuration. If the
        cloud doesn't exist, Rally can deploy a new one for you with
        Devstack or Fuel. Different deployment engines exist for these
        cases (see `rally plugin list --plugin-base Engine` for
        more details).

        If you use the ExistingCloud deployment engine, you can pass
        the deployment config by environment variables with ``--fromenv``:

            OS_USERNAME
            OS_PASSWORD
            OS_AUTH_URL
            OS_TENANT_NAME or OS_PROJECT_NAME
            OS_ENDPOINT_TYPE or OS_INTERFACE
            OS_ENDPOINT
            OS_REGION_NAME
            OS_CACERT
            OS_INSECURE
            OS_IDENTITY_API_VERSION

        All other deployment engines need more complex configuration
        data, so it should be stored in a configuration file.

        You can use physical servers, LXC containers, KVM virtual
        machines or virtual machines in OpenStack for deploying the
        cloud. Except for physical servers, Rally can create cluster nodes
        for you. Interaction with virtualization software, OpenStack
        cloud or physical servers is provided by server providers.
        """

        if fromenv:
            # TODO(astudenov): move this to Credential plugin
            config = {
                "type": "ExistingCloud",
                "creds": {"openstack": envutils.get_creds_from_env_vars()}}
        else:
            if not filename:
                config = {}
            else:
                with open(os.path.expanduser(filename), "rb") as deploy_file:
                    config = yaml.safe_load(deploy_file.read())

        try:
            deployment = api.deployment.create(config=config, name=name)
        except jsonschema.ValidationError:
            print("Config schema validation error: %s." % sys.exc_info()[1])
            return 1
        except exceptions.DeploymentNameExists:
            print("Error: %s" % sys.exc_info()[1])
            return 1

        self.list(api, deployment_list=[deployment])
        if do_use:
            self.use(api, deployment)
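
Note: when --fromenv is not used, the configuration is read from a YAML file.
A minimal sketch of such a file, mirroring the dict built in the --fromenv
branch above; the exact credential keys are an assumption and depend on the
Rally version:

    import yaml

    # Hypothetical existing.yaml contents; the field names under "creds" are
    # assumed, not taken from the snippet above.
    sample = """
    type: ExistingCloud
    creds:
      openstack:
        auth_url: http://keystone.example.com:5000/v3
        admin:
          username: admin
          password: secret
          project_name: admin
    """
    config = yaml.safe_load(sample)
    print(config["type"], list(config["creds"]))  # ExistingCloud ['openstack']
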
Example #6
    def create(self, api, name, description=None, extras=None,
               spec=None, from_sysenv=False, to_json=False, do_use=True):
        """Create new environment."""

        if spec is not None and from_sysenv:
            print("Arguments '--spec' and '--from-sysenv' cannot be used "
                  "together, use only one of them.")
            return 1
        spec = spec or {}
        if spec:
            with open(os.path.expanduser(spec), "rb") as f:
                spec = yaml.safe_load(f.read())
        if extras:
            extras = yaml.safe_load(extras)

        if from_sysenv:
            result = env_mgr.EnvManager.create_spec_from_sys_environ()
            spec = result["spec"]
            _print("Your system environment includes specifications of "
                   "%s platform(s)." % len(spec), to_json)
            _print("Discovery information:", to_json)
            for p_name, p_result in result["discovery_details"].items():
                _print("\t - %s : %s." % (p_name, p_result["message"]),
                       to_json)

                if "traceback" in p_result:
                    _print("".join(p_result["traceback"]), to_json)
        try:
            env = env_mgr.EnvManager.create(
                name, spec, description=description, extras=extras)
        except exceptions.ManagerInvalidSpec as e:
            _print("Env spec has wrong format:", to_json)
            _print(json.dumps(e.kwargs["spec"], indent=2), to_json)
            for err in e.kwargs["errors"]:
                _print(err, to_json)
            return 1
        except Exception:
            _print("Something went wrong during env creation:", to_json)
            _print(traceback.format_exc(), to_json)
            return 1

        if do_use:
            self._use(env.uuid, to_json)
        self._show(env.data, to_json=to_json, only_spec=False)
        return 0
Example #7
 def consumer(_cache, sample):
     """Validate one sample"""
     full_path, rendered_task = sample
     task_config = yaml.safe_load(rendered_task)
     try:
         rapi.task.validate(deployment="MAIN", config=task_config)
     except Exception as e:
         if not self._skip(six.text_type(e)):
             failed_samples[full_path] = traceback.format_exc()
Example #8
    def create(self, api, name, fromenv=False, filename=None, do_use=False):
        """Create new deployment.

        This command will create a new deployment record in rally
        database. In the case of ExistingCloud deployment engine, it
        will use the cloud represented in the configuration. If the
        cloud doesn't exist, Rally can deploy a new one for you with
        Devstack or Fuel. Different deployment engines exist for these
        cases (see `rally plugin list --plugin-base Engine` for
        more details).

        If you use the ExistingCloud deployment engine, you can pass
        the deployment config by environment variables with ``--fromenv``:

            OS_USERNAME
            OS_PASSWORD
            OS_AUTH_URL
            OS_TENANT_NAME or OS_PROJECT_NAME
            OS_ENDPOINT_TYPE or OS_INTERFACE
            OS_ENDPOINT
            OS_REGION_NAME
            OS_CACERT
            OS_INSECURE
            OS_IDENTITY_API_VERSION

        All other deployment engines need more complex configuration
        data, so it should be stored in a configuration file.

        You can use physical servers, LXC containers, KVM virtual
        machines or virtual machines in OpenStack for deploying the
        cloud. Except for physical servers, Rally can create cluster nodes
        for you. Interaction with virtualization software, OpenStack
        cloud or physical servers is provided by server providers.
        """

        if fromenv:
            # TODO(astudenov): move this to Credential plugin
            config = {"openstack": envutils.get_creds_from_env_vars()}
        else:
            if not filename:
                config = {}
            else:
                with open(os.path.expanduser(filename), "rb") as deploy_file:
                    config = yaml.safe_load(deploy_file.read())

        try:
            deployment = api.deployment.create(config=config, name=name)
        except jsonschema.ValidationError:
            print("Config schema validation error: %s." % sys.exc_info()[1])
            return 1
        except exceptions.DBRecordExists:
            print("Error: %s" % sys.exc_info()[1])
            return 1

        self.list(api, deployment_list=[deployment])
        if do_use:
            self.use(api, deployment)
Example #9
 def run(self, **kwargs):
     self.start_time = time.time()
     if not os.path.exists(self.res_dir):
         os.makedirs(self.res_dir)
     rapi = api.API()
     api.CONF.set_default("use_stderr", False)
     api.CONF.set_default('log_dir', self.res_dir)
     api.CONF.set_default('log_file', 'rally.log')
     rally.common.logging.setup("rally")
     spec = env_mgr.EnvManager.create_spec_from_sys_environ()["spec"]
     try:
         env_mgr.EnvManager.get('my-kubernetes').delete(force=True)
     except exceptions.DBRecordNotFound:
         pass
     env = env_mgr.EnvManager.create('my-kubernetes', spec)
     result = env.check_health()
     self.__logger.debug("check health %s: %s", 'my-kubernetes', result)
     if not result['existing@kubernetes']['available']:
         self.__logger.error(
             "Cannot check env heath: %s",
             result['existing@kubernetes']['message'])
         return
     with open(pkg_resources.resource_filename(
             'functest_kubernetes', 'rally/all-in-one.yaml')) as file:
         template = Template(file.read())
     task = yaml.safe_load(template.render(
         concurrency=kwargs.get("concurrency", self.concurrency),
         times=kwargs.get("times", self.times),
         namespaces_count=kwargs.get(
             "namespaces_count", self.namespaces_count)))
     rapi.task.validate(deployment='my-kubernetes', config=task)
     task_instance = rapi.task.create(deployment='my-kubernetes')
     rapi.task.start(
         deployment='my-kubernetes', config=task,
         task=task_instance["uuid"])
     self.details = rapi.task.get(task_instance["uuid"], detailed=False)
     self.__logger.debug("details: %s", self.details)
     if self.details['pass_sla']:
         self.result = 100
     result = rapi.task.export(
         [task_instance["uuid"]], "html",
         output_dest=os.path.join(
             self.res_dir, "{}.html".format(self.case_name)))
     if "files" in result:
         for path in result["files"]:
             with open(path, "w+") as output:
                 output.write(result["files"][path])
     result = rapi.task.export(
         [task_instance["uuid"]], "junit-xml",
         output_dest=os.path.join(
             self.res_dir, "{}.xml".format(self.case_name)))
     if "files" in result:
         for path in result["files"]:
             with open(path, "w+") as output:
                 output.write(result["files"][path])
     self.stop_time = time.time()
Example #10
 def consumer(_cache, sample):
     """Validate one sample"""
     full_path, rendered_task = sample
     task_config = yaml.safe_load(rendered_task)
     try:
         rapi.task.validate(deployment="MAIN",
                            config=task_config)
     except Exception as e:
         if not self._skip(six.text_type(e)):
             failed_samples[full_path] = traceback.format_exc()
Example #11
    def setUp(self):
        super(RallyJobsTestCase, self).setUp()
        with open(os.path.join(self.zuul_jobs_path, "zuul.yaml")) as f:
            self.zuul_cfg = yaml.safe_load(f)

        self.project_cfg = None
        for item in self.zuul_cfg:
            if "project" in item:
                self.project_cfg = item["project"]
                break
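
Note: the loop above expects zuul.yaml to parse into a list of single-key
mappings, one of which carries the "project" section. A minimal sketch with
hypothetical job and project entries:

    import yaml

    # Hypothetical zuul.yaml layout; only the list-of-mappings shape matters
    # for the lookup performed in setUp() above.
    zuul_cfg = yaml.safe_load("""
    - job:
        name: rally-tox-py3
    - project:
        check:
          jobs:
            - rally-tox-py3
    """)
    project_cfg = next(
        item["project"] for item in zuul_cfg if "project" in item)
    print(list(project_cfg))  # ['check']
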
Example #12
    def test_schema_is_valid(self):
        discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))

        files = {
            f
            for f in os.listdir(self.rally_jobs_path)
            if (os.path.isfile(os.path.join(self.rally_jobs_path, f))
                and f.endswith(".yaml") and not f.endswith("_args.yaml"))
        }

        # TODO(andreykurilin): figure out why it fails
        files -= {"rally-mos.yaml", "sahara-clusters.yaml"}

        for filename in files:
            full_path = os.path.join(self.rally_jobs_path, filename)

            with open(full_path) as task_file:
                try:
                    args_file = os.path.join(
                        self.rally_jobs_path,
                        filename.rsplit(".", 1)[0] + "_args.yaml")

                    args = {}
                    if os.path.exists(args_file):
                        args = yaml.safe_load(open(args_file).read())
                        if not isinstance(args, dict):
                            raise TypeError(
                                "args file %s must be dict in yaml or json "
                                "presentation" % args_file)

                    task_inst = api._Task(api.API(skip_db_check=True))
                    task = task_inst.render_template(
                        task_template=task_file.read(), **args)
                    task = task_cfg.TaskConfig(yaml.safe_load(task))
                    task_obj = fakes.FakeTask({"uuid": full_path})

                    eng = engine.TaskEngine(task, task_obj, mock.Mock())
                    eng.validate(only_syntax=True)
                except Exception:
                    print(traceback.format_exc())
                    self.fail("Wrong task input file: %s" % full_path)
Example #13
    def recreate(self, api, deployment=None, filename=None):
        """Destroy and create an existing deployment.

        Unlike 'deployment destroy', the deployment database record
        will not be deleted, so the deployment UUID stays the same.
        """
        config = None
        if filename:
            with open(filename, "rb") as deploy_file:
                config = yaml.safe_load(deploy_file.read())

        api.deployment.recreate(deployment=deployment, config=config)
Example #15
    def configure_verifier(self,
                           api,
                           verifier_id=None,
                           deployment=None,
                           reconfigure=False,
                           extra_options=None,
                           new_configuration=None,
                           show=False):
        """Configure a verifier for a specific deployment."""

        # TODO(ylobankov): Add an ability to read extra options from
        #                  a json or yaml file.

        if new_configuration and (extra_options or reconfigure):
            print(
                _("Argument '--override' cannot be used with arguments "
                  "'--reconfigure' and '--extend'."))
            return 1

        if new_configuration:
            if not os.path.exists(new_configuration):
                print(_("File '%s' not found.") % new_configuration)
                return 1

            with open(new_configuration) as f:
                config = f.read()
            api.verifier.override_configuration(verifier_id=verifier_id,
                                                deployment_id=deployment,
                                                new_configuration=config)
        else:
            if extra_options:
                if os.path.isfile(extra_options):
                    conf = configparser.ConfigParser()
                    conf.read(extra_options)
                    extra_options = dict(conf._sections)
                    for s in extra_options:
                        extra_options[s] = dict(extra_options[s])
                        extra_options[s].pop("__name__", None)

                    defaults = dict(conf.defaults())
                    if defaults:
                        extra_options["DEFAULT"] = dict(conf.defaults())
                else:
                    extra_options = yaml.safe_load(extra_options)

            config = api.verifier.configure(verifier=verifier_id,
                                            deployment_id=deployment,
                                            extra_options=extra_options,
                                            reconfigure=reconfigure)

        if show:
            print("\n%s\n" % config.strip())
Example #16
    def validate(self, context, config, plugin_cls, plugin_cfg):
        wf_name = config.get("args", {}).get(self.workflow)
        if wf_name:
            wb_path = config.get("args", {}).get(self.workbook)
            wb_path = os.path.expanduser(wb_path)
            self._file_access_ok(wb_path, mode=os.R_OK,
                                 param_name=self.workbook)

            with open(wb_path, "r") as wb_def:
                wb_def = yaml.safe_load(wb_def)
                if wf_name not in wb_def["workflows"]:
                    self.fail("workflow '%s' not found in the definition '%s'"
                              % (wf_name, wb_def))
Example #17
    def test_schema_is_valid(self):
        discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))

        files = {f for f in os.listdir(self.rally_jobs_path)
                 if (os.path.isfile(os.path.join(self.rally_jobs_path, f)) and
                     f.endswith(".yaml") and not f.endswith("_args.yaml"))}

        # TODO(andreykurilin): figure out why it fails
        files -= {"rally-mos.yaml", "sahara-clusters.yaml"}

        for filename in files:
            full_path = os.path.join(self.rally_jobs_path, filename)

            with open(full_path) as task_file:
                try:
                    args_file = os.path.join(
                        self.rally_jobs_path,
                        filename.rsplit(".", 1)[0] + "_args.yaml")

                    args = {}
                    if os.path.exists(args_file):
                        args = yaml.safe_load(open(args_file).read())
                        if not isinstance(args, dict):
                            raise TypeError(
                                "args file %s must be dict in yaml or json "
                                "presentation" % args_file)

                    task_inst = api._Task(api.API(skip_db_check=True))
                    task = task_inst.render_template(
                        task_template=task_file.read(), **args)
                    task = engine.TaskConfig(yaml.safe_load(task))
                    task_obj = fakes.FakeTask({"uuid": full_path})

                    eng = engine.TaskEngine(task, task_obj, mock.Mock())
                    eng.validate(only_syntax=True)
                except Exception:
                    print(traceback.format_exc())
                    self.fail("Wrong task input file: %s" % full_path)
Example #18
    def validate(self, config, credentials, plugin_cls, plugin_cfg):
        wf_name = config.get("args", {}).get(self.param_name)
        if wf_name:
            wb_path = config.get("args", {}).get(self.param_name)
            wb_path = os.path.expanduser(wb_path)
            file_result = validators.ValidatorUtils._file_access_ok(
                config.get("args", {}).get(self.param_name), os.R_OK,
                self.param_name)
            if not file_result.is_valid:
                return file_result

            with open(wb_path, "r") as wb_def:
                wb_def = yaml.safe_load(wb_def)
                if wf_name not in wb_def["workflows"]:
                    self.fail("workflow '{}' not found "
                              "in the definition '{}'".format(wf_name, wb_def))
Example #19
    def configure_verifier(self, api, verifier_id=None, deployment=None,
                           reconfigure=False, extra_options=None,
                           new_configuration=None, show=False):
        """Configure a verifier for a specific deployment."""

        # TODO(ylobankov): Add an ability to read extra options from
        #                  a json or yaml file.
        if new_configuration and (extra_options or reconfigure):
            print("Argument '--override' cannot be used with arguments "
                  "'--reconfigure' and '--extend'.")
            return 1

        if new_configuration:
            if not os.path.exists(new_configuration):
                print("File '%s' not found." % new_configuration)
                return 1

            with open(new_configuration) as f:
                config = f.read()
            api.verifier.override_configuration(verifier_id=verifier_id,
                                                deployment_id=deployment,
                                                new_configuration=config)
        else:
            if extra_options:
                if os.path.isfile(extra_options):
                    conf = configparser.ConfigParser()
                    conf.read(extra_options)
                    extra_options = dict(conf._sections)
                    for s in extra_options:
                        extra_options[s] = dict(extra_options[s])
                        extra_options[s].pop("__name__", None)

                    defaults = dict(conf.defaults())
                    if defaults:
                        extra_options["DEFAULT"] = dict(conf.defaults())
                else:
                    extra_options = yaml.safe_load(extra_options)

            config = api.verifier.configure(verifier=verifier_id,
                                            deployment_id=deployment,
                                            extra_options=extra_options,
                                            reconfigure=reconfigure)

        if show:
            print("\n%s\n" % config.strip())
Example #20
    def import_results(self, api, verifier_id=None, deployment=None,
                       file_to_parse=None, run_args=None, do_use=True):
        """Import results of a test run into the Rally database."""

        if not os.path.exists(file_to_parse):
            print("File '%s' not found." % file_to_parse)
            return 1
        with open(file_to_parse, "r") as f:
            data = f.read()

        run_args = yaml.safe_load(run_args) if run_args else {}
        verification, results = api.verification.import_results(
            verifier_id=verifier_id, deployment_id=deployment,
            data=data, **run_args)
        self._print_totals(results["totals"])

        verification_uuid = verification["uuid"]
        if do_use:
            self.use(api, verification_uuid)
        else:
            print("Verification UUID: %s." % verification_uuid)
Example #21
    def list_extensions(self):
        """List all installed Tempest plugins."""
        # TODO(andreykurilin): find a better way to list tempest plugins
        cmd = ("from tempest.test_discover import plugins; "
               "plugins_manager = plugins.TempestTestPluginManager(); "
               "plugins_map = plugins_manager.get_plugin_load_tests_tuple(); "
               "plugins_list = ["
               "    {'name': p.name, "
               "     'entry_point': p.entry_point_target, "
               "     'location': plugins_map[p.name][1]} "
               "    for p in plugins_manager.ext_plugins.extensions]; "
               "print(plugins_list)")
        try:
            output = utils.check_output(["python", "-c", cmd],
                                        cwd=self.base_dir, env=self.environ,
                                        debug_output=False).strip()
        except subprocess.CalledProcessError:
            raise exceptions.RallyException(
                "Cannot list installed Tempest plugins for verifier %s." %
                self.verifier)

        return yaml.safe_load(output)
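
Note: the subprocess above prints a Python list literal; for simple string
values that literal is also valid YAML flow syntax, which is why safe_load can
turn it back into Python objects. A minimal sketch with a hypothetical plugin
entry:

    import yaml

    # Printed repr of a list of dicts with plain string values parses as YAML.
    printed = "[{'name': 'example-plugin', 'location': '/opt/plugins/example'}]"
    plugins = yaml.safe_load(printed)
    print(plugins[0]["name"])  # example-plugin
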
Example #23
        def parse_task_args(src_name, args):
            try:
                kw = args and yaml.safe_load(args)
                kw = {} if kw is None else kw
            except yaml.ParserError as e:
                print_invalid_header(src_name, args)
                print(_("%(source)s has to be YAML or JSON. Details:"
                        "\n\n%(err)s\n") % {
                            "source": src_name,
                            "err": e
                        },
                      file=sys.stderr)
                raise TypeError()

            if not isinstance(kw, dict):
                print_invalid_header(src_name, args)
                print(_("%(src)s has to be dict, actually %(src_type)s\n") % {
                    "src": src_name,
                    "src_type": type(kw)
                },
                      file=sys.stderr)
                raise TypeError()
            return kw
Example #24
def workbook_contains_workflow(config, clients, deployment, workbook,
                               workflow_name):
    """Validate that workflow exist in workbook when workflow is passed

    :param workbook: parameter containing the workbook definition
    :param workflow_name: parameter containing the workflow name
    """

    wf_name = config.get("args", {}).get(workflow_name)
    if wf_name:
        wb_path = config.get("args", {}).get(workbook)
        wb_path = os.path.expanduser(wb_path)
        file_result = _file_access_ok(
            config.get("args", {}).get(workbook), os.R_OK, workbook)
        if not file_result.is_valid:
            return file_result

        with open(wb_path, "r") as wb_def:
            wb_def = yaml.safe_load(wb_def)
            if wf_name not in wb_def["workflows"]:
                return ValidationResult(
                    False,
                    "workflow '{}' not found in the definition '{}'".format(
                        wf_name, wb_def))
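
Note: the check above only requires the workbook definition to expose a
top-level "workflows" mapping that contains the requested name. A minimal
sketch of such a definition (the surrounding Mistral workbook fields are an
assumption):

    import yaml

    # Hypothetical workbook body; only the "workflows" mapping matters for
    # the membership check performed by the validator above.
    wb_def = yaml.safe_load("""
    version: '2.0'
    name: wb
    workflows:
      wf1:
        tasks:
          noop:
            action: std.noop
    """)
    assert "wf1" in wb_def["workflows"]
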
Example #25
    def _load_task_results_file(self, api, task_id):
        """Load the json file which is created by `rally task results`"""

        with open(os.path.expanduser(task_id)) as inp_js:
            tasks_results = yaml.safe_load(inp_js)

        if isinstance(tasks_results, list):
            # it is an old format:

            task = {"version": 2,
                    "title": "Task loaded from a file.",
                    "description": "Auto-ported from task format V1.",
                    "uuid": "n/a",
                    "tags": [],
                    "subtasks": []}

            start_time = None

            for result in tasks_results:
                try:
                    jsonschema.validate(
                        result, OLD_TASK_RESULT_SCHEMA)
                except jsonschema.ValidationError as e:
                    raise FailedToLoadResults(source=task_id,
                                              msg=six.text_type(e))

                iter_count = 0
                failed_iter_count = 0
                min_duration = None
                max_duration = None

                for itr in result["result"]:
                    if start_time is None or itr["timestamp"] < start_time:
                        start_time = itr["timestamp"]
                    # NOTE(chenhb): back compatible for atomic_actions
                    itr["atomic_actions"] = list(
                        tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                       itr["timestamp"]))

                    iter_count += 1
                    if itr.get("error"):
                        failed_iter_count += 1

                    duration = itr.get("duration", 0)

                    if max_duration is None or duration > max_duration:
                        max_duration = duration

                    if min_duration is None or min_duration > duration:
                        min_duration = duration

                durations_stat = charts.MainStatsTable(
                    {"total_iteration_count": iter_count})

                for itr in result["result"]:
                    durations_stat.add_iteration(itr)

                created_at = dt.datetime.strptime(result["created_at"],
                                                  "%Y-%d-%mT%H:%M:%S")
                updated_at = created_at + dt.timedelta(
                    seconds=result["full_duration"])
                created_at = created_at.strftime(consts.TimeFormat.ISO8601)
                updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
                pass_sla = all(s.get("success") for s in result["sla"])
                runner_type = result["key"]["kw"]["runner"].pop("type")
                for h in result["hooks"]:
                    trigger = h["config"]["trigger"]
                    h["config"] = {
                        "description": h["config"].get("description"),
                        "action": (h["config"]["name"], h["config"]["args"]),
                        "trigger": (trigger["name"], trigger["args"])}
                workload = {"uuid": "n/a",
                            "name": result["key"]["name"],
                            "position": result["key"]["pos"],
                            "description": result["key"].get("description",
                                                             ""),
                            "full_duration": result["full_duration"],
                            "load_duration": result["load_duration"],
                            "total_iteration_count": iter_count,
                            "failed_iteration_count": failed_iter_count,
                            "min_duration": min_duration,
                            "max_duration": max_duration,
                            "start_time": start_time,
                            "created_at": created_at,
                            "updated_at": updated_at,
                            "args": result["key"]["kw"]["args"],
                            "runner_type": runner_type,
                            "runner": result["key"]["kw"]["runner"],
                            "hooks": result["hooks"],
                            "sla": result["key"]["kw"]["sla"],
                            "sla_results": {"sla": result["sla"]},
                            "pass_sla": pass_sla,
                            "context": result["key"]["kw"]["context"],
                            "data": sorted(result["result"],
                                           key=lambda x: x["timestamp"]),
                            "statistics": {
                                "durations": durations_stat.to_dict()},
                            }
                task["subtasks"].append(
                    {"title": "A SubTask",
                     "description": "",
                     "workloads": [workload]})
            return [task]
        elif isinstance(tasks_results, dict) and "tasks" in tasks_results:
            for task_result in tasks_results["tasks"]:
                try:
                    jsonschema.validate(task_result,
                                        api.task.TASK_SCHEMA)
                except jsonschema.ValidationError as e:
                    msg = six.text_type(e)
                    raise exceptions.RallyException(
                        "ERROR: Invalid task result format\n\n\t%s" % msg)
                for subtask in task_result["subtasks"]:
                    for workload in subtask["workloads"]:
                        workload["context"] = workload.pop("contexts")
                        workload["runner_type"], workload["runner"] = list(
                            workload["runner"].items())[0]
                        workload["name"], workload["args"] = list(
                            workload.pop("scenario").items())[0]

            return tasks_results["tasks"]
        else:
            raise FailedToLoadResults(
                source=task_id, msg="Wrong format")
Example #26
 def test_safe_load_order_key(self):
     stream = "{'b': 1, 'a': 1, 'c': 1}"
     stream_obj = yamlutils.safe_load(stream)
     self.assertEqual({"a": 1, "b": 1, "c": 1}, stream_obj)
     self.assertEqual(["b", "a", "c"], list(stream_obj))
Example #27
    def _load_task(self, api, task_file, task_args=None, task_args_file=None):
        """Load tasks template from file and render it with passed args.

        :param task_file: Path to file with input task
        :param task_args: JSON or YAML representation of dict with args that
                          will be used to render input task with jinja2
        :param task_args_file: Path to file with JSON or YAML representation
                               of a dict that will be used to render the
                               input task with jinja2. If both task_args and
                               task_args_file are specified, they are merged;
                               task_args has higher priority and overrides
                               values from task_args_file.
        :returns: Str with loaded and rendered task
        """
        print(cliutils.make_header("Preparing input task"))

        def print_invalid_header(source_name, args):
            print(_("Invalid %(source)s passed: \n\n %(args)s \n") % {
                "source": source_name,
                "args": args
            },
                  file=sys.stderr)

        def parse_task_args(src_name, args):
            try:
                kw = args and yaml.safe_load(args)
                kw = {} if kw is None else kw
            except yaml.ParserError as e:
                print_invalid_header(src_name, args)
                print(_("%(source)s has to be YAML or JSON. Details:"
                        "\n\n%(err)s\n") % {
                            "source": src_name,
                            "err": e
                        },
                      file=sys.stderr)
                raise TypeError()

            if not isinstance(kw, dict):
                print_invalid_header(src_name, args)
                print(_("%(src)s has to be dict, actually %(src_type)s\n") % {
                    "src": src_name,
                    "src_type": type(kw)
                },
                      file=sys.stderr)
                raise TypeError()
            return kw

        try:
            kw = {}
            if task_args_file:
                with open(task_args_file) as f:
                    kw.update(parse_task_args("task_args_file", f.read()))
            kw.update(parse_task_args("task_args", task_args))
        except TypeError:
            raise FailedToLoadTask()

        with open(task_file) as f:
            try:
                input_task = f.read()
                task_dir = os.path.expanduser(
                    os.path.dirname(task_file)) or "./"
                rendered_task = api.task.render_template(
                    input_task, task_dir, **kw)
            except Exception as e:
                print(
                    _("Failed to render task template:\n%(task)s\n%(err)s\n") %
                    {
                        "task": input_task,
                        "err": e
                    },
                    file=sys.stderr)
                raise FailedToLoadTask()

            print(_("Task is:\n%s\n") % rendered_task)
            try:
                parsed_task = yaml.safe_load(rendered_task)

            except Exception as e:
                print(_("Wrong format of rendered input task. It should be "
                        "YAML or JSON.\n%s") % e,
                      file=sys.stderr)
                raise FailedToLoadTask()

            print(_("Task syntax is correct :)"))
            return parsed_task
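
Note: a minimal sketch of the precedence described in the docstring above,
with hypothetical argument values; task_args is applied after task_args_file,
so its keys win:

    import yaml

    # Hypothetical inputs; both strings are parsed with yaml.safe_load and
    # merged in the same order as in _load_task above.
    task_args_file_content = "{'flavor': 'm1.tiny', 'times': 1}"
    task_args = "{'times': 10}"

    kw = {}
    kw.update(yaml.safe_load(task_args_file_content))
    kw.update(yaml.safe_load(task_args))
    print(kw)  # {'flavor': 'm1.tiny', 'times': 10}
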
Example #28
    def _load_task_results_file(self, api, task_id):
        """Load the json file which is created by `rally task results`"""

        with open(os.path.expanduser(task_id)) as inp_js:
            tasks_results = yaml.safe_load(inp_js)

        if isinstance(tasks_results, list):
            # it is an old format:

            task = {
                "version": 2,
                "title": "Task loaded from a file.",
                "description": "Auto-ported from task format V1.",
                "uuid": "n/a",
                "tags": [],
                "subtasks": []
            }

            start_time = None

            for result in tasks_results:
                try:
                    jsonschema.validate(result, OLD_TASK_RESULT_SCHEMA)
                except jsonschema.ValidationError as e:
                    raise FailedToLoadResults(source=task_id,
                                              msg=six.text_type(e))

                iter_count = 0
                failed_iter_count = 0
                min_duration = None
                max_duration = None

                for itr in result["result"]:
                    if start_time is None or itr["timestamp"] < start_time:
                        start_time = itr["timestamp"]
                    # NOTE(chenhb): back compatible for atomic_actions
                    itr["atomic_actions"] = list(
                        tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                       itr["timestamp"]))

                    iter_count += 1
                    if itr.get("error"):
                        failed_iter_count += 1

                    duration = itr.get("duration", 0)

                    if max_duration is None or duration > max_duration:
                        max_duration = duration

                    if min_duration is None or min_duration > duration:
                        min_duration = duration

                durations_stat = charts.MainStatsTable(
                    {"total_iteration_count": iter_count})

                for itr in result["result"]:
                    durations_stat.add_iteration(itr)

                created_at = dt.datetime.strptime(result["created_at"],
                                                  "%Y-%d-%mT%H:%M:%S")
                updated_at = created_at + dt.timedelta(
                    seconds=result["full_duration"])
                created_at = created_at.strftime(consts.TimeFormat.ISO8601)
                updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
                pass_sla = all(s.get("success") for s in result["sla"])
                runner_type = result["key"]["kw"]["runner"].pop("type")
                for h in result["hooks"]:
                    trigger = h["config"]["trigger"]
                    h["config"] = {
                        "description": h["config"].get("description"),
                        "action": (h["config"]["name"], h["config"]["args"]),
                        "trigger": (trigger["name"], trigger["args"])
                    }
                workload = {
                    "uuid": "n/a",
                    "name": result["key"]["name"],
                    "position": result["key"]["pos"],
                    "description": result["key"].get("description", ""),
                    "full_duration": result["full_duration"],
                    "load_duration": result["load_duration"],
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "start_time": start_time,
                    "created_at": created_at,
                    "updated_at": updated_at,
                    "args": result["key"]["kw"]["args"],
                    "runner_type": runner_type,
                    "runner": result["key"]["kw"]["runner"],
                    "hooks": result["hooks"],
                    "sla": result["key"]["kw"]["sla"],
                    "sla_results": {
                        "sla": result["sla"]
                    },
                    "pass_sla": pass_sla,
                    "context": result["key"]["kw"]["context"],
                    "data": sorted(result["result"],
                                   key=lambda x: x["timestamp"]),
                    "statistics": {
                        "durations": durations_stat.to_dict()
                    },
                }
                task["subtasks"].append({
                    "title": "A SubTask",
                    "description": "",
                    "workloads": [workload]
                })
            return [task]
        elif isinstance(tasks_results, dict) and "tasks" in tasks_results:
            for task_result in tasks_results["tasks"]:
                try:
                    jsonschema.validate(task_result, api.task.TASK_SCHEMA)
                except jsonschema.ValidationError as e:
                    msg = six.text_type(e)
                    raise exceptions.RallyException(
                        "ERROR: Invalid task result format\n\n\t%s" % msg)
                for subtask in task_result["subtasks"]:
                    for workload in subtask["workloads"]:
                        workload["context"] = workload.pop("contexts")
                        workload["runner_type"], workload["runner"] = list(
                            workload["runner"].items())[0]
                        workload["name"], workload["args"] = list(
                            workload.pop("scenario").items())[0]

            return tasks_results["tasks"]
        else:
            raise FailedToLoadResults(source=task_id, msg="Wrong format")
Example #29
 def test_safe_load(self):
     stream = "{'a': 1, 'b': {'a': 2}}"
     stream_obj = yamlutils.safe_load(stream)
     self.assertEqual({"a": 1, "b": {"a": 2}},
                      stream_obj)
Example #30
    def detailed(self, api, task_id=None, iterations_data=False):
        """Print detailed information about given task.

        :param task_id: str, task uuid
        :param iterations_data: bool, include results for each iteration
        """
        task = api.task.get_detailed(task_id, extended_results=True)

        if not task:
            print("The task %s can not be found" % task_id)
            return 1

        print()
        print("-" * 80)
        print(
            _("Task %(task_id)s: %(status)s") % {
                "task_id": task_id,
                "status": task["status"]
            })

        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
                consts.TaskStatus.VALIDATION_FAILED):
            print("-" * 80)
            verification = yaml.safe_load(task["verification_log"])
            if logging.is_debug():
                print(yaml.safe_load(verification["trace"]))
            else:
                print(verification["etype"])
                print(verification["msg"])
                print(
                    _("\nFor more details run:\nrally -d task detailed %s") %
                    task["uuid"])
            return 0
        elif task["status"] not in [
                consts.TaskStatus.FINISHED, consts.TaskStatus.ABORTED
        ]:
            print("-" * 80)
            print(
                _("\nThe task %s marked as '%s'. Results "
                  "available when it is '%s'.") %
                (task_id, task["status"], consts.TaskStatus.FINISHED))
            return 0
        for result in task["results"]:
            key = result["key"]
            print("-" * 80)
            print()
            print("test scenario %s" % key["name"])
            print("args position %s" % key["pos"])
            print("args values:")
            print(json.dumps(key["kw"], indent=2))
            print()

            iterations = []
            iterations_headers = ["iteration", "duration"]
            iterations_actions = []
            output = []
            task_errors = []
            if iterations_data:
                for i, atomic_name in enumerate(result["info"]["atomic"], 1):
                    action = "%i. %s" % (i, atomic_name)
                    iterations_headers.append(action)
                    iterations_actions.append((atomic_name, action))

            for idx, itr in enumerate(result["iterations"], 1):

                if iterations_data:
                    row = {"iteration": idx, "duration": itr["duration"]}
                    for name, action in iterations_actions:
                        row[action] = itr["atomic_actions"].get(name, 0)
                    iterations.append(row)

                if "output" in itr:
                    iteration_output = itr["output"]
                else:
                    iteration_output = {"additive": [], "complete": []}

                    # NOTE(amaretskiy): "scenario_output" is supported
                    #   for backward compatibility
                    if ("scenario_output" in itr
                            and itr["scenario_output"]["data"]):
                        iteration_output["additive"].append({
                            "data":
                            itr["scenario_output"]["data"].items(),
                            "title":
                            "Scenario output",
                            "description":
                            "",
                            "chart_plugin":
                            "StackedArea"
                        })

                for idx, additive in enumerate(iteration_output["additive"]):
                    if len(output) <= idx + 1:
                        output_table = plot.charts.OutputStatsTable(
                            result["info"], title=additive["title"])
                        output.append(output_table)
                    output[idx].add_iteration(additive["data"])

                if itr.get("error"):
                    task_errors.append(TaskCommands._format_task_error(itr))

            self._print_task_errors(task_id, task_errors)

            cols = plot.charts.MainStatsTable.columns
            float_cols = result["info"]["stat"]["cols"][1:7]
            formatters = dict(
                zip(float_cols, [
                    cliutils.pretty_float_formatter(col, 3)
                    for col in float_cols
                ]))
            rows = [dict(zip(cols, r)) for r in result["info"]["stat"]["rows"]]
            cliutils.print_list(rows,
                                fields=cols,
                                formatters=formatters,
                                table_label="Response Times (sec)",
                                sortby_index=None)
            print()

            if iterations_data:
                formatters = dict(
                    zip(iterations_headers[1:], [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in iterations_headers[1:]
                    ]))
                cliutils.print_list(iterations,
                                    fields=iterations_headers,
                                    table_label="Atomics per iteration",
                                    formatters=formatters)
                print()

            if output:
                cols = plot.charts.OutputStatsTable.columns
                float_cols = cols[1:7]
                formatters = dict(
                    zip(float_cols, [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in float_cols
                    ]))

                for out in output:
                    data = out.render()
                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                    if rows:
                        # NOTE(amaretskiy): print title explicitly because
                        #     prettytable fails if title length is too long
                        print(data["title"])
                        cliutils.print_list(rows,
                                            fields=cols,
                                            formatters=formatters)
                        print()

            print(
                _("Load duration: %s") %
                rutils.format_float_to_str(result["info"]["load_duration"]))
            print(
                _("Full duration: %s") %
                rutils.format_float_to_str(result["info"]["full_duration"]))

            print("\nHINTS:")
            print(_("* To plot HTML graphics with this data, run:"))
            print("\trally task report %s --out output.html\n" % task["uuid"])
            print(_("* To generate a JUnit report, run:"))
            print("\trally task report %s --junit --out output.xml\n" %
                  task["uuid"])
            print(_("* To get raw JSON output of task results, run:"))
            print("\trally task results %s\n" % task["uuid"])
Example #31
 def parse(filename):
     with open(filename, "r") as f:
         return yaml.safe_load(f.read())
Example #32
    def _detailed(self,
                  api,
                  task_id=None,
                  iterations_data=False,
                  filters=None):
        """Print detailed information about given task."""
        scenarios_filter = []
        only_sla_failures = False
        for filter in filters or []:
            if filter.startswith("scenario="):
                filter_value = filter.split("=")[1]
                scenarios_filter = filter_value.split(",")
            if filter == "sla-failures":
                only_sla_failures = True

        task = api.task.get(task_id=task_id, detailed=True)

        print()
        print("-" * 80)
        print("Task %(task_id)s: %(status)s" % {
            "task_id": task_id,
            "status": task["status"]
        })

        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
                consts.TaskStatus.VALIDATION_FAILED):
            print("-" * 80)
            validation = task["validation_result"]
            if logging.is_debug():
                print(yaml.safe_load(validation["trace"]))
            else:
                print(validation["etype"])
                print(validation["msg"])
                print("\nFor more details run:\nrally -d task detailed %s" %
                      task["uuid"])
            return 0
        elif task["status"] not in [
                consts.TaskStatus.FINISHED, consts.TaskStatus.ABORTED
        ]:
            print("-" * 80)
            print("\nThe task %s marked as '%s'. Results "
                  "available when it is '%s'." %
                  (task_id, task["status"], consts.TaskStatus.FINISHED))
            return 0

        for workload in itertools.chain(
                *[s["workloads"] for s in task["subtasks"]]):
            if scenarios_filter and workload["name"] not in scenarios_filter:
                continue
            if only_sla_failures and workload["pass_sla"]:
                continue

            print("-" * 80)
            print()
            print("test scenario %s" % workload["name"])
            print("args position %s" % workload["position"])
            print("args values:")
            print(
                json.dumps(
                    {
                        "args": workload["args"],
                        "runner": workload["runner"],
                        "contexts": workload["contexts"],
                        "sla": workload["sla"],
                        "hooks": [r["config"] for r in workload["hooks"]]
                    },
                    indent=2))
            print()

            duration_stats = workload["statistics"]["durations"]

            iterations = []
            iterations_headers = ["iteration", "duration"]
            iterations_actions = []
            output = []
            task_errors = []
            if iterations_data:
                atomic_names = [
                    a["display_name"] for a in duration_stats["atomics"]
                ]
                for i, atomic_name in enumerate(atomic_names, 1):
                    action = "%i. %s" % (i, atomic_name)
                    iterations_headers.append(action)
                    iterations_actions.append((atomic_name, action))

            for idx, itr in enumerate(workload["data"], 1):

                if iterations_data:
                    row = {"iteration": idx, "duration": itr["duration"]}
                    for name, action in iterations_actions:
                        atomic_actions = atomic.merge_atomic_actions(
                            itr["atomic_actions"])
                        row[action] = atomic_actions.get(name, {}).get(
                            "duration", 0)
                    iterations.append(row)

                if "output" in itr:
                    iteration_output = itr["output"]
                else:
                    iteration_output = {"additive": [], "complete": []}

                for idx, additive in enumerate(iteration_output["additive"]):
                    if len(output) <= idx + 1:
                        output_table = charts.OutputStatsTable(
                            workload, title=additive["title"])
                        output.append(output_table)
                    output[idx].add_iteration(additive["data"])

                if itr.get("error"):
                    task_errors.append(TaskCommands._format_task_error(itr))

            self._print_task_errors(task_id, task_errors)

            cols = charts.MainStatsTable.columns
            formatters = {
                "Action": lambda x: x["display_name"],
                "Min (sec)": lambda x: x["data"]["min"],
                "Median (sec)": lambda x: x["data"]["median"],
                "90%ile (sec)": lambda x: x["data"]["90%ile"],
                "95%ile (sec)": lambda x: x["data"]["95%ile"],
                "Max (sec)": lambda x: x["data"]["max"],
                "Avg (sec)": lambda x: x["data"]["avg"],
                "Success": lambda x: x["data"]["success"],
                "Count": lambda x: x["data"]["iteration_count"]
            }

            rows = []

            def make_flat(r, depth=0):
                if depth > 0:
                    r["display_name"] = (" %s> %s" %
                                         ("-" * depth, r["display_name"]))

                rows.append(r)
                for children in r["children"]:
                    make_flat(children, depth + 1)

            for row in itertools.chain(duration_stats["atomics"],
                                       [duration_stats["total"]]):
                make_flat(row)
            cliutils.print_list(rows,
                                fields=cols,
                                formatters=formatters,
                                normalize_field_names=True,
                                table_label="Response Times (sec)",
                                sortby_index=None)
            print()

            if iterations_data:
                formatters = dict(
                    zip(iterations_headers[1:], [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in iterations_headers[1:]
                    ]))
                cliutils.print_list(iterations,
                                    fields=iterations_headers,
                                    table_label="Atomics per iteration",
                                    formatters=formatters)
                print()

            if output:
                cols = charts.OutputStatsTable.columns
                float_cols = cols[1:7]
                formatters = dict(
                    zip(float_cols, [
                        cliutils.pretty_float_formatter(col, 3)
                        for col in float_cols
                    ]))

                for out in output:
                    data = out.render()
                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                    if rows:
                        # NOTE(amaretskiy): print title explicitly because
                        #     prettytable fails if title length is too long
                        print(data["title"])
                        cliutils.print_list(rows,
                                            fields=cols,
                                            formatters=formatters)
                        print()

            print("Load duration: %s" %
                  strutils.format_float_to_str(workload["load_duration"]))
            print("Full duration: %s" %
                  strutils.format_float_to_str(workload["full_duration"]))

        print("\nHINTS:")
        print("* To plot HTML graphics with this data, run:")
        print("\trally task report %s --out output.html\n" % task["uuid"])
        print("* To generate a JUnit report, run:")
        print("\trally task export %s --type junit --to output.xml\n" %
              task["uuid"])
        print("* To get raw JSON output of task results, run:")
        print("\trally task report %s --json --out output.json\n" %
              task["uuid"])

        if not task["pass_sla"]:
            print("At least one workload did not pass SLA criteria.\n")
            return 1
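
The make_flat helper above flattens the nested atomic-action tree into rows whose display names are indented by depth. A minimal standalone sketch of that behavior, run on a hypothetical two-level row:

# Standalone sketch of the make_flat helper from the example above,
# applied to a hypothetical nested atomic-action row.
rows = []

def make_flat(r, depth=0):
    if depth > 0:
        r["display_name"] = " %s> %s" % ("-" * depth, r["display_name"])
    rows.append(r)
    for children in r["children"]:
        make_flat(children, depth + 1)

sample = {
    "display_name": "nova.boot_server",
    "children": [{"display_name": "nova.wait_for_ssh", "children": []}],
}
make_flat(sample)
print([r["display_name"] for r in rows])
# -> ['nova.boot_server', ' -> nova.wait_for_ssh']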
Example #33
0
def parse(filename):
    with open(filename, "r") as f:
        return yaml.safe_load(f.read())
Example #34
0
    def _detailed(self, api, task_id=None, iterations_data=False):
        """Print detailed information about given task."""

        task = api.task.get(task_id=task_id, detailed=True)

        print()
        print("-" * 80)
        print("Task %(task_id)s: %(status)s"
              % {"task_id": task_id, "status": task["status"]})

        if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
                consts.TaskStatus.VALIDATION_FAILED):
            print("-" * 80)
            validation = task["validation_result"]
            if logging.is_debug():
                print(yaml.safe_load(validation["trace"]))
            else:
                print(validation["etype"])
                print(validation["msg"])
                print("\nFor more details run:\nrally -d task detailed %s"
                      % task["uuid"])
            return 0
        elif task["status"] not in [consts.TaskStatus.FINISHED,
                                    consts.TaskStatus.ABORTED]:
            print("-" * 80)
            print("\nThe task %s marked as '%s'. Results "
                  "available when it is '%s'."
                  % (task_id, task["status"], consts.TaskStatus.FINISHED))
            return 0

        for workload in itertools.chain(
                *[s["workloads"] for s in task["subtasks"]]):
            print("-" * 80)
            print()
            print("test scenario %s" % workload["name"])
            print("args position %s" % workload["position"])
            print("args values:")
            print(json.dumps(
                {"args": workload["args"],
                 "runner": workload["runner"],
                 "contexts": workload["contexts"],
                 "sla": workload["sla"],
                 "hooks": [r["config"] for r in workload["hooks"]]},
                indent=2))
            print()

            duration_stats = workload["statistics"]["durations"]

            iterations = []
            iterations_headers = ["iteration", "duration"]
            iterations_actions = []
            output = []
            task_errors = []
            if iterations_data:
                atomic_names = [a["display_name"]
                                for a in duration_stats["atomics"]]
                for i, atomic_name in enumerate(atomic_names, 1):
                    action = "%i. %s" % (i, atomic_name)
                    iterations_headers.append(action)
                    iterations_actions.append((atomic_name, action))

            for idx, itr in enumerate(workload["data"], 1):

                if iterations_data:
                    row = {"iteration": idx, "duration": itr["duration"]}
                    for name, action in iterations_actions:
                        atomic_actions = atomic.merge_atomic_actions(
                            itr["atomic_actions"])
                        row[action] = atomic_actions.get(name, {}).get(
                            "duration", 0)
                    iterations.append(row)

                if "output" in itr:
                    iteration_output = itr["output"]
                else:
                    iteration_output = {"additive": [], "complete": []}

                for idx, additive in enumerate(iteration_output["additive"]):
                    if len(output) <= idx + 1:
                        output_table = charts.OutputStatsTable(
                            workload, title=additive["title"])
                        output.append(output_table)
                    output[idx].add_iteration(additive["data"])

                if itr.get("error"):
                    task_errors.append(TaskCommands._format_task_error(itr))

            self._print_task_errors(task_id, task_errors)

            cols = charts.MainStatsTable.columns
            formatters = {
                "Action": lambda x: x["display_name"],
                "Min (sec)": lambda x: x["data"]["min"],
                "Median (sec)": lambda x: x["data"]["median"],
                "90%ile (sec)": lambda x: x["data"]["90%ile"],
                "95%ile (sec)": lambda x: x["data"]["95%ile"],
                "Max (sec)": lambda x: x["data"]["max"],
                "Avg (sec)": lambda x: x["data"]["avg"],
                "Success": lambda x: x["data"]["success"],
                "Count": lambda x: x["data"]["iteration_count"]
            }

            rows = []

            def make_flat(r, depth=0):
                if depth > 0:
                    r["display_name"] = (" %s> %s" % ("-" * depth,
                                                      r["display_name"]))

                rows.append(r)
                for children in r["children"]:
                    make_flat(children, depth + 1)

            for row in itertools.chain(duration_stats["atomics"],
                                       [duration_stats["total"]]):
                make_flat(row)
            cliutils.print_list(rows,
                                fields=cols,
                                formatters=formatters,
                                normalize_field_names=True,
                                table_label="Response Times (sec)",
                                sortby_index=None)
            print()

            if iterations_data:
                formatters = dict(zip(iterations_headers[1:],
                                      [cliutils.pretty_float_formatter(col, 3)
                                       for col in iterations_headers[1:]]))
                cliutils.print_list(iterations,
                                    fields=iterations_headers,
                                    table_label="Atomics per iteration",
                                    formatters=formatters)
                print()

            if output:
                cols = charts.OutputStatsTable.columns
                float_cols = cols[1:7]
                formatters = dict(zip(float_cols,
                                  [cliutils.pretty_float_formatter(col, 3)
                                   for col in float_cols]))

                for out in output:
                    data = out.render()
                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
                    if rows:
                        # NOTE(amaretskiy): print title explicitly because
                        #     prettytable fails if title length is too long
                        print(data["title"])
                        cliutils.print_list(rows, fields=cols,
                                            formatters=formatters)
                        print()

            print("Load duration: %s"
                  % strutils.format_float_to_str(workload["load_duration"]))
            print("Full duration: %s"
                  % strutils.format_float_to_str(workload["full_duration"]))

        print("\nHINTS:")
        print("* To plot HTML graphics with this data, run:")
        print("\trally task report %s --out output.html\n" % task["uuid"])
        print("* To generate a JUnit report, run:")
        print("\trally task export %s --type junit --to output.xml\n" %
              task["uuid"])
        print("* To get raw JSON output of task results, run:")
        print("\trally task report %s --json --out output.json\n" %
              task["uuid"])

        if not task["pass_sla"]:
            print("At least one workload did not pass SLA criteria.\n")
            return 1
Example #35
0
    def _load_and_validate_task(self,
                                api,
                                task_file,
                                args_file=None,
                                raw_args=None):
        """Load, render and validate tasks template from file with passed args.

        :param task_file: Path to file with input task
        :param raw_args: JSON or YAML representation of dict with args that
            will be used to render input task with jinja2
        :param args_file: Path to file with JSON or YAML representation
            of dict, that will be used to render input with jinja2. If both
            specified task_args and task_args_file they will be merged.
            raw_args has bigger priority so it will update values
            from args_file.
        :returns: Str with loaded and rendered task
        """

        print(cliutils.make_header("Preparing input task"))

        try:
            input_task = open(task_file).read()
        except IOError as err:
            raise FailedToLoadTask(source="--task",
                                   msg="Error reading %s: %s" %
                                   (task_file, err))

        task_dir = os.path.expanduser(os.path.dirname(task_file)) or "./"

        task_args = {}
        if args_file:
            try:
                task_args.update(yaml.safe_load(open(args_file).read()))
            except yaml.ParserError as e:
                raise FailedToLoadTask(
                    source="--task-args-file",
                    msg="File '%s' has to be YAML or JSON. Details:\n\n%s" %
                    (args_file, e))
            except IOError as err:
                raise FailedToLoadTask(source="--task-args-file",
                                       msg="Error reading %s: %s" %
                                       (args_file, err))

        if raw_args:
            try:
                data = yaml.safe_load(raw_args)
                if isinstance(data, (six.text_type, six.string_types)):
                    raise yaml.ParserError("String '%s' doesn't look like a "
                                           "dictionary." % raw_args)
                task_args.update(data)
            except yaml.ParserError as e:
                args = [
                    keypair.split("=", 1) for keypair in raw_args.split(",")
                ]
                if len([a for a in args if len(a) != 1]) != len(args):
                    raise FailedToLoadTask(
                        source="--task-args",
                        msg="Value has to be YAML or JSON. Details:\n\n%s" % e)
                else:
                    task_args.update(dict(args))

        try:
            rendered_task = api.task.render_template(task_template=input_task,
                                                     template_dir=task_dir,
                                                     **task_args)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Failed to render task template.\n\n%s" % e)

        print("Task is:\n%s\n" % rendered_task.strip())
        try:
            parsed_task = yaml.safe_load(rendered_task)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Wrong format of rendered input task. It should be YAML or"
                " JSON. Details:\n\n%s" % e)

        print("Task syntax is correct :)")
        return parsed_task
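
The docstring above describes how the two argument sources are merged: both are loaded as YAML/JSON dicts, and raw_args overrides values coming from args_file. A minimal sketch of that precedence, with hypothetical argument values:

# Minimal sketch of the merge precedence described above; the argument
# values here are hypothetical.
import yaml

args_file_content = "flavor: m1.small\nimage: cirros"   # --task-args-file
raw_args = "{'image': 'ubuntu'}"                        # --task-args

task_args = {}
task_args.update(yaml.safe_load(args_file_content))
task_args.update(yaml.safe_load(raw_args))   # raw_args wins on conflicts
print(task_args)
# -> {'flavor': 'm1.small', 'image': 'ubuntu'}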
Example #36
0
    def test_task_samples_are_valid(self):
        rally = utils.Rally(force_new_db=True)
        # In TestTaskSamples, Rally API will be called directly (not via
        # subprocess), so we need to change database options to temp database.
        db.db_options.set_defaults(
            db.CONF, connection="sqlite:///%s/db" % rally.tmp_dir)

        # let's use pre-created users to make TestTaskSamples quicker
        deployment = api._Deployment._get("MAIN")
        admin_cred = deployment.get_credentials_for("openstack")["admin"]

        ctx = {"admin": {"credential": admin_cred},
               "task": {"uuid": self.__class__.__name__}}
        user_ctx = users.UserGenerator(ctx)
        user_ctx.setup()
        self.addCleanup(user_ctx.cleanup)

        config = deployment["config"]
        os_creds = config["creds"]["openstack"]

        user = copy.copy(os_creds["admin"])
        user["username"] = ctx["users"][0]["credential"].username
        user["password"] = ctx["users"][0]["credential"].password
        if "project_name" in os_creds["admin"]:
            # it is Keystone
            user["project_name"] = ctx["users"][0]["credential"].tenant_name
        else:
            user["tenant_name"] = ctx["users"][0]["credential"].tenant_name
        config["creds"]["openstack"]["users"] = [user]

        rally("deployment destroy MAIN", write_report=False)
        deployment_cfg = os.path.join(rally.tmp_dir, "new_deployment.json")
        with open(deployment_cfg, "w") as f:
            f.write(json.dumps(config))
        rally("deployment create --name MAIN --filename %s" % deployment_cfg,
              write_report=False)

        samples_path = os.path.join(
            os.path.dirname(rally_module.__file__), os.pardir,
            "samples", "tasks")

        for dirname, dirnames, filenames in os.walk(samples_path):
            # NOTE(rvasilets): Skip at the suggestion of boris-42 because we
            # don't want to maintain this dir in the future
            if dirname.find("tempest-do-not-run-against-production") != -1:
                continue
            for filename in filenames:
                full_path = os.path.join(dirname, filename)

                # NOTE(hughsaunders): Skip non-config files
                # (bug https://bugs.launchpad.net/rally/+bug/1314369)
                if os.path.splitext(filename)[1] not in (".json",):
                    continue
                with open(full_path) as task_file:
                    try:
                        input_task = task_file.read()
                        rendered_task = api._Task.render_template(input_task)
                        task_config = yaml.safe_load(rendered_task)
                        api._Task.validate("MAIN", task_config)
                    except Exception as e:
                        if not self._skip(six.text_type(e)):
                            print(traceback.format_exc())
                            print("Failed on task config %s with error." %
                                  full_path)
                            raise
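
A minimal sketch of the extension filter used in the walk above, written against a one-element tuple so the membership test compares whole extensions rather than substrings (the directory path is hypothetical):

# Sketch of the extension filter from the walk above; the path is
# hypothetical and only ".json" files survive the filter.
import os

for dirname, dirnames, filenames in os.walk("samples/tasks"):
    for filename in filenames:
        if os.path.splitext(filename)[1] not in (".json",):
            continue
        print(os.path.join(dirname, filename))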
Example #37
0
    def test_safe_load(self):
        stream = "{'a': 1, 'b': {'a': 2}}"
        stream_obj = yamlutils.safe_load(stream)
        self.assertEqual({"a": 1, "b": {"a": 2}}, stream_obj)
Example #38
0
    def test_safe_load_order_key(self):
        stream = "{'b': 1, 'a': 1, 'c': 1}"
        stream_obj = yamlutils.safe_load(stream)
        self.assertEqual({"a": 1, "b": 1, "c": 1}, stream_obj)
        self.assertEqual(["b", "a", "c"], list(stream_obj))
Example #39
0
    def create(self, api, name, fromenv=False, filename=None, do_use=False):
        """Create new deployment.

        This command will create a new deployment record in rally
        database. In the case of ExistingCloud deployment engine, it
        will use the cloud represented in the configuration. If the
        cloud doesn't exist, Rally can deploy a new one for you with
        Devstack or Fuel. Different deployment engines exist for these
        cases.

        If you use the ExistingCloud deployment engine, you can pass
        the deployment config by environment variables with ``--fromenv``:

            OS_USERNAME
            OS_PASSWORD
            OS_AUTH_URL
            OS_TENANT_NAME or OS_PROJECT_NAME
            OS_ENDPOINT_TYPE or OS_INTERFACE
            OS_ENDPOINT
            OS_REGION_NAME
            OS_CACERT
            OS_INSECURE
            OS_IDENTITY_API_VERSION

        All other deployment engines need more complex configuration
        data, so it should be stored in a configuration file.

        You can use physical servers, LXC containers, KVM virtual
        machines or virtual machines in OpenStack for deploying the
        cloud. Except for physical servers, Rally can create cluster nodes
        for you. Interaction with virtualization software, OpenStack
        cloud or physical servers is provided by server providers.

        :param fromenv: boolean, read environment instead of config file
        :param filename: path to the configuration file
        :param name: name of the deployment
        """

        if fromenv:
            config = {"type": "ExistingCloud"}
            config.update(envutils.get_creds_from_env_vars())
        else:
            if not filename:
                print("Either --filename or --fromenv is required.")
                return (1)
            filename = os.path.expanduser(filename)
            with open(filename, "rb") as deploy_file:
                config = yaml.safe_load(deploy_file.read())

        try:
            deployment = api.deployment.create(config, name)
        except jsonschema.ValidationError:
            print(_("Config schema validation error: %s.") % sys.exc_info()[1])
            return (1)
        except exceptions.DeploymentNameExists:
            print(_("Error: %s") % sys.exc_info()[1])
            return (1)

        self.list(api, deployment_list=[deployment])
        if do_use:
            self.use(api, deployment["uuid"])
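
The environment variables listed in the docstring are turned into an ExistingCloud config by envutils.get_creds_from_env_vars(), which is not reproduced here. A rough, hypothetical illustration of the idea (the exact field names and nesting of the real config differ):

# Rough illustration of building a deployment config from the environment
# variables listed in the docstring above. Field names and nesting are
# hypothetical; the real mapping lives in envutils.get_creds_from_env_vars().
import os

config = {
    "type": "ExistingCloud",
    "auth_url": os.environ.get("OS_AUTH_URL"),
    "region_name": os.environ.get("OS_REGION_NAME"),
    "admin": {
        "username": os.environ.get("OS_USERNAME"),
        "password": os.environ.get("OS_PASSWORD"),
        "tenant_name": (os.environ.get("OS_TENANT_NAME")
                        or os.environ.get("OS_PROJECT_NAME")),
    },
}
print(config)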
Example #40
0
    def create(self, api, name, fromenv=False, filename=None, do_use=False):
        """Create new deployment.

        This command will create a new deployment record in rally
        database. In the case of ExistingCloud deployment engine, it
        will use the cloud represented in the configuration. If the
        cloud doesn't exist, Rally can deploy a new one for you with
        Devstack or Fuel. Different deployment engines exist for these
        cases (see `rally plugin list --plugin-base Engine` for
        more details).

        If you use the ExistingCloud deployment engine, you can pass
        the deployment config by environment variables with ``--fromenv``:

            OS_USERNAME
            OS_PASSWORD
            OS_AUTH_URL
            OS_TENANT_NAME or OS_PROJECT_NAME
            OS_ENDPOINT_TYPE or OS_INTERFACE
            OS_ENDPOINT
            OS_REGION_NAME
            OS_CACERT
            OS_INSECURE
            OS_IDENTITY_API_VERSION

        All other deployment engines need more complex configuration
        data, so it should be stored in a configuration file.

        You can use physical servers, LXC containers, KVM virtual
        machines or virtual machines in OpenStack for deploying the
        cloud. Except for physical servers, Rally can create cluster nodes
        for you. Interaction with virtualization software, OpenStack
        cloud or physical servers is provided by server providers.
        """

        if fromenv:
            result = env_mgr.EnvManager.create_spec_from_sys_environ()
            config = result["spec"]
            if "existing@openstack" in config:
                # NOTE(andreykurilin): if we are here, it means that
                #   the rally-openstack package is installed
                import rally_openstack
                if rally_openstack.__version_tuple__ <= (1, 4, 0):
                    print(rally_openstack.__version_tuple__)
                    if config["existing@openstack"]["https_key"]:
                        print("WARNING: OS_KEY is ignored due to old version "
                              "of rally-openstack package.")
                    # NOTE(andreykurilin): To support rally-openstack <=1.4.0
                    #   we need to remove https_key, since OpenStackCredentials
                    #   object doesn't support it.
                    #   Latest rally-openstack fixed this issue with
                    #   https://github.com/openstack/rally-openstack/commit/c7483386e6b59474c83e3ecd0c7ee0e77ff50c02

                    config["existing@openstack"].pop("https_key")
        else:
            if not filename:
                config = {}
            else:
                with open(os.path.expanduser(filename), "rb") as deploy_file:
                    config = yaml.safe_load(deploy_file.read())

        try:
            deployment = api.deployment.create(config=config, name=name)
        except jsonschema.ValidationError:
            print("Config schema validation error: %s." % sys.exc_info()[1])
            return 1
        except exceptions.DBRecordExists:
            print("Error: %s" % sys.exc_info()[1])
            return 1

        self.list(api, deployment_list=[deployment])
        if do_use:
            self.use(api, deployment)
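
The https_key handling above is gated on the installed rally-openstack version via a tuple comparison. A tiny sketch of that check with a hypothetical version value:

# Hypothetical illustration of the version gate used above: tuples compare
# element-wise, so (1, 3, 0) <= (1, 4, 0) is True and triggers the fallback.
version_tuple = (1, 3, 0)   # pretend installed rally-openstack version

config = {"existing@openstack": {"https_key": "dummy",
                                 "auth_url": "http://example.com"}}

if version_tuple <= (1, 4, 0):
    # old rally-openstack cannot handle https_key, so drop it
    config["existing@openstack"].pop("https_key")

print(config)
# -> {'existing@openstack': {'auth_url': 'http://example.com'}}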
Example #41
0
    def _load_task_results_file(self, api, task_id):
        """Load the json file which is created by `rally task results` """
        with open(os.path.expanduser(task_id)) as inp_js:
            tasks_results = yaml.safe_load(inp_js)

        if type(tasks_results) == list:
            # it is an old format:

            task = {"subtasks": []}

            start_time = float("inf")

            for result in tasks_results:
                try:
                    jsonschema.validate(result, api.task.TASK_RESULT_SCHEMA)
                except jsonschema.ValidationError as e:
                    raise FailedToLoadResults(source=task_id,
                                              msg=six.text_type(e))

                iter_count = 0
                failed_iter_count = 0
                min_duration = float("inf")
                max_duration = 0

                atomics = collections.OrderedDict()

                for itr in result["result"]:
                    if itr["timestamp"] < start_time:
                        start_time = itr["timestamp"]
                    # NOTE(chenhb): back compatible for atomic_actions
                    itr["atomic_actions"] = list(
                        tutils.WrapperForAtomicActions(itr["atomic_actions"],
                                                       itr["timestamp"]))

                    iter_count += 1
                    if itr.get("error"):
                        failed_iter_count += 1

                    duration = itr.get("duration", 0)

                    if duration > max_duration:
                        max_duration = duration

                    if min_duration and min_duration > duration:
                        min_duration = duration

                    merged_atomic = atomic.merge_atomic(itr["atomic_actions"])
                    for key, value in merged_atomic.items():
                        duration = value["duration"]
                        count = value["count"]
                        if key not in atomics or count > atomics[key]["count"]:
                            atomics[key] = {
                                "min_duration": duration,
                                "max_duration": duration,
                                "count": count
                            }
                        elif count == atomics[key]["count"]:
                            if duration < atomics[key]["min_duration"]:
                                atomics[key]["min_duration"] = duration
                            if duration > atomics[key]["max_duration"]:
                                atomics[key]["max_duration"] = duration

                durations_stat = charts.MainStatsTable({
                    "total_iteration_count": iter_count,
                    "statistics": {"atomics": atomics}
                })

                for itr in result["result"]:
                    durations_stat.add_iteration(itr)

                updated_at = dt.datetime.strptime(result["created_at"],
                                                  "%Y-%m-%dT%H:%M:%S")
                updated_at += dt.timedelta(seconds=result["full_duration"])
                updated_at = updated_at.strftime(consts.TimeFormat.ISO8601)
                pass_sla = all(s.get("success") for s in result["sla"])
                workload = {
                    "name": result["key"]["name"],
                    "position": result["key"]["pos"],
                    "description": result["key"].get("description", ""),
                    "full_duration": result["full_duration"],
                    "load_duration": result["load_duration"],
                    "total_iteration_count": iter_count,
                    "failed_iteration_count": failed_iter_count,
                    "min_duration": min_duration,
                    "max_duration": max_duration,
                    "start_time": start_time,
                    "created_at": result["created_at"],
                    "updated_at": updated_at,
                    "args": result["key"]["kw"]["args"],
                    "runner": result["key"]["kw"]["runner"],
                    "hooks": [{
                        "config": h
                    } for h in result["key"]["kw"]["hooks"]],
                    "sla": result["key"]["kw"]["sla"],
                    "sla_results": {
                        "sla": result["sla"]
                    },
                    "pass_sla": pass_sla,
                    "context": result["key"]["kw"]["context"],
                    "data": sorted(result["result"],
                                   key=lambda x: x["timestamp"]),
                    "statistics": {
                        "durations": durations_stat.to_dict(),
                        "atomics": atomics
                    },
                }
                task["subtasks"].append({"workloads": [workload]})
            return task
        else:
            raise FailedToLoadResults(source=task_id, msg="Wrong format")
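
In the conversion above, updated_at is derived by adding full_duration seconds to the created_at timestamp before reformatting it. A small sketch of that step with hypothetical values:

# Sketch of the updated_at computation from the example above, using
# hypothetical values and an explicit ISO 8601 format string in place of
# consts.TimeFormat.ISO8601.
import datetime as dt

created_at = "2020-01-01T10:00:00"
full_duration = 125.0

updated_at = dt.datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%S")
updated_at += dt.timedelta(seconds=full_duration)
print(updated_at.strftime("%Y-%m-%dT%H:%M:%S"))
# -> 2020-01-01T10:02:05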
Example #42
0
    def _load_and_validate_task(self, api, task_file, args_file=None,
                                raw_args=None):
        """Load, render and validate tasks template from file with passed args.

        :param task_file: Path to file with input task
        :param raw_args: JSON or YAML representation of dict with args that
            will be used to render input task with jinja2
        :param args_file: Path to file with JSON or YAML representation
            of dict, that will be used to render input with jinja2. If both
            specified task_args and task_args_file they will be merged.
            raw_args has bigger priority so it will update values
            from args_file.
        :returns: Str with loaded and rendered task
        """

        print(cliutils.make_header("Preparing input task"))

        try:
            input_task = open(task_file).read()
        except IOError as err:
            raise FailedToLoadTask(
                source="--task",
                msg="Error reading %s: %s" % (task_file, err))

        task_dir = os.path.expanduser(os.path.dirname(task_file)) or "./"

        task_args = {}
        if args_file:
            try:
                task_args.update(yaml.safe_load(open(args_file).read()))
            except yaml.ParserError as e:
                raise FailedToLoadTask(
                    source="--task-args-file",
                    msg="File '%s' has to be YAML or JSON. Details:\n\n%s"
                    % (args_file, e))
            except IOError as err:
                raise FailedToLoadTask(
                    source="--task-args-file",
                    msg="Error reading %s: %s" % (args_file, err))

        if raw_args:
            try:
                data = yaml.safe_load(raw_args)
                if isinstance(data, (six.text_type, six.string_types)):
                    raise yaml.ParserError("String '%s' doesn't look like a "
                                           "dictionary." % raw_args)
                task_args.update(data)
            except yaml.ParserError as e:
                args = [keypair.split("=", 1)
                        for keypair in raw_args.split(",")]
                if len([a for a in args if len(a) != 1]) != len(args):
                    raise FailedToLoadTask(
                        source="--task-args",
                        msg="Value has to be YAML or JSON. Details:\n\n%s" % e)
                else:
                    task_args.update(dict(args))

        try:
            rendered_task = api.task.render_template(task_template=input_task,
                                                     template_dir=task_dir,
                                                     **task_args)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Failed to render task template.\n\n%s" % e)

        print("Task is:\n%s\n" % rendered_task.strip())
        try:
            parsed_task = yaml.safe_load(rendered_task)
        except Exception as e:
            raise FailedToLoadTask(
                source="--task",
                msg="Wrong format of rendered input task. It should be YAML or"
                    " JSON. Details:\n\n%s" % e)

        print("Task syntax is correct :)")
        return parsed_task