    def test_schema_is_valid(self,
                             mock_benchmark_engine__validate_config_semantic):
        scenarios = set()

        for dirname, dirnames, filenames in os.walk(self.samples_path):
            for filename in filenames:
                full_path = os.path.join(dirname, filename)

                # NOTE(hughsaunders): Skip non config files
                # (bug https://bugs.launchpad.net/rally/+bug/1314369)
                if not re.search("\.(ya?ml|json)$", filename, flags=re.I):
                    continue

                with open(full_path) as task_file:
                    try:
                        task_config = yaml.safe_load(
                            api.Task.render_template(task_file.read()))
                        eng = engine.BenchmarkEngine(task_config,
                                                     mock.MagicMock())
                        eng.validate()
                    except Exception:
                        print(traceback.format_exc())
                        self.fail("Invalid task file: %s" % full_path)
                    else:
                        scenarios.update(task_config.keys())

        missing = set(s.get_name() for s in scenario.Scenario.get_all())
        missing -= scenarios
        # check missing scenario is not from plugin
        missing = [s for s in list(missing)
                   if scenario.Scenario.get(s).__module__.startswith("rally")]
        self.assertEqual(missing, [],
                         "These scenarios don't have samples: %s" % missing)
Example 2
    def start(cls, deployment, config, task=None, abort_on_sla_failure=False):
        """Start a task.

        A task is a list of benchmarks that are run one by one; the results
        of each run are stored in the DB.

        :param deployment: UUID or name of the deployment
        :param config: a dict with a task configuration
        :param task: Task object. If None, it will be created
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """
        deployment = objects.Deployment.get(deployment)
        task = task or objects.Task(deployment_uuid=deployment["uuid"])

        if task.is_temporary:
            raise ValueError(
                _("Unable to run a temporary task. Please check your code."))

        LOG.info("Benchmark Task %s on Deployment %s" %
                 (task["uuid"], deployment["uuid"]))
        benchmark_engine = engine.BenchmarkEngine(
            config,
            task,
            admin=deployment["admin"],
            users=deployment["users"],
            abort_on_sla_failure=abort_on_sla_failure)

        try:
            benchmark_engine.run()
        except Exception:
            deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT)
            raise
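For context, a minimal usage sketch of this entry point, assuming it is
exposed as Rally's api.Task.start (the deployment name and task dict below
are hypothetical):

import yaml

from rally import api

# Hypothetical task configuration; any scenario known to Rally would do.
task_config = yaml.safe_load("""
Dummy.dummy:
  - runner:
      type: constant
      times: 1
      concurrency: 1
""")

# "my-deployment" is an assumed deployment name; a deployment UUID works too.
api.Task.start("my-deployment", task_config, abort_on_sla_failure=True)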
Example 3
    def test__prepare_context(self, mock_scenario_get):
        default_context = {"a": 1, "b": 2}
        mock_scenario_get.return_value._meta_get.return_value = (
            default_context)
        task = mock.MagicMock()
        name = "a.benchmark"
        context = {"b": 3, "c": 4}
        endpoint = mock.MagicMock()
        config = {
            "a.benchmark": [{
                "context": {
                    "context_a": {
                        "a": 1
                    }
                }
            }],
        }
        eng = engine.BenchmarkEngine(config, task)
        result = eng._prepare_context(context, name, endpoint)
        expected_context = copy.deepcopy(default_context)
        expected_context.setdefault("users", {})
        expected_context.update(context)
        expected_result = {
            "task": task,
            "admin": {
                "endpoint": endpoint
            },
            "scenario_name": name,
            "config": expected_context
        }
        self.assertEqual(result, expected_result)
        mock_scenario_get.assert_called_once_with(name)
        mock_scenario_get.return_value._meta_get.assert_called_once_with(
            "default_context")
Example 4
    def test__validate_config_semantic(
            self, mock_deployment_get,
            mock__validate_config_semantic_helper,
            mock_users_ctx, mock_clients):
        mock_users_ctx.UserGenerator = fakes.FakeUserContext
        mock_clients.return_value = mock.MagicMock()
        config = {
            "a": [mock.MagicMock(), mock.MagicMock()],
            "b": [mock.MagicMock()]
        }

        fake_task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, fake_task)

        eng.admin = "admin"

        eng._validate_config_semantic(config)

        expected_calls = [
            mock.call("admin"),
            mock.call(fakes.FakeUserContext.user["endpoint"])
        ]
        mock_clients.assert_has_calls(expected_calls)

        mock_deployment_get.assert_called_once_with(fake_task["uuid"])

        admin = user = mock_clients.return_value
        fake_deployment = mock_deployment_get.return_value
        expected_calls = [
            mock.call(admin, user, "a", 0, fake_deployment, config["a"][0]),
            mock.call(admin, user, "a", 1, fake_deployment, config["a"][1]),
            mock.call(admin, user, "b", 0, fake_deployment, config["b"][0])
        ]
        mock__validate_config_semantic_helper.assert_has_calls(
            expected_calls, any_order=True)
Example 5
    def test__prepare_context_with_existing_users(self, mock_scenario_get):
        mock_scenario_get.return_value._meta_get.return_value = {}
        task = mock.MagicMock()
        name = "a.benchmark"
        context = {"b": 3, "c": 4}
        endpoint = mock.MagicMock()
        config = {
            "a.benchmark": [{
                "context": {
                    "context_a": {
                        "a": 1
                    }
                }
            }],
        }
        existing_users = [mock.MagicMock()]
        eng = engine.BenchmarkEngine(config, task, users=existing_users)
        result = eng._prepare_context(context, name, endpoint)
        expected_context = {"existing_users": existing_users}
        expected_context.update(context)
        expected_result = {
            "task": task,
            "admin": {
                "endpoint": endpoint
            },
            "scenario_name": name,
            "config": expected_context
        }
        self.assertEqual(result, expected_result)
        mock_scenario_get.assert_called_once_with(name)
        mock_scenario_get.return_value._meta_get.assert_called_once_with(
            "default_context")
Example 6
    def test_schema_is_valid(self,
                             mock_benchmark_engine__validate_config_semantic):
        scenarios = set()

        for dirname, dirnames, filenames in os.walk(self.samples_path):
            for filename in filenames:
                full_path = os.path.join(dirname, filename)

                # NOTE(hughsaunders): Skip non config files
                # (bug https://bugs.launchpad.net/rally/+bug/1314369)
                if not re.search("\.(ya?ml|json)$", filename, flags=re.I):
                    continue

                with open(full_path) as task_file:
                    try:
                        task_config = yaml.safe_load(task_file.read())
                        eng = engine.BenchmarkEngine(task_config,
                                                     mock.MagicMock())
                        eng.validate()
                    except Exception:
                        print(traceback.format_exc())
                        self.fail("Invalid task file: %s" % full_path)
                    else:
                        scenarios.update(task_config.keys())

        # TODO(boris-42): We should refactor the scenarios framework and add
        #                 "_" to all non-benchmark methods. Then this test
        #                 will pass.
        missing = set(scenario.Scenario.list_benchmark_scenarios()) - scenarios
        # check missing scenario is not from plugin
        missing = [
            s for s in list(missing) if scenario.Scenario.get_by_name(
                s.split(".")[0]).__module__.startswith("rally")
        ]
        self.assertEqual(missing, [],
                         "These scenarios don't have samples: %s" % missing)
Example 7
    def test_schema_is_valid(self,
                             mock_benchmark_engine__validate_config_semantic):
        discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))

        for filename in [
                "rally.yaml", "rally-neutron.yaml", "rally-zaqar.yaml",
                "rally-designate.yaml"
        ]:
            full_path = os.path.join(self.rally_jobs_path, filename)

            with open(full_path) as task_file:
                try:
                    args_file = os.path.join(
                        self.rally_jobs_path,
                        filename.rsplit(".", 1)[0] + "_args.yaml")

                    args = {}
                    if os.path.exists(args_file):
                        with open(args_file) as f:
                            args = yaml.safe_load(f.read())
                        if not isinstance(args, dict):
                            raise TypeError(
                                "args file %s must be a dict in YAML or "
                                "JSON representation" % args_file)

                    task = api.Task.render_template(task_file.read(), **args)
                    task = yaml.safe_load(task)

                    eng = engine.BenchmarkEngine(task, mock.MagicMock())
                    eng.validate()
                except Exception:
                    print(traceback.format_exc())
                    self.fail("Wrong task input file: %s" % full_path)
Example 8
    def test_run_exception_is_logged(self, mock_context_manager_setup,
                                     mock_context_manager_cleanup,
                                     mock_scenario_runner, mock_scenario,
                                     mock_result_consumer, mock_log,
                                     mock_task_get_status):

        mock_context_manager_setup.side_effect = Exception

        mock_result_consumer.is_task_in_aborting_status.return_value = False
        config = {
            "a.benchmark": [{
                "context": {
                    "context_a": {
                        "a": 1
                    }
                }
            }],
            "b.benchmark": [{
                "context": {
                    "context_b": {
                        "b": 2
                    }
                }
            }]
        }
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task)

        eng.run()

        self.assertEqual(2, mock_log.exception.call_count)
Example 9
    def test_run__task_aborted(self, mock_scenario_runner, mock_scenario,
                               mock_context_manager_setup,
                               mock_context_manager_cleanup,
                               mock_result_consumer, mock_task_get_status):
        task = mock.MagicMock()
        config = {
            "a.benchmark": [{
                "runner": {
                    "type": "a",
                    "b": 1
                }
            }],
            "b.benchmark": [{
                "runner": {
                    "type": "a",
                    "b": 1
                }
            }],
            "c.benchmark": [{
                "runner": {
                    "type": "a",
                    "b": 1
                }
            }]
        }
        fake_runner_cls = mock.MagicMock()
        fake_runner = mock.MagicMock()
        fake_runner_cls.return_value = fake_runner
        mock_task_get_status.return_value = consts.TaskStatus.SOFT_ABORTING
        mock_scenario_runner.get.return_value = fake_runner_cls
        eng = engine.BenchmarkEngine(config, task)
        eng.run()
        self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
                         task.update_status.mock_calls[-1])
Example 10
    def test_run__config_has_context(self, mock_context_manager_setup,
                                     mock_context_manager_cleanup,
                                     mock_scenario_runner, mock_scenario,
                                     mock_result_consumer,
                                     mock_task_get_status):
        config = {
            "a.benchmark": [{
                "context": {
                    "context_a": {
                        "a": 1
                    }
                }
            }],
            "b.benchmark": [{
                "context": {
                    "context_b": {
                        "b": 2
                    }
                }
            }]
        }
        task = mock.MagicMock()
        mock_task_get_status.return_value = consts.TaskStatus.RUNNING
        eng = engine.BenchmarkEngine(config, task)
        eng.run()
Example 11
    def test__validate_config_semantic_helper_invalid_arg(
            self, mock_scenario_validate):
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        self.assertRaises(exceptions.InvalidBenchmarkConfig,
                          eng._validate_config_semantic_helper, "a", "u", "n",
                          "p", mock.MagicMock(), {})
Example 12
    def start(cls, deployment, config, task=None, abort_on_sla_failure=False):
        """Start a task.

        A task is a list of benchmarks that are run one by one; the results
        of each run are stored in the DB.

        :param deployment: UUID or name of the deployment
        :param config: a dict with a task configuration
        :param task: Task object. If None, it will be created
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """
        deployment = objects.Deployment.get(deployment)
        task = task or objects.Task(deployment_uuid=deployment["uuid"])
        LOG.info("Benchmark Task %s on Deployment %s" %
                 (task["uuid"], deployment["uuid"]))
        benchmark_engine = engine.BenchmarkEngine(
            config,
            task,
            admin=deployment["admin"],
            users=deployment["users"],
            abort_on_sla_failure=abort_on_sla_failure)

        try:
            benchmark_engine.validate()
            benchmark_engine.run()
        except exceptions.InvalidTaskException:
            # NOTE(boris-42): We don't log anything, because it's a normal
            #                 situation when a user puts a wrong config.
            pass
        except Exception:
            deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT)
            raise
Example 13
    def test__validate_config_scenarios_name_non_existing(
            self, mock_scenario):
        config = {"exist": [], "nonexist1": [], "nonexist2": []}
        mock_scenario.list_benchmark_scenarios.return_value = ["exist", "aaa"]
        eng = engine.BenchmarkEngine(config, mock.MagicMock())

        self.assertRaises(exceptions.NotFoundScenarios,
                          eng._validate_config_scenarios_name, config)
Example 14
    def test_init(self, mock_task_config):
        config = mock.MagicMock()
        task = mock.MagicMock()
        mock_task_config.return_value = fake_task_instance = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task)
        mock_task_config.assert_has_calls([mock.call(config)])
        self.assertEqual(eng.config, fake_task_instance)
        self.assertEqual(eng.task, task)
Example 15
    def test_validate__wrong_scenarios_name(self, mock_validate):
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(mock.MagicMock(), task)
        eng._validate_config_scenarios_name = mock.MagicMock(
            side_effect=exceptions.NotFoundScenarios)

        self.assertRaises(exceptions.InvalidTaskException, eng.validate)
        self.assertTrue(task.set_failed.called)
Example 16
    def test__validate_config_semantic_helper(self, mock_scenario_validate):
        deployment = mock.MagicMock()
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
        eng._validate_config_semantic_helper("admin", "user", "name", "pos",
                                             deployment, {"args": "args"})
        mock_scenario_validate.assert_called_once_with(
            "name", {"args": "args"}, admin="admin", users=["user"],
            deployment=deployment)
Example 17
    def test__validate_config_scenarios_name(self, mock_scenario):
        config = {
            "a": [],
            "b": []
        }
        mock_scenario.list_benchmark_scenarios.return_value = ["e", "b", "a"]
        eng = engine.BenchmarkEngine(config, mock.MagicMock())
        eng._validate_config_scenarios_name(config)
Example 18
    def test__validate_config_syntax__wrong_context(
            self, mock_context_manager, mock_scenario_runner_validate):
        config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        mock_context_manager.validate = mock.MagicMock(
            side_effect=jsonschema.ValidationError("a"))
        self.assertRaises(exceptions.InvalidBenchmarkConfig,
                          eng._validate_config_syntax, config)
Example 19
    def test_validate__wrong_schema(self):
        config = {
            "wrong": True
        }
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task)
        self.assertRaises(exceptions.InvalidTaskException,
                          eng.validate)
        self.assertTrue(task.set_failed.called)
Example 20
    def test_validate__wrong_semantic(self, mock_task_config):
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(mock.MagicMock(), task)
        eng._validate_config_scenarios_name = mock.MagicMock()
        eng._validate_config_syntax = mock.MagicMock()
        eng._validate_config_semantic = mock.MagicMock(
            side_effect=exceptions.InvalidBenchmarkConfig)

        self.assertRaises(exceptions.InvalidTaskException, eng.validate)
        self.assertTrue(task.set_failed.called)
Example 21
    def test_run__config_has_context(
            self, mock_context_manager_setup, mock_context_manager_cleanup,
            mock_scenario_runner, mock_scenario, mock_result_consumer):
        config = {
            "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
            "b.benchmark": [{"context": {"context_b": {"b": 2}}}]
        }
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task)
        eng.run()
Example 22
    def test__validate_config_scenarios_name(self, mock_scenario_get_all):
        config = {"a": [], "b": []}

        mock_scenario_get_all.return_value = [
            mock.MagicMock(get_name=lambda: "e"),
            mock.MagicMock(get_name=lambda: "b"),
            mock.MagicMock(get_name=lambda: "a")
        ]
        eng = engine.BenchmarkEngine(config, mock.MagicMock())
        eng._validate_config_scenarios_name(config)
Example 23
    def test__validate_config_syntax(
            self, mock_context_manager_validate,
            mock_scenario_runner_validate):
        config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
        eng._validate_config_syntax(config)
        mock_scenario_runner_validate.assert_has_calls(
            [mock.call({}), mock.call("b")], any_order=True)
        mock_context_manager_validate.assert_has_calls(
            [mock.call("a", non_hidden=True), mock.call({}, non_hidden=True)],
            any_order=True)
Example 24
    def test_run__update_status(
            self, mock_scenario_runner, mock_scenario,
            mock_context_manager_setup, mock_context_manager_cleanup,
            mock_result_consumer):
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine([], task)
        eng.run()
        task.update_status.assert_has_calls([
            mock.call(consts.TaskStatus.RUNNING),
            mock.call(consts.TaskStatus.FINISHED)
        ])
Example 25
    def validate(cls, deployment, config):
        """Validate a task config against specified deployment.

        :param deployment: UUID or name of the deployment
        :param config: a dict with a task configuration
        """
        deployment = objects.Deployment.get(deployment)
        task = objects.Task(deployment_uuid=deployment["uuid"], fake=True)
        benchmark_engine = engine.BenchmarkEngine(config,
                                                  task,
                                                  admin=deployment["admin"],
                                                  users=deployment["users"])
        benchmark_engine.validate()
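A hedged usage sketch: the same task config can be checked against a live
deployment without running it, assuming this classmethod is exposed as
api.Task.validate ("my-deployment" and task_config are hypothetical):

from rally import api

# Raises exceptions.InvalidTaskException if the config fails schema,
# scenario-name, syntax, or semantic validation.
api.Task.validate("my-deployment", task_config)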
Example 26
    def test_get_user_ctx_for_validation_existing_users(
            self, mock_existing_users):

        context = {"a": 10}
        users = [mock.MagicMock(), mock.MagicMock()]

        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock(),
                                     users=users)

        result = eng._get_user_ctx_for_validation(context)

        self.assertEqual(context["config"]["existing_users"], users)
        mock_existing_users.assert_called_once_with(context)

        self.assertEqual(mock_existing_users.return_value, result)
Example 27
    def test__validate_config_syntax__wrong_context(
            self, mock_context_manager, mock_scenario_runner_validate,
            mock_task_config):
        mock_task_instance = mock.MagicMock()
        mock_subtask = mock.MagicMock()
        mock_subtask.scenarios = [
            {"name": "sca", "context": "a"},
            {"name": "scb", "runner": "b"}
        ]
        mock_task_instance.subtasks = [mock_subtask]
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        mock_context_manager.validate = mock.MagicMock(
            side_effect=jsonschema.ValidationError("a"))
        self.assertRaises(exceptions.InvalidBenchmarkConfig,
                          eng._validate_config_syntax, mock_task_instance)
Example 28
    def test__validate_config_semantic(
            self, mock_deployment_get,
            mock__validate_config_semantic_helper,
            mock_users_ctx, mock_clients, mock_task_config):
        mock_users_ctx.UserGenerator = fakes.FakeUserContext
        mock_clients.return_value = mock.MagicMock()

        mock_task_instance = mock.MagicMock()
        mock_subtask1 = mock.MagicMock()
        mock_subtask1.scenarios = [
            {"name": "a", "kw": 0},
            {"name": "a", "kw": 1}
        ]

        mock_subtask2 = mock.MagicMock()
        mock_subtask2.scenarios = [
            {"name": "b", "kw": 0},
        ]

        mock_task_instance.subtasks = [mock_subtask1, mock_subtask2]
        fake_task = mock.MagicMock()
        eng = engine.BenchmarkEngine(mock_task_instance, fake_task)

        eng.admin = "admin"

        eng._validate_config_semantic(mock_task_instance)

        expected_calls = [
            mock.call("admin"),
            mock.call(fakes.FakeUserContext.user["endpoint"])
        ]
        mock_clients.assert_has_calls(expected_calls)

        mock_deployment_get.assert_called_once_with(fake_task["uuid"])

        admin = user = mock_clients.return_value
        fake_deployment = mock_deployment_get.return_value
        expected_calls = [
            mock.call(admin, user, "a", 0, fake_deployment,
                      {"name": "a", "kw": 0}),
            mock.call(admin, user, "a", 1, fake_deployment,
                      {"name": "a", "kw": 1}),
            mock.call(admin, user, "b", 0, fake_deployment,
                      {"name": "b", "kw": 0})
        ]
        mock__validate_config_semantic_helper.assert_has_calls(
            expected_calls, any_order=True)
Example 29
    def test__validate_config_scenarios_name_non_existing(
            self, mock_scenario, mock_task_config):

        mock_task_instance = mock.MagicMock()
        mock_subtask = mock.MagicMock()
        mock_subtask.scenarios = [
            {"name": "exist"},
            {"name": "nonexist1"},
            {"name": "nonexist2"}
        ]
        mock_task_instance.subtasks = [mock_subtask]
        mock_scenario.list_benchmark_scenarios.return_value = ["exist", "aaa"]
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        self.assertRaises(exceptions.NotFoundScenarios,
                          eng._validate_config_scenarios_name,
                          mock_task_instance)
Example 30
    def test_validate(self, mock_validate):
        config = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, mock.MagicMock())
        # Deliberately shadow the decorator-injected mock with a fresh parent
        # mock so the three validation stages record their calls on it.
        mock_validate = mock.MagicMock()

        eng._validate_config_scenarios_name = mock_validate.names
        eng._validate_config_syntax = mock_validate.syntax
        eng._validate_config_semantic = mock_validate.semantic

        eng.validate()

        expected_calls = [
            mock.call.names(config),
            mock.call.syntax(config),
            mock.call.semantic(config)
        ]
        mock_validate.assert_has_calls(expected_calls)