Example 1
 def test_consume_results_sla_failure_continue(self, mock_sla):
     mock_sla_instance = mock.MagicMock()
     mock_sla.return_value = mock_sla_instance
     mock_sla_instance.add_iteration.side_effect = [
         True, True, False, False
     ]
     key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
     task = mock.MagicMock()
     config = {
         "a.benchmark": [{
             "context": {
                 "context_a": {
                     "a": 1
                 }
             }
         }],
     }
     runner = mock.MagicMock()
     runner.result_queue = collections.deque([1, 2, 3, 4])
     is_done = mock.MagicMock()
     is_done.isSet.side_effect = [False, False, False, False, True]
     eng = engine.BenchmarkEngine(config, task, abort_on_sla_failure=False)
     eng.duration = 123
     eng.full_duration = 456
     eng.consume_results(key, task, is_done, {}, runner)
     mock_sla.assert_called_once_with({"fake": 2})
     self.assertEqual(0, runner.abort.call_count)
Example 2
 def test__prepare_context(self, mock_meta):
     default_context = {"a": 1, "b": 2}
     mock_meta.return_value = default_context
     task = mock.MagicMock()
     name = "a.benchmark"
     context = {"b": 3, "c": 4}
     endpoint = mock.MagicMock()
     config = {
         "a.benchmark": [{
             "context": {
                 "context_a": {
                     "a": 1
                 }
             }
         }],
     }
     eng = engine.BenchmarkEngine(config, task)
     result = eng._prepare_context(context, name, endpoint)
     expected_context = copy.deepcopy(default_context)
     expected_context.setdefault("users", {})
     expected_context.update(context)
     expected_result = {
         "task": task,
         "admin": {
             "endpoint": endpoint
         },
         "scenario_name": name,
         "config": expected_context
     }
     self.assertEqual(result, expected_result)
     mock_meta.assert_called_once_with(name, "context")
Example 3
 def test__prepare_context_with_existing_users(self, mock_meta):
     mock_meta.return_value = {}
     task = mock.MagicMock()
     name = "a.benchmark"
     context = {"b": 3, "c": 4}
     endpoint = mock.MagicMock()
     config = {
         "a.benchmark": [{
             "context": {
                 "context_a": {
                     "a": 1
                 }
             }
         }],
     }
     existing_users = [mock.MagicMock()]
     eng = engine.BenchmarkEngine(config, task, users=existing_users)
     result = eng._prepare_context(context, name, endpoint)
     expected_context = {"existing_users": existing_users}
     expected_context.update(context)
     expected_result = {
         "task": task,
         "admin": {
             "endpoint": endpoint
         },
         "scenario_name": name,
         "config": expected_context
     }
     self.assertEqual(result, expected_result)
     mock_meta.assert_called_once_with(name, "context")
Example 4
 def test_consume_results(self, mock_sla):
     mock_sla_instance = mock.MagicMock()
     mock_sla.return_value = mock_sla_instance
     key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
     task = mock.MagicMock()
     config = {
         "a.benchmark": [{
             "context": {
                 "context_a": {
                     "a": 1
                 }
             }
         }],
     }
     runner = mock.MagicMock()
     runner.result_queue = collections.deque([1, 2])
     is_done = mock.MagicMock()
     is_done.isSet.side_effect = [False, False, True]
     eng = engine.BenchmarkEngine(config, task)
     eng.duration = 123
     eng.full_duration = 456
     eng.consume_results(key, task, is_done, {}, runner)
     mock_sla.assert_called_once_with({"fake": 2})
     expected_iteration_calls = [mock.call(1), mock.call(2)]
     self.assertEqual(expected_iteration_calls,
                      mock_sla_instance.add_iteration.mock_calls)
Example 5
    def test_run_exception_is_logged(self, mock_ctx_setup, mock_ctx_cleanup,
                                     mock_runner, mock_scenario, mock_consume,
                                     mock_log):

        mock_ctx_setup.side_effect = Exception

        config = {
            "a.benchmark": [{
                "context": {
                    "context_a": {
                        "a": 1
                    }
                }
            }],
            "b.benchmark": [{
                "context": {
                    "context_b": {
                        "b": 2
                    }
                }
            }]
        }
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, task)
        eng.run()

        self.assertEqual(2, mock_log.exception.call_count)
Example 6
    def test_schema_is_valid(self,
            mock_benchmark_engine__validate_config_semantic):
        scenarios = set()

        for dirname, dirnames, filenames in os.walk(self.samples_path):
            for filename in filenames:
                full_path = os.path.join(dirname, filename)

                # NOTE(hughsaunders): Skip non-config files
                # (bug https://bugs.launchpad.net/rally/+bug/1314369)
                if not re.search(r"\.(ya?ml|json)$", filename, flags=re.I):
                    continue

                with open(full_path) as task_file:
                    try:
                        task_config = yaml.safe_load(task_file.read())
                        eng = engine.BenchmarkEngine(task_config,
                                                     mock.MagicMock())
                        eng.validate()
                    except Exception:
                        print(traceback.format_exc())
                        self.fail("Wrong task config %s" % full_path)
                    else:
                        scenarios.update(task_config.keys())

        # TODO(boris-42): We should refactor the scenarios framework and add
        #                 "_" to all non-benchmark methods. Then this test
        #                 will pass.
        missing = set(base.Scenario.list_benchmark_scenarios()) - scenarios
        # check that the missing scenarios do not come from plugins
        missing = [scenario for scenario in list(missing) if
                   base.Scenario.get_by_name(scenario.split(".")[0]).
                   __module__.startswith("rally")]
        self.assertEqual(missing, [],
                         "These scenarios don't have samples: %s" % missing)
Example 7
    def test__validate_config_semantic(self, mock_deployment_get, mock_helper,
                                       mock_userctx, mock_osclients):
        mock_userctx.UserGenerator = fakes.FakeUserContext
        mock_osclients.return_value = mock.MagicMock()
        config = {
            "a": [mock.MagicMock(), mock.MagicMock()],
            "b": [mock.MagicMock()]
        }

        fake_task = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, fake_task)

        eng.admin = "admin"

        eng._validate_config_semantic(config)

        expected_calls = [
            mock.call("admin"),
            mock.call(fakes.FakeUserContext.user["endpoint"])
        ]
        mock_osclients.assert_has_calls(expected_calls)

        mock_deployment_get.assert_called_once_with(fake_task["uuid"])

        admin = user = mock_osclients.return_value
        fake_deployment = mock_deployment_get.return_value
        expected_calls = [
            mock.call(admin, user, "a", 0, fake_deployment, config["a"][0]),
            mock.call(admin, user, "a", 1, fake_deployment, config["a"][1]),
            mock.call(admin, user, "b", 0, fake_deployment, config["b"][0])
        ]
        mock_helper.assert_has_calls(expected_calls, any_order=True)
Example 8
def start_task(deploy_uuid, config, task=None):
    """Start a task.

    Task is a list of benchmarks that will be called one by one, results of
    execution will be stored in DB.

    :param deploy_uuid: UUID of the deployment
    :param config: a dict with a task configuration
    :param task: Task object. If None, it will be created
    """
    deployment = objects.Deployment.get(deploy_uuid)
    task = task or objects.Task(deployment_uuid=deploy_uuid)
    LOG.info("Benchmark Task %s on Deployment %s" %
             (task["uuid"], deployment["uuid"]))
    benchmark_engine = engine.BenchmarkEngine(config, task)
    endpoint = deployment['endpoints']

    try:
        benchmark_engine.bind(endpoint)
        benchmark_engine.validate()
        benchmark_engine.run()
    except exceptions.InvalidTaskException:
        # NOTE(boris-42): We don't log anything, because it's a normal
        #                 situation when a user puts a wrong config.
        pass
    except Exception:
        deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT)
        raise
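A minimal usage sketch for start_task above (no import needed, since it is a
module-level function in the same file); the deployment UUID and the task
config are illustrative assumptions, not values taken from these examples:

    # Hypothetical inputs: any existing deployment UUID and any valid task
    # config dict would do ("Dummy.dummy" is Rally's no-op scenario).
    deploy_uuid = "6fd6f69f-0000-0000-0000-000000000000"
    config = {
        "Dummy.dummy": [{
            "runner": {"type": "serial", "times": 1}
        }]
    }
    # Creates a Task record, binds the deployment endpoints, validates the
    # config and runs the benchmarks one by one, storing results in the DB.
    start_task(deploy_uuid, config)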
Example 9
    def test_schema_is_valid(self, mock_semantic):
        samples_path = os.path.join(os.path.dirname(__file__), "..", "..",
                                    "doc", "samples", "tasks")

        scenarios = set()

        for dirname, dirnames, filenames in os.walk(samples_path):
            for filename in filenames:
                full_path = os.path.join(dirname, filename)

                with open(full_path) as task_file:
                    try:
                        task_config = yaml.safe_load(task_file.read())
                        eng = engine.BenchmarkEngine(task_config,
                                                     mock.MagicMock())
                        eng.validate()
                    except Exception:
                        print(traceback.format_exc())
                        self.fail("Wrong task config %s" % full_path)
                    else:
                        scenarios.update(task_config.keys())

        # TODO(boris-42): We should refactor the scenarios framework and add
        #                 "_" to all non-benchmark methods. Then this test
        #                 will pass.
        missing = set(base.Scenario.list_benchmark_scenarios()) - scenarios
        self.assertEqual(missing, set([]),
                         "These scenarios don't have samples: %s" % missing)
Example 10
    def test_schema_is_valid(
            self, mock_benchmark_engine__validate_config_semantic):
        discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))

        for filename in ["rally.yaml", "rally-neutron.yaml",
                         "rally-zaqar.yaml", "rally-designate.yaml"]:
            full_path = os.path.join(self.rally_jobs_path, filename)

            with open(full_path) as task_file:
                try:
                    args_file = os.path.join(
                        self.rally_jobs_path,
                        filename.rsplit(".", 1)[0] + "_args.yaml")

                    args = {}
                    if os.path.exists(args_file):
                        with open(args_file) as args_f:
                            args = yaml.safe_load(args_f.read())
                        if not isinstance(args, dict):
                            raise TypeError(
                                "args file %s must be dict in yaml or json "
                                "presentation" % args_file)

                    task = api.Task.render_template(task_file.read(), **args)
                    task = yaml.safe_load(task)

                    eng = engine.BenchmarkEngine(task, mock.MagicMock())
                    eng.validate()
                except Exception:
                    print(traceback.format_exc())
                    self.fail("Wrong task input file: %s" % full_path)
Example 11
    def start(cls, deployment, config, task=None, abort_on_sla_failure=False):
        """Start a task.

        Task is a list of benchmarks that will be called one by one, results of
        execution will be stored in DB.

        :param deployment: UUID or name of the deployment
        :param config: a dict with a task configuration
        :param task: Task object. If None, it will be created
        :param abort_on_sla_failure: if True, the execution of a benchmark
                                     scenario will stop when any SLA check
                                     for it fails
        """
        deployment = objects.Deployment.get(deployment)
        task = task or objects.Task(deployment_uuid=deployment["uuid"])
        LOG.info("Benchmark Task %s on Deployment %s" %
                 (task["uuid"], deployment["uuid"]))
        benchmark_engine = engine.BenchmarkEngine(
            config,
            task,
            admin=deployment["admin"],
            users=deployment["users"],
            abort_on_sla_failure=abort_on_sla_failure)

        try:
            benchmark_engine.validate()
            benchmark_engine.run()
        except exceptions.InvalidTaskException:
            # NOTE(boris-42): We don't log anything, because it's a normal
            #                 situation when a user puts a wrong config.
            pass
        except Exception:
            deployment.update_status(consts.DeployStatus.DEPLOY_INCONSISTENT)
            raise
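A matching sketch for the start classmethod, assuming it is exposed as
api.Task.start (consistent with the api.Task.render_template call in
Example 10) and that a deployment named "devstack" exists; both names are
illustrative:

    from rally import api  # assumed import path for the Task class above

    task_config = {
        "Dummy.dummy": [{
            "runner": {"type": "constant", "times": 10, "concurrency": 2}
        }]
    }
    # With abort_on_sla_failure=True, a scenario's run stops as soon as any
    # of its SLA checks fails.
    api.Task.start("devstack", task_config, abort_on_sla_failure=True)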
Example 12
    def test__validate_config_semantic_helper_invalid_arg(self, mock_validate):
        mock_validate.side_effect = exceptions.InvalidScenarioArgument()
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        self.assertRaises(exceptions.InvalidBenchmarkConfig,
                          eng._validate_config_semantic_helper, "a", "u", "n",
                          "p", mock.MagicMock(), {})
Example 13
    def test_validate__wrong_scenarios_name(self, mock_validate):
        task = mock.MagicMock()
        eng = engine.BenchmarkEngine(mock.MagicMock(), task)
        eng._validate_config_scenarios_name = mock.MagicMock(
            side_effect=exceptions.NotFoundScenarios)

        self.assertRaises(exceptions.InvalidTaskException, eng.validate)
        self.assertTrue(task.set_failed.called)
Example 14
 def test_run__update_status(self, mock_consume):
     task = mock.MagicMock()
     eng = engine.BenchmarkEngine([], task)
     eng.run()
     task.update_status.assert_has_calls([
         mock.call(consts.TaskStatus.RUNNING),
         mock.call(consts.TaskStatus.FINISHED)
     ])
Example 15
 def test__validate_config_syntax(self, mock_context, mock_runner):
     config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
     eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
     eng._validate_config_syntax(config)
     mock_runner.assert_has_calls([mock.call({}), mock.call("b")])
     mock_context.assert_has_calls(
         [mock.call("a", non_hidden=True),
          mock.call({}, non_hidden=True)])
Example 16
    def test__validate_config_scenarios_name_non_existing(
            self, mock_scenario):
        config = {"exist": [], "nonexist1": [], "nonexist2": []}
        mock_scenario.list_benchmark_scenarios.return_value = ["exist", "aaa"]
        eng = engine.BenchmarkEngine(config, mock.MagicMock())

        self.assertRaises(exceptions.NotFoundScenarios,
                          eng._validate_config_scenarios_name, config)
Example 17
 def test_run__update_status(self):
     task = mock.MagicMock()
     eng = engine.BenchmarkEngine([], task)
     results = eng.run()
     self.assertEqual(results, {})
     task.update_status.assert_has_calls([
         mock.call(consts.TaskStatus.RUNNING),
         mock.call(consts.TaskStatus.FINISHED)
     ])
Example 18
 def test__validate_config_semantic_helper(self, mock_validate):
     deployment = mock.MagicMock()
     eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
     eng._validate_config_semantic_helper("admin", "user", "name", "pos",
                                          deployment, {"args": "args"})
     mock_validate.assert_called_once_with("name", {"args": "args"},
                                           admin="admin",
                                           users=["user"],
                                           deployment=deployment)
Example 19
    def test__validate_config_syntax__wrong_context(self, mock_context,
                                                    mock_runner):
        config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
        eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())

        mock_context.validate = mock.MagicMock(
            side_effect=jsonschema.ValidationError("a"))
        self.assertRaises(exceptions.InvalidBenchmarkConfig,
                          eng._validate_config_syntax, config)
Example 20
 def test_run__update_status(self, mock_endpoint, mock_osclients,
                             mock_runner, mock_scenario, mock_setup,
                             mock_cleanup, mock_consume):
     task = mock.MagicMock()
     eng = engine.BenchmarkEngine([], task)
     eng.run()
     task.update_status.assert_has_calls([
         mock.call(consts.TaskStatus.RUNNING),
         mock.call(consts.TaskStatus.FINISHED)
     ])
Example 21
def task_validate(deploy_uuid, config):
    """Validate a task config against specified deployment.

    :param deploy_uuid: UUID of the deployment
    :param config: a dict with a task configuration
    """
    deployment = objects.Deployment.get(deploy_uuid)
    task = objects.Task(deployment_uuid=deploy_uuid)
    benchmark_engine = engine.BenchmarkEngine(config, task)
    benchmark_engine.bind(admin=deployment["admin"],
                          users=deployment["users"])
    benchmark_engine.validate()
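task_validate can serve as a dry run before start_task; a short sketch
reusing the assumed inputs from the start_task example above:

    # eng.validate() raises exceptions.InvalidTaskException on a bad config
    # (see Example 13), and task_validate lets that exception propagate.
    task_validate(deploy_uuid, config)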
Example 22
 def test_run__update_status(self, mock_scenario_runner, mock_scenario,
                             mock_context_manager_setup,
                             mock_context_manager_cleanup,
                             mock_consume_results):
     task = mock.MagicMock()
     eng = engine.BenchmarkEngine([], task)
     eng.run()
     task.update_status.assert_has_calls([
         mock.call(consts.TaskStatus.RUNNING),
         mock.call(consts.TaskStatus.FINISHED)
     ])
Example 23
 def test__validate_config_syntax(self, mock_context_manager_validate,
                                  mock_scenario_runner_validate):
     config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
     eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
     eng._validate_config_syntax(config)
     mock_scenario_runner_validate.assert_has_calls(
         [mock.call({}), mock.call("b")], any_order=True)
     mock_context_manager_validate.assert_has_calls(
         [mock.call("a", non_hidden=True),
          mock.call({}, non_hidden=True)],
         any_order=True)
Example 24
    def validate(cls, deployment, config):
        """Validate a task config against specified deployment.

        :param deployment: UUID or name of the deployment
        :param config: a dict with a task configuration
        """
        deployment = objects.Deployment.get(deployment)
        task = objects.Task(deployment_uuid=deployment["uuid"], fake=True)
        benchmark_engine = engine.BenchmarkEngine(config,
                                                  task,
                                                  admin=deployment["admin"],
                                                  users=deployment["users"])
        benchmark_engine.validate()
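And the classmethod variant, again assuming the api.Task entry point; the
try/except mirrors how validate surfaces failures in the tests above:

    from rally import api, exceptions  # assumed import paths

    try:
        api.Task.validate("devstack", task_config)
    except exceptions.InvalidTaskException as e:
        # validate() wraps scenario-name, syntax and semantic failures into
        # InvalidTaskException (see Examples 13 and 27).
        print("Invalid task config: %s" % e)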
Example 25
    def test_schema_is_valid(self, mock_validate):
        rutils.load_plugins(os.path.join(self.rally_scenarios_path, "plugins"))

        for filename in ["rally.yaml", "rally-neutron.yaml"]:
            full_path = os.path.join(self.rally_scenarios_path, filename)

            with open(full_path) as task_file:
                try:
                    task_config = yaml.safe_load(task_file.read())
                    eng = engine.BenchmarkEngine(task_config,
                                                 mock.MagicMock())
                    eng.validate()
                except Exception:
                    print(traceback.format_exc())
                    self.fail("Wrong scenario config %s" % full_path)
Example 26
    def test_get_user_ctx_for_validation_existing_users(self, mock_users_ctx):

        context = {"a": 10}
        users = [mock.MagicMock(), mock.MagicMock()]

        eng = engine.BenchmarkEngine(mock.MagicMock(),
                                     mock.MagicMock(),
                                     users=users)

        result = eng._get_user_ctx_for_validation(context)

        self.assertEqual(context["config"]["existing_users"], users)
        mock_users_ctx.assert_called_once_with(context)

        self.assertEqual(mock_users_ctx.return_value, result)
Example 27
    def test_validate(self, mock_json_validate):
        config = mock.MagicMock()
        eng = engine.BenchmarkEngine(config, mock.MagicMock())
        mock_validate = mock.MagicMock()

        eng._validate_config_scenarios_name = mock_validate.names
        eng._validate_config_syntax = mock_validate.syntax
        eng._validate_config_semantic = mock_validate.semantic

        eng.validate()

        expected_calls = [
            mock.call.names(config),
            mock.call.syntax(config),
            mock.call.semantic(config)
        ]
        mock_validate.assert_has_calls(expected_calls)
Example 28
 def test_consume_results(self, mock_check_all):
     key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
     task = mock.MagicMock()
     config = {
         "a.benchmark": [{
             "context": {
                 "context_a": {
                     "a": 1
                 }
             }
         }],
     }
     is_done = mock.MagicMock()
     is_done.isSet.side_effect = [False, False, True]
     eng = engine.BenchmarkEngine(config, task)
     eng.duration = 1
     eng.consume_results(key, task, collections.deque([1, 2]), is_done)
     mock_check_all.assert_called_once_with({"fake": 2}, [1, 2])
Example 29
 def test_run__config_has_runner(self, mock_endpoint, mock_osclients,
                                 mock_runner):
     config = {
         "a.args": [{
             "runner": {
                 "type": "a",
                 "b": 1
             }
         }],
         "b.args": [{
             "runner": {
                 "a": 1
             }
         }]
     }
     task = mock.MagicMock()
     eng = engine.BenchmarkEngine(config, task).bind([{}])
     eng.run()
Example 30
 def test_run__config_has_runner(self, mock_setup, mock_cleanup,
                                 mock_runner, mock_scenario, mock_consume):
     config = {
         "a.benchmark": [{
             "runner": {
                 "type": "a",
                 "b": 1
             }
         }],
         "b.benchmark": [{
             "runner": {
                 "a": 1
             }
         }]
     }
     task = mock.MagicMock()
     eng = engine.BenchmarkEngine(config, task)
     eng.run()