Code example #1
File: test_utils.py Project: sahanasj/rally
    def test_run_job_execution(self, mock_clients):

        mock_clients("sahara").job_executions.get.side_effect = [
            mock.MagicMock(info={"status": "pending"}, id="42"),
            mock.MagicMock(info={"status": "SUCCESS"}, id="42")
        ]

        mock_clients("sahara").job_executions.create.return_value = (
            mock.MagicMock(id="42"))

        scenario = utils.SaharaScenario()
        scenario._run_job_execution(job_id="test_job_id",
                                    cluster_id="test_cluster_id",
                                    input_id="test_input_id",
                                    output_id="test_output_id",
                                    configs={"k": "v"},
                                    job_idx=0)

        mock_clients("sahara").job_executions.create.assert_called_once_with(
            job_id="test_job_id",
            cluster_id="test_cluster_id",
            input_id="test_input_id",
            output_id="test_output_id",
            configs={"k": "v"})

        je_get_expected = mock.call("42")
        mock_clients("sahara").job_executions.get.assert_has_calls(
            [je_get_expected, je_get_expected])
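These test methods take mock_clients (and, in later examples, mock_random_name) as extra arguments, which implies the test class patches the scenario's clients helper with mock decorators that this excerpt does not show. A minimal sketch of the setup these methods appear to assume (the patch target, import paths, and base class are assumptions, not verbatim Rally code):

import mock

from rally.benchmark.scenarios.sahara import utils  # assumed import path
from tests import test  # assumed shared base test class

SAHARA_UTILS = "rally.benchmark.scenarios.sahara.utils"


# Applied at class level, this would inject mock_clients into every test
# method; tests that also take mock_random_name would add a second,
# method-level @mock.patch for the random-name helper.
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
class SaharaUtilsTestCase(test.TestCase):
    ...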
Code example #2
File: test_utils.py Project: sahanasj/rally
    def test_run_job_execution_fail(self, mock_clients):

        mock_clients("sahara").job_executions.get.side_effect = [
            mock.MagicMock(info={"status": "pending"}, id="42"),
            mock.MagicMock(info={"status": "killed"}, id="42")
        ]

        mock_clients("sahara").job_executions.create.return_value = (
            mock.MagicMock(id="42"))

        scenario = utils.SaharaScenario()
        self.assertRaises(exceptions.RallyException,
                          scenario._run_job_execution,
                          job_id="test_job_id",
                          cluster_id="test_cluster_id",
                          input_id="test_input_id",
                          output_id="test_output_id",
                          configs={"k": "v"},
                          job_idx=0)

        mock_clients("sahara").job_executions.create.assert_called_once_with(
            job_id="test_job_id",
            cluster_id="test_cluster_id",
            input_id="test_input_id",
            output_id="test_output_id",
            configs={"k": "v"})
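Examples #1 and #2 together pin down the polling behavior of _run_job_execution: a "pending" status keeps the loop going, "SUCCESS" ends it normally, and "killed" raises a RallyException. A minimal sketch of a loop consistent with both tests (the poll-interval constant and exact error wording are assumptions):

    def _run_job_execution(self, job_id, cluster_id, input_id, output_id,
                           configs, job_idx):
        # Create the execution, then poll its status until it terminates.
        job_execution = self.clients("sahara").job_executions.create(
            job_id=job_id, cluster_id=cluster_id, input_id=input_id,
            output_id=output_id, configs=configs)

        while True:
            status = self.clients("sahara").job_executions.get(
                job_execution.id).info["status"].lower()
            if status == "success":
                return
            if status in ("failed", "killed"):
                raise exceptions.RallyException(
                    "Job execution %s failed" % job_execution.id)
            time.sleep(JOB_POLL_INTERVAL)  # assumed module-level constant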
Code example #3
File: test_utils.py Project: sahanasj/rally
    def test_list_node_group_templates(self, mock_clients):
        ngts = []
        mock_clients("sahara").node_group_templates.list.return_value = ngts

        scenario = utils.SaharaScenario()
        return_ngts_list = scenario._list_node_group_templates()

        self.assertEqual(ngts, return_ngts_list)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.list_node_group_templates")
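The _test_atomic_action_timer helper used here (and in several later examples) belongs to the shared test base class and is not part of these excerpts. Judging by how it is called, it plausibly checks that one timed atomic action was recorded under the given name; a sketch under that assumption:

    def _test_atomic_action_timer(self, atomic_actions, name):
        # atomic_actions() is assumed to map action name -> duration.
        action_duration = atomic_actions.get(name)
        self.assertIsNotNone(action_duration)
        self.assertIsInstance(action_duration, float)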
Code example #4
File: test_utils.py Project: sahanasj/rally
    def test_delete_node_group_templates(self, mock_clients):

        scenario = utils.SaharaScenario()
        ng = mock.MagicMock(id=42)

        scenario._delete_node_group_template(ng)

        delete_mock = mock_clients("sahara").node_group_templates.delete
        delete_mock.assert_called_once_with(42)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.delete_node_group_template")
Code example #5
File: test_utils.py Project: sahanasj/rally
    def test_create_output_ds_swift(self, mock_clients, mock_random_name):

        ctxt = {
            "sahara_output_conf": {
                "output_type": "swift",
                "output_url_prefix": "swift://test_out/"
            }
        }

        scenario = utils.SaharaScenario(ctxt)
        self.assertRaises(exceptions.RallyException,
                          scenario._create_output_ds)
Code example #6
File: test_utils.py Project: x-ion-de/rally
    def test_create_node_group_templates(self, mock_clients, mock_random_name):

        scenario = utils.SaharaScenario()
        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        scenario.NODE_PROCESSES = mock_processes

        scenario._create_master_node_group_template(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version"
        )
        scenario._create_worker_node_group_template(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version"
        )

        create_calls = [
            mock.call(
                name="random_name",
                plugin_name="test_plugin",
                hadoop_version="test_version",
                flavor_id="test_flavor",
                node_processes=["p1"]),
            mock.call(
                name="random_name",
                plugin_name="test_plugin",
                hadoop_version="test_version",
                flavor_id="test_flavor",
                node_processes=["p2"]
            )]
        mock_clients("sahara").node_group_templates.create.assert_has_calls(
            create_calls)

        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "sahara.create_master_node_group_template")
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "sahara.create_worker_node_group_template")
Code example #7
File: test_utils.py Project: sahanasj/rally
    def test_create_output_ds(self, mock_clients, mock_random_name):

        ctxt = {
            "sahara_output_conf": {
                "output_type": "hdfs",
                "output_url_prefix": "hdfs://test_out/"
            }
        }

        scenario = utils.SaharaScenario(ctxt)
        scenario._create_output_ds()

        mock_clients("sahara").data_sources.create.assert_called_once_with(
            name="42",
            description="",
            data_source_type="hdfs",
            url="hdfs://test_out/42")
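Read together, examples #5 and #7 constrain _create_output_ds: a "swift" output type is rejected with a RallyException, while "hdfs" creates a data source whose name and URL suffix come from a generated random name (mocked to "42" in example #7). A minimal sketch consistent with both tests (the helper name and error message are assumptions):

    def _create_output_ds(self):
        ds_conf = self.context["sahara_output_conf"]
        if ds_conf["output_type"] == "swift":
            # Swift output data sources are rejected, per example #5.
            raise exceptions.RallyException(
                "Swift Data Sources are not supported yet")

        name = self._generate_random_name()  # assumed helper, mocked as "42"
        return self.clients("sahara").data_sources.create(
            name=name,
            description="",
            data_source_type=ds_conf["output_type"],
            url=ds_conf["output_url_prefix"] + name)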
Code example #8
File: test_utils.py Project: varunarya10/rally
    def test_launch_cluster_error(self, mock_constants, mock_clients,
                                  mock_random_name):

        scenario = utils.SaharaScenario(clients=mock.MagicMock())
        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_configs = {
            "test_plugin": {
                "test_version": {
                    "target": "HDFS",
                    "config_name": "dfs.replication"
                }
            }
        }

        mock_constants.NODE_PROCESSES = mock_processes
        mock_constants.REPLICATION_CONFIGS = mock_configs

        mock_clients("sahara").clusters.create.return_value = mock.MagicMock(
            id="test_cluster_id")

        mock_clients("sahara").clusters.get.return_value = mock.MagicMock(
            status="error")

        self.assertRaises(
            exceptions.SaharaClusterFailure,
            scenario._launch_cluster,
            plugin_name="test_plugin",
            hadoop_version="test_version",
            flavor_id="test_flavor",
            image_id="test_image",
            floating_ip_pool="test_pool",
            volumes_per_node=5,
            volumes_size=10,
            workers_count=42,
            node_configs={"HDFS": {
                "local_config": "local_value"
            }})
Code example #9
File: test_utils.py Project: sahanasj/rally
    def test_delete_cluster(self, mock_clients):

        scenario = utils.SaharaScenario()
        cluster = mock.MagicMock(id=42)
        mock_clients("sahara").clusters.get.side_effect = [
            cluster, sahara_base.APIException()
        ]

        scenario._delete_cluster(cluster)

        delete_mock = mock_clients("sahara").clusters.delete
        delete_mock.assert_called_once_with(42)

        cl_get_expected = mock.call(42)
        mock_clients("sahara").clusters.get.assert_has_calls(
            [cl_get_expected, cl_get_expected])

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.delete_cluster")
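The get side_effect in this test (a cluster object, then an APIException) models the deletion lifecycle: the cluster is still visible on the first poll and gone on the second. A sketch of delete-and-wait logic that would satisfy these assertions (polling details are assumptions):

    def _delete_cluster(self, cluster):
        self.clients("sahara").clusters.delete(cluster.id)

        while True:
            try:
                self.clients("sahara").clusters.get(cluster.id)
            except sahara_base.APIException:
                # Not found any more: deletion has completed.
                return
            time.sleep(CLUSTER_DELETE_POLL_INTERVAL)  # assumed constant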
Code example #10
File: sahara_cluster.py Project: esikachev/rally
    def setup(self):
        wait_dict = dict()

        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            clients = osclients.Clients(user["endpoint"])

            image_id = self.context["tenants"][tenant_id]["sahara_image"]

            floating_ip_pool = self.config.get("floating_ip_pool")

            temporary_context = {"tenant": self.context["tenants"][tenant_id]}
            cluster = utils.SaharaScenario(
                context=temporary_context, clients=clients)._launch_cluster(
                    plugin_name=self.config["plugin_name"],
                    hadoop_version=self.config["hadoop_version"],
                    flavor_id=self.config["flavor_id"],
                    workers_count=self.config["workers_count"],
                    image_id=image_id,
                    floating_ip_pool=floating_ip_pool,
                    volumes_per_node=self.config.get("volumes_per_node"),
                    volumes_size=self.config.get("volumes_size", 1),
                    auto_security_group=self.config.get(
                        "auto_security_group", True),
                    security_groups=self.config.get("security_groups"),
                    node_configs=self.config.get("node_configs"),
                    cluster_configs=self.config.get("cluster_configs"),
                    enable_anti_affinity=self.config.get(
                        "enable_anti_affinity", False),
                    wait_active=False)

            self.context["tenants"][tenant_id]["sahara_cluster"] = cluster.id

            # Need to save the client instance to poll for active status
            wait_dict[cluster] = clients.sahara()

        bench_utils.wait_for(
            resource=wait_dict,
            update_resource=self.update_clusters_dict,
            is_ready=self.all_clusters_active,
            timeout=CONF.benchmark.cluster_create_timeout,
            check_interval=CONF.benchmark.cluster_check_interval)
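This setup() defers readiness checking to update_clusters_dict and all_clusters_active, which are not part of the excerpt. Since wait_dict maps cluster objects to per-tenant sahara clients, they might look roughly like this (an assumption; compare the inline all_active helper in example #14, which polls cluster ids instead of objects):

    def update_clusters_dict(self, dct):
        # Re-fetch every cluster through its own tenant's client so the
        # readiness check below sees fresh status values.
        return {client.clusters.get(cluster.id): client
                for cluster, client in dct.items()}

    def all_clusters_active(self, dct):
        return all(cluster.status.lower() == "active" for cluster in dct)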
Code example #11
File: test_utils.py Project: sahanasj/rally
    def test_scale_cluster(self, mock_clients):

        scenario = utils.SaharaScenario()
        cluster = mock.MagicMock(id=42,
                                 node_groups=[{
                                     "name": "random_master",
                                     "count": 1
                                 }, {
                                     "name": "random_worker",
                                     "count": 41
                                 }])
        mock_clients("sahara").clusters.get.return_value = mock.MagicMock(
            id=42, status="active")

        expected_scale_object = {
            "resize_node_groups": [{
                "name": "random_worker",
                "count": 42
            }]
        }

        scenario._scale_cluster(cluster, 1)
        mock_clients("sahara").clusters.scale.assert_called_once_with(
            42, expected_scale_object)
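The expected_scale_object here implies that _scale_cluster locates the worker node group by name and adds the delta to its current count (41 + 1 = 42) before calling clusters.scale. A minimal sketch under that assumption (the real method presumably also waits for the cluster to return to "active", which is why clusters.get is mocked):

    def _scale_cluster(self, cluster, delta):
        # Pick the node group whose name marks it as a worker group.
        worker_ng = [ng for ng in cluster.node_groups
                     if "worker" in ng["name"]][0]
        scale_object = {
            "resize_node_groups": [{
                "name": worker_ng["name"],
                "count": worker_ng["count"] + delta
            }]
        }
        self.clients("sahara").clusters.scale(cluster.id, scale_object)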
Code example #12
File: test_utils.py Project: sahanasj/rally
    def test_launch_cluster(self, mock_clients, mock_random_name):

        scenario = utils.SaharaScenario()
        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_configs = {
            "test_plugin": {
                "test_version": {
                    "target": "HDFS",
                    "config_name": "dfs.replication"
                }
            }
        }

        node_groups = [{
            "name": "master-ng",
            "flavor_id": "test_flavor",
            "node_processes": ["p1"],
            "floating_ip_pool": "test_pool",
            "volumes_per_node": 5,
            "volumes_size": 10,
            "count": 1,
            "node_configs": {
                "HDFS": {
                    "local_config": "local_value"
                }
            }
        }, {
            "name": "worker-ng",
            "flavor_id": "test_flavor",
            "node_processes": ["p2"],
            "floating_ip_pool": "test_pool",
            "volumes_per_node": 5,
            "volumes_size": 10,
            "count": 41,
            "node_configs": {
                "HDFS": {
                    "local_config": "local_value"
                }
            }
        }]

        scenario.NODE_PROCESSES = mock_processes
        scenario.REPLICATION_CONFIGS = mock_configs

        mock_clients("sahara").clusters.create.return_value = mock.MagicMock(
            id="test_cluster_id")

        mock_clients("sahara").clusters.get.return_value = mock.MagicMock(
            status="active")

        scenario._launch_cluster(
            plugin_name="test_plugin",
            hadoop_version="test_version",
            flavor_id="test_flavor",
            image_id="test_image",
            floating_ip_pool="test_pool",
            volumes_per_node=5,
            volumes_size=10,
            node_count=42,
            node_configs={"HDFS": {
                "local_config": "local_value"
            }})

        mock_clients("sahara").clusters.create.assert_called_once_with(
            name="random_name",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            node_groups=node_groups,
            default_image_id="test_image",
            cluster_configs={"HDFS": {
                "dfs.replication": 3
            }},
            net_id=None)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.launch_cluster")
Code example #13
File: test_utils.py Project: varunarya10/rally
    def test_launch_cluster(self, mock_constants, mock_clients,
                            mock_random_name):

        clients_values = mock.MagicMock(return_value=[consts.Service.NEUTRON])
        mock_clients.services.return_value = mock.MagicMock(
            values=clients_values)

        context = {
            "tenant": {
                "networks": [{
                    "id": "test_neutron_id",
                    "router_id": "test_router_id"
                }]
            }
        }

        scenario = utils.SaharaScenario(context=context, clients=mock_clients)

        mock_processes = {
            "test_plugin": {
                "test_version": {
                    "master": ["p1"],
                    "worker": ["p2"]
                }
            }
        }

        mock_configs = {
            "test_plugin": {
                "test_version": {
                    "target": "HDFS",
                    "config_name": "dfs.replication"
                }
            }
        }

        floating_ip_pool_uuid = uuidutils.generate_uuid()
        node_groups = [{
            "name": "master-ng",
            "flavor_id": "test_flavor",
            "node_processes": ["p1"],
            "floating_ip_pool": floating_ip_pool_uuid,
            "volumes_per_node": 5,
            "volumes_size": 10,
            "count": 1,
            "auto_security_group": True,
            "security_groups": ["g1", "g2"],
            "node_configs": {
                "HDFS": {
                    "local_config": "local_value"
                }
            },
        }, {
            "name": "worker-ng",
            "flavor_id": "test_flavor",
            "node_processes": ["p2"],
            "floating_ip_pool": floating_ip_pool_uuid,
            "volumes_per_node": 5,
            "volumes_size": 10,
            "count": 42,
            "auto_security_group": True,
            "security_groups": ["g1", "g2"],
            "node_configs": {
                "HDFS": {
                    "local_config": "local_value"
                }
            },
        }]

        mock_constants.NODE_PROCESSES = mock_processes
        mock_constants.REPLICATION_CONFIGS = mock_configs

        mock_clients("sahara").clusters.create.return_value = mock.MagicMock(
            id="test_cluster_id")

        mock_clients("sahara").clusters.get.return_value = mock.MagicMock(
            status="active")

        scenario._launch_cluster(
            plugin_name="test_plugin",
            hadoop_version="test_version",
            flavor_id="test_flavor",
            image_id="test_image",
            floating_ip_pool=floating_ip_pool_uuid,
            volumes_per_node=5,
            volumes_size=10,
            auto_security_group=True,
            security_groups=["g1", "g2"],
            workers_count=42,
            node_configs={"HDFS": {
                "local_config": "local_value"
            }})

        mock_clients("sahara").clusters.create.assert_called_once_with(
            name="random_name",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            node_groups=node_groups,
            default_image_id="test_image",
            cluster_configs={"HDFS": {
                "dfs.replication": 3
            }},
            net_id="test_neutron_id",
            anti_affinity=None)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "sahara.launch_cluster")
Code example #14
File: sahara_cluster.py Project: sahanasj/rally
    def setup(self):
        ready_tenants = set()
        wait_dict = dict()

        for user in self.context.get("users", []):
            tenant_id = user["tenant_id"]
            if tenant_id not in ready_tenants:
                ready_tenants.add(tenant_id)

                clients = osclients.Clients(user["endpoint"])

                image_id = self.context["sahara_images"][tenant_id]

                neutron_net = self.config.get("neutron_net")
                if not neutron_net:
                    # Skipping fixed network config
                    neutron_net_id = None
                else:
                    network_cfg = {"name": neutron_net}
                    neutron_net_id = (types.NeutronNetworkResourceType
                                      .transform(clients, network_cfg))

                floating_ip_pool = self.config.get("floating_ip_pool")
                if not floating_ip_pool:
                    # Skipping floating network config
                    floating_ip_pool_id = None
                else:
                    network_cfg = {"name": floating_ip_pool}
                    floating_ip_pool_id = (types.NeutronNetworkResourceType
                                           .transform(clients, network_cfg))

                cluster = utils.SaharaScenario(
                    context=self.context, clients=clients)._launch_cluster(
                        plugin_name=self.config["plugin_name"],
                        hadoop_version=self.config["hadoop_version"],
                        flavor_id=self.config["flavor_id"],
                        node_count=self.config["node_count"],
                        image_id=image_id,
                        floating_ip_pool=floating_ip_pool_id,
                        neutron_net_id=neutron_net_id,
                        volumes_per_node=self.config.get("volumes_per_node"),
                        volumes_size=self.config.get("volumes_size", 1),
                        node_configs=self.config.get("node_configs"),
                        cluster_configs=self.config.get("cluster_configs"),
                        wait_active=False)

                self.context["sahara_clusters"][tenant_id] = cluster.id

                # Need to save the client instance to poll for active status
                wait_dict[cluster.id] = clients.sahara()

        def all_active(dct):
            for cl_id, client in dct.items():
                cl = client.clusters.get(cl_id)
                if cl.status.lower() != "active":
                    return False
            return True

        bench_utils.wait_for(
            resource=wait_dict,
            is_ready=all_active,
            timeout=CONF.benchmark.cluster_create_timeout,
            check_interval=CONF.benchmark.cluster_check_interval)