def test_generate_xml_configs(self, auth_url):
        auth_url.return_value = "http://localhost:5000/v2/"

        # Make a dict of swift configs to verify generated values
        swift_vals = c_helper.extract_name_values(swift.get_swift_configs())

        # Make sure that all the swift configs are in core-site
        c = c_helper.generate_xml_configs({}, ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        self.assertDictContainsSubset(swift_vals, properties)

        # Make sure that user values have precedence over defaults
        c = c_helper.generate_xml_configs(
            {'HDFS': {'fs.swift.service.sahara.tenant': 'fred'}},
            ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        mod_swift_vals = copy.copy(swift_vals)
        mod_swift_vals['fs.swift.service.sahara.tenant'] = 'fred'
        self.assertDictContainsSubset(mod_swift_vals, properties)

        # Make sure that swift configs are left out if not enabled
        c = c_helper.generate_xml_configs(
            {'HDFS': {'fs.swift.service.sahara.tenant': 'fred'},
             'general': {'Enable Swift': False}},
            ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        for key in mod_swift_vals.keys():
            self.assertNotIn(key, properties)
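The assertions above rely on a helper that flattens a Hadoop-style core-site document into a name/value dict. A minimal, self-contained sketch of that idea (an illustrative stand-in, not the actual xmlutils.get_property_dict implementation):

    from xml.dom import minidom

    def property_dict_from_core_site(xml_text):
        # Walk <configuration><property><name>/<value> pairs and collect
        # them into a plain dict, the shape the test compares against.
        doc = minidom.parseString(xml_text)
        props = {}
        for prop in doc.getElementsByTagName('property'):
            name = prop.getElementsByTagName('name')[0].firstChild.data
            value_nodes = prop.getElementsByTagName('value')
            value = (value_nodes[0].firstChild.data
                     if value_nodes and value_nodes[0].firstChild else '')
            props[name] = value
        return props

    sample = """<?xml version="1.0"?>
    <configuration>
      <property>
        <name>fs.swift.service.sahara.tenant</name>
        <value>fred</value>
      </property>
    </configuration>"""
    assert property_dict_from_core_site(sample) == {
        'fs.swift.service.sahara.tenant': 'fred'}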
Example #2
    def _extract_configs_to_extra(self, cluster):
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        sp_slaves = utils.get_instances(cluster, "slave")

        extra = dict()

        config_master = config_slaves = ""
        if sp_master is not None:
            config_master = c_helper.generate_spark_env_configs(cluster)

        if sp_slaves is not None:
            slavenames = []
            for slave in sp_slaves:
                slavenames.append(slave.hostname())
            config_slaves = c_helper.generate_spark_slaves_configs(slavenames)
        else:
            config_slaves = "\n"

        for ng in cluster.node_groups:
            extra[ng.id] = {
                "xml": c_helper.generate_xml_configs(ng.configuration(), ng.storage_paths(), nn.hostname(), None),
                "setup_script": c_helper.generate_hadoop_setup_script(
                    ng.storage_paths(), c_helper.extract_hadoop_environment_confs(ng.configuration())
                ),
                "sp_master": config_master,
                "sp_slaves": config_slaves,
            }

        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(cluster, CONF.enable_hypervisor_awareness)
            extra["topology_data"] = "\n".join([k + " " + v for k, v in topology_data.items()]) + "\n"

        return extra
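The topology_data entry built at the end of this example is just a newline-separated "hostname rack" listing. A small sketch with made-up hosts and racks:

    # Serialize a host -> rack mapping the same way the example does.
    topology_map = {
        'node-1': '/rack1',
        'node-2': '/rack2',
    }
    topology_data = "\n".join(
        [k + " " + v for k, v in topology_map.items()]) + "\n"
    # -> "node-1 /rack1\nnode-2 /rack2\n"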
Example #3
    def _add_instance_ng_related_to_extra(self, cluster, instance, extra):
        extra = extra.copy()
        ng = instance.node_group
        nn = utils.get_instance(cluster, "namenode")

        extra["xml"] = c_helper.generate_xml_configs(ng.configuration(), instance.storage_paths(), nn.hostname(), None)
        extra["setup_script"] = c_helper.generate_hadoop_setup_script(
            instance.storage_paths(), c_helper.extract_hadoop_environment_confs(ng.configuration())
        )

        return extra
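Note that extra.copy() is a shallow copy: the dict passed in is left untouched, and only the returned copy carries the per-instance "xml" and "setup_script" values. A tiny illustration (values are placeholders):

    ng_extra = {'xml': '<node-group xml>',
                'setup_script': '<node-group script>',
                'sp_master': '...', 'sp_slaves': '...'}
    instance_extra = ng_extra.copy()
    instance_extra['xml'] = '<instance-specific xml>'
    instance_extra['setup_script'] = '<instance-specific script>'
    assert ng_extra['xml'] == '<node-group xml>'   # original is unchanged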
Example #4
File: plugin.py  Project: crobby/sahara
    def _extract_configs_to_extra(self, cluster):
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        sp_slaves = utils.get_instances(cluster, "slave")

        extra = dict()

        config_master = config_slaves = ''
        if sp_master is not None:
            config_master = c_helper.generate_spark_env_configs(cluster)

        if sp_slaves is not None:
            slavenames = []
            for slave in sp_slaves:
                slavenames.append(slave.hostname())
            config_slaves = c_helper.generate_spark_slaves_configs(slavenames)
        else:
            config_slaves = "\n"

        # Any node that might be used to run spark-submit will need
        # these libs for swift integration
        config_defaults = c_helper.generate_spark_executor_classpath(cluster)

        extra['job_cleanup'] = c_helper.generate_job_cleanup_config(cluster)
        for ng in cluster.node_groups:
            extra[ng.id] = {
                'xml': c_helper.generate_xml_configs(
                    ng.configuration(),
                    ng.storage_paths(),
                    nn.hostname(), None
                ),
                'setup_script': c_helper.generate_hadoop_setup_script(
                    ng.storage_paths(),
                    c_helper.extract_hadoop_environment_confs(
                        ng.configuration())
                ),
                'sp_master': config_master,
                'sp_slaves': config_slaves,
                'sp_defaults': config_defaults
            }
            if "zeppelin" in ng.node_processes:
                extra[ng.id].update({
                    "zeppelin_setup_script":
                        c_helper.generate_zeppelin_setup_script(sp_master)})

        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            extra['topology_data'] = "\n".join(
                [k + " " + v for k, v in topology_data.items()]) + "\n"

        return extra
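For reference, the dict this variant assembles has roughly the following layout (the node-group id and all values below are placeholders, not real output):

    extra = {
        'job_cleanup': '<job cleanup config>',        # cluster-wide
        '<node-group-id>': {
            'xml': {'core-site': '<xml>'},            # plus other generated files
            'setup_script': '<hadoop setup script>',
            'sp_master': '<spark-env.sh contents>',
            'sp_slaves': '<slaves file contents>',
            'sp_defaults': '<spark executor classpath>',
            # only for node groups that run zeppelin:
            'zeppelin_setup_script': '<zeppelin setup script>',
        },
        'topology_data': 'host1 /rack1\n',            # only with data locality
    }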
Example #5
    def _add_instance_ng_related_to_extra(self, cluster, instance, extra):
        extra = extra.copy()
        ng = instance.node_group
        nn = utils.get_instance(cluster, "namenode")

        extra['xml'] = c_helper.generate_xml_configs(
            ng.configuration(), instance.storage_paths(), nn.hostname(), None)
        extra['setup_script'] = c_helper.generate_hadoop_setup_script(
            instance.storage_paths(),
            c_helper.extract_hadoop_environment_confs(ng.configuration()))

        return extra
Example #6
    def test_generate_xml_configs(self, auth_url):
        auth_url.return_value = "http://localhost:5000/v2/"

        # Make a dict of swift configs to verify generated values
        swift_vals = c_helper.extract_name_values(swift.get_swift_configs())

        # Make sure that all the swift configs are in core-site
        c = c_helper.generate_xml_configs({}, ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        self.assertDictContainsSubset(swift_vals, properties)

        # Make sure that user values have precedence over defaults
        c = c_helper.generate_xml_configs(
            {'HDFS': {
                'fs.swift.service.sahara.tenant': 'fred'
            }}, ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        mod_swift_vals = copy.copy(swift_vals)
        mod_swift_vals['fs.swift.service.sahara.tenant'] = 'fred'
        self.assertDictContainsSubset(mod_swift_vals, properties)

        # Make sure that swift configs are left out if not enabled
        c = c_helper.generate_xml_configs(
            {
                'HDFS': {
                    'fs.swift.service.sahara.tenant': 'fred'
                },
                'general': {
                    'Enable Swift': False
                }
            }, ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        for key in mod_swift_vals.keys():
            self.assertNotIn(key, properties)
Example #7
    def _extract_configs_to_extra(self, cluster):
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        sp_slaves = utils.get_instances(cluster, "slave")

        extra = dict()

        config_master = config_slaves = ''
        if sp_master is not None:
            config_master = c_helper.generate_spark_env_configs(cluster)

        if sp_slaves is not None:
            slavenames = []
            for slave in sp_slaves:
                slavenames.append(slave.hostname())
            config_slaves = c_helper.generate_spark_slaves_configs(slavenames)
        else:
            config_slaves = "\n"

        # Any node that might be used to run spark-submit will need
        # these libs for swift integration
        config_defaults = c_helper.generate_spark_executor_classpath(cluster)

        extra['job_cleanup'] = c_helper.generate_job_cleanup_config(cluster)
        for ng in cluster.node_groups:
            extra[ng.id] = {
                'xml': c_helper.generate_xml_configs(
                    ng.configuration(), ng.storage_paths(),
                    nn.hostname(), None),
                'setup_script': c_helper.generate_hadoop_setup_script(
                    ng.storage_paths(),
                    c_helper.extract_hadoop_environment_confs(
                        ng.configuration())),
                'sp_master': config_master,
                'sp_slaves': config_slaves,
                'sp_defaults': config_defaults
            }

        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            extra['topology_data'] = "\n".join(
                [k + " " + v for k, v in topology_data.items()]) + "\n"

        return extra
Example #8
    def test_generate_xml_configs(self, auth_url):
        auth_url.return_value = "http://localhost:5000/v2/"

        # Make a dict of swift configs to verify generated values
        swift_vals = c_helper.extract_name_values(swift.get_swift_configs())

        # Make sure that all the swift configs are in core-site
        c = c_helper.generate_xml_configs({}, ["/mnt/one"], "localhost", None)
        doc = xml.parseString(c["core-site"])
        configuration = doc.getElementsByTagName("configuration")
        properties = xmlutils.get_property_dict(configuration[0])
        self.assertDictContainsSubset(swift_vals, properties)

        # Make sure that user values have precedence over defaults
        c = c_helper.generate_xml_configs(
            {"HDFS": {"fs.swift.service.sahara.tenant": "fred"}}, ["/mnt/one"], "localhost", None
        )
        doc = xml.parseString(c["core-site"])
        configuration = doc.getElementsByTagName("configuration")
        properties = xmlutils.get_property_dict(configuration[0])
        mod_swift_vals = copy.copy(swift_vals)
        mod_swift_vals["fs.swift.service.sahara.tenant"] = "fred"
        self.assertDictContainsSubset(mod_swift_vals, properties)

        # Make sure that swift configs are left out if not enabled
        c = c_helper.generate_xml_configs(
            {"HDFS": {"fs.swift.service.sahara.tenant": "fred"}, "general": {"Enable Swift": False}},
            ["/mnt/one"],
            "localhost",
            None,
        )
        doc = xml.parseString(c["core-site"])
        configuration = doc.getElementsByTagName("configuration")
        properties = xmlutils.get_property_dict(configuration[0])
        for key in mod_swift_vals.keys():
            self.assertNotIn(key, properties)
Example #9
    def _extract_configs_to_extra(self, cluster):
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        sp_slaves = utils.get_instances(cluster, "slave")

        extra = dict()

        config_master = config_slaves = ''
        if sp_master is not None:
            config_master = c_helper.generate_spark_env_configs(cluster)

        if sp_slaves is not None:
            slavenames = []
            for slave in sp_slaves:
                slavenames.append(slave.hostname())
            config_slaves = c_helper.generate_spark_slaves_configs(slavenames)
        else:
            config_slaves = "\n"

        for ng in cluster.node_groups:
            extra[ng.id] = {
                'xml': c_helper.generate_xml_configs(
                    ng.configuration(), ng.storage_paths(),
                    nn.hostname(), None),
                'setup_script': c_helper.generate_hadoop_setup_script(
                    ng.storage_paths(),
                    c_helper.extract_hadoop_environment_confs(
                        ng.configuration())),
                'sp_master': config_master,
                'sp_slaves': config_slaves
            }

        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            extra['topology_data'] = "\n".join(
                [k + " " + v for k, v in topology_data.items()]) + "\n"

        return extra
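Across all of these examples the helper is invoked with the same four arguments. A sketch of the call as the tests above exercise it (the path and hostname are the test fixtures, not real cluster values):

    configs = c_helper.generate_xml_configs(
        {},             # user-supplied cluster configs (may be empty)
        ['/mnt/one'],   # storage paths for the node group or instance
        'localhost',    # namenode hostname
        None)           # fourth argument is left as None in every example here
    core_site_xml = configs['core-site']   # generated Hadoop core-site.xml text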