Example #1
def _configure_swift(client, cluster):
    swift_enable = c_helper.get_config_value(
        cluster.cluster_configs.get('general'), c_helper.ENABLE_SWIFT)
    if swift_enable:
        swift_configs = swift.get_swift_configs()
        for conf in swift_configs:
            client.params.hadoop.add(conf['name'], conf['value'])
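The tests further down pin the contract: get_swift_configs() returns a list of dicts, each with 'name', 'value', and 'description' keys, one per fs.swift.service.sahara.* property. A minimal sketch of that shape, with hypothetical values:

# Sketch of the return shape implied by the tests in the examples below;
# the property list and values here are hypothetical, not sahara's defaults.
def get_swift_configs_sketch():
    return [
        {'name': 'fs.swift.service.sahara.tenant',
         'value': 'demo_tenant', 'description': ''},
        {'name': 'fs.swift.service.sahara.http.port',
         'value': '8080', 'description': ''},
    ]

for conf in get_swift_configs_sketch():
    print('%s=%s' % (conf['name'], conf['value']))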
Example #2
    def test_generate_xml_configs(self, auth_url):
        auth_url.return_value = "http://localhost:5000/v2/"

        # Make a dict of swift configs to verify generated values
        swift_vals = c_helper.extract_name_values(swift.get_swift_configs())

        # Make sure that all the swift configs are in core-site
        c = c_helper.generate_xml_configs({}, ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        self.assertDictContainsSubset(swift_vals, properties)

        # Make sure that user values have precedence over defaults
        c = c_helper.generate_xml_configs(
            {'HDFS': {'fs.swift.service.sahara.tenant': 'fred'}},
            ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        mod_swift_vals = copy.copy(swift_vals)
        mod_swift_vals['fs.swift.service.sahara.tenant'] = 'fred'
        self.assertDictContainsSubset(mod_swift_vals, properties)

        # Make sure that swift configs are left out if not enabled
        c = c_helper.generate_xml_configs(
            {'HDFS': {'fs.swift.service.sahara.tenant': 'fred'},
             'general': {'Enable Swift': False}},
            ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        for key in mod_swift_vals.keys():
            self.assertNotIn(key, properties)
Example #3
    def test_get_swift_configs(self):
        self.setup_context(tenant_name='test_tenant',
                           auth_uri='http://localhost:8080/v2.0/')
        self.override_config("os_region_name", 'regionOne')

        result = h.get_swift_configs()
        self.assertEqual(8, len(result))
        self.assertIn(
            {
                'name': "fs.swift.service.sahara.tenant",
                'value': 'test_tenant',
                'description': ''
            }, result)
        self.assertIn(
            {
                'name': "fs.swift.service.sahara.http.port",
                'value': '8080',
                'description': ''
            }, result)
        self.assertIn(
            {
                'name': "fs.swift.service.sahara.region",
                'value': 'regionOne',
                'description': ''
            }, result)
Example #4
 def _get_core_site_props(self, context):
     result = {
         'hadoop.proxyuser.mapr.groups': '*',
         'hadoop.proxyuser.mapr.hosts': '*',
     }
     for conf in swift_helper.get_swift_configs():
         result[conf['name']] = conf['value']
     return result
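Several consumers below collapse the same list into a flat name-to-value mapping, either with a loop as above or with a comprehension. The fold isolated as a helper (the helper name is hypothetical):

# Hypothetical helper equivalent to the loop above: collapse
# [{'name': n, 'value': v, ...}, ...] into {n: v, ...}.
def swift_configs_as_dict(configs):
    return {c['name']: c['value'] for c in configs}

With it, the body above reduces to result.update(swift_configs_as_dict(swift_helper.get_swift_configs())).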
Example #5
File: config.py Project: hongbin/sahara
def _get_hadoop_configs(node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs['JobFlow'] = oozie_cfg

    if c_helper.get_config_value(c_helper.ENABLE_SWIFT.applicable_target,
                                 c_helper.ENABLE_SWIFT.name, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    return confs, c_helper.get_env_configs()
Example #6
def _get_hadoop_configs(node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services':
            'mapreduce_shuffle',
            'yarn.resourcemanager.hostname':
            '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path':
            '%s/nm-include' % (HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path':
            '%s/nm-exclude' % (HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {'mapreduce.framework.name': 'yarn'}

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs['JobFlow'] = oozie_cfg

    if c_helper.get_config_value(c_helper.ENABLE_SWIFT.applicable_target,
                                 c_helper.ENABLE_SWIFT.name, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update(
            {"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    return confs, c_helper.get_env_configs()
Example #7
 def _get_core_site_props(self, context):
     result = {}
     if context.is_node_aware:
         result.update(self._get_core_site_node_aware_props())
     for conf in swift_helper.get_swift_configs():
         result[conf['name']] = conf['value']
     for conf in self._get_impersonation_props():
         result[conf['name']] = conf['value']
     return result
Example #8
    def test_get_swift_configs(self):
        self.setup_context(tenant_name="test_tenant", auth_uri="http://localhost:8080/v2.0/")
        self.override_config("os_region_name", "regionOne")

        result = h.get_swift_configs()
        self.assertEqual(8, len(result))
        self.assertIn({"name": "fs.swift.service.sahara.tenant", "value": "test_tenant", "description": ""}, result)
        self.assertIn({"name": "fs.swift.service.sahara.http.port", "value": "8080", "description": ""}, result)
        self.assertIn({"name": "fs.swift.service.sahara.region", "value": "regionOne", "description": ""}, result)
Example #9
File: yarn.py Project: lhcxx/sahara
 def _get_core_site_props(self, context):
     result = {
         'hadoop.proxyuser.mapr.groups': '*',
         'hadoop.proxyuser.mapr.hosts': '*',
     }
     if context.is_node_aware:
         result.update(self._get_core_site_node_aware_props())
     for conf in swift_helper.get_swift_configs():
         result[conf['name']] = conf['value']
     return result
Example #10
def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    ng_configs = node_group.configuration()

    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)

    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # applying the collected configs to the appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)

        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(all_cfg, core_all),
        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
    }

    if hive_hostname:
        cfg = all_cfg
        cfg_filter = HIVE_DEFAULT
        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if CONF.use_identity_api_v3 and proxy_configs:
            cfg, cfg_filter = _inject_swift_trust_info(cfg,
                                                       cfg_filter,
                                                       proxy_configs)
        xml_configs.update({'hive-site':
                            x.create_hadoop_xml(cfg, cfg_filter)})
        LOG.debug('Generated hive-site.xml for hive {host}'.format(
            host=hive_hostname))

    if oozie_hostname:
        xml_configs.update({'oozie-site':
                            x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug('Generated oozie-site.xml for oozie {host}'.format(
            host=oozie_hostname))

    return xml_configs
Example #11
File: deploy.py Project: viplav/sahara
def _configure_swift_to_inst(instance):
    cluster = instance.node_group.cluster
    with instance.remote() as r:
        r.execute_command('sudo curl %s -o %s/hadoop-openstack.jar' % (
            c_helper.get_swift_lib_url(cluster), HADOOP_LIB_DIR))
        core_site = r.read_file_from(PATH_TO_CORE_SITE_XML)
        configs = xmlutils.parse_hadoop_xml_with_name_and_value(core_site)
        configs.extend(swift_helper.get_swift_configs())
        confs = dict((c['name'], c['value']) for c in configs)
        new_core_site = xmlutils.create_hadoop_xml(confs)
        r.write_file_to(PATH_TO_CORE_SITE_XML, new_core_site, run_as_root=True)
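The xmlutils helpers used here are not shown in this listing. A rough stdlib stand-in for the same round trip, assuming the usual Hadoop <configuration><property><name>/<value> layout, could look like this:

import xml.dom.minidom as minidom

# Rough, assumed stand-in for xmlutils.parse_hadoop_xml_with_name_and_value:
# pull (name, value) pairs out of a core-site.xml document.
def parse_core_site(xml_text):
    configs = []
    for prop in minidom.parseString(xml_text).getElementsByTagName('property'):
        name = prop.getElementsByTagName('name')[0].firstChild.data
        value_node = prop.getElementsByTagName('value')[0].firstChild
        configs.append({'name': name,
                        'value': value_node.data if value_node else ''})
    return configs

# Rough, assumed stand-in for xmlutils.create_hadoop_xml over a
# {name: value} dict.
def create_core_site(confs):
    props = ''.join(
        '<property><name>%s</name><value>%s</value></property>' % (n, v)
        for n, v in sorted(confs.items()))
    return '<configuration>%s</configuration>' % props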
Example #12
def get_cluster_params(cluster):
    configs = _create_ambari_configs(cluster.cluster_configs,
                                     cluster.hadoop_version)
    swift_configs = {x["name"]: x["value"]
                     for x in swift_helper.get_swift_configs()}
    configs.setdefault("core-site", {})
    configs["core-site"].update(swift_configs)
    if utils.get_instance(cluster, common.RANGER_ADMIN):
        configs.setdefault("admin-properties", {})
        configs["admin-properties"]["db_root_password"] = (
            cluster.extra["ranger_db_password"])
    return _serialize_ambari_configs(configs)
Example #13
def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    ng_configs = node_group.configuration()

    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)

    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # applying the collected configs to the appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)

        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(all_cfg, core_all),
        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
    }

    if hive_hostname:
        cfg = all_cfg
        cfg_filter = HIVE_DEFAULT
        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if CONF.use_identity_api_v3 and proxy_configs:
            cfg, cfg_filter = _inject_swift_trust_info(cfg, cfg_filter,
                                                       proxy_configs)
        xml_configs.update({'hive-site': x.create_hadoop_xml(cfg, cfg_filter)})
        LOG.debug('Generated hive-site.xml for hive {host}'.format(
            host=hive_hostname))

    if oozie_hostname:
        xml_configs.update(
            {'oozie-site': x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug('Generated oozie-site.xml for oozie {host}'.format(
            host=oozie_hostname))

    return xml_configs
Example #14
def _configure_swift_to_inst(instance):
    cluster = instance.node_group.cluster
    with instance.remote() as r:
        r.execute_command(
            'sudo curl %s -o %s/hadoop-openstack.jar' %
            (c_helper.get_swift_lib_url(cluster), HADOOP_LIB_DIR))
        core_site = r.read_file_from(PATH_TO_CORE_SITE_XML)
        configs = xmlutils.parse_hadoop_xml_with_name_and_value(core_site)
        configs.extend(swift_helper.get_swift_configs())
        confs = dict((c['name'], c['value']) for c in configs)
        new_core_site = xmlutils.create_hadoop_xml(confs)
        r.write_file_to(PATH_TO_CORE_SITE_XML, new_core_site, run_as_root=True)
Example #15
    def test_get_swift_configs(self, authUrlConfig):
        self.setup_context(tenant_name='test_tenant')
        self.override_config("os_auth_protocol", 'http')
        self.override_config("os_auth_port", '8080')
        authUrlConfig.return_value = "http://localhost:8080/v2.0/"

        result = h.get_swift_configs()
        self.assertEqual(7, len(result))
        self.assertIn({'name': "fs.swift.service.sahara.tenant",
                       'value': 'test_tenant', 'description': ''}, result)
        self.assertIn({'name': "fs.swift.service.sahara.http.port",
                       'value': '8080', 'description': ''}, result)
Example #16
    def test_get_swift_configs(self, url_for_mock):
        url_for_mock.return_value = 'http://localhost:5000/v2.0'
        self.setup_context(tenant_name='test_tenant')
        self.override_config("os_region_name", 'regionOne')

        result = h.get_swift_configs()
        self.assertEqual(8, len(result))
        self.assertIn({'name': "fs.swift.service.sahara.tenant",
                       'value': 'test_tenant', 'description': ''}, result)
        self.assertIn({'name': "fs.swift.service.sahara.http.port",
                       'value': '8080', 'description': ''}, result)
        self.assertIn({'name': "fs.swift.service.sahara.region",
                       'value': 'regionOne', 'description': ''}, result)
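The decorators on these test methods are elided in this listing; judging by the injected arguments (auth_url, authUrlConfig, url_for_mock), each one mock.patches the helper that builds the Keystone auth URL. A hedged reconstruction of the harness, with a hypothetical patch target:

import unittest
from unittest import mock

class SwiftConfigTestSketch(unittest.TestCase):
    # The patch target below is a guess at the elided decorator; the real
    # test suite may patch a different helper.
    @mock.patch('sahara.swift.utils.retrieve_auth_url')
    def test_get_swift_configs(self, url_for_mock):
        url_for_mock.return_value = 'http://localhost:5000/v2.0'
        # ... remainder as in the example above ...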
Example #17
def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    oozie_hostname = _get_hostname(utils.get_oozie(cluster))
    hive_hostname = _get_hostname(utils.get_hiveserver(cluster))

    ng_configs = node_group.configuration()

    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)

    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # applying the collected configs to the appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)

        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(all_cfg, core_all),
        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
    }

    if hive_hostname:
        xml_configs.update(
            {'hive-site': x.create_hadoop_xml(all_cfg, HIVE_DEFAULT)})
        LOG.debug('Generated hive-site.xml for hive %s', hive_hostname)

    if oozie_hostname:
        xml_configs.update(
            {'oozie-site': x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug('Generated oozie-site.xml for oozie %s', oozie_hostname)

    return xml_configs
Example #18
def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    oozie_hostname = _get_hostname(utils.get_oozie(cluster))
    hive_hostname = _get_hostname(utils.get_hiveserver(cluster))

    ng_configs = node_group.configuration()

    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)

    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # applying the collected configs to the appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)

        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(all_cfg, core_all),
        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
    }

    if hive_hostname:
        xml_configs.update({'hive-site':
                            x.create_hadoop_xml(all_cfg, HIVE_DEFAULT)})
        LOG.debug('Generated hive-site.xml for hive %s', hive_hostname)

    if oozie_hostname:
        xml_configs.update({'oozie-site':
                            x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug('Generated oozie-site.xml for oozie %s', oozie_hostname)

    return xml_configs
Example #19
def generate_xml_configs(configs, storage_path, nn_hostname, hadoop_port):
    if hadoop_port is None:
        hadoop_port = 8020

    cfg = {
        'fs.defaultFS': 'hdfs://%s:%s' % (nn_hostname, str(hadoop_port)),
        'dfs.namenode.name.dir': extract_hadoop_path(storage_path,
                                                     '/dfs/nn'),
        'dfs.datanode.data.dir': extract_hadoop_path(storage_path,
                                                     '/dfs/dn'),
        'hadoop.tmp.dir': extract_hadoop_path(storage_path,
                                              '/dfs'),
        'dfs.hosts': '/etc/hadoop/dn.incl',
        'dfs.hosts.exclude': '/etc/hadoop/dn.excl'
    }

    # inserting user-defined configs
    for key, value in extract_hadoop_xml_confs(configs):
        cfg[key] = value

    # Add the swift defaults if they have not been set by the user
    swft_def = []
    if is_swift_enabled(configs):
        swft_def = SWIFT_DEFAULTS
        swift_configs = extract_name_values(swift.get_swift_configs())
        for key, value in six.iteritems(swift_configs):
            if key not in cfg:
                cfg[key] = value

    # applying the collected configs to the appropriate xml files
    core_all = CORE_DEFAULT + swft_def

    if CONF.enable_data_locality:
        cfg.update(topology.TOPOLOGY_CONFIG)
        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(cfg, core_all),
        'hdfs-site': x.create_hadoop_xml(cfg, HDFS_DEFAULT)
    }

    return xml_configs
Example #20
    def test_get_swift_configs(self, authUrlConfig):
        self.setup_context(tenant_name='test_tenant')
        self.override_config("os_auth_protocol", 'http')
        self.override_config("os_auth_port", '8080')
        authUrlConfig.return_value = "http://localhost:8080/v2.0/"

        result = h.get_swift_configs()
        self.assertEqual(7, len(result))
        self.assertIn(
            {
                'name': "fs.swift.service.sahara.tenant",
                'value': 'test_tenant',
                'description': ''
            }, result)
        self.assertIn(
            {
                'name': "fs.swift.service.sahara.http.port",
                'value': '8080',
                'description': ''
            }, result)
Example #21
def get_general_configs(hive_hostname, passwd_hive_mysql):
    config = {
        ENABLE_SWIFT.name: {
            'default_value': ENABLE_SWIFT.default_value,
            'conf': extract_name_values(swift.get_swift_configs())
        },
        ENABLE_MYSQL.name: {
            'default_value': ENABLE_MYSQL.default_value,
            'conf': m_h.get_required_mysql_configs(
                hive_hostname, passwd_hive_mysql)
        }
    }
    if CONF.enable_data_locality:
        config.update({
            ENABLE_DATA_LOCALITY.name: {
                'default_value': ENABLE_DATA_LOCALITY.default_value,
                'conf': extract_name_values(topology.vm_awareness_all_config())
            }
        })
    return config
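ENABLE_SWIFT and its siblings are used throughout via .name, .default_value, and .applicable_target, so they are evidently config-description objects. A minimal stand-in with just those attributes (the default value is an assumption):

# Minimal stand-in for the config objects referenced in these examples.
class ConfigSketch(object):
    def __init__(self, name, applicable_target, default_value):
        self.name = name
        self.applicable_target = applicable_target
        self.default_value = default_value

# 'Enable Swift' under the 'general' target matches Example #2's test
# input; True as the default is an assumption.
ENABLE_SWIFT = ConfigSketch('Enable Swift', 'general', True)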
Example #22
def get_general_configs(hive_hostname, passwd_hive_mysql):
    config = {
        ENABLE_SWIFT.name: {
            'default_value': ENABLE_SWIFT.default_value,
            'conf': extract_name_values(swift.get_swift_configs())
        },
        ENABLE_MYSQL.name: {
            'default_value':
            ENABLE_MYSQL.default_value,
            'conf':
            m_h.get_required_mysql_configs(hive_hostname, passwd_hive_mysql)
        }
    }
    if CONF.enable_data_locality:
        config.update({
            ENABLE_DATA_LOCALITY.name: {
                'default_value': ENABLE_DATA_LOCALITY.default_value,
                'conf': extract_name_values(topology.vm_awareness_all_config())
            }
        })
    return config
Example #23
    def test_generate_xml_configs(self, auth_url):
        auth_url.return_value = "http://localhost:5000/v2/"

        # Make a dict of swift configs to verify generated values
        swift_vals = c_helper.extract_name_values(swift.get_swift_configs())

        # Make sure that all the swift configs are in core-site
        c = c_helper.generate_xml_configs({}, ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        self.assertDictContainsSubset(swift_vals, properties)

        # Make sure that user values have precedence over defaults
        c = c_helper.generate_xml_configs(
            {'HDFS': {
                'fs.swift.service.sahara.tenant': 'fred'
            }}, ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        mod_swift_vals = copy.copy(swift_vals)
        mod_swift_vals['fs.swift.service.sahara.tenant'] = 'fred'
        self.assertDictContainsSubset(mod_swift_vals, properties)

        # Make sure that swift configs are left out if not enabled
        c = c_helper.generate_xml_configs(
            {
                'HDFS': {
                    'fs.swift.service.sahara.tenant': 'fred'
                },
                'general': {
                    'Enable Swift': False
                }
            }, ['/mnt/one'], 'localhost', None)
        doc = xml.parseString(c['core-site'])
        configuration = doc.getElementsByTagName('configuration')
        properties = xmlutils.get_property_dict(configuration[0])
        for key in mod_swift_vals.keys():
            self.assertNotIn(key, properties)
Example #24
    def test_generate_xml_configs(self, auth_url):
        auth_url.return_value = "http://localhost:5000/v2/"

        # Make a dict of swift configs to verify generated values
        swift_vals = c_helper.extract_name_values(swift.get_swift_configs())

        # Make sure that all the swift configs are in core-site
        c = c_helper.generate_xml_configs({}, ["/mnt/one"], "localhost", None)
        doc = xml.parseString(c["core-site"])
        configuration = doc.getElementsByTagName("configuration")
        properties = xmlutils.get_property_dict(configuration[0])
        self.assertDictContainsSubset(swift_vals, properties)

        # Make sure that user values have precedence over defaults
        c = c_helper.generate_xml_configs(
            {"HDFS": {"fs.swift.service.sahara.tenant": "fred"}}, ["/mnt/one"], "localhost", None
        )
        doc = xml.parseString(c["core-site"])
        configuration = doc.getElementsByTagName("configuration")
        properties = xmlutils.get_property_dict(configuration[0])
        mod_swift_vals = copy.copy(swift_vals)
        mod_swift_vals["fs.swift.service.sahara.tenant"] = "fred"
        self.assertDictContainsSubset(mod_swift_vals, properties)

        # Make sure that swift configs are left out if not enabled
        c = c_helper.generate_xml_configs(
            {"HDFS": {"fs.swift.service.sahara.tenant": "fred"}, "general": {"Enable Swift": False}},
            ["/mnt/one"],
            "localhost",
            None,
        )
        doc = xml.parseString(c["core-site"])
        configuration = doc.getElementsByTagName("configuration")
        properties = xmlutils.get_property_dict(configuration[0])
        for key in mod_swift_vals.keys():
            self.assertNotIn(key, properties)
Example #25
def get_swift_configs(**kwargs):
    return swift_helper.get_swift_configs()
Example #26
    def _get_configs(self, service, cluster=None, instance=None):
        def get_hadoop_dirs(mount_points, suffix):
            return ','.join([x + suffix for x in mount_points])

        all_confs = {}
        if cluster:
            zk_count = v._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
            core_site_safety_valve = ''
            if self.pu.c_helper.is_swift_enabled(cluster):
                configs = swift_helper.get_swift_configs()
                confs = {c['name']: c['value'] for c in configs}
                core_site_safety_valve = xmlutils.create_elements_xml(confs)
            all_confs = {
                'HDFS': {
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                    'core_site_safety_valve': core_site_safety_valve
                },
                'HIVE': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'OOZIE': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'YARN': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'HUE': {
                    'hive_service': self.HIVE_SERVICE_NAME,
                    'oozie_service': self.OOZIE_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'SPARK_ON_YARN': {
                    'yarn_service': self.YARN_SERVICE_NAME
                },
                'HBASE': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
                }
            }
            hive_confs = {
                'HIVE': {
                    'hive_metastore_database_type': 'postgresql',
                    'hive_metastore_database_host':
                        self.pu.get_manager(cluster).internal_ip,
                    'hive_metastore_database_port': '7432',
                    'hive_metastore_database_password':
                        self.pu.db_helper.get_hive_db_password(cluster)
                }
            }
            hue_confs = {
                'HUE': {
                    'hue_webhdfs':
                        self.pu.get_role_name(self.pu.get_namenode(cluster),
                                              'NAMENODE')
                }
            }

            all_confs = s_cfg.merge_configs(all_confs, hue_confs)
            all_confs = s_cfg.merge_configs(all_confs, hive_confs)
            all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

        if instance:
            paths = instance.storage_paths()

            instance_default_confs = {
                'NAMENODE': {
                    'dfs_name_dir_list': get_hadoop_dirs(paths, '/fs/nn')
                },
                'SECONDARYNAMENODE': {
                    'fs_checkpoint_dir_list':
                        get_hadoop_dirs(paths, '/fs/snn')
                },
                'DATANODE': {
                    'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'),
                    'dfs_datanode_data_dir_perm': 755,
                    'dfs_datanode_handler_count': 30
                },
                'NODEMANAGER': {
                    'yarn_nodemanager_local_dirs':
                        get_hadoop_dirs(paths, '/yarn/local')
                },
                'SERVER': {
                    'maxSessionTimeout': 60000
                }
            }

            ng_user_confs = self.pu.convert_process_configs(
                instance.node_group.node_configs)
            all_confs = s_cfg.merge_configs(all_confs, ng_user_confs)
            all_confs = s_cfg.merge_configs(all_confs, instance_default_confs)

        return all_confs.get(service, {})
Example #27
 def _get_swift_properties(self):
     return h.get_swift_configs()
Example #28
    def _get_configs(self, service, cluster=None, instance=None):
        def get_hadoop_dirs(mount_points, suffix):
            return ",".join([x + suffix for x in mount_points])

        all_confs = {}
        if cluster:
            zk_count = self.validator._get_inst_count(cluster, "ZOOKEEPER_SERVER")
            core_site_safety_valve = ""
            if self.pu.c_helper.is_swift_enabled(cluster):
                configs = swift_helper.get_swift_configs()
                confs = {c["name"]: c["value"] for c in configs}
                core_site_safety_valve = xmlutils.create_elements_xml(confs)
            all_confs = {
                "HDFS": {
                    "zookeeper_service": self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
                    "core_site_safety_valve": core_site_safety_valve,
                },
                "HIVE": {
                    "mapreduce_yarn_service": self.YARN_SERVICE_NAME,
                    "zookeeper_service": self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
                },
                "OOZIE": {
                    "mapreduce_yarn_service": self.YARN_SERVICE_NAME,
                    "zookeeper_service": self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
                },
                "YARN": {
                    "hdfs_service": self.HDFS_SERVICE_NAME,
                    "zookeeper_service": self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
                },
                "HUE": {
                    "hive_service": self.HIVE_SERVICE_NAME,
                    "oozie_service": self.OOZIE_SERVICE_NAME,
                    "zookeeper_service": self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
                },
                "SPARK_ON_YARN": {"yarn_service": self.YARN_SERVICE_NAME},
                "HBASE": {"hdfs_service": self.HDFS_SERVICE_NAME, "zookeeper_service": self.ZOOKEEPER_SERVICE_NAME},
            }
            hive_confs = {
                "HIVE": {
                    "hive_metastore_database_type": "postgresql",
                    "hive_metastore_database_host": self.pu.get_manager(cluster).internal_ip,
                    "hive_metastore_database_port": "7432",
                    "hive_metastore_database_password": self.pu.db_helper.get_hive_db_password(cluster),
                }
            }
            hue_confs = {"HUE": {"hue_webhdfs": self.pu.get_role_name(self.pu.get_namenode(cluster), "NAMENODE")}}

            all_confs = s_cfg.merge_configs(all_confs, hue_confs)
            all_confs = s_cfg.merge_configs(all_confs, hive_confs)
            all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

        if instance:
            paths = instance.storage_paths()

            instance_default_confs = {
                "NAMENODE": {"dfs_name_dir_list": get_hadoop_dirs(paths, "/fs/nn")},
                "SECONDARYNAMENODE": {"fs_checkpoint_dir_list": get_hadoop_dirs(paths, "/fs/snn")},
                "DATANODE": {
                    "dfs_data_dir_list": get_hadoop_dirs(paths, "/fs/dn"),
                    "dfs_datanode_data_dir_perm": 755,
                    "dfs_datanode_handler_count": 30,
                },
                "NODEMANAGER": {"yarn_nodemanager_local_dirs": get_hadoop_dirs(paths, "/yarn/local")},
                "SERVER": {"maxSessionTimeout": 60000},
            }

            ng_user_confs = self.pu.convert_process_configs(instance.node_group.node_configs)
            all_confs = s_cfg.merge_configs(all_confs, ng_user_confs)
            all_confs = s_cfg.merge_configs(all_confs, instance_default_confs)

        return all_confs.get(service, {})
Example #29
File: config.py Project: uladz/sahara
def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        "Hadoop": {"fs.defaultFS": "hdfs://%s:9000" % nn_hostname},
        "HDFS": {
            "dfs.namenode.name.dir": ",".join(dirs["hadoop_name_dirs"]),
            "dfs.datanode.data.dir": ",".join(dirs["hadoop_data_dirs"]),
            "dfs.hosts": "%s/dn-include" % HADOOP_CONF_DIR,
            "dfs.hosts.exclude": "%s/dn-exclude" % HADOOP_CONF_DIR,
        },
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs["YARN"] = {
            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
            "yarn.resourcemanager.hostname": "%s" % res_hostname,
            "yarn.resourcemanager.nodes.include-path": "%s/nm-include" % (HADOOP_CONF_DIR),
            "yarn.resourcemanager.nodes.exclude-path": "%s/nm-exclude" % (HADOOP_CONF_DIR),
        }
        confs["MapReduce"] = {"mapreduce.framework.name": "yarn"}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs["MapReduce"]["mapreduce.jobhistory.address"] = "%s:10020" % hs_hostname

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {"hadoop.proxyuser.hadoop.hosts": "*", "hadoop.proxyuser.hadoop.groups": "hadoop"}
        confs["Hadoop"].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs["JobFlow"] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config["name"]] = config["value"]

        confs["Hadoop"].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs["Hadoop"].update(th.TOPOLOGY_CONFIG)
        confs["Hadoop"].update({"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            "hive.warehouse.subdir.inherit.perms": True,
            "javax.jdo.option.ConnectionURL": "jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true",
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update(
                {
                    "javax.jdo.option.ConnectionURL": "jdbc:mysql://%s/metastore" % hive_hostname,
                    "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
                    "javax.jdo.option.ConnectionUserName": "******",
                    "javax.jdo.option.ConnectionPassword": "******",
                    "datanucleus.autoCreateSchema": "false",
                    "datanucleus.fixedDatastore": "true",
                    "hive.metastore.uris": "thrift://%s:9083" % hive_hostname,
                }
            )

        proxy_configs = cluster.cluster_configs.get("proxy_configs")
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            key = key_manager.API().get(context.current(), proxy_configs["proxy_password"])
            password = key.get_encoded()
            hive_cfg.update(
                {
                    swift.HADOOP_SWIFT_USERNAME: proxy_configs["proxy_username"],
                    swift.HADOOP_SWIFT_PASSWORD: password,
                    swift.HADOOP_SWIFT_TRUST_ID: proxy_configs["proxy_trust_id"],
                    swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name,
                }
            )

        confs["Hive"] = hive_cfg

    return confs
Example #30
File: config.py Project: egafford/sahara
def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                'com.mysql.jdbc.Driver',
                'javax.jdo.option.ConnectionUserName': '******',
                'javax.jdo.option.ConnectionPassword': '******',
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs
Example #31
 def get_swift_configs(self):
     mapper = lambda i: (i['name'], i['value'])
     file_name = '%s/core-site.xml' % self.get_hadoop_conf_dir()
     service = self.plugin_spec.get_service_for_file_name(file_name)
     data = dict(map(mapper, sh.get_swift_configs()))
     return {service: {file_name: data}}
Example #32
def _get_hadoop_configs(pctx, node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                'com.mysql.jdbc.Driver',
                'javax.jdo.option.ConnectionUserName': '******',
                'javax.jdo.option.ConnectionPassword': '******',
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs
Example #33
    def _get_configs(self, service, cluster=None, node_group=None):
        def get_hadoop_dirs(mount_points, suffix):
            return ','.join([x + suffix for x in mount_points])

        all_confs = {}
        if cluster:
            zk_count = v._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
            hbm_count = v._get_inst_count(cluster, 'HBASE_MASTER')
            snt_count = v._get_inst_count(cluster, 'SENTRY_SERVER')
            ks_count = v._get_inst_count(cluster, 'KEY_VALUE_STORE_INDEXER')
            imp_count = v._get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
            hive_count = v._get_inst_count(cluster, 'HIVE_METASTORE')
            slr_count = v._get_inst_count(cluster, 'SOLR_SERVER')
            sqp_count = v._get_inst_count(cluster, 'SQOOP_SERVER')
            core_site_safety_valve = ''
            if self.pu.c_helper.is_swift_enabled(cluster):
                configs = swift_helper.get_swift_configs()
                confs = {c['name']: c['value'] for c in configs}
                core_site_safety_valve = xmlutils.create_elements_xml(confs)
            all_confs = {
                'HDFS': {
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                    'dfs_block_local_path_access_user':
                        '******' if imp_count else '',
                    'core_site_safety_valve': core_site_safety_valve
                },
                'HIVE': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                    'sentry_service':
                        self.SENTRY_SERVICE_NAME if snt_count else '',
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'OOZIE': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                    'hive_service':
                        self.HIVE_SERVICE_NAME if hive_count else '',
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'YARN': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'HUE': {
                    'hive_service': self.HIVE_SERVICE_NAME,
                    'oozie_service': self.OOZIE_SERVICE_NAME,
                    'sentry_service':
                        self.SENTRY_SERVICE_NAME if snt_count else '',
                    'solr_service':
                        self.SOLR_SERVICE_NAME if slr_count else '',
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                    'hbase_service':
                        self.HBASE_SERVICE_NAME if hbm_count else '',
                    'impala_service':
                        self.IMPALA_SERVICE_NAME if imp_count else '',
                    'sqoop_service':
                        self.SQOOP_SERVICE_NAME if sqp_count else ''
                },
                'SPARK_ON_YARN': {
                    'yarn_service': self.YARN_SERVICE_NAME
                },
                'HBASE': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME,
                    'hbase_enable_indexing': 'true' if ks_count else 'false',
                    'hbase_enable_replication':
                        'true' if ks_count else 'false'
                },
                'FLUME': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'solr_service':
                        self.SOLR_SERVICE_NAME if slr_count else '',
                    'hbase_service':
                        self.HBASE_SERVICE_NAME if hbm_count else ''
                },
                'SENTRY': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'sentry_server_config_safety_valve': (
                        c_helper.SENTRY_IMPALA_CLIENT_SAFETY_VALVE
                        if imp_count else '')
                },
                'SOLR': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
                },
                'SQOOP': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME
                },
                'KS_INDEXER': {
                    'hbase_service': self.HBASE_SERVICE_NAME,
                    'solr_service': self.SOLR_SERVICE_NAME
                },
                'IMPALA': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'hbase_service':
                        self.HBASE_SERVICE_NAME if hbm_count else '',
                    'hive_service': self.HIVE_SERVICE_NAME,
                    'sentry_service':
                        self.SENTRY_SERVICE_NAME if snt_count else '',
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                }
            }
            hive_confs = {
                'HIVE': {
                    'hive_metastore_database_type': 'postgresql',
                    'hive_metastore_database_host':
                        self.pu.get_manager(cluster).internal_ip,
                    'hive_metastore_database_port': '7432',
                    'hive_metastore_database_password':
                        self.pu.db_helper.get_hive_db_password(cluster)
                }
            }
            hue_confs = {
                'HUE': {
                    'hue_webhdfs': self.pu.get_role_name(
                        self.pu.get_namenode(cluster), 'NAMENODE')
                }
            }
            sentry_confs = {
                'SENTRY': {
                    'sentry_server_database_type': 'postgresql',
                    'sentry_server_database_host':
                        self.pu.get_manager(cluster).internal_ip,
                    'sentry_server_database_port': '7432',
                    'sentry_server_database_password':
                        self.pu.db_helper.get_sentry_db_password(cluster)
                }
            }

            all_confs = _merge_dicts(all_confs, hue_confs)
            all_confs = _merge_dicts(all_confs, hive_confs)
            all_confs = _merge_dicts(all_confs, sentry_confs)
            all_confs = _merge_dicts(all_confs, cluster.cluster_configs)

        if node_group:
            paths = node_group.storage_paths()

            ng_default_confs = {
                'NAMENODE': {
                    'dfs_name_dir_list': get_hadoop_dirs(paths, '/fs/nn')
                },
                'SECONDARYNAMENODE': {
                    'fs_checkpoint_dir_list':
                        get_hadoop_dirs(paths, '/fs/snn')
                },
                'DATANODE': {
                    'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'),
                    'dfs_datanode_data_dir_perm': 755,
                    'dfs_datanode_handler_count': 30
                },
                'NODEMANAGER': {
                    'yarn_nodemanager_local_dirs':
                        get_hadoop_dirs(paths, '/yarn/local')
                },
                'SERVER': {
                    'maxSessionTimeout': 60000
                },
                'HIVESERVER2': {
                    'hiveserver2_enable_impersonation':
                        'false' if snt_count else 'true',
                    'hive_hs2_config_safety_valve': (
                        c_helper.HIVE_SERVER2_SENTRY_SAFETY_VALVE
                        if snt_count else '')
                },
                'HIVEMETASTORE': {
                    'hive_metastore_config_safety_valve': (
                        c_helper.HIVE_METASTORE_SENTRY_SAFETY_VALVE
                        if snt_count else '')
                }
            }

            ng_user_confs = self.pu.convert_process_configs(
                node_group.node_configs)
            all_confs = _merge_dicts(all_confs, ng_user_confs)
            all_confs = _merge_dicts(all_confs, ng_default_confs)

        return all_confs.get(service, {})
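
Example #38 above, like the two that follow, builds a dict of per-service defaults, deep-merges user-supplied configs on top, and finally returns only the slice for the requested service. The _merge_dicts helper itself is not part of these listings; the sketch below is an assumed two-level merge consistent with how the examples call it, where the second argument wins on conflicting keys.

def _merge_dicts(base, extra):
    # Assumed behavior: merge per-service sub-dicts key by key,
    # letting values from `extra` override values from `base`.
    result = dict(base)
    for service, confs in extra.items():
        merged = dict(result.get(service, {}))
        merged.update(confs)
        result[service] = merged
    return result

defaults = {'HIVE': {'hive_metastore_database_port': '7432'}}
user = {'HIVE': {'hive_metastore_database_port': '5433'}}
assert _merge_dicts(defaults, user)['HIVE'][
    'hive_metastore_database_port'] == '5433'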
Example #39
    def _get_configs(self, service, cluster=None, node_group=None):
        def get_hadoop_dirs(mount_points, suffix):
            return ','.join([x + suffix for x in mount_points])

        all_confs = {}
        if cluster:
            zk_count = v._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
            core_site_safety_valve = ''
            if self.pu.c_helper.is_swift_enabled(cluster):
                configs = swift_helper.get_swift_configs()
                confs = dict((c['name'], c['value']) for c in configs)
                core_site_safety_valve = xmlutils.create_elements_xml(confs)
            all_confs = {
                'HDFS': {
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                    'core_site_safety_valve': core_site_safety_valve
                },
                'HIVE': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'OOZIE': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'YARN': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'HUE': {
                    'hive_service': self.HIVE_SERVICE_NAME,
                    'oozie_service': self.OOZIE_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'SPARK_ON_YARN': {
                    'yarn_service': self.YARN_SERVICE_NAME
                },
                'HBASE': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
                }
            }
            hive_confs = {
                'HIVE': {
                    'hive_metastore_database_type': 'postgresql',
                    'hive_metastore_database_host':
                        self.pu.get_manager(cluster).internal_ip,
                    'hive_metastore_database_port': '7432',
                    'hive_metastore_database_password':
                        self.pu.db_helper.get_hive_db_password(cluster)
                }
            }
            hue_confs = {
                'HUE': {
                    'hue_webhdfs':
                        self.pu.get_role_name(self.pu.get_namenode(cluster),
                                              'NAMENODE')
                }
            }

            all_confs = _merge_dicts(all_confs, hue_confs)
            all_confs = _merge_dicts(all_confs, hive_confs)
            all_confs = _merge_dicts(all_confs, cluster.cluster_configs)

        if node_group:
            paths = node_group.storage_paths()

            ng_default_confs = {
                'NAMENODE': {
                    'dfs_name_dir_list': get_hadoop_dirs(paths, '/fs/nn')
                },
                'SECONDARYNAMENODE': {
                    'fs_checkpoint_dir_list':
                        get_hadoop_dirs(paths, '/fs/snn')
                },
                'DATANODE': {
                    'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'),
                    'dfs_datanode_data_dir_perm': 755,
                    'dfs_datanode_handler_count': 30
                },
                'NODEMANAGER': {
                    'yarn_nodemanager_local_dirs':
                        get_hadoop_dirs(paths, '/yarn/local')
                },
                'SERVER': {
                    'maxSessionTimeout': 60000
                }
            }

            ng_user_confs = self.pu.convert_process_configs(
                node_group.node_configs)
            all_confs = _merge_dicts(all_confs, ng_user_confs)
            all_confs = _merge_dicts(all_confs, ng_default_confs)

        return all_confs.get(service, {})
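
Examples #39 and #40 both convert the list returned by swift_helper.get_swift_configs() into a core_site_safety_valve XML fragment. A minimal standalone sketch of that step follows; the sample entries are hypothetical (shaped like the {'name', 'value', 'description'} dicts seen in the other examples), and the rendering mimics what xmlutils.create_elements_xml is used for here, so the exact output formatting is an assumption.

from xml.sax.saxutils import escape

def create_elements_xml(confs):
    # Render a name -> value mapping as Hadoop <property> elements.
    parts = []
    for name, value in sorted(confs.items()):
        parts.append('<property>\n'
                     '  <name>%s</name>\n'
                     '  <value>%s</value>\n'
                     '</property>' % (escape(name), escape(str(value))))
    return '\n'.join(parts)

configs = [  # hypothetical sample, shaped like get_swift_configs() output
    {'name': 'fs.swift.service.sahara.auth.url',
     'value': 'http://localhost:5000/v2.0/tokens/', 'description': ''},
    {'name': 'fs.swift.service.sahara.http.port',
     'value': '8080', 'description': ''},
]
confs = {c['name']: c['value'] for c in configs}
core_site_safety_valve = create_elements_xml(confs)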
Example #40
    def _get_configs(self, service, cluster=None, instance=None):
        def get_hadoop_dirs(mount_points, suffix):
            return ','.join([x + suffix for x in mount_points])

        all_confs = {}
        if cluster:
            zk_count = self.validator._get_inst_count(cluster,
                                                      'ZOOKEEPER_SERVER')
            hbm_count = self.validator._get_inst_count(cluster, 'HBASE_MASTER')
            snt_count = self.validator._get_inst_count(cluster,
                                                       'SENTRY_SERVER')
            ks_count = self.validator._get_inst_count(
                cluster, 'KEY_VALUE_STORE_INDEXER')
            kms_count = self.validator._get_inst_count(cluster, 'KMS')
            imp_count = self.validator._get_inst_count(
                cluster, 'IMPALA_CATALOGSERVER')
            hive_count = self.validator._get_inst_count(
                cluster, 'HIVE_METASTORE')
            slr_count = self.validator._get_inst_count(cluster, 'SOLR_SERVER')
            sqp_count = self.validator._get_inst_count(cluster, 'SQOOP_SERVER')
            core_site_safety_valve = ''
            if self.pu.c_helper.is_swift_enabled(cluster):
                configs = swift_helper.get_swift_configs()
                confs = {c['name']: c['value'] for c in configs}
                core_site_safety_valve = xmlutils.create_elements_xml(confs)
            all_confs = {
                'HDFS': {
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                    # value redacted ('******') in the source listing;
                    # 'impala' is an assumed reconstruction
                    'dfs_block_local_path_access_user':
                        'impala' if imp_count else '',
                    'kms_service':
                        self.KMS_SERVICE_NAME if kms_count else '',
                    'core_site_safety_valve': core_site_safety_valve
                },
                'HIVE': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                    'sentry_service':
                        self.SENTRY_SERVICE_NAME if snt_count else '',
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'OOZIE': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                    'hive_service':
                        self.HIVE_SERVICE_NAME if hive_count else '',
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'YARN': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                },
                'HUE': {
                    'hive_service': self.HIVE_SERVICE_NAME,
                    'oozie_service': self.OOZIE_SERVICE_NAME,
                    'sentry_service':
                        self.SENTRY_SERVICE_NAME if snt_count else '',
                    'solr_service':
                        self.SOLR_SERVICE_NAME if slr_count else '',
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                    'hbase_service':
                        self.HBASE_SERVICE_NAME if hbm_count else '',
                    'impala_service':
                        self.IMPALA_SERVICE_NAME if imp_count else '',
                    'sqoop_service':
                        self.SQOOP_SERVICE_NAME if sqp_count else ''
                },
                'SPARK_ON_YARN': {
                    'yarn_service': self.YARN_SERVICE_NAME
                },
                'HBASE': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME,
                    'hbase_enable_indexing': 'true' if ks_count else 'false',
                    'hbase_enable_replication':
                        'true' if ks_count else 'false'
                },
                'FLUME': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'solr_service':
                        self.SOLR_SERVICE_NAME if slr_count else '',
                    'hbase_service':
                        self.HBASE_SERVICE_NAME if hbm_count else ''
                },
                'SENTRY': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'sentry_server_config_safety_valve': (
                        c_helper.SENTRY_IMPALA_CLIENT_SAFETY_VALVE
                        if imp_count else '')
                },
                'SOLR': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
                },
                'SQOOP': {
                    'mapreduce_yarn_service': self.YARN_SERVICE_NAME
                },
                'KS_INDEXER': {
                    'hbase_service': self.HBASE_SERVICE_NAME,
                    'solr_service': self.SOLR_SERVICE_NAME
                },
                'IMPALA': {
                    'hdfs_service': self.HDFS_SERVICE_NAME,
                    'hbase_service':
                        self.HBASE_SERVICE_NAME if hbm_count else '',
                    'hive_service': self.HIVE_SERVICE_NAME,
                    'sentry_service':
                        self.SENTRY_SERVICE_NAME if snt_count else '',
                    'zookeeper_service':
                        self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
                }
            }
            hive_confs = {
                'HIVE': {
                    'hive_metastore_database_type': 'postgresql',
                    'hive_metastore_database_host':
                        self.pu.get_manager(cluster).internal_ip,
                    'hive_metastore_database_port': '7432',
                    'hive_metastore_database_password':
                        self.pu.db_helper.get_hive_db_password(cluster)
                }
            }
            hue_confs = {
                'HUE': {
                    'hue_webhdfs': self.pu.get_role_name(
                        self.pu.get_namenode(cluster), 'NAMENODE')
                }
            }
            sentry_confs = {
                'SENTRY': {
                    'sentry_server_database_type': 'postgresql',
                    'sentry_server_database_host':
                        self.pu.get_manager(cluster).internal_ip,
                    'sentry_server_database_port': '7432',
                    'sentry_server_database_password':
                        self.pu.db_helper.get_sentry_db_password(cluster)
                }
            }

            all_confs = s_cfg.merge_configs(all_confs, hue_confs)
            all_confs = s_cfg.merge_configs(all_confs, hive_confs)
            all_confs = s_cfg.merge_configs(all_confs, sentry_confs)
            all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

        if instance:
            snt_count = self.validator._get_inst_count(instance.cluster,
                                                       'SENTRY_SERVER')
            paths = instance.storage_paths()

            instance_default_confs = {
                'NAMENODE': {
                    'dfs_name_dir_list': get_hadoop_dirs(paths, '/fs/nn')
                },
                'SECONDARYNAMENODE': {
                    'fs_checkpoint_dir_list':
                        get_hadoop_dirs(paths, '/fs/snn')
                },
                'DATANODE': {
                    'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'),
                    'dfs_datanode_data_dir_perm': 755,
                    'dfs_datanode_handler_count': 30
                },
                'NODEMANAGER': {
                    'yarn_nodemanager_local_dirs':
                        get_hadoop_dirs(paths, '/yarn/local')
                },
                'SERVER': {
                    'maxSessionTimeout': 60000
                },
                'HIVESERVER2': {
                    'hiveserver2_enable_impersonation':
                        'false' if snt_count else 'true',
                    'hive_hs2_config_safety_valve': (
                        c_helper.HIVE_SERVER2_SENTRY_SAFETY_VALVE
                        if snt_count else '')
                },
                'HIVEMETASTORE': {
                    'hive_metastore_config_safety_valve': (
                        c_helper.HIVE_METASTORE_SENTRY_SAFETY_VALVE
                        if snt_count else '')
                }
            }

            ng_user_confs = self.pu.convert_process_configs(
                instance.node_group.node_configs)
            all_confs = s_cfg.merge_configs(all_confs, ng_user_confs)
            all_confs = s_cfg.merge_configs(all_confs, instance_default_confs)

        return all_confs.get(service, {})
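
The nested get_hadoop_dirs helper, identical in all three examples, simply appends a suffix to every storage mount point and joins the results into the comma-separated list format these *_dir_list properties expect:

def get_hadoop_dirs(mount_points, suffix):
    return ','.join([x + suffix for x in mount_points])

# e.g. with two hypothetical attached volumes, as storage_paths()
# might report them:
get_hadoop_dirs(['/volumes/disk1', '/volumes/disk2'], '/fs/dn')
# -> '/volumes/disk1/fs/dn,/volumes/disk2/fs/dn'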