def _set_config(cfg, gen_cfg, name=None):
    # Merge a single component's conf when a name is given; merge every
    # component's conf when name is None.
    if name is None:
        for component in gen_cfg:
            cfg.update(gen_cfg[component]['conf'])
    elif name in gen_cfg:
        cfg.update(gen_cfg[name]['conf'])
    return cfg
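
# Usage sketch for _set_config (hypothetical data, not from this module):
# gen_cfg maps component names to dicts carrying a 'conf' key.
#
#     gen_cfg = {
#         'keystone': {'conf': {'debug': True}},
#         'neutron': {'conf': {'workers': 4}},
#     }
#     _set_config({}, gen_cfg, name='keystone')  # {'debug': True}
#     _set_config({}, gen_cfg)                   # {'debug': True, 'workers': 4}
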
def get_global_parameters(config_group):
    cfg = {}
    components = list(CONF.repositories.names)
    paths = []
    # Order does matter. First we add global defaults.
    for conf_path in ("resources/defaults.yaml", "resources/globals.yaml"):
        paths.append(get_resource_path(conf_path))
    # After that we add component defaults.
    for component in components:
        paths.append(os.path.join(CONF.repositories.path, component,
                                  "service/files/defaults.yaml"))
    # And finally we add the cluster-wide globals conf, if provided.
    if CONF.deploy_config:
        paths.append(CONF.deploy_config)

    for path in paths:
        if os.path.isfile(path):
            LOG.debug('Adding parameters from "%s"', path)
            with open(path, "r") as f:
                # safe_load avoids constructing arbitrary Python objects;
                # an empty YAML file parses to None, hence the "or {}".
                cfg.update((yaml.safe_load(f) or {}).get(config_group, {}))
        else:
            LOG.warning('"%s" not found, skipping', path)
    return cfg
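
# Precedence sketch (hypothetical file contents): later paths win, so a key
# from resources/defaults.yaml can be overridden by a component's
# service/files/defaults.yaml and, last of all, by CONF.deploy_config.
#
#     resources/defaults.yaml          versions: {java: '7'}
#     <component>/.../defaults.yaml    versions: {java: '8'}
#     CONF.deploy_config               versions: {java: '11'}
#
#     get_global_parameters('versions')  # {'java': '11'}
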
def _get_config():
    cfg = dict(CONF.images.items())
    if CONF.registry.address:
        cfg['namespace'] = '%s/%s' % (CONF.registry.address, cfg['namespace'])
    cfg.update(utils.get_global_parameters('versions'))
    return cfg
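
# Example (hypothetical CONF values): with CONF.images holding
# {'namespace': 'ccp', 'tag': 'latest'} and CONF.registry.address set to
# 'registry.example.org:5000', _get_config() yields
# {'namespace': 'registry.example.org:5000/ccp', 'tag': 'latest'}
# plus whatever keys the 'versions' global parameters contribute.
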
def generate_sahara_configs(cluster, node_group=None):
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    jt_hostname = vu.get_instance_hostname(vu.get_jobtracker(cluster))
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    storage_path = node_group.storage_paths() if node_group else None

    # inserting common configs depends on provisioned VMs and HDFS placement
    # TODO(aignatov): should be moved to cluster context
    cfg = {
        'fs.default.name': 'hdfs://%s:8020' % nn_hostname,
        'dfs.name.dir': extract_hadoop_path(storage_path,
                                            '/lib/hadoop/hdfs/namenode'),
        'dfs.data.dir': extract_hadoop_path(storage_path,
                                            '/lib/hadoop/hdfs/datanode'),
        'dfs.hosts': '/etc/hadoop/dn.incl',
        'dfs.hosts.exclude': '/etc/hadoop/dn.excl',
    }

    if jt_hostname:
        mr_cfg = {
            'mapred.job.tracker': '%s:8021' % jt_hostname,
            'mapred.system.dir': extract_hadoop_path(storage_path,
                                                     '/mapred/mapredsystem'),
            'mapred.local.dir': extract_hadoop_path(storage_path,
                                                    '/lib/hadoop/mapred'),
            'mapred.hosts': '/etc/hadoop/tt.incl',
            'mapred.hosts.exclude': '/etc/hadoop/tt.excl',
        }
        cfg.update(mr_cfg)

    if oozie_hostname:
        o_cfg = {
            'hadoop.proxyuser.hadoop.hosts': 'localhost,' + oozie_hostname,
            'hadoop.proxyuser.hadoop.groups': 'hadoop',
        }
        cfg.update(o_cfg)
        LOG.debug('Applied Oozie configs for core-site.xml')
        cfg.update(o_h.get_oozie_required_xml_configs())
        LOG.debug('Applied Oozie configs for oozie-site.xml')

    if hive_hostname:
        h_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
                'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true',
        }
        cfg.update(h_cfg)
        LOG.debug('Applied Hive config for hive metastore server')

    return cfg
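
# Shape of the result (illustrative values only): for a cluster whose only
# provisioned master process is a namenode on 'master-001', none of the
# jobtracker/oozie/hive branches fire and just the common HDFS keys remain:
#
#     {'fs.default.name': 'hdfs://master-001:8020',
#      'dfs.name.dir': ...,    # from extract_hadoop_path
#      'dfs.data.dir': ...,    # from extract_hadoop_path
#      'dfs.hosts': '/etc/hadoop/dn.incl',
#      'dfs.hosts.exclude': '/etc/hadoop/dn.excl'}
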
def _inject_swift_trust_info(cfg, cfg_filter, proxy_configs):
    cfg = cfg.copy()
    cfg.update({
        swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
        swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
        swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
        swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
    })

    allow_swift_auth_filter = [
        {'name': swift.HADOOP_SWIFT_USERNAME},
        {'name': swift.HADOOP_SWIFT_PASSWORD},
        {'name': swift.HADOOP_SWIFT_TRUST_ID},
        {'name': swift.HADOOP_SWIFT_DOMAIN_NAME},
    ]
    cfg_filter = cfg_filter + allow_swift_auth_filter

    return cfg, cfg_filter
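
# Usage sketch (hypothetical proxy_configs): the incoming cfg is copied, not
# mutated, and the filter is extended so the injected credentials survive a
# later filtering pass.
#
#     new_cfg, new_filter = _inject_swift_trust_info(
#         cfg={}, cfg_filter=[],
#         proxy_configs={'proxy_username': 'u',
#                        'proxy_password': 'p',
#                        'proxy_trust_id': 't'})
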
def generate_xml_configs(configs, storage_path, nn_hostname, hadoop_port):
    if hadoop_port is None:
        hadoop_port = 8020

    cfg = {
        'fs.defaultFS': 'hdfs://%s:%s' % (nn_hostname, str(hadoop_port)),
        'dfs.namenode.name.dir': extract_hadoop_path(storage_path, '/dfs/nn'),
        'dfs.datanode.data.dir': extract_hadoop_path(storage_path, '/dfs/dn'),
        'hadoop.tmp.dir': extract_hadoop_path(storage_path, '/dfs'),
        'dfs.hosts': '/etc/hadoop/dn.incl',
        'dfs.hosts.exclude': '/etc/hadoop/dn.excl'
    }

    # inserting user-defined configs
    for key, value in extract_hadoop_xml_confs(configs):
        cfg[key] = value

    # Add the swift defaults if they have not been set by the user
    swft_def = []
    if is_swift_enabled(configs):
        swft_def = SWIFT_DEFAULTS
        swift_configs = extract_name_values(swift.get_swift_configs())
        for key, value in six.iteritems(swift_configs):
            if key not in cfg:
                cfg[key] = value

    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swft_def

    if CONF.enable_data_locality:
        cfg.update(topology.TOPOLOGY_CONFIG)
        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(cfg, core_all),
        'hdfs-site': x.create_hadoop_xml(cfg, HDFS_DEFAULT)
    }

    return xml_configs
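
# Illustrative call (arguments are made up): returns rendered XML strings
# keyed by file name; hadoop_port falls back to 8020 when None.
#
#     generate_xml_configs(configs={}, storage_path=['/volumes/disk1'],
#                          nn_hostname='master-001', hadoop_port=None)
#     # {'core-site': '<configuration>...', 'hdfs-site': '<configuration>...'}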