def _generate_configs(pctx, node_group):
    hadoop_xml_confs = _get_hadoop_configs(pctx, node_group)
    user_xml_confs, user_env_confs = _get_user_configs(pctx, node_group)
    xml_confs = s_cfg.merge_configs(user_xml_confs, hadoop_xml_confs)
    env_confs = s_cfg.merge_configs(pctx['env_confs'], user_env_confs)
    return xml_confs, env_confs
def _get_user_configs(pctx, node_group):
    ng_xml_confs, ng_env_confs = _separate_configs(
        node_group.node_configs, pctx['env_confs'])
    cl_xml_confs, cl_env_confs = _separate_configs(
        node_group.cluster.cluster_configs, pctx['env_confs'])
    xml_confs = s_cfg.merge_configs(cl_xml_confs, ng_xml_confs)
    env_confs = s_cfg.merge_configs(cl_env_confs, ng_env_confs)
    return xml_confs, env_confs
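The `_separate_configs` helper used above is not shown here. As a rough sketch (an assumption, not the actual Sahara source), it can be read as splitting a per-service config dict into parameters bound for the *-site.xml files and parameters that become environment settings, with the plugin context's known env configs acting as the discriminator:

def _separate_configs(configs, env_confs):
    # Hypothetical sketch: a parameter listed under its service in
    # env_confs is treated as an environment setting; everything else
    # goes to the XML bucket. Both results keep the service keying.
    xml_configs, env_configs = {}, {}
    for service, params in (configs or {}).items():
        for name, value in params.items():
            if name in env_confs.get(service, {}):
                env_configs.setdefault(service, {})[name] = value
            else:
                xml_configs.setdefault(service, {})[name] = value
    return xml_configs, env_configs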
def _generate_configs(pctx, instance):
    hadoop_xml_confs = _get_hadoop_configs(pctx, instance)
    user_xml_confs, user_env_confs = _get_user_configs(
        pctx, instance.node_group)
    xml_confs = s_cfg.merge_configs(user_xml_confs, hadoop_xml_confs)
    env_confs = s_cfg.merge_configs(pctx['env_confs'], user_env_confs)
    return xml_confs, env_confs
def cluster_create(self, context, values):
    """Create a cluster from the values dictionary."""
    # loading defaults
    merged_values = copy.deepcopy(CLUSTER_DEFAULTS)
    merged_values['tenant_id'] = context.tenant_id
    private_key, public_key = crypto.generate_key_pair()
    merged_values['management_private_key'] = private_key
    merged_values['management_public_key'] = public_key
    cluster_template_id = values.get('cluster_template_id')
    c_tmpl = None

    if cluster_template_id:
        c_tmpl = self.cluster_template_get(context, cluster_template_id)

        del c_tmpl['created_at']
        del c_tmpl['updated_at']
        del c_tmpl['id']

        # updating with cluster_template values
        merged_values.update(c_tmpl)

    # updating with values provided in request
    merged_values.update(values)

    if c_tmpl:
        merged_values['cluster_configs'] = configs.merge_configs(
            c_tmpl.get('cluster_configs'),
            values.get('cluster_configs'))

    merged_values['node_groups'] = self._populate_node_groups(
        context, merged_values)

    return self.db.cluster_create(context, merged_values)
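The second `merge_configs` call may look redundant after `merged_values.update(values)`, but it is not: `dict.update` replaces the nested `cluster_configs` dict wholesale, so template-level sections would otherwise be lost whenever the request supplies its own. A small illustration with hypothetical keys (the import path is assumed to match what the surrounding code uses):

from sahara.utils import configs  # assumed import path for the utility above

template_configs = {'HDFS': {'dfs.replication': '3'}}
request_configs = {'YARN': {'yarn.acl.enable': 'true'}}

# dict.update is shallow: the template's HDFS section disappears.
shallow = {'cluster_configs': template_configs}
shallow.update({'cluster_configs': request_configs})
assert shallow['cluster_configs'] == request_configs

# merge_configs combines the sections instead.
deep = configs.merge_configs(template_configs, request_configs)
assert deep == {'HDFS': {'dfs.replication': '3'},
                'YARN': {'yarn.acl.enable': 'true'}}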
def test_merge_configs(self):
    a = {
        'HDFS': {
            'param1': 'value1',
            'param2': 'value2'
        }
    }
    b = {
        'HDFS': {
            'param1': 'value3',
            'param3': 'value4'
        },
        'YARN': {
            'param5': 'value5'
        }
    }
    res = configs.merge_configs(a, b)
    expected = {
        'HDFS': {
            'param1': 'value3',
            'param2': 'value2',
            'param3': 'value4'
        },
        'YARN': {
            'param5': 'value5'
        }
    }
    self.assertEqual(expected, res)
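This test pins down the semantics every caller in this section relies on: later arguments win on key conflicts, and nested section dicts are merged recursively rather than replaced. A minimal sketch consistent with that behavior (an approximation, not the actual `configs` module source) also tolerates `None` arguments, which matters because callers pass results of `dict.get` that may be missing:

def merge_configs(*configs):
    # Approximation of the utility under test: later configs take
    # precedence, nested dicts merge recursively, None args are skipped.
    result = {}
    for config in configs:
        if config is None:
            continue
        for key, value in config.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                result[key] = merge_configs(result[key], value)
            else:
                result[key] = value
    return result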
def _write_config_files(self, cluster_context, instances):
    LOG.debug('Writing config files')

    def get_node_groups(instances):
        return util.unique_list(instances, lambda i: i.node_group)

    for ng in get_node_groups(instances):
        ng_services = cluster_context.get_cluster_services(ng)
        ng_user_configs = ng.configuration()
        ng_default_configs = cluster_context.get_services_configs_dict(
            ng_services)
        ng_configs = sahara_configs.merge_configs(
            ng_default_configs, ng_user_configs)
        ng_config_files = {}
        for service in ng_services:
            service_conf_files = service.get_config_files(
                cluster_context=cluster_context,
                configs=ng_configs[service.ui_name],
                instance=ng.instances[0]
            )
            LOG.debug('Rendering {ui_name} config files'.format(
                ui_name=service.ui_name))
            for conf_file in service_conf_files:
                ng_config_files[conf_file.remote_path] = conf_file.render()
        ng_instances = [i for i in ng.instances if i in instances]
        self._write_ng_config_files(ng_instances, ng_config_files)
    LOG.debug('Config files successfully written')
def get_configs(node_group):
    services = get_node_group_services(node_group)
    h_version = node_group.cluster.hadoop_version
    v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
    default_configs = get_default_configs(v_handler, services)
    user_configs = node_group.configuration()
    return c.merge_configs(default_configs, user_configs)
def _populate_node_group(self, context, node_group):
    node_group_merged = copy.deepcopy(NODE_GROUP_DEFAULTS)

    ng_tmpl_id = node_group.get('node_group_template_id')
    ng_tmpl = None
    if ng_tmpl_id:
        ng_tmpl = self.node_group_template_get(context, ng_tmpl_id)
        self._cleanup_node_group(ng_tmpl)
        node_group_merged.update(ng_tmpl)

    node_group_merged.update(node_group)

    if ng_tmpl:
        node_group_merged['node_configs'] = configs.merge_configs(
            ng_tmpl.get('node_configs'),
            node_group.get('node_configs'))

    return node_group_merged
def _get_configs(self, service, cluster=None, instance=None):
    def get_hadoop_dirs(mount_points, suffix):
        return ','.join([x + suffix for x in mount_points])

    all_confs = {}
    if cluster:
        zk_count = self.validator._get_inst_count(cluster,
                                                  'ZOOKEEPER_SERVER')
        hbm_count = self.validator._get_inst_count(cluster, 'HBASE_MASTER')
        snt_count = self.validator._get_inst_count(cluster,
                                                   'SENTRY_SERVER')
        ks_count = self.validator._get_inst_count(
            cluster, 'KEY_VALUE_STORE_INDEXER')
        kms_count = self.validator._get_inst_count(cluster, 'KMS')
        imp_count = self.validator._get_inst_count(
            cluster, 'IMPALA_CATALOGSERVER')
        hive_count = self.validator._get_inst_count(cluster,
                                                    'HIVE_METASTORE')
        slr_count = self.validator._get_inst_count(cluster, 'SOLR_SERVER')
        sqp_count = self.validator._get_inst_count(cluster, 'SQOOP_SERVER')
        core_site_safety_valve = ''
        if self.pu.c_helper.is_swift_enabled(cluster):
            configs = swift_helper.get_swift_configs()
            confs = {c['name']: c['value'] for c in configs}
            core_site_safety_valve = xmlutils.create_elements_xml(confs)
        all_confs = {
            'HDFS': {
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                'dfs_block_local_path_access_user':
                    '******' if imp_count else '',
                'kms_service': self.KMS_SERVICE_NAME if kms_count else '',
                'core_site_safety_valve': core_site_safety_valve
            },
            'HIVE': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                'sentry_service':
                    self.SENTRY_SERVICE_NAME if snt_count else '',
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'OOZIE': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                'hive_service':
                    self.HIVE_SERVICE_NAME if hive_count else '',
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'YARN': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'HUE': {
                'hive_service': self.HIVE_SERVICE_NAME,
                'oozie_service': self.OOZIE_SERVICE_NAME,
                'sentry_service':
                    self.SENTRY_SERVICE_NAME if snt_count else '',
                'solr_service':
                    self.SOLR_SERVICE_NAME if slr_count else '',
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                'hbase_service':
                    self.HBASE_SERVICE_NAME if hbm_count else '',
                'impala_service':
                    self.IMPALA_SERVICE_NAME if imp_count else '',
                'sqoop_service':
                    self.SQOOP_SERVICE_NAME if sqp_count else ''
            },
            'SPARK_ON_YARN': {
                'yarn_service': self.YARN_SERVICE_NAME
            },
            'HBASE': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME,
                'hbase_enable_indexing':
                    'true' if ks_count else 'false',
                'hbase_enable_replication':
                    'true' if ks_count else 'false'
            },
            'FLUME': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'solr_service':
                    self.SOLR_SERVICE_NAME if slr_count else '',
                'hbase_service':
                    self.HBASE_SERVICE_NAME if hbm_count else ''
            },
            'SENTRY': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'sentry_server_config_safety_valve': (
                    c_helper.SENTRY_IMPALA_CLIENT_SAFETY_VALVE
                    if imp_count else '')
            },
            'SOLR': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
            },
            'SQOOP': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME
            },
            'KS_INDEXER': {
                'hbase_service': self.HBASE_SERVICE_NAME,
                'solr_service': self.SOLR_SERVICE_NAME
            },
            'IMPALA': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'hbase_service':
                    self.HBASE_SERVICE_NAME if hbm_count else '',
                'hive_service': self.HIVE_SERVICE_NAME,
                'sentry_service':
                    self.SENTRY_SERVICE_NAME if snt_count else '',
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            }
        }
        hive_confs = {
            'HIVE': {
                'hive_metastore_database_type': 'postgresql',
                'hive_metastore_database_host':
                    self.pu.get_manager(cluster).internal_ip,
                'hive_metastore_database_port': '7432',
                'hive_metastore_database_password':
                    self.pu.db_helper.get_hive_db_password(cluster)
            }
        }
        hue_confs = {
            'HUE': {
                'hue_webhdfs': self.pu.get_role_name(
                    self.pu.get_namenode(cluster), 'NAMENODE')
            }
        }
        sentry_confs = {
            'SENTRY': {
                'sentry_server_database_type': 'postgresql',
                'sentry_server_database_host':
                    self.pu.get_manager(cluster).internal_ip,
                'sentry_server_database_port': '7432',
                'sentry_server_database_password':
                    self.pu.db_helper.get_sentry_db_password(cluster)
            }
        }
        all_confs = s_cfg.merge_configs(all_confs, hue_confs)
        all_confs = s_cfg.merge_configs(all_confs, hive_confs)
        all_confs = s_cfg.merge_configs(all_confs, sentry_confs)
        all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

    if instance:
        snt_count = self.validator._get_inst_count(instance.cluster,
                                                   'SENTRY_SERVER')
        paths = instance.storage_paths()
        instance_default_confs = {
            'NAMENODE': {
                'dfs_name_dir_list': get_hadoop_dirs(paths, '/fs/nn')
            },
            'SECONDARYNAMENODE': {
                'fs_checkpoint_dir_list': get_hadoop_dirs(paths, '/fs/snn')
            },
            'DATANODE': {
                'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'),
                'dfs_datanode_data_dir_perm': 755,
                'dfs_datanode_handler_count': 30
            },
            'NODEMANAGER': {
                'yarn_nodemanager_local_dirs':
                    get_hadoop_dirs(paths, '/yarn/local')
            },
            'SERVER': {
                'maxSessionTimeout': 60000
            },
            'HIVESERVER2': {
                'hiveserver2_enable_impersonation':
                    'false' if snt_count else 'true',
                'hive_hs2_config_safety_valve': (
                    c_helper.HIVE_SERVER2_SENTRY_SAFETY_VALVE
                    if snt_count else '')
            },
            'HIVEMETASTORE': {
                'hive_metastore_config_safety_valve': (
                    c_helper.HIVE_METASTORE_SENTRY_SAFETY_VALVE
                    if snt_count else '')
            }
        }

        ng_user_confs = self.pu.convert_process_configs(
            instance.node_group.node_configs)
        all_confs = s_cfg.merge_configs(all_confs, ng_user_confs)
        all_confs = s_cfg.merge_configs(all_confs, instance_default_confs)

    return all_confs.get(service, {})
def get_default_configs(version_handler, services=None):
    cluster_configs = get_cluster_default_configs(version_handler, services)
    node_configs = get_node_default_configs(version_handler, services)
    return c.merge_configs(cluster_configs, node_configs)
def configuration(self):
    return configs.merge_configs(self.cluster.cluster_configs,
                                 self.node_configs)
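Argument order sets the precedence here: the node-group configs are passed second, so a per-node-group value overrides the cluster-wide one for the same key, while keys unique to either side survive. With illustrative values (hypothetical, using the same `configs` utility):

cluster_configs = {'HDFS': {'dfs.replication': '3'}}
node_configs = {'HDFS': {'dfs.replication': '2',
                         'dfs.block.size': '134217728'}}
merged = configs.merge_configs(cluster_configs, node_configs)
assert merged == {'HDFS': {'dfs.replication': '2',
                           'dfs.block.size': '134217728'}}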
def get_configuration(self, node_group):
    services = self.get_cluster_services(node_group)
    user_configs = node_group.configuration()
    default_configs = self.get_services_configs_dict(services)
    return sahara_configs.merge_configs(default_configs, user_configs)
def merge_configs(config_a, config_b, **kwargs):
    # Thin wrapper around the shared utility; extra keyword arguments
    # are accepted but ignored.
    return sahara_configs.merge_configs(config_a, config_b)
def _get_configs(self, service, cluster=None, instance=None):
    def get_hadoop_dirs(mount_points, suffix):
        return ",".join([x + suffix for x in mount_points])

    all_confs = {}
    if cluster:
        zk_count = self.validator._get_inst_count(cluster,
                                                  "ZOOKEEPER_SERVER")
        core_site_safety_valve = ""
        if self.pu.c_helper.is_swift_enabled(cluster):
            configs = swift_helper.get_swift_configs()
            confs = {c["name"]: c["value"] for c in configs}
            core_site_safety_valve = xmlutils.create_elements_xml(confs)
        all_confs = {
            "HDFS": {
                "zookeeper_service":
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
                "core_site_safety_valve": core_site_safety_valve,
            },
            "HIVE": {
                "mapreduce_yarn_service": self.YARN_SERVICE_NAME,
                "zookeeper_service":
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
            },
            "OOZIE": {
                "mapreduce_yarn_service": self.YARN_SERVICE_NAME,
                "zookeeper_service":
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
            },
            "YARN": {
                "hdfs_service": self.HDFS_SERVICE_NAME,
                "zookeeper_service":
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
            },
            "HUE": {
                "hive_service": self.HIVE_SERVICE_NAME,
                "oozie_service": self.OOZIE_SERVICE_NAME,
                "zookeeper_service":
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else "",
            },
            "SPARK_ON_YARN": {"yarn_service": self.YARN_SERVICE_NAME},
            "HBASE": {
                "hdfs_service": self.HDFS_SERVICE_NAME,
                "zookeeper_service": self.ZOOKEEPER_SERVICE_NAME,
            },
        }
        hive_confs = {
            "HIVE": {
                "hive_metastore_database_type": "postgresql",
                "hive_metastore_database_host":
                    self.pu.get_manager(cluster).internal_ip,
                "hive_metastore_database_port": "7432",
                "hive_metastore_database_password":
                    self.pu.db_helper.get_hive_db_password(cluster),
            }
        }
        hue_confs = {
            "HUE": {
                "hue_webhdfs": self.pu.get_role_name(
                    self.pu.get_namenode(cluster), "NAMENODE")
            }
        }
        all_confs = s_cfg.merge_configs(all_confs, hue_confs)
        all_confs = s_cfg.merge_configs(all_confs, hive_confs)
        all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

    if instance:
        paths = instance.storage_paths()
        instance_default_confs = {
            "NAMENODE": {
                "dfs_name_dir_list": get_hadoop_dirs(paths, "/fs/nn")
            },
            "SECONDARYNAMENODE": {
                "fs_checkpoint_dir_list": get_hadoop_dirs(paths, "/fs/snn")
            },
            "DATANODE": {
                "dfs_data_dir_list": get_hadoop_dirs(paths, "/fs/dn"),
                "dfs_datanode_data_dir_perm": 755,
                "dfs_datanode_handler_count": 30,
            },
            "NODEMANAGER": {
                "yarn_nodemanager_local_dirs":
                    get_hadoop_dirs(paths, "/yarn/local")
            },
            "SERVER": {"maxSessionTimeout": 60000},
        }
        ng_user_confs = self.pu.convert_process_configs(
            instance.node_group.node_configs)
        all_confs = s_cfg.merge_configs(all_confs, ng_user_confs)
        all_confs = s_cfg.merge_configs(all_confs, instance_default_confs)

    return all_confs.get(service, {})
def _get_configs(self, service, cluster=None, node_group=None):
    def get_hadoop_dirs(mount_points, suffix):
        return ','.join([x + suffix for x in mount_points])

    all_confs = {}
    if cluster:
        zk_count = v._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
        core_site_safety_valve = ''
        if self.pu.c_helper.is_swift_enabled(cluster):
            configs = swift_helper.get_swift_configs()
            confs = {c['name']: c['value'] for c in configs}
            core_site_safety_valve = xmlutils.create_elements_xml(confs)
        all_confs = {
            'HDFS': {
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                'core_site_safety_valve': core_site_safety_valve
            },
            'HIVE': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'OOZIE': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'YARN': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'HUE': {
                'hive_service': self.HIVE_SERVICE_NAME,
                'oozie_service': self.OOZIE_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'SPARK_ON_YARN': {
                'yarn_service': self.YARN_SERVICE_NAME
            },
            'HBASE': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
            }
        }
        hive_confs = {
            'HIVE': {
                'hive_metastore_database_type': 'postgresql',
                'hive_metastore_database_host':
                    self.pu.get_manager(cluster).internal_ip,
                'hive_metastore_database_port': '7432',
                'hive_metastore_database_password':
                    self.pu.db_helper.get_hive_db_password(cluster)
            }
        }
        hue_confs = {
            'HUE': {
                'hue_webhdfs': self.pu.get_role_name(
                    self.pu.get_namenode(cluster), 'NAMENODE')
            }
        }
        all_confs = s_cfg.merge_configs(all_confs, hue_confs)
        all_confs = s_cfg.merge_configs(all_confs, hive_confs)
        all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

    if node_group:
        paths = node_group.storage_paths()
        ng_default_confs = {
            'NAMENODE': {
                'dfs_name_dir_list': get_hadoop_dirs(paths, '/fs/nn')
            },
            'SECONDARYNAMENODE': {
                'fs_checkpoint_dir_list': get_hadoop_dirs(paths, '/fs/snn')
            },
            'DATANODE': {
                'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'),
                'dfs_datanode_data_dir_perm': 755,
                'dfs_datanode_handler_count': 30
            },
            'NODEMANAGER': {
                'yarn_nodemanager_local_dirs':
                    get_hadoop_dirs(paths, '/yarn/local')
            },
            'SERVER': {
                'maxSessionTimeout': 60000
            }
        }
        ng_user_confs = self.pu.convert_process_configs(
            node_group.node_configs)
        all_confs = s_cfg.merge_configs(all_confs, ng_user_confs)
        all_confs = s_cfg.merge_configs(all_confs, ng_default_confs)

    return all_confs.get(service, {})
def _get_configs(self, service, cluster=None, instance=None):
    def get_hadoop_dirs(mount_points, suffix):
        return ','.join([x + suffix for x in mount_points])

    all_confs = {}
    if cluster:
        zk_count = v._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
        core_site_safety_valve = ''
        if self.pu.c_helper.is_swift_enabled(cluster):
            configs = swift_helper.get_swift_configs()
            confs = {c['name']: c['value'] for c in configs}
            core_site_safety_valve = xmlutils.create_elements_xml(confs)
        all_confs = {
            'HDFS': {
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                'core_site_safety_valve': core_site_safety_valve
            },
            'HIVE': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'OOZIE': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'YARN': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'HUE': {
                'hive_service': self.HIVE_SERVICE_NAME,
                'oozie_service': self.OOZIE_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'SPARK_ON_YARN': {
                'yarn_service': self.YARN_SERVICE_NAME
            },
            'HBASE': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
            }
        }
        hive_confs = {
            'HIVE': {
                'hive_metastore_database_type': 'postgresql',
                'hive_metastore_database_host':
                    self.pu.get_manager(cluster).internal_ip,
                'hive_metastore_database_port': '7432',
                'hive_metastore_database_password':
                    self.pu.db_helper.get_hive_db_password(cluster)
            }
        }
        hue_confs = {
            'HUE': {
                'hue_webhdfs': self.pu.get_role_name(
                    self.pu.get_namenode(cluster), 'NAMENODE')
            }
        }
        all_confs = s_cfg.merge_configs(all_confs, hue_confs)
        all_confs = s_cfg.merge_configs(all_confs, hive_confs)
        all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

    if instance:
        paths = instance.storage_paths()
        instance_default_confs = {
            'NAMENODE': {
                'dfs_name_dir_list': get_hadoop_dirs(paths, '/fs/nn')
            },
            'SECONDARYNAMENODE': {
                'fs_checkpoint_dir_list': get_hadoop_dirs(paths, '/fs/snn')
            },
            'DATANODE': {
                'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'),
                'dfs_datanode_data_dir_perm': 755,
                'dfs_datanode_handler_count': 30
            },
            'NODEMANAGER': {
                'yarn_nodemanager_local_dirs':
                    get_hadoop_dirs(paths, '/yarn/local')
            },
            'SERVER': {
                'maxSessionTimeout': 60000
            }
        }
        ng_user_confs = self.pu.convert_process_configs(
            instance.node_group.node_configs)
        all_confs = s_cfg.merge_configs(all_confs, ng_user_confs)
        all_confs = s_cfg.merge_configs(all_confs, instance_default_confs)

    return all_confs.get(service, {})
def get_cluster_configs(cluster):
    h_version = cluster.hadoop_version
    v_handler = vhf.VersionHandlerFactory.get().get_handler(h_version)
    default_configs = get_cluster_default_configs(v_handler)
    user_configs = cluster.cluster_configs
    return c.merge_configs(default_configs, user_configs)