def start_cluster(self, cluster):
    nn = vu.get_namenode(cluster)
    run.format_namenode(nn)
    run.start_hadoop_process(nn, 'namenode')

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(snn, 'secondarynamenode')

    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.start_yarn_process(rm, 'resourcemanager')

    run.start_dn_nm_processes(utils.get_instances(cluster))

    run.await_datanodes(cluster)

    hs = vu.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(self.pctx, oo)

    hiveserver = vu.get_hiveserver(cluster)
    if hiveserver:
        run.start_hiveserver_process(self.pctx, hiveserver)

    self._set_cluster_info(cluster)

def start_cluster(self, cluster):
    nn = vu.get_namenode(cluster)
    run.format_namenode(nn)
    run.start_hadoop_process(nn, 'namenode')

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(snn, 'secondarynamenode')

    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.start_yarn_process(rm, 'resourcemanager')

    run.start_all_processes(utils.get_instances(cluster),
                            ['datanode', 'nodemanager'])

    run.await_datanodes(cluster)

    hs = vu.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(self.pctx, oo)

    hiveserver = vu.get_hiveserver(cluster)
    if hiveserver:
        run.start_hiveserver_process(self.pctx, hiveserver)

    self._set_cluster_info(cluster)

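# Both start_cluster variants above block on run.await_datanodes(cluster)
# before bringing up the dependent services (history server, Oozie, Hive).
# That helper is not shown in this section; the sketch below is a minimal,
# assumed illustration of such a wait loop. _count_live_datanodes is a
# hypothetical helper (a real implementation typically parses the output of
# 'hdfs dfsadmin -report' on the namenode) and may differ from what Sahara
# actually does.
import time


def await_datanodes_sketch(cluster, timeout=600, poll_interval=10):
    expected = len(vu.get_datanodes(cluster))
    if expected == 0:
        return
    nn = vu.get_namenode(cluster)
    deadline = time.time() + timeout
    while time.time() < deadline:
        # _count_live_datanodes is an assumed helper, not part of this section
        if _count_live_datanodes(nn) >= expected:
            return
        time.sleep(poll_interval)
    raise RuntimeError(
        'datanodes did not become active within %d seconds' % timeout)
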
def _set_cluster_info(self, cluster):
    nn = vu.get_namenode(cluster)
    rm = vu.get_resourcemanager(cluster)
    hs = vu.get_historyserver(cluster)
    oo = vu.get_oozie(cluster)
    info = {}

    if rm:
        info['YARN'] = {
            'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
            'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
        }

    if nn:
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
            'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
        }

    if oo:
        info['JobFlow'] = {
            'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
        }

    if hs:
        info['MapReduce JobHistory Server'] = {
            'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
        }

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {'info': info})

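# For reference, when all four services are present, _set_cluster_info stores
# a structure of the following shape under the cluster's 'info' field. The
# addresses and hostname below are illustrative only.
EXAMPLE_CLUSTER_INFO = {
    'YARN': {
        'Web UI': 'http://10.0.0.5:8088',
        'ResourceManager': 'http://10.0.0.5:8032'
    },
    'HDFS': {
        'Web UI': 'http://10.0.0.4:50070',
        'NameNode': 'hdfs://master-001:9000'
    },
    'JobFlow': {
        'Oozie': 'http://10.0.0.6:11000'
    },
    'MapReduce JobHistory Server': {
        'Web UI': 'http://10.0.0.7:19888'
    }
}
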
def start_cluster(self, cluster):
    nn = vu.get_namenode(cluster)
    run.format_namenode(nn)
    run.start_hadoop_process(nn, "namenode")

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(snn, "secondarynamenode")

    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.start_yarn_process(rm, "resourcemanager")

    for dn in vu.get_datanodes(cluster):
        run.start_hadoop_process(dn, "datanode")

    run.await_datanodes(cluster)

    for nm in vu.get_nodemanagers(cluster):
        run.start_yarn_process(nm, "nodemanager")

    hs = vu.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(oo)

    self._set_cluster_info(cluster)

def _set_cluster_info(self, cluster):
    nn = vu.get_namenode(cluster)
    rm = vu.get_resourcemanager(cluster)
    hs = vu.get_historyserver(cluster)
    oo = vu.get_oozie(cluster)
    info = {}

    if rm:
        info["YARN"] = {
            "Web UI": "http://%s:%s" % (rm.management_ip, "8088"),
            "ResourceManager": "http://%s:%s" % (rm.management_ip, "8032"),
        }

    if nn:
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (nn.management_ip, "50070"),
            "NameNode": "hdfs://%s:%s" % (nn.hostname(), "9000"),
        }

    if oo:
        info["JobFlow"] = {
            "Oozie": "http://%s:%s" % (oo.management_ip, "11000"),
        }

    if hs:
        info["MapReduce JobHistory Server"] = {
            "Web UI": "http://%s:%s" % (hs.management_ip, "19888"),
        }

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"info": info})

def start_cluster(self, cluster):
    nn = vu.get_namenode(cluster)
    run.format_namenode(nn)
    run.start_hadoop_process(nn, 'namenode')

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(snn, 'secondarynamenode')

    rm = vu.get_resourcemanager(cluster)
    # the resourcemanager is optional, so guard against a None instance
    if rm:
        run.start_yarn_process(rm, 'resourcemanager')

    for dn in vu.get_datanodes(cluster):
        run.start_hadoop_process(dn, 'datanode')

    run.await_datanodes(cluster)

    for nm in vu.get_nodemanagers(cluster):
        run.start_yarn_process(nm, 'nodemanager')

    hs = vu.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(oo)

    self._set_cluster_info(cluster)

def _get_hadoop_configs(node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs['JobFlow'] = oozie_cfg

    if c_helper.get_config_value(c_helper.ENABLE_SWIFT.applicable_target,
                                 c_helper.ENABLE_SWIFT.name, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']
        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    return confs, c_helper.get_env_configs()

def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        "Hadoop": {"fs.defaultFS": "hdfs://%s:9000" % nn_hostname},
        "HDFS": {
            "dfs.namenode.name.dir": ",".join(dirs["hadoop_name_dirs"]),
            "dfs.datanode.data.dir": ",".join(dirs["hadoop_data_dirs"]),
            "dfs.hosts": "%s/dn-include" % HADOOP_CONF_DIR,
            "dfs.hosts.exclude": "%s/dn-exclude" % HADOOP_CONF_DIR,
        },
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs["YARN"] = {
            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
            "yarn.resourcemanager.hostname": "%s" % res_hostname,
            "yarn.resourcemanager.nodes.include-path": "%s/nm-include" % (HADOOP_CONF_DIR),
            "yarn.resourcemanager.nodes.exclude-path": "%s/nm-exclude" % (HADOOP_CONF_DIR),
        }
        confs["MapReduce"] = {"mapreduce.framework.name": "yarn"}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs["MapReduce"]["mapreduce.jobhistory.address"] = "%s:10020" % hs_hostname

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            "hadoop.proxyuser.hadoop.hosts": "*",
            "hadoop.proxyuser.hadoop.groups": "hadoop",
        }
        confs["Hadoop"].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs["JobFlow"] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config["name"]] = config["value"]
        confs["Hadoop"].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs["Hadoop"].update(th.TOPOLOGY_CONFIG)
        confs["Hadoop"].update({"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            "hive.warehouse.subdir.inherit.perms": True,
            "javax.jdo.option.ConnectionURL": "jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true",
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update(
                {
                    "javax.jdo.option.ConnectionURL": "jdbc:mysql://%s/metastore" % hive_hostname,
                    "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
                    "javax.jdo.option.ConnectionUserName": "******",
                    "javax.jdo.option.ConnectionPassword": "******",
                    "datanucleus.autoCreateSchema": "false",
                    "datanucleus.fixedDatastore": "true",
                    "hive.metastore.uris": "thrift://%s:9083" % hive_hostname,
                }
            )

        proxy_configs = cluster.cluster_configs.get("proxy_configs")
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            key = key_manager.API().get(context.current(), proxy_configs["proxy_password"])
            password = key.get_encoded()
            hive_cfg.update(
                {
                    swift.HADOOP_SWIFT_USERNAME: proxy_configs["proxy_username"],
                    swift.HADOOP_SWIFT_PASSWORD: password,
                    swift.HADOOP_SWIFT_TRUST_ID: proxy_configs["proxy_trust_id"],
                    swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name,
                }
            )

        confs["Hive"] = hive_cfg

    return confs

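# Unlike the variants that copy proxy_configs['proxy_password'] into the Hive
# config directly, the version above resolves the password through a key
# manager: the stored value is a secret reference, not the secret itself. A
# minimal sketch of that retrieval, assuming castellan-style semantics; the
# function name here is hypothetical.
def get_proxy_password_sketch(ctx, secret_ref):
    key = key_manager.API().get(ctx, secret_ref)  # look up the managed secret
    return key.get_encoded()                      # decode to the raw password
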
def _get_hadoop_configs(pctx, node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']
        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
                'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                    'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                    'com.mysql.jdbc.Driver',
                'javax.jdo.option.ConnectionUserName': '******',
                'javax.jdo.option.ConnectionPassword': '******',
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs

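# Each top-level group in the returned confs mapping ('Hadoop', 'HDFS',
# 'YARN', ...) is ultimately rendered into a Hadoop *-site.xml file. The
# renderer below is a minimal stdlib-only sketch of that step; Sahara's own
# XML helpers may differ.
from xml.sax.saxutils import escape


def render_hadoop_xml_sketch(props):
    lines = ['<?xml version="1.0"?>', '<configuration>']
    for name, value in sorted(props.items()):
        lines.append('  <property>')
        lines.append('    <name>%s</name>' % escape(str(name)))
        lines.append('    <value>%s</value>' % escape(str(value)))
        lines.append('  </property>')
    lines.append('</configuration>')
    return '\n'.join(lines)

# Example: render_hadoop_xml_sketch(confs['HDFS']) produces the body of an
# hdfs-site.xml for the node being configured.
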
def start_historyserver(self, cluster):
    hs = vu.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']
        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
                'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                    'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                    'com.mysql.jdbc.Driver',
                'javax.jdo.option.ConnectionUserName': '******',
                'javax.jdo.option.ConnectionPassword': '******',
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })

        confs['Hive'] = hive_cfg

    return confs

def _get_hadoop_configs(pctx, node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {'mapreduce.framework.name': 'yarn'}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']
        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update(
            {"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    return confs

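# Every _get_hadoop_configs variant above delegates to _get_hadoop_dirs,
# which this section does not show. The sketch below is an assumed, minimal
# illustration: it derives the HDFS name/data directories from the target's
# storage mount points. storage_paths() is an assumption here; the real
# helper may differ.
def _get_hadoop_dirs_sketch(target):
    paths = target.storage_paths()  # e.g. ['/volumes/disk1', '/volumes/disk2']
    return {
        'hadoop_name_dirs': [p + '/hdfs/namenode' for p in paths],
        'hadoop_data_dirs': [p + '/hdfs/datanode' for p in paths]
    }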