def _set_cluster_info(self, cluster):
    """Publish IDH Manager/MapReduce/HDFS/Oozie endpoint URLs to the DB."""
    manager = u.get_instances(cluster, 'manager')[0]
    namenode = u.get_namenode(cluster)
    jobtracker = u.get_jobtracker(cluster)
    oozie = u.get_oozie(cluster)

    # TODO(alazarev) make port configurable (bug #1262895)
    info = {'IDH Manager': {
        'Web UI': 'https://%s:9443' % manager.management_ip
    }}
    if jobtracker:
        # TODO(alazarev) make port configurable (bug #1262895)
        info['MapReduce'] = {
            'Web UI': 'http://%s:50030' % jobtracker.management_ip
        }
        # TODO(alazarev) make port configurable (bug #1262895)
        info['MapReduce']['JobTracker'] = '%s:54311' % jobtracker.hostname()
    if namenode:
        # TODO(alazarev) make port configurable (bug #1262895)
        info['HDFS'] = {
            'Web UI': 'http://%s:50070' % namenode.management_ip
        }
        # TODO(alazarev) make port configurable (bug #1262895)
        info['HDFS']['NameNode'] = 'hdfs://%s:8020' % namenode.hostname()
    if oozie:
        # TODO(alazarev) make port configurable (bug #1262895)
        info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _set_cluster_info(self, cluster):
    """Record web UI / endpoint URLs for an IDH cluster in the database."""
    mng_ip = u.get_instances(cluster, 'manager')[0].management_ip
    nn = u.get_namenode(cluster)
    jt = u.get_jobtracker(cluster)
    oozie = u.get_oozie(cluster)

    # TODO(alazarev) make port configurable (bug #1262895)
    info = {
        'IDH Manager': {
            'Web UI': 'https://%s:9443' % mng_ip
        }
    }
    if jt:
        # TODO(alazarev) make port configurable (bug #1262895)
        info['MapReduce'] = {
            'Web UI': 'http://%s:50030' % jt.management_ip
        }
        # TODO(alazarev) make port configurable (bug #1262895)
        info['MapReduce']['JobTracker'] = '%s:54311' % jt.hostname()
    if nn:
        # TODO(alazarev) make port configurable (bug #1262895)
        info['HDFS'] = {'Web UI': 'http://%s:50070' % nn.management_ip}
        # TODO(alazarev) make port configurable (bug #1262895)
        info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()
    if oozie:
        # TODO(alazarev) make port configurable (bug #1262895)
        info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _set_cluster_info(self, cluster):
    """Store MapReduce/HDFS/Oozie endpoint info on the cluster record.

    Web UI and RPC ports are read from the cluster configuration via
    the config helper; the Oozie port is still hardcoded.
    """
    nn = vu.get_namenode(cluster)
    jt = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)

    info = {}
    if jt:
        ui_port = c_helper.get_port_from_config(
            "MapReduce", "mapred.job.tracker.http.address", cluster)
        jt_port = c_helper.get_port_from_config(
            "MapReduce", "mapred.job.tracker", cluster)
        info["MapReduce"] = {
            "Web UI": "http://%s:%s" % (jt.management_ip, ui_port),
            "JobTracker": "%s:%s" % (jt.hostname(), jt_port),
        }
    if nn:
        ui_port = c_helper.get_port_from_config(
            "HDFS", "dfs.http.address", cluster)
        nn_port = c_helper.get_port_from_config(
            "HDFS", "fs.default.name", cluster)
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (nn.management_ip, ui_port),
            "NameNode": "hdfs://%s:%s" % (nn.hostname(), nn_port),
        }
    if oozie:
        # TODO(yrunts) change from hardcode value
        info["JobFlow"] = {
            "Oozie": "http://%s:11000" % oozie.management_ip
        }
    conductor.cluster_update(context.ctx(), cluster, {"info": info})
def _set_cluster_info(self, cluster):
    """Publish YARN, HDFS, Oozie and JobHistory web endpoints."""
    namenode = vu.get_namenode(cluster)
    resmgr = vu.get_resourcemanager(cluster)
    history = vu.get_historyserver(cluster)
    oozie = vu.get_oozie(cluster)

    info = {}
    if resmgr:
        info['YARN'] = {
            'Web UI': 'http://%s:%s' % (resmgr.management_ip, '8088'),
            'ResourceManager': 'http://%s:%s' % (resmgr.management_ip,
                                                 '8032')
        }
    if namenode:
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (namenode.management_ip, '50070'),
            'NameNode': 'hdfs://%s:%s' % (namenode.hostname(), '9000')
        }
    if oozie:
        info['JobFlow'] = {
            'Oozie': 'http://%s:%s' % (oozie.management_ip, '11000')
        }
    if history:
        info['MapReduce JobHistory Server'] = {
            'Web UI': 'http://%s:%s' % (history.management_ip, '19888')
        }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _set_cluster_info(self, cluster, cluster_spec):
    """Collect service URLs from every deployed service and persist them."""
    info = {}
    for service in cluster_spec.services:
        if not service.deployed:
            continue
        service.register_service_urls(cluster_spec, info)
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _prepare_ranger(cluster):
    """Set up the MySQL backend used by Ranger Admin.

    Installs the MySQL JDBC connector on the Ambari server and points
    'ambari-server setup' at it, generates a random DB root password
    (persisted in cluster extra as "ranger_db_password"), then installs
    and initializes mysql-server on the Ranger node. No-op when the
    cluster has no Ranger Admin instance.
    """
    ranger = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN)
    if not ranger:
        return
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    with ambari.remote() as r:
        sudo = functools.partial(r.execute_command, run_as_root=True)
        # Install the connector first: the 'ambari-server setup' call
        # below references the jar this package provides.
        sudo("yum install -y mysql-connector-java")
        sudo("ambari-server setup --jdbc-db=mysql "
             "--jdbc-driver=/usr/share/java/mysql-connector-java.jar")
    init_db_template = (
        "create user 'root'@'%' identified by '{password}';\n"
        "set password for 'root'@'localhost' = password('{password}');")
    password = uuidutils.generate_uuid()
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra["ranger_db_password"] = password
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"extra": extra})
    with ranger.remote() as r:
        sudo = functools.partial(r.execute_command, run_as_root=True)
        # TODO(sreshetnyak): add ubuntu support
        sudo("yum install -y mysql-server")
        sudo("service mysqld start")
        r.write_file_to("/tmp/init.sql",
                        init_db_template.format(password=password))
        sudo("mysql < /tmp/init.sql")
        sudo("rm /tmp/init.sql")
def _set_cluster_info(self, cluster):
    """Store HDFS and Spark master web endpoints on the cluster record."""
    namenode = utils.get_instance(cluster, "namenode")
    master = utils.get_instance(cluster, "master")

    info = {}
    if namenode:
        address = utils.get_config_value_or_default(
            'HDFS', 'dfs.http.address', cluster)
        # dfs.http.address is "host:port"; keep only the port part.
        port = address[address.rfind(':') + 1:]
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (namenode.get_ip_or_dns_name(),
                                        port)
        }
        info['HDFS']['NameNode'] = 'hdfs://%s:8020' % namenode.hostname()
    if master:
        port = utils.get_config_value_or_default('Spark',
                                                 'Master webui port',
                                                 cluster)
        if port is not None:
            info['Spark'] = {
                'Web UI': 'http://%s:%s' % (
                    master.get_ip_or_dns_name(), port)
            }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _update_cluster_info(self, cluster_context):
    """Refresh cluster UI info: admin credentials plus one web UI entry
    per live instance of each node process."""
    LOG.debug('Updating UI information.')
    info = {
        'Admin user credentials': {
            'Username': '******',
            'Password': pu.get_mapr_password(cluster_context.cluster)
        }
    }
    for service in cluster_context.cluster_services:
        for title, node_process, ui_info in (
                service.get_ui_info(cluster_context)):
            removed = cluster_context.removed_instances(node_process)
            live = [i for i in cluster_context.get_instances(node_process)
                    if i not in removed]
            # Suffix the title with an index only when the process has
            # more than one instance.
            if len(live) == 1:
                name_tpl = "%(title)s"
            else:
                name_tpl = "%(title)s %(index)s"
            for idx, instance in enumerate(live, start=1):
                display_name = name_tpl % {"title": title, "index": idx}
                data = ui_info.copy()
                data[srvc.SERVICE_UI] = (data[srvc.SERVICE_UI]
                                         % instance.get_ip_or_dns_name())
                info[display_name] = data
    conductor.cluster_update(context.ctx(), cluster_context.cluster,
                             {'info': info})
def _prepare_ranger(cluster):
    """Provision the MySQL backend for Ranger Admin.

    Installs the JDBC connector on the Ambari server node, generates a
    random root password (persisted in cluster extra under
    "ranger_db_password"), then installs and initializes mysql-server on
    the Ranger node. Does nothing when Ranger Admin is not deployed.
    """
    ranger = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN)
    if not ranger:
        return
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    with ambari.remote() as remote:
        run = functools.partial(remote.execute_command, run_as_root=True)
        run("yum install -y mysql-connector-java")
        run("ambari-server setup --jdbc-db=mysql "
            "--jdbc-driver=/usr/share/java/mysql-connector-java.jar")
    password = uuidutils.generate_uuid()
    init_db_template = (
        "create user 'root'@'%' identified by '{password}';\n"
        "set password for 'root'@'localhost' = password('{password}');")
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra["ranger_db_password"] = password
    conductor.cluster_update(context.ctx(), cluster, {"extra": extra})
    with ranger.remote() as remote:
        run = functools.partial(remote.execute_command, run_as_root=True)
        # TODO(sreshetnyak): add ubuntu support
        run("yum install -y mysql-server")
        run("service mysqld start")
        remote.write_file_to("/tmp/init.sql",
                             init_db_template.format(password=password))
        run("mysql < /tmp/init.sql")
        run("rm /tmp/init.sql")
def _update_cluster_info(self, cluster_context):
    """Refresh UI info with MapR admin credentials and per-instance
    service web UI links."""
    LOG.debug('Updating UI information.')
    info = {
        'Admin user credentials': {
            'Username': pu.MAPR_USER_NAME,
            'Password': pu.get_mapr_password(cluster_context.cluster)
        }
    }
    for service in cluster_context.cluster_services:
        for title, node_process, ui_info in service.get_ui_info(
                cluster_context):
            removed = cluster_context.removed_instances(node_process)
            live = [i for i in cluster_context.get_instances(node_process)
                    if i not in removed]
            # Indexed display name only when several instances exist.
            if len(live) == 1:
                name_tpl = "%(title)s"
            else:
                name_tpl = "%(title)s %(index)s"
            for idx, instance in enumerate(live, start=1):
                display_name = name_tpl % {"title": title, "index": idx}
                data = ui_info.copy()
                data[srvc.SERVICE_UI] = (data[srvc.SERVICE_UI]
                                         % instance.management_ip)
                info[display_name] = data
    conductor.cluster_update(context.ctx(), cluster_context.cluster,
                             {'info': info})
def _set_cluster_info(self, cluster):
    """Publish YARN/HDFS/Oozie/JobHistory web endpoints for the cluster."""
    namenode = vu.get_namenode(cluster)
    resmgr = vu.get_resourcemanager(cluster)
    history = vu.get_historyserver(cluster)
    oozie = vu.get_oozie(cluster)

    info = {}
    if resmgr:
        info["YARN"] = {
            "Web UI": "http://%s:%s" % (resmgr.management_ip, "8088"),
            "ResourceManager": "http://%s:%s" % (resmgr.management_ip,
                                                 "8032"),
        }
    if namenode:
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (namenode.management_ip, "50070"),
            "NameNode": "hdfs://%s:%s" % (namenode.hostname(), "9000"),
        }
    if oozie:
        info["JobFlow"] = {
            "Oozie": "http://%s:%s" % (oozie.management_ip, "11000")
        }
    if history:
        info["MapReduce JobHistory Server"] = {
            "Web UI": "http://%s:%s" % (history.management_ip, "19888")
        }
    conductor.cluster_update(context.ctx(), cluster, {"info": info})
def _generate_hive_mysql_password(self, cluster):
    """Return the Hive MySQL password, creating and persisting it on
    first use (stored in cluster extra as "hive_mysql_passwd")."""
    extra = cluster.extra.to_dict() if cluster.extra else {}
    if not extra.get("hive_mysql_passwd"):
        extra["hive_mysql_passwd"] = six.text_type(uuid.uuid4())
        conductor.cluster_update(context.ctx(), cluster, {"extra": extra})
    return extra["hive_mysql_passwd"]
def _generate_hive_mysql_password(self, cluster):
    """Get or lazily create the MySQL password used by Hive."""
    extra = cluster.extra.to_dict() if cluster.extra else {}
    password = extra.get('hive_mysql_passwd')
    if not password:
        # First request: generate, persist, then hand it back.
        password = six.text_type(uuid.uuid4())
        extra['hive_mysql_passwd'] = password
        conductor.cluster_update(context.ctx(), cluster, {'extra': extra})
    return password
def update_default_ambari_password(cluster):
    """Replace Ambari's default admin/admin password with a random one
    and record it in cluster extra as "ambari_password"."""
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = uuidutils.generate_uuid()
    with ambari_client.AmbariClient(ambari) as client:
        client.update_user_password("admin", "admin", password)
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra["ambari_password"] = password
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"extra": extra})
    # Re-read so the local variable reflects the stored extra field.
    cluster = conductor.cluster_get(ctx, cluster.id)
def _set_cluster_info(self, cluster):
    """Publish Cloudera Manager info, plus the Hue dashboard if deployed."""
    info = CU.get_cloudera_manager_info(cluster)
    hue = CU.pu.get_hue(cluster)
    if hue:
        info['Hue Dashboard'] = {
            'Web UI': 'http://%s:8888' % hue.management_ip
        }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _set_cluster_info(self, cluster):
    """Publish the Storm master (nimbus) web UI URL for the cluster."""
    st_master = utils.get_instance(cluster, "nimbus")
    info = {}
    if st_master:
        port = "8080"
        # Fixed typo: the service key was previously misspelled "Strom".
        info["Storm"] = {
            "Web UI": "http://%s:%s" % (st_master.management_ip, port)
        }
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"info": info})
def _update_cluster_info(self, cluster_context):
    """Store each service's web UI URL keyed by its display title."""
    LOG.debug('Updating UI information.')
    info = {}
    for service in cluster_context.cluster_services:
        for title, process, url in service.ui_info:
            instance = cluster_context.get_instance(process)
            info[title] = {'WebUI': url % instance.management_ip}
    conductor.cluster_update(context.ctx(), cluster_context.cluster,
                             {'info': info})
def _set_cluster_info(self, cluster):
    """Publish the Storm nimbus web UI URL for the cluster."""
    st_master = utils.get_instance(cluster, "nimbus")
    info = {}
    if st_master:
        port = "8080"
        # Fixed typo: the service key was previously misspelled 'Strom'.
        info['Storm'] = {
            'Web UI': 'http://%s:%s' % (st_master.management_ip, port)
        }
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {'info': info})
def _set_cluster_info(self, cluster):
    """Record the Cloudera Manager web UI location and credentials."""
    manager = cu.get_manager(cluster)
    info = {
        'Cloudera Manager': {
            'Web UI': 'http://%s:7180' % manager.management_ip,
            'Username': '******',
            'Password': '******'
        }
    }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _update_cluster_info(self, cluster_context):
    """Store each service's web UI URL, resolved to its instance IP."""
    LOG.debug('Updating UI information.')
    info = {}
    for service in cluster_context.cluster_services:
        for title, process, url in service.ui_info:
            info[title] = {
                'WebUI': url % cluster_context.get_instance_ip(process)
            }
    conductor.cluster_update(context.ctx(), cluster_context.cluster,
                             {'info': info})
def _set_cluster_info(self, cluster):
    """Publish web UI endpoints for every deployed HDP component,
    merging in any info already stored on the cluster."""
    ambari_ip = plugin_utils.get_instance(
        cluster, p_common.AMBARI_SERVER).management_ip
    ambari_port = "8080"
    info = {
        p_common.AMBARI_SERVER: {
            "Web UI": "http://{host}:{port}".format(host=ambari_ip,
                                                    port=ambari_port),
            "Username": "******",
            "Password": cluster.extra["ambari_password"],
        }
    }
    # (process constant, web UI URL template) for components that expose
    # a single "Web UI" link; order matches the original entries.
    simple_uis = [
        (p_common.NAMENODE, "http://%s:50070"),
        (p_common.RESOURCEMANAGER, "http://%s:8088"),
        (p_common.HISTORYSERVER, "http://%s:19888"),
        (p_common.APP_TIMELINE_SERVER, "http://%s:8188"),
        (p_common.OOZIE_SERVER, "http://%s:11000/oozie"),
        (p_common.HBASE_MASTER, "http://%s:60010"),
        (p_common.FALCON_SERVER, "http://%s:15000"),
        (p_common.STORM_UI_SERVER, "http://%s:8744"),
    ]
    for process, url_template in simple_uis:
        instance = plugin_utils.get_instance(cluster, process)
        if instance:
            info[process] = {
                "Web UI": url_template % instance.management_ip
            }
    ranger_admin = plugin_utils.get_instance(cluster,
                                             p_common.RANGER_ADMIN)
    if ranger_admin:
        info[p_common.RANGER_ADMIN] = {
            "Web UI": "http://%s:6080" % ranger_admin.management_ip,
            "Username": "******",
            "Password": "******",
        }
    spark_hs = plugin_utils.get_instance(cluster,
                                         p_common.SPARK_JOBHISTORYSERVER)
    if spark_hs:
        info[p_common.SPARK_JOBHISTORYSERVER] = {
            "Web UI": "http://%s:18080" % spark_hs.management_ip
        }
    info.update(cluster.info.to_dict())
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"info": info})
    cluster = conductor.cluster_get(ctx, cluster.id)
def create_hadoop_ssh_keys(cluster):
    """Generate an SSH key pair for intra-cluster Hadoop use and store
    it in the cluster's extra field; returns the updated cluster."""
    priv, pub = crypto.generate_key_pair()
    extra = {
        'hadoop_private_ssh_key': priv,
        'hadoop_public_ssh_key': pub
    }
    return conductor.cluster_update(context.ctx(), cluster,
                                    {'extra': extra})
def _set_cluster_info(self, cluster):
    """Record Cloudera Manager access info, plus Hue if it is deployed."""
    manager = CU.pu.get_manager(cluster)
    info = {
        'Cloudera Manager': {
            'Web UI': 'http://%s:7180' % manager.management_ip,
            'Username': '******',
            'Password': db_helper.get_cm_password(cluster)
        }
    }
    hue = CU.pu.get_hue(cluster)
    if hue:
        info['Hue Dashboard'] = {
            'Web UI': 'http://%s:8888' % hue.management_ip
        }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _set_cluster_info(self, cluster):
    """Store HDFS and Spark master endpoint URLs on the cluster."""
    namenode = utils.get_instance(cluster, "namenode")
    master = utils.get_instance(cluster, "master")
    info = {}
    if namenode:
        # dfs.http.address is "host:port"; extract the port.
        address = c_helper.get_config_value("HDFS", "dfs.http.address",
                                            cluster)
        port = address[address.rfind(":") + 1:]
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (namenode.management_ip, port)
        }
        info["HDFS"]["NameNode"] = "hdfs://%s:8020" % namenode.hostname()
    if master:
        port = c_helper.get_config_value("Spark", "Master webui port",
                                         cluster)
        if port is not None:
            info["Spark"] = {
                "Web UI": "http://%s:%s" % (master.management_ip, port)
            }
    conductor.cluster_update(context.ctx(), cluster, {"info": info})
def get_sentry_db_password(cluster):
    """Get or lazily create the Sentry DB password in cluster extra."""
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster.id)
    stored = (cluster.extra.get('sentry_db_password')
              if cluster.extra else None)
    if stored:
        return stored
    # No password yet: generate one and persist it.
    new_passwd = six.text_type(uuid.uuid4())
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra['sentry_db_password'] = new_passwd
    conductor.cluster_update(ctx, cluster, {'extra': extra})
    return new_passwd
def get_password_from_db(cluster, pwname):
    """Return the password stored under ``pwname`` in cluster extra,
    generating and persisting a new one if absent."""
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster.id)
    stored = cluster.extra.get(pwname) if cluster.extra else None
    if stored:
        return stored
    passwd = six.text_type(uuid.uuid4())
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra[pwname] = passwd
    conductor.cluster_update(ctx, cluster, {'extra': extra})
    return passwd
def get_password_from_db(cluster, pwname):
    """Return (creating on first use) the password for the named entry.

    The cleartext password lives in the key manager; the cluster's
    extra field stores only the key-manager secret ID under ``pwname``.

    :param cluster: The cluster record containing the password
    :param pwname: The entry name associated with the password
    :returns: The cleartext password
    """
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster.id)
    secret_id = cluster.extra.get(pwname) if cluster.extra else None
    if secret_id:
        return key_manager.get_secret(secret_id, ctx)
    passwd = uuidutils.generate_uuid()
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra[pwname] = key_manager.store_secret(passwd, ctx)
    conductor.cluster_update(ctx, cluster, {'extra': extra})
    return passwd
def _update_cluster_info(self, cluster_context):
    """Publish per-instance service web UI URLs, indexing the display
    name when a process has more than one live instance."""
    LOG.debug('Updating UI information.')
    info = {}
    for service in cluster_context.cluster_services:
        for title, node_process, url_template in service.ui_info:
            removed = cluster_context.removed_instances(node_process)
            live = [i for i in cluster_context.get_instances(node_process)
                    if i not in removed]
            if len(live) == 1:
                name_tpl = "%(title)s"
            else:
                name_tpl = "%(title)s %(index)s"
            for idx, instance in enumerate(live, start=1):
                display_name = name_tpl % {"title": title, "index": idx}
                info[display_name] = {
                    "WebUI": url_template % instance.management_ip
                }
    conductor.cluster_update(context.ctx(), cluster_context.cluster,
                             {'info': info})
def _set_cluster_info(self, cluster):
    """Record HDFS and Spark master web endpoints for the cluster."""
    namenode = utils.get_instance(cluster, "namenode")
    master = utils.get_instance(cluster, "master")
    info = {}
    if namenode:
        address = utils.get_config_value_or_default(
            'HDFS', 'dfs.http.address', cluster)
        # Keep only the port from the "host:port" address.
        port = address[address.rfind(':') + 1:]
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (namenode.management_ip, port)
        }
        info['HDFS']['NameNode'] = 'hdfs://%s:8020' % namenode.hostname()
    if master:
        port = utils.get_config_value_or_default(
            'Spark', 'Master webui port', cluster)
        if port is not None:
            info['Spark'] = {
                'Web UI': 'http://%s:%s' % (master.management_ip, port)
            }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _set_cluster_info(self, cluster):
    """Store MapReduce, HDFS and Oozie endpoints on the cluster record."""
    namenode = vu.get_namenode(cluster)
    jobtracker = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)
    info = {}
    if jobtracker:
        ui_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker.http.address', cluster)
        jt_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker', cluster)
        info['MapReduce'] = {
            'Web UI': 'http://%s:%s' % (jobtracker.management_ip,
                                        ui_port),
            'JobTracker': '%s:%s' % (jobtracker.hostname(), jt_port)
        }
    if namenode:
        ui_port = c_helper.get_port_from_config(
            'HDFS', 'dfs.http.address', cluster)
        nn_port = c_helper.get_port_from_config(
            'HDFS', 'fs.default.name', cluster)
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (namenode.management_ip, ui_port),
            'NameNode': 'hdfs://%s:%s' % (namenode.hostname(), nn_port)
        }
    if oozie:
        # TODO(yrunts) change from hardcode value
        info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def _set_cluster_info(self, cluster):
    """Publish MapReduce, HDFS and Oozie endpoint URLs.

    Ports are resolved from the cluster configuration except the Oozie
    port, which is still hardcoded.
    """
    nn = vu.get_namenode(cluster)
    jt = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)
    info = {}
    if jt:
        web_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker.http.address', cluster)
        rpc_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker', cluster)
        info['MapReduce'] = {
            'Web UI': 'http://%s:%s' % (jt.management_ip, web_port),
            'JobTracker': '%s:%s' % (jt.hostname(), rpc_port)
        }
    if nn:
        web_port = c_helper.get_port_from_config(
            'HDFS', 'dfs.http.address', cluster)
        rpc_port = c_helper.get_port_from_config(
            'HDFS', 'fs.default.name', cluster)
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (nn.management_ip, web_port),
            'NameNode': 'hdfs://%s:%s' % (nn.hostname(), rpc_port)
        }
    if oozie:
        # TODO(yrunts) change from hardcode value
        info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def get_password(cluster, pw_name):
    """Return (creating on first use) the password for ``pw_name``.

    The cleartext is kept in the key manager; the cluster extra field
    stores only the key-manager secret ID.

    :param cluster: The cluster record containing the password
    :param pw_name: The entry name associated with the password
    :returns: The cleartext password
    """
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster.id)
    secret_id = cluster.extra.get(pw_name) if cluster.extra else None
    if secret_id:
        return key_manager.get_secret(secret_id, ctx)
    passwd = six.text_type(uuid.uuid4())
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra[pw_name] = key_manager.store_secret(passwd, ctx)
    cluster = conductor.cluster_update(ctx, cluster, {'extra': extra})
    return passwd
def configure_cluster(self, cluster):
    """Run deploy-phase configuration, then publish CM access info."""
    dp.configure_cluster(cluster)
    info = CU.get_cloudera_manager_info(cluster)
    conductor.cluster_update(context.ctx(), cluster, {'info': info})
def cluster_update(context, cluster, values, **kwargs):
    """Thin pass-through to the conductor; extra kwargs are ignored."""
    return conductor.cluster_update(context, cluster, values)
def configure_cluster(self, cluster):
    """Configure the cluster via the deploy module and store the
    Cloudera Manager info on the cluster record."""
    dp.configure_cluster(cluster)
    manager_info = CU.get_cloudera_manager_info(cluster)
    conductor.cluster_update(context.ctx(), cluster,
                             {'info': manager_info})
def _set_cluster_info(self, cluster):
    """Publish web UI endpoints for all deployed HDP components.

    NameNode and ResourceManager may have several instances, so their
    entries carry one indexed "Web UI N" link per instance. Existing
    cluster info is merged in before the update.
    """
    ambari_ip = plugin_utils.get_instance(
        cluster, p_common.AMBARI_SERVER).get_ip_or_dns_name()
    ambari_port = "8080"
    info = {
        p_common.AMBARI_SERVER: {
            "Web UI": "http://{host}:{port}".format(host=ambari_ip,
                                                    port=ambari_port),
            "Username": "******",
            "Password": cluster.extra["ambari_password"]
        }
    }
    # NameNode / ResourceManager: one indexed link per instance (the
    # entry is created even when there are no instances, as before).
    info[p_common.NAMENODE] = {}
    for idx, namenode in enumerate(
            plugin_utils.get_instances(cluster, p_common.NAMENODE)):
        info[p_common.NAMENODE]["Web UI %s" % (idx + 1)] = (
            "http://%s:50070" % namenode.get_ip_or_dns_name())
    info[p_common.RESOURCEMANAGER] = {}
    for idx, resmgr in enumerate(
            plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER)):
        info[p_common.RESOURCEMANAGER]["Web UI %s" % (idx + 1)] = (
            "http://%s:8088" % resmgr.get_ip_or_dns_name())
    # Single-instance components exposing only one "Web UI" link;
    # order matches the original entries.
    simple_uis = [
        (p_common.HISTORYSERVER, "http://%s:19888"),
        (p_common.APP_TIMELINE_SERVER, "http://%s:8188"),
        (p_common.OOZIE_SERVER, "http://%s:11000/oozie"),
        (p_common.HBASE_MASTER, "http://%s:60010"),
        (p_common.FALCON_SERVER, "http://%s:15000"),
        (p_common.STORM_UI_SERVER, "http://%s:8744"),
    ]
    for process, url_template in simple_uis:
        instance = plugin_utils.get_instance(cluster, process)
        if instance:
            info[process] = {
                "Web UI": url_template % instance.get_ip_or_dns_name()
            }
    ranger_admin = plugin_utils.get_instance(cluster,
                                             p_common.RANGER_ADMIN)
    if ranger_admin:
        info[p_common.RANGER_ADMIN] = {
            "Web UI": "http://%s:6080" % ranger_admin.get_ip_or_dns_name(),
            "Username": "******",
            "Password": "******"
        }
    spark_hs = plugin_utils.get_instance(cluster,
                                         p_common.SPARK_JOBHISTORYSERVER)
    if spark_hs:
        info[p_common.SPARK_JOBHISTORYSERVER] = {
            "Web UI": "http://%s:18080" % spark_hs.get_ip_or_dns_name()
        }
    info.update(cluster.info.to_dict())
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"info": info})
    cluster = conductor.cluster_get(ctx, cluster.id)