def _set_cluster_info(self, cluster):
    """Publish the cluster's service endpoints into its 'info' field.

    Collects the Web UI / RPC addresses of the JobTracker, NameNode and
    Oozie server (for whichever of them exist in the cluster) and stores
    the resulting mapping on the cluster via the conductor.
    """
    namenode = vu.get_namenode(cluster)
    jobtracker = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)

    def port_of(service, key):
        # Resolve a configurable port from the cluster configuration.
        return c_helper.get_port_from_config(service, key, cluster)

    info = {}
    if jobtracker:
        info["MapReduce"] = {
            "Web UI": "http://%s:%s" % (
                jobtracker.management_ip,
                port_of("MapReduce", "mapred.job.tracker.http.address")),
            "JobTracker": "%s:%s" % (
                jobtracker.hostname(),
                port_of("MapReduce", "mapred.job.tracker")),
        }
    if namenode:
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (
                namenode.management_ip,
                port_of("HDFS", "dfs.http.address")),
            "NameNode": "hdfs://%s:%s" % (
                namenode.hostname(),
                port_of("HDFS", "fs.default.name")),
        }
    if oozie:
        # TODO(yrunts) change from hardcode value
        info["JobFlow"] = {"Oozie": "http://%s:11000" % oozie.management_ip}

    conductor.cluster_update(context.ctx(), cluster, {"info": info})
def get_open_ports(self, node_group):
    """Return the list of ports that must be opened for *node_group*.

    For each process running in the node group, configurable ports are
    resolved from the cluster configuration and fixed service ports are
    appended as-is. The order of the returned list follows the fixed
    process order below.
    """
    cluster = node_group.cluster

    # Per process: configurable (service, config key) pairs first,
    # then hardcoded service ports.
    port_table = [
        ("namenode", [("HDFS", "dfs.http.address")], [8020]),
        ("datanode", [("HDFS", "dfs.datanode.http.address"),
                      ("HDFS", "dfs.datanode.address"),
                      ("HDFS", "dfs.datanode.ipc.address")], []),
        ("jobtracker", [("MapReduce", "mapred.job.tracker.http.address")],
         [8021]),
        ("tasktracker", [("MapReduce", "mapred.task.tracker.http.address")],
         []),
        ("secondarynamenode", [("HDFS", "dfs.secondary.http.address")], []),
        ("oozie", [], [11000]),
        ("hive", [], [9999, 10000]),
    ]

    ports = []
    for process, configurable, fixed in port_table:
        if process in node_group.node_processes:
            for service, key in configurable:
                ports.append(
                    c_helper.get_port_from_config(service, key, cluster))
            ports.extend(fixed)
    return ports
def _set_cluster_info(self, cluster):
    """Store the addresses of the cluster's management services.

    Builds an 'info' mapping of service name -> endpoint URLs for the
    JobTracker, NameNode and Oozie (for whichever of them the cluster
    has) and persists it on the cluster through the conductor.
    """
    nn = vu.get_namenode(cluster)
    jt = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)
    info = {}
    if jt:
        # Web UI port is configurable; the RPC port comes from the
        # 'mapred.job.tracker' address in the cluster configuration.
        ui_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker.http.address', cluster)
        jt_port = c_helper.get_port_from_config('MapReduce',
                                                'mapred.job.tracker',
                                                cluster)
        info['MapReduce'] = {
            'Web UI': 'http://%s:%s' % (jt.management_ip, ui_port),
            'JobTracker': '%s:%s' % (jt.hostname(), jt_port)
        }
    if nn:
        # NameNode RPC port is taken from the 'fs.default.name' URI.
        ui_port = c_helper.get_port_from_config('HDFS', 'dfs.http.address',
                                                cluster)
        nn_port = c_helper.get_port_from_config('HDFS', 'fs.default.name',
                                                cluster)
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (nn.management_ip, ui_port),
            'NameNode': 'hdfs://%s:%s' % (nn.hostname(), nn_port)
        }
    if oozie:
        # TODO(yrunts) change from hardcode value
        info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {'info': info})
def _set_cluster_info(self, cluster):
    """Record service endpoint URLs in the cluster's 'info' field.

    Looks up the JobTracker, NameNode and Oozie instances (each may be
    absent), assembles their Web UI / RPC addresses and saves the
    mapping on the cluster via the conductor.
    """
    name_node = vu.get_namenode(cluster)
    job_tracker = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)

    cluster_info = {}

    if job_tracker:
        http_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker.http.address', cluster)
        ipc_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker', cluster)
        cluster_info['MapReduce'] = {
            'Web UI': 'http://%s:%s' % (job_tracker.management_ip,
                                        http_port),
            'JobTracker': '%s:%s' % (job_tracker.hostname(), ipc_port)
        }

    if name_node:
        http_port = c_helper.get_port_from_config('HDFS',
                                                  'dfs.http.address',
                                                  cluster)
        rpc_port = c_helper.get_port_from_config('HDFS',
                                                 'fs.default.name',
                                                 cluster)
        cluster_info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (name_node.management_ip, http_port),
            'NameNode': 'hdfs://%s:%s' % (name_node.hostname(), rpc_port)
        }

    if oozie:
        # TODO(yrunts) change from hardcode value
        cluster_info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }

    conductor.cluster_update(context.ctx(), cluster,
                             {'info': cluster_info})
def get_open_ports(self, node_group):
    """Return the list of ports to open for the given node group.

    Ports that are configurable are resolved from the cluster
    configuration via c_helper; the remaining values (8020, 8021,
    11000, 9999, 10000) are hardcoded service ports.
    """
    cluster = node_group.cluster
    ports = []
    if "namenode" in node_group.node_processes:
        ports.append(
            c_helper.get_port_from_config('HDFS', 'dfs.http.address',
                                          cluster))
        ports.append(8020)  # NameNode RPC (hardcoded)
    if "datanode" in node_group.node_processes:
        ports.append(
            c_helper.get_port_from_config('HDFS',
                                          'dfs.datanode.http.address',
                                          cluster))
        ports.append(
            c_helper.get_port_from_config('HDFS', 'dfs.datanode.address',
                                          cluster))
        ports.append(
            c_helper.get_port_from_config('HDFS',
                                          'dfs.datanode.ipc.address',
                                          cluster))
    if "jobtracker" in node_group.node_processes:
        ports.append(
            c_helper.get_port_from_config(
                'MapReduce', 'mapred.job.tracker.http.address', cluster))
        ports.append(8021)  # JobTracker RPC (hardcoded)
    if "tasktracker" in node_group.node_processes:
        ports.append(
            c_helper.get_port_from_config(
                'MapReduce', 'mapred.task.tracker.http.address', cluster))
    if "secondarynamenode" in node_group.node_processes:
        ports.append(
            c_helper.get_port_from_config('HDFS',
                                          'dfs.secondary.http.address',
                                          cluster))
    if "oozie" in node_group.node_processes:
        ports.append(11000)  # Oozie web console (hardcoded)
    if "hive" in node_group.node_processes:
        ports.append(9999)   # Hive metastore (hardcoded)
        ports.append(10000)  # HiveServer (hardcoded)
    return ports