def _configure_services(client, cluster):
    nn_host = u.get_namenode(cluster).fqdn()
    snn = u.get_secondarynamenodes(cluster)
    snn_host = snn[0].fqdn() if snn else None
    rm_host = u.get_resourcemanager(cluster).fqdn() if u.get_resourcemanager(
        cluster) else None
    hs_host = u.get_historyserver(cluster).fqdn() if u.get_historyserver(
        cluster) else None
    dn_hosts = [dn.fqdn() for dn in u.get_datanodes(cluster)]
    nm_hosts = [tt.fqdn() for tt in u.get_nodemanagers(cluster)]

    oozie_host = u.get_oozie(cluster).fqdn() if u.get_oozie(cluster) else None
    hive_host = u.get_hiveserver(cluster).fqdn() if u.get_hiveserver(
        cluster) else None

    services = []
    if u.get_namenode(cluster):
        services += ['hdfs']

    if u.get_resourcemanager(cluster):
        services += ['yarn']

    if oozie_host:
        services += ['oozie']
        services += ['pig']

    if hive_host:
        services += ['hive']

    LOG.debug("Add services: %s" % ', '.join(services))
    client.services.add(services)

    LOG.debug("Assign roles to hosts")
    client.services.hdfs.add_nodes('PrimaryNameNode', [nn_host])
    client.services.hdfs.add_nodes('DataNode', dn_hosts)
    if snn:
        client.services.hdfs.add_nodes('SecondaryNameNode', [snn_host])

    if oozie_host:
        client.services.oozie.add_nodes('Oozie', [oozie_host])

    if hive_host:
        client.services.hive.add_nodes('HiveServer', [hive_host])

    if rm_host:
        client.services.yarn.add_nodes('ResourceManager', [rm_host])
        client.services.yarn.add_nodes('NodeManager', nm_hosts)

    if hs_host:
        client.services.yarn.add_nodes('HistoryServer', [hs_host])
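# Note: _configure_services relies only on a small surface of the `client`
# object: client.services.add(...) plus per-service add_nodes(role, hosts)
# calls. The stub below is a hypothetical illustration of that assumed
# interface (it is not the real management client); it can be handy for
# tracing which services and role/host assignments the function requests.


class _RoleRecorder(object):
    """Hypothetical stand-in for one managed service (hdfs, yarn, ...)."""

    def __init__(self, name, log):
        self._name = name
        self._log = log

    def add_nodes(self, role, hosts):
        # Record the assignment instead of calling a real manager API.
        self._log.append((self._name, role, list(hosts)))


class _StubServices(object):
    """Hypothetical stub exposing the interface _configure_services uses."""

    def __init__(self):
        self.calls = []
        self.added = []
        for name in ('hdfs', 'yarn', 'oozie', 'hive'):
            setattr(self, name, _RoleRecorder(name, self.calls))

    def add(self, services):
        self.added.extend(services)


class _StubClient(object):
    def __init__(self):
        self.services = _StubServices()

# Example: after _configure_services(stub, cluster) runs against a test
# cluster object, stub.services.added lists the requested services and
# stub.services.calls lists every (service, role, hosts) assignment.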
def _set_cluster_info(self, cluster):
    nn = utils.get_namenode(cluster)
    rm = utils.get_resourcemanager(cluster)
    hs = utils.get_historyserver(cluster)
    oo = utils.get_oozie(cluster)

    info = {}

    if rm:
        info['YARN'] = {
            'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
            'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
        }

    if nn:
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
            'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
        }

    if oo:
        info['JobFlow'] = {
            'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
        }

    if hs:
        info['MapReduce JobHistory Server'] = {
            'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
        }

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {'info': info})
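# For a cluster that runs all four processes, the 'info' mapping published
# through conductor.cluster_update has the shape below. The addresses and
# hostname are made-up examples; the ports come from the code above.
_EXAMPLE_INFO = {
    'YARN': {
        'Web UI': 'http://10.0.0.2:8088',
        'ResourceManager': 'http://10.0.0.2:8032',
    },
    'HDFS': {
        'Web UI': 'http://10.0.0.3:50070',
        'NameNode': 'hdfs://master-001:9000',
    },
    'JobFlow': {
        'Oozie': 'http://10.0.0.4:11000',
    },
    'MapReduce JobHistory Server': {
        'Web UI': 'http://10.0.0.5:19888',
    },
}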
def start_cluster(self, cluster):
    nn = utils.get_namenode(cluster)
    run.format_namenode(nn)
    run.start_hadoop_process(nn, 'namenode')

    rm = utils.get_resourcemanager(cluster)
    run.start_yarn_process(rm, 'resourcemanager')

    for dn in utils.get_datanodes(cluster):
        run.start_hadoop_process(dn, 'datanode')

    run.await_datanodes(cluster)

    for nm in utils.get_nodemanagers(cluster):
        run.start_yarn_process(nm, 'nodemanager')

    hs = utils.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

    oo = utils.get_oozie(cluster)
    if oo:
        run.start_oozie_process(oo)

    self._set_cluster_info(cluster)
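# run.await_datanodes(cluster) is called before the NodeManagers start so
# that YARN comes up against an HDFS that already has registered DataNodes.
# The sketch below shows one way such a wait could be implemented; it is an
# illustrative assumption, not the actual 'run' module code, and it assumes
# a hypothetical `namenode_remote.execute_command()` helper that returns an
# exit code and the command's stdout.
import re
import time


def _await_datanodes_sketch(namenode_remote, expected_count,
                            timeout=600, poll=10):
    # Poll 'hdfs dfsadmin -report' on the namenode until at least
    # `expected_count` live DataNodes are reported, or raise on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        code, stdout = namenode_remote.execute_command(
            'sudo su - -c "hdfs dfsadmin -report" hadoop',
            raise_when_error=False)
        match = re.search(r'Live datanodes\s*\((\d+)\)', stdout)
        if code == 0 and match and int(match.group(1)) >= expected_count:
            return
        time.sleep(poll)
    raise RuntimeError('Timed out waiting for %d live datanodes'
                       % expected_count)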