def start_cluster(self, cluster):
    """Start all Hadoop services on the cluster's instances.

    Formats and starts the NameNode, then any secondary namenodes and the
    datanodes; if a JobTracker is configured, starts it and the
    tasktrackers; if an Oozie server is configured, installs its share lib
    and starts it. Finally records cluster info via _set_cluster_info.

    :param cluster: cluster object whose instances are already provisioned
    """
    nn_instance = utils.get_namenode(cluster)
    datanodes = utils.get_datanodes(cluster)
    jt_instance = utils.get_jobtracker(cluster)
    tasktrackers = utils.get_tasktrackers(cluster)
    oozie = utils.get_oozie(cluster)

    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_process(r, "namenode")

    # Secondary namenodes are optional; start each one that exists.
    snns = utils.get_secondarynamenodes(cluster)
    if snns:
        for snn in snns:
            run.start_process(remote.get_remote(snn), "secondarynamenode")

    for dn in datanodes:
        run.start_process(remote.get_remote(dn), "datanode")
    LOG.info("HDFS service at '%s' has been started", nn_instance.hostname)

    if jt_instance:
        run.start_process(remote.get_remote(jt_instance), "jobtracker")
        for tt in tasktrackers:
            run.start_process(remote.get_remote(tt), "tasktracker")
        LOG.info("MapReduce service at '%s' has been started",
                 jt_instance.hostname)

    if oozie:
        with remote.get_remote(oozie) as r:
            run.oozie_share_lib(r, nn_instance.hostname)
            run.start_oozie(r)
        # Bug fix: log the Oozie instance's hostname, not the namenode's.
        LOG.info("Oozie service at '%s' has been started", oozie.hostname)

    LOG.info('Cluster %s has been started successfully' % cluster.name)
    self._set_cluster_info(cluster)
def start_cluster(self, cluster):
    """Start HDFS, MapReduce, Oozie and Hive services on the cluster.

    Formats and starts the NameNode, secondary namenodes and datanodes;
    starts the JobTracker/tasktrackers when configured; starts Oozie
    (including MySQL and its DB when MySQL is enabled); and sets up the
    Hive warehouse dir and metastore when a Hive server is configured.

    :param cluster: cluster object whose instances are already provisioned
    """
    nn_instance = utils.get_namenode(cluster)
    datanodes = utils.get_datanodes(cluster)
    jt_instance = utils.get_jobtracker(cluster)
    tasktrackers = utils.get_tasktrackers(cluster)
    oozie = utils.get_oozie(cluster)
    hive_server = utils.get_hiveserver(cluster)

    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_process(r, "namenode")

    snns = utils.get_secondarynamenodes(cluster)
    if snns:
        for snn in snns:
            run.start_process(remote.get_remote(snn), "secondarynamenode")

    for dn in datanodes:
        run.start_process(remote.get_remote(dn), "datanode")
    LOG.info("HDFS service at '%s' has been started", nn_instance.hostname)

    if jt_instance:
        run.start_process(remote.get_remote(jt_instance), "jobtracker")
        for tt in tasktrackers:
            run.start_process(remote.get_remote(tt), "tasktracker")
        LOG.info("MapReduce service at '%s' has been started",
                 jt_instance.hostname)

    if oozie:
        with remote.get_remote(oozie) as r:
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r, oozie)
                run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname)
            run.start_oozie(r)
        # Bug fix: log the Oozie instance's hostname, not the namenode's.
        LOG.info("Oozie service at '%s' has been started", oozie.hostname)

    if hive_server:
        with remote.get_remote(nn_instance) as r:
            run.hive_create_warehouse_dir(r)
        if c_helper.is_mysql_enable(cluster):
            with remote.get_remote(hive_server) as h:
                # Skip MySQL startup if Oozie already started it on the
                # same host.
                if not oozie or hive_server.hostname != oozie.hostname:
                    run.mysql_start(h, hive_server)
                run.hive_create_db(h)
                run.hive_metastore_start(h)
                LOG.info("Hive Metastore server at %s has been started",
                         hive_server.hostname)

    LOG.info('Cluster %s has been started successfully' % cluster.name)
    self._set_cluster_info(cluster)
def start_cluster(self, cluster):
    """Start Hadoop, Oozie and Hive services on the cluster.

    Formats and starts the NameNode and secondary namenodes, starts the
    JobTracker when configured, then delegates tasktracker/datanode
    startup to _start_tt_dn_processes. Optionally starts Oozie (with
    MySQL and DB creation when MySQL is enabled) and the Hive metastore.

    :param cluster: cluster object whose instances are already provisioned
    """
    instances = utils.get_instances(cluster)
    nn_instance = utils.get_namenode(cluster)
    jt_instance = utils.get_jobtracker(cluster)
    oozie = utils.get_oozie(cluster)
    hive_server = utils.get_hiveserver(cluster)

    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_processes(r, "namenode")

    for snn in utils.get_secondarynamenodes(cluster):
        run.start_processes(remote.get_remote(snn), "secondarynamenode")

    if jt_instance:
        run.start_processes(remote.get_remote(jt_instance), "jobtracker")

    self._start_tt_dn_processes(instances)

    LOG.info("Hadoop services in cluster %s have been started" %
             cluster.name)

    if oozie:
        with remote.get_remote(oozie) as r:
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r, oozie)
                run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname)
            run.start_oozie(r)
        # Bug fix: log the Oozie instance's hostname, not the namenode's.
        LOG.info("Oozie service at '%s' has been started", oozie.hostname)

    if hive_server:
        with remote.get_remote(nn_instance) as r:
            run.hive_create_warehouse_dir(r)
        if c_helper.is_mysql_enable(cluster):
            with remote.get_remote(hive_server) as h:
                # Skip MySQL startup if Oozie already started it on the
                # same host.
                if not oozie or hive_server.hostname != oozie.hostname:
                    run.mysql_start(h, hive_server)
                run.hive_create_db(h)
                run.hive_metastore_start(h)
                LOG.info("Hive Metastore server at %s has been started",
                         hive_server.hostname)

    LOG.info('Cluster %s has been started successfully' % cluster.name)
    self._set_cluster_info(cluster)
def start_cluster(self, cluster):
    """Start Hadoop, Oozie and Hive services on the cluster.

    Formats and starts the NameNode and secondary namenodes, starts the
    JobTracker when configured, launches tasktracker/datanode processes,
    waits for datanodes to register, then optionally starts Oozie (with
    MySQL and DB creation when MySQL is enabled) and the Hive metastore.

    :param cluster: cluster object whose instances are already provisioned
    """
    nn_instance = utils.get_namenode(cluster)
    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_processes(r, "namenode")

    for snn in utils.get_secondarynamenodes(cluster):
        run.start_processes(remote.get_remote(snn), "secondarynamenode")

    jt_instance = utils.get_jobtracker(cluster)
    if jt_instance:
        run.start_processes(remote.get_remote(jt_instance), "jobtracker")

    self._start_tt_dn_processes(utils.get_instances(cluster))

    # Block until the datanodes have come up; HDFS must be usable before
    # Oozie share-lib upload and Hive warehouse creation below.
    self._await_datanodes(cluster)

    LOG.info("Hadoop services in cluster %s have been started" %
             cluster.name)

    oozie = utils.get_oozie(cluster)
    if oozie:
        with remote.get_remote(oozie) as r:
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r, oozie)
                run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname())
            run.start_oozie(r)
        # Bug fix: log the Oozie instance's hostname, not the namenode's.
        LOG.info("Oozie service at '%s' has been started", oozie.hostname())

    hive_server = utils.get_hiveserver(cluster)
    if hive_server:
        with remote.get_remote(nn_instance) as r:
            run.hive_create_warehouse_dir(r)
        if c_helper.is_mysql_enable(cluster):
            with remote.get_remote(hive_server) as h:
                # Skip MySQL startup if Oozie already started it on the
                # same host.
                if not oozie or hive_server.hostname() != oozie.hostname():
                    run.mysql_start(h, hive_server)
                run.hive_create_db(h)
                run.hive_metastore_start(h)
                LOG.info("Hive Metastore server at %s has been started",
                         hive_server.hostname())

    LOG.info('Cluster %s has been started successfully' % cluster.name)
    self._set_cluster_info(cluster)
def _configure_services(client, cluster):
    """Register cluster services with the management client and assign
    each service's roles to the appropriate hosts.

    :param client: management API client exposing a ``services`` interface
    :param cluster: cluster object to read node placement from
    """
    # Resolve the FQDN of each role's host; optional roles become None.
    namenode_fqdn = u.get_namenode(cluster).fqdn()
    secondary_nns = u.get_secondarynamenodes(cluster)
    secondary_fqdn = secondary_nns[0].fqdn() if secondary_nns else None
    jobtracker = u.get_jobtracker(cluster)
    jobtracker_fqdn = jobtracker.fqdn() if jobtracker else None
    datanode_fqdns = [node.fqdn() for node in u.get_datanodes(cluster)]
    tasktracker_fqdns = [node.fqdn() for node in u.get_tasktrackers(cluster)]
    oozie_fqdn = u.get_oozie(cluster).fqdn() if u.get_oozie(cluster) else None
    hive_fqdn = (u.get_hiveserver(cluster).fqdn()
                 if u.get_hiveserver(cluster) else None)

    # Build the service list from the roles actually present.
    services = []
    if u.get_namenode(cluster):
        services.append('hdfs')
    if u.get_jobtracker(cluster):
        services.append('mapred')
    if oozie_fqdn:
        services.append('oozie')
        services.append('pig')
    if hive_fqdn:
        services.append('hive')

    LOG.debug("Add services: %s" % ', '.join(services))
    client.services.add(services)

    LOG.debug("Assign roles to hosts")
    client.services.hdfs.add_nodes('PrimaryNameNode', [namenode_fqdn])
    client.services.hdfs.add_nodes('DataNode', datanode_fqdns)
    if secondary_nns:
        client.services.hdfs.add_nodes('SecondaryNameNode', [secondary_fqdn])
    if oozie_fqdn:
        client.services.oozie.add_nodes('Oozie', [oozie_fqdn])
    if hive_fqdn:
        client.services.hive.add_nodes('HiveServer', [hive_fqdn])
    if jobtracker_fqdn:
        client.services.mapred.add_nodes('JobTracker', [jobtracker_fqdn])
        client.services.mapred.add_nodes('TaskTracker', tasktracker_fqdns)
def _configure_services(client, cluster):
    """Register cluster services with the management client and assign
    each service's roles to the appropriate hosts.

    :param client: management API client exposing a ``services`` interface
    :param cluster: cluster object to read node placement from
    """
    nn_host = u.get_namenode(cluster).fqdn()
    snn = u.get_secondarynamenodes(cluster)
    snn_host = snn[0].fqdn() if snn else None
    # Bug fix: the jobtracker is optional — guard the fqdn() call instead
    # of raising AttributeError on clusters without MapReduce.
    jt_host = u.get_jobtracker(cluster).fqdn() if u.get_jobtracker(
        cluster) else None
    dn_hosts = [dn.fqdn() for dn in u.get_datanodes(cluster)]
    tt_hosts = [tt.fqdn() for tt in u.get_tasktrackers(cluster)]
    oozie_host = u.get_oozie(cluster).fqdn() if u.get_oozie(
        cluster) else None
    hive_host = u.get_hiveserver(cluster).fqdn() if u.get_hiveserver(
        cluster) else None

    services = []
    if u.get_namenode(cluster):
        services += ['hdfs']
    if u.get_jobtracker(cluster):
        services += ['mapred']
    if oozie_host:
        services += ['oozie']
        services += ['pig']
    if hive_host:
        services += ['hive']

    LOG.debug("Add services: %s" % ', '.join(services))
    client.services.add(services)

    LOG.debug("Assign roles to hosts")
    client.services.hdfs.add_nodes('PrimaryNameNode', [nn_host])
    client.services.hdfs.add_nodes('DataNode', dn_hosts)
    if snn:
        client.services.hdfs.add_nodes('SecondaryNameNode', [snn_host])
    if oozie_host:
        client.services.oozie.add_nodes('Oozie', [oozie_host])
    if hive_host:
        client.services.hive.add_nodes('HiveServer', [hive_host])
    # Bug fix: only assign MapReduce roles when the 'mapred' service was
    # actually registered (i.e. a jobtracker exists).
    if jt_host:
        client.services.mapred.add_nodes('JobTracker', [jt_host])
        client.services.mapred.add_nodes('TaskTracker', tt_hosts)