def scale_cluster(self, cluster, instances):
    master = utils.get_instance(cluster, "master")
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, self._spark_home(cluster))

    self._setup_instances(cluster, instances)
    nn = utils.get_instance(cluster, "namenode")
    run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
    self._start_slave_datanode_processes(instances)

    run.start_spark_master(r_master, self._spark_home(cluster))
    LOG.info(_LI("Spark master service at '%s' has been restarted"),
             master.hostname())

def scale_cluster(self, cluster, instances):
    master = utils.get_instance(cluster, "master")
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, self._spark_home(cluster))

    self._setup_instances(cluster, instances)
    nn = utils.get_instance(cluster, "namenode")
    run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
    dn_instances = [instance for instance in instances if
                    "datanode" in instance.node_group.node_processes]
    self._start_datanode_processes(dn_instances)

    run.start_spark_master(r_master, self._spark_home(cluster))
    LOG.info(_LI("Spark master service has been restarted"))

def scale_cluster(self, cluster, instances):
    master = utils.get_instance(cluster, "master")
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, self._spark_home(cluster))

    self._setup_instances(cluster, instances)
    nn = utils.get_instance(cluster, "namenode")
    run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
    dn_instances = [instance for instance in instances if
                    'datanode' in instance.node_group.node_processes]
    self._start_datanode_processes(dn_instances)
    swift_helper.install_ssl_certs(instances)

    run.start_spark_master(r_master, self._spark_home(cluster))
    LOG.info("Spark master service has been restarted")

def scale_cluster(self, cluster, instances):
    master = utils.get_instance(cluster, "master")
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, self._spark_home(cluster))

    self._setup_instances(cluster, instances)
    nn = utils.get_instance(cluster, "namenode")
    run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
    dn_instances = [
        instance for instance in instances
        if 'datanode' in instance.node_group.node_processes
    ]
    self._start_datanode_processes(dn_instances)

    run.start_spark_master(r_master, self._spark_home(cluster))
    LOG.info(
        _LI("Spark master service at {host} has been restarted").format(
            host=master.hostname()))
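
# A minimal, self-contained sketch (not from the original source) of the
# datanode filtering step used by scale_cluster above: of the newly added
# instances, only those whose node group carries the "datanode" process are
# handed to the datanode start-up step. The NodeGroup/Instance stubs below
# are hypothetical stand-ins for sahara's real objects.
import collections

NodeGroup = collections.namedtuple('NodeGroup', ['node_processes'])
Instance = collections.namedtuple('Instance', ['node_group', 'hostname'])


def filter_datanodes(instances):
    return [instance for instance in instances
            if 'datanode' in instance.node_group.node_processes]


if __name__ == '__main__':
    worker = NodeGroup(node_processes=['datanode', 'slave'])
    compute = NodeGroup(node_processes=['slave'])
    added = [Instance(worker, 'worker-1'), Instance(compute, 'compute-1')]
    # Only 'worker-1' carries the datanode process, so only it is returned.
    assert [i.hostname for i in filter_datanodes(added)] == ['worker-1']
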
def decommission_sl(master, inst_to_be_deleted, survived_inst):
    if survived_inst is not None:
        slavenames = []
        for slave in survived_inst:
            slavenames.append(slave.hostname())
        slaves_content = c_helper.generate_spark_slaves_configs(slavenames)
    else:
        slaves_content = "\n"

    r_master = remote.get_remote(master)
    run.stop_spark(r_master)

    # write new slaves file to master
    files = {'/opt/spark/conf/slaves': slaves_content}
    r_master.write_files_to(files)

    # write new slaves file to each survived slave as well;
    # guard against survived_inst being None (no slaves left)
    for i in survived_inst or []:
        with remote.get_remote(i) as r:
            r.write_files_to(files)

    run.start_spark_master(r_master)

def decommission_sl(master, inst_to_be_deleted, survived_inst):
    if survived_inst is not None:
        slavenames = []
        for slave in survived_inst:
            slavenames.append(slave.hostname())
        slaves_content = c_helper.generate_spark_slaves_configs(slavenames)
    else:
        slaves_content = "\n"

    cluster = master.cluster
    sp_home = c_helper.get_config_value("Spark", "Spark home", cluster)
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, sp_home)

    # write new slaves file to master
    files = {os.path.join(sp_home, 'conf/slaves'): slaves_content}
    r_master.write_files_to(files)

    # write new slaves file to each survived slave as well;
    # guard against survived_inst being None (no slaves left)
    for i in survived_inst or []:
        with remote.get_remote(i) as r:
            r.write_files_to(files)

    run.start_spark_master(r_master, sp_home)
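
# A minimal sketch (an assumption, not the source implementation) of what
# c_helper.generate_spark_slaves_configs plausibly produces: Spark's
# conf/slaves file lists one worker hostname per line, which is why an
# empty cluster degenerates to the bare "\n" written in the else branch
# of decommission_sl above.
def generate_spark_slaves_configs(slavenames):
    # Hypothetical helper: newline-separated hostnames with a trailing
    # newline, so an empty list yields just "\n".
    return "\n".join(slavenames) + "\n"


if __name__ == '__main__':
    print(generate_spark_slaves_configs(['worker-1', 'worker-2']))
    # worker-1
    # worker-2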