Example #1
    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        sm_instance = utils.get_instance(cluster, "master")
        dn_instances = utils.get_instances(cluster, "datanode")

        # Start the name node
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        # start the data nodes
        self._start_slave_datanode_processes(dn_instances)

        LOG.info(_LI("Hadoop services in cluster %s have been started"),
                 cluster.name)

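        # create an HDFS home directory for the job user as the hdfs
        # superuser, then hand ownership of it over to $USER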
        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")

        # start spark nodes
        if sm_instance:
            with remote.get_remote(sm_instance) as r:
                run.start_spark_master(r, self._spark_home(cluster))
                LOG.info(_LI("Spark service at '%s' has been started"),
                         sm_instance.hostname())

        LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
        self._set_cluster_info(cluster)
Example #2
    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        sm_instance = utils.get_instance(cluster, "master")
        dn_instances = utils.get_instances(cluster, "datanode")

        # Start the name node
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        # start the data nodes
        self._start_slave_datanode_processes(dn_instances)

        LOG.info("Hadoop services in cluster %s have been started" %
                 cluster.name)

        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")

        # start spark nodes
        if sm_instance:
            with remote.get_remote(sm_instance) as r:
                run.start_spark_master(r, self._spark_home(cluster))
                LOG.info("Spark service at '%s' has been started",
                         sm_instance.hostname())

        LOG.info('Cluster %s has been started successfully', cluster.name)
        self._set_cluster_info(cluster)
Example #3
    def scale_cluster(self, cluster, instances):
        master = utils.get_instance(cluster, "master")
        r_master = remote.get_remote(master)

        run.stop_spark(r_master, self._spark_home(cluster))

        self._setup_instances(cluster, instances)
        nn = utils.get_instance(cluster, "namenode")
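        # ask HDFS to refresh its view of the cluster's datanodes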
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        self._start_slave_datanode_processes(instances)

        run.start_spark_master(r_master, self._spark_home(cluster))
        LOG.info(_LI("Spark master service at '%s' has been restarted"), master.hostname())
Example #4
    def scale_cluster(self, cluster, instances):
        master = utils.get_instance(cluster, "master")
        r_master = remote.get_remote(master)

        run.stop_spark(r_master, self._spark_home(cluster))

        self._setup_instances(cluster, instances)
        nn = utils.get_instance(cluster, "namenode")
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        self._start_slave_datanode_processes(instances)

        run.start_spark_master(r_master, self._spark_home(cluster))
        LOG.info(_LI("Spark master service at '%s' has been restarted"),
                 master.hostname())
Example #5
    def scale_cluster(self, cluster, instances):
        master = utils.get_instance(cluster, "master")
        r_master = remote.get_remote(master)

        run.stop_spark(r_master, self._spark_home(cluster))

        self._setup_instances(cluster, instances)
        nn = utils.get_instance(cluster, "namenode")
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
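        # only the added instances that run a datanode process need to be
        # started here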
        dn_instances = [instance for instance in instances if
                        "datanode" in instance.node_group.node_processes]
        self._start_datanode_processes(dn_instances)

        run.start_spark_master(r_master, self._spark_home(cluster))
        LOG.info(_LI("Spark master service has been restarted"))
Example #6
    def scale_cluster(self, cluster, instances):
        master = utils.get_instance(cluster, "master")
        r_master = remote.get_remote(master)

        run.stop_spark(r_master, self._spark_home(cluster))

        self._setup_instances(cluster, instances)
        nn = utils.get_instance(cluster, "namenode")
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        dn_instances = [instance for instance in instances if
                        'datanode' in instance.node_group.node_processes]
        self._start_datanode_processes(dn_instances)

        swift_helper.install_ssl_certs(instances)
        run.start_spark_master(r_master, self._spark_home(cluster))
        LOG.info("Spark master service has been restarted")
Example #7
    def scale_cluster(self, cluster, instances):
        master = utils.get_instance(cluster, "master")
        r_master = remote.get_remote(master)

        run.stop_spark(r_master, self._spark_home(cluster))

        self._setup_instances(cluster, instances)
        nn = utils.get_instance(cluster, "namenode")
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        dn_instances = [
            instance for instance in instances
            if 'datanode' in instance.node_group.node_processes
        ]
        self._start_datanode_processes(dn_instances)

        run.start_spark_master(r_master, self._spark_home(cluster))
        LOG.info(
            _LI("Spark master service at {host} has been restarted").format(
                host=master.hostname()))
Example #8
def decommission_sl(master, inst_to_be_deleted, survived_inst):
    if survived_inst is not None:
        slavenames = []
        for slave in survived_inst:
            slavenames.append(slave.hostname())
        slaves_content = c_helper.generate_spark_slaves_configs(slavenames)
    else:
        slaves_content = "\n"

    r_master = remote.get_remote(master)
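    # stop the Spark master before rewriting the slaves file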
    run.stop_spark(r_master)

    # write new slave file to master
    files = {'/opt/spark/conf/slaves': slaves_content}
    r_master.write_files_to(files)

    # write new slaves file to each survived slave as well
    for i in survived_inst or []:
        with remote.get_remote(i) as r:
            r.write_files_to(files)

    run.start_spark_master(r_master)
Example #9
def decommission_sl(master, inst_to_be_deleted, survived_inst):
    if survived_inst is not None:
        slavenames = []
        for slave in survived_inst:
            slavenames.append(slave.hostname())
        slaves_content = c_helper.generate_spark_slaves_configs(slavenames)
    else:
        slaves_content = "\n"

    cluster = master.cluster
    sp_home = c_helper.get_config_value("Spark", "Spark home", cluster)
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, sp_home)

    # write new slave file to master
    files = {os.path.join(sp_home, 'conf/slaves'): slaves_content}
    r_master.write_files_to(files)

    # write new slaves file to each survived slave as well
    for i in survived_inst or []:
        with remote.get_remote(i) as r:
            r.write_files_to(files)

    run.start_spark_master(r_master, sp_home)
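
A plausible caller of decommission_sl, sketched from the utils API used in the examples above (the function name decommission_spark_slaves and the "slave" process name are assumptions for illustration, not taken from the listing):

def decommission_spark_slaves(cluster, instances):
    # hypothetical helper: pick out the Spark slaves among the instances
    # being removed and compute which slaves survive the scale-down
    slaves = utils.get_instances(cluster, "slave")
    to_delete = [i for i in instances
                 if 'slave' in i.node_group.node_processes]
    if not to_delete:
        return
    survived = [i for i in slaves if i not in to_delete]

    master = utils.get_instance(cluster, "master")
    # rewrite the slaves file on master and survivors, restart the master
    decommission_sl(master, to_delete, survived)
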
Example #10
    def _start_spark(self, cluster, sm_instance):
        with remote.get_remote(sm_instance) as r:
            run.start_spark_master(r, self._spark_home(cluster))
            LOG.info(_LI("Spark service has been started"))
Example #11
    def _start_spark(self, cluster, sm_instance):
        with remote.get_remote(sm_instance) as r:
            run.start_spark_master(r, self._spark_home(cluster))
            LOG.info(
                _LI("Spark service at {host} has been started").format(
                    host=sm_instance.hostname()))
Example #12
    def _start_spark(self, cluster, sm_instance):
        with remote.get_remote(sm_instance) as r:
            run.start_spark_master(r, self._spark_home(cluster))
            LOG.info(_LI("Spark service at {host} has been started").format(
                host=sm_instance.hostname()))
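
The _start_spark variants above read like the extracted form of the inline "start spark nodes" block in Examples #1 and #2, so a call site inside start_cluster would plausibly shrink to the lines below (assumed for illustration; the listing does not show it):

        # start spark nodes (hypothetical call site)
        if sm_instance:
            self._start_spark(cluster, sm_instance)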