Example 1
def setup_cluster(masters, slaves, opts, deploy_ssh_key):
    master = masters[0]
    if deploy_ssh_key:
        print "==> Generating cluster's SSH key on master..."
        key_setup = """
          [ -f ~/.ssh/id_rsa ] ||
            (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
             cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
        """
        utils.do_ssh(master, opts, key_setup)
        dot_ssh_tar = utils.ssh_read(master, opts, ["tar", "c", ".ssh"])
        print "==> Transferring cluster's SSH key to slaves..."
        for slave in slaves:
            utils.ssh_write(slave, opts, ["tar", "x"], dot_ssh_tar)

    print "==> Updating /etc/hosts for each ECS instance..."
    utils.prepare_hosts(master, slaves, opts)

    print "==> Updating Spark default configuration..."
    # copy default Spark config
    os.system(" /bin/cp -r %s/spark/conf/* %s" % (GlobalVar.DEFAULT_CONF_DIR, GlobalVar.SPARK_CONF_DIR))
    utils.do_scp(masters[0], opts, GlobalVar.SPARK_CONF_DIR, GlobalVar.SPARK_INSTALL_DIR)
    for slave in slaves:
        utils.do_scp(slave, opts, GlobalVar.SPARK_CONF_DIR, GlobalVar.SPARK_INSTALL_DIR)

    print "==> Starting spark cluster..."
    start_spark_cluster(master, slaves, opts)
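The function above chains four steps: generating and distributing the cluster SSH key, updating /etc/hosts, syncing the Spark configuration, and starting the cluster. A minimal driver for it might look like the sketch below; the module name spark_deploy and the placeholder instance IDs are assumptions for illustration and do not come from this listing.

# Hypothetical driver: only setup_cluster's signature is taken from the
# example above; the module name and instance IDs are assumptions.
from spark_deploy import setup_cluster  # assumed module name

def launch(opts):
    # In the real tool these lists would come from the ECS provisioning step.
    masters = ["i-master-0"]
    slaves = ["i-slave-1", "i-slave-2"]
    # deploy_ssh_key=True generates the key on the master and copies ~/.ssh
    # to every slave so passwordless SSH works across the cluster.
    setup_cluster(masters, slaves, opts, deploy_ssh_key=True)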
Example 2
def setup_hdfs(masters, slaves, opts):
    print "==> Updating Hadoop configuration for each ECS instance..."
    # copy default hadoop config
    os.system(" /bin/cp -r %s/hadoop/etc/hadoop/* %s/etc/hadoop/"
              % (GlobalVar.DEFAULT_CONF_DIR, GlobalVar.HADOOP_INSTALL_DIR))

    master_intranet_ip = ecs.get_instance_info(masters[0])['InnerIpAddress']['IpAddress'][0]
    namenode = "hdfs://%s:9000" % master_intranet_ip
    utils.update_hadoop_configuration(namenode)
    utils.do_scp(masters[0], opts, GlobalVar.HADOOP_CONF_DIR, "%s/etc/" % GlobalVar.HADOOP_INSTALL_DIR)
    for slave in slaves:
        utils.do_scp(slave, opts, GlobalVar.HADOOP_CONF_DIR, "%s/etc/" % GlobalVar.HADOOP_INSTALL_DIR)

    print "==> Starting HDFS service..."
    start_hdfs(masters[0], slaves, opts)
    print "==> Started HDFS service successfully"