Code Example #1
File: stop.py Project: codemeow5/deployment
import ssh  # project-local SSH helper module
import db   # project-local database helper module


def main(message):

    # Validate the incoming message: both 'desc' (the cluster
    # description) and 'id' (the cluster's database id) are required.
    if message is None:
        return None
    desc = message.get('desc')
    if desc is None:
        return None
    id_ = message.get('id')
    if id_ is None:
        return None

    # Open SSH connections to every host in the cluster description.
    ssh.init(desc)

    # Resolve the hostnames of the Hadoop master and slave nodes.
    master = ssh.filterName('hadoop_master', 'hostname')
    slave = ssh.filterName('hadoop_slave', 'hostname')

    # Stop the HDFS daemons (namenode on the master, datanodes on the
    # slaves), then the YARN daemons (resourcemanager on the master,
    # nodemanagers on the slaves).
    ssh.cmd('sudo /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs stop namenode', False, *master)
    ssh.cmd('sudo /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs stop datanode', False, *slave)
    ssh.cmd('sudo /usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop stop resourcemanager', False, *master)
    ssh.cmd('sudo /usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop stop nodemanager', False, *slave)

    ssh.close()

    # Record the new cluster status ('S', presumably "stopped").
    db.u_cluster_update_status(id_, 'S')
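
A minimal invocation sketch for the handler above. Only the 'desc' and 'id' keys are actually read; the cluster id and the layout of 'desc' below are hypothetical (the host-dict shape is inferred from the host.get("hostname", "") loop in Code Example #2).

# Hypothetical example only: the id and the 'desc' layout are made up;
# main() itself just requires both keys to be present and non-None.
message = {
    'id': 42,
    'desc': [{'hostname': 'master-1'},
             {'hostname': 'slave-1'}],
}
main(message)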
Code Example #2
File: install.py Project: codemeow5/deployment
import ssh   # project-local SSH helper module
import db    # project-local database helper module
import tool  # project-local path/utility helper module


def main(message):

    # Validate the incoming message: 'desc' (the cluster description),
    # 'uid' (the owning user), and 'name' (the cluster name) are required.
    if message is None:
        return None
    desc = message.get("desc")
    if desc is None:
        return None
    uid = message.get("uid")
    if uid is None:
        return None
    name = message.get("name")
    if name is None:
        return None

    # Register the cluster in the database with initial status "HS"
    # (presumably "installing"); db.u_cluster returns the new cluster id.
    id_ = db.u_cluster(uid, name, "HS", desc)

    # Archives bundled next to this script, and the /tmp paths they are
    # uploaded to on the remote hosts.
    jdk_local_path = tool.join(__file__, "jdk-8u45-linux-x64.tar.gz")
    jdk_tmp_path = "/tmp/jdk-8u45-linux-x64.tar.gz"
    hadoop_local_path = tool.join(__file__, "hadoop-2.6.0.tar.gz")
    hadoop_tmp_path = "/tmp/hadoop-2.6.0.tar.gz"
    spark_local_path = tool.join(__file__, "spark-1.3.0-bin-hadoop2.4.tgz")
    spark_tmp_path = "/tmp/spark-1.3.0-bin-hadoop2.4.tgz"

    # Open SSH connections to every host and collect their hostnames.
    hosts = ssh.init(desc)
    hostnames = [host.get("hostname", "") for host in hosts]

    # Resolve the hostnames of the Hadoop master and slave nodes; the
    # install script takes a single master (-m) and a comma-separated
    # slave list (-s).
    master = ssh.filterName("hadoop_master", "hostname")
    slave = ssh.filterName("hadoop_slave", "hostname")

    m_ = master[0]
    s_ = ",".join(slave)

    # Upload and unpack the JDK and Hadoop archives on every host, then
    # run the install script: -t 0 on the master, -t 1 on the slaves.
    ssh.upload(jdk_local_path, jdk_tmp_path)
    ssh.cmd("sudo tar zxvf %s -C /usr/local" % jdk_tmp_path)
    ssh.upload(hadoop_local_path, hadoop_tmp_path)
    ssh.cmd("sudo tar zxvf %s -C /usr/local" % hadoop_tmp_path)
    ssh.upload(tool.join(__file__, "hadoop-install.sh"), "/tmp/hadoop-install.sh")
    ssh.cmd("sudo chmod u+x /tmp/hadoop-install.sh")
    ssh.cmd("sudo /tmp/hadoop-install.sh -m %s -s %s -t 0" % (m_, s_), False, *master)
    ssh.cmd("sudo /tmp/hadoop-install.sh -m %s -s %s -t 1" % (m_, s_), False, *slave)

    ssh.cmd("sudo /usr/local/hadoop/bin/hdfs namenode -format hadoop", False, *master)
    ssh.cmd(
        "sudo /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs start namenode",
        False,
        *master
    )
    ssh.cmd(
        "sudo /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs start datanode",
        False,
        *slave
    )
    ssh.cmd(
        "sudo /usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop start resourcemanager",
        False,
        *master
    )
    ssh.cmd(
        "sudo /usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop start nodemanager",
        False,
        *slave
    )

    # Spark is installed only on the hosts tagged spark_client.
    spark_client = ssh.filterName("spark_client", "hostname")

    ssh.upload(spark_local_path, spark_tmp_path, False, *spark_client)
    ssh.cmd("sudo tar zxvf %s -C /usr/local" % spark_tmp_path, False, *spark_client)
    ssh.upload(tool.join(__file__, "spark-install.sh"), "/tmp/spark-install.sh", False, *spark_client)
    ssh.cmd("sudo chmod u+x /tmp/spark-install.sh", False, *spark_client)
    ssh.cmd("sudo /tmp/spark-install.sh", False, *spark_client)

    ssh.close()

    # Record the new cluster status ("R", presumably "running").
    db.u_cluster_update_status(id_, "R")
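
A minimal invocation sketch for this handler. Only the 'desc', 'uid', and 'name' keys are required; every concrete value below is hypothetical, and the host-dict shape is inferred from the host.get("hostname", "") loop above.

# Hypothetical example only: all values are made up; the structure
# ssh.init() expects for 'desc' is assumed from how this handler and
# stop.py consume it.
message = {
    'uid': 'user-1',          # owner recorded by db.u_cluster
    'name': 'demo-cluster',   # cluster name recorded by db.u_cluster
    'desc': [{'hostname': 'master-1'},
             {'hostname': 'slave-1'}],
}
main(message)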