# Handler: revert the ip/hostname configuration on every host in the cluster.
# ssh and tool are project-internal helpers; the import path below is assumed
# to match the standalone installer's `common` package.
from common import ssh, tool


def main(message):
    desc = message.get('desc')
    if desc is None:
        return None
    hosts = ssh.init(desc)   # connect to every host described by the message
    ssh.upload(tool.join(__file__, 'ipconfig-clean.sh'), '/tmp/ipconfig-clean.sh')
    ssh.cmd('sudo chmod u+x /tmp/ipconfig-clean.sh')
    ssh.cmd('sudo /tmp/ipconfig-clean.sh')
# Handler: push an ip/hostname mapping onto every host in the cluster.
from common import ssh, tool   # assumed import path, as above


def main(message):
    desc = message.get('desc')
    if desc is None:
        return None
    hosts = ssh.init(desc)
    # Flatten the hosts into an alternating "ip,hostname,ip,hostname,..."
    # string, which ipconfig-install.sh receives via -s.
    maps = []
    for host in hosts:
        maps.append(host.get('ipaddr', ''))
        maps.append(host.get('hostname', ''))
    s_ = ",".join(maps)
    ssh.upload(tool.join(__file__, 'ipconfig-install.sh'), '/tmp/ipconfig-install.sh')
    ssh.cmd('sudo chmod u+x /tmp/ipconfig-install.sh')
    # Run the installer on each host in turn, passing its own hostname via -h.
    for host in hosts:
        h_ = host.get('hostname', '')
        ssh.cmd('sudo /tmp/ipconfig-install.sh -h %s -s %s' % (h_, s_), False, h_)
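# For illustration: with two hypothetical hosts shaped like the dicts that
# ssh.init() returns above, the loop yields the alternating ip,hostname string.
# All values below are made up.
if __name__ == '__main__':
    demo_hosts = [{'ipaddr': '10.0.0.11', 'hostname': 'master0'},
                  {'ipaddr': '10.0.0.12', 'hostname': 'slave0'}]
    demo_maps = []
    for host in demo_hosts:
        demo_maps.append(host.get('ipaddr', ''))
        demo_maps.append(host.get('hostname', ''))
    print(",".join(demo_maps))   # -> 10.0.0.11,master0,10.0.0.12,slave0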
# Handler: stop the Hadoop daemons and mark the cluster stopped.
from common import db, ssh   # assumed import path, as above


def main(message):
    if message is None:
        return None
    desc = message.get('desc')
    if desc is None:
        return None
    id_ = message.get('id')
    if id_ is None:
        return None
    hosts = ssh.init(desc)
    master = ssh.filterName('hadoop_master', 'hostname')
    slave = ssh.filterName('hadoop_slave', 'hostname')
    # Stop HDFS: the namenode on the master, the datanodes on the slaves.
    ssh.cmd('sudo /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs stop namenode', False, *master)
    ssh.cmd('sudo /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs stop datanode', False, *slave)
    # Stop YARN: the resourcemanager on the master, the nodemanagers on the slaves.
    ssh.cmd('sudo /usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop stop resourcemanager', False, *master)
    ssh.cmd('sudo /usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop stop nodemanager', False, *slave)
    ssh.close()
    db.u_cluster_update_status(id_, 'S')   # 'S' presumably marks the cluster stopped
#!/usr/bin/python
# Standalone installer: distribute the JDK and Hadoop to all hosts and run
# the install script, first host as master, the rest as slaves.
import sys

sys.path.append('..')
from common import ssh

jdk_local_path = "./jdk-8u45-linux-x64.tar.gz"
jdk_tmp_path = "/tmp/jdk-8u45-linux-x64.tar.gz"
hadoop_local_path = "./hadoop-2.6.0.tar.gz"
hadoop_tmp_path = "/tmp/hadoop-2.6.0.tar.gz"

if __name__ == '__main__':
    hosts = ssh.init()
    h_ = ",".join(hosts)
    ssh.upload(jdk_local_path, jdk_tmp_path)
    ssh.cmd('sudo tar zxvf %s -C /usr/local' % jdk_tmp_path)
    ssh.upload(hadoop_local_path, hadoop_tmp_path)
    ssh.cmd('sudo tar zxvf %s -C /usr/local' % hadoop_tmp_path)
    ssh.upload('hadoop-install.sh', '/tmp/hadoop-install.sh')
    ssh.cmd('sudo chmod u+x /tmp/hadoop-install.sh')
    # -t 0 configures the first host as master, -t 1 the rest as slaves.
    ssh.cmd('sudo /tmp/hadoop-install.sh -h %s -t 0' % h_, False, hosts[0])
    ssh.cmd('sudo /tmp/hadoop-install.sh -h %s -t 1' % h_, False, *hosts[1:])
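# All of these scripts lean on a project-internal common/ssh.py whose source is
# not shown here. The stub below is a hypothetical reconstruction of its surface,
# inferred purely from the call sites: the function names match usage, but the
# paramiko transport, the credentials, the role-matching rule in filterName, and
# the meaning of the boolean argument are all assumptions, not the real module.
import paramiko

_hosts = []     # host dicts, e.g. {'ipaddr': ..., 'hostname': ..., 'name': ...}
_clients = {}   # hostname -> connected paramiko.SSHClient


def init(desc=None):
    # The real parsing of `desc` is not visible; assume it already yields dicts.
    global _hosts
    _hosts = desc or []
    for host in _hosts:
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host['ipaddr'], username='root')   # credentials assumed
        _clients[host['hostname']] = client
    return _hosts


def filterName(name, key):
    # Assumed rule: a host's role lives in its 'name' field, e.g. 'hadoop_master'.
    return [h[key] for h in _hosts if h.get('name', '').startswith(name)]


def cmd(command, ignore_error=True, *hostnames):
    # Run on the named hosts, or on every host when none are given. Treating the
    # boolean as an ignore-errors flag is a guess; the call sites don't reveal it.
    targets = hostnames or [h['hostname'] for h in _hosts]
    for hostname in targets:
        _, stdout, _ = _clients[hostname].exec_command(command)
        status = stdout.channel.recv_exit_status()
        if status != 0 and not ignore_error:
            raise RuntimeError('%r exited %d on %s' % (command, status, hostname))


def upload(local, remote, ignore_error=True, *hostnames):
    # Copy a local file to the named hosts (all hosts by default) over SFTP.
    targets = hostnames or [h['hostname'] for h in _hosts]
    for hostname in targets:
        sftp = _clients[hostname].open_sftp()
        try:
            sftp.put(local, remote)
        finally:
            sftp.close()


def close():
    # Tear down every connection.
    for client in _clients.values():
        client.close()
    _clients.clear()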
# Handler: wipe the installed software and remove the cluster record.
from common import db, ssh, tool   # assumed import path, as above


def main(message):
    if message is None:
        return None
    desc = message.get('desc')
    if desc is None:
        return None
    id_ = message.get('id')
    if id_ is None:
        return None
    hosts = ssh.init(desc)
    spark_client = ssh.filterName('spark_client', 'hostname')
    # Remove the unpacked JDK, Hadoop, Spark (client hosts only) and data trees.
    ssh.cmd('sudo rm -rf /usr/local/jdk*')
    ssh.cmd('sudo rm -rf /usr/local/hadoop*')
    ssh.cmd('sudo rm -rf /usr/local/spark*', False, *spark_client)
    ssh.cmd('sudo rm -rf /data')
    ssh.upload(tool.join(__file__, 'hadoop-clean.sh'), '/tmp/hadoop-clean.sh')
    ssh.cmd('sudo chmod u+x /tmp/hadoop-clean.sh')
    ssh.cmd('sudo /tmp/hadoop-clean.sh')
    ssh.close()
    db.u_cluster_remove(id_)
# Handler: provision a Hadoop cluster with Spark clients, end to end.
from common import db, ssh, tool   # assumed import path, as above


def main(message):
    if message is None:
        return None
    desc = message.get("desc")
    if desc is None:
        return None
    uid = message.get("uid")
    if uid is None:
        return None
    name = message.get("name")
    if name is None:
        return None
    # Register the cluster before provisioning; "HS" presumably marks the
    # Hadoop/Spark setup as in progress.
    id_ = db.u_cluster(uid, name, "HS", desc)

    jdk_local_path = tool.join(__file__, "jdk-8u45-linux-x64.tar.gz")
    jdk_tmp_path = "/tmp/jdk-8u45-linux-x64.tar.gz"
    hadoop_local_path = tool.join(__file__, "hadoop-2.6.0.tar.gz")
    hadoop_tmp_path = "/tmp/hadoop-2.6.0.tar.gz"
    spark_local_path = tool.join(__file__, "spark-1.3.0-bin-hadoop2.4.tgz")
    spark_tmp_path = "/tmp/spark-1.3.0-bin-hadoop2.4.tgz"

    hosts = ssh.init(desc)
    hostnames = []   # collected for completeness; not used below
    for host in hosts:
        hostnames.append(host.get("hostname", ""))
    master = ssh.filterName("hadoop_master", "hostname")
    slave = ssh.filterName("hadoop_slave", "hostname")
    m_ = master[0]
    s_ = ",".join(slave)

    # Distribute and unpack the JDK and Hadoop on every host.
    ssh.upload(jdk_local_path, jdk_tmp_path)
    ssh.cmd("sudo tar zxvf %s -C /usr/local" % jdk_tmp_path)
    ssh.upload(hadoop_local_path, hadoop_tmp_path)
    ssh.cmd("sudo tar zxvf %s -C /usr/local" % hadoop_tmp_path)
    ssh.upload(tool.join(__file__, "hadoop-install.sh"), "/tmp/hadoop-install.sh")
    ssh.cmd("sudo chmod u+x /tmp/hadoop-install.sh")
    # -t 0 configures the master, -t 1 the slaves.
    ssh.cmd("sudo /tmp/hadoop-install.sh -m %s -s %s -t 0" % (m_, s_), False, *master)
    ssh.cmd("sudo /tmp/hadoop-install.sh -m %s -s %s -t 1" % (m_, s_), False, *slave)

    # Format HDFS, then start the daemons: namenode/resourcemanager on the
    # master, datanode/nodemanager on the slaves.
    ssh.cmd("sudo /usr/local/hadoop/bin/hdfs namenode -format hadoop", False, *master)
    ssh.cmd("sudo /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs start namenode", False, *master)
    ssh.cmd("sudo /usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs start datanode", False, *slave)
    ssh.cmd("sudo /usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop start resourcemanager", False, *master)
    ssh.cmd("sudo /usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop start nodemanager", False, *slave)

    # Install Spark on the designated client hosts only.
    spark_client = ssh.filterName("spark_client", "hostname")
    ssh.upload(spark_local_path, spark_tmp_path, False, *spark_client)
    ssh.cmd("sudo tar zxvf %s -C /usr/local" % spark_tmp_path, False, *spark_client)
    ssh.upload(tool.join(__file__, "spark-install.sh"), "/tmp/spark-install.sh", False, *spark_client)
    ssh.cmd("sudo chmod u+x /tmp/spark-install.sh", False, *spark_client)
    ssh.cmd("sudo /tmp/spark-install.sh", False, *spark_client)
    ssh.close()
    db.u_cluster_update_status(id_, "R")   # "R" presumably marks the cluster running
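# Hypothetical invocation of the install handler, for reference only: every
# field value below is made up, and the exact shape of 'desc' depends on the
# unshown ssh.init parser.
if __name__ == '__main__':
    main({
        'uid': 1001,
        'name': 'demo-cluster',
        'desc': [
            {'ipaddr': '10.0.0.11', 'hostname': 'master0', 'name': 'hadoop_master'},
            {'ipaddr': '10.0.0.12', 'hostname': 'slave0', 'name': 'hadoop_slave'},
            {'ipaddr': '10.0.0.13', 'hostname': 'client0', 'name': 'spark_client'},
        ],
    })   # provisions Hadoop + Spark, then sets the cluster status to "R"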