Example #1
def runCassandra(status='stop'):
    with settings(warn_only=True):
        fabfile.__normalUser()
        cassandra_path = os.path.join(
            '/home', env.user, fabfile.cf.get('cassandra', 'cassandra_folder'))
        print cassandra_path
        if (status == 'start'):
            if (not (env.host in fabfile.cf.get(fabfile.activeSession,
                                                'admin_ip'))
                    and not (env.host in fabfile.cf.get(
                        'cassandra', 'one_seed_ip'))):
                out = run(
                    'nohup ' +
                    os.path.join(cassandra_path,
                                 'bin/cassandra -p cassandraPID && sleep 5') +
                    ' ',
                    pty=True,
                    combine_stderr=True)
                print out
        elif (status == 'stop'):
            if (not (env.host in fabfile.cf.get(fabfile.activeSession,
                                                'admin_ip'))):
                pid = run('cat ' +
                          os.path.join('/home', env.user, 'cassandraPID'))
                run("kill -9 " + pid)
                run('rm ' + os.path.join('/home', env.user, 'cassandraPID'))
        else:
            print 'unknown command ' + status + ', only start and stop are supported'
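These are Fabric 1.x tasks written for Python 2 (hence the bare print statements). As a hedged sketch of how such a task might be driven, Fabric's fab CLI passes keyword arguments after a colon; the host names below are hypothetical:

fab -H node1.example.com,node2.example.com runCassandra:status=start
fab -H node1.example.com,node2.example.com runCassandra:status=stop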
Example #2
def runOneCassandraSeed():
    if (env.host in fabfile.cf.get('cassandra', 'one_seed_ip')):
        fabfile.__normalUser()
        cassandra_path = os.path.join(
            '/home', env.user, fabfile.cf.get('cassandra', 'cassandra_folder'))
        print cassandra_path
        out = run(
            'nohup ' +
            os.path.join(cassandra_path, 'bin/cassandra -p cassandraPID') +
            ' ',
            pty=True,
            combine_stderr=True)
        print out
Example #3
def modifyCassandra():
    if (not (env.host in fabfile.cf.get(fabfile.activeSession, 'admin_ip'))):
        fabfile.__normalUser()
        cassandra_path = os.path.join(
            '/home', env.user, fabfile.cf.get('cassandra', 'cassandra_folder'))
        yaml = os.path.join(
            os.path.split(env.real_fabfile)[0], 'files/cassandra.yaml')
        put(yaml, os.path.join(cassandra_path, 'conf/cassandra.yaml'))
Example #4
def formatHadoop():
    if ((not fabfile.myenv.append) or env.host in fabfile.myenv.new_hosts):
        fabfile.__normalUser()
        cf = fabfile.cf
        special = cf.get('hadoop', 'master_public_ip')
        hadoop_folder = os.path.join('/home', env.user,
                                     cf.get('hadoop', 'hadoop_folder'))
        with cd(hadoop_folder):
            if (env.host == special):
                run('bin/hdfs namenode -format ' +
                    cf.get("hadoop", 'format_cluster_name'))
Example #5
def showCassandra():
    if (env.host in fabfile.cf.get('cassandra', 'one_seed_ip')):
        fabfile.__normalUser()
        cassandra_path = os.path.join(
            '/home', env.user, fabfile.cf.get('cassandra', 'cassandra_folder'))
        out = run(os.path.join(cassandra_path, 'bin/nodetool status'),
                  pty=True,
                  combine_stderr=True)
        print out
Example #6
def stopSpark2_4_0():
    if ((not fabfile.myenv.append) or env.host in fabfile.myenv.new_hosts):
        fabfile.__normalUser()
        cf = fabfile.cf
        special = cf.get('spark', 'master_public_ip')
        spark_path = os.path.join('/home', env.user,
                                  cf.get('spark', 'spark_folder'))
        with cd(spark_path):
            if (env.host == special):
                run('sbin/stop-all.sh')
Example #7
def stopHadoop():
    if ((not fabfile.myenv.append) or env.host in fabfile.myenv.new_hosts):
        fabfile.__normalUser()
        cf = fabfile.cf
        special = cf.get('hadoop', 'master_public_ip')
        hadoop_folder = os.path.join('/home', env.user,
                                     cf.get('hadoop', 'hadoop_folder'))
        with cd(hadoop_folder):
            if (env.host == special):
                run('sbin/stop-all.sh')
                run('sbin/mr-jobhistory-daemon.sh stop historyserver')
Example #8
def startSpark2_4_0():
    with settings(
            prompts={
                'Are you sure you want to continue connecting (yes/no)?': 'yes'
            }):
        if ((not fabfile.myenv.append) or env.host in fabfile.myenv.new_hosts):
            fabfile.__normalUser()
            cf = fabfile.cf
            special = cf.get('spark', 'master_public_ip')
            spark_path = os.path.join('/home', env.user,
                                      cf.get('spark', 'spark_folder'))
            with cd(spark_path):
                if (env.host == special):
                    run('sbin/start-all.sh')
Example #9
def startHadoop():
    with settings(
            prompts={
                'Are you sure you want to continue connecting (yes/no)?': 'yes'
            }):
        if ((not fabfile.myenv.append) or env.host in fabfile.myenv.new_hosts):
            fabfile.__normalUser()
            cf = fabfile.cf
            special = cf.get('hadoop', 'master_public_ip')
            hadoop_folder = os.path.join('/home', env.user,
                                         cf.get('hadoop', 'hadoop_folder'))
            with cd(hadoop_folder):
                if (env.host == special):
                    run('sbin/start-all.sh')
                    run('sbin/mr-jobhistory-daemon.sh start historyserver')
Example #10
def distributeSpark2_4_0():
    if (not fabfile.myenv.append) or env.host in fabfile.myenv.new_hosts:
        fabfile.__normalUser()
        print 'will create a temp file /home/username/fabric-spark.tar'
        cf = fabfile.cf
        spark_path = os.path.join('/home', env.user,
                                  cf.get('spark', 'spark_folder'))
        spark_config_folder = os.path.join('/home', env.user,
                                           cf.get('spark', 'spark_folder'),
                                           'conf')

        put(
            os.path.join(
                os.path.split(env.real_fabfile)[0],
                cf.get('spark', 'spark_file')),
            os.path.join('/home', env.user, 'fabric-spark.tar'))
        run('tar -xf ' + os.path.join('/home', env.user, 'fabric-spark.tar'))

        if not remoteFileExist(cf.get('spark', 'spark_work')):
            run('mkdir -p ' + cf.get('spark', 'spark_work'))
        with cd(spark_config_folder):
            print 'clearing slaves...'
            put(
                os.path.join(
                    os.path.split(env.real_fabfile)[0], 'files/spark/slaves'),
                spark_config_folder)
            run("cat /dev/null > slaves")

            print 'writing slaves...'
            for node in cf.get('spark', 'slaves').split(","):
                run("echo " + node + " >> slaves")

            print 'writing spark-env.sh'
            put(
                os.path.join(
                    os.path.split(env.real_fabfile)[0],
                    'files/spark/spark-env.sh'), spark_config_folder)
            # SPARK_MASTER_PORT
            run("echo 'SPARK_MASTER_PORT=7077' >> spark-env.sh")
            # SPARK_MASTER_HOST
            run("echo 'SPARK_MASTER_HOST=" + cf.get('spark', 'master_ip') +
                "' >> spark-env.sh")
            # SPARK_LOCAL_IP: first non-loopback IPv4 address from ifconfig
            SparkLocalIp = ('ifconfig -a|grep inet|grep -v inet6|grep -v 127|awk '
                            '\'{print $2}\'|tr -d "addr:"|sed -n \'1p\'')
            out = run(SparkLocalIp)
            run("echo 'SPARK_LOCAL_IP=" + out + "' >> spark-env.sh")
            # SPARK_HOME
            run("echo 'SPARK_HOME=" + spark_path + "' >> spark-env.sh")
            # JAVA_HOME
            jdk_path = os.path.join(
                '/home/', env.user, cf.get(fabfile.activeSession,
                                           'jdk_folder'))
            run("echo 'JAVA_HOME=" + jdk_path + "' >> spark-env.sh")
            # SPARK_WORK_DIR
            run("echo 'SPARK_WORK_DIR=" + cf.get('spark', 'spark_work') +
                "' >> spark-env.sh")
            # SPARK_WORKER_OPTS: purge finished applications' work dirs
            spark_work_opts = '"-Dspark.worker.cleanup.enabled=true -Dspark.worker.cleanup.interval=1800 -Dspark.worker.cleanup.appDataTtl=3600"'
            run("echo 'SPARK_WORKER_OPTS=" + spark_work_opts +
                "' >> spark-env.sh")
Example #11
def distributeCassandra():
    if ((not fabfile.myenv.append) or env.host in fabfile.myenv.new_hosts):
        fabfile.__normalUser()
        print 'will create a temp file /home/username/fabric-cassandra.tar.gz'
        cf = fabfile.cf

        put(
            os.path.join(
                os.path.split(env.real_fabfile)[0],
                cf.get('cassandra', 'cassandra_file')),
            os.path.join('/home', env.user, 'fabric-cassandra.tar.gz'))
        run('tar -xzf ' +
            os.path.join('/home', env.user, 'fabric-cassandra.tar.gz'))
        cassandra_path = os.path.join('/home', env.user,
                                      cf.get('cassandra', 'cassandra_folder'))
        yaml = os.path.join(
            os.path.split(env.real_fabfile)[0], 'files/cassandra.yaml')
        logback = os.path.join(
            os.path.split(env.real_fabfile)[0], 'files/cassandra-logback.xml')
        put(yaml, os.path.join(cassandra_path, 'conf/cassandra.yaml'))
        # the logging config is XML; Cassandra reads conf/logback.xml
        put(logback, os.path.join(cassandra_path, 'conf/logback.xml'))
        run('mkdir ' + cf.get('cassandra', 'data_folder'))
Example #12
def distributeHadoop2_8_5():
    if ((not fabfile.myenv.append) or env.host in fabfile.myenv.new_hosts):
        fabfile.__normalUser()
        print 'will create a temp file /home/username/fabric-hadoop.tar.gz'
        cf = fabfile.cf
        put(
            os.path.join(
                os.path.split(env.real_fabfile)[0],
                cf.get('hadoop', 'hadoop_file')),
            os.path.join('/home', env.user, 'fabric-hadoop.tar.gz'))
        run('tar -xzf ' +
            os.path.join('/home', env.user, 'fabric-hadoop.tar.gz'))
        if not remoteFileExist(cf.get('hadoop', 'data_folder')):
            run('mkdir -p ' + cf.get('hadoop', 'data_folder'))
        hadoop_config_folder = os.path.join('/home', env.user,
                                            cf.get('hadoop', 'hadoop_folder'),
                                            'etc/hadoop')
        with cd(hadoop_config_folder):
            # point JAVA_HOME in hadoop-env.sh at the deployed JDK
            modifyJDK = "sed -i 's/export JAVA_HOME=.*/export JAVA_HOME=" + os.path.join(
                '/home/', env.user,
                cf.get(fabfile.activeSession, 'jdk_folder')).replace(
                    "/", "\\/") + "/g' hadoop-env.sh"
            run(modifyJDK)
            print 'clearing slaves...'
            run("cat /dev/null > slaves")

            print 'writing slaves...'
            special = cf.get('hadoop', 'master_ip')
            for node in cf.get('hadoop', 'slaves').split(","):
                # if not (node == special and cf.get('hadoop', 'master_as_a_slave') == "0"):
                run("echo " + node + " >> slaves")

            print 'writing mapred-site.xml'
            put(
                os.path.join(
                    os.path.split(env.real_fabfile)[0],
                    'files/hadoop/mapred-site.xml'), hadoop_config_folder)
            modify = "sed -i 's/<value>MASTERIP:10020.*/" + "<value>" + special + ":10020<\\/value>" + "/g' mapred-site.xml"
            run(modify)
            modify = "sed -i 's/<value>MASTERIP:19888.*/" + "<value>" + special + ":19888<\\/value>" + "/g' mapred-site.xml"
            run(modify)

            print 'writing core-site.xml'
            put(
                os.path.join(
                    os.path.split(env.real_fabfile)[0],
                    'files/hadoop/core-site.xml'), hadoop_config_folder)
            modifyFSURL = "sed -i 's/<value>.*/" + "<value>hdfs:\\/\\/" + special + ":9000<\\/value>" + "/g' core-site.xml"
            run(modifyFSURL)

            print 'writing yarn-site.xml'
            put(
                os.path.join(
                    os.path.split(env.real_fabfile)[0],
                    'files/hadoop/yarn-site.xml'), hadoop_config_folder)
            modify = "sed -i 's/<value>MASTERIP:8031.*/" + "<value>" + special + ":8031<\\/value>" + "/g' yarn-site.xml"
            run(modify)
            modify = "sed -i 's/<value>MASTERIP:8032.*/" + "<value>" + special + ":8032<\\/value>" + "/g' yarn-site.xml"
            run(modify)
            modify = "sed -i 's/<value>MASTERIP:8030.*/" + "<value>" + special + ":8030<\\/value>" + "/g' yarn-site.xml"
            run(modify)

            print 'writing hdfs-site.xml'
            put(
                os.path.join(
                    os.path.split(env.real_fabfile)[0],
                    'files/hadoop/hdfs-site.xml'), hadoop_config_folder)
            modify = "sed -i 's/<value>DATADIR.*/<value>" + cf.get(
                'hadoop', 'data_folder').replace(
                    "/", "\\/") + "<\\/value>/g' hdfs-site.xml"
            run(modify)
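The sed commands above splice paths and URLs into s/.../.../g expressions, so every / in the replacement text has to be escaped, which is what the .replace("/", "\\/") calls do. A small illustration with a made-up value:

# hypothetical value, for illustration only
data_folder = '/home/hduser/hadoop-data'
escaped = data_folder.replace("/", "\\/")
print escaped  # prints \/home\/hduser\/hadoop-data
print "sed -i 's/<value>DATADIR.*/<value>" + escaped + "<\\/value>/g' hdfs-site.xml"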
Example #13
def rmCassandraData(status='stop'):
    if (not (env.host in fabfile.cf.get(fabfile.activeSession, 'admin_ip'))):
        fabfile.__normalUser()
        run('rm -rf ' + fabfile.cf.get('cassandra', 'data_folder'))
        run('mkdir ' + fabfile.cf.get('cassandra', 'data_folder'))
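Taken together these tasks sketch a cluster lifecycle. Fabric 1.x executes the listed tasks in order across the host list, so a Cassandra wipe-and-restart might be chained like this (a hedged sketch; the hosts are hypothetical):

fab -H seed.example.com,node1.example.com rmCassandraData runOneCassandraSeed runCassandra:status=start showCassandra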