Example #1
def stopMemsql():
# sudo  ./memsql-ops/memsql-ops/memsql-ops stop
    print "Stopping all active Memsql nodes"
    del_mnodes_cmd = "{0}/memsql-ops/memsql-ops  memsql-delete --all --delete-without-prompting".format(Memsql.msqlopsdir)
    del_memsql_nodes = ThreadedClients([Storage.master], del_mnodes_cmd, root=True)
    del_memsql_nodes.start()
    del_memsql_nodes.join()
Example #2
def createTopic(tName, nParts, ePath, server):
    # TODO Change zk default port?
    ct_cmd = '{0}/bin/kafka-topics.sh --create --partitions {1} --zookeeper {2}:2181 --replication-factor 1 --topic {3}'
    topicClient = ThreadedClients([Config.server],
                                  ct_cmd.format(ePath, nParts, server, tName),
                                  root=True)
    topicClient.start()
    topicClient.join()
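
createTopic builds the kafka-topics.sh invocation and runs it on Config.server, blocking until the topic has been created. A minimal usage sketch; the topic name, partition count, and the choice of Config.exec_path / Config.server as arguments are illustrative assumptions, not values taken from these scripts:

# Hypothetical call: create a 4-partition topic "bids", using the Kafka
# install under Config.exec_path and Config.server as the ZooKeeper host.
createTopic("bids", 4, Config.exec_path, Config.server)
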
Example #3
def startPresto():
    #start_presto_cmd = "'JAVA_HOME={1} PATH={1}/bin:$PATH {0}/bin/launcher run'".format(Presto.prestodir, General.javahome)
    start_presto_cmd = "PATH={0}/bin:$PATH {1}/bin/launcher run".format(General.javahome, Presto.prestodir)
    coordinator = ThreadedClients([Presto.coordinator], start_presto_cmd)
    coordinator.start()
    time.sleep(5)
    workers = ThreadedClients(Presto.nodes, start_presto_cmd)
    workers.start()
    coordinator.join()
    workers.join()
Example #4
def execApp(approach, nMiss):
    #jParams = "kafka={0}:9092 zk={0}:2181 missing={1} > {2}.{1}.log"
    jParams = "kafka={0}:9092 zk={0}:2181 missing={1}"
     # loop for all operations
    jPa = jParams.format(Config.server, nMiss)
    print javaCmd.format(Config.jarpath, approach, jPa)
    jC = ThreadedClients([Config.server], javaCmd.format(Config.jarpath, approach, jPa))
    jC.start()
    jC.join()
    time.sleep(3)
    print doneStr.format(approach + "-" + str(nMiss))
Example #5
def stopSetup():
    # stop kafka
    stopKafkaClient = ThreadedClients([Config.server],
                                      "{0}/bin/kafka-server-stop.sh".format(
                                          Config.exec_path))
    stopKafkaClient.start()
    stopKafkaClient.join()
    # stop zk
    stopZkClient = ThreadedClients([Config.server],
                                   "{0}/bin/zookeeper-server-stop.sh".format(
                                       Config.exec_path))
    stopZkClient.start()
    stopZkClient.join()
Example #6
def cleanDirs():
    # Clean zk dirs
    dClient = ThreadedClients([Config.server],
                              "rm -rf {0}".format(Config.zk_dir),
                              root=True)
    dClient.start()
    dClient.join()
    # Clean kafka dirs
    dClient2 = ThreadedClients([Config.server],
                               "rm -rf {0}".format(Config.k_dir),
                               root=True)
    dClient2.start()
    dClient2.join()
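
Examples #5 and #6 repeat the same construct/start()/join() sequence for every remote command. A small wrapper can factor that out; run_blocking below is a hypothetical helper, relying only on the ThreadedClients behaviour visible in these examples (a host list plus a command string, optional keyword arguments such as root=True) and assuming ThreadedClients has been imported from threaded_ssh:

def run_blocking(hosts, command, **kwargs):
    # Run the command on all hosts via ThreadedClients and wait for completion.
    client = ThreadedClients(hosts, command, **kwargs)
    client.start()
    client.join()
    return client

With it, cleanDirs() reduces to two calls such as run_blocking([Config.server], "rm -rf {0}".format(Config.zk_dir), root=True).
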
Example #7
def confZk():
    zooCfg = '{0}/conf/zoo.cfg'.format(Zookeeper.zkdir)
    with open(zooCfg, 'w+') as f:
         f.write("maxClientCnxns={0}\n".format(Zookeeper.maxclients))
         f.write("tickTime={0}\n".format(Zookeeper.ticktime))
         f.write("dataDir={0}\n".format(Zookeeper.datadir))
         f.write("clientPort={0}\n".format(Zookeeper.clientport))

    deleteClient = ThreadedClients([Storage.master], "rm -rf {0}".format(Zookeeper.datadir), root=True)
    deleteClient.start()
    deleteClient.join()

    copyClient = ThreadedClients([Storage.master], "mkdir -p {0}".format(Zookeeper.datadir), root=True)
    copyClient.start()
    copyClient.join()
    copyToHost([Storage.master], zooCfg)
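
For reference, the zoo.cfg written above ends up with four keys of this shape (the values here are illustrative placeholders, not the settings used by these scripts):

maxClientCnxns=200
tickTime=2000
dataDir=/tmp/zookeeper
clientPort=2181
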
Example #8
def confZk():
    zooCfg = '{0}/conf/zoo.cfg'.format(Zookeeper.zkdir)
    with open(zooCfg, 'w+') as f:
         f.write("maxClientCnxns={0}\n".format(Zookeeper.maxclients))
         f.write("tickTime={0}\n".format(Zookeeper.ticktime))
         f.write("dataDir={0}\n".format(Zookeeper.datadir))
         f.write("clientPort={0}\n".format(Zookeeper.clientport))

    deleteClient = ThreadedClients([Storage.master], "rm -rf {0}".format(Zookeeper.datadir), root=True)
    deleteClient.start()
    deleteClient.join()

    copyClient = ThreadedClients([Storage.master], "mkdir -p {0}".format(Zookeeper.datadir), root=True)
    copyClient.start()
    copyClient.join()
    copyToHost([Storage.master], zooCfg)
Example #9
def startMemsql():
    # start memsql-ops as sudo
    ops_cmd = "numactl -m 0 -N 0 {0}/memsql-ops/memsql-ops start --port {1} -u root --foreground".format(Memsql.msqlopsdir, Memsql.msqlopsport)
    observer = Observer("memsql_platform.jobs.engine: Ready.")
    # start master
    master_ops = ThreadedClients([Storage.master], ops_cmd, root=True, observers=[observer])
    master_ops.start()
    observer.waitFor(1)
    # add client binary
    add_bin_cmd = "{0}/memsql-ops/memsql-ops  file-add -t memsql {1}".format(Memsql.msqlopsdir, Memsql.msqlbin)
    add_client = ThreadedClients([Storage.master], add_bin_cmd, root=True)
    add_client.start()
    add_client.join()
    # add master aggregator
    maggr_cmd = "{0}/memsql-ops/memsql-ops  memsql-deploy --role master --community-edition".format(Memsql.msqlopsdir)
    maggr_client = ThreadedClients([Storage.master], maggr_cmd, root=True)
    maggr_client.start()
    maggr_client.join()

    # start agents
    slave_ops = []
    for s in  Storage.servers:
        # start
        obs = Observer("memsql_platform.jobs.engine: Ready.")
        node_client = ThreadedClients([s], ops_cmd, root=True, observers=[obs])
        node_client.start()
        obs.waitFor(1)
        slave_ops.append(node_client)
        # unfollow
        unfollow_cmd = "{0}/memsql-ops/memsql-ops unfollow".format(Memsql.msqlopsdir)
        ufw_client = ThreadedClients([s], unfollow_cmd, root=True)
        ufw_client.start()
        ufw_client.join()
        # follow master
        follow_cmd = "{0}/memsql-ops/memsql-ops follow -h {1} -P {2}".format(Memsql.msqlopsdir, Storage.master, Memsql.msqlopsport)
        fclient = ThreadedClients([s], follow_cmd, root=True)
        fclient.start()
        fclient.join()
        # start leaves
        start_leaves = "{0}/memsql-ops/memsql-ops  memsql-deploy --role leaf  --community-edition".format(Memsql.msqlopsdir)
        lclient = ThreadedClients([s], start_leaves, root=True)
        lclient.start()
        lclient.join()

        #sudo ./memsql-ops/memsql-ops/memsql-ops follow -h euler07 -P 9000
    return [master_ops] + slave_ops
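
Example #9 gates each deployment step on an Observer that is passed to ThreadedClients and then waited on. The real Observer class ships with these scripts and is not shown here; the sketch below is a hypothetical stand-in that only illustrates the apparent contract (match a marker string in the remote output, let waitFor(n) block until it has been seen n times), under the assumption that the SSH runner feeds output lines to a per-line callback:

import threading

class LineObserver(object):
    # Hypothetical: counts occurrences of a marker string in streamed output.
    def __init__(self, marker):
        self.marker = marker
        self.count = 0
        self.cond = threading.Condition()

    def notify(self, line):
        # Assumed hook, called by the SSH runner for every output line.
        if self.marker in line:
            with self.cond:
                self.count += 1
                self.cond.notify_all()

    def waitFor(self, n):
        # Block until the marker has been observed at least n times.
        with self.cond:
            while self.count < n:
                self.cond.wait()
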
Example #10
def populate(nMiss):
   checkTopics()
   print doneStr.format("CREATION")
   time.sleep(3)

   #params = "kafka={0}:9092 zk={0}:2181 missing={1} sf={2} tuples={3} pcompletion={4} psuccess={5} bid_ratio={6} > {7}.{1}.log"
   params = "kafka={0}:9092 zk={0}:2181 missing={1} sf={2} tuples={3} pcompletion={4} psuccess={5} bid_ratio={6}"
   #aPa = params.format(Config.server, Config.missing, Config.sf, Config.tuples, Config.pcompletion, Config.psuccess, Config.bratio, "auctionProducer")
   aPa = params.format(Config.server, nMiss, Config.sf, Config.tuples, Config.pcompletion, Config.psuccess, Config.bratio)
   #bPa = params.format(Config.server, Config.missing, Config.sf, Config.tuples, Config.pcompletion, Config.psuccess, Config.bratio, "bidProducer")
   bPa = params.format(Config.server, nMiss, Config.sf, Config.tuples, Config.pcompletion, Config.psuccess, Config.bratio)

   # populating auctions
   aC = ThreadedClients([Config.server], javaCmd.format(Config.jarpath, Config.aproducer, aPa))
   aC.start()
   aC.join()
   time.sleep(3)

   # populating bids
   bC = ThreadedClients([Config.server], javaCmd.format(Config.jarpath, Config.bproducer, bPa))
   bC.start()
   bC.join()
   print doneStr.format("POPULATION")
   time.sleep(3)
Example #11
def confRamcloud():
    deleteClient = ThreadedClients(Storage.servers, "rm -rf {0}".format(Ramcloud.backupdir), root=True)
    deleteClient.start()
    deleteClient.join()
    
    deleteClient = ThreadedClients(Storage.servers1, "rm -rf {0}".format(Ramcloud.backupdir1), root=True)
    deleteClient.start()
    deleteClient.join()
    
    copyClient = ThreadedClients(Storage.servers, "mkdir -p {0}".format(Ramcloud.backupdir), root=True)
    copyClient.start()
    copyClient.join()
    
    copyClient = ThreadedClients(Storage.servers1, "mkdir -p {0}".format(Ramcloud.backupdir1), root=True)
    copyClient.start()
    copyClient.join()

    confZk()
Example #12
def confRamcloud():
    deleteClient = ThreadedClients(Storage.servers, "rm -rf {0}".format(Ramcloud.backupdir), root=True)
    deleteClient.start()
    deleteClient.join()
    
    deleteClient = ThreadedClients(Storage.servers1, "rm -rf {0}".format(Ramcloud.backupdir1), root=True)
    deleteClient.start()
    deleteClient.join()
    
    copyClient = ThreadedClients(Storage.servers, "mkdir -p {0}".format(Ramcloud.backupdir), root=True)
    copyClient.start()
    copyClient.join()
    
    copyClient = ThreadedClients(Storage.servers1, "mkdir -p {0}".format(Ramcloud.backupdir1), root=True)
    copyClient.start()
    copyClient.join()

    confZk()
Example #13
def confKudu():
   if Kudu.clean:
       rmcommand = 'rm -rf {0}/*'
       master_client = ThreadedClients([Storage.master], rmcommand.format(Kudu.master_dir), root=True)
       master_client.start()
       tserver_client = ThreadedClients(Storage.servers, rmcommand.format(Kudu.tserver_dir), root=True)
       tserver_client.start()
       tserver_client1 = ThreadedClients(Storage.servers1, rmcommand.format(Kudu.tserver_dir1), root=True)
       tserver_client1.start()

       master_client.join()
       tserver_client.join()
       tserver_client1.join()
Example #14
def confKudu():
   if Kudu.clean:
       rmcommand = 'rm -rf {0}/*'
       master_client = ThreadedClients([Storage.master], rmcommand.format(Kudu.master_dir), root=True)
       master_client.start()
       tserver_client = ThreadedClients(Storage.servers, rmcommand.format(Kudu.tserver_dir), root=True)
       tserver_client.start()
       tserver_client1 = ThreadedClients(Storage.servers1, rmcommand.format(Kudu.tserver_dir1), root=True)
       tserver_client1.start()

       master_client.join()
       tserver_client.join()
       tserver_client1.join()
    f.write("export LD_LIBRARY_PATH={0}\n".format(Spark.telljava))

tmpDirCommand = lambda host: os.system("ssh root@{0} 'rm -rf {1}; mkdir {1}'".format(host, Spark.tmpDir))
configCopyCommand = lambda host: os.system(
    "scp {0} {1} root@{2}:{3}/conf/".format(sparkEnv, sparkDefault, host, Spark.sparkdir)
)
jarCopyCommand = lambda host: os.system("scp {0}/*.jar root@{1}:{0}".format(Spark.jarsDir, host))
tmpDirCommand(Spark.master)
configCopyCommand(Spark.master)
jarCopyCommand(Spark.master)
for host in Spark.slaves:
    tmpDirCommand(host)
    configCopyCommand(host)
    jarCopyCommand(host)

rmClients = ThreadedClients([Spark.master] + Spark.slaves, "rm -rf {0}/work".format(Spark.sparkdir), root=True)
rmClients.start()
rmClients.join()

print master_cmd
master = ThreadedClients([Spark.master], master_cmd, root=True)
master.start()
master.join()

time.sleep(2)

print slave_cmd
slave = ThreadedClients(Spark.slaves, slave_cmd, root=True)
slave.start()
slave.join()
Example #16
def confCassandraCluster():
    os.system("mkdir -p {0}/conf1".format(Cassandra.casdir))
    dirClient = ThreadedClients(Storage.servers1, "mkdir -p {0}/conf1".format(Cassandra.casdir))
    dirClient.start()
    dirClient.join()

    # copy all conf files over, cassandra.yaml will be overwritten later
    os.system("cp -a {0}/conf/* {0}/conf1/".format(Cassandra.casdir))
    copyClient = ThreadedClients(Storage.servers1, "cp -a {0}/conf/* {0}/conf1/".format(Cassandra.casdir))
    copyClient.start()
    copyClient.join()

    # we also have to change cassandra-env.sh
    f = open('{0}/conf/cassandra-env.sh'.format(Cassandra.casdir), 'r')
    templateEnv = f.read()
    f.close()
    templateEnv = templateEnv.replace('JMX_PORT="7199"', 'JMX_PORT="7198"')
    cassandraEnv = '{0}/conf1/cassandra-env.sh'.format(Cassandra.casdir)
    with open(cassandraEnv, 'w') as f:
        f.write(templateEnv)
    copyToHost(Storage.servers1, cassandraEnv)
        
    for numaNode in [0,1]:
        servers = Storage.servers if numaNode == 0 else Storage.servers1
        datadir = Cassandra.datadir if numaNode == 0 else Cassandra.datadir1
        logdir = Cassandra.logdir if numaNode == 0 else Cassandra.logdir1
        nativeport = Cassandra.nativeport if numaNode == 0 else Cassandra.nativeport1
        rpcport = Cassandra.rpcport if numaNode == 0 else Cassandra.rpcport1
        storageport = Cassandra.storageport if numaNode == 0 else Cassandra.storageport1
        sslport = Cassandra.sslport if numaNode == 0 else Cassandra.sslport1
        cassandraConf = '{0}/conf/cassandra.yaml' if numaNode == 0 else '{0}/conf1/cassandra.yaml'
        cassandraConf = cassandraConf.format(Cassandra.casdir)

        if len(servers) == 0:
            continue 

        for host in servers:
            f = open('cassandra.yaml.template', 'r')
            templateconf = f.read()
            f.close()

            templateconf = templateconf.replace("casseeds", "\"" + Storage.servers[0] + "\"")
            templateconf = templateconf.replace("caslistenaddr", host)
            templateconf = templateconf.replace("casdatadir", datadir)
            templateconf = templateconf.replace("caslogdir", logdir)
            templateconf = templateconf.replace("casnativeport", nativeport)
            templateconf = templateconf.replace("casrpcport", rpcport)
            templateconf = templateconf.replace("casstorageport", storageport)
            templateconf = templateconf.replace("cassslport", sslport)

            with open(cassandraConf, 'w') as f:
                f.write(templateconf)
            copyToHost([host], cassandraConf)

        mkClients = ThreadedClients(servers, "mkdir -p {0}".format(datadir), root=True)
        mkClients.start()
        mkClients.join()
        mntClients = ThreadedClients(servers, "mount -t tmpfs -o size={0}G tmpfs {1}".format(Cassandra.datadirSz, datadir), root=True)
        mntClients.start()
        mntClients.join()
        mkClients = ThreadedClients(servers , "mkdir -p {0}".format(logdir), root=True)
        mkClients.start()
        mkClients.join()
Example #17
def confHdfs():

    # mount tmpfs for master and servers on numa 0
    mkClients = ThreadedClients([Storage.master] + Storage.servers, "mkdir -p {0}; mount -t tmpfs -o size={1}G tmpfs {0}".format(Hadoop.datadir, Hadoop.datadirSz), root=True)
    mkClients.start()
    mkClients.join()

    # mount tmpfs for servers on numa 1
    mkClients = ThreadedClients(Storage.servers1, "mkdir -p {0}; mount -t tmpfs -o size={1}G tmpfs {0}".format(Hadoop.datadir1, Hadoop.datadirSz), root=True)
    mkClients.start()
    mkClients.join()

    # modify core-site.xml
    coreSiteXml = '{0}/etc/hadoop/core-site.xml'.format(Hadoop.hadoopdir)
    with open(coreSiteXml, 'w+') as f:
        f.write("<configuration>\n")
        f.write(xmlProp("fs.default.name", "hdfs://{0}:{1}".format(Storage.master, Hadoop.hdfsport)))
        f.write(xmlProp("hadoop.tmp.dir", Hadoop.datadir))
        f.write(xmlProp("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem"))
        f.write(xmlProp("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem"))
        f.write("</configuration>")

    # hadoop_env.sh
    hadoopEnv = '{0}/etc/hadoop/hadoop-env.sh'.format(Hadoop.hadoopdir)
    with open(hadoopEnv, 'w+') as f:
        f.write('export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"\n')
        f.write('HADOOP_JOBTRACKER_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"\n')
        f.write('HADOOP_TASKTRACKER_OPTS="-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"\n')
        f.write('HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"\n')
        f.write('export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"\n')
        f.write('export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"\n')
        f.write('export HADOOP_CLIENT_OPTS="-Xmx2048m $HADOOP_CLIENT_OPTS"\n')
        f.write('export HADOOP_SECURE_DN_USER=\n')
        f.write("export JAVA_HOME={0}\n".format(General.javahome))
        f.write("export HADOOP_LOG_DIR={0}\n".format(Hadoop.datadir))
        f.write("export HADOOP_SECURE_DN_LOG_DIR={0}\n".format(Hadoop.datadir))
        f.write("export HADOOP_CONF_DIR={0}/etc/hadoop/\n".format(Hadoop.hadoopdir))

    # hdfs-site.xml
    hdfsSiteXml = '{0}/etc/hadoop/hdfs-site.xml'.format(Hadoop.hadoopdir)
    with open(hdfsSiteXml, 'w+') as f:
       f.write("<configuration>\n")
       f.write(xmlProp("dfs.replication", Hadoop.dfsreplication))
       f.write(xmlProp("dfs.permissions", "true"))
       f.write(xmlProp("dfs.namenode.rpc-address", "{0}:{1}".format(Storage.master, Hadoop.hdfsport)))
       f.write("</configuration>")

    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], coreSiteXml)
    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], hadoopEnv)
    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], hdfsSiteXml)

    # master file - probably not used anymore as we do not use start-dfs.sh
    masterFile = '{0}/etc/hadoop/masters'.format(Hadoop.hadoopdir)
    with open(masterFile, "w+") as f:
        f.write(Storage.master)

    # slaves file - probably not used anymore as we do not use start-dfs.sh
    slavesFile = '{0}/etc/hadoop/slaves'.format(Hadoop.hadoopdir)
    with open(slavesFile, 'w+') as f:
       for host in (Storage.servers + Storage.servers1):
          f.write(host + "\n")
    
    copyToHost([Storage.master], masterFile)
    copyToHost([Storage.master], slavesFile)

    # format namenode
    nn_format_cmd = "numactl -m 0 -N 0 {0}/bin/hadoop namenode -format".format(Hadoop.hadoopdir)
    nnFormatClients = ThreadedClients([Storage.master], nn_format_cmd, root=True)
    nnFormatClients.start()
    nnFormatClients.join()
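
confHdfs relies on an xmlProp helper that is not included in these excerpts. A plausible one-line implementation, assuming it simply renders a Hadoop-style <property> element (the real helper's formatting may differ):

def xmlProp(name, value):
    # Render one Hadoop configuration property as XML.
    return "  <property>\n    <name>{0}</name>\n    <value>{1}</value>\n  </property>\n".format(name, value)
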
Example #18
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import Kudu
from ServerConfig import General

import time

cmd = ""

if Tpch.storage == Kudu:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 8 -P {2} -k'.format(Tpch.builddir, Kudu.master, len(Kudu.tservers)*4)
elif Tpch.storage == TellStore:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 4 -c "{2}"'.format(Tpch.builddir, TellStore.getServerList(), TellStore.getCommitManagerAddress())

server0 = ThreadedClients(Tpch.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
server1 = ThreadedClients(Tpch.servers1, "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

server0.start()
server1.start()

server0.join()
server1.join()

Example #19
with open(sparkEnv, 'w+') as f:
    f.write('export JAVA_HOME={0}\n'.format(Spark.javahome))
    f.write('export LD_LIBRARY_PATH={0}\n'.format(Spark.telljava))

tmpDirCommand = lambda host:  os.system("ssh root@{0} 'rm -rf {1}; mkdir {1}'".format(host, Spark.tmpDir))
configCopyCommand = lambda host: os.system('scp {0} {1} root@{2}:{3}/conf/'.format(sparkEnv, sparkDefault, host, Spark.sparkdir))
jarCopyCommand = lambda host: os.system('scp {0}/*.jar root@{1}:{0}'.format(Spark.jarsDir, host))
tmpDirCommand(Spark.master)
configCopyCommand(Spark.master)
jarCopyCommand(Spark.master)
for host in Spark.slaves:
    tmpDirCommand(host)
    configCopyCommand(host)
    jarCopyCommand(host)

rmClients = ThreadedClients([Spark.master] + Spark.slaves, "rm -rf {0}/work".format(Spark.sparkdir), root=True)
rmClients.start()
rmClients.join()

print master_cmd
master = ThreadedClients([Spark.master], master_cmd, root=True)
master.start()
master.join()

time.sleep(2)

print slave_cmd
slave = ThreadedClients(Spark.slaves, slave_cmd, root=True)
slave.start()
slave.join()
Example #20
#!/usr/bin/env python
from ServerConfig import Spark
from threaded_ssh import ThreadedClients

master_cmd = "{0}/sbin/stop-master.sh".format(Spark.sparkdir)
slave_cmd = "{0}/sbin/stop-slave.sh".format(Spark.sparkdir, Spark.master)

print master_cmd
master = ThreadedClients([Spark.master], master_cmd, root=True)
master.start()
master.join()

print slave_cmd
slave = ThreadedClients(Spark.slaves, slave_cmd, root=True)
slave.start()
slave.join()
Example #21
def confHdfs():

    # mount tmpfs for master and servers on numa 0
    mkClients = ThreadedClients([Storage.master] + Storage.servers, "mkdir -p {0}; mount -t tmpfs -o size={1}G tmpfs {0}".format(Hadoop.datadir, Hadoop.datadirSz), root=True)
    mkClients.start()
    mkClients.join()

    # mount tmpfs for servers on numa 1
    mkClients = ThreadedClients(Storage.servers1, "mkdir -p {0}; mount -t tmpfs -o size={1}G tmpfs {0}".format(Hadoop.datadir1, Hadoop.datadirSz), root=True)
    mkClients.start()
    mkClients.join()

    # modify core-site.xml
    coreSiteXml = '{0}/etc/hadoop/core-site.xml'.format(Hadoop.hadoopdir)
    with open(coreSiteXml, 'w+') as f:
        f.write("<configuration>\n")
        f.write(xmlProp("fs.default.name", "hdfs://{0}:{1}".format(Storage.master, Hadoop.hdfsport)))
        f.write(xmlProp("hadoop.tmp.dir", Hadoop.datadir))
        f.write(xmlProp("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem"))
        f.write(xmlProp("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem"))
        f.write("</configuration>")

    # hadoop_env.sh
    hadoopEnv = '{0}/etc/hadoop/hadoop-env.sh'.format(Hadoop.hadoopdir)
    with open(hadoopEnv, 'w+') as f:
        f.write('export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"\n')
        f.write('HADOOP_JOBTRACKER_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"\n')
        f.write('HADOOP_TASKTRACKER_OPTS="-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"\n')
        f.write('HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"\n')
        f.write('export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"\n')
        f.write('export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"\n')
        f.write('export HADOOP_CLIENT_OPTS="-Xmx2048m $HADOOP_CLIENT_OPTS"\n')
        f.write('export HADOOP_SECURE_DN_USER=\n')
        f.write("export JAVA_HOME={0}\n".format(General.javahome))
        f.write("export HADOOP_LOG_DIR={0}\n".format(Hadoop.datadir))
        f.write("export HADOOP_SECURE_DN_LOG_DIR={0}\n".format(Hadoop.datadir))
        f.write("export HADOOP_CONF_DIR={0}/etc/hadoop/\n".format(Hadoop.hadoopdir))

    # hdfs-site.xml
    hdfsSiteXml = '{0}/etc/hadoop/hdfs-site.xml'.format(Hadoop.hadoopdir)
    with open(hdfsSiteXml, 'w+') as f:
       f.write("<configuration>\n")
       f.write(xmlProp("dfs.replication", Hadoop.dfsreplication))
       f.write(xmlProp("dfs.permissions", "true"))
       f.write(xmlProp("dfs.namenode.rpc-address", "{0}:{1}".format(Storage.master, Hadoop.hdfsport)))
       f.write("</configuration>")

    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], coreSiteXml)
    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], hadoopEnv)
    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], hdfsSiteXml)

    # master file - probably not used anymore as we do not use start-dfs.sh
    masterFile = '{0}/etc/hadoop/masters'.format(Hadoop.hadoopdir)
    with open(masterFile, "w+") as f:
        f.write(Storage.master)

    # slaves file - probably not used anymore as we do not use start-dfs.sh
    slavesFile = '{0}/etc/hadoop/slaves'.format(Hadoop.hadoopdir)
    with open(slavesFile, 'w+') as f:
       for host in (Storage.servers + Storage.servers1):
          f.write(host + "\n")
    
    copyToHost([Storage.master], masterFile)
    copyToHost([Storage.master], slavesFile)

    # format namenode
    nn_format_cmd = "numactl -m 0 -N 0 {0}/bin/hadoop namenode -format".format(Hadoop.hadoopdir)
    nnFormatClients = ThreadedClients([Storage.master], nn_format_cmd, root=True)
    nnFormatClients.start()
    nnFormatClients.join()
Example #22
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import General

import time

server_cmd = 'cd {0}/watch/tpch; {0}/watch/tpch/tpch_server -s "{1}" -c "{2}"'.format(Tpch.builddir, TellStore.getServerList(), TellStore.getCommitManagerAddress())
client_cmd = '{0}/watch/tpch/tpch_client -H "{1}" -s {2} -P'.format(Tpch.builddir, Tpch.server, Tpch.scaling)

server = ThreadedClients([Tpch.server], server_cmd)
server.start()
time.sleep(5)

client = ThreadedClients([Tpch.client], client_cmd)
client.start()

client.join()
print "Population done, please hit Ctr+C to finish"
server.join()

Example #23
#!/usr/bin/env python
from ServerConfig import Spark
from threaded_ssh import ThreadedClients

master_cmd ="{0}/sbin/stop-master.sh".format(Spark.sparkdir)
slave_cmd  ="{0}/sbin/stop-slave.sh".format(Spark.sparkdir, Spark.master)

print master_cmd
master = ThreadedClients([Spark.master], master_cmd, root=True)
master.start()
master.join()

print slave_cmd
slave = ThreadedClients(Spark.slaves, slave_cmd, root=True)
slave.start()
slave.join()
Example #24
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import General

import time

server_cmd = 'cd {0}/watch/tpch; {0}/watch/tpch/tpch_server -s "{1}" -c "{2}"'.format(
    Tpch.builddir, TellStore.getServerList(),
    TellStore.getCommitManagerAddress())
client_cmd = '{0}/watch/tpch/tpch_client -H "{1}" -s {2} -P'.format(
    Tpch.builddir, Tpch.server, Tpch.scaling)

server = ThreadedClients([Tpch.server], server_cmd)
server.start()
time.sleep(5)

client = ThreadedClients([Tpch.client], client_cmd)
client.start()

client.join()
print "Population done, please hit Ctr+C to finish"
server.join()
Example #25
def confCassandraCluster():
    os.system("mkdir -p {0}/conf1".format(Cassandra.casdir))
    dirClient = ThreadedClients(Storage.servers1, "mkdir -p {0}/conf1".format(Cassandra.casdir))
    dirClient.start()
    dirClient.join()

    # copy all conf files over, cassandra.yaml will be overwritten later
    os.system("cp -a {0}/conf/* {0}/conf1/".format(Cassandra.casdir))
    copyClient = ThreadedClients(Storage.servers1, "cp -a {0}/conf/* {0}/conf1/".format(Cassandra.casdir))
    copyClient.start()
    copyClient.join()

    # we also have to change cassandra-env.sh
    f = open('{0}/conf/cassandra-env.sh'.format(Cassandra.casdir), 'r')
    templateEnv = f.read()
    f.close()
    templateEnv = templateEnv.replace('JMX_PORT="7199"', 'JMX_PORT="7198"')
    cassandraEnv = '{0}/conf1/cassandra-env.sh'.format(Cassandra.casdir)
    with open(cassandraEnv, 'w') as f:
        f.write(templateEnv)
    copyToHost(Storage.servers1, cassandraEnv)
        
    for numaNode in [0,1]:
        servers = Storage.servers if numaNode == 0 else Storage.servers1
        datadir = Cassandra.datadir if numaNode == 0 else Cassandra.datadir1
        logdir = Cassandra.logdir if numaNode == 0 else Cassandra.logdir1
        nativeport = Cassandra.nativeport if numaNode == 0 else Cassandra.nativeport1
        rpcport = Cassandra.rpcport if numaNode == 0 else Cassandra.rpcport1
        storageport = Cassandra.storageport if numaNode == 0 else Cassandra.storageport1
        sslport = Cassandra.sslport if numaNode == 0 else Cassandra.sslport1
        cassandraConf = '{0}/conf/cassandra.yaml' if numaNode == 0 else '{0}/conf1/cassandra.yaml'
        cassandraConf = cassandraConf.format(Cassandra.casdir)

        if len(servers) == 0:
            continue 

        for host in servers:
            f = open('cassandra.yaml.template', 'r')
            templateconf = f.read()
            f.close()

            templateconf = templateconf.replace("casseeds", "\"" + Storage.servers[0] + "\"")
            templateconf = templateconf.replace("caslistenaddr", host)
            templateconf = templateconf.replace("casdatadir", datadir)
            templateconf = templateconf.replace("caslogdir", logdir)
            templateconf = templateconf.replace("casnativeport", nativeport)
            templateconf = templateconf.replace("casrpcport", rpcport)
            templateconf = templateconf.replace("casstorageport", storageport)
            templateconf = templateconf.replace("cassslport", sslport)

            with open(cassandraConf, 'w') as f:
                f.write(templateconf)
            copyToHost([host], cassandraConf)

        mkClients = ThreadedClients(servers, "mkdir -p {0}".format(datadir), root=True)
        mkClients.start()
        mkClients.join()
        mntClients = ThreadedClients(servers, "mount -t tmpfs -o size={0}G tmpfs {1}".format(Cassandra.datadirSz, datadir), root=True)
        mntClients.start()
        mntClients.join()
        mkClients = ThreadedClients(servers , "mkdir -p {0}".format(logdir), root=True)
        mkClients.start()
        mkClients.join()
Example #26
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import Kudu
from ServerConfig import General

import time

cmd = ""

if Tpch.storage == Kudu:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 8 -P {2} -k'.format(
        Tpch.builddir, Kudu.master,
        len(Kudu.tservers) * 4)
elif Tpch.storage == TellStore:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 4 -c "{2}"'.format(
        Tpch.builddir, TellStore.getServerList(),
        TellStore.getCommitManagerAddress())

server0 = ThreadedClients(Tpch.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
server1 = ThreadedClients(Tpch.servers1,
                          "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

server0.start()
server1.start()

server0.join()
server1.join()
Example #27
#!/usr/bin/env python
from threaded_ssh import ThreadedClients
from ServerConfig import Tpcc
from ServerConfig import Kudu
from ServerConfig import TellStore
from ServerConfig import General

def hostToIp(host):
    return General.infinibandIp[host]

def semicolonReduce(x, y):
    return x + ';' + y

cmd = ""

if Tpcc.storage == Kudu:
    cmd = "{0}/watch/tpcc/tpcc_kudu -H `hostname` -W {1} --network-threads 8 -s {2} -P {3}".format(Tpcc.builddir, Tpcc.warehouses, Kudu.master, len(Kudu.tservers)*4)
elif Tpcc.storage == TellStore:
    Tpcc.rsyncBuild()
    cmd = '{0}/watch/tpcc/tpcc_server -W {1} --network-threads 4 -c "{2}" -s "{3}"'.format(Tpcc.builddir, Tpcc.warehouses, General.infinibandIp[TellStore.commitmanager] + ":7242", reduce(semicolonReduce, map(lambda x: hostToIp(x) + ":7241", TellStore.servers)))

client0 = ThreadedClients(Tpcc.servers0, "numactl -m 0 -N 0 {0}".format(cmd), rnd_start=True, root=False)
client1 = ThreadedClients(Tpcc.servers1, "numactl -m 1 -N 1 {0} -p 8712".format(cmd), rnd_start=True, root=False)

client0.start()
client1.start()

client0.join()
client1.join()

Example #28

cmd = ""

if Tpcc.storage == Kudu:
    cmd = "{0}/watch/tpcc/tpcc_kudu -H `hostname` -W {1} --network-threads 8 -s {2} -P {3}".format(
        Tpcc.builddir, Tpcc.warehouses, Kudu.master,
        len(Kudu.tservers) * 4)
elif Tpcc.storage == TellStore:
    Tpcc.rsyncBuild()
    cmd = '{0}/watch/tpcc/tpcc_server -W {1} --network-threads 4 -c "{2}" -s "{3}"'.format(
        Tpcc.builddir, Tpcc.warehouses,
        General.infinibandIp[TellStore.commitmanager] + ":7242",
        reduce(semicolonReduce,
               map(lambda x: hostToIp(x) + ":7241", TellStore.servers)))

client0 = ThreadedClients(Tpcc.servers0,
                          "numactl -m 0 -N 0 {0}".format(cmd),
                          rnd_start=True,
                          root=False)
client1 = ThreadedClients(Tpcc.servers1,
                          "numactl -m 1 -N 1 {0} -p 8712".format(cmd),
                          rnd_start=True,
                          root=False)

client0.start()
client1.start()

client0.join()
client1.join()
Example #29
cmd = '{0}/watch/ycsb-server/ycsb-server -H `hostname` --network-threads {3} -c "{1}" -s "{2}"'.format(
    YCSB.builddir,
    General.infinibandIp[TellStore.commitmanager] + ":7242",
    reduce(semicolonReduce,
           map(lambda x: hostToIp(x) + ":7241", TellStore.servers)),
    YCSB.networkThread)

fst = None
fstZero = True
cCommand = "-C" if args.createTable else ""

if len(YCSB.servers0) > 0:
    fst = YCSB.servers0.pop()
else:
    fstZero = False
    fst = YCSB.servers1.pop()

if fstZero:
    fst = ThreadedClients([fst],
                          "numactl -m 0 -N 0 {0} {1}".format(cmd, cCommand))
else:
    fst = ThreadedClients([fst], "numactl -m 1 -N 1 {0} -p 8712 {1}".format(
        cmd, cCommand))

client0 = ThreadedClients(YCSB.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
client1 = ThreadedClients(YCSB.servers1,
                          "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

fst.start()
client0.start()
client1.start()

fst.join()
client0.join()
client1.join()
Example #30
cmd = '{0}/watch/ycsb-server/ycsb-server -H `hostname` --network-threads {3} -c "{1}" -s "{2}"'.format(YCSB.builddir, General.infinibandIp[TellStore.commitmanager] + ":7242", reduce(semicolonReduce, map(lambda x: hostToIp(x) + ":7241", TellStore.servers)), YCSB.networkThread)

fst = None
fstZero = True
cCommand = "-C" if args.createTable else ""

if len(YCSB.servers0) > 0:
    fst = YCSB.servers0.pop()
else:
    fstZero = False
    fst = YCSB.servers1.pop()

if fstZero:
    fst = ThreadedClients([fst], "numactl -m 0 -N 0 {0} {1}".format(cmd, cCommand))
else:
    fst = ThreadedClients([fst], "numactl -m 1 -N 1 {0} -p 8712 {1}".format(cmd, cCommand))


client0 = ThreadedClients(YCSB.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
client1 = ThreadedClients(YCSB.servers1, "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

fst.start()
client0.start()
client1.start()

fst.join()
client0.join()
client1.join()


Example #31
def checkTopics():
    chkCmd = '{0}/bin/kafka-topics.sh --list --zookeeper {1}:2181'.format(
        Config.exec_path, Config.server)
    chkClient = ThreadedClients([Config.server], chkCmd, root=True)
    chkClient.start()
    chkClient.join()
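
All of the examples above follow the same shape: build a shell command string, hand it to ThreadedClients together with a host list (optionally root=True), then start() and join(). The class below is not the real threaded_ssh implementation; it is a hypothetical, simplified runner that reproduces that calling convention with plain ssh subprocesses, to make the pattern concrete:

import subprocess
import threading

class SimpleSSHClients(object):
    # Hypothetical stand-in for ThreadedClients: one ssh subprocess per host.
    def __init__(self, hosts, command, root=False):
        dest = lambda h: ("root@" + h) if root else h
        self.threads = [
            threading.Thread(target=subprocess.call,
                             args=(["ssh", dest(h), command],))
            for h in hosts
        ]

    def start(self):
        for t in self.threads:
            t.start()

    def join(self):
        for t in self.threads:
            t.join()

# Usage sketch (hypothetical hosts): run a command on two machines and wait.
# clients = SimpleSSHClients(["host1", "host2"], "uname -a")
# clients.start()
# clients.join()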