Example #1
def startZk():
    observer = Observer("binding to port")
    zk_cmd = 'numactl -m 0 -N 0 {0}/bin/zkServer.sh start-foreground'.format(Zookeeper.zkdir)
    zkClient = ThreadedClients([Storage.master], zk_cmd, root=True, observers=[observer])
    zkClient.start()
    observer.waitFor(1)
    return zkClient
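
This example, like most start helpers in this listing, follows one pattern: create an Observer for a log line that signals readiness, hand it to ThreadedClients together with the command, start the clients, and block with waitFor(n) until n hosts have printed the line. A minimal sketch of that pattern, assuming the ThreadedClients/Observer API as used throughout these examples (the echo command is only a placeholder):

# sketch of the start/observe pattern; the command is a placeholder
ready = Observer("service is ready")                  # log line to wait for
client = ThreadedClients([Storage.master],            # hosts to run on
                         "echo 'service is ready'",   # placeholder command
                         observers=[ready])
client.start()       # launch the command on every host via SSH
ready.waitFor(1)     # block until one host has printed the expected line
# ... later, during shutdown ...
client.join()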
Example #2
def startZk():
    observer = Observer("binding to port")
    zk_cmd = 'numactl -m 0 -N 0 {0}/bin/zkServer.sh start-foreground'.format(Zookeeper.zkdir)
    zkClient = ThreadedClients([Storage.master], zk_cmd, root=True, observers=[observer])
    zkClient.start()
    observer.waitFor(1)
    return zkClient
Example #3
def startStorageThreads(master_cmd, server_cmd, numa0Args, numa1Args, masterObserverString="", serverObserverString="", envVars = ""):
    masterObservers = []
    if len(masterObserverString) > 0:
        masterObservers = [Observer(masterObserverString)]

    # if Storage.storage == Hbase:
    #    masterObservers.append(Observer("Master has completed initialization"))

    mclient = ThreadedClients([Storage.master], "{0}numactl -m 0 -N 0 {1}".format(envVars, master_cmd), observers=masterObservers)
    mclient.start()

    if len(masterObservers) > 0:
       masterObservers[0].waitFor(1)
    else:
        time.sleep(2)
    
    storageObservers = []
    if len(serverObserverString) > 0:
        storageObservers = [Observer(serverObserverString)]

    tclient = ThreadedClients(Storage.servers, "{0}numactl -m 0 -N 0 {1} {2}".format(envVars, server_cmd, numa0Args), observers=storageObservers)
    tclient.start()

    tclient1 = ThreadedClients(Storage.servers1, '{0}numactl -m 1 -N 1 {1} {2}'.format(envVars, server_cmd, numa1Args), observers=storageObservers)
    tclient1.start()
    
    if len(storageObservers) > 0:
        storageObservers[0].waitFor(len(Storage.servers) + len(Storage.servers1))
    else:
        time.sleep(2)

    if Storage.storage == Hbase:
        time.sleep(5)

    return [tclient, tclient1, mclient]
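
A hypothetical invocation, only to illustrate how the parameters map onto the generated numactl commands (all command strings, paths, and observer texts below are made up, not taken from any real storage backend):

# hypothetical call; every string here is a placeholder
clients = startStorageThreads(
    master_cmd="/opt/storage/bin/master --port 7000",
    server_cmd="/opt/storage/bin/server",
    numa0Args="--data-dir /mnt/data0",
    numa1Args="--data-dir /mnt/data1",
    masterObserverString="Master started",
    serverObserverString="Server ready",
    envVars="LD_LIBRARY_PATH=/opt/storage/lib ")  # trailing space: envVars is prepended directly to "numactl ..."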
Example #4
def startCassandra():
    observerString = "No host ID found"
    start_cas_cmd = "{0}/bin/cassandra -f".format(Cassandra.casdir)
    javaHome = "JAVA_HOME={0}".format(General.javahome)

    # startup seed node
    obs = Observer(observerString)
    seedClient = ThreadedClients([Storage.servers[0]], "{1} numactl -m 0 -N 0 {0}".format(start_cas_cmd, javaHome), observers=[obs])
    seedClient.start()
    obs.waitFor(1)

    nodeClients = []
    # startup remaining nodes on NUMA 0
    if len(Storage.servers) > 1:
        for server in Storage.servers[1:]:
            obs = Observer(observerString)
            nodeClient = ThreadedClients([server], "{1} numactl -m 0 -N 0 {0}".format(start_cas_cmd, javaHome), observers=[obs])
            nodeClient.start()
            obs.waitFor(1)
            nodeClients = nodeClients + [nodeClient]

    # startup nodes on NUMA 1
    if len(Storage.servers1) > 0:
        for server in Storage.servers1:
            obs = Observer(observerString)
            nodeClient = ThreadedClients([server], '{2} CASSANDRA_CONF={1}/conf1 numactl -m 1 -N 1 {0} -Dcassandra.logdir={3}'.format(start_cas_cmd, Cassandra.casdir, javaHome, Cassandra.logdir1), observers=[obs])
            nodeClient.start()
            obs.waitFor(1)
            nodeClients = nodeClients + [nodeClient]

    return nodeClients + [seedClient]
Example #5
def startKafka():
    confKafka()
    # Start Kafka
    observer = Observer("started (kafka.server.KafkaServer)")
    k_cmd = '{0}/bin/kafka-server-start.sh {0}/config/{1}'.format(
        Config.exec_path, Config.k_config)
    kClient = ThreadedClients([Config.server],
                              k_cmd,
                              root=True,
                              observers=[observer])
    kClient.start()
    observer.waitFor(1)
    return kClient
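
Assuming the helpers shown in this listing live in one module, a typical bring-up of the Kafka/ZooKeeper setup could be sketched like this (the topic name and partition count are placeholders):

# hypothetical driver code built from the helpers in this listing
cleanDirs()                 # wipe old ZooKeeper/Kafka data dirs
zkClient = startZk()        # ZooKeeper first
kClient = startKafka()      # then the Kafka broker
createTopic("events", 4, Config.exec_path, Config.server)   # placeholder topic
# ... run the workload ...
stopSetup()                 # stop the broker and ZooKeeper again
zkClient.join()
kClient.join()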
Example #6
def startZk():
    confZk()
    observer = Observer("binding to port")
    zk_cmd = '{0}/bin/zookeeper-server-start.sh {0}/config/{1}'.format(
        Config.exec_path, Config.zk_config)
    zkClient = ThreadedClients([Config.server],
                               zk_cmd,
                               root=True,
                               observers=[observer])
    print zk_cmd
    zkClient.start()
    observer.waitFor(1)
    return zkClient
Example #7
def confZk():
    zooCfg = '{0}/conf/zoo.cfg'.format(Zookeeper.zkdir)
    with open(zooCfg, 'w+') as f:
         f.write("maxClientCnxns={0}\n".format(Zookeeper.maxclients))
         f.write("tickTime={0}\n".format(Zookeeper.ticktime))
         f.write("dataDir={0}\n".format(Zookeeper.datadir))
         f.write("clientPort={0}\n".format(Zookeeper.clientport))

    deleteClient = ThreadedClients([Storage.master], "rm -rf {0}".format(Zookeeper.datadir), root=True)
    deleteClient.start()
    deleteClient.join()

    copyClient = ThreadedClients([Storage.master], "mkdir -p {0}".format(Zookeeper.datadir), root=True)
    copyClient.start()
    copyClient.join()
    copyToHost([Storage.master], zooCfg)
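
For reference, the zoo.cfg written by confZk has this shape (the values below are placeholders, not the actual Zookeeper.* settings):

# illustration only; real values come from the Zookeeper config class
maxClientCnxns=1000
tickTime=2000
dataDir=/tmp/zookeeper
clientPort=2181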
Example #8
def startMBServer(observers):

    Microbench.rsyncBuild()
    path = ""
    params = '-t {0} -n {1} -s {2} '.format(Microbench.threads, Microbench.numColumns, Microbench.scaling)

    if Storage.storage == TellStore:
        cmd = '{0}/watch/microbench/mbserver_tell {1}'.format(TellStore.builddir, params)
        cmd += '-c "{0}" --storage "{1}" --network-threads {2} -m {3}'.format(TellStore.getCommitManagerAddress(), TellStore.getServerList(), Microbench.networkThreads, Microbench.infinioBatch)
    elif Storage.storage == Kudu:
        cmd = '{0}/watch/microbench/mbserver_kudu {1}'.format(TellStore.builddir, params)
        cmd += '-c {0}'.format(Storage.master)
    elif Storage.storage == Cassandra:
        Microbench.rsyncJars()
        path = "PATH={0}/bin:$PATH ".format(General.javahome)
        cmd ='java -jar {0}/mbserver_cassandra.jar {1}'.format(Microbench.javaDir, params)
        cmd += getNodes(Storage.servers, " -cn ")
    elif Storage.storage == Hbase:
        path = "PATH={0}/bin:$PATH ".format(General.javahome)
        cmd = 'java -jar {0}/mbserver_hbase.jar {1}'.format(Microbench.javaDir, params)
        cmd += '-hm {0}'.format(Storage.master)
        cmd += '-zm {0}'.format(Storage.master)
    elif Storage.storage == Ramcloud:
        cmd = '{0}/watch/microbench/mbserver_ramcloud {1}'.format(TellStore.builddir, params)
        cmd += '-c main -l "infrc:host={0}-infrc,port=11100" -x {1}'.format(Storage.master, len(Storage.servers) + len(Storage.servers1))
  
    client0 = ThreadedClients(Microbench.servers0, "{0}numactl -m 0 -N 0 {1}".format(path, cmd), observers=observers)
    client1 = ThreadedClients(Microbench.servers1, "{0}numactl -m 1 -N 1 {1} -p 8712".format(path, cmd), observers=observers)
    
    client0.start()
    client1.start()

    return [client0, client1]
Example #9
def stopMemsql():
# sudo  ./memsql-ops/memsql-ops/memsql-ops stop
    print "Stopping all active Memsql nodes"
    del_mnodes_cmd = "{0}/memsql-ops/memsql-ops  memsql-delete --all --delete-without-prompting".format(Memsql.msqlopsdir)
    del_memsql_nodes = ThreadedClients([Storage.master], del_mnodes_cmd, root=True)
    del_memsql_nodes.start()
    del_memsql_nodes.join()
Example #10
def startMBServer(observers):

    Microbench.rsyncBuild()
    path = ""
    params = '-t {0} -n {1} -s {2} '.format(Microbench.threads,
                                            Microbench.numColumns,
                                            Microbench.scaling)

    if Storage.storage == TellStore:
        cmd = '{0}/watch/microbench/mbserver_tell {1}'.format(
            TellStore.builddir, params)
        cmd += '-c "{0}" --storage "{1}" --network-threads {2} -m {3}'.format(
            TellStore.getCommitManagerAddress(), TellStore.getServerList(),
            Microbench.networkThreads, Microbench.infinioBatch)
    elif Storage.storage == Kudu:
        cmd = '{0}/watch/microbench/mbserver_kudu {1}'.format(
            TellStore.builddir, params)
        cmd += '-c {0}'.format(Storage.master)
    elif Storage.storage == Cassandra:
        Microbench.rsyncJars()
        path = "PATH={0}/bin:$PATH ".format(General.javahome)
        cmd = 'java -jar {0}/mbserver_cassandra.jar {1}'.format(
            Microbench.javaDir, params)
        cmd += getNodes(Storage.servers, " -cn ")
    elif Storage.storage == Hbase:
        path = "PATH={0}/bin:$PATH ".format(General.javahome)
        cmd = 'java -jar {0}/mbserver_hbase.jar {1}'.format(
            Microbench.javaDir, params)
        cmd += '-hm {0}'.format(Storage.master)
        cmd += '-zm {0}'.format(Storage.master)
    elif Storage.storage == Ramcloud:
        cmd = '{0}/watch/microbench/mbserver_ramcloud {1}'.format(
            TellStore.builddir, params)
        cmd += '-c main -l "infrc:host={0}-infrc,port=11100" -x {1}'.format(
            Storage.master,
            len(Storage.servers) + len(Storage.servers1))

    client0 = ThreadedClients(Microbench.servers0,
                              "{0}numactl -m 0 -N 0 {1}".format(path, cmd),
                              observers=observers)
    client1 = ThreadedClients(Microbench.servers1,
                              "{0}numactl -m 1 -N 1 {1} -p 8712".format(
                                  path, cmd),
                              observers=observers)

    client0.start()
    client1.start()

    return [client0, client1]
Example #11
def createTopic(tName, nParts, ePath, server):
    # TODO Change zk default port?
    ct_cmd = '{0}/bin/kafka-topics.sh --create --partitions {1} --zookeeper {2}:2181 --replication-factor 1 --topic {3}'
    topicClient = ThreadedClients([Config.server],
                                  ct_cmd.format(ePath, nParts, server, tName),
                                  root=True)
    topicClient.start()
    topicClient.join()
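
A hypothetical call, to show how the arguments map onto the kafka-topics.sh flags (the topic name and partition count are made up):

# hypothetical usage; "events" and 4 are placeholders
createTopic("events", 4, Config.exec_path, Config.server)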
Example #12
def execApp(approach, nMiss):
    #jParams = "kafka={0}:9092 zk={0}:2181 missing={1} > {2}.{1}.log"
    jParams = "kafka={0}:9092 zk={0}:2181 missing={1}"
     # loop for all operations
    jPa = jParams.format(Config.server, nMiss)
    print javaCmd.format(Config.jarpath, approach, jPa)
    jC = ThreadedClients([Config.server], javaCmd.format(Config.jarpath, approach, jPa))
    jC.start()
    jC.join()
    time.sleep(3)
    print doneStr.format(approach + "-" + str(nMiss))
Example #13
def startRamcloud():
    zkClient = startZk()

    master_cmd = "LD_LIBRARY_PATH={3} numactl -m 0 -N 0 {0}/coordinator -C infrc:host={1}-infrc,port=11100 -x zk:{1}:{2} --timeout {4}".format(Ramcloud.ramclouddir, Storage.master, Zookeeper.clientport, Ramcloud.boost_lib, Ramcloud.timeout)

    masterObs = Observer("Memory usage now")
    masterClient = ThreadedClients([Storage.master], master_cmd, observers=[masterObs])
    masterClient.start()
    masterObs.waitFor(1)

    # create observer list
    storageObs = []
    for i in range(len(Storage.servers) + len(Storage.servers1)):
        storageObs = storageObs + [Observer("Server " + str(i+1) + ".0 is up")]

    nodeClients = []
    storage_cmd = "LD_LIBRARY_PATH={7} numactl -m 0 -N 0 {0}/server -L infrc:host={3}-infrc,port={4} -x zk:{1}:{2} --totalMasterMemory {5} -f {6} --segmentFrames 10000 -r 0 --timeout {8}"
    storage_cmd = storage_cmd.format(Ramcloud.ramclouddir, Storage.master, Zookeeper.clientport, "{0}", Ramcloud.storageport, Ramcloud.memorysize, Ramcloud.backupfile, Ramcloud.boost_lib, Ramcloud.timeout)

    # startup nodes on NUMA 0
    for server in Storage.servers:
        nodeClient = ThreadedClients([server],  storage_cmd.format(server), observers=storageObs)
        nodeClient.start()
        nodeClients = nodeClients + [nodeClient]

    # startup nodes on NUMA 1
    if len(Storage.servers1) > 0:
        storage_cmd = "LD_LIBRARY_PATH={7} numactl -m 1 -N 1 {0}/server -L infrc:host={3}-infrc,port={4} -x zk:{1}:{2} --totalMasterMemory {5} -f {6} --segmentFrames 10000 -r 0 --timeout {8}"
        storage_cmd = storage_cmd.format(Ramcloud.ramclouddir, Storage.master, Zookeeper.clientport, "{0}", Ramcloud.storageport1, Ramcloud.memorysize, Ramcloud.backupfile1, Ramcloud.boost_lib, Ramcloud.timeout)
        for server in Storage.servers1:
            nodeClient = ThreadedClients([server],  storage_cmd.format(server))
            nodeClient.start()
            nodeClients = nodeClients + [nodeClient]

    # wait until all storages are up (each message appears at least once on at least one storage server)
    for storageOb in storageObs:
        storageOb.waitFor(1)

    return  nodeClients + [masterClient, zkClient]
Example #14
def startAimServers(observers = []):
    Aim.rsyncBuild()
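    # 16 chunks per RTA client and storage server; each chunk gets an equal share
    # of TellStore.scanMemory, rounded down to a multiple of 8 bytes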
    numChunks = (len(Storage.servers) + len(Storage.servers1)) * Aim.numRTAClients * 16
    chunkSize = ((TellStore.scanMemory // numChunks) // 8) * 8
    
    serverExec = ""
    if Storage.storage == Kudu:
        serverExec = "aim_kudu -P {0} -s {1}".format((len(Storage.servers) + len(Storage.servers1)) * 2, Storage.master)
    elif Storage.storage == TellStore:
        serverExec = 'aim_server -M {0} -m {1} -c "{2}" -s "{3}" --processing-threads {4}'.format(numChunks, chunkSize, TellStore.getCommitManagerAddress(), TellStore.getServerList(), Aim.serverthreads)
    
    cmd = '{0}/watch/aim-benchmark/{3} -f {1} -b {2}'.format(Aim.builddir, Aim.schemaFile, Aim.batchSize, serverExec)
    
    client0 = ThreadedClients(Aim.sepservers0 + Aim.rtaservers0, "numactl -m 0 -N 0 {0}".format(cmd), root=True)
    client1 = ThreadedClients(Aim.sepservers1 + Aim.rtaservers1, "numactl -m 1 -N 1 {0} -p 8715 -u 8716".format(cmd), root=True)
    
    client0.start()
    client1.start()
    return [client0, client1]
Example #15
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import Kudu
from ServerConfig import General

import time

cmd = ""

if Tpch.storage == Kudu:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 8 -P {2} -k'.format(
        Tpch.builddir, Kudu.master,
        len(Kudu.tservers) * 4)
elif Tpch.storage == TellStore:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 4 -c "{2}"'.format(
        Tpch.builddir, TellStore.getServerList(),
        TellStore.getCommitManagerAddress())

server0 = ThreadedClients(Tpch.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
server1 = ThreadedClients(Tpch.servers1,
                          "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

server0.start()
server1.start()

server0.join()
server1.join()
Example #16
def confRamcloud():
    deleteClient = ThreadedClients(Storage.servers, "rm -rf {0}".format(Ramcloud.backupdir), root=True)
    deleteClient.start()
    deleteClient.join()
    
    deleteClient = ThreadedClients(Storage.servers1, "rm -rf {0}".format(Ramcloud.backupdir1), root=True)
    deleteClient.start()
    deleteClient.join()
    
    copyClient = ThreadedClients(Storage.servers, "mkdir -p {0}".format(Ramcloud.backupdir), root=True)
    copyClient.start()
    copyClient.join()
    
    copyClient = ThreadedClients(Storage.servers1, "mkdir -p {0}".format(Ramcloud.backupdir1), root=True)
    copyClient.start()
    copyClient.join()

    confZk()
Example #17
def startMemsql():
    # start memsql-ops as sudo
    ops_cmd = "numactl -m 0 -N 0 {0}/memsql-ops/memsql-ops start --port {1} -u root --foreground".format(Memsql.msqlopsdir, Memsql.msqlopsport)
    observer = Observer("memsql_platform.jobs.engine: Ready.")
    # start master
    master_ops = ThreadedClients([Storage.master], ops_cmd, root=True, observers=[observer])
    master_ops.start()
    observer.waitFor(1)
    # add client binary
    add_bin_cmd = "{0}/memsql-ops/memsql-ops  file-add -t memsql {1}".format(Memsql.msqlopsdir, Memsql.msqlbin)
    add_client = ThreadedClients([Storage.master], add_bin_cmd, root=True)
    add_client.start()
    add_client.join()
    # add master aggregator
    maggr_cmd = "{0}/memsql-ops/memsql-ops  memsql-deploy --role master --community-edition".format(Memsql.msqlopsdir)
    maggr_client = ThreadedClients([Storage.master], maggr_cmd, root=True)
    maggr_client.start()
    maggr_client.join()

    # start agents
    slave_ops = []
    for s in  Storage.servers:
        # start
        obs = Observer("memsql_platform.jobs.engine: Ready.")
        node_client = ThreadedClients([s], ops_cmd, root=True, observers=[obs])
        node_client.start()
        obs.waitFor(1)
        slave_ops.append(node_client)
        # unfollow
        unfollow_cmd = "{0}/memsql-ops/memsql-ops unfollow".format(Memsql.msqlopsdir)
        ufw_client = ThreadedClients([s], unfollow_cmd, root=True)
        ufw_client.start()
        ufw_client.join()
        # follow master
        follow_cmd = "{0}/memsql-ops/memsql-ops follow -h {1} -P {2}".format(Memsql.msqlopsdir, Storage.master, Memsql.msqlopsport)
        fclient = ThreadedClients([s], follow_cmd, root=True)
        fclient.start()
        fclient.join()
        # start leaves
        start_leaves = "{0}/memsql-ops/memsql-ops  memsql-deploy --role leaf  --community-edition".format(Memsql.msqlopsdir)
        lclient = ThreadedClients([s], start_leaves, root=True)
        lclient.start()
        lclient.join()

        #sudo ./memsql-ops/memsql-ops/memsql-ops follow -h euler07 -P 9000
    return [master_ops] + slave_ops
Example #18
def confKudu():
   if Kudu.clean:
       rmcommand = 'rm -rf {0}/*'
       master_client = ThreadedClients([Storage.master], rmcommand.format(Kudu.master_dir), root=True)
       master_client.start()
       tserver_client = ThreadedClients(Storage.servers, rmcommand.format(Kudu.tserver_dir), root=True)
       tserver_client.start()
       tserver_client1 = ThreadedClients(Storage.servers1, rmcommand.format(Kudu.tserver_dir1), root=True)
       tserver_client1.start()

       master_client.join()
       tserver_client.join()
       tserver_client1.join()
Example #19
def confRamcloud():
    deleteClient = ThreadedClients(Storage.servers, "rm -rf {0}".format(Ramcloud.backupdir), root=True)
    deleteClient.start()
    deleteClient.join()
    
    deleteClient = ThreadedClients(Storage.servers1, "rm -rf {0}".format(Ramcloud.backupdir1), root=True)
    deleteClient.start()
    deleteClient.join()
    
    copyClient = ThreadedClients(Storage.servers, "mkdir -p {0}".format(Ramcloud.backupdir), root=True)
    copyClient.start()
    copyClient.join()
    
    copyClient = ThreadedClients(Storage.servers1, "mkdir -p {0}".format(Ramcloud.backupdir1), root=True)
    copyClient.start()
    copyClient.join()

    confZk()
Example #20
def confCassandraCluster():
    os.system("mkdir -p {0}/conf1".format(Cassandra.casdir))
    dirClient = ThreadedClients(Storage.servers1, "mkdir -p {0}/conf1".format(Cassandra.casdir))
    dirClient.start()
    dirClient.join()

    # copy all conf files over, cassandra.yaml will be overwritten later
    os.system("cp -a {0}/conf/* {0}/conf1/".format(Cassandra.casdir))
    copyClient = ThreadedClients(Storage.servers1, "cp -a {0}/conf/* {0}/conf1/".format(Cassandra.casdir))
    copyClient.start()
    copyClient.join()

    # we also have to change cassandra-env.sh
    f = open('{0}/conf/cassandra-env.sh'.format(Cassandra.casdir), 'r')
    templateEnv = f.read()
    f.close()
    templateEnv = templateEnv.replace('JMX_PORT="7199"', 'JMX_PORT="7198"')
    cassandraEnv = '{0}/conf1/cassandra-env.sh'.format(Cassandra.casdir)
    with open(cassandraEnv, 'w') as f:
        f.write(templateEnv)
    copyToHost(Storage.servers1, cassandraEnv)
        
    for numaNode in [0,1]:
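        # NUMA node 0 uses the default conf/ directory and ports; node 1 uses conf1/
        # together with the alternate data/log directories and port numbers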
        servers = Storage.servers if numaNode == 0 else Storage.servers1
        datadir = Cassandra.datadir if numaNode == 0 else Cassandra.datadir1
        logdir = Cassandra.logdir if numaNode == 0 else Cassandra.logdir1
        nativeport = Cassandra.nativeport if numaNode == 0 else Cassandra.nativeport1
        rpcport = Cassandra.rpcport if numaNode == 0 else Cassandra.rpcport1
        storageport = Cassandra.storageport if numaNode == 0 else Cassandra.storageport1
        sslport = Cassandra.sslport if numaNode == 0 else Cassandra.sslport1
        cassandraConf = '{0}/conf/cassandra.yaml' if numaNode == 0 else '{0}/conf1/cassandra.yaml'
        cassandraConf = cassandraConf.format(Cassandra.casdir)

        if len(servers) == 0:
            continue 

        for host in servers:
            f = open('cassandra.yaml.template', 'r')
            templateconf = f.read()
            f.close()

            templateconf = templateconf.replace("casseeds", "\"" + Storage.servers[0] + "\"")
            templateconf = templateconf.replace("caslistenaddr", host)
            templateconf = templateconf.replace("casdatadir", datadir)
            templateconf = templateconf.replace("caslogdir", logdir)
            templateconf = templateconf.replace("casnativeport", nativeport)
            templateconf = templateconf.replace("casrpcport", rpcport)
            templateconf = templateconf.replace("casstorageport", storageport)
            templateconf = templateconf.replace("cassslport", sslport)

            with open(cassandraConf, 'w') as f:
                f.write(templateconf)
            copyToHost([host], cassandraConf)

        mkClients = ThreadedClients(servers, "mkdir -p {0}".format(datadir), root=True)
        mkClients.start()
        mkClients.join()
        mntClients = ThreadedClients(servers, "mount -t tmpfs -o size={0}G tmpfs {1}".format(Cassandra.datadirSz, datadir), root=True)
        mntClients.start()
        mntClients.join()
        mkClients = ThreadedClients(servers , "mkdir -p {0}".format(logdir), root=True)
        mkClients.start()
        mkClients.join()
Example #21
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import Kudu
from ServerConfig import General

import time

cmd = ""

if Tpch.storage == Kudu:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 8 -P {2} -k'.format(Tpch.builddir, Kudu.master, len(Kudu.tservers)*4)
elif Tpch.storage == TellStore:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 4 -c "{2}"'.format(Tpch.builddir, TellStore.getServerList(), TellStore.getCommitManagerAddress())

server0 = ThreadedClients(Tpch.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
server1 = ThreadedClients(Tpch.servers1, "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

server0.start()
server1.start()

server0.join()
server1.join()

Example #22
def startPresto():
    #start_presto_cmd = "'JAVA_HOME={1} PATH={1}/bin:$PATH {0}/bin/launcher run'".format(Presto.prestodir, General.javahome)
    start_presto_cmd = "PATH={0}/bin:$PATH {1}/bin/launcher run".format(General.javahome, Presto.prestodir)
    coordinator = ThreadedClients([Presto.coordinator], start_presto_cmd)
    coordinator.start()
    time.sleep(5)
    workers = ThreadedClients(Presto.nodes, start_presto_cmd)
    workers.start()
    coordinator.join()
    workers.join()
Example #23
#!/usr/bin/env python
from ServerConfig import Spark
from threaded_ssh import ThreadedClients

master_cmd = "{0}/sbin/stop-master.sh".format(Spark.sparkdir)
slave_cmd = "{0}/sbin/stop-slave.sh".format(Spark.sparkdir, Spark.master)

print master_cmd
master = ThreadedClients([Spark.master], master_cmd, root=True)
master.start()
master.join()

print slave_cmd
slave = ThreadedClients(Spark.slaves, slave_cmd, root=True)
slave.start()
slave.join()
Example #24
cmd = '{0}/watch/ycsb-server/ycsb-server -H `hostname` --network-threads {3} -c "{1}" -s "{2}"'.format(
    YCSB.builddir, General.infinibandIp[TellStore.commitmanager] + ":7242",
    reduce(semicolonReduce,
           map(lambda x: hostToIp(x) + ":7241", TellStore.servers)),
    YCSB.networkThread)

fst = None
fstZero = True
cCommand = "-C" if args.createTable else ""

if len(YCSB.servers0) > 0:
    fst = YCSB.servers0.pop()
else:
    fstZero = False
    fst = YCSB.servers1.pop()

if fstZero:
    fst = ThreadedClients([fst],
                          "numactl -m 0 -N 0 {0} {1}".format(cmd, cCommand))
else:
    fst = ThreadedClients([fst], "numactl -m 1 -N 1 {0} -p 8712 {1}".format(
        cmd, cCommand))

client0 = ThreadedClients(YCSB.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
client1 = ThreadedClients(YCSB.servers1,
                          "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

fst.start()
client0.start()
client1.start()

fst.join()
client0.join()
client1.join()
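
semicolonReduce and hostToIp are imported from elsewhere and not shown in this listing; judging by how they are used, the reduce/map expression builds the TellStore server list as a semicolon-separated string of ip:port pairs. A minimal sketch of helpers with that assumed behaviour (hypothetical, for illustration only):

import socket

# hypothetical stand-ins for the helpers these scripts import
def hostToIp(host):
    # resolve a host name to its IPv4 address
    return socket.gethostbyname(host)

def semicolonReduce(acc, item):
    # fold step joining entries with semicolons, e.g. "10.0.0.1:7241;10.0.0.2:7241"
    return acc + ";" + item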
Example #25
def confHdfs():

    # mount tmpfs for master and servers on numa 0
    mkClients = ThreadedClients([Storage.master] + Storage.servers, "mkdir -p {0}; mount -t tmpfs -o size={1}G tmpfs {0}".format(Hadoop.datadir, Hadoop.datadirSz), root=True)
    mkClients.start()
    mkClients.join()

    # mount tmpfs for servers on numa 1
    mkClients = ThreadedClients(Storage.servers1, "mkdir -p {0}; mount -t tmpfs -o size={1}G tmpfs {0}".format(Hadoop.datadir1, Hadoop.datadirSz), root=True)
    mkClients.start()
    mkClients.join()

    # modify core-site.xml
    coreSiteXml = '{0}/etc/hadoop/core-site.xml'.format(Hadoop.hadoopdir)
    with open(coreSiteXml, 'w+') as f:
        f.write("<configuration>\n")
        f.write(xmlProp("fs.default.name", "hdfs://{0}:{1}".format(Storage.master, Hadoop.hdfsport)))
        f.write(xmlProp("hadoop.tmp.dir", Hadoop.datadir))
        f.write(xmlProp("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem"))
        f.write(xmlProp("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem"))
        f.write("</configuration>")

    # hadoop_env.sh
    hadoopEnv = '{0}/etc/hadoop/hadoop-env.sh'.format(Hadoop.hadoopdir)
    with open(hadoopEnv, 'w+') as f:
        f.write('export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"\n')
        f.write('HADOOP_JOBTRACKER_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"\n')
        f.write('HADOOP_TASKTRACKER_OPTS="-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"\n')
        f.write('HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"\n')
        f.write('export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"\n')
        f.write('export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"\n')
        f.write('export HADOOP_CLIENT_OPTS="-Xmx2048m $HADOOP_CLIENT_OPTS"\n')
        f.write('export HADOOP_SECURE_DN_USER=\n')
        f.write("export JAVA_HOME={0}\n".format(General.javahome))
        f.write("export HADOOP_LOG_DIR={0}\n".format(Hadoop.datadir))
        f.write("export HADOOP_SECURE_DN_LOG_DIR={0}\n".format(Hadoop.datadir))
        f.write("export HADOOP_CONF_DIR={0}/etc/hadoop/\n".format(Hadoop.hadoopdir))

    # hdfs-site.xml
    hdfsSiteXml = '{0}/etc/hadoop/hdfs-site.xml'.format(Hadoop.hadoopdir)
    with open(hdfsSiteXml, 'w+') as f:
       f.write("<configuration>\n")
       f.write(xmlProp("dfs.replication", Hadoop.dfsreplication))
       f.write(xmlProp("dfs.permissions", "true"))
       f.write(xmlProp("dfs.namenode.rpc-address", "{0}:{1}".format(Storage.master, Hadoop.hdfsport)))
       f.write("</configuration>")

    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], coreSiteXml)
    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], hadoopEnv)
    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], hdfsSiteXml)

    # master file - probably not used anymore as we do not use start-dfs.sh
    masterFile = '{0}/etc/hadoop/masters'.format(Hadoop.hadoopdir)
    with open(masterFile, "w+") as f:
        f.write(Storage.master)

    # slaves file - probably not used anymore as we do not use start-dfs.sh
    slavesFile = '{0}/etc/hadoop/slaves'.format(Hadoop.hadoopdir)
    with open(slavesFile, 'w+') as f:
       for host in (Storage.servers + Storage.servers1):
          f.write(host + "\n")
    
    copyToHost([Storage.master], masterFile)
    copyToHost([Storage.master], slavesFile)

    # format namenode
    nn_format_cmd = "numactl -m 0 -N 0 {0}/bin/hadoop namenode -format".format(Hadoop.hadoopdir)
    nnFormatClients = ThreadedClients([Storage.master], nn_format_cmd, root=True)
    nnFormatClients.start()
    nnFormatClients.join()
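
xmlProp is not defined in this listing; judging by how confHdfs uses it when writing core-site.xml and hdfs-site.xml, it presumably renders one Hadoop <property> entry. A minimal sketch under that assumption:

# hypothetical helper, not taken from the original sources
def xmlProp(name, value):
    # render a single Hadoop configuration property element
    return ("  <property>\n"
            "    <name>{0}</name>\n"
            "    <value>{1}</value>\n"
            "  </property>\n").format(name, value)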
Example #26
def confKudu():
   if Kudu.clean:
       rmcommand = 'rm -rf {0}/*'
       master_client = ThreadedClients([Storage.master], rmcommand.format(Kudu.master_dir), root=True)
       master_client.start()
       tserver_client = ThreadedClients(Storage.servers, rmcommand.format(Kudu.tserver_dir), root=True)
       tserver_client.start()
       tserver_client1 = ThreadedClients(Storage.servers1, rmcommand.format(Kudu.tserver_dir1), root=True)
       tserver_client1.start()

       master_client.join()
       tserver_client.join()
       tserver_client1.join()
Example #27
#!/usr/bin/env python
from ServerConfig import Spark
from threaded_ssh import ThreadedClients

master_cmd = "{0}/sbin/stop-master.sh".format(Spark.sparkdir)
slave_cmd = "{0}/sbin/stop-slave.sh".format(Spark.sparkdir, Spark.master)

print master_cmd
master = ThreadedClients([Spark.master], master_cmd, root=True)
master.start()
master.join()

print slave_cmd
slave = ThreadedClients(Spark.slaves, slave_cmd, root=True)
slave.start()
slave.join()
Example #28
with open(sparkEnv, 'w+') as f:
    f.write('export JAVA_HOME={0}\n'.format(Spark.javahome))
    f.write('export LD_LIBRARY_PATH={0}\n'.format(Spark.telljava))

tmpDirCommand = lambda host:  os.system("ssh root@{0} 'rm -rf {1}; mkdir {1}'".format(host, Spark.tmpDir))
configCopyCommand = lambda host: os.system('scp {0} {1} root@{2}:{3}/conf/'.format(sparkEnv, sparkDefault, host, Spark.sparkdir))
jarCopyCommand = lambda host: os.system('scp {0}/*.jar root@{1}:{0}'.format(Spark.jarsDir, host))
tmpDirCommand(Spark.master)
configCopyCommand(Spark.master)
jarCopyCommand(Spark.master)
for host in Spark.slaves:
    tmpDirCommand(host)
    configCopyCommand(host)
    jarCopyCommand(host)

rmClients = ThreadedClients([Spark.master] + Spark.slaves, "rm -rf {0}/work".format(Spark.sparkdir), root=True)
rmClients.start()
rmClients.join()

print master_cmd
master = ThreadedClients([Spark.master], master_cmd, root=True)
master.start()
master.join()

time.sleep(2)

print slave_cmd
slave = ThreadedClients(Spark.slaves, slave_cmd, root=True)
slave.start()
slave.join()
Example #29
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import General

import time

server_cmd = 'cd {0}/watch/tpch; {0}/watch/tpch/tpch_server -s "{1}" -c "{2}"'.format(
    Tpch.builddir, TellStore.getServerList(),
    TellStore.getCommitManagerAddress())
client_cmd = '{0}/watch/tpch/tpch_client -H "{1}" -s {2} -P'.format(
    Tpch.builddir, Tpch.server, Tpch.scaling)

server = ThreadedClients([Tpch.server], server_cmd)
server.start()
time.sleep(5)

client = ThreadedClients([Tpch.client], client_cmd)
client.start()

client.join()
print "Population done, please hit Ctr+C to finish"
server.join()
Example #30
def confHdfs():

    # mount tmpfs for master and servers on numa 0
    mkClients = ThreadedClients([Storage.master] + Storage.servers, "mkdir -p {0}; mount -t tmpfs -o size={1}G tmpfs {0}".format(Hadoop.datadir, Hadoop.datadirSz), root=True)
    mkClients.start()
    mkClients.join()

    # mount tmpfs for servers on numa 1
    mkClients = ThreadedClients(Storage.servers1, "mkdir -p {0}; mount -t tmpfs -o size={1}G tmpfs {0}".format(Hadoop.datadir1, Hadoop.datadirSz), root=True)
    mkClients.start()
    mkClients.join()

    # modify core-site.xml
    coreSiteXml = '{0}/etc/hadoop/core-site.xml'.format(Hadoop.hadoopdir)
    with open(coreSiteXml, 'w+') as f:
        f.write("<configuration>\n")
        f.write(xmlProp("fs.default.name", "hdfs://{0}:{1}".format(Storage.master, Hadoop.hdfsport)))
        f.write(xmlProp("hadoop.tmp.dir", Hadoop.datadir))
        f.write(xmlProp("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem"))
        f.write(xmlProp("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem"))
        f.write("</configuration>")

    # hadoop_env.sh
    hadoopEnv = '{0}/etc/hadoop/hadoop-env.sh'.format(Hadoop.hadoopdir)
    with open(hadoopEnv, 'w+') as f:
        f.write('export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"\n')
        f.write('HADOOP_JOBTRACKER_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"\n')
        f.write('HADOOP_TASKTRACKER_OPTS="-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"\n')
        f.write('HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"\n')
        f.write('export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"\n')
        f.write('export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"\n')
        f.write('export HADOOP_CLIENT_OPTS="-Xmx2048m $HADOOP_CLIENT_OPTS"\n')
        f.write('export HADOOP_SECURE_DN_USER=\n')
        f.write("export JAVA_HOME={0}\n".format(General.javahome))
        f.write("export HADOOP_LOG_DIR={0}\n".format(Hadoop.datadir))
        f.write("export HADOOP_SECURE_DN_LOG_DIR={0}\n".format(Hadoop.datadir))
        f.write("export HADOOP_CONF_DIR={0}/etc/hadoop/\n".format(Hadoop.hadoopdir))

    # hdfs-site.xml
    hdfsSiteXml = '{0}/etc/hadoop/hdfs-site.xml'.format(Hadoop.hadoopdir)
    with open(hdfsSiteXml, 'w+') as f:
       f.write("<configuration>\n")
       f.write(xmlProp("dfs.replication", Hadoop.dfsreplication))
       f.write(xmlProp("dfs.permissions", "true"))
       f.write(xmlProp("dfs.namenode.rpc-address", "{0}:{1}".format(Storage.master, Hadoop.hdfsport)))
       f.write("</configuration>")

    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], coreSiteXml)
    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], hadoopEnv)
    copyToHost(Storage.servers + Storage.servers1 + [Storage.master], hdfsSiteXml)

    # master file - probably not used anymore as we do not use start-dfs.sh
    masterFile = '{0}/etc/hadoop/masters'.format(Hadoop.hadoopdir)
    with open(masterFile, "w+") as f:
        f.write(Storage.master)

    # slaves file - probably not used anymore as we do not use start-dfs.sh
    slavesFile = '{0}/etc/hadoop/slaves'.format(Hadoop.hadoopdir)
    with open(slavesFile, 'w+') as f:
       for host in (Storage.servers + Storage.servers1):
          f.write(host + "\n")
    
    copyToHost([Storage.master], masterFile)
    copyToHost([Storage.master], slavesFile)

    # format namenode
    nn_format_cmd = "numactl -m 0 -N 0 {0}/bin/hadoop namenode -format".format(Hadoop.hadoopdir)
    nnFormatClients = ThreadedClients([Storage.master], nn_format_cmd, root=True)
    nnFormatClients.start()
    nnFormatClients.join()
Example #31
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import General

import time

server_cmd = 'cd {0}/watch/tpch; {0}/watch/tpch/tpch_server -s "{1}" -c "{2}"'.format(Tpch.builddir, TellStore.getServerList(), TellStore.getCommitManagerAddress())
client_cmd = '{0}/watch/tpch/tpch_client -H "{1}" -s {2} -P'.format(Tpch.builddir, Tpch.server, Tpch.scaling)

server = ThreadedClients([Tpch.server], server_cmd)
server.start()
time.sleep(5)

client = ThreadedClients([Tpch.client], client_cmd)
client.start()

client.join()
print "Population done, please hit Ctr+C to finish"
server.join()

Example #32
def startCassandra():
    observerString = "No host ID found"
    start_cas_cmd = "{0}/bin/cassandra -f".format(Cassandra.casdir)
    javaHome = "JAVA_HOME={0}".format(General.javahome)

    # startup seed node
    obs = Observer(observerString)
    seedClient = ThreadedClients([Storage.servers[0]], "{1} numactl -m 0 -N 0 {0}".format(start_cas_cmd, javaHome), observers=[obs])
    seedClient.start()
    obs.waitFor(1)

    nodeClients = []
    # startup remaining nodes on NUMA 0
    if len(Storage.servers) > 1:
        for server in Storage.servers[1:]:
            obs = Observer(observerString)
            nodeClient = ThreadedClients([server], "{1} numactl -m 0 -N 0 {0}".format(start_cas_cmd, javaHome), observers=[obs])
            nodeClient.start()
            obs.waitFor(1)
            nodeClients = nodeClients + [nodeClient]

    # startup nodes on NUMA 1
    if len(Storage.servers1) > 0:
        for server in Storage.servers1:
            obs = Observer(observerString)
            nodeClient = ThreadedClients([server], '{2} CASSANDRA_CONF={1}/conf1 numactl -m 1 -N 1 {0} -Dcassandra.logdir={3}'.format(start_cas_cmd, Cassandra.casdir, javaHome, Cassandra.logdir1), observers=[obs])
            nodeClient.start()
            obs.waitFor(1)
            nodeClients = nodeClients + [nodeClient]

    return nodeClients + [seedClient]
Example #33
def stopSetup():
    # stop kafka
    stopKafkaClient = ThreadedClients([Config.server],
                                      "{0}/bin/kafka-server-stop.sh".format(
                                          Config.exec_path))
    stopKafkaClient.start()
    stopKafkaClient.join()
    # stop zk
    stopZkClient = ThreadedClients([Config.server],
                                   "{0}/bin/zookeeper-server-stop.sh".format(
                                       Config.exec_path))
    stopZkClient.start()
    stopZkClient.join()
Example #34
def startRamcloud():
    zkClient = startZk()

    master_cmd = "LD_LIBRARY_PATH={3} numactl -m 0 -N 0 {0}/coordinator -C infrc:host={1}-infrc,port=11100 -x zk:{1}:{2} --timeout {4}".format(Ramcloud.ramclouddir, Storage.master, Zookeeper.clientport, Ramcloud.boost_lib, Ramcloud.timeout)

    masterObs = Observer("Memory usage now")
    masterClient = ThreadedClients([Storage.master], master_cmd, observers=[masterObs])
    masterClient.start()
    masterObs.waitFor(1)

    # create observer list
    storageObs = []
    for i in range(len(Storage.servers) + len(Storage.servers1)):
        storageObs = storageObs + [Observer("Server " + str(i+1) + ".0 is up")]

    nodeClients = []
    storage_cmd = "LD_LIBRARY_PATH={7} numactl -m 0 -N 0 {0}/server -L infrc:host={3}-infrc,port={4} -x zk:{1}:{2} --totalMasterMemory {5} -f {6} --segmentFrames 10000 -r 0 --timeout {8}"
    storage_cmd = storage_cmd.format(Ramcloud.ramclouddir, Storage.master, Zookeeper.clientport, "{0}", Ramcloud.storageport, Ramcloud.memorysize, Ramcloud.backupfile, Ramcloud.boost_lib, Ramcloud.timeout)

    # startup nodes on NUMA 0
    for server in Storage.servers:
        nodeClient = ThreadedClients([server],  storage_cmd.format(server), observers=storageObs)
        nodeClient.start()
        nodeClients = nodeClients + [nodeClient]

    # startup nodes on NUMA 1
    if len(Storage.servers1) > 0:
        storage_cmd = "LD_LIBRARY_PATH={7} numactl -m 1 -N 1 {0}/server -L infrc:host={3}-infrc,port={4} -x zk:{1}:{2} --totalMasterMemory {5} -f {6} --segmentFrames 10000 -r 0 --timeout {8}"
        storage_cmd = storage_cmd.format(Ramcloud.ramclouddir, Storage.master, Zookeeper.clientport, "{0}", Ramcloud.storageport1, Ramcloud.memorysize, Ramcloud.backupfile1, Ramcloud.boost_lib, Ramcloud.timeout)
        for server in Storage.servers1:
            nodeClient = ThreadedClients([server],  storage_cmd.format(server))
            nodeClient.start()
            nodeClients = nodeClients + [nodeClient]

    # wait until all storages are up (each message appears at least once on at least one storage server)
    for storageOb in storageObs:
        storageOb.waitFor(1)

    return  nodeClients + [masterClient, zkClient]
Example #35
    f.write("export LD_LIBRARY_PATH={0}\n".format(Spark.telljava))

tmpDirCommand = lambda host: os.system("ssh root@{0} 'rm -rf {1}; mkdir {1}'".format(host, Spark.tmpDir))
configCopyCommand = lambda host: os.system(
    "scp {0} {1} root@{2}:{3}/conf/".format(sparkEnv, sparkDefault, host, Spark.sparkdir)
)
jarCopyCommand = lambda host: os.system("scp {0}/*.jar root@{1}:{0}".format(Spark.jarsDir, host))
tmpDirCommand(Spark.master)
configCopyCommand(Spark.master)
jarCopyCommand(Spark.master)
for host in Spark.slaves:
    tmpDirCommand(host)
    configCopyCommand(host)
    jarCopyCommand(host)

rmClients = ThreadedClients([Spark.master] + Spark.slaves, "rm -rf {0}/work".format(Spark.sparkdir), root=True)
rmClients.start()
rmClients.join()

print master_cmd
master = ThreadedClients([Spark.master], master_cmd, root=True)
master.start()
master.join()

time.sleep(2)

print slave_cmd
slave = ThreadedClients(Spark.slaves, slave_cmd, root=True)
slave.start()
slave.join()
Example #36
def startStorageThreads(master_cmd, server_cmd, numa0Args, numa1Args, masterObserverString="", serverObserverString="", envVars = ""):
    masterObservers = []
    if len(masterObserverString) > 0:
        masterObservers = [Observer(masterObserverString)]

    # if Storage.storage == Hbase:
    #    masterObservers.append(Observer("Master has completed initialization"))

    mclient = ThreadedClients([Storage.master], "{0}numactl -m 0 -N 0 {1}".format(envVars, master_cmd), observers=masterObservers)
    mclient.start()

    if len(masterObservers) > 0:
       masterObservers[0].waitFor(1)
    else:
        time.sleep(2)
    
    storageObservers = []
    if len(serverObserverString) > 0:
        storageObservers = [Observer(serverObserverString)]

    tclient = ThreadedClients(Storage.servers, "{0}numactl -m 0 -N 0 {1} {2}".format(envVars, server_cmd, numa0Args), observers=storageObservers)
    tclient.start()

    tclient1 = ThreadedClients(Storage.servers1, '{0}numactl -m 1 -N 1 {1} {2}'.format(envVars, server_cmd, numa1Args), observers=storageObservers)
    tclient1.start()
    
    if len(storageObservers) > 0:
        storageObservers[0].waitFor(len(Storage.servers) + len(Storage.servers1))
    else:
        time.sleep(2)

    if Storage.storage == Hbase:
        time.sleep(5)

    return [tclient, tclient1, mclient]
Example #37
def cleanDirs():
    # Clean zk dirs
    dClient = ThreadedClients([Config.server],
                              "rm -rf {0}".format(Config.zk_dir),
                              root=True)
    dClient.start()
    dClient.join()
    # Clean kafka dirs
    dClient2 = ThreadedClients([Config.server],
                               "rm -rf {0}".format(Config.k_dir),
                               root=True)
    dClient2.start()
    dClient2.join()
Example #38
def populate(nMiss):
   checkTopics()
   print doneStr.format("CREATION")
   time.sleep(3)

   #params = "kafka={0}:9092 zk={0}:2181 missing={1} sf={2} tuples={3} pcompletion={4} psuccess={5} bid_ratio={6} > {7}.{1}.log"
   params = "kafka={0}:9092 zk={0}:2181 missing={1} sf={2} tuples={3} pcompletion={4} psuccess={5} bid_ratio={6}"
   #aPa = params.format(Config.server, Config.missing, Config.sf, Config.tuples, Config.pcompletion, Config.psuccess, Config.bratio, "auctionProducer")
   aPa = params.format(Config.server, nMiss, Config.sf, Config.tuples, Config.pcompletion, Config.psuccess, Config.bratio)
   #bPa = params.format(Config.server, Config.missing, Config.sf, Config.tuples, Config.pcompletion, Config.psuccess, Config.bratio, "bidProducer")
   bPa = params.format(Config.server, nMiss, Config.sf, Config.tuples, Config.pcompletion, Config.psuccess, Config.bratio)

   # populating auctions
   aC = ThreadedClients([Config.server], javaCmd.format(Config.jarpath, Config.aproducer, aPa))
   aC.start()
   aC.join()
   time.sleep(3)

   # populating bids
   bC = ThreadedClients([Config.server], javaCmd.format(Config.jarpath, Config.bproducer, bPa))
   bC.start()
   bC.join()
   print doneStr.format("POPULATION")
   time.sleep(3)
Example #39
def confCassandraCluster():
    os.system("mkdir -p {0}/conf1".format(Cassandra.casdir))
    dirClient = ThreadedClients(Storage.servers1, "mkdir -p {0}/conf1".format(Cassandra.casdir))
    dirClient.start()
    dirClient.join()

    # copy all conf files over, cassandra.yaml will be overwritten later
    os.system("cp -a {0}/conf/* {0}/conf1/".format(Cassandra.casdir))
    copyClient = ThreadedClients(Storage.servers1, "cp -a {0}/conf/* {0}/conf1/".format(Cassandra.casdir))
    copyClient.start()
    copyClient.join()

    # we also have to change cassandra-env.sh
    f = open('{0}/conf/cassandra-env.sh'.format(Cassandra.casdir), 'r')
    templateEnv = f.read()
    f.close()
    templateEnv = templateEnv.replace('JMX_PORT="7199"', 'JMX_PORT="7198"')
    cassandraEnv = '{0}/conf1/cassandra-env.sh'.format(Cassandra.casdir)
    with open(cassandraEnv, 'w') as f:
        f.write(templateEnv)
    copyToHost(Storage.servers1, cassandraEnv)
        
    for numaNode in [0,1]:
        servers = Storage.servers if numaNode == 0 else Storage.servers1
        datadir = Cassandra.datadir if numaNode == 0 else Cassandra.datadir1
        logdir = Cassandra.logdir if numaNode == 0 else Cassandra.logdir1
        nativeport = Cassandra.nativeport if numaNode == 0 else Cassandra.nativeport1
        rpcport = Cassandra.rpcport if numaNode == 0 else Cassandra.rpcport1
        storageport = Cassandra.storageport if numaNode == 0 else Cassandra.storageport1
        sslport = Cassandra.sslport if numaNode == 0 else Cassandra.sslport1
        cassandraConf = '{0}/conf/cassandra.yaml' if numaNode == 0 else '{0}/conf1/cassandra.yaml'
        cassandraConf = cassandraConf.format(Cassandra.casdir)

        if len(servers) == 0:
            continue 

        for host in servers:
            f = open('cassandra.yaml.template', 'r')
            templateconf = f.read()
            f.close()

            templateconf = templateconf.replace("casseeds", "\"" + Storage.servers[0] + "\"")
            templateconf = templateconf.replace("caslistenaddr", host)
            templateconf = templateconf.replace("casdatadir", datadir)
            templateconf = templateconf.replace("caslogdir", logdir)
            templateconf = templateconf.replace("casnativeport", nativeport)
            templateconf = templateconf.replace("casrpcport", rpcport)
            templateconf = templateconf.replace("casstorageport", storageport)
            templateconf = templateconf.replace("cassslport", sslport)

            with open(cassandraConf, 'w') as f:
                f.write(templateconf)
            copyToHost([host], cassandraConf)

        mkClients = ThreadedClients(servers, "mkdir -p {0}".format(datadir), root=True)
        mkClients.start()
        mkClients.join()
        mntClients = ThreadedClients(servers, "mount -t tmpfs -o size={0}G tmpfs {1}".format(Cassandra.datadirSz, datadir), root=True)
        mntClients.start()
        mntClients.join()
        mkClients = ThreadedClients(servers , "mkdir -p {0}".format(logdir), root=True)
        mkClients.start()
        mkClients.join()
Example #40
def confZk():
    zooCfg = '{0}/conf/zoo.cfg'.format(Zookeeper.zkdir)
    with open(zooCfg, 'w+') as f:
         f.write("maxClientCnxns={0}\n".format(Zookeeper.maxclients))
         f.write("tickTime={0}\n".format(Zookeeper.ticktime))
         f.write("dataDir={0}\n".format(Zookeeper.datadir))
         f.write("clientPort={0}\n".format(Zookeeper.clientport))

    deleteClient = ThreadedClients([Storage.master], "rm -rf {0}".format(Zookeeper.datadir), root=True)
    deleteClient.start()
    deleteClient.join()

    copyClient = ThreadedClients([Storage.master], "mkdir -p {0}".format(Zookeeper.datadir), root=True)
    copyClient.start()
    copyClient.join()
    copyToHost([Storage.master], zooCfg)
Example #41

cmd = ""

if Tpcc.storage == Kudu:
    cmd = "{0}/watch/tpcc/tpcc_kudu -H `hostname` -W {1} --network-threads 8 -s {2} -P {3}".format(
        Tpcc.builddir, Tpcc.warehouses, Kudu.master,
        len(Kudu.tservers) * 4)
elif Tpcc.storage == TellStore:
    Tpcc.rsyncBuild()
    cmd = '{0}/watch/tpcc/tpcc_server -W {1} --network-threads 4 -c "{2}" -s "{3}"'.format(
        Tpcc.builddir, Tpcc.warehouses,
        General.infinibandIp[TellStore.commitmanager] + ":7242",
        reduce(semicolonReduce,
               map(lambda x: hostToIp(x) + ":7241", TellStore.servers)))

client0 = ThreadedClients(Tpcc.servers0,
                          "numactl -m 0 -N 0 {0}".format(cmd),
                          rnd_start=True,
                          root=False)
client1 = ThreadedClients(Tpcc.servers1,
                          "numactl -m 1 -N 1 {0} -p 8712".format(cmd),
                          rnd_start=True,
                          root=False)

client0.start()
client1.start()

client0.join()
client1.join()

Example #42
cmd = '{0}/watch/ycsb-server/ycsb-server -H `hostname` --network-threads {3} -c "{1}" -s "{2}"'.format(YCSB.builddir, General.infinibandIp[TellStore.commitmanager] + ":7242", reduce(semicolonReduce, map(lambda x: hostToIp(x) + ":7241", TellStore.servers)), YCSB.networkThread)

fst = None
fstZero = True
cCommand = "-C" if args.createTable else ""

if len(YCSB.servers0) > 0:
    fst = YCSB.servers0.pop()
else:
    fstZero = False
    fst = YCSB.servers1.pop()

if fstZero:
    fst = ThreadedClients([fst], "numactl -m 0 -N 0 {0} {1}".format(cmd, cCommand))
else:
    fst = ThreadedClients([fst], "numactl -m 1 -N 1 {0} -p 8712 {1}".format(cmd, cCommand))


client0 = ThreadedClients(YCSB.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
client1 = ThreadedClients(YCSB.servers1, "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

fst.start()
client0.start()
client1.start()

fst.join()
client0.join()
client1.join()
Example #43
def checkTopics():
    chkCmd = '{0}/bin/kafka-topics.sh --list --zookeeper {1}:2181'.format(
        Config.exec_path, Config.server)
    chkClient = ThreadedClients([Config.server], chkCmd, root=True)
    chkClient.start()
    chkClient.join()