Example #1
def startMBServer(observers):
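    """Build the storage-specific mbserver command and start it on the
    microbench server machines, pinned to NUMA nodes 0 and 1 via numactl.
    Returns the two ThreadedClients groups so the caller can join them."""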

    Microbench.rsyncBuild()
    path = ""
    params = '-t {0} -n {1} -s {2} '.format(Microbench.threads, Microbench.numColumns, Microbench.scaling)

    if Storage.storage == TellStore:
        cmd = '{0}/watch/microbench/mbserver_tell {1}'.format(TellStore.builddir, params)
        cmd += '-c "{0}" --storage "{1}" --network-threads {2} -m {3}'.format(TellStore.getCommitManagerAddress(), TellStore.getServerList(), Microbench.networkThreads, Microbench.infinioBatch)
    elif Storage.storage == Kudu:
        cmd = '{0}/watch/microbench/mbserver_kudu {1}'.format(TellStore.builddir, params)
        cmd += '-c {0}'.format(Storage.master)
    elif Storage.storage == Cassandra:
        Microbench.rsyncJars()
        path = "PATH={0}/bin:$PATH ".format(General.javahome)
        cmd = 'java -jar {0}/mbserver_cassandra.jar {1}'.format(Microbench.javaDir, params)
        cmd += getNodes(Storage.servers, " -cn ")
    elif Storage.storage == Hbase:
        path = "PATH={0}/bin:$PATH ".format(General.javahome)
        cmd = 'java -jar {0}/mbserver_hbase.jar {1}'.format(Microbench.javaDir, params)
        cmd += '-hm {0} '.format(Storage.master)
        cmd += '-zm {0}'.format(Storage.master)
    elif Storage.storage == Ramcloud:
        cmd = '{0}/watch/microbench/mbserver_ramcloud {1}'.format(TellStore.builddir, params)
        cmd += '-c main -l "infrc:host={0}-infrc,port=11100" -x {1}'.format(Storage.master, len(Storage.servers) + len(Storage.servers1))
  
    client0 = ThreadedClients(Microbench.servers0, "{0}numactl -m 0 -N 0 {1}".format(path, cmd), observers=observers)
    client1 = ThreadedClients(Microbench.servers1, "{0}numactl -m 1 -N 1 {1} -p 8712".format(path, cmd), observers=observers)
    
    client0.start()
    client1.start()

    return [client0, client1]
Example #2
def startMBServer(observers):
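    """Build the storage-specific mbserver command and start it on the
    microbench server machines, pinned to NUMA nodes 0 and 1 via numactl.
    Returns the two ThreadedClients groups so the caller can join them."""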

    Microbench.rsyncBuild()
    path = ""
    params = '-t {0} -n {1} -s {2} '.format(Microbench.threads,
                                            Microbench.numColumns,
                                            Microbench.scaling)

    if Storage.storage == TellStore:
        cmd = '{0}/watch/microbench/mbserver_tell {1}'.format(
            TellStore.builddir, params)
        cmd += '-c "{0}" --storage "{1}" --network-threads {2} -m {3}'.format(
            TellStore.getCommitManagerAddress(), TellStore.getServerList(),
            Microbench.networkThreads, Microbench.infinioBatch)
    elif Storage.storage == Kudu:
        cmd = '{0}/watch/microbench/mbserver_kudu {1}'.format(
            TellStore.builddir, params)
        cmd += '-c {0}'.format(Storage.master)
    elif Storage.storage == Cassandra:
        Microbench.rsyncJars()
        path = "PATH={0}/bin:$PATH ".format(General.javahome)
        cmd = 'java -jar {0}/mbserver_cassandra.jar {1}'.format(
            Microbench.javaDir, params)
        cmd += getNodes(Storage.servers, " -cn ")
    elif Storage.storage == Hbase:
        path = "PATH={0}/bin:$PATH ".format(General.javahome)
        cmd = 'java -jar {0}/mbserver_hbase.jar {1}'.format(
            Microbench.javaDir, params)
        cmd += '-hm {0} '.format(Storage.master)
        cmd += '-zm {0}'.format(Storage.master)
    elif Storage.storage == Ramcloud:
        cmd = '{0}/watch/microbench/mbserver_ramcloud {1}'.format(
            TellStore.builddir, params)
        cmd += '-c main -l "infrc:host={0}-infrc,port=11100" -x {1}'.format(
            Storage.master,
            len(Storage.servers) + len(Storage.servers1))

    client0 = ThreadedClients(Microbench.servers0,
                              "{0}numactl -m 0 -N 0 {1}".format(path, cmd),
                              observers=observers)
    client1 = ThreadedClients(Microbench.servers1,
                              "{0}numactl -m 1 -N 1 {1} -p 8712".format(
                                  path, cmd),
                              observers=observers)

    client0.start()
    client1.start()

    return [client0, client1]
Example #3
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import General

import time

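# Start the TPC-H server, give it a few seconds to come up, then run the
# client with -P to populate the database.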
server_cmd = 'cd {0}/watch/tpch; {0}/watch/tpch/tpch_server -s "{1}" -c "{2}"'.format(
    Tpch.builddir, TellStore.getServerList(),
    TellStore.getCommitManagerAddress())
client_cmd = '{0}/watch/tpch/tpch_client -H "{1}" -s {2} -P'.format(
    Tpch.builddir, Tpch.server, Tpch.scaling)

server = ThreadedClients([Tpch.server], server_cmd)
server.start()
time.sleep(5)

client = ThreadedClients([Tpch.client], client_cmd)
client.start()

client.join()
print "Population done, please hit Ctr+C to finish"
server.join()
Example #4
#!/usr/bin/env python
from argparse import ArgumentParser
from threaded_ssh import ThreadedClients
from ServerConfig import Tpch
from ServerConfig import TellStore
from ServerConfig import Kudu
from ServerConfig import General

import time

cmd = ""

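# Build the storage-specific tpch_server command.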
if Tpch.storage == Kudu:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 8 -P {2} -k'.format(Tpch.builddir, Kudu.master, len(Kudu.tservers)*4)
elif Tpch.storage == TellStore:
    cmd = '{0}/watch/tpch/tpch_server -s "{1}" --network-threads 4 -c "{2}"'.format(Tpch.builddir, TellStore.getServerList(), TellStore.getCommitManagerAddress())

server0 = ThreadedClients(Tpch.servers0, "numactl -m 0 -N 0 {0}".format(cmd))
server1 = ThreadedClients(Tpch.servers1, "numactl -m 1 -N 1 {0} -p 8712".format(cmd))

server0.start()
server1.start()

server0.join()
server1.join()

Example #5
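# Write spark-defaults.conf: class paths, memory settings, and the
# TellStore connector options.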
with open(sparkDefault, 'w+') as f:
    f.write('spark.driver.extraClassPath {0}\n'.format(classpath))
    f.write('spark.executor.extraClassPath {0}\n'.format(classpath))
    f.write('spark.serializer org.apache.spark.serializer.KryoSerializer\n')
    f.write('spark.driver.memory 10g\n')
    f.write('spark.executor.memory 90g\n')
    f.write('spark.local.dir {0}\n'.format(Spark.tmpDir))
    f.write('spark.executor.cores {0}\n'.format(Spark.numCores))
    # TellStore
    f.write('spark.sql.tell.numPartitions {0}\n'.format(Spark.tellPartitions))
    # f.write('spark.sql.tell.chunkSizeSmall 104857600\n')
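    # One scan chunk per executor core on every storage node; chunk sizes
    # are rounded down to a multiple of 8 bytes.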
    numChunks = (len(TellStore.servers) + len(TellStore.servers1)) * Spark.numCores
    f.write('spark.sql.tell.chunkSizeBig   {0}\n'.format(((TellStore.scanMemory // numChunks) // 8) * 8))
    f.write('spark.sql.tell.chunkCount {0}\n'.format(numChunks))
    f.write('spark.sql.tell.commitmanager {0}\n'.format(TellStore.getCommitManagerAddress()))
    f.write('spark.sql.tell.storagemanager {0}\n'.format(TellStore.getServerList()))

with open(sparkEnv, 'w+') as f:
    f.write('export JAVA_HOME={0}\n'.format(Spark.javahome))
    f.write('export LD_LIBRARY_PATH={0}\n'.format(Spark.telljava))

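# Helpers to recreate the Spark scratch directory on a host and to push the
# config files and jars to it.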
tmpDirCommand = lambda host: os.system("ssh root@{0} 'rm -rf {1}; mkdir {1}'".format(host, Spark.tmpDir))
configCopyCommand = lambda host: os.system('scp {0} {1} root@{2}:{3}/conf/'.format(sparkEnv, sparkDefault, host, Spark.sparkdir))
jarCopyCommand = lambda host: os.system('scp {0}/*.jar root@{1}:{0}'.format(Spark.jarsDir, host))
tmpDirCommand(Spark.master)
configCopyCommand(Spark.master)
jarCopyCommand(Spark.master)
for host in Spark.slaves:
    tmpDirCommand(host)
    configCopyCommand(host)
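Example #6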
#!/usr/bin/env python
from argparse import ArgumentParser
import os

from ServerConfig import Client
from ServerConfig import General
from ServerConfig import Storage
from ServerConfig import Kudu
from ServerConfig import Tpch
from ServerConfig import TellStore

storageAddr = Kudu.master + " -k"
masterAddr = ""  # assumed unused by the Kudu backend; only the TellStore branch sets it
if Storage.storage == TellStore:
    storageAddr = TellStore.getServerList()
    masterAddr = TellStore.getCommitManagerAddress()

cmd = '{0}/watch/tpcc/tpch -S "{1}" -d {2}/{3} -C "{4}"'.format(TellStore.builddir, storageAddr, Tpch.dbgenFiles, Tpch.scaling, masterAddr)
print cmd
exit(os.system(cmd))
Example #7
with open(sparkDefault, "w+") as f:
    f.write("spark.driver.extraClassPath {0}\n".format(classpath))
    f.write("spark.executor.extraClassPath {0}\n".format(classpath))
    f.write("spark.serializer org.apache.spark.serializer.KryoSerializer\n")
    f.write("spark.driver.memory 10g\n")
    f.write("spark.executor.memory 90g\n")
    f.write("spark.local.dir {0}\n".format(Spark.tmpDir))
    f.write("spark.executor.cores {0}\n".format(Spark.numCores))
    # TellStore
    f.write("spark.sql.tell.numPartitions {0}\n".format(Spark.tellPartitions))
    # f.write('spark.sql.tell.chunkSizeSmall 104857600\n')
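    # One scan chunk per executor core on every storage node; chunk sizes
    # are rounded down to a multiple of 8 bytes.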
    numChunks = (len(TellStore.servers) + len(TellStore.servers1)) * Spark.numCores
    f.write("spark.sql.tell.chunkSizeBig   {0}\n".format(((TellStore.scanMemory // numChunks) // 8) * 8))
    f.write("spark.sql.tell.chunkCount {0}\n".format(numChunks))
    f.write("spark.sql.tell.commitmanager {0}\n".format(TellStore.getCommitManagerAddress()))
    f.write("spark.sql.tell.storagemanager {0}\n".format(TellStore.getServerList()))

with open(sparkEnv, "w+") as f:
    f.write("export JAVA_HOME={0}\n".format(Spark.javahome))
    f.write("export LD_LIBRARY_PATH={0}\n".format(Spark.telljava))

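# Helpers to recreate the Spark scratch directory on a host and to push the
# config files and jars to it.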
tmpDirCommand = lambda host: os.system("ssh root@{0} 'rm -rf {1}; mkdir {1}'".format(host, Spark.tmpDir))
configCopyCommand = lambda host: os.system(
    "scp {0} {1} root@{2}:{3}/conf/".format(sparkEnv, sparkDefault, host, Spark.sparkdir)
)
jarCopyCommand = lambda host: os.system("scp {0}/*.jar root@{1}:{0}".format(Spark.jarsDir, host))
tmpDirCommand(Spark.master)
configCopyCommand(Spark.master)
jarCopyCommand(Spark.master)
for host in Spark.slaves:
    tmpDirCommand(host)
    configCopyCommand(host)
Example #8
def startAimServers(observers=[]):
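    """Build the storage-specific AIM server command and start it on the
    SEP/RTA server machines, pinned to NUMA nodes 0 and 1 via numactl.
    Returns the two ThreadedClients groups."""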
    Aim.rsyncBuild()
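    # 16 scan chunks per RTA client on every storage node; chunk sizes are
    # rounded down to a multiple of 8 bytes.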
    numChunks = (len(Storage.servers) + len(Storage.servers1)) * Aim.numRTAClients * 16
    chunkSize = ((TellStore.scanMemory // numChunks) // 8) * 8
    
    serverExec = ""
    if Storage.storage == Kudu:
        serverExec = "aim_kudu -P {0} -s {1}".format((len(Storage.servers) + len(Storage.servers1)) * 2, Storage.master)
    elif Storage.storage == TellStore:
        serverExec = 'aim_server -M {0} -m {1} -c "{2}" -s "{3}" --processing-threads {4}'.format(numChunks, chunkSize, TellStore.getCommitManagerAddress(), TellStore.getServerList(), Aim.serverthreads)
    
    cmd = '{0}/watch/aim-benchmark/{3} -f {1} -b {2}'.format(Aim.builddir, Aim.schemaFile, Aim.batchSize, serverExec)
    
    client0 = ThreadedClients(Aim.sepservers0 + Aim.rtaservers0, "numactl -m 0 -N 0 {0}".format(cmd), root=True, observers=observers)
    client1 = ThreadedClients(Aim.sepservers1 + Aim.rtaservers1, "numactl -m 1 -N 1 {0} -p 8715 -u 8716".format(cmd), root=True, observers=observers)
    
    client0.start()
    client1.start()
    return [client0, client1]
Example #9
def confNode(host, coordinator = False):
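    """Write the Presto etc/ files (node, JVM, config, catalog, and log
    properties) for one host and copy them over; coordinator=True also
    enables the embedded discovery server."""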
    print "\nCONFIGURING {0}".format(host)
    # node properties
    nodeProps = "{0}/etc/node.properties".format(Presto.prestodir)
    with open(nodeProps, 'w+') as f:
        f.write("node.environment=ethz\n")
        f.write("node.id=ffffffff-ffff-ffff-ffff-{0}\n".format(host))
        f.write("node.data-dir={0}\n".format(Presto.datadir))
    copyToHost([host], nodeProps)
    # jvm config
    jvmConf = "{0}/etc/jvm.config".format(Presto.prestodir)
    with open(jvmConf, 'w+') as f:
        f.write("-server\n")
        f.write("-Djava.library.path={0}\n".format(Java.telljava))
        if Presto.debug:
            f.write('-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005\n')
        f.write("-Xmx{0}\n".format(Presto.jvmheap))
        f.write("-XX:+UseG1GC\n")
        f.write("-XX:G1HeapRegionSize={0}\n".format(Presto.jvmheapregion))
        f.write("-XX:+UseGCOverheadLimit\n")
        f.write("-XX:+ExplicitGCInvokesConcurrent\n")
        f.write("-XX:+HeapDumpOnOutOfMemoryError\n")
        f.write("-XX:OnOutOfMemoryError=kill -9 %p\n")
    copyToHost([host], jvmConf)
    # config properties
    confProps = "{0}/etc/config.properties".format(Presto.prestodir)
    with open(confProps, 'w+') as f:
        if coordinator:
            f.write("coordinator=true\n")
            f.write("node-scheduler.include-coordinator=false\n")
            f.write("discovery-server.enabled=true\n")
        else:
            f.write("coordinator=false\n")
        f.write("http-server.http.port={0}\n".format(Presto.httpport))
        f.write("query.max-memory={0}\n".format(Presto.querymaxmem))
        f.write("query.max-memory-per-node={0}\n".format(Presto.querymaxnode))
        f.write("discovery.uri=http://{0}:8080\n".format(Presto.coordinator))
        f.write("node-scheduler.max-splits-per-node={0}\n".format(Presto.splitsPerMachine))
        f.write("node-scheduler.max-pending-splits-per-node-per-task={0}\n".format(0))
    copyToHost([host], confProps)
    # catalog:
    if Storage.storage == Hadoop:
        hiveCat = "{0}/etc/catalog/hive.properties".format(Presto.prestodir)
        with open(hiveCat, 'w+') as f:
            f.write("connector.name=hive-hadoop2\n")
            f.write("hive.metastore.uri=thrift://{0}:{1}\n".format(Hive.metastoreuri, Hive.metastoreport))
            f.write("hive.metastore-timeout={0}\n".format(Hive.metastoretimeout))
        copyToHost([host], hiveCat)
    elif Storage.storage == TellStore:
        tellCat = "{0}/etc/catalog/tell.properties".format(Presto.prestodir)
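        # One chunk per split on every TellStore server; chunk sizes are
        # rounded down to a multiple of 8 bytes.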
        numChunks = Presto.splitsPerMachine * TellStore.numServers()
        with open(tellCat, 'w+') as f:
            f.write('connector.name=tell\n')
            f.write('tell.commitManager={0}\n'.format(TellStore.getCommitManagerAddress()))
            f.write('tell.storages={0}\n'.format(TellStore.getServerList()))
            f.write('tell.numPartitions={0}\n'.format(Presto.splitsPerMachine * len(Presto.nodes)))
            f.write('tell.partitionShift={0}\n'.format(TellStore.scanShift))
            f.write('tell.chunkCount={0}\n'.format(numChunks))
            f.write('tell.chunkSize={0}\n'.format(((TellStore.scanMemory // numChunks) // 8) * 8))
        copyToHost([host], tellCat)
    # log level
    logProps = "{0}/etc/log.properties".format(Presto.prestodir)
    with open(logProps, 'w+') as f:
        f.write("com.facebook.presto={0}\n".format(Presto.loglevel))
    copyToHost([host], logProps)
    # tmp files for logging
    os.system("ssh root@{0} 'rm -rf {1}; mkdir {1}'".format(host, Presto.datadir))