Example #1
def indexMultiple(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 1'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200
    # Ensure at least 15 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)

    # use a maximum of 10 secondary keys
    if len(hosts) <= 10:
        # TODO(ankitak): hack until synchronization bug in write RPC handler
        # in MasterService is resolved. This bug prevents us from using more
        # than 1 MasterService thread. However, we need to use more than 1
        # service thread, otherwise if a tablet and its corresponding
        # indexlet end up on the same server, we will have a deadlock.
        # For now, make sure that we never wrap around the server list
        # Once the bug is resolved, we should be able to use len(hosts)
        # for numIndexes
        client_args['--numIndexes'] = len(hosts) - 1
    else:
        client_args['--numIndexes'] = 10

    cluster.run(client='%s/ClusterPerf %s %s' %
               (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
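Every example in this listing builds the ClusterPerf command line by running client_args through flatten_args before handing it to cluster.run. The helper itself is not shown here; the sketch below is only a plausible reconstruction, assuming it does nothing more than join each option name and value into one space-separated string (the real helper in clusterperf.py may differ):

def flatten_args(args):
    # Join a dict of command-line options into one string, e.g.
    # {'--count': 10000} -> '--count 10000'. Options whose value is the
    # empty string are emitted as bare flags. (Hypothetical sketch.)
    return ' '.join(name if value == '' else '%s %s' % (name, value)
                    for name, value in args.items())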
Example #2
def workloadDist(name, options, cluster_args, client_args):
    if not options.extract:
        if 'master_args' not in cluster_args:
            cluster_args['master_args'] = '-t 2000'
        cluster_args['disjunct'] = True
        cluster.run(client='%s/apps/ClusterPerf %s %s' %
                (config.hooks.get_remote_obj_path(),
                 flatten_args(client_args), name),
                **cluster_args)
    if options.fullSamples:
        import gzip
        with gzip.open('logs/latest/rcdf.data.gz', 'wb') as rcdf_file:
            print_rcdf_from_log_samples(rcdf_file)
        with gzip.open('logs/latest/samples.data.gz', 'wb') as samples_file:
            print_samples_from_log(samples_file)
    else:
        print("# Cumulative distribution latencies for operations specified by\n"
              "# the benchmark.\n#\n"
              "# Generated by 'clusterperf.py %s'\n#\n"
              "# Time (usec)  Cum. Fraction\n"
              "#---------------------------"
              % (name))
        if (options.rcdf):
            print_rcdf_from_log()
        else:
            print_cdf_from_log()
Example #3
def indexReadDist(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200

    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())

    if '--count' not in client_args:
        client_args['--count'] = 10000

    if '--numObjects' not in client_args:
        client_args['--numObjects'] = 1000000

    if '--warmup' not in client_args:
        client_args['--warmup'] = 100

    cluster.run(
        client='%s/apps/ClusterPerf %s %s' %
        (config.hooks.get_remote_obj_path(), flatten_args(client_args), name),
        **cluster_args)

    print("# Cumulative distribution of time for a single client to read\n"
          "# %d %d-byte objects to a table with one index and %d\n"
          "# initial objects. Each object has two 30-byte keys and a 100\n"
          "# byte value. Each line indicates that a given fraction of all\n"
          "# reads took at most a given time to complete.\n"
          "#\n"
          "# Generated by 'clusterperf.py readDist'\n#\n"
          "# Time (usec)  Cum. Fraction\n"
          "#---------------------------" %
          (client_args['--count'], options.size, client_args['--numObjects']))
    print_cdf_from_log()
Example #4
def readLoaded(name, options, cluster_args, client_args):
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 20
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print(get_client_log(), end='')
Example #5
def workloadDist(name, options, cluster_args, client_args):
    if not options.extract:
        if 'master_args' not in cluster_args:
            cluster_args['master_args'] = '-t 2000'
        cluster_args['disjunct'] = True
        cluster.run(client='%s/apps/ClusterPerf %s %s' %
                    (config.hooks.get_remote_obj_path(),
                     flatten_args(client_args), name),
                    **cluster_args)
    if options.fullSamples:
        import gzip
        with gzip.open('logs/latest/rcdf.data.gz', 'wb') as rcdf_file:
            print_rcdf_from_log_samples(rcdf_file)
        with gzip.open('logs/latest/samples.data.gz', 'wb') as samples_file:
            print_samples_from_log(samples_file)
    else:
        print(
            "# Cumulative distribution latencies for operations specified by\n"
            "# the benchmark.\n#\n"
            "# Generated by 'clusterperf.py %s'\n#\n"
            "# Time (usec)  Cum. Fraction\n"
            "#---------------------------" % (name))
        if (options.rcdf):
            print_rcdf_from_log()
        else:
            print_cdf_from_log()
Example #6
def indexMultiple(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 1'
    if cluster_args['timeout'] < 360:
        cluster_args['timeout'] = 360
    # Ensure at least 15 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)

    # use a maximum of 10 secondary keys
    if len(hosts) <= 10:
        # Hack until synchronization bug in write RPC handler
        # in MasterService is resolved. This bug prevents us from using more
        # than 1 MasterService thread. However, we need to use more than 1
        # service thread, otherwise if a tablet and its corresponding
        # indexlet end up on the same server, we will have a deadlock.
        # For now, make sure that we never wrap around the server list
        # Once the bug is resolved, we should be able to use len(hosts)
        # for numIndexes
        client_args['--numIndexes'] = len(hosts) - 1
    else:
        client_args['--numIndexes'] = 10

    cluster.run(client='%s/ClusterPerf %s %s' %
               (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #7
def indexReadDist(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 1 --totalMasterMemory 1500'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200

    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)

    if '--count' not in client_args:
        client_args['--count'] = 10000

    if '--numObjects' not in client_args:
        client_args['--numObjects'] = 1000000

    if '--warmup' not in client_args:
        client_args['--warmup'] = 100

    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)

    print("# Cumulative distribution of time for a single client to read\n"
          "# %d %d-byte objects to a table with one index and %d\n"
          "# initial objects. Each object has two 30-byte keys and a 100\n"
          "# byte value. Each line indicates that a given fraction of all\n"
          "# reads took at most a given time to complete.\n"
          "#\n"
          "# Generated by 'clusterperf.py readDist'\n#\n"
          "# Time (usec)  Cum. Fraction\n"
          "#---------------------------"
          % (client_args['--count'], options.size, client_args['--numObjects']))
    print_cdf_from_log()
Example #8
def readLoaded(name, options, cluster_args, client_args):
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 20
    cluster.run(client='%s/apps/ClusterPerf %s %s' %
            (config.hooks.get_remote_obj_path(),
             flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #9
def broadcast(name, options, cluster_args, client_args):
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 10
    cluster.run(
        client='%s/apps/ClusterPerf %s %s' %
        (config.hooks.get_remote_obj_path(), flatten_args(client_args), name),
        **cluster_args)
    print(get_client_log(), end='')
Example #10
def basic(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 4000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #11
def multiOp(name, options, cluster_args, client_args):
    cluster_args['timeout'] = 100
    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), master_args='-d', **cluster_args)
    print(get_client_log(), end='')
Example #12
def basic(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 4000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #13
def readRandom(name, options, cluster_args, client_args):
    cluster_args["timeout"] = 60
    if "num_clients" not in cluster_args:
        cluster_args["num_clients"] = 50
    if options.num_servers == None:
        cluster_args["num_servers"] = 10
    client_args["--numTables"] = cluster_args["num_servers"]
    cluster.run(client="%s/ClusterPerf %s %s" % (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end="")
Example #14
def migrateLoaded(name, options, cluster_args, client_args):
    if not options.extract:
        clients = options.num_clients
        servers = options.num_servers  # len(getHosts()) - clients - 1

        if servers < 4:
            raise Exception('Not enough servers: only %d left' % servers)
        if clients < 16:
            print('!!! WARNING !!! Use 16 clients to ensure enough load for ' +
                  'real experiments !!! WARNING !!!',
                  file=sys.stderr)

        cluster_args['num_servers'] = servers

        # Use two backups per server for more disk bandwidth.
        defaultTo(cluster_args, 'backup_disks_per_server', 2)

        # Need lots of mem for big workload and migration.
        defaultTo(cluster_args, 'master_args', '-t 18000 --segmentFrames 8192')

        # Sixteen clients to try to generate enough load to keep things at
        # about 90% load.
        cluster_args['num_clients'] = clients

        # Can take a while due to fillup and migration.
        if cluster_args['timeout'] < 300:
            cluster_args['timeout'] = 300

        # We're really interested in jitter on servers; better keep the clients
        # off the server machines.
        cluster_args['disjunct'] = True

        # Can't default --workload this due to command line default...

        # 1 million * 100 B ~= 100 MB table
        defaultTo(client_args, '--numObjects', 1 * 1000 * 1000)

        # Set clients up to keep server at 90% load.
        defaultTo(
            client_args, '--targetOps',
            calculatePerClientTarget(client_args['--workload'], clients,
                                     options.loadPct))

        # Turn on timestamps on latency samples.
        defaultTo(client_args, '--fullSamples', '')

        name = 'readDistWorkload'
        cluster.run(client='%s/apps/ClusterPerf %s %s' %
                    (obj_path, flatten_args(client_args), name),
                    **cluster_args)

    import gzip
    with gzip.open('logs/latest/rcdf.data.gz', 'wb') as rcdf_file:
        print_rcdf_from_log_samples(rcdf_file)
    with gzip.open('logs/latest/samples.data.gz', 'wb') as samples_file:
        print_samples_from_log(samples_file)
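migrateLoaded fills in several settings with a defaultTo helper so that anything the caller already specified wins. A minimal sketch, assuming it is nothing more than a guarded dictionary assignment:

def defaultTo(config, field, value):
    # Set config[field] to value only if the caller has not supplied one.
    # (Assumed behavior; the real helper may differ.)
    if field not in config:
        config[field] = value

With that reading, defaultTo(cluster_args, 'master_args', '-t 18000 --segmentFrames 8192') only takes effect when master_args was not already set from the command line.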
Example #15
def multiOp(name, options, cluster_args, client_args):
    if cluster_args['timeout'] < 100:
        cluster_args['timeout'] = 100
    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name),
            **cluster_args)
    print(get_client_log(), end='')
Example #16
def readRandom(name, options, cluster_args, client_args):
    cluster_args['timeout'] = 60
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 50
    if options.num_servers == None:
        cluster_args['num_servers'] = 10
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #17
def readThroughput(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(hosts) - cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #18
def readAllToAll(name, options, cluster_args, client_args):
    cluster_args["backups_per_server"] = 0
    cluster_args["replicas"] = 0
    if "num_clients" not in cluster_args:
        cluster_args["num_clients"] = len(hosts)
    if options.num_servers == None:
        cluster_args["num_servers"] = len(hosts)
    client_args["--numTables"] = cluster_args["num_servers"]
    cluster.run(client="%s/ClusterPerf %s %s" % (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end="")
Example #19
def indexBasic(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200
    # Ensure at least 5 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #20
def txCollision(name, options, cluster_args, client_args):
    if cluster_args['timeout'] < 100:
        cluster_args['timeout'] = 100
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    #client_args['--numTables'] = cluster_args['num_servers'];
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 5
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #21
def readRandom(name, options, cluster_args, client_args):
    cluster_args['timeout'] = 60
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 50
    if options.num_servers == None:
        cluster_args['num_servers'] = 10
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print(get_client_log(), end='')
Example #22
def readRandom(name, options, cluster_args, client_args):
    cluster_args['backups_per_server'] = 0
    cluster_args['replicas'] = 0
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 16 
    if options.num_servers == None:
        cluster_args['num_servers'] = 1
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #23
def multiOp(name, options, cluster_args, client_args):
    if cluster_args['timeout'] < 100:
        cluster_args['timeout'] = 100
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/apps/ClusterPerf %s %s' %
            (config.hooks.get_remote_obj_path(),
             flatten_args(client_args), name),
            **cluster_args)
    print(get_client_log(), end='')
Example #24
def multiOp(name, options, cluster_args, client_args):
    if cluster_args['timeout'] < 100:
        cluster_args['timeout'] = 100
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(
        client='%s/apps/ClusterPerf %s %s' %
        (config.hooks.get_remote_obj_path(), flatten_args(client_args), name),
        **cluster_args)
    print(get_client_log(), end='')
Example #25
def indexBasic(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 1 --totalMasterMemory 1500'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200
    # Ensure at least 5 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #26
def readRandom(name, options, cluster_args, client_args):
    cluster_args['backups_per_server'] = 0
    cluster_args['replicas'] = 0
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 16
    if options.num_servers == None:
        cluster_args['num_servers'] = 1
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #27
def txCollision(name, options, cluster_args, client_args):
    if cluster_args['timeout'] < 100:
        cluster_args['timeout'] = 100
    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)
    #client_args['--numTables'] = cluster_args['num_servers'];
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 5
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #28
def indexBasic(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200
    # Ensure at least 5 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    cluster.run(client='%s/apps/ClusterPerf %s %s' %
            (config.hooks.get_remote_obj_path(),
             flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #29
def readAllToAll(name, options, cluster_args, client_args):
    cluster_args['backup_disks_per_server'] = 0
    cluster_args['replicas'] = 0
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(getHosts())
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print(get_client_log(), end='')
Example #30
def workloadDist(name, options, cluster_args, client_args):
    if not options.extract:
        if 'master_args' not in cluster_args:
            cluster_args['master_args'] = '-t 2000'
        cluster_args['disjunct'] = True
        cluster.run(client='%s/ClusterPerf %s %s' %
                    (obj_path, flatten_args(client_args), name),
                    **cluster_args)
    if options.rcdf:
        print_rcdf_from_log_samples()
    else:
        print_samples_from_log()
Example #31
def readAllToAll(name, options, cluster_args, client_args):
    cluster_args['backup_disks_per_server'] = 0
    cluster_args['replicas'] = 0
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(getHosts())
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/apps/ClusterPerf %s %s' %
            (config.hooks.get_remote_obj_path(),
             flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #32
def workloadDist(name, options, cluster_args, client_args):
    if not options.extract:
        if 'master_args' not in cluster_args:
            cluster_args['master_args'] = '-t 2000'
        cluster_args['disjunct'] = True
        cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path,  flatten_args(client_args), name),
                **cluster_args)
    if options.rcdf:
        print_rcdf_from_log_samples()
    else:
        print_samples_from_log()
Example #33
def netBandwidth(name, options, cluster_args, client_args):
    if "num_clients" not in cluster_args:
        cluster_args["num_clients"] = 2 * len(config.hosts)
    if options.num_servers == None:
        cluster_args["num_servers"] = cluster_args["num_clients"]
        if cluster_args["num_servers"] > len(config.hosts):
            cluster_args["num_servers"] = len(config.hosts)
    if options.size != None:
        client_args["--size"] = options.size
    else:
        client_args["--size"] = 1024 * 1024
    cluster.run(client="%s/ClusterPerf %s %s" % (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end="")
Example #34
def indexBasic(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 1'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200
    # Ensure at least 5 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)
    # using 20GB for servers so that we don't run out of memory when inserting
    # 10 million objects/index entries
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #35
def readDist(name, options, cluster_args, client_args):
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path,  flatten_args(client_args), name),
            **cluster_args)
    print("# Cumulative distribution of time for a single client to read a\n"
          "# single %d-byte object from a single server.  Each line indicates\n"
          "# that a given fraction of all reads took at most a given time\n"
          "# to complete.\n"
          "# Generated by 'clusterperf.py readDist'\n#\n"
          "# Time (usec)  Cum. Fraction\n"
          "#---------------------------"
          % options.size)
    print_cdf_from_log()
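readDist ends, like the other *Dist benchmarks in this listing, by calling print_cdf_from_log to turn the latencies recorded in the client log into the two-column table promised by the header. A simplified stand-in for that step, assuming the samples have already been extracted from the log as a list of microsecond latencies (the real helper also does the log parsing):

def print_cdf(latencies_usec):
    # Print one 'time (usec)  cumulative fraction' line per sample, sorted
    # so each line reports the fraction of operations that finished within
    # that time. (Sketch only; print_cdf_from_log reads logs/latest first.)
    samples = sorted(latencies_usec)
    total = len(samples)
    for i, usec in enumerate(samples, start=1):
        print('%8.1f    %8.6f' % (usec, i / float(total)))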
Example #36
def readDistRandom(name, options, cluster_args, client_args):
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path,  flatten_args(client_args), name),
            **cluster_args)
    print("# Cumulative distribution of time for a single client to read a\n"
          "# random %d-byte object from a single server.  Each line indicates\n"
          "# that a given fraction of all reads took at most a given time\n"
          "# to complete.\n"
          "# Generated by 'clusterperf.py readDist'\n#\n"
          "# Time (usec)  Cum. Fraction\n"
          "#---------------------------"
          % options.size)
    print_cdf_from_log()
Example #37
def netBandwidth(name, options, cluster_args, client_args):
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 2*len(config.hosts)
    if options.num_servers == None:
        cluster_args['num_servers'] = cluster_args['num_clients']
        if cluster_args['num_servers'] > len(config.hosts):
            cluster_args['num_servers'] = len(config.hosts)
    if options.size != None:
        client_args['--size'] = options.size
    else:
        client_args['--size'] = 1024*1024
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #38
def echoWorkload(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 4000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    cluster_args['replicas'] = 0
    if options.num_servers == None:
        cluster_args['num_servers'] = 1
    cluster.run(
        client='%s/apps/ClusterPerf %s %s' %
        (config.hooks.get_remote_obj_path(), flatten_args(client_args), name),
        **cluster_args)
    print("# Generated by 'clusterperf.py echoWorkload'\n#\n")
    print_percentiles_from_logs()
Example #39
def migrateLoaded(name, options, cluster_args, client_args):
    if not options.extract:
        clients = 16
        servers = len(getHosts()) - clients - 1

        if servers < 4:
            raise Exception('Not enough servers: only %d left' % servers)

        cluster_args['num_servers'] = servers

        # Use two backups per server for more disk bandwidth.
        defaultTo(cluster_args, 'backup_disks_per_server', 2)

        # Need lots of mem for big workload and migration.
        defaultTo(cluster_args, 'master_args',
                '-t 18000 --segmentFrames 8192')

        # Sixteen clients to try to generate enough load to keep things at
        # about 90% load.
        cluster_args['num_clients'] = clients

        # Can take a while due to fillup and migration.
        if cluster_args['timeout'] < 300:
            cluster_args['timeout'] = 300

        # We're really interested in jitter on servers; better keep the clients
        # off the server machines.
        cluster_args['disjunct'] = True

        # Can't default --workload this due to command line default...

        # 1 million * 100 B ~= 100 MB table
        defaultTo(client_args, '--numObjects', 1 * 1000 * 1000)

        # Set clients up to keep server at 90% load.
        defaultTo(client_args, '--targetOps',
                calculatePerClientTarget(
                    client_args['--workload'], clients,
                    options.loadPct))

        name = 'readDistWorkload'
        cluster.run(client='%s/apps/ClusterPerf %s %s' %
                (obj_path,  flatten_args(client_args), name),
                **cluster_args)

    import gzip
    with gzip.open('logs/latest/rcdf.data.gz', 'wb') as rcdf_file:
        print_rcdf_from_log_samples(rcdf_file)
    with gzip.open('logs/latest/samples.data.gz', 'wb') as samples_file:
        print_samples_from_log(samples_file)
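Both migrateLoaded variants size --targetOps with calculatePerClientTarget so that the combined client load lands near options.loadPct percent of what a server can sustain for the chosen workload. The helper is not shown here; the sketch below is only a guess at its shape, with the per-workload peak-throughput numbers as illustrative placeholders rather than values from the real script:

def calculatePerClientTarget(workload, clients, loadPct):
    # Hypothetical sketch: scale an assumed per-workload peak ops/sec by the
    # requested load percentage and split it evenly across the clients.
    # The peak figures below are placeholders, not measured values.
    assumedPeakOpsPerSec = {
        'YCSB-A': 300 * 1000,
        'YCSB-B': 500 * 1000,
        'YCSB-C': 700 * 1000,
    }
    total = assumedPeakOpsPerSec[workload] * loadPct / 100.0
    return int(total / clients)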
Example #40
def netBandwidth(name, options, cluster_args, client_args):
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 2*len(config.hosts)
    if options.num_servers == None:
        cluster_args['num_servers'] = cluster_args['num_clients']
        if cluster_args['num_servers'] > len(config.hosts):
            cluster_args['num_servers'] = len(config.hosts)
    if options.size != None:
        client_args['--size'] = options.size
    else:
        client_args['--size'] = 1024*1024
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #41
def readThroughput(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(getHosts()) - cluster_args['num_servers']
    if cluster_args['num_clients'] < 2:
        print("Not enough machines in the cluster to run the '%s' benchmark"
                % name)
        print("Need at least 2 machines in this configuration")
        return
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #42
def indexScalability(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 2'
    if cluster_args['timeout'] < 100:
        cluster_args['timeout'] = 100
    cluster_args['backups_per_server'] = 0
    cluster_args['replicas'] = 0
    # Ensure at least 15 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(hosts)
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 10
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #43
def readThroughput(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(getHosts()) - cluster_args['num_servers']
    if cluster_args['num_clients'] < 2:
        print("Not enough machines in the cluster to run the '%s' benchmark"
                % name)
        print("Need at least 2 machines in this configuration")
        return
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #44
def writeDist(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    cluster_args['disjunct'] = True
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path,  flatten_args(client_args), name),
            **cluster_args)
    print("# Cumulative distribution of time for a single client to write a\n"
          "# single %d-byte object from a single server.  Each line indicates\n"
          "# that a given fraction of all writes took at most a given time\n"
          "# to complete.\n"
          "# Generated by 'clusterperf.py %s'\n#\n"
          "# Time (usec)  Cum. Fraction\n"
          "#---------------------------"
          % (options.size, name))
    print_cdf_from_log()
Example #45
def workloadDist(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    cluster_args['disjunct'] = True
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print("# Cumulative distribution latencies for operations specified by\n"
          "# the benchmark.\n#\n"
          "# Generated by 'clusterperf.py %s'\n#\n"
          "# Time (usec)  Cum. Fraction\n"
          "#---------------------------" % (name))
    if (options.rcdf):
        print_rcdf_from_log()
    else:
        print_cdf_from_log()
Example #46
def txTpccLatency(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = ' -t 10000 '
    if 'backup_args' not in cluster_args:
        cluster_args[
            'backup_args'] = ' --segmentFrames 10000 --maxNonVolatileBuffers 8'
    if cluster_args['timeout'] < 150:
        cluster_args['timeout'] = 150
    #cluster_args['disjunct'] = True
    if options.num_servers == None:
        cluster_args['num_servers'] = 4
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 1
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print(get_client_log(), end='')
Example #47
def workloadDist(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    cluster_args['disjunct'] = True
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path,  flatten_args(client_args), name),
            **cluster_args)
    print("# Cumulative distribution latencies for operations specified by\n"
          "# the benchmark.\n#\n"
          "# Generated by 'clusterperf.py %s'\n#\n"
          "# Time (usec)  Cum. Fraction\n"
          "#---------------------------"
          % (name))
    if (options.rcdf):
        print_rcdf_from_log()
    else:
        print_cdf_from_log()
Example #48
def transactionThroughput(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(getHosts()) - cluster_args['num_servers']
    if cluster_args['num_clients'] < 2:
        print("Not enough machines in the cluster to run the '%s' benchmark"
                % name)
        print("Need at least 2 machines in this configuration")
        return
    if options.numTables == None:
        client_args['--numTables'] = 1
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    for i in range(1, cluster_args['num_clients'] + 1):
        print(get_client_log(i), end='')
Example #49
def transactionThroughput(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(getHosts()) - cluster_args['num_servers']
    if cluster_args['num_clients'] < 2:
        print("Not enough machines in the cluster to run the '%s' benchmark"
                % name)
        print("Need at least 2 machines in this configuration")
        return
    if options.numTables == None:
        client_args['--numTables'] = 1
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    for i in range(1, cluster_args['num_clients'] + 1):
        print(get_client_log(i), end='')
Example #50
def readDistRandom(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 1000'
    cluster.run(
        client='%s/apps/ClusterPerf %s %s' %
        (config.hooks.get_remote_obj_path(), flatten_args(client_args), name),
        **cluster_args)
    print(
        "# Cumulative distribution of time for a single client to read a\n"
        "# random %d-byte object from a single server.  Each line indicates\n"
        "# that a given fraction of all reads took at most a given time\n"
        "# to complete.\n"
        "# Generated by 'clusterperf.py readDist'\n#\n"
        "# Time (usec)  Cum. Fraction\n"
        "#---------------------------" % options.size)
    if (options.rcdf):
        print_rcdf_from_log()
    else:
        print_cdf_from_log()
Example #51
def writeDist(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    cluster_args['disjunct'] = True
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print(
        "# Cumulative distribution of time for a single client to write a\n"
        "# single %d-byte object from a single server.  Each line indicates\n"
        "# that a given fraction of all writes took at most a given time\n"
        "# to complete.\n"
        "# Generated by 'clusterperf.py %s'\n#\n"
        "# Time (usec)  Cum. Fraction\n"
        "#---------------------------" % (options.size, name))
    if (options.rcdf):
        print_rcdf_from_log()
    else:
        print_cdf_from_log()
Example #52
def indexRange(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
    if cluster_args['timeout'] < 360:
        cluster_args['timeout'] = 360

    if '--numObjects' not in client_args:
        client_args['--numObjects'] = 1000
    if '--warmup' not in client_args:
        client_args['--warmup'] = 10
    if '--count' not in client_args:
        client_args['--count'] = 90

    # Ensure at least 5 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #53
def readDistRandom(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 1000'
    cluster.run(client='%s/apps/ClusterPerf %s %s' %
            (config.hooks.get_remote_obj_path(),
             flatten_args(client_args), name),
            **cluster_args)
    print("# Cumulative distribution of time for a single client to read a\n"
          "# random %d-byte object from a single server.  Each line indicates\n"
          "# that a given fraction of all reads took at most a given time\n"
          "# to complete.\n"
          "# Generated by 'clusterperf.py readDist'\n#\n"
          "# Time (usec)  Cum. Fraction\n"
          "#---------------------------"
          % options.size)
    if (options.rcdf):
        print_rcdf_from_log()
    else:
        print_cdf_from_log()
Example #54
def default(
        name,                      # Name of this test; passed through
                                   # to ClusterPerf verbatim.
        options,                   # The full set of command-line options.
        cluster_args,              # Proposed set of arguments to pass to
                                   # cluster.run (extracted from options).
                                   # Individual tests can override as
                                   # appropriate for the test.
        client_args,               # Proposed set of arguments to pass to
                                   # ClusterPerf (via cluster.run).
                                   # Individual tests can override as
                                   # needed for the test.
        ):
    """
    This function is used as the invocation function for most tests;
    it simply invokes ClusterPerf via cluster.run and prints the result.
    """
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #55
def transactionDist(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    if options.numTables == None:
        client_args['--numTables'] = 1
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print(
        "# Cumulative distribution of time for a single client to commit a\n"
        "# transactional read-write on a single %d-byte object from a\n"
        "# single server.  Each line indicates that a given fraction of all\n"
        "# commits took at most a given time to complete.\n"
        "# Generated by 'clusterperf.py %s'\n#\n"
        "# Time (usec)  Cum. Fraction\n"
        "#---------------------------" % (options.size, name))
    if (options.rcdf):
        print_rcdf_from_log()
    else:
        print_cdf_from_log()
Example #56
def default(
        name,                      # Name of this test; passed through
                                   # to ClusterPerf verbatim.
        options,                   # The full set of command-line options.
        cluster_args,              # Proposed set of arguments to pass to
                                   # cluster.run (extracted from options).
                                   # Individual tests can override as
                                   # appropriate for the test.
        client_args,               # Proposed set of arguments to pass to
                                   # ClusterPerf (via cluster.run).
                                   # Individual tests can override as
                                   # needed for the test.
        ):
    """
    This function is used as the invocation function for most tests;
    it simply invokes ClusterPerf via cluster.run and prints the result.
    """
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')
Example #57
def indexRange(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--maxCores 2 --totalMasterMemory 1500'
    if cluster_args['timeout'] < 360:
        cluster_args['timeout'] = 360

    if '--numObjects' not in client_args:
        client_args['--numObjects'] = 1000
    if '--warmup' not in client_args:
        client_args['--warmup'] = 10
    if '--count' not in client_args:
        client_args['--count'] = 90

    # Ensure at least 5 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    cluster.run(
        client='%s/apps/ClusterPerf %s %s' %
        (config.hooks.get_remote_obj_path(), flatten_args(client_args), name),
        **cluster_args)
    print(get_client_log(), end='')
Example #58
def indexScalability(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--maxCores 3'
    if cluster_args['timeout'] < 360:
        cluster_args['timeout'] = 360
    cluster_args['backup_disks_per_server'] = 0
    cluster_args['replicas'] = 0
    # Number of concurrent rpcs to do per indexlet
    if '--count' not in client_args:
        client_args['--count'] = 20
    # Number of objects per read request
    if '--numObjects' not in client_args:
        client_args['--numObjects'] = 1

    # Ensure at least 15 hosts for optimal performance
    if options.num_servers == None:
        cluster_args['num_servers'] = len(getHosts())
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 10
    cluster.run(client='%s/ClusterPerf %s %s' %
            (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')