def ht_zipf():
    """Benchmark Jiffy hash-table GET performance under a Zipf workload.

    Sweeps the client-side cache size from 5% to 100% of the total data
    footprint (num_ops * 64 bytes, in 20 steps) and records throughput,
    latency and hit rate for each setting to ./ht_zipf_with_cache.txt.

    Returns 0 on completion.
    """
    address = "frog.zoo.cs.yale.edu"
    service_port = 9090
    lease_port = 9091
    num_blocks = 1
    chain_length = 1
    num_ops = 100000
    data_size = 64
    op_type_set = []

    op_type_set.append("get")
    path = "/tmp"
    backing_path = "local://tmp"
    file_name = './ht_zipf_with_cache.txt'
    data = open(file_name, 'w+')

    # Output all the configuration parameters:
    print >> data, "host: ", address
    print >> data, "service-port: ", service_port
    print >> data, "lease-port: ", lease_port
    print >> data, "num-blocks: ", num_blocks
    print >> data, "chain-length: ", chain_length
    print >> data, "num-ops: ", num_ops
    print >> data, "data-size: ", data_size
    print >> data, "path: ", path
    print >> data, "backing-path: ", backing_path

    num_clients = 1
    loading = 0
    client = JiffyClient(address, service_port, lease_port)
    ht_clients = [None] * num_clients
    # Cache size grows in 20 equal steps of (num_ops // 20) * 64 bytes,
    # up to the full working-set size of num_ops * 64 bytes.
    for cache_size in range(num_ops // 20 * 64, num_ops * 64 + 1,
                            num_ops // 20 * 64):
        ht_clients[0] = client.open_or_create_hash_table(
            path, backing_path, num_blocks, chain_length, cache_size)
        benchmark = GetBenchmark(ht_clients, data_size, num_clients, num_ops)
        benchmark.run()
        result = benchmark.wait()
        # Remove the table so the next iteration starts from a clean slate.
        client.remove(path)
        # result layout (from the benchmark): [0] throughput (req/s),
        # [1] avg put latency (us), [2] avg get latency (us), [3] hit rate (%).
        print >> data, "===== ", "Zipf_ht_Benchmark, ", "Cache_Size= ", cache_size, " ======"
        print >> data, "\t", num_ops, " requests completed in ", (
            float(num_ops) / result[0]), " s"
        print >> data, "\t", num_clients, " parallel clients"
        print >> data, "\t", data_size, " payload"
        print >> data, "\tAverage put latency: ", result[1], "us"
        print >> data, "\tAverage get latency: ", result[2], "us"
        print >> data, "\tAverage total latency: ", result[1] + result[2], "us"
        print >> data, "\tThroughput: ", result[0], " requests per second"
        print >> data, "\tHit_rate: ", round(result[3], 4), "%"
        print >> data, "\n"
        loading += 1
        # 20 cache-size steps in total, hence the /20 progress scale.
        print("Loading -- ", round(float(loading * 100 / 20), 1), "%")

    # Close the results file (the original leaked this handle; file_cp
    # closes its file, so this makes the two benchmarks consistent).
    data.close()
    return 0
# Example #2
def run_scale_workload(d_host='127.0.0.1', d_port=9090, l_port=9091, data_path='/data/test', n_ops=100000,
                       value_size=102400, skew=0.0):
    """Run a simple scale workload against a Jiffy KV store.

    Generates n_ops Zipf-distributed keys (512 distinct, skew-controlled),
    puts a value_size-byte payload under each, removes each key again, and
    finally deletes the data path from the store.
    """
    payload = bytes(value_size)
    key_list = zipf_keys(skew, 512, n_ops)
    jiffy = JiffyClient(d_host, d_port, l_port)
    store = jiffy.open_or_create(data_path, '/tmp')
    logging.info("Generated {} keys".format(len(key_list)))
    # Write phase: one put per generated key.
    for k in key_list:
        store.put(k, payload)
    # Cleanup phase: remove every key we inserted.
    for k in key_list:
        store.remove(k)
    jiffy.remove(data_path, RemoveMode.delete)
# Example #3
def file_cp():
    """Benchmark Jiffy file read/write performance across cache settings.

    For each operation type ("write" then "read"), sweeps the client cache
    size (100..2100 step 200) and prefetch size (5..50 step 5) and records
    throughput, latency and hit rate for each combination to ./file_cp.txt.

    Returns 0 on completion (or 0 early on an unknown operation type).
    """
    address = "frog.zoo.cs.yale.edu"
    service_port = 9090
    lease_port = 9091
    num_blocks = 1
    chain_length = 1
    num_ops = 100000
    data_size = 64
    op_type_set = []
    op_type_set.append("write")
    op_type_set.append("read")
    path = "/tmp"
    backing_path = "local://tmp"

    # Output all the configuration parameters:
    file_name = './file_cp.txt'
    data = open(file_name, 'w+')
    print >> data, "host: ", address
    print >> data, "service-port: ", service_port
    print >> data, "lease-port: ", lease_port
    print >> data, "num-blocks: ", num_blocks
    print >> data, "chain-length: ", chain_length
    print >> data, "num-ops: ", num_ops
    print >> data, "data-size: ", data_size
    print >> data, "path: ", path
    print >> data, "backing-path: ", backing_path
    for op_type in op_type_set:
        count = 1
        while count <= 1:
            loading = 0
            num_clients = count
            cache_block_size = 2000
            client = JiffyClient(address, service_port, lease_port)
            ht_clients = [None] * num_clients
            for cache_size in range(100, 2101, 200):
                for prefetch_size in range(5, 51, 5):
                    for i in range(num_clients):
                        ht_clients[i] = client.open_or_create_file(
                            path, backing_path, num_blocks, chain_length,
                            cache_size, cache_block_size, prefetch_size)

                    # BUG FIX: the second branch was a plain `if`, so a
                    # "write" op_type fell through to the `else`, printed
                    # "Incorrect operation type" and returned immediately —
                    # the write benchmark was never reported and the read
                    # pass never ran. `elif` restores the intended dispatch.
                    if (op_type == "write"):
                        benchmark = WriteBenchmark(ht_clients, data_size,
                                                   num_clients, num_ops)
                    elif (op_type == "read"):
                        benchmark = ReadBenchmark(ht_clients, data_size,
                                                  num_clients, num_ops)
                    else:
                        print >> data, "Incorrect operation type for file: ", op_type
                        # Close the results file before bailing out so the
                        # error line is flushed and the handle is not leaked.
                        data.close()
                        return 0

                    benchmark.run()
                    result = benchmark.wait()
                    # Remove the file so the next combination starts clean.
                    client.remove(path)

                    # result layout (from the benchmark): [0] throughput,
                    # [1] avg write latency (us), [2] avg read latency (us),
                    # [3] hit rate (%).
                    print >> data, "===== ", "Cache_Size= ", cache_size, "Prefetch Size= ", prefetch_size, " ======"
                    print >> data, "\t", num_ops, " requests completed in ", (
                        float(num_ops) / result[0]), " s"
                    print >> data, "\t", num_clients, " parallel clients"
                    print >> data, "\t", data_size, " payload"
                    print >> data, "\tAverage write latency: ", result[1], "us"
                    print >> data, "\tAverage read latency: ", result[2], "us"
                    print >> data, "\tAverage total latency: ", result[
                        1] + result[2], "us"
                    print >> data, "\tThroughput: ", result[
                        0], " bytes per second"
                    print >> data, "\tHit_rate: ", round(result[3], 4), "%"
                    print >> data, "\n"
                    loading += 1
                    # 11 cache sizes x 10 prefetch sizes = 110 combinations.
                    print("Loading -- ", round(float(loading * 100 / 110), 1),
                          "%")
            count *= 2

    data.close()
    return 0