Esempio n. 1
0
def main():
    """Collect memcached benchmark results into $PROJ_ROOT/results/memcached/raw.csv.

    Tags the summary line three rows below each 'Total Statistics' header in
    the log so it can be matched per-line, then delegates parsing of client
    count, throughput, and latency to collect.collect.
    """
    res_path = os.environ['PROJ_ROOT'] + '/results/memcached/raw.csv'
    full_output_file = os.environ[
        'DATA_PATH'] + "/results/memcached/memcached.log"

    # reshape log file for per-line collection
    with open(full_output_file, 'r') as f:
        old = f.readlines()
    new = old[:]
    for i, l in enumerate(old):
        # guard against a truncated log where the summary line is missing
        if l.startswith('Total Statistics') and i + 3 < len(new):
            new[i + 3] = 'TotalStatistics' + new[i + 3]
    with open(full_output_file, 'w') as f:
        f.writelines(new)

    collect.collect(
        "memcached",
        user_parameters={
            # pass the parser directly instead of wrapping it in a lambda
            "num_clients": ["input:", collect.get_int_from_string],
            "tput": ["TPS: ", lambda l: float(l.split('TPS: ')[1].split()[0])],
            # token 8 of the tagged summary line is the latency value; the
            # original trailing .split()[0] was redundant (tokens contain no
            # whitespace)
            "lat": ["TotalStatisticsGlobal", lambda l: float(l.split()[8])],
        },
        result_file=res_path)
Esempio n. 2
0
def main():
    """Collect memcached results (client count, throughput, latency).

    Reads the log path from the DATA_PATH environment variable, tags the
    summary line three rows below each 'Total Statistics' header so it can
    be matched per-line, then delegates parsing to collect.collect.
    """
    full_output_file = os.environ[
        'DATA_PATH'] + "/results/memcached/memcached.log"

    # reshape log file for per-line collection
    with open(full_output_file, 'r') as f:
        old = f.readlines()
    new = old[:]
    for i, l in enumerate(old):
        # guard against a truncated log where the summary line is missing
        if l.startswith('Total Statistics') and i + 3 < len(new):
            new[i + 3] = 'TotalStatistics' + new[i + 3]
    with open(full_output_file, 'w') as f:
        f.writelines(new)

    # compile once instead of re-looking-up the pattern on every matched line
    num_clients_re = re.compile(r'input: (\d{1,4})')
    collect.collect(
        "memcached",
        user_parameters={
            "num_clients": [
                "input:",
                lambda l: int(num_clients_re.search(l).group(1))
            ],
            "tput": ["TPS: ", lambda l: float(l.split('TPS: ')[1].split()[0])],
            # token 8 holds the latency; the original trailing .split()[0]
            # was redundant (tokens contain no whitespace)
            "lat": ["TotalStatisticsGlobal", lambda l: float(l.split()[8])],
        })
Esempio n. 3
0
def main():
    """Collect sqlite3 total throughput and latency via collect.collect."""
    collect.collect(
        "sqlite3",
        user_parameters={
            # pass the float parser directly instead of wrapping it in a lambda
            "total_tput": ["total_tput", collect.get_float_from_string],
            "total_lat": ["total_lat", collect.get_float_from_string],
        })
Esempio n. 4
0
def main():
    """Collect RIPE attack-outcome counters (ok/some/fail/np/total)."""
    collect.collect(
        "ripe",
        user_parameters={
            # pass the int parser directly instead of wrapping it in a lambda
            "ok": ["TOTAL OK:", collect.get_int_from_string],
            "some": ["TOTAL SOME:", collect.get_int_from_string],
            "fail": ["TOTAL FAIL:", collect.get_int_from_string],
            "notpossible": ["TOTAL NP:", collect.get_int_from_string],
            "total": ["TOTAL ATTACKS:", collect.get_int_from_string],
        })
Esempio n. 5
0
def main():
    """Collect nginx ApacheBench results into $PROJ_ROOT/results/nginx/raw.csv."""
    res_path = os.environ['PROJ_ROOT'] + '/results/nginx/raw.csv'
    collect.collect(
        "nginx",
        user_parameters={
            # pass the parser helpers directly instead of wrapping in lambdas
            "num_clients": ["Concurrency Level", collect.get_int_from_string],
            "tput": ["Requests per second", collect.get_float_from_string],
            "lat": ["[ms] (mean)", collect.get_float_from_string],
            "complete_requests": ["Complete requests", collect.get_int_from_string],
            "failed_requests": ["Failed requests", collect.get_int_from_string],
        },
        result_file=res_path
    )
def main():
    """Collect RIPE attack-outcome counters from the log into raw.csv."""
    # set parameters
    full_output_file = collect.data + "/ripe/ripe.log"
    results_file = collect.data + "/ripe/raw.csv"
    parameters = {
        # pass the int parser directly instead of wrapping it in a lambda
        "ok": ["TOTAL OK:", collect.get_int_from_string],
        "some": ["TOTAL SOME:", collect.get_int_from_string],
        "fail": ["TOTAL FAIL:", collect.get_int_from_string],
        "notpossible": ["TOTAL NP:", collect.get_int_from_string],
        "total": ["TOTAL ATTACKS:", collect.get_int_from_string],
    }
    # collect
    collect.collect(results_file, full_output_file, parameters)
Esempio n. 7
0
def main():
    """Collect apache_perf ApacheBench results from its log into raw.csv."""
    # set parameters
    full_output_file = collect.data + "/apache_perf/apache_perf.log"
    results_file = collect.data + "/apache_perf/raw.csv"
    parameters = {
        # pass the parser helpers directly instead of wrapping in lambdas
        "num_clients": ["Concurrency Level", collect.get_int_from_string],
        "tput": ["Requests per second", collect.get_float_from_string],
        "lat": ["[ms] (mean)", collect.get_float_from_string],
        "complete_requests": ["Complete requests", collect.get_int_from_string],
        "failed_requests": ["Failed requests", collect.get_int_from_string],
    }

    # collect
    collect.collect(results_file, full_output_file, parameters)
Esempio n. 8
0
def main():
    """Collect apache ApacheBench results via collect.collect."""
    collect.collect(
        "apache",
        user_parameters={
            # pass the parser helpers directly instead of wrapping in lambdas
            "num_clients": ["Concurrency Level", collect.get_int_from_string],
            "tput": ["Requests per second", collect.get_float_from_string],
            "lat": ["[ms] (mean)", collect.get_float_from_string],
            "complete_requests": ["Complete requests", collect.get_int_from_string],
            "failed_requests": ["Failed requests", collect.get_int_from_string],
        })
def main():
    """Collect memcached_perf results (client count, throughput, latency).

    Tags the summary line three rows below each 'Total Statistics' header in
    the log so it can be matched per-line, then parses the log into raw.csv.
    """
    # set parameters
    full_output_file = collect.data + "/memcached_perf/memcached_perf.log"
    results_file = collect.data + "/memcached_perf/raw.csv"
    # compile once instead of re-looking-up the pattern on every matched line
    num_clients_re = re.compile(r'input: (\d{1,4})')
    parameters = {
        "num_clients": ["input:", lambda l: int(num_clients_re.search(l).group(1))],
        "tput": ["TPS: ", lambda l: float(l.split('TPS: ')[1].split()[0])],
        # token 8 of the tagged line holds the latency; the original trailing
        # .split()[0] was redundant (tokens contain no whitespace)
        "lat": ["TotalStatisticsGlobal", lambda l: float(l.split()[8])],
    }

    # reshape log file for per-line collection (dead `new = []` initializer
    # removed: it was always overwritten below)
    with open(full_output_file, 'r') as f:
        old = f.readlines()
    new = old[:]
    for i, l in enumerate(old):
        # guard against a truncated log where the summary line is missing
        if l.startswith('Total Statistics') and i + 3 < len(new):
            new[i + 3] = 'TotalStatistics' + new[i + 3]
    with open(full_output_file, 'w') as f:
        f.writelines(new)

    # collect
    collect.collect(results_file, full_output_file, parameters)
Esempio n. 10
0
def main():
    """Collect phoenix_perf results from its log into raw.csv."""
    base = collect.data + "/phoenix_perf"
    collect.collect(base + "/raw.csv", base + "/phoenix_perf.log")
Esempio n. 11
0
def main():
    """Run the default result collection for the parsec benchmark suite."""
    collect.collect("parsec")
Esempio n. 12
0
def main():
    """Collect parsec results into $PROJ_ROOT/results/parsec/raw.csv."""
    out_csv = os.environ['PROJ_ROOT'] + '/results/parsec/raw.csv'
    collect.collect("parsec", result_file=out_csv)
Esempio n. 13
0
def main():
    """Collect postgresql (pgbench) results: client count, latency, TPS."""
    # compile once instead of re-looking-up the pattern on every matched line
    clients_re = re.compile(r'clients: (\d{1,4})')
    collect.collect("postgresql", user_parameters={
        "num_clients": ["clients", lambda l: int(clients_re.search(l).group(1))],
        # pass the float parser directly instead of wrapping it in a lambda
        "latency": ["latency average", collect.get_float_from_string],
        "tps": ["excluding connections establishing", collect.get_float_from_string],
    })
Esempio n. 14
0
def main():
    """Run the default result collection for the parsec_var_input benchmark."""
    collect.collect("parsec_var_input")
Esempio n. 15
0
def main():
    """Run the default result collection for the splash benchmark suite."""
    collect.collect("splash")
Esempio n. 16
0
def main():
    """Collect phoenix_var_input results from its log into raw.csv."""
    base = gc.data + "/phoenix_var_input"
    gc.collect(base + "/raw.csv", base + "/phoenix_var_input.log")
Esempio n. 17
0
def main():
    """Run the default result collection for the phoenix_var_input benchmark."""
    collect.collect("phoenix_var_input")
Esempio n. 18
0
def main():
    """Collect phoenix results into $PROJ_ROOT/results/phoenix/raw.csv."""
    out_csv = os.environ['PROJ_ROOT'] + '/results/phoenix/raw.csv'
    collect.collect("phoenix", result_file=out_csv)
Esempio n. 19
0
def main():
    """Run the default result collection for the micro benchmark suite."""
    collect.collect("micro")