Example #1
import argparse
import os

import csv_utils
import plot_utils
import sqlite_helper_object


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--db_file", type=str, required=True, help="trials.db")
    parser.add_argument("--csv_dir", type=str, required=True)
    parser.add_argument("--graph_dir", type=str, required=True)
    args = parser.parse_args()

    # Query data
    db = sqlite_helper_object.SQLiteHelperObject(args.db_file)
    db.connect()
    data = []
    c = db.c
    for row in c.execute("SELECT ops_per_sec_cum, p50_ms, p99_ms, skews "
                         "FROM trials_table "
                         "WHERE server_nodes=3"):
        data.append({
            "ops/sec(cum)": row[0],
            "p50(ms)": row[1],
            "p99(ms)": row[2],
            "skews": row[3],
        })
    db.close()

    # sort and write out data
    data = sorted(data, key=lambda point: point["skews"])
    csv_file = csv_utils.write_out_data(data,
                                        os.path.join(args.csv_dir, "dat.csv"))

    # graph data
    plot_utils.gnuplot("src/plot.gp", csv_file,
                       os.path.join(args.graph_dir, "p50_v_skew.png"),
                       os.path.join(args.graph_dir, "tp_v_skew.png"),
                       os.path.join(args.graph_dir, "p99_v_skew.png"))

    return 0
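
A minimal entry-point sketch (not part of the original example; it assumes this script is run directly) so that main()'s return value becomes the process exit status:

import sys

# Hypothetical guard: exit with main()'s return code when run as a script.
if __name__ == "__main__":
    sys.exit(main())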
Example #2

def insert_csv_data(data, csv_fpath):
    # Merge new rows into an existing CSV, keeping rows sorted by concurrency.
    if not data:
        return None

    existing_rows = csv_utils.read_in_data(csv_fpath)
    all_data = existing_rows + data
    all_data = sorted(all_data, key=lambda i: i["concurrency"])

    _ = csv_utils.write_out_data(all_data, csv_fpath)

    return csv_fpath
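
A hypothetical usage sketch (placeholder values; it assumes csv_utils reads and writes lists of dicts, as in the examples above):

# Hypothetical call: merge one new measurement into an existing results file.
new_rows = [{"concurrency": 64, "ops/sec(cum)": 12000.0}]
merged_fpath = insert_csv_data(new_rows, "logs/results.csv")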
Example #3
def run_single_trial_wrapper(config, trial_logs_location):
    """ Wraps run_single_trial function so that the input and output
    parameters match the following interface.

    Args:
        config (dict): a dictionary
        trial_logs_location (str): location that the trial will log.

    Returns:
        (str) absolute filepath of the results.
        """
    trial_data = run_single_trial(
        config["server_node"], config["server_commit_branch"],
        config["server_concurrency"], config["workload_nodes"],
        config["client_commit_branch"], config["concurrency"],
        trial_logs_location, config["duration"], config["batch"],
        config["read_percent"])

    results_fpath = os.path.join(trial_logs_location, "results.csv")
    _ = csv_utils.write_out_data([trial_data], results_fpath)

    return results_fpath
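
A hypothetical config dict covering every key run_single_trial_wrapper reads; all values are placeholders:

# Hypothetical example; keys mirror the lookups in the wrapper above.
config = {
    "server_node": "server-0",
    "server_commit_branch": "master",
    "server_concurrency": 4,
    "workload_nodes": ["client-0", "client-1"],
    "client_commit_branch": "master",
    "concurrency": 32,
    "duration": 60,
    "batch": 1,
    "read_percent": 95,
}
results_fpath = run_single_trial_wrapper(config, "/tmp/trial_logs")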
Example #4
def run(config, log_dir):
    server_nodes = config["warm_nodes"]
    client_nodes = config["workload_nodes"]
    commit_hash = config["cockroach_commit"]
    hot_node = config["hot_node"] if "hot_node" in config else None
    # hotkeys = config["hotkeys"]

    # clear any remaining experiments
    cleanup_previous_experiments(server_nodes, client_nodes, hot_node)

    # disable cores, if need be
    cores_to_disable = config["disable_cores"]
    if cores_to_disable > 0:
        disable_cores(server_nodes, cores_to_disable)
        if hot_node:
            disable_cores([hot_node], cores_to_disable)

    # start hot node
    if hot_node:
        setup_hotnode(hot_node)

    # build and start crdb cluster
    build_cockroachdb_commit(server_nodes + client_nodes, commit_hash)
    start_cluster(server_nodes)
    set_cluster_settings_on_single_node(server_nodes[0])

    # run the workload from the client nodes
    results_fpath = ""
    if config["name"] == "kv":
        keyspace = config["keyspace"]
        warm_up_duration = config["warm_up_duration"]
        duration = config["duration"]
        read_percent = config["read_percent"]
        n_keys_per_statement = config["n_keys_per_statement"]
        skew = config["skews"]
        concurrency = config["concurrency"]
        bench_log_files = run_kv_workload(client_nodes, server_nodes,
                                          concurrency, keyspace,
                                          warm_up_duration, duration,
                                          read_percent, n_keys_per_statement,
                                          skew, log_dir)

        # gather data from the raw benchmark logs
        data = {"concurrency": config["concurrency"]}
        more_data, has_data = gather.gather_data_from_raw_kv_logs(
            bench_log_files)
        if not has_data:
            raise RuntimeError(
                "Config {0} has failed to produce any results".format(
                    config[constants.CONFIG_FPATH_KEY]))
        data.update(more_data)

        # write out csv file
        results_fpath = os.path.join(log_dir, "results.csv")
        _ = csv_utils.write_out_data([data], results_fpath)

    # re-enable any cores that were disabled earlier
    if cores_to_disable > 0:
        enable_cores(server_nodes, cores_to_disable)
        if hot_node:
            enable_cores([hot_node], cores_to_disable)

    return results_fpath
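
A hypothetical "kv" config covering every key run() reads on the happy path; all values are placeholders (constants.CONFIG_FPATH_KEY is only read when a trial fails to produce results):

# Hypothetical example; keys mirror the config lookups in run() above.
config = {
    "name": "kv",
    "warm_nodes": ["server-0", "server-1", "server-2"],
    "workload_nodes": ["client-0"],
    "cockroach_commit": "master",
    "hot_node": None,            # optional key
    "disable_cores": 0,
    "keyspace": 1000000,
    "warm_up_duration": 30,
    "duration": 300,
    "read_percent": 95,
    "n_keys_per_statement": 1,
    "skews": 0.9,
    "concurrency": 128,
}
results_fpath = run(config, "/tmp/kv_run_logs")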