def build_server(server_node, commit_branch):
    """
    Builds the smdbrpc server, given the commit branch.

    Args:
        server_node (dict of Node object): its "ip" key is the ssh target.
        commit_branch (str): git branch to fetch, check out, and pull.

    Returns:
        None.
    """
    host = server_node["ip"]

    # Bring the checkout up to date on the requested branch.
    checkout_cmd = (
        "cd /root/smdbrpc; "
        "git fetch origin {0}; "
        "git checkout {0}; "
        "git pull origin {0}; "
    ).format(commit_branch)
    print(system_utils.call_remote(host, checkout_cmd))

    # Clean rebuild via cmake into /root/.local.
    build_cmd = (
        "cd /root/smdbrpc/cpp/; "
        "rm -rf cmake/*; "
        "mkdir -p cmake/build; "
        "pushd cmake/build; "
        "export PATH=$PATH:/root/.local/bin; "
        "cmake -DCMAKE_INSTALL_PREFIX=/root/.local ../..; "
        "make -j"
    )
    print(system_utils.call_remote(host, build_cmd))
def set_cluster_settings_on_single_node(node):
    """
    Applies single-node cluster settings on the given node via the SQL shell.

    Args:
        node (dict of Node object): its "ip" key is both the ssh target and
            the SQL connection host.

    Returns:
        None.
    """
    ip = node["ip"]
    # Pipe the settings statements into the SQL shell over an insecure
    # connection.  The disabled settings below are intentionally kept as a
    # record of previously-used knobs.
    sql_cmd = (
        'echo "'
        # 'set cluster setting kv.range_merge.queue_enabled = false;'
        # 'set cluster setting kv.range_split.by_load_enabled = false;'
        'set cluster setting kv.raft_log.disable_synchronization_unsafe = true;'
        'alter range default configure zone using num_replicas = 1;'
        '" | {0} sql --insecure '
        '--url="postgresql://root@{1}?sslmode=disable"'
    ).format(EXE, ip)
    system_utils.call_remote(ip, sql_cmd)
def kill(node):
    """
    Kills a client or server ("hotshard") process on the given node.

    Args:
        node (dict of Node object): its "ip" key is the ssh target.

    Returns:
        None.
    """
    target = node["ip"]
    # If pgrep finds no hotshard process, the negated command substitution
    # succeeds and the right-hand side is skipped; otherwise SIGKILL the
    # process and poll until its PID disappears from the process table.
    remote_cmd = (
        "PID=$(! pgrep hotshard) "
        "|| (sudo pkill -9 hotshard; while ps -p $PID;do sleep 1;done;)"
    )
    system_utils.call_remote(target, remote_cmd)
def build_server(server_node, commit_branch):
    """
    Builds the cicada-engine server, given the commit branch.

    NOTE(review): this redefines build_server (the smdbrpc variant defined
    earlier in this file); only the later definition is callable at import
    time.  Consider renaming one of them.

    Args:
        server_node (dict of Node object): its "ip" key is the ssh target.
        commit_branch (str): git branch to fetch, check out, and pull.

    Returns:
        None.
    """
    server_url = server_node["ip"]
    # Fix: the original ran "git checkout origin <branch>", which git parses
    # as checking out the ref "origin" with "<branch>" as a pathspec rather
    # than switching to the branch.  Check out the branch itself, consistent
    # with the other checkout helpers in this file.
    cmd = "cd /root/cicada-engine; " \
          "git fetch origin {0}; " \
          "git checkout {0}; " \
          "git pull origin {0}; ".format(commit_branch)
    print(system_utils.call_remote(server_url, cmd))
    # Clean rebuild, then run the engine's hugepage setup script and copy
    # the test configs into the build directory.
    cmd = "cd /root/cicada-engine/build; " \
          "rm -rf *; " \
          "export PATH=$PATH:/root/.local/bin; " \
          "cmake -DLTO=ON ..; " \
          "make -j; " \
          "../script/setup.sh 16384 16384; " \
          "cp ../src/mica/test/*.json ."
    print(system_utils.call_remote(server_url, cmd))
def build_client(client_node, commit_branch):
    """
    Builds the client by checking out the given smdbrpc branch on the node.

    Args:
        client_node (dict of Node object): its "ip" key is the ssh target.
        commit_branch (str): git branch to fetch, check out, and pull.

    Returns:
        None.
    """
    host = client_node["ip"]
    # Sync the client's checkout to the requested branch.
    checkout_cmd = (
        "cd /root/smdbrpc; "
        "git fetch origin {0}; "
        "git checkout {0}; "
        "git pull origin {0}; "
    ).format(commit_branch)
    print(system_utils.call_remote(host, checkout_cmd))
def run_kv_workload(client_nodes, server_nodes, concurrency, keyspace,
                    warm_up_duration, duration, read_percent,
                    n_keys_per_statement, skew, log_dir,
                    mode=RunMode.WARMUP_AND_TRIAL_RUN):
    """
    Initializes, prepopulates, and runs the kv workload across the clients.

    Args:
        client_nodes (list of dict of Node object): workload drivers;
            each "ip" key is an ssh target.
        server_nodes (list of dict of Node object): database servers.
        concurrency (int): workers per client.
        keyspace (int): number of keys to prepopulate / address.
        warm_up_duration (int): warmup length in seconds.
        duration (int): trial-run length in seconds.
        read_percent (int): percentage of reads in the workload.
        n_keys_per_statement (int): batch size per statement.
        skew (float): zipfian skew parameter s.
        log_dir (str): directory under which "logs/" is created.
        mode (RunMode): which phases (warmup / trial) to run.

    Returns:
        list of str: per-client benchmark log paths; empty when the trial
        phase was skipped.
    """
    server_urls = [
        "postgresql://root@{0}:26257?sslmode=disable".format(n["ip"])
        for n in server_nodes
    ]

    # warmup and trial run commands share the same workload arguments
    args = [
        "--concurrency {}".format(int(concurrency)),
        "--read-percent={}".format(read_percent),
        "--batch={}".format(n_keys_per_statement),
        "--zipfian --s={}".format(skew),
        "--keyspace={}".format(keyspace),
    ]

    # initialize the workload from the driver node
    init_cmd = "{0} workload init kv {1}".format(EXE, server_urls[0])
    driver_node = client_nodes[0]
    system_utils.call_remote(driver_node["ip"], init_cmd)

    # set database settings
    a_server_node = server_nodes[0]
    settings_cmd = (
        'echo "alter range default configure zone using num_replicas = 1;" | '
        '{0} sql --insecure --database=kv '
        '--url="postgresql://root@{1}?sslmode=disable"'
    ).format(EXE, a_server_node["ip"])
    system_utils.call_remote(driver_node["ip"], settings_cmd)

    # prepopulate data: generate a CSV locally, upload it to the server's
    # nodelocal storage, then IMPORT it into the kv table
    data_csv_leaf = "init_data.csv"
    data_csv = os.path.join(constants.SCRATCH_DIR, data_csv_leaf)
    populate_crdb_data.populate(data_csv, keyspace)
    nfs_location = "data/{0}".format(data_csv_leaf)
    upload_cmd = "{0} nodelocal upload {1} {2} --host={3} --insecure".format(
        EXE, data_csv, nfs_location, a_server_node["ip"])
    system_utils.call(upload_cmd)
    import_cmd = 'echo "IMPORT INTO kv (k, v) CSV DATA(\\\"nodelocal://1/{1}\\\");" | ' \
                 "{0} sql --insecure --database=kv".format(EXE, nfs_location)
    system_utils.call_remote(a_server_node["ip"], import_cmd)

    # Fix: previously bench_log_files was bound only inside the trial-run
    # branch, so mode=RunMode.WARMUP_ONLY hit a NameError at the return
    # statement below.  Initialize it up front.
    bench_log_files = []

    if mode == RunMode.WARMUP_ONLY or mode == RunMode.WARMUP_AND_TRIAL_RUN:
        # run warmup: one workload process per client, round-robining the
        # clients over the server URLs
        warmup_processes = []
        for i in range(len(client_nodes)):
            node = client_nodes[i]
            cmd = "{0} workload run kv {1} {2} --useOriginal=False".format(
                EXE, server_urls[i % len(server_nodes)], " ".join(args))
            warmup_cmd = cmd + " --duration={}s".format(warm_up_duration)
            individual_node_cmd = "sudo ssh {0} '{1}'".format(
                node["ip"], warmup_cmd)
            print(individual_node_cmd)
            warmup_processes.append(
                subprocess.Popen(shlex.split(individual_node_cmd)))
        for wp in warmup_processes:
            wp.wait()

    if mode == RunMode.TRIAL_RUN_ONLY or mode == RunMode.WARMUP_AND_TRIAL_RUN:
        # making the logs directory, if it doesn't already exist
        log_fpath = os.path.join(log_dir, "logs")
        if not os.path.exists(log_fpath):
            os.makedirs(log_fpath)

        # run trial, capturing each client's stdout into its own log file
        trial_processes = []
        for i in range(len(client_nodes)):
            node = client_nodes[i]
            cmd = "{0} workload run kv {1} {2} --useOriginal=False".format(
                EXE, server_urls[i % len(server_nodes)], " ".join(args))
            trial_cmd = cmd + " --duration={}s".format(duration)
            # logging output for each node
            individual_log_fpath = os.path.join(
                log_fpath, "bench_{}.txt".format(node["ip"]))
            bench_log_files.append(individual_log_fpath)
            # run command
            individual_node_cmd = "sudo ssh {0} '{1}'".format(
                node["ip"], trial_cmd)
            print(individual_node_cmd)
            with open(individual_log_fpath, "w") as f:
                trial_processes.append(
                    subprocess.Popen(shlex.split(individual_node_cmd),
                                     stdout=f))
        for tp in trial_processes:
            tp.wait()

    return bench_log_files