def worker():
    Thread(target=init_vegeta).start()
    Thread(target=init_vegeta_v2).start()

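    # let both vegeta load generators run before sampling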
    time.sleep(10)
    print("kill vegeta")
    kill_vegeta(CURRENT_TRAFFIC)
    kill_vegeta(CURRENT_TRAFFIC_V2)

    time.sleep(1)
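    # read per-service throughput from the vegeta report files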
    r1_throughput = get_throughput('./servicec.txt')
    r2_throughput = get_throughput('./servicec2.txt')
    current_overall_throughput = r1_throughput + r2_throughput

    # lstm part
    # current_stat_list = generate_metric(5, query_dict)
    # latency = get_mean_latency('./servicea.txt')
    # empty_stat_list = []
    # for i in range(len(current_stat_list)):
    #     current_stat_list[i]["mean_latency"] = latency
    #     empty_stat_list += [float(i) for i in list(current_stat_list[i].values())]

    # new_limit = lstm_model(LSTM_MODEL_DIR, empty_stat_list)
    # exit()

    # get current stat
    current_stat = generate_metric(1, query_dict)[0]
    current_stat["mean_latency"] = get_mean_latency('./servicec.txt')

    current_stat_val = [float(v) for v in current_stat.values()]

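    # predict a new rate limit for the current service from the live metrics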
    new_limit = r1_c_predictor(current_stat_val)
    # new_limit = fc_model(FC_MODEL_DIR, current_stat_val)

    new_limit = int(new_limit)
    if new_limit < 0:
        return
    print("new_limit: ", str(new_limit))

    current_limit_dict = get_all_keys()
    old_limit = current_limit_dict[CURRENT_REQUEST_SERVICE]
    current_limit_dict[CURRENT_REQUEST_SERVICE] = new_limit

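    # ask the checker model what overall throughput the proposed limits would yield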
    predicted_throughput = checkerInfer(
        [float(v) for v in current_limit_dict.values()])

    print("Trying to edit descriptor " + CURRENT_REQUEST_SERVICE + " from " +
          str(old_limit) + " To " + str(new_limit))
    print("predicted_throughput: " + str(predicted_throughput))
    print("current_throughput: " + str(current_overall_throughput))
    if predicted_throughput >= current_overall_throughput:
        print("Edit Sucess")
        edit_key(CURRENT_REQUEST_SERVICE, new_limit)
    else:
        print("Edit Fail")
Example #2

    # (snippet truncated in the source: `one_record` and the sample index `i`
    # are assumed to come from an enclosing sampling loop)
    Thread(target=init_vegeta).start()
    Thread(target=init_vegeta_v2).start()

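    # let the traffic run for 20 seconds, then stop both generators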
    time.sleep(20)
    print("kill vegeta")
    kill_vegeta(CURRENT_TRAFFIC)
    kill_vegeta(CURRENT_TRAFFIC_V2)

    time.sleep(1)
    r1_throughput = get_throughput('./out.txt')
    r2_throughput = get_throughput('./out2.txt')

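    # snapshot the current rate-limit descriptors alongside the measured stats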
    current_limit_dict = get_all_keys()
    for key, val in current_limit_dict.items():
        one_record[key] = val
    one_record["r1_throughput"] = r1_throughput
    one_record["r2_throughput"] = r2_throughput
    one_record["r1_mean_latency"] = get_mean_latency('./out.txt')
    one_record["r2_mean_latency"] = get_mean_latency('./out2.txt')
    one_record["throughput"] = r1_throughput + r2_throughput

    # list_of_dict.append(one_record)
    print("generate " + str(i)+ " th sample ..................")
    print(one_record)
    appendlist2csv(one_record, "checkerDatasetv2.csv")


# nestedlist2csv(list_of_dict, "checkerDataset" + time.asctime(time.localtime(time.time())) +".csv")
# nestedlist2csv(list_of_dict, "checkerDatasetv2.csv")

kill_docker_compose()
Example #3
def appendLatency(sourceFile, outputFile, latency):
    # backfill the measured mean latency into every row of the dataset
    df = pd.read_csv(sourceFile, low_memory=False)
    df['mean_latency'] = latency

    columns_val = list(df.columns.values)
    df.to_csv(outputFile, columns=columns_val, index=False, header=True)
    print("append latency success!!")


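# generate the dataset, tear down the traffic/compose harness, then backfill latency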
if __name__ == "__main__":
    # generate dataset
    list_of_dict = generate_dataet(NUMOFRECORD, query_dict)

    nestedlist2csv(list_of_dict,
                   CURRENT_REQUEST_SERVICE + "_" + current_limit_str + ".csv")

    print("kill vegeta and stop docker compose")
    kill_vegeta(CURRENT_TRAFFIC)
    kill_vegeta(CURRENT_TRAFFIC_V2)

    kill_docker_compose()

    latency = get_mean_latency("./out.txt")
    sourceFile = CURRENT_REQUEST_SERVICE + "_" + current_limit_str + ".csv"
    outputFile = sourceFile

    appendLatency(sourceFile, outputFile, latency)
Example #4
def worker():
    Thread(target=init_vegeta).start()
    Thread(target=init_vegeta_v2).start()

    time.sleep(10)
    print("kill vegeta")
    kill_vegeta(CURRENT_TRAFFIC)
    kill_vegeta(CURRENT_TRAFFIC_V2)

    time.sleep(1)
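    # measure throughput for both request classes and total them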
    r1_throughput = get_throughput('./servicecb.txt')
    r2_throughput = get_throughput('./servicecb2.txt')
    current_overall_throughput = r1_throughput + r2_throughput

    # lstm part
    # current_stat_list = generate_metric(5, query_dict)
    # latency = get_mean_latency('./servicea.txt')
    # empty_stat_list = []
    # for i in range(len(current_stat_list)):
    #     current_stat_list[i]["mean_latency"] = latency
    #     empty_stat_list += [float(i) for i in list(current_stat_list[i].values())]

    # r1_b_new_limit = lstm_model("models/dl/r1_service_b_lstm", empty_stat_list)
    # exit()

    # get current stat
    current_stat = generate_metric(1, query_dict)[0]

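    # predict a new limit for r1_service_b from its own latency reading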
    current_stat["mean_latency"] = get_mean_latency('./servicecb.txt')

    current_stat_val = [float(v) for v in current_stat.values()]
    r1_b_new_limit = r1_b_predictor(current_stat_val)
    # r1_b_new_limit = fc_model("models/dl/r1_service_b_fc", current_stat_val)

    r1_b_new_limit = int(r1_b_new_limit)
    if r1_b_new_limit < 0:
        return

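    # repeat for r2_service_b, reusing the same metric snapshot with r2's latency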
    current_stat["mean_latency"] = get_mean_latency('./servicecb2.txt')
    current_stat_val = [float(v) for v in current_stat.values()]
    r2_b_new_limit = r2_b_predictor(current_stat_val)
    # r2_b_new_limit = fc_model("models/dl/r2_service_b_fc", current_stat_val)
    # r2_b_new_limit = lstm_model("models/dl/r2_service_b_lstm", empty_stat_list)

    r2_b_new_limit = int(r2_b_new_limit)
    if r2_b_new_limit < 0:
        return
    current_limit_dict = get_all_keys()
    old_r1_service_b = current_limit_dict["r1_service_b"]
    old_r2_service_b = current_limit_dict["r2_service_b"]

    current_limit_dict["r1_service_b"] = r1_b_new_limit
    current_limit_dict["r2_service_b"] = r2_b_new_limit

    predicted_throughput = checkerInfer(
        [float(v) for v in current_limit_dict.values()])

    print("predicted_throughput: " + str(predicted_throughput))
    print("current_throughput: " + str(current_overall_throughput))
    print("Trying to edit descriptor " + "r1_service_b" + " from " +
          str(old_r1_service_b) + " To " + str(r1_b_new_limit))
    print("Trying to edit descriptor " + "r2_service_b" + " from " +
          str(old_r2_service_b) + " To " + str(r2_b_new_limit))

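    # commit both edits only if the checker predicts no overall throughput loss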
    if predicted_throughput >= current_overall_throughput:
        print("edit success")
        edit_key("r1_service_b", r1_b_new_limit)
        edit_key("r2_service_b", r2_b_new_limit)
    else:
        print("edit fail, trying to apply a partial rate")
        do_priority_part(old_r1_service_b, old_r2_service_b, r1_b_new_limit,
                         r2_b_new_limit)