Example #1
def find_congestion(platform, start_time, congestion_ts):
    if kube_env.check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)
    logs = query_storage(platform)
    # find the slice of logs that falls inside the congestion window;
    # anything recorded before our starting time is ignored
    ival_start = -1
    ival_end = None
    for idx, (time_stamp, _) in enumerate(logs):
        if (int(time_stamp) - start_time) >= congestion_ts:
            ival_start = idx
            break
    for idx, (time_stamp, _) in enumerate(logs):
        if (int(time_stamp) - start_time) >= (congestion_ts +
                                              CONGESTION_PERIOD):
            ival_end = idx
            break
    if ival_start == -1:
        # no log entry falls within the congestion window
        log.info("No congestion found")
        return None
    # ival_end stays None when the window runs to the end of the logs
    log_slice = logs[ival_start:ival_end]
    congestion_dict = {}
    for idx, (time_stamp, service_name) in enumerate(log_slice):
        congestion_dict[service_name] = int(time_stamp) - start_time
        # we have congestion at more than 1 service
        if len(congestion_dict) > 1:
            for congested_service, service_ts in congestion_dict.items():
                congestion_ts_str = util.ns_to_timestamp(service_ts)
                log_str = (f"Congestion at {congested_service} "
                           f"at time {congestion_ts_str}")
                log.info(log_str)
            return min(congestion_dict.values())

    log.info("No congestion found")
    return None
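A minimal sketch of how find_congestion might be called, assuming (as the use of util.ns_to_timestamp suggests) that timestamps are nanosecond values; the platform string and the five-second offset below are hypothetical values for illustration.

# Hypothetical call site: look for congestion starting five seconds
# (in nanoseconds) into the experiment.
platform = "MK"  # hypothetical platform identifier
start_time = time.time_ns()  # normally captured before the load run starts
congestion_offset = 5 * 10**9
first_offset = find_congestion(platform, start_time, congestion_offset)
if first_offset is not None:
    log.info("Earliest congestion at %s", util.ns_to_timestamp(first_offset))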
Example #2
def do_experiment(platform, num_experiments, results_dir):
    if kube_env.check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)

    # generate a folder for this test
    test_dir = generate_testfolder(results_dir)

    # clean up any proc listening on 8090 and 9090 just to be safe
    util.kill_tcp_proc(9090)
    util.kill_tcp_proc(8090)

    # once everything has started, retrieve the necessary url info
    _, _, gateway_url = kube_env.get_gateway_info(platform)
    # set up storage to query later
    log.info("Forwarding storage port")
    storage_proc = launch_storage_mon()
    # start fortio load generation
    log.info("Running Fortio")
    fortio_proc = kube_env.start_fortio(gateway_url)
    time.sleep(10)
    do_multiple_runs(platform, num_experiments, test_dir)
    log.info("Killing fortio")
    # terminate fortio by sending an interrupt to the process group
    os.killpg(os.getpgid(fortio_proc.pid), signal.SIGINT)
    # kill the storage proc after the query
    log.info("Killing storage")
    os.killpg(os.getpgid(storage_proc.pid), signal.SIGINT)
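A hypothetical command-line wrapper around do_experiment; the flag names and defaults are illustrative and not part of the original script.

import argparse

if __name__ == "__main__":
    # Illustrative CLI: run N experiments on the chosen platform.
    parser = argparse.ArgumentParser()
    parser.add_argument("--platform", default="MK")  # hypothetical default
    parser.add_argument("--num-experiments", type=int, default=1)
    parser.add_argument("--results-dir", default="results")
    args = parser.parse_args()
    do_experiment(args.platform, args.num_experiments, args.results_dir)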
Example #3
def start_benchmark(custom, filter_dirs, platform, threads, qps, run_time):
    if kube_env.check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        return util.EXIT_FAILURE

    _, _, gateway_url = kube_env.get_gateway_info(platform)
    product_url = f"http://{gateway_url}/productpage"
    log.info("Gateway URL: %s", product_url)
    results = []
    filters = []
    for f in DATA_DIR.glob("*"):
        if f.is_file():
            f.unlink()

    for fd in filter_dirs:
        build_res = kube_env.build_filter(fd)

        if build_res != util.EXIT_SUCCESS:
            log.error(
                "Building filter failed for %s."
                " Make sure you give the right path", fd)
            return util.EXIT_FAILURE

        filter_res = kube_env.refresh_filter(fd)
        if filter_res != util.EXIT_SUCCESS:
            log.error(
                "Deploying filter failed for %s."
                " Make sure you give the right path", fd)
            return util.EXIT_FAILURE

        # wait for kubernetes set up to finish
        time.sleep(10)
        fname = Path(fd).name
        filters.append(fname)
        log.info("Warming up...")
        warmup_res = do_burst(product_url, platform, threads, 10, 1)
        if not warmup_res:
            log.error("No data was collected during warm up")
            return util.EXIT_FAILURE
        if custom == "fortio":
            log.info("Running fortio...")
            fortio_res = run_fortio(product_url, platform, threads, qps,
                                    run_time, fname)
            if fortio_res != util.EXIT_SUCCESS:
                log.error("Error benchmarking for %s", fd)
                return util.EXIT_FAILURE
        else:
            log.info("Generating load...")
            burst_res = do_burst(product_url, platform, threads, qps, run_time)
            results.append(burst_res)

    if custom == "fortio":
        fortio_df, title = transform_fortio_data(filters)
        np.save("fortio", fortio_df)
        return plot(fortio_df, filters, title, fortio=True)
    else:
        loadgen_df = transform_loadgen_data(filters, results)
        np.save("output", loadgen_df)
        return plot(loadgen_df, filters, "", fortio=False)
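A hypothetical invocation of this start_benchmark variant; the filter paths, platform string, and load parameters are made-up illustrations.

# Hypothetical invocation: build and benchmark two filters with fortio,
# each at 100 qps across 4 threads for 60 seconds.
ret = start_benchmark(custom="fortio",
                      filter_dirs=["filters/filter_a", "filters/filter_b"],
                      platform="MK", threads=4, qps=100, run_time=60)
sys.exit(ret)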
Example #4
def launch_storage_mon():
    if kube_env.check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)
    cmd = "kubectl get pods -lapp=storage-upstream "
    cmd += " -o jsonpath={.items[0].metadata.name} -n=storage"
    storage_pod_name = util.get_output_from_proc(cmd).decode("utf-8")
    cmd = f"kubectl -n=storage port-forward {storage_pod_name} 8090:8080"
    storage_proc = util.start_process(cmd, preexec_fn=os.setsid)
    # let things settle for a bit
    time.sleep(2)
    return storage_proc
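A usage sketch that mirrors the teardown in do_experiment above: the helper starts the port-forward in its own process group (preexec_fn=os.setsid), so it is stopped with os.killpg; the probe of localhost:8090 is only a placeholder.

storage_proc = launch_storage_mon()
try:
    # placeholder query against the forwarded storage endpoint
    resp = requests.get("http://localhost:8090")
    log.info("Storage responded with status %d", resp.status_code)
finally:
    os.killpg(os.getpgid(storage_proc.pid), signal.SIGINT)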
Example #5
def launch_prometheus():
    if kube_env.check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)
    cmd = "kubectl get pods -n istio-system -lapp=prometheus "
    cmd += " -o jsonpath={.items[0].metadata.name}"
    prom_pod_name = util.get_output_from_proc(cmd).decode("utf-8")
    cmd = f"kubectl port-forward -n istio-system {prom_pod_name} 9090"
    prom_proc = util.start_process(cmd, preexec_fn=os.setsid)
    time.sleep(2)
    prom_api = PrometheusConnect(url="http://localhost:9090", disable_ssl=True)

    return prom_proc, prom_api
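A usage sketch for the returned pair, assuming the prometheus_api_client package (its PrometheusConnect client exposes custom_query); the PromQL expression is only an example, and teardown follows the same killpg pattern as the other helpers.

prom_proc, prom_api = launch_prometheus()
try:
    # example PromQL query against the forwarded Prometheus instance
    series = prom_api.custom_query(query="istio_requests_total")
    log.info("Fetched %d series from Prometheus", len(series))
finally:
    os.killpg(os.getpgid(prom_proc.pid), signal.SIGINT)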
Example #6
def start_benchmark(filter_dirs, platform, threads, qps, run_time, **kwargs):
    if kube_env.check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        return util.EXIT_FAILURE

    custom = kwargs.get("custom")
    request = kwargs.get("request")
    output = kwargs.get("output_file")
    command_args = " ".join(kwargs.get("command_args"))
    application = APPLICATIONS.get(kwargs.get("application"))

    _, _, gateway_url = kube_env.get_gateway_info(platform)
    path = kwargs.get("subpath")
    url = f"http://{gateway_url}/{path}"
    log.info("Gateway URL: %s", url)

    results = []
    filters = []

    if kwargs.get("no_filter") == "ON":
        filter_dirs.insert(0, "no_filter")
        filters.insert(0, "no_filter")

    for f in DATA_DIR.glob("*"):
        if f.is_file():
            f.unlink()

    for filter_dir in filter_dirs:
        log.info("Benchmarking %s", filter_dir)
        fname = Path(filter_dir).name
        if filter_dir != "no_filter":
            res = build_and_deploy_filter(filter_dir)
            if res != util.EXIT_SUCCESS:
                return util.EXIT_FAILURE
            filters.append(fname)

        log.info("Warming up...")
        for _ in range(10):
            requests.get(url)

        if custom == "locust":
            if not application:
                log.error("Provided application does not exists")
                return util.EXIT_FAILURE
            log.info("Running locust...")
            res = run_locust(f"http://{gateway_url}", platform, command_args,
                             application, fname, run_time,
                             kwargs.get("num_users"), kwargs.get("spawn_rate"))
            if res != util.EXIT_SUCCESS:
                log.error("Error benchmarking %s application", application)
                return util.EXIT_FAILURE
        elif custom == "fortio":
            log.info("Running fortio...")
            fortio_res = run_fortio(url, platform, threads, qps, run_time,
                                    command_args, fname)
            if fortio_res != util.EXIT_SUCCESS:
                log.error("Error benchmarking for %s", fd)
                return util.EXIT_FAILURE
        elif custom == "loadgen":
            log.info("Generating load...")
            burst_res = run_loadgen(url, platform, threads, qps, run_time,
                                    request)
            results.append(burst_res)
        else:
            log.error("Invalid load generator")
            return util.EXIT_FAILURE

    # Plot functions
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    npy_file = f"{custom} {application} {timestamp}"
    npy_file_dir = str(NPY_DIR.joinpath(npy_file))
    graph_output = f"{custom} {application} {timestamp}"
    util.check_dir(NPY_DIR)
    if custom == "locust":
        locust_df = transform_locust_data(filters, application)
        np.save(npy_file_dir, locust_df)
        return plot(locust_df, filters, graph_output, custom)
    elif custom == "fortio":
        fortio_df, title = transform_fortio_data(filters)
        np.save(npy_file_dir, fortio_df)
        return plot(fortio_df, filters, graph_output, custom)
    elif custom == "loadgen":
        loadgen_df = transform_loadgen_data(filters, results)
        np.save(npy_file_dir, loadgen_df)
        return plot(loadgen_df, filters, graph_output, custom)
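A hypothetical call that exercises the keyword arguments this variant reads through kwargs; every concrete value below (the filter directory, the platform string, the "bookinfo" application key, the subpath) is an assumption for illustration.

ret = start_benchmark(
    ["wasm/my_filter"],        # hypothetical filter directory
    "MK", 4, 100, 60,          # platform, threads, qps, run_time
    custom="locust",
    request="GET",
    output_file=None,
    command_args=[],
    application="bookinfo",    # assumed key in APPLICATIONS
    subpath="productpage",
    no_filter="OFF",
    num_users=50,
    spawn_rate=10,
)
sys.exit(ret)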