def refresh_filter(filter_dir):
    """Rebuild the filter config map, redeploy the filter, restart the
    affected deployments, and record how long the whole refresh took.

    The elapsed time is logged and appended to update_times.csv.
    Returns util.EXIT_SUCCESS on success, otherwise the failing step's code.
    """
    start_time = time.time()
    # delete and recreate the config map
    update_conf_map(filter_dir)
    # activate the filter
    cmd = f"kubectl apply -f {YAML_DIR}/filter.yaml"
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    # this is equivalent to a deployment restart right now
    cmd = "kubectl rollout restart deployments --namespace=default"
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    # also reset storage since we are working with a different filter now
    cmd = "kubectl rollout restart deployment storage-upstream -n=storage "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    result = application_wait()
    if result != util.EXIT_SUCCESS:
        return result
    end_time = time.time()
    log.info("To update filter, took %d", end_time - start_time)
    with open("update_times.csv", 'a+') as csv_file:
        w = csv.writer(csv_file)
        w.writerow([end_time - start_time])
    # BUG FIX: the original ended with `return application_wait()`, waiting a
    # second time even though readiness was already confirmed (and timed)
    # above. Return the already-successful result instead.
    return result
def deploy_addons(addons):
    """Deploy the given Istio addons and wait for their rollouts.

    "prometheus-mod" is applied from the local YAML_DIR; every other addon
    is applied from the upstream istio release-1.9 samples. Returns
    util.EXIT_SUCCESS once the apply succeeded and the waits have run.
    """
    apply_cmd = "kubectl apply -f "
    url = "https://raw.githubusercontent.com/istio/istio/release-1.9"
    cmd = ""
    # BUG FIX: the original did `if "kiali" in addons: addons.append("kiali")`,
    # which duplicated the kiali entry (applying its manifest twice) and
    # mutated the caller's list in place.
    for (idx, addon) in enumerate(addons):
        if addon == "prometheus-mod":
            cmd += f"{apply_cmd} {YAML_DIR}/prometheus-mod.yaml"
        else:
            cmd += f"{apply_cmd} {url}/samples/addons/{addon}.yaml"
        if idx < len(addons) - 1:
            cmd += " && "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    cmd = "kubectl get deploy -n istio-system -o name"
    deployments = util.get_output_from_proc(cmd).decode("utf-8").strip()
    deployments = deployments.split("\n")
    for depl in deployments:
        wait_cmd = "kubectl rollout status -n istio-system "
        wait_cmd += f"{depl} -w --timeout=180s"
        # best effort: a slow addon rollout does not fail the deployment
        _ = util.exec_process(wait_cmd)
    log.info("Addons are ready.")
    return util.EXIT_SUCCESS
def delete_config_map():
    """Delete the filter config map from the default and storage namespaces.

    Both deletions tolerate failure (the map may simply not exist); the
    storage-namespace result is returned as-is.
    """
    default_cmd = f"kubectl delete configmap {CM_FILTER_NAME} "
    if util.exec_process(default_cmd, allow_failures=True) != util.EXIT_SUCCESS:
        log.warning("Failed to delete the config map, it does not exist.")
    # repeat this process for stage
    storage_cmd = f"kubectl delete -n storage configmap {CM_FILTER_NAME} "
    return util.exec_process(storage_cmd, allow_failures=True)
def create_conf_map(filter_dir):
    """Create the filter config maps from the compiled wasm binaries.

    Installs filter.wasm in the default namespace and agg_filter.wasm in
    the storage namespace. Returns the exit code of the failing (or last)
    kubectl invocation.
    """
    default_cmd = (f"kubectl create configmap {CM_FILTER_NAME} "
                   f"--from-file {filter_dir}/wasm_bins/filter.wasm ")
    status = util.exec_process(default_cmd)
    if status != util.EXIT_SUCCESS:
        log.error("Failed to create config map.")
        return status
    # also refresh the aggregation filter
    storage_cmd = (f"kubectl -n storage create configmap {CM_FILTER_NAME} "
                   f"--from-file {filter_dir}/wasm_bins/agg_filter.wasm ")
    return util.exec_process(storage_cmd)
def patch_bookinfo():
    """Patch every default-namespace deployment (plus the storage upstream)
    with the config-map mount patch.

    Per-deployment failures are logged but do not stop the loop; the return
    value reflects only the storage patch.
    """
    listing = util.get_output_from_proc("kubectl get deploy -o name")
    for depl in listing.decode("utf-8").strip().split("\n"):
        patch_cmd = (f"kubectl patch {depl} "
                     f"--patch-file {YAML_DIR}/cm_patch.yaml ")
        if util.exec_process(patch_cmd) != util.EXIT_SUCCESS:
            log.error("Failed to patch %s.", depl)
    # we also patch storage
    storage_patch = ("kubectl patch -n storage deployment.apps/storage-upstream "
                     f"--patch-file {YAML_DIR}/cm_patch.yaml ")
    result = util.exec_process(storage_patch)
    if result != util.EXIT_SUCCESS:
        log.error("Failed to patch storage.")
    return result
def undeploy_filter():
    """Tear the filter down: drop its config maps, delete the filter
    manifest (best effort), and redeploy the stock bookinfo application."""
    # delete the config map
    delete_config_map()
    delete_cmd = f"kubectl delete -f {YAML_DIR}/filter.yaml "
    if util.exec_process(delete_cmd, allow_failures=True) != util.EXIT_SUCCESS:
        log.warning("Failed to delete the filter.")
    # restore the original bookinfo
    return deploy_bookinfo()
def remove_bookinfo():
    """Run the upstream bookinfo cleanup script, then delete the storage
    and productpage-cluster manifests. Returns the combined exit code."""
    # remove bookinfo
    bookinfo_dir = f"{ISTIO_DIR}/samples/bookinfo"
    cmd = (f"{bookinfo_dir}/platform/kube/cleanup.sh &&"
           f"kubectl delete -f {YAML_DIR}/storage.yaml && "
           f"kubectl delete -f {YAML_DIR}/productpage-cluster.yaml ")
    return util.exec_process(cmd)
def bookinfo_wait():
    """Wait for every default-namespace deployment to finish rolling out
    (180s timeout each). Individual waits are best effort; always returns
    util.EXIT_SUCCESS."""
    listing = util.get_output_from_proc("kubectl get deploy -o name")
    for depl in listing.decode("utf-8").strip().split("\n"):
        util.exec_process(f"kubectl rollout status {depl} -w --timeout=180s")
    log.info("Bookinfo is ready.")
    return util.EXIT_SUCCESS
def inject_istio():
    """Install the Istio demo profile (with tracing) into the default and
    storage namespaces and enable sidecar injection on both.

    Returns util.EXIT_SUCCESS on success, otherwise the failing step's code.
    """
    cmd = f"{ISTIO_BIN} install --set profile=demo "
    cmd += "--set meshConfig.enableTracing=true --skip-confirmation "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    cmd = "kubectl label namespace default istio-injection=enabled --overwrite"
    result = util.exec_process(cmd)
    # BUG FIX: the original overwrote this result without checking it, so a
    # failure to label the default namespace went unnoticed; every sibling
    # step in this function checks its result.
    if result != util.EXIT_SUCCESS:
        return result
    cmd = f"{ISTIO_BIN} install --set profile=demo -n storage "
    cmd += "--set meshConfig.enableTracing=true --skip-confirmation "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    cmd = "kubectl label namespace storage istio-injection=enabled --overwrite"
    result = util.exec_process(cmd)
    return result
def stop_kubernetes(platform):
    """Tear down the demo cluster: gcloud deletion on "GCP", minikube
    deletion otherwise. Returns the command's exit code."""
    if platform == "GCP":
        teardown = ("gcloud container clusters delete "
                    "demo --zone us-central1-a --quiet ")
    else:
        # delete minikube
        teardown = "minikube delete"
    return util.exec_process(teardown)
def run_locust(url, platform, command_args, application, filename, run_time,
               num_users, spawn_rate):
    """Run a headless locust load test for `application` against `url`.

    CSV results are written with a prefix derived from the application name,
    suffixed with the filter name unless `filename` is "no_filter".
    Returns the exit status from util.exec_process.
    """
    py_file_dir = str(FILE_DIR.joinpath(f"{application}.py"))
    csv_prefix = str(GRAPHS_DIR.joinpath(f"{application}"))
    if filename != "no_filter":
        # BUG FIX: the original appended a literal placeholder string (an
        # f-string with no substitution); the guard above shows the suffix
        # was meant to carry the filter name.
        csv_prefix += f"_{filename}"
    cmd = (f"locust -f {py_file_dir} -H {url} {command_args} "
           f"--csv {csv_prefix} --headless "
           f"-t {run_time} -u {num_users} -r {spawn_rate}")
    res = util.exec_process(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return res
def run_fortio(url, platform, threads, qps, run_time, file_name):
    """Run a fortio load test against `url` and store the JSON report.

    fortio itself writes the report to DATA_DIR/<file_name>.json via the
    -json flag. Returns the exit status from util.exec_process.
    """
    util.check_dir(DATA_DIR)
    output_file = str(DATA_DIR.joinpath(f"{file_name}.json"))
    fortio_dir = str(FORTIO_DIR)
    cmd = f"{fortio_dir} "
    cmd += f"load -c {threads} -qps {qps} -jitter -t {run_time}s -json {output_file} "
    cmd += f"{url}"
    # BUG FIX: the original wrapped this call in `with open(output_file, "w")`
    # but never used the handle — it only truncated the very file fortio
    # writes with -json and held it open while the subprocess ran.
    fortio_res = util.exec_process(cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
    return fortio_res
def deploy_application(application):
    """Deploy `application` along with the storage, istio-config, and
    root-cluster manifests.

    Exits the process when kubernetes is unreachable. The readiness wait is
    best effort; the return value reflects the apply commands only.
    """
    if check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)
    cmd = " && ".join([
        CONFIG_MATRIX[application]['deploy_cmd'],
        f"{APPLY_CMD} {YAML_DIR}/storage.yaml",
        f"{APPLY_CMD} {YAML_DIR}/istio-config.yaml",
        f"{APPLY_CMD} {YAML_DIR}/root-cluster.yaml ",
    ])
    result = util.exec_process(cmd)
    application_wait()
    return result
def remove_addons(addons):
    """Delete the manifests of the given addons, tolerating ones that were
    never installed (--ignore-not-found). Returns the combined exit code."""
    remove_cmd = "kubectl delete -f"
    url = "https://raw.githubusercontent.com/istio/istio/release-1.9"
    pieces = []
    for addon in addons:
        if addon == "prometheus-mod":
            pieces.append(f"{remove_cmd} {YAML_DIR}/prometheus-mod.yaml --ignore-not-found=true")
        else:
            pieces.append(f"{remove_cmd} {url}/samples/addons/{addon}.yaml --ignore-not-found=true")
    return util.exec_process(" && ".join(pieces))
def start_kubernetes(platform, multizonal, application):
    """Create the cluster for `application` plus the storage namespace.

    On "GCP" this creates a gcloud cluster (optionally multizonal) and
    enables the required Google Cloud APIs; any other platform starts
    minikube. Returns util.EXIT_SUCCESS on success, an error code on
    failure, or a descriptive string when the application has no minikube
    startup command.
    """
    if platform == "GCP":
        # 1. Create cluster enabled with Istio already
        cmd = CONFIG_MATRIX[application]['gcloud_startup_command']
        if multizonal:
            cmd += "--region us-central1-a --node-locations us-central1-b "
            cmd += "us-central1-c us-central1-a "
        else:
            cmd += "--zone=us-central1-a "
        result = util.exec_process(cmd)
        # BUG FIX: the original discarded the cluster-creation result and
        # carried on regardless; every other step here checks its result.
        if result != util.EXIT_SUCCESS:
            return result
        cmd = f"gcloud services enable container.googleapis.com --project {PROJECT_ID} &&"
        cmd += "gcloud services enable monitoring.googleapis.com cloudtrace.googleapis.com "
        cmd += f"clouddebugger.googleapis.com cloudprofiler.googleapis.com --project {PROJECT_ID}"
        result = util.exec_process(cmd)
        if result != util.EXIT_SUCCESS:
            return result
        # 2. Create storage namespace
        cmd = "kubectl create namespace storage"
        result = util.exec_process(cmd)
        if result != util.EXIT_SUCCESS:
            return result
    else:
        # 1. Create cluster
        if CONFIG_MATRIX[application]['minikube_startup_command'] is not None:
            cmd = CONFIG_MATRIX[application]['minikube_startup_command']
            result = util.exec_process(cmd)
            if result != util.EXIT_SUCCESS:
                return result
        else:
            # NOTE(review): returning a string here is inconsistent with the
            # numeric exit codes used elsewhere, but callers may rely on it,
            # so it is preserved.
            return "APPLICATION IS NOT SUPPORTED ON MINIKUBE"
        # 2. Create storage namespace
        cmd = "kubectl create namespace storage"
        result = util.exec_process(cmd)
        if result != util.EXIT_SUCCESS:
            return result
    return result
def refresh_filter(filter_dir):
    """Recreate the filter config map, reapply the filter manifest, restart
    the affected deployments, and wait for bookinfo to be ready.

    NOTE(review): this is a second definition of refresh_filter in this
    file; coming later, it shadows the earlier timed variant at import time.
    """
    # delete and recreate the config map
    update_conf_map(filter_dir)
    # activate the filter
    apply_result = util.exec_process(f"kubectl apply -f {YAML_DIR}/filter.yaml")
    if apply_result != util.EXIT_SUCCESS:
        return apply_result
    restart_cmds = [
        # this is equivalent to a deployment restart right now
        "kubectl rollout restart deployments --namespace=default",
        # also reset storage since we are working with a different filter now
        "kubectl rollout restart deployment storage-upstream -n=storage ",
    ]
    for restart_cmd in restart_cmds:
        status = util.exec_process(restart_cmd)
        if status != util.EXIT_SUCCESS:
            return status
    return bookinfo_wait()
def run_fortio(url, platform, threads, qps, run_time, command_args, filename):
    """Run a fortio load test against `url` with extra CLI arguments.

    fortio writes the JSON report to DATA_DIR/<filename>.json via -json.
    Returns the exit status from util.exec_process.
    """
    util.check_dir(DATA_DIR)
    # BUG FIX: the original output path was an f-string with no placeholder,
    # leaving the otherwise-unused `filename` parameter out of the path.
    output_file = str(DATA_DIR.joinpath(f"{filename}.json"))
    fortio_dir = str(FORTIO_DIR)
    cmd = f"{fortio_dir} load "
    cmd += f"-c {threads} -qps {qps} -timeout 50s -t {run_time}s -json {output_file} "
    # BUG FIX: the original line lacked the f-prefix, so the literal text
    # "{command_args}" was passed to fortio instead of the caller's args.
    cmd += f" {command_args} "
    cmd += f"{url}"
    # BUG FIX: removed the unused `with open(output_file, "w")` wrapper that
    # truncated the report file fortio itself writes.
    res = util.exec_process(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return res
def _cargo_build_wasm(filter_dir, manifest_path):
    """Run one nightly cargo wasm build for `manifest_path`, emitting the
    binary into filter_dir/wasm_bins. Returns the cargo exit code."""
    cmd = "cargo +nightly build -Z unstable-options "
    cmd += "--target=wasm32-unknown-unknown --release "
    cmd += f"--out-dir {filter_dir}/wasm_bins "
    cmd += f"--target-dir {filter_dir}/target "
    cmd += f"--manifest-path {manifest_path} "
    return util.exec_process(cmd)


def build_filter(filter_dir):
    """Compile the main and aggregation wasm filters found in filter_dir.

    Returns util.EXIT_SUCCESS when both builds succeed, otherwise the
    failing build's exit code. (The two cargo invocations were previously
    duplicated inline; they differ only in the manifest path.)
    """
    # TODO: Move this into a script in the filter dir
    log.info("Building filter...")
    result = _cargo_build_wasm(filter_dir, f"{filter_dir}/Cargo.toml")
    if result != util.EXIT_SUCCESS:
        return result
    # Also build the aggregation filter
    result = _cargo_build_wasm(filter_dir, f"{filter_dir}/agg/Cargo.toml")
    if result != util.EXIT_SUCCESS:
        return result
    log.info("Build successful!")
    return result
def deploy_filter(filter_dir):
    """Install the wasm filter: create its config maps, patch the
    deployments to mount them, and apply the filter manifest.

    When the config map already exists, the deployment is assumed to be
    patched already and only the map contents are refreshed.
    """
    # check if the config map already exists
    # we assume that if the config map does not exist in default
    # it also does not exist in storage
    probe_cmd = f"kubectl get configmaps {CM_FILTER_NAME} "
    if util.exec_process(probe_cmd, allow_failures=True) == util.EXIT_SUCCESS:
        # Config map exists, assume that the deployment is already modded
        log.warning("Config map %s already exists!", CM_FILTER_NAME)
        # delete and recreate the config map
        return update_conf_map(filter_dir)
    # create the config map with the filter
    status = create_conf_map(filter_dir)
    if status != util.EXIT_SUCCESS:
        return status
    # update the containers with the config map
    status = patch_bookinfo()
    if status != util.EXIT_SUCCESS:
        return status
    # now activate the filter
    return util.exec_process(f"kubectl apply -f {YAML_DIR}/filter.yaml")
def setup_bookinfo_deployment(platform, multizonal):
    """Bring up a cluster, create the storage namespace, install Istio,
    and deploy bookinfo. Returns the first failing step's code, or the
    result of the bookinfo deployment.
    """
    result = start_kubernetes(platform, multizonal)
    # BUG FIX: the original ignored the cluster-startup result entirely and
    # pressed on even when cluster creation failed.
    if result != util.EXIT_SUCCESS:
        return result
    cmd = " kubectl create namespace storage "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    result = inject_istio()
    if result != util.EXIT_SUCCESS:
        return result
    result = deploy_bookinfo()
    if result != util.EXIT_SUCCESS:
        return result
    return result
def start_kubernetes(platform, multizonal):
    """Start a cluster: an autoscaling GKE cluster on "GCP" (optionally
    spread across zones), minikube otherwise. Returns the exit code."""
    if platform == "GCP":
        startup = "gcloud container clusters create demo --enable-autoupgrade "
        startup += "--enable-autoscaling --min-nodes=3 "
        startup += "--max-nodes=10 --num-nodes=5 "
        if multizonal:
            startup += "--region us-central1-a --node-locations us-central1-b "
            startup += "us-central1-c us-central1-a "
        else:
            startup += "--zone=us-central1-a "
    else:
        startup = "minikube start --memory=6144 --cpus=2 "
    return util.exec_process(startup)
def deploy_bookinfo():
    """Apply the bookinfo manifests plus storage, istio config, and the
    productpage cluster.

    Exits the process when kubernetes is unreachable. The readiness wait is
    best effort; the return value reflects the apply commands only.
    """
    if check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)
    # launch bookinfo
    samples_dir = f"{ISTIO_DIR}/samples"
    bookinfo_dir = f"{samples_dir}/bookinfo"
    apply_cmd = "kubectl apply -f"
    book_cmd = f"{apply_cmd} {bookinfo_dir}"
    cmd = " && ".join([
        f"{apply_cmd} {YAML_DIR}/bookinfo-services.yaml",
        f"{apply_cmd} {YAML_DIR}/bookinfo-apps.yaml",
        f"{book_cmd}/networking/bookinfo-gateway.yaml",
        f"{book_cmd}/networking/destination-rule-reviews.yaml",
        f"{apply_cmd} {YAML_DIR}/storage.yaml",
        f"{apply_cmd} {YAML_DIR}/istio-config.yaml",
        f"{apply_cmd} {YAML_DIR}/productpage-cluster.yaml ",
    ])
    result = util.exec_process(cmd)
    bookinfo_wait()
    return result
def remove_application(application):
    """Undeploy `application` and delete its storage and root-cluster
    manifests. Returns the combined exit code."""
    cmd = " && ".join([
        CONFIG_MATRIX[application]['undeploy_cmd'],
        f"{DELETE_CMD} {YAML_DIR}/storage.yaml",
        f"{DELETE_CMD} {YAML_DIR}/root-cluster.yaml ",
    ])
    return util.exec_process(cmd)
def inject_failure():
    """Apply the fault-injection manifest; returns its exit code."""
    return util.exec_process(f"kubectl apply -f {YAML_DIR}/fault-injection.yaml ")
def remove_failure():
    """Delete the fault-injection manifest; returns its exit code."""
    return util.exec_process(f"kubectl delete -f {YAML_DIR}/fault-injection.yaml ")
def check_kubernetes_status():
    """Probe the cluster with `kubectl cluster-info` (output suppressed)
    and return the command's exit code."""
    return util.exec_process("kubectl cluster-info",
                             stdout=util.subprocess.PIPE,
                             stderr=util.subprocess.PIPE)