def check_experiment(engine_name, experiment_name, namespace):
    """Wait for a litmus chaos engine to run and complete, then report the verdict.

    Exits the process if the engine never reaches the running state.
    Returns True when the chaosresult verdict is "Pass", False otherwise.
    """
    # Bail out early if the engine never starts running.
    if not wait_for_status(engine_name, "running", experiment_name, namespace):
        sys.exit(1)
    wait_for_status(engine_name, "completed", experiment_name, namespace)
    # chaosresult objects are named <engine>-<experiment>.
    verdict = runcommand.invoke(
        "kubectl get chaosresult %s"
        "-%s -n %s -o "
        "jsonpath='{.status.experimentStatus.verdict}'"
        % (engine_name, experiment_name, namespace))
    if verdict == "Pass":
        logging.info("Engine " + str(engine_name) + " finished with status " + str(verdict))
        return True
    # On failure, fetch the failing step for the log.
    fail_step = runcommand.invoke(
        "kubectl get chaosresult %s"
        "-%s -n %s -o jsonpath="
        "'{.status.experimentStatus.failStep}'"
        % (engine_name, experiment_name, namespace))
    logging.info("Chaos scenario:" + engine_name + " failed with error: " + str(fail_step))
    logging.info("See 'kubectl get chaosresult %s"
                 "-%s -n %s -o yaml' for full results"
                 % (engine_name, experiment_name, namespace))
    return False
def delete_chaos_experiments(namespace):
    """Delete every litmus chaosexperiment object in *namespace*, if the namespace exists."""
    if not kubecli.check_if_namespace_exists(namespace):
        return
    probe = runcommand.invoke_no_exit("kubectl get chaosexperiment")
    # invoke_no_exit embeds the subprocess error text when the resource type is absent.
    if "returned non-zero exit status 1" not in probe:
        logging.info("Deleting all litmus experiments")
        runcommand.invoke("kubectl delete chaosexperiment --all -n " + str(namespace))
def container_killing_in_pod(cont_scenario):
    """Kill containers described by a container-scenario config dict.

    cont_scenario keys (all optional): name, namespace (default "*" = all
    namespaces), label_selector, pod_names (list), container_name,
    action (default "kill 1"), count (default 1).

    Returns a list of [pod_name, namespace, container_name] entries killed.
    Exits the process on configuration errors or when more kills are
    requested than matching containers exist.
    """
    scenario_name = cont_scenario.get("name", "")
    namespace = cont_scenario.get("namespace", "*")
    label_selector = cont_scenario.get("label_selector", None)
    pod_names = cont_scenario.get("pod_names", [])
    container_name = cont_scenario.get("container_name", "")
    kill_action = cont_scenario.get("action", "kill 1")
    kill_count = cont_scenario.get("count", 1)
    # pod_names must be a list so the selection logic below can iterate it
    if type(pod_names) != list:
        logging.error("Please make sure your pod_names are in a list format")
        sys.exit(1)
    if len(pod_names) == 0:
        if namespace == "*":
            # returns double array of pod name and namespace
            pods = kubecli.get_all_pods(label_selector)
        else:
            # Only returns pod names
            pods = kubecli.list_pods(namespace, label_selector)
    else:
        if namespace == "*":
            # explicit pod names require a concrete namespace to resolve them
            logging.error("You must specify the namespace to kill a container in a specific pod")
            logging.error("Scenario " + scenario_name + " failed")
            sys.exit(1)
        pods = pod_names
    # get container and pod name: build [pod, namespace, [containers...]] triples
    container_pod_list = []
    for pod in pods:
        if type(pod) == list:
            # entry is a [pod_name, namespace] pair from get_all_pods()
            container_names = runcommand.invoke(
                'kubectl get pods %s -n %s -o jsonpath="{.spec.containers[*].name}"' % (pod[0], pod[1])
            ).split(" ")
            container_pod_list.append([pod[0], pod[1], container_names])
        else:
            # entry is a bare pod name; use the scenario namespace
            container_names = runcommand.invoke(
                'oc get pods %s -n %s -o jsonpath="{.spec.containers[*].name}"' % (pod, namespace)
            ).split(" ")
            container_pod_list.append([pod, namespace, container_names])
    killed_count = 0
    killed_container_list = []
    while killed_count < kill_count:
        if len(container_pod_list) == 0:
            logging.error("Trying to kill more containers than were found, try lowering kill count")
            logging.error("Scenario " + scenario_name + " failed")
            sys.exit(1)
        # pick a random pod entry; each pod is used at most once (removed below)
        selected_container_pod = container_pod_list[random.randint(0, len(container_pod_list) - 1)]
        for c_name in selected_container_pod[2]:
            if container_name != "":
                # only kill the container matching the configured name
                if c_name == container_name:
                    killed_container_list.append([selected_container_pod[0], selected_container_pod[1], c_name])
                    retry_container_killing(kill_action,
                                            selected_container_pod[0], selected_container_pod[1], c_name)
                    break
            else:
                # no filter configured: kill the first container of the pod
                killed_container_list.append([selected_container_pod[0], selected_container_pod[1], c_name])
                retry_container_killing(kill_action, selected_container_pod[0], selected_container_pod[1], c_name)
                break
        container_pod_list.remove(selected_container_pod)
        killed_count += 1
    logging.info("Scenario " + scenario_name + " successfully injected")
    return killed_container_list
def find_kraken_node():
    """Locate the node running the kraken-deployment pod and record it in the
    module-level ``kraken_node_name`` global.

    Relies on get_all_pods() returning [pod_name, namespace] pairs.
    Temporarily switches the kubectl context namespace, then resets it to
    "default".
    """
    pods = get_all_pods()
    kraken_pod_name = None
    for pod in pods:
        if "kraken-deployment" in pod[0]:
            kraken_pod_name = pod[0]
            kraken_project = pod[1]
            break
    # have to switch to proper project
    if kraken_pod_name:
        # get kraken-deployment pod, find node name
        runcommand.invoke("kubectl config set-context --current --namespace=" + str(kraken_project))
        pod_json_str = runcommand.invoke("kubectl get pods/" + str(kraken_pod_name) + " -o json")
        pod_json = json.loads(pod_json_str)
        node_name = pod_json['spec']['nodeName']
        # Reset to the default project
        runcommand.invoke(
            "kubectl config set-context --current --namespace=default")
        global kraken_node_name
        kraken_node_name = node_name
def stop_instances(self, node):
    """Stop the given OpenStack server instance; exit the process on failure."""
    try:
        runcommand.invoke("openstack server stop %s" % (node))
        logging.info("Instance: " + str(node) + " stopped")
    except Exception as err:
        logging.error("Failed to stop node instance %s. Encountered following "
                      "exception: %s." % (node, err))
        sys.exit(1)
def reboot_instances(self, node):
    """Soft-reboot the given OpenStack server instance; exit the process on failure."""
    try:
        runcommand.invoke("openstack server reboot --soft %s" % (node))
        logging.info("Instance: " + str(node) + " rebooted")
    except Exception as err:
        logging.error("Failed to reboot node instance %s. Encountered following "
                      "exception: %s." % (node, err))
        sys.exit(1)
def run(kubeconfig_path, scenario, pre_action_output=""):
    """Run a post-action scenario and compare its output with the pre-action run.

    Parameters:
        kubeconfig_path: kept for interface compatibility (unused here).
        scenario: path to a .py script or a bash script; .yaml/.yml
            (powerfulseal) scenarios are rejected with an error message.
        pre_action_output: output of the pre-action run to compare against.

    Returns the scenario output string ("" when nothing ran), or False when
    the post-action output does not match the pre-action output.
    """
    # Fix: previously action_output was only assigned in the .py / bash
    # branches, so the final `return action_output` raised UnboundLocalError
    # for .yaml/.yml scenarios and for an empty scenario string.
    action_output = ""
    if scenario.endswith(".yaml") or scenario.endswith(".yml"):
        logging.error("Powerfulseal support has recently been removed. Please switch to using plugins instead.")
    elif scenario.endswith(".py"):
        action_output = runcommand.invoke("python3 " + scenario).strip()
        if pre_action_output:
            if pre_action_output == action_output:
                logging.info(scenario + " post action checks passed")
            else:
                logging.info(scenario + " post action response did not match pre check output")
                logging.info("Pre action output: " + str(pre_action_output) + "\n")
                logging.info("Post action output: " + str(action_output))
                return False
    elif scenario != "":
        # invoke custom bash script
        action_output = runcommand.invoke(scenario).strip()
        if pre_action_output:
            if pre_action_output == action_output:
                logging.info(scenario + " post action checks passed")
            else:
                logging.info(scenario + " post action response did not match pre check output")
                return False
    return action_output
def wait_for_status(engine_name, expected_status, experiment_name, namespace):
    """Poll a litmus chaosengine until its first experiment reaches *expected_status*.

    Polls every 10 seconds, giving up after roughly 5 minutes or when the
    engine reports "notfound". Returns True on success, False otherwise.
    """
    if expected_status == "running" and not wait_for_initialized(engine_name, experiment_name, namespace):
        logging.info("Chaos engine never initialized, exiting")
        return False

    def _current_status():
        # Fetch and normalize the status of the engine's first experiment.
        return runcommand.invoke(
            "kubectl get chaosengines/%s -n %s -o jsonpath='{.status.experiments[0].status}'"
            % (engine_name, namespace)).strip()

    max_tries = 30
    tries = 0
    engine_status = _current_status()
    while engine_status.lower() != expected_status:
        time.sleep(10)
        logging.info("Waiting for " + experiment_name + " to be " + expected_status)
        engine_status = _current_status()
        if tries >= max_tries:
            logging.error("Chaos engine " + experiment_name + " took longer than 5 minutes to be " + expected_status)
            return False
        tries += 1
        # need to see if error in run
        if "notfound" in engine_status.lower():
            logging.info("Chaos engine was not found")
            return False
    return True
def check_experiment(engine_name, experiment_name, namespace):
    """Wait for a litmus chaosengine to run/complete and for its chaosresult verdict.

    Polls the engine status every 10 seconds (up to ~5 minutes), then polls the
    chaosresult (named <engine>-<experiment>) until the verdict leaves
    "Awaited". Returns True when the verdict is "Pass", False otherwise.

    Fix: the jsonpath expressions previously used the lowercase field
    ``.status.experimentstatus``; JSONPath field access is case-sensitive and
    the Litmus ChaosResult CRD exposes ``.status.experimentStatus`` (as the
    sibling check_experiment implementation already uses), so the queries
    always returned an empty string.
    """
    chaos_engine = runcommand.invoke(
        "kubectl get chaosengines/%s -n %s -o jsonpath="
        "'{.status.engineStatus}'" % (engine_name, namespace)
    )
    engine_status = chaos_engine.strip()
    max_tries = 30
    engine_counter = 0
    while engine_status.lower() != "running" and engine_status.lower() != "completed":
        time.sleep(10)
        logging.info("Waiting for engine to start running.")
        chaos_engine = runcommand.invoke(
            "kubectl get chaosengines/%s -n %s -o jsonpath="
            "'{.status.engineStatus}'" % (engine_name, namespace)
        )
        engine_status = chaos_engine.strip()
        if engine_counter >= max_tries:
            logging.error("Chaos engine took longer than 5 minutes to be running or complete")
            return False
        engine_counter += 1
        # need to see if error in run
        if "notfound" in engine_status.lower():
            logging.info("Chaos engine was not found")
            return False
    if not chaos_engine:
        return False
    chaos_result = runcommand.invoke(
        "kubectl get chaosresult %s"
        "-%s -n %s -o "
        "jsonpath='{.status.experimentStatus.verdict}'" % (engine_name, experiment_name, namespace)
    )
    result_counter = 0
    status = chaos_result.strip()
    while status == "Awaited":
        logging.info("Waiting for chaos result to finish, sleeping 10 seconds")
        time.sleep(10)
        chaos_result = runcommand.invoke(
            "kubectl get chaosresult %s"
            "-%s -n %s -o "
            "jsonpath='{.status.experimentStatus.verdict}'" % (engine_name, experiment_name, namespace)
        )
        status = chaos_result.strip()
        if result_counter >= max_tries:
            logging.error("Chaos results took longer than 5 minutes to get a final result")
            return False
        result_counter += 1
        if "notfound" in status.lower():
            logging.info("Chaos result was not found")
            return False
    if status == "Pass":
        return True
    else:
        chaos_result = runcommand.invoke(
            "kubectl get chaosresult %s"
            "-%s -n %s -o jsonpath="
            "'{.status.experimentStatus.failStep}'" % (engine_name, experiment_name, namespace)
        )
        logging.info("Chaos result failed information: " + str(chaos_result))
        return False
def run(scenarios_list, config, wait_duration):
    """Run application-outage scenarios: create a deny NetworkPolicy for the
    configured duration, then delete it and publish status to cerberus."""
    failed_post_scenarios = ""
    for app_outage_config in scenarios_list:
        # skip empty / placeholder entries
        if len(app_outage_config) < 2:
            continue
        with open(app_outage_config, "r") as f:
            app_outage_config_yaml = yaml.full_load(f)
            scenario_config = app_outage_config_yaml["application_outage"]
            pod_selector = scenario_config.get("pod_selector", "{}")
            traffic_type = scenario_config.get("block", "[Ingress, Egress]")
            namespace = scenario_config.get("namespace", "")
            duration = scenario_config.get("duration", 60)
            start_time = int(time.time())
            network_policy_template = """---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: kraken-deny
spec:
  podSelector:
    matchLabels: {{ pod_selector }}
  policyTypes: {{ traffic_type }}
"""
            rendered_spec = Template(network_policy_template).render(
                pod_selector=pod_selector, traffic_type=traffic_type)
            # Write the rendered template to a file
            with open("kraken_network_policy.yaml", "w") as f:
                f.write(rendered_spec)
            # Block the traffic by creating network policy
            logging.info("Creating the network policy")
            runcommand.invoke(
                "kubectl create -f %s -n %s --validate=false"
                % ("kraken_network_policy.yaml", namespace))
            # wait for the specified duration
            logging.info(
                "Waiting for the specified duration in the config: %s" % (duration))
            time.sleep(duration)
            # unblock the traffic by deleting the network policy
            logging.info("Deleting the network policy")
            runcommand.invoke(
                "kubectl delete -f %s -n %s" % ("kraken_network_policy.yaml", namespace))
            logging.info(
                "End of scenario. Waiting for the specified duration: %s" % (wait_duration))
            time.sleep(wait_duration)
            end_time = int(time.time())
            cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
def deploy_all_experiments(version_string):
    """Deploy the generic litmus experiment suite for *version_string* (e.g. "v1.13.8").

    Exits the process when the version string does not start with 'v'.
    """
    if not version_string.startswith("v"):
        logging.error("Incorrect version string for litmus, needs to start with 'v' "
                      "followed by a number")
        sys.exit(1)
    # Strip the leading 'v' for the chaos hub URL.
    runcommand.invoke(
        "kubectl apply -f "
        "https://hub.litmuschaos.io/api/chaos/%s?file=charts/generic/experiments.yaml"
        % version_string[1:]
    )
def instance(distribution, prometheus_url, prometheus_bearer_token):
    """Fill in the prometheus URL and bearer token from the cluster on OpenShift
    when they were not provided; return them unchanged otherwise."""
    if distribution == "openshift":
        if not prometheus_url:
            route_host = runcommand.invoke(
                r"""oc get routes -n openshift-monitoring -o=jsonpath='{.items[?(@.metadata.name=="prometheus-k8s")].spec.host}'"""  # noqa
            )
            prometheus_url = "https://" + route_host
        if not prometheus_bearer_token:
            prometheus_bearer_token = runcommand.invoke(
                "oc -n openshift-monitoring "
                "sa get-token prometheus-k8s")
    return prometheus_url, prometheus_bearer_token
def delete_chaos_experiments(namespace):
    """Delete all litmus chaosexperiment objects in *namespace* if that project exists."""
    project_count = runcommand.invoke(
        "oc get project -o name | grep -c " + namespace + " | xargs")
    # grep -c prints "0" when no matching project exists.
    if project_count.strip() == "0":
        return
    probe = runcommand.invoke_no_exit(
        "kubectl get chaosexperiment")
    if "returned non-zero exit status 1" not in probe:
        logging.info("Deleting all litmus experiments")
        runcommand.invoke("kubectl delete chaosexperiment --all -n " + str(namespace))
def run(scenarios_list, config, wait_duration, failed_post_scenarios, kubeconfig_path):
    """Run namespace-action scenarios (delete by default) against randomly
    selected namespaces matching each scenario's namespace regex / label.

    Each element of scenarios_list is a list: [scenario yaml path, optional
    post-action script]. Exits the process when not enough namespaces match,
    when the oc action fails, or when post-action checks error out.
    Publishes start/end status to cerberus after every scenario.
    """
    for scenario_config in scenarios_list:
        if len(scenario_config) > 1:
            # second element is a pre/post action script; capture its pre-run output
            pre_action_output = post_actions.run(kubeconfig_path, scenario_config[1])
        else:
            pre_action_output = ""
        with open(scenario_config[0], "r") as f:
            scenario_config_yaml = yaml.full_load(f)
            for scenario in scenario_config_yaml["scenarios"]:
                scenario_namespace = scenario.get("namespace", "^.*$")
                scenario_label = scenario.get("label_selector", None)
                run_count = scenario.get("runs", 1)
                namespace_action = scenario.get("action", "delete")
                run_sleep = scenario.get("sleep", 10)
                wait_time = scenario.get("wait_time", 30)
                killed_namespaces = []
                namespaces = kubecli.check_namespaces([scenario_namespace], scenario_label)
                start_time = int(time.time())
                for i in range(run_count):
                    if len(namespaces) == 0:
                        logging.error(
                            "Couldn't %s %s namespaces, not enough namespaces matching %s with label %s"
                            % (namespace_action, str(run_count), scenario_namespace, str(scenario_label))
                        )
                        sys.exit(1)
                    # pick a random namespace; each is acted on at most once (removed below)
                    selected_namespace = namespaces[random.randint(0, len(namespaces) - 1)]
                    killed_namespaces.append(selected_namespace)
                    try:
                        runcommand.invoke("oc %s project %s" % (namespace_action, selected_namespace))
                        logging.info(namespace_action + " on namespace " + str(selected_namespace) + " was successful")
                    except Exception as e:
                        logging.info(
                            namespace_action + " on namespace " + str(selected_namespace) + " was unsuccessful"
                        )
                        logging.info("Namespace action error: " + str(e))
                        sys.exit(1)
                    namespaces.remove(selected_namespace)
                    logging.info("Waiting %s seconds between namespace deletions" % str(run_sleep))
                    time.sleep(run_sleep)
                logging.info("Waiting for the specified duration: %s" % wait_duration)
                time.sleep(wait_duration)
                if len(scenario_config) > 1:
                    try:
                        failed_post_scenarios = post_actions.check_recovery(
                            kubeconfig_path, scenario_config, failed_post_scenarios, pre_action_output
                        )
                    except Exception as e:
                        logging.error("Failed to run post action checks: %s" % e)
                        sys.exit(1)
                else:
                    # no post-action script: verify the touched namespaces recover
                    failed_post_scenarios = check_active_namespace(killed_namespaces, wait_time)
                end_time = int(time.time())
                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
def delete_chaos(namespace):
    """Remove all litmus chaosengine and chaosresult objects from *namespace*."""
    if not kubecli.check_if_namespace_exists(namespace):
        logging.info(namespace + " namespace doesn't exist")
        return
    logging.info("Deleting all litmus run objects")
    for resource in ("chaosengine", "chaosresult"):
        probe = runcommand.invoke_no_exit("kubectl get " + resource)
        # invoke_no_exit embeds the subprocess error text when the CRD is absent.
        if "returned non-zero exit status 1" not in probe:
            runcommand.invoke("kubectl delete " + resource + " --all -n " + str(namespace))
def run(scenarios_list, config, litmus_uninstall, wait_duration, litmus_namespace):
    """Apply each litmus scenario manifest and, for ChaosEngine manifests,
    wait for every listed experiment and check its result.

    Each scenario item may be a local file path or an http(s) URL.
    Exits the process on an unsupported namespace, a failed experiment,
    or any unexpected exception. Publishes status to cerberus per scenario.
    """
    # Loop to run the scenarios starts here
    for l_scenario in scenarios_list:
        start_time = int(time.time())
        try:
            for item in l_scenario:
                runcommand.invoke("kubectl apply -f %s -n %s" % (item, litmus_namespace))
                if "http" in item:
                    f = requests.get(item)
                    yaml_item = list(yaml.safe_load_all(f.content))[0]
                else:
                    with open(item, "r") as f:
                        yaml_item = list(yaml.safe_load_all(f))[0]
                if yaml_item["kind"] == "ChaosEngine":
                    engine_name = yaml_item["metadata"]["name"]
                    experiment_names = yaml_item["spec"]["experiments"]
                    experiment_namespace = yaml_item["metadata"]["namespace"]
                    # only the "litmus" namespace is supported for engines
                    if experiment_namespace != "litmus":
                        logging.error(
                            "Specified namespace: %s in the scenario: %s is not supported, please switch it to litmus"
                            % (experiment_namespace, l_scenario))
                        sys.exit(1)
                    for expr in experiment_names:
                        expr_name = expr["name"]
                        experiment_result = check_experiment(
                            engine_name, expr_name, litmus_namespace)
                        if experiment_result:
                            logging.info(
                                "Scenario: %s has been successfully injected!" % item)
                        else:
                            logging.info(
                                "Scenario: %s was not successfully injected, please check" % item)
                            if litmus_uninstall:
                                delete_chaos(litmus_namespace)
                            sys.exit(1)
            if litmus_uninstall:
                delete_chaos(litmus_namespace)
            logging.info("Waiting for the specified duration: %s" % wait_duration)
            time.sleep(wait_duration)
            end_time = int(time.time())
            cerberus.get_status(config, start_time, end_time)
        except Exception as e:
            # NOTE: `item` here is the last manifest processed before the failure
            logging.error("Failed to run litmus scenario: %s. Encountered "
                          "the following exception: %s" % (item, e))
            sys.exit(1)
def node_crash_scenario(self, instance_kill_count, node, timeout):
    """Crash *node* instance_kill_count times by writing random data to
    /proc/sysrq-trigger through an ``oc debug`` chroot session.

    Exits the process on the first failure.
    NOTE(review): the ``timeout`` parameter is accepted but never used here.
    """
    for _ in range(instance_kill_count):
        try:
            logging.info("Starting node_crash_scenario injection")
            logging.info("Crashing the node %s" % (node))
            runcommand.invoke("oc debug node/" + node + " -- chroot /host "
                              "dd if=/dev/urandom of=/proc/sysrq-trigger")
            logging.info(
                "node_crash_scenario has been successfuly injected!")
        except Exception as e:
            logging.error(
                "Failed to crash the node. Encountered following exception: %s. "
                "Test Failed" % (e))
            logging.error("node_crash_scenario injection failed!")
            sys.exit(1)
def pod_scenarios(scenarios_list, config, failed_post_scenarios):
    """Run powerfulseal-based pod-kill scenarios and post-action checks.

    Returns the accumulated failed_post_scenarios value.

    NOTE(review): this function references several names that are not
    parameters or obvious imports — ``kubeconfig_path``, ``wait_duration``,
    ``run_post_action``, ``publish_kraken_status``, and ``post_actions`` used
    as a callable. Presumably these are module-level globals/helpers; verify
    they exist in this module, otherwise this raises NameError at runtime.
    """
    try:
        # Loop to run the scenarios starts here
        for pod_scenario in scenarios_list:
            if len(pod_scenario) > 1:
                # second element is a pre/post action script
                pre_action_output = run_post_action(kubeconfig_path, pod_scenario[1])
            else:
                pre_action_output = ''
            scenario_logs = runcommand.invoke(
                "powerfulseal autonomous --use-pod-delete-instead-"
                "of-ssh-kill --policy-file %s --kubeconfig %s "
                "--no-cloud --inventory-kubernetes --headless"
                % (pod_scenario[0], kubeconfig_path))
            # Display pod scenario logs/actions
            print(scenario_logs)
            logging.info("Scenario: %s has been successfully injected!" % (pod_scenario[0]))
            logging.info("Waiting for the specified duration: %s" % (wait_duration))
            time.sleep(wait_duration)
            failed_post_scenarios = post_actions(kubeconfig_path, pod_scenario,
                                                failed_post_scenarios, pre_action_output)
            publish_kraken_status(config, failed_post_scenarios)
    except Exception as e:
        logging.error("Failed to run scenario: %s. Encountered the following "
                      "exception: %s" % (pod_scenario[0], e))
    return failed_post_scenarios
def install_litmus(version, namespace):
    """Install litmus operator *version* into *namespace*, disable analytics,
    and wait for the operator deployment to become available."""
    logging.info("Installing version %s of litmus in namespace %s" % (version, namespace))
    install_output = runcommand.invoke(
        "kubectl -n %s apply -f "
        "https://litmuschaos.github.io/litmus/litmus-operator-%s.yaml" % (namespace, version)
    )
    if "unable" in install_output:
        logging.info("Unable to install litmus because " + str(install_output))
        sys.exit(1)
    # Opt out of litmus analytics by injecting ANALYTICS=FALSE into the operator env.
    runcommand.invoke(
        "oc patch -n %s deployment.apps/chaos-operator-ce --type=json --patch ' "
        '[ { "op": "add", "path": "/spec/template/spec/containers/0/env/-", '
        '"value": { "name": "ANALYTICS", "value": "FALSE" } } ]\'' % namespace
    )
    logging.info("Waiting for litmus operator to become available")
    runcommand.invoke("oc wait deploy -n %s chaos-operator-ce --for=condition=Available" % namespace)
def pod_exec(pod_name, command, namespace):
    """Execute *command* inside *pod_name* via kubectl exec, retrying up to
    5 times when the response looks like an authorization failure.

    Returns the raw command output. Note: if every attempt hits an auth
    error, the last response is still returned.

    Fix: removed the dead ``i = 0`` assignment (immediately shadowed by the
    for loop) and the unused loop variable.
    """
    response = ""
    for _ in range(5):
        response = runcommand.invoke('kubectl exec %s -n %s -- %s' % (pod_name, namespace, command))
        # retry only on apparent auth problems; any other output is final
        if "unauthorized" in response.lower() or "authorization" in response.lower():
            continue
        break
    return response
def delete_chaos(namespace):
    """Remove all litmus chaosengine and chaosresult objects from *namespace*,
    if the corresponding project exists."""
    project_count = runcommand.invoke("oc get project -o name | grep -c " + namespace + " | xargs")
    # grep -c prints "0" when no matching project exists.
    if project_count.strip() == "0":
        logging.info(namespace + " namespace doesn't exist")
        return
    logging.info("Deleting all litmus run objects")
    for resource in ("chaosengine", "chaosresult"):
        probe = runcommand.invoke_no_exit(
            "kubectl get " + resource)
        if "returned non-zero exit status 1" not in probe:
            runcommand.invoke("kubectl delete " + resource + " --all -n " + str(namespace))
def run(kubeconfig_path, scenario, pre_action_output=""):
    """Run a powerfulseal (.yaml/.yml), python (.py) or bash scenario and
    compare its output with the pre-action output.

    Returns the scenario output string ("" when nothing ran), or False when
    powerfulseal reported an error or the post-action output does not match
    the pre-action output.
    """
    # Fix: previously action_output was unbound for an empty scenario string,
    # making the final `return action_output` raise UnboundLocalError.
    action_output = ""
    if scenario.endswith(".yaml") or scenario.endswith(".yml"):
        action_output = runcommand.invoke(
            "powerfulseal autonomous "
            "--use-pod-delete-instead-of-ssh-kill"
            " --policy-file %s --kubeconfig %s --no-cloud"
            " --inventory-kubernetes --headless" % (scenario, kubeconfig_path))
        # read output to make sure no error
        if "ERROR" in action_output:
            # Fix: the extracted error line was previously computed and
            # discarded; capture it and surface it in the log.
            error_line = action_output.split("ERROR")[1].split("\n")[0]
            logging.info("Powerful seal error: " + str(error_line))
            if not pre_action_output:
                logging.info("Powerful seal pre action check failed for " + str(scenario))
            return False
        else:
            logging.info(scenario + " post action checks passed")
    elif scenario.endswith(".py"):
        action_output = runcommand.invoke("python3 " + scenario).strip()
        if pre_action_output:
            if pre_action_output == action_output:
                logging.info(scenario + " post action checks passed")
            else:
                logging.info(
                    scenario + " post action response did not match pre check output")
                logging.info("Pre action output: " + str(pre_action_output) + "\n")
                logging.info("Post action output: " + str(action_output))
                return False
    elif scenario != "":
        # invoke custom bash script
        action_output = runcommand.invoke(scenario).strip()
        if pre_action_output:
            if pre_action_output == action_output:
                logging.info(scenario + " post action checks passed")
            else:
                logging.info(
                    scenario + " post action response did not match pre check output")
                return False
    return action_output
def __init__(self):
    """Build an Azure ComputeManagementClient using CLI-based credentials and
    the first subscription reported by ``az account list``."""
    logging.info("azure " + str(self))
    # Acquire a credential object using CLI-based authentication.
    credential = DefaultAzureCredential()
    logging.info("credential " + str(credential))
    accounts = yaml.load(runcommand.invoke("az account list -o yaml"),
                         Loader=yaml.FullLoader)
    # use the first listed subscription
    subscription_id = accounts[0]["id"]
    self.compute_client = ComputeManagementClient(credential, subscription_id)
def __init__(self):
    """Resolve the active GCP project and build a compute API client using
    application-default credentials."""
    # Fix: the output was previously split on the literal two-character
    # string '/n' (a typo for '\n'), which effectively never occurs, so the
    # "take the first line" intent never happened and the whole output was
    # used (rescued only by the trailing .strip()). Split on a real newline.
    self.project = runcommand.invoke(
        'gcloud config get-value project').split('\n')[0].strip()
    logging.info("project " + str(self.project) + "!")
    credentials = GoogleCredentials.get_application_default()
    self.client = discovery.build('compute', 'v1', credentials=credentials, cache_discovery=False)
def uninstall_litmus(version, litmus_namespace):
    """Uninstall the litmus operator manifest and delete all litmus CRDs,
    when the litmus project exists."""
    project_count = runcommand.invoke(
        "oc get project -o name | grep -c " + litmus_namespace + " | xargs")
    # grep -c prints "0" when no matching project exists.
    if project_count.strip() == "0":
        return
    logging.info("Uninstalling Litmus operator")
    runcommand.invoke_no_exit(
        "kubectl delete -n %s -f "
        "https://litmuschaos.github.io/litmus/litmus-operator-%s.yaml" % (litmus_namespace, version))
    logging.info("Deleting litmus crd")
    runcommand.invoke_no_exit(
        "kubectl get crds | grep litmus | awk '{print $1}' | xargs -I {} oc delete crd/{}"
    )
def get_instance_status(self, node, expected_status, timeout):
    """Poll the OpenStack server status once per second until it matches
    *expected_status* or *timeout* seconds have elapsed.

    Returns True when the status is reached, False on timeout.
    """
    elapsed = 0
    while elapsed <= timeout:
        current_status = runcommand.invoke(
            "openstack server show %s | tr -d ' ' |"
            "grep '^|status' |"
            "cut -d '|' -f3 | tr -d '\n'" % (node)
        )
        logging.info("instance status is %s" % (current_status))
        logging.info("expected status is %s" % (expected_status))
        if current_status.strip() == expected_status:
            logging.info("instance status has reached desired status %s" % (current_status))
            return True
        time.sleep(1)
        elapsed += 1
    return False
def install_litmus(version):
    """Install litmus operator *version* into the litmus namespace, disable
    analytics, and wait for the operator deployment to become available."""
    runcommand.invoke("kubectl apply -f "
                      "https://litmuschaos.github.io/litmus/litmus-operator-%s.yaml" % version)
    # Opt out of litmus analytics by appending ANALYTICS=FALSE to the operator env.
    analytics_patch = (
        '[ { "op": "add", "path": "/spec/template/spec/containers/0/env/-", '
        '"value": { "name": "ANALYTICS", "value": "FALSE" } } ]')
    runcommand.invoke(
        "oc patch -n litmus deployment.apps/chaos-operator-ce --type=json --patch ' "
        + analytics_patch + "'")
    runcommand.invoke("oc wait deploy -n litmus chaos-operator-ce --for=condition=Available")
def get_openstack_nodename(self, os_node_ip):
    """Map an instance IP to its OpenStack server name by parsing the table
    output of ``openstack server list``.

    Returns the server name of the row whose Networks field contains the IP;
    returns None implicitly when no row matches.
    """
    server_list = runcommand.invoke("openstack server list | grep %s" % (os_node_ip))
    # NOTE(review): node_name is only bound once a Name column (index 2) has
    # been seen; an IP match before that would raise UnboundLocalError, same
    # as the original implementation.
    for row in server_list.split("\n"):
        for position, field in enumerate(row.split("|")):
            # Column 2 of the table row is the server name.
            if field.strip() != "" and position == 2:
                node_name = field.strip()
                logging.info("Openstack node name is %s " % (node_name))
                continue
            # Networks fields look like "net=IP"; match on the IP part.
            key_value = field.split("=")
            if len(key_value) == 2 and key_value[-1].strip() == os_node_ip:
                return node_name
def run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_duration):
    """Run each powerfulseal pod scenario, then post-action recovery checks.

    Each element of scenarios_list is a list: [policy file path, optional
    pre/post action script]. Exits the process when powerfulseal or the
    post-action checks raise. Publishes start/end status to cerberus per
    scenario and returns the accumulated failed_post_scenarios value.
    """
    # Loop to run the scenarios starts here
    for pod_scenario in scenarios_list:
        if len(pod_scenario) > 1:
            # second element is a pre/post action script; capture its pre-run output
            pre_action_output = post_actions.run(kubeconfig_path, pod_scenario[1])
        else:
            pre_action_output = ""
        try:
            # capture start time
            start_time = int(time.time())
            scenario_logs = runcommand.invoke(
                "powerfulseal autonomous --use-pod-delete-instead-"
                "of-ssh-kill --policy-file %s --kubeconfig %s "
                "--no-cloud --inventory-kubernetes --headless"
                % (pod_scenario[0], kubeconfig_path))
        except Exception as e:
            logging.error(
                "Failed to run scenario: %s. Encountered the following "
                "exception: %s" % (pod_scenario[0], e))
            sys.exit(1)
        # Display pod scenario logs/actions
        print(scenario_logs)
        logging.info("Scenario: %s has been successfully injected!" % (pod_scenario[0]))
        logging.info("Waiting for the specified duration: %s" % (wait_duration))
        time.sleep(wait_duration)
        try:
            failed_post_scenarios = post_actions.check_recovery(
                kubeconfig_path, pod_scenario, failed_post_scenarios, pre_action_output)
        except Exception as e:
            logging.error("Failed to run post action checks: %s" % e)
            sys.exit(1)
        # capture end time
        end_time = int(time.time())
        # publish cerberus status
        cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
    return failed_post_scenarios
def install_litmus(version):
    """Install the litmus chaos operator for *version*, disable its analytics
    reporting, and wait until the operator deployment is available."""
    operator_manifest = (
        "https://litmuschaos.github.io/litmus/litmus-operator-%s.yaml" % version)
    runcommand.invoke("kubectl apply -f " + operator_manifest)
    # Append ANALYTICS=FALSE to the operator container env via a JSON patch.
    runcommand.invoke(
        "oc patch -n litmus deployment.apps/chaos-operator-ce --type=json --patch ' "
        "[ { \"op\": \"add\", \"path\": \"/spec/template/spec/containers/0/env/-\", "
        "\"value\": { \"name\": \"ANALYTICS\", \"value\": \"FALSE\" } } ]'")
    runcommand.invoke(
        "oc wait deploy -n litmus chaos-operator-ce --for=condition=Available")