def pods_not_in_phase(
    label_selector: str,
    phase: str = "Running",
    ns: str = "default",
    secrets: Secrets = None,
) -> bool:
    """
    Lookup pods by `label_selector` in the namespace `ns`.

    Raises :exc:`chaoslib.exceptions.ActivityFailed` when any matching pod
    is currently in the given `phase`, which it should not be.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)

    if label_selector:
        pods = v1.list_namespaced_pod(ns, label_selector=label_selector)
        logger.debug(
            f"Found {len(pods.items)} pods matching label '{label_selector}'"
            f" in ns '{ns}'"
        )
    else:
        pods = v1.list_namespaced_pod(ns)
        logger.debug(f"Found {len(pods.items)} pods in ns '{ns}'")

    if not pods.items:
        raise ActivityFailed(f"no pods '{label_selector}' were found")

    for pod in pods.items:
        if pod.status.phase == phase:
            raise ActivityFailed(
                f"pod '{label_selector}' should not be in phase "
                f"'{pod.status.phase}'"
            )

    return True
def delete_replica_set(name: str = None, ns: str = "default",
                       label_selector: str = None, secrets: Secrets = None):
    """
    Delete a replica set by `name` or `label_selector` in the namespace `ns`.

    The replica set is deleted without a graceful period to trigger an abrupt
    termination.

    If neither `name` nor `label_selector` is specified, all the replica sets
    will be deleted in the namespace.
    """
    api = create_k8s_api_client(secrets)

    # The `extensions/v1beta1` API group was removed in Kubernetes 1.16;
    # replica sets are served from `apps/v1` (AppsV1Api), consistent with
    # the deployment operations in this module.
    v1 = client.AppsV1Api(api)
    if name:
        ret = v1.list_namespaced_replica_set(
            ns, field_selector="metadata.name={}".format(name))
    elif label_selector:
        ret = v1.list_namespaced_replica_set(ns, label_selector=label_selector)
    else:
        ret = v1.list_namespaced_replica_set(ns)

    logger.debug("Found {d} replica sets named '{n}'".format(
        d=len(ret.items), n=name))

    body = client.V1DeleteOptions()
    for r in ret.items:
        v1.delete_namespaced_replica_set(r.metadata.name, ns, body=body)
def create_node(meta: Dict[str, Any] = None, spec: Dict[str, Any] = None,
                secrets: Secrets = None) -> client.V1Node:
    """
    Create one new node in the cluster.

    Due to the way things work on certain cloud providers, you won't be able
    to use this meaningfully on them. For instance on GCE, this will likely
    fail.

    See also:
    https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
    """  # noqa: E501
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)

    body = client.V1Node()
    body.metadata = client.V1ObjectMeta(**meta) if meta else None
    body.spec = client.V1NodeSpec(**spec) if spec else None

    try:
        res = v1.create_node(body)
    except ApiException as x:
        # Chain the API error so the original cause is kept in the traceback.
        raise ActivityFailed(
            "Creating new node failed: {}".format(x.body)) from x

    logger.debug("Node '{}' created".format(res.metadata.name))

    return res
def _deployment_readiness_has_state(
    name: str,
    ready: bool,
    ns: str = "default",
    label_selector: str = None,
    timeout: int = 30,
    secrets: Secrets = None,
) -> Union[bool, None]:
    """
    Check whether the deployment `name` reaches the readiness state given by
    the `ready` parameter within `timeout` seconds.

    Returns `True` as soon as the deployment's readiness matches `ready`,
    `False` when the watch times out before that happens, or `None` when the
    event stream ends without the state being observed.
    """
    field_selector = f"metadata.name={name}"
    api = create_k8s_api_client(secrets)
    v1 = client.AppsV1Api(api)
    w = watch.Watch()
    # The timeout may arrive as a string from the experiment definition.
    timeout = int(timeout)

    if label_selector is None:
        # Watch the deployment by name only.
        watch_events = partial(
            w.stream,
            v1.list_namespaced_deployment,
            namespace=ns,
            field_selector=field_selector,
            _request_timeout=timeout,
        )
    else:
        # The selector may carry a `{name}` placeholder to interpolate.
        label_selector = label_selector.format(name=name)
        watch_events = partial(
            w.stream,
            v1.list_namespaced_deployment,
            namespace=ns,
            field_selector=field_selector,
            label_selector=label_selector,
            _request_timeout=timeout,
        )

    try:
        logger.debug(f"Watching events for {timeout}s")
        for event in watch_events():
            deployment = event["object"]
            status = deployment.status
            spec = deployment.spec
            logger.debug(
                f"Deployment '{deployment.metadata.name}' {event['type']}: "
                f"Ready Replicas {status.ready_replicas} - "
                f"Unavailable Replicas {status.unavailable_replicas} - "
                f"Desired Replicas {spec.replicas}")

            # The deployment is considered ready when every desired replica
            # reports ready.
            readiness = status.ready_replicas == spec.replicas
            if ready == readiness:
                w.stop()
                return True

    except urllib3.exceptions.ReadTimeoutError:
        logger.debug("Timed out!")
        return False
def microservice_available_and_healthy(
        name: str, ns: str = "default",
        label_selector: str = "name in ({name})",
        secrets: Secrets = None) -> Union[bool, None]:
    """
    Lookup a deployment by `name` in the namespace `ns`.

    The selected resources are matched by the given `label_selector`.

    Raises :exc:`chaoslib.exceptions.FailedActivity` when the state is not
    as expected.
    """
    label_selector = label_selector.format(name=name)
    api = create_k8s_api_client(secrets)

    # `apps/v1beta1` was removed in Kubernetes 1.16; deployments are
    # served from `apps/v1` (AppsV1Api).
    v1 = client.AppsV1Api(api)
    ret = v1.list_namespaced_deployment(ns, label_selector=label_selector)

    logger.debug("Found {d} deployments named '{n}'".format(
        d=len(ret.items), n=name))

    if not ret.items:
        raise FailedActivity(
            "microservice '{name}' was not found".format(name=name))

    for d in ret.items:
        logger.debug("Deployment has '{s}' available replicas".format(
            s=d.status.available_replicas))

        if d.status.available_replicas != d.spec.replicas:
            raise FailedActivity(
                "microservice '{name}' is not healthy".format(name=name))

    return True
def get_custom_object(
    group: str,
    version: str,
    plural: str,
    name: str,
    ns: str = "default",
    secrets: Secrets = None,
) -> Dict[str, Any]:
    """
    Get a custom object in the given namespace.

    Read more about custom resources here:
    https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
    """  # noqa: E501
    api = client.CustomObjectsApi(create_k8s_api_client(secrets))
    try:
        r = api.get_namespaced_custom_object(
            group, version, ns, plural, name, _preload_content=False)
        return json.loads(r.data)
    except ApiException as x:
        # The message previously said "create"; this function performs a get.
        raise ActivityFailed(
            f"Failed to get custom resource object: '{x.reason}' {x.body}"
        ) from x
def patch_cluster_custom_object(group: str, version: str, plural: str,
                                name: str, force: bool = False,
                                resource: Dict[str, Any] = None,
                                resource_as_yaml_file: str = None,
                                secrets: Secrets = None) -> Dict[str, Any]:
    """
    Patch a custom object cluster-wide. The resource must be the updated
    version to apply. `force` will re-acquire conflicting fields owned by
    others.

    Read more about custom resources here:
    https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
    """  # noqa: E501
    k8s_client = create_k8s_api_client(secrets)
    api = client.CustomObjectsApi(k8s_client)
    body = load_body(resource, resource_as_yaml_file)

    try:
        response = api.patch_cluster_custom_object(
            group, version, plural, name, body,
            force=force,
            _preload_content=False,
        )
    except ApiException as x:
        raise ActivityFailed(
            "Failed to patch custom resource object: '{}' {}".format(
                x.reason, x.body))

    return json.loads(response.data)
def replace_cluster_custom_object(group: str, version: str, plural: str, name: str, force: bool = False, resource: Dict[str, Any] = None, resource_as_yaml_file: str = None, secrets: Secrets = None) -> Dict[str, Any]: """ Replace a custom object in the given namespace. The resource must be the new version to apply. Read more about custom resources here: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/ """ # noqa: E501 api = client.CustomObjectsApi(create_k8s_api_client(secrets)) body = load_body(resource, resource_as_yaml_file) try: r = api.replace_cluster_custom_object( group, version, plural, name, body, force=force, _preload_content=False ) return json.loads(r.data) except ApiException as x: raise ActivityFailed( "Failed to replace custom resource object: '{}' {}".format( x.reason, x.body))
def delete_deployment(
    name: str = None,
    ns: str = "default",
    label_selector: str = None,
    secrets: Secrets = None,
):
    """
    Delete a deployment by `name` or `label_selector` in the namespace `ns`.

    The deployment is deleted without a graceful period to trigger an abrupt
    termination.

    If neither `name` nor `label_selector` is specified, all the deployments
    will be deleted in the namespace.
    """
    api = create_k8s_api_client(secrets)
    apps = client.AppsV1Api(api)

    if name:
        deployments = apps.list_namespaced_deployment(
            ns, field_selector=f"metadata.name={name}")
    elif label_selector:
        deployments = apps.list_namespaced_deployment(
            ns, label_selector=label_selector)
    else:
        deployments = apps.list_namespaced_deployment(ns)

    logger.debug(f"Found {len(deployments.items)} deployments named '{name}'")

    options = client.V1DeleteOptions()
    for deployment in deployments.items:
        apps.delete_namespaced_deployment(
            deployment.metadata.name, ns, body=options)
def remove_statefulset(name: str = None, ns: str = "default",
                       label_selector: str = None, secrets: Secrets = None):
    """
    Remove a statefulset by `name` in the namespace `ns`.

    The statefulset is removed by deleting it without a graceful period to
    trigger an abrupt termination.

    The selected resources are matched by the given `label_selector`.
    """
    # NOTE(review): when `name` is None this selector becomes
    # "metadata.name=None" and matches nothing — callers should pass a name.
    field_selector = "metadata.name={name}".format(name=name)
    api = create_k8s_api_client(secrets)
    v1 = client.AppsV1Api(api)
    if label_selector:
        ret = v1.list_namespaced_stateful_set(
            ns, field_selector=field_selector, label_selector=label_selector)
    else:
        ret = v1.list_namespaced_stateful_set(
            ns, field_selector=field_selector)

    logger.debug("Found {d} statefulset(s) named '{n}' in ns '{s}'".format(
        d=len(ret.items), n=name, s=ns))

    body = client.V1DeleteOptions()
    for d in ret.items:
        # The unused `res` binding was dropped; the API response was ignored.
        v1.delete_namespaced_stateful_set(d.metadata.name, ns, body=body)
def trigger_rollout(name: str, ns: str = "default", secrets: Secrets = None):
    """
    Trigger a rolling update of the statefulset `name` in the namespace `ns`
    by injecting a trigger environment variable into each of its containers
    and replacing the statefulset.
    """
    # The annotation was `ns: "default"` (a bare string literal used as an
    # annotation, with no default value); fixed to a proper typed default.
    api = create_k8s_api_client(secrets)
    v1 = client.AppsV1Api(api)
    statefulset = v1.read_namespaced_stateful_set(name, ns)

    for container in statefulset.spec.template.spec.containers:
        add_trigger_environment_variable(container)

    v1.replace_namespaced_stateful_set(name, ns, statefulset)
def count_pods(label_selector: str, phase: str = None, ns: str = "default",
               secrets: Secrets = None) -> int:
    """
    Count the number of pods matching the given selector in a given `phase`,
    if one is given.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    ret = v1.list_namespaced_pod(ns, label_selector=label_selector)

    logger.debug("Found {d} pods matching label '{n}'".format(
        d=len(ret.items), n=label_selector))

    if not phase:
        return len(ret.items)

    # Only count pods currently in the requested phase; `sum` over a
    # generator replaces the manual counter loop (and the redundant
    # early-return for an empty list, which yields 0 either way).
    return sum(1 for p in ret.items if p.status.phase == phase)
def pods_in_phase(label_selector: str, phase: str = "Running",
                  ns: str = "default", secrets: Secrets = None) -> bool:
    """
    Lookup pods by `label_selector` in the namespace `ns`.

    Raises :exc:`chaoslib.exceptions.ActivityFailed` when any matching pod
    is not in the expected `phase`.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    pods = v1.list_namespaced_pod(ns, label_selector=label_selector)

    logger.debug(
        f"Found {len(pods.items)} pods matching label '{label_selector}'")

    if not pods.items:
        raise ActivityFailed(f"no pods '{label_selector}' were found")

    for pod in pods.items:
        if pod.status.phase != phase:
            raise ActivityFailed(
                f"pod '{label_selector}' is in phase '{pod.status.phase}'"
                f" but should be '{phase}'")

    return True
def delete_custom_object( group: str, version: str, plural: str, name: str, ns: str = "default", secrets: Secrets = None, ) -> Dict[str, Any]: """ Create a custom object cluster wide. Its custom resource definition must already exists or this will fail with a 404. Read more about custom resources here: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/ """ # noqa: E501 api = client.CustomObjectsApi(create_k8s_api_client(secrets)) try: r = api.delete_namespaced_custom_object(group, version, ns, plural, name, _preload_content=False) return json.loads(r.data) except ApiException as x: raise ActivityFailed( f"Failed to delete custom resource object: '{x.reason}' {x.body}")
def microservice_is_not_available(name: str, ns: str = "default",
                                  label_selector: str = "name in ({name})",
                                  secrets: Secrets = None) -> bool:
    """
    Lookup pods with a `name` label set to the given `name` in the specified
    `ns`.

    Raises :exc:`chaoslib.exceptions.ActivityFailed` when one of the pods
    with the specified `name` is in the `"Running"` phase.
    """
    label_selector = label_selector.format(name=name)
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    if label_selector:
        ret = v1.list_namespaced_pod(ns, label_selector=label_selector)
    else:
        ret = v1.list_namespaced_pod(ns)

    # Fixed log message: the closing quote around the namespace was missing.
    logger.debug("Found {d} pod(s) named '{n}' in ns '{s}'".format(
        d=len(ret.items), n=name, s=ns))

    for p in ret.items:
        phase = p.status.phase
        logger.debug("Pod '{p}' has status '{s}'".format(
            p=p.metadata.name, s=phase))
        if phase == "Running":
            raise ActivityFailed(
                "microservice '{name}' is actually running".format(name=name))

    return True
def uncordon_node(name: str = None, label_selector: str = None,
                  secrets: Secrets = None):
    """
    Uncordon nodes matching the given label name, so that pods can be
    scheduled on them again.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    nodes = _select_nodes(
        name=name, label_selector=label_selector, secrets=secrets)

    patch = {"spec": {"unschedulable": False}}
    for node in nodes:
        try:
            v1.patch_node(node.metadata.name, patch)
        except ApiException as x:
            logger.debug("Scheduling node '{}' failed: {}".format(
                node.metadata.name, x.body))
            raise ActivityFailed("Failed to schedule node '{}': {}".format(
                node.metadata.name, x.body))
def delete_pods(name: str = None, ns: str = "default",
                label_selector: str = None, secrets: Secrets = None):
    """
    Delete pods by `name` or `label_selector` in the namespace `ns`.

    The pods are deleted without a graceful period to trigger an abrupt
    termination.

    If neither of `name` and `label_selector` is specified, all the pods
    will be deleted in the namespace.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)

    if name:
        selected = v1.list_namespaced_pod(
            ns, field_selector="metadata.name={}".format(name))
    elif label_selector:
        selected = v1.list_namespaced_pod(ns, label_selector=label_selector)
    else:
        selected = v1.list_namespaced_pod(ns)

    logger.debug("Found {d} pods named '{n}'".format(
        d=len(selected.items), n=name))

    options = client.V1DeleteOptions()
    for pod in selected.items:
        v1.delete_namespaced_pod(pod.metadata.name, ns, body=options)
def service_endpoint_is_initialized(name: str, ns: str = "default",
                                    label_selector: str = "name in ({name})",
                                    secrets: Secrets = None):
    """
    Lookup a service endpoint by its name and raises
    :exc:`chaoslib.exceptions.ActivityFailed` when the service was not found
    or not initialized.
    """
    label_selector = label_selector.format(name=name)
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    if label_selector:
        ret = v1.list_namespaced_service(ns, label_selector=label_selector)
    else:
        ret = v1.list_namespaced_service(ns)

    # Fixed log typo: "ins ns" -> "in ns".
    logger.debug("Found {d} service(s) named '{n}' in ns '{s}'".format(
        d=len(ret.items), n=name, s=ns))

    if not ret.items:
        raise ActivityFailed(
            "service '{name}' is not initialized".format(name=name))

    return True
def delete_deployment(name: str, ns: str = "default",
                      label_selector: str = "name in ({name})",
                      secrets: Secrets = None):
    """
    Delete a deployment by `name` in the namespace `ns`.

    The deployment is deleted without a graceful period to trigger an abrupt
    termination.

    The selected resources are matched by the given `label_selector`.
    """
    label_selector = label_selector.format(name=name)
    api = create_k8s_api_client(secrets)

    # `apps/v1beta1` was removed in Kubernetes 1.16; deployments are
    # served from `apps/v1` (AppsV1Api).
    v1 = client.AppsV1Api(api)
    if label_selector:
        ret = v1.list_namespaced_deployment(ns, label_selector=label_selector)
    else:
        ret = v1.list_namespaced_deployment(ns)

    logger.debug("Found {d} deployments named '{n}'".format(
        d=len(ret.items), n=name))

    body = client.V1DeleteOptions()
    for d in ret.items:
        v1.delete_namespaced_deployment(d.metadata.name, ns, body=body)
def delete_service(name: str, ns: str = "default", secrets: Secrets = None):
    """
    Remove the given service
    """
    k8s_api = create_k8s_api_client(secrets)
    core_v1 = client.CoreV1Api(k8s_api)
    core_v1.delete_namespaced_service(name, namespace=ns)
def all_microservices_healthy(ns: str = "default",
                              secrets: Secrets = None) -> MicroservicesStatus:
    """
    Check all microservices in the system are running and available.

    Raises :exc:`chaoslib.exceptions.ActivityFailed` when the state is not
    as expected.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)

    failed = []
    not_ready = []
    for pod in v1.list_namespaced_pod(namespace=ns).items:
        phase = pod.status.phase
        if phase == "Failed":
            failed.append(pod)
        elif phase not in ("Running", "Succeeded"):
            not_ready.append(pod)

    logger.debug("Found {d} failed and {n} not ready pods".format(
        d=len(failed), n=len(not_ready)))

    # we probably should list them in the message
    if failed or not_ready:
        raise ActivityFailed("the system is unhealthy")

    return True
def create_cluster_custom_object(group: str, version: str, plural: str, resource: Dict[str, Any] = None, resource_as_yaml_file: str = None, secrets: Secrets = None) -> Dict[str, Any]: """ Delete a custom object in the given namespace. Read more about custom resources here: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/ """ # noqa: E501 api = client.CustomObjectsApi(create_k8s_api_client(secrets)) body = load_body(resource, resource_as_yaml_file) try: r = api.create_cluster_custom_object( group, version, plural, body, _preload_content=False ) return json.loads(r.data) except ApiException as x: if x.status == 409: logger.debug( "Custom resource object {}/{} already exists".format( group, version)) return json.loads(x.body) else: raise ActivityFailed( "Failed to create custom resource object: '{}' {}".format( x.reason, x.body))
def read_pod_logs(name: str = None, last: Union[str, None] = None,
                  ns: str = "default", from_previous: bool = False,
                  label_selector: str = "name in ({name})",
                  container_name: str = None,
                  secrets: Secrets = None) -> Dict[str, str]:
    """
    Fetch logs for all the pods with the label `"name"` set to `name` and
    return a dictionary with the keys being the pod's name and the values
    the logs of said pod.

    If `name` is not provided, use only the `label_selector` instead.

    When your pod has several containers, you should also set
    `container_name` to clarify which container you want to read logs from.

    If you provide `last`, this returns the logs of the last N seconds
    until now. This can be set to a fluent delta such as `10 minutes`.

    You may also set `from_previous` to `True` to capture the logs of a
    previous pod's incarnation, if any.
    """
    label_selector = label_selector.format(name=name)
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)

    if label_selector:
        ret = v1.list_namespaced_pod(ns, label_selector=label_selector)
    else:
        ret = v1.list_namespaced_pod(ns)

    found_names = [p.metadata.name for p in ret.items]
    logger.debug("Found {d} pods: [{p}] in ns '{n}'".format(
        d=len(ret.items), n=ns, p=', '.join(found_names)))

    since = None
    if last:
        now = datetime.now()
        since = int((now - dateparser.parse(last)).total_seconds())

    params = dict(
        namespace=ns,
        follow=False,
        previous=from_previous,
        timestamps=True,
        container=container_name or "",  # None is not a valid value
        _preload_content=False
    )
    if since:
        params["since_seconds"] = since

    logs = {}
    for pod in ret.items:
        pod_name = pod.metadata.name
        logger.debug("Fetching logs for pod '{n}'".format(n=pod_name))
        stream = v1.read_namespaced_pod_log(pod_name, **params)
        logs[pod_name] = stream.read().decode('utf-8')

    return logs
def remove_network_policy(name: str, ns: str = "default",
                          secrets: Secrets = None):
    """
    Remove the network policy `name` in the namespace `ns`.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.NetworkingV1Api(api)
    v1.delete_namespaced_network_policy(name, ns)
def test_client_can_provide_a_context(cfg, has_conf):
    """The KUBERNETES_CONTEXT env variable must select the kube context."""
    has_conf.return_value = True
    cfg.load_kube_config = MagicMock()
    try:
        os.environ["KUBERNETES_CONTEXT"] = "minikube"
        create_k8s_api_client()
        cfg.load_kube_config.assert_called_with(context="minikube")
    finally:
        os.environ.pop("KUBERNETES_CONTEXT", None)
def remove_service_endpoint(name: str, ns: str = "default",
                            secrets: Secrets = None):
    """
    Remove the service endpoint that sits in front of microservices (pods).
    """
    k8s_api = create_k8s_api_client(secrets)
    core_v1 = client.CoreV1Api(k8s_api)
    core_v1.delete_namespaced_service(name, namespace=ns)
def delete_nodes(label_selector: str = None, all: bool = False,
                 rand: bool = False, count: int = None,
                 grace_period_seconds: int = None, secrets: Secrets = None):
    """
    Delete nodes gracefully. Select the appropriate nodes by label.

    Nodes are not drained beforehand so we can see how cluster behaves.
    Nodes cannot be restarted, they are really deleted. Please be careful
    when using this action.

    On certain cloud providers, you also need to delete the underneath VM
    instance as well afterwards. This is the case on GCE for instance.

    If `all` is set to `True`, all nodes will be terminated.
    If `rand` is set to `True`, one random node will be terminated.
    If `count` is set to a positive number, only up to `count` nodes
    (randomly picked) will be terminated. Otherwise, the first retrieved
    node will be terminated.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    ret = v1.list_node(label_selector=label_selector)

    logger.debug("Found {d} nodes labelled '{s}'".format(
        d=len(ret.items), s=label_selector))

    nodes = ret.items
    if not nodes:
        raise FailedActivity(
            "failed to find a node that matches selector {}".format(
                label_selector))

    if rand:
        nodes = [random.choice(nodes)]
        logger.debug("Picked node '{p}' to be terminated".format(
            p=nodes[0].metadata.name))
    elif count is not None:
        # `random.choices` samples WITH replacement and could pick (and try
        # to delete) the same node twice; `random.sample` picks distinct
        # nodes. Cap `count` at the number of nodes actually found.
        nodes = random.sample(nodes, min(count, len(nodes)))
        logger.debug("Picked {c} nodes '{p}' to be terminated".format(
            c=len(nodes), p=", ".join([n.metadata.name for n in nodes])))
    elif not all:
        nodes = [nodes[0]]
        logger.debug("Picked node '{p}' to be terminated".format(
            p=nodes[0].metadata.name))
    else:
        logger.debug("Picked all nodes '{p}' to be terminated".format(
            p=", ".join([n.metadata.name for n in nodes])))

    body = client.V1DeleteOptions()
    for n in nodes:
        res = v1.delete_node(
            n.metadata.name, body, grace_period_seconds=grace_period_seconds)

        if res.status != "Success":
            logger.debug("Terminating nodes failed: {}".format(res.message))
def terminate_pods(
    label_selector: str = None,
    name_pattern: str = None,
    all: bool = False,
    rand: bool = False,
    mode: str = "fixed",
    qty: int = 1,
    grace_period: int = -1,
    ns: str = "default",
    order: str = "alphabetic",
    secrets: Secrets = None,
):
    """
    Terminate a pod gracefully. Select the appropriate pods by label and/or
    name patterns. Whenever a pattern is provided for the name, all pods
    retrieved will be filtered out if their name do not match the given
    pattern.

    If neither `label_selector` nor `name_pattern` are provided, all pods
    in the namespace will be selected for termination.

    If `all` is set to `True`, all matching pods will be terminated.

    Value of `qty` varies based on `mode`. If `mode` is set to `fixed`,
    then `qty` refers to number of pods to be terminated. If `mode` is set
    to `percentage`, then `qty` refers to percentage of pods, from 1 to
    100, to be terminated. Default `mode` is `fixed` and default `qty` is
    `1`.

    If `order` is set to `oldest`, the retrieved pods will be ordered by
    the pods creation_timestamp, with the oldest pod first in list.

    If `rand` is set to `True`, n random pods will be terminated.
    Otherwise, the first retrieved n pods will be terminated.

    If `grace_period` is greater than or equal to 0, it will be used as the
    grace period (in seconds) to terminate the pods. Otherwise, the default
    pod's grace period will be used.
    """
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)

    selected = _select_pods(
        v1, label_selector, name_pattern, all, rand, mode, qty, ns, order)

    if grace_period >= 0:
        options = client.V1DeleteOptions(grace_period_seconds=grace_period)
    else:
        options = client.V1DeleteOptions()

    deleted = []
    for pod in selected:
        v1.delete_namespaced_pod(pod.metadata.name, ns, body=options)
        deleted.append(pod.metadata.name)

    return deleted
def test_client_can_be_created_from_secrets(load_incluster_config, has_conf):
    """When running inside a pod, the in-cluster config must be loaded."""
    os.environ["CHAOSTOOLKIT_IN_POD"] = "true"
    try:
        has_conf.return_value = False
        load_incluster_config.return_value = None
        create_k8s_api_client()
        load_incluster_config.assert_called_once_with()
    finally:
        os.environ.pop("CHAOSTOOLKIT_IN_POD", None)
def _statefulset_readiness_has_state(name: str, ready: bool,
                                     ns: str = "default",
                                     label_selector: str = None,
                                     timeout: int = 30,
                                     secrets: Secrets = None):
    """
    Check whether the statefulset `name` reaches the readiness state given
    by the `ready` parameter within `timeout` seconds.

    Returns `True` as soon as the statefulset's readiness matches `ready`,
    `False` when the watch times out before that happens, or `None` when
    the event stream ends without the state being observed.
    """
    field_selector = "metadata.name={name}".format(name=name)
    api = create_k8s_api_client(secrets)
    v1 = client.AppsV1Api(api)
    w = watch.Watch()
    # The timeout may arrive as a string from the experiment definition.
    timeout = int(timeout)

    if label_selector is None:
        # Watch the statefulset by name only.
        watch_events = partial(w.stream, v1.list_namespaced_stateful_set,
                               namespace=ns,
                               field_selector=field_selector,
                               _request_timeout=timeout)
    else:
        # The selector may carry a `{name}` placeholder to interpolate.
        label_selector = label_selector.format(name=name)
        watch_events = partial(w.stream, v1.list_namespaced_stateful_set,
                               namespace=ns,
                               field_selector=field_selector,
                               label_selector=label_selector,
                               _request_timeout=timeout)

    try:
        logger.debug("Watching events for {t}s".format(t=timeout))
        for event in watch_events():
            statefulset = event['object']
            status = statefulset.status
            spec = statefulset.spec

            logger.debug(
                "StatefulSet '{p}' {t}: "
                "Ready Replicas {r} - "
                "Unavailable Replicas {u} - "
                "Desired Replicas {a}".format(
                    p=statefulset.metadata.name, t=event["type"],
                    r=status.ready_replicas,
                    a=spec.replicas,
                    u=status.unavailable_replicas))

            # The statefulset is considered ready when every desired
            # replica reports ready.
            readiness = status.ready_replicas == spec.replicas
            if ready == readiness:
                w.stop()
                return True

    except urllib3.exceptions.ReadTimeoutError:
        logger.debug("Timed out!")
        return False