Example #1
def test_remove_pods():
    service_map = {
        "service1": [
            EvictedPod("pod1", "namespace1", "Ran out of disk"),
            EvictedPod("pod2", "namespace1", "Ran out of mem"),
            EvictedPod("pod3", "namespace1", "Ran out of disk"),
        ]
    }
    mock_client = mock.MagicMock()
    remove_pods(mock_client, service_map, False)
    assert mock_client.core.delete_namespaced_pod.call_count == 2
    assert mock_client.core.delete_namespaced_pod.mock_calls == [
        mock.call(
            "pod1",
            "namespace1",
            body=V1DeleteOptions(),
            grace_period_seconds=0,
            propagation_policy="Background",
        ),
        mock.call(
            "pod2",
            "namespace1",
            body=V1DeleteOptions(),
            grace_period_seconds=0,
            propagation_policy="Background",
        ),
    ]
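remove_pods itself is not shown above; the assertions imply it deletes at most two evicted pods per service in one pass and honors a dry-run flag. A minimal sketch consistent with those assertions (the EvictedPod field names and the two-pod cap are assumptions inferred from the test, not confirmed source):

from typing import NamedTuple

from kubernetes.client import V1DeleteOptions

class EvictedPod(NamedTuple):
    # Field names are assumptions; the real EvictedPod type is not shown.
    podname: str
    namespace: str
    eviction_msg: str

def remove_pods(client, service_map, dry_run: bool) -> None:
    # The test asserts two deletions for three evicted pods, so we assume
    # the helper caps deletions at two pods per service per run.
    for service, pods in service_map.items():
        for pod in pods[:2]:
            if dry_run:
                continue
            client.core.delete_namespaced_pod(
                pod.podname,
                pod.namespace,
                body=V1DeleteOptions(),
                grace_period_seconds=0,
                propagation_policy="Background",
            )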
Example #2
def test_terminate_nodes():
    mock_client = mock.MagicMock()
    mock_client.core.delete_node.side_effect = [None, ApiException(404), None]

    m1, m2, m3 = mock.Mock(), mock.Mock(), mock.Mock()
    success, errors = terminate_nodes(client=mock_client, nodes=[m1, m2, m3])
    expected_calls = [
        mock.call.core.delete_node(
            node, body=V1DeleteOptions(), propagation_policy="foreground"
        )
        for node in [m1, m2, m3]
    ]

    assert mock_client.mock_calls == expected_calls
    assert success == [m1, m3]
    assert errors[0][0] == m2
    assert isinstance(errors[0][1], ApiException)

    mock_client.reset_mock()

    mock_client.core.delete_node.side_effect = [None, ApiException(404), None]
    success, errors = terminate_nodes(client=mock_client, nodes=[m1, m2, m3])
    expected_calls = [
        mock.call.core.delete_node(
            node, body=V1DeleteOptions(), propagation_policy="foreground"
        )
        for node in [m1, m2, m3]
    ]
    assert mock_client.mock_calls == expected_calls
    assert success == [m1, m3]
    assert errors[0][0] == m2
    assert isinstance(errors[0][1], ApiException)
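Like remove_pods, terminate_nodes is only exercised here. The assertions imply it attempts every node, collects successes, and pairs each failure with its exception instead of aborting on the first error. A minimal sketch under those assumptions:

from kubernetes.client import V1DeleteOptions
from kubernetes.client.rest import ApiException

def terminate_nodes(client, nodes):
    # Attempt every node; collect (node, exception) pairs rather than
    # raising, so one 404 does not abort the remaining deletions.
    success, errors = [], []
    for node in nodes:
        try:
            client.core.delete_node(
                node, body=V1DeleteOptions(), propagation_policy="foreground"
            )
        except ApiException as e:
            errors.append((node, e))
        else:
            success.append(node)
    return success, errors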
Example #3
    def delete_pod(self):
        """
        Delete the sandbox pod.

        :return: response from the API server
        """
        try:
            # delete PVC first because it has a bigger prio to be deleted
            # lingering PVCs are affecting quota
            if self.pvc:
                pvc_status = self.api.delete_namespaced_persistent_volume_claim(
                    self.pvc.claim_name,
                    namespace=self.k8s_namespace_name,
                    body=V1DeleteOptions(grace_period_seconds=0),
                )
                logger.debug(f"PVC deletion status = {pvc_status}")
        except ApiException as e:
            logger.debug(e)
            if e.status != 404:
                raise
        try:
            status = self.api.delete_namespaced_pod(
                self.pod_name,
                self.k8s_namespace_name,
                body=V1DeleteOptions(grace_period_seconds=0),
            )
            logger.debug(f"Pod deletion status = {status}")
        except ApiException as e:
            logger.debug(e)
            if e.status != 404:
                raise
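The swallow-a-404 pattern used twice in delete_pod recurs throughout these examples. If it appears more than a couple of times, it is worth factoring out; a sketch (the helper name is ours, not from any source above):

from kubernetes.client.rest import ApiException

def delete_ignore_missing(delete_fn, *args, **kwargs):
    # Run any delete_* API call, treating "already gone" (404) as success.
    try:
        return delete_fn(*args, **kwargs)
    except ApiException as e:
        if e.status != 404:
            raise
        return None

# Usage, mirroring the pod deletion above:
# delete_ignore_missing(self.api.delete_namespaced_pod, self.pod_name,
#                       self.k8s_namespace_name,
#                       body=V1DeleteOptions(grace_period_seconds=0))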
Example #4
def delete_serving_deployment(params):
    from kubernetes.client import V1DeleteOptions

    spec = get_deployment_spec(params)
    name = get_deployment_name(params)  # spec["metadata"]["name"]
    namespace = "default"  # TODO: the namespace should be configured or be figured out dynamically

    del_opts = V1DeleteOptions()
    api_client = get_api_client_appv1()
    api_response = api_client.list_namespaced_deployment(namespace)

    if name in [deployment.metadata.name for deployment in api_response.items]:
        api_response = api_client.delete_namespaced_deployment(
            name, namespace, del_opts)
    else:
        LOG.error("Could not find the serving deployment '%s'" % name)
        return {
            "status": "Error",
            "details": "Could not find a serving deployment with name '%s'" % name
        }

    # api_response_filtered = {key: api_response[key] for key in ["apiVersion", "kind"]}
    LOG.info("%s ..." % str(api_response)[:160])
    return api_response
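get_api_client_appv1 is not defined in the snippet. A plausible implementation simply loads cluster credentials and returns an AppsV1Api handle; a sketch assuming the standard kubernetes-client configuration helpers:

from kubernetes import client, config

def get_api_client_appv1():
    # Prefer in-cluster service-account credentials; fall back to
    # the local ~/.kube/config for development.
    try:
        config.load_incluster_config()
    except config.ConfigException:
        config.load_kube_config()
    return client.AppsV1Api()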
Example #5
def delete_serving_deployment(params):
    from kubernetes.client import V1DeleteOptions

    spec = get_deployment_spec(params)
    name = params["deployment_name"]
    namespace = params["namespace"]

    spec["metadata"]["name"] = params["deployment_name"]
    spec["metadata"]["labels"]["app"] = params["deployment_name"]
    spec["spec"]["selector"]["matchLabels"]["app"] = params["deployment_name"]
    spec["spec"]["template"]["metadata"]["labels"]["app"] = params["deployment_name"]

    del_opts = V1DeleteOptions()
    api_client = get_api_client_appv1()
    api_response = api_client.list_namespaced_deployment(namespace)

    if name in [deployment.metadata.name for deployment in api_response.items]:
        api_response = api_client.delete_namespaced_deployment(name, namespace, body=del_opts)
    else:
        LOG.error("Could not find the serving deployment '%s'" % name)
        return {
            "status": "Error",
            "details": "Could not find a serving deployment with name '%s'" % name
        }

    # api_response_filtered = {key: api_response[key] for key in ["apiVersion", "kind"]}
    LOG.info("%s ..." % str(api_response)[:160])
    return api_response
Example #6
    def delete_service(self, service_name_to_delete, namespace):
        """Delete service.

        :param str namespace:
        :param str service_name_to_delete:
        :return:
        """
        try:
            delete_options = V1DeleteOptions(
                propagation_policy="Foreground", grace_period_seconds=0
            )
            self._clients.core_api.delete_namespaced_service(
                name=service_name_to_delete, namespace=namespace, body=delete_options
            )
        except ApiException as e:
            if e.status == 404:
                # Service does not exist, nothing to delete but
                # we can consider this a success.
                self._logger.warning(
                    "not deleting nonexistent service/{} from ns/{}".format(
                        service_name_to_delete, namespace
                    )
                )
            else:
                raise
        else:
            self._logger.info(
                "deleted service/{} from ns/{}".format(
                    service_name_to_delete, namespace
                )
            )
Example #7
def delete_ds(config,
              version,
              ds_name,
              ns_name="default",
              body=V1DeleteOptions()):
    k8s_api_core = client_from_config(config)

    if version >= util.parse_version("v1.9.0"):
        k8s_api_apps = apps_api_client_from_config(config)
        k8s_api_apps.delete_namespaced_daemon_set(ds_name,
                                                  ns_name,
                                                  grace_period_seconds=0,
                                                  orphan_dependents=False)
    else:
        k8s_api_ext = extensions_client_from_config(config)
        k8s_api_ext.delete_namespaced_daemon_set(ds_name,
                                                 ns_name,
                                                 grace_period_seconds=0,
                                                 orphan_dependents=False)

    # Pod in ds has fixed label so we use label selector
    data = k8s_api_core.list_namespaced_pod(
        ns_name, label_selector="app={}".format(ds_name)).to_dict()
    # There should be only one pod
    for pod in data["items"]:
        logging.debug("Removing pod \"{}\"".format(pod["metadata"]["name"]))
        delete_pod(None, pod["metadata"]["name"], ns_name)
    return
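Note that orphan_dependents is deprecated in current kubernetes clients in favor of propagation_policy, and the extensions DaemonSet endpoint is gone in recent API servers. On a modern client the same delete might look like the sketch below (ours, not the original project's code):

from kubernetes.client import AppsV1Api, V1DeleteOptions

def delete_ds_current(api: AppsV1Api, ds_name: str, ns_name: str = "default"):
    # Foreground propagation removes the DaemonSet's pods as well,
    # replacing the deprecated orphan_dependents=False.
    api.delete_namespaced_daemon_set(
        ds_name,
        ns_name,
        body=V1DeleteOptions(grace_period_seconds=0,
                             propagation_policy="Foreground"),
    )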
Example #8
def test_deep_delete_and_create(mock_load_system_paasta_config):
    with mock.patch(
            "paasta_tools.kubernetes.application.controller_wrappers.sleep",
            autospec=True
    ), mock.patch(
            "paasta_tools.kubernetes.application.controller_wrappers.list_all_deployments",
            autospec=True,
    ) as mock_list_deployments, mock.patch(
            "paasta_tools.kubernetes.application.controller_wrappers.force_delete_pods",
            autospec=True,
    ) as mock_force_delete_pods:
        mock_kube_client = mock.MagicMock()
        mock_kube_client.deployments = mock.Mock(
            spec=kubernetes.client.AppsV1Api)
        config_dict = {"instances": 1, "bounce_method": "brutal"}
        app = setup_app(config_dict, True)
        # This mocks being unable to delete the deployment
        mock_list_deployments.return_value = [app.kube_deployment]
        delete_options = V1DeleteOptions(propagation_policy="Background")

        with pytest.raises(Exception):
            # test deep_delete_and_create makes kubeclient calls correctly
            app.deep_delete_and_create(mock_kube_client)
        mock_force_delete_pods.assert_called_with(
            app.item.metadata.name,
            app.kube_deployment.service,
            app.kube_deployment.instance,
            app.item.metadata.namespace,
            mock_kube_client,
        )
        mock_kube_client.deployments.delete_namespaced_deployment.assert_called_with(
            app.item.metadata.name,
            app.item.metadata.namespace,
            body=delete_options)
Example #9
    def delete_app(self, namespace, app_name_to_delete):
        """Delete App.

        Delete a deployment immediately. All pods are deleted in the foreground.
        :param str namespace:
        :param str app_name_to_delete:
        :return:
        """
        try:
            delete_options = V1DeleteOptions(
                propagation_policy="Foreground", grace_period_seconds=0
            )
            self._clients.apps_api.delete_namespaced_deployment(
                name=app_name_to_delete,
                namespace=namespace,
                body=delete_options,
                pretty="true",
            )
        except ApiException as e:
            if e.status == 404:
                # Deployment does not exist, nothing to delete but
                # we can consider this a success.
                self._logger.warning(
                    "not deleting nonexistent deploy/{} from ns/{}".format(
                        app_name_to_delete, namespace
                    )
                )
            else:
                raise
        else:
            self._logger.info(
                "deleted deploy/{} from ns/{}".format(app_name_to_delete, namespace)
            )
Example #10
    def _pod_delete(self):
        """Delete the Pod after it has Completed or Failed."""
        log.debug(f"deleting pod {self._pod_name}")
        self._kclient.delete_namespaced_pod(self._pod_name,
                                            namespace=self._namespace,
                                            body=V1DeleteOptions())
Example #11
    def cleanup(self):
        with self.__lock:
            log.debug('cleanup: oppo list: %s',
                      json.dumps(self.__opportunistic_resources))
            clean_count = 0
            check_secs = self.__config_manager.get_float(
                OVERSUBSCRIBE_CLEANUP_AFTER_SECONDS_KEY,
                DEFAULT_OVERSUBSCRIBE_CLEANUP_AFTER_SECONDS)
            if check_secs <= 0:
                log.info(
                    'configured to skip cleanup. opportunistic resource windows will not be deleted.'
                )
                return 0
            for item in self.__opportunistic_resources.values():
                check_time = datetime.utcnow() - timedelta(seconds=check_secs)
                if check_time < self.__get_timestamp(
                        item['object']['spec']['window']['end']):
                    continue
                log.debug('deleting: %s', json.dumps(item))
                delete_opts = V1DeleteOptions(grace_period_seconds=0,
                                              propagation_policy='Foreground')
                resp = self.__custom_api.delete_namespaced_custom_object(
                    version=OPPORTUNISTIC_RESOURCE_VERSION,
                    group=OPPORTUNISTIC_RESOURCE_GROUP,
                    plural=OPPORTUNISTIC_RESOURCE_PLURAL,
                    namespace=OPPORTUNISTIC_RESOURCE_NAMESPACE,
                    name=item['object']['metadata']['name'],
                    body=delete_opts)
                log.debug('deleted: %s', json.dumps(resp))
                clean_count += 1

            return clean_count
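__get_timestamp is not shown; since its result is compared against datetime.utcnow(), it presumably parses the window's end field into a naive UTC datetime. A method sketch, assuming the CRD stores ISO-8601 UTC strings:

from datetime import datetime

def __get_timestamp(self, ts: str) -> datetime:
    # Assumes values such as "2021-05-01T12:00:00Z".
    return datetime.strptime(ts, "%Y-%m-%dT%H:%M:%SZ")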
Example #12
def delete_namespace(namespace: str, propagate: bool = False):
    """
    Removes a namespace with the given name

    :param namespace: namespace to be deleted
    :param propagate: If True - all objects in a namespace will be deleted
    In case of any problems (i.e. lack of privileges) it throws an exception
    """
    try:
        api = get_k8s_api()
        propagation_policy = "Orphan"
        if propagate:
            propagation_policy = "Foreground"
        body = V1DeleteOptions(propagation_policy=propagation_policy)

        response = api.delete_namespace(namespace, body)

        if response.status != "{'phase': 'Terminating'}":
            error_description = Texts.NAMESPACE_DELETE_ERROR_MSG.format(
                namespace=namespace)
            logger.exception(error_description)
            raise KubernetesError(error_description)

    except Exception:
        error_description = Texts.NAMESPACE_DELETE_ERROR_MSG.format(
            namespace=namespace)
        logger.exception(error_description)
        raise KubernetesError(error_description)
Example #13
def delete_deployment(params):
    from kubernetes.client import V1DeleteOptions

    spec = get_knative_spec(params)
    spec["metadata"]["name"] = params["deployment_name"]
    name = spec["metadata"]["name"]
    namespace = "default"  # TODO: the namespace should be configured or be figured out dynamically
    plural = spec["kind"].lower(
    ) + "s"  # TODO: verify the "rule" for constructing plural
    group, version = spec["apiVersion"].split("/")

    del_opts = V1DeleteOptions()
    api_client = get_custom_objects_api_client()
    api_response = api_client.list_namespaced_custom_object(
        group, version, namespace, plural)

    if name in [
            deployment["metadata"]["name"]
            for deployment in api_response["items"]
    ]:
        api_response = api_client.delete_namespaced_custom_object(
            group, version, namespace, plural, name, del_opts)
    else:
        LOG.error("Could not find the knative serving deployment '%s'" % name)
        return {
            "status": "Error",
            "details": "Could not find a knative serving deployment with name '%s'" % name
        }

    # api_response_filtered = {key: api_response[key] for key in ["apiVersion", "kind"]}
    LOG.info("%s ..." % str(api_response)[:160])
    return api_response
Example #14
    def deep_delete(self, kube_client: KubeClient) -> None:
        """
        Remove all controllers, pods, and pod disruption budgets related to this application
        :param kube_client:
        """
        delete_options = V1DeleteOptions(propagation_policy="Foreground")
        try:
            kube_client.deployments.delete_namespaced_stateful_set(
                self.item.metadata.name,
                self.item.metadata.namespace,
                body=delete_options,
            )
        except ApiException as e:
            if e.status == 404:
                # StatefulSet does not exist, nothing to delete but
                # we can consider this a success.
                self.logging.debug(
                    "not deleting nonexistent statefulset/{} from namespace/{}"
                    .format(self.item.metadata.name,
                            self.item.metadata.namespace))
            else:
                raise
        else:
            self.logging.info(
                "deleted statefulset/{} from namespace/{}".format(
                    self.item.metadata.name, self.item.metadata.namespace))
        self.delete_pod_disruption_budget(kube_client)
Example #15
def restart_prometheus_adapter(kube_client: KubeClient) -> None:
    log.info("Attempting to remove existing adapter pod(s).")
    all_pods = cast(
        # once again, we cast since the kubernetes python api isn't typed
        List[V1Pod],
        kube_client.core.list_namespaced_pod(
            namespace=PROMETHEUS_ADAPTER_POD_NAMESPACE).items,
    )
    # there should only ever be one pod actually up, but we might as well enforce that here
    # just in case there are more
    pods_to_delete = [
        pod for pod in all_pods
        if pod.metadata.name.startswith(PROMETHEUS_ADAPTER_POD_NAME_PREFIX)
        and pod.status.phase in PROMETHEUS_ADAPTER_POD_PHASES_TO_REMOVE
    ]
    log.debug("Found the following pods to delete: %s", pods_to_delete)

    for pod in pods_to_delete:
        log.debug("Attempting to remove %s.", pod.metadata.name)
        kube_client.core.delete_namespaced_pod(
            name=pod.metadata.name,
            namespace=pod.metadata.namespace,
            body=V1DeleteOptions(),
            # background propagation with no grace period is equivalent to doing a force-delete from kubectl
            grace_period_seconds=0,
            propagation_policy="Background",
        )
        log.debug("Removed %s.", pod.metadata.name)

    log.info("Adapter restarted successfully")
Example #16
def db():
    """
    Provides a real cloud database, with all test users removed afterwards.
    """
    database = firestore.Client()
    yield database

    users = database.collection('users')

    batch = database.batch()

    for user in (users.where('username', '>=',
                             '__').where('username', '<', '_`')).stream():
        batch.delete(users.document(user.id))

    batch.commit()

    gke_client = container_v1.ClusterManagerClient()
    cluster = gke_client.get_cluster('st-dev-252104', 'asia-northeast1-b',
                                     'st-dev')
    k8s = kubernetes_api(cluster)

    gameservers = k8s.get_namespaced_custom_object('agones.dev', 'v1',
                                                   'default', 'gameservers',
                                                   '')['items']

    for gameserver in gameservers:
        if gameserver['status']['state'] == 'Ready':
            continue
        k8s.delete_namespaced_custom_object('agones.dev', 'v1', 'default',
                                            'gameservers',
                                            gameserver['metadata']['name'],
                                            V1DeleteOptions())
Example #17
def delete_unschedulable_pods(api: CoreV1Api, namespace: str):
    """
    Delete pods that are unschedulable due to a missing persistent volume claim.

    A stateful set may create a pod attached to a missing persistent volume
    claim if the pod is recreated while the persistent volume claim is pending
    delete.

    When this happens, delete the pod so that the stateful set will create a
    new persistent volume claim when it next creates the pod.
    """
    for pod in api.list_namespaced_pod(namespace).items:
        if _unschedulable_due_to_pvc(pod):
            logger.info(f"deleting unschedulable pod: {pod.metadata.name}")
            try:
                api.delete_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=namespace,
                    body=V1DeleteOptions(
                        grace_period_seconds=0,
                        propagation_policy="Background",
                        preconditions=V1Preconditions(
                            resource_version=pod.metadata.resource_version,
                            uid=pod.metadata.uid,
                        ),
                    ),
                )
            except ApiException as e:
                if e.reason not in (CONFLICT, NOT_FOUND):
                    raise
                logger.info(f"pod already deleted or updated: {pod.metadata.name}")
Example #18
def test_ensure_pod_disruption_budget_replaces_outdated():
    with mock.patch(
            'paasta_tools.setup_kubernetes_job.pod_disruption_budget_for_service_instance',
            autospec=True,
    ) as mock_pdr_for_service_instance, mock.patch(
            'paasta_tools.setup_kubernetes_job.create_pod_disruption_budget',
            autospec=True,
    ) as mock_create_pdr:
        mock_req_pdr = mock.Mock()
        mock_req_pdr.spec.min_available = 10
        mock_pdr_for_service_instance.return_value = mock_req_pdr

        mock_client = mock.Mock()

        mock_pdr = mock.Mock()
        mock_pdr.spec.min_available = 10

        mock_client.read_namespaced_pod_disruption_budget.return_value = mock_pdr

        ensure_pod_disruption_budget(
            mock_client,
            'fake_service',
            'fake_instances',
            min_instances=10,
        )
        mock_client.policy.delete_namespaced_pod_disruption_budget.assert_called_once_with(
            name=mock_req_pdr.metadata.name,
            namespace=mock_req_pdr.metadata.namespace,
            body=V1DeleteOptions(),
        )
        mock_create_pdr.assert_called_once_with(
            kube_client=mock_client,
            pod_disruption_budget=mock_req_pdr,
        )
Example #19
    def delete_function(self, function_id, version, labels=None):
        """Delete related resources for function.

        - Delete service
        - Delete pods
        """
        pre_label = {
            'function_id': function_id,
            'function_version': str(version)
        }
        labels = labels or pre_label
        selector = common.convert_dict_to_string(labels)

        ret = self.v1.list_namespaced_service(self.conf.kubernetes.namespace,
                                              label_selector=selector)
        names = [i.metadata.name for i in ret.items]
        for svc_name in names:
            self.v1.delete_namespaced_service(
                svc_name,
                self.conf.kubernetes.namespace,
                V1DeleteOptions(),
            )

        self.v1.delete_collection_namespaced_pod(
            self.conf.kubernetes.namespace, label_selector=selector)
Example #20
    def delete_secret(self, credstash_secret, resource_version):
        namespace = credstash_secret["metadata"]["namespace"]
        name = credstash_secret["metadata"]["name"]
        try:
            secret_obj = self.v1core.read_namespaced_secret(
                name, namespace=namespace)
            try:
                self.check_resource_version(secret_obj, resource_version)
            except ResourceTooOldException:
                print("We've already processed this event, skipping")
                return
        except ApiException as e:
            if e.status != 404:
                raise
            else:
                # Secret already deleted, nothing to do.
                return

        if (secret_obj.metadata.annotations.get("credstash-fully-managed",
                                                None) == "true"):
            print("{} is managed by credstash, deleting it".format(name))
            self.v1core.delete_namespaced_secret(name, namespace,
                                                 V1DeleteOptions())
        else:
            print(
                "{} is NOT managed by credstash, NOT deleting it".format(name))
Example #21
def delete_persistent_volume_claim(namespace: str,
                                   name: str,
                                   async_req: bool = False) -> None:
    """delete_persistent_volume_claim

    delete a PersistentVolumeClaim

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    :param str namespace: namespace where the PersistentVolumeClaim is located
    :param str name: name of the PersistentVolumeClaim (required)
    :param bool async_req: execute request asynchronously
    """
    load_opta_kube_config()
    v1 = CoreV1Api()

    try:
        options = V1DeleteOptions(grace_period_seconds=5)
        v1.delete_collection_namespaced_persistent_volume_claim(
            namespace=namespace,
            field_selector=f"metadata.name={name}",
            async_req=async_req,
            body=options,
        )
    except ApiException as e:
        if e.status == 404:
            # not found = nothing to delete
            return None
        raise e
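Using delete_collection_namespaced_persistent_volume_claim with a field selector sidesteps the 404 that a direct delete raises when the claim is already gone. The direct call works too, with the usual 404 guard; a sketch of that alternative:

from kubernetes.client import CoreV1Api, V1DeleteOptions
from kubernetes.client.rest import ApiException

def delete_pvc_direct(v1: CoreV1Api, namespace: str, name: str) -> None:
    try:
        v1.delete_namespaced_persistent_volume_claim(
            name, namespace, body=V1DeleteOptions(grace_period_seconds=5)
        )
    except ApiException as e:
        if e.status == 404:
            return  # not found = nothing to delete
        raise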
Example #22
    def delete_pool(self, name):
        """Delete all resources belong to the deployment."""
        LOG.info("Deleting deployment %s", name)

        labels = {'runtime_id': name}
        selector = common.convert_dict_to_string(labels)

        self.v1extension.delete_collection_namespaced_replica_set(
            self.conf.kubernetes.namespace, label_selector=selector)
        LOG.info("ReplicaSets in deployment %s deleted.", name)

        ret = self.v1.list_namespaced_service(self.conf.kubernetes.namespace,
                                              label_selector=selector)
        names = [i.metadata.name for i in ret.items]
        for svc_name in names:
            self.v1.delete_namespaced_service(
                svc_name,
                self.conf.kubernetes.namespace,
                V1DeleteOptions(),
            )
        LOG.info("Services in deployment %s deleted.", name)

        self.v1extension.delete_collection_namespaced_deployment(
            self.conf.kubernetes.namespace,
            label_selector=selector,
            field_selector='metadata.name=%s' % name)
        # Should delete pods after deleting deployment to avoid pods are
        # recreated by k8s.
        self.v1.delete_collection_namespaced_pod(
            self.conf.kubernetes.namespace, label_selector=selector)
        LOG.info("Pods in deployment %s deleted.", name)
        LOG.info("Deployment %s deleted.", name)
Example #23
def delete_serving_service(params):
    from kubernetes.client import V1DeleteOptions

    spec = get_service_spec()
    name = params["deployment_name"]
    namespace = "default"  # TODO: the namespace should be configured or be figured out dynamically

    spec["metadata"]["name"] = params["deployment_name"]
    spec["metadata"]["labels"]["app"] = params["deployment_name"]
    spec["spec"]["selector"]["app"] = params["deployment_name"]
    spec["spec"]["ports"][0]["port"] = int(params["container_port"])

    del_opts = V1DeleteOptions()
    api_client = get_api_client_v1()
    api_response = api_client.list_namespaced_service(namespace)

    if name in [service.metadata.name for service in api_response.items]:
        api_response = api_client.delete_namespaced_service(name,
                                                            namespace,
                                                            body=del_opts)
    else:
        LOG.error("Could not find the serving service '%s'" % name)
        return {
            "status": "Error",
            "details": "Could not find a serving service with name '%s'" % name
        }

    # api_response_filtered = {key: api_response[key] for key in ["apiVersion", "kind"]}
    LOG.info("%s ..." % str(api_response)[:160])
    return api_response
Example #24
def delete_pv(k8s_api=None):
    body = V1DeleteOptions()
    try:
        k8s_api.delete_persistent_volume(name='datalayer', body=body)
    except ApiException as err:
        logger.error(
            "Exception when calling CoreV1Api->delete_persistent_volume: %s\n"
            % err)
Example #25
    def _delete(self, namespace, name, options, *args, **kwargs):
        return self._api.delete_namespaced_custom_object(
            group=self._api_group,
            version=self.version,
            namespace=namespace,
            plural=self._plural_name,  # required by the custom-objects API, as in Example #27
            name=name,
            body=V1DeleteOptions(**options),
            **kwargs)
Example #26
    def tearDown(self):
        self.api_instance.delete_namespaced_custom_object(
            group=self.group,
            version=self.version,
            namespace=self.namespace,
            plural=self.plural,
            name=self.job_name,
            body=V1DeleteOptions())
Example #27
    def _delete(self, name, options, *args, **kwargs):
        return self._api.delete_cluster_custom_object(
            group=self._api_group,
            version=self.version,
            plural=self._plural_name,
            name=name,
            body=V1DeleteOptions(**options),
            **kwargs)
Example #28
    def _init_pod_delete(self):
        """Teardown the init Pod after the context has been copied
        into the volume.
        """
        log.debug(f"deleting init pod {self._init_pod_name}")
        self._kclient.delete_namespaced_pod(self._init_pod_name,
                                            namespace=self._namespace,
                                            body=V1DeleteOptions())
Example #29
    def _vol_claim_delete(self):
        """Delete the PersistentVolumeClaim."""
        log.debug(f"deleting volume claim {self._vol_claim_name}")
        self._kclient.delete_namespaced_persistent_volume_claim(
            self._vol_claim_name,
            namespace=self._namespace,
            body=V1DeleteOptions())
Example #30
def delete_complete_jobs(api: CoreV1Api, batch_api: BatchV1Api, namespace: str):
    """Delete complete jobs."""
    for job in batch_api.list_namespaced_job(namespace).items:
        if (
            job.status.conditions
            and job.status.conditions[0].type == "Complete"
            and not job.metadata.deletion_timestamp
            and _is_flush_job(job)
        ):
            logger.info(f"deleting complete job: {job.metadata.name}")
            # configure persistent volume claims to be deleted with the job
            pv_name = _pv_name_from_job(job)
            logger.info(f"including pv in pvc delete: {pv_name}")
            api.patch_persistent_volume(
                name=pv_name,
                body=V1PersistentVolume(
                    spec=V1PersistentVolumeSpec(
                        persistent_volume_reclaim_policy="Delete",
                    )
                ),
            )
            logger.info(f"including pvc in job delete: {job.metadata.name}")
            api.patch_namespaced_persistent_volume_claim(
                name=job.metadata.name,
                namespace=namespace,
                body=V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(
                        owner_references=[
                            V1OwnerReference(
                                api_version="batch/v1",
                                kind="Job",
                                name=job.metadata.name,
                                uid=job.metadata.uid,
                                block_owner_deletion=True,
                            )
                        ]
                    )
                ),
            )
            try:
                batch_api.delete_namespaced_job(
                    name=job.metadata.name,
                    namespace=namespace,
                    body=V1DeleteOptions(
                        grace_period_seconds=0,
                        propagation_policy="Foreground",
                        preconditions=V1Preconditions(
                            resource_version=job.metadata.resource_version,
                            uid=job.metadata.uid,
                        ),
                    ),
                )
            except ApiException as e:
                if e.reason not in (CONFLICT, NOT_FOUND):
                    raise
                logger.info(f"job already deleted or updated: {job.metadata.name}")