def test_create_pvc_raises_server_error(api: MagicMock, batch_api: MagicMock):
    api.list_persistent_volume.return_value = V1PersistentVolumeList(
        items=[
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-0"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
        ]
    )

    def create_pvc(namespace, body):
        raise ApiException(reason="Server Error")

    api.create_namespaced_persistent_volume_claim.side_effect = create_pvc

    with pytest.raises(ApiException):
        flush_released_pvs(api, batch_api, "command", "env", "image", "namespace")

    api.list_persistent_volume.assert_called_once_with()
    batch_api.list_namespaced_job.assert_called_once_with("namespace")
def delete_complete_jobs(api: CoreV1Api, batch_api: BatchV1Api, namespace: str):
    """Delete complete jobs."""
    for job in batch_api.list_namespaced_job(namespace).items:
        if (
            job.status.conditions
            and job.status.conditions[0].type == "Complete"
            and not job.metadata.deletion_timestamp
            and _is_flush_job(job)
        ):
            logger.info(f"deleting complete job: {job.metadata.name}")
            # configure persistent volume claims to be deleted with the job
            pv_name = _pv_name_from_job(job)
            logger.info(f"including pv in pvc delete: {pv_name}")
            api.patch_persistent_volume(
                name=pv_name,
                body=V1PersistentVolume(
                    spec=V1PersistentVolumeSpec(
                        persistent_volume_reclaim_policy="Delete",
                    )
                ),
            )
            logger.info(f"including pvc in job delete: {job.metadata.name}")
            api.patch_namespaced_persistent_volume_claim(
                name=job.metadata.name,
                namespace=namespace,
                body=V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(
                        owner_references=[
                            V1OwnerReference(
                                api_version="batch/v1",
                                kind="Job",
                                name=job.metadata.name,
                                uid=job.metadata.uid,
                                block_owner_deletion=True,
                            )
                        ]
                    )
                ),
            )
            try:
                batch_api.delete_namespaced_job(
                    name=job.metadata.name,
                    namespace=namespace,
                    body=V1DeleteOptions(
                        grace_period_seconds=0,
                        propagation_policy="Foreground",
                        preconditions=V1Preconditions(
                            resource_version=job.metadata.resource_version,
                            uid=job.metadata.uid,
                        ),
                    ),
                )
            except ApiException as e:
                if e.reason not in (CONFLICT, NOT_FOUND):
                    raise
                logger.info(f"job already deleted or updated: {job.metadata.name}")
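# The helpers _is_flush_job and _pv_name_from_job used above are not defined in
# this listing. A minimal sketch, assuming flush jobs are named "flush-<pv name>"
# (consistent with the tests below, e.g. job "flush-pv-8" for pv "pv-8"); the
# real implementations may check labels or annotations instead.
from kubernetes.client import V1Job


def _is_flush_job(job: V1Job) -> bool:
    # hypothetical check based on the assumed naming convention
    return job.metadata.name.startswith("flush-")


def _pv_name_from_job(job: V1Job) -> str:
    # hypothetical inverse of the naming convention: "flush-pv-8" -> "pv-8"
    return job.metadata.name[len("flush-"):]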
def deploy(self, path, access_mode="ReadWriteMany"):
    if self.meta.labels:
        self.meta.labels.update(self.target_labels)
    else:
        self.meta.labels = self.target_labels
    pv_spec = V1PersistentVolumeSpec(
        access_modes=[access_mode],
        capacity={"storage": self.capacity},
        host_path=V1HostPathVolumeSource(path=path),
        persistent_volume_reclaim_policy='Recycle'
        # claim_ref=V1ObjectReference(),
    )
    pv = V1PersistentVolume(metadata=self.meta, spec=pv_spec)
    k8sclient.apiV1.create_persistent_volume(body=pv)
def _bind_pvc(
    api: CoreV1Api, pv: V1PersistentVolume, pvc: V1PersistentVolumeClaim
) -> V1PersistentVolume:
    logger.info(f"binding pv to pvc: {pv.metadata.name}, {pvc.metadata.name}")
    return api.patch_persistent_volume(
        name=pv.metadata.name,
        body=V1PersistentVolume(
            spec=V1PersistentVolumeSpec(
                claim_ref=V1ObjectReference(
                    api_version="v1",
                    kind="PersistentVolumeClaim",
                    name=pvc.metadata.name,
                    namespace=pvc.metadata.namespace,
                    resource_version=pvc.metadata.resource_version,
                    uid=pvc.metadata.uid,
                )
            )
        ),
    )
def create_pv_gluster_volume(self, v1_api, endpoint):
    body = kubernetes.client.V1PersistentVolume()
    body.api_version = "v1"
    body.kind = "PersistentVolume"
    meta = V1ObjectMeta()
    meta.generate_name = "persist-volume"
    body.metadata = meta
    source = V1GlusterfsPersistentVolumeSource(
        endpoints=endpoint, path=DEFAULT_DATASET_VOLUME_NAME, read_only=False
    )
    spec = V1PersistentVolumeSpec(
        capacity={"storage": DEFAULT_DATASET_VOLUME_SIZE},
        access_modes=["ReadWriteMany"],
        persistent_volume_reclaim_policy="Retain",
        glusterfs=source,
    )
    body.spec = spec
    api_response = v1_api.create_persistent_volume(body)
    return api_response.metadata.name
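# DEFAULT_DATASET_VOLUME_NAME and DEFAULT_DATASET_VOLUME_SIZE are module-level
# constants assumed by create_pv_gluster_volume and are not defined in this
# listing. Hypothetical placeholder values for illustration only:
DEFAULT_DATASET_VOLUME_NAME = "dataset-volume"  # Gluster volume path on the endpoint
DEFAULT_DATASET_VOLUME_SIZE = "10Gi"            # requested PV capacity

# Example call, assuming an existing Endpoints object named "glusterfs-cluster",
# a configured CoreV1Api client, and a hypothetical owning object dataset_manager:
#   pv_name = dataset_manager.create_pv_gluster_volume(v1_api, "glusterfs-cluster")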
def deploy(self, access_mode="ReadWriteMany"):
    if self.meta.labels:
        self.meta.labels.update(self.target_labels)
    else:
        self.meta.labels = self.target_labels
    pv_spec = V1PersistentVolumeSpec(
        access_modes=[access_mode],
        capacity={"storage": self.capacity},
        rbd=V1RBDVolumeSource(
            fs_type='xfs',
            image=self.image,
            secret_ref=V1LocalObjectReference(self.secret_name),
            monitors=self.monitors,
            pool=self.pool,
            read_only=False,
        ),
        persistent_volume_reclaim_policy='Recycle'
        # claim_ref=V1ObjectReference(),
    )
    pv = V1PersistentVolume(metadata=self.meta, spec=pv_spec)
    k8sclient.apiV1.create_persistent_volume(body=pv)
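# The two deploy() methods above are instance methods; the attributes they read
# (self.meta, self.capacity, self.target_labels, plus image/secret_name/monitors/
# pool for the RBD variant) are assumed to be set by their owning classes, which
# are not part of this listing. A hypothetical attribute holder for illustration:
from dataclasses import dataclass, field
from typing import Dict, List

from kubernetes.client import V1ObjectMeta


@dataclass
class RBDVolumeConfig:
    meta: V1ObjectMeta
    capacity: str                     # e.g. "10Gi"
    image: str                        # RBD image name (hypothetical)
    secret_name: str                  # secret holding the Ceph keyring (hypothetical)
    pool: str = "rbd"
    monitors: List[str] = field(default_factory=list)
    target_labels: Dict[str, str] = field(default_factory=dict)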
def test_flush_released_pvs(api: MagicMock, batch_api: MagicMock):
    api.list_persistent_volume.return_value = V1PersistentVolumeList(
        items=[
            # don't flush because job exists
            V1PersistentVolume(metadata=V1ObjectMeta(name="pv-0")),
            # don't flush because wrong namespace
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-4"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(namespace="other"),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # don't flush because it's already done
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-5"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(namespace="namespace"),
                    persistent_volume_reclaim_policy="Delete",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # don't flush because it's in use
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-6"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # try to flush because pvc is bound but job was created after jobs
            # were listed
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-7"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="flush-pv-7", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # flush because pvc is bound but job does not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-8"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="flush-pv-8", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # flush because pvc is not yet bound and job does not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-9"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # flush because pvc and job both do not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-A"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
        ]
    )

    def create_pvc(namespace: str, body: V1PersistentVolumeClaim):
        if body.metadata.name == "flush-pv-9":
            exc = ApiException(status=409, reason="Conflict")
            exc.body = '{"reason":"AlreadyExists"}'
            raise exc
        body.metadata.uid = "uid-" + body.metadata.name
        body.metadata.resource_version = "1"
        return body

    api.create_namespaced_persistent_volume_claim.side_effect = create_pvc

    def read_pvc(name: str, namespace: str):
        return V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(
                name=name, namespace=namespace, uid="uid-" + name, resource_version="2"
            )
        )

    api.read_namespaced_persistent_volume_claim.side_effect = read_pvc

    batch_api.list_namespaced_job.return_value = V1JobList(
        items=[V1Job(metadata=V1ObjectMeta(name="flush-pv-0"))]
    )

    def create_job(namespace, body):
        if body.metadata.name == "flush-pv-7":
            exc = ApiException(status=409, reason="Conflict")
            exc.body = '{"reason":"AlreadyExists"}'
            raise exc

    batch_api.create_namespaced_job.side_effect = create_job

    flush_released_pvs(api, batch_api, "command", "env", "image", "namespace")

    api.list_persistent_volume.assert_called_once_with()
    batch_api.list_namespaced_job.assert_called_once_with("namespace")
    assert [f"flush-pv-{i}" for i in "9A"] == [
        call.kwargs["body"].metadata.name
        for call in api.create_namespaced_persistent_volume_claim.call_args_list
    ]
    api.read_namespaced_persistent_volume_claim.assert_called_once_with(
        "flush-pv-9", "namespace"
    )
    assert [("pv-9", "flush-pv-9"), ("pv-A", "flush-pv-A")] == [
        (
            call.kwargs["name"],
            call.kwargs["body"].spec.claim_ref
            and call.kwargs["body"].spec.claim_ref.name,
        )
        for call in api.patch_persistent_volume.call_args_list
    ]
    assert [f"flush-pv-{i}" for i in "789A"] == [
        call.kwargs["body"].metadata.name
        for call in batch_api.create_namespaced_job.call_args_list
    ]
    batch_api.read_namespaced_job.assert_called_once_with("flush-pv-7", "namespace")
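# flush_released_pvs is the function exercised by the tests above but is not part
# of this listing. The following is a minimal sketch reconstructed from the test
# expectations, assuming flush jobs and PVCs are both named "flush-<pv name>";
# the PVC spec and job pod template below are illustrative placeholders, and
# command/env are passed through exactly as given.
from kubernetes.client import (
    BatchV1Api, CoreV1Api, V1Container, V1Job, V1JobSpec, V1ObjectMeta,
    V1PersistentVolumeClaim, V1PersistentVolumeClaimSpec,
    V1PersistentVolumeClaimVolumeSource, V1PodSpec, V1PodTemplateSpec,
    V1ResourceRequirements, V1Volume, V1VolumeMount,
)
from kubernetes.client.rest import ApiException


def flush_released_pvs_sketch(
    api: CoreV1Api, batch_api: BatchV1Api, command, env, image, namespace: str
):
    """Create flush jobs for released persistent volumes (illustrative sketch)."""
    existing_jobs = {
        job.metadata.name for job in batch_api.list_namespaced_job(namespace).items
    }
    for pv in api.list_persistent_volume().items:
        name = f"flush-{pv.metadata.name}"
        # skip when a flush job already exists, the PV belongs to another
        # namespace, it is already set to be deleted, or it is bound to a live claim
        if (
            name in existing_jobs
            or not pv.spec
            or not pv.spec.claim_ref
            or pv.spec.claim_ref.namespace != namespace
            or pv.spec.persistent_volume_reclaim_policy != "Retain"
            or (pv.status.phase == "Bound" and pv.spec.claim_ref.name != name)
        ):
            continue
        if pv.spec.claim_ref.name != name:
            # released PV still points at its old claim: create a flush PVC and
            # bind it to the PV via _bind_pvc (defined earlier in this listing)
            pvc = V1PersistentVolumeClaim(
                metadata=V1ObjectMeta(name=name, namespace=namespace),
                spec=V1PersistentVolumeClaimSpec(
                    access_modes=["ReadWriteOnce"],
                    resources=V1ResourceRequirements(
                        requests={"storage": (pv.spec.capacity or {}).get("storage", "1Gi")}
                    ),
                    volume_name=pv.metadata.name,
                ),
            )
            try:
                pvc = api.create_namespaced_persistent_volume_claim(namespace, body=pvc)
            except ApiException as e:
                if e.status != 409:
                    raise
                # PVC was created concurrently; read it to get uid/resource_version
                pvc = api.read_namespaced_persistent_volume_claim(name, namespace)
            _bind_pvc(api, pv, pvc)
        job = V1Job(
            metadata=V1ObjectMeta(name=name, namespace=namespace),
            spec=V1JobSpec(
                template=V1PodTemplateSpec(
                    spec=V1PodSpec(
                        restart_policy="OnFailure",
                        containers=[
                            V1Container(
                                name="flush",
                                image=image,
                                command=command,
                                env=env,
                                volume_mounts=[
                                    V1VolumeMount(name="data", mount_path="/data")
                                ],
                            )
                        ],
                        volumes=[
                            V1Volume(
                                name="data",
                                persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                                    claim_name=name
                                ),
                            )
                        ],
                    )
                )
            ),
        )
        try:
            batch_api.create_namespaced_job(namespace, body=job)
        except ApiException as e:
            if e.status != 409:
                raise
            # job was created concurrently; fetch it instead
            batch_api.read_namespaced_job(name, namespace)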