def test_persistent_volume_claim_present(self, mock_exit_json, mock_k8s_read, mock_k8s_create):
    """Creating a PVC that is absent from the cluster reports changed=True."""
    # Arrange: module arguments requesting a new PVC.
    set_module_args(dict(
        state='present',
        kind='PersistentVolumeClaim',
        name='pvc-demo',
        namespace='vms',
        api_version='v1',
        resource_definition=V1PersistentVolumeClaim().to_dict(),
    ))
    # The resource does not exist yet, so the create path is taken.
    mock_k8s_read.return_value = None
    mock_k8s_create.return_value = V1PersistentVolumeClaim()
    # Act.
    module = raw.KubeVirtRawModule()
    module.execute_module()
    # Assert: the module exits once, reporting the created resource.
    mock_exit_json.assert_called_once_with(
        changed=True, result=V1PersistentVolumeClaim().to_dict())
def _create_pvc(api: CoreV1Api, name: str, namespace: str, pv: V1PersistentVolume) -> V1PersistentVolumeClaim:
    """Create a PVC bound to *pv*; on a name conflict, return the existing claim.

    The claim copies its capacity and storage class from the PV and pins
    ``volume_name`` so it binds to exactly that volume.
    """
    logger.info(f"creating pvc: {name}")
    claim_spec = V1PersistentVolumeClaimSpec(
        access_modes=["ReadWriteOnce"],
        resources=V1ResourceRequirements(requests=pv.spec.capacity),
        storage_class_name=pv.spec.storage_class_name,
        volume_name=pv.metadata.name,
    )
    claim = V1PersistentVolumeClaim(
        api_version="v1",
        kind="PersistentVolumeClaim",
        metadata=V1ObjectMeta(name=name, namespace=namespace),
        spec=claim_spec,
    )
    try:
        return api.create_namespaced_persistent_volume_claim(namespace=namespace, body=claim)
    except ApiException as e:
        # 409 Conflict with reason AlreadyExists: another actor created it first,
        # so adopt the existing claim instead of failing.
        if e.reason == CONFLICT and json.loads(e.body)["reason"] == ALREADY_EXISTS:
            logger.info(f"using existing pvc: {name}")
            return api.read_namespaced_persistent_volume_claim(name, namespace)
        raise
def setUp(self):
    """Load the example cluster fixture and build the StatefulSet expected from it."""
    super().setUp()
    # Canned cluster definition wrapped in the typed configuration model.
    self.cluster_dict = getExampleClusterDefinition()
    self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
    self.name = self.cluster_object.metadata.name
    self.namespace = self.cluster_object.metadata.namespace
    # Expected result: a 3-member mongod replica set, each pod with a 30Gi claim.
    self.stateful_set = V1beta1StatefulSet(
        metadata=self._createMeta(self.name),
        spec=V1beta1StatefulSetSpec(
            replicas=3,
            service_name=self.name,
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=KubernetesResources.createDefaultLabels(self.name)),
                spec=V1PodSpec(containers=[
                    V1Container(
                        name="mongodb",
                        # POD_IP is injected from the pod's status via the downward API.
                        env=[
                            V1EnvVar(name="POD_IP",
                                     value_from=V1EnvVarSource(
                                         field_ref=V1ObjectFieldSelector(
                                             api_version="v1",
                                             field_path="status.podIP")))
                        ],
                        command=[
                            "mongod", "--replSet", self.name, "--bind_ip",
                            "0.0.0.0", "--smallfiles", "--noprealloc"
                        ],
                        image="mongo:3.6.4",
                        ports=[
                            V1ContainerPort(name="mongodb",
                                            container_port=27017,
                                            protocol="TCP")
                        ],
                        volume_mounts=[
                            V1VolumeMount(name="mongo-storage",
                                          read_only=False,
                                          mount_path="/data/db")
                        ],
                        # Requests equal limits, i.e. Guaranteed QoS for the pod.
                        resources=V1ResourceRequirements(limits={
                            "cpu": "100m",
                            "memory": "64Mi"
                        }, requests={
                            "cpu": "100m",
                            "memory": "64Mi"
                        }))
                ])),
            volume_claim_templates=[
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(name="mongo-storage"),
                    spec=V1PersistentVolumeClaimSpec(
                        access_modes=["ReadWriteOnce"],
                        resources=V1ResourceRequirements(
                            requests={"storage": "30Gi"})))
            ],
        ),
    )
def _ensure_pvc(self, name, storage_class, size):
    """Create a PVC named *name* requesting *size* of storage from *storage_class*."""
    body = V1PersistentVolumeClaim(
        metadata=V1ObjectMeta(namespace=self.namespace, name=name),
        spec=V1PersistentVolumeClaimSpec(
            storage_class_name=storage_class,
            resources=V1ResourceRequirements(requests={'storage': size}),
        ),
    )
    self.api.create_namespaced_persistent_volume_claim(
        namespace=self.namespace,
        body=body,
        _request_timeout=API_TIMEOUT)
def test_delete_detached_pvcs_raises_server_error(api: MagicMock):
    """An unexpected server error from the delete call must propagate to the caller."""
    # No pods exist, so the single claim below counts as detached.
    api.list_namespaced_pod.return_value = V1PodList(items=[])
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should be deleted
            V1PersistentVolumeClaim(
                metadata=V1ObjectMeta(name="queue-web-0",
                                      uid="uid-queue-web-0",
                                      resource_version="1"),
                spec=V1PersistentVolumeClaimSpec(volume_name="pv-0"),
            )
        ])

    def delete_pvc(name, namespace, body):
        # Simulate a non-conflict API failure that the code must not swallow.
        raise ApiException(reason="Server Error")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc
    # Zero delay + empty cache means the claim is eligible for deletion immediately.
    with pytest.raises(ApiException):
        delete_detached_pvcs(api, "namespace", "queue-",
                             timedelta(microseconds=0), {})
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    # Exactly one delete attempt, carrying uid/resourceVersion preconditions.
    assert [("queue-web-0", "namespace", "uid-queue-web-0", "1")] == [(
        call.kwargs["name"],
        call.kwargs["namespace"],
        call.kwargs["body"].preconditions.uid,
        call.kwargs["body"].preconditions.resource_version,
    ) for call in api.delete_namespaced_persistent_volume_claim.call_args_list]
def delete_complete_jobs(api: CoreV1Api, batch_api: BatchV1Api, namespace: str):
    """Delete complete jobs.

    For every flush job whose first condition is Complete and that is not
    already terminating, arrange for its PV and PVC to be cleaned up with it,
    then delete the job with foreground cascading.
    """
    for job in batch_api.list_namespaced_job(namespace).items:
        if (
            job.status.conditions
            and job.status.conditions[0].type == "Complete"
            and not job.metadata.deletion_timestamp
            and _is_flush_job(job)
        ):
            logger.info(f"deleting complete job: {job.metadata.name}")
            # configure persistent volume claims to be deleted with the job
            pv_name = _pv_name_from_job(job)
            logger.info(f"including pv in pvc delete: {pv_name}")
            # Switch the PV reclaim policy to Delete so removing the PVC
            # also removes the underlying volume.
            api.patch_persistent_volume(
                name=pv_name,
                body=V1PersistentVolume(
                    spec=V1PersistentVolumeSpec(
                        persistent_volume_reclaim_policy="Delete",
                    )
                ),
            )
            logger.info(f"including pvc in job delete: {job.metadata.name}")
            # Make the job own the PVC (same name as the job) so the
            # foreground job deletion below cascades to the claim.
            api.patch_namespaced_persistent_volume_claim(
                name=job.metadata.name,
                namespace=namespace,
                body=V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(
                        owner_references=[
                            V1OwnerReference(
                                api_version="batch/v1",
                                kind="Job",
                                name=job.metadata.name,
                                uid=job.metadata.uid,
                                block_owner_deletion=True,
                            )
                        ]
                    )
                ),
            )
            try:
                # Preconditions guard against deleting a job that changed
                # since it was listed above.
                batch_api.delete_namespaced_job(
                    name=job.metadata.name,
                    namespace=namespace,
                    body=V1DeleteOptions(
                        grace_period_seconds=0,
                        propagation_policy="Foreground",
                        preconditions=V1Preconditions(
                            resource_version=job.metadata.resource_version,
                            uid=job.metadata.uid,
                        ),
                    ),
                )
            except ApiException as e:
                # Conflict/NotFound just means someone else got there first.
                if e.reason not in (CONFLICT, NOT_FOUND):
                    raise
                logger.info(f"job already deleted or updated: {job.metadata.name}")
def deploy(self, access_mode="ReadWriteMany"):
    """Create this object's namespaced PVC; ``self.capacity`` must be set."""
    assert self.capacity
    spec = V1PersistentVolumeClaimSpec(
        access_modes=[access_mode],
        resources=V1ResourceRequirements(requests={"storage": self.capacity}),
        # Bind to a specific PV via both label selector and explicit name.
        selector=V1LabelSelector(match_labels=self.selector),
        volume_name=self.pvvolume,
    )
    claim = V1PersistentVolumeClaim(metadata=self.meta, spec=spec)
    k8sclient.apiV1.create_namespaced_persistent_volume_claim(
        self.meta.namespace, body=claim)
def __init__(self) -> None:
    """Define a single-replica Postgres StatefulSet with a 1Gi data claim."""
    metadata = V1ObjectMeta(name="postgres", labels={"app": "postgres"})
    label_selector = V1LabelSelector(match_labels={"app": "postgres"})
    # Trust auth keeps the test database passwordless.
    env = [V1EnvVar(name="POSTGRES_HOST_AUTH_METHOD", value="trust")]
    ports = [V1ContainerPort(container_port=5432, name="sql")]
    volume_mounts = [
        V1VolumeMount(name="data", mount_path="/data"),
        # Init scripts run by the postgres image entrypoint on first boot.
        V1VolumeMount(
            name="postgres-init", mount_path="/docker-entrypoint-initdb.d"
        ),
    ]
    # Init SQL is supplied by the "postgres-init" ConfigMap.
    volume_config = V1ConfigMapVolumeSource(
        name="postgres-init",
    )
    volumes = [V1Volume(name="postgres-init", config_map=volume_config)]
    container = V1Container(
        name="postgres",
        image="postgres:14.3",
        env=env,
        ports=ports,
        volume_mounts=volume_mounts,
    )
    pod_spec = V1PodSpec(containers=[container], volumes=volumes)
    template_spec = V1PodTemplateSpec(metadata=metadata, spec=pod_spec)
    # The "data" mount is backed by a per-pod claim template.
    claim_templates = [
        V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(name="data"),
            spec=V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                resources=V1ResourceRequirements(requests={"storage": "1Gi"}),
            ),
        )
    ]
    self.stateful_set = V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=metadata,
        spec=V1StatefulSetSpec(
            service_name="postgres",
            replicas=1,
            selector=label_selector,
            template=template_spec,
            volume_claim_templates=claim_templates,
        ),
    )
def get_volume_claim_templates(self) -> Sequence[V1PersistentVolumeClaim]:
    """Return one PVC template per configured persistent volume."""
    templates = []
    for volume in self.get_persistent_volumes():
        claim = V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(
                name=self.get_persistent_volume_name(volume),
            ),
            spec=V1PersistentVolumeClaimSpec(
                # must be ReadWriteOnce for EBS
                access_modes=["ReadWriteOnce"],
                storage_class_name=self.get_storage_class_name(),
                resources=V1ResourceRequirements(
                    requests={'storage': f"{volume['size']}Gi"},
                ),
            ),
        )
        templates.append(claim)
    return templates
def _createStatefulSet(self) -> V1beta1StatefulSet:
    """Build the 3-replica mongod StatefulSet with a 30Gi claim per pod."""
    return V1beta1StatefulSet(
        metadata=self._createMeta(self.name),
        spec=V1beta1StatefulSetSpec(
            replicas=3,
            service_name=self.name,
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=KubernetesResources.createDefaultLabels(self.name)),
                spec=V1PodSpec(containers=[
                    V1Container(
                        name="mongodb",
                        # POD_IP is injected from pod status via the downward API.
                        env=[
                            V1EnvVar(name="POD_IP",
                                     value_from=V1EnvVarSource(
                                         field_ref=V1ObjectFieldSelector(
                                             api_version="v1",
                                             field_path="status.podIP")))
                        ],
                        command=[
                            "mongod", "--wiredTigerCacheSizeGB", "0.25",
                            "--replSet", self.name, "--bind_ip", "0.0.0.0",
                            "--smallfiles", "--noprealloc"
                        ],
                        image="mongo:3.6.4",
                        ports=[
                            V1ContainerPort(name="mongodb",
                                            container_port=27017,
                                            protocol="TCP")
                        ],
                        volume_mounts=[
                            V1VolumeMount(name="mongo-storage",
                                          read_only=False,
                                          mount_path="/data/db")
                        ],
                        # CPU/memory limits come from the shared helper.
                        resources=self._createResourceLimits())
                ])),
            volume_claim_templates=[
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(name="mongo-storage"),
                    spec=V1PersistentVolumeClaimSpec(
                        access_modes=["ReadWriteOnce"],
                        resources=V1ResourceRequirements(
                            requests={"storage": "30Gi"})))
            ],
        ),
    )
def _ensure_pvc(self, name, storage_class, size, deployment_name):
    """Ensure a PVC named *name* exists with the requested size and storage class.

    If a claim with the same name already exists but its size or storage
    class differs significantly, the consuming deployment and the claim are
    deleted so the claim can be recreated; otherwise the existing claim is
    kept as-is.

    NOTE(review): *size* appears to be in KiB — it is divided by 1024 for Mi
    and 1048576 for Gi, with floors of 1024Mi / 1Gi — confirm the unit with
    callers.
    """
    size_Mi = f'{max(round(int(size)/1024), 1024)}Mi'
    size_Gi = f'{max(round(int(size)/1048576), 1)}Gi'
    request = V1ResourceRequirements(requests={'storage': size_Mi})
    claim_spec = V1PersistentVolumeClaimSpec(storage_class_name=storage_class,
                                             resources=request,
                                             volume_mode='Filesystem',
                                             access_modes=['ReadWriteOnce'])
    metadata = V1ObjectMeta(namespace=self.namespace, name=name)
    claim = V1PersistentVolumeClaim(metadata=metadata, spec=claim_spec)

    def remove_pvc(deployment_name, pvc_name):
        # The claim cannot be resized/re-classed in place here, so the
        # deployment holding it is removed first to release the claim.
        self.logger.info(f'Deleting old {deployment_name} deployment to release {name} PVC to be recreated..')
        # Remove deployment
        self.apps_api.delete_namespaced_deployment(name=deployment_name,
                                                   namespace=self.namespace,
                                                   _request_timeout=API_TIMEOUT)
        # Remove PVC
        self.api.delete_namespaced_persistent_volume_claim(name=pvc_name,
                                                           namespace=self.namespace,
                                                           _request_timeout=API_TIMEOUT)
        # Poll to see if PVC has been removed
        try:
            while self.api.read_namespaced_persistent_volume_claim_status(name=pvc_name,
                                                                          namespace=self.namespace):
                sleep(15)
        except ApiException as e:
            # 404 means deletion completed; any other failure is logged only.
            if e.status == 404:
                return
            self.logger.error(e.reason)

    # Check to see if a PVC with the same name exists
    for pvc in self.api.list_namespaced_persistent_volume_claim(namespace=self.namespace).items:
        if pvc.metadata.name == metadata.name:
            pvc_requests = pvc.spec.resources.requests
            # Check for significant changes, if so replace
            if (pvc_requests['storage'].endswith('Mi') and pvc_requests['storage'] != size_Mi) or \
                    (pvc_requests['storage'].endswith('Gi') and pvc_requests['storage'] != size_Gi) or \
                    pvc.spec.storage_class_name != claim.spec.storage_class_name:
                # If PVC is currently in use, terminate associated deployments to proceed with replacement
                remove_pvc(deployment_name, name)
                break
            # Otherwise no need to create a PVC that already exists unchanged
            return
    self.api.create_namespaced_persistent_volume_claim(namespace=self.namespace,
                                                       body=claim,
                                                       _request_timeout=API_TIMEOUT)
def ensure_statefulset_with_containers(api_apps_v1, name, namespace, containers, volume_paths, replicas=1, init_containers=None, volumes=None):
    """Build a StatefulSet with one PVC template per volume path and apply it.

    Each entry of *volume_paths* is indexed as: [0] claim name, [2] requested
    storage size, [3] storage class name.
    """
    volumes = [] if volumes is None else volumes
    init_containers = [] if init_containers is None else init_containers
    claim_templates = []
    for path in volume_paths:
        claim_templates.append(
            V1PersistentVolumeClaim(
                metadata=V1ObjectMeta(name=path[0]),
                spec=V1PersistentVolumeClaimSpec(
                    access_modes=['ReadWriteOnce'],
                    resources=V1ResourceRequirements(
                        requests={'storage': path[2]}),
                    storage_class_name=path[3])))
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(labels={"app": name}),
        spec=V1PodSpec(containers=containers,
                       volumes=volumes,
                       init_containers=init_containers))
    stateful_set = client.V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=client.V1ObjectMeta(name=name, labels={'app': name}),
        spec=client.V1StatefulSetSpec(
            replicas=replicas,
            service_name=name,
            template=pod_template,
            selector={'matchLabels': {
                'app': name
            }},
            volume_claim_templates=claim_templates))
    ensure_statefulset(api_apps_v1,
                       stateful_set=stateful_set,
                       namespace=namespace,
                       name=name)
def test_create_pvc_if_not_exists(self, list_p):
    """create_pvc_if_not_exists creates only when the claim is absent and wraps errors."""
    pvc = V1PersistentVolumeClaim(metadata=V1ObjectMeta(name='tt'))
    # Claim already present: nothing is created, returns False.
    list_p.return_value = V1PersistentVolumeClaimList(items=[pvc])
    res = k8s.create_pvc_if_not_exists(pvc, 'test_namespace')
    self.assertFalse(res)
    # Claim absent: create_pvc is invoked once, returns True.
    list_p.return_value = V1PersistentVolumeClaimList(items=[])
    with patch('xcube_hub.core.k8s.create_pvc') as p:
        res = k8s.create_pvc_if_not_exists(pvc, 'test_namespace')
        p.assert_called_once()
        self.assertTrue(res)
    # Listing failures are surfaced as the project's ApiError.
    list_p.side_effect = ApiValueError('Error')
    with self.assertRaises(api.ApiError) as e:
        k8s.create_pvc_if_not_exists(pvc, 'test_namespace')
    self.assertEqual('Error when creating the pvc tt: Error', str(e.exception))
def _create_persistent_volume_claim(pvc_name, volume_name, namespace):
    """Create a 100Gi local-storage PVC named *pvc_name* bound to PV *volume_name*.

    Bug fix: the claim's metadata previously ignored the *pvc_name* parameter
    and hard-coded ``f'{volume_name}-pvc'``, contradicting both the parameter
    and the log message; the claim is now named *pvc_name* as advertised.
    """
    _LOG.info(
        f'Creating persistent volume claim {pvc_name} with volume {volume_name}'
    )
    # Raw spec dict: the client passes camelCase keys through unchanged.
    spec = {
        'volumeName': volume_name,
        'volumeMode': 'Filesystem',
        'storageClassName': 'local-storage',
        'accessModes': ['ReadWriteOnce'],
        'resources': {
            'requests': {
                'storage': '100Gi'
            }
        }
    }
    _kubernetes.create_namespaced_persistent_volume_claim(
        namespace,
        V1PersistentVolumeClaim(api_version='v1',
                                kind='PersistentVolumeClaim',
                                metadata={'name': pvc_name},
                                spec=spec))
def test_delete_detached_pvcs(api: MagicMock):
    """Detached prefix-matching claims are deleted with uid/resourceVersion preconditions.

    Bug fix: the original called ``called_once_with(...)`` on the mocks, which
    merely auto-creates a mock attribute and asserts nothing; the calls are now
    real ``assert_called_once_with`` assertions.
    """
    api.list_namespaced_pod.return_value = V1PodList(
        items=[
            # pvc is attached
            V1Pod(
                spec=V1PodSpec(
                    containers=[],
                    volumes=[
                        V1Volume(
                            name="queue",
                            persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                                claim_name="queue-web-3",
                            ),
                        )
                    ],
                ),
            ),
            # pvc not attached because spec is missing
            V1Pod(),
            # pvc not attached because volumes are missing
            V1Pod(spec=V1PodSpec(containers=[])),
            # pvc not attached because volume is not persistent
            V1Pod(spec=V1PodSpec(containers=[], volumes=[V1Volume(name="queue")])),
            # pvc not attached because pod is unschedulable due to pvc
            V1Pod(
                metadata=V1ObjectMeta(
                    name="web-0",
                    namespace="default",
                    uid="uid-web-0",
                    resource_version="1",
                    owner_references=[V1ObjectReference(kind="StatefulSet")],
                ),
                status=V1PodStatus(
                    phase="Pending",
                    conditions=[
                        V1PodCondition(
                            status="Not Ready",
                            type="False",
                            reason="Unschedulable",
                            message='persistentvolumeclaim "queue-web-0" not found',
                        )
                    ],
                ),
            ),
        ]
    )
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should delete 0-2, 3 is in attached pvcs
            *(
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(
                        name=f"queue-web-{i}",
                        uid=f"uid-queue-web-{i}",
                        resource_version=f"{i}",
                    ),
                )
                for i in range(4)
            ),
            # name does not start with claim prefix
            V1PersistentVolumeClaim(metadata=V1ObjectMeta(name="other-web-0")),
        ]
    )

    def delete_pvc(name, namespace, body):
        # Conflict / Not Found must be tolerated by the implementation.
        if name == "queue-web-1":
            raise ApiException(reason="Conflict")
        if name == "queue-web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc
    delete_detached_pvcs(api, "namespace", "queue-")
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with("namespace")
    assert [
        (f"queue-web-{i}", "namespace", f"uid-queue-web-{i}", f"{i}") for i in range(3)
    ] == [
        (
            call.kwargs["name"],
            call.kwargs["namespace"],
            call.kwargs["body"].preconditions.uid,
            call.kwargs["body"].preconditions.resource_version,
        )
        for call in api.delete_namespaced_persistent_volume_claim.call_args_list
    ]
def read_pvc(name: str, namespace: str):
    """Return a stub claim whose uid derives from *name* and whose resourceVersion is "2"."""
    meta = V1ObjectMeta(
        name=name,
        namespace=namespace,
        uid="uid-" + name,
        resource_version="2",
    )
    return V1PersistentVolumeClaim(metadata=meta)
def __init__(self) -> None:
    """Define the environmentd StatefulSet for the kind-based test cluster.

    Bug fix: the persist-blob URL contained the character "®" where the query
    separator "&reg..." had been mangled by an HTML-entity decode; restored to
    ``&region=minio``.
    """
    metadata = V1ObjectMeta(name="environmentd", labels={"app": "environmentd"})
    label_selector = V1LabelSelector(match_labels={"app": "environmentd"})
    # Expose the pod's own name to the process via the downward API.
    value_from = V1EnvVarSource(field_ref=V1ObjectFieldSelector(
        field_path="metadata.name"))
    env = [
        V1EnvVar(name="MZ_POD_NAME", value_from=value_from),
        # MinIO stands in for S3; static test credentials.
        V1EnvVar(name="AWS_REGION", value="minio"),
        V1EnvVar(name="AWS_ACCESS_KEY_ID", value="minio"),
        V1EnvVar(name="AWS_SECRET_ACCESS_KEY", value="minio123"),
    ]
    ports = [V1ContainerPort(container_port=5432, name="sql")]
    volume_mounts = [
        V1VolumeMount(name="data", mount_path="/data"),
    ]
    # Endpoint is percent-encoded so it survives as a single query value.
    s3_endpoint = urllib.parse.quote("http://minio-service.default:9000")
    container = V1Container(
        name="environmentd",
        image=self.image("environmentd"),
        args=[
            "--storaged-image=" + self.image("storaged"),
            "--computed-image=" + self.image("computed"),
            "--availability-zone=kind-worker",
            "--availability-zone=kind-worker2",
            "--availability-zone=kind-worker3",
            f"--persist-blob-url=s3://minio:minio123@persist/persist?endpoint={s3_endpoint}&region=minio",
            "--orchestrator=kubernetes",
            "--orchestrator-kubernetes-image-pull-policy=never",
            # NOTE(review): the "[email protected]" hosts below look like a
            # redaction artifact in the checked-in source — confirm the real
            # connection strings against the deployment.
            "--persist-consensus-url=postgres://[email protected]?options=--search_path=consensus",
            "--adapter-stash-url=postgres://[email protected]?options=--search_path=catalog",
            "--storage-stash-url=postgres://[email protected]?options=--search_path=storage",
            "--unsafe-mode",
        ],
        env=env,
        ports=ports,
        volume_mounts=volume_mounts,
    )
    pod_spec = V1PodSpec(containers=[container])
    template_spec = V1PodTemplateSpec(metadata=metadata, spec=pod_spec)
    # The /data mount is backed by a per-pod 1Gi claim template.
    claim_templates = [
        V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(name="data"),
            spec=V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                resources=V1ResourceRequirements(
                    requests={"storage": "1Gi"}),
            ),
        )
    ]
    self.stateful_set = V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=metadata,
        spec=V1StatefulSetSpec(
            service_name="environmentd",
            replicas=1,
            pod_management_policy="Parallel",
            selector=label_selector,
            template=template_spec,
            volume_claim_templates=claim_templates,
        ),
    )
def test_delete_detached_pvcs(api: MagicMock):
    """Detached claims are only deleted after persisting in the cache for the full delay."""
    api.list_namespaced_pod.return_value = V1PodList(items=[
        # pvc is attached
        V1Pod(spec=V1PodSpec(
            containers=[],
            volumes=[
                V1Volume(
                    name="queue",
                    persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                        claim_name="queue-web-3",
                    ),
                )
            ],
        ),
        ),
        # pvc not attached because spec is missing
        V1Pod(),
        # pvc not attached because volumes are missing
        V1Pod(spec=V1PodSpec(containers=[], ), ),
        # pvc not attached because volume is not persistent
        V1Pod(spec=V1PodSpec(containers=[], volumes=[V1Volume(
            name="queue")]), ),
        # pvc not attached because pod is unschedulable due to pvc
        V1Pod(
            metadata=V1ObjectMeta(
                name="web-0",
                namespace="default",
                uid="uid-web-0",
                resource_version="1",
                owner_references=[V1ObjectReference(kind="StatefulSet")],
            ),
            status=V1PodStatus(
                phase="Pending",
                conditions=[
                    V1PodCondition(
                        status="Not Ready",
                        type="False",
                        reason="Unschedulable",
                        message='persistentvolumeclaim "queue-web-0" not found',
                    )
                ],
            ),
        ),
    ])
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should delete 0-2, 3 is in attached pvcs
            *(V1PersistentVolumeClaim(
                metadata=V1ObjectMeta(
                    name=f"queue-web-{i}",
                    uid=f"uid-queue-web-{i}",
                    resource_version=f"{i}",
                ),
                spec=V1PersistentVolumeClaimSpec(volume_name=f"pv-{i}"),
            ) for i in range(4)),
            # name does not start with claim prefix
            V1PersistentVolumeClaim(metadata=V1ObjectMeta(
                name="other-web-0"), ),
        ])

    def delete_pvc(name, namespace, body):
        # Conflict and Not Found must be tolerated; anything else propagates.
        if name == "queue-web-1":
            raise ApiException(reason="Conflict")
        if name == "queue-web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc
    pvc_cleanup_delay = timedelta(microseconds=1)
    delay_complete = datetime.utcnow() - pvc_cleanup_delay
    cache = {
        # wrong pv name, should be overwritten
        "queue-web-0": PvcCacheEntry(pv="wrong", time=delay_complete),
        # no longer detached, should be removed
        "queue-web-3": PvcCacheEntry(pv="pv-3", time=delay_complete),
    }
    # First pass: cache is (re)populated with the detached claims; no deletes yet.
    delete_detached_pvcs(api, "namespace", "queue-", pvc_cleanup_delay, cache)
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    api.delete_namespaced_persistent_volume_claim.assert_not_called()
    assert {f"queue-web-{i}": f"pv-{i}" for i in range(3)} == {k: v.pv for k, v in cache.items()}
    api.list_namespaced_pod.reset_mock()
    api.list_namespaced_persistent_volume_claim.reset_mock()
    previous_cache = {**cache}
    # Second pass: cached entries have aged past the delay, so deletes fire.
    delete_detached_pvcs(api, "namespace", "queue-", pvc_cleanup_delay, cache)
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    assert previous_cache == cache
    # Each delete carried uid/resourceVersion preconditions from the claim.
    assert [
        (f"queue-web-{i}", "namespace", f"uid-queue-web-{i}", f"{i}") for i in range(3)
    ] == [(
        call.kwargs["name"],
        call.kwargs["namespace"],
        call.kwargs["body"].preconditions.uid,
        call.kwargs["body"].preconditions.resource_version,
    ) for call in api.delete_namespaced_persistent_volume_claim.call_args_list]