def _get_pod_spec(self, container_command: str, pvc: str) -> V1PodSpec:
    """Build a pod spec with one container, the data PVC volume, and one
    projected volume per CA config map in ``self.ca_config_maps``.
    """
    ca_volume_names = []
    ca_volumes = []
    # Each CA config map becomes its own named volume exposing the
    # "ca_cert" key under a path named after the config map.
    for index, config_map in enumerate(self.ca_config_maps or []):
        volume_name = f"ca-config-map-{index}"
        source = V1ConfigMapVolumeSource(
            name=config_map,
            items=[V1KeyToPath(key="ca_cert", path=config_map)],
        )
        ca_volumes.append(V1Volume(name=volume_name, config_map=source))
        ca_volume_names.append(volume_name)

    data_volume = V1Volume(
        name=self.data_volume_name,
        persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(claim_name=pvc),
    )
    container = self._get_container(
        command=container_command,
        config_map_volume_names=ca_volume_names,
    )
    return V1PodSpec(
        containers=[container],
        volumes=[data_volume] + ca_volumes,
        restart_policy="Never",
    )
Beispiel #2
0
 def __init__(self, name, mount, claim_name, read_only=False):
     """Pair a V1VolumeMount with the PVC-backed V1Volume it mounts."""
     claim_source = V1PersistentVolumeClaimVolumeSource(claim_name=claim_name)
     self.volume = V1Volume(name=name, persistent_volume_claim=claim_source)
     self.mount = V1VolumeMount(name=name,
                                mount_path=mount,
                                read_only=read_only)
Beispiel #3
0
    def _create_pod(self, **kargs):
        """Assemble a ``V1Pod`` from keyword arguments without submitting it.

        Expected keys: ``pod_name``, ``image_name``, ``command``,
        ``container_args``, ``image_pull_policy``, ``env``,
        ``resource_requests``, ``resource_limits``, ``restart_policy``,
        ``pod_priority``, ``volume``, ``job_name``, ``owner_pod``.

        Returns:
            client.V1Pod: the assembled pod, passed through
            ``self.cluster.with_pod`` when a cluster adapter is configured.
        """
        # Container: limits default to the requests when not provided.
        pod_resource_requests = kargs["resource_requests"]
        pod_resource_limits = kargs["resource_limits"] or pod_resource_requests
        container = client.V1Container(
            name=kargs["pod_name"],
            image=kargs["image_name"],
            command=kargs["command"],
            resources=client.V1ResourceRequirements(
                requests=parse_resource(pod_resource_requests),
                limits=parse_resource(pod_resource_limits),
            ),
            args=kargs["container_args"],
            image_pull_policy=kargs["image_pull_policy"],
            env=kargs["env"],
        )

        # Pod
        spec = client.V1PodSpec(
            containers=[container],
            restart_policy=kargs["restart_policy"],
            priority_class_name=kargs["pod_priority"],
        )

        # Mount data path when a volume spec was supplied.
        if kargs["volume"]:
            volume_dict = parse_volume(kargs["volume"])
            volume_name = kargs["pod_name"] + "-volume"
            volume = client.V1Volume(
                name=volume_name,
                # Use the client-qualified name for consistency with every
                # other kubernetes model class referenced in this method.
                persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=volume_dict["claim_name"], read_only=False),
            )
            spec.volumes = [volume]
            container.volume_mounts = [
                client.V1VolumeMount(name=volume_name,
                                     mount_path=volume_dict["mount_path"])
            ]

        pod = client.V1Pod(
            spec=spec,
            metadata=client.V1ObjectMeta(
                name=kargs["pod_name"],
                labels={
                    "app": ELASTICDL_APP_NAME,
                    ELASTICDL_JOB_KEY: kargs["job_name"],
                },
                owner_references=self.create_owner_reference(
                    kargs["owner_pod"]),
                namespace=self.namespace,
            ),
        )
        if self.cluster:
            pod = self.cluster.with_pod(pod)

        return pod
Beispiel #4
0
def _create_flush_job(
    batch_api: BatchV1Api,
    command: List[str],
    env: List[V1EnvVar],
    image: str,
    name: str,
    namespace: str,
    service_account_name: str,
) -> V1Job:
    """Create the flush job bound to the PVC named *name*, or adopt the
    already-existing job of the same name on a conflict.
    """
    logger.info(f"creating job: {name}")

    # Build the job body up front; only the API call below can raise.
    flush_container = V1Container(
        image=image,
        command=command,
        name="flush",
        volume_mounts=[V1VolumeMount(mount_path="/data", name="queue")],
        env=env,
    )
    queue_volume = V1Volume(
        name="queue",
        persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(claim_name=name),
    )
    pod_spec = V1PodSpec(
        containers=[flush_container],
        restart_policy="OnFailure",
        volumes=[queue_volume],
        service_account_name=service_account_name,
    )
    job = V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=V1ObjectMeta(name=name, namespace=namespace),
        spec=V1JobSpec(template=V1PodTemplateSpec(spec=pod_spec)),
    )

    try:
        return batch_api.create_namespaced_job(namespace=namespace, body=job)
    except ApiException as e:
        # A conflict on an already-existing job is expected; reuse it.
        if e.reason == CONFLICT and json.loads(e.body)["reason"] == ALREADY_EXISTS:
            logger.info(f"using existing job: {name}")
            return batch_api.read_namespaced_job(name, namespace)
        raise
Beispiel #5
0
    def start_stateful_container(self, service_name, container_name, spec, labels):
        """Provision a PVC-backed volume for each spec volume, then deploy
        the stateful container with those volumes mounted.
        """
        # Setup PVC
        deployment_name = service_name + '-' + container_name
        volumes, mounts = [], []
        for volume_name, volume_spec in spec.volumes.items():
            mount_name = deployment_name + volume_name

            # Make sure the backing PVC exists before referencing it.
            self._ensure_pvc(mount_name, volume_spec.storage_class, volume_spec.capacity)

            claim = V1PersistentVolumeClaimVolumeSource(mount_name)
            volumes.append(V1Volume(name=mount_name, persistent_volume_claim=claim))
            mounts.append(V1VolumeMount(mount_path=volume_spec.mount_path, name=mount_name))

        self._create_deployment(service_name, deployment_name, spec.container,
                                30, 1, labels, volumes=volumes, mounts=mounts)
Beispiel #6
0
 def create_cluster(self, spec, cluster_management, namespace_name,
                    volume_claim_name):
     """Create one GPU TensorFlow pod per requested DPU, all mounting the
     same persistent volume claim.

     Returns:
         list: the ``create_namespaced_pod`` API responses, one per pod.

     Raises:
         Exception: wrapping the underlying ApiException on create failure.
     """
     count = int(spec[FRAMEWORK_RESOURCES][FRAMEWORK_DPU_COUNT])
     version = str(spec[FRAMEWORK_VERSION])
     image = "tensorflow/tensorflow:" + version + "-gpu"
     v1_api = cluster_management.kube_api
     api_response_list = []
     for _ in range(count):
         body = kubernetes.client.V1Pod()
         body.api_version = "v1"
         body.kind = "Pod"
         meta = V1ObjectMeta()
         meta.generate_name = "tensorflow-"
         body.metadata = meta
         # Unique container name; avoid shadowing the stdlib ``uuid`` module.
         container_name = str(uuid4())
         container = V1Container(name=container_name, image=image)
         container.volume_mounts = [V1VolumeMount(
             mount_path=GLUSTER_DEFAULT_MOUNT_PATH,
             name=CONTAINER_VOLUME_PREFIX)]
         # Request exactly one GPU per pod.
         compute_resource = V1ResourceRequirements()
         compute_resource.limits = {"nvidia.com/gpu": 1}
         compute_resource.requests = {"nvidia.com/gpu": 1}
         container.resources = compute_resource
         claim = V1PersistentVolumeClaimVolumeSource(
             claim_name=volume_claim_name)
         # The claim is supplied once via the constructor (the original
         # also re-assigned it redundantly afterwards).
         volume_claim = V1Volume(name=CONTAINER_VOLUME_PREFIX,
                                 persistent_volume_claim=claim)
         pod_spec = V1PodSpec(containers=[container])
         pod_spec.volumes = [volume_claim]
         body.spec = pod_spec
         try:
             api_response = v1_api.create_namespaced_pod(
                 namespace_name, body)
         except ApiException as e:
             # Chain the cause so the original API failure is preserved.
             raise Exception(
                 "Exception when calling CoreV1Api->create_namespaced_pod: %s\n"
                 % e) from e
         api_response_list.append(api_response)
     return api_response_list
Beispiel #7
0
    def _create_volumes(self, service_name):
        """Return ``(volumes, mounts)``: the shared config volumes/mounts plus
        a read-only view of the update PVC scoped to *service_name*.
        """
        # Start from the mounts that provide the config files.
        volumes = list(self.config_volumes.values())
        mounts = list(self.config_mounts.values())

        # Read-only update directory, sub-pathed per service.
        update_claim = V1PersistentVolumeClaimVolumeSource(
            claim_name=FILE_UPDATE_VOLUME,
            read_only=True,
        )
        volumes.append(V1Volume(
            name='update-directory',
            persistent_volume_claim=update_claim,
        ))
        mounts.append(V1VolumeMount(
            name='update-directory',
            mount_path=CONTAINER_UPDATE_DIRECTORY,
            sub_path=service_name,
            read_only=True,
        ))

        return volumes, mounts
Beispiel #8
0
    def createsvc(self, deploy, port, imagename, namespace, envvar, nameapp,
                  service, pvc, volumename, datadir):
        """Create an OpenShift Service and DeploymentConfig for one app component.

        The component is identified as ``nameapp + "-" + deploy``; each entry in
        *port* (a list of dicts with keys ``port``, ``tcp``, ``route``) becomes
        a service port, and ports flagged ``route == 'yes'`` also get a route.
        When *pvc* is truthy, *volumename* is mounted at *datadir* via a PVC.
        API errors are printed, not raised.
        """
        bservice = client.V1Service()
        smeta = V1ObjectMeta()
        dcmeta = V1ObjectMeta()
        pmt = V1ObjectMeta()
        sspec = client.V1ServiceSpec()
        bdc = openshift.client.V1DeploymentConfig()
        dcspec = openshift.client.V1DeploymentConfigSpec()
        strategy = openshift.client.V1DeploymentStrategy()
        rollingparams = openshift.client.V1RollingDeploymentStrategyParams()
        podtemp = client.V1PodTemplateSpec()
        podspec = client.V1PodSpec()
        container = client.V1Container()

        # Shared identifier for service, DC, labels and selectors.
        idname = nameapp + "-" + deploy

        smeta.name = idname  # service is named after the app/deploy pair
        smeta.namespace = namespace
        smeta.labels = {"label": idname, "bundle": service + "-" + nameapp}

        sspec.selector = {"label": idname}
        sspec.ports = []

        # One V1ServicePort per requested port; name format "<port>-<tcp>".
        for l in range(0, len(port)):
            p = client.V1ServicePort()
            p.name = "{port}-{tcp}".format(**port[l])
            p.protocol = "TCP"
            p.port = port[l]['tcp']
            p.target_port = "{port}-{tcp}".format(**port[l])
            sspec.ports.append(p)
            if port[l]['route'] == 'yes':
                # Expose this port externally via a route.
                self.createroute(p.target_port, idname, namespace, service,
                                 nameapp, l)

        bservice.api_version = 'v1'
        bservice.kind = 'Service'
        bservice.metadata = smeta
        bservice.spec = sspec
        bservice.api_version = 'v1'

        # DeploymentConfig

        dcmeta.labels = {"label": idname, "bundle": service + "-" + nameapp}
        dcmeta.name = idname
        dcmeta.namespace = namespace

        rollingparams.interval_seconds = 1

        strategy.labels = {"label": idname, "bundle": service + "-" + nameapp}
        strategy.type = 'Rolling'
        strategy.rolling_params = rollingparams

        container.image = imagename
        container.name = idname
        container.env = []
        if pvc:
            # Mount the data volume inside the container.
            vm = V1VolumeMount()
            vm.mount_path = datadir
            vm.name = volumename
            container.volume_mounts = [vm]
        else:
            pass

        # Environment variables from the supplied mapping.
        for key in envvar:
            v = client.V1EnvVar()
            v.name = key
            v.value = envvar[key]
            container.env.append(v)

        container.ports = []
        for o in range(0, len(port)):
            p = client.V1ContainerPort()
            p.name = ("{port}-{tcp}".format(**port[o]))
            p.protocol = "TCP"
            p.container_port = port[o]['tcp']
            container.ports.append(p)

        pmt.labels = {"label": idname, "bundle": service + "-" + nameapp}
        pmt.name = idname

        podspec.containers = [container]
        if pvc:
            # PVC-backed volume; a GlusterFS source is prepared but unused
            # (the glusterfs assignment below is intentionally commented out).
            vol = V1Volume()
            pvcname = V1PersistentVolumeClaimVolumeSource()
            volgfs = V1GlusterfsVolumeSource()

            pvcname.claim_name = volumename

            volgfs.endpoints = volumename
            volgfs.path = datadir

            #vol.glusterfs = volgfs
            vol.name = volumename
            vol.persistent_volume_claim = pvcname
            podspec.volumes = [vol]
        else:
            pass

        podtemp.metadata = pmt
        podtemp.spec = podspec

        dcspec.replicas = 1
        dcspec.selector = {"label": idname}
        dcspec.template = podtemp
        dcspec.strategy = strategy

        bdc.api_version = 'v1'
        bdc.spec = dcspec
        bdc.metadata = dcmeta
        bdc.kind = 'DeploymentConfig'

        try:
            self.k1.create_namespaced_service(namespace=namespace,
                                              body=bservice,
                                              pretty='true')
        except ApiException as e:
            print("Exception when calling OapiApi->create_service: %s\n" % e)

        try:
            self.o1.create_namespaced_deployment_config(namespace=namespace,
                                                        body=bdc,
                                                        pretty='true')
        except ApiException as e:
            print("Exception when calling OapiApi->create_dc: %s\n" % e)
Beispiel #9
0
    def apply_pod_profile(self,
                          username,
                          pod,
                          profile,
                          gpu_types,
                          default_mount_path,
                          gpu_mode=None,
                          selected_gpu_type="ALL"):
        """Apply a profile to *pod* in place: user label, volumes, env vars,
        resources, GPU config and scheduling constraints.

        Returns the mutated pod.
        """
        api_client = kubernetes.client.ApiClient()

        # Tag the pod with the (escaped) owning user.
        pod.metadata.labels['jupyterhub.opendatahub.io/user'] = escape(
            username)

        profile_volumes = profile.get('volumes')

        if profile_volumes:
            for volume in profile_volumes:
                # Sanitise into a lowercase, DNS-friendly volume name.
                # NOTE(review): pattern is a non-raw string with '\.'; a raw
                # string would avoid the invalid-escape deprecation warning.
                volume_name = re.sub('[^a-zA-Z0-9\.]', '-',
                                     volume['name']).lower()
                read_only = volume['persistentVolumeClaim'].get('readOnly')
                pvc = V1PersistentVolumeClaimVolumeSource(
                    volume['persistentVolumeClaim']['claimName'],
                    read_only=read_only)
                mount_path = self.generate_volume_path(volume.get('mountPath'),
                                                       default_mount_path,
                                                       volume_name)
                pod.spec.volumes.append(
                    V1Volume(name=volume_name, persistent_volume_claim=pvc))
                pod.spec.containers[0].volume_mounts.append(
                    V1VolumeMount(name=volume_name, mount_path=mount_path))

        profile_environment = profile.get('env')

        if profile_environment:

            # Kept for backwards compatibility with simplified env var definitions
            if isinstance(profile_environment, dict):
                for k, v in profile['env'].items():
                    update = False
                    # Overwrite an existing entry of the same name, else append.
                    for e in pod.spec.containers[0].env:
                        if e.name == k:
                            e.value = v
                            update = True
                            break
                    if not update:
                        pod.spec.containers[0].env.append(V1EnvVar(k, v))

            elif isinstance(profile_environment, list):
                # Full V1EnvVar definitions: run each dict through the client
                # deserializer, which expects an object with a ``.data`` attr.
                for i in profile_environment:
                    r = type("Response", (), {})
                    r.data = json.dumps(i)
                    env_var = api_client.deserialize(r, V1EnvVar)
                    pod.spec.containers[0].env.append(env_var)

        # Resources are deserialized the same way as list-form env vars above.
        resource_var = None
        resource_json = type("Response", (), {})
        resource_json.data = json.dumps(profile.get('resources'))
        resource_var = api_client.deserialize(resource_json,
                                              V1ResourceRequirements)

        if resource_var:
            pod.spec.containers[0].resources = resource_var
            # Surface the memory limit to the container via MEM_LIMIT.
            mem_limit = resource_var.limits.get('memory', '')
            if mem_limit:
                pod.spec.containers[0].env.append(
                    V1EnvVar(name='MEM_LIMIT',
                             value=self.get_mem_limit(mem_limit)))

        # Ensure every container carries the JupyterHub user name; env entries
        # may be plain dicts or V1EnvVar objects, so handle both shapes.
        for c in pod.spec.containers:
            update = False
            if type(c) is dict:
                env = c['env']
            else:
                env = c.env
            for e in env:
                if type(e) is dict:
                    if e['name'] == _JUPYTERHUB_USER_NAME_ENV:
                        e['value'] = username
                        update = True
                        break
                else:
                    if e.name == _JUPYTERHUB_USER_NAME_ENV:
                        e.value = username
                        update = True
                        break

            if not update:
                env.append(V1EnvVar(_JUPYTERHUB_USER_NAME_ENV, username))

        self.apply_gpu_config(gpu_mode, profile, gpu_types, pod,
                              selected_gpu_type)

        node_tolerations = profile.get('node_tolerations', [])
        node_affinity = profile.get('node_affinity', {})

        self.apply_pod_schedulers(node_tolerations, node_affinity, pod)

        return pod
    def start_stateful_container(self, service_name: str, container_name: str,
                                 spec, labels: dict[str, str], change_key: str):
        """Deploy a stateful dependency container: provision its PVC-backed
        volumes, reuse or generate a stable instance key, create/update the
        deployment, expose it via a headless service, and record connection
        details in the service's environment map.
        """
        # Setup PVC
        deployment_name = self._dependency_name(service_name, container_name)
        mounts, volumes = [], []
        for volume_name, volume_spec in spec.volumes.items():
            mount_name = f'{deployment_name}-{volume_name}'

            # Check if the PVC exists, create if not
            self._ensure_pvc(mount_name, volume_spec.storage_class, volume_spec.capacity, deployment_name)

            # Create the volume info
            volumes.append(V1Volume(
                name=mount_name,
                persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(mount_name)
            ))
            mounts.append(V1VolumeMount(mount_path=volume_spec.mount_path, name=mount_name))

        # Read the key being used for the deployment instance or generate a new one
        instance_key = uuid.uuid4().hex
        try:
            old_deployment = self.apps_api.read_namespaced_deployment(deployment_name, self.namespace)
            for container in old_deployment.spec.template.spec.containers:
                for env in container.env:
                    if env.name == 'AL_INSTANCE_KEY':
                        instance_key = env.value
                        break
        except ApiException as error:
            # 404 just means there is no previous deployment; keep the new key.
            if error.status != 404:
                raise

        # Setup the deployment itself
        labels['container'] = container_name
        spec.container.environment.append({'name': 'AL_INSTANCE_KEY', 'value': instance_key})
        self._create_deployment(service_name, deployment_name, spec.container,
                                30, 1, labels, volumes=volumes, mounts=mounts,
                                core_mounts=spec.run_as_core, change_key=change_key)

        # Setup a service to direct to the deployment
        try:
            # Patch the existing service in place when one already exists.
            service = self.api.read_namespaced_service(deployment_name, self.namespace)
            service.metadata.labels = labels
            service.spec.selector = labels
            service.spec.ports = [V1ServicePort(port=int(_p)) for _p in spec.container.ports]
            self.api.patch_namespaced_service(deployment_name, self.namespace, service)
        except ApiException as error:
            if error.status != 404:
                raise
            # Not found: create a headless service (cluster_ip 'None').
            service = V1Service(
                metadata=V1ObjectMeta(name=deployment_name, labels=labels),
                spec=V1ServiceSpec(
                    cluster_ip='None',
                    selector=labels,
                    ports=[V1ServicePort(port=int(_p)) for _p in spec.container.ports]
                )
            )
            self.api.create_namespaced_service(self.namespace, service)

        # Add entries to the environment variable list to point to this container
        self._service_limited_env[service_name][f'{container_name}_host'] = deployment_name
        self._service_limited_env[service_name][f'{container_name}_key'] = instance_key
        if spec.container.ports:
            self._service_limited_env[service_name][f'{container_name}_port'] = spec.container.ports[0]
Beispiel #11
0
    def launch(self, name, docker_config: DockerConfig, mounts, env, blocking: bool = True):
        """Run a one-shot update job in the cluster.

        Manages a registry pull secret for the job, replaces any previous job
        of the same name, mounts the requested PVC paths plus the shared
        configuration, and submits a V1Job. When *blocking*, polls until the
        job fails or succeeds and then deletes it.
        """
        name = (self.prefix + 'update-' + name.lower()).replace('_', '-')

        # If we have been given a username or password for the registry, we have to
        # update it, if we haven't been, make sure its been cleaned up in the system
        # so we don't leave passwords lying around
        pull_secret_name = f'{name}-job-pull-secret'
        use_pull_secret = False
        try:
            # Check if there is already a username/password defined for this job
            current_pull_secret = self.api.read_namespaced_secret(pull_secret_name, self.namespace,
                                                                  _request_timeout=API_TIMEOUT)
        except ApiException as error:
            if error.status != 404:
                raise
            current_pull_secret = None

        if docker_config.registry_username or docker_config.registry_password:
            use_pull_secret = True
            # Build the secret we want to make
            new_pull_secret = V1Secret(
                metadata=V1ObjectMeta(name=pull_secret_name, namespace=self.namespace),
                type='kubernetes.io/dockerconfigjson',
                string_data={
                    '.dockerconfigjson': create_docker_auth_config(
                        image=docker_config.image,
                        username=docker_config.registry_username,
                        password=docker_config.registry_password,
                    )
                }
            )

            # Send it to the server
            if current_pull_secret:
                self.api.replace_namespaced_secret(pull_secret_name, namespace=self.namespace,
                                                   body=new_pull_secret, _request_timeout=API_TIMEOUT)
            else:
                self.api.create_namespaced_secret(namespace=self.namespace, body=new_pull_secret,
                                                  _request_timeout=API_TIMEOUT)
        elif current_pull_secret:
            # If there is a password set in kubernetes, but not in our configuration clear it out
            self.api.delete_namespaced_secret(pull_secret_name, self.namespace, _request_timeout=API_TIMEOUT)

        # Delete any previous job of the same name and wait until the read
        # raises (ApiException/404), meaning the deletion has completed.
        try:
            self.batch_api.delete_namespaced_job(name=name, namespace=self.namespace,
                                                 propagation_policy='Background', _request_timeout=API_TIMEOUT)
            while True:
                self.batch_api.read_namespaced_job(namespace=self.namespace, name=name,
                                                   _request_timeout=API_TIMEOUT)
                time.sleep(1)
        except ApiException:
            pass

        volumes = []
        volume_mounts = []

        # One PVC-backed volume/mount pair per requested mount.
        for index, mnt in enumerate(mounts):
            volumes.append(V1Volume(
                name=f'mount-{index}',
                persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                    claim_name=mnt['volume'],
                    read_only=False
                ),
            ))

            volume_mounts.append(V1VolumeMount(
                name=f'mount-{index}',
                mount_path=mnt['dest_path'],
                sub_path=mnt['source_path'],
                read_only=False,
            ))

        if CONFIGURATION_CONFIGMAP:
            # Mount the shared configuration file read-only from a config map.
            volumes.append(V1Volume(
                name='mount-configuration',
                config_map=V1ConfigMapVolumeSource(
                    name=CONFIGURATION_CONFIGMAP
                ),
            ))

            volume_mounts.append(V1VolumeMount(
                name='mount-configuration',
                mount_path='/etc/assemblyline/config.yml',
                sub_path="config",
                read_only=True,
            ))

        section = 'service'
        labels = {
            'app': 'assemblyline',
            'section': section,
            'privilege': 'core',
            'component': 'update-script',
        }
        labels.update(self.extra_labels)

        metadata = V1ObjectMeta(
            name=name,
            labels=labels
        )

        # Environment: container config first, then explicit env, then
        # inherited host variables, then the log level (later wins downstream).
        environment_variables = [V1EnvVar(name=_e.name, value=_e.value) for _e in docker_config.environment]
        environment_variables.extend([V1EnvVar(name=k, value=v) for k, v in env.items()])
        environment_variables.extend([V1EnvVar(name=k, value=os.environ[k])
                                      for k in INHERITED_VARIABLES if k in os.environ])
        environment_variables.append(V1EnvVar(name="LOG_LEVEL", value=self.log_level))

        cores = docker_config.cpu_cores
        memory = docker_config.ram_mb
        memory_min = min(docker_config.ram_mb_min, memory)

        container = V1Container(
            name=name,
            image=docker_config.image,
            command=docker_config.command,
            env=environment_variables,
            image_pull_policy='Always',
            volume_mounts=volume_mounts,
            resources=V1ResourceRequirements(
                limits={'cpu': cores, 'memory': f'{memory}Mi'},
                requests={'cpu': cores / 4, 'memory': f'{memory_min}Mi'},
            )
        )

        pod = V1PodSpec(
            volumes=volumes,
            restart_policy='Never',
            containers=[container],
            priority_class_name=self.priority_class,
        )

        if use_pull_secret:
            pod.image_pull_secrets = [V1LocalObjectReference(name=pull_secret_name)]

        job = V1Job(
            metadata=metadata,
            spec=V1JobSpec(
                backoff_limit=1,
                completions=1,
                template=V1PodTemplateSpec(
                    metadata=metadata,
                    spec=pod
                )
            )
        )

        status = self.batch_api.create_namespaced_job(namespace=self.namespace, body=job,
                                                      _request_timeout=API_TIMEOUT).status

        if blocking:
            # Poll until the job terminates, then clean it up; a 404 during
            # cleanup means it is already gone.
            try:
                while not (status.failed or status.succeeded):
                    time.sleep(3)
                    status = self.batch_api.read_namespaced_job(namespace=self.namespace, name=name,
                                                                _request_timeout=API_TIMEOUT).status

                self.batch_api.delete_namespaced_job(name=name, namespace=self.namespace,
                                                     propagation_policy='Background', _request_timeout=API_TIMEOUT)
            except ApiException as error:
                if error.status != 404:
                    raise
def test_delete_detached_pvcs(api: MagicMock):
    """Exercise delete_detached_pvcs with a delay cache: the first pass only
    records detached PVCs, the second pass (after the delay) deletes them.
    """
    api.list_namespaced_pod.return_value = V1PodList(items=[
        # pvc is attached
        V1Pod(spec=V1PodSpec(
            containers=[],
            volumes=[
                V1Volume(
                    name="queue",
                    persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                        claim_name="queue-web-3", ),
                )
            ],
        ), ),
        # pvc not attached because spec is missing
        V1Pod(),
        # pvc not attached because volumes are missing
        V1Pod(spec=V1PodSpec(containers=[], ), ),
        # pvc not attached because volume is not persistent
        V1Pod(spec=V1PodSpec(containers=[], volumes=[V1Volume(
            name="queue")]), ),
        # pvc not attached because pod is unschedulable due to pvc
        V1Pod(
            metadata=V1ObjectMeta(
                name="web-0",
                namespace="default",
                uid="uid-web-0",
                resource_version="1",
                owner_references=[V1ObjectReference(kind="StatefulSet")],
            ),
            status=V1PodStatus(
                phase="Pending",
                conditions=[
                    V1PodCondition(
                        status="Not Ready",
                        type="False",
                        reason="Unschedulable",
                        message='persistentvolumeclaim "queue-web-0" not found',
                    )
                ],
            ),
        ),
    ])
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should delete 0-2, 3 is in attached pvcs
            *(V1PersistentVolumeClaim(
                metadata=V1ObjectMeta(
                    name=f"queue-web-{i}",
                    uid=f"uid-queue-web-{i}",
                    resource_version=f"{i}",
                ),
                spec=V1PersistentVolumeClaimSpec(volume_name=f"pv-{i}"),
            ) for i in range(4)),
            # name does not start with claim prefix
            V1PersistentVolumeClaim(metadata=V1ObjectMeta(
                name="other-web-0"), ),
        ])

    # Simulate races: delete may hit Conflict or Not Found for some claims.
    def delete_pvc(name, namespace, body):
        if name == "queue-web-1":
            raise ApiException(reason="Conflict")
        if name == "queue-web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc
    # A microsecond delay means any cached entry is already "old enough".
    pvc_cleanup_delay = timedelta(microseconds=1)
    delay_complete = datetime.utcnow() - pvc_cleanup_delay
    cache = {
        # wrong pv name, should be overwritten
        "queue-web-0": PvcCacheEntry(pv="wrong", time=delay_complete),
        # no longer detached, should be removed
        "queue-web-3": PvcCacheEntry(pv="pv-3", time=delay_complete),
    }

    # First pass: populates the cache, deletes nothing yet.
    delete_detached_pvcs(api, "namespace", "queue-", pvc_cleanup_delay, cache)

    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    api.delete_namespaced_persistent_volume_claim.assert_not_called()
    assert {f"queue-web-{i}": f"pv-{i}"
            for i in range(3)} == {k: v.pv
                                   for k, v in cache.items()}
    api.list_namespaced_pod.reset_mock()
    api.list_namespaced_persistent_volume_claim.reset_mock()
    previous_cache = {**cache}

    # Second pass: delay has elapsed for cached entries, so deletes happen.
    delete_detached_pvcs(api, "namespace", "queue-", pvc_cleanup_delay, cache)

    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    assert previous_cache == cache
    # Deletes carry uid/resource_version preconditions for each stale claim.
    assert [
        (f"queue-web-{i}", "namespace", f"uid-queue-web-{i}", f"{i}")
        for i in range(3)
    ] == [(
        call.kwargs["name"],
        call.kwargs["namespace"],
        call.kwargs["body"].preconditions.uid,
        call.kwargs["body"].preconditions.resource_version,
    ) for call in api.delete_namespaced_persistent_volume_claim.call_args_list]
Beispiel #13
0
def test_delete_detached_pvcs(api: MagicMock):
    """delete_detached_pvcs must delete only detached, prefix-matching PVCs,
    with uid/resource_version preconditions, tolerating delete races.
    """
    api.list_namespaced_pod.return_value = V1PodList(
        items=[
            # pvc is attached
            V1Pod(
                spec=V1PodSpec(
                    containers=[],
                    volumes=[
                        V1Volume(
                            name="queue",
                            persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                                claim_name="queue-web-3",
                            ),
                        )
                    ],
                ),
            ),
            # pvc no attached because spec is missing
            V1Pod(),
            # pvc no attached because volumes are missing
            V1Pod(spec=V1PodSpec(containers=[],),),
            # pvc no attached because volume is not persistent
            V1Pod(spec=V1PodSpec(containers=[], volumes=[V1Volume(name="queue")]),),
            # pvc not attached because pod is unschedulable due to pvc
            V1Pod(
                metadata=V1ObjectMeta(
                    name="web-0",
                    namespace="default",
                    uid="uid-web-0",
                    resource_version="1",
                    owner_references=[V1ObjectReference(kind="StatefulSet")],
                ),
                status=V1PodStatus(
                    phase="Pending",
                    conditions=[
                        V1PodCondition(
                            status="Not Ready",
                            type="False",
                            reason="Unschedulable",
                            message='persistentvolumeclaim "queue-web-0" not found',
                        )
                    ],
                ),
            ),
        ]
    )
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should delete 0-2, 3 is in attached pvcs
            *(
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(
                        name=f"queue-web-{i}",
                        uid=f"uid-queue-web-{i}",
                        resource_version=f"{i}",
                    ),
                )
                for i in range(4)
            ),
            # name does not start with claim prefix
            V1PersistentVolumeClaim(metadata=V1ObjectMeta(name="other-web-0"),),
        ]
    )

    # Simulate races: delete may hit Conflict or Not Found for some claims.
    def delete_pvc(name, namespace, body):
        if name == "queue-web-1":
            raise ApiException(reason="Conflict")
        if name == "queue-web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc

    delete_detached_pvcs(api, "namespace", "queue-")

    # Fixed: the original called ``called_once_with`` which is a no-op
    # MagicMock attribute access, not an assertion.
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with("namespace")
    assert [
        (f"queue-web-{i}", "namespace", f"uid-queue-web-{i}", f"{i}") for i in range(3)
    ] == [
        (
            call.kwargs["name"],
            call.kwargs["namespace"],
            call.kwargs["body"].preconditions.uid,
            call.kwargs["body"].preconditions.resource_version,
        )
        for call in api.delete_namespaced_persistent_volume_claim.call_args_list
    ]