def test_calculate_resource_utilization_for_kube_nodes():
    """Free capacity per resource is the node's allocatable minus pod requests."""
    node = V1Node(
        metadata=V1ObjectMeta(name="fake_node1"),
        status=V1NodeStatus(
            allocatable={
                "cpu": "500",
                "ephemeral-storage": "200Mi",
                "memory": "750Mi",
            },
        ),
    )
    pod = V1Pod(
        metadata=V1ObjectMeta(name="pod1"),
        status=V1PodStatus(phase="Running"),
        spec=V1PodSpec(
            containers=[
                V1Container(
                    name="container1",
                    resources=V1ResourceRequirements(
                        requests={
                            "cpu": "20",
                            "ephemeral-storage": "20Mi",
                            "memory": "20Mi",
                        }
                    ),
                )
            ]
        ),
    )

    utilization = metastatus_lib.calculate_resource_utilization_for_kube_nodes(
        nodes=[node], pods_by_node={"fake_node1": [pod]}
    )
    free = utilization["free"]

    # 500 - 20 cpus, 750Mi - 20Mi memory, 200Mi - 20Mi ephemeral storage.
    assert free.cpus == 480
    assert free.mem == 730
    assert free.disk == 180
def test_init_containers_running():
    """A Pending pod whose init container is running infers RUNNING_AT_INIT_CONTAINERS."""
    init_status = V1ContainerStatus(
        image="benchmarkai/data-puller",
        name="data-puller",
        image_id="",
        ready=True,
        restart_count=0,
        state=V1ContainerState(running=V1ContainerStateRunning()),
    )
    pod = V1Pod(
        metadata=V1ObjectMeta(name="pod-name"),
        status=V1PodStatus(
            phase="Pending",
            init_container_statuses=[init_status],
        ),
    )

    inferrer = SingleNodeStrategyKubernetesStatusInferrer(
        V1JobStatus(active=1), pods=[pod])

    assert inferrer.status() == BenchmarkJobStatus.RUNNING_AT_INIT_CONTAINERS
def test_assert_kube_pods_running():
    """assert_kube_pods_running tallies pods per phase and reports the cluster OK."""
    with mock.patch(
        "paasta_tools.metrics.metastatus_lib.get_all_pods", autospec=True
    ) as mock_get_all_pods:
        client = Mock()
        # 1 running, 2 pending, 3 failed.
        phases = ["Running"] + ["Pending"] * 2 + ["Failed"] * 3
        mock_get_all_pods.return_value = [
            V1Pod(status=V1PodStatus(phase=phase)) for phase in phases
        ]
        output, ok = metastatus_lib.assert_kube_pods_running(client)

    assert re.match("Pods: running: 1 pending: 2 failed: 3", output)
    assert ok
def test_delete_unschedulable_pods(api: MagicMock):
    """Unschedulable StatefulSet pods with a missing-PVC message get deleted with preconditions."""

    def make_pod(i):
        # Pods 0-2 carry the 'persistentvolumeclaim ... not found' message and
        # should be deleted; pod 3 has an empty message and must survive.
        message = "" if i == 3 else f'persistentvolumeclaim "queue-web-{i}" not found'
        return V1Pod(
            metadata=V1ObjectMeta(
                name=f"web-{i}",
                namespace="default",
                uid=f"uid-web-{i}",
                resource_version=f"{i}",
                owner_references=[V1ObjectReference(kind="StatefulSet")],
            ),
            status=V1PodStatus(
                phase="Pending",
                conditions=[
                    V1PodCondition(
                        status="Not Ready",
                        type="False",
                        reason="Unschedulable",
                        message=message,
                    )
                ],
            ),
        )

    api.list_namespaced_pod.return_value = V1PodList(
        items=[make_pod(i) for i in range(4)])

    def delete_pod(name, namespace, body):
        # Conflict and Not Found API errors must be tolerated by the cleaner.
        if name == "web-1":
            raise ApiException(reason="Conflict")
        if name == "web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_pod.side_effect = delete_pod

    delete_unschedulable_pods(api, "namespace")

    expected = [(f"web-{i}", "namespace", f"uid-web-{i}", f"{i}") for i in range(3)]
    actual = [
        (
            call.kwargs["name"],
            call.kwargs["namespace"],
            call.kwargs["body"].preconditions.uid,
            call.kwargs["body"].preconditions.resource_version,
        )
        for call in api.delete_namespaced_pod.call_args_list
    ]
    assert expected == actual
def test_get_by_id(mocker: MockFixture, tensorboard_manager_mocked: TensorboardManager,
                   fake_tensorboard_pod_phase: str,
                   expected_tensorboard_status: TensorboardStatus):
    """get_by_id returns the id, a status derived from the pod phase, and the ingress path."""
    fake_tensorboard_id = 'cda7ad77-c499-4ef7-8f73-be7ce254be6a'
    fake_tensorboard_path = '/tb/' + fake_tensorboard_id

    ingress_path = V1beta1HTTPIngressPath(
        backend=V1beta1IngressBackend(service_name='fake-service', service_port=80),
        path=fake_tensorboard_path)
    fake_ingress = V1beta1Ingress(spec=V1beta1IngressSpec(rules=[
        V1beta1IngressRule(http=V1beta1HTTPIngressRuleValue(paths=[ingress_path]))
    ]))

    def ready_container_status():
        return V1ContainerStatus(ready=True, image='', image_id='', name='',
                                 restart_count=0)

    fake_pod = V1Pod(status=V1PodStatus(
        phase=fake_tensorboard_pod_phase,
        container_statuses=[ready_container_status(), ready_container_status()]))

    client = tensorboard_manager_mocked.client
    mocker.patch.object(client, 'get_deployment').return_value = V1Deployment()
    mocker.patch.object(client, 'get_ingress').return_value = fake_ingress
    mocker.patch.object(client, 'get_pod').return_value = fake_pod
    mocker.patch(
        'tensorboard.tensorboard.TensorboardManager._check_tensorboard_nginx_reachable'
    ).return_value = True

    tensorboard = tensorboard_manager_mocked.get_by_id(id=fake_tensorboard_id)

    assert tensorboard.id == fake_tensorboard_id
    assert tensorboard.status == expected_tensorboard_status
    assert tensorboard.url == fake_tensorboard_path
def pod_with_required_affinity():
    """Pending, unschedulable pod that hard-requires nodes in the 'bar' pool."""
    pool_requirement = V1NodeSelectorRequirement(
        key='clusterman.com/pool', operator='In', values=['bar'])
    required_affinity = V1NodeAffinity(
        required_during_scheduling_ignored_during_execution=V1NodeSelector(
            node_selector_terms=[
                V1NodeSelectorTerm(match_expressions=[pool_requirement])
            ]))
    return V1Pod(
        status=V1PodStatus(
            phase='Pending',
            conditions=[
                V1PodCondition(status='False', type='PodScheduled',
                               reason='Unschedulable')
            ]),
        spec=V1PodSpec(
            containers=[
                V1Container(
                    name='container',
                    resources=V1ResourceRequirements(requests={'cpu': '1.5'}))
            ],
            affinity=V1Affinity(node_affinity=required_affinity)))
def test_launch_pod_fail(launch_mocks: LaunchPredictMocks):
    """launch exits non-zero when the inference pod lands in the Failed phase."""
    failed_pod_mock = MagicMock(spec=V1Pod)
    failed_pod_mock.status = V1PodStatus(
        phase='Failed',
        container_statuses=[
            V1ContainerStatus(ready=True, image="image", image_id="image_id",
                              name="name", restart_count=0)
        ])
    launch_mocks.get_namespaced_pods.return_value = [failed_pod_mock]

    result = CliRunner().invoke(
        launch.launch, ['--model-location', '/fake/model/location'])

    # The full launch pipeline still runs exactly once end to end.
    for pipeline_mock in (launch_mocks.generate_name_mock,
                          launch_mocks.start_inference_instance_mock,
                          launch_mocks.get_namespace_mock,
                          launch_mocks.get_inference_instance_url_mock,
                          launch_mocks.get_authorization_header_mock,
                          launch_mocks.get_namespaced_pods):
        assert pipeline_mock.call_count == 1
    assert result.exit_code == 1
def test_get_by_runs(mocker: MockFixture, tensorboard_manager_mocked: TensorboardManager):
    """get_by_runs finds the tensorboard deployment regardless of run ordering.

    Builds a K8STensorboardInstance from three runs, fakes the Kubernetes
    list_deployments call so it only answers when the label selector matches,
    then queries with the same runs in a different order and expects the same
    tensorboard (id, RUNNING status, ingress path) back.
    """
    fake_tensorboard_id = '5c0b46de-4017-4062-9ac8-94698cc0c513'
    fake_tensorboard_path = '/tb/' + fake_tensorboard_id + '/'
    fake_tensorboard_pod_phase = 'RUNNING'
    expected_tensorboard_status = TensorboardStatus.RUNNING
    runs = [
        Run(name='run-name-1', owner='sigfrid'),
        Run(name='run-name-2', owner='schreck'),
        Run(name='run-name-3', owner='jacek')
    ]
    k8s_tensorboard = K8STensorboardInstance.from_runs(id=fake_tensorboard_id, runs=runs)

    # mocking manually this method is done because we want to mock Kubernetes behaviour to check, if our code
    # requests Kubernetes API server for deployment with proper label_selector. If label_selector matches
    # created deployment, this mocked method will return it. Our code should pass the same label_selector regardless of
    # order of run_names in get_by_run_names parameter. That's why we create below get_run_names with other order to
    # check if returned deployment would still be the same.
    # noinspection PyUnusedLocal
    def _fake_list_deployments(namespace: str, label_selector: str):
        # Answer only when the selector matches the deployment's labels;
        # otherwise fall through (implicitly returning None).
        deployments = [k8s_tensorboard.deployment]
        splitted_label_selector = label_selector.split('=')
        label_selector_key = splitted_label_selector[0]
        label_selector_value = splitted_label_selector[1]
        if deployments[0].metadata.labels[
                label_selector_key] == label_selector_value:
            return deployments

    fake_pod = V1Pod(status=V1PodStatus(
        phase=fake_tensorboard_pod_phase,
        container_statuses=[
            V1ContainerStatus(
                ready=True, image='', image_id='', name='', restart_count=0),
            V1ContainerStatus(
                ready=True, image='', image_id='', name='', restart_count=0)
        ]))

    mocker.patch.object(tensorboard_manager_mocked.client, 'list_deployments',
                        new=_fake_list_deployments)
    mocker.patch.object(
        tensorboard_manager_mocked.client,
        'list_ingresses').return_value = [k8s_tensorboard.ingress]
    mocker.patch.object(tensorboard_manager_mocked.client,
                        'get_pod').return_value = fake_pod
    mocker.patch(
        'tensorboard.tensorboard.TensorboardManager._check_tensorboard_nginx_reachable'
    ).return_value = True

    # Same runs as above, deliberately in a different order.
    get_runs = [
        Run(name='run-name-2', owner='schreck'),
        Run(name='run-name-3', owner='jacek'),
        Run(name='run-name-1', owner='sigfrid')
    ]
    tensorboard = tensorboard_manager_mocked.get_by_runs(get_runs)

    assert tensorboard.id == fake_tensorboard_id
    assert tensorboard.status == expected_tensorboard_status
    assert tensorboard.url == fake_tensorboard_path
pod_count=1, pod_selector={}) ] TEST_EXPERIMENT = Experiment(name='test-experiment', template_name='template-name', template_namespace='namespace', template_version='0.1.0') mocked_test_pod = MagicMock(spec=V1Pod) mocked_test_pod.metadata.name = "test" mocked_test_pod.metadata.uid = "uid" TEST_PODS = [mocked_test_pod] pending_pod = MagicMock(spec=V1Pod) pending_pod.status = V1PodStatus(phase=PodStatus.PENDING.value) pending_pod.metadata.name = "test" pending_pod.metadata.uid = "uid" PENDING_POD = [pending_pod] TOP_USERS = [ResourceUsage(user_name="user_name", cpu_usage=2, mem_usage=1000)] event = MagicMock(spec=V1Event) event.message = "insufficient memory" event.reason = "insufficient memory" event.involved_object = V1ObjectReference(name="test-experiment") event.metadata = V1ObjectMeta(name="test-experiment") EVENTS = [event] class ViewMocks:
# See the License for the specific language governing permissions and # limitations under the License. # from click.testing import CliRunner import pytest from unittest.mock import MagicMock from kubernetes.client import V1Pod, V1PodStatus, V1ContainerStatus from commands.predict import launch mocked_test_pod = MagicMock(spec=V1Pod) mocked_test_pod.status = V1PodStatus(container_statuses=[ V1ContainerStatus(ready=True, image="image", image_id="image_id", name="name", restart_count=0) ], phase='Running') TEST_PODS = [mocked_test_pod] class LaunchPredictMocks: def __init__(self, mocker): self.generate_name_mock = mocker.patch( 'commands.predict.launch.generate_name') self.start_inference_instance_mock = mocker.patch( 'commands.predict.launch.start_inference_instance') self.get_inference_instance_url_mock = mocker.patch( 'commands.predict.launch.get_inference_instance_url') self.get_authorization_header_mock = mocker.patch(
def test_delete_detached_pvcs(api: MagicMock):
    """delete_detached_pvcs deletes prefixed PVCs only once they have stayed
    detached longer than ``pvc_cleanup_delay``, tracking candidates in ``cache``.

    First pass: detached PVCs are (re)recorded in the cache, nothing deleted.
    Second pass: cached entries have aged past the delay, so queue-web-0..2 are
    deleted (Conflict / Not Found API errors tolerated); queue-web-3 is mounted
    and other-web-0 lacks the prefix, so both survive.
    """
    api.list_namespaced_pod.return_value = V1PodList(items=[
        # pvc is attached
        V1Pod(spec=V1PodSpec(
            containers=[],
            volumes=[
                V1Volume(
                    name="queue",
                    persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                        claim_name="queue-web-3",
                    ),
                )
            ],
        ), ),
        # pvc not attached because spec is missing
        V1Pod(),
        # pvc not attached because volumes are missing
        V1Pod(spec=V1PodSpec(containers=[], ), ),
        # pvc not attached because volume is not persistent
        V1Pod(spec=V1PodSpec(containers=[], volumes=[V1Volume(
            name="queue")]), ),
        # pvc not attached because pod is unschedulable due to pvc
        V1Pod(
            metadata=V1ObjectMeta(
                name="web-0",
                namespace="default",
                uid="uid-web-0",
                resource_version="1",
                owner_references=[V1ObjectReference(kind="StatefulSet")],
            ),
            status=V1PodStatus(
                phase="Pending",
                conditions=[
                    V1PodCondition(
                        status="Not Ready",
                        type="False",
                        reason="Unschedulable",
                        message='persistentvolumeclaim "queue-web-0" not found',
                    )
                ],
            ),
        ),
    ])
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should delete 0-2, 3 is in attached pvcs
            *(V1PersistentVolumeClaim(
                metadata=V1ObjectMeta(
                    name=f"queue-web-{i}",
                    uid=f"uid-queue-web-{i}",
                    resource_version=f"{i}",
                ),
                spec=V1PersistentVolumeClaimSpec(volume_name=f"pv-{i}"),
            ) for i in range(4)),
            # name does not start with claim prefix
            V1PersistentVolumeClaim(metadata=V1ObjectMeta(
                name="other-web-0"), ),
        ])

    def delete_pvc(name, namespace, body):
        # Conflict / Not Found API errors must be tolerated by the cleaner.
        if name == "queue-web-1":
            raise ApiException(reason="Conflict")
        if name == "queue-web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc
    pvc_cleanup_delay = timedelta(microseconds=1)
    delay_complete = datetime.utcnow() - pvc_cleanup_delay
    cache = {
        # wrong pv name, should be overwritten
        "queue-web-0": PvcCacheEntry(pv="wrong", time=delay_complete),
        # no longer detached, should be removed
        "queue-web-3": PvcCacheEntry(pv="pv-3", time=delay_complete),
    }

    # First pass: cache is populated/corrected, but nothing is deleted yet.
    delete_detached_pvcs(api, "namespace", "queue-", pvc_cleanup_delay, cache)
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    api.delete_namespaced_persistent_volume_claim.assert_not_called()
    assert {f"queue-web-{i}": f"pv-{i}"
            for i in range(3)} == {k: v.pv
                                   for k, v in cache.items()}
    api.list_namespaced_pod.reset_mock()
    api.list_namespaced_persistent_volume_claim.reset_mock()
    previous_cache = {**cache}

    # Second pass: cached entries aged past the delay, so 0-2 get deleted
    # with uid/resource_version preconditions; the cache is left unchanged.
    delete_detached_pvcs(api, "namespace", "queue-", pvc_cleanup_delay, cache)
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    assert previous_cache == cache
    assert [
        (f"queue-web-{i}", "namespace", f"uid-queue-web-{i}", f"{i}")
        for i in range(3)
    ] == [(
        call.kwargs["name"],
        call.kwargs["namespace"],
        call.kwargs["body"].preconditions.uid,
        call.kwargs["body"].preconditions.resource_version,
    ) for call in api.delete_namespaced_persistent_volume_claim.call_args_list]
def test_pod_in_running_phase():
    """A pod in the Running phase infers RUNNING_AT_MAIN_CONTAINERS."""
    running_pod = V1Pod(status=V1PodStatus(phase="Running"))
    inferrer = SingleNodeStrategyKubernetesStatusInferrer(
        V1JobStatus(active=1), pods=[running_pod])
    assert inferrer.status() == BenchmarkJobStatus.RUNNING_AT_MAIN_CONTAINERS
def create_k8s_autoscaler(context, prevent_scale_down_after_capacity_loss=False):
    """Build a Kubernetes Autoscaler wired to the mocked cluster connector.

    Reads ``context.allocated_cpus`` / ``context.pending_cpus`` from the behave
    context.  When pending CPUs are requested, installs two pending pods whose
    unschedulable reasons differ: pod1 -> InsufficientResources, pod2 -> Unknown.

    :param context: behave context carrying the mocked connector and CPU figures.
    :param prevent_scale_down_after_capacity_loss: when True, overrides the
        autoscaler's AutoscalingConfig to enable the capacity-loss guard.
    """
    behave.use_fixture(autoscaler_patches, context)
    context.mock_cluster_connector.__class__ = KubernetesClusterConnector
    context.mock_cluster_connector.get_cluster_allocated_resources.return_value = ClustermanResources(
        cpus=context.allocated_cpus,
    )
    context.mock_cluster_connector._pending_pods = []
    if float(context.pending_cpus) > 0:
        # Bind the real (unbound) implementation against the mock connector so
        # the genuine pending-pod filtering logic runs over the fake pods below.
        context.mock_cluster_connector.get_unschedulable_pods = \
            lambda: KubernetesClusterConnector.get_unschedulable_pods(context.mock_cluster_connector)
        context.mock_cluster_connector._get_pod_unschedulable_reason.side_effect = lambda pod: (
            PodUnschedulableReason.InsufficientResources
            if pod.metadata.name == 'pod1' else PodUnschedulableReason.Unknown)
        context.mock_cluster_connector._pending_pods = [
            V1Pod(
                metadata=V1ObjectMeta(name='pod1'),
                status=V1PodStatus(
                    phase='Pending',
                    conditions=[
                        V1PodCondition(status='False', type='PodScheduled',
                                       reason='Unschedulable')
                    ],
                ),
                spec=V1PodSpec(containers=[
                    V1Container(name='container1',
                                resources=V1ResourceRequirements(
                                    requests={'cpu': context.pending_cpus})),
                ]),
            ),
            V1Pod(
                metadata=V1ObjectMeta(name='pod2'),
                status=V1PodStatus(
                    phase='Pending',
                    conditions=[
                        V1PodCondition(status='False', type='PodScheduled',
                                       reason='Unschedulable')
                    ],
                ),
                spec=V1PodSpec(containers=[
                    V1Container(name='container1',
                                resources=V1ResourceRequirements(
                                    requests={'cpu': context.pending_cpus})),
                ]),
            ),
        ]
    context.autoscaler = Autoscaler(
        cluster='kube-test',
        pool='bar',
        apps=['bar'],
        scheduler='kubernetes',
        metrics_client=mock.Mock(),
        monitoring_enabled=False,
    )
    if prevent_scale_down_after_capacity_loss:
        context.autoscaler.autoscaling_config = AutoscalingConfig(
            excluded_resources=[],
            setpoint=0.7,
            target_capacity_margin=0.1,
            prevent_scale_down_after_capacity_loss=True,
            instance_loss_threshold=0)
def stub_read_namespaced_pod(**args):
    """Return a canned ``V1Pod`` mimicking a real ``read_namespaced_pod`` response.

    The pod carries multus CNI annotations describing an ``eth0`` interface
    (10.244.0.205) plus a secondary ``net1`` interface (192.168.10.17), a
    plain-dict ``spec`` and a ``V1PodStatus`` in the ``Running`` phase.
    ``**args`` is accepted and ignored so the stub can replace the client
    method regardless of the arguments it is called with.
    """
    v1_pod = V1Pod(
        **{
            'api_version': 'v1',
            'kind': 'Pod',
            'metadata': V1ObjectMeta(
                **{
                    # Raw CNI annotations exactly as the multus plugin writes them.
                    'annotations': {
                        'k8s.v1.cni.cncf.io/network-status':
                            '[{\n'
                            ' '
                            '"name": '
                            '"",\n'
                            ' '
                            '"interface": '
                            '"eth0",\n'
                            ' "ips": '
                            '[\n'
                            ' '
                            '"10.244.0.205"\n'
                            ' ],\n'
                            ' "mac": '
                            '"8a:b9:aa:99:6e:91",\n'
                            ' '
                            '"default": '
                            'true,\n'
                            ' "dns": '
                            '{}\n'
                            '},{\n'
                            ' '
                            '"name": '
                            '"default/fgt-91e0e29-3b9b-46de-9015-863cb2283795-n6-cmm-vl",\n'
                            ' '
                            '"interface": '
                            '"net1",\n'
                            ' "ips": '
                            '[\n'
                            ' '
                            '"192.168.10.17"\n'
                            ' ],\n'
                            ' "mac": '
                            '"52:54:00:11:2a:27",\n'
                            ' "dns": '
                            '{}\n'
                            '}]',
                        'k8s.v1.cni.cncf.io/networks':
                            '[{"name": '
                            '"fgt-91e0e29-3b9b-46de-9015-863cb2283795-n6-cmm-vl", '
                            '"default-route": '
                            '["0.0.0.0"], '
                            '"ips": '
                            '["192.168.10.17"], '
                            '"mac": '
                            '"52:54:00:11:2a:29"}]',
                        'k8s.v1.cni.cncf.io/networks-status':
                            '[{\n'
                            ' '
                            '"name": '
                            '"",\n'
                            ' '
                            '"interface": '
                            '"eth0",\n'
                            ' '
                            '"ips": [\n'
                            ' '
                            '"10.244.0.205"\n'
                            ' ],\n'
                            ' '
                            '"mac": '
                            '"8a:b9:aa:99:6e:91",\n'
                            ' '
                            '"default": '
                            'true,\n'
                            ' '
                            '"dns": '
                            '{}\n'
                            '},{\n'
                            ' '
                            '"name": '
                            '"default/fgt-91e0e29-3b9b-46de-9015-863cb2283795-n6-cmm-vl",\n'
                            ' '
                            '"interface": '
                            '"net1",\n'
                            ' '
                            '"ips": [\n'
                            ' '
                            '"192.168.10.17"\n'
                            ' ],\n'
                            ' '
                            '"mac": '
                            '"52:54:00:11:2a:27",\n'
                            ' '
                            '"dns": '
                            '{}\n'
                            '}]'
                    },
                    'cluster_name': None,
                    'creation_timestamp': datetime.datetime(2020, 12, 29, 12, 28, 36,
                                                            tzinfo=tzutc()),
                    'deletion_grace_period_seconds': None,
                    'deletion_timestamp': None,
                    'finalizers': None,
                    'generate_name': None,
                    'generation': None,
                    'labels': None,
                    # Server-side-apply bookkeeping: one entry each for the creating
                    # client (OpenAPI-Generator), multus (annotations) and kubelet (status).
                    'managed_fields': [{
                        'api_version': 'v1',
                        'fields_type': 'FieldsV1',
                        'fields_v1': {
                            'f:metadata': {
                                'f:annotations': {
                                    '.': {},
                                    'f:k8s.v1.cni.cncf.io/networks': {}
                                }
                            },
                            'f:spec': {
                                'f:containers': {
                                    'k:{"name":"fgt-91e0e29-3b9b-46de-9015-863cb2283795-0-darlvnf-1"}': {
                                        '.': {},
                                        'f:command': {},
                                        'f:image': {},
                                        'f:imagePullPolicy': {},
                                        'f:name': {},
                                        'f:resources': {},
                                        'f:terminationMessagePath': {},
                                        'f:terminationMessagePolicy': {}
                                    }
                                },
                                'f:dnsPolicy': {},
                                'f:enableServiceLinks': {},
                                'f:restartPolicy': {},
                                'f:schedulerName': {},
                                'f:securityContext': {},
                                'f:terminationGracePeriodSeconds': {}
                            }
                        },
                        'manager': 'OpenAPI-Generator',
                        'operation': 'Update',
                        'time': datetime.datetime(2020, 12, 29, 12, 28, 36,
                                                  tzinfo=tzutc())
                    }, {
                        'api_version': 'v1',
                        'fields_type': 'FieldsV1',
                        'fields_v1': {
                            'f:metadata': {
                                'f:annotations': {
                                    'f:k8s.v1.cni.cncf.io/network-status': {},
                                    'f:k8s.v1.cni.cncf.io/networks-status': {}
                                }
                            }
                        },
                        'manager': 'multus',
                        'operation': 'Update',
                        'time': datetime.datetime(2020, 12, 29, 12, 28, 38,
                                                  tzinfo=tzutc())
                    }, {
                        'api_version': 'v1',
                        'fields_type': 'FieldsV1',
                        'fields_v1': {
                            'f:status': {
                                'f:conditions': {
                                    'k:{"type":"ContainersReady"}': {
                                        '.': {},
                                        'f:lastProbeTime': {},
                                        'f:lastTransitionTime': {},
                                        'f:status': {},
                                        'f:type': {}
                                    },
                                    'k:{"type":"Initialized"}': {
                                        '.': {},
                                        'f:lastProbeTime': {},
                                        'f:lastTransitionTime': {},
                                        'f:status': {},
                                        'f:type': {}
                                    },
                                    'k:{"type":"Ready"}': {
                                        '.': {},
                                        'f:lastProbeTime': {},
                                        'f:lastTransitionTime': {},
                                        'f:status': {},
                                        'f:type': {}
                                    }
                                },
                                'f:containerStatuses': {},
                                'f:hostIP': {},
                                'f:phase': {},
                                'f:podIP': {},
                                'f:podIPs': {
                                    '.': {},
                                    'k:{"ip":"10.244.0.205"}': {
                                        '.': {},
                                        'f:ip': {}
                                    }
                                },
                                'f:startTime': {}
                            }
                        },
                        'manager': 'kubelet',
                        'operation': 'Update',
                        'time': datetime.datetime(2020, 12, 29, 12, 28, 40,
                                                  tzinfo=tzutc())
                    }],
                    'name': 'fgt-91e0e29-3b9b-46de-9015-863cb2283795-0-darlvnf-1',
                    'namespace': 'default',
                    'owner_references': None,
                    'resource_version': '2453414',
                    'self_link': '/api/v1/namespaces/default/pods/fgt-91e0e29-3b9b-46de-9015-863cb2283795-0-darlvnf-1',
                    'uid': 'e862a80b-fc42-42b5-969a-33e397e5f816'
                }),
            # NOTE(review): 'spec' is a plain dict rather than a V1PodSpec; the
            # client model accepts this but performs no type validation on it.
            'spec': {
                'active_deadline_seconds': None,
                'affinity': None,
                'automount_service_account_token': None,
                'containers': [{
                    'args': None,
                    'command': [
                        '/bin/ash', '-c',
                        'trap : TERM INT; sleep infinity & wait'
                    ],
                    'env': None,
                    'env_from': None,
                    'image': 'jabbo16/iperf-host:1.0.0',
                    'image_pull_policy': 'IfNotPresent',
                    'lifecycle': None,
                    'liveness_probe': None,
                    'name': 'fgt-91e0e29-3b9b-46de-9015-863cb2283795-0-darlvnf-1',
                    'ports': None,
                    'readiness_probe': None,
                    'resources': {
                        'limits': None,
                        'requests': None
                    },
                    'security_context': None,
                    'startup_probe': None,
                    'stdin': None,
                    'stdin_once': None,
                    'termination_message_path': '/dev/termination-log',
                    'termination_message_policy': 'File',
                    'tty': None,
                    'volume_devices': None,
                    'volume_mounts': [{
                        'mount_path': '/var/run/secrets/kubernetes.io/serviceaccount',
                        'mount_propagation': None,
                        'name': 'default-token-59pwl',
                        'read_only': True,
                        'sub_path': None,
                        'sub_path_expr': None
                    }],
                    'working_dir': None
                }],
                'dns_config': None,
                'dns_policy': 'ClusterFirst',
                'enable_service_links': True,
                'ephemeral_containers': None,
                'host_aliases': None,
                'host_ipc': None,
                'host_network': None,
                'host_pid': None,
                'hostname': None,
                'image_pull_secrets': None,
                'init_containers': None,
                'node_name': 'master-node',
                'node_selector': None,
                'overhead': None,
                'preemption_policy': 'PreemptLowerPriority',
                'priority': 0,
                'priority_class_name': None,
                'readiness_gates': None,
                'restart_policy': 'Always',
                'runtime_class_name': None,
                'scheduler_name': 'default-scheduler',
                'security_context': {
                    'fs_group': None,
                    'run_as_group': None,
                    'run_as_non_root': None,
                    'run_as_user': None,
                    'se_linux_options': None,
                    'supplemental_groups': None,
                    'sysctls': None,
                    'windows_options': None
                },
                'service_account': 'default',
                'service_account_name': 'default',
                'share_process_namespace': None,
                'subdomain': None,
                'termination_grace_period_seconds': 30,
                'tolerations': [{
                    'effect': 'NoExecute',
                    'key': 'node.kubernetes.io/not-ready',
                    'operator': 'Exists',
                    'toleration_seconds': 300,
                    'value': None
                }, {
                    'effect': 'NoExecute',
                    'key': 'node.kubernetes.io/unreachable',
                    'operator': 'Exists',
                    'toleration_seconds': 300,
                    'value': None
                }],
                'topology_spread_constraints': None,
                'volumes': [{
                    'aws_elastic_block_store': None,
                    'azure_disk': None,
                    'azure_file': None,
                    'cephfs': None,
                    'cinder': None,
                    'config_map': None,
                    'csi': None,
                    'downward_api': None,
                    'empty_dir': None,
                    'fc': None,
                    'flex_volume': None,
                    'flocker': None,
                    'gce_persistent_disk': None,
                    'git_repo': None,
                    'glusterfs': None,
                    'host_path': None,
                    'iscsi': None,
                    'name': 'default-token-59pwl',
                    'nfs': None,
                    'persistent_volume_claim': None,
                    'photon_persistent_disk': None,
                    'portworx_volume': None,
                    'projected': None,
                    'quobyte': None,
                    'rbd': None,
                    'scale_io': None,
                    'secret': {
                        'default_mode': 420,
                        'items': None,
                        'optional': None,
                        'secret_name': 'default-token-59pwl'
                    },
                    'storageos': None,
                    'vsphere_volume': None
                }]
            },
            'status': V1PodStatus(
                **{
                    'conditions': [{
                        'last_probe_time': None,
                        'last_transition_time': datetime.datetime(
                            2020, 12, 29, 12, 28, 36, tzinfo=tzutc()),
                        'message': None,
                        'reason': None,
                        'status': 'True',
                        'type': 'Initialized'
                    }, {
                        'last_probe_time': None,
                        'last_transition_time': datetime.datetime(
                            2020, 12, 29, 12, 28, 40, tzinfo=tzutc()),
                        'message': None,
                        'reason': None,
                        'status': 'True',
                        'type': 'Ready'
                    }, {
                        'last_probe_time': None,
                        'last_transition_time': datetime.datetime(
                            2020, 12, 29, 12, 28, 40, tzinfo=tzutc()),
                        'message': None,
                        'reason': None,
                        'status': 'True',
                        'type': 'ContainersReady'
                    }, {
                        'last_probe_time': None,
                        'last_transition_time': datetime.datetime(
                            2020, 12, 29, 12, 28, 36, tzinfo=tzutc()),
                        'message': None,
                        'reason': None,
                        'status': 'True',
                        'type': 'PodScheduled'
                    }],
                    'container_statuses': [{
                        'container_id':
                            'docker://21f98bce8f30b874a99a1871ab2267c5632ebc4b2841fc74535415f2e50e0684',
                        'image': 'jabbo16/iperf-host:1.0.0',
                        'image_id':
                            'docker-pullable://jabbo16/iperf-host@sha256:d2e9422dcf0700f7c67fbe6a48d7204f4d4dcba0ad5b4d71d9a93af50d3ae547',
                        'last_state': {
                            'running': None,
                            'terminated': None,
                            'waiting': None
                        },
                        'name': 'fgt-91e0e29-3b9b-46de-9015-863cb2283795-0-darlvnf-1',
                        'ready': True,
                        'restart_count': 0,
                        'started': True,
                        'state': {
                            'running': {
                                'started_at': datetime.datetime(
                                    2020, 12, 29, 12, 28, 40, tzinfo=tzutc())
                            },
                            'terminated': None,
                            'waiting': None
                        }
                    }],
                    'ephemeral_container_statuses': None,
                    'host_ip': '192.168.100.150',
                    'init_container_statuses': None,
                    'message': None,
                    'nominated_node_name': None,
                    'phase': 'Running',
                    'pod_i_ps': [{
                        'ip': '10.244.0.205'
                    }],
                    'pod_ip': '10.244.0.205',
                    'qos_class': 'BestEffort',
                    'reason': None,
                    'start_time': datetime.datetime(2020, 12, 29, 12, 28, 36,
                                                    tzinfo=tzutc())
                })
        })
    return v1_pod
def test_delete_detached_pvcs(api: MagicMock):
    """delete_detached_pvcs removes prefixed claims that no pod mounts.

    PVCs queue-web-0..2 are detached and should be deleted (Conflict and
    Not Found API errors are tolerated); queue-web-3 is mounted by a pod and
    other-web-0 lacks the claim prefix, so both survive.
    """
    api.list_namespaced_pod.return_value = V1PodList(
        items=[
            # pvc is attached
            V1Pod(
                spec=V1PodSpec(
                    containers=[],
                    volumes=[
                        V1Volume(
                            name="queue",
                            persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                                claim_name="queue-web-3",
                            ),
                        )
                    ],
                ),
            ),
            # pvc not attached because spec is missing
            V1Pod(),
            # pvc not attached because volumes are missing
            V1Pod(spec=V1PodSpec(containers=[],),),
            # pvc not attached because volume is not persistent
            V1Pod(spec=V1PodSpec(containers=[], volumes=[V1Volume(name="queue")]),),
            # pvc not attached because pod is unschedulable due to pvc
            V1Pod(
                metadata=V1ObjectMeta(
                    name="web-0",
                    namespace="default",
                    uid="uid-web-0",
                    resource_version="1",
                    owner_references=[V1ObjectReference(kind="StatefulSet")],
                ),
                status=V1PodStatus(
                    phase="Pending",
                    conditions=[
                        V1PodCondition(
                            status="Not Ready",
                            type="False",
                            reason="Unschedulable",
                            message='persistentvolumeclaim "queue-web-0" not found',
                        )
                    ],
                ),
            ),
        ]
    )
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should delete 0-2, 3 is in attached pvcs
            *(
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(
                        name=f"queue-web-{i}",
                        uid=f"uid-queue-web-{i}",
                        resource_version=f"{i}",
                    ),
                )
                for i in range(4)
            ),
            # name does not start with claim prefix
            V1PersistentVolumeClaim(metadata=V1ObjectMeta(name="other-web-0"),),
        ]
    )

    def delete_pvc(name, namespace, body):
        # The cleaner must tolerate both races: the PVC changed underneath us
        # (Conflict) or was already deleted (Not Found).
        if name == "queue-web-1":
            raise ApiException(reason="Conflict")
        if name == "queue-web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc
    delete_detached_pvcs(api, "namespace", "queue-")
    # Fixed: `called_once_with(...)` is not an assertion on a Mock -- it just
    # creates a child mock and always "passes". Use assert_called_once_with.
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with("namespace")
    assert [
        (f"queue-web-{i}", "namespace", f"uid-queue-web-{i}", f"{i}") for i in range(3)
    ] == [
        (
            call.kwargs["name"],
            call.kwargs["namespace"],
            call.kwargs["body"].preconditions.uid,
            call.kwargs["body"].preconditions.resource_version,
        )
        for call in api.delete_namespaced_persistent_volume_claim.call_args_list
    ]