def test_delete_unschedulable_pods_raises_server_error(api: MagicMock):
    """A server error from delete_namespaced_pod must propagate to the caller.

    One pod is unschedulable because of a missing PVC; the stubbed delete
    always raises, so delete_unschedulable_pods should re-raise ApiException.
    """
    api.list_namespaced_pod.return_value = V1PodList(
        items=[
            V1Pod(
                metadata=V1ObjectMeta(
                    name="web-0",
                    namespace="default",
                    uid="uid-web-0",
                    resource_version="1",
                    owner_references=[V1ObjectReference(kind="StatefulSet")],
                ),
                status=V1PodStatus(
                    phase="Pending",
                    conditions=[
                        V1PodCondition(
                            status="Not Ready",
                            type="False",
                            reason="Unschedulable",
                            message='persistentvolumeclaim "queue-web-0" not found',
                        )
                    ],
                ),
            ),
        ]
    )

    def delete_pod(name, namespace, body):
        # Any delete attempt fails with a non-retryable server error.
        raise ApiException(reason="Server Error")

    api.delete_namespaced_pod.side_effect = delete_pod
    with pytest.raises(ApiException):
        delete_unschedulable_pods(api, "namespace")
    # BUG FIX: `called_once_with` is not a Mock assertion method — it just
    # created a child mock and always "passed".  Use the assert_* form so the
    # pod listing is actually verified.
    api.list_namespaced_pod.assert_called_once_with("namespace")
def test_create_pvc_raises_server_error(api: MagicMock, batch_api: MagicMock):
    """A server error from create_namespaced_persistent_volume_claim must
    propagate out of flush_released_pvs.

    A single released, Retain-policy PV triggers a PVC creation attempt; the
    stubbed create always raises ApiException.
    """
    api.list_persistent_volume.return_value = V1PersistentVolumeList(
        items=[
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-0"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
        ]
    )

    def create_pvc(namespace, body):
        # Any PVC creation fails with a non-retryable server error.
        raise ApiException(reason="Server Error")

    api.create_namespaced_persistent_volume_claim.side_effect = create_pvc
    with pytest.raises(ApiException):
        flush_released_pvs(api, batch_api, "command", "env", "image", "namespace")
    api.list_persistent_volume.assert_called_once_with()
    # BUG FIX: `called_once_with` is not a Mock assertion method — it just
    # created a child mock and always "passed".  Use the assert_* form so the
    # job listing is actually verified.
    batch_api.list_namespaced_job.assert_called_once_with("namespace")
def schedule_pod(v1_client, name, node, namespace="default"):
    """Bind the pod `name` to `node` via the pod binding subresource.

    :param v1_client: CoreV1Api client used to create the binding
    :param name: name of the pod to schedule
    :param node: name of the target node
    :param namespace: namespace of the pod (default "default")
    :return: result of create_namespaced_pod_binding
    """
    target = V1ObjectReference()
    target.kind = "Node"
    # BUG FIX: the python kubernetes client models use snake_case attributes;
    # assigning `target.apiVersion` set an unknown ad-hoc attribute that is
    # never serialized, leaving the binding target without an apiVersion.
    target.api_version = "v1"
    target.name = node
    meta = V1ObjectMeta()
    meta.name = name
    body = V1Binding(api_version=None, kind=None, metadata=meta, target=target)
    logger.info("Binding Pod: %s to Node: %s", name, node)
    return v1_client.create_namespaced_pod_binding(name, namespace, body)
def assign_pod_to_node(pod: V1Pod, node: V1Node):
    """Bind *pod* to *node* through the binding subresource, then notify."""
    pod_name = pod.metadata.name
    node_name = node.metadata.name
    print("Scheduling %s on %s" % (pod_name, node_name))
    # Build the binding object pointing the pod at the chosen node.
    target = V1ObjectReference(api_version="v1", kind="Node", name=node_name)
    binding = V1Binding(
        api_version="v1",
        kind="Binding",
        metadata=V1ObjectMeta(name=pod_name),
        target=target,
    )
    client.CoreV1Api().create_namespaced_pod_binding(pod_name, "default", binding)
    print("Scheduled %s on %s" % (pod_name, node_name))
    # Emit a "Scheduled" event for observability.
    notify_binding(pod, node)
def _bind_pvc(api: CoreV1Api, pv: V1PersistentVolume, pvc: V1PersistentVolumeClaim) -> V1PersistentVolume:
    """Patch *pv* so its claimRef points at *pvc*, binding the two together.

    Returns the patched V1PersistentVolume from the API server.
    """
    logger.info(f"binding pv to pvc: {pv.metadata.name}, {pvc.metadata.name}")
    # The claimRef must carry the PVC's uid/resourceVersion so the binding
    # targets exactly this PVC instance.
    claim_ref = V1ObjectReference(
        api_version="v1",
        kind="PersistentVolumeClaim",
        name=pvc.metadata.name,
        namespace=pvc.metadata.namespace,
        resource_version=pvc.metadata.resource_version,
        uid=pvc.metadata.uid,
    )
    patch = V1PersistentVolume(spec=V1PersistentVolumeSpec(claim_ref=claim_ref))
    return api.patch_persistent_volume(name=pv.metadata.name, body=patch)
def notify_binding(pod: V1Pod, node: V1Node):
    """Publish a "Scheduled" event recording that *pod* was bound to *node*."""
    now = datetime.utcnow().isoformat("T") + "Z"
    involved = V1ObjectReference(
        kind="Pod",
        name=pod.metadata.name,
        namespace="default",
        uid=pod.metadata.uid,
    )
    event = V1Event(
        count=1,
        message=("Scheduled %s on %s" % (pod.metadata.name, node.metadata.name)),
        metadata=V1ObjectMeta(generate_name=pod.metadata.name + "-"),
        reason="Scheduled",
        last_timestamp=now,
        first_timestamp=now,
        type="Normal",
        source=V1EventSource(component="efficient-scheduler"),
        involved_object=involved,
    )
    client.CoreV1Api().create_namespaced_event("default", event)
    print("Event sent")
def test_delete_unschedulable_pods(api: MagicMock):
    """Pods 0-2 are unschedulable due to a missing PVC and must be deleted
    with uid/resourceVersion preconditions; pod 3 has the wrong message and
    is left alone.  Conflict / Not Found errors on individual deletes are
    tolerated.
    """
    def make_pod(i):
        condition = V1PodCondition(
            status="Not Ready",
            type="False",
            reason="Unschedulable",
            # 0-2 should be deleted, 3 should have the wrong message
            message="" if i == 3 else f'persistentvolumeclaim "queue-web-{i}" not found',
        )
        return V1Pod(
            metadata=V1ObjectMeta(
                name=f"web-{i}",
                namespace="default",
                uid=f"uid-web-{i}",
                resource_version=f"{i}",
                owner_references=[V1ObjectReference(kind="StatefulSet")],
            ),
            status=V1PodStatus(phase="Pending", conditions=[condition]),
        )

    api.list_namespaced_pod.return_value = V1PodList(
        items=[make_pod(i) for i in range(4)]
    )

    def fail_some_deletes(name, namespace, body):
        # Individual failures must not abort the remaining deletes.
        if name == "web-1":
            raise ApiException(reason="Conflict")
        if name == "web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_pod.side_effect = fail_some_deletes
    delete_unschedulable_pods(api, "namespace")
    observed = [
        (
            call.kwargs["name"],
            call.kwargs["namespace"],
            call.kwargs["body"].preconditions.uid,
            call.kwargs["body"].preconditions.resource_version,
        )
        for call in api.delete_namespaced_pod.call_args_list
    ]
    expected = [(f"web-{i}", "namespace", f"uid-web-{i}", f"{i}") for i in range(3)]
    assert expected == observed
def assign_pod_to_node(self, pod: V1Pod, node: V1Node):
    """
    Assign a pod to a node

    :param pod: type V1Pod to schedule
    :param node: type V1Node
    """
    pod_name = pod.metadata.name
    node_name = node.metadata.name
    info("Scheduling %s on %s" % (pod_name, node_name))
    binding = V1Binding(
        api_version="v1",
        kind="Binding",
        metadata=V1ObjectMeta(name=pod_name),
        target=V1ObjectReference(api_version="v1", kind="Node", name=node_name),
    )
    try:
        self.api_k8s.create_namespaced_pod_binding(pod_name, "default", binding)
        info("Scheduled %s on %s" % (pod_name, node_name))
        self.notify_binding(pod, node)
    except Exception:
        # Binding a pod that is already bound raises; log and carry on.
        traceback.print_exc()
        info("pod %s is already assigned" % pod_name)
def notify_binding(self, pod: V1Pod, node: V1Node):
    """
    Notify that a pod has been bind to a node.

    :param pod: type V1Pod
    :param node: type V1Node
    """
    now = datetime.utcnow().isoformat("T") + "Z"
    self.api_k8s.create_namespaced_event(
        "default",
        V1Event(
            count=1,
            message=("Scheduled %s on %s" % (pod.metadata.name, node.metadata.name)),
            metadata=V1ObjectMeta(generate_name=pod.metadata.name + "-"),
            reason="Scheduled",
            last_timestamp=now,
            first_timestamp=now,
            type="Normal",
            source=V1EventSource(component="genpack"),
            involved_object=V1ObjectReference(
                kind="Pod",
                name=pod.metadata.name,
                namespace="default",
                uid=pod.metadata.uid,
            ),
        ),
    )
    info("Event sent")
def test_flush_released_pvs(api: MagicMock, batch_api: MagicMock):
    """Happy-path test for flush_released_pvs covering every skip/flush branch.

    Builds one PV per branch (see the inline comments on each item), stubs
    PVC and job creation — including AlreadyExists conflicts that force the
    read-after-conflict paths — then checks exactly which PVCs were created,
    which PVs were re-bound, and which flush jobs were submitted.
    """
    api.list_persistent_volume.return_value = V1PersistentVolumeList(
        items=[
            # don't flush because job exists
            V1PersistentVolume(metadata=V1ObjectMeta(name="pv-0")),
            # don't flush because wrong namespace
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-4"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(namespace="other"),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # don't flush because it's already done
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-5"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(namespace="namespace"),
                    persistent_volume_reclaim_policy="Delete",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # don't flush because it's in use
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-6"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # try to flush because pvc is bound but job was created after jobs were listed
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-7"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="flush-pv-7", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # flush because pvc is bound but job does not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-8"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="flush-pv-8", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Bound"),
            ),
            # flush because pvc is not yet bound and job does not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-9"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
            # flush because pvc and job both do not exist
            V1PersistentVolume(
                metadata=V1ObjectMeta(name="pv-A"),
                spec=V1PersistentVolumeSpec(
                    claim_ref=V1ObjectReference(
                        name="queue-web-0", namespace="namespace"
                    ),
                    persistent_volume_reclaim_policy="Retain",
                ),
                status=V1PersistentVolumeStatus(phase="Released"),
            ),
        ]
    )

    def create_pvc(namespace: str, body: V1PersistentVolumeClaim):
        # Simulate a PVC that already exists (created concurrently): 409
        # with an AlreadyExists reason in the response body.
        if body.metadata.name == "flush-pv-9":
            exc = ApiException(status=409, reason="Conflict")
            exc.body = '{"reason":"AlreadyExists"}'
            raise exc
        body.metadata.uid = "uid-" + body.metadata.name
        body.metadata.resource_version = "1"
        return body

    api.create_namespaced_persistent_volume_claim.side_effect = create_pvc

    def read_pvc(name: str, namespace: str):
        # Returns the pre-existing PVC fetched after the 409 above.
        return V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(
                name=name, namespace=namespace, uid="uid-" + name, resource_version="2"
            )
        )

    api.read_namespaced_persistent_volume_claim.side_effect = read_pvc
    # Only a job for pv-0 exists at listing time.
    batch_api.list_namespaced_job.return_value = V1JobList(
        items=[V1Job(metadata=V1ObjectMeta(name="flush-pv-0"))]
    )

    def create_job(namespace, body):
        # Simulate a job created after jobs were listed: AlreadyExists 409.
        if body.metadata.name == "flush-pv-7":
            exc = ApiException(status=409, reason="Conflict")
            exc.body = '{"reason":"AlreadyExists"}'
            raise exc

    batch_api.create_namespaced_job.side_effect = create_job
    flush_released_pvs(api, batch_api, "command", "env", "image", "namespace")
    api.list_persistent_volume.assert_called_once_with()
    batch_api.list_namespaced_job.assert_called_once_with("namespace")
    # PVCs are created only for the released PVs 9 and A.
    assert [f"flush-pv-{i}" for i in "9A"] == [
        call.kwargs["body"].metadata.name
        for call in api.create_namespaced_persistent_volume_claim.call_args_list
    ]
    # The 409 on flush-pv-9 forces a read of the existing PVC.
    api.read_namespaced_persistent_volume_claim.assert_called_once_with(
        "flush-pv-9", "namespace"
    )
    # Both released PVs get their claimRef re-pointed at the flush PVC.
    assert [("pv-9", "flush-pv-9"), ("pv-A", "flush-pv-A"),] == [
        (
            call.kwargs["name"],
            call.kwargs["body"].spec.claim_ref
            and call.kwargs["body"].spec.claim_ref.name,
        )
        for call in api.patch_persistent_volume.call_args_list
    ]
    # Flush jobs are submitted for 7 (conflicts), 8, 9 and A.
    assert [f"flush-pv-{i}" for i in "789A"] == [
        call.kwargs["body"].metadata.name
        for call in batch_api.create_namespaced_job.call_args_list
    ]
    # The job conflict on flush-pv-7 forces a read of the existing job.
    batch_api.read_namespaced_job.assert_called_once_with("flush-pv-7", "namespace")
# Shared fixtures for the experiment-view command tests.  `mocked_test_pod`
# is created earlier (outside this excerpt); give it a stable name/uid so
# assertions can match on them.
mocked_test_pod.metadata.name = "test"
mocked_test_pod.metadata.uid = "uid"
TEST_PODS = [mocked_test_pod]

# A pod stuck in the PENDING phase, used to exercise the pending code path.
pending_pod = MagicMock(spec=V1Pod)
pending_pod.status = V1PodStatus(phase=PodStatus.PENDING.value)
pending_pod.metadata.name = "test"
pending_pod.metadata.uid = "uid"
PENDING_POD = [pending_pod]

# Resource usage rows returned by the top-users query.
TOP_USERS = [ResourceUsage(user_name="user_name", cpu_usage=2, mem_usage=1000)]

# A cluster event tied to "test-experiment" describing a scheduling failure.
event = MagicMock(spec=V1Event)
event.message = "insufficient memory"
event.reason = "insufficient memory"
event.involved_object = V1ObjectReference(name="test-experiment")
event.metadata = V1ObjectMeta(name="test-experiment")
EVENTS = [event]


class ViewMocks:
    # Bundles the mocker patches the view-command tests need, pre-wired with
    # the fixture data above.
    def __init__(self, mocker):
        self.get_run = mocker.patch('commands.experiment.view.Run.get')
        self.get_run.return_value = TEST_RUNS[0]
        self.get_pods = mocker.patch(
            'commands.experiment.view.get_namespaced_pods')
        self.get_pods.return_value = TEST_PODS
        self.get_namespace = mocker.patch(
            'commands.experiment.view.get_kubectl_current_context_namespace')
        self.format_timestamp = mocker.patch(
            'platform_resources.run.format_timestamp_for_cli')
def test_delete_detached_pvcs(api: MagicMock):
    """delete_detached_pvcs must only delete a PVC after it has been seen
    detached for at least `pvc_cleanup_delay` across two consecutive runs.

    The first run only records detached PVCs in `cache`; the second run,
    finding them still detached past the delay, issues the deletes with
    uid/resourceVersion preconditions.
    """
    api.list_namespaced_pod.return_value = V1PodList(items=[
        # pvc is attached
        V1Pod(spec=V1PodSpec(
            containers=[],
            volumes=[
                V1Volume(
                    name="queue",
                    persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                        claim_name="queue-web-3",
                    ),
                )
            ],
        ),
        ),
        # pvc not attached because spec is missing
        V1Pod(),
        # pvc not attached because volumes are missing
        V1Pod(spec=V1PodSpec(containers=[],
                             ),
              ),
        # pvc not attached because volume is not persistent
        V1Pod(spec=V1PodSpec(containers=[], volumes=[V1Volume(
            name="queue")]),
        ),
        # pvc not attached because pod is unschedulable due to pvc
        V1Pod(
            metadata=V1ObjectMeta(
                name="web-0",
                namespace="default",
                uid="uid-web-0",
                resource_version="1",
                owner_references=[V1ObjectReference(kind="StatefulSet")],
            ),
            status=V1PodStatus(
                phase="Pending",
                conditions=[
                    V1PodCondition(
                        status="Not Ready",
                        type="False",
                        reason="Unschedulable",
                        message='persistentvolumeclaim "queue-web-0" not found',
                    )
                ],
            ),
        ),
    ])
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should delete 0-2, 3 is in attached pvcs
            *(V1PersistentVolumeClaim(
                metadata=V1ObjectMeta(
                    name=f"queue-web-{i}",
                    uid=f"uid-queue-web-{i}",
                    resource_version=f"{i}",
                ),
                spec=V1PersistentVolumeClaimSpec(volume_name=f"pv-{i}"),
            ) for i in range(4)),
            # name does not start with claim prefix
            V1PersistentVolumeClaim(metadata=V1ObjectMeta(
                name="other-web-0"),
            ),
        ])

    def delete_pvc(name, namespace, body):
        # Individual delete failures (Conflict / Not Found) are tolerated.
        if name == "queue-web-1":
            raise ApiException(reason="Conflict")
        if name == "queue-web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc
    pvc_cleanup_delay = timedelta(microseconds=1)
    delay_complete = datetime.utcnow() - pvc_cleanup_delay
    cache = {
        # wrong pv name, should be overwritten
        "queue-web-0": PvcCacheEntry(pv="wrong", time=delay_complete),
        # no longer detached, should be removed
        "queue-web-3": PvcCacheEntry(pv="pv-3", time=delay_complete),
    }
    delete_detached_pvcs(api, "namespace", "queue-", pvc_cleanup_delay, cache)
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    # First pass only records the detached PVCs; nothing is deleted yet.
    api.delete_namespaced_persistent_volume_claim.assert_not_called()
    assert {f"queue-web-{i}": f"pv-{i}"
            for i in range(3)} == {k: v.pv
                                   for k, v in cache.items()}
    api.list_namespaced_pod.reset_mock()
    api.list_namespaced_persistent_volume_claim.reset_mock()
    previous_cache = {**cache}
    # Second pass: same detached PVCs, now past the cleanup delay.
    delete_detached_pvcs(api, "namespace", "queue-", pvc_cleanup_delay, cache)
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with(
        "namespace")
    assert previous_cache == cache
    # The three still-detached PVCs are deleted with preconditions.
    assert [
        (f"queue-web-{i}", "namespace", f"uid-queue-web-{i}", f"{i}")
        for i in range(3)
    ] == [(
        call.kwargs["name"],
        call.kwargs["namespace"],
        call.kwargs["body"].preconditions.uid,
        call.kwargs["body"].preconditions.resource_version,
    ) for call in api.delete_namespaced_persistent_volume_claim.call_args_list]
def get_reference_object(self) -> V1Deployment:
    """Get deployment object for outpost"""
    # Expose every controller deployment port on the container.
    container_ports = [
        V1ContainerPort(
            container_port=port.inner_port or port.port,
            name=port.name,
            protocol=port.protocol.upper(),
        )
        for port in self.controller.deployment_ports
    ]
    meta = self.get_object_meta(name=self.name)
    image_name = self.controller.get_container_image()
    image_pull_secrets = self.outpost.config.kubernetes_image_pull_secrets
    version = get_full_version()

    def secret_env(var_name: str, key: str) -> V1EnvVar:
        # Each env var is sourced from a key of this outpost's secret.
        return V1EnvVar(
            name=var_name,
            value_from=V1EnvVarSource(
                secret_key_ref=V1SecretKeySelector(name=self.name, key=key)
            ),
        )

    pod_labels = self.get_pod_meta(
        **{
            # Support istio-specific labels, but also use the standard k8s
            # recommendations
            "app.kubernetes.io/version": version,
            "app": "authentik-outpost",
            "version": version,
        }
    )
    container = V1Container(
        name=str(self.outpost.type),
        image=image_name,
        ports=container_ports,
        env=[
            secret_env("AUTHENTIK_HOST", "authentik_host"),
            secret_env("AUTHENTIK_HOST_BROWSER", "authentik_host_browser"),
            secret_env("AUTHENTIK_TOKEN", "token"),
            secret_env("AUTHENTIK_INSECURE", "authentik_host_insecure"),
        ],
    )
    return V1Deployment(
        metadata=meta,
        spec=V1DeploymentSpec(
            replicas=self.outpost.config.kubernetes_replicas,
            selector=V1LabelSelector(match_labels=self.get_pod_meta()),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=pod_labels),
                spec=V1PodSpec(
                    image_pull_secrets=[
                        V1ObjectReference(name=secret)
                        for secret in image_pull_secrets
                    ],
                    containers=[container],
                ),
            ),
        ),
    )
def test_delete_detached_pvcs(api: MagicMock):
    """PVCs 0-2 are detached and must be deleted with uid/resourceVersion
    preconditions; queue-web-3 is attached to a pod and other-web-0 does not
    match the claim prefix, so both are left alone.  Conflict / Not Found
    errors on individual deletes are tolerated.
    """
    api.list_namespaced_pod.return_value = V1PodList(
        items=[
            # pvc is attached
            V1Pod(
                spec=V1PodSpec(
                    containers=[],
                    volumes=[
                        V1Volume(
                            name="queue",
                            persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                                claim_name="queue-web-3",
                            ),
                        )
                    ],
                ),
            ),
            # pvc not attached because spec is missing
            V1Pod(),
            # pvc not attached because volumes are missing
            V1Pod(spec=V1PodSpec(containers=[],),),
            # pvc not attached because volume is not persistent
            V1Pod(spec=V1PodSpec(containers=[], volumes=[V1Volume(name="queue")]),),
            # pvc not attached because pod is unschedulable due to pvc
            V1Pod(
                metadata=V1ObjectMeta(
                    name="web-0",
                    namespace="default",
                    uid="uid-web-0",
                    resource_version="1",
                    owner_references=[V1ObjectReference(kind="StatefulSet")],
                ),
                status=V1PodStatus(
                    phase="Pending",
                    conditions=[
                        V1PodCondition(
                            status="Not Ready",
                            type="False",
                            reason="Unschedulable",
                            message='persistentvolumeclaim "queue-web-0" not found',
                        )
                    ],
                ),
            ),
        ]
    )
    api.list_namespaced_persistent_volume_claim.return_value = V1PersistentVolumeClaimList(
        items=[
            # should delete 0-2, 3 is in attached pvcs
            *(
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(
                        name=f"queue-web-{i}",
                        uid=f"uid-queue-web-{i}",
                        resource_version=f"{i}",
                    ),
                )
                for i in range(4)
            ),
            # name does not start with claim prefix
            V1PersistentVolumeClaim(metadata=V1ObjectMeta(name="other-web-0"),),
        ]
    )

    def delete_pvc(name, namespace, body):
        # Individual delete failures (Conflict / Not Found) are tolerated.
        if name == "queue-web-1":
            raise ApiException(reason="Conflict")
        if name == "queue-web-2":
            raise ApiException(reason="Not Found")

    api.delete_namespaced_persistent_volume_claim.side_effect = delete_pvc
    delete_detached_pvcs(api, "namespace", "queue-")
    # BUG FIX: `called_once_with` is not a Mock assertion method — it just
    # created a child mock and always "passed".  Use the assert_* form so the
    # listing calls are actually verified.
    api.list_namespaced_pod.assert_called_once_with("namespace")
    api.list_namespaced_persistent_volume_claim.assert_called_once_with("namespace")
    assert [
        (f"queue-web-{i}", "namespace", f"uid-queue-web-{i}", f"{i}")
        for i in range(3)
    ] == [
        (
            call.kwargs["name"],
            call.kwargs["namespace"],
            call.kwargs["body"].preconditions.uid,
            call.kwargs["body"].preconditions.resource_version,
        )
        for call in api.delete_namespaced_persistent_volume_claim.call_args_list
    ]