def test_get_pod_volumes(self):
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name',
        autospec=True,
        return_value='some-volume',
    ):
        mock_volumes = [
            {'hostPath': '/nail/blah', 'containerPath': '/nail/foo'},
            {'hostPath': '/nail/thing', 'containerPath': '/nail/bar'},
        ]
        expected_volumes = [
            V1Volume(
                host_path=V1HostPathVolumeSource(path='/nail/blah'),
                name='some-volume',
            ),
            V1Volume(
                host_path=V1HostPathVolumeSource(path='/nail/thing'),
                name='some-volume',
            ),
        ]
        assert self.deployment.get_pod_volumes(mock_volumes) == expected_volumes
def get_pod_volumes(
    self,
    docker_volumes: Sequence[DockerVolume],
    aws_ebs_volumes: Sequence[AwsEbsVolume],
) -> Sequence[V1Volume]:
    pod_volumes = []
    unique_docker_volumes = {
        self.get_docker_volume_name(docker_volume): docker_volume
        for docker_volume in docker_volumes
    }
    for name, docker_volume in unique_docker_volumes.items():
        pod_volumes.append(
            V1Volume(
                host_path=V1HostPathVolumeSource(
                    path=docker_volume['hostPath'],
                ),
                name=name,
            ),
        )
    unique_aws_ebs_volumes = {
        self.get_aws_ebs_volume_name(aws_ebs_volume): aws_ebs_volume
        for aws_ebs_volume in aws_ebs_volumes
    }
    for name, aws_ebs_volume in unique_aws_ebs_volumes.items():
        pod_volumes.append(
            V1Volume(
                aws_elastic_block_store=V1AWSElasticBlockStoreVolumeSource(
                    volume_id=aws_ebs_volume['volume_id'],
                    fs_type=aws_ebs_volume.get('fs_type'),
                    partition=aws_ebs_volume.get('partition'),
                    # k8s wants RW volume even if it's later mounted RO
                    read_only=False,
                ),
                name=name,
            ),
        )
    return pod_volumes
def get_pod_volumes(self, volumes: Sequence[DockerVolume]) -> Sequence[V1Volume]:
    pod_volumes = []
    for volume in volumes:
        pod_volumes.append(
            V1Volume(
                host_path=V1HostPathVolumeSource(
                    path=volume['hostPath'],
                ),
                name=self.get_sanitised_volume_name(volume['containerPath']),
            ),
        )
    return pod_volumes
def test_get_pod_volumes(self):
    mock_docker_volumes = [
        {'hostPath': '/nail/blah', 'containerPath': '/nail/foo'},
        {'hostPath': '/nail/thing', 'containerPath': '/nail/bar'},
    ]
    mock_aws_ebs_volumes = [
        {
            'volume_id': 'vol-ZZZZZZZZZZZZZZZZZ',
            'fs_type': 'ext4',
            'container_path': '/nail/qux',
        },
    ]
    expected_volumes = [
        V1Volume(
            host_path=V1HostPathVolumeSource(path='/nail/blah'),
            name='host--slash-nailslash-blah',
        ),
        V1Volume(
            host_path=V1HostPathVolumeSource(path='/nail/thing'),
            name='host--slash-nailslash-thing',
        ),
        V1Volume(
            aws_elastic_block_store=V1AWSElasticBlockStoreVolumeSource(
                volume_id='vol-ZZZZZZZZZZZZZZZZZ',
                fs_type='ext4',
                read_only=False,
            ),
            name='aws-ebs--vol-ZZZZZZZZZZZZZZZZZ',
        ),
    ]
    assert self.deployment.get_pod_volumes(
        docker_volumes=mock_docker_volumes,
        aws_ebs_volumes=mock_aws_ebs_volumes,
    ) == expected_volumes
def deploy(self, path, access_mode="ReadWriteMany"):
    if self.meta.labels:
        self.meta.labels.update(self.target_labels)
    else:
        self.meta.labels = self.target_labels
    pv_spec = V1PersistentVolumeSpec(
        access_modes=[access_mode],
        capacity={"storage": self.capacity},
        host_path=V1HostPathVolumeSource(path=path),
        persistent_volume_reclaim_policy='Recycle',
        # claim_ref=V1ObjectReference(),
    )
    pv = V1PersistentVolume(metadata=self.meta, spec=pv_spec)
    k8sclient.apiV1.create_persistent_volume(body=pv)
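# A minimal sketch (not from the snippet above) of the V1PersistentVolume that
# deploy() builds, assuming the default access_mode, a capacity of "1Gi", and a
# hypothetical path "/mnt/data"; the metadata values are illustrative only.
from kubernetes.client import (
    V1HostPathVolumeSource,
    V1ObjectMeta,
    V1PersistentVolume,
    V1PersistentVolumeSpec,
)

example_pv = V1PersistentVolume(
    metadata=V1ObjectMeta(name='example-pv', labels={'app': 'example'}),
    spec=V1PersistentVolumeSpec(
        access_modes=['ReadWriteMany'],
        capacity={'storage': '1Gi'},
        host_path=V1HostPathVolumeSource(path='/mnt/data'),
        persistent_volume_reclaim_policy='Recycle',
    ),
)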
def get_pod_volumes(volumes: PVector['DockerVolume']) -> List[V1Volume]:
    """
    Given a list of volume mounts, return a list corresponding to the
    Kubernetes objects needed to tie the mounts to a Pod.
    """
    unique_volumes: Dict[str, 'DockerVolume'] = {
        get_sanitised_volume_name(f"host--{volume['host_path']}", length_limit=63): volume
        for volume in volumes
    }
    return [
        V1Volume(
            host_path=V1HostPathVolumeSource(path=volume["host_path"]),
            name=name,
        )
        for name, volume in unique_volumes.items()
    ]
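# A hedged usage sketch for get_pod_volumes above: two mounts that share a host
# path collapse into one V1Volume, because entries are deduplicated in a dict
# keyed by get_sanitised_volume_name(f"host--{host_path}", length_limit=63).
# Passing a plain list rather than a PVector is an assumption made for brevity.
volumes = [
    {'container_path': '/etc/foo', 'host_path': '/nail/etc/srv', 'mode': 'RO'},
    {'container_path': '/etc/bar', 'host_path': '/nail/etc/srv', 'mode': 'RO'},
]
pod_volumes = get_pod_volumes(volumes)
assert len(pod_volumes) == 1  # one unique host path -> one V1Volume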
name= "host--slash-aslash-bslash-cdslash-eslash-fslash-gslash-hs--f2c8", read_only=False), ]), )) def test_get_kubernetes_volume_mounts(volumes, expected): assert get_kubernetes_volume_mounts(volumes) == expected @pytest.mark.parametrize("volumes,expected", ( (v({ "container_path": "/a", "host_path": "/b", "mode": "RO" }), [V1Volume(name="host--slash-b", host_path=V1HostPathVolumeSource("/b"))]), (v( { "container_path": "/a", "host_path": "/b", "mode": "RO" }, { "container_path": "/b", "host_path": "/a/b/cd/e/f/g/h/u/j/k/l", "mode": "RW" }, ), [ V1Volume(name="host--slash-b", host_path=V1HostPathVolumeSource("/b")), V1Volume( name=
def __init__(self, name, mount, path, read_only=True):
    self.mount = V1VolumeMount(name=name, mount_path=mount, read_only=read_only)
    self.volume = V1Volume(name=name, host_path=V1HostPathVolumeSource(path=path))
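# A hedged usage sketch for the wrapper above; the class name HostPathVolume is
# a stand-in (the real class name is not shown), and the surrounding pod spec is
# illustrative. The paired .mount goes on the container, .volume on the pod.
from kubernetes.client import V1Container, V1PodSpec

vol = HostPathVolume(name='config', mount='/etc/app', path='/srv/app/config')
pod_spec = V1PodSpec(
    containers=[
        V1Container(name='main', image='busybox', volume_mounts=[vol.mount]),
    ],
    volumes=[vol.volume],
)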
def test_run(mock_get_node_affinity, k8s_executor):
    task_config = KubernetesTaskConfig(
        name="fake_task_name",
        uuid="fake_id",
        image="fake_docker_image",
        command="fake_command",
        cpus=1,
        memory=1024,
        disk=1024,
        volumes=[{"host_path": "/a", "container_path": "/b", "mode": "RO"}],
        node_selectors={"hello": "world"},
        node_affinities=[dict(key="a_label", operator="In", value=[])],
        labels={
            "some_label": "some_label_value",
        },
        annotations={
            "paasta.yelp.com/some_annotation": "some_value",
        },
        service_account_name="testsa",
    )
    expected_container = V1Container(
        image=task_config.image,
        name="main",
        command=["/bin/sh", "-c"],
        args=[task_config.command],
        security_context=V1SecurityContext(
            capabilities=V1Capabilities(drop=list(task_config.cap_drop)),
        ),
        resources=V1ResourceRequirements(limits={
            "cpu": 1.0,
            "memory": "1024.0Mi",
            "ephemeral-storage": "1024.0Mi",
        }),
        env=[],
        volume_mounts=[
            V1VolumeMount(
                mount_path="/b",
                name="host--slash-a",
                read_only=True,
            ),
        ],
    )
    expected_pod = V1Pod(
        metadata=V1ObjectMeta(
            name=task_config.pod_name,
            namespace="task_processing_tests",
            labels={
                "some_label": "some_label_value",
            },
            annotations={
                "paasta.yelp.com/some_annotation": "some_value",
            },
        ),
        spec=V1PodSpec(
            restart_policy=task_config.restart_policy,
            containers=[expected_container],
            volumes=[
                V1Volume(
                    host_path=V1HostPathVolumeSource(path="/a"),
                    name="host--slash-a",
                ),
            ],
            share_process_namespace=True,
            security_context=V1PodSecurityContext(
                fs_group=task_config.fs_group,
            ),
            node_selector={"hello": "world"},
            affinity=V1Affinity(node_affinity=mock_get_node_affinity.return_value),
            dns_policy="Default",
            service_account_name=task_config.service_account_name,
        ),
    )

    assert k8s_executor.run(task_config) == task_config.pod_name
    assert k8s_executor.kube_client.core.create_namespaced_pod.call_args_list == [
        mock.call(body=expected_pod, namespace='task_processing_tests'),
    ]
    assert mock_get_node_affinity.call_args_list == [
        mock.call(pvector([dict(key="a_label", operator="In", value=[])])),
    ]