def objects_to_yaml(self):
    """Render every object in ``self.objects`` as one multi-document YAML string.

    Each object is first passed through the Kubernetes client's
    ``sanitize_for_serialization`` so only plain JSON-compatible data is
    dumped; documents are joined with the standard ``---`` separator.
    """
    api_client = ApiClient()
    # Generator keeps memory flat: no intermediate list of YAML strings.
    documents = (
        yaml.dump(api_client.sanitize_for_serialization(item))
        for item in self.objects
    )
    return '\n---\n'.join(documents)
def template_chart(args):
    """Render every ``*.py`` template under ``<chart>/templates`` as a YAML stream.

    Each template module must expose a ``template()`` callable returning either
    a single Kubernetes object or a list/tuple of them.  The rendered output is
    printed to stdout as ``---``-separated documents, each prefixed with a
    ``# Source:`` comment naming the template file it came from.

    :param args: parsed CLI arguments; ``args.chart[0]`` is the chart directory
        relative to the current working directory.
    """
    current_dir = os.getcwd()
    chart_dir = os.path.join(current_dir, args.chart[0])
    api = ApiClient()
    templates = []
    fnames = []
    templates_dir = os.path.join(chart_dir, 'templates')
    # BUGFIX: only the directory listing is guarded.  Previously the whole
    # loop (including exec_module of user template code) sat inside the try,
    # so a FileNotFoundError raised *by a template* was silently misreported
    # as a missing templates directory.
    try:
        template_files = os.listdir(templates_dir)
    except FileNotFoundError:
        print('No templates directory found')
        return
    for fname in template_files:
        full_path = os.path.join(templates_dir, fname)
        if not os.path.isfile(full_path):
            continue
        tname, ext = os.path.splitext(fname)
        if ext != '.py':
            continue
        # Import the template module directly from its file path.
        spec = importlib.util.spec_from_file_location(tname, full_path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        t = mod.template()
        # Normalize single-object templates to a list.
        if not isinstance(t, (list, tuple)):
            t = [t]
        templates.extend(t)
        # One source-path entry per rendered object, so zip() below lines up.
        fnames += [full_path] * len(t)
    t_sanitized = api.sanitize_for_serialization(templates)
    output = '\n'.join([
        '---\n{}\n{}'.format(
            '# Source: {}'.format(os.path.relpath(fname, current_dir)),
            yaml.dump(t, default_flow_style=False))
        for t, fname in zip(t_sanitized, fnames)
    ])
    print(output)
def test_tolerations(self):
    """Tolerations given as plain dicts or as k8s.V1Toleration objects must
    both end up as V1Toleration on the pod spec and serialize identically."""
    k8s_api_tolerations = [
        k8s.V1Toleration(key="key", operator="Equal", value="value")
    ]
    # Dict form equivalent to the V1Toleration above.
    tolerations = [{'key': "key", 'operator': 'Equal', 'value': 'value'}]
    k = KubernetesPodOperator(
        namespace='default',
        image="ubuntu:16.04",
        cmds=["bash", "-cx"],
        arguments=["echo 10"],
        labels={"foo": "bar"},
        name="name",
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        tolerations=tolerations,
    )
    result = k.create_pod_request_obj()
    client = ApiClient()
    self.assertEqual(type(result.spec.tolerations[0]), k8s.V1Toleration)
    self.assertEqual(
        client.sanitize_for_serialization(result)['spec']['tolerations'],
        tolerations)
    # Repeat with native k8s API objects instead of dicts.
    k = KubernetesPodOperator(
        namespace='default',
        image="ubuntu:16.04",
        cmds=["bash", "-cx"],
        arguments=["echo 10"],
        labels={"foo": "bar"},
        name="name",
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        tolerations=k8s_api_tolerations,
    )
    result = k.create_pod_request_obj()
    self.assertEqual(type(result.spec.tolerations[0]), k8s.V1Toleration)
    self.assertEqual(
        client.sanitize_for_serialization(result)['spec']['tolerations'],
        tolerations)
def test_node_selector(self):
    """Both the ``node_selector`` parameter and the deprecated
    ``node_selectors`` alias must populate ``spec.nodeSelector``."""
    node_selector = {'beta.kubernetes.io/os': 'linux'}
    k = KubernetesPodOperator(
        namespace='default',
        image="ubuntu:16.04",
        cmds=["bash", "-cx"],
        arguments=["echo 10"],
        labels={"foo": "bar"},
        name="name",
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        node_selector=node_selector,
    )
    result = k.create_pod_request_obj()
    client = ApiClient()
    self.assertEqual(type(result.spec.node_selector), dict)
    self.assertEqual(
        client.sanitize_for_serialization(result)['spec']['nodeSelector'],
        node_selector)
    # repeat tests using deprecated parameter
    k = KubernetesPodOperator(
        namespace='default',
        image="ubuntu:16.04",
        cmds=["bash", "-cx"],
        arguments=["echo 10"],
        labels={"foo": "bar"},
        name="name",
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        node_selectors=node_selector,
    )
    result = k.create_pod_request_obj()
    client = ApiClient()
    self.assertEqual(type(result.spec.node_selector), dict)
    self.assertEqual(
        client.sanitize_for_serialization(result)['spec']['nodeSelector'],
        node_selector)
def test_attach_to_pod(self, mock_uuid):
    """append_to_pod must translate each Secret spec into the right pod
    field: envFrom (whole-secret ref), a volume + volumeMount, or a single
    env var sourced from a secret key."""
    # Pin uuid4 so the generated pod/volume names are deterministic.
    mock_uuid.return_value = '0'
    pod = PodGenerator(image='airflow-worker:latest', name='base').gen_pod()
    secrets = [
        # This should be a secretRef
        Secret('env', None, 'secret_a'),
        # This should be a single secret mounted in volumeMounts
        Secret('volume', '/etc/foo', 'secret_b'),
        # This should produce a single secret mounted in env
        Secret('env', 'TARGET', 'secret_b', 'source_b'),
    ]
    k8s_client = ApiClient()
    result = append_to_pod(pod, secrets)
    result = k8s_client.sanitize_for_serialization(result)
    self.assertEqual(result, {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {'name': 'base-0'},
        'spec': {
            'containers': [{
                'args': [],
                'command': [],
                'env': [{
                    'name': 'TARGET',
                    'valueFrom': {
                        'secretKeyRef': {
                            'key': 'source_b',
                            'name': 'secret_b'
                        }
                    }
                }],
                'envFrom': [{'secretRef': {'name': 'secret_a'}}],
                'image': 'airflow-worker:latest',
                'imagePullPolicy': 'IfNotPresent',
                'name': 'base',
                'ports': [],
                'volumeMounts': [{
                    'mountPath': '/etc/foo',
                    'name': 'secretvol0',
                    'readOnly': True}]
            }],
            'hostNetwork': False,
            'imagePullSecrets': [],
            'restartPolicy': 'Never',
            'volumes': [{
                'name': 'secretvol0',
                'secret': {'secretName': 'secret_b'}
            }]
        }
    })
def test_port_attach_to_pod(self, mock_uuid):
    """Port objects appended via append_to_pod must appear, in order, as
    containerPort entries on the serialized pod."""
    # Pin uuid4 so the generated pod name ('base-0') is deterministic.
    mock_uuid.return_value = '0'
    pod = PodGenerator(image='airflow-worker:latest', name='base').gen_pod()
    ports = [Port('https', 443), Port('http', 80)]
    k8s_client = ApiClient()
    result = append_to_pod(pod, ports)
    result = k8s_client.sanitize_for_serialization(result)
    self.assertEqual(
        {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'base-0'
            },
            'spec': {
                'containers': [{
                    'args': [],
                    'command': [],
                    'env': [],
                    'envFrom': [],
                    'image': 'airflow-worker:latest',
                    'imagePullPolicy': 'IfNotPresent',
                    'name': 'base',
                    'ports': [{
                        'name': 'https',
                        'containerPort': 443
                    }, {
                        'name': 'http',
                        'containerPort': 80
                    }],
                    'volumeMounts': [],
                }],
                'hostNetwork': False,
                'imagePullSecrets': [],
                'restartPolicy': 'Never',
                'volumes': []
            }
        }, result)
def test_port_attach_to_pod(self, mock_uuid):
    """Variant using a full static UUID: ports appended via append_to_pod
    must serialize as containerPort entries; the pod name carries the
    mocked uuid hex suffix."""
    import uuid
    static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
    mock_uuid.return_value = static_uuid
    pod = PodGenerator(image='airflow-worker:latest', name='base').gen_pod()
    ports = [Port('https', 443), Port('http', 80)]
    k8s_client = ApiClient()
    result = append_to_pod(pod, ports)
    result = k8s_client.sanitize_for_serialization(result)
    self.assertEqual(
        {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'base-' + static_uuid.hex
            },
            'spec': {
                'containers': [{
                    'args': [],
                    'command': [],
                    'env': [],
                    'envFrom': [],
                    'image': 'airflow-worker:latest',
                    'name': 'base',
                    'ports': [{
                        'name': 'https',
                        'containerPort': 443
                    }, {
                        'name': 'http',
                        'containerPort': 80
                    }],
                    'volumeMounts': [],
                }],
                'hostNetwork': False,
                'imagePullSecrets': [],
                'volumes': []
            }
        }, result)
def dumps(self, data: Any) -> str:
    """Serialize every object in ``self`` as a multi-document YAML stream.

    Each object is sanitized through the Kubernetes ``ApiClient``, its keys
    reordered into the conventional manifest order (apiVersion, kind,
    metadata, type, spec, data/stringData), and prefixed with any comment
    extracted from the object.

    NOTE(review): ``data`` is never read — the method iterates ``self``.
    The parameter is kept for interface compatibility with callers.

    :return: documents joined with ``---`` separators, starting with one.
    """
    # Key order for readable manifests: a, k, m, s/d.
    key_order = [
        'apiVersion', 'kind', 'metadata', 'type', 'spec', 'data', 'stringData'
    ]
    kc = ApiClient()  # hoisted: one client serves every document
    docs = []
    for d in self:
        doc = kc.sanitize_for_serialization(d)
        # `k in doc` instead of `k in doc.keys()`; dict comp preserves order.
        ordered_doc = {k: doc[k] for k in key_order if k in doc}
        document = _extract_comment(d) + yaml.dump(ordered_doc, sort_keys=False)
        # Lazy %-style args: formatting is skipped unless DEBUG is enabled.
        logging.debug("Found doc: %s", document)
        docs.append(document)
    return "---\n" + '\n---\n'.join(docs)
class TestPodGenerator(unittest.TestCase):
    """Tests for PodGenerator: pod construction from templates/overrides,
    pod/spec/container reconciliation, merge helpers, and model
    (de)serialization.  Several tests mutate ``self.expected`` in place to
    build their expected pod — safe because setUp rebuilds it per test."""

    def setUp(self):
        # Fixed UUID so generated names ('...-<hex>') are deterministic.
        self.static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
        # Expected sanitized dict for the pod.yaml fixture used by the
        # deserialize_model_* tests below.
        self.deserialize_result = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {'name': 'memory-demo', 'namespace': 'mem-example'},
            'spec': {
                'containers': [
                    {
                        'args': ['--vm', '1', '--vm-bytes', '150M', '--vm-hang', '1'],
                        'command': ['stress'],
                        'image': 'apache/airflow:stress-2020.07.10-1.0.4',
                        'name': 'memory-demo-ctr',
                        'resources': {'limits': {'memory': '200Mi'},
                                      'requests': {'memory': '100Mi'}},
                    }
                ]
            },
        }
        self.envs = {'ENVIRONMENT': 'prod', 'LOG_LEVEL': 'warning'}
        self.secrets = [
            # This should be a secretRef
            Secret('env', None, 'secret_a'),
            # This should be a single secret mounted in volumeMounts
            Secret('volume', '/etc/foo', 'secret_b'),
            # This should produce a single secret mounted in env
            Secret('env', 'TARGET', 'secret_b', 'source_b'),
        ]
        self.execution_date = parser.parse('2020-08-24 00:00:00.000000')
        self.execution_date_label = datetime_to_label_safe_datestring(self.execution_date)
        self.dag_id = 'dag_id'
        self.task_id = 'task_id'
        self.try_number = 3
        # Labels the executor stamps on worker pods; '+' is illegal in a
        # k8s label value, hence the replace on the version string.
        self.labels = {
            'airflow-worker': 'uuid',
            'dag_id': self.dag_id,
            'execution_date': self.execution_date_label,
            'task_id': self.task_id,
            'try_number': str(self.try_number),
            'airflow_version': __version__.replace('+', '-'),
            'kubernetes_executor': 'True',
        }
        self.annotations = {
            'dag_id': self.dag_id,
            'task_id': self.task_id,
            'execution_date': self.execution_date.isoformat(),
            'try_number': str(self.try_number),
        }
        self.metadata = {
            'labels': self.labels,
            'name': 'pod_id-' + self.static_uuid.hex,
            'namespace': 'namespace',
            'annotations': self.annotations,
        }
        self.resources = k8s.V1ResourceRequirements(
            requests={
                "cpu": 1,
                "memory": "1Gi",
                "ephemeral-storage": "2Gi",
            },
            limits={"cpu": 2, "memory": "2Gi", "ephemeral-storage": "4Gi", 'nvidia.com/gpu': 1},
        )
        self.k8s_client = ApiClient()
        # Baseline expected pod mirroring the
        # pod_generator_base_with_secrets.yaml fixture; tests mutate fields
        # of this object to form their per-test expectation.
        self.expected = k8s.V1Pod(
            api_version="v1",
            kind="Pod",
            metadata=k8s.V1ObjectMeta(
                namespace="default",
                name='myapp-pod-' + self.static_uuid.hex,
                labels={'app': 'myapp'},
            ),
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name='base',
                        image='busybox',
                        command=['sh', '-c', 'echo Hello Kubernetes!'],
                        env=[
                            k8s.V1EnvVar(name='ENVIRONMENT', value='prod'),
                            k8s.V1EnvVar(
                                name="LOG_LEVEL",
                                value='warning',
                            ),
                            k8s.V1EnvVar(
                                name='TARGET',
                                value_from=k8s.V1EnvVarSource(
                                    secret_key_ref=k8s.V1SecretKeySelector(name='secret_b', key='source_b')
                                ),
                            ),
                        ],
                        env_from=[
                            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='configmap_a')),
                            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='configmap_b')),
                            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name='secret_a')),
                        ],
                        ports=[k8s.V1ContainerPort(name="foo", container_port=1234)],
                        resources=k8s.V1ResourceRequirements(
                            requests={'memory': '100Mi'},
                            limits={
                                'memory': '200Mi',
                            },
                        ),
                    )
                ],
                security_context=k8s.V1PodSecurityContext(
                    fs_group=2000,
                    run_as_user=1000,
                ),
                host_network=True,
                image_pull_secrets=[
                    k8s.V1LocalObjectReference(name="pull_secret_a"),
                    k8s.V1LocalObjectReference(name="pull_secret_b"),
                ],
            ),
        )

    @mock.patch('uuid.uuid4')
    def test_gen_pod_extract_xcom(self, mock_uuid):
        """extract_xcom=True must add the xcom sidecar container, the shared
        emptyDir volume, and a matching mount on the base container."""
        mock_uuid.return_value = self.static_uuid
        path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
        pod_generator = PodGenerator(pod_template_file=path, extract_xcom=True)
        result = pod_generator.gen_pod()
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        # Sidecar appended as a plain dict: sanitize_for_serialization
        # passes dicts through unchanged, so it compares equal.
        container_two = {
            'name': 'airflow-xcom-sidecar',
            'image': "alpine",
            'command': ['sh', '-c', PodDefaults.XCOM_CMD],
            'volumeMounts': [{'name': 'xcom', 'mountPath': '/airflow/xcom'}],
            'resources': {'requests': {'cpu': '1m'}},
        }
        self.expected.spec.containers.append(container_two)
        base_container: k8s.V1Container = self.expected.spec.containers[0]
        base_container.volume_mounts = base_container.volume_mounts or []
        base_container.volume_mounts.append(k8s.V1VolumeMount(name="xcom", mount_path="/airflow/xcom"))
        self.expected.spec.containers[0] = base_container
        self.expected.spec.volumes = self.expected.spec.volumes or []
        self.expected.spec.volumes.append(
            k8s.V1Volume(
                name='xcom',
                empty_dir={},
            )
        )
        # Re-sanitize after mutating expectations (result itself unchanged).
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        expected_dict = self.k8s_client.sanitize_for_serialization(self.expected)
        assert result_dict == expected_dict

    def test_from_obj(self):
        """from_obj must accept both the new 'pod_override' (V1Pod) form and
        the legacy 'KubernetesExecutor' dict form, producing equivalent pods."""
        result = PodGenerator.from_obj(
            {
                "pod_override": k8s.V1Pod(
                    api_version="v1",
                    kind="Pod",
                    metadata=k8s.V1ObjectMeta(name="foo", annotations={"test": "annotation"}),
                    spec=k8s.V1PodSpec(
                        containers=[
                            k8s.V1Container(
                                name="base",
                                volume_mounts=[
                                    k8s.V1VolumeMount(
                                        mount_path="/foo/", name="example-kubernetes-test-volume"
                                    )
                                ],
                            )
                        ],
                        volumes=[
                            k8s.V1Volume(
                                name="example-kubernetes-test-volume",
                                host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                            )
                        ],
                    ),
                )
            }
        )
        result = self.k8s_client.sanitize_for_serialization(result)
        assert {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'foo',
                'annotations': {'test': 'annotation'},
            },
            'spec': {
                'containers': [
                    {
                        'name': 'base',
                        'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                    }
                ],
                'volumes': [{'hostPath': {'path': '/tmp/'}, 'name': 'example-kubernetes-test-volume'}],
            },
        } == result
        # Legacy executor-config dict form.
        result = PodGenerator.from_obj(
            {
                "KubernetesExecutor": {
                    "annotations": {"test": "annotation"},
                    "volumes": [
                        {
                            "name": "example-kubernetes-test-volume",
                            "hostPath": {"path": "/tmp/"},
                        },
                    ],
                    "volume_mounts": [
                        {
                            "mountPath": "/foo/",
                            "name": "example-kubernetes-test-volume",
                        },
                    ],
                }
            }
        )
        result_from_pod = PodGenerator.from_obj(
            {
                "pod_override": k8s.V1Pod(
                    metadata=k8s.V1ObjectMeta(annotations={"test": "annotation"}),
                    spec=k8s.V1PodSpec(
                        containers=[
                            k8s.V1Container(
                                name="base",
                                volume_mounts=[
                                    k8s.V1VolumeMount(
                                        name="example-kubernetes-test-volume", mount_path="/foo/"
                                    )
                                ],
                            )
                        ],
                        volumes=[k8s.V1Volume(name="example-kubernetes-test-volume", host_path="/tmp/")],
                    ),
                )
            }
        )
        result = self.k8s_client.sanitize_for_serialization(result)
        result_from_pod = self.k8s_client.sanitize_for_serialization(result_from_pod)
        expected_from_pod = {
            'metadata': {'annotations': {'test': 'annotation'}},
            'spec': {
                'containers': [
                    {
                        'name': 'base',
                        'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                    }
                ],
                'volumes': [{'hostPath': '/tmp/', 'name': 'example-kubernetes-test-volume'}],
            },
        }
        assert (
            result_from_pod == expected_from_pod
        ), "There was a discrepency between KubernetesExecutor and pod_override"
        assert {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'annotations': {'test': 'annotation'},
            },
            'spec': {
                'containers': [
                    {
                        'args': [],
                        'command': [],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                    }
                ],
                'hostNetwork': False,
                'imagePullSecrets': [],
                'volumes': [{'hostPath': {'path': '/tmp/'}, 'name': 'example-kubernetes-test-volume'}],
            },
        } == result

    @mock.patch('uuid.uuid4')
    def test_reconcile_pods_empty_mutator_pod(self, mock_uuid):
        """reconcile_pods with a None or empty mutator returns the base pod."""
        mock_uuid.return_value = self.static_uuid
        path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
        pod_generator = PodGenerator(pod_template_file=path, extract_xcom=True)
        base_pod = pod_generator.gen_pod()
        mutator_pod = None
        name = 'name1-' + self.static_uuid.hex
        base_pod.metadata.name = name
        result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
        assert base_pod == result
        mutator_pod = k8s.V1Pod()
        result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
        assert base_pod == result

    @mock.patch('uuid.uuid4')
    def test_reconcile_pods(self, mock_uuid):
        """Mutator pod fields must override/extend the base pod: name and
        labels merged, volumes/mounts appended, container fields replaced."""
        mock_uuid.return_value = self.static_uuid
        path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
        base_pod = PodGenerator(pod_template_file=path, extract_xcom=False).gen_pod()
        mutator_pod = k8s.V1Pod(
            metadata=k8s.V1ObjectMeta(
                name="name2",
                labels={"bar": "baz"},
            ),
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        image='',
                        name='name',
                        command=['/bin/command2.sh', 'arg2'],
                        volume_mounts=[
                            k8s.V1VolumeMount(mount_path="/foo/", name="example-kubernetes-test-volume2")
                        ],
                    )
                ],
                volumes=[
                    k8s.V1Volume(
                        host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                        name="example-kubernetes-test-volume2",
                    )
                ],
            ),
        )
        result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
        # Build the expectation by mutating the shared baseline in place.
        expected: k8s.V1Pod = self.expected
        expected.metadata.name = "name2"
        expected.metadata.labels['bar'] = 'baz'
        expected.spec.volumes = expected.spec.volumes or []
        expected.spec.volumes.append(
            k8s.V1Volume(
                host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                name="example-kubernetes-test-volume2"
            )
        )
        base_container: k8s.V1Container = expected.spec.containers[0]
        base_container.command = ['/bin/command2.sh', 'arg2']
        base_container.volume_mounts = [
            k8s.V1VolumeMount(mount_path="/foo/", name="example-kubernetes-test-volume2")
        ]
        base_container.name = "name"
        expected.spec.containers[0] = base_container
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        expected_dict = self.k8s_client.sanitize_for_serialization(expected)
        assert result_dict == expected_dict

    @mock.patch('uuid.uuid4')
    def test_construct_pod(self, mock_uuid):
        """construct_pod must merge worker config + executor-config override
        and stamp executor labels/annotations/env onto the result."""
        path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
        worker_config = PodGenerator.deserialize_model_file(path)
        mock_uuid.return_value = self.static_uuid
        executor_config = k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name='',
                        resources=k8s.V1ResourceRequirements(limits={'cpu': '1m', 'memory': '1G'})
                    )
                ]
            )
        )
        result = PodGenerator.construct_pod(
            dag_id=self.dag_id,
            task_id=self.task_id,
            pod_id='pod_id',
            kube_image='airflow_image',
            try_number=self.try_number,
            date=self.execution_date,
            args=['command'],
            pod_override_object=executor_config,
            base_worker_pod=worker_config,
            namespace='test_namespace',
            scheduler_job_id='uuid',
        )
        expected = self.expected
        expected.metadata.labels = self.labels
        expected.metadata.labels['app'] = 'myapp'
        expected.metadata.annotations = self.annotations
        expected.metadata.name = 'pod_id-' + self.static_uuid.hex
        expected.metadata.namespace = 'test_namespace'
        expected.spec.containers[0].args = ['command']
        expected.spec.containers[0].image = 'airflow_image'
        expected.spec.containers[0].resources = {'limits': {'cpu': '1m', 'memory': '1G'}}
        expected.spec.containers[0].env.append(
            k8s.V1EnvVar(
                name="AIRFLOW_IS_K8S_EXECUTOR_POD",
                value='True',
            )
        )
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        expected_dict = self.k8s_client.sanitize_for_serialization(self.expected)
        assert expected_dict == result_dict

    @mock.patch('uuid.uuid4')
    def test_construct_pod_empty_executor_config(self, mock_uuid):
        """With no executor config, construct_pod returns the worker config
        plus the standard executor labels/annotations/env and image/args."""
        path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
        worker_config = PodGenerator.deserialize_model_file(path)
        mock_uuid.return_value = self.static_uuid
        executor_config = None
        result = PodGenerator.construct_pod(
            dag_id='dag_id',
            task_id='task_id',
            pod_id='pod_id',
            kube_image='test-image',
            try_number=3,
            date=self.execution_date,
            args=['command'],
            pod_override_object=executor_config,
            base_worker_pod=worker_config,
            namespace='namespace',
            scheduler_job_id='uuid',
        )
        sanitized_result = self.k8s_client.sanitize_for_serialization(result)
        # Mutate worker_config into the expected result.
        worker_config.spec.containers[0].image = "test-image"
        worker_config.spec.containers[0].args = ["command"]
        worker_config.metadata.annotations = self.annotations
        worker_config.metadata.labels = self.labels
        worker_config.metadata.labels['app'] = 'myapp'
        worker_config.metadata.name = 'pod_id-' + self.static_uuid.hex
        worker_config.metadata.namespace = 'namespace'
        worker_config.spec.containers[0].env.append(
            k8s.V1EnvVar(name="AIRFLOW_IS_K8S_EXECUTOR_POD", value='True')
        )
        worker_config_result = self.k8s_client.sanitize_for_serialization(worker_config)
        assert worker_config_result == sanitized_result

    def test_merge_objects_empty(self):
        """merge_objects with a None/empty side returns the non-empty side."""
        annotations = {'foo1': 'bar1'}
        base_obj = k8s.V1ObjectMeta(annotations=annotations)
        client_obj = None
        res = merge_objects(base_obj, client_obj)
        assert base_obj == res
        client_obj = k8s.V1ObjectMeta()
        res = merge_objects(base_obj, client_obj)
        assert base_obj == res
        client_obj = k8s.V1ObjectMeta(annotations=annotations)
        base_obj = None
        res = merge_objects(base_obj, client_obj)
        assert client_obj == res
        base_obj = k8s.V1ObjectMeta()
        res = merge_objects(base_obj, client_obj)
        assert client_obj == res

    def test_merge_objects(self):
        """Client fields win; base fields absent on the client are kept."""
        base_annotations = {'foo1': 'bar1'}
        base_labels = {'foo1': 'bar1'}
        client_annotations = {'foo2': 'bar2'}
        base_obj = k8s.V1ObjectMeta(annotations=base_annotations, labels=base_labels)
        client_obj = k8s.V1ObjectMeta(annotations=client_annotations)
        res = merge_objects(base_obj, client_obj)
        client_obj.labels = base_labels
        assert client_obj == res

    def test_extend_object_field_empty(self):
        """Extending with an empty side yields the non-empty list."""
        ports = [k8s.V1ContainerPort(container_port=1, name='port')]
        base_obj = k8s.V1Container(name='base_container', ports=ports)
        client_obj = k8s.V1Container(name='client_container')
        res = extend_object_field(base_obj, client_obj, 'ports')
        client_obj.ports = ports
        assert client_obj == res
        base_obj = k8s.V1Container(name='base_container')
        client_obj = k8s.V1Container(name='base_container', ports=ports)
        res = extend_object_field(base_obj, client_obj, 'ports')
        assert client_obj == res

    def test_extend_object_field_not_list(self):
        """extend_object_field raises ValueError on non-list attributes."""
        base_obj = k8s.V1Container(name='base_container', image='image')
        client_obj = k8s.V1Container(name='client_container')
        with pytest.raises(ValueError):
            extend_object_field(base_obj, client_obj, 'image')
        base_obj = k8s.V1Container(name='base_container')
        client_obj = k8s.V1Container(name='client_container', image='image')
        with pytest.raises(ValueError):
            extend_object_field(base_obj, client_obj, 'image')

    def test_extend_object_field(self):
        """Both sides' list entries are concatenated, base first."""
        base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
        base_obj = k8s.V1Container(name='base_container', ports=base_ports)
        client_ports = [k8s.V1ContainerPort(container_port=1, name='client_port')]
        client_obj = k8s.V1Container(name='client_container', ports=client_ports)
        res = extend_object_field(base_obj, client_obj, 'ports')
        client_obj.ports = base_ports + client_ports
        assert client_obj == res

    def test_reconcile_containers_empty(self):
        """reconcile_containers with an empty side returns the other side."""
        base_objs = [k8s.V1Container(name='base_container')]
        client_objs = []
        res = PodGenerator.reconcile_containers(base_objs, client_objs)
        assert base_objs == res
        client_objs = [k8s.V1Container(name='client_container')]
        base_objs = []
        res = PodGenerator.reconcile_containers(base_objs, client_objs)
        assert client_objs == res
        res = PodGenerator.reconcile_containers([], [])
        assert res == []

    def test_reconcile_containers(self):
        """Containers are merged pairwise: list fields concatenated, scalar
        fields taken from the client when set, else from the base."""
        base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
        base_objs = [
            k8s.V1Container(name='base_container1', ports=base_ports),
            k8s.V1Container(name='base_container2', image='base_image'),
        ]
        client_ports = [k8s.V1ContainerPort(container_port=2, name='client_port')]
        client_objs = [
            k8s.V1Container(name='client_container1', ports=client_ports),
            k8s.V1Container(name='client_container2', image='client_image'),
        ]
        res = PodGenerator.reconcile_containers(base_objs, client_objs)
        client_objs[0].ports = base_ports + client_ports
        assert client_objs == res
        # Second round: client_container2 has no image, so the base's
        # image must be carried over.
        base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
        base_objs = [
            k8s.V1Container(name='base_container1', ports=base_ports),
            k8s.V1Container(name='base_container2', image='base_image'),
        ]
        client_ports = [k8s.V1ContainerPort(container_port=2, name='client_port')]
        client_objs = [
            k8s.V1Container(name='client_container1', ports=client_ports),
            k8s.V1Container(name='client_container2', stdin=True),
        ]
        res = PodGenerator.reconcile_containers(base_objs, client_objs)
        client_objs[0].ports = base_ports + client_ports
        client_objs[1].image = 'base_image'
        assert client_objs == res

    def test_reconcile_specs_empty(self):
        """reconcile_specs with a None side returns the non-None spec."""
        base_spec = k8s.V1PodSpec(containers=[])
        client_spec = None
        res = PodGenerator.reconcile_specs(base_spec, client_spec)
        assert base_spec == res
        base_spec = None
        client_spec = k8s.V1PodSpec(containers=[])
        res = PodGenerator.reconcile_specs(base_spec, client_spec)
        assert client_spec == res

    def test_reconcile_specs(self):
        """Client spec scalars win; unset client fields fall back to base."""
        base_objs = [k8s.V1Container(name='base_container1', image='base_image')]
        client_objs = [k8s.V1Container(name='client_container1')]
        base_spec = k8s.V1PodSpec(priority=1, active_deadline_seconds=100, containers=base_objs)
        client_spec = k8s.V1PodSpec(priority=2, hostname='local', containers=client_objs)
        res = PodGenerator.reconcile_specs(base_spec, client_spec)
        client_spec.containers = [k8s.V1Container(name='client_container1', image='base_image')]
        client_spec.active_deadline_seconds = 100
        assert client_spec == res

    def test_deserialize_model_file(self):
        """A pod YAML file round-trips to the expected sanitized dict."""
        path = sys.path[0] + '/tests/kubernetes/pod.yaml'
        result = PodGenerator.deserialize_model_file(path)
        sanitized_res = self.k8s_client.sanitize_for_serialization(result)
        assert sanitized_res == self.deserialize_result

    def test_deserialize_model_string(self):
        """deserialize_model_file also accepts raw YAML content directly."""
        fixture = """
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo
  namespace: mem-example
spec:
  containers:
    - name: memory-demo-ctr
      image: apache/airflow:stress-2020.07.10-1.0.4
      resources:
        limits:
          memory: "200Mi"
        requests:
          memory: "100Mi"
      command: ["stress"]
      args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
"""
        result = PodGenerator.deserialize_model_file(fixture)
        sanitized_res = self.k8s_client.sanitize_for_serialization(result)
        assert sanitized_res == self.deserialize_result

    def test_validate_pod_generator(self):
        """PodGenerator requires exactly one of pod / pod_template_file."""
        with pytest.raises(AirflowConfigException):
            PodGenerator(pod=k8s.V1Pod(), pod_template_file='k')
        with pytest.raises(AirflowConfigException):
            PodGenerator()
        PodGenerator(pod_template_file='tests/kubernetes/pod.yaml')
        PodGenerator(pod=k8s.V1Pod())
def test_to_v1_pod(self, mock_uuid):
    """Deprecated Pod -> V1Pod conversion must carry over image, env,
    secrets, resources, init containers, volumes and volume mounts; secret
    volume names embed the mocked uuid."""
    from airflow.contrib.kubernetes.pod import Pod as DeprecatedPod
    from airflow.kubernetes.volume import Volume
    from airflow.kubernetes.volume_mount import VolumeMount
    from airflow.kubernetes.secret import Secret
    from airflow.kubernetes.pod import Resources
    import uuid
    # Fixed uuid so the generated 'secretvol...' names are deterministic.
    static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
    mock_uuid.return_value = static_uuid
    pod = DeprecatedPod(
        image="foo",
        name="bar",
        namespace="baz",
        image_pull_policy="Never",
        envs={"test_key": "test_value"},
        cmds=["airflow"],
        resources=Resources(request_memory="1G", request_cpu="100Mi", limit_gpu="100G"),
        init_containers=k8s.V1Container(
            name="test-container",
            volume_mounts=k8s.V1VolumeMount(
                mount_path="/foo/bar", name="init-volume-secret")),
        volumes=[
            Volume(name="foo", configs={}),
            {
                "name": "bar",
                'secret': {
                    'secretName': 'volume-secret'
                }
            }
        ],
        secrets=[
            Secret("volume", None, "init-volume-secret"),
            Secret('env', "AIRFLOW_SECRET", 'secret_name', "airflow_config"),
            Secret("volume", "/opt/airflow", "volume-secret", "secret-key")
        ],
        volume_mounts=[
            VolumeMount(name="foo", mount_path="/mnt", sub_path="/", read_only=True)
        ])
    k8s_client = ApiClient()
    result = pod.to_v1_kubernetes_pod()
    result = k8s_client.sanitize_for_serialization(result)
    expected = \
        {'metadata': {'labels': {}, 'name': 'bar', 'namespace': 'baz'},
         'spec': {'affinity': {},
                  'containers': [{'args': [],
                                  'command': ['airflow'],
                                  'env': [{'name': 'test_key', 'value': 'test_value'},
                                          {'name': 'AIRFLOW_SECRET',
                                           'valueFrom': {'secretKeyRef': {'key': 'airflow_config',
                                                                          'name': 'secret_name'}}}],
                                  'image': 'foo',
                                  'imagePullPolicy': 'Never',
                                  'name': 'base',
                                  'resources': {'limits': {'nvidia.com/gpu': '100G'},
                                                'requests': {'cpu': '100Mi',
                                                             'memory': '1G'}},
                                  'volumeMounts': [{'mountPath': '/mnt',
                                                    'name': 'foo',
                                                    'readOnly': True,
                                                    'subPath': '/'},
                                                   {'mountPath': '/opt/airflow',
                                                    'name': 'secretvol' + str(static_uuid),
                                                    'readOnly': True}]}],
                  'hostNetwork': False,
                  'initContainers': {'name': 'test-container',
                                     'volumeMounts': {'mountPath': '/foo/bar',
                                                      'name': 'init-volume-secret'}},
                  'securityContext': {},
                  'tolerations': [],
                  'volumes': [{'name': 'foo'},
                              {'name': 'bar', 'secret': {'secretName': 'volume-secret'}},
                              {'name': 'secretvolcf4a56d2-8101-4217-b027-2af6216feb48',
                               'secret': {'secretName': 'init-volume-secret'}},
                              {'name': 'secretvol' + str(static_uuid),
                               'secret': {'secretName': 'volume-secret'}}
                              ]}}
    self.maxDiff = None
    self.assertEqual(expected, result)
def test_create_with_affinity(self):
    """Affinity given as a plain dict or as a k8s.V1Affinity object must
    both become a V1Affinity on the pod spec and serialize to the same dict."""
    name_base = 'test'
    affinity = {
        'nodeAffinity': {
            'preferredDuringSchedulingIgnoredDuringExecution': [{
                "weight": 1,
                "preference": {
                    "matchExpressions": [{
                        "key": "disktype",
                        "operator": "In",
                        "values": ["ssd"]
                    }]
                },
            }]
        }
    }
    k = KubernetesPodOperator(
        namespace='default',
        image="ubuntu:16.04",
        cmds=["bash", "-cx"],
        arguments=["echo 10"],
        labels={"foo": "bar"},
        name=name_base,
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        affinity=affinity,
    )
    result = k.create_pod_request_obj()
    client = ApiClient()
    assert isinstance(result.spec.affinity, k8s.V1Affinity)
    assert client.sanitize_for_serialization(
        result)['spec']['affinity'] == affinity
    # Same affinity expressed with native k8s API objects.
    k8s_api_affinity = k8s.V1Affinity(node_affinity=k8s.V1NodeAffinity(
        preferred_during_scheduling_ignored_during_execution=[
            k8s.V1PreferredSchedulingTerm(
                weight=1,
                preference=k8s.V1NodeSelectorTerm(match_expressions=[
                    k8s.V1NodeSelectorRequirement(
                        key="disktype", operator="In", values=["ssd"])
                ]),
            )
        ]),
    )
    k = KubernetesPodOperator(
        namespace='default',
        image="ubuntu:16.04",
        cmds=["bash", "-cx"],
        arguments=["echo 10"],
        labels={"foo": "bar"},
        name=name_base,
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        affinity=k8s_api_affinity,
    )
    result = k.create_pod_request_obj()
    assert isinstance(result.spec.affinity, k8s.V1Affinity)
    assert client.sanitize_for_serialization(
        result)['spec']['affinity'] == affinity
def test_attach_to_pod(self, mock_uuid):
    """Secrets appended to a template-file pod (with xcom sidecar) must land
    as envFrom, env-from-secret-key, and a read-only secret volume/mount."""
    # Fixed uuid so pod and secret-volume names are deterministic.
    static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
    mock_uuid.return_value = static_uuid
    path = sys.path[0] + '/tests/kubernetes/pod_generator_base.yaml'
    pod = PodGenerator(pod_template_file=path).gen_pod()
    secrets = [
        # This should be a secretRef
        Secret('env', None, 'secret_a'),
        # This should be a single secret mounted in volumeMounts
        Secret('volume', '/etc/foo', 'secret_b'),
        # This should produce a single secret mounted in env
        Secret('env', 'TARGET', 'secret_b', 'source_b'),
    ]
    k8s_client = ApiClient()
    pod = append_to_pod(pod, secrets)
    result = k8s_client.sanitize_for_serialization(pod)
    self.assertEqual(
        result, {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'labels': {
                    'app': 'myapp'
                },
                'name': 'myapp-pod-cf4a56d281014217b0272af6216feb48',
                'namespace': 'default'
            },
            'spec': {
                'containers': [{
                    'command': ['sh', '-c', 'echo Hello Kubernetes!'],
                    'env': [{
                        'name': 'ENVIRONMENT',
                        'value': 'prod'
                    }, {
                        'name': 'LOG_LEVEL',
                        'value': 'warning'
                    }, {
                        'name': 'TARGET',
                        'valueFrom': {
                            'secretKeyRef': {
                                'key': 'source_b',
                                'name': 'secret_b'
                            }
                        }
                    }],
                    'envFrom': [{
                        'configMapRef': {
                            'name': 'configmap_a'
                        }
                    }, {
                        'secretRef': {
                            'name': 'secret_a'
                        }
                    }],
                    'image': 'busybox',
                    'name': 'base',
                    'ports': [{
                        'containerPort': 1234,
                        'name': 'foo'
                    }],
                    'resources': {
                        'limits': {
                            'memory': '200Mi'
                        },
                        'requests': {
                            'memory': '100Mi'
                        }
                    },
                    'volumeMounts': [{
                        'mountPath': '/airflow/xcom',
                        'name': 'xcom'
                    }, {
                        'mountPath': '/etc/foo',
                        'name': 'secretvol' + str(static_uuid),
                        'readOnly': True
                    }]
                }, {
                    # The xcom sidecar from the template file is untouched.
                    'command': [
                        'sh', '-c', 'trap "exit 0" INT; while true; do sleep '
                        '30; done;'
                    ],
                    'image': 'alpine',
                    'name': 'airflow-xcom-sidecar',
                    'resources': {
                        'requests': {
                            'cpu': '1m'
                        }
                    },
                    'volumeMounts': [{
                        'mountPath': '/airflow/xcom',
                        'name': 'xcom'
                    }]
                }],
                'hostNetwork': True,
                'imagePullSecrets': [{
                    'name': 'pull_secret_a'
                }, {
                    'name': 'pull_secret_b'
                }],
                'securityContext': {
                    'fsGroup': 2000,
                    'runAsUser': 1000
                },
                'volumes': [{
                    'emptyDir': {},
                    'name': 'xcom'
                }, {
                    'name': 'secretvol' + str(static_uuid),
                    'secret': {
                        'secretName': 'secret_b'
                    }
                }]
            }
        })
class TestPodGenerator(unittest.TestCase):
    """Tests for PodGenerator.gen_pod/from_obj/reconcile_pods (older fixture set).

    NOTE(review): a second ``class TestPodGenerator`` is defined later in this
    file with the same name; the later definition rebinds the name, so the
    tests in THIS class are never collected by unittest.  Confirm whether this
    older copy should be deleted or renamed.
    """

    def setUp(self):
        """Build shared env/secret/resource fixtures and the expected pod dict."""
        self.envs = {'ENVIRONMENT': 'prod', 'LOG_LEVEL': 'warning'}
        self.secrets = [
            # This should be a secretRef
            Secret('env', None, 'secret_a'),
            # This should be a single secret mounted in volumeMounts
            Secret('volume', '/etc/foo', 'secret_b'),
            # This should produce a single secret mounted in env
            Secret('env', 'TARGET', 'secret_b', 'source_b'),
        ]
        # NOTE(review): 5-argument Resources(...) — the later class uses a
        # 7-argument form including ephemeral storage; confirm this matches the
        # Resources signature in use.
        self.resources = Resources('1Gi', 1, '2Gi', 2, 1)
        self.k8s_client = ApiClient()
        # Full serialized pod expected from gen_pod + secrets + resources.
        self.expected = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'myapp-pod-0',
                'labels': {'app': 'myapp'},
                'namespace': 'default'
            },
            'spec': {
                'containers': [{
                    'name': 'base',
                    'image': 'busybox',
                    'args': [],
                    'command': ['sh', '-c', 'echo Hello Kubernetes!'],
                    'imagePullPolicy': 'IfNotPresent',
                    'env': [{
                        'name': 'ENVIRONMENT',
                        'value': 'prod'
                    }, {
                        'name': 'LOG_LEVEL',
                        'value': 'warning'
                    }, {
                        'name': 'TARGET',
                        'valueFrom': {
                            'secretKeyRef': {
                                'name': 'secret_b',
                                'key': 'source_b'
                            }
                        }
                    }],
                    'envFrom': [{
                        'configMapRef': {'name': 'configmap_a'}
                    }, {
                        'configMapRef': {'name': 'configmap_b'}
                    }, {
                        'secretRef': {'name': 'secret_a'}
                    }],
                    'resources': {
                        'requests': {
                            'memory': '1Gi',
                            'cpu': 1
                        },
                        'limits': {
                            'memory': '2Gi',
                            'cpu': 2,
                            'nvidia.com/gpu': 1
                        },
                    },
                    'ports': [{'name': 'foo', 'containerPort': 1234}],
                    'volumeMounts': [{
                        'mountPath': '/etc/foo',
                        'name': 'secretvol0',
                        'readOnly': True
                    }]
                }],
                'restartPolicy': 'Never',
                'volumes': [{
                    'name': 'secretvol0',
                    'secret': {'secretName': 'secret_b'}
                }],
                'hostNetwork': False,
                'imagePullSecrets': [{
                    'name': 'pull_secret_a'
                }, {
                    'name': 'pull_secret_b'
                }],
                'securityContext': {
                    'runAsUser': 1000,
                    'fsGroup': 2000,
                },
            }
        }

    @mock.patch('uuid.uuid4')
    def test_gen_pod(self, mock_uuid):
        """gen_pod + secrets + resources serializes to the expected dict."""
        mock_uuid.return_value = '0'
        pod_generator = PodGenerator(
            labels={'app': 'myapp'},
            name='myapp-pod',
            image_pull_secrets='pull_secret_a,pull_secret_b',
            image='busybox',
            envs=self.envs,
            cmds=['sh', '-c', 'echo Hello Kubernetes!'],
            security_context=k8s.V1PodSecurityContext(
                run_as_user=1000,
                fs_group=2000,
            ),
            namespace='default',
            ports=[k8s.V1ContainerPort(name='foo', container_port=1234)],
            configmaps=['configmap_a', 'configmap_b'])
        result = pod_generator.gen_pod()
        result = append_to_pod(result, self.secrets)
        result = self.resources.attach_to_pod(result)
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        # sort — env/envFrom ordering is not guaranteed, normalize before compare
        result_dict['spec']['containers'][0]['env'].sort(
            key=lambda x: x['name'])
        result_dict['spec']['containers'][0]['envFrom'].sort(
            key=lambda x: list(x.values())[0]['name'])
        self.assertDictEqual(result_dict, self.expected)

    @mock.patch('uuid.uuid4')
    def test_gen_pod_extract_xcom(self, mock_uuid):
        """extract_xcom adds the sidecar container plus the shared xcom volume."""
        mock_uuid.return_value = '0'
        pod_generator = PodGenerator(
            labels={'app': 'myapp'},
            name='myapp-pod',
            image_pull_secrets='pull_secret_a,pull_secret_b',
            image='busybox',
            envs=self.envs,
            cmds=['sh', '-c', 'echo Hello Kubernetes!'],
            namespace='default',
            security_context=k8s.V1PodSecurityContext(
                run_as_user=1000,
                fs_group=2000,
            ),
            ports=[k8s.V1ContainerPort(name='foo', container_port=1234)],
            configmaps=['configmap_a', 'configmap_b'])
        # extract_xcom set after construction (the later class passes it as a kwarg)
        pod_generator.extract_xcom = True
        result = pod_generator.gen_pod()
        result = append_to_pod(result, self.secrets)
        result = self.resources.attach_to_pod(result)
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        container_two = {
            'name': 'airflow-xcom-sidecar',
            'image': "alpine",
            'command': ['sh', '-c', PodDefaults.XCOM_CMD],
            'volumeMounts': [{
                'name': 'xcom',
                'mountPath': '/airflow/xcom'
            }]
        }
        # Adjust the shared expectation in place for the sidecar additions.
        self.expected['spec']['containers'].append(container_two)
        self.expected['spec']['containers'][0]['volumeMounts'].insert(
            0, {
                'name': 'xcom',
                'mountPath': '/airflow/xcom'
            })
        self.expected['spec']['volumes'].insert(0, {
            'name': 'xcom',
            'emptyDir': {}
        })
        result_dict['spec']['containers'][0]['env'].sort(
            key=lambda x: x['name'])
        self.assertEqual(result_dict, self.expected)

    @mock.patch('uuid.uuid4')
    def test_from_obj(self, mock_uuid):
        """from_obj builds a pod from the legacy KubernetesExecutor dict config."""
        mock_uuid.return_value = '0'
        result = PodGenerator.from_obj({
            "KubernetesExecutor": {
                "annotations": {"test": "annotation"},
                "volumes": [
                    {
                        "name": "example-kubernetes-test-volume",
                        "hostPath": {"path": "/tmp/"},
                    },
                ],
                "volume_mounts": [
                    {
                        "mountPath": "/foo/",
                        "name": "example-kubernetes-test-volume",
                    },
                ],
                "securityContext": {"runAsUser": 1000}
            }
        })
        result = self.k8s_client.sanitize_for_serialization(result)
        # NOTE(review): the input sets securityContext but the expected dict
        # below omits it (and hostNetwork) — presumably this older from_obj
        # ignored the securityContext key; confirm against the implementation.
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'annotations': {'test': 'annotation'},
                },
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': [],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'volumeMounts': [{
                            'mountPath': '/foo/',
                            'name': 'example-kubernetes-test-volume'
                        }],
                    }],
                    'imagePullSecrets': [],
                    'volumes': [{
                        'hostPath': {'path': '/tmp/'},
                        'name': 'example-kubernetes-test-volume'
                    }],
                }
            }, result)

    def test_reconcile_pods(self):
        """Mutator pod values win where set; lists (env, volumes, mounts) merge."""
        with mock.patch('uuid.uuid4') as mock_uuid:
            mock_uuid.return_value = '0'
            # NOTE(review): ports is passed as a bare V1ContainerPort, not a
            # list — the expected dict mirrors that ('ports' is a dict below).
            # The later copy of this test wraps it in a list; confirm intent.
            base_pod = PodGenerator(
                image='image1',
                name='name1',
                envs={'key1': 'val1'},
                cmds=['/bin/command1.sh', 'arg1'],
                ports=k8s.V1ContainerPort(name='port', container_port=2118),
                volumes=[{
                    'hostPath': {'path': '/tmp/'},
                    'name': 'example-kubernetes-test-volume1'
                }],
                volume_mounts=[{
                    'mountPath': '/foo/',
                    'name': 'example-kubernetes-test-volume1'
                }],
            ).gen_pod()
            mutator_pod = PodGenerator(
                envs={'key2': 'val2'},
                image='',
                name='name2',
                cmds=['/bin/command2.sh', 'arg2'],
                volumes=[{
                    'hostPath': {'path': '/tmp/'},
                    'name': 'example-kubernetes-test-volume2'
                }],
                volume_mounts=[{
                    'mountPath': '/foo/',
                    'name': 'example-kubernetes-test-volume2'
                }]).gen_pod()
            result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
            result = self.k8s_client.sanitize_for_serialization(result)
            self.assertEqual(
                result,
                {
                    'apiVersion': 'v1',
                    'kind': 'Pod',
                    'metadata': {
                        # mutator's name wins, suffixed with patched uuid
                        'name': 'name2-0'
                    },
                    'spec': {
                        'containers': [{
                            'args': [],
                            # base command retained (mutator's empty image
                            # did not bring its command along here)
                            'command': ['/bin/command1.sh', 'arg1'],
                            'env': [{
                                'name': 'key1',
                                'value': 'val1'
                            }, {
                                'name': 'key2',
                                'value': 'val2'
                            }],
                            'envFrom': [],
                            'image': 'image1',
                            'imagePullPolicy': 'IfNotPresent',
                            'name': 'base',
                            'ports': {
                                'containerPort': 2118,
                                'name': 'port',
                            },
                            'volumeMounts': [
                                {
                                    'mountPath': '/foo/',
                                    'name': 'example-kubernetes-test-volume1'
                                },
                                {
                                    'mountPath': '/foo/',
                                    'name': 'example-kubernetes-test-volume2'
                                }
                            ]
                        }],
                        'hostNetwork': False,
                        'imagePullSecrets': [],
                        'restartPolicy': 'Never',
                        'volumes': [{
                            'hostPath': {'path': '/tmp/'},
                            'name': 'example-kubernetes-test-volume1'
                        }, {
                            'hostPath': {'path': '/tmp/'},
                            'name': 'example-kubernetes-test-volume2'
                        }]
                    }
                })
class TestPodGenerator(unittest.TestCase):
    """Tests for PodGenerator: generation, from_obj conversion, reconciliation,
    construct_pod merging, and model (de)serialization."""

    def setUp(self):
        """Build the shared fixtures: fixed uuid, secrets, resources and
        the expected serialized pod dicts used across the tests."""
        # Fixed UUID so generated pod names and volume names are deterministic.
        self.static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
        # Expected sanitized form of the memory-demo pod fixture
        # (tests/kubernetes/pod.yaml and the inline YAML string below).
        self.deserialize_result = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'memory-demo',
                'namespace': 'mem-example'
            },
            'spec': {
                'containers': [{
                    'args': ['--vm', '1', '--vm-bytes', '150M', '--vm-hang', '1'],
                    'command': ['stress'],
                    'image': 'polinux/stress',
                    'name': 'memory-demo-ctr',
                    'resources': {
                        'limits': {'memory': '200Mi'},
                        'requests': {'memory': '100Mi'}
                    }
                }]
            }
        }
        self.envs = {'ENVIRONMENT': 'prod', 'LOG_LEVEL': 'warning'}
        self.secrets = [
            # This should be a secretRef
            Secret('env', None, 'secret_a'),
            # This should be a single secret mounted in volumeMounts
            Secret('volume', '/etc/foo', 'secret_b'),
            # This should produce a single secret mounted in env
            Secret('env', 'TARGET', 'secret_b', 'source_b'),
        ]
        # Labels construct_pod() is expected to stamp on every worker pod.
        self.labels = {
            'airflow-worker': 'uuid',
            'dag_id': 'dag_id',
            'execution_date': 'date',
            'task_id': 'task_id',
            'try_number': '3',
            'airflow_version': mock.ANY,
            'kubernetes_executor': 'True'
        }
        self.metadata = {
            'labels': self.labels,
            'name': 'pod_id-' + self.static_uuid.hex,
            'namespace': 'namespace'
        }
        self.resources = Resources('1Gi', 1, '2Gi', '2Gi', 2, 1, '4Gi')
        self.k8s_client = ApiClient()
        # Full serialized pod expected from gen_pod + secrets + resources.
        self.expected = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'myapp-pod-' + self.static_uuid.hex,
                'labels': {'app': 'myapp'},
                'namespace': 'default'
            },
            'spec': {
                'containers': [{
                    'name': 'base',
                    'image': 'busybox',
                    'args': [],
                    'command': ['sh', '-c', 'echo Hello Kubernetes!'],
                    'env': [{
                        'name': 'ENVIRONMENT',
                        'value': 'prod'
                    }, {
                        'name': 'LOG_LEVEL',
                        'value': 'warning'
                    }, {
                        'name': 'TARGET',
                        'valueFrom': {
                            'secretKeyRef': {
                                'name': 'secret_b',
                                'key': 'source_b'
                            }
                        }
                    }],
                    'envFrom': [{
                        'configMapRef': {'name': 'configmap_a'}
                    }, {
                        'configMapRef': {'name': 'configmap_b'}
                    }, {
                        'secretRef': {'name': 'secret_a'}
                    }],
                    'resources': {
                        'requests': {
                            'memory': '1Gi',
                            'cpu': 1,
                            'ephemeral-storage': '2Gi'
                        },
                        'limits': {
                            'memory': '2Gi',
                            'cpu': 2,
                            'nvidia.com/gpu': 1,
                            'ephemeral-storage': '4Gi'
                        },
                    },
                    'ports': [{'name': 'foo', 'containerPort': 1234}],
                    'volumeMounts': [{
                        'mountPath': '/etc/foo',
                        'name': 'secretvol' + str(self.static_uuid),
                        'readOnly': True
                    }]
                }],
                'volumes': [{
                    'name': 'secretvol' + str(self.static_uuid),
                    'secret': {'secretName': 'secret_b'}
                }],
                'hostNetwork': False,
                'imagePullSecrets': [{
                    'name': 'pull_secret_a'
                }, {
                    'name': 'pull_secret_b'
                }],
                'securityContext': {
                    'runAsUser': 1000,
                    'fsGroup': 2000,
                },
            }
        }

    @mock.patch('uuid.uuid4')
    def test_gen_pod(self, mock_uuid):
        """gen_pod + secrets + resources serializes to the expected dict."""
        mock_uuid.return_value = self.static_uuid
        pod_generator = PodGenerator(
            labels={'app': 'myapp'},
            name='myapp-pod',
            image_pull_secrets='pull_secret_a,pull_secret_b',
            image='busybox',
            envs=self.envs,
            cmds=['sh', '-c', 'echo Hello Kubernetes!'],
            security_context=k8s.V1PodSecurityContext(
                run_as_user=1000,
                fs_group=2000,
            ),
            namespace='default',
            ports=[k8s.V1ContainerPort(name='foo', container_port=1234)],
            configmaps=['configmap_a', 'configmap_b'])
        result = pod_generator.gen_pod()
        result = append_to_pod(result, self.secrets)
        result = self.resources.attach_to_pod(result)
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        # sort — env/envFrom ordering is not guaranteed, normalize before compare
        result_dict['spec']['containers'][0]['env'].sort(
            key=lambda x: x['name'])
        result_dict['spec']['containers'][0]['envFrom'].sort(
            key=lambda x: list(x.values())[0]['name'])
        self.assertDictEqual(self.expected, result_dict)

    @mock.patch('uuid.uuid4')
    def test_gen_pod_extract_xcom(self, mock_uuid):
        """extract_xcom=True adds the sidecar container and xcom volume/mounts."""
        mock_uuid.return_value = self.static_uuid
        pod_generator = PodGenerator(
            labels={'app': 'myapp'},
            name='myapp-pod',
            image_pull_secrets='pull_secret_a,pull_secret_b',
            image='busybox',
            envs=self.envs,
            cmds=['sh', '-c', 'echo Hello Kubernetes!'],
            namespace='default',
            security_context=k8s.V1PodSecurityContext(
                run_as_user=1000,
                fs_group=2000,
            ),
            ports=[k8s.V1ContainerPort(name='foo', container_port=1234)],
            configmaps=['configmap_a', 'configmap_b'],
            extract_xcom=True)
        result = pod_generator.gen_pod()
        result = append_to_pod(result, self.secrets)
        result = self.resources.attach_to_pod(result)
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        container_two = {
            'name': 'airflow-xcom-sidecar',
            'image': "alpine",
            'command': ['sh', '-c', PodDefaults.XCOM_CMD],
            'volumeMounts': [{
                'name': 'xcom',
                'mountPath': '/airflow/xcom'
            }],
            'resources': {'requests': {'cpu': '1m'}},
        }
        # Adjust the shared expectation in place for the sidecar additions.
        self.expected['spec']['containers'].append(container_two)
        self.expected['spec']['containers'][0]['volumeMounts'].insert(
            0, {
                'name': 'xcom',
                'mountPath': '/airflow/xcom'
            })
        self.expected['spec']['volumes'].insert(0, {
            'name': 'xcom',
            'emptyDir': {}
        })
        result_dict['spec']['containers'][0]['env'].sort(
            key=lambda x: x['name'])
        self.assertEqual(result_dict, self.expected)

    @mock.patch('uuid.uuid4')
    def test_from_obj(self, mock_uuid):
        """from_obj converts the legacy KubernetesExecutor dict (with a raw
        resources mapping) into a pod; resources pass through unchanged."""
        mock_uuid.return_value = self.static_uuid
        result = PodGenerator.from_obj({
            "KubernetesExecutor": {
                "annotations": {"test": "annotation"},
                "volumes": [
                    {
                        "name": "example-kubernetes-test-volume",
                        "hostPath": {"path": "/tmp/"},
                    },
                ],
                "volume_mounts": [
                    {
                        "mountPath": "/foo/",
                        "name": "example-kubernetes-test-volume",
                    },
                ],
                "resources": {
                    "requests": {
                        "memory": "256Mi",
                        "cpu": "500m",
                        "ephemeral-storage": "2G",
                        "nvidia.com/gpu": "0"
                    },
                    "limits": {
                        "memory": "512Mi",
                        "cpu": "1000m",
                        "ephemeral-storage": "2G",
                        "nvidia.com/gpu": "0"
                    }
                }
            }
        })
        result = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'annotations': {'test': 'annotation'},
                },
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': [],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'volumeMounts': [{
                            'mountPath': '/foo/',
                            'name': 'example-kubernetes-test-volume'
                        }],
                        "resources": {
                            "requests": {
                                "memory": "256Mi",
                                "cpu": "500m",
                                "ephemeral-storage": "2G",
                                "nvidia.com/gpu": "0"
                            },
                            "limits": {
                                "memory": "512Mi",
                                "cpu": "1000m",
                                "ephemeral-storage": "2G",
                                "nvidia.com/gpu": "0"
                            }
                        }
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': [{
                        'hostPath': {'path': '/tmp/'},
                        'name': 'example-kubernetes-test-volume'
                    }],
                }
            }, result)

    @mock.patch('uuid.uuid4')
    def test_from_obj_with_resources_object(self, mock_uuid):
        """Same input as test_from_obj.

        NOTE(review): despite the name, the input here is identical to
        test_from_obj (a raw resources dict, not a Resources object) — the two
        tests differ only in key ordering of the expected dict; confirm
        intended coverage.
        """
        mock_uuid.return_value = self.static_uuid
        result = PodGenerator.from_obj({
            "KubernetesExecutor": {
                "annotations": {"test": "annotation"},
                "volumes": [
                    {
                        "name": "example-kubernetes-test-volume",
                        "hostPath": {"path": "/tmp/"},
                    },
                ],
                "volume_mounts": [
                    {
                        "mountPath": "/foo/",
                        "name": "example-kubernetes-test-volume",
                    },
                ],
                "resources": {
                    "requests": {
                        "memory": "256Mi",
                        "cpu": "500m",
                        "ephemeral-storage": "2G",
                        "nvidia.com/gpu": "0"
                    },
                    "limits": {
                        "memory": "512Mi",
                        "cpu": "1000m",
                        "ephemeral-storage": "2G",
                        "nvidia.com/gpu": "0"
                    }
                }
            }
        })
        result = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'annotations': {'test': 'annotation'},
                },
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': [],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'volumeMounts': [{
                            'mountPath': '/foo/',
                            'name': 'example-kubernetes-test-volume'
                        }],
                        'resources': {
                            'limits': {
                                'cpu': '1000m',
                                'ephemeral-storage': '2G',
                                'memory': '512Mi',
                                'nvidia.com/gpu': '0'
                            },
                            'requests': {
                                'cpu': '500m',
                                'ephemeral-storage': '2G',
                                'memory': '256Mi',
                                'nvidia.com/gpu': '0'
                            }
                        },
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': [{
                        'hostPath': {'path': '/tmp/'},
                        'name': 'example-kubernetes-test-volume'
                    }],
                }
            }, result)

    @mock.patch('uuid.uuid4')
    def test_from_obj_with_resources(self, mock_uuid):
        """Legacy flat request_*/limit_* keys map to a k8s resources block."""
        self.maxDiff = None
        mock_uuid.return_value = self.static_uuid
        result = PodGenerator.from_obj({
            "KubernetesExecutor": {
                "annotations": {"test": "annotation"},
                "volumes": [
                    {
                        "name": "example-kubernetes-test-volume",
                        "hostPath": {"path": "/tmp/"},
                    },
                ],
                "volume_mounts": [
                    {
                        "mountPath": "/foo/",
                        "name": "example-kubernetes-test-volume",
                    },
                ],
                'request_cpu': "200m",
                'limit_cpu': "400m",
                'request_memory': "500Mi",
                'limit_memory': "1000Mi",
                'limit_gpu': "2",
                'request_ephemeral_storage': '2Gi',
                'limit_ephemeral_storage': '4Gi',
            }
        })
        result = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'annotations': {'test': 'annotation'},
                },
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': [],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'resources': {
                            'limits': {
                                'cpu': '400m',
                                'ephemeral-storage': '4Gi',
                                'memory': '1000Mi',
                                # limit_gpu maps to the nvidia device plugin key
                                'nvidia.com/gpu': "2",
                            },
                            'requests': {
                                'cpu': '200m',
                                'ephemeral-storage': '2Gi',
                                'memory': '500Mi',
                            },
                        },
                        'volumeMounts': [{
                            'mountPath': '/foo/',
                            'name': 'example-kubernetes-test-volume'
                        }],
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': [{
                        'hostPath': {'path': '/tmp/'},
                        'name': 'example-kubernetes-test-volume'
                    }],
                }
            }, result)

    @mock.patch('uuid.uuid4')
    def test_from_obj_with_only_request_resources(self, mock_uuid):
        """Only request_* keys set: no limits block should be produced."""
        self.maxDiff = None
        mock_uuid.return_value = self.static_uuid
        result = PodGenerator.from_obj({
            "KubernetesExecutor": {
                "annotations": {"test": "annotation"},
                "volumes": [
                    {
                        "name": "example-kubernetes-test-volume",
                        "hostPath": {"path": "/tmp/"},
                    },
                ],
                "volume_mounts": [
                    {
                        "mountPath": "/foo/",
                        "name": "example-kubernetes-test-volume",
                    },
                ],
                'request_cpu': "200m",
                'request_memory': "500Mi",
            }
        })
        result = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'annotations': {'test': 'annotation'},
                },
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': [],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'resources': {
                            'requests': {
                                'cpu': '200m',
                                'memory': '500Mi',
                            },
                        },
                        'volumeMounts': [{
                            'mountPath': '/foo/',
                            'name': 'example-kubernetes-test-volume'
                        }],
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': [{
                        'hostPath': {'path': '/tmp/'},
                        'name': 'example-kubernetes-test-volume'
                    }],
                }
            }, result)

    @mock.patch('uuid.uuid4')
    def test_reconcile_pods_empty_mutator_pod(self, mock_uuid):
        """A None or empty mutator pod leaves the base pod unchanged."""
        mock_uuid.return_value = self.static_uuid
        base_pod = PodGenerator(
            image='image1',
            name='name1',
            envs={'key1': 'val1'},
            cmds=['/bin/command1.sh', 'arg1'],
            ports=[k8s.V1ContainerPort(name='port', container_port=2118)],
            volumes=[{
                'hostPath': {'path': '/tmp/'},
                'name': 'example-kubernetes-test-volume1'
            }],
            volume_mounts=[{
                'mountPath': '/foo/',
                'name': 'example-kubernetes-test-volume1'
            }],
        ).gen_pod()
        mutator_pod = None
        name = 'name1-' + self.static_uuid.hex
        base_pod.metadata.name = name
        result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
        self.assertEqual(base_pod, result)
        mutator_pod = k8s.V1Pod()
        result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
        self.assertEqual(base_pod, result)

    @mock.patch('uuid.uuid4')
    def test_reconcile_pods(self, mock_uuid):
        """Mutator values override base; env and volumes merge; mounts from
        the mutator replace the base's (per the expected dict below)."""
        mock_uuid.return_value = self.static_uuid
        base_pod = PodGenerator(
            image='image1',
            name='name1',
            envs={'key1': 'val1'},
            cmds=['/bin/command1.sh', 'arg1'],
            ports=[k8s.V1ContainerPort(name='port', container_port=2118)],
            volumes=[{
                'hostPath': {'path': '/tmp/'},
                'name': 'example-kubernetes-test-volume1'
            }],
            volume_mounts=[{
                'mountPath': '/foo/',
                'name': 'example-kubernetes-test-volume1'
            }],
        ).gen_pod()
        mutator_pod = PodGenerator(
            envs={'key2': 'val2'},
            image='',
            name='name2',
            cmds=['/bin/command2.sh', 'arg2'],
            volumes=[{
                'hostPath': {'path': '/tmp/'},
                'name': 'example-kubernetes-test-volume2'
            }],
            volume_mounts=[{
                'mountPath': '/foo/',
                'name': 'example-kubernetes-test-volume2'
            }]).gen_pod()
        result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
        result = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'name': 'name2-' + self.static_uuid.hex
                },
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': ['/bin/command2.sh', 'arg2'],
                        'env': [{
                            'name': 'key1',
                            'value': 'val1'
                        }, {
                            'name': 'key2',
                            'value': 'val2'
                        }],
                        'envFrom': [],
                        # empty image on the mutator falls back to the base's
                        'image': 'image1',
                        'name': 'base',
                        'ports': [{
                            'containerPort': 2118,
                            'name': 'port',
                        }],
                        'volumeMounts': [{
                            'mountPath': '/foo/',
                            'name': 'example-kubernetes-test-volume2'
                        }]
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': [{
                        'hostPath': {'path': '/tmp/'},
                        'name': 'example-kubernetes-test-volume1'
                    }, {
                        'hostPath': {'path': '/tmp/'},
                        'name': 'example-kubernetes-test-volume2'
                    }]
                }
            }, result)

    @mock.patch('uuid.uuid4')
    def test_construct_pod_empty_worker_config(self, mock_uuid):
        """With an empty worker_config, the executor_config supplies the container."""
        mock_uuid.return_value = self.static_uuid
        executor_config = k8s.V1Pod(spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(name='',
                            resources=k8s.V1ResourceRequirements(limits={
                                'cpu': '1m',
                                'memory': '1G'
                            }))
        ]))
        worker_config = k8s.V1Pod()
        result = PodGenerator.construct_pod(
            'dag_id',
            'task_id',
            'pod_id',
            3,
            'date',
            ['command'],
            executor_config,
            worker_config,
            'namespace',
            'uuid',
        )
        sanitized_result = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': self.metadata,
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': ['command'],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'resources': {
                            'limits': {
                                'cpu': '1m',
                                'memory': '1G'
                            }
                        },
                        'volumeMounts': []
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': []
                }
            }, sanitized_result)

    @mock.patch('uuid.uuid4')
    def test_construct_pod_empty_executor_config(self, mock_uuid):
        """With executor_config=None, the worker_config supplies the container."""
        mock_uuid.return_value = self.static_uuid
        worker_config = k8s.V1Pod(spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(name='',
                            resources=k8s.V1ResourceRequirements(limits={
                                'cpu': '1m',
                                'memory': '1G'
                            }))
        ]))
        executor_config = None
        result = PodGenerator.construct_pod(
            'dag_id',
            'task_id',
            'pod_id',
            3,
            'date',
            ['command'],
            executor_config,
            worker_config,
            'namespace',
            'uuid',
        )
        sanitized_result = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': self.metadata,
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': ['command'],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'resources': {
                            'limits': {
                                'cpu': '1m',
                                'memory': '1G'
                            }
                        },
                        'volumeMounts': []
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': []
                }
            }, sanitized_result)

    @mock.patch('uuid.uuid4')
    def test_construct_pod(self, mock_uuid):
        """Dynamic args override the worker_config name; executor_config
        resources win; worker_config annotations/securityContext survive."""
        mock_uuid.return_value = self.static_uuid
        worker_config = k8s.V1Pod(
            metadata=k8s.V1ObjectMeta(name='gets-overridden-by-dynamic-args',
                                      annotations={'should': 'stay'}),
            spec=k8s.V1PodSpec(containers=[
                k8s.V1Container(name='doesnt-override',
                                resources=k8s.V1ResourceRequirements(
                                    limits={
                                        'cpu': '1m',
                                        'memory': '1G'
                                    }),
                                security_context=k8s.V1SecurityContext(
                                    run_as_user=1))
            ]))
        executor_config = k8s.V1Pod(spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(name='doesnt-override-either',
                            resources=k8s.V1ResourceRequirements(limits={
                                'cpu': '2m',
                                'memory': '2G'
                            }))
        ]))
        result = PodGenerator.construct_pod(
            'dag_id',
            'task_id',
            'pod_id',
            3,
            'date',
            ['command'],
            executor_config,
            worker_config,
            'namespace',
            'uuid',
        )
        sanitized_result = self.k8s_client.sanitize_for_serialization(result)
        self.metadata.update({'annotations': {'should': 'stay'}})
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': self.metadata,
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': ['command'],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'resources': {
                            'limits': {
                                'cpu': '2m',
                                'memory': '2G'
                            }
                        },
                        'volumeMounts': [],
                        'securityContext': {'runAsUser': 1}
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': []
                }
            }, sanitized_result)

    @mock.patch('uuid.uuid4')
    def test_construct_pod_with_mutation(self, mock_uuid):
        """NOTE(review): body is identical to test_construct_pod above —
        despite the name, no mutation hook is exercised; confirm whether this
        was meant to configure a pod_mutation_hook."""
        mock_uuid.return_value = self.static_uuid
        worker_config = k8s.V1Pod(
            metadata=k8s.V1ObjectMeta(name='gets-overridden-by-dynamic-args',
                                      annotations={'should': 'stay'}),
            spec=k8s.V1PodSpec(containers=[
                k8s.V1Container(name='doesnt-override',
                                resources=k8s.V1ResourceRequirements(
                                    limits={
                                        'cpu': '1m',
                                        'memory': '1G'
                                    }),
                                security_context=k8s.V1SecurityContext(
                                    run_as_user=1))
            ]))
        executor_config = k8s.V1Pod(spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(name='doesnt-override-either',
                            resources=k8s.V1ResourceRequirements(limits={
                                'cpu': '2m',
                                'memory': '2G'
                            }))
        ]))
        result = PodGenerator.construct_pod(
            'dag_id',
            'task_id',
            'pod_id',
            3,
            'date',
            ['command'],
            executor_config,
            worker_config,
            'namespace',
            'uuid',
        )
        sanitized_result = self.k8s_client.sanitize_for_serialization(result)
        self.metadata.update({'annotations': {'should': 'stay'}})
        self.assertEqual(
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': self.metadata,
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': ['command'],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'resources': {
                            'limits': {
                                'cpu': '2m',
                                'memory': '2G'
                            }
                        },
                        'volumeMounts': [],
                        'securityContext': {'runAsUser': 1}
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': []
                }
            }, sanitized_result)

    def test_merge_objects_empty(self):
        """merge_objects with a None/empty side returns the non-empty side."""
        annotations = {'foo1': 'bar1'}
        base_obj = k8s.V1ObjectMeta(annotations=annotations)
        client_obj = None
        res = merge_objects(base_obj, client_obj)
        self.assertEqual(base_obj, res)
        client_obj = k8s.V1ObjectMeta()
        res = merge_objects(base_obj, client_obj)
        self.assertEqual(base_obj, res)
        client_obj = k8s.V1ObjectMeta(annotations=annotations)
        base_obj = None
        res = merge_objects(base_obj, client_obj)
        self.assertEqual(client_obj, res)
        base_obj = k8s.V1ObjectMeta()
        res = merge_objects(base_obj, client_obj)
        self.assertEqual(client_obj, res)

    def test_merge_objects(self):
        """Client fields win; base fields unset on the client are kept."""
        base_annotations = {'foo1': 'bar1'}
        base_labels = {'foo1': 'bar1'}
        client_annotations = {'foo2': 'bar2'}
        base_obj = k8s.V1ObjectMeta(annotations=base_annotations,
                                    labels=base_labels)
        client_obj = k8s.V1ObjectMeta(annotations=client_annotations)
        res = merge_objects(base_obj, client_obj)
        client_obj.labels = base_labels
        self.assertEqual(client_obj, res)

    def test_extend_object_field_empty(self):
        """Extending when one side's list attribute is None keeps the other's."""
        ports = [k8s.V1ContainerPort(container_port=1, name='port')]
        base_obj = k8s.V1Container(name='base_container', ports=ports)
        client_obj = k8s.V1Container(name='client_container')
        res = extend_object_field(base_obj, client_obj, 'ports')
        client_obj.ports = ports
        self.assertEqual(client_obj, res)
        base_obj = k8s.V1Container(name='base_container')
        client_obj = k8s.V1Container(name='base_container', ports=ports)
        res = extend_object_field(base_obj, client_obj, 'ports')
        self.assertEqual(client_obj, res)

    def test_extend_object_field_not_list(self):
        """Extending a non-list attribute raises ValueError (either side)."""
        base_obj = k8s.V1Container(name='base_container', image='image')
        client_obj = k8s.V1Container(name='client_container')
        with self.assertRaises(ValueError):
            extend_object_field(base_obj, client_obj, 'image')
        base_obj = k8s.V1Container(name='base_container')
        client_obj = k8s.V1Container(name='client_container', image='image')
        with self.assertRaises(ValueError):
            extend_object_field(base_obj, client_obj, 'image')

    def test_extend_object_field(self):
        """Lists concatenate base-first, client-second."""
        base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
        base_obj = k8s.V1Container(name='base_container', ports=base_ports)
        client_ports = [
            k8s.V1ContainerPort(container_port=1, name='client_port')
        ]
        client_obj = k8s.V1Container(name='client_container',
                                     ports=client_ports)
        res = extend_object_field(base_obj, client_obj, 'ports')
        client_obj.ports = base_ports + client_ports
        self.assertEqual(client_obj, res)

    def test_reconcile_containers_empty(self):
        """An empty container list on either side yields the other side."""
        base_objs = [k8s.V1Container(name='base_container')]
        client_objs = []
        res = PodGenerator.reconcile_containers(base_objs, client_objs)
        self.assertEqual(base_objs, res)
        client_objs = [k8s.V1Container(name='client_container')]
        base_objs = []
        res = PodGenerator.reconcile_containers(base_objs, client_objs)
        self.assertEqual(client_objs, res)
        res = PodGenerator.reconcile_containers([], [])
        self.assertEqual(res, [])

    def test_reconcile_containers(self):
        """Containers reconcile pairwise: ports merge, unset client fields
        inherit from the base container at the same index."""
        base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
        base_objs = [
            k8s.V1Container(name='base_container1', ports=base_ports),
            k8s.V1Container(name='base_container2', image='base_image'),
        ]
        client_ports = [
            k8s.V1ContainerPort(container_port=2, name='client_port')
        ]
        client_objs = [
            k8s.V1Container(name='client_container1', ports=client_ports),
            k8s.V1Container(name='client_container2', image='client_image'),
        ]
        res = PodGenerator.reconcile_containers(base_objs, client_objs)
        client_objs[0].ports = base_ports + client_ports
        self.assertEqual(client_objs, res)
        base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
        base_objs = [
            k8s.V1Container(name='base_container1', ports=base_ports),
            k8s.V1Container(name='base_container2', image='base_image'),
        ]
        client_ports = [
            k8s.V1ContainerPort(container_port=2, name='client_port')
        ]
        client_objs = [
            k8s.V1Container(name='client_container1', ports=client_ports),
            k8s.V1Container(name='client_container2', stdin=True),
        ]
        res = PodGenerator.reconcile_containers(base_objs, client_objs)
        client_objs[0].ports = base_ports + client_ports
        # second client container had no image, so it inherits the base's
        client_objs[1].image = 'base_image'
        self.assertEqual(client_objs, res)

    def test_reconcile_specs_empty(self):
        """A None spec on either side yields the other side unchanged."""
        base_spec = k8s.V1PodSpec(containers=[])
        client_spec = None
        res = PodGenerator.reconcile_specs(base_spec, client_spec)
        self.assertEqual(base_spec, res)
        base_spec = None
        client_spec = k8s.V1PodSpec(containers=[])
        res = PodGenerator.reconcile_specs(base_spec, client_spec)
        self.assertEqual(client_spec, res)

    def test_reconcile_specs(self):
        """Client spec fields win; unset client fields inherit from the base."""
        base_objs = [
            k8s.V1Container(name='base_container1', image='base_image')
        ]
        client_objs = [k8s.V1Container(name='client_container1')]
        base_spec = k8s.V1PodSpec(priority=1,
                                  active_deadline_seconds=100,
                                  containers=base_objs)
        client_spec = k8s.V1PodSpec(priority=2,
                                    hostname='local',
                                    containers=client_objs)
        res = PodGenerator.reconcile_specs(base_spec, client_spec)
        client_spec.containers = [
            k8s.V1Container(name='client_container1', image='base_image')
        ]
        client_spec.active_deadline_seconds = 100
        self.assertEqual(client_spec, res)

    def test_deserialize_model_file(self):
        """A YAML file path deserializes into the expected sanitized pod."""
        fixture = 'tests/kubernetes/pod.yaml'
        result = PodGenerator.deserialize_model_file(fixture)
        sanitized_res = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(sanitized_res, self.deserialize_result)

    def test_deserialize_model_string(self):
        """An inline YAML string deserializes the same as the file fixture."""
        fixture = """
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo
  namespace: mem-example
spec:
  containers:
    - name: memory-demo-ctr
      image: polinux/stress
      resources:
        limits:
          memory: "200Mi"
        requests:
          memory: "100Mi"
      command: ["stress"]
      args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"]
"""
        result = PodGenerator.deserialize_model_file(fixture)
        sanitized_res = self.k8s_client.sanitize_for_serialization(result)
        self.assertEqual(sanitized_res, self.deserialize_result)

    def test_validate_pod_generator(self):
        """Exactly one of image / pod / pod_template_file may be given."""
        with self.assertRaises(AirflowConfigException):
            PodGenerator(image='k', pod=k8s.V1Pod())
        with self.assertRaises(AirflowConfigException):
            PodGenerator(pod=k8s.V1Pod(), pod_template_file='k')
        with self.assertRaises(AirflowConfigException):
            PodGenerator(image='k', pod_template_file='k')
        PodGenerator(image='k')
        PodGenerator(pod_template_file='tests/kubernetes/pod.yaml')
        PodGenerator(pod=k8s.V1Pod())

    def test_add_custom_label(self):
        """Worker-config labels and annotations survive construct_pod merging."""
        from kubernetes.client import models as k8s
        pod = PodGenerator.construct_pod(
            namespace="test",
            worker_uuid="test",
            pod_id="test",
            dag_id="test",
            task_id="test",
            try_number=1,
            date="23-07-2020",
            command="test",
            kube_executor_config=None,
            worker_config=k8s.V1Pod(metadata=k8s.V1ObjectMeta(
                labels={"airflow-test": "airflow-task-pod"},
                annotations={"my.annotation": "foo"})))
        self.assertIn("airflow-test", pod.metadata.labels)
        self.assertIn("my.annotation", pod.metadata.annotations)
def test_node_selector(self):
    """Both node_selector spellings yield a V1NodeSelector on the pod spec.

    ``node_selector`` takes a ``k8s.V1NodeSelector`` object; the deprecated
    ``node_selectors`` parameter takes the plain camelCase dict.  Either way
    the serialized spec must contain the same ``nodeSelector`` mapping.

    The original test repeated each spelling twice as four verbatim copies of
    the same 14-line operator construction; this keeps all four iterations but
    drives them from one data table, and builds the ApiClient once.
    """
    k8s_api_node_selector = k8s.V1NodeSelector(
        node_selector_terms=[
            k8s.V1NodeSelectorTerm(
                match_expressions=[
                    k8s.V1NodeSelectorRequirement(key="disktype",
                                                  operator="In",
                                                  values=["ssd"])
                ]
            )
        ]
    )
    # Expected serialized (camelCase) form, shared by every case below.
    node_selector = {
        'nodeSelectorTerms': [
            {'matchExpressions': [{'key': 'disktype',
                                   'operator': 'In',
                                   'values': ['ssd']}]}
        ]
    }
    client = ApiClient()
    cases = [
        {'node_selector': k8s_api_node_selector},
        {'node_selector': k8s_api_node_selector},
        # repeat tests using deprecated parameter
        {'node_selectors': node_selector},
        {'node_selectors': node_selector},
    ]
    for selector_kwargs in cases:
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="name",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            cluster_context='default',
            **selector_kwargs,
        )
        result = k.create_pod_request_obj()
        self.assertEqual(type(result.spec.node_selector),
                         k8s.V1NodeSelector)
        self.assertEqual(
            client.sanitize_for_serialization(result)['spec']['nodeSelector'],
            node_selector)