def execute(self, context):
    """Launch the pod described by this operator and wait for it to finish.

    Builds a V1Pod from the operator's fields, runs it via ``PodLauncher``,
    optionally deletes it afterwards, and returns the extracted result.

    :param context: Airflow task context (required by the operator API).
    :return: the result pulled from the pod when ``do_xcom_push`` is enabled.
    :raises AirflowException: if the pod ends in a non-SUCCESS state or
        launching fails for any reason.
    """
    try:
        client = kube_client.get_kube_client(
            in_cluster=self.in_cluster,
            cluster_context=self.cluster_context,
            config_file=self.config_file)

        pod = pod_generator.PodGenerator(
            image=self.image,
            namespace=self.namespace,
            cmds=self.cmds,
            args=self.arguments,
            labels=self.labels,
            name=self.name,
            envs=self.env_vars,
            extract_xcom=self.do_xcom_push,
            image_pull_policy=self.image_pull_policy,
            node_selectors=self.node_selectors,
            annotations=self.annotations,
            affinity=self.affinity,
            image_pull_secrets=self.image_pull_secrets,
            service_account_name=self.service_account_name,
            hostnetwork=self.hostnetwork,
            tolerations=self.tolerations,
            configmaps=self.configmaps,
            security_context=self.security_context,
            dnspolicy=self.dnspolicy,
            resources=self.resources,
            pod=self.full_pod_spec,
        ).gen_pod()

        # Attach the remaining user-supplied kubernetes objects, preserving
        # the original append order.
        for extras in (self.ports,
                       self.pod_runtime_info_envs,
                       self.volumes,
                       self.volume_mounts,
                       self.secrets):
            pod = append_to_pod(pod, extras)

        self.pod = pod

        launcher = pod_launcher.PodLauncher(kube_client=client,
                                            extract_xcom=self.do_xcom_push)
        try:
            (final_state, result) = launcher.run_pod(
                pod,
                startup_timeout=self.startup_timeout_seconds,
                get_logs=self.get_logs)
        finally:
            # Clean up the pod even when run_pod raised, if configured.
            if self.is_delete_operator_pod:
                launcher.delete_pod(pod)
        if final_state != State.SUCCESS:
            raise AirflowException(
                'Pod returned a failure: {state}'.format(
                    state=final_state))
        return result
    except AirflowException as ex:
        # Chain explicitly so the original failure is kept as __cause__
        # instead of only the implicit __context__.
        raise AirflowException(
            'Pod Launching failed: {error}'.format(error=ex)) from ex
def execute(self, context):
    """Build the pod via the incremental PodGenerator API, run it, and
    return its result.

    :param context: Airflow task context (required by the operator API).
    :return: the result pulled from the pod when ``do_xcom_push`` is enabled.
    :raises AirflowException: if the pod ends in a non-SUCCESS state or
        launching fails.
    """
    try:
        client = kube_client.get_kube_client(
            in_cluster=self.in_cluster,
            cluster_context=self.cluster_context,
            config_file=self.config_file)

        # Register ports, mounts and volumes on the generator before the
        # pod object itself is created.
        gen = pod_generator.PodGenerator()
        for exposed_port in self.ports:
            gen.add_port(exposed_port)
        for requested_mount in self.volume_mounts:
            gen.add_mount(requested_mount)
        for requested_volume in self.volumes:
            gen.add_volume(requested_volume)

        pod = gen.make_pod(
            namespace=self.namespace,
            image=self.image,
            pod_id=self.name,
            cmds=self.cmds,
            arguments=self.arguments,
            labels=self.labels,
        )

        # Copy the remaining user-supplied settings straight onto the pod,
        # in the same order the fields were originally assigned.
        pod_fields = {
            'service_account_name': self.service_account_name,
            'secrets': self.secrets,
            'envs': self.env_vars,
            'image_pull_policy': self.image_pull_policy,
            'image_pull_secrets': self.image_pull_secrets,
            'annotations': self.annotations,
            'resources': self.resources,
            'affinity': self.affinity,
            'node_selectors': self.node_selectors,
            'hostnetwork': self.hostnetwork,
            'tolerations': self.tolerations,
            'configmaps': self.configmaps,
            'security_context': self.security_context,
            'pod_runtime_info_envs': self.pod_runtime_info_envs,
            'dnspolicy': self.dnspolicy,
        }
        for field_name, field_value in pod_fields.items():
            setattr(pod, field_name, field_value)

        launcher = pod_launcher.PodLauncher(
            kube_client=client, extract_xcom=self.do_xcom_push)
        try:
            final_state, result = launcher.run_pod(
                pod,
                startup_timeout=self.startup_timeout_seconds,
                get_logs=self.get_logs)
        finally:
            # Clean up the pod regardless of outcome, when configured.
            if self.is_delete_operator_pod:
                launcher.delete_pod(pod)

        if final_state != State.SUCCESS:
            raise AirflowException(
                'Pod returned a failure: {state}'.format(state=final_state))
        return result
    except AirflowException as ex:
        raise AirflowException(
            'Pod Launching failed: {error}'.format(error=ex))
def create_pod_request_obj(self) -> k8s.V1Pod:
    """
    Creates a V1Pod based on user parameters. Note that a `pod` or
    `pod_template_file` will supersede all other values.
    """
    generator = pod_generator.PodGenerator(
        image=self.image,
        namespace=self.namespace,
        cmds=self.cmds,
        args=self.arguments,
        labels=self.labels,
        name=self.name,
        envs=self.env_vars,
        extract_xcom=self.do_xcom_push,
        image_pull_policy=self.image_pull_policy,
        node_selectors=self.node_selectors,
        annotations=self.annotations,
        affinity=self.affinity,
        image_pull_secrets=self.image_pull_secrets,
        service_account_name=self.service_account_name,
        hostnetwork=self.hostnetwork,
        tolerations=self.tolerations,
        configmaps=self.configmaps,
        security_context=self.security_context,
        dnspolicy=self.dnspolicy,
        schedulername=self.schedulername,
        init_containers=self.init_containers,
        restart_policy='Never',
        priority_class_name=self.priority_class_name,
        pod_template_file=self.pod_template_file,
        pod=self.full_pod_spec,
    )
    pod = generator.gen_pod()

    # Collect the remaining user-supplied kubernetes objects (same order as
    # the original concatenation) and attach them in a single pass.
    extra_objects = []
    for group in (self.pod_runtime_info_envs, self.ports, self.resources,
                  self.secrets, self.volumes, self.volume_mounts):
        extra_objects.extend(group)  # type: ignore
    return append_to_pod(pod, extra_objects)
def create_new_pod_for_operator(
        self, labels, launcher) -> Tuple[State, k8s.V1Pod, Optional[str]]:
    """
    Creates a new pod and monitors it for the duration of the task.

    :param labels: labels used to track the pod
    :param launcher: pod launcher that will manage launching and monitoring pods
    :return: tuple of (final pod state, the pod object, xcom result or None)
    """
    if not (self.full_pod_spec or self.pod_template_file):
        # Add Airflow Version to the label
        # And a label to identify that pod is launched by KubernetesPodOperator
        self.labels.update({
            'airflow_version': airflow_version.replace('+', '-'),
            'kubernetes_pod_operator': 'True',
        })
        self.labels.update(labels)
    pod = pod_generator.PodGenerator(
        image=self.image,
        namespace=self.namespace,
        cmds=self.cmds,
        args=self.arguments,
        labels=self.labels,
        name=self.name,
        envs=self.env_vars,
        extract_xcom=self.do_xcom_push,
        image_pull_policy=self.image_pull_policy,
        node_selectors=self.node_selectors,
        annotations=self.annotations,
        affinity=self.affinity,
        image_pull_secrets=self.image_pull_secrets,
        service_account_name=self.service_account_name,
        hostnetwork=self.hostnetwork,
        tolerations=self.tolerations,
        configmaps=self.configmaps,
        security_context=self.security_context,
        dnspolicy=self.dnspolicy,
        schedulername=self.schedulername,
        init_containers=self.init_containers,
        restart_policy='Never',
        priority_class_name=self.priority_class_name,
        pod_template_file=self.pod_template_file,
        pod=self.full_pod_spec,
    ).gen_pod()
    # Attach the remaining user-supplied kubernetes objects in one pass.
    # noinspection PyTypeChecker
    pod = append_to_pod(
        pod,
        self.pod_runtime_info_envs +  # type: ignore
        self.ports +  # type: ignore
        self.resources +  # type: ignore
        self.secrets +  # type: ignore
        self.volumes +  # type: ignore
        self.volume_mounts  # type: ignore
    )
    self.pod = pod
    self.log.debug("Starting pod:\n%s", yaml.safe_dump(pod.to_dict()))
    try:
        launcher.start_pod(pod, startup_timeout=self.startup_timeout_seconds)
        final_state, result = launcher.monitor_pod(pod=pod, get_logs=self.get_logs)
    except AirflowException:
        # Surface pod events to the task log before re-raising, when enabled.
        if self.log_events_on_failure:
            for event in launcher.read_pod_events(pod).items:
                self.log.error("Pod Event: %s - %s",
                               event.reason, event.message)
        raise
    finally:
        # Clean up the pod regardless of the outcome, when configured.
        if self.is_delete_operator_pod:
            launcher.delete_pod(pod)
    return final_state, pod, result
def execute(self, context):
    # Launches the pod, optionally logs failure diagnostics, and returns the
    # xcom result; any AirflowException is re-wrapped as a launching failure.
    try:
        # Only pass in_cluster when explicitly configured so the client
        # helper can apply its own default otherwise.
        if self.in_cluster is not None:
            client = kube_client.get_kube_client(
                in_cluster=self.in_cluster,
                cluster_context=self.cluster_context,
                config_file=self.config_file)
        else:
            client = kube_client.get_kube_client(
                cluster_context=self.cluster_context,
                config_file=self.config_file)

        if not (self.full_pod_spec or self.pod_template_file):
            # Add Airflow Version to the label
            # And a label to identify that pod is launched by KubernetesPodOperator
            self.labels.update({
                'airflow_version': airflow_version.replace('+', '-'),
                'kubernetes_pod_operator': 'True',
            })

        pod = pod_generator.PodGenerator(
            image=self.image,
            namespace=self.namespace,
            cmds=self.cmds,
            args=self.arguments,
            labels=self.labels,
            name=self.name,
            envs=self.env_vars,
            extract_xcom=self.do_xcom_push,
            image_pull_policy=self.image_pull_policy,
            node_selectors=self.node_selectors,
            annotations=self.annotations,
            affinity=self.affinity,
            image_pull_secrets=self.image_pull_secrets,
            service_account_name=self.service_account_name,
            hostnetwork=self.hostnetwork,
            tolerations=self.tolerations,
            configmaps=self.configmaps,
            security_context=self.security_context,
            dnspolicy=self.dnspolicy,
            schedulername=self.schedulername,
            init_containers=self.init_containers,
            restart_policy='Never',
            priority_class_name=self.priority_class_name,
            pod_template_file=self.pod_template_file,
            pod=self.full_pod_spec,
        ).gen_pod()

        # Attach the remaining user-supplied kubernetes objects.
        pod = append_to_pod(
            pod,
            self.pod_runtime_info_envs +
            self.ports +
            self.resources +
            self.secrets +
            self.volumes +
            self.volume_mounts)

        self.pod = pod

        launcher = pod_launcher.PodLauncher(kube_client=client,
                                            extract_xcom=self.do_xcom_push)
        # Pre-seed so the finally block can safely test final_state even if
        # run_pod raises before assigning it.
        final_state, result = None, None
        try:
            (final_state, result) = launcher.run_pod(
                pod,
                startup_timeout=self.startup_timeout_seconds,
                get_logs=self.get_logs)
        finally:
            if final_state != State.SUCCESS:
                # Before deleting the pod we get events and status of the pod
                # (events can be fetched after pod deletion but not statuses).
                # For consistency both are fetched before deletion.
                self._log_pod_events_on_failure(pod, launcher)
                self._log_pod_status_on_failure(pod, launcher)
            if self.is_delete_operator_pod:
                launcher.delete_pod(pod)
        if final_state != State.SUCCESS:
            if self.retry_only_on_pod_launching_failure:
                # NOTE(review): presumably ti.error() marks the task instance
                # failed without consuming a retry — confirm against the
                # TaskInstance API before relying on this.
                self.log.info(
                    'Task failure due to task image, avoiding retries and failing the task.'
                )
                ti = context.get('task_instance')
                ti.error()
            raise AirflowException(
                'Pod returned a failure: {state}'.format(
                    state=final_state))
        return result
    except AirflowException as ex:
        raise AirflowException(
            'Pod Launching failed: {error}'.format(error=ex))
def execute(self, context):
    # Launches the pod and returns the xcom result; on failure, optionally
    # logs pod events and raises a "Pod Launching failed" AirflowException.
    try:
        # Only pass in_cluster when explicitly configured so the client
        # helper can apply its own default otherwise.
        if self.in_cluster is not None:
            client = kube_client.get_kube_client(
                in_cluster=self.in_cluster,
                cluster_context=self.cluster_context,
                config_file=self.config_file)
        else:
            client = kube_client.get_kube_client(
                cluster_context=self.cluster_context,
                config_file=self.config_file)

        if not (self.full_pod_spec or self.pod_template_file):
            # Add Airflow Version to the label
            # And a label to identify that pod is launched by KubernetesPodOperator
            self.labels.update({
                'airflow_version': airflow_version.replace('+', '-'),
                'kubernetes_pod_operator': 'True',
            })

        pod = pod_generator.PodGenerator(
            image=self.image,
            namespace=self.namespace,
            cmds=self.cmds,
            args=self.arguments,
            labels=self.labels,
            name=self.name,
            envs=self.env_vars,
            extract_xcom=self.do_xcom_push,
            image_pull_policy=self.image_pull_policy,
            node_selectors=self.node_selectors,
            annotations=self.annotations,
            affinity=self.affinity,
            image_pull_secrets=self.image_pull_secrets,
            service_account_name=self.service_account_name,
            hostnetwork=self.hostnetwork,
            tolerations=self.tolerations,
            configmaps=self.configmaps,
            security_context=self.security_context,
            dnspolicy=self.dnspolicy,
            schedulername=self.schedulername,
            init_containers=self.init_containers,
            restart_policy='Never',
            priority_class_name=self.priority_class_name,
            pod_template_file=self.pod_template_file,
            pod=self.full_pod_spec,
        ).gen_pod()

        # Attach the remaining user-supplied kubernetes objects.
        pod = append_to_pod(
            pod,
            self.pod_runtime_info_envs +
            self.ports +
            self.resources +
            self.secrets +
            self.volumes +
            self.volume_mounts)

        self.pod = pod

        launcher = pod_launcher.PodLauncher(kube_client=client,
                                            extract_xcom=self.do_xcom_push)
        try:
            (final_state, result) = launcher.run_pod(
                pod,
                startup_timeout=self.startup_timeout_seconds,
                get_logs=self.get_logs)
        except AirflowException:
            # run_pod raised (e.g. startup failure): log pod events before
            # re-raising for the outer handler to wrap.
            if self.log_events_on_failure:
                for event in launcher.read_pod_events(pod).items:
                    self.log.error("Pod Event: %s - %s", event.reason,
                                   event.message)
            raise
        finally:
            if self.is_delete_operator_pod:
                launcher.delete_pod(pod)
        if final_state != State.SUCCESS:
            # NOTE(review): this branch reads events after the pod may have
            # been deleted above; events appear to remain fetchable after
            # deletion (statuses would not be) — confirm.
            if self.log_events_on_failure:
                for event in launcher.read_pod_events(pod).items:
                    self.log.error("Pod Event: %s - %s", event.reason,
                                   event.message)
            raise AirflowException(
                'Pod returned a failure: {state}'.format(
                    state=final_state))
        return result
    except AirflowException as ex:
        raise AirflowException(
            'Pod Launching failed: {error}'.format(error=ex))
def test_pod_mutation_v1_pod(self):
    # Verifies that the V1-pod mutation hook loaded from the settings file
    # rewrites namespace, image, env, ports, mounts and volumes as expected.
    with SettingsContext(SETTINGS_FILE_POD_MUTATION_HOOK_V1_POD,
                         "airflow_local_settings"):
        from airflow import settings
        settings.import_local_settings()  # pylint: ignore
        from airflow.kubernetes.pod_launcher import PodLauncher
        self.mock_kube_client = Mock()
        self.pod_launcher = PodLauncher(kube_client=self.mock_kube_client)
        pod = pod_generator.PodGenerator(
            image="myimage",
            cmds=["foo"],
            namespace="baz",
            volume_mounts=[{
                "name": "foo",
                "mountPath": "/mnt",
                "subPath": "/",
                "readOnly": True
            }],
            volumes=[{
                "name": "foo"
            }]).gen_pod()
        sanitized_pod_pre_mutation = api_client.sanitize_for_serialization(
            pod)
        # Baseline: the pod exactly as the generator produced it.
        self.assertEqual(
            sanitized_pod_pre_mutation, {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'namespace': 'baz'
                },
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': ['foo'],
                        'env': [],
                        'envFrom': [],
                        'image': 'myimage',
                        'name': 'base',
                        'ports': [],
                        'volumeMounts': [{
                            'mountPath': '/mnt',
                            'name': 'foo',
                            'readOnly': True,
                            'subPath': '/'
                        }]
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': [{
                        'name': 'foo'
                    }]
                }
            })

        # Apply Pod Mutation Hook
        pod = self.pod_launcher._mutate_pod_backcompat(pod)

        sanitized_pod_post_mutation = api_client.sanitize_for_serialization(
            pod)
        # After mutation: new namespace/image, injected env var, two ports,
        # an extra secrets mount and its backing volume.
        self.assertEqual(
            sanitized_pod_post_mutation, {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'namespace': 'airflow-tests'
                },
                'spec': {
                    'containers': [{
                        'args': [],
                        'command': ['foo'],
                        'env': [{
                            'name': 'TEST_USER',
                            'value': 'ADMIN'
                        }],
                        'envFrom': [],
                        'image': 'test-image',
                        'name': 'base',
                        'ports': [{
                            'containerPort': 8080
                        }, {
                            'containerPort': 8081
                        }],
                        'volumeMounts': [{
                            'mountPath': '/mnt',
                            'name': 'foo',
                            'readOnly': True,
                            'subPath': '/'
                        }, {
                            'mountPath': '/opt/airflow/secrets/',
                            'name': 'airflow-secrets-mount',
                            'readOnly': True
                        }]
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'volumes': [{
                        'name': 'foo'
                    }, {
                        'name': 'airflow-secrets-mount',
                        'secret': {
                            'secretName': 'airflow-test-secrets'
                        }
                    }]
                }
            })
def test_pod_mutation_to_k8s_pod(self):
    # Verifies the k8s-object mutation hook: affinity, tolerations,
    # resources, init-container security context, env and volumes are all
    # rewritten on the serialized pod.
    with SettingsContext(SETTINGS_FILE_POD_MUTATION_HOOK,
                         "airflow_local_settings"):
        from airflow import settings
        settings.import_local_settings()  # pylint: ignore
        from airflow.kubernetes.pod_launcher import PodLauncher
        self.mock_kube_client = Mock()
        self.pod_launcher = PodLauncher(kube_client=self.mock_kube_client)
        init_container = k8s.V1Container(
            name="init-container",
            volume_mounts=[
                k8s.V1VolumeMount(mount_path="/tmp", name="init-secret")
            ])
        pod = pod_generator.PodGenerator(
            image="foo",
            name="bar",
            namespace="baz",
            image_pull_policy="Never",
            init_containers=[init_container],
            cmds=["foo"],
            args=["/bin/sh", "-c", "touch /tmp/healthy"],
            tolerations=[{
                'effect': 'NoSchedule',
                'key': 'static-pods',
                'operator': 'Equal',
                'value': 'true'
            }],
            volume_mounts=[{
                "name": "foo",
                "mountPath": "/mnt",
                "subPath": "/",
                "readOnly": True
            }],
            security_context=k8s.V1PodSecurityContext(fs_group=0,
                                                      run_as_user=1),
            volumes=[k8s.V1Volume(name="foo")]).gen_pod()
        sanitized_pod_pre_mutation = api_client.sanitize_for_serialization(
            pod)
        # Baseline: the pod exactly as the generator produced it.
        self.assertEqual(
            sanitized_pod_pre_mutation,
            {
                'apiVersion': 'v1',
                'kind': 'Pod',
                'metadata': {
                    'name': mock.ANY,
                    'namespace': 'baz'
                },
                'spec': {
                    'containers': [{
                        'args': ['/bin/sh', '-c', 'touch /tmp/healthy'],
                        'command': ['foo'],
                        'env': [],
                        'envFrom': [],
                        'image': 'foo',
                        'imagePullPolicy': 'Never',
                        'name': 'base',
                        'ports': [],
                        'volumeMounts': [{
                            'mountPath': '/mnt',
                            'name': 'foo',
                            'readOnly': True,
                            'subPath': '/'
                        }]
                    }],
                    'initContainers': [{
                        'name': 'init-container',
                        'volumeMounts': [{
                            'mountPath': '/tmp',
                            'name': 'init-secret'
                        }]
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'tolerations': [{
                        'effect': 'NoSchedule',
                        'key': 'static-pods',
                        'operator': 'Equal',
                        'value': 'true'
                    }],
                    'volumes': [{
                        'name': 'foo'
                    }],
                    'securityContext': {
                        'fsGroup': 0,
                        'runAsUser': 1
                    }
                }
            },
        )

        # Apply Pod Mutation Hook
        pod = self.pod_launcher._mutate_pod_backcompat(pod)

        sanitized_pod_post_mutation = api_client.sanitize_for_serialization(
            pod)
        # After mutation: label added, namespace/image changed, node
        # affinity and an extra toleration injected, resources set, init
        # container gains a securityContext, fsGroup dropped from the pod
        # securityContext, and volumes reordered with additions.
        self.assertEqual(
            sanitized_pod_post_mutation, {
                "apiVersion": "v1",
                "kind": "Pod",
                'metadata': {
                    'labels': {
                        'test_label': 'test_value'
                    },
                    'name': mock.ANY,
                    'namespace': 'airflow-tests'
                },
                'spec': {
                    'affinity': {
                        'nodeAffinity': {
                            'requiredDuringSchedulingIgnoredDuringExecution': {
                                'nodeSelectorTerms': [{
                                    'matchExpressions': [{
                                        'key': 'test/dynamic-pods',
                                        'operator': 'In',
                                        'values': ['true']
                                    }]
                                }]
                            }
                        }
                    },
                    'containers': [{
                        'args': ['/bin/sh', '-c', 'touch /tmp/healthy2'],
                        'command': ['foo'],
                        'env': [{
                            'name': 'TEST_USER',
                            'value': 'ADMIN'
                        }],
                        'image': 'my_image',
                        'imagePullPolicy': 'Never',
                        'name': 'base',
                        'ports': [{
                            'containerPort': 8080
                        }, {
                            'containerPort': 8081
                        }],
                        'resources': {
                            'limits': {
                                'nvidia.com/gpu': '200G'
                            },
                            'requests': {
                                'cpu': '200Mi',
                                'memory': '2G'
                            }
                        },
                        'volumeMounts': [{
                            'mountPath': '/mnt',
                            'name': 'foo',
                            'readOnly': True,
                            'subPath': '/'
                        }, {
                            'mountPath': '/opt/airflow/secrets/',
                            'name': 'airflow-secrets-mount',
                            'readOnly': True
                        }]
                    }],
                    'hostNetwork': False,
                    'imagePullSecrets': [],
                    'initContainers': [{
                        'name': 'init-container',
                        'securityContext': {
                            'runAsGroup': 50000,
                            'runAsUser': 50000
                        },
                        'volumeMounts': [{
                            'mountPath': '/tmp',
                            'name': 'init-secret'
                        }]
                    }],
                    'tolerations': [{
                        'effect': 'NoSchedule',
                        'key': 'static-pods',
                        'operator': 'Equal',
                        'value': 'true'
                    }, {
                        'effect': 'NoSchedule',
                        'key': 'dynamic-pods',
                        'operator': 'Equal',
                        'value': 'true'
                    }],
                    'volumes': [
                        {
                            'name': 'airflow-secrets-mount',
                            'secret': {
                                'secretName': 'airflow-test-secrets'
                            }
                        },
                        {
                            'name': 'bar'
                        },
                        {
                            'name': 'foo'
                        },
                    ],
                    'securityContext': {
                        'runAsUser': 1
                    }
                }
            })
def create_pod_request_obj(self):
    """
    Creates a V1Pod based on user parameters. Note that a `pod` or
    `pod_template_file` will supersede all other values.
    """
    # Start from the template file when given, otherwise from a minimal
    # placeholder pod carrying only a name.
    if self.pod_template_file:
        pod_template = pod_generator.PodGenerator.deserialize_model_file(
            self.pod_template_file)
    else:
        pod_template = k8s.V1Pod(metadata=k8s.V1ObjectMeta(name="name"))

    pod = pod_generator.PodGenerator(
        image=self.image,
        namespace=self.namespace,
        cmds=self.cmds,
        args=self.arguments,
        labels=self.labels,
        name=self.name,
        envs=self.env_vars,
        extract_xcom=self.do_xcom_push,
        image_pull_policy=self.image_pull_policy,
        node_selectors=self.node_selectors,
        annotations=self.annotations,
        affinity=self.affinity,
        image_pull_secrets=self.image_pull_secrets,
        service_account_name=self.service_account_name,
        hostnetwork=self.hostnetwork,
        tolerations=self.tolerations,
        security_context=self.security_context,
        dnspolicy=self.dnspolicy,
        init_containers=self.init_containers,
        restart_policy='Never',
        schedulername=self.schedulername,
        priority_class_name=self.priority_class_name,
    ).gen_pod()

    # Attach the remaining user-supplied kubernetes objects in one pass.
    # noinspection PyTypeChecker
    pod = append_to_pod(
        pod,
        self.pod_runtime_info_envs +  # type: ignore
        self.ports +  # type: ignore
        self.resources +  # type: ignore
        self.secrets +  # type: ignore
        self.volumes +  # type: ignore
        self.volume_mounts  # type: ignore
    )

    # Expose each requested configmap to the base container via envFrom.
    env_from = pod.spec.containers[0].env_from or []
    for configmap in self.configmaps:
        env_from.append(
            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(
                name=configmap)))
    pod.spec.containers[0].env_from = env_from

    # Template values (file and/or full_pod_spec) supersede generated ones.
    if self.full_pod_spec:
        pod_template = PodGenerator.reconcile_pods(pod_template,
                                                   self.full_pod_spec)
    pod = PodGenerator.reconcile_pods(pod_template, pod)

    # NOTE(review): xcom sidecar injection is commented out — with
    # do_xcom_push enabled this pod may lack the expected sidecar; confirm
    # whether this is intentional before shipping.
    # if self.do_xcom_push:
    #     pod = PodGenerator.add_sidecar(pod)
    return pod