Example #1
    def test_make_pod_with_empty_executor_config(self):
        self.kube_config.kube_affinity = self.affinity_config
        self.kube_config.kube_tolerations = self.tolerations_config
        self.kube_config.kube_annotations = self.worker_annotations_config
        self.kube_config.dags_folder = 'dags'
        worker_config = WorkerConfiguration(self.kube_config)
        pod = worker_config.as_pod()

        self.assertTrue(pod.spec.affinity['podAntiAffinity'] is not None)
        self.assertEqual(
            'app', pod.spec.affinity['podAntiAffinity']
            ['requiredDuringSchedulingIgnoredDuringExecution'][0]
            ['labelSelector']['matchExpressions'][0]['key'])

        self.assertEqual(2, len(pod.spec.tolerations))
        self.assertEqual('prod', pod.spec.tolerations[1]['key'])
        self.assertEqual('role-arn',
                         pod.metadata.annotations['iam.amazonaws.com/role'])
        self.assertEqual('value', pod.metadata.annotations['other/annotation'])
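
Several of the examples in this listing reference `self.affinity_config` and `self.tolerations_config` fixtures that are defined in the test class's setUp and not shown here. A plausible reconstruction, assuming plain-dict configs consistent with the assertions above (all values are hypothetical except the asserted 'app' key and 'prod' toleration):

# Hypothetical fixtures reconstructed from the assertions; the real setUp()
# may differ. The shapes mirror the Kubernetes API schema.
affinity_config = {
    'podAntiAffinity': {
        'requiredDuringSchedulingIgnoredDuringExecution': [{
            'topologyKey': 'kubernetes.io/hostname',
            'labelSelector': {
                'matchExpressions': [{
                    'key': 'app',
                    'operator': 'In',
                    'values': ['airflow'],
                }]
            }
        }]
    }
}
tolerations_config = [
    {'key': 'dedicated', 'operator': 'Equal', 'value': 'airflow', 'effect': 'NoSchedule'},
    {'key': 'prod', 'operator': 'Exists'},
]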
Example #2
    def __init__(self, kube_config: Any,
                 task_queue: 'Queue[KubernetesJobType]',
                 result_queue: 'Queue[KubernetesResultsType]',
                 kube_client: client.CoreV1Api, worker_uuid: str):
        super().__init__()
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        self.worker_configuration = WorkerConfiguration(
            kube_config=self.kube_config)
        self._manager = multiprocessing.Manager()
        self.watcher_queue = self._manager.Queue()
        self.worker_uuid = worker_uuid
        self.kube_watcher = self._make_kube_watcher()
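
For context, a hedged sketch of how this constructor might be wired up; the kube config loading and worker UUID below are illustrative assumptions, not taken from this listing:

# Hypothetical wiring of the scheduler; assumes an Airflow release where
# AirflowKubernetesScheduler and KubeConfig live in this module (see the
# import in Example #25 below).
import multiprocessing
from kubernetes import client, config
from airflow.executors.kubernetes_executor import AirflowKubernetesScheduler, KubeConfig

config.load_kube_config()          # or load_incluster_config() inside a pod
manager = multiprocessing.Manager()
scheduler = AirflowKubernetesScheduler(
    kube_config=KubeConfig(),
    task_queue=manager.Queue(),
    result_queue=manager.Queue(),
    kube_client=client.CoreV1Api(),
    worker_uuid='0b5e1b5c-assumed-uuid',
)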
Example #3
    def test_make_pod_run_as_user_0(self):
        # Tests that a pod created with run_as_user=0 actually gets that in its config
        self.kube_config.worker_run_as_user = 0
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None
        self.kube_config.worker_fs_group = None

        worker_config = WorkerConfiguration(self.kube_config)
        kube_executor_config = KubernetesExecutorConfig(annotations=[],
                                                        volumes=[],
                                                        volume_mounts=[])

        pod = worker_config.make_pod("default", str(uuid.uuid4()),
                                     "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1,
                                     "bash -c 'ls /'", kube_executor_config)

        self.assertEqual(0, pod.security_context['runAsUser'])
Example #4
    def test_make_pod_with_empty_executor_config(self):
        self.kube_config.kube_affinity = self.affinity_config
        self.kube_config.kube_tolerations = self.tolerations_config
        self.kube_config.dags_folder = 'dags'
        worker_config = WorkerConfiguration(self.kube_config)

        pod = worker_config.make_pod("default", str(uuid.uuid4()), "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1, "bash -c 'ls /'")

        self.assertTrue(pod.spec.affinity['podAntiAffinity'] is not None)
        self.assertEqual('app',
                         pod.spec.affinity['podAntiAffinity']
                         ['requiredDuringSchedulingIgnoredDuringExecution'][0]
                         ['labelSelector']
                         ['matchExpressions'][0]
                         ['key'])

        self.assertEqual(2, len(pod.spec.tolerations))
        self.assertEqual('prod', pod.spec.tolerations[1]['key'])
Example #5
    def test_init_environment_using_git_sync_ssh_without_known_hosts(self):
        # Tests that the init environment created with the git-sync SSH
        # authentication option is correct without a known_hosts file
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_ssh_secret_name = 'airflow-secrets'
        self.kube_config.git_ssh_known_hosts_configmap_name = None
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None

        worker_config = WorkerConfiguration(self.kube_config)
        init_containers = worker_config._get_init_containers()

        self.assertTrue(init_containers)  # check not empty
        env = init_containers[0]['env']

        self.assertTrue({'name': 'GIT_SSH_KEY_FILE', 'value': '/etc/git-secret/ssh'} in env)
        self.assertTrue({'name': 'GIT_KNOWN_HOSTS', 'value': 'false'} in env)
        self.assertTrue({'name': 'GIT_SYNC_SSH', 'value': 'true'} in env)
Example #6
    def test_worker_git_dags(self):
        # Tests persistence volume config created when `git_repo` is set
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_folder = '/usr/local/airflow/dags'
        self.kube_config.worker_dags_folder = '/usr/local/airflow/dags'

        self.kube_config.git_sync_container_repository = 'gcr.io/google-containers/git-sync-amd64'
        self.kube_config.git_sync_container_tag = 'v2.0.5'
        self.kube_config.git_sync_container = 'gcr.io/google-containers/git-sync-amd64:v2.0.5'
        self.kube_config.git_sync_init_container_name = 'git-sync-clone'
        self.kube_config.git_subpath = 'dags_folder'
        self.kube_config.git_sync_root = '/git'
        self.kube_config.git_dags_folder_mount_point = '/usr/local/airflow/dags/repo/dags_folder'

        worker_config = WorkerConfiguration(self.kube_config)
        volumes, volume_mounts = worker_config._get_volumes_and_mounts()

        dag_volume = [
            volume for volume in volumes.values()
            if volume['name'] == 'airflow-dags'
        ]
        dag_volume_mount = [
            mount for mount in volume_mounts.values()
            if mount['name'] == 'airflow-dags'
        ]

        self.assertTrue('emptyDir' in dag_volume[0])
        self.assertEqual(self.kube_config.git_dags_folder_mount_point,
                         dag_volume_mount[0]['mountPath'])
        self.assertTrue(dag_volume_mount[0]['readOnly'])

        init_container = worker_config._get_init_containers()[0]
        init_container_volume_mount = [
            mount for mount in init_container['volumeMounts']
            if mount['name'] == 'airflow-dags'
        ]

        self.assertEqual('git-sync-clone', init_container['name'])
        self.assertEqual('gcr.io/google-containers/git-sync-amd64:v2.0.5',
                         init_container['image'])
        self.assertEqual(1, len(init_container_volume_mount))
        self.assertFalse(init_container_volume_mount[0]['readOnly'])
Example #7
    def test_make_pod_assert_labels(self):
        # Tests the pod created has all the expected labels set
        self.kube_config.dags_folder = 'dags'

        worker_config = WorkerConfiguration(self.kube_config)
        pod = worker_config.make_pod("default", "sample-uuid", "test_pod_id",
                                     "test_dag_id", "test_task_id",
                                     "2019-11-21 11:08:22.920875", 1,
                                     "bash -c 'ls /'")
        expected_labels = {
            'airflow-worker': 'sample-uuid',
            'airflow_version': airflow_version.replace('+', '-'),
            'dag_id': 'test_dag_id',
            'execution_date': '2019-11-21 11:08:22.920875',
            'kubernetes_executor': 'True',
            'task_id': 'test_task_id',
            'try_number': '1'
        }
        self.assertEqual(pod.metadata.labels, expected_labels)
Example #8
    def test_worker_generate_dag_volume_mount_path(self):
        self.kube_config.git_dags_folder_mount_point = '/root/airflow/git/dags'
        self.kube_config.dags_folder = '/root/airflow/dags'
        worker_config = WorkerConfiguration(self.kube_config)

        self.kube_config.dags_volume_claim = 'airflow-dags'
        self.kube_config.dags_volume_host = ''
        dag_volume_mount_path = worker_config.generate_dag_volume_mount_path()
        self.assertEqual(dag_volume_mount_path, self.kube_config.dags_folder)

        self.kube_config.dags_volume_claim = ''
        self.kube_config.dags_volume_host = '/host/airflow/dags'
        dag_volume_mount_path = worker_config.generate_dag_volume_mount_path()
        self.assertEqual(dag_volume_mount_path, self.kube_config.dags_folder)

        self.kube_config.dags_volume_claim = ''
        self.kube_config.dags_volume_host = ''
        dag_volume_mount_path = worker_config.generate_dag_volume_mount_path()
        self.assertEqual(dag_volume_mount_path,
                         self.kube_config.git_dags_folder_mount_point)
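
The three assertions above pin down a precedence rule for the DAG mount path; a minimal sketch of the logic they imply (not the actual implementation):

def generate_dag_volume_mount_path_sketch(kube_config):
    # Sketch of the precedence the test implies: a PVC or a host path mounts
    # the DAGs at dags_folder; otherwise git-sync's mount point is used.
    if kube_config.dags_volume_claim or kube_config.dags_volume_host:
        return kube_config.dags_folder
    return kube_config.git_dags_folder_mount_point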
Example #9
    def test_worker_container_dags(self):
        # Tests that the 'airflow-dags' persistence volume is NOT created when `dags_in_image` is set
        self.kube_config.dags_in_image = True
        self.kube_config.dags_folder = 'dags'
        worker_config = WorkerConfiguration(self.kube_config)
        volumes = worker_config._get_volumes()
        volume_mounts = worker_config._get_volume_mounts()

        dag_volume = [
            volume for volume in volumes if volume.name == 'airflow-dags'
        ]
        dag_volume_mount = [
            mount for mount in volume_mounts if mount.name == 'airflow-dags'
        ]

        init_containers = worker_config._get_init_containers()

        self.assertEqual(0, len(dag_volume))
        self.assertEqual(0, len(dag_volume_mount))
        self.assertEqual(0, len(init_containers))
Example #10
    def test_set_airflow_local_settings_configmap(self):
        """
        Test that airflow_local_settings.py can be set via configmap by
        checking volume & volume-mounts are set correctly.
        """
        self.kube_config.airflow_home = '/usr/local/airflow'
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.airflow_local_settings_configmap = 'airflow-configmap'
        self.kube_config.dags_folder = '/workers/path/to/dags'

        worker_config = WorkerConfiguration(self.kube_config)
        pod = worker_config.make_pod("default", str(uuid.uuid4()),
                                     "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1,
                                     "bash -c 'ls /'")

        airflow_config_volume = [
            volume for volume in pod.spec.volumes
            if volume.name == 'airflow-config'
        ]
        # Test that volume_name is found
        self.assertEqual(1, len(airflow_config_volume))

        # Test that config map exists
        self.assertEqual("airflow-configmap",
                         airflow_config_volume[0].config_map.name)

        # Test Volume Mount exists
        local_setting_volume_mount = [
            volume_mount
            for volume_mount in pod.spec.containers[0].volume_mounts
            if volume_mount.name == 'airflow-config'
        ]
        self.assertEqual(1, len(local_setting_volume_mount))

        # Test Mount Path is set correctly.
        self.assertEqual('/usr/local/airflow/config/airflow_local_settings.py',
                         local_setting_volume_mount[0].mount_path)
        self.assertEqual(True, local_setting_volume_mount[0].read_only)
        self.assertEqual('airflow_local_settings.py',
                         local_setting_volume_mount[0].sub_path)
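
For reference, the configmap this test assumes could be created with the Kubernetes Python client along these lines; the data values are placeholders, and the key for the settings file must match the sub_path asserted above:

from kubernetes import client, config

config.load_kube_config()

# Hypothetical setup: one configmap carries both airflow.cfg and
# airflow_local_settings.py, matching the test's configuration.
configmap = client.V1ConfigMap(
    metadata=client.V1ObjectMeta(name='airflow-configmap'),
    data={
        'airflow.cfg': '[core]\n',  # placeholder contents
        'airflow_local_settings.py': 'def pod_mutation_hook(pod):\n    pass\n',
    },
)
client.CoreV1Api().create_namespaced_config_map(namespace='default', body=configmap)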
Example #11
    def test_make_pod_git_sync_credentials_secret(self):
        # Tests that git_sync_credentials_secret is passed into the pod's init container
        self.kube_config.git_sync_credentials_secret = 'airflow-git-creds-secret'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None
        self.kube_config.worker_fs_group = None

        worker_config = WorkerConfiguration(self.kube_config)
        kube_executor_config = KubernetesExecutorConfig(annotations=[],
                                                        volumes=[],
                                                        volume_mounts=[])

        pod = worker_config.make_pod("default", str(uuid.uuid4()), "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1, "bash -c 'ls /'",
                                     kube_executor_config)

        username_env = {
            'name': 'GIT_SYNC_USERNAME',
            'valueFrom': {
                'secretKeyRef': {
                    'name': self.kube_config.git_sync_credentials_secret,
                    'key': 'GIT_SYNC_USERNAME'
                }
            }
        }
        password_env = {
            'name': 'GIT_SYNC_PASSWORD',
            'valueFrom': {
                'secretKeyRef': {
                    'name': self.kube_config.git_sync_credentials_secret,
                    'key': 'GIT_SYNC_PASSWORD'
                }
            }
        }

        self.assertIn(username_env, pod.init_containers[0]["env"],
                      'The username env for git credentials did not get into the init container')

        self.assertIn(password_env, pod.init_containers[0]["env"],
                      'The password env for git credentials did not get into the init container')
Example #12
    def test_worker_with_subpaths(self):
        self.kube_config.dags_volume_subpath = 'dags'
        self.kube_config.logs_volume_subpath = 'logs'
        self.kube_config.dags_volume_claim = 'dags'
        self.kube_config.dags_folder = 'dags'
        worker_config = WorkerConfiguration(self.kube_config)
        volumes = worker_config._get_volumes()
        volume_mounts = worker_config._get_volume_mounts()

        for volume in volumes:
            self.assertNotIn(
                'subPath', self.api_client.sanitize_for_serialization(volume),
                "subPath isn't valid configuration for a volume")

        for volume_mount in volume_mounts:
            if volume_mount.name != 'airflow-config':
                self.assertIn(
                    'subPath',
                    self.api_client.sanitize_for_serialization(volume_mount),
                    "subPath should've been passed to volumeMount configuration"
                )
Example #13
    def test_make_pod_with_executor_config(self):
        self.kube_config.dags_folder = 'dags'
        worker_config = WorkerConfiguration(self.kube_config)
        config_pod = PodGenerator(
            image='',
            affinity=self.affinity_config,
            tolerations=self.tolerations_config,
        ).gen_pod()

        pod = worker_config.as_pod()

        result = PodGenerator.reconcile_pods(pod, config_pod)

        self.assertTrue(result.spec.affinity['podAntiAffinity'] is not None)
        self.assertEqual(
            'app', result.spec.affinity['podAntiAffinity']
            ['requiredDuringSchedulingIgnoredDuringExecution'][0]
            ['labelSelector']['matchExpressions'][0]['key'])

        self.assertEqual(2, len(result.spec.tolerations))
        self.assertEqual('prod', result.spec.tolerations[1]['key'])
Example #14
    def test_make_pod_git_sync_credentials_secret(self):
        # Tests that git_sync_credentials_secret is passed into the pod's init container
        self.kube_config.git_sync_credentials_secret = 'airflow-git-creds-secret'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None
        self.kube_config.worker_fs_group = None
        self.kube_config.git_dags_folder_mount_point = 'dags'
        self.kube_config.git_sync_dest = 'repo'
        self.kube_config.git_subpath = 'path'

        worker_config = WorkerConfiguration(self.kube_config)

        pod = worker_config.make_pod("default", str(uuid.uuid4()),
                                     "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1,
                                     "bash -c 'ls /'")

        username_env = k8s.V1EnvVar(
            name='GIT_SYNC_USERNAME',
            value_from=k8s.V1EnvVarSource(
                secret_key_ref=k8s.V1SecretKeySelector(
                    name=self.kube_config.git_sync_credentials_secret,
                    key='GIT_SYNC_USERNAME')))
        password_env = k8s.V1EnvVar(
            name='GIT_SYNC_PASSWORD',
            value_from=k8s.V1EnvVarSource(
                secret_key_ref=k8s.V1SecretKeySelector(
                    name=self.kube_config.git_sync_credentials_secret,
                    key='GIT_SYNC_PASSWORD')))

        self.assertIn(
            username_env, pod.spec.init_containers[0].env,
            'The username env for git credentials did not get into the init container'
        )

        self.assertIn(
            password_env, pod.spec.init_containers[0].env,
            'The password env for git credentials did not get into the init container'
        )
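
The `airflow-git-creds-secret` referenced by both variants of this test is assumed to already exist in the cluster; a sketch of creating it with the Kubernetes Python client (illustrative credential values):

from kubernetes import client, config

config.load_kube_config()

# Hypothetical secret; git-sync reads its username and password from the
# GIT_SYNC_USERNAME / GIT_SYNC_PASSWORD keys referenced above.
secret = client.V1Secret(
    metadata=client.V1ObjectMeta(name='airflow-git-creds-secret'),
    string_data={
        'GIT_SYNC_USERNAME': 'git-user',
        'GIT_SYNC_PASSWORD': 'git-password',
    },
)
client.CoreV1Api().create_namespaced_secret(namespace='default', body=secret)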
Example #15
    def test_get_secrets(self):
        # Test when secretRef is None and kube_secrets is not empty
        self.kube_config.kube_secrets = {
            'AWS_SECRET_KEY': 'airflow-secret=aws_secret_key',
            'POSTGRES_PASSWORD': 'airflow-secret=postgres_credentials'
        }
        self.kube_config.env_from_secret_ref = None
        worker_config = WorkerConfiguration(self.kube_config)
        secrets = worker_config._get_secrets()
        secrets.sort(key=lambda secret: secret.deploy_target)
        expected = [
            Secret('env', 'AWS_SECRET_KEY', 'airflow-secret',
                   'aws_secret_key'),
            Secret('env', 'POSTGRES_PASSWORD', 'airflow-secret',
                   'postgres_credentials')
        ]
        self.assertListEqual(expected, secrets)

        # Test when secret is not empty and kube_secrets is empty dict
        self.kube_config.kube_secrets = {}
        self.kube_config.env_from_secret_ref = 'secret_a,secret_b'
        worker_config = WorkerConfiguration(self.kube_config)
        secrets = worker_config._get_secrets()
        expected = [
            Secret('env', None, 'secret_a'),
            Secret('env', None, 'secret_b')
        ]
        self.assertListEqual(expected, secrets)
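
The `ENV_VAR: secret_name=secret_key` convention in kube_secrets maps each environment variable to a key inside a named Kubernetes secret. A minimal sketch of the parsing these expectations imply (the import path for Secret varies by Airflow version; this is not the real implementation):

from airflow.kubernetes.secret import Secret  # assumed import path

def get_secrets_sketch(kube_config):
    # 'AWS_SECRET_KEY': 'airflow-secret=aws_secret_key' becomes
    # Secret('env', 'AWS_SECRET_KEY', 'airflow-secret', 'aws_secret_key').
    secrets = [
        Secret('env', env_var, *spec.split('='))
        for env_var, spec in (kube_config.kube_secrets or {}).items()
    ]
    # env_from_secret_ref names whole secrets whose keys are all injected.
    if kube_config.env_from_secret_ref:
        secrets.extend(
            Secret('env', None, name.strip())
            for name in kube_config.env_from_secret_ref.split(',')
        )
    return secrets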
Example #16
    def test_make_pod_with_executor_config(self):
        worker_config = WorkerConfiguration(self.kube_config)
        kube_executor_config = KubernetesExecutorConfig(
            affinity=self.affinity_config,
            tolerations=self.tolerations_config,
            annotations=[],
            volumes=[],
            volume_mounts=[])

        pod = worker_config.make_pod("default", str(uuid.uuid4()),
                                     "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1,
                                     "bash -c 'ls /'", kube_executor_config)

        self.assertTrue(pod.affinity['podAntiAffinity'] is not None)
        self.assertEqual(
            'app', pod.affinity['podAntiAffinity']
            ['requiredDuringSchedulingIgnoredDuringExecution'][0]
            ['labelSelector']['matchExpressions'][0]['key'])

        self.assertEqual(2, len(pod.tolerations))
        self.assertEqual('prod', pod.tolerations[1]['key'])
Example #17
    def test_worker_pvc_dags(self):
        # Tests persistence volume config created when `dags_volume_claim` is set
        self.kube_config.dags_volume_claim = 'airflow-dags'
        self.kube_config.dags_folder = 'dags'
        worker_config = WorkerConfiguration(self.kube_config)
        volumes = worker_config._get_volumes()
        volume_mounts = worker_config._get_volume_mounts()

        init_containers = worker_config._get_init_containers()

        dag_volume = [
            volume for volume in volumes if volume.name == 'airflow-dags'
        ]
        dag_volume_mount = [
            mount for mount in volume_mounts if mount.name == 'airflow-dags'
        ]

        self.assertEqual('airflow-dags',
                         dag_volume[0].persistent_volume_claim.claim_name)
        self.assertEqual(1, len(dag_volume_mount))
        self.assertTrue(dag_volume_mount[0].read_only)
        self.assertEqual(0, len(init_containers))
Example #18
    def test_make_pod_git_sync_rev(self):
        # Tests that git_sync_rev is passed into the pod's init container
        self.kube_config.git_sync_rev = 'sampletag'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None
        self.kube_config.worker_fs_group = None
        self.kube_config.git_dags_folder_mount_point = 'dags'
        self.kube_config.git_sync_dest = 'repo'
        self.kube_config.git_subpath = 'path'

        worker_config = WorkerConfiguration(self.kube_config)

        pod = worker_config.as_pod()

        rev_env = k8s.V1EnvVar(
            name='GIT_SYNC_REV',
            value=self.kube_config.git_sync_rev,
        )

        self.assertIn(rev_env, pod.spec.init_containers[0].env,
                      'The git_sync_rev env did not get into the init container')
Example #19
    def test_worker_pvc_dags(self):
        # Tests persistence volume config created when `dags_volume_claim` is set
        self.kube_config.dags_volume_claim = 'airflow-dags'

        worker_config = WorkerConfiguration(self.kube_config)
        volumes, volume_mounts = worker_config._get_volumes_and_mounts()

        init_containers = worker_config._get_init_containers()

        dag_volume = [
            volume for volume in volumes.values()
            if volume['name'] == 'airflow-dags'
        ]
        dag_volume_mount = [
            mount for mount in volume_mounts.values()
            if mount['name'] == 'airflow-dags'
        ]

        self.assertEqual('airflow-dags',
                         dag_volume[0]['persistentVolumeClaim']['claimName'])
        self.assertEqual(1, len(dag_volume_mount))
        self.assertTrue(dag_volume_mount[0]['readOnly'])
        self.assertEqual(0, len(init_containers))
Example #20
    def test_make_pod_git_sync_ssh_with_known_hosts(self):
        # Tests that the pod created with the git-sync SSH authentication option is correct with known hosts
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_ssh_secret_name = 'airflow-secrets'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None

        worker_config = WorkerConfiguration(self.kube_config)

        init_containers = worker_config._get_init_containers()
        git_ssh_known_hosts_file = next((x.value for x in init_containers[0].env
                                         if x.name == 'GIT_SSH_KNOWN_HOSTS_FILE'), None)

        volume_mount_ssh_known_hosts_file = next(
            (x.mount_path for x in init_containers[0].volume_mounts
             if x.name == worker_config.git_sync_ssh_known_hosts_volume_name),
            None)
        self.assertTrue(git_ssh_known_hosts_file)
        self.assertTrue(volume_mount_ssh_known_hosts_file)
        self.assertEqual(git_ssh_known_hosts_file,
                         volume_mount_ssh_known_hosts_file,
                         'The location where the git known hosts file is mounted'
                         ' needs to be the same as the GIT_SSH_KNOWN_HOSTS_FILE path')
Example #21
    def test_init_environment_using_git_sync_ssh_with_known_hosts(self):
        # Tests that the init environment created with the git-sync SSH
        # authentication option is correct with a known_hosts file
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_ssh_key_secret_name = 'airflow-secrets'
        self.kube_config.git_ssh_known_hosts_configmap_name = 'airflow-configmap'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None

        worker_config = WorkerConfiguration(self.kube_config)
        init_containers = worker_config._get_init_containers()

        self.assertTrue(init_containers)  # check not empty
        env = init_containers[0].env

        self.assertIn(k8s.V1EnvVar(name='GIT_SSH_KEY_FILE', value='/etc/git-secret/ssh'), env)
        self.assertIn(k8s.V1EnvVar(name='GIT_SYNC_ADD_USER', value='true'), env)
        self.assertIn(k8s.V1EnvVar(name='GIT_KNOWN_HOSTS', value='true'), env)
        self.assertIn(k8s.V1EnvVar(
            name='GIT_SSH_KNOWN_HOSTS_FILE',
            value='/etc/git-secret/known_hosts'
        ), env)
        self.assertIn(k8s.V1EnvVar(name='GIT_SYNC_SSH', value='true'), env)
Example #22
    def test_worker_git_dags(self):
        # Tests persistence volume config created when `git_repo` is set
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_folder = '/usr/local/airflow/dags'
        self.kube_config.worker_dags_folder = '/usr/local/airflow/dags'

        self.kube_config.git_sync_container_repository = 'gcr.io/google-containers/git-sync-amd64'
        self.kube_config.git_sync_container_tag = 'v2.0.5'
        self.kube_config.git_sync_container = 'gcr.io/google-containers/git-sync-amd64:v2.0.5'
        self.kube_config.git_sync_init_container_name = 'git-sync-clone'
        self.kube_config.git_subpath = 'dags_folder'
        self.kube_config.git_sync_root = '/git'
        self.kube_config.git_sync_run_as_user = 65533
        self.kube_config.git_dags_folder_mount_point = '/usr/local/airflow/dags/repo/dags_folder'

        worker_config = WorkerConfiguration(self.kube_config)
        volumes = worker_config._get_volumes()
        volume_mounts = worker_config._get_volume_mounts()

        dag_volume = [volume for volume in volumes if volume.name == 'airflow-dags']
        dag_volume_mount = [mount for mount in volume_mounts if mount.name == 'airflow-dags']

        self.assertIsNotNone(dag_volume[0].empty_dir)
        self.assertEqual(self.kube_config.git_dags_folder_mount_point, dag_volume_mount[0].mount_path)
        self.assertTrue(dag_volume_mount[0].read_only)

        init_container = worker_config._get_init_containers()[0]
        init_container_volume_mount = [mount for mount in init_container.volume_mounts
                                       if mount.name == 'airflow-dags']

        self.assertEqual('git-sync-clone', init_container.name)
        self.assertEqual('gcr.io/google-containers/git-sync-amd64:v2.0.5', init_container.image)
        self.assertEqual(1, len(init_container_volume_mount))
        self.assertFalse(init_container_volume_mount[0].read_only)
        self.assertEqual(65533, init_container.security_context.run_as_user)
Example #23
    def __init__(self, kube_config, task_queue, result_queue, kube_client,
                 worker_uuid):
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        self.worker_configuration_pod = WorkerConfiguration(
            kube_config=self.kube_config).as_pod()
        self._manager = multiprocessing.Manager()
        self.watcher_queue = self._manager.Queue()
        self.worker_uuid = worker_uuid
        self.kube_watcher = self._make_kube_watcher()
Example #24
    def test_get_configmaps(self):
        # Test when configmap is empty
        self.kube_config.env_from_configmap_ref = ''
        worker_config = WorkerConfiguration(self.kube_config)
        configmaps = worker_config._get_configmaps()
        self.assertListEqual([], configmaps)

        # Test when configmap is not empty
        self.kube_config.env_from_configmap_ref = 'configmap_a,configmap_b'
        worker_config = WorkerConfiguration(self.kube_config)
        configmaps = worker_config._get_configmaps()
        self.assertListEqual(['configmap_a', 'configmap_b'], configmaps)
Example #25
def generate_pod_yaml(args):
    """Generates yaml files for each task in the DAG. Used for testing output of KubernetesExecutor"""

    from kubernetes.client.api_client import ApiClient

    from airflow.executors.kubernetes_executor import AirflowKubernetesScheduler, KubeConfig
    from airflow.kubernetes import pod_generator
    from airflow.kubernetes.pod_generator import PodGenerator
    from airflow.kubernetes.worker_configuration import WorkerConfiguration
    from airflow.settings import pod_mutation_hook

    execution_date = args.execution_date
    dag = get_dag(subdir=args.subdir, dag_id=args.dag_id)
    yaml_output_path = args.output_path
    kube_config = KubeConfig()
    for task in dag.tasks:
        ti = TaskInstance(task, execution_date)
        pod = PodGenerator.construct_pod(
            dag_id=args.dag_id,
            task_id=ti.task_id,
            pod_id=AirflowKubernetesScheduler._create_pod_id(  # pylint: disable=W0212
                args.dag_id, ti.task_id),
            try_number=ti.try_number,
            kube_image=kube_config.kube_image,
            date=ti.execution_date,
            command=ti.command_as_list(),
            pod_override_object=PodGenerator.from_obj(ti.executor_config),
            worker_uuid="worker-config",
            namespace=kube_config.executor_namespace,
            base_worker_pod=WorkerConfiguration(
                kube_config=kube_config).as_pod())
        pod_mutation_hook(pod)
        api_client = ApiClient()
        date_string = pod_generator.datetime_to_label_safe_datestring(
            execution_date)
        yaml_file_name = f"{args.dag_id}_{ti.task_id}_{date_string}.yml"
        os.makedirs(os.path.dirname(yaml_output_path +
                                    "/airflow_yaml_output/"),
                    exist_ok=True)
        with open(yaml_output_path + "/airflow_yaml_output/" + yaml_file_name,
                  "w") as output:
            sanitized_pod = api_client.sanitize_for_serialization(pod)
            output.write(yaml.dump(sanitized_pod))
    print(
        f"YAML output can be found at {yaml_output_path}/airflow_yaml_output/")
Example #26
    def test_kubernetes_environment_variables(self):
        # Tests the kubernetes environment variables get copied into the worker pods
        input_environment = {'ENVIRONMENT': 'prod', 'LOG_LEVEL': 'warning'}
        self.kube_config.kube_env_vars = input_environment
        worker_config = WorkerConfiguration(self.kube_config)
        env = worker_config._get_environment()
        for key in input_environment:
            self.assertIn(key, env)
            self.assertIn(input_environment[key], env.values())

        core_executor = 'AIRFLOW__CORE__EXECUTOR'
        input_environment = {core_executor: 'NotLocalExecutor'}
        self.kube_config.kube_env_vars = input_environment
        worker_config = WorkerConfiguration(self.kube_config)
        env = worker_config._get_environment()
        self.assertEqual(env[core_executor], 'LocalExecutor')
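
A minimal sketch of the behavior these assertions imply: configured env vars are copied through to the worker, but AIRFLOW__CORE__EXECUTOR is pinned so the worker pod itself runs its task with the LocalExecutor:

def get_environment_sketch(kube_config):
    # Sketch only, not the real implementation: copy the configured env vars,
    # then force the worker to use LocalExecutor regardless of the input.
    env = dict(kube_config.kube_env_vars or {})
    env['AIRFLOW__CORE__EXECUTOR'] = 'LocalExecutor'
    return env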
Example #27
    def test_get_env_from(self):
        # Test when configmap is empty
        self.kube_config.env_from_configmap_ref = ''
        worker_config = WorkerConfiguration(self.kube_config)
        configmaps = worker_config._get_env_from()
        self.assertListEqual([], configmaps)

        # Test when configmap is not empty
        self.kube_config.env_from_configmap_ref = 'configmap_a,configmap_b'
        self.kube_config.env_from_secret_ref = 'secretref_a,secretref_b'
        worker_config = WorkerConfiguration(self.kube_config)
        configmaps = worker_config._get_env_from()
        self.assertListEqual([
            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='configmap_a')),
            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='configmap_b')),
            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name='secretref_a')),
            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name='secretref_b'))
        ], configmaps)
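
A sketch of the list construction this test implies, assuming comma-separated reference strings and the kubernetes client models (configmap sources first, then secret sources, matching the asserted ordering):

from kubernetes.client import models as k8s

def get_env_from_sketch(kube_config):
    # Sketch only: turn comma-separated configmap/secret names into
    # V1EnvFromSource entries.
    sources = []
    if kube_config.env_from_configmap_ref:
        sources.extend(
            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=name.strip()))
            for name in kube_config.env_from_configmap_ref.split(','))
    if kube_config.env_from_secret_ref:
        sources.extend(
            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name=name.strip()))
            for name in kube_config.env_from_secret_ref.split(','))
    return sources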
Example #28
class AirflowKubernetesScheduler(LoggingMixin):
    def __init__(self, kube_config, task_queue, result_queue, kube_client,
                 worker_uuid):
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        self.worker_configuration = WorkerConfiguration(
            kube_config=self.kube_config)
        self._manager = multiprocessing.Manager()
        self.watcher_queue = self._manager.Queue()
        self.worker_uuid = worker_uuid
        self.kube_watcher = self._make_kube_watcher()

    def _make_kube_watcher(self):
        resource_version = KubeResourceVersion.get_current_resource_version()
        watcher = KubernetesJobWatcher(self.namespace, self.watcher_queue,
                                       resource_version, self.worker_uuid,
                                       self.kube_config)
        watcher.start()
        return watcher

    def _health_check_kube_watcher(self):
        if self.kube_watcher.is_alive():
            pass
        else:
            self.log.error('Error while health checking kube watcher process. '
                           'Process died for unknown reasons')
            self.kube_watcher = self._make_kube_watcher()

    def run_next(self, next_job):
        """

        The run_next command will check the task_queue for any un-run jobs.
        It will then create a unique job-id, launch that job in the cluster,
        and store relevant info in the current_jobs map so we can track the job's
        status
        """
        self.log.info('Kubernetes job is %s', str(next_job))
        key, command, kube_executor_config = next_job
        dag_id, task_id, execution_date, try_number = key
        self.log.debug("Kubernetes running for command %s", command)
        self.log.debug("Kubernetes launching image %s",
                       self.kube_config.kube_image)
        pod = self.worker_configuration.make_pod(
            namespace=self.namespace,
            worker_uuid=self.worker_uuid,
            pod_id=self._create_pod_id(dag_id, task_id),
            dag_id=self._make_safe_label_value(dag_id),
            task_id=self._make_safe_label_value(task_id),
            try_number=try_number,
            execution_date=self._datetime_to_label_safe_datestring(
                execution_date),
            airflow_command=command,
            kube_executor_config=kube_executor_config)
        # the watcher will monitor pods, so we do not block.
        self.launcher.run_pod_async(
            pod, **self.kube_config.kube_client_request_args)
        self.log.debug("Kubernetes Job created!")

    def delete_pod(self, pod_id):
        if self.kube_config.delete_worker_pods:
            try:
                self.kube_client.delete_namespaced_pod(
                    pod_id,
                    self.namespace,
                    body=client.V1DeleteOptions(),
                    **self.kube_config.kube_client_request_args)
            except ApiException as e:
                # If the pod is already deleted
                if e.status != 404:
                    raise

    def sync(self):
        """
        The sync function checks the status of all currently running kubernetes jobs.
        If a job is completed, it's status is placed in the result queue to
        be sent back to the scheduler.

        :return:

        """
        self._health_check_kube_watcher()
        while True:
            try:
                task = self.watcher_queue.get_nowait()
                try:
                    self.process_watcher_task(task)
                finally:
                    self.watcher_queue.task_done()
            except Empty:
                break

    def process_watcher_task(self, task):
        pod_id, state, labels, resource_version = task
        self.log.info(
            'Attempting to finish pod; pod_id: %s; state: %s; labels: %s',
            pod_id, state, labels)
        key = self._labels_to_key(labels=labels)
        if key:
            self.log.debug('finishing job %s - %s (%s)', key, state, pod_id)
            self.result_queue.put((key, state, pod_id, resource_version))

    @staticmethod
    def _strip_unsafe_kubernetes_special_chars(string):
        """
        Kubernetes only supports lowercase alphanumeric characters and "-" and "." in
        the pod name
        However, there are special rules about how "-" and "." can be used so let's
        only keep
        alphanumeric chars  see here for detail:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/

        :param string: The requested Pod name
        :return: ``str`` Pod name stripped of any unsafe characters
        """
        return ''.join(ch.lower() for ind, ch in enumerate(string)
                       if ch.isalnum())

    @staticmethod
    def _make_safe_pod_id(safe_dag_id, safe_task_id, safe_uuid):
        """
        Kubernetes pod names must be <= 253 chars and must pass the following regex for
        validation
        "^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"

        :param safe_dag_id: a dag_id with only alphanumeric characters
        :param safe_task_id: a task_id with only alphanumeric characters
        :param safe_uuid: a uuid
        :return: ``str`` valid Pod name of appropriate length
        """
        MAX_POD_ID_LEN = 253

        safe_key = safe_dag_id + safe_task_id

        safe_pod_id = safe_key[:MAX_POD_ID_LEN - len(safe_uuid) -
                               1] + "-" + safe_uuid

        return safe_pod_id

    @staticmethod
    def _make_safe_label_value(string):
        """
        Valid label values must be 63 characters or less and must be empty or begin and
        end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
        dots (.), and alphanumerics between.

        If the label value is then greater than 63 chars once made safe, or differs in any
        way from the original value sent to this function, then we need to truncate to
        53 chars and append a unique hash.
        """
        MAX_LABEL_LEN = 63

        safe_label = re.sub(r'^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$',
                            '', string)

        if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
            safe_hash = hashlib.md5(string.encode()).hexdigest()[:9]
            safe_label = safe_label[:MAX_LABEL_LEN - len(safe_hash) -
                                    1] + "-" + safe_hash

        return safe_label

    @staticmethod
    def _create_pod_id(dag_id, task_id):
        safe_dag_id = AirflowKubernetesScheduler._strip_unsafe_kubernetes_special_chars(
            dag_id)
        safe_task_id = AirflowKubernetesScheduler._strip_unsafe_kubernetes_special_chars(
            task_id)
        safe_uuid = AirflowKubernetesScheduler._strip_unsafe_kubernetes_special_chars(
            uuid4().hex)
        return AirflowKubernetesScheduler._make_safe_pod_id(
            safe_dag_id, safe_task_id, safe_uuid)

    @staticmethod
    def _label_safe_datestring_to_datetime(string):
        """
        Kubernetes doesn't permit ":" in labels. ISO datetime format uses ":" but not
        "_", let's
        replace ":" with "_"

        :param string: str
        :return: datetime.datetime object
        """
        return parser.parse(string.replace('_plus_', '+').replace("_", ":"))

    @staticmethod
    def _datetime_to_label_safe_datestring(datetime_obj):
        """
        Kubernetes doesn't like ":" in labels, since ISO datetime format uses ":" but
        not "_" let's
        replace ":" with "_"
        :param datetime_obj: datetime.datetime object
        :return: ISO-like string representing the datetime
        """
        return datetime_obj.isoformat().replace(":",
                                                "_").replace('+', '_plus_')

    def _labels_to_key(self, labels):
        try_num = 1
        try:
            try_num = int(labels.get('try_number', '1'))
        except ValueError:
            self.log.warn("could not get try_number as an int: %s",
                          labels.get('try_number', '1'))

        try:
            dag_id = labels['dag_id']
            task_id = labels['task_id']
            ex_time = self._label_safe_datestring_to_datetime(
                labels['execution_date'])
        except Exception as e:
            self.log.warning(
                'Error while retrieving labels; labels: %s; exception: %s',
                labels, e)
            return None

        with create_session() as session:
            tasks = (session.query(TaskInstance).filter_by(
                execution_date=ex_time).all())
            self.log.info('Checking %s task instances.', len(tasks))
            for task in tasks:
                if (self._make_safe_label_value(task.dag_id) == dag_id and
                        self._make_safe_label_value(task.task_id) == task_id
                        and task.execution_date == ex_time):
                    self.log.info(
                        'Found matching task %s-%s (%s) with current state of %s',
                        task.dag_id, task.task_id, task.execution_date,
                        task.state)
                    dag_id = task.dag_id
                    task_id = task.task_id
                    return (dag_id, task_id, ex_time, try_num)
        self.log.warning(
            'Failed to find and match task details to a pod; labels: %s',
            labels)
        return None

    def terminate(self):
        self.watcher_queue.join()
        self._manager.shutdown()
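
The two datestring helpers above are inverses of each other. A self-contained round trip, reimplementing the same replacements for illustration:

from datetime import datetime, timezone
from dateutil import parser

def to_label_safe(dt):
    # Same replacements as _datetime_to_label_safe_datestring above.
    return dt.isoformat().replace(':', '_').replace('+', '_plus_')

def from_label_safe(string):
    # Same replacements as _label_safe_datestring_to_datetime above.
    return parser.parse(string.replace('_plus_', '+').replace('_', ':'))

dt = datetime(2019, 11, 21, 11, 8, 22, tzinfo=timezone.utc)
label = to_label_safe(dt)       # '2019-11-21T11_08_22_plus_00_00'
assert from_label_safe(label) == dt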
Example #29
    def test_set_airflow_configmap_different_for_local_setting(self):
        """
        Test that airflow_local_settings.py can be set via configmap by
        checking volume & volume-mounts are set correctly when using a different
        configmap than airflow_configmap (airflow.cfg)
        """
        self.kube_config.airflow_home = '/usr/local/airflow'
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.airflow_local_settings_configmap = 'airflow-ls-configmap'
        self.kube_config.dags_folder = '/workers/path/to/dags'

        worker_config = WorkerConfiguration(self.kube_config)
        pod = worker_config.as_pod()

        pod_spec_dict = pod.spec.to_dict()

        airflow_local_settings_volume = [
            volume for volume in pod_spec_dict['volumes'] if volume["name"] == 'airflow-local-settings'
        ]
        # Test that volume_name is found
        self.assertEqual(1, len(airflow_local_settings_volume))

        # Test that config map exists
        self.assertEqual(
            {'default_mode': None, 'items': None, 'name': 'airflow-ls-configmap', 'optional': None},
            airflow_local_settings_volume[0]['config_map']
        )

        # Test that 2 Volume Mounts exists and has 2 different mount-paths
        # One for airflow.cfg
        # Second for airflow_local_settings.py
        airflow_cfg_volume_mount = [
            volume_mount for volume_mount in pod_spec_dict['containers'][0]['volume_mounts']
            if volume_mount['name'] == 'airflow-config'
        ]

        local_setting_volume_mount = [
            volume_mount for volume_mount in pod_spec_dict['containers'][0]['volume_mounts']
            if volume_mount['name'] == 'airflow-local-settings'
        ]
        self.assertEqual(1, len(airflow_cfg_volume_mount))
        self.assertEqual(1, len(local_setting_volume_mount))

        self.assertEqual(
            [
                {
                    'mount_path': '/usr/local/airflow/config/airflow_local_settings.py',
                    'mount_propagation': None,
                    'name': 'airflow-local-settings',
                    'read_only': True,
                    'sub_path': 'airflow_local_settings.py',
                    'sub_path_expr': None
                }
            ],
            local_setting_volume_mount
        )

        self.assertEqual(
            [
                {
                    'mount_path': '/usr/local/airflow/airflow.cfg',
                    'mount_propagation': None,
                    'name': 'airflow-config',
                    'read_only': True,
                    'sub_path': 'airflow.cfg',
                    'sub_path_expr': None
                }
            ],
            airflow_cfg_volume_mount
        )
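
Note why the assertions in this example use snake_case keys ('config_map', 'mount_path') while the subPath test in Example #12 checks camelCase: the kubernetes client's to_dict() keeps Python attribute names, whereas sanitize_for_serialization() emits Kubernetes API names. A quick illustration with the kubernetes Python client:

from kubernetes.client import ApiClient, models as k8s

mount = k8s.V1VolumeMount(name='airflow-config',
                          mount_path='/usr/local/airflow/airflow.cfg',
                          sub_path='airflow.cfg', read_only=True)

mount.to_dict()
# => {'mount_path': '/usr/local/airflow/airflow.cfg', 'sub_path': 'airflow.cfg',
#     'read_only': True, ...}                                  (snake_case)

ApiClient().sanitize_for_serialization(mount)
# => {'mountPath': '/usr/local/airflow/airflow.cfg', 'subPath': 'airflow.cfg',
#     'readOnly': True, 'name': 'airflow-config'}              (camelCase)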
Example #30
    def test_worker_adds_config(self):
        worker_config = WorkerConfiguration(self.kube_config)
        volumes = worker_config._get_volumes()
        print(volumes)