def test_reconcile_pods(self, mock_uuid):
        mock_uuid.return_value = self.static_uuid
        path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'

        base_pod = PodGenerator(pod_template_file=path,
                                extract_xcom=False).gen_pod()

        mutator_pod = k8s.V1Pod(
            metadata=k8s.V1ObjectMeta(
                name="name2",
                labels={"bar": "baz"},
            ),
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        image='',
                        name='name',
                        command=['/bin/command2.sh', 'arg2'],
                        volume_mounts=[
                            k8s.V1VolumeMount(
                                mount_path="/foo/",
                                name="example-kubernetes-test-volume2")
                        ],
                    )
                ],
                volumes=[
                    k8s.V1Volume(
                        host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                        name="example-kubernetes-test-volume2",
                    )
                ],
            ),
        )

        result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
        expected: k8s.V1Pod = self.expected
        expected.metadata.name = "name2"
        expected.metadata.labels['bar'] = 'baz'
        expected.spec.volumes = expected.spec.volumes or []
        expected.spec.volumes.append(
            k8s.V1Volume(host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                         name="example-kubernetes-test-volume2"))

        base_container: k8s.V1Container = expected.spec.containers[0]
        base_container.command = ['/bin/command2.sh', 'arg2']
        base_container.volume_mounts = [
            k8s.V1VolumeMount(mount_path="/foo/",
                              name="example-kubernetes-test-volume2")
        ]
        base_container.name = "name"
        expected.spec.containers[0] = base_container

        result_dict = self.k8s_client.sanitize_for_serialization(result)
        expected_dict = self.k8s_client.sanitize_for_serialization(expected)

        assert result_dict == expected_dict

    def _get_volumes(self) -> List[k8s.V1Volume]:
        def _construct_volume(name, claim, host) -> k8s.V1Volume:
            volume = k8s.V1Volume(name=name)

            if claim:
                volume.persistent_volume_claim = k8s.V1PersistentVolumeClaimVolumeSource(
                    claim_name=claim)
            elif host:
                volume.host_path = k8s.V1HostPathVolumeSource(path=host,
                                                              type='')
            else:
                volume.empty_dir = {}

            return volume

        volumes = {
            self.dags_volume_name:
            _construct_volume(self.dags_volume_name,
                              self.kube_config.dags_volume_claim,
                              self.kube_config.dags_volume_host),
            self.logs_volume_name:
            _construct_volume(self.logs_volume_name,
                              self.kube_config.logs_volume_claim,
                              self.kube_config.logs_volume_host)
        }

        if self.kube_config.dags_in_image:
            del volumes[self.dags_volume_name]

        # Get the SSH key from secrets as a volume
        if self.kube_config.git_ssh_key_secret_name:
            volumes[self.git_sync_ssh_secret_volume_name] = k8s.V1Volume(
                name=self.git_sync_ssh_secret_volume_name,
                secret=k8s.V1SecretVolumeSource(
                    secret_name=self.kube_config.git_ssh_key_secret_name,
                    items=[
                        k8s.V1KeyToPath(key=self.git_ssh_key_secret_key,
                                        path='ssh',
                                        mode=0o440)
                    ]))

        if self.kube_config.git_ssh_known_hosts_configmap_name:
            volumes[self.git_sync_ssh_known_hosts_volume_name] = k8s.V1Volume(
                name=self.git_sync_ssh_known_hosts_volume_name,
                config_map=k8s.V1ConfigMapVolumeSource(
                    name=self.kube_config.git_ssh_known_hosts_configmap_name,
                    default_mode=0o440))

        # Mount the airflow.cfg file via a configmap the user has specified
        if self.kube_config.airflow_configmap:
            config_volume_name = 'airflow-config'
            volumes[config_volume_name] = k8s.V1Volume(
                name=config_volume_name,
                config_map=k8s.V1ConfigMapVolumeSource(
                    name=self.kube_config.airflow_configmap))

        return list(volumes.values())
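A hedged sketch of how such a volume list is ultimately attached to a pod spec, using placeholder names and paths that are not taken from the executor's real configuration:

# Sketch only: volume/mount names, the image and the mount path are assumptions.
from kubernetes.client import models as k8s

dags_volume = k8s.V1Volume(
    name='airflow-dags',
    persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name='airflow-dags'))
dags_mount = k8s.V1VolumeMount(name='airflow-dags', mount_path='/opt/airflow/dags', read_only=True)

pod_spec = k8s.V1PodSpec(
    containers=[k8s.V1Container(name='base', image='apache/airflow', volume_mounts=[dags_mount])],
    volumes=[dags_volume])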
Example 3
def get_kubeconfig_volume(release: OpenshiftRelease):
    return k8s.V1Volume(
        name="kubeconfig",
        secret=k8s.V1SecretVolumeSource(
            secret_name=f"{release.get_release_name()}-kubeconfig"
        )
    )
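The secret volume above still needs a matching mount on the consuming container; a minimal companion sketch (the mount path is an assumption, not taken from the source):

# Sketch only: the name must match the V1Volume above; the path is assumed.
from kubernetes.client import models as k8s

kubeconfig_mount = k8s.V1VolumeMount(
    name="kubeconfig",
    mount_path="/home/airflow/.kube",
    read_only=True)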
    def test_gen_pod_extract_xcom(self, mock_uuid):
        mock_uuid.return_value = self.static_uuid
        path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'

        pod_generator = PodGenerator(pod_template_file=path, extract_xcom=True)
        result = pod_generator.gen_pod()
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        container_two = {
            'name': 'airflow-xcom-sidecar',
            'image': "alpine",
            'command': ['sh', '-c', PodDefaults.XCOM_CMD],
            'volumeMounts': [{'name': 'xcom', 'mountPath': '/airflow/xcom'}],
            'resources': {'requests': {'cpu': '1m'}},
        }
        self.expected.spec.containers.append(container_two)
        base_container: k8s.V1Container = self.expected.spec.containers[0]
        base_container.volume_mounts = base_container.volume_mounts or []
        base_container.volume_mounts.append(k8s.V1VolumeMount(name="xcom", mount_path="/airflow/xcom"))
        self.expected.spec.containers[0] = base_container
        self.expected.spec.volumes = self.expected.spec.volumes or []
        self.expected.spec.volumes.append(
            k8s.V1Volume(
                name='xcom',
                empty_dir={},
            )
        )
        result_dict = self.k8s_client.sanitize_for_serialization(result)
        expected_dict = self.k8s_client.sanitize_for_serialization(self.expected)

        assert result_dict == expected_dict
Example 5
 def to_volume_secret(self) -> Tuple[k8s.V1Volume, k8s.V1VolumeMount]:
     """Converts to volume secret"""
     vol_id = f'secretvol{uuid.uuid4()}'
     volume = k8s.V1Volume(name=vol_id, secret=k8s.V1SecretVolumeSource(secret_name=self.secret))
     if self.items:
         volume.secret.items = self.items
     return (volume, k8s.V1VolumeMount(mount_path=self.deploy_target, name=vol_id, read_only=True))
 def test_to_volume_secret(self, mock_uuid):
     mock_uuid.return_value = '0'
     secret = Secret('volume', '/etc/foo', 'secret_b')
     assert secret.to_volume_secret() == (
         k8s.V1Volume(name='secretvol0', secret=k8s.V1SecretVolumeSource(secret_name='secret_b')),
         k8s.V1VolumeMount(mount_path='/etc/foo', name='secretvol0', read_only=True),
     )
Example 7
class PodDefaults:
    """
    Static defaults for Pods
    """
    XCOM_MOUNT_PATH = '/airflow/xcom'
    SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
    XCOM_CMD = 'trap "exit 0" INT; while true; do sleep 30; done;'
    VOLUME_MOUNT = k8s.V1VolumeMount(
        name='xcom',
        mount_path=XCOM_MOUNT_PATH
    )
    VOLUME = k8s.V1Volume(
        name='xcom',
        empty_dir=k8s.V1EmptyDirVolumeSource()
    )
    SIDECAR_CONTAINER = k8s.V1Container(
        name=SIDECAR_CONTAINER_NAME,
        command=['sh', '-c', XCOM_CMD],
        image='alpine',
        volume_mounts=[VOLUME_MOUNT],
        resources=k8s.V1ResourceRequirements(
            requests={
                "cpu": "1m",
            }
        ),
    )
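A rough sketch of how these defaults are usually combined with an existing pod, mirroring what the XCom sidecar test earlier in this collection expects; the helper name is ours, not necessarily the library's:

import copy
from kubernetes.client import models as k8s

def add_xcom_sidecar_sketch(pod: k8s.V1Pod) -> k8s.V1Pod:
    """Return a copy of `pod` with the xcom volume, mount and sidecar container added."""
    pod_copy = copy.deepcopy(pod)
    pod_copy.spec.volumes = pod_copy.spec.volumes or []
    pod_copy.spec.volumes.append(PodDefaults.VOLUME)
    base_container = pod_copy.spec.containers[0]  # assumes the main container is first
    base_container.volume_mounts = base_container.volume_mounts or []
    base_container.volume_mounts.append(PodDefaults.VOLUME_MOUNT)
    pod_copy.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
    return pod_copy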
Example 8
def get_kubeconfig_volume(release: OpenshiftRelease, task_group):
    prefix=f"{task_group}-"
    return k8s.V1Volume(
        name="kubeconfig",
        secret=k8s.V1SecretVolumeSource(
            secret_name=f"{release.get_release_name()}-{prefix if 'hosted' in task_group else ''}kubeconfig"
        )
    )
 def to_volume_secret(self) -> Tuple[k8s.V1Volume, k8s.V1VolumeMount]:
     vol_id = 'secretvol{}'.format(uuid.uuid4())
     return (k8s.V1Volume(
         name=vol_id,
         secret=k8s.V1SecretVolumeSource(secret_name=self.secret)),
             k8s.V1VolumeMount(mount_path=self.deploy_target,
                               name=vol_id,
                               read_only=True))
 def test_to_k8s_object(self):
     volume_config = {'persistentVolumeClaim': {'claimName': 'test-volume'}}
     volume = Volume(name='test-volume', configs=volume_config)
     expected_volume = k8s.V1Volume(
         name="test-volume",
         persistent_volume_claim={"claimName": "test-volume"})
     result = volume.to_k8s_client_obj()
     self.assertEqual(result, expected_volume)
 def test_only_mount_sub_secret(self, mock_uuid):
     mock_uuid.return_value = '0'
     items = [k8s.V1KeyToPath(key="my-username", path="/extra/path")]
     secret = Secret('volume', '/etc/foo', 'secret_b', items=items)
     assert secret.to_volume_secret() == (
         k8s.V1Volume(
             name='secretvol0', secret=k8s.V1SecretVolumeSource(secret_name='secret_b', items=items)
         ),
         k8s.V1VolumeMount(mount_path='/etc/foo', name='secretvol0', read_only=True),
     )
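In a DAG these Secret objects are normally handed to KubernetesPodOperator through its `secrets` argument instead of being converted by hand; a hedged sketch (import paths vary across Airflow and provider releases, and the task name, image and paths are placeholders):

# Sketch only: exact import paths differ between Airflow / provider versions.
from airflow.kubernetes.secret import Secret
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator

secret_file = Secret('volume', '/etc/sql_conn', 'airflow-secrets')

mount_secret_task = KubernetesPodOperator(
    task_id='pod-with-secret-volume',
    name='pod-with-secret-volume',
    namespace='default',
    image='ubuntu:16.04',
    cmds=['bash', '-cx'],
    arguments=['ls /etc/sql_conn'],
    secrets=[secret_file],
)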
    def test_init_container(self):
        # GIVEN
        volume_mounts = [
            k8s.V1VolumeMount(mount_path='/etc/foo', name='test-volume', sub_path=None, read_only=True)
        ]

        init_environments = [
            k8s.V1EnvVar(name='key1', value='value1'),
            k8s.V1EnvVar(name='key2', value='value2'),
        ]

        init_container = k8s.V1Container(
            name="init-container",
            image="ubuntu:16.04",
            env=init_environments,
            volume_mounts=volume_mounts,
            command=["bash", "-cx"],
            args=["echo 10"],
        )

        volume = k8s.V1Volume(
            name='test-volume',
            persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name='test-volume'),
        )
        expected_init_container = {
            'name': 'init-container',
            'image': 'ubuntu:16.04',
            'command': ['bash', '-cx'],
            'args': ['echo 10'],
            'env': [{'name': 'key1', 'value': 'value1'}, {'name': 'key2', 'value': 'value2'}],
            'volumeMounts': [{'mountPath': '/etc/foo', 'name': 'test-volume', 'readOnly': True}],
        }

        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test-" + str(random.randint(0, 1000000)),
            task_id="task" + self.get_current_task_name(),
            volumes=[volume],
            init_containers=[init_container],
            in_cluster=False,
            do_xcom_push=False,
        )
        context = create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['initContainers'] = [expected_init_container]
        self.expected_pod['spec']['volumes'] = [
            {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
        ]
        assert self.expected_pod == actual_pod
Example 13
 def to_k8s_client_obj(self):
     from kubernetes.client import models as k8s
     resp = k8s.V1Volume(name=self.name)
     for k, v in self.configs.items():
         snake_key = Volume._convert_to_snake_case(k)
         if hasattr(resp, snake_key):
             setattr(resp, snake_key, v)
         else:
             raise AttributeError(
                 "V1Volume does not have attribute {}".format(k))
     return resp
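The helper `_convert_to_snake_case` is not shown in this excerpt; a plausible stand-in (an assumption, not the library's exact implementation) is a regex camelCase-to-snake_case conversion:

import re

def _convert_to_snake_case(input_string: str) -> str:
    # Assumed behaviour: 'persistentVolumeClaim' -> 'persistent_volume_claim'.
    return re.sub(r'(?<!^)(?=[A-Z])', '_', input_string).lower()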
Example 14
 def test_to_volume_secret(self, mock_uuid):
     static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
     mock_uuid.return_value = static_uuid
     secret = Secret('volume', '/etc/foo', 'secret_b')
     self.assertEqual(
         secret.to_volume_secret(),
         (k8s.V1Volume(
             name='secretvol' + str(static_uuid),
             secret=k8s.V1SecretVolumeSource(secret_name='secret_b')),
          k8s.V1VolumeMount(mount_path='/etc/foo',
                            name='secretvol' + str(static_uuid),
                            read_only=True)))
Example 15
def to_volume_secret(secret: "Secret") -> typing.Tuple[k8s.V1Volume, k8s.V1VolumeMount]:
    """Converts to volume secret"""
    vol_id = f"secretvol{uuid.uuid4()}"
    volume = k8s.V1Volume(
        name=vol_id, secret=k8s.V1SecretVolumeSource(secret_name=secret.secret)
    )
    # if secret.items:
    #     volume.secret.items = secret.items
    return (
        volume,
        k8s.V1VolumeMount(mount_path=secret.deploy_target, name=vol_id, read_only=True),
    )
Example 16
        def _construct_volume(name, claim, host) -> k8s.V1Volume:
            volume = k8s.V1Volume(name=name)

            if claim:
                volume.persistent_volume_claim = k8s.V1PersistentVolumeClaimVolumeSource(
                    claim_name=claim)
            elif host:
                volume.host_path = k8s.V1HostPathVolumeSource(path=host,
                                                              type='')
            else:
                volume.empty_dir = {}

            return volume
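A quick sanity check of the three branches (claim, host path, empty dir), assuming the helper is available at module level; names and paths are arbitrary:

from kubernetes.client import ApiClient

api = ApiClient()
assert api.sanitize_for_serialization(
    _construct_volume('dags', 'dags-claim', None))['persistentVolumeClaim'] == {'claimName': 'dags-claim'}
assert api.sanitize_for_serialization(
    _construct_volume('dags', None, '/opt/dags'))['hostPath'] == {'path': '/opt/dags', 'type': ''}
assert api.sanitize_for_serialization(
    _construct_volume('logs', None, None))['emptyDir'] == {}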
Example 17
 def to_volume_secret(self):
     import kubernetes.client.models as k8s
     vol_id = 'secretvol{}'.format(uuid.uuid4())
     if self.deploy_target:
         volume_mount = k8s.V1VolumeMount(mount_path=self.deploy_target,
                                          name=vol_id,
                                          read_only=True)
     else:
         volume_mount = None
     return (k8s.V1Volume(
         name=vol_id,
         secret=k8s.V1SecretVolumeSource(secret_name=self.secret)),
             volume_mount)
Example 18
def to_k8s_client_obj(
        volume_spec: typing.Dict[str, typing.Any]) -> k8s.V1Volume:
    name = volume_spec.get("name")

    volume = k8s.V1Volume(name=name)
    for k, v in volume_spec.items():
        snake_key = _convert_to_snake_case(k)
        if hasattr(volume, snake_key):
            setattr(volume, snake_key, v)
        else:
            raise AttributeError(
                "k8s.V1Volume does not have attribute {}".format(k))
    return volume
Example 19
    def to_k8s_client_obj(self) -> k8s.V1Volume:
        """
        Converts to k8s object.

        :return: Volume Mount k8s object
        """
        resp = k8s.V1Volume(name=self.name)
        for k, v in self.configs.items():
            snake_key = Volume._convert_to_snake_case(k)
            if hasattr(resp, snake_key):
                setattr(resp, snake_key, v)
            else:
                raise AttributeError(f"V1Volume does not have attribute {k}")
        return resp
Example 20
    def test_volume_mount(self):
        with mock.patch.object(PodLauncher, 'log') as mock_logger:
            volume_mount = k8s.V1VolumeMount(name='test-volume',
                                             mount_path='/tmp/test_volume',
                                             sub_path=None,
                                             read_only=False)

            volume = k8s.V1Volume(
                name='test-volume',
                persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name='test-volume'),
            )

            args = [
                "echo \"retrieved from mount\" > /tmp/test_volume/test.txt "
                "&& cat /tmp/test_volume/test.txt"
            ]
            k = KubernetesPodOperator(
                namespace='default',
                image="ubuntu:16.04",
                cmds=["bash", "-cx"],
                arguments=args,
                labels={"foo": "bar"},
                volume_mounts=[volume_mount],
                volumes=[volume],
                name="test-" + str(random.randint(0, 1000000)),
                task_id="task" + self.get_current_task_name(),
                in_cluster=False,
                do_xcom_push=False,
            )
            context = create_context(k)
            k.execute(context=context)
            mock_logger.info.assert_any_call('retrieved from mount')
            actual_pod = self.api_client.sanitize_for_serialization(k.pod)
            self.expected_pod['spec']['containers'][0]['args'] = args
            self.expected_pod['spec']['containers'][0]['volumeMounts'] = [
                {'name': 'test-volume', 'mountPath': '/tmp/test_volume', 'readOnly': False}
            ]
            self.expected_pod['spec']['volumes'] = [
                {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
            ]
            self.assertEqual(self.expected_pod, actual_pod)
Example 21
 def test_set_volume_claim_to_existing_claim(self):
     helm_settings = yaml.safe_load(git_sync_existing_claim)
     res = render_chart(
         'GIT-SYNC',
         helm_settings,
         show_only=["templates/scheduler/scheduler-deployment.yaml"])
     dep: k8s.V1Deployment = render_k8s_object(res[0], k8s.V1Deployment)
     volume_map = {vol.name: vol for vol in dep.spec.template.spec.volumes}
     dag_volume = volume_map['dags']
     self.assertEqual(
         dag_volume,
          k8s.V1Volume(
              name="dags",
              persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name='test-claim'),
          ),
     )
Example 22
class PodDefaults:
    """
    Static defaults for the PodGenerator
    """
    XCOM_MOUNT_PATH = '/airflow/xcom'
    SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
    XCOM_CMD = """import time
while True:
    try:
        time.sleep(3600)
    except KeyboardInterrupt:
        exit(0)
    """
    VOLUME_MOUNT = k8s.V1VolumeMount(name='xcom', mount_path=XCOM_MOUNT_PATH)
    VOLUME = k8s.V1Volume(name='xcom', empty_dir=k8s.V1EmptyDirVolumeSource())
    SIDECAR_CONTAINER = k8s.V1Container(name=SIDECAR_CONTAINER_NAME,
                                        command=['python', '-c', XCOM_CMD],
                                        image='python:3.5-alpine',
                                        volume_mounts=[VOLUME_MOUNT])

    def test_from_obj(self):
        result = PodGenerator.from_obj(
            {
                "pod_override": k8s.V1Pod(
                    api_version="v1",
                    kind="Pod",
                    metadata=k8s.V1ObjectMeta(name="foo", annotations={"test": "annotation"}),
                    spec=k8s.V1PodSpec(
                        containers=[
                            k8s.V1Container(
                                name="base",
                                volume_mounts=[
                                    k8s.V1VolumeMount(
                                        mount_path="/foo/", name="example-kubernetes-test-volume"
                                    )
                                ],
                            )
                        ],
                        volumes=[
                            k8s.V1Volume(
                                name="example-kubernetes-test-volume",
                                host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                            )
                        ],
                    ),
                )
            }
        )
        result = self.k8s_client.sanitize_for_serialization(result)

        assert {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'foo',
                'annotations': {'test': 'annotation'},
            },
            'spec': {
                'containers': [
                    {
                        'name': 'base',
                        'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                    }
                ],
                'volumes': [{'hostPath': {'path': '/tmp/'}, 'name': 'example-kubernetes-test-volume'}],
            },
        } == result
        result = PodGenerator.from_obj(
            {
                "KubernetesExecutor": {
                    "annotations": {"test": "annotation"},
                    "volumes": [
                        {
                            "name": "example-kubernetes-test-volume",
                            "hostPath": {"path": "/tmp/"},
                        },
                    ],
                    "volume_mounts": [
                        {
                            "mountPath": "/foo/",
                            "name": "example-kubernetes-test-volume",
                        },
                    ],
                }
            }
        )

        result_from_pod = PodGenerator.from_obj(
            {
                "pod_override": k8s.V1Pod(
                    metadata=k8s.V1ObjectMeta(annotations={"test": "annotation"}),
                    spec=k8s.V1PodSpec(
                        containers=[
                            k8s.V1Container(
                                name="base",
                                volume_mounts=[
                                    k8s.V1VolumeMount(
                                        name="example-kubernetes-test-volume", mount_path="/foo/"
                                    )
                                ],
                            )
                        ],
                        volumes=[k8s.V1Volume(name="example-kubernetes-test-volume", host_path="/tmp/")],
                    ),
                )
            }
        )

        result = self.k8s_client.sanitize_for_serialization(result)
        result_from_pod = self.k8s_client.sanitize_for_serialization(result_from_pod)
        expected_from_pod = {
            'metadata': {'annotations': {'test': 'annotation'}},
            'spec': {
                'containers': [
                    {
                        'name': 'base',
                        'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                    }
                ],
                'volumes': [{'hostPath': '/tmp/', 'name': 'example-kubernetes-test-volume'}],
            },
        }
        assert (
            result_from_pod == expected_from_pod
        ), "There was a discrepency between KubernetesExecutor and pod_override"

        assert {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'annotations': {'test': 'annotation'},
            },
            'spec': {
                'containers': [
                    {
                        'args': [],
                        'command': [],
                        'env': [],
                        'envFrom': [],
                        'name': 'base',
                        'ports': [],
                        'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                    }
                ],
                'hostNetwork': False,
                'imagePullSecrets': [],
                'volumes': [{'hostPath': {'path': '/tmp/'}, 'name': 'example-kubernetes-test-volume'}],
            },
        } == result
Example 24
secret_all_keys = Secret("env", None, "airflow-secrets-2")
volume_mount = k8s.V1VolumeMount(name="test-volume",
                                 mount_path="/root/mount_file",
                                 sub_path=None,
                                 read_only=True)

configmaps = [
    k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(
        name="test-configmap-1")),
    k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(
        name="test-configmap-2")),
]

volume = k8s.V1Volume(
    name="test-volume",
    persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(
        claim_name="test-volume"),
)

port = k8s.V1ContainerPort(name="http", container_port=80)

init_container_volume_mounts = [
    k8s.V1VolumeMount(mount_path="/etc/foo",
                      name="test-volume",
                      sub_path=None,
                      read_only=True)
]

init_environments = [
    k8s.V1EnvVar(name="key1", value="value1"),
    k8s.V1EnvVar(name="key2", value="value2"),
Example 25
        executor_config_volume_mount = {
            "pod_override":
            k8s.V1Pod(spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        volume_mounts=[
                            k8s.V1VolumeMount(
                                mount_path="/foo/",
                                name="example-kubernetes-test-volume")
                        ],
                    )
                ],
                volumes=[
                    k8s.V1Volume(
                        name="example-kubernetes-test-volume",
                        host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                    )
                ],
            )),
        }

        @task(executor_config=executor_config_volume_mount)
        def test_volume_mount():
            """
            Tests whether the volume has been mounted.
            """
            with open('/foo/volume_mount_test.txt', 'w') as foo:
                foo.write('Hello')

            return_code = os.system("cat /foo/volume_mount_test.txt")
            if return_code != 0:
Example 26
def get_empty_dir_volume():
    return k8s.V1Volume(name="tmpdir", empty_dir=k8s.V1EmptyDirVolumeSource())
Example 27
def get_kubeconfig_volume(version, platform, profile):
    return k8s.V1Volume(
        name="kubeconfig",
        secret=k8s.V1SecretVolumeSource(
            secret_name=f"{version}-{platform}-{profile}-kubeconfig"))
dag = DAG(
    dag_id="kube-pod-operator",
    start_date=airflow.utils.dates.days_ago(2),
    schedule_interval="@daily",
)

start_kube_process = BashOperator(
    task_id="start_kube_process",
    bash_command="echo upload to s3",
    dag=dag,
)

kube_files_volume_config = k8s.V1ConfigMapVolumeSource(name='minikube')

kube_files_volume = k8s.V1Volume(name='kube-files-volume',
                                 config_map=kube_files_volume_config)
kube_files_volume_mount = k8s.V1VolumeMount(name='kube-files-volume',
                                            mount_path='/tmp/k8s')
in_cluster = False
kubernetes_min_pod = kubernetes_pod_operator.KubernetesPodOperator(
    task_id='pod-ex-minimum',
    name='pod-ex-minimum',
    cmds=['echo'],
    namespace='default',
    image='ubuntu:latest',
    in_cluster=in_cluster,
    config_file='/tmp/k8s/minikube/config',
    volumes=[kube_files_volume],
    volume_mounts=[kube_files_volume_mount])

run_another_pod = kubernetes_pod_operator.KubernetesPodOperator(
def pipeline_definition(
    hydrosphere_name="local",
    hydrosphere_address="http://hydro-serving-sidecar-serving.kubeflow.svc.cluster.local:8080",
    data_directory='/data/mnist',
    models_directory="/models/mnist",
    learning_rate="0.01",
    learning_steps="5000",
    batch_size="256",
    warmpup_count="100",
    model_name="mnist",
    application_name="mnist-app",
    signature_name="predict",
    acceptable_accuracy="0.90",
):

    data_pvc = k8s.V1PersistentVolumeClaimVolumeSource(claim_name="data")
    models_pvc = k8s.V1PersistentVolumeClaimVolumeSource(claim_name="models")
    data_volume = k8s.V1Volume(name="data", persistent_volume_claim=data_pvc)
    models_volume = k8s.V1Volume(name="models",
                                 persistent_volume_claim=models_pvc)
    data_volume_mount = k8s.V1VolumeMount(
        mount_path="{{workflow.parameters.data-directory}}", name="data")
    models_volume_mount = k8s.V1VolumeMount(
        mount_path="{{workflow.parameters.models-directory}}", name="models")

    hydrosphere_address_env = k8s.V1EnvVar(
        name="CLUSTER_ADDRESS",
        value="{{workflow.parameters.hydrosphere-address}}")
    hydrosphere_name_env = k8s.V1EnvVar(
        name="CLUSTER_NAME", value="{{workflow.parameters.hydrosphere-name}}")
    data_directory_env = k8s.V1EnvVar(
        name="MNIST_DATA_DIR", value="{{workflow.parameters.data-directory}}")
    models_directory_env = k8s.V1EnvVar(
        name="MNIST_MODELS_DIR",
        value="{{workflow.parameters.models-directory}}")
    model_name_env = k8s.V1EnvVar(name="MODEL_NAME",
                                  value="{{workflow.parameters.model-name}}")
    application_name_env = k8s.V1EnvVar(
        name="APPLICATION_NAME",
        value="{{workflow.parameters.application-name}}")
    signature_name_env = k8s.V1EnvVar(
        name="SIGNATURE_NAME", value="{{workflow.parameters.signature-name}}")
    acceptable_accuracy_env = k8s.V1EnvVar(
        name="ACCEPTABLE_ACCURACY",
        value="{{workflow.parameters.acceptable-accuracy}}")
    learning_rate_env = k8s.V1EnvVar(
        name="LEARNING_RATE", value="{{workflow.parameters.learning-rate}}")
    learning_steps_env = k8s.V1EnvVar(
        name="LEARNING_STEPS", value="{{workflow.parameters.learning-steps}}")
    batch_size_env = k8s.V1EnvVar(name="BATCH_SIZE",
                                  value="{{workflow.parameters.batch-size}}")
    warmup_count_env = k8s.V1EnvVar(
        name="WARMUP_IMAGES_AMOUNT",
        value="{{workflow.parameters.warmpup-count}}")

    # 1. Download MNIST data
    download = dsl.ContainerOp(
        name="download", image="tidylobster/mnist-pipeline-download:latest")
    download.add_volume(data_volume)
    download.add_volume_mount(data_volume_mount)
    download.add_env_variable(data_directory_env)

    # 2. Train and save a MNIST classifier using Tensorflow
    train = dsl.ContainerOp(name="train",
                            image="tidylobster/mnist-pipeline-train:latest")
    train.after(download)
    train.set_memory_request('2G')
    train.set_cpu_request('1')

    train.add_volume(data_volume)
    train.add_volume(models_volume)
    train.add_volume_mount(data_volume_mount)
    train.add_volume_mount(models_volume_mount)
    train.add_env_variable(data_directory_env)
    train.add_env_variable(models_directory_env)
    train.add_env_variable(learning_rate_env)
    train.add_env_variable(learning_steps_env)
    train.add_env_variable(batch_size_env)

    # 3. Upload trained model to the cluster
    upload = dsl.ContainerOp(
        name="upload",
        image="tidylobster/mnist-pipeline-upload:latest",
        file_outputs={"model_version": "/model_version.txt"})
    upload.after(train)

    upload.add_volume(models_volume)
    upload.add_volume_mount(models_volume_mount)
    upload.add_env_variable(models_directory_env)
    upload.add_env_variable(model_name_env)
    upload.add_env_variable(hydrosphere_name_env)
    upload.add_env_variable(hydrosphere_address_env)

    # 4. Deploy application
    deploy = dsl.ContainerOp(name="deploy",
                             image="tidylobster/mnist-pipeline-deploy:latest",
                             arguments=[upload.outputs["model_version"]])
    deploy.after(upload)

    deploy.add_env_variable(hydrosphere_name_env)
    deploy.add_env_variable(hydrosphere_address_env)
    deploy.add_env_variable(application_name_env)
    deploy.add_env_variable(model_name_env)

    # 5. Test the model
    test = dsl.ContainerOp(name="test",
                           image="tidylobster/mnist-pipeline-test:latest")
    test.after(deploy)

    test.add_volume(data_volume)
    test.add_volume_mount(data_volume_mount)
    test.add_env_variable(data_directory_env)
    test.add_env_variable(hydrosphere_address_env)
    test.add_env_variable(application_name_env)
    test.add_env_variable(signature_name_env)
    test.add_env_variable(warmup_count_env)
    test.add_env_variable(acceptable_accuracy_env)

    # 6. Clean environment
    clean = dsl.ContainerOp(name="clean",
                            image="tidylobster/mnist-pipeline-clean:latest")
    clean.after(test)

    clean.add_volume(data_volume)
    clean.add_volume_mount(data_volume_mount)
    clean.add_env_variable(data_directory_env)
    clean.add_volume(models_volume)
    clean.add_volume_mount(models_volume_mount)
    clean.add_env_variable(models_directory_env)
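The excerpt starts mid-definition and has likely lost its @dsl.pipeline decorator; in the KFP v1 SDK a pipeline like this is typically compiled with a short footer such as the following hedged sketch (the package file name is arbitrary):

# Sketch of the usual KFP v1 compile step; the output file name is arbitrary.
if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(pipeline_definition, __file__ + '.tar.gz')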
    dag_id='ai_training_run',
    default_args=ai_training_run_dag_default_args,
    schedule_interval=None,
    start_date=days_ago(2),
    tags=['training']
)

# Define Kubernetes namespace to execute DAG in
namespace = 'airflow'

## Define volume details (change values as necessary to match your environment)

# Dataset volume
dataset_volume_pvc_existing = 'dataset-vol'
dataset_volume = k8s.V1Volume(
    name=dataset_volume_pvc_existing,
    persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name=dataset_volume_pvc_existing),
)
dataset_volume_mount = k8s.V1VolumeMount(
    name=dataset_volume_pvc_existing,
    mount_path='/mnt/dataset',
    sub_path=None,
    read_only=False
)

# Model volume
model_volume_pvc_existing = 'airflow-model-vol'
model_volume = k8s.V1Volume(
    name=model_volume_pvc_existing,
    persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name=model_volume_pvc_existing),
)
model_volume_mount = k8s.V1VolumeMount(