def test_extract_volume_secrets(self):
     # Test when secrets is not empty
     secrets = [
         Secret('volume', 'KEY1', 's1', 'key-1'),
         Secret('env', 'KEY2', 's2'),
         Secret('volume', 'KEY3', 's3', 'key-2')
     ]
     pod = Pod('v3.14', {}, [], secrets=secrets)
     self.expected['spec']['containers'][0]['volumeMounts'] = [{
         'mountPath': 'KEY1',
         'name': 'secretvol0',
         'readOnly': True
     }, {
         'mountPath': 'KEY3',
         'name': 'secretvol1',
         'readOnly': True
     }]
     self.expected['spec']['volumes'] = [{
         'name': 'secretvol0',
         'secret': {
             'secretName': 's1'
         }
     }, {
         'name': 'secretvol1',
         'secret': {
             'secretName': 's3'
         }
     }]
     KubernetesRequestFactory.extract_volume_secrets(pod, self.input_req)
     self.assertEqual(self.input_req, self.expected)
Example #2
 def test_to_volume_secret(self, mock_uuid):
     mock_uuid.return_value = '0'
     secret = Secret('volume', '/etc/foo', 'secret_b')
     assert secret.to_volume_secret() == (
         k8s.V1Volume(name='secretvol0', secret=k8s.V1SecretVolumeSource(secret_name='secret_b')),
         k8s.V1VolumeMount(mount_path='/etc/foo', name='secretvol0', read_only=True),
     )
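The `mock_uuid` argument implies the original test patched UUID generation, with the decorator lost in extraction; a minimal sketch of that setup, assuming `to_volume_secret` derives the volume name from `uuid.uuid4()` (the patch target is an assumption):

import uuid
from unittest import mock

from airflow.kubernetes.secret import Secret

@mock.patch('uuid.uuid4')  # assumed patch target; the scraped snippet omits the decorator
def test_to_volume_secret(mock_uuid):
    mock_uuid.return_value = '0'  # makes the generated name deterministic: 'secretvol0'
    volume, volume_mount = Secret('volume', '/etc/foo', 'secret_b').to_volume_secret()
    assert volume.name == 'secretvol0'
    assert volume_mount.mount_path == '/etc/foo'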
Example #3
    def test_get_secrets(self):
        # Test when secretRef is None and kube_secrets is not empty
        self.kube_config.kube_secrets = {
            'AWS_SECRET_KEY': 'airflow-secret=aws_secret_key',
            'POSTGRES_PASSWORD': 'airflow-secret=postgres_credentials'
        }
        self.kube_config.env_from_secret_ref = None
        worker_config = WorkerConfiguration(self.kube_config)
        secrets = worker_config._get_secrets()
        secrets.sort(key=lambda secret: secret.deploy_target)
        expected = [
            Secret('env', 'AWS_SECRET_KEY', 'airflow-secret',
                   'aws_secret_key'),
            Secret('env', 'POSTGRES_PASSWORD', 'airflow-secret',
                   'postgres_credentials')
        ]
        self.assertListEqual(expected, secrets)

        # Test when secret is not empty and kube_secrets is empty dict
        self.kube_config.kube_secrets = {}
        self.kube_config.env_from_secret_ref = 'secret_a,secret_b'
        worker_config = WorkerConfiguration(self.kube_config)
        secrets = worker_config._get_secrets()
        expected = [
            Secret('env', None, 'secret_a'),
            Secret('env', None, 'secret_b')
        ]
        self.assertListEqual(expected, secrets)
Example #4
 def test_to_env_secret(self):
     secret = Secret('env', 'name', 'secret', 'key')
     self.assertEqual(
         secret.to_env_secret(),
         k8s.V1EnvVar(name='NAME',
                      value_from=k8s.V1EnvVarSource(
                          secret_key_ref=k8s.V1SecretKeySelector(
                              name='secret', key='key'))))
Example #5
 def test_only_mount_sub_secret(self, mock_uuid):
     mock_uuid.return_value = '0'
     items = [k8s.V1KeyToPath(key="my-username", path="/extra/path")]
     secret = Secret('volume', '/etc/foo', 'secret_b', items=items)
     assert secret.to_volume_secret() == (
         k8s.V1Volume(
             name='secretvol0', secret=k8s.V1SecretVolumeSource(secret_name='secret_b', items=items)
         ),
         k8s.V1VolumeMount(mount_path='/etc/foo', name='secretvol0', read_only=True),
     )
Example #6
    def test_extract_env_and_secrets(self):
        # Test when secrets and envs are not empty
        secrets = [
            Secret('env', None, 's1'),
            Secret('volume', 'KEY2', 's2', 'key-2'),
            Secret('env', None, 's3')
        ]
        envs = {
            'ENV1': 'val1',
            'ENV2': 'val2'
        }
        configmaps = ['configmap_a', 'configmap_b']
        pod_runtime_envs = [PodRuntimeInfoEnv("ENV3", "status.podIP")]
        pod = Pod(
            image='v3.14',
            envs=envs,
            cmds=[],
            secrets=secrets,
            configmaps=configmaps,
            pod_runtime_info_envs=pod_runtime_envs)
        self.expected['spec']['containers'][0]['env'] = [
            {'name': 'ENV1', 'value': 'val1'},
            {'name': 'ENV2', 'value': 'val2'},
            {
                'name': 'ENV3',
                'valueFrom': {
                    'fieldRef': {
                        'fieldPath': 'status.podIP'
                    }
                }
            }
        ]
        self.expected['spec']['containers'][0]['envFrom'] = [{
            'secretRef': {
                'name': 's1'
            }
        }, {
            'secretRef': {
                'name': 's3'
            }
        }, {
            'configMapRef': {
                'name': 'configmap_a'
            }
        }, {
            'configMapRef': {
                'name': 'configmap_b'
            }
        }]

        KubernetesRequestFactory.extract_env_and_secrets(pod, self.input_req)
        self.input_req['spec']['containers'][0]['env'].sort(key=lambda x: x['name'])
        self.assertEqual(self.input_req, self.expected)
Example #7
 def test_to_volume_secret(self, mock_uuid):
     static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
     mock_uuid.return_value = static_uuid
     secret = Secret('volume', '/etc/foo', 'secret_b')
     self.assertEqual(
         secret.to_volume_secret(),
         (k8s.V1Volume(
             name='secretvol' + str(static_uuid),
             secret=k8s.V1SecretVolumeSource(secret_name='secret_b')),
          k8s.V1VolumeMount(mount_path='/etc/foo',
                            name='secretvol' + str(static_uuid),
                            read_only=True)))
Example #8
 def test_attach_to_pod(self, mock_uuid):
     mock_uuid.return_value = '0'
     pod = PodGenerator(image='airflow-worker:latest',
                        name='base').gen_pod()
     secrets = [
         # This should be a secretRef
         Secret('env', None, 'secret_a'),
         # This should be a single secret mounted in volumeMounts
         Secret('volume', '/etc/foo', 'secret_b'),
         # This should produce a single secret mounted in env
         Secret('env', 'TARGET', 'secret_b', 'source_b'),
     ]
     k8s_client = ApiClient()
     result = append_to_pod(pod, secrets)
     result = k8s_client.sanitize_for_serialization(result)
     self.assertEqual(result, {
         'apiVersion': 'v1',
         'kind': 'Pod',
         'metadata': {'name': 'base-0'},
         'spec': {
             'containers': [{
                 'args': [],
                 'command': [],
                 'env': [{
                     'name': 'TARGET',
                     'valueFrom': {
                         'secretKeyRef': {
                             'key': 'source_b',
                             'name': 'secret_b'
                         }
                     }
                 }],
                 'envFrom': [{'secretRef': {'name': 'secret_a'}}],
                 'image': 'airflow-worker:latest',
                 'imagePullPolicy': 'IfNotPresent',
                 'name': 'base',
                 'ports': [],
                 'volumeMounts': [{
                     'mountPath': '/etc/foo',
                     'name': 'secretvol0',
                     'readOnly': True}]
             }],
             'hostNetwork': False,
             'imagePullSecrets': [],
             'restartPolicy': 'Never',
             'volumes': [{
                 'name': 'secretvol0',
                 'secret': {'secretName': 'secret_b'}
             }]
         }
     })
Example #9
    def _get_secrets(self):
        """Defines any necessary secrets for the pod executor"""
        worker_secrets = []

        for env_var_name, obj_key_pair in self.kube_config.kube_secrets.items():
            k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=')
            worker_secrets.append(
                Secret('env', env_var_name, k8s_secret_obj, k8s_secret_key))

        if self.kube_config.env_from_secret_ref:
            for secret_ref in self.kube_config.env_from_secret_ref.split(','):
                worker_secrets.append(Secret('env', None, secret_ref))

        return worker_secrets
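The two branches of `_get_secrets` correspond to two configuration shapes; a hedged sketch of the airflow.cfg entries they would typically be read from (section and option names follow Airflow 1.10.x conventions and are assumptions here):

# airflow.cfg
[kubernetes_secrets]
# ENV_VAR_NAME = kubernetes-secret-name=key-within-that-secret
AWS_SECRET_KEY = airflow-secret=aws_secret_key

[kubernetes]
# comma-separated secret names, each exposed whole via an envFrom secretRef
env_from_secret_ref = secret_a,secret_b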
Example #10
 def test_envs_from_secrets(self, mock_client, launcher_mock):
     # GIVEN
     from airflow.utils.state import State
     secret_ref = 'secret_name'
     secrets = [Secret('env', None, secret_ref)]
     # WHEN
     k = KubernetesPodOperator(
         namespace='default',
         image="ubuntu:16.04",
         cmds=["bash", "-cx"],
         arguments=["echo 10"],
         secrets=secrets,
         labels={"foo": "bar"},
         name="test",
         task_id="task",
         in_cluster=False,
         do_xcom_push=False,
     )
     # THEN
     launcher_mock.return_value = (State.SUCCESS, None)
     k.execute(None)
     self.assertEqual(
         launcher_mock.call_args[0][0].spec.containers[0].env_from, [
             k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(
                 name=secret_ref))
         ])
Example #11
def PodOperator(*args, **kwargs):
    # TODO: tune this, and add resource limits
    namespace = kwargs.pop("namespace", "default")

    is_gke = kwargs.pop("is_gke", False)  # we want to always pop()

    if "secrets" in kwargs:
        kwargs["secrets"] = map(lambda d: Secret(**d), kwargs["secrets"])

    if is_development() or is_gke:
        return GKEPodOperator(
            *args,
            in_cluster=False,
            project_id="cal-itp-data-infra",  # there currently isn't a staging cluster
            location=kwargs.pop("pod_location", os.environ["POD_LOCATION"]),
            cluster_name=kwargs.pop("cluster_name",
                                    os.environ["POD_CLUSTER_NAME"]),
            namespace=namespace,
            image_pull_policy="Always" if is_development() else "IfNotPresent",
            **kwargs,
        )

    else:
        return KubernetesPodOperator(*args, namespace=namespace, **kwargs)
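Because the factory maps plain dicts onto `Secret(**d)`, callers can declare secrets without importing the class; a hedged usage sketch (task, image, and secret names are illustrative):

task = PodOperator(
    task_id="load_data",        # illustrative
    name="load-data",
    image="my-image:latest",    # illustrative
    secrets=[{
        "deploy_type": "env",   # dict keys mirror Secret's constructor kwargs
        "deploy_target": "DB_PASSWORD",
        "secret": "db-credentials",
        "key": "password",
    }],
)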
Example #12
    def test_envs_from_secrets(self, mock_client, monitor_mock, start_mock):
        # GIVEN
        from airflow.utils.state import State

        secret_ref = 'secret_name'
        secrets = [Secret('env', None, secret_ref)]
        # WHEN
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            secrets=secrets,
            labels={"foo": "bar"},
            name="test-" + str(random.randint(0, 1000000)),
            task_id="task" + self.get_current_task_name(),
            in_cluster=False,
            do_xcom_push=False,
        )
        # THEN
        monitor_mock.return_value = (State.SUCCESS, None)
        context = create_context(k)
        k.execute(context)
        self.assertEqual(
            start_mock.call_args[0][0].spec.containers[0].env_from,
            [
                k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(
                    name=secret_ref))
            ],
        )
Example #13
 def test_envs_from_secrets(self, mock_client, monitor_mock, start_mock):
     # GIVEN
     secret_ref = 'secret_name'
     secrets = [Secret('env', None, secret_ref)]
     # WHEN
     k = KubernetesPodOperator(
         namespace='default',
         image="ubuntu:16.04",
         cmds=["bash", "-cx"],
         arguments=["echo 10"],
         secrets=secrets,
         labels={"foo": "bar"},
         name="test",
         task_id="task",
         in_cluster=False,
         do_xcom_push=False,
     )
     # THEN
     monitor_mock.return_value = (State.SUCCESS, None, None)
     context = create_context(k)
     k.execute(context)
     assert start_mock.call_args[0][0].spec.containers[0].env_from == [
         k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(
             name=secret_ref))
     ]
Example #14
def _extract_env_secret(env_var):
    if env_var.value_from and env_var.value_from.secret_key_ref:
        secret = env_var.value_from.secret_key_ref  # type: k8s.V1SecretKeySelector
        name = secret.name
        key = secret.key
        return Secret("env", deploy_target=env_var.name, secret=name, key=key)
    return None
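`_extract_env_secret` inverts `Secret.to_env_secret`; a minimal round-trip sketch under that assumption (Secret compares by field values, as the assertListEqual-based tests above rely on):

from airflow.kubernetes.secret import Secret

original = Secret('env', 'TARGET', 'secret_b', 'source_b')
env_var = original.to_env_secret()        # k8s.V1EnvVar carrying a secretKeyRef
recovered = _extract_env_secret(env_var)  # rebuilds the Secret from the env var
assert recovered == original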
Example #15
    def test_envs_from_secrets(self, mock_client, await_pod_completion_mock,
                               create_mock):
        # GIVEN
        secret_ref = 'secret_name'
        secrets = [Secret('env', None, secret_ref)]
        # WHEN
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            secrets=secrets,
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
        )
        # THEN

        mock_pod = MagicMock()
        mock_pod.status.phase = 'Succeeded'
        await_pod_completion_mock.return_value = mock_pod
        context = create_context(k)
        k.execute(context)
        assert create_mock.call_args[1]['pod'].spec.containers[0].env_from == [
            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(
                name=secret_ref))
        ]
Example #16
    def test_envs_from_secrets(self, mock_client, await_pod_completion_mock,
                               create_pod):
        # GIVEN

        secret_ref = 'secret_name'
        secrets = [Secret('env', None, secret_ref)]
        # WHEN
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            secrets=secrets,
            labels={"foo": "bar"},
            name="test-" + str(random.randint(0, 1000000)),
            task_id="task" + self.get_current_task_name(),
            in_cluster=False,
            do_xcom_push=False,
        )
        # THEN
        await_pod_completion_mock.return_value = None
        context = create_context(k)
        with pytest.raises(AirflowException):
            k.execute(context)
        assert create_pod.call_args[1]['pod'].spec.containers[0].env_from == [
            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(
                name=secret_ref))
        ]
Example #17
    def test_extract_env_and_secrets(self):
        # Test when secrets and envs are not empty
        secrets = [
            Secret('env', None, 's1'),
            Secret('volume', 'KEY2', 's2', 'key-2'),
            Secret('env', None, 's3')
        ]
        envs = {'ENV1': 'val1', 'ENV2': 'val2'}
        configmaps = ['configmap_a', 'configmap_b']
        pod = Pod('v3.14', envs, [], secrets=secrets, configmaps=configmaps)
        self.expected['spec']['containers'][0]['env'] = [
            {
                'name': 'ENV1',
                'value': 'val1'
            },
            {
                'name': 'ENV2',
                'value': 'val2'
            },
        ]
        self.expected['spec']['containers'][0]['envFrom'] = [{
            'secretRef': {
                'name': 's1'
            }
        }, {
            'secretRef': {
                'name': 's3'
            }
        }, {
            'configMapRef': {
                'name': 'configmap_a'
            }
        }, {
            'configMapRef': {
                'name': 'configmap_b'
            }
        }]

        KubernetesRequestFactory.extract_env_and_secrets(pod, self.input_req)
        self.input_req['spec']['containers'][0]['env'].sort(
            key=lambda x: x['name'])
        self.assertEqual(self.input_req, self.expected)
Example #18
 def test_add_secret_to_env(self):
     secret = Secret('env', 'target', 'my-secret', 'KEY')
     secret_list = []
     self.expected = [{
         'name': 'TARGET',
         'valueFrom': {
             'secretKeyRef': {
                 'name': 'my-secret',
                 'key': 'KEY'
             }
         }
     }]
     KubernetesRequestFactory.add_secret_to_env(secret_list, secret)
     self.assertListEqual(secret_list, self.expected)
Example #19
 def test_envs_from_secrets(self, mock_client, launcher_mock):
     # GIVEN
     from airflow.utils.state import State
     secrets = [Secret('env', None, "secret_name")]
     # WHEN
     k = KubernetesPodOperator(
         namespace='default',
         image="ubuntu:16.04",
         cmds=["bash", "-cx"],
         arguments=["echo 10"],
         secrets=secrets,
         labels={"foo": "bar"},
         name="test",
         task_id="task",
     )
     # THEN
     launcher_mock.return_value = (State.SUCCESS, None)
     k.execute(None)
     self.assertEqual(launcher_mock.call_args[0][0].secrets, secrets)
Example #20
    def test_pod_with_volume_secret(self):
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            in_cluster=False,
            labels={"foo": "bar"},
            arguments=["echo 10"],
            secrets=[
                Secret(
                    deploy_type="volume",
                    deploy_target="/var/location",
                    secret="my-secret",
                    key="content.json",
                )
            ],
            name="airflow-test-pod",
            task_id="task",
            get_logs=True,
            is_delete_operator_pod=True,
        )

        context = self.create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['containers'][0]['volumeMounts'] = [{
            'mountPath': '/var/location',
            'name': mock.ANY,
            'readOnly': True
        }]
        self.expected_pod['spec']['volumes'] = [{
            'name': mock.ANY,
            'secret': {
                'secretName': 'my-secret'
            }
        }]
        self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
        self.assertEqual(self.expected_pod['metadata']['labels'],
                         actual_pod['metadata']['labels'])
Example #21
    "retry_delay": timedelta(minutes=5),
    "env_vars": {
        "AWS_DEFAULT_REGION": AWS_DEFAULT_REGION,
        "DB_HOSTNAME": DB_HOSTNAME,
        "DB_PORT": DB_PORT,
        "BACKUP_PATH": BACKUP_PATH,
        "DATESTRING": DATESTRING,
        "S3_BUCKET": S3_BUCKET,
        "S3_PREFIX": S3_PREFIX,
        "S3_KEY": S3_KEY,
    },
}

# Lift secrets into environment variables for datacube database connectivity
SECRET_RESTORE_INCREMENTAL_SYNC = [
    Secret("env", "DB_DATABASE", SECRET_EXPLORER_NCI_ADMIN_NAME,
           "database-name"),
    Secret("env", "DB_ADMIN_USER", SECRET_EXPLORER_NCI_ADMIN_NAME,
           "postgres-username"),
    Secret("env", "DB_ADMIN_PASSWORD", SECRET_EXPLORER_NCI_ADMIN_NAME,
           "postgres-password"),
]

SECRET_INDEXER = [
    Secret("env", "DB_DATABASE", SECRET_EXPLORER_NCI_ADMIN_NAME,
           "database-name"),
    Secret("env", "DB_USERNAME", SECRET_EXPLORER_NCI_ADMIN_NAME,
           "postgres-username"),
    Secret("env", "DB_PASSWORD", SECRET_EXPLORER_NCI_ADMIN_NAME,
           "postgres-password"),
]
Example #22
 1,
 "retry_delay": timedelta(minutes=5),
 "index_sqs_queue": SENTINEL_2_ARD_INDEXING_SQS_QUEUE_NAME_SANDBOX_DB,
 "products": "s2a_ard_granule s2b_ard_granule",
 "env_vars": {
     "DB_HOSTNAME": "db-writer",
     "DB_DATABASE": "sandbox",
 },
 # Lift secrets into environment variables
 "secrets": [
     Secret(
         "env",
         "DB_USERNAME",
         "odc-writer",
         "postgres-username",
     ),
     Secret(
         "env",
         "DB_PASSWORD",
         "odc-writer",
         "postgres-password",
     ),
     Secret(
         "env",
         "AWS_DEFAULT_REGION",
         SENTINEL_2_ARD_INDEXING_AWS_USER_SECRET,
         "AWS_DEFAULT_REGION",
     ),
     Secret(
Example #23
    "retry_delay":
    timedelta(minutes=5),
    "env_vars": {
        "AWS_DEFAULT_REGION": "ap-southeast-2",
        "S3_KEY": S3_KEY,
        "DB_HOSTNAME": DB_HOSTNAME,
        "DB_DATABASE": DB_DATABASE,
        "DB_PORT": "5432",
        "BACKUP_PATH": BACKUP_PATH,
        "DATESTRING": DATESTRING
    },
    # Use K8S secrets to send DB Creds
    # Lift secrets into environment variables for datacube database connectivity
    # Use this db-users to create scratch explorer-nci database
    "secrets": [
        Secret("env", "DB_USERNAME", "explorer-nci-admin",
               "postgres-username"),
        Secret("env", "DB_PASSWORD", "explorer-nci-admin",
               "postgres-password"),
    ],
}

dag = DAG(
    "k8s_nci_db_sync",
    doc_md=__doc__,
    default_args=DEFAULT_ARGS,
    catchup=False,
    concurrency=1,
    max_active_runs=1,
    tags=["k8s"],
    schedule_interval=timedelta(days=7),
)
Example #24
    def make_task(operator: str, task_params: Dict[str, Any]) -> BaseOperator:
        """
        Takes an operator and params and creates an instance of that operator.

        :returns: instance of operator object
        """
        try:
            # class is a Callable https://stackoverflow.com/a/34578836/3679900
            operator_obj: Callable[..., BaseOperator] = import_string(operator)
        except Exception as err:
            raise Exception(f"Failed to import operator: {operator}") from err
        try:
            if operator_obj in [PythonOperator, BranchPythonOperator]:
                if not task_params.get(
                        "python_callable_name") and not task_params.get(
                            "python_callable_file"):
                    raise Exception(
                        "Failed to create task. PythonOperator and BranchPythonOperator require "
                        "`python_callable_name` and `python_callable_file` parameters."
                    )
                task_params[
                    "python_callable"]: Callable = utils.get_python_callable(
                        task_params["python_callable_name"],
                        task_params["python_callable_file"],
                    )
                # remove dag-factory specific parameters
                # Airflow 2.0 doesn't allow these to be passed to operator
                del task_params["python_callable_name"]
                del task_params["python_callable_file"]

            # Check for the custom success and failure callables in SqlSensor. These are
            # considered optional, so it is not an error if they aren't found. Note: there's no
            # reason to declare both a callable file and a lambda function for the
            # success/failure parameters. If both are found, the object will not throw an error;
            # instead, the callable file takes precedence over the lambda function.
            if operator_obj in [SqlSensor]:
                # Success checks
                if task_params.get("success_check_file") and task_params.get(
                        "success_check_name"):
                    task_params[
                        "success"]: Callable = utils.get_python_callable(
                            task_params["success_check_name"],
                            task_params["success_check_file"],
                        )
                    del task_params["success_check_name"]
                    del task_params["success_check_file"]
                elif task_params.get("success_check_lambda"):
                    task_params[
                        "success"]: Callable = utils.get_python_callable_lambda(
                            task_params["success_check_lambda"])
                    del task_params["success_check_lambda"]
                # Failure checks
                if task_params.get("failure_check_file") and task_params.get(
                        "failure_check_name"):
                    task_params[
                        "failure"]: Callable = utils.get_python_callable(
                            task_params["failure_check_name"],
                            task_params["failure_check_file"],
                        )
                    del task_params["failure_check_name"]
                    del task_params["failure_check_file"]
                elif task_params.get("failure_check_lambda"):
                    task_params[
                        "failure"]: Callable = utils.get_python_callable_lambda(
                            task_params["failure_check_lambda"])
                    del task_params["failure_check_lambda"]

            if operator_obj in [HttpSensor]:
                if not (task_params.get("response_check_name")
                        and task_params.get("response_check_file")
                        ) and not task_params.get("response_check_lambda"):
                    raise Exception(
                        "Failed to create task. HttpSensor requires "
                        "`response_check_name` and `response_check_file` parameters "
                        "or `response_check_lambda` parameter.")
                if task_params.get("response_check_file"):
                    task_params[
                        "response_check"]: Callable = utils.get_python_callable(
                            task_params["response_check_name"],
                            task_params["response_check_file"],
                        )
                    # remove dag-factory specific parameters
                    # Airflow 2.0 doesn't allow these to be passed to operator
                    del task_params["response_check_name"]
                    del task_params["response_check_file"]
                else:
                    task_params[
                        "response_check"]: Callable = utils.get_python_callable_lambda(
                            task_params["response_check_lambda"])
                    # remove dag-factory specific parameters
                    # Airflow 2.0 doesn't allow these to be passed to operator
                    del task_params["response_check_lambda"]

            # KubernetesPodOperator
            if operator_obj == KubernetesPodOperator:
                task_params["secrets"] = ([
                    Secret(**v) for v in task_params.get("secrets")
                ] if task_params.get("secrets") is not None else None)

                task_params["ports"] = ([
                    Port(**v) for v in task_params.get("ports")
                ] if task_params.get("ports") is not None else None)
                task_params["volume_mounts"] = ([
                    VolumeMount(**v) for v in task_params.get("volume_mounts")
                ] if task_params.get("volume_mounts") is not None else None)
                task_params["volumes"] = ([
                    Volume(**v) for v in task_params.get("volumes")
                ] if task_params.get("volumes") is not None else None)
                task_params["pod_runtime_info_envs"] = ([
                    PodRuntimeInfoEnv(**v)
                    for v in task_params.get("pod_runtime_info_envs")
                ] if task_params.get("pod_runtime_info_envs") is not None else
                                                        None)
                task_params["full_pod_spec"] = (
                    V1Pod(**task_params.get("full_pod_spec"))
                    if task_params.get("full_pod_spec") is not None else None)
                task_params["init_containers"] = ([
                    V1Container(**v)
                    for v in task_params.get("init_containers")
                ] if task_params.get("init_containers") is not None else None)

            if utils.check_dict_key(task_params, "execution_timeout_secs"):
                task_params["execution_timeout"]: timedelta = timedelta(
                    seconds=task_params["execution_timeout_secs"])
                del task_params["execution_timeout_secs"]

            if utils.check_dict_key(task_params, "sla_secs"):
                task_params["sla"]: timedelta = timedelta(
                    seconds=task_params["sla_secs"])
                del task_params["sla_secs"]

            if utils.check_dict_key(task_params, "execution_delta_secs"):
                task_params["execution_delta"]: timedelta = timedelta(
                    seconds=task_params["execution_delta_secs"])
                del task_params["execution_delta_secs"]

            if utils.check_dict_key(
                    task_params,
                    "execution_date_fn_name") and utils.check_dict_key(
                        task_params, "execution_date_fn_file"):
                task_params[
                    "execution_date_fn"]: Callable = utils.get_python_callable(
                        task_params["execution_date_fn_name"],
                        task_params["execution_date_fn_file"],
                    )
                del task_params["execution_date_fn_name"]
                del task_params["execution_date_fn_file"]

            # on_execute_callback is an Airflow 2.0 feature
            if utils.check_dict_key(
                    task_params, "on_execute_callback"
            ) and version.parse(AIRFLOW_VERSION) >= version.parse("2.0.0"):
                task_params["on_execute_callback"]: Callable = import_string(
                    task_params["on_execute_callback"])

            if utils.check_dict_key(task_params, "on_failure_callback"):
                task_params["on_failure_callback"]: Callable = import_string(
                    task_params["on_failure_callback"])

            if utils.check_dict_key(task_params, "on_success_callback"):
                task_params["on_success_callback"]: Callable = import_string(
                    task_params["on_success_callback"])

            if utils.check_dict_key(task_params, "on_retry_callback"):
                task_params["on_retry_callback"]: Callable = import_string(
                    task_params["on_retry_callback"])

            # use variables as arguments on operator
            if utils.check_dict_key(task_params, "variables_as_arguments"):
                variables: List[Dict[str, str]] = task_params.get(
                    "variables_as_arguments")
                for variable in variables:
                    if Variable.get(variable["variable"],
                                    default_var=None) is not None:
                        task_params[variable["attribute"]] = Variable.get(
                            variable["variable"], default_var=None)
                del task_params["variables_as_arguments"]

            task: BaseOperator = operator_obj(**task_params)
        except Exception as err:
            raise Exception(f"Failed to create {operator_obj} task") from err
        return task
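Downstream of this factory, a KubernetesPodOperator entry carries its Kubernetes objects as plain dicts; a hedged sketch of the `task_params` shape `make_task` accepts (all names illustrative):

task_params = {
    "task_id": "example_task",
    "name": "example-pod",
    "namespace": "default",
    "image": "my-image:latest",
    "secrets": [{                   # each dict becomes Secret(**v)
        "deploy_type": "env",
        "deploy_target": "DB_PASSWORD",
        "secret": "db-credentials",
        "key": "password",
    }],
    "execution_timeout_secs": 300,  # converted to a timedelta, then removed
}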
Example #25
from infra.images import OWS_CONFIG_IMAGE, OWS_IMAGE
from subdags.podconfig import (
    OWS_CFG_PATH,
    OWS_CFG_MOUNT_PATH,
    OWS_CFG_IMAGEPATH,
    OWS_DATACUBE_CFG,
    OWS_PYTHON_PATH,
    OWS_CFG_FOLDER_PATH,
)
from infra.podconfig import ONDEMAND_NODE_AFFINITY
from webapp_update.update_list import UPDATE_EXTENT_PRODUCTS
from infra.variables import SECRET_OWS_WRITER_NAME

OWS_SECRETS = [
    Secret("env", "DB_USERNAME", SECRET_OWS_WRITER_NAME, "postgres-username"),
    Secret("env", "DB_PASSWORD", SECRET_OWS_WRITER_NAME, "postgres-password"),
]

# Mount OWS_CFG via init_container;
# the same volume is mounted into the main container
ows_cfg_mount = VolumeMount("ows-config-volume",
                            mount_path=OWS_CFG_MOUNT_PATH,
                            sub_path=None,
                            read_only=False)

ows_cfg_volume_config = {}

ows_cfg_volume = Volume(name="ows-config-volume",
                        configs=ows_cfg_volume_config)
Example #26
    def test_to_v1_pod(self, mock_uuid):
        from airflow.contrib.kubernetes.pod import Pod as DeprecatedPod
        from airflow.kubernetes.volume import Volume
        from airflow.kubernetes.volume_mount import VolumeMount
        from airflow.kubernetes.secret import Secret
        from airflow.kubernetes.pod import Resources
        import uuid
        static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
        mock_uuid.return_value = static_uuid

        pod = DeprecatedPod(
            image="foo",
            name="bar",
            namespace="baz",
            image_pull_policy="Never",
            envs={"test_key": "test_value"},
            cmds=["airflow"],
            resources=Resources(request_memory="1G",
                                request_cpu="100Mi",
                                limit_gpu="100G"),
            init_containers=k8s.V1Container(name="test-container",
                                            volume_mounts=k8s.V1VolumeMount(
                                                mount_path="/foo/bar",
                                                name="init-volume-secret")),
            volumes=[
                Volume(name="foo", configs={}), {
                    "name": "bar",
                    'secret': {
                        'secretName': 'volume-secret'
                    }
                }
            ],
            secrets=[
                Secret("volume", None, "init-volume-secret"),
                Secret('env', "AIRFLOW_SECRET", 'secret_name',
                       "airflow_config"),
                Secret("volume", "/opt/airflow", "volume-secret", "secret-key")
            ],
            volume_mounts=[
                VolumeMount(name="foo",
                            mount_path="/mnt",
                            sub_path="/",
                            read_only=True)
            ])

        k8s_client = ApiClient()

        result = pod.to_v1_kubernetes_pod()
        result = k8s_client.sanitize_for_serialization(result)

        expected = \
            {'metadata': {'labels': {}, 'name': 'bar', 'namespace': 'baz'},
             'spec': {'affinity': {},
                      'containers': [{'args': [],
                                      'command': ['airflow'],
                                      'env': [{'name': 'test_key', 'value': 'test_value'},
                                              {'name': 'AIRFLOW_SECRET',
                                               'valueFrom': {'secretKeyRef': {'key': 'airflow_config',
                                                                              'name': 'secret_name'}}}],
                                      'image': 'foo',
                                      'imagePullPolicy': 'Never',
                                      'name': 'base',
                                      'resources': {'limits': {'nvidia.com/gpu': '100G'},
                                                    'requests': {'cpu': '100Mi',
                                                                 'memory': '1G'}},
                                      'volumeMounts': [{'mountPath': '/mnt',
                                                        'name': 'foo',
                                                        'readOnly': True,
                                                        'subPath': '/'},
                                                       {'mountPath': '/opt/airflow',
                                                       'name': 'secretvol' + str(static_uuid),
                                                        'readOnly': True}]}],
                      'hostNetwork': False,
                      'initContainers': {'name': 'test-container',
                                         'volumeMounts': {'mountPath': '/foo/bar',
                                                          'name': 'init-volume-secret'}},
                      'securityContext': {},
                      'tolerations': [],
                      'volumes': [{'name': 'foo'},
                                  {'name': 'bar',
                                   'secret': {'secretName': 'volume-secret'}},
                                  {'name': 'secretvolcf4a56d2-8101-4217-b027-2af6216feb48',
                                   'secret': {'secretName': 'init-volume-secret'}},
                                  {'name': 'secretvol' + str(static_uuid),
                                   'secret': {'secretName': 'volume-secret'}}
                                  ]}}
        self.maxDiff = None
        self.assertEqual(expected, result)
Example #27
    def make_task(operator: str, task_params: Dict[str, Any]) -> BaseOperator:
        """
        Takes an operator and params and creates an instance of that operator.

        :returns: instance of operator object
        """
        try:
            # class is a Callable https://stackoverflow.com/a/34578836/3679900
            operator_obj: Callable[..., BaseOperator] = import_string(operator)
        except Exception as err:
            raise f"Failed to import operator: {operator}" from err
        try:
            if operator_obj == PythonOperator:
                if not task_params.get(
                        "python_callable_name") and not task_params.get(
                            "python_callable_file"):
                    raise Exception(
                        "Failed to create task. PythonOperator requires "
                        "`python_callable_name` and `python_callable_file` parameters.")
                task_params[
                    "python_callable"]: Callable = utils.get_python_callable(
                        task_params["python_callable_name"],
                        task_params["python_callable_file"],
                    )

            # KubernetesPodOperator
            if operator_obj == KubernetesPodOperator:
                task_params["secrets"] = ([
                    Secret(**v) for v in task_params.get("secrets")
                ] if task_params.get("secrets") is not None else None)

                task_params["ports"] = ([
                    Port(**v) for v in task_params.get("ports")
                ] if task_params.get("ports") is not None else None)
                task_params["volume_mounts"] = ([
                    VolumeMount(**v) for v in task_params.get("volume_mounts")
                ] if task_params.get("volume_mounts") is not None else None)
                task_params["volumes"] = ([
                    Volume(**v) for v in task_params.get("volumes")
                ] if task_params.get("volumes") is not None else None)
                task_params["pod_runtime_info_envs"] = ([
                    PodRuntimeInfoEnv(**v)
                    for v in task_params.get("pod_runtime_info_envs")
                ] if task_params.get("pod_runtime_info_envs") is not None else
                                                        None)
                task_params["full_pod_spec"] = (
                    V1Pod(**task_params.get("full_pod_spec"))
                    if task_params.get("full_pod_spec") is not None else None)
                task_params["init_containers"] = ([
                    V1Container(**v)
                    for v in task_params.get("init_containers")
                ] if task_params.get("init_containers") is not None else None)

            if utils.check_dict_key(task_params, "execution_timeout_secs"):
                task_params["execution_timeout"]: timedelta = timedelta(
                    seconds=task_params["execution_timeout_secs"])
                del task_params["execution_timeout_secs"]

            # use variables as arguments on operator
            if utils.check_dict_key(task_params, "variables_as_arguments"):
                variables: List[Dict[str, str]] = task_params.get(
                    "variables_as_arguments")
                for variable in variables:
                    if Variable.get(variable["variable"],
                                    default_var=None) is not None:
                        task_params[variable["attribute"]] = Variable.get(
                            variable["variable"], default_var=None)
                del task_params["variables_as_arguments"]

            task: BaseOperator = operator_obj(**task_params)
        except Exception as err:
            raise f"Failed to create {operator_obj} task" from err
        return task
Example #28
from airflow import models
from airflow.kubernetes.secret import Secret
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator

# A Secret is an object that contains a small amount of sensitive data such as
# a password, a token, or a key. Such information might otherwise be put in a
# Pod specification or in an image; putting it in a Secret object allows for
# more control over how it is used, and reduces the risk of accidental
# exposure.

# [START composer_kubernetespodoperator_secretobject]
secret_env = Secret(
    # Expose the secret as environment variable.
    deploy_type='env',
    # The name of the environment variable, since deploy_type is `env` rather
    # than `volume`.
    deploy_target='SQL_CONN',
    # Name of the Kubernetes Secret
    secret='airflow-secrets',
    # Key of a secret stored in this Secret object
    key='sql_alchemy_conn')
secret_volume = Secret(
    deploy_type='volume',
    # Path where we mount the secret as volume
    deploy_target='/var/secrets/google',
    # Name of Kubernetes Secret
    secret='service-account',
    # Key in the form of service account file name
    key='service-account.json')
# [END composer_kubernetespodoperator_secretobject]
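Both objects take effect only when handed to the operator's `secrets` parameter; a minimal sketch (task name and image are illustrative, not part of the original sample):

kubernetes_secret_vars = KubernetesPodOperator(
    task_id='ex-kube-secrets',   # illustrative
    name='ex-kube-secrets',
    namespace='default',
    image='ubuntu',
    # SQL_CONN arrives as an environment variable; the service-account key
    # is mounted read-only under /var/secrets/google.
    secrets=[secret_env, secret_volume],
)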

# If you are running Airflow in more than one time zone
Example #29
    def setUp(self):
        self.static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
        self.deserialize_result = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {'name': 'memory-demo', 'namespace': 'mem-example'},
            'spec': {
                'containers': [
                    {
                        'args': ['--vm', '1', '--vm-bytes', '150M', '--vm-hang', '1'],
                        'command': ['stress'],
                        'image': 'apache/airflow:stress-2020.07.10-1.0.4',
                        'name': 'memory-demo-ctr',
                        'resources': {'limits': {'memory': '200Mi'}, 'requests': {'memory': '100Mi'}},
                    }
                ]
            },
        }

        self.envs = {'ENVIRONMENT': 'prod', 'LOG_LEVEL': 'warning'}
        self.secrets = [
            # This should be a secretRef
            Secret('env', None, 'secret_a'),
            # This should be a single secret mounted in volumeMounts
            Secret('volume', '/etc/foo', 'secret_b'),
            # This should produce a single secret mounted in env
            Secret('env', 'TARGET', 'secret_b', 'source_b'),
        ]

        self.execution_date = parser.parse('2020-08-24 00:00:00.000000')
        self.execution_date_label = datetime_to_label_safe_datestring(self.execution_date)
        self.dag_id = 'dag_id'
        self.task_id = 'task_id'
        self.try_number = 3
        self.labels = {
            'airflow-worker': 'uuid',
            'dag_id': self.dag_id,
            'execution_date': self.execution_date_label,
            'task_id': self.task_id,
            'try_number': str(self.try_number),
            'airflow_version': __version__.replace('+', '-'),
            'kubernetes_executor': 'True',
        }
        self.annotations = {
            'dag_id': self.dag_id,
            'task_id': self.task_id,
            'execution_date': self.execution_date.isoformat(),
            'try_number': str(self.try_number),
        }
        self.metadata = {
            'labels': self.labels,
            'name': 'pod_id-' + self.static_uuid.hex,
            'namespace': 'namespace',
            'annotations': self.annotations,
        }

        self.resources = k8s.V1ResourceRequirements(
            requests={
                "cpu": 1,
                "memory": "1Gi",
                "ephemeral-storage": "2Gi",
            },
            limits={"cpu": 2, "memory": "2Gi", "ephemeral-storage": "4Gi", 'nvidia.com/gpu': 1},
        )

        self.k8s_client = ApiClient()
        self.expected = k8s.V1Pod(
            api_version="v1",
            kind="Pod",
            metadata=k8s.V1ObjectMeta(
                namespace="default",
                name='myapp-pod-' + self.static_uuid.hex,
                labels={'app': 'myapp'},
            ),
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name='base',
                        image='busybox',
                        command=['sh', '-c', 'echo Hello Kubernetes!'],
                        env=[
                            k8s.V1EnvVar(name='ENVIRONMENT', value='prod'),
                            k8s.V1EnvVar(
                                name="LOG_LEVEL",
                                value='warning',
                            ),
                            k8s.V1EnvVar(
                                name='TARGET',
                                value_from=k8s.V1EnvVarSource(
                                    secret_key_ref=k8s.V1SecretKeySelector(name='secret_b', key='source_b')
                                ),
                            ),
                        ],
                        env_from=[
                            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='configmap_a')),
                            k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='configmap_b')),
                            k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name='secret_a')),
                        ],
                        ports=[k8s.V1ContainerPort(name="foo", container_port=1234)],
                        resources=k8s.V1ResourceRequirements(
                            requests={'memory': '100Mi'},
                            limits={
                                'memory': '200Mi',
                            },
                        ),
                    )
                ],
                security_context=k8s.V1PodSecurityContext(
                    fs_group=2000,
                    run_as_user=1000,
                ),
                host_network=True,
                image_pull_secrets=[
                    k8s.V1LocalObjectReference(name="pull_secret_a"),
                    k8s.V1LocalObjectReference(name="pull_secret_b"),
                ],
            ),
        )
Example #30
from airflow.kubernetes.secret import Secret
from airflow.utils.dates import days_ago

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': days_ago(2),
    'email': ['*****@*****.**'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 2,
    'retry_delay': timedelta(hours=1)
}

POSTGRES_SECRETS = [
    Secret('env', 'DB_USER', 'keycloak-db-credentials', 'username'),
    Secret('env', 'DB_PASSWORD', 'keycloak-db-credentials', 'password')
]

S3_SECRETS = [
    Secret('env', 'S3_ACCESS_KEY', 's3-credentials', 'S3_ACCESS_KEY'),
    Secret('env', 'S3_SECRET_KEY', 's3-credentials', 'S3_SECRET_KEY')
]

with DAG(
    'keycloak-backups',
    default_args=default_args,
    description='DAG to manage keycloak database backups',
    schedule_interval=timedelta(hours=4)) as dag:

    backup = KubernetesPodOperator(