class TestGKEPodOperator(unittest.TestCase):
    def setUp(self):
        self.gke_op = GKEStartPodOperator(project_id=TEST_GCP_PROJECT_ID,
                                          location=PROJECT_LOCATION,
                                          cluster_name=CLUSTER_NAME,
                                          task_id=PROJECT_TASK_ID,
                                          name=TASK_NAME,
                                          namespace=NAMESPACE,
                                          image=IMAGE)

    def test_template_fields(self):
        self.assertTrue(
            set(KubernetesPodOperator.template_fields).issubset(
                GKEStartPodOperator.template_fields))

    # pylint: disable=unused-argument
    @mock.patch.dict(os.environ, {})
    @mock.patch("airflow.hooks.base_hook.BaseHook.get_connections",
                return_value=[
                    Connection(extra=json.dumps({
                        "extra__google_cloud_platform__keyfile_dict":
                        '{"private_key": "r4nd0m_k3y"}'
                    }))
                ])
    @mock.patch(
        'airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute'
    )
    @mock.patch(
        'airflow.providers.google.cloud.operators.kubernetes_engine.CloudBaseHook'
    )
    @mock.patch(
        'airflow.providers.google.cloud.operators.kubernetes_engine.execute_in_subprocess'
    )
    @mock.patch('tempfile.NamedTemporaryFile')
    def test_execute(self, file_mock, mock_execute_in_subprocess,
                     mock_gcp_hook, exec_mock, get_con_mock):
        type(file_mock.return_value.__enter__.return_value).name = PropertyMock(
            side_effect=[FILE_NAME, '/path/to/new-file'])

        self.gke_op.execute(None)

        mock_gcp_hook.return_value.provide_authorized_gcloud.assert_called_once()

        mock_execute_in_subprocess.assert_called_once_with(
            GCLOUD_COMMAND.format(CLUSTER_NAME, PROJECT_LOCATION,
                                  TEST_GCP_PROJECT_ID).split())

        self.assertEqual(self.gke_op.config_file, FILE_NAME)
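
# Note: the tests in this file reference several module-level fixtures that
# this excerpt omits. The following is a plausible reconstruction, inferred
# from the assertions (the concrete values, e.g. FILE_NAME, are assumptions
# rather than copies of the original test module):
CREDENTIALS = "GOOGLE_APPLICATION_CREDENTIALS"  # env var gcloud reads for auth
KUBE_ENV_VAR = "KUBECONFIG"  # env var the Kubernetes client reads for config
FILE_NAME = "/tmp/kubeconfig"  # the name yielded by the mocked temp file
GCLOUD_COMMAND = ("gcloud container clusters get-credentials {} "
                  "--zone {} --project {}")  # matches the three .format() args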
def build_gke_start_pod_operator(operator_ref, dag_ref):
    """
    Builds a DAG operator of type: GKEStartPodOperator.
    Args:
        operator_ref (dict): the definition of the operator
        dag_ref (airflow.models.DAG): the DAG to associate this operator with
    """
    op = GKEStartPodOperator(task_id=operator_ref['task_id'],
                             name=operator_ref['name'],
                             image=operator_ref['image'],
                             cluster_name=operator_ref['cluster_name'],
                             project_id=operator_ref['project_id'],
                             location=operator_ref['location'],
                             namespace=operator_ref['namespace']
                             if 'namespace' in operator_ref else 'default',
                             dag=dag_ref)

    # populate non-default operator values
    if 'cmds' in operator_ref:
        op.cmds = operator_ref['cmds']

    if 'arguments' in operator_ref:
        op.arguments = operator_ref['arguments']

    if 'env_vars' in operator_ref:
        op.env_vars = operator_ref['env_vars']

    if 'labels' in operator_ref:
        op.labels = operator_ref['labels']

    if 'startup_timeout_seconds' in operator_ref:
        op.startup_timeout_seconds = operator_ref['startup_timeout_seconds']

    if 'ports' in operator_ref:
        op.ports = operator_ref['ports']

    if 'params' in operator_ref:
        op.params = operator_ref['params']

    if 'node_selectors' in operator_ref:
        op.node_selectors = operator_ref['node_selectors']

    if 'resources' in operator_ref:
        op.resources = operator_ref['resources']

    if 'annotations' in operator_ref:
        op.annotations = operator_ref['annotations']

    if 'volumes' in operator_ref:
        op.volumes = operator_ref['volumes']

    if 'volume_mounts' in operator_ref:
        op.volume_mounts = operator_ref['volume_mounts']

    if 'affinity' in operator_ref:
        op.affinity = operator_ref['affinity']

    if 'configmaps' in operator_ref:
        op.configmaps = operator_ref['configmaps']

    # define pod secrets
    pod_secrets = []
    if 'pod_secret_refs' in operator_ref:
        for pod_secret in operator_ref['pod_secret_refs']:
            if not list(find_key_in_dict('kubernetes_secrets', payload)):
                raise ValueError(
                    f"Pod {operator_ref['name']} declares 'pod_secret_refs' but 'kubernetes_secrets' has not been defined."
                )

            secret_entry_ref = payload['kubernetes_secrets'][pod_secret]
            secret_entry = secret.Secret(
                # Deploy type: 'env' for environment variable or 'volume'
                deploy_type=secret_entry_ref['deploy_type'],
                # The name of the environment variable or the path of the volume
                deploy_target=secret_entry_ref['deploy_target'],
                # Name of the Kubernetes Secret
                secret=secret_entry_ref['secret'],
                # Key within the Secret object, or the service account file name
                key=secret_entry_ref['key'])
            pod_secrets.append(secret_entry)

        op.secrets = pod_secrets

    if 'image_pull_policy' in operator_ref:
        op.image_pull_policy = operator_ref['image_pull_policy']

    # define image pull secrets
    image_pull_secrets = []
    if 'image_pull_secret_refs' in operator_ref:
        for image_pull_secret in operator_ref['image_pull_secret_refs']:
            if not list(find_key_in_dict('kubernetes_secrets', payload)):
                raise ValueError(
                    f"Pod {operator_ref['name']} declares 'image_pull_secret_refs' but 'kubernetes_secrets' has not been defined."
                )

            secret_entry_ref = payload['kubernetes_secrets'][image_pull_secret]
            secret_entry = secret.Secret(
                # Deploy type: 'env' for environment variable or 'volume'
                deploy_type=secret_entry_ref['deploy_type'],
                # The name of the environment variable or the path of the volume
                deploy_target=secret_entry_ref['deploy_target'],
                # Name of the Kubernetes Secret
                secret=secret_entry_ref['secret'],
                # Key within the Secret object, or the service account file name
                key=secret_entry_ref['key'])
            image_pull_secrets.append(secret_entry)

        op.image_pull_secrets = image_pull_secrets

    return op
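
# A minimal usage sketch for build_gke_start_pod_operator. All names and
# values below are illustrative assumptions (not from the original module);
# `payload` is the module-level dict the function reads 'kubernetes_secrets'
# from, and `find_key_in_dict` is assumed to be a helper defined alongside it.
from airflow import models
from airflow.utils.dates import days_ago

payload = {
    'kubernetes_secrets': {
        'gcs-key': {
            'deploy_type': 'volume',  # mount the secret as a volume
            'deploy_target': '/var/secrets/google',  # mount path in the pod
            'secret': 'service-account',  # name of the Kubernetes Secret
            'key': 'key.json',  # key within that Secret
        }
    }
}

example_dag = models.DAG('example_pod_builder',
                         schedule_interval=None,
                         start_date=days_ago(1))

example_op = build_gke_start_pod_operator(
    {
        'task_id': 'my-pod-task',
        'name': 'my-pod',
        'image': 'gcr.io/my-project/my-image',
        'cluster_name': 'my-cluster',
        'project_id': 'my-project',
        'location': 'us-central1-a',
        'pod_secret_refs': ['gcs-key'],
    },
    example_dag)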
Example #4
                        --zone {CLUSTER_ZONE}",
    )
    # [END composer_gke_create_cluster_airflow_1]

    # [START composer_gkeoperator_minconfig_airflow_1]
    kubernetes_min_pod = GKEStartPodOperator(
        # The ID specified for the task.
        task_id="pod-ex-minimum",
        # Name of task you want to run, used to generate Pod ID.
        name="pod-ex-minimum",
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
        cluster_name=CLUSTER_NAME,
        # Entrypoint of the container; if not specified, the Docker
        # container's entrypoint is used. The cmds parameter is templated.
        cmds=["echo"],
        # The namespace to run within Kubernetes, default namespace is
        # `default`.
        namespace="default",
        # Docker image specified. Defaults to hub.docker.com, but any fully
        # qualified URLs will point to a custom repository. Supports private
        # gcr.io images if the Composer Environment is under the same
        # project-id as the gcr.io images and the service account that Composer
        # uses has permission to access the Google Container Registry
        # (the default service account has permission)
        image="gcr.io/gcp-runtimes/ubuntu_18_0_4",
    )
    # [END composer_gkeoperator_minconfig_airflow_1]

    # [START composer_gkeoperator_templateconfig_airflow_1]
    kubernetes_template_ex = GKEStartPodOperator(
        task_id="ex-kube-templates",
Example #5
        default_args=default_args,
        schedule_interval=None,  # Override to match your needs
        tags=['example'],
) as dag:
    create_cluster = GKECreateClusterOperator(
        task_id="create_cluster",
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
        body=CLUSTER,
    )

    pod_task = GKEStartPodOperator(
        task_id="pod_task",
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
        cluster_name=CLUSTER_NAME,
        namespace="default",
        image="perl",
        name="test-pod",
    )

    pod_task_xcom = GKEStartPodOperator(
        task_id="pod_task_xcom",
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
        cluster_name=CLUSTER_NAME,
        do_xcom_push=True,
        namespace="default",
        image="alpine",
        cmds=[
            "sh", "-c",
Example #6
import os

from airflow import models
from airflow.providers.google.cloud.operators.kubernetes_engine import GKEStartPodOperator
from airflow.utils.dates import days_ago

with models.DAG(
        "testgithubbruno",
        schedule_interval=None,  # Override to match your needs
        start_date=days_ago(1),
        tags=['example'],
) as dag:

    pod_task = GKEStartPodOperator(
        task_id="pod_task",
        project_id="telefonica-digital-cloud",
        location="europe-west2-b",
        cluster_name="europe-west2-composer-c96de2c4-gke",
        namespace="default",
        image="us-docker.pkg.dev/cloudrun/container/hello",
        name="test-pod232",
    )
class TestGKEPodOperator(unittest.TestCase):
    def setUp(self):
        self.gke_op = GKEStartPodOperator(project_id=TEST_GCP_PROJECT_ID,
                                          location=PROJECT_LOCATION,
                                          cluster_name=CLUSTER_NAME,
                                          task_id=PROJECT_TASK_ID,
                                          name=TASK_NAME,
                                          namespace=NAMESPACE,
                                          image=IMAGE)

    def test_template_fields(self):
        self.assertTrue(
            set(KubernetesPodOperator.template_fields).issubset(
                GKEStartPodOperator.template_fields))

    # pylint: disable=unused-argument
    @mock.patch("airflow.hooks.base_hook.BaseHook.get_connections",
                return_value=[Connection(extra=json.dumps({}))])
    @mock.patch(
        'airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute'
    )
    @mock.patch('tempfile.NamedTemporaryFile')
    @mock.patch("subprocess.check_call")
    @mock.patch.dict(os.environ, {CREDENTIALS: '/tmp/local-creds'})
    def test_execute_conn_id_none(self, proc_mock, file_mock, exec_mock,
                                  get_conn):
        type(file_mock.return_value.__enter__.return_value).name = PropertyMock(
            side_effect=[FILE_NAME])

        def assert_credentials(*args, **kwargs):
            # no key file was passed in, so the pre-set credentials are kept
            self.assertIn(CREDENTIALS, os.environ)
            self.assertEqual(os.environ[CREDENTIALS], '/tmp/local-creds')

        proc_mock.side_effect = assert_credentials

        self.gke_op.execute(None)

        # Assert the environment variable is set correctly
        self.assertIn(KUBE_ENV_VAR, os.environ)
        self.assertEqual(os.environ[KUBE_ENV_VAR], FILE_NAME)

        # Assert the gcloud command is called correctly
        proc_mock.assert_called_once_with(
            GCLOUD_COMMAND.format(CLUSTER_NAME, PROJECT_LOCATION,
                                  TEST_GCP_PROJECT_ID).split())

        self.assertEqual(self.gke_op.config_file, FILE_NAME)

    # pylint: disable=unused-argument
    @mock.patch(
        "airflow.hooks.base_hook.BaseHook.get_connections",
        return_value=[
            Connection(extra=json.dumps(
                {'extra__google_cloud_platform__key_path': '/path/to/file'}))
        ])
    @mock.patch(
        'airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute'
    )
    @mock.patch('tempfile.NamedTemporaryFile')
    @mock.patch("subprocess.check_call")
    @mock.patch.dict(os.environ, {})
    def test_execute_conn_id_path(self, proc_mock, file_mock, exec_mock,
                                  get_con_mock):
        type(file_mock.return_value.__enter__.return_value).name = PropertyMock(
            side_effect=[FILE_NAME])

        def assert_credentials(*args, **kwargs):
            # since we passed in key_path, the credentials should be that file
            self.assertIn(CREDENTIALS, os.environ)
            self.assertEqual(os.environ[CREDENTIALS], '/path/to/file')

        proc_mock.side_effect = assert_credentials
        self.gke_op.execute(None)

        # Assert the environment variable is set correctly
        self.assertIn(KUBE_ENV_VAR, os.environ)
        self.assertEqual(os.environ[KUBE_ENV_VAR], FILE_NAME)

        # Assert the gcloud command is called correctly
        proc_mock.assert_called_once_with(
            GCLOUD_COMMAND.format(CLUSTER_NAME, PROJECT_LOCATION,
                                  TEST_GCP_PROJECT_ID).split())

        self.assertEqual(self.gke_op.config_file, FILE_NAME)

    # pylint: disable=unused-argument
    @mock.patch.dict(os.environ, {})
    @mock.patch("airflow.hooks.base_hook.BaseHook.get_connections",
                return_value=[
                    Connection(extra=json.dumps({
                        "extra__google_cloud_platform__keyfile_dict":
                        '{"private_key": "r4nd0m_k3y"}'
                    }))
                ])
    @mock.patch(
        'airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator.execute'
    )
    @mock.patch('tempfile.NamedTemporaryFile')
    @mock.patch("subprocess.check_call")
    def test_execute_conn_id_dict(self, proc_mock, file_mock, exec_mock,
                                  get_con_mock):
        type(file_mock.return_value.__enter__.return_value).name = PropertyMock(
            side_effect=[FILE_NAME, '/path/to/new-file'])

        def assert_credentials(*args, **kwargs):
            # since we passed in keyfile_dict we should get a new file
            self.assertIn(CREDENTIALS, os.environ)
            self.assertEqual(os.environ[CREDENTIALS], '/path/to/new-file')

        proc_mock.side_effect = assert_credentials

        self.gke_op.execute(None)

        # Assert the environment variable is set correctly
        self.assertIn(KUBE_ENV_VAR, os.environ)
        self.assertEqual(os.environ[KUBE_ENV_VAR], FILE_NAME)

        # Assert the gcloud command is called correctly
        proc_mock.assert_called_once_with(
            GCLOUD_COMMAND.format(CLUSTER_NAME, PROJECT_LOCATION,
                                  TEST_GCP_PROJECT_ID).split())

        self.assertEqual(self.gke_op.config_file, FILE_NAME)
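
# For orientation: the three tests above pin down GKEStartPodOperator's
# credential handling: execute() points KUBECONFIG at a temp file, shells
# out to gcloud to populate it, then delegates to KubernetesPodOperator.
# A simplified sketch of that flow (an illustration of what the tests
# assert, not the actual Airflow implementation; names are illustrative):
import os
import subprocess
import tempfile


def _fetch_gke_credentials(cluster_name, location, project_id):
    conf_file = tempfile.NamedTemporaryFile()
    # kubectl and the Kubernetes client discover the config via KUBECONFIG
    os.environ["KUBECONFIG"] = conf_file.name
    # this is the command the tests assert via GCLOUD_COMMAND.format(...).split()
    subprocess.check_call([
        "gcloud", "container", "clusters", "get-credentials", cluster_name,
        "--zone", location, "--project", project_id,
    ])
    return conf_file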
# CLUSTER = {"name": CLUSTER_NAME, "initial_node_count": 1}
# [END howto_operator_gcp_gke_create_cluster_definition]

with models.DAG(
    "example_gcp_gke",
    schedule_interval=None,  # Override to match your needs
    start_date=days_ago(1),
    tags=['example'],
) as dag:
    # [START gke_start_pod_operator]
    pod_task_dev = GKEStartPodOperator(
        task_id="create_pod_task_dev",
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
        cluster_name=CLUSTER_NAME,
        namespace="dev",
        image="gcr.io/graphical-elf-309911/demoapp1",
        image_pull_policy='Always',
        name="airflow-test-pod-dev",
        is_delete_operator_pod=True
    )
    # [END gke_start_pod_operator]
    
    pod_task_test = GKEStartPodOperator(
        task_id="create_pod_task_test",
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
        cluster_name=CLUSTER_NAME,
        namespace="test",
        image="gcr.io/graphical-elf-309911/bqapp",
        image_pull_policy='Always',