def test_delete_execute(self, mock_hook):
    operator = GKEDeleteClusterOperator(project_id=TEST_GCP_PROJECT_ID,
                                        name=CLUSTER_NAME,
                                        location=PROJECT_LOCATION,
                                        task_id=PROJECT_TASK_ID)

    operator.execute(None)
    mock_hook.return_value.delete_cluster.assert_called_once_with(
        name=CLUSTER_NAME, project_id=TEST_GCP_PROJECT_ID)
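For context, this method presumably sits inside a test class where mock_hook is injected by a mock.patch decorator that is not visible in the fragment. A minimal sketch of that assumed scaffolding; the constant values and the exact patch target are placeholders, not recovered from the original file:

from unittest import mock

import pytest
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.operators.kubernetes_engine import (
    GKEDeleteClusterOperator,
)

# Hypothetical stand-ins for the test module's constants.
TEST_GCP_PROJECT_ID = "test-project"
CLUSTER_NAME = "test-cluster"
PROJECT_LOCATION = "us-central1-a"
PROJECT_TASK_ID = "test-task-id"

# Assumed patch target: the hook as imported by the operator module.
# Adjust it to match the provider version under test.
@mock.patch("airflow.providers.google.cloud.operators.kubernetes_engine.GKEHook")
class TestGKEClusterDeleteOperator:
    # The test methods shown in this section would live here.
    ...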
Example #2
            "limit_cpu": "100m"
        },
        # If true, the content of /airflow/xcom/return.json from the
        # container will also be pushed to an XCom when the container ends.
        do_xcom_push=False,
        # List of Volume objects to pass to the Pod.
        volumes=[],
        # List of VolumeMount objects to pass to the Pod.
        volume_mounts=[],
        # Affinity determines which nodes the Pod can run on based on the
        # config. For more information see:
        # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
        affinity={},
    )
    # [END composer_gkeoperator_fullconfig_airflow_1]
    # [START composer_gkeoperator_delete_cluster_airflow_1]
    delete_cluster = GKEDeleteClusterOperator(
        task_id="delete_cluster",
        name=CLUSTER_NAME,
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
    )
    # [END composer_gkeoperator_delete_cluster_airflow_1]

    create_cluster >> create_node_pools >> kubernetes_min_pod >> delete_cluster
    create_cluster >> create_node_pools >> kubernetes_full_pod >> delete_cluster
    create_cluster >> create_node_pools >> kubernetes_affinity_ex >> delete_cluster
    create_cluster >> create_node_pools >> kubenetes_template_ex >> delete_cluster

# [END composer_gkeoperator_airflow_1]
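The Example #2 fragment above opens mid-way through the full-config pod operator, so the "limit_cpu" entry appears without its enclosing call. A hedged reconstruction of the opening it presumably continues, based on the surrounding sample; the task id, image, and resource values here are illustrative assumptions, not recovered text:

    # Hypothetical reconstruction: assumes the DAG's existing imports and
    # constants (PROJECT_ID, CLUSTER_ZONE, CLUSTER_NAME) defined earlier
    # in the sample.
    kubernetes_full_pod = GKEStartPodOperator(
        task_id="ex-all-configs",
        name="full",
        project_id=PROJECT_ID,
        location=CLUSTER_ZONE,
        cluster_name=CLUSTER_NAME,
        namespace="default",
        image="perl",
        # Resource limits for the container; the fragment above continues
        # this dict with "limit_cpu": "100m" and the remaining arguments.
        resources={
            "limit_memory": "250M",
            "limit_cpu": "100m",
        },
    )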
def test_delete_execute_error_location(self, mock_hook):
    with pytest.raises(AirflowException):
        GKEDeleteClusterOperator(project_id=TEST_GCP_PROJECT_ID,
                                 name=CLUSTER_NAME,
                                 task_id=PROJECT_TASK_ID)

def test_delete_execute_error_cluster_name(self, mock_hook):
    with pytest.raises(AirflowException):
        GKEDeleteClusterOperator(project_id=TEST_GCP_PROJECT_ID,
                                 location=PROJECT_LOCATION,
                                 task_id=PROJECT_TASK_ID)

def test_delete_execute_error_project_id(self, mock_hook):
    with pytest.raises(AirflowException):
        GKEDeleteClusterOperator(location=PROJECT_LOCATION,
                                 name=CLUSTER_NAME,
                                 task_id=PROJECT_TASK_ID)
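All three tests exercise the operator's input validation rather than the API call itself: each omits one required field and expects AirflowException. A minimal sketch of the kind of check that raises here; the method name and message are assumptions, not the provider's exact code:

from airflow.exceptions import AirflowException

def _check_input(self) -> None:
    # project_id, name, and location are all required; any missing
    # (falsy) value aborts before a hook is ever created.
    if not all([self.project_id, self.name, self.location]):
        raise AirflowException("Operator has incorrect or missing input.")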
Example #6
        task_id="pod_task_xcom",
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
        cluster_name=CLUSTER_NAME,
        do_xcom_push=True,
        namespace="default",
        image="alpine",
        cmds=[
            "sh", "-c",
            'mkdir -p /airflow/xcom/;echo \'[1,2,3,4]\' > /airflow/xcom/return.json'
        ],
        name="test-pod-xcom",
    )

    pod_task_xcom_result = BashOperator(
        bash_command="echo \"{{ task_instance.xcom_pull('pod_task_xcom')[0] }}\"",
        task_id="pod_task_xcom_result",
    )

    delete_cluster = GKEDeleteClusterOperator(
        task_id="delete_cluster",
        name=CLUSTER_NAME,
        project_id=GCP_PROJECT_ID,
        location=GCP_LOCATION,
    )

    create_cluster >> pod_task >> delete_cluster
    create_cluster >> pod_task_xcom >> delete_cluster
    pod_task_xcom >> pod_task_xcom_result
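Because do_xcom_push=True, the sidecar publishes the JSON that the container wrote to /airflow/xcom/return.json as the task's return value, so the templated xcom_pull above renders 1 (the first element of [1,2,3,4]). A hedged sketch of consuming the same XCom from a Python task instead of a Jinja template; the task and function names are illustrative only:

from airflow.operators.python import PythonOperator

def _print_first(ti):
    # Pulls the list [1, 2, 3, 4] pushed by pod_task_xcom and prints
    # its first element.
    value = ti.xcom_pull(task_ids="pod_task_xcom")
    print(value[0])

pod_task_xcom_result_py = PythonOperator(
    task_id="pod_task_xcom_result_py",
    python_callable=_print_first,
)

pod_task_xcom >> pod_task_xcom_result_py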