def test_gauge_executor_metrics(self, mock_stats_gauge, mock_trigger_tasks,
                                mock_sync, mock_kube_config):
    executor = KubernetesExecutor()
    executor.heartbeat()
    calls = [mock.call('executor.open_slots', mock.ANY),
             mock.call('executor.queued_tasks', mock.ANY),
             mock.call('executor.running_tasks', mock.ANY)]
    mock_stats_gauge.assert_has_calls(calls)
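
For context, a minimal sketch of the gauge calls a heartbeat() is expected to make, which the assertions above match. This assumes Airflow 1.10-era BaseExecutor behavior, and the Stats import location is an assumption:

# Sketch only: the gauges heartbeat() emits in 1.10-era Airflow.
from airflow.settings import Stats  # assumed location in Airflow 1.10

def emit_executor_gauges(open_slots, num_queued_tasks, num_running_tasks):
    Stats.gauge('executor.open_slots', open_slots)
    Stats.gauge('executor.queued_tasks', num_queued_tasks)
    Stats.gauge('executor.running_tasks', num_running_tasks)
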
Example 2
    def test_run_next_exception(self, mock_get_kube_client,
                                mock_kubernetes_job_watcher):

        # When a quota is exceeded this is the ApiException we get
        r = HTTPResponse()
        r.body = {
            "kind": "Status",
            "apiVersion": "v1",
            "metadata": {},
            "status": "Failure",
            "message": "pods \"podname\" is forbidden: "
                       "exceeded quota: compute-resources, "
                       "requested: limits.memory=4Gi, "
                       "used: limits.memory=6508Mi, "
                       "limited: limits.memory=10Gi",
            "reason": "Forbidden",
            "details": {
                "name": "podname",
                "kind": "pods"
            },
            "code": 403
        }  # no trailing comma here, or r.body would become a tuple
        r.status = 403
        r.reason = "Forbidden"

        # A mock kube_client that throws errors when making a pod
        mock_kube_client = mock.MagicMock()
        mock_kube_client.create_namespaced_pod = mock.MagicMock(
            side_effect=ApiException(http_resp=r))
        mock_get_kube_client.return_value = mock_kube_client

        kubernetesExecutor = KubernetesExecutor()
        kubernetesExecutor.start()

        # Execute a task while the Api Throws errors
        try_number = 1
        kubernetesExecutor.execute_async(key=('dag', 'task', datetime.utcnow(),
                                              try_number),
                                         command='command',
                                         executor_config={})
        kubernetesExecutor.sync()
        kubernetesExecutor.sync()

        assert mock_kube_client.create_namespaced_pod.called
        self.assertFalse(kubernetesExecutor.task_queue.empty())

        # Disable the ApiException
        mock_kube_client.create_namespaced_pod.side_effect = None

        # Execute the task without errors should empty the queue
        kubernetesExecutor.sync()
        assert mock_kube_client.create_namespaced_pod.called
        self.assertTrue(kubernetesExecutor.task_queue.empty())
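
The behavior under test can be summarized with a small sketch (illustrative names only, not the executor's actual internals): a quota-style 403 from create_namespaced_pod should leave the task queued so a later sync() can retry it.

from kubernetes.client.rest import ApiException

def try_launch_pod(kube_client, pod, namespace='default'):
    # Returns False on a quota-style 403 so the caller can retry later.
    try:
        kube_client.create_namespaced_pod(namespace=namespace, body=pod)
        return True
    except ApiException as e:
        if e.status == 403:  # e.g. "exceeded quota: compute-resources"
            return False
        raise
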
Example 3
def _get_executor(executor_name):
    """
    Creates a new instance of the named executor. In case the executor name is not know in airflow,
    look for it in the plugins
    """
    if executor_name == Executors.LocalExecutor:
        return LocalExecutor()
    elif executor_name == Executors.SequentialExecutor:
        return SequentialExecutor()
    elif executor_name == Executors.CeleryExecutor:
        from airflow.executors.celery_executor import CeleryExecutor
        return CeleryExecutor()
    elif executor_name == Executors.DaskExecutor:
        from airflow.executors.dask_executor import DaskExecutor
        return DaskExecutor()
    elif executor_name == Executors.MesosExecutor:
        from airflow.contrib.executors.mesos_executor import MesosExecutor
        return MesosExecutor()
    elif executor_name == Executors.KubernetesExecutor:
        from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
        return KubernetesExecutor()
    else:
        # Loading plugins
        _integrate_plugins()
        executor_path = executor_name.split('.')
        if len(executor_path) != 2:
            raise AirflowException(
                "Executor {0} not supported: please specify in format plugin_module.executor"
                .format(executor_name))

        if executor_path[0] in globals():
            return globals()[executor_path[0]].__dict__[executor_path[1]]()
        else:
            raise AirflowException(
                "Executor {0} not supported.".format(executor_name))
Example 4
def _get_executor(executor_name):
    """
    Creates a new instance of the named executor.
    In case the executor name is not know in airflow,
    look for it in the plugins
    """
    parallelism = PARALLELISM
    if executor_name == Executors.LocalExecutor:
        return LocalExecutor(parallelism)
    elif executor_name == Executors.SequentialExecutor:
        return SequentialExecutor(parallelism)
    elif executor_name == Executors.CeleryExecutor:
        from airflow.executors.celery_executor import CeleryExecutor, execute_command
        return CeleryExecutor(parallelism, execute_command)
    elif executor_name == Executors.DaskExecutor:
        from airflow.executors.dask_executor import DaskExecutor
        cluster_address = configuration.conf.get('dask', 'cluster_address')
        tls_ca = configuration.conf.get('dask', 'tls_ca')
        tls_key = configuration.conf.get('dask', 'tls_key')
        tls_cert = configuration.conf.get('dask', 'tls_cert')
        return DaskExecutor(parallelism, cluster_address, tls_ca, tls_key,
                            tls_cert)
    elif executor_name == Executors.MesosExecutor:
        from airflow.contrib.executors.mesos_executor import MesosExecutor
        return MesosExecutor(parallelism)
    elif executor_name == Executors.KubernetesExecutor:
        from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
        return KubernetesExecutor()
    else:
        # Loading plugins
        _integrate_plugins()
        # Fetch the named class from the plugin module
        args = []
        kwargs = {'parallelism': PARALLELISM}
        return create_object_from_plugin_module(executor_name, *args, **kwargs)
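
The create_object_from_plugin_module helper is not shown in this example; a plausible implementation, hypothetical and inferred only from its call site above, would be:

def create_object_from_plugin_module(name, *args, **kwargs):
    # Hypothetical sketch: resolve "plugin_module.ClassName" against the
    # plugin classes that _integrate_plugins() loaded into globals().
    parts = name.split('.')
    if len(parts) != 2 or parts[0] not in globals():
        raise AirflowException("Executor {0} not supported.".format(name))
    cls = globals()[parts[0]].__dict__[parts[1]]
    return cls(*args, **kwargs)
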
def test_change_state_running(self, mock_get_kube_client,
                              mock_kubernetes_job_watcher,
                              mock_kube_config):
    executor = KubernetesExecutor()
    executor.start()
    key = ('dag_id', 'task_id', 'ex_time', 'try_number1')
    executor._change_state(key, State.RUNNING, 'pod_id')
    self.assertTrue(executor.event_buffer[key] == State.RUNNING)

def test_change_state_failed(self, mock_delete_pod, mock_get_kube_client,
                             mock_kubernetes_job_watcher, mock_kube_config):
    executor = KubernetesExecutor()
    executor.start()
    key = ('dag_id', 'task_id', 'ex_time', 'try_number3')
    executor._change_state(key, State.FAILED, 'pod_id', 'default')
    self.assertTrue(executor.event_buffer[key] == State.FAILED)
    mock_delete_pod.assert_called_with('pod_id', 'default')

def test_change_state_success(self, mock_delete_pod, mock_get_kube_client,
                              mock_kubernetes_job_watcher, mock_kube_config):
    executor = KubernetesExecutor()
    executor.start()
    test_time = timezone.utcnow()
    key = ('dag_id', 'task_id', test_time, 'try_number2')
    executor._change_state(key, State.SUCCESS, 'pod_id', 'default')
    self.assertTrue(executor.event_buffer[key] == State.SUCCESS)
    mock_delete_pod.assert_called_with('pod_id', 'default')
Example 8
def test_change_state_skip_pod_deletion(self, mock_delete_pod, mock_get_kube_client,
                                        mock_kubernetes_job_watcher, mock_kube_config):
    executor = KubernetesExecutor()
    executor.kube_config.delete_worker_pods = False
    executor.start()
    key = ('dag_id', 'task_id', 'ex_time', 'try_number2')
    executor._change_state(key, State.SUCCESS, 'pod_id')
    self.assertTrue(executor.event_buffer[key] == State.SUCCESS)
    mock_delete_pod.assert_not_called()
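
For context, a minimal sketch of how a caller such as the scheduler drains the event_buffer these tests inspect, assuming the BaseExecutor.get_event_buffer() API of Airflow 1.x:

def drain_events(executor):
    # get_event_buffer() returns the buffered {key: state} events and
    # clears the buffer (BaseExecutor API, Airflow 1.x).
    for key, state in executor.get_event_buffer().items():
        dag_id, task_id, execution_date, try_number = key
        print('%s.%s (try %s) -> %s' % (dag_id, task_id, try_number, state))
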
Example 9
from task import sub_dag
from datetime import datetime
from airflow.models import DAG
from airflow.operators.subdag_operator import SubDagOperator
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
from airflow.executors.local_executor import LocalExecutor

PARENT_DAG_NAME = 'parent_dag'
CHILD_DAG_NAME = 'child_dag'

main_dag = DAG(dag_id=PARENT_DAG_NAME,
               schedule_interval=None,
               start_date=datetime(2016, 1, 1))

# Use a distinct name for the operator so it does not shadow the imported
# sub_dag factory.
sub_dag_task = SubDagOperator(subdag=sub_dag(PARENT_DAG_NAME, CHILD_DAG_NAME,
                                             main_dag.start_date,
                                             main_dag.schedule_interval),
                              task_id=CHILD_DAG_NAME,
                              dag=main_dag,
                              executor=KubernetesExecutor())
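
The sub_dag factory imported from task is not shown here; a typical implementation, with names assumed purely from the call above, might look like:

from airflow.models import DAG

def sub_dag(parent_dag_name, child_dag_name, start_date, schedule_interval):
    # SubDagOperator expects the child dag_id to be '<parent>.<child>'.
    dag = DAG(dag_id='{0}.{1}'.format(parent_dag_name, child_dag_name),
              start_date=start_date,
              schedule_interval=schedule_interval)
    # ... add the sub-DAG's tasks here ...
    return dag
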