def test_run_next_exception(self, mock_get_kube_client, mock_kubernetes_job_watcher):
    """A task whose pod creation fails with a 403 ApiException stays in the
    task queue and is retried (emptying the queue) once the API stops raising.
    """
    # When a quota is exceeded this is the ApiException we get
    r = HTTPResponse(
        body='{"kind": "Status", "apiVersion": "v1", "metadata": {}, "status": "Failure", '
             '"message": "pods \\"podname\\" is forbidden: exceeded quota: compute-resources, '
             'requested: limits.memory=4Gi, used: limits.memory=6508Mi, limited: limits.memory=10Gi", '
             '"reason": "Forbidden", "details": {"name": "podname", "kind": "pods"}, "code": 403}')
    r.status = 403
    r.reason = "Forbidden"

    # A mock kube_client that throws errors when making a pod.
    # NOTE: the original built this with mock.patch(...) — a patcher object that
    # was never started and served only as an attribute holder; a MagicMock is
    # the correct stand-in and behaves identically for this test.
    mock_kube_client = mock.MagicMock()
    mock_kube_client.create_namespaced_pod = mock.MagicMock(
        side_effect=ApiException(http_resp=r))
    mock_get_kube_client.return_value = mock_kube_client

    kubernetesExecutor = KubernetesExecutor()
    kubernetesExecutor.start()

    # Execute a task while the Api Throws errors
    try_number = 1
    kubernetesExecutor.execute_async(key=('dag', 'task', datetime.utcnow(), try_number),
                                     command='command', executor_config={})
    kubernetesExecutor.sync()
    kubernetesExecutor.sync()

    assert mock_kube_client.create_namespaced_pod.called
    # The failed task must still be queued for retry.
    self.assertFalse(kubernetesExecutor.task_queue.empty())

    # Disable the ApiException
    mock_kube_client.create_namespaced_pod.side_effect = None

    # Execute the task without errors should empty the queue
    kubernetesExecutor.sync()
    assert mock_kube_client.create_namespaced_pod.called
    self.assertTrue(kubernetesExecutor.task_queue.empty())
def test_change_state_failed(self, mock_delete_pod, mock_get_kube_client,
                             mock_kubernetes_job_watcher, mock_kube_config):
    """A FAILED state change is recorded in the event buffer and deletes the pod."""
    # NOTE(review): another test method with this exact name appears later in the
    # file; if both live in the same class the later one shadows this — confirm.
    executor = KubernetesExecutor()
    executor.start()
    task_key = ('dag_id', 'task_id', 'ex_time', 'try_number3')
    executor._change_state(task_key, State.FAILED, 'pod_id')
    self.assertEqual(State.FAILED, executor.event_buffer[task_key])
    mock_delete_pod.assert_called_with('pod_id')
def test_change_state_success(self, mock_delete_pod, mock_get_kube_client,
                              mock_kubernetes_job_watcher):
    """On SUCCESS the event buffer is updated and the pod is deleted in its namespace."""
    executor = KubernetesExecutor()
    executor.start()
    current_time = timezone.utcnow()
    task_key = ('dag_id', 'task_id', current_time, 'try_number2')
    executor._change_state(task_key, State.SUCCESS, 'pod_id', 'default')
    self.assertEqual(State.SUCCESS, executor.event_buffer[task_key])
    mock_delete_pod.assert_called_once_with('pod_id', 'default')
def test_change_state_skip_pod_deletion(self, mock_delete_pod, mock_get_kube_client,
                                        mock_kubernetes_job_watcher, mock_kube_config):
    """With delete_worker_pods disabled, a SUCCESS change must not delete the pod."""
    executor = KubernetesExecutor()
    executor.kube_config.delete_worker_pods = False
    executor.start()
    task_key = ('dag_id', 'task_id', 'ex_time', 'try_number2')
    executor._change_state(task_key, State.SUCCESS, 'pod_id')
    self.assertEqual(State.SUCCESS, executor.event_buffer[task_key])
    mock_delete_pod.assert_not_called()
def test_change_state_failed_pod_deletion(self, mock_delete_pod, mock_get_kube_client,
                                          mock_kubernetes_job_watcher):
    """With delete_worker_pods_on_failure enabled, a FAILED change deletes the pod."""
    executor = KubernetesExecutor()
    executor.kube_config.delete_worker_pods_on_failure = True
    executor.start()
    task_key = ('dag_id', 'task_id', 'ex_time', 'try_number2')
    executor._change_state(task_key, State.FAILED, 'pod_id', 'test-namespace')
    self.assertEqual(State.FAILED, executor.event_buffer[task_key])
    mock_delete_pod.assert_called_once_with('pod_id', 'test-namespace')
def test_change_state_failed(self, mock_delete_pod, mock_get_kube_client,
                             mock_kubernetes_job_watcher):
    """When both deletion flags are off, a FAILED change keeps the pod alive."""
    # NOTE(review): a test method with this exact name also appears earlier in the
    # file; if both live in the same class this one shadows it — confirm and rename.
    executor = KubernetesExecutor()
    executor.kube_config.delete_worker_pods = False
    executor.kube_config.delete_worker_pods_on_failure = False
    executor.start()
    current_time = timezone.utcnow()
    task_key = ('dag_id', 'task_id', current_time, 'try_number3')
    executor._change_state(task_key, State.FAILED, 'pod_id', 'default')
    self.assertEqual(State.FAILED, executor.event_buffer[task_key])
    mock_delete_pod.assert_not_called()
def test_pending_pod_timeout(self, mock_kubescheduler, mock_get_kube_client,
                             mock_kubernetes_job_watcher):
    """Only pending pods older than worker_pods_pending_timeout get deleted, and
    the pod listing honors namespace, worker label, batch size and client args.
    """
    mock_delete_pod = mock_kubescheduler.return_value.delete_pod
    mock_kube_client = mock_get_kube_client.return_value
    current_time = timezone.utcnow()

    def _pending_pod(name, age_seconds):
        # Helper: a pod owned by worker "123" created age_seconds ago.
        return k8s.V1Pod(
            metadata=k8s.V1ObjectMeta(
                name=name,
                labels={"airflow-worker": "123"},
                creation_timestamp=current_time - timedelta(seconds=age_seconds),
                namespace="mynamespace",
            )
        )

    mock_kube_client.list_namespaced_pod.return_value.items = [
        _pending_pod("foo60", 60),  # younger than the 75s timeout -> kept
        _pending_pod("foo90", 90),  # older than the timeout -> deleted
    ]

    overrides = {
        ('kubernetes', 'namespace'): 'mynamespace',
        ('kubernetes', 'worker_pods_pending_timeout'): '75',
        ('kubernetes', 'worker_pods_pending_timeout_batch_size'): '5',
        ('kubernetes', 'kube_client_request_args'): '{"sentinel": "foo"}',
    }
    with conf_vars(overrides):
        executor = KubernetesExecutor()
        executor.job_id = "123"
        executor.start()
        # The timeout check is registered with the event scheduler on start.
        assert len(executor.event_scheduler.queue) == 1
        executor._check_worker_pods_pending_timeout()

    mock_kube_client.list_namespaced_pod.assert_called_once_with(
        'mynamespace',
        field_selector='status.phase=Pending',
        label_selector='airflow-worker=123',
        limit=5,
        sentinel='foo',
    )
    mock_delete_pod.assert_called_once_with('foo90', 'mynamespace')
def test_queue_command(self, test_queue, k8s_queue_cmd, celery_queue_cmd):
    """queue_command routes to the kubernetes executor for the kubernetes queue
    and to celery otherwise — never to both.
    """
    kwargs = dict(
        command=['airflow', 'run', 'dag'],
        priority=1,
        queue='default',
    )
    kwarg_values = kwargs.values()
    cke = CeleryKubernetesExecutor(CeleryExecutor(), KubernetesExecutor())

    simple_task_instance = mock.MagicMock()
    simple_task_instance.queue = test_queue
    cke.queue_command(simple_task_instance, **kwargs)

    # Exactly one of the two underlying executors must receive the command.
    if test_queue == KUBERNETES_QUEUE:
        expected, other = k8s_queue_cmd, celery_queue_cmd
    else:
        expected, other = celery_queue_cmd, k8s_queue_cmd
    expected.assert_called_once_with(simple_task_instance, *kwarg_values)
    other.assert_not_called()
def _get_executor(executor_name):
    """
    Create a new instance of the named executor.

    If the name is not one of the executors known to airflow, it is looked
    up among the plugins as ``plugin_module.executor``.
    """
    if executor_name == Executors.LocalExecutor:
        return LocalExecutor()
    if executor_name == Executors.SequentialExecutor:
        return SequentialExecutor()
    if executor_name == Executors.CeleryExecutor:
        # Heavy executors are imported lazily so their dependencies are only
        # required when the executor is actually selected.
        from airflow.executors.celery_executor import CeleryExecutor
        return CeleryExecutor()
    if executor_name == Executors.DaskExecutor:
        from airflow.executors.dask_executor import DaskExecutor
        return DaskExecutor()
    if executor_name == Executors.MesosExecutor:
        from airflow.contrib.executors.mesos_executor import MesosExecutor
        return MesosExecutor()
    if executor_name == Executors.KubernetesExecutor:
        from airflow.executors.kubernetes_executor import KubernetesExecutor
        return KubernetesExecutor()
    if executor_name == Executors.DebugExecutor:
        from airflow.executors.debug_executor import DebugExecutor
        return DebugExecutor()

    # Not a built-in name: fall back to plugin-provided executors.
    _integrate_plugins()
    executor_path = executor_name.split('.')
    if len(executor_path) != 2:
        raise AirflowException(
            "Executor {0} not supported: "
            "please specify in format plugin_module.executor".format(
                executor_name))
    if executor_path[0] in globals():
        return globals()[executor_path[0]].__dict__[executor_path[1]]()
    raise AirflowException(
        "Executor {0} not supported.".format(executor_name))
def setUp(self) -> None:
    """Provide each test with a fresh KubernetesExecutor bound to job id "5"."""
    executor = KubernetesExecutor()
    executor.job_id = "5"
    self.kubernetes_executor = executor
def test_change_state_running(self, mock_get_kube_client, mock_kubernetes_job_watcher):
    """A RUNNING state change is reflected in the executor's event buffer."""
    executor = KubernetesExecutor()
    executor.start()
    task_key = ('dag_id', 'task_id', 'ex_time', 'try_number1')
    executor._change_state(task_key, State.RUNNING, 'pod_id', 'default')
    self.assertEqual(State.RUNNING, executor.event_buffer[task_key])