Code Example #1
    def test_call_regular_interval(self):
        somefunction = mock.MagicMock()

        timers = EventScheduler()
        timers.call_regular_interval(30, somefunction)
        assert len(timers.queue) == 1
        somefunction.assert_not_called()

        # Fake a run (it won't actually pop from the queue):
        timers.queue[0].action()

        # Make sure it added another event to the queue
        assert len(timers.queue) == 2
        somefunction.assert_called_once()
        assert timers.queue[0].time < timers.queue[1].time
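
The test above relies on call_regular_interval wrapping the supplied callable so that each run re-registers itself, which is why invoking queue[0].action() by hand leaves two events on the queue and schedules the new one later than the old one. A minimal sketch of a scheduler with that behaviour, built on the standard-library sched module (an illustration under that assumption, not necessarily Airflow's actual EventScheduler):

import sched


class EventScheduler(sched.scheduler):
    """Sketch: a sched.scheduler whose timed events re-register themselves."""

    def call_regular_interval(self, delay, action, arguments=(), kwargs=None):
        """Call `action` every `delay` seconds, starting `delay` seconds from now."""
        kwargs = kwargs or {}

        def repeat():
            # Re-register first so the next run is queued even if `action` raises.
            self.enter(delay, 1, repeat)
            action(*arguments, **kwargs)

        self.enter(delay, 1, repeat)
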
Code Example #2
class KubernetesExecutor(BaseExecutor):
    """Executor for Kubernetes"""

    supports_ad_hoc_ti_run: bool = True

    def __init__(self):
        self.kube_config = KubeConfig()
        self._manager = multiprocessing.Manager()
        self.task_queue: 'Queue[KubernetesJobType]' = self._manager.Queue()
        self.result_queue: 'Queue[KubernetesResultsType]' = self._manager.Queue()
        self.kube_scheduler: Optional[AirflowKubernetesScheduler] = None
        self.kube_client: Optional[client.CoreV1Api] = None
        self.scheduler_job_id: Optional[str] = None
        self.event_scheduler: Optional[EventScheduler] = None
        self.last_handled: Dict[TaskInstanceKey, int] = {}
        super().__init__(parallelism=self.kube_config.parallelism)

    @provide_session
    def clear_not_launched_queued_tasks(self, session=None) -> None:
        """
        Tasks can end up in a "Queued" state when the executor is abruptly shut
        down (leaving a non-empty task_queue on this executor) or when a
        rescheduled/deferred operator comes back up for execution (with the same
        try_number) before the pod of its previous incarnation has been fully
        removed (we think).

        This method checks each of those tasks to see if the corresponding pod
        is around, and if not, and there's no matching entry in our own
        task_queue, marks it for re-execution.
        """
        self.log.debug("Clearing tasks that have not been launched")
        if not self.kube_client:
            raise AirflowException(NOT_STARTED_MESSAGE)
        queued_tasks = session.query(TaskInstance).filter(
            TaskInstance.state == State.QUEUED).all()
        self.log.info('Found %s queued task instances', len(queued_tasks))

        # Go through the "last seen" dictionary and clean out old entries
        allowed_age = self.kube_config.worker_pods_queued_check_interval * 3
        for key, timestamp in list(self.last_handled.items()):
            if time.time() - timestamp > allowed_age:
                del self.last_handled[key]

        for task in queued_tasks:
            self.log.debug("Checking task %s", task)

            # Check to see if we've handled it ourselves recently
            if task.key in self.last_handled:
                continue

            # Build the pod selector
            base_label_selector = (
                f"dag_id={pod_generator.make_safe_label_value(task.dag_id)},"
                f"task_id={pod_generator.make_safe_label_value(task.task_id)},"
                f"airflow-worker={pod_generator.make_safe_label_value(str(task.queued_by_job_id))}"
            )
            kwargs = dict(label_selector=base_label_selector)
            if self.kube_config.kube_client_request_args:
                kwargs.update(**self.kube_config.kube_client_request_args)

            # Try run_id first
            kwargs['label_selector'] += ',run_id=' + pod_generator.make_safe_label_value(
                task.run_id)
            pod_list = self.kube_client.list_namespaced_pod(
                self.kube_config.kube_namespace, **kwargs)
            if pod_list.items:
                continue
            # Fallback to old style of using execution_date
            kwargs['label_selector'] = (
                f'{base_label_selector},'
                f'execution_date={pod_generator.datetime_to_label_safe_datestring(task.execution_date)}'
            )
            pod_list = self.kube_client.list_namespaced_pod(
                self.kube_config.kube_namespace, **kwargs)
            if pod_list.items:
                continue
            self.log.info(
                'TaskInstance: %s found in queued state but was not launched, rescheduling',
                task)
            session.query(TaskInstance).filter(
                TaskInstance.dag_id == task.dag_id,
                TaskInstance.task_id == task.task_id,
                TaskInstance.run_id == task.run_id,
            ).update({TaskInstance.state: State.SCHEDULED})

    def start(self) -> None:
        """Starts the executor"""
        self.log.info('Start Kubernetes executor')
        if not self.job_id:
            raise AirflowException("Could not get scheduler_job_id")
        self.scheduler_job_id = self.job_id
        self.log.debug('Start with scheduler_job_id: %s',
                       self.scheduler_job_id)
        self.kube_client = get_kube_client()
        self.kube_scheduler = AirflowKubernetesScheduler(
            self.kube_config, self.task_queue, self.result_queue,
            self.kube_client, self.scheduler_job_id)
        self.event_scheduler = EventScheduler()
        self.event_scheduler.call_regular_interval(
            self.kube_config.worker_pods_pending_timeout_check_interval,
            self._check_worker_pods_pending_timeout,
        )
        self.event_scheduler.call_regular_interval(
            self.kube_config.worker_pods_queued_check_interval,
            self.clear_not_launched_queued_tasks,
        )
        # We also call this at startup as that's the most likely time to see
        # stuck queued tasks
        self.clear_not_launched_queued_tasks()

    def execute_async(
        self,
        key: TaskInstanceKey,
        command: CommandType,
        queue: Optional[str] = None,
        executor_config: Optional[Any] = None,
    ) -> None:
        """Executes task asynchronously"""
        self.log.info('Add task %s with command %s with executor_config %s',
                      key, command, executor_config)
        try:
            kube_executor_config = PodGenerator.from_obj(executor_config)
        except Exception:
            self.log.error("Invalid executor_config for %s", key)
            self.fail(key=key, info="Invalid executor_config passed")
            return

        if executor_config:
            pod_template_file = executor_config.get("pod_template_file", None)
        else:
            pod_template_file = None
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.event_buffer[key] = (State.QUEUED, self.scheduler_job_id)
        self.task_queue.put(
            (key, command, kube_executor_config, pod_template_file))
        # We keep a temporary local record that we've handled this so we don't
        # try and remove it from the QUEUED state while we process it
        self.last_handled[key] = time.time()

    def sync(self) -> None:
        """Synchronize task state."""
        if self.running:
            self.log.debug('self.running: %s', self.running)
        if self.queued_tasks:
            self.log.debug('self.queued: %s', self.queued_tasks)
        if not self.scheduler_job_id:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_scheduler:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_config:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.kube_scheduler.sync()

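        # Drain results from the pod watcher: each result tuple carries the
        # Kubernetes resource_version it was observed at; the most recent one is
        # written into the shared ResourceVersion singleton after this loop.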
        last_resource_version = None
        while True:
            try:
                results = self.result_queue.get_nowait()
                try:
                    key, state, pod_id, namespace, resource_version = results
                    last_resource_version = resource_version
                    self.log.info('Changing state of %s to %s', results, state)
                    try:
                        self._change_state(key, state, pod_id, namespace)
                    except Exception as e:
                        self.log.exception(
                            "Exception: %s when attempting to change state of %s to %s, re-queueing.",
                            e,
                            results,
                            state,
                        )
                        self.result_queue.put(results)
                finally:
                    self.result_queue.task_done()
            except Empty:
                break

        resource_instance = ResourceVersion()
        resource_instance.resource_version = last_resource_version or resource_instance.resource_version

        for _ in range(self.kube_config.worker_pods_creation_batch_size):
            try:
                task = self.task_queue.get_nowait()
                try:
                    self.kube_scheduler.run_next(task)
                except ApiException as e:

                    # These codes indicate something is wrong with pod definition; otherwise we assume pod
                    # definition is ok, and that retrying may work
                    if e.status in (400, 422):
                        self.log.error(
                            "Pod creation failed with reason %r. Failing task",
                            e.reason)
                        key, _, _, _ = task
                        self.change_state(key, State.FAILED, e)
                    else:
                        self.log.warning(
                            'ApiException when attempting to run task, re-queueing. Reason: %r. Message: %s',
                            e.reason,
                            json.loads(e.body)['message'],
                        )
                        self.task_queue.put(task)
                finally:
                    self.task_queue.task_done()
            except Empty:
                break

        # Run any pending timed events
        next_event = self.event_scheduler.run(blocking=False)
        self.log.debug("Next timed event is in %f", next_event)

    def _check_worker_pods_pending_timeout(self):
        """Check if any pending worker pods have timed out"""
        timeout = self.kube_config.worker_pods_pending_timeout
        self.log.debug('Looking for pending worker pods older than %d seconds',
                       timeout)

        kwargs = {
            'limit': self.kube_config.worker_pods_pending_timeout_batch_size,
            'field_selector': 'status.phase=Pending',
            'label_selector': f'airflow-worker={self.scheduler_job_id}',
            **self.kube_config.kube_client_request_args,
        }
        if self.kube_config.multi_namespace_mode:
            pending_pods = functools.partial(
                self.kube_client.list_pod_for_all_namespaces, **kwargs)
        else:
            pending_pods = functools.partial(
                self.kube_client.list_namespaced_pod,
                self.kube_config.kube_namespace, **kwargs)

        cutoff = timezone.utcnow() - timedelta(seconds=timeout)
        for pod in pending_pods().items:
            self.log.debug('Found a pending pod "%s", created "%s"',
                           pod.metadata.name, pod.metadata.creation_timestamp)
            if pod.metadata.creation_timestamp < cutoff:
                self.log.error(
                    ('Pod "%s" has been pending for longer than %d seconds. '
                     'It will be deleted and set to failed.'),
                    pod.metadata.name,
                    timeout,
                )
                self.kube_scheduler.delete_pod(pod.metadata.name,
                                               pod.metadata.namespace)

    def _change_state(self, key: TaskInstanceKey, state: Optional[str],
                      pod_id: str, namespace: str) -> None:
        if state != State.RUNNING:
            if self.kube_config.delete_worker_pods:
                if not self.kube_scheduler:
                    raise AirflowException(NOT_STARTED_MESSAGE)
                if state != State.FAILED or self.kube_config.delete_worker_pods_on_failure:
                    self.kube_scheduler.delete_pod(pod_id, namespace)
                    self.log.info('Deleted pod: %s in namespace %s', str(key),
                                  str(namespace))
            try:
                self.running.remove(key)
            except KeyError:
                self.log.debug('Could not find key: %s', str(key))
        self.event_buffer[key] = state, None

    def try_adopt_task_instances(
            self, tis: List[TaskInstance]) -> List[TaskInstance]:
        tis_to_flush = [ti for ti in tis if not ti.queued_by_job_id]
        scheduler_job_ids = {ti.queued_by_job_id for ti in tis}
        pod_ids = {ti.key: ti for ti in tis if ti.queued_by_job_id}
        kube_client: client.CoreV1Api = self.kube_client
        for scheduler_job_id in scheduler_job_ids:
            scheduler_job_id = pod_generator.make_safe_label_value(
                str(scheduler_job_id))
            kwargs = {'label_selector': f'airflow-worker={scheduler_job_id}'}
            pod_list = kube_client.list_namespaced_pod(
                namespace=self.kube_config.kube_namespace, **kwargs)
            for pod in pod_list.items:
                self.adopt_launched_task(kube_client, pod, pod_ids)
        self._adopt_completed_pods(kube_client)
        tis_to_flush.extend(pod_ids.values())
        return tis_to_flush

    def adopt_launched_task(self, kube_client: client.CoreV1Api,
                            pod: k8s.V1Pod, pod_ids: Dict[TaskInstanceKey,
                                                          k8s.V1Pod]) -> None:
        """
        Patch existing pod so that the current KubernetesJobWatcher can monitor it via label selectors

        :param kube_client: kubernetes client for speaking to kube API
        :param pod: V1Pod spec that we will patch with new label
        :param pod_ids: pod_ids we expect to patch.
        """
        self.log.info("attempting to adopt pod %s", pod.metadata.name)
        pod.metadata.labels['airflow-worker'] = pod_generator.make_safe_label_value(
            str(self.scheduler_job_id))
        pod_id = annotations_to_key(pod.metadata.annotations)
        if pod_id not in pod_ids:
            self.log.error(
                "attempting to adopt taskinstance which was not specified by database: %s",
                pod_id)
            return

        try:
            kube_client.patch_namespaced_pod(
                name=pod.metadata.name,
                namespace=pod.metadata.namespace,
                body=PodGenerator.serialize_pod(pod),
            )
            pod_ids.pop(pod_id)
            self.running.add(pod_id)
        except ApiException as e:
            self.log.info("Failed to adopt pod %s. Reason: %s",
                          pod.metadata.name, e)

    def _adopt_completed_pods(self, kube_client: client.CoreV1Api) -> None:
        """
        Patch completed pod so that the KubernetesJobWatcher can delete it.

        :param kube_client: kubernetes client for speaking to kube API
        """
        kwargs = {
            'field_selector': "status.phase=Succeeded",
            'label_selector': 'kubernetes_executor=True',
        }
        pod_list = kube_client.list_namespaced_pod(
            namespace=self.kube_config.kube_namespace, **kwargs)
        for pod in pod_list.items:
            self.log.info("Attempting to adopt pod %s", pod.metadata.name)
            pod.metadata.labels['airflow-worker'] = pod_generator.make_safe_label_value(
                str(self.scheduler_job_id))
            try:
                kube_client.patch_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=pod.metadata.namespace,
                    body=PodGenerator.serialize_pod(pod),
                )
            except ApiException as e:
                self.log.info("Failed to adopt pod %s. Reason: %s",
                              pod.metadata.name, e)

    def _flush_task_queue(self) -> None:
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.debug(
            'Executor shutting down, task_queue approximate size=%d',
            self.task_queue.qsize())
        while True:
            try:
                task = self.task_queue.get_nowait()
                # This is a new task to run thus ok to ignore.
                self.log.warning(
                    'Executor shutting down, will NOT run task=%s', task)
                self.task_queue.task_done()
            except Empty:
                break

    def _flush_result_queue(self) -> None:
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.debug(
            'Executor shutting down, result_queue approximate size=%d',
            self.result_queue.qsize())
        while True:
            try:
                results = self.result_queue.get_nowait()
                self.log.warning('Executor shutting down, flushing results=%s',
                                 results)
                try:
                    key, state, pod_id, namespace, resource_version = results
                    self.log.info(
                        'Changing state of %s to %s : resource_version=%d',
                        results, state, resource_version)
                    try:
                        self._change_state(key, state, pod_id, namespace)
                    except Exception as e:
                        self.log.exception(
                            'Ignoring exception: %s when attempting to change state of %s to %s.',
                            e,
                            results,
                            state,
                        )
                finally:
                    self.result_queue.task_done()
            except Empty:
                break

    def end(self) -> None:
        """Called when the executor shuts down"""
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_scheduler:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.info('Shutting down Kubernetes executor')
        self.log.debug('Flushing task_queue...')
        self._flush_task_queue()
        self.log.debug('Flushing result_queue...')
        self._flush_result_queue()
        # Both queues should be empty...
        self.task_queue.join()
        self.result_queue.join()
        if self.kube_scheduler:
            self.kube_scheduler.terminate()
        self._manager.shutdown()

    def terminate(self):
        """Terminate the executor; this is a no-op for the Kubernetes executor."""
Code Example #3
File: kubernetes_executor.py  Project: yh-rgb/airflow
class KubernetesExecutor(BaseExecutor, LoggingMixin):
    """Executor for Kubernetes"""

    def __init__(self):
        self.kube_config = KubeConfig()
        self._manager = multiprocessing.Manager()
        self.task_queue: 'Queue[KubernetesJobType]' = self._manager.Queue()
        self.result_queue: 'Queue[KubernetesResultsType]' = self._manager.Queue()
        self.kube_scheduler: Optional[AirflowKubernetesScheduler] = None
        self.kube_client: Optional[client.CoreV1Api] = None
        self.scheduler_job_id: Optional[str] = None
        self.event_scheduler: Optional[EventScheduler] = None
        super().__init__(parallelism=self.kube_config.parallelism)

    @provide_session
    def clear_not_launched_queued_tasks(self, session=None) -> None:
        """
        If the airflow scheduler restarts with pending "Queued" tasks, the tasks may or
        may not have been launched. Thus on starting up the scheduler let's check every
        "Queued" task to see if it has been launched (ie: if there is a corresponding pod
        on kubernetes).

        If it has been launched then do nothing, otherwise reset the state to "None" so
        the task will be rescheduled.

        This will not be necessary in a future version of airflow in which there is
        proper support for State.LAUNCHED.
        """
        self.log.debug("Clearing tasks that have not been launched")
        if not self.kube_client:
            raise AirflowException(NOT_STARTED_MESSAGE)
        queued_tasks = session.query(TaskInstance).filter(TaskInstance.state == State.QUEUED).all()
        self.log.info('When executor started up, found %s queued task instances', len(queued_tasks))

        for task in queued_tasks:
            # pylint: disable=protected-access
            self.log.debug("Checking task %s", task)
            dict_string = "dag_id={},task_id={},execution_date={},airflow-worker={}".format(
                pod_generator.make_safe_label_value(task.dag_id),
                pod_generator.make_safe_label_value(task.task_id),
                pod_generator.datetime_to_label_safe_datestring(task.execution_date),
                pod_generator.make_safe_label_value(str(self.scheduler_job_id)),
            )
            # pylint: enable=protected-access
            kwargs = dict(label_selector=dict_string)
            if self.kube_config.kube_client_request_args:
                for key, value in self.kube_config.kube_client_request_args.items():
                    kwargs[key] = value
            pod_list = self.kube_client.list_namespaced_pod(self.kube_config.kube_namespace, **kwargs)
            if not pod_list.items:
                self.log.info(
                    'TaskInstance: %s found in queued state but was not launched, rescheduling', task
                )
                session.query(TaskInstance).filter(
                    TaskInstance.dag_id == task.dag_id,
                    TaskInstance.task_id == task.task_id,
                    TaskInstance.execution_date == task.execution_date,
                ).update({TaskInstance.state: State.NONE})

    def start(self) -> None:
        """Starts the executor"""
        self.log.info('Start Kubernetes executor')
        if not self.job_id:
            raise AirflowException("Could not get scheduler_job_id")
        self.scheduler_job_id = self.job_id
        self.log.debug('Start with scheduler_job_id: %s', self.scheduler_job_id)
        self.kube_client = get_kube_client()
        self.kube_scheduler = AirflowKubernetesScheduler(
            self.kube_config, self.task_queue, self.result_queue, self.kube_client, self.scheduler_job_id
        )
        self.event_scheduler = EventScheduler()
        self.event_scheduler.call_regular_interval(
            self.kube_config.worker_pods_pending_timeout_check_interval,
            self._check_worker_pods_pending_timeout,
        )
        self.clear_not_launched_queued_tasks()

    def execute_async(
        self,
        key: TaskInstanceKey,
        command: CommandType,
        queue: Optional[str] = None,
        executor_config: Optional[Any] = None,
    ) -> None:
        """Executes task asynchronously"""
        self.log.info('Add task %s with command %s with executor_config %s', key, command, executor_config)
        try:
            kube_executor_config = PodGenerator.from_obj(executor_config)
        except Exception:  # pylint: disable=broad-except
            self.log.error("Invalid executor_config for %s", key)
            self.fail(key=key, info="Invalid executor_config passed")
            return

        if executor_config:
            pod_template_file = executor_config.get("pod_template_file", None)
        else:
            pod_template_file = None
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.event_buffer[key] = (State.QUEUED, self.scheduler_job_id)
        self.task_queue.put((key, command, kube_executor_config, pod_template_file))

    def sync(self) -> None:
        """Synchronize task state."""
        if self.running:
            self.log.debug('self.running: %s', self.running)
        if self.queued_tasks:
            self.log.debug('self.queued: %s', self.queued_tasks)
        if not self.scheduler_job_id:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_scheduler:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_config:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.kube_scheduler.sync()

        last_resource_version = None
        while True:  # pylint: disable=too-many-nested-blocks
            try:
                results = self.result_queue.get_nowait()
                try:
                    key, state, pod_id, namespace, resource_version = results
                    last_resource_version = resource_version
                    self.log.info('Changing state of %s to %s', results, state)
                    try:
                        self._change_state(key, state, pod_id, namespace)
                    except Exception as e:  # pylint: disable=broad-except
                        self.log.exception(
                            "Exception: %s when attempting to change state of %s to %s, re-queueing.",
                            e,
                            results,
                            state,
                        )
                        self.result_queue.put(results)
                finally:
                    self.result_queue.task_done()
            except Empty:
                break

        resource_instance = ResourceVersion()
        resource_instance.resource_version = last_resource_version or resource_instance.resource_version

        # pylint: disable=too-many-nested-blocks
        for _ in range(self.kube_config.worker_pods_creation_batch_size):
            try:
                task = self.task_queue.get_nowait()
                try:
                    self.kube_scheduler.run_next(task)
                except ApiException as e:
                    if e.reason == "BadRequest":
                        self.log.error("Request was invalid. Failing task")
                        key, _, _, _ = task
                        self.change_state(key, State.FAILED, e)
                    else:
                        self.log.warning(
                            'ApiException when attempting to run task, re-queueing. Message: %s',
                            json.loads(e.body)['message'],
                        )
                        self.task_queue.put(task)
                finally:
                    self.task_queue.task_done()
            except Empty:
                break
        # pylint: enable=too-many-nested-blocks

        # Run any pending timed events
        next_event = self.event_scheduler.run(blocking=False)
        self.log.debug("Next timed event is in %f", next_event)

    def _check_worker_pods_pending_timeout(self):
        """Check if any pending worker pods have timed out"""
        timeout = self.kube_config.worker_pods_pending_timeout
        self.log.debug('Looking for pending worker pods older than %d seconds', timeout)

        kwargs = {
            'limit': self.kube_config.worker_pods_pending_timeout_batch_size,
            'field_selector': 'status.phase=Pending',
            'label_selector': f'airflow-worker={self.scheduler_job_id}',
            **self.kube_config.kube_client_request_args,
        }
        if self.kube_config.multi_namespace_mode:
            pending_pods = functools.partial(self.kube_client.list_pod_for_all_namespaces, **kwargs)
        else:
            pending_pods = functools.partial(
                self.kube_client.list_namespaced_pod, self.kube_config.kube_namespace, **kwargs
            )

        cutoff = timezone.utcnow() - timedelta(seconds=timeout)
        for pod in pending_pods().items:
            self.log.debug(
                'Found a pending pod "%s", created "%s"', pod.metadata.name, pod.metadata.creation_timestamp
            )
            if pod.metadata.creation_timestamp < cutoff:
                self.log.error(
                    (
                        'Pod "%s" has been pending for longer than %d seconds. '
                        'It will be deleted and set to failed.'
                    ),
                    pod.metadata.name,
                    timeout,
                )
                self.kube_scheduler.delete_pod(pod.metadata.name, pod.metadata.namespace)

    def _change_state(self, key: TaskInstanceKey, state: Optional[str], pod_id: str, namespace: str) -> None:
        if state != State.RUNNING:
            if self.kube_config.delete_worker_pods:
                if not self.kube_scheduler:
                    raise AirflowException(NOT_STARTED_MESSAGE)
                if state != State.FAILED or self.kube_config.delete_worker_pods_on_failure:
                    self.kube_scheduler.delete_pod(pod_id, namespace)
                    self.log.info('Deleted pod: %s in namespace %s', str(key), str(namespace))
            try:
                self.running.remove(key)
            except KeyError:
                self.log.debug('Could not find key: %s', str(key))
        self.event_buffer[key] = state, None

    def try_adopt_task_instances(self, tis: List[TaskInstance]) -> List[TaskInstance]:
        tis_to_flush = [ti for ti in tis if not ti.external_executor_id]
        scheduler_job_ids = [ti.external_executor_id for ti in tis]
        pod_ids = {
            create_pod_id(
                dag_id=pod_generator.make_safe_label_value(ti.dag_id),
                task_id=pod_generator.make_safe_label_value(ti.task_id),
            ): ti
            for ti in tis
            if ti.external_executor_id
        }
        kube_client: client.CoreV1Api = self.kube_client
        for scheduler_job_id in scheduler_job_ids:
            scheduler_job_id = pod_generator.make_safe_label_value(str(scheduler_job_id))
            kwargs = {'label_selector': f'airflow-worker={scheduler_job_id}'}
            pod_list = kube_client.list_namespaced_pod(namespace=self.kube_config.kube_namespace, **kwargs)
            for pod in pod_list.items:
                self.adopt_launched_task(kube_client, pod, pod_ids)
        self._adopt_completed_pods(kube_client)
        tis_to_flush.extend(pod_ids.values())
        return tis_to_flush

    def adopt_launched_task(self, kube_client, pod, pod_ids: dict):
        """
        Patch existing pod so that the current KubernetesJobWatcher can monitor it via label selectors

        :param kube_client: kubernetes client for speaking to kube API
        :param pod: V1Pod spec that we will patch with new label
        :param pod_ids: pod_ids we expect to patch.
        """
        self.log.info("attempting to adopt pod %s", pod.metadata.name)
        pod.metadata.labels['airflow-worker'] = pod_generator.make_safe_label_value(
            str(self.scheduler_job_id)
        )
        dag_id = pod.metadata.labels['dag_id']
        task_id = pod.metadata.labels['task_id']
        pod_id = create_pod_id(dag_id=dag_id, task_id=task_id)
        if pod_id not in pod_ids:
            self.log.error(
                "attempting to adopt task %s in dag %s which was not specified by database",
                task_id,
                dag_id,
            )
        else:
            try:
                kube_client.patch_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=pod.metadata.namespace,
                    body=PodGenerator.serialize_pod(pod),
                )
                pod_ids.pop(pod_id)
            except ApiException as e:
                self.log.info("Failed to adopt pod %s. Reason: %s", pod.metadata.name, e)

    def _adopt_completed_pods(self, kube_client: kubernetes.client.CoreV1Api):
        """
        Patch completed pod so that the KubernetesJobWatcher can delete it.

        :param kube_client: kubernetes client for speaking to kube API
        """
        kwargs = {
            'field_selector': "status.phase=Succeeded",
            'label_selector': 'kubernetes_executor=True',
        }
        pod_list = kube_client.list_namespaced_pod(namespace=self.kube_config.kube_namespace, **kwargs)
        for pod in pod_list.items:
            self.log.info("Attempting to adopt pod %s", pod.metadata.name)
            pod.metadata.labels['airflow-worker'] = pod_generator.make_safe_label_value(
                str(self.scheduler_job_id)
            )
            try:
                kube_client.patch_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=pod.metadata.namespace,
                    body=PodGenerator.serialize_pod(pod),
                )
            except ApiException as e:
                self.log.info("Failed to adopt pod %s. Reason: %s", pod.metadata.name, e)

    def _flush_task_queue(self) -> None:
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.debug('Executor shutting down, task_queue approximate size=%d', self.task_queue.qsize())
        while True:
            try:
                task = self.task_queue.get_nowait()
                # This is a new task to run thus ok to ignore.
                self.log.warning('Executor shutting down, will NOT run task=%s', task)
                self.task_queue.task_done()
            except Empty:
                break

    def _flush_result_queue(self) -> None:
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.debug('Executor shutting down, result_queue approximate size=%d', self.result_queue.qsize())
        while True:  # pylint: disable=too-many-nested-blocks
            try:
                results = self.result_queue.get_nowait()
                self.log.warning('Executor shutting down, flushing results=%s', results)
                try:
                    key, state, pod_id, namespace, resource_version = results
                    self.log.info(
                        'Changing state of %s to %s : resource_version=%d', results, state, resource_version
                    )
                    try:
                        self._change_state(key, state, pod_id, namespace)
                    except Exception as e:  # pylint: disable=broad-except
                        self.log.exception(
                            'Ignoring exception: %s when attempting to change state of %s to %s.',
                            e,
                            results,
                            state,
                        )
                finally:
                    self.result_queue.task_done()
            except Empty:
                break

    def end(self) -> None:
        """Called when the executor shuts down"""
        if not self.task_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.result_queue:
            raise AirflowException(NOT_STARTED_MESSAGE)
        if not self.kube_scheduler:
            raise AirflowException(NOT_STARTED_MESSAGE)
        self.log.info('Shutting down Kubernetes executor')
        self.log.debug('Flushing task_queue...')
        self._flush_task_queue()
        self.log.debug('Flushing result_queue...')
        self._flush_result_queue()
        # Both queues should be empty...
        self.task_queue.join()
        self.result_queue.join()
        if self.kube_scheduler:
            self.kube_scheduler.terminate()
        self._manager.shutdown()

    def terminate(self):
        """Terminate the executor; this is a no-op for the Kubernetes executor."""
Code Example #4
    def _run_scheduler_loop(self) -> None:
        """
        The actual scheduler loop. The main steps in the loop are:
            #. Harvest DAG parsing results through DagFileProcessorAgent
            #. Find and queue executable tasks
                #. Change task instance state in DB
                #. Queue tasks in executor
            #. Heartbeat executor
                #. Execute queued tasks in executor asynchronously
                #. Sync on the states of running tasks

        Following is a graphic representation of these steps.

        .. image:: ../docs/apache-airflow/img/scheduler_loop.jpg

        :rtype: None
        """
        if not self.processor_agent:
            raise ValueError("Processor agent is not started.")
        is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')

        timers = EventScheduler()

        # Check on start up, then every configured interval
        self.adopt_or_reset_orphaned_tasks()

        timers.call_regular_interval(
            conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
            self.adopt_or_reset_orphaned_tasks,
        )

        timers.call_regular_interval(
            conf.getfloat('scheduler', 'trigger_timeout_check_interval', fallback=15.0),
            self.check_trigger_timeouts,
        )

        timers.call_regular_interval(
            conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
            self._emit_pool_metrics,
        )

        for loop_count in itertools.count(start=1):
            with Stats.timer() as timer:

                if self.using_sqlite:
                    self.processor_agent.run_single_parsing_loop()
                    # For the sqlite case w/ 1 thread, wait until the processor
                    # is finished to avoid concurrent access to the DB.
                    self.log.debug("Waiting for processors to finish since we're using sqlite")
                    self.processor_agent.wait_until_finished()

                with create_session() as session:
                    num_queued_tis = self._do_scheduling(session)

                    self.executor.heartbeat()
                    session.expunge_all()
                    num_finished_events = self._process_executor_events(session=session)

                self.processor_agent.heartbeat()

                # Heartbeat the scheduler periodically
                self.heartbeat(only_if_necessary=True)

                # Run any pending timed events
                next_event = timers.run(blocking=False)
                self.log.debug("Next timed event is in %f", next_event)

            self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)

            if not is_unit_test and not num_queued_tis and not num_finished_events:
                # If the scheduler is doing things, don't sleep. This means when there is work to do, the
                # scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
                # usage when "idle"
                time.sleep(min(self._processor_poll_interval, next_event))

            if loop_count >= self.num_runs > 0:
                self.log.info(
                    "Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
                    self.num_runs,
                    loop_count,
                )
                break
            if self.processor_agent.done:
                self.log.info(
                    "Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
                    " scheduler loops",
                    self.num_times_parse_dags,
                    loop_count,
                )
                break
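
The idle sleep near the end of the loop depends on run(blocking=False) returning the number of seconds until the next queued event rather than waiting for it, which is the behaviour of the standard-library sched.scheduler that EventScheduler appears to build on (compare Code Example #1); the loop therefore never sleeps past a pending timer. A small standalone illustration, reusing the EventScheduler sketch given after Code Example #1 (the intervals and callbacks are placeholders):

import time

timers = EventScheduler()
timers.call_regular_interval(300.0, lambda: print("adopt_or_reset_orphaned_tasks"))
timers.call_regular_interval(5.0, lambda: print("_emit_pool_metrics"))

processor_poll_interval = 1.0
# Run any timers that are due and get the delay until the next one, without blocking.
next_event = timers.run(blocking=False)
# Sleep only until whichever comes first: the poll interval or the next timed event.
time.sleep(min(processor_poll_interval, next_event))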