コード例 #1
0
ファイル: test_database.py プロジェクト: bryson/celery
 def test_context_raises(self):
     """An exception raised inside session_cleanup must roll back, then close."""
     mock_session = Mock(name='session')
     with pytest.raises(KeyError):
         with session_cleanup(mock_session):
             raise KeyError()
     mock_session.rollback.assert_called_with()
     mock_session.close.assert_called_with()
コード例 #2
0
ファイル: test_database.py プロジェクト: yinlinzh/celery
 def test_context_raises(self):
     """On error, the context manager rolls the session back and closes it."""
     fake = Mock(name='session')
     # Nested managers collapsed into one with-statement (equivalent nesting).
     with pytest.raises(KeyError), session_cleanup(fake):
         raise KeyError()
     fake.rollback.assert_called_with()
     fake.close.assert_called_with()
コード例 #3
0
 def _store_result(self,
                   task_id,
                   result,
                   state,
                   traceback=None,
                   max_retries=3,
                   **kwargs):
     """Persist a task's result and state in the database backend.

     Looks up the ``Task`` row for ``task_id``, creating (and flushing) a
     fresh row when none exists yet, then updates its result, status,
     traceback and request metadata before committing.  Returns ``result``
     unchanged.
     """
     request = kwargs.get('request', {})
     session = self.ResultSession()
     with session_cleanup(session):
         matches = list(session.query(Task).filter(Task.task_id == task_id))
         if matches:
             task = matches[0]
         else:
             # First result for this id: insert a new row and flush it so
             # the session knows about it before the attribute updates.
             task = Task(task_id)
             session.add(task)
             session.flush()
         task.result = result
         task.status = state
         task.traceback = traceback
         # NOTE(review): request may be a plain dict (the default), in which
         # case these getattr() calls all fall back to their defaults.
         task.task_name = repr(getattr(request, 'task', None))
         task.task_args = repr(self.get_args(getattr(request, 'args', [])))
         task.task_kwargs = repr(getattr(request, 'kwargs', None))
         session.commit()
         return result
コード例 #4
0
ファイル: celery_executor.py プロジェクト: colpal/airfloss
    def _get_many_from_db_backend(self, async_tasks) -> Mapping[str, EventBufferValueType]:
        """Fetch task states in bulk straight from the Celery database result backend.

        :param async_tasks: async result objects whose task ids are extracted
            via ``_tasks_list_to_task_ids``.
        :return: mapping of task id to event-buffer value, as built by
            ``_prepare_state_and_info_by_task_dict``.
        """
        task_ids = _tasks_list_to_task_ids(async_tasks)
        session = app.backend.ResultSession()
        with session_cleanup(session):
            tasks = session.query(TaskDb).filter(TaskDb.task_id.in_(task_ids)).all()
            # Decode the rows while the session is still open: once
            # session_cleanup closes it, the ORM instances may be detached and
            # attribute access on them can raise (DetachedInstanceError).
            task_results = [app.backend.meta_from_decoded(task.to_dict()) for task in tasks]

        task_results_by_task_id = {task_result["task_id"]: task_result for task_result in task_results}
        return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
コード例 #5
0
ファイル: celery_executor.py プロジェクト: karankale/airflow
    def _clear_stuck_queued_tasks(self,
                                  session: Session = NEW_SESSION) -> None:
        """
        Tasks can get stuck in queued state in DB while still not in
        worker. This happens when the worker is autoscaled down and
        the task is queued but has not been picked up by any worker prior to the scaling.

        In such situation, we update the task instance state to scheduled so that
        it can be queued again. We chose to use task_adoption_timeout to decide when
        a queued task is considered stuck and should be rescheduled.

        :param session: Airflow metadata DB session used to query and update
            ``TaskInstance`` rows (distinct from the Celery result-backend
            session opened below).
        """
        if not isinstance(app.backend, DatabaseBackend):
            # We only want to do this for database backends where
            # this case has been spotted
            return
        # We use this instead of using bulk_state_fetcher because we
        # may not have the stuck task in self.tasks and we don't want
        # to clear task in self.tasks too
        session_ = app.backend.ResultSession()
        task_cls = getattr(app.backend, "task_cls", TaskDb)
        with session_cleanup(session_):
            # Collect into a set: membership is tested once per queued task
            # instance below, so O(1) lookups beat a list scan.
            celery_task_ids = {
                t.task_id for t in session_.query(task_cls.task_id).filter(
                    ~task_cls.status.in_(
                        [celery_states.SUCCESS, celery_states.FAILURE])).all()
            }
        self.log.debug("Checking for stuck queued tasks")

        max_allowed_time = utcnow() - self.task_adoption_timeout

        for task in session.query(TaskInstance).filter(
                TaskInstance.state == State.QUEUED,
                TaskInstance.queued_dttm < max_allowed_time):
            # Skip tasks this executor already knows about.
            if task.key in self.queued_tasks or task.key in self.running:
                continue

            if task.external_executor_id in celery_task_ids:
                # The task is still running in the worker
                continue

            self.log.info(
                'TaskInstance: %s found in queued state for more than %s seconds, rescheduling',
                task,
                self.task_adoption_timeout.total_seconds(),
            )
            task.state = State.SCHEDULED
            session.merge(task)
コード例 #6
0
ファイル: test_database.py プロジェクト: yinlinzh/celery
 def test_context(self):
     """A clean (non-raising) exit still closes the session."""
     db = Mock(name='session')
     with session_cleanup(db):
         pass
     db.close.assert_called_with()
コード例 #7
0
ファイル: test_database.py プロジェクト: bryson/celery
 def test_context(self):
     """session_cleanup closes the wrapped session on normal exit."""
     stub_session = Mock(name='session')
     with session_cleanup(stub_session):
         pass  # no work: only the cleanup behaviour is under test
     stub_session.close.assert_called_with()