Example #1
    def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
        """Handle exception."""
        _, _, tb = sys.exc_info()
        try:
            exc = self.retval
            # make sure we only send pickleable exceptions back to parent.
            einfo = ExceptionInfo()
            einfo.exception = get_pickleable_exception(einfo.exception)
            einfo.type = get_pickleable_etype(einfo.type)

            task.backend.mark_as_failure(
                req.id,
                exc,
                einfo.traceback,
                request=req,
                store_result=store_errors,
                call_errbacks=call_errbacks,
            )

            task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
            signals.task_failure.send(sender=task,
                                      task_id=req.id,
                                      exception=exc,
                                      args=req.args,
                                      kwargs=req.kwargs,
                                      traceback=tb,
                                      einfo=einfo)
            self._log_error(task, req, einfo)
            return einfo
        finally:
            del tb
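
Worth noting about this pattern: billiard's ExceptionInfo() snapshots the exception currently being handled (it defaults to sys.exc_info()), which is why handle_failure is only called from inside an exception handler. A minimal standalone sketch, assuming only that billiard is installed; capture_current_exception is an illustrative name:

from billiard.einfo import ExceptionInfo

def capture_current_exception():
    try:
        raise ValueError("boom")
    except ValueError:
        # Built inside the except block, ExceptionInfo records the exception
        # type, the instance, and a pickleable traceback wrapper.
        einfo = ExceptionInfo()
        # einfo.traceback is the formatted traceback string, einfo.tb the wrapper object.
        return einfo.type, einfo.exception, einfo.traceback
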
Example #2
File: trace.py Project: yingzong/celery
    def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
        """Handle exception."""
        _, _, tb = sys.exc_info()
        try:
            exc = self.retval
            # make sure we only send pickleable exceptions back to parent.
            einfo = ExceptionInfo()
            einfo.exception = get_pickleable_exception(einfo.exception)
            einfo.type = get_pickleable_etype(einfo.type)

            task.backend.mark_as_failure(
                req.id, exc, einfo.traceback,
                request=req, store_result=store_errors,
                call_errbacks=call_errbacks,
            )

            task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
            signals.task_failure.send(sender=task, task_id=req.id,
                                      exception=exc, args=req.args,
                                      kwargs=req.kwargs,
                                      traceback=tb,
                                      einfo=einfo)
            self._log_error(task, req, einfo)
            return einfo
        finally:
            del tb
Example #3
 def handle_failure(self, task, store_errors=True):
     """Handle exception."""
     req = task.request
     type_, _, tb = sys.exc_info()
     try:
         exc = self.retval
         einfo = ExceptionInfo()
         einfo.exception = get_pickleable_exception(einfo.exception)
         einfo.type = get_pickleable_etype(einfo.type)
         if store_errors:
             task.backend.mark_as_failure(
                 req.id,
                 exc,
                 einfo.traceback,
                 request=req,
             )
         task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
         signals.task_failure.send(sender=task,
                                   task_id=req.id,
                                   exception=exc,
                                   args=req.args,
                                   kwargs=req.kwargs,
                                   traceback=tb,
                                   einfo=einfo)
         return einfo
     finally:
         del tb
Example #4
def clean_up_failure_task(result={},
                          export_provider_task_uids=[],
                          run_uid=None,
                          run_dir=None,
                          worker=None,
                          *args,
                          **kwargs):
    """
    Used to close tasks in a failed chain.

    If a task fails or is canceled, all of the UIDs will be passed here; the failed object will be found and
    propagated to the subsequent tasks in the chain. Additionally, they will be finalized to ensure that the run
    finishes.
    """

    from eventkit_cloud.tasks.models import ExportProviderTask, ExportTaskException
    from billiard.einfo import ExceptionInfo

    task_status = None
    incomplete_export_provider_task = None
    for export_provider_task_uid in export_provider_task_uids:
        export_provider_task = ExportProviderTask.objects.get(
            uid=export_provider_task_uid)
        for export_task in export_provider_task.tasks.all():
            if TaskStates[export_task.status] in TaskStates.get_incomplete_states():
                if not task_status:
                    task_status = export_task.status
                    incomplete_export_provider_task = export_provider_task.name
            else:
                if task_status:
                    export_task.status = task_status
                    try:
                        raise CancelException(
                            message="{0} could not complete because it depends on {1}".format(
                                export_provider_task.name,
                                incomplete_export_provider_task))
                    except CancelException as ce:
                        einfo = ExceptionInfo()
                        einfo.exception = ce
                        ExportTaskException.objects.create(
                            task=export_task, exception=cPickle.dumps(einfo))
                    export_task.save()

        finalize_export_provider_task.si(
            run_uid=run_uid,
            export_provider_task_uid=export_provider_task_uid,
            worker=worker).set(queue=worker, routing_key=worker).apply_async(
                interval=1,
                max_retries=10,
                queue=worker,
                routing_key=worker,
                priority=TaskPriority.FINALIZE_PROVIDER.value)
    return result
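
The ExportTaskException rows above store a pickled ExceptionInfo; billiard's einfo objects wrap the traceback in plain, pickleable classes precisely so they survive this. A minimal sketch of the store/rehydrate round trip, assuming Python 3 (pickle in place of cPickle):

import pickle
from billiard.einfo import ExceptionInfo

try:
    raise RuntimeError("provider task failed")
except RuntimeError:
    einfo = ExceptionInfo()

blob = pickle.dumps(einfo)        # what would be written to ExportTaskException.exception
restored = pickle.loads(blob)
print(restored.exception)         # RuntimeError('provider task failed')
print(restored.traceback)         # formatted traceback string
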
Example #5
File: trace.py Project: kalefranz/celery
 def handle_failure(self, task, store_errors=True):
     """Handle exception."""
     req = task.request
     type_, _, tb = sys.exc_info()
     try:
         exc = self.retval
         einfo = ExceptionInfo()
         einfo.exception = get_pickleable_exception(einfo.exception)
         einfo.type = get_pickleable_etype(einfo.type)
         if store_errors:
             task.backend.mark_as_failure(req.id, exc, einfo.traceback, request=req)
         task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
         signals.task_failure.send(
             sender=task, task_id=req.id, exception=exc, args=req.args, kwargs=req.kwargs, traceback=tb, einfo=einfo
         )
         return einfo
     finally:
         del tb
Example #6
    def on_success(self, retval, task_id, args, kwargs):
        try:
            logger.info(
                'task succeeded: task_id={}, retval={}, args={}, kwargs={}'
                .format(task_id, retval, args, kwargs))
            # If the run succeeded and there is a next step, execute it
            if self.do_success(retval, task_id, args,
                               kwargs) and kwargs.get('next_task_kwargs'):
                for next_task_kwarg in kwargs['next_task_kwargs']:
                    with session_scope() as ss:
                        from worker.run_task import run_celery_task
                        run_celery_task(session=ss, **next_task_kwarg)

        except Exception as e:
            einfo = ExceptionInfo()
            einfo.exception = get_pickleable_exception(einfo.exception)
            einfo.type = get_pickleable_etype(einfo.type)
            self.on_failure(e, task_id, args, kwargs, einfo)
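
For context, on_success and on_failure are hook methods on celery.Task that the worker calls after the task body returns or raises; a custom class like the one above is usually attached via the base= argument of the task decorator. A minimal sketch of that wiring; app, CallbackTask, and add are illustrative names, not part of the example above:

from celery import Celery, Task

app = Celery("demo", broker="memory://")

class CallbackTask(Task):
    def on_success(self, retval, task_id, args, kwargs):
        # Called by the worker after the task body returns successfully.
        print("{} succeeded with {!r}".format(task_id, retval))

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        # Called when the task raises; einfo is a billiard ExceptionInfo.
        print("{} failed:\n{}".format(task_id, einfo.traceback))

@app.task(base=CallbackTask, bind=True)
def add(self, x, y):
    return x + y
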
Example #7
def _signal_internal_error(task, uuid, args, kwargs, request, exc):
    """Send a special `internal_error` signal to the app for outside body errors."""
    try:
        _, _, tb = sys.exc_info()
        einfo = ExceptionInfo()
        einfo.exception = get_pickleable_exception(einfo.exception)
        einfo.type = get_pickleable_etype(einfo.type)
        signals.task_internal_error.send(
            sender=task,
            task_id=uuid,
            args=args,
            kwargs=kwargs,
            request=request,
            exception=exc,
            traceback=tb,
            einfo=einfo,
        )
    finally:
        del tb
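
On the receiving end, a handler connected to this signal is called with the same keyword arguments passed to send() above. A minimal sketch of such a receiver; log_internal_error is an illustrative name:

from celery import signals

@signals.task_internal_error.connect
def log_internal_error(sender=None, task_id=None, exception=None, einfo=None, **kwargs):
    # sender is the task object; einfo carries the pickleable exception and traceback.
    print("internal error in task {}: {!r}".format(task_id, exception))
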
Example #8
def cancel_export_provider_task(result={},
                                export_provider_task_uid=None,
                                canceling_user=None):
    """
    Cancels an ExportProviderTask and terminates each subtask's execution.
    """

    from ..tasks.models import ExportProviderTask, ExportTaskException, ExportTaskResult
    from ..tasks.exceptions import CancelException
    from billiard.einfo import ExceptionInfo
    from datetime import datetime, timedelta

    export_provider_task = ExportProviderTask.objects.filter(
        uid=export_provider_task_uid).first()

    if not export_provider_task:
        result['result'] = False
        return result

    export_tasks = export_provider_task.tasks.all()

    # Loop through both the tasks in the ExportProviderTask model and the task chain in Celery.
    for export_task in export_tasks.filter(~Q(
            status=TaskStates.CANCELED.value)):
        export_task.status = TaskStates.CANCELED.value
        export_task.cancel_user = canceling_user
        export_task.save()
        # This part is to populate the UI with the cancel message.  If a different mechanism is incorporated
        # to pass task information to the users, then it may make sense to replace this.
        try:
            raise CancelException(task_name=export_provider_task.name,
                                  user_name=canceling_user)
        except CancelException as ce:
            einfo = ExceptionInfo()
            einfo.exception = ce
            ExportTaskException.objects.create(task=export_task,
                                               exception=cPickle.dumps(einfo))

        # Remove the ExportTaskResult, which will clean up the files.
        task_result = ExportTaskResult.objects.filter(task=export_task).first()
        if task_result:
            task_result.delete()

        if export_task.pid and export_task.worker:
            kill_task.apply_async(
                kwargs={
                    "task_pid": export_task.pid,
                    "celery_uid": export_task.celery_uid
                },
                queue="{0}.cancel".format(export_task.worker),
                priority=TaskPriority.CANCEL.value,
                routing_key="{0}.cancel".format(export_task.worker))

    export_provider_task.status = TaskStates.CANCELED.value
    export_provider_task.save()

    # Because the task is revoked, the follow-on is never run. If using revoke, this is required; if using kill,
    # this can probably be removed, as the task will simply fail and the follow-on task from the task_factory will
    # pick up the task.
    run_uid = export_provider_task.run.uid
    worker = export_provider_task.tasks.first().worker
    # Because we don't care about the files in a canceled task, the stage dir can be the run dir,
    # which will be cleaned up in the final steps.
    stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT.rstrip('\\/'),
                             str(run_uid))

    finalize_export_provider_task.si(
        run_uid=run_uid,
        stage_dir=stage_dir,
        export_provider_task_uid=export_provider_task_uid,
        worker=worker).set(queue=worker, routing_key=worker).apply_async(
            interval=1,
            max_retries=10,
            expires=datetime.now() + timedelta(days=2),
            priority=TaskPriority.FINALIZE_PROVIDER.value,
            routing_key=worker,
            queue=worker)
    return result
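
The finalize_export_provider_task dispatch above relies on the standard Celery signature API: .si() builds an immutable signature, .set() attaches routing options such as queue and routing_key, and .apply_async() submits it with execution options like priority and expires. A minimal sketch of the same pattern; cleanup and the "worker-a" queue name are illustrative:

from celery import Celery

app = Celery("demo", broker="memory://")

@app.task
def cleanup(run_uid=None):
    return run_uid

# Immutable signature routed to a specific queue, then submitted with a priority.
sig = cleanup.si(run_uid="1234").set(queue="worker-a", routing_key="worker-a")
sig.apply_async(priority=5)
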