Example no. 1
(score: 0)
def rq_error_handler(job: Job, exc_type, exc_value, traceback):
    """RQ exception handler: record the failure, then retry transient errors.

    Emits a `worker_error` metric and a structured log entry for every
    failure. Jobs that failed for any reason other than PersistentError
    are reset to QUEUED and re-enqueued; PersistentError failures are
    recorded (`worker_persistent_error` metric) and not retried.

    Args:
        job: the failed RQ job.
        exc_type: exception class raised by the job.
        exc_value: exception instance.
        traceback: traceback object for the failure.
    """
    statsd.increment('worker_error',
                     tags=[
                         'job:%s' % job.func_name,
                         'error_type:%s' % exc_type,
                         'error:%s' % exc_value
                     ])
    log.error('worker error in job',
              func_name=job.func_name,
              args=job.args,
              exc_info=(exc_type, exc_value, traceback))

    # Retry unless the error is permanent. Use issubclass() rather than
    # the original `exc_type != PersistentError`: an equality check would
    # wrongly retry subclasses of PersistentError forever.
    is_persistent = isinstance(exc_type, type) and issubclass(exc_type, PersistentError)
    if not is_persistent:
        # reset job state and retry the job
        job.set_status(JobStatus.QUEUED)
        job.exc_info = None
        q.enqueue_job(job)
    else:
        statsd.increment('worker_persistent_error',
                         tags=[
                             'job:%s' % job.func_name,
                             'error_type:%s' % exc_type,
                             'error:%s' % exc_value
                         ])
        log.error('PersistentError: not retrying', e=exc_value)
Example no. 2
(score: 0)
def _retry_handler(job: Job, *exc_info: Tuple[Union[str, bytes], ...]) -> bool:
    """RQ exception handler implementing a bounded retry policy.

    Each job carries a `remaining_retries` counter in its meta dict
    (default 2). While retries remain, the counter is decremented and the
    job is re-enqueued on its original queue; once exhausted, the job is
    moved to the FailedJobRegistry.

    Args:
        job: the failed RQ job.
        *exc_info: exception info captured by the worker.

    Returns:
        False, so RQ does not invoke any further exception handlers.
    """
    retries = job.meta.get("remaining_retries", 2)
    # Hoisted: the queue handle is needed on both the retry and the
    # failure path (the original constructed it twice).
    q = Queue(name=job.origin, connection=job.connection)

    if retries > 0:
        retries -= 1
        job.meta["remaining_retries"] = retries
        job.set_status(JobStatus.QUEUED)
        job.exc_info = exc_info
        job.save()
        q.enqueue_job(job)
        logger.info(f"Retrying job {job.id}")
    else:
        logger.error(f"Failing job {job.id}")
        failed_queue = FailedJobRegistry(queue=q)
        # FailedJobRegistry.add expects `exc_string` to be a string; the
        # original passed the raw exc_info tuple, storing malformed data.
        failed_queue.add(job, exc_string="".join(
            p.decode(errors="replace") if isinstance(p, bytes) else str(p)
            for p in exc_info
        ))
    return False