def inner(*args, **kwargs):
    try:
        if new_session and not conf.test:
            db.session.close()
        logbook.debug("Start task {} with args: {} {}", fn.__name__, args, kwargs)
        try:
            h = "%8x" % abs(hash(args))
        except TypeError:
            from pprint import pformat
            h = hash(pformat(args))
            h = "%8x" % abs(h)
        request_id = "%s-%s" % (fn.__name__, h[0:4])

        def inject_request_id(record):
            record.extra['request_id'] = request_id

        with logbook.Processor(inject_request_id):
            res = fn(*args, **kwargs)
            if auto_commit:
                db.session.commit()
            logbook.debug("Result of task {}: {}", fn.__name__, res)
            return res
    except OperationalError as operation_error:
        logbook.warning("Database is down {}: {}", conf.database.uri, operation_error, exc_info=True)
        logbook.error("Database is down {}: {}", conf.database.uri, operation_error)
        db.session.close()
        current_task.retry(exc=operation_error, countdown=calc_exp_countdown())
    except Exception as exc:
        logbook.warning("{} failed. Retrying...", fn.__name__, exc_info=True)
        current_task.retry(exc=exc, countdown=calc_exp_countdown())
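# A minimal sketch of the outer decorator that could produce an ``inner`` like
# the one above; the factory name ``task_with_session`` and the default flag
# values are assumptions, and the original body is elided to a placeholder.
import functools


def task_with_session(new_session=True, auto_commit=True):
    def decorator(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            # body as in the example above: it reads ``fn``, ``new_session``
            # and ``auto_commit`` from the enclosing scopes and retries the
            # bound Celery task when the database is unavailable
            return fn(*args, **kwargs)
        return inner
    return decorator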
def wrapped(*args, **kwargs):
    is_true = func(*args, **kwargs)
    if not is_true:
        try:
            current_task.retry()
        except MaxRetriesExceededError:
            raise RuntimeError('Task %s failed to retry' % current_task.name)
    return is_true
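# A minimal sketch of how a retry-until-true wrapper like ``wrapped`` above is
# typically built and attached to a Celery task; the app instance, the
# decorator name ``retry_until_true`` and the example task are assumptions.
import functools

from celery import Celery, current_task
from celery.exceptions import MaxRetriesExceededError

app = Celery('example')


def retry_until_true(func):
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        is_true = func(*args, **kwargs)
        if not is_true:
            try:
                current_task.retry()
            except MaxRetriesExceededError:
                raise RuntimeError('Task %s failed to retry' % current_task.name)
        return is_true
    return wrapped


@app.task(max_retries=5, default_retry_delay=10)
@retry_until_true
def poll_resource(resource_id):
    # hypothetical check; the task keeps retrying until this returns True
    return resource_id is not None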
def update_refs(filename):
    # get refs
    refs = GetRefs()
    p = OSMParser(concurrency=4, coords_callback=refs.coords)
    p.parse(filename)
    if len(Ref.objects.filter(need_update=True)) > 0:
        current_task.retry(args=[filename])
    return True
def __enter__(self):
    if self.acquire_lock():
        return self
    try:
        # max_retries should be big enough to retry until the lock expires;
        # this guarantees that the task will be executed rather than failed
        current_task.retry(countdown=self.opt('retry_delay'), max_retries=10000)
    except MaxRetriesExceededError as e:
        six.reraise(Throttled, e)
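# A minimal sketch of a lock-backed context manager built around an
# ``__enter__`` like the one above, assuming a configured Django cache backend
# and hypothetical names (``TaskLock``, its key and timeout arguments); the
# retry parameters mirror the original snippet.
from celery import current_task
from celery.exceptions import MaxRetriesExceededError
from django.core.cache import cache


class Throttled(Exception):
    pass


class TaskLock(object):
    def __init__(self, key, retry_delay=30, timeout=300):
        self.key = key
        self.retry_delay = retry_delay
        self.timeout = timeout

    def acquire_lock(self):
        # cache.add is atomic: it only sets the key if it is not present yet
        return cache.add(self.key, 'locked', self.timeout)

    def __enter__(self):
        if self.acquire_lock():
            return self
        try:
            # keep retrying until the lock expires instead of failing the task
            current_task.retry(countdown=self.retry_delay, max_retries=10000)
        except MaxRetriesExceededError as e:
            raise Throttled(str(e))

    def __exit__(self, exc_type, exc_value, traceback):
        cache.delete(self.key)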
def retry_current_celery_task(original_function_name, task_id, exc, countdown, polling_task=False):
    task_logger = create_task_logger(logger, task_id)
    task_logger.warning('{0}.retry {1} in {2} seconds'.format(
        original_function_name, task_id, countdown))
    try:
        current_task = get_current_celery_task()
        current_task.retry(exc=exc, countdown=countdown)
    except celery.exceptions.RetryTaskError:
        # This is normal operation: Celery signals the worker that the task
        # should be retried by raising a RetryTaskError, so just re-raise it.
        try:
            if not polling_task:
                task = Task.objects.get(pk=task_id)
                task.retry_count = current_task.request.retries + 1
                logger.debug("Retry is: %s", current_task.request.retries)
                task.save()
        except Exception:
            # Never fail on saving this
            logger.exception("Failed on saving retry_count for task %d", task_id)
        raise
    except Exception as ex:
        if ex is exc:
            # The same exception we passed to retry() has been re-raised,
            # which means the max_retries limit has been exceeded.
            task_logger.error(
                "{0}.retry {1} exceeded retry limit - changing status to error"
                .format(original_function_name, task_id))
        else:
            # Some other exception occurred; log the details.
            task_logger.exception(
                "{0}.retry {1} failed - changing status to error".format(
                    original_function_name, task_id))
        mark_task_as_error(task_id, str(ex))
        raise
def spawn_ready_tasks(job_id):
    if job_id is None:
        logger.info("spawn_ready_tasks received no job_id. Skipping processing.")
        return None
    job_logger = create_job_logger(logger, job_id)
    job_logger.info("Starting spawn_ready_tasks for Job %s", job_id)
    try:
        job = EngineJob.objects.get(pk=job_id)
        ready_tasks = job.ready_tasks()
        logger.debug(ready_tasks)
        aborting = job.is_workflow_aborting
        if aborting:
            abort_job(job)
            for task in ready_tasks:
                task.set_status(STATUS_ABORTED)
                task.save()
        else:
            spawn_status = {}
            for task in ready_tasks:
                spawn_status[task.pk] = spawn_task(task)
            if not all(spawn_status.values()):
                not_spawned = [e[0] for e in spawn_status.items() if not e[1]]
                job_logger.info("Couldn't spawn tasks: %s", not_spawned)
                current_task = get_current_celery_task()
                current_task.retry(countdown=TASK_LIMIT_REACHED_RETRY_INTERVAL)
            # need to update task.job.status here when all tasks for the job are spawned?
        job_logger.info("Finished spawn_ready_tasks for Job %s", job_id)
        return job_id
    except celery.exceptions.RetryTaskError:
        # This is normal operation: Celery signals the worker that the task
        # should be retried by raising a RetryTaskError, so just re-raise it.
        raise
    except Exception:
        job_logger.exception("Exception when spawning tasks for job {0}".format(job_id))
        mark_job_as_error(job_id)
        raise
def insert_church_way(data):
    """
    Insert church based on way(s).
    Needs references to points for creating the way(s).
    """
    # are all refs in the database?
    ref_tuples = []
    for ref in data.get('refs'):
        try:
            x = Ref.objects.get(osm_id=ref)
        except Ref.DoesNotExist:
            x = None
        except Ref.MultipleObjectsReturned:
            x = Ref.objects.filter(osm_id=ref)
            x[1].delete()
            x = x[0]
        if x and not x.need_update:
            ref_tuples.append(x.point.tuple)
        else:
            # not yet done / postpone
            return current_task.retry(countdown=600)
    try:
        kosm, created = KircheOsm.objects.get_or_create(osm_id=data['id'])
    except KircheOsm.MultipleObjectsReturned:
        x = KircheOsm.objects.filter(osm_id=data['id'])
        x[1].delete()
        kosm = x[0]
    kosm = set_tags(kosm, data.get('tags'))
    kosm.osm_type = 'W'
    try:
        kosm.mpoly = MultiPolygon(Polygon(tuple(ref_tuples)))
        kosm.point = kosm.mpoly.centroid
        kosm.lon, kosm.lat = kosm.point.tuple
    except Exception:
        try:
            # add the first point at the end to close the ring
            ref_tuples.append(ref_tuples[0])
            kosm.mpoly = MultiPolygon(Polygon(tuple(ref_tuples)))
            kosm.point = kosm.mpoly.centroid
            kosm.lon, kosm.lat = kosm.point.tuple
        except Exception:
            kosm.lon, kosm.lat = ref_tuples[0]
    kosm.set_geo()
    kosm.save()
    return True
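# A short illustration of why the fallback above appends the first point:
# GEOS polygons are built from closed linear rings, so the first and last
# coordinates must be identical. The coordinates below are illustrative only.
from django.contrib.gis.geos import MultiPolygon, Polygon

ring = [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
ring.append(ring[0])                      # close the ring explicitly
mpoly = MultiPolygon(Polygon(tuple(ring)))
print(mpoly.centroid)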
def retrying(*args, **kwargs):
    try:
        task(*args, **kwargs)
    except Exception as e:
        current_task.retry(exc=e, countdown=30)
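# A minimal sketch of a decorator that could wrap a task body in a
# ``retrying`` function like the one above; the decorator name
# ``retry_on_error`` is an assumption, while the 30-second countdown mirrors
# the original snippet.
import functools

from celery import current_task


def retry_on_error(task):
    @functools.wraps(task)
    def retrying(*args, **kwargs):
        try:
            task(*args, **kwargs)
        except Exception as e:
            # hand the original exception back to Celery and re-queue the task
            current_task.retry(exc=e, countdown=30)
    return retrying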