def list_active_task():
    # Build a BaseAsyncResult handle for every task currently active on a
    # worker; `inspect` and BaseAsyncResult come from the pre-3.0 Celery API.
    tasks_info = []
    i = inspect()
    active = i.active()
    if active is not None:
        for v in active.values():
            for t in v:
                r = BaseAsyncResult(t['id'])
                r.task_name = t['name']
                tasks_info.append({'result': r})
    return tasks_info
def result(key):
    print(key)
    result = BaseAsyncResult(key, app=tasks.celery)
    # FAILURE is itself a "ready" state, so check for failure first;
    # otherwise result.get() would re-raise the task's exception.
    if result.failed():
        return result.traceback
    elif result.ready():
        return render_template('result.html', result=result.get())
    else:
        return render_template('processing.html')
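# A minimal sketch of how the polling view above might be wired up in
# Flask; the `app` object and the '/result/<key>' route are assumptions,
# not part of the original snippet.
from flask import Flask

app = Flask(__name__)

@app.route('/result/<key>')
def show_result(key):
    # Delegate to the polling view defined above.
    return result(key)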
def AsyncResult(self, task_id, backend=None, task_name=None):
    """Create :class:`celery.result.BaseAsyncResult` instance."""
    from celery.result import BaseAsyncResult
    return BaseAsyncResult(task_id, app=self,
                           task_name=task_name,
                           backend=backend or self.backend)
def AsyncResult(self, task_id):
    """Get AsyncResult instance for this kind of task.

    :param task_id: Task id to get result for.

    """
    return BaseAsyncResult(task_id, backend=self.backend)
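# Both factories above return a BaseAsyncResult bound to the appropriate
# backend. A minimal polling sketch, assuming `app` is a configured Celery
# application and `task_id` was saved when the task was published:
import time

from celery.exceptions import TimeoutError

def wait_for(app, task_id, poll=0.5, timeout=30.0):
    # Poll the result backend until the task reaches a ready state.
    result = app.AsyncResult(task_id)
    deadline = time.time() + timeout
    while not result.ready():
        if time.time() > deadline:
            raise TimeoutError('task %s still pending' % task_id)
        time.sleep(poll)
    return result.get(propagate=False)  # don't re-raise task exceptions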
def downloads_list(request):
    expected_downloads = Download.objects.filter(user=request.user, status="expected")
    for d in expected_downloads:
        task = BaseAsyncResult(d.task_id)
        if task.ready():
            if task.successful():
                d.file_path = task.result
                d.status = 'complete'
            # Failed tasks keep the "expected" status and are saved as-is.
            d.save()
    downloads = Download.objects.filter(user=request.user).order_by('-created')
    return render_to_response("download_list.html", {'downloads': downloads},
                              context_instance=RequestContext(request))
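# The view above assumes a Django model roughly like this; the field names
# are taken from the view itself, everything else is an assumption:
from django.contrib.auth.models import User
from django.db import models

class Download(models.Model):
    user = models.ForeignKey(User)                  # pre-Django-2.0 style FK
    task_id = models.CharField(max_length=255)      # Celery task uuid
    status = models.CharField(max_length=16, default="expected")
    file_path = models.CharField(max_length=255, blank=True)
    created = models.DateTimeField(auto_now_add=True)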
def delay(self, *args, **kwargs):
    """
    Put this task on the Celery queue as a singleton. Only one of this type
    of task with its distinguishing args/kwargs will be allowed on the
    queue at a time. Subsequent duplicate tasks called while this task is
    still running will just latch on to the results of the running task by
    synchronizing the task uuid. Additionally, identical task calls will
    return those results for the next ``cache_duration`` seconds.

    Passing a ``cache_duration`` keyword argument controls how long
    identical task calls will latch on to previously cached results.
    """
    self._validate_required_class_vars()

    cache_key = self._get_cache_key(**kwargs)

    # Check for an already-computed and cached result
    task_id = cache.get(cache_key)
    if task_id:
        # We've already built this result, just latch on to the task that
        # did the work
        logging.info('Found existing cached and completed task: %s', task_id)
        return BaseAsyncResult(task_id, self.backend)

    # Check for an in-progress equivalent task to avoid duplicating work
    task_id = cache.get('herd:%s' % cache_key)
    if task_id:
        logging.info('Found existing in-progress task: %s', task_id)
        return BaseAsyncResult(task_id, self.backend)

    # It's not cached and it's not already running. Use an atomic lock to
    # start the task, ensuring there isn't a race condition that could
    # result in multiple identical tasks being fired at once.
    with acquire_lock('lock:%s' % cache_key):
        task_meta = super(JobtasticTask, self).delay(*args, **kwargs)
        logging.info('Current status: %s', task_meta.status)
        if task_meta.status in [PROGRESS, states.PENDING]:
            cache.set('herd:%s' % cache_key, task_meta.task_id,
                      timeout=self.herd_avoidance_timeout)
            logging.info('Setting herd-avoidance cache for task: %s', cache_key)
        return task_meta
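# A minimal sketch of a task built on the herd-avoiding delay() above; the
# class name, kwarg, and timeout values are illustrative assumptions in the
# jobtastic-style API this snippet implements:
class CountArticlesTask(JobtasticTask):
    significant_kwargs = [('topic', str)]  # what makes two calls "identical"
    herd_avoidance_timeout = 60            # seconds a run counts as in-progress
    cache_duration = 300                   # seconds a finished result is reused

    def calculate_result(self, topic, **kwargs):
        return expensive_count(topic)      # hypothetical helper

# Two back-to-back calls share one task id instead of doing the work twice:
# first = CountArticlesTask.delay(topic='celery')
# second = CountArticlesTask.delay(topic='celery')
# assert first.task_id == second.task_id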
def ajax_account_creation_done(request, task_id):
    result = BaseAsyncResult(task_id, CacheBackend(default_app))
    if not result.ready():
        return {"ok": True, "ready": False}
    result = result.get()
    return {
        "ok": True,
        "ready": True,
        "slug": result['slug'],
        "mt4_id": result['mt4_id'],
        "mt4_password": result['mt4_password'],
        "redirect": reverse("mt4_account_welcome",
                            args=[result['slug'], result['mt4_id']]),
    }
def delay_or_fail(self, *args, **kwargs):
    """
    Attempt to call self.delay, but if that fails with an exception, we
    fake the task completion using the exception as the result. This
    allows us to seamlessly handle errors on task creation the same way we
    handle errors when a task runs, simplifying the user interface.
    """
    try:
        return self.delay(*args, **kwargs)
    except IOError as e:
        # Store the exception as an async result, so errors connecting to
        # the broker can be handled with the same client-side code that
        # handles errors that occur on workers.
        self.backend.store_result(self.task_id, e, status=states.FAILURE)
        return BaseAsyncResult(self.task_id, self.backend)
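# With delay_or_fail(), a broker outage surfaces through the normal result
# API. A minimal hedged sketch of the calling side; `SomeTask` and
# `notify_user_of_failure` are assumptions:
result = SomeTask.delay_or_fail(user_id=42)
if result.failed():
    # Covers worker exceptions and broker-connection errors alike.
    notify_user_of_failure(result.traceback)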
def task_status_is_progress(self, **kwargs):
    task_id = self.request.id
    meta = BaseAsyncResult(task_id)
    assert meta.status == PROGRESS
def task(self):
    return BaseAsyncResult(self.task_id)
def async_result(self):
    """Return the Celery AsyncResult task object for this task, if any"""
    if self.task_id:
        return BaseAsyncResult(self.task_id)
    else:
        return None
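# The None-guard above lets callers distinguish "never enqueued" from
# "enqueued but pending". A minimal usage sketch, assuming `job` is an
# instance of the model that defines async_result():
result = job.async_result()
if result is None:
    print('no task was ever enqueued for this job')
elif result.ready():
    print('finished with status %s' % result.status)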