def cancel(event):
    """
    Bound to the "jobs.cancel" event; triggered any time a job is canceled.

    Only jobs whose ``handler`` field is ``worker_handler`` or
    ``celery_handler`` are processed; for those, the matching Celery task is
    revoked and the job moved to the custom CANCELING state.

    :param event: girder event whose ``info`` attribute is the job document.
    """
    job = event.info
    if job['handler'] in ['worker_handler', 'celery_handler']:
        # Stop event propagation and prevent default, we are using a custom state
        event.stopPropagation().preventDefault()
        celeryTaskId = job.get('celeryTaskId')
        if celeryTaskId is None:
            msg = ("Unable to cancel Celery task. Job '%s' doesn't have a Celery task id."
                   % job['_id'])
            # FIX: logger.warn() is a deprecated alias; use warning().
            logger.warning(msg)
            return
        if job['status'] not in [CustomJobStatus.CANCELING, JobStatus.CANCELED,
                                 JobStatus.SUCCESS, JobStatus.ERROR]:
            # Set the job status to canceling
            ModelImporter.model('job', 'jobs').updateJob(
                job, status=CustomJobStatus.CANCELING)
            # Send the revoke request.
            asyncResult = AsyncResult(celeryTaskId, app=getCeleryApp())
            asyncResult.revoke()
def export_status(self, taskid):
    """Report status of an accounts CSV export task.

    Renders a status page while the task runs, aborts the task after ten
    polls stuck in PENDING/RETRY/FAILURE, and streams the CSV when the
    caller passes ``d=y`` after completion.  NOTE: pylons ``redirect()``
    raises an exception, so code after each redirect call does not run.
    """
    result = AsyncResult(taskid)
    # Reject task ids that were not created by this session.
    if result is None or taskid not in session['taskids']:
        msg = _('The task status requested has expired or does not exist')
        flash(msg)
        log.info(msg)
        redirect(url(controller='accounts', action='index'))
    if result.ready():
        finished = True
        flash.pop_messages()
        if isinstance(result.result, Exception):
            msg = _('Error occured in processing %s') % result.result
            # Only superadmins see the raw exception text.
            if c.user.is_superadmin:
                flash_alert(msg)
                log.info(msg)
            else:
                flash_alert(_('Backend error occured during processing.'))
                log.info(msg)
            redirect(url(controller='accounts', action='index'))
        results = dict(
            f=True if not result.result['global_error'] else False,
            id=taskid,
            global_error=result.result['global_error'])
        audit_log(c.user.username, 5, unicode(auditmsgs.ACCOUNTEXPORT_MSG),
                  request.host, request.remote_addr, arrow.utcnow().datetime)
    else:
        # Count polls so a stuck task is eventually revoked.
        try:
            session['acexport-count'] += 1
        except KeyError:
            session['acexport-count'] = 1
        session.save()
        if (session['acexport-count'] >= 10 and
                result.state in ['PENDING', 'RETRY', 'FAILURE']):
            result.revoke()
            del session['acexport-count']
            session.save()
            msg = _('The export could not be processed, try again later')
            flash_alert(msg)
            log.info(msg)
            redirect(url(controller='accounts', action='index'))
        finished = False
        results = dict(f=None, global_error=None)
    c.finished = finished
    c.results = results
    c.success = result.successful()
    dwn = request.GET.get('d', None)
    # Serve the generated CSV as a download once finished and requested.
    if finished and (dwn and dwn == 'y'):
        response.content_type = 'text/csv'
        response.headers['Cache-Control'] = 'max-age=0'
        csvdata = result.result['f']
        disposition = 'attachment; filename=accounts-export-%s.csv' % \
            taskid
        response.headers['Content-Disposition'] = str(disposition)
        response.headers['Content-Length'] = len(csvdata)
        return csvdata
    return self.render('/accounts/exportstatus.html')
def killJobByJobId(jobId):
    """Revoke and terminate the Celery task identified by *jobId*.

    :param jobId: Celery task id to kill.
    :returns: True if the revoke request was issued, False on any error.
    """
    try:
        job = AsyncResult(jobId)
        job.revoke(terminate=True)
        return True
    except Exception:
        # FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception and report failure instead.
        return False
def abort(self, model, view, parameters):
    """Abort the asynchronous job named in ``parameters['jobID']``.

    :param model: model object whose ``async`` attribute gates abortion.
    :param view: unused here, kept for the handler interface.
    :param parameters: dict carrying the Celery task id under 'jobID'.
    :returns: dict with the task 'state', or an empty dict when the model
        is not asynchronous.
    """
    # FIX: ``async`` became a reserved keyword in Python 3.7, so
    # ``model.async`` is a SyntaxError there; use getattr() instead.
    if getattr(model, 'async', False) == True:
        job = AsyncResult(parameters['jobID'])
        job.revoke(terminate=True)
        return {'state': job.state}
    else:
        return {}
def abort(self, model, view, parameters):
    """Abort the asynchronous job named in ``parameters['jobID']``.

    :param model: model object whose ``async`` attribute gates abortion.
    :param view: unused here, kept for the handler interface.
    :param parameters: dict carrying the Celery task id under 'jobID'.
    :returns: dict with the task 'state', or an empty dict when the model
        is not asynchronous.
    """
    # FIX: the original ``model. async`` is a SyntaxError (``async`` is a
    # reserved keyword since Python 3.7); use getattr() instead.
    if getattr(model, 'async', False) == True:
        job = AsyncResult(parameters['jobID'])
        job.revoke(terminate=True)
        return {'state': job.state}
    else:
        return {}
def restfulapi_cancel(auth, **kwargs):
    """Cancel every tracked download task in the current session.

    Revokes each unfinished task in the session's restfulapi task list,
    clears the list, and records a node log entry when anything was
    cancelled.  Returns a status/message dict either way.
    """
    # Ensure the nested session structure exists before reading it.
    if u'restfulapi' not in session.data:
        session.data[u'restfulapi'] = {}
    if u'task_list' not in session.data[u'restfulapi']:
        session.data[u'restfulapi'][u'task_list'] = []
    cancel_count = 0
    for task_id in session.data[u'restfulapi'][u'task_list']:
        task = AsyncResult(task_id)
        if not task.ready():
            # Raise SoftTimeLimitExceeded exception on the task
            task.revoke(terminate=True, signal='SIGUSR1')
            cancel_count += 1
    # Clear the list regardless of whether anything was revoked.
    session.data[u'restfulapi'][u'task_list'] = []
    session.save()
    if cancel_count == 0:
        return {
            'status': 'No download tasks',
            'message': 'There are no active download tasks.'
        }
    # Recent activity log
    node = kwargs.get('node')
    pid = kwargs.get('pid')
    # NOTE(review): node.add_log raises AttributeError if 'node' was not
    # passed in kwargs — presumably callers always supply it; verify.
    node.add_log(action='restfulapi_cancel',
                 params={'node': pid, 'project': pid}, auth=auth)
    return {'status': 'OK', 'message': 'Download task has been cancelled.'}
def initialize_in_judge(self):
    """Ensure this test case has a judge-initialization task in flight.

    Guarded by a cache lock so only one caller per test case proceeds.
    Already-successful initializations are skipped; a finished or
    still-pending previous task is cleared (pending ones are revoked)
    before a fresh initialization task is scheduled.
    """
    lock = cache.lock("testcase_{}_{}_{}_initialize_in_judge".format(
        self.problem.problem.pk, self.problem.pk, self.pk), timeout=60)
    # Non-blocking: if another worker holds the lock, do nothing.
    if lock.acquire(blocking=False):
        try:
            # Re-read the row inside the lock to get the current state.
            refreshed_obj = type(self).objects.with_transaction(
                self._transaction).get(pk=self.pk)
            if refreshed_obj.judge_initialization_successful:
                return
            if self.judge_initialization_task_id:
                result = AsyncResult(self.judge_initialization_task_id)
                if result.failed() or result.successful():
                    # Finished either way: forget it so a new one is queued.
                    self.judge_initialization_task_id = None
                    self.save()
                elif result.state == "PENDING":
                    # Never picked up; revoke and re-queue below.
                    result.revoke()
                    self.judge_initialization_task_id = None
                    self.save()
                else:
                    # Task is actively running; leave it alone.
                    logger.debug("Waiting for task {} in state {}".format(
                        self.judge_initialization_task_id,
                        result.state
                    ))
            if not self.judge_initialization_task_id:
                self.judge_initialization_task_id = \
                    TestCaseJudgeInitialization().delay(self).id
                self.save()
        finally:
            lock.release()
def coupon_edit(request, pk):
    """Edit a coupon that has not yet opened for application.

    When the apply start/stop dates change, the previously scheduled
    Celery task for that date is revoked and a new one queued at the new
    time; the new task id is stored on the coupon.
    """
    coupon_object = models.Coupon.objects.filter(id=pk, deleted=False).first()
    if not coupon_object or coupon_object.status != 1:
        return HttpResponse('优惠券不存在或优惠券已开始申请')
    if request.method == 'GET':
        form = CouponModelForm(instance=coupon_object)
        return render(request, 'web/coupon_form.html', {'form': form})
    form = CouponModelForm(instance=coupon_object, data=request.POST)
    if form.is_valid():
        if "apply_start_date" in form.changed_data:
            # Cancel the old start task, then schedule at the new time.
            async_result = AsyncResult(id=coupon_object.apply_start_task_id,
                                       app=celery_app)
            async_result.revoke()
            eta = datetime.datetime.utcfromtimestamp(
                form.instance.apply_start_date.timestamp())
            start_task_id = tasks.coupon_start_apply.apply_async(
                args=[coupon_object.id], eta=eta).id
            form.instance.apply_start_task_id = start_task_id
        if "apply_stop_date" in form.changed_data:
            async_result = AsyncResult(id=coupon_object.apply_stop_task_id,
                                       app=celery_app)
            async_result.revoke()
            eta = datetime.datetime.utcfromtimestamp(
                form.instance.apply_stop_date.timestamp())
            stop_task_id = tasks.coupon_stop_apply.apply_async(
                args=[coupon_object.id], eta=eta).id
            # FIX: this branch previously overwrote apply_start_task_id,
            # losing the stop task id and clobbering the start task id.
            form.instance.apply_stop_task_id = stop_task_id
        form.save()
        return redirect('coupon_list')
    return render(request, 'web/coupon_form.html', {'form': form})
def post(self, request, pk):
    """Update a discount; reschedule its start/stop Celery tasks on change.

    :param pk: primary key of the Discounts row being edited.
    :returns: JsonResponse with 'status' True on success, False (with form
        errors) when validation fails.
    """
    old_obj = models.Discounts.objects.filter(pk=pk).first()
    disconut_obj = myfrom.DiscountModelForm(instance=old_obj,
                                            data=request.POST,
                                            files=request.FILES)
    if disconut_obj.is_valid():
        if "start" in disconut_obj.changed_data:
            # FIX: the stored task id lives on the model instance, not on
            # the ModelForm object (which has no such attribute).
            async_result = AsyncResult(id=old_obj.apply_start_task_id,
                                       app=celery_app)
            async_result.revoke()
            eta = datetime.datetime.utcfromtimestamp(
                disconut_obj.instance.start.timestamp())
            # FIX: pass the Discounts pk, not the form's nonexistent .id.
            start_task_id = task.apply_start_discount.apply_async(
                args=[old_obj.pk], eta=eta).id
            disconut_obj.instance.apply_start_task_id = start_task_id
        if "end" in disconut_obj.changed_data:
            async_result = AsyncResult(id=old_obj.apply_stop_task_id,
                                       app=celery_app)
            async_result.revoke()
            eta = datetime.datetime.utcfromtimestamp(
                disconut_obj.instance.end.timestamp())
            stop_task_id = task.apply_stop_discount.apply_async(
                args=[old_obj.pk], eta=eta).id
            disconut_obj.instance.apply_stop_task_id = stop_task_id
        disconut_obj.save()
        return JsonResponse({'status': True})
    # FIX: invalid forms previously fell through and returned None,
    # which Django rejects; report the validation errors instead.
    return JsonResponse({'status': False, 'error': disconut_obj.errors})
def cancel(event):
    """
    Bound to the "jobs.cancel" event; triggered any time a job is canceled.

    Only jobs whose ``handler`` field is ``worker_handler`` or
    ``celery_handler`` are processed; for those, the matching Celery task is
    revoked and the job moved to the custom CANCELING state.

    :param event: girder event whose ``info`` attribute is the job document.
    """
    job = event.info
    if job['handler'] in ['worker_handler', 'celery_handler']:
        # Stop event propagation and prevent default, we are using a custom state
        event.stopPropagation().preventDefault()
        celeryTaskId = job.get('celeryTaskId')
        if celeryTaskId is None:
            msg = ("Unable to cancel Celery task. Job '%s' doesn't have a Celery task id."
                   % job['_id'])
            # FIX: logger.warn() is a deprecated alias; use warning().
            logger.warning(msg)
            return
        if job['status'] not in [CustomJobStatus.CANCELING, JobStatus.CANCELED,
                                 JobStatus.SUCCESS, JobStatus.ERROR]:
            # Set the job status to canceling
            Job().updateJob(job, status=CustomJobStatus.CANCELING)
            # Send the revoke request.
            asyncResult = AsyncResult(celeryTaskId, app=getCeleryApp())
            asyncResult.revoke()
def twitter(**kwargs):
    """Kick off per-query twitter feed tasks and the shared stream task.

    Per-query feed tasks run only when the firehose setting is off; the
    stream task is (re)started when missing, finished, or when the query
    set changed since the last run.
    """
    queries = fetch_queries()
    if getattr(settings, 'KRAL_TWITTER_FIREHOSE', False) is not True:
        for query in queries:
            # NOTE(review): appending while iterating extends the loop to
            # also process the underscore-stripped variants — presumably
            # intentional, but worth confirming.
            if '_' in query:
                queries.append(query.replace('_',''))
            # NOTE(review): the "facebookstream_" prefix in a twitter
            # function looks like a copy-paste leftover — confirm before
            # renaming, since it is a live cache key.
            cache_name = "facebookstream_%s" % query
            if cache.get(cache_name):
                previous_result = AsyncResult(cache.get(cache_name))
                # Only requeue when the previous feed task has finished.
                if previous_result.ready():
                    result = twitter_feed.delay(query)
                    cache.set(cache_name,result.task_id)
            else:
                result = twitter_feed.delay(query)
                cache.set(cache_name,result.task_id)
    if cache.get('twitterfeed'):
        previous_queries = pickle.loads(cache.get('twitterfeed_queries'))
        previous_result = AsyncResult(cache.get('twitterfeed'))
        if previous_result.ready():
            result = twitter_stream.delay(queries)
            cache.set('twitterfeed',result.task_id)
        # Query set changed: revoke the old stream and start a new one.
        if queries != previous_queries:
            result = twitter_stream.delay(queries)
            previous_result.revoke()
            cache.set('twitterfeed_queries',pickle.dumps(queries))
            cache.set('twitterfeed',result.task_id)
    else:
        result = twitter_stream.delay(queries)
        cache.set('twitterfeed_queries',pickle.dumps(queries))
        cache.set('twitterfeed',result.task_id)
    return
def experiments(request):
    """Render user summary page with information on results of each experiment.

    On POST, per-row Delete_<idx>/Stop_<idx> buttons delete the experiment
    or revoke its Celery task (including the whole parent chain) and mark
    it stopped, then the summary table is rebuilt before rendering.
    """
    current_user = request.user
    if request.method == "POST":
        # Get details on user experiment to check POST Query
        summary_table_body, objs = create_summary_table(current_user)
        for idx, obj in enumerate(objs):
            if request.POST.get(f"Delete_{idx}"):
                obj.delete()
            if request.POST.get(f"Stop_{idx}"):
                task = AsyncResult(id=obj.get_task_id())
                task.revoke(terminate=True)
                # Walk and revoke the parent chain so chained/grouped
                # tasks are stopped too.
                parent = task.parent
                while parent is not None:
                    parent.revoke(terminate=True)
                    parent = parent.parent
                obj.result_status = 0
                obj.error_message = "Stopped by User."
                obj.save()
    # Rebuild after any deletions/stops so the page reflects them.
    summary_table_body, objs = create_summary_table(current_user)
    context = {"headers": RESULTS_METADATA_HEADER, "body": summary_table_body}
    return render(request=request,
                  template_name="viewer/experiments.html",
                  context=context)
def _revoke_job(self, request_id: str) -> None or streaming_pb2.ResultDetails:
    """Request cancellation of the job tracked under *request_id*.

    Looks up the job in the cache; if unknown, reports REQUEST_NOT_FOUND.
    Otherwise sets the force-stop flag (once) and revokes the associated
    Celery result if it is still pending, then reports REVOKE_SIGNAL_SENT.
    """
    job_details: dict = self.cache.get(
        CacheKeysTemplates.JOB_DETAILS.format(request_id=request_id))
    if not job_details:
        revoke_details = dict(
            tracking_id=request_id,
            has_been_sent=self.pb2.RevokeSignalStatus.REQUEST_NOT_FOUND)
        return self.pb2.RevokeDetails(**revoke_details)
    reference_id: str = job_details['reference_id']
    signal_sent: bool = self.cache.get(
        CacheKeysTemplates.FORCE_STOP_REQUEST.format(
            request_id=request_id))
    if not signal_sent:
        # Mark the force-stop request so it is only issued once.
        self.cache.set(
            CacheKeysTemplates.FORCE_STOP_REQUEST.format(
                request_id=request_id), json.dumps(True))
        # celery result id
        result_id: str = self.cache.get(
            CacheKeysTemplates.REQUEST_RESULT_ID.format(
                request_id=request_id), decode=False)
        if result_id:
            async_result = AsyncResult(result_id, app=celery_app)
            # Only revoke work that has not completed yet.
            if async_result and not async_result.ready():
                async_result.revoke()
    revoke_details = dict(
        tracking_id=request_id,
        reference_id=reference_id,
        has_been_sent=self.pb2.RevokeSignalStatus.REVOKE_SIGNAL_SENT)
    return self.pb2.RevokeDetails(**revoke_details)
def stop_handover_job(handover_token):
    """Stop the Celery job for the given handover token.

    Args:
        handover_token: token identifying the handover whose task to stop.

    Returns:
        dict: task status with handover spec, or an error dict on failure.
    """
    # FIX: pre-initialise so the except clause cannot hit a NameError when
    # get_celery_task_id itself raises before these are assigned.
    spec = ''
    status = None
    try:
        status = get_celery_task_id(handover_token)
        if not status['status']:
            return status
        # get celery task id
        task_id = status['task_id']
        spec = status['spec']
        task = AsyncResult(task_id)
        # Only revoke tasks that have not already failed or been revoked.
        if task.state not in ['FAILURE', 'REVOKED']:
            task.revoke(terminate=True)
            log_and_publish(
                make_report('ERROR', f"Handover failed, Job Revoked", spec, ""))
    except Exception as e:
        return {'status': False, 'error': f"{str(e)}", 'spec': spec}
    return status
def cancel_scan(*, task_id: str) -> Mapping[str, str]:
    """Terminate the scan task *task_id* and persist its resulting state.

    Returns the serialized scan record after the state update.
    """
    result = AsyncResult(task_id, app=celery_app)
    result.revoke(terminate=True)
    scan_record = Scan.objects.get(uuid=task_id)
    scan_record.state = result.state
    scan_record.save()
    return ScanSerializer(scan_record).data
def cancel_task(task_id):
    """Forcefully revoke the task *task_id* and report its final status.

    Returns a dict with the task's state, its result metadata, and a
    'cancelled' flag.
    """
    result = AsyncResult(task_id)
    result.revoke(terminate=True)
    status = {'state': result.state, 'meta': result.info}
    status['cancelled'] = True
    return status
def handle(self, *args, **options):
    """Management-command entry point: kill the task named by --task-id.

    Sends a terminating SIGKILL revoke to the task and prints a success
    message to stdout.
    """
    res = AsyncResult(options['task-id'], app=app)
    # FIX: removed a committed pdb.set_trace() breakpoint that would hang
    # the command waiting for interactive input.
    res.revoke(terminate=True, signal="SIGKILL")
    self.stdout.write(self.style.SUCCESS(
        'Successfully killed task "%s"' % res.id))
def testdestination(self, destinationid):
    """Test a mail destination server by sending a probe via Celery.

    With no ``taskid`` query param, queues an SMTP test task and redirects
    back to this page to poll it; with one, reports the result or gives up
    after ten polls stuck in PENDING/RETRY/FAILURE.  NOTE: pylons
    ``redirect()`` raises, so code after each redirect does not run.
    """
    server = self._get_server(destinationid)
    if not server:
        abort(404)
    taskid = request.GET.get('taskid', None)
    if not taskid:
        to_addr = 'postmaster@%s' % server.domains.name
        task = test_smtp_server.apply_async(args=[
            server.address, server.port, '<>', to_addr, server.id, 3])
        taskid = task.task_id
        # Track the task in the session so only our own ids are polled.
        if not 'taskids' in session:
            session['taskids'] = []
        session['taskids'].append(taskid)
        session['testdest-count'] = 1
        session.save()
        redirect(url.current(taskid=taskid))
    else:
        result = AsyncResult(taskid)
        if result is None or taskid not in session['taskids']:
            flash(_('The connection test failed try again later'))
            redirect(url('domain-detail', domainid=server.domain_id))
        if result.ready():
            # Both the ping and the SMTP handshake must have succeeded.
            if ('smtp' in result.result and 'ping' in result.result
                    and result.result['smtp'] and result.result['ping']):
                flash(_('The server: %s is up and accepting mail from us'
                        % server.address))
            else:
                if 'ping' in result.result['errors']:
                    errors = result.result['errors']['ping']
                else:
                    errors = result.result['errors']['smtp']
                flash(_('The server: %s is not accepting mail from us: %s')
                      % (server.address, errors))
            redirect(url('domain-detail', domainid=server.domain_id))
        else:
            # Count polls; revoke a stuck task after ten attempts.
            session['testdest-count'] += 1
            session.save()
            if (session['testdest-count'] >= 10 and
                    result.state in ['PENDING', 'RETRY', 'FAILURE']):
                result.revoke()
                del session['testdest-count']
                session.save()
                flash_alert('Failed to initialize backend,'
                            ' try again later')
                redirect(url('domain-detail', domainid=server.domain_id))
    c.server = server
    c.domainid = server.domain_id
    c.taskid = taskid
    c.finished = False
    return render('/domains/testdestination.html')
def revoke_task(self, update=False):
    """Revoke this object's pending Celery task.

    :param update: when True, also clear the stored task id, mark the run
        unsuccessful, and persist the change.
    """
    if self.task_id:
        LOGGER.info("Deleting task " + self.task_id)
        result = AsyncResult(self.task_id)
        result.revoke()
        # NOTE(review): reconstructed nesting places the update branch
        # inside the task_id guard (clearing state only makes sense when a
        # task existed) — confirm against the original layout.
        if update:
            self.task_id = None
            self.success = False
            self.save()
def stop(job_id):
    '''Stop the automatos execute status'''
    # 404 if no such job exists.
    target = TestJob.query.filter(job_id == TestJob.id).first_or_404()
    pending = AsyncResult(target.task_id, backend=celery.backend)
    pending.revoke()
    # Also tell the workers to hard-kill the task if it is running.
    celery.control.revoke(target.task_id, terminate=True, signal='SIGKILL')
    return "cancel success"
def revoke_celery_task(task_id):
    """Terminate the Celery task identified by *task_id*.

    Failures are logged rather than propagated to the caller.
    """
    try:
        AsyncResult(task_id).revoke(terminate=True)
    except Exception as e:
        logger.error(u'revoke_celery_task(Exception): %s' % e)
def perform_update(self, serializer):
    """Reschedule the reminder email when the object is updated.

    Revokes the previously queued send_email task, queues a new one for
    one hour before the submitted datetime, and stores the new task id.
    """
    current = self.get_object()
    AsyncResult(current.task_id).revoke()
    send_at = datetime.strptime(
        self.request.data['datetime'],
        "%Y-%m-%dT%H:%M:%S.%fZ") - timedelta(hours=1)
    replacement = send_email.apply_async(
        (self.request.user.email, self.request.data['title']),
        eta=send_at)
    serializer.save(task_id=replacement.id)
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    """Persist the message and (re)schedule its Telegram delivery.

    Any previously scheduled send_message task is revoked before a new one
    is queued for ``send_time``; the new task id is stored on the row.
    """
    # Revoke the old delivery before scheduling its replacement.
    if self.task_id:
        old_task = AsyncResult(self.task_id)
        old_task.revoke()
    task = tasks.send_message.apply_async(
        args=[self.channel.bot.token, self.channel.channel_id,
              self.text or None,
              None if not self.image else self.image.path],
        eta=self.send_time)
    # FIX: task_id was previously assigned *after* super().save(), so the
    # new id was never written to the database; set it before persisting.
    self.task_id = task.id
    super().save(force_insert, force_update, using, update_fields)
def testdestination(self, destinationid):
    """Test a mail destination server by sending a probe via Celery.

    With no ``taskid`` query param, queues an SMTP test task and redirects
    back to poll it; with one, reports the result or gives up after ten
    polls stuck in PENDING/RETRY/FAILURE.  NOTE: pylons ``redirect()``
    raises, so code after each redirect does not run.
    """
    server = self._get_server(destinationid)
    if not server:
        abort(404)
    taskid = request.GET.get('taskid', None)
    if not taskid:
        to_addr = 'postmaster@%s' % server.domains.name
        task = test_smtp_server.apply_async(args=[
            server.address, server.port, '<>', to_addr, server.id, 3
        ])
        taskid = task.task_id
        # NOTE(review): assumes session['taskids'] already exists —
        # unlike the variant elsewhere that guards with a membership
        # check; verify session initialisation upstream.
        session['taskids'].append(taskid)
        session['testdest-count'] = 1
        session.save()
        redirect(url.current(taskid=taskid))
    else:
        result = AsyncResult(taskid)
        if result is None or taskid not in session['taskids']:
            flash(_('The connection test failed try again later'))
            redirect(url('domain-detail', domainid=server.domain_id))
        if result.ready():
            # Both the ping and the SMTP handshake must have succeeded.
            if ('smtp' in result.result and 'ping' in result.result
                    and result.result['smtp'] and result.result['ping']):
                flash(
                    _('The server: %s is up and accepting mail from us'
                      % server.address))
            else:
                if 'ping' in result.result['errors']:
                    errors = result.result['errors']['ping']
                else:
                    errors = result.result['errors']['smtp']
                flash(
                    _('The server: %s is not accepting mail from us: %s')
                    % (server.address, errors))
            redirect(url('domain-detail', domainid=server.domain_id))
        else:
            # Count polls; revoke a stuck task after ten attempts.
            session['testdest-count'] += 1
            session.save()
            if (session['testdest-count'] >= 10 and
                    result.state in ['PENDING', 'RETRY', 'FAILURE']):
                result.revoke()
                del session['testdest-count']
                session.save()
                flash_alert('Failed to initialize backend,'
                            ' try again later')
                redirect(url('domain-detail', domainid=server.domain_id))
    c.server = server
    c.domainid = server.domain_id
    c.taskid = taskid
    c.finished = False
    return render('/domains/testdestination.html')
def kill_task(request):
    """Hard-kill the Celery task named in the JSON request body.

    Expects a JSON body with a 'task_id' key; sends a terminating SIGKILL
    revoke and returns a JSON acknowledgement.
    """
    payload = json.loads(request.body)
    result = AsyncResult(payload['task_id'], app=celery_app)
    result.revoke(terminate=True, signal="SIGKILL")
    return JsonResponse({
        'state': 'REVOKED',
        'info': None,
        'message': "Done!"
    })
def find_kill_children(traceid):
    """Terminate every unfinished child task of *traceid*.

    Each child task that has not reached SUCCESS/FAILURE is marked as
    errored with a 'terminated' status message, persisted, and then its
    Celery task is hard-killed.
    """
    criteria = {'object.parent_id': traceid}
    task_obj = Task()
    result = task_obj.getAll(criteria)
    for t in result:
        res = AsyncResult(t.extid)
        if res.state not in ['SUCCESS', 'FAILURE']:
            t.status = Task.STATUS_ERROR
            # FIX: set the status message *before* save() — it was
            # previously assigned afterwards and never persisted.
            t.current_status_msg = 'terminated'
            t.save()
            res.revoke(terminate=True, signal='SIGKILL')
def _cancel_task(task_id=None):
    """Revoke worker tasks and report their status.

    :param task_id: when given, only that task is revoked; when falsy,
        every task on every worker is revoked.
    :returns: mapping of task name -> status dict ('state', 'meta', and
        'canceled' for revoked tasks); empty when no workers report tasks.
    """
    worker_tasks = _get_tasks()
    if not worker_tasks:
        return {}
    task_status = {}
    for _worker, entries in worker_tasks:
        for entry in entries:
            res = AsyncResult(entry['id'])
            status = {'state': res.state, 'meta': res.info}
            task_status[entry['name']] = status
            if not task_id or task_id == entry['id']:
                res.revoke(terminate=True)
                status['canceled'] = True
    return task_status
def revoke_task_by_id(task_id):
    """
    revoke a celery task by task id
    :param task_id: task id
    :return: aborted task dict
    """
    AsyncResult(task_id).revoke(terminate=True)
    aborted = {'id': task_id, 'status': 'aborted', 'payload': ''}
    return aborted
def export_status(self, taskid):
    """Report status of a domains CSV export task.

    Renders a status page while the task runs, revokes it after ten polls
    stuck in PENDING/RETRY/FAILURE, and streams the CSV when ``d=y`` is
    passed after completion.  NOTE: pylons ``redirect()`` raises, so code
    after each redirect does not run.
    """
    result = AsyncResult(taskid)
    # Reject task ids not created by this session.
    if result is None or taskid not in session['taskids']:
        flash(_('The task status requested has expired or does not exist'))
        redirect(url(controller='domains', action='index'))
    if result.ready():
        finished = True
        flash.pop_messages()
        if isinstance(result.result, Exception):
            # Only superadmins see the raw exception text.
            if c.user.is_superadmin:
                flash_alert(
                    _('Error occured in processing %s') % result.result)
            else:
                flash_alert(_('Backend error occured during processing.'))
            redirect(url(controller='domains'))
        results = dict(
            f=True if not result.result['global_error'] else False,
            id=taskid,
            global_error=result.result['global_error'])
    else:
        # Count polls; give up on a stuck task after ten attempts.
        session['dexport-count'] += 1
        if (session['dexport-count'] >= 10 and
                result.state in ['PENDING', 'RETRY', 'FAILURE']):
            result.revoke()
            flash_alert(
                _('The export could not be processed,'
                  ' try again later'))
            del session['dexport-count']
            session.save()
            redirect(url(controller='domains'))
        finished = False
        results = dict(f=None, global_error=None)
    c.finished = finished
    c.results = results
    c.success = result.successful()
    d = request.GET.get('d', None)
    # Serve the generated CSV as a download once finished and requested.
    if finished and (d and d == 'y'):
        info = EXPORTDOM_MSG % dict(d='all')
        audit_log(c.user.username, 5, info, request.host,
                  request.remote_addr, now())
        response.content_type = 'text/csv'
        response.headers['Cache-Control'] = 'max-age=0'
        csvdata = result.result['f']
        disposition = 'attachment; filename=domains-export-%s.csv' % taskid
        response.headers['Content-Disposition'] = str(disposition)
        response.headers['Content-Length'] = len(csvdata)
        return csvdata
    return render('/domains/exportstatus.html')
def revoke(self):
    """Revoke this record's Celery task and roll the record back one stage.

    Configuration / Detail Configuration fall back to Modification with
    status NA; Simulation falls back to Configuration with status Success.
    The updated record is saved.
    """
    pending = AsyncResult(self.task_id, app=sim_worker.celery.app)
    pending.revoke(terminate=True)
    pending.forget()
    # Both configuration stages roll back identically.
    if self.stage in ("Configuration", "Detail Configuration"):
        self.stage = "Modification"
        self.status = "NA"
    elif self.stage == "Simulation":
        self.stage = "Configuration"
        self.status = "Success"
    self.save()
def testdestination(self, destinationid):
    """Test a mail destination server by sending a probe via Celery.

    With no ``taskid`` query param, queues an SMTP test task and redirects
    back to poll it; with one, reports the result or gives up after ten
    polls stuck in PENDING/RETRY/FAILURE.  NOTE: pylons ``redirect()``
    raises, so code after each redirect does not run.
    """
    server = self._get_server(destinationid)
    if not server:
        abort(404)
    taskid = request.GET.get("taskid", None)
    if not taskid:
        to_addr = "postmaster@%s" % server.domains.name
        task = test_smtp_server.apply_async(
            args=[server.address, server.port, "<>", to_addr, server.id, 3])
        taskid = task.task_id
        # NOTE(review): assumes session["taskids"] already exists —
        # verify session initialisation upstream.
        session["taskids"].append(taskid)
        session["testdest-count"] = 1
        session.save()
        redirect(url.current(taskid=taskid))
    else:
        result = AsyncResult(taskid)
        if result is None or taskid not in session["taskids"]:
            flash(_("The connection test failed try again later"))
            redirect(url("domain-detail", domainid=server.domain_id))
        if result.ready():
            # Both the ping and the SMTP handshake must have succeeded.
            if (
                "smtp" in result.result
                and "ping" in result.result
                and result.result["smtp"]
                and result.result["ping"]
            ):
                flash(_("The server: %s is up and accepting mail from us"
                        % server.address))
            else:
                if "ping" in result.result["errors"]:
                    errors = result.result["errors"]["ping"]
                else:
                    errors = result.result["errors"]["smtp"]
                flash(_("The server: %s is not accepting mail from us: %s")
                      % (server.address, errors))
            redirect(url("domain-detail", domainid=server.domain_id))
        else:
            # Count polls; revoke a stuck task after ten attempts.
            session["testdest-count"] += 1
            session.save()
            if session["testdest-count"] >= 10 and result.state in ["PENDING", "RETRY", "FAILURE"]:
                result.revoke()
                del session["testdest-count"]
                session.save()
                flash_alert("Failed to initialize backend,"
                            " try again later")
                redirect(url("domain-detail", domainid=server.domain_id))
    c.server = server
    c.domainid = server.domain_id
    c.taskid = taskid
    c.finished = False
    return render("/domains/testdestination.html")
def stop(request, project_id=None):
    """Terminate the crawl job registered for *project_id* in the JobPool.

    Returns a JSON response describing the outcome: no project given, no
    pooled job found, or the id of the stopped job.
    """
    if not project_id:
        return JsonResponse({"data": "No project."})
    try:
        pool_entry = JobPool.objects.get(project_id=project_id)
        job_id = pool_entry.job_id
    except JobPool.DoesNotExist:
        return JsonResponse({"data": "No task."})
    AsyncResult(str(job_id)).revoke(terminate=True)
    return JsonResponse({"data": "Task stopped", "jobId": job_id})
def import_status(self, taskid):
    """Report status of an organizations/domains import task.

    Renders a status page while the task runs; on success triggers a
    serial update and writes an audit log; after ten polls stuck in
    PENDING/RETRY/FAILURE it revokes the task and removes the uploaded
    import file.  NOTE: pylons ``redirect()`` raises, so code after each
    redirect does not run.
    """
    result = AsyncResult(taskid)
    # Reject task ids not created by this session.
    if result is None or taskid not in session['taskids']:
        msg = _('The task status requested has expired or does not exist')
        flash(msg)
        log.info(msg)
        redirect(url(controller='organizations', action='index'))
    if result.ready():
        finished = True
        flash.pop_messages()
        if isinstance(result.result, Exception):
            msg = _('Error occured in processing %s') % result.result
            # Only superadmins see the raw exception text.
            if c.user.is_superadmin:
                flash_alert(msg)
                log.info(msg)
            else:
                flash_alert(_('Backend error occured during processing.'))
                log.info(msg)
            redirect(url(controller='organizations'))
        update_serial.delay()
        info = auditmsgs.IMPORTORG_MSG % dict(o='-')
        audit_log(c.user.username, 3, unicode(info), request.host,
                  request.remote_addr, arrow.utcnow().datetime)
    else:
        # Count polls; clean up a stuck import after ten attempts.
        session['dimport-counter'] += 1
        session.save()
        if (session['dimport-counter'] >= 10 and
                result.state in ['PENDING', 'RETRY', 'FAILURE']):
            result.revoke()
            # Best effort: the uploaded file may already be gone.
            try:
                os.unlink(session['dimport-file'])
            except OSError:
                pass
            del session['dimport-file']
            del session['dimport-counter']
            session.save()
            flash_alert(
                _('The import could not be processed,'
                  ' try again later'))
            redirect(url(controller='organizations'))
        finished = False
    c.finished = finished
    c.results = result.result
    c.success = result.successful()
    return self.render('/organizations/importstatus.html')
def audit_export_status(self, taskid):
    """Report status of an audit-log export task.

    Renders a status page while the task runs, revokes it after twenty
    polls stuck in PENDING/RETRY/FAILURE, and streams the exported file
    when ``d=y`` is passed after completion.  NOTE: pylons ``redirect()``
    raises, so code after each redirect does not run.
    """
    result = AsyncResult(taskid)
    # Reject task ids not created by this session.
    if result is None or taskid not in session['taskids']:
        msg = _('The task status requested has expired or does not exist')
        flash(msg)
        log.info(msg)
        redirect(url('status-audit-logs'))
    if result.ready():
        finished = True
        flash.pop_messages()
        if isinstance(result.result, Exception):
            msg = _('Error occured in processing %s') % result.result
            # Only superadmins see the raw exception text.
            if c.user.is_superadmin:
                flash_alert(msg)
                log.info(msg)
            else:
                flash_alert(_('Backend error occured during processing.'))
                log.info(msg)
            redirect(url('status-audit-logs'))
    else:
        # Count polls; give up on a stuck export after twenty attempts.
        session['exportauditlog-counter'] += 1
        session.save()
        if (session['exportauditlog-counter'] >= 20 and
                result.state in ['PENDING', 'RETRY', 'FAILURE']):
            result.revoke()
            del session['exportauditlog-counter']
            session.save()
            flash_alert(_('The audit log export failed, try again later'))
            redirect(url('status-audit-logs'))
        finished = False
    c.finished = finished
    c.results = result.result
    c.success = result.successful()
    dwn = request.GET.get('d', None)
    # Serve the export as a download once finished and requested.
    if finished and (dwn and dwn == 'y'):
        audit_log(c.user.username, 5, unicode(AUDITLOGEXPORT_MSG),
                  request.host, request.remote_addr,
                  arrow.utcnow().datetime)
        response.content_type = result.result['content_type']
        response.headers['Cache-Control'] = 'max-age=0'
        respdata = result.result['f']
        disposition = 'attachment; filename=%s' % result.result['filename']
        response.headers['Content-Disposition'] = str(disposition)
        response.headers['Content-Length'] = len(respdata)
        return respdata
    return self.render('/status/auditexportstatus.html')
def revoke_task(self, task_id):
    """Cancel execution of a task and record the outcome.

    :param task_id: id of the Celery task to revoke.
    :returns: dict with 'status' ('success'/'fail') and, on failure, a
        'result' entry carrying the error message.
    """
    task_result = {}
    try:
        AsyncResult(task_id).revoke(terminate=True)
        task_result['status'] = 'success'
        # Record the revocation time in the task bookkeeping table.
        update_task(task_id, 'REVOKED', revoked=datetime.now())
    except Exception as ex:
        task_result['status'] = 'fail'
        task_result['result'] = {'msg': str(ex)}
    return task_result
def delete(self, id):
    """
    Deletes task associated with {id} passed in
    """
    resp = flask.make_response()
    try:
        pending = AsyncResult(id)
        pending.revoke(terminate=True)
        # Drop the stored result as well.
        pending.forget()
    except Exception:
        app.logger.exception('Caught exception deleting result')
        resp.status_code = 500
        return resp
    resp.status_code = 200
    return resp
def import_status(self, taskid):
    """Report status of an organizations/domains import task.

    Renders a status page while the task runs; on success triggers a
    serial update and writes an audit log; after ten polls stuck in
    PENDING/RETRY/FAILURE it revokes the task and removes the uploaded
    import file.  NOTE: pylons ``redirect()`` raises, so code after each
    redirect does not run.
    """
    result = AsyncResult(taskid)
    # Reject task ids not created by this session.
    if result is None or taskid not in session['taskids']:
        msg = _('The task status requested has expired or does not exist')
        flash(msg)
        log.info(msg)
        redirect(url(controller='organizations', action='index'))
    if result.ready():
        finished = True
        flash.pop_messages()
        if isinstance(result.result, Exception):
            msg = _('Error occured in processing %s') % result.result
            # Only superadmins see the raw exception text.
            if c.user.is_superadmin:
                flash_alert(msg)
                log.info(msg)
            else:
                flash_alert(_('Backend error occured during processing.'))
                log.info(msg)
            redirect(url(controller='organizations'))
        update_serial.delay()
        info = auditmsgs.IMPORTORG_MSG % dict(o='-')
        audit_log(c.user.username, 3, unicode(info), request.host,
                  request.remote_addr, arrow.utcnow().datetime)
    else:
        # Count polls; clean up a stuck import after ten attempts.
        session['dimport-counter'] += 1
        session.save()
        if (session['dimport-counter'] >= 10 and
                result.state in ['PENDING', 'RETRY', 'FAILURE']):
            result.revoke()
            # Best effort: the uploaded file may already be gone.
            try:
                os.unlink(session['dimport-file'])
            except OSError:
                pass
            del session['dimport-file']
            del session['dimport-counter']
            session.save()
            flash_alert(_('The import could not be processed,'
                          ' try again later'))
            redirect(url(controller='organizations'))
        finished = False
    c.finished = finished
    c.results = result.result
    c.success = result.successful()
    return self.render('/organizations/importstatus.html')
def _cancel_task(task_id=None):
    """Revoke worker tasks and report their status.

    :param task_id: when given, only that task is revoked; when falsy,
        every task on every worker is revoked.
    :returns: mapping of task name -> status dict ('state', 'meta', and
        'canceled' for revoked tasks); empty when no workers report tasks.
    """
    worker_tasks = _get_tasks()
    if not worker_tasks:
        return {}
    statuses = {}
    for _worker, task_list in worker_tasks:
        for entry in task_list:
            result = AsyncResult(entry['id'])
            info = {'state': result.state, 'meta': result.info}
            statuses[entry['name']] = info
            if not task_id or task_id == entry['id']:
                result.revoke(terminate=True)
                info['canceled'] = True
    return statuses
def export_status(self, taskid):
    """Report status of a domains CSV export task.

    Renders a status page while the task runs, revokes it after ten polls
    stuck in PENDING/RETRY/FAILURE, and streams the CSV when ``d=y`` is
    passed after completion.  NOTE: pylons ``redirect()`` raises, so code
    after each redirect does not run.
    """
    result = AsyncResult(taskid)
    # Reject task ids not created by this session.
    if result is None or taskid not in session["taskids"]:
        flash(_("The task status requested has expired or does not exist"))
        redirect(url(controller="domains", action="index"))
    if result.ready():
        finished = True
        flash.pop_messages()
        if isinstance(result.result, Exception):
            # Only superadmins see the raw exception text.
            if c.user.is_superadmin:
                flash_alert(_("Error occured in processing %s") % result.result)
            else:
                flash_alert(_("Backend error occured during processing."))
            redirect(url(controller="domains"))
        results = dict(
            f=True if not result.result["global_error"] else False,
            id=taskid,
            global_error=result.result["global_error"],
        )
    else:
        # Count polls; give up on a stuck export after ten attempts.
        session["dexport-count"] += 1
        if session["dexport-count"] >= 10 and result.state in ["PENDING", "RETRY", "FAILURE"]:
            result.revoke()
            flash_alert(_("The export could not be processed," " try again later"))
            del session["dexport-count"]
            session.save()
            redirect(url(controller="domains"))
        finished = False
        results = dict(f=None, global_error=None)
    c.finished = finished
    c.results = results
    c.success = result.successful()
    d = request.GET.get("d", None)
    # Serve the generated CSV as a download once finished and requested.
    if finished and (d and d == "y"):
        info = EXPORTDOM_MSG % dict(d="all")
        audit_log(c.user.username, 5, info, request.host,
                  request.remote_addr, datetime.now())
        response.content_type = "text/csv"
        response.headers["Cache-Control"] = "max-age=0"
        csvdata = result.result["f"]
        disposition = "attachment; filename=domains-export-%s.csv" % taskid
        response.headers["Content-Disposition"] = disposition
        response.headers["Content-Length"] = len(csvdata)
        return csvdata
    return render("/domains/exportstatus.html")
def coupon_delete(request, pk):
    """Soft-delete a coupon after revoking its scheduled start/stop tasks."""
    coupon_object = models.Coupon.objects.filter(id=pk, deleted=False).first()
    if not coupon_object:
        return JsonResponse({'status': False, 'error': '优惠券不存在'})
    # Cancel both scheduled apply-window tasks before deleting.
    for scheduled_id in (coupon_object.apply_start_task_id,
                         coupon_object.apply_stop_task_id):
        AsyncResult(id=scheduled_id, app=celery_app).revoke()
    models.Coupon.objects.filter(id=pk, deleted=False).update(deleted=True)
    return JsonResponse({'status': True})
def _sync_user_book_notes(user, book):
    """Schedule a delayed Evernote notes sync for *book*.

    Does nothing unless the user has sync enabled and an Evernote token.
    Any previously scheduled (still-cached) sync task is revoked before a
    new one is queued with a 5-minute delay; its id is cached for the same
    window so a later call can cancel it.
    """
    if not (user.enable_sync and user.evernote_access_token):
        return
    cache_key = 'sync_book_notes_{user_id}_{book_id}'.format(
        user_id=user.id, book_id=book.id
    )
    previous_task_id = cache.get(cache_key)
    if previous_task_id:
        # cancel last task
        previous = AsyncResult(previous_task_id)
        if previous:
            previous.revoke()
    scheduled = tasks.sync_book_notes.apply_async(
        args=[user.id, book], countdown=300
    )
    cache.set(cache_key, scheduled.id, timeout=300)
def run(self, task_id, *args, **kwargs):
    """Revoke *task_id* and clean up its crawler artifacts.

    Terminates the task, then best-effort removes its job directory and
    log file.

    :returns: the AsyncResult for the revoked task.
    """
    #revoke task
    #delete files
    #delete stats/entry
    a = AsyncResult(task_id)
    a.revoke(terminate=True)
    jobdir = settings.CRAWLER_DIRS['jobdir']
    logdir = settings.CRAWLER_DIRS['logdir']
    # FIX: the bare ``except:`` clauses also swallowed SystemExit and
    # KeyboardInterrupt; only filesystem errors should be ignored here.
    try:
        rmtree(join(jobdir, task_id))
    except OSError:
        pass
    try:
        unlink(join(logdir, (task_id + '.log')))
    except OSError:
        pass
    return a
def async_state(self, request, task_id, **kwargs):
    """
    Task state.

    If request method is GET, it returns a JSON dict with state. If task has
    completed, that dict also contains ``result_uri`` entry.

    If request method is DELETE and task hasn't run yet, it revokes this task.
    See http://celery.readthedocs.org/en/latest/userguide/workers.html#persistent-revokes
    for details about running workers with persitent revokes. If task can't be
    revoked (is in progress or finished), we return response with HTTP Bad
    Request state.

    Other methods are forbidden.
    """
    # Eager mode keeps results in-process instead of a result backend.
    if not getattr(settings, 'CELERY_ALWAYS_EAGER'):
        task = AsyncResult(task_id)
    else:
        task = EAGER_RESULTS[task_id]
    if request.method == 'GET':
        data = {
            'state': task.state,
            'id': task.id,
            'resource_uri': request.get_full_path()}
        if task.ready():
            data['result_uri'] = self._build_reverse_url(
                'api_async_result',
                kwargs={
                    'api_name': self._meta.api_name,
                    'resource_name': self._meta.resource_name,
                    'task_id': task_id})
        return self.create_response(request, data)
    elif request.method == 'DELETE':
        if not task.ready():
            try:
                task.revoke(terminate=True)
                return http.HttpGone()
            except Exception:
                # FIX: was a bare ``except:`` that also swallowed
                # SystemExit/KeyboardInterrupt; fall through to 400.
                pass
        return http.HttpBadRequest()
    else:
        return http.HttpForbidden()
def import_status(self, taskid):
    """Report status of an accounts import task.

    Renders a status page while the task runs; on success triggers a
    serial update and writes an audit log; after ten polls stuck in
    PENDING/RETRY/FAILURE it revokes the task and removes the uploaded
    import file.  NOTE: pylons ``redirect()`` raises, so code after each
    redirect does not run.
    """
    result = AsyncResult(taskid)
    # Reject task ids not created by this session.
    if result is None or taskid not in session['taskids']:
        flash(_('The task status requested has expired or does not exist'))
        redirect(url(controller='accounts', action='index'))
    if result.ready():
        finished = True
        flash.pop_messages()
        if isinstance(result.result, Exception):
            # Only superadmins see the raw exception text.
            if c.user.is_superadmin:
                flash_alert(_('Error occured in processing %s')
                            % result.result)
            else:
                flash_alert(_('Backend error occured during processing.'))
            redirect(url(controller='accounts'))
        update_serial.delay()
        audit_log(c.user.username, 3, unicode(ACCOUNTIMPORT_MSG),
                  request.host, request.remote_addr, now())
    else:
        # Count polls; clean up a stuck import after ten attempts.
        session['acimport-count'] += 1
        if (session['acimport-count'] >= 10 and
                result.state in ['PENDING', 'RETRY', 'FAILURE']):
            result.revoke()
            # Best effort: the uploaded file may already be gone.
            try:
                os.unlink(session['acimport-file'])
            except OSError:
                pass
            del session['acimport-count']
            session.save()
            flash_alert(_('The import could not be processed,'
                          ' try again later'))
            redirect(url(controller='accounts'))
        finished = False
    c.finished = finished
    c.results = result.result
    c.success = result.successful()
    return render('/accounts/importstatus.html')
def import_status(self, taskid):
    """Poll the status of a domains-import Celery task.

    Redirects away if the task is unknown/expired or errored; revokes the
    task and cleans up the uploaded file after 10 unsuccessful polls.
    Renders the import-status page otherwise.
    """
    result = AsyncResult(taskid)
    if result is None or taskid not in session["taskids"]:
        flash(_("The task status requested has expired or does not exist"))
        redirect(url(controller="organizations", action="index"))

    if result.ready():
        finished = True
        flash.pop_messages()
        if isinstance(result.result, Exception):
            # Only superadmins see the raw backend exception text.
            if c.user.is_superadmin:
                flash_alert(_("Error occured in processing %s") % result.result)
            else:
                flash_alert(_("Backend error occured during processing."))
            redirect(url(controller="organizations"))
        update_serial.delay()
        info = IMPORTORG_MSG % dict(o="-")
        audit_log(c.user.username, 3, info,
                  request.host, request.remote_addr, now())
    else:
        # FIX: the first poll raised KeyError because the counter did not
        # exist yet; initialize it like the accounts export handler does.
        try:
            session["dimport-counter"] += 1
        except KeyError:
            session["dimport-counter"] = 1
        session.save()
        if (session["dimport-counter"] >= 10 and
                result.state in ["PENDING", "RETRY", "FAILURE"]):
            result.revoke()
            try:
                os.unlink(session["dimport-file"])
            except OSError:
                # Best effort: the temp file may already be gone.
                pass
            del session["dimport-file"]
            del session["dimport-counter"]
            session.save()
            flash_alert(_("The import could not be processed,"
                          " try again later"))
            redirect(url(controller="organizations"))
        finished = False

    c.finished = finished
    c.results = result.result
    c.success = result.successful()
    return render("/organizations/importstatus.html")
class QueryTask(object):
    """Wraps the Celery task executing a query, deduplicated via a redis lock."""

    MAX_RETRIES = 5

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        """Wrap an existing ``AsyncResult``, or look one up by ``job_id``."""
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        return self._async_result.id

    @classmethod
    def add_task(cls, query, data_source, scheduled=False, metadata=None):
        """Enqueue *query* on *data_source*'s queue, reusing any in-flight job.

        A redis WATCH/MULTI transaction on the query-hash lock key ensures
        concurrent submissions of an identical query share a single task.
        Returns the QueryTask, or None if the lock could not be acquired
        after MAX_RETRIES attempts.
        """
        # FIX: `metadata={}` was a mutable default argument shared between
        # calls; use None as the sentinel and build a fresh dict per call.
        if metadata is None:
            metadata = {}
        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job", query_hash)
        logging.info("[Manager] Metadata: [%s]", metadata)
        try_count = 0
        job = None

        while try_count < cls.MAX_RETRIES:
            try_count += 1

            pipe = redis_connection.pipeline()
            try:
                pipe.watch(cls._job_lock_id(query_hash, data_source.id))
                job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
                if job_id:
                    logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)

                    job = cls(job_id=job_id)
                    if job.ready():
                        # Stale lock left behind by a finished job; drop it
                        # so a new task gets enqueued below.
                        logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                        redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
                        job = None

                if not job:
                    pipe.multi()

                    if scheduled:
                        queue_name = data_source.scheduled_queue_name
                    else:
                        queue_name = data_source.queue_name

                    result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                    job = cls(async_result=result)

                    logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                    pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                    pipe.execute()
                break

            except redis.WatchError:
                # Another process touched the lock key; retry the transaction.
                continue

        if not job:
            logging.error("[Manager][%s] Failed adding job for query.", query_hash)

        return job

    def to_dict(self):
        """Serialize task state for the API, mapped through STATUSES."""
        if self._async_result.status == 'STARTED':
            updated_at = self._async_result.result.get('start_time', 0)
        else:
            updated_at = 0

        if self._async_result.failed() and isinstance(self._async_result.result, Exception):
            error = self._async_result.result.message
        elif self._async_result.status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if self._async_result.successful():
            query_result_id = self._async_result.result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': self.STATUSES[self._async_result.status],
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        return self._async_result.ready()

    def cancel(self):
        return self._async_result.revoke(terminate=True)

    @staticmethod
    def _job_lock_id(query_hash, data_source_id):
        return "query_hash_job:%s:%s" % (data_source_id, query_hash)
class QueryTask(object):
    """Thin wrapper over a Celery ``AsyncResult`` for query-execution tasks."""

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        """Prefer an explicit result handle; otherwise resolve one by id."""
        self._async_result = async_result if async_result else AsyncResult(job_id, app=celery)

    @property
    def id(self):
        return self._async_result.id

    def to_dict(self):
        """Serialize the task's state for the API, mapped through STATUSES."""
        meta = self._async_result._get_task_meta()
        outcome = meta['result']
        state = meta['status']

        updated_at = outcome.get('start_time', 0) if state == 'STARTED' else 0
        status = self.STATUSES[state]

        if isinstance(outcome, (TimeLimitExceeded, SoftTimeLimitExceeded)):
            error = "Query exceeded Redash query execution time limit."
            status = 4
        elif isinstance(outcome, Exception):
            error = outcome.message
            status = 4
        elif state == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        # Only a clean success carries the resulting query_result id.
        query_result_id = outcome if (state == 'SUCCESS' and not error) else None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': status,
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        return self._async_result.ready()

    def cancel(self):
        return self._async_result.revoke(terminate=True, signal='SIGINT')
def romanescoStopRun(jobId, params):
    """Revoke the Celery task identified by *jobId* and report its state.

    The revoke request is sent over the app's broker connection with
    terminate=True so a running worker process is killed, not just marked.
    """
    result = AsyncResult(jobId, backend=getCeleryApp().backend)
    result.revoke(getCeleryApp().broker_connection(), terminate=True)
    return {"status": result.state}
def stop(self, message=None):
    """Forcefully terminate the running Celery task, then fire on_stop.

    When nothing is running the revoke step is skipped and only the
    on_stop hook is invoked with *message*.
    """
    if self.is_running:
        # SIGKILL: hard-kill the worker process executing this task.
        AsyncResult(self.celery_task_id).revoke(terminate=True,
                                                signal='SIGKILL')
    self.on_stop(message=message)