def ret_results(task_id): """ Given a task id, return the dictionary result of all urls one level down The AsyncResult object returned looks like the following: Format: Async.groupresults[].Async.result.nextresult.groupresults[].Async.result Args: task_id: task_id of rabbitmq task Returns: List of Result objects of top level urls and all sub urls combined """ # initiate the results dictionary ret_list = [] # list of result tuples to be processed group_list = [] res = AsyncResult(task_id, app=app_celery) if not res: raise TaskNotFoundException("Task id %s not found or has expired." % task_id) # remove the async wrapper if res.ready(): group_res = res.get() else: raise TaskNotStartedException("Task id %s has not started. Please try again later." % task_id) # process top urls and place them in results list for group in group_res: comb_res = process_top_urls(group) ret_list.append(comb_res) if comb_res.next_results: group_list.append((comb_res, comb_res.next_results)) # if there are results to process while group_list: # res_tup is a (Result, AsyncResult) res_tup = group_list.pop() # comb_res represents the top level url result to be returned comb_res = res_tup[0] # res is AsyncResult res = res_tup[1] res = AsyncResult(res.id, app=app_celery) # remove async wrapper if res.ready(): res = res.get() else: comb_res.ready = False # if there is one group that is not ready, set the result to not ready # disabled for now since you can't check on the group in the docker vm #if not res.ready(): # comb_res.ready = False # if the top level url is ready to be processed if comb_res.ready: # group_list is an GroupResult for group in res: next_results = process_inner_urls(comb_res, group) if next_results: group_list.append((comb_res, next_results)) # make sure all urls collected in each group is unique for comb_res in ret_list: comb_res.results = list(set(comb_res.results)) return ret_list
def wait_for_task(celery_task):
    """Block until the task with id *celery_task* completes (max 5 minutes).

    Celery normally refuses ``result.get()`` from inside a task
    ("Never call result.get() within a task!"); see
    http://docs.celeryq.org/en/latest/userguide/tasks.html#task-synchronous-subtasks
    """
    with celery.result.allow_join_result():
        AsyncResult(celery_task).get(timeout=MINUTE_SECS * 5)
def main(rowid):
    """Deploy the service referenced by ServiceDeployStatus row *rowid*.

    Builds the per-host deployment payload, dispatches the celery ``deploy``
    task and blocks until it finishes.

    Returns:
        0 when the task result is 0 (status flag is cleared), 1 otherwise.
    """
    service_id = models.ServiceDeployStatus.objects.get(id=rowid).service_id
    configinfo = models.ServiceDeployConfig.objects.get(id=service_id)
    hostinfo = models.ServiceManager.objects.filter(
        servicename=configinfo.service.servicename).values_list(
        'inhost_id__hostname', 'number')
    base = [
        configinfo.service.servicename,
        configinfo.image.imagename,
        configinfo.project.projectname,
    ]
    data = [base + [hostname, number] for hostname, number in hostinfo]
    # Multiple docker containers of the same service must be deployed
    # serially (rolling/canary release mode).
    res = tasks.deploy.delay(json.dumps(data))
    result = AsyncResult(id=res.id)
    # BUG FIX: call get() exactly once. The original called res.get() twice
    # (print + comparison), blocking on and re-fetching the same result.
    # Also renamed the local 'id' which shadowed the builtin.
    outcome = result.get()  # blocks until the task completes
    print(outcome)
    if outcome == 0:
        models.ServiceDeployStatus.objects.filter(id=rowid).update(
            status=False)
        print('false')
        return 0
    return 1
def task_queue_handler(id_task, from_queue):
    """Map a task's backend state to {'status': int, 'ml_units': number}.

    Status codes: 0 queued, 1 running, 2 succeeded, 3 failed.
    Raises ValidationError for an unknown queue name.
    """
    if from_queue == "celery":
        state_map = {
            "PENDING": 0,
            "STARTED": 1,
            "RETRY": 1,
            "SUCCESS": 2,
            "FAILURE": 3
        }
        task = AsyncResult(id_task)
        return {"status": int(state_map.get(task.status)), "ml_units": 0}
    if from_queue == "ai-platform":
        state_map = {
            "QUEUED": 0,
            "PREPARING": 0,
            "RUNNING": 1,
            "SUCCEEDED": 2,
            "FAILED": 3,
            "CANCELLING": 3,
            "CANCELLED": 3,
            "STATE_UNSPECIFIED": 3,
        }
        job = get_train_job_status(id_task)
        return {
            "status": int(state_map.get(job.get("state"))),
            "ml_units": job.get("trainingOutput", {}).get("consumedMLUnits", 0),
        }
    raise ValidationError("Invalid queue")
def get_task(id_):
    """
    Get the result or status of a single task.

    :param id_: id of the task
    :return: tuple of (response dict, HTTP status code)
    """
    # lazy load the task app to avoid circular imports
    from ibutsu_server.tasks.queues import app

    async_result = AsyncResult(id_, app=app)
    response = {"state": async_result.state}
    if async_result.state == "SUCCESS":
        response["message"] = "Task has succeeded"
        result = async_result.get()
        if result:
            # BUG FIX: reuse the already-fetched result. The original called
            # async_result.get() a second time, hitting the result backend
            # twice for the same value.
            response.update(result)
    elif async_result.state == "PENDING":
        response["message"] = "Task not yet started or invalid, check back later"
    elif async_result.state == "STARTED":
        response["message"] = "Task has started but is still running, check back later"
    elif async_result.state == "RETRY":
        response["message"] = "Task has been retried, possibly due to failure"
    else:
        response["message"] = "Task has failed!"
        response["error"] = async_result.traceback.split("\n")
    return response, _STATE_TO_CODE.get(async_result.state)
def get(self, request, task_id, format=None):
    """Return the state (and, on success, result + media file URL) of a task."""
    if not task_id:
        return Response({
            "status": "Error",
            "message": "Task id is not provided."
        })
    result = AsyncResult(task_id)
    if result.state == 'SUCCESS':
        # BUG FIX: fetch the result exactly once. The original called
        # result.get() three times, and once as result.get('file', ''),
        # which wrongly passes 'file' as AsyncResult.get()'s *timeout*
        # argument (it is not a dict-style lookup).
        task_result = result.get()
        return Response({
            "status": "Success",
            "result": {
                "state": result.state,
                "done": result.info,
                "result": task_result,
                "file": "{}{}".format(settings.MEDIA_URL, task_result)
            }
        })
    return Response({
        "status": "Success",
        "result": {
            "state": result.state,
            "done": result.info
        }
    })
def getTasksInfo(tasks, forgetIfFinished=True):
    """Refresh status/info for a list of task descriptors in place.

    Args:
        tasks: list of dicts with an 'id' key (optionally a 'children' dict
               of similar descriptors), or a JSON string encoding that list.
        forgetIfFinished: when True, forget finished results in the backend.

    Returns:
        (tasks, hasFinished, errors): the mutated list, whether the LAST
        task in the list is ready, and all collected error messages.
        Returns (None, False, None) when *tasks* is None.
    """
    if tasks is None:
        return None, False, None
    if isinstance(tasks, str):
        # accept a JSON-serialized task list as well
        tasks = json.loads(tasks)
    errors = []
    for t in range(len(tasks)):
        result = AsyncResult(tasks[t]['id'])
        if result.ready():
            tasks[t]['successful'] = result.successful()
            if tasks[t]['successful']:
                tasks[t]['info'] = None
            else:
                # get() re-raises the stored exception for failed tasks
                try:
                    error = str(result.get())
                    errors.append(error)
                except Exception as e:
                    error = str(e)
                    errors.append(error)
                tasks[t]['info'] = {}
                tasks[t]['info']['message'] = error
            if forgetIfFinished:
                result.forget()
        elif result.info is not None:
            tasks[t]['info'] = result.info
        if result.status is not None:
            tasks[t]['status'] = result.status
        if 'children' in tasks[t]:
            # mirror the same bookkeeping for every child task
            numDone = 0
            for key in tasks[t]['children']:
                cResult = AsyncResult(tasks[t]['children'][key]['id'])
                if cResult.ready():
                    numDone += 1
                    tasks[t]['children'][key][
                        'successful'] = cResult.successful()
                    if tasks[t]['children'][key]['successful']:
                        tasks[t]['children'][key]['info'] = None
                    else:
                        try:
                            error = str(cResult.get())
                            errors.append(error)
                        except Exception as e:
                            error = str(e)
                            errors.append(error)
                        tasks[t]['children'][key]['info'] = {}
                        tasks[t]['children'][key]['info'][
                            'message'] = error
                    if forgetIfFinished:
                        cResult.forget()
                elif cResult.info is not None:
                    tasks[t]['children'][key]['info'] = cResult.info
                if cResult.status is not None:
                    tasks[t]['children'][key]['status'] = cResult.status
            tasks[t]['num_done'] = numDone
    # overall "finished" flag is driven by the last task only
    lastResult = AsyncResult(tasks[-1]['id'])
    hasFinished = lastResult.ready()
    return tasks, hasFinished, errors
def test_get_sync_subtask_option(self, task_join_will_block):
    """get() inside a task must raise unless sync subtasks are enabled."""
    task_join_will_block.return_value = True
    result = AsyncResult(uuid(), backend=_MockBackend())
    with pytest.raises(RuntimeError):
        result.get()
    # explicitly opting out of the guard must not raise
    result.get(disable_sync_subtasks=False)
def test_get(self):
    """Successful results come back via get(); failed tasks re-raise."""
    ok_res = AsyncResult(self.task1["id"])
    ok2_res = AsyncResult(self.task2["id"])
    nok_res = AsyncResult(self.task3["id"])
    # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual, consistent with the sibling tests in this file.
    self.assertEqual(ok_res.get(), "the")
    self.assertEqual(ok2_res.get(), "quick")
    self.assertRaises(KeyError, nok_res.get)
def test_get_timeout(self):
    """get(timeout=...) raises TimeoutError for RETRY and unknown tasks."""
    retry_res = AsyncResult(self.task4['id'])  # has RETRY state
    with self.assertRaises(TimeoutError):
        retry_res.get(timeout=0.1)
    with self.assertRaises(TimeoutError):
        AsyncResult(uuid()).get(timeout=0.1)
def test_get(self):
    """Successful tasks return values; failures raise and store the error."""
    self.assertEqual(AsyncResult(self.task1["id"]).get(), "the")
    self.assertEqual(AsyncResult(self.task2["id"]).get(), "quick")
    failed = AsyncResult(self.task3["id"])
    self.assertRaises(KeyError, failed.get)
    stored = AsyncResult(self.task4["id"])
    self.assertIsInstance(stored.result, KeyError)
def test_drain_events_decodes_exceptions_in_meta(self):
    """A stored FAILURE must round-trip through JSON back to RuntimeError."""
    tid = uuid()
    backend = self.create_backend(serializer="json")
    backend.store_result(tid, RuntimeError("aap"), states.FAILURE)
    with self.assertRaises(Exception) as cm:
        AsyncResult(tid, backend=backend).get()
    self.assertEqual(cm.exception.__class__.__name__, "RuntimeError")
    self.assertEqual(str(cm.exception), "aap")
def test_drain_events_decodes_exceptions_in_meta(self):
    """A JSON-serialized FAILURE result decodes back to the original error."""
    tid = uuid()
    json_backend = self.create_backend(serializer='json')
    json_backend.store_result(tid, RuntimeError('aap'), states.FAILURE)
    with pytest.raises(Exception) as excinfo:
        AsyncResult(tid, backend=json_backend).get()
    assert excinfo.value.__class__.__name__ == 'RuntimeError'
    assert str(excinfo.value) == 'aap'
def test_get(self):
    """get() returns stored values; failures raise, expose .result and .info."""
    ok_res = AsyncResult(self.task1["id"])
    self.assertEqual(ok_res.get(), "the")
    self.assertEqual(AsyncResult(self.task2["id"]).get(), "quick")
    self.assertRaises(KeyError, AsyncResult(self.task3["id"]).get)
    self.assertIsInstance(AsyncResult(self.task4["id"]).result, KeyError)
    self.assertEqual(ok_res.info, "the")
def test_get(self):
    """get() returns values, raises stored errors, and honors propagate=False."""
    ok_res = AsyncResult(self.task1["id"])
    failed = AsyncResult(self.task3["id"])
    self.assertEqual(ok_res.get(), "the")
    self.assertEqual(AsyncResult(self.task2["id"]).get(), "quick")
    with self.assertRaises(KeyError):
        failed.get()
    # propagate=False returns the exception object instead of raising it
    self.assertTrue(failed.get(propagate=False))
    self.assertIsInstance(AsyncResult(self.task4["id"]).result, KeyError)
    self.assertEqual(ok_res.info, "the")
def consolidate_metadata(self, wait=True):
    """
    Tries to find an abstract for the paper, if none is available yet,
    possibly by fetching it from Zotero via doi-cache.
    """
    if self.task is None:
        # no consolidation in flight: start one and remember its id
        task = send_task('consolidate_paper', [], {'pk':self.id})
        self.task = task.id
        self.save(update_fields=['task'])
    else:
        # re-attach to the task started earlier
        task = AsyncResult(self.task)
    if wait:
        task.get()
def consolidate_metadata(self, wait=True):
    """
    Tries to find an abstract for the paper, if none is available yet,
    possibly by fetching it from Zotero via doi-cache.
    """
    if self.task is not None:
        # resume tracking the already-dispatched task
        task = AsyncResult(self.task)
    else:
        task = send_task('consolidate_paper', [], {'pk': self.id})
        self.task = task.id
        self.save(update_fields=['task'])
    if wait:
        task.get()  # block until consolidation completes
def test_get(self):
    """Values come back via get(); errors raise unless propagate=False."""
    first = AsyncResult(self.task1['id'])
    second = AsyncResult(self.task2['id'])
    broken = AsyncResult(self.task3['id'])
    stored = AsyncResult(self.task4['id'])
    self.assertEqual(first.get(), 'the')
    self.assertEqual(second.get(), 'quick')
    with self.assertRaises(KeyError):
        broken.get()
    self.assertTrue(broken.get(propagate=False))
    self.assertIsInstance(stored.result, KeyError)
    self.assertEqual(first.info, 'the')
def head(self, request, *args, **kwargs):
    """HEAD probe: 201 = results ready, 202 = still computing, 400 = no search."""
    self.query, self.last_item, self.first_item = self.get_query(request)
    cached = cache.get(self.get_cache_key())
    if cached is None:
        return http.HttpResponseBadRequest()  # 400: no search is being performed
    if "task" not in cached:
        return http.HttpResponse(status=201)  # 201: search results ready
    try:
        AsyncResult(cached["task"]).get(self.timeout)
    except exceptions.TimeoutError:
        return http.HttpResponse(status=202)  # 202: still waiting for task
    return http.HttpResponse(status=201)  # 201: search results ready
def get(self, request, *args, **kw):
    """Report a processing job's status and (when available) its result.

    URL kwargs: ``service_name``/``task_name`` (kept for URL parity) and
    ``job_id``, the celery task id that is actually looked up.
    """
    # these are not really necessary, the important one is the id
    name = kw['service_name']
    tname = kw['task_name']
    job_id = kw['job_id']
    try:
        result = AsyncResult(id=job_id)
        state = result.state.lower()
        if state in ("pending", "started", "revoked"):
            # no result payload yet (or never will be)
            content = []
        else:
            content = result.get()
        if state not in ("pending", "started", "revoked",
                         "retry", "failure", "success"):
            state = "unknown"
        return Response({"jobId": job_id, "jobStatus": state,
                         "content": content})
    except Exception as e:
        # pysdss raises errors shaped like the literal list
        # ["error", "error info here"].
        # SECURITY FIX: the original used eval(str(e)), which executes
        # arbitrary code embedded in the error text; ast.literal_eval only
        # parses Python literals.
        import ast
        try:
            content = ast.literal_eval(str(e))
        except (ValueError, SyntaxError):
            content = ["error", str(e)]
        return Response({"jobId": job_id, "jobStatus": 'failure',
                         "content": content})
def retrieve(self, request, pk=None):
    """Expose a celery task's outcome: 200 success, 400 failure, 409 pending."""
    task = AsyncResult(pk)
    state = task.state
    if state == 'SUCCESS':
        return Response({'state': state, 'result': task.get()},
                        status=status.HTTP_200_OK)
    if state == 'FAILURE':
        try:
            task.get()  # re-raises the stored exception
        except Exception as exc:
            return Response({'state': state, 'cause': str(exc)},
                            status=status.HTTP_400_BAD_REQUEST)
        return None  # mirrors the original: FAILURE without a raise falls out
    return Response({'state': state}, status=status.HTTP_409_CONFLICT)
def get(self, taskid):
    """ Get a task result

**Example request**:

.. sourcecode:: http

  GET /api/task/result/c60be250-fe52-48df-befb-ac66174076e6 HTTP/1.1
  Host: localhost:5555

**Example response**:

.. sourcecode:: http

  HTTP/1.1 200 OK
  Content-Length: 84
  Content-Type: application/json; charset=UTF-8

  {
      "result": 3,
      "state": "SUCCESS",
      "task-id": "c60be250-fe52-48df-befb-ac66174076e6"
  }

:query timeout: how long to wait, in seconds, before the operation times out
:reqheader Authorization: optional OAuth token to authenticate
:statuscode 200: no error
:statuscode 401: unauthorized request
:statuscode 503: result backend is not configured
    """
    timeout = self.get_argument('timeout', None)
    if timeout is not None:
        timeout = float(timeout)
    result = AsyncResult(taskid)
    if not self.backend_configured(result):
        raise HTTPError(503)
    response = {
        'task-id': taskid,
        'state': result.state,
        'result': result.result,
    }
    if timeout:
        # wait up to the requested timeout; propagate=False keeps failures
        # as data rather than raising here
        result.get(timeout=timeout, propagate=False)
        self.update_response_result(response, result)
    elif result.ready():
        self.update_response_result(response, result)
    self.write(response)
def test_task_exception(swh_scheduler_celery_app, swh_scheduler_celery_worker,
                        swh_scheduler):
    """A backend task that raises must surface its exception through get()."""
    task_type = swh_scheduler.get_task_type("swh-test-error")
    assert task_type
    assert task_type["backend_name"] == TASK_ERROR
    swh_scheduler.create_tasks([create_task_dict("swh-test-error", "oneshot")])
    scheduled = run_ready_tasks(swh_scheduler, swh_scheduler_celery_app)
    assert len(scheduled) == 1
    result = AsyncResult(id=scheduled[0]["backend_id"])
    with pytest.raises(NotImplementedError):
        result.get()
def instancestate(request):
    """
    Given a POST request with ``task_id`` and ``instance_state`` fields,
    return JSON with updated value (given the task has completed, or the same
    as provided) for the ``instance_state`` field and the same value for the
    ``task_id``. ``task_id`` is to correspond to the ID of the background
    task. If instance state is not available, return ``Not available`` as
    the value for ``instance_state``.
    """
    task_id = request.POST.get('task_id', None)
    instance_state = request.POST.get('instance_state', 'pending')
    # Preserve current state
    state = {'task_id': None, 'instance_state': instance_state, 'error': ''}  # Reset info to be sent
    if task_id:
        # If we have a running task, check on instance state
        result = AsyncResult(task_id)
        if result.ready():
            # completed: the task's return value replaces the whole state
            state = result.get()
            state['task_id'] = None  # Reset but make sure it exists
        else:
            # If task not ready, send back the task_id so the client can poll
            state['task_id'] = task_id
    elif 'ec2data' in request.session:
        # We have no task ID, so start a task to get instance state
        form = request.session["ec2data"]
        cloud = form['cloud']
        a_key = form["access_key"]
        s_key = form["secret_key"]
        instance_id = form["instance_id"]
        r = tasks.instance_state.delay(cloud, a_key, s_key, instance_id)
        state['task_id'] = r.id
    else:
        state = {'instance_state': 'Not available'}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
def getCaseRunningResult(id): print "called getCaseRunningResult" # do any things here print "thread start successfully and sleep for 5 seconds" running = SuiteRunningModel.objects.get(pk=id) #running = SuiteRunningModel.objects.all().order_by('-id')[0] print running.pk case_account = running.case_runnings.count() print "case_account:", case_account case_finished = 0 case_failure = 0 for item in running.case_runnings.all(): print item print "task-id:", item.task_id print "get-result-timeout:", item.case.timeout res = AsyncResult(item.task_id, app=app) tc_running = res.get(timeout=item.case.timeout, propagate=False) print "tc_running", tc_running item.ret_code = tc_running.ret_code if item.ret_code <> 0: case_failure += 1 print "hahahahahhahahahhahah", tc_running.error_msg item.error_msg = tc_running.error_msg item.save() case_finished += 1 if case_failure == 0 and case_finished > 0: running.ret = 0 print "set running result:", running.ret else: print "set running result:", running.ret running.ret = 1 running.progress = case_finished * 100 / case_account running.status = "Completed" running.save()
def get(self, request, uuid):
    """Report a finished celery task's result, or a placeholder while it runs."""
    task = AsyncResult(uuid)
    if task.ready():
        message = 'Task result is:%s' % str(task.get())
    else:
        message = 'Task not finished!'
    return Response(message, status=status.HTTP_200_OK)
def state(self):
    """Celery state for self.task_id, mapping an 'ERROR' payload to 'FAILED'."""
    result = AsyncResult(self.task_id)
    if not result.ready():
        return result.state
    # finished: a task that *returned* the string 'ERROR' counts as failed
    return 'FAILED' if result.get() == 'ERROR' else result.state
def status(request, task_id):
    """ Returns status of a queued task. """
    res = {}
    task_log = TaskAuthentication.objects.get(task_id=task_id)
    if task_log.user_id != get_user_id(request):
        # requesting user does not own the task
        logging.debug("Unauthorized User")
        res['status'] = TASK_UNKNOWN
    else:
        outcome = AsyncResult(task_id)
        res['status'] = TASK_NEW  # no such tasks in queue
        if outcome:
            if not outcome.ready():
                res['status'] = TASK_RUNNING  # waiting in queue
            elif outcome.successful():
                value = outcome.get()
                if isinstance(value, dict):
                    # tasks signal failure by returning a dict payload
                    res['status'] = TASK_FAILED
                    res['error_message'] = value['error_message']
                else:
                    res['status'] = TASK_SUCCESSFUL  # succcessfull
    response = json.dumps(res)
    return HttpResponse(response, mimetype="application/json")
def update_clusters(request):
    """
    Given a task ID as part of the ``request`` (as ``task_id``), check on the
    status of a job retrieving clusters' persistent data. Return a JSON with
    the following fields:
        ``task_id``: the job request ID
        ``ready``: ``True`` if the job has completed; ``False`` otherwise
        ``clusters_list``: clusters' persistent data once complete, else []
    """
    task_id = request.POST.get("task_id", "")
    result = AsyncResult(task_id)
    wait_texts = [
        "Fetching data... please wait",
        "Fetching data...",
        "Still fetching data...",
        "Hopefully done soon!",
    ]
    r = {
        "task_id": task_id,
        "ready": result.ready(),
        "clusters_list": [],
        "wait_text": wait_texts[randint(0, len(wait_texts) - 1)],
        "error": None,
    }
    if r["ready"]:
        clusters_pd = result.get()
        r["clusters_list"] = clusters_pd.get("clusters", [])
        if clusters_pd.get("error", None):
            r["error"] = clusters_pd["error"]
    return HttpResponse(simplejson.dumps(r), mimetype="application/json")
def error_handler(uuid):
    """Log the exception and traceback of the failed task *uuid*."""
    failed = AsyncResult(uuid)
    error = failed.get(propagate=False)  # fetch the exception, don't re-raise
    logger.info("Task %s raised exception: %s\n%s" % (uuid, error, failed.traceback))
def _get_asynchronous_payload(self): new_task = False # fetch the task from celery task_id = self.async_restore_task_id_cache.get_value() task = AsyncResult(task_id) task_exists = task.status == ASYNC_RESTORE_SENT if not task_exists: # start a new task # NOTE this starts a nested timer (wait_for_task_to_start), # which will be stopped by self.generate_payload(async_task) # in the asynchronous task. It is expected that # get_async_restore_payload.delay(self) will serialize this # RestoreConfig and it's associated TimingContext before it # returns, and thereby fork the timing context. The timing # context associated with this side of the fork will not be # recorded since it is async (see self.get_response). with self.timing_context("wait_for_task_to_start"): task = get_async_restore_payload.delay(self, self.domain, self.restore_user.username) new_task = True # store the task id in cache self.async_restore_task_id_cache.set_value(task.id) try: response = task.get(timeout=self._get_task_timeout(new_task)) except TimeoutError: # return a 202 with progress response = AsyncRestoreResponse(task, self.restore_user.username) return response
def instancestate(request):
    """Return JSON describing the instance state tied to a background task.

    POST fields: ``task_id`` (optional) and ``instance_state``. When the task
    finished, its result replaces the state; otherwise the id is echoed back
    so the client can poll again.
    """
    task_id = request.POST.get("task_id", None)
    # start from the state the client already knows
    state = {"task_id": None,
             "instance_state": request.POST.get("instance_state", "pending")}
    if task_id:
        outcome = AsyncResult(task_id)
        if outcome.ready():
            state = outcome.get()
            state["task_id"] = None  # reset, but make sure the key exists
        else:
            state["task_id"] = task_id  # still running: let the client retry
    elif "ec2data" in request.session:
        # no task yet: kick one off from the stored form data
        form = request.session["ec2data"]
        job = tasks.instance_state.delay(form["cloud"], form["access_key"],
                                         form["secret_key"],
                                         form["instance_id"])
        state["task_id"] = job.id
    else:
        state = {"instance_state": "Not available"}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
def get(self, request, uuid):
    """Return the task's result if it has finished, else a placeholder."""
    pending = AsyncResult(uuid)  # rebuild a result handle from the task id
    task_res = pending.get() if pending.ready() else "Task not finished!"
    return Response(task_res, status=status.HTTP_200_OK)
def update_clusters(request):
    """
    Given a task ID as part of the ``request`` (as ``task_id``), check on the
    status of a job retrieving clusters' persistent data. Return a JSON with:
        ``task_id``: the job request ID
        ``ready``: ``True`` if the job has completed; ``False`` otherwise
        ``clusters_list``: clusters' persistent data once complete, else []
    """
    task_id = request.POST.get('task_id', '')
    result = AsyncResult(task_id)
    wait_texts = ['Fetching data... please wait',
                  'Fetching data...',
                  'Still fetching data...',
                  'Hopefully done soon!']
    r = {'task_id': task_id,
         'ready': result.ready(),
         'clusters_list': [],
         'wait_text': wait_texts[randint(0, len(wait_texts) - 1)],
         'error': None}
    if task_id == 'missing_form_data':  # See `fetch_clusters` method
        r['error'] = "Missing form data. Please supply the data and try again."
    elif r['ready']:
        clusters_pd = result.get()
        r['clusters_list'] = clusters_pd.get('clusters', [])
        if clusters_pd.get('error', None):
            r['error'] = clusters_pd['error']
    return HttpResponse(simplejson.dumps(r), mimetype="application/json")
def get(self, request, *args, **kwargs):
    """Poll a task for up to 20s and report its status/result as JSON."""
    task_id = kwargs.pop('task_id')
    try:
        result = AsyncResult(task_id).get(timeout=20)
    except TimeoutError:
        result = None
    task_status = TaskStatus.objects.get(task_id=task_id)
    # Strip the "(...)" argument list to recover the bare function name.
    task_name = re.sub(r'\([^)]*\)', '', task_status.signature)
    if task_name in request.session['tasks']:
        del request.session['tasks'][task_name]
        request.session.modified = True
    response = {'task_id': task_id,
                'task_status': task_status.status,
                'task_name': task_name,
                'task_result': result}
    return HttpResponse(json.dumps(response), content_type='application/json')
def sentiment_results_page(request): result_id= request.session['result']# define result result = AsyncResult(result_id, app=app)# get the result of the task data_str = result.get() json_data = json.loads(data_str) print(json_data) if request.method == 'POST': return render(request, 'document_sentiment/sentiment_results_page.html', {'json_data':json_data}) else: docs = Sentiment_Documents.objects.filter(author=request.user.id, document__contains=".csv") return render(request, 'document_sentiment/sentiment_preview_data_file.html', {'docs':docs}) ######################################################################### '''
def get_async_result(id, backend="celery"):
    """Return the finished result for *id*; raise ValueError when unavailable."""
    if backend == 'celery':
        pending = AsyncResult(id)
        if pending.ready():
            return pending.get()
    # unknown backend, or task not finished yet
    raise ValueError("no result")
def results(id):
    """ Format the final results page and return template."""
    result = AsyncResult(id, app=celery)
    # patterns classifying the triPOD output files
    png = re.compile("png$")
    txt = re.compile("triPOD_Results.txt$")
    log = re.compile("triPOD_log.txt$")
    bed = re.compile("bed$")
    error = re.compile("FAILED.+")
    command, exitstatus, stdout, stderr, timer = result.get()
    stdout.replace('\n','')  # NOTE(review): no-op — return value is discarded
    outdir = command['out'].split('=')[-1]
    if (exitstatus == 3) or (exitstatus == 5):
        # triPOD reported an input failure: surface its FAILED message
        errmesg = re.search(error,stdout).group(0)
        flash(u"Please check your input file: {0}".format(errmesg), 'error')
        return redirect(url_for('upload'))
    elif exitstatus == 4:
        flash(u"No regions of abnormal parental contribution were detected at this alpha level.")
    # generate thumbnails once per output directory
    if not any([re.search('.resize.png', file) for file in os.listdir(outdir)]):
        bulkResize(outdir, width=640, height=480)
    images = []
    for path, dirs, files in os.walk(outdir):
        for file in files:
            if re.search(png, file):
                images.append(file)
            elif re.search(txt, file):
                # rewrite the results file with install paths stripped
                f = open(os.path.join(outdir, file), 'r')
                textresults = f.readlines()
                f.close()
                f = open(os.path.join(outdir, file), 'w')
                for line in textresults:
                    f.write(line.replace(installpath, ''))
                f.close()
                with open(os.path.join(outdir, file), 'r') as f:
                    table = extract_table(f)
                txtfile = file
            elif re.search(bed, file):
                bedfile = file
            else:
                continue
    r = re.compile('resize.png$')
    thumbnails = filter(r.search, images)
    buildfile = command['build'].split('=')[-1]
    build = os.path.basename(buildfile).split('_')[0]
    return render_template('results.html',
                           id=os.path.basename(outdir),
                           name=os.path.basename(command['filepath']),
                           build=build,
                           txtfile=txtfile,
                           bedfile=bedfile,
                           images=reversed(thumbnails),
                           table=table,
                           tablerange=range(0,len(table['Sample']) + 1),
                           timer=timer)
def get_async_csv_submission_status(job_uuid):
    """Return the progress or final result of a CSV submission job.

    Suitable for polling long-running submissions.

    :param str job_uuid: The submission job uuid returned by _submit_csv.delay
    :return: Dict with import progress info (insertions & total)
    :rtype: Dict
    """
    if not job_uuid:
        return async_status(FAILED, u'Empty job uuid')
    job = AsyncResult(job_uuid)
    try:
        if job.state not in ('SUCCESS', 'FAILURE'):
            # still running: mapped status plus any progress metadata
            response = async_status(celery_state_to_status(job.state))
            response.update(job.info)
            return response
        if job.state == 'FAILURE':
            return async_status(
                celery_state_to_status(job.state), text(job.result))
    except BacklogLimitExceeded:
        # too many pending messages to inspect; report as still pending
        return async_status(celery_state_to_status('PENDING'))
    return job.get()
def get_context_data(self, **kwargs):
    """Add the task id and its (blocking) result to the template context."""
    context = super(TaskResultView, self).get_context_data(**kwargs)
    task_id = kwargs['task_id']
    context['task_id'] = task_id
    context['result'] = AsyncResult(task_id).get()  # blocks until finished
    return context
def task_result(request):
    """Return the task's result as JSON, or null while it is still running."""
    task_id = json.loads(request.body).get("task_id")
    outcome = AsyncResult(task_id)
    if outcome.ready():
        return json_response(outcome.get(timeout=1))
    return json_response(None)
def onedl(dlid):
    """Get details on one download.

    Returns a JSON description of the download task *dlid*: celery state,
    the source url, the result data or error, and a self link.
    """
    res = AsyncResult(dlid, app=tasks.app)
    finres = {'id': dlid,
              'state': res.state,
              'celery_state': res.state,
              'url': downloads.get(dlid),
              'result': None,
              'error': None,
              'links': {'self': url_for('onedl', dlid=dlid)},
              'ignoreerrors': None}
    if res.ready():
        #try:
        fullres = res.get()
        if fullres.get('error'):
            # task reported an error: surface it and force a FAILURE state
            finres['error'] = fullres['error']
            finres['result'] = False
            if finres['state'] == 'SUCCESS':
                finres['state'] = 'FAILURE'
        else:
            finres['result'] = fullres.get('data')
        # prefer the url resolved by the task when we have none cached
        if not finres.get('url') and fullres.get('url'):
            finres['url'] = fullres['url']
        finres['ignoreerrors'] = fullres.get('ignoreerrors')
        # TODO parse and return other relevant sections...
        #except YoutubeDLError as e:
        #    finres['result'] = False
        #    finres['error'] = str(e)
    #if request.args.get('fmt') == 'html':
    #    return render_template("check.html", finres=finres)
    return jsonify(finres)
def instancestate(request):
    """
    Given a POST request with ``task_id`` and ``instance_state`` fields, check
    if the task has completed. If so, return JSON with updated value for the
    ``instance_state`` field and start a new task, appropriately setting the
    value of ``task_id``. If the initial ``task_id`` has not completed, return
    the same value for the ``task_id`` field.
    """
    task_id = request.POST.get('task_id', None)
    instance_state = request.POST.get('instance_state', 'pending')
    # Preserve current state
    state = {'task_id': None, 'instance_state': instance_state, 'error': ''}  # Reset info to be sent
    if task_id:
        # If we have a running task, check on instance state
        result = AsyncResult(task_id)
        if result.ready():
            state = result.get()
            state['task_id'] = None  # Reset but make sure it exists
        else:
            # If task not ready, send back the task_id
            state['task_id'] = task_id
    elif 'ec2data' in request.session:
        # We have no task ID, so start a task to get instance state
        form = request.session["ec2data"]
        cloud = form.get('cloud', None)
        a_key = form.get("access_key", None)
        s_key = form.get("secret_key", None)
        instance_id = form.get("instance_id", None)
        if not instance_id:
            state['error'] = "Missing instance ID, cannot check the state."
        # NOTE(review): the task is still dispatched even when instance_id
        # is missing — confirm this is intended
        r = tasks.instance_state.delay(cloud, a_key, s_key, instance_id)
        state['task_id'] = r.id
    else:
        state = {'instance_state': 'Not available'}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
def check(self, request, *args, **kwargs):
    """Validate the posted task id and report the task's current outcome."""
    try:
        task_id = request.data['id']
        uuid.UUID(task_id)
    except KeyError:
        raise ValidationError("You should specify id")
    except ValueError:
        raise ValidationError("Not valid id")
    async_result = AsyncResult(task_id)
    try:
        # short poll; propagate=False keeps a stored exception as a value
        outcome = async_result.get(timeout=0.5, propagate=False)
    except TimeoutError:
        outcome = None
    task_state = async_result.status
    if isinstance(outcome, Exception):
        return Response({'status': task_state, 'error': str(outcome)},
                        status=HTTP_200_OK)
    if outcome is None:
        return Response({'status': task_state}, status=HTTP_204_NO_CONTENT)
    return Response({'status': task_state, 'result': outcome},
                    status=HTTP_200_OK)
class Progress(object):
    """Read-only view over a celery task's completion/progress state."""

    def __init__(self, task_id):
        # task_id: id of the celery task being tracked
        self.task_id = task_id
        self.result = AsyncResult(task_id)

    def get_info(self):
        """Return a dict with 'complete', 'success' and 'progress' keys
        describing the tracked task (plus 'result' once it finishes)."""
        if self.result.ready():
            success = self.result.successful()
            with allow_join_result():
                return {
                    'complete': True,
                    'success': success,
                    'progress': _get_completed_progress(),
                    # BUG FIX: the original called self.result.get(self.task_id),
                    # passing the task id as AsyncResult.get()'s *timeout*
                    # argument; get() takes no task id.
                    'result': self.result.get() if success
                              else str(self.result.info),
                }
        elif self.result.state == PROGRESS_STATE:
            return {
                'complete': False,
                'success': None,
                'progress': self.result.info,
            }
        elif self.result.state in ['PENDING', 'STARTED']:
            return {
                'complete': False,
                'success': None,
                'progress': _get_unknown_progress(),
            }
        return self.result.info
def check(self, request, *args, **kwargs):
    """Check a task by posted id; 204 while pending, 200 with result/error."""
    data = request.data
    if 'id' not in data:
        raise ValidationError("You should specify id")
    task_id = data['id']
    try:
        uuid.UUID(task_id)  # reject malformed ids up front
    except ValueError:
        raise ValidationError("Not valid id")
    result_holder = AsyncResult(task_id)
    try:
        value = result_holder.get(timeout=0.5, propagate=False)
    except TimeoutError:
        value = None
    current = result_holder.status
    if isinstance(value, Exception):
        return Response({
            'status': current,
            'error': str(value),
        }, status=HTTP_200_OK)
    if value is None:
        return Response({'status': current}, status=HTTP_204_NO_CONTENT)
    return Response({
        'status': current,
        'result': value,
    }, status=HTTP_200_OK)
def get(self, request, *args, **kwargs):
    """Wait up to 20s for a task, clear its session entry, and return JSON."""
    task_id = kwargs.pop('task_id')
    async_result = AsyncResult(task_id)
    try:
        outcome = async_result.get(timeout=20)  # poll for the result
    except TimeoutError:
        outcome = None
    record = TaskStatus.objects.get(task_id=task_id)
    # Drop the "(args)" suffix from the stored signature string.
    task_name = re.sub(r'\([^)]*\)', '', record.signature)
    session_tasks = request.session['tasks']
    if task_name in session_tasks:
        del session_tasks[task_name]
        request.session.modified = True
    payload = json.dumps({
        'task_id': task_id,
        'task_status': record.status,
        'task_name': task_name,
        'task_result': outcome,
    })
    return HttpResponse(payload, content_type='application/json')
def launch_status(request):
    """
    Given a task ID of a launch process/task, check if the task has completed.
    Return a JSON object with the following keys: ``task_id``, ``ready``,
    ``error``, and ``starting_text``.
    """
    # task_id = request.POST.get('task_id', '')
    task_id = request.session["ec2data"]["task_id"]
    r = {"task_id": "", "ready": "", "error": "", "starting_text": "",
         "instance_id": "", "sg_name": "", "kp_name": ""}
    if task_id:
        r["task_id"] = task_id
        result = AsyncResult(task_id)
        r["ready"] = result.ready()
        if r["ready"]:
            # The task completed; let's get the outcome
            # Set session data based on the task result
            # TODO: this should always return JSON and not mess with the session
            # Then, need to redo how monitor page is displayed...
            response = result.get()
            if response.get("error", ""):
                r["error"] = response["error"]
            else:
                request.session["ec2data"]["cluster_name"] = response["cluster_name"]
                request.session["ec2data"]["instance_id"] = response["instance_id"]
                request.session["ec2data"]["public_ip"] = response["instance_ip"]
                request.session["ec2data"]["image_id"] = response["image_id"]
                request.session["ec2data"]["kp_name"] = response["kp_name"]
                request.session["ec2data"]["kp_material"] = response["kp_material"]
                request.session["ec2data"]["sg_name"] = response["sg_names"][0]
                request.session["ec2data"]["password"] = response["password"]
                # Pass data needed for the additional instance information table
                # on the monitor page
                r["instance_id"] = response["instance_id"]
                r["sg_name"] = response["sg_names"][0]
                r["kp_name"] = response["kp_name"]
                r["image_id"] = response["image_id"]
                # Add an entry to the Usage table now
                try:
                    u = models.Usage(
                        cloud_name=response["cloud_name"],
                        cloud_type=response["cloud_type"],
                        image_id=response["image_id"],
                        instance_type=response["instance_type"],
                        user_id=response["access_key"],
                        email=response.get("institutional_email", ""),
                    )
                    u.save()
                except Exception, e:
                    # Python 2 except syntax; usage logging is best-effort only
                    log.debug("Trouble saving Usage data: {0}".format(e))
        else:
            # Task still running: show a random "starting" message instead
            starting_text_list = [
                "Starting an instance... please wait",
                "Really starting!",
                "Still starting.",
                "Hopefully done soon!",
            ]
            st = starting_text_list[randint(0, len(starting_text_list) - 1)]
            r["starting_text"] = st
    # NOTE(review): no return statement visible in this chunk; the response
    # is presumably built/serialized further below — confirm against the file
def error_handler(uuid, task_id):
    """Log a failed task's exception and mark *task_id* as failed."""
    print('Tasks::error_handler', uuid, task_id)
    from celery.result import AsyncResult
    failed = AsyncResult(uuid)
    exc = failed.get(propagate=False)  # fetch the exception, don't re-raise
    print('Task {0} raised exception: {1!r}\n{2!r}'.format(
        uuid, exc, failed.traceback))
    # record the failure against the requested task id as well
    app.backend.mark_as_failure(task_id, exc, traceback=failed.traceback)
def task_result(request):
    """Render the task's result, or a 'Not Ready' page while it runs."""
    outcome = AsyncResult(request.GET.get('task'))
    if not outcome.ready():
        return HttpResponse("<htmL><body><h1>Not Ready</h1></body></html>")
    data = outcome.get()
    return HttpResponse("<htmL><body><h1>Result: %s</h1></body></html>" % data)