Beispiel #1
0
def results(owner, app_name, job_id):
    """Return a finished job's serialized result, a 202 while it runs,
    or a WORKER_FAILURE payload when it failed.

    Dispatches on the app's cluster type: "single-core" uses a Celery
    AsyncResult, "dask" a dask.distributed Future.
    """
    cluster_type = get_cluster_type(owner, app_name)
    if cluster_type == "single-core":
        async_result = AsyncResult(job_id)
        if async_result.ready() and async_result.successful():
            return json.dumps(async_result.result)
        elif async_result.failed():
            print("traceback", async_result.traceback)
            return json.dumps({
                "status": "WORKER_FAILURE",
                "traceback": async_result.traceback
            })
        else:
            return make_response("not ready", 202)
    elif cluster_type == "dask":
        addr = dask_scheduler_address(owner, app_name)
        with Client(addr) as client:
            fut = Future(job_id, client=client)
            # A done future may be "finished", "error", or "cancelled".
            # Only "finished" holds a retrievable value; calling
            # fut.result() on a cancelled future would re-raise instead
            # of returning, so require "finished" explicitly here.
            if fut.done() and fut.status == "finished":
                return fut.result()
            elif fut.done() and fut.status in ("error", "cancelled"):
                return json.dumps({
                    "status": "WORKER_FAILURE",
                    "traceback": fut.traceback()
                })
            else:
                return make_response("not ready", 202)
    else:
        return json.dumps({"error": "model does not exist."}), 404
Beispiel #2
0
    def download(self, *args, **kwargs):
        """Serve a cached export file, or report the state of the async
        task that is still producing it.

        Returns the file as an attachment when it exists; 410 when the
        export failed or cannot run (Celery disabled); 409 with progress
        information while the task is still running.
        """
        cf = get_object_or_404(CachedFile, id=kwargs['cfid'])
        if cf.file:
            resp = ChunkBasedFileResponse(cf.file.file, content_type=cf.type)
            resp['Content-Disposition'] = 'attachment; filename="{}"'.format(cf.filename).encode("ascii", "ignore")
            return resp
        elif not settings.HAS_CELERY:
            return Response(
                {'status': 'failed', 'message': 'Unknown file ID or export failed'},
                status=status.HTTP_410_GONE
            )

        res = AsyncResult(kwargs['asyncid'])
        if res.failed():
            # Use .get() so a malformed info dict cannot raise KeyError
            # while we are in the middle of reporting a failure.
            if isinstance(res.info, dict) and res.info.get('exc_type') == 'ExportError':
                msg = res.info.get('exc_message', 'Internal error')
            else:
                msg = 'Internal error'
            return Response(
                {'status': 'failed', 'message': msg},
                status=status.HTTP_410_GONE
            )

        return Response(
            {
                'status': 'running' if res.state in ('PROGRESS', 'STARTED', 'SUCCESS') else 'waiting',
                'percentage': res.result.get('value', None) if res.result else None,
            },
            status=status.HTTP_409_CONFLICT
        )
Beispiel #3
0
    def get(self, request, **kwargs):
        """Report the status of a Celery task as a JSON payload."""
        async_result = AsyncResult(id=str(kwargs.get("task_id")), app=current_app)
        task_info = {
            'id': async_result.id,
            'ready': async_result.ready(),
            'celery': self.celery_available,
        }
        if async_result.failed():
            task_info['failed'] = True
            # Only superusers get the real traceback; everyone else a
            # generic message.
            if request.user.is_authenticated() and request.user.is_superuser:
                task_info['traceback'] = async_result.traceback
            else:
                task_info['traceback'] = _("Sync error")
        else:
            task_info['failed'] = False
            task_info['output'] = {
                'user_count': User.objects.all().count(),
                'label': _("User count"),
            }
        if not self.celery_available:  # stop
            task_info['ready'] = True
            task_info['failed'] = True
            task_info['traceback'] = _("celery: unavailable service")

        return JsonResponse({'task': task_info})
Beispiel #4
0
    def repo_check(self, repo_name):
        """Poll endpoint for asynchronous repository creation.

        If a Celery task id is supplied and that task failed, surface its
        traceback as a 500.  Otherwise report whether the repository has
        reached the CREATED state, flashing a success message when it has.

        :param repo_name: name of the repository being created
        :return: ``{'result': True}`` once created, else ``{'result': False}``
        """
        c.repo = repo_name
        task_id = request.GET.get('task_id')

        # The literal string 'None' can arrive via the query string.
        if task_id and task_id not in ['None']:
            from kallithea import CELERY_ON
            from celery.result import AsyncResult
            if CELERY_ON:
                task = AsyncResult(task_id)
                if task.failed():
                    raise HTTPInternalServerError(task.traceback)

        repo = Repository.get_by_repo_name(repo_name)
        if repo and repo.repo_state == Repository.STATE_CREATED:
            if repo.clone_uri:
                # Cloned from a remote: show the credential-hidden source URI.
                clone_uri = repo.clone_uri_hidden
                h.flash(_('Created repository %s from %s') %
                        (repo.repo_name, clone_uri),
                        category='success')
            else:
                repo_url = h.link_to(
                    repo.repo_name,
                    h.url('summary_home', repo_name=repo.repo_name))
                fork = repo.fork
                if fork:
                    fork_name = fork.repo_name
                    h.flash(h.literal(
                        _('Forked repository %s as %s') %
                        (fork_name, repo_url)),
                            category='success')
                else:
                    h.flash(h.literal(_('Created repository %s') % repo_url),
                            category='success')
            return {'result': True}
        return {'result': False}
Beispiel #5
0
    def get(self, request, task_id):
        """Report the status of a custom-action task and update its
        CustomActionExecution record once the task has finished.

        Returns a dict with "status" of "error", "completed", or
        "waiting" (plus a download link when completed).
        """
        cae = None
        if task_id is None:
            result = {"status": "error", "message": "Task id not provided"}
        else:
            res = AsyncResult(task_id)
            if res.ready():
                cae = CustomActionExecution.objects.get(task_id=task_id)
                cae.status = "task finished"
                runtime_delta = datetime.now() - cae.created
                # total_seconds() counts the whole delta; .seconds alone
                # silently discards full days for long-running tasks.
                cae.runtime = int(runtime_delta.total_seconds())
                # Renamed from "status" to avoid shadowing the DRF
                # status module commonly imported in these views.
                task_state = res.status
                if res.failed():
                    result = {"status": "error", "message": "Task failed"}
                    cae.status = "task failed"
                elif res.successful():
                    cae.status = "task succeeded"
                    cae.task_result = json.dumps(res.result)
                    download_link = self._get_download_link(task_id)
                    result = {
                        "status": "completed",
                        "download_link": download_link
                    }
                else:
                    result = {"status": "error", "message": task_state}
            else:
                result = {"status": "waiting"}

        if cae:
            cae.save()
        return Response(result)
Beispiel #6
0
def get_task_status(task_id):
    """
    Given a task ID, return a dictionary with status information.
    """
    task = AsyncResult(task_id)
    has_failed = task.failed()
    info = {
        'successful': task.successful(),
        'result': str(task.result),  # result can be any picklable object
        'status': task.status,
        'ready': task.ready(),
        'failed': has_failed,
    }
    # Only failed tasks carry a traceback worth exposing.
    if has_failed:
        info['traceback'] = task.traceback
    return info
Beispiel #7
0
def upload_providers_status(task_id: str):
    """Return a provider-upload task's state: 409 on failure, 202 while
    processing, otherwise the encoded task result."""
    task = AsyncResult(task_id)
    if task.failed():
        failure_body = {
            "task_id": task_id,
            "status": "failed",
            "details": task.result.args,
        }
        return JSONResponse(status_code=409, content=failure_body)
    if not task.ready():
        pending_body = {"task_id": task_id, "status": "processing"}
        return JSONResponse(status_code=202, content=pending_body)
    result = task.get()
    print(result)
    return {
        "task_id": task_id,
        "status": "success",
        "providers": jsonable_encoder(result, exclude_defaults=True, exclude_none=True),
    }
Beispiel #8
0
def get_job_state(request):
    """
    Report the progress or state of a Celery job back to the user.
    """
    data = {}
    if 'job_id' in request.GET:
        job = AsyncResult(request.GET['job_id'])
        if job.failed():
            data['errors'] = True
            # propagate=False returns the exception instead of raising it.
            data['error_descrip'] = str(job.get(propagate=False))
        elif job.ready():
            data['success'] = True
        else:
            if job.state == 'PENDING':
                data['pending'] = True
            else:
                data['running'] = True
                data['progress'] = job.info
    return JsonResponse(data, safe=False)
Beispiel #9
0
    def repo_check(self, repo_name):
        """Poll endpoint for asynchronous repository creation.

        If a Celery task id is supplied and that task failed, surface its
        traceback as a 500.  Otherwise report whether the repository has
        reached the CREATED state, flashing a success message when it has.

        :param repo_name: name of the repository being created
        :return: ``{'result': True}`` once created, else ``{'result': False}``
        """
        c.repo = repo_name
        task_id = request.GET.get('task_id')

        # The literal string 'None' can arrive via the query string.
        if task_id and task_id not in ['None']:
            from kallithea import CELERY_ON
            from celery.result import AsyncResult
            if CELERY_ON:
                task = AsyncResult(task_id)
                if task.failed():
                    raise HTTPInternalServerError(task.traceback)

        repo = Repository.get_by_repo_name(repo_name)
        if repo and repo.repo_state == Repository.STATE_CREATED:
            if repo.clone_uri:
                # Cloned from a remote: show the credential-hidden source URI.
                clone_uri = repo.clone_uri_hidden
                h.flash(_('Created repository %s from %s')
                        % (repo.repo_name, clone_uri), category='success')
            else:
                repo_url = h.link_to(repo.repo_name,
                                     h.url('summary_home',
                                           repo_name=repo.repo_name))
                fork = repo.fork
                if fork:
                    fork_name = fork.repo_name
                    h.flash(h.literal(_('Forked repository %s as %s')
                            % (fork_name, repo_url)), category='success')
                else:
                    h.flash(h.literal(_('Created repository %s') % repo_url),
                            category='success')
            return {'result': True}
        return {'result': False}
Beispiel #10
0
    def list(self, request):
        """Return the outcome of a bulk task, gated on ownership.

        The requester must be staff or the user whose name is embedded
        in the task id (characters 37 onward).
        """
        task_id = request.GET.get('task')
        username = task_id[37:]
        user = self.request.user

        if not user.is_staff and user.username != username:
            return Response(status=status.HTTP_403_FORBIDDEN)

        task = AsyncResult(task_id)
        result_format = request.GET.get('result')

        if task.failed():
            return Response({'exception': str(task.result)}, status=status.HTTP_400_BAD_REQUEST)
        if not task.successful():
            # Still pending/running: report id and state only.
            return Response({'task': task.id, 'state': task.state})

        result = task.get()
        if result_format == 'json':
            response = HttpResponse(result.json, content_type="application/json")
            response['Content-Encoding'] = 'gzip'
            return response
        if result_format == 'report':
            return HttpResponse(result.report)
        return HttpResponse(result.detailed_summary)
Beispiel #11
0
def admin_post_publications(request):
    """Admin view: list the 100 most recent document-baking results.

    Joins baking-result associations with module metadata and augments
    each row with the Celery task state (and traceback on failure).
    """
    settings = request.registry.settings
    db_conn_str = settings[config.CONNECTION_STRING]

    states = []
    with psycopg2.connect(db_conn_str) as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute("""\
SELECT ident_hash(m.uuid, m.major_version, m.minor_version),
       m.name, bpsa.created, bpsa.result_id::text
FROM document_baking_result_associations AS bpsa
     INNER JOIN modules AS m USING (module_ident)
ORDER BY bpsa.created DESC LIMIT 100""")
            for row in cursor.fetchall():
                message = ''
                # Last selected column is the Celery result id.
                result_id = row[-1]
                result = AsyncResult(id=result_id)
                if result.failed():  # pragma: no cover
                    message = result.traceback
                states.append({
                    'ident_hash': row[0],
                    'title': row[1],
                    'created': row[2],
                    'state': result.state,
                    'state_message': message,
                })

    return {'states': states}
Beispiel #12
0
    def _get_async_result(self, node_name, node_id):  # pylint: disable=invalid-name,redefined-builtin
        """Return the AsyncResult for *node_id*, using the per-flow cache.

        Cache access is traced and serialized by a per-flow lock; only
        results already in a terminal state (success or failure) are
        cached, since in-flight results would go stale.
        """
        cache = Config.async_result_cache[self._flow_name]
        # Context attached to every trace record emitted below.
        trace_msg = {
            'flow_name': self._flow_name,
            'node_args': self._node_args,
            'parent': self._parent,
            'dispatcher_id': self._dispatcher_id,
            'queue': Config.dispatcher_queues[self._flow_name],
            'node_id': node_id,
            'node_name': node_name,
            'selective': self._selective
        }

        with self._node_state_cache_lock.get_lock(self._flow_name):
            try:
                Trace.log(Trace.NODE_STATE_CACHE_GET, trace_msg)
                res = cache.get(node_id)
                Trace.log(Trace.NODE_STATE_CACHE_HIT, trace_msg)
            except CacheMissError:
                Trace.log(Trace.NODE_STATE_CACHE_MISS, trace_msg)
                res = AsyncResult(id=node_id)
                # we can cache only results of tasks that have finished or failed, not the ones that are going to
                # be processed
                if res.successful() or res.failed():
                    Trace.log(Trace.NODE_STATE_CACHE_ADD, trace_msg)
                    cache.add(node_id, res)

        return res
Beispiel #13
0
def failed(request, event_url_name, task_id):
    """Render the badge-generation failure page for an event."""
    event = get_object_or_404(Event, url_name=event_url_name)

    # permission check
    if not event.is_admin(request.user):
        return nopermission(request)

    # the badge system must be active for this event
    if not event.badges:
        return notactive(request)

    result = AsyncResult(task_id)
    error = None
    latex_output = None

    if result.failed():
        # Unexpected crash: notify the admins, show a generic message.
        error = _("Internal Server Error. The admins were notified.")
        mail_admins("Badge generation error",
                    str(result.result),
                    fail_silently=True)
    elif result.state == "CREATOR_ERROR":
        # Custom task state carrying LaTeX diagnostics for the user.
        error = result.info['error']
        latex_output = result.info['latex_output']

    context = {'event': event, 'error': error, 'latex_output': latex_output}
    return render(request, 'badges/failed.html', context)
Beispiel #14
0
def check_tasks(request):
    """
    Check the status of a batch of Celery tasks.

    Expects ``request.data['tasks']`` to be a JSON-encoded list of task
    ids.  Returns SUCCESS when every task succeeded, WAIT while any is
    still running, otherwise FAILED with the failed tasks' messages.
    """
    try:
        tasks = json.loads(request.data['tasks'])
    # Narrowed from the old bare ``except:`` -- only malformed input
    # should map to a 400, not e.g. KeyboardInterrupt/SystemExit.
    except (KeyError, TypeError, ValueError):
        return Response({}, status=status.HTTP_400_BAD_REQUEST)

    failed_tasks, success_tasks, ready_tasks = [], [], []

    for task in tasks:

        task_result = AsyncResult(task)

        if task_result.failed():
            failed_tasks.append({
                'task': task,
                # info holds the raised exception; its first arg is the message
                'message': task_result.info.args[0]
            })

        if task_result.successful():
            success_tasks.append(task)

        if task_result.ready():
            ready_tasks.append(task)

    if len(tasks) == len(success_tasks):
        return Response({'code': 'SUCCESS'})  # all task success

    if len(tasks) != len(ready_tasks):
        return Response({'code': 'WAIT'})

    return Response({'code': 'FAILED', 'tasks': failed_tasks})
Beispiel #15
0
def query_results(owner, app_name, job_id):
    """Return "YES" when the job succeeded, "FAIL" when it failed, and
    "NO" while it is still running; unknown apps get a 404 JSON body."""
    cluster_type = get_cluster_type(owner, app_name)
    if cluster_type == "single-core":
        async_result = AsyncResult(job_id)
        print("celery result", async_result.state)
        if async_result.ready() and async_result.successful():
            return "YES"
        elif async_result.failed():
            return "FAIL"
        else:
            return "NO"
    elif cluster_type == "dask":
        addr = dask_scheduler_address(owner, app_name)
        with Client(addr) as client:
            fut = Future(job_id, client=client)
            print("dask result", fut.status)
            # A done future may be "finished", "error", or "cancelled".
            # Only "finished" means success; the old `status != "error"`
            # test wrongly reported cancelled futures as "YES".
            if fut.done() and fut.status == "finished":
                return "YES"
            elif fut.done() and fut.status in ("error", "cancelled"):
                return "FAIL"
            else:
                return "NO"
    else:
        return json.dumps({"error": "model does not exist."}), 404
Beispiel #16
0
 def initialize_in_judge(self):
     """Ensure a judge-initialization task exists for this test case.

     Guarded by a cache lock so concurrent callers do not enqueue
     duplicate tasks.  A previously recorded task id is cleared when
     that task finished (either way), or revoked and cleared when it is
     still PENDING (never picked up); a fresh task is queued if none
     remains.
     """
     lock = cache.lock("testcase_{}_{}_{}_initialize_in_judge".format(
         self.problem.problem.pk, self.problem.pk, self.pk), timeout=60)
     if lock.acquire(blocking=False):
         try:
             # Re-read through the transaction in case another worker
             # already completed initialization.
             refreshed_obj = type(self).objects.with_transaction(self._transaction).get(pk=self.pk)
             if refreshed_obj.judge_initialization_successful:
                 return
             if self.judge_initialization_task_id:
                 result = AsyncResult(self.judge_initialization_task_id)
                 if result.failed() or result.successful():
                     self.judge_initialization_task_id = None
                     self.save()
                 elif result.state == "PENDING":
                     # Never picked up by a worker: revoke and forget it
                     # so a fresh task can be queued below.
                     result.revoke()
                     self.judge_initialization_task_id = None
                     self.save()
                 else:
                     logger.debug("Waiting for task {} in state {}".format(
                         self.judge_initialization_task_id, result.state
                     ))
             if not self.judge_initialization_task_id:
                 self.judge_initialization_task_id = TestCaseJudgeInitialization().delay(self).id
                 self.save()
         finally:
             lock.release()
Beispiel #17
0
def show_feed(id):
    """Render the feed once its background task completes.

    Failure is checked first: a failed task is also "ready", so the old
    order reached result.get(), which re-raises the task's exception
    instead of showing the traceback page.
    """
    result = AsyncResult(id, backend=backend)
    if result.failed():
        return result.traceback
    elif result.ready():
        return render_template('feed.html', feed=result.get())
    else:
        return render_template('processing.html')
Beispiel #18
0
def show_feed(id):
    """Render the feed once its background task completes.

    Failure is checked first: a failed task is also "ready", so the old
    order reached result.get(), which re-raises the task's exception
    instead of showing the traceback page.
    """
    result = AsyncResult(id, backend=backend)
    if result.failed():
        return result.traceback
    elif result.ready():
        return render_template('feed.html', feed=result.get())
    else:
        return render_template('processing.html')
Beispiel #19
0
        def get_response():
            """Resolve the current user's queued code execution into an
            ExecutionResult.

            NOTE(review): the AsyncResult is keyed by username, so each
            user appears to have at most one tracked execution — confirm
            against the task producer.
            """
            failure_reason = 'An error has occurred.'

            serializer = serializers.ExecutionStateSerializer(
                data=request.query_params)
            if serializer.is_valid(raise_exception=True):
                result = AsyncResult(request.user.username)
                if result is not None:
                    # if result.children is None:
                    #     failure_reason = 'Could not connect to the execution backend.'
                    #     result.forget()
                    if result.ready():
                        exec_result = CeleryExecutionResult(*result.result)
                        result.forget(
                        )  # Don't cache the result if we send it out
                        if not exec_result.mainExecError:
                            if exec_result.finished:
                                # Pair each requirement id with its TestResult.
                                reqs = [(res[0], TestResult(*res[1]))
                                        for res in exec_result.results]
                                profile = models.BaseProfile.objects.get(
                                    user=request.user)
                                section = models.Section.objects.get(
                                    id=exec_result.section_id)
                                num_complete = 0
                                for req in reqs:
                                    if req[1].success:
                                        num_complete += 1
                                        profile.completed_section_requirements.add(
                                            section.requirements.get(
                                                id=req[0]))
                                # Passing every requirement (and at least one)
                                # completes the whole section.
                                if len(reqs
                                       ) == num_complete and len(reqs) != 0:
                                    profile.completed_sections.add(section)
                                raw_reqs = [req[1] for req in reqs]
                                return ExecutionResult(
                                    status=ExecutionState.success,
                                    result=exec_result.mainExecOutput,
                                    results=raw_reqs)
                            else:
                                return ExecutionResult(
                                    status=ExecutionState.input_required,
                                    result=exec_result.mainExecOutput)
                        else:
                            return ExecutionResult(
                                status=ExecutionState.failed,
                                result=exec_result.mainExecOutput,
                                error=exec_result.mainExecError)
                    else:
                        # NOTE(review): failed() implies ready() for Celery
                        # results, so this branch looks unreachable — confirm.
                        if result.failed():
                            failure_reason = 'The code attempted to run, but an error occurred.'
                        else:
                            return ExecutionResult(
                                status=ExecutionState.running)
                else:
                    failure_reason = 'The provided token is invalid.'

            return ExecutionResult(status=ExecutionState.metafail,
                                   reason=failure_reason)
Beispiel #20
0
 def is_processing(self):
     """Return True while any tracked background task is still running.

     Ids of finished tasks (success or failure) are pruned as a side
     effect before the final answer is computed.
     """
     ids = self._background_process_ids
     if ids:
         for pid in ids.copy():
             outcome = AsyncResult(pid)
             if outcome.successful() or outcome.failed():
                 self.remove_processing(pid)
             else:
                 return True
     return bool(self._background_process_ids)
Beispiel #21
0
def status(id):
    """Wait until the job is finished, then report success or failure.

    The failure check must come *after* the polling loop: FAILURE is a
    "ready" state, so inside ``while not result.ready()`` the old
    ``result.failed()`` test could never fire and failed jobs were
    reported as 'success'.
    """
    result = AsyncResult(id, app=celery)
    while not result.ready():
        time.sleep(5)
    if result.failed():
        flash(u"error running triPOD. please check input file", 'error')
        return redirect(url_for('upload'))
    return 'success'
Beispiel #22
0
def status(id):
    """Wait until the job is finished, then report success or failure.

    The failure check must come *after* the polling loop: FAILURE is a
    "ready" state, so inside ``while not result.ready()`` the old
    ``result.failed()`` test could never fire and failed jobs were
    reported as 'success'.
    """
    result = AsyncResult(id, app=celery)
    while not result.ready():
        time.sleep(5)
    if result.failed():
        flash(u"error running triPOD. please check input file", 'error')
        return redirect(url_for('upload'))
    return 'success'
Beispiel #23
0
    def get(self, request, import_queue=None):  # pylint: disable=too-many-return-statements
        """Bulk-import task status endpoint.

        With a ``task`` query parameter, report that task's outcome in
        the requested ``result`` format.  Without one, list the caller's
        visible bulk-import tasks from Flower, optionally filtered by
        *import_queue* and the ``username`` parameter.
        """
        task_id = request.GET.get('task')
        result_format = request.GET.get('result')
        username = request.GET.get('username')
        user = self.request.user

        if task_id:
            parsed_task = parse_bulk_import_task_id(task_id)
            username = parsed_task['username']

            # Only staff may inspect other users' tasks.
            if not user.is_staff and user.username != username:
                return Response(status=status.HTTP_403_FORBIDDEN)

            task = AsyncResult(task_id)

            if task.successful():
                result = task.get()
                if result_format == 'json':
                    response = Response(result.json,
                                        content_type="application/json")
                    response['Content-Encoding'] = 'gzip'
                    return response
                if result_format == 'report':
                    return Response(result.report)
                return Response(result.detailed_summary)
            if task.failed():
                return Response(dict(exception=str(task.result)),
                                status=status.HTTP_400_BAD_REQUEST)
            # PENDING with no queue entry means Celery never saw this id.
            if task.state == 'PENDING' and not task_exists(task_id):
                return Response(dict(exception='task ' + task_id +
                                     ' not found'),
                                status=status.HTTP_404_NOT_FOUND)

            return Response(dict(task=task.id,
                                 state=task.state,
                                 username=username,
                                 queue=parsed_task['queue']),
                            status=status.HTTP_202_ACCEPTED)

        flower_tasks = flower_get('api/tasks').json()
        tasks = []
        for task_id, value in flower_tasks.items():
            if not value['name'].startswith('tasks.bulk_import'):
                continue

            task = parse_bulk_import_task_id(task_id)

            if user.is_staff or user.username == task['username']:
                if (not import_queue or task['queue'] == import_queue) and \
                        (not username or task['username'] == username):
                    tasks.append(
                        dict(task=task_id,
                             state=value['state'],
                             queue=task['queue'],
                             username=task['username']))

        return Response(tasks)
Beispiel #24
0
 def get(self, request, *args, **kwargs):
     """Poll a photometry job: stream new log lines while it runs, then
     persist and report the photometry once it finishes.

     Removed the unused ``mag``/``mag_err`` locals: they indexed
     ``results['APP']`` unconditionally and crashed whenever aperture
     photometry was absent, while the guarded ``hashtable`` extractors
     below already handle that case.
     """
     jobid = self.kwargs['jobid']
     job = AsyncResult(jobid)
     completed = job.ready()
     db_entry = UserTask_photometry.objects.get(jobid=jobid)
     if job.failed():
         # We will let the client-side jquery/ajax handle the error that
         # will come from trying to parse the results of a failed job.
         # We only care to delete our record of the job.
         db_entry.delete()
     lastline = db_entry.logfile_line_number
     with open(os.path.join(MEDIA_ROOT, jobid, 'logfile'), 'r') as f:
         lines = f.readlines()
     if not completed or lastline < len(lines):
         # Still running (or log not fully streamed yet): send the new
         # log lines and remember how far the client has read.
         loglines = ''.join(lines[lastline:]).strip()
         loglines = loglines.replace('\n', '<br />')
         db_entry.logfile_line_number = len(lines)
         db_entry.save()
         context = {'completed': False, 'log': loglines}
     else:
         results = job.get()
         # Extractors guard against missing/None photometry entries.
         hashtable = {
             'MAG_APP': lambda d: round(d['APP'][2], 2) if d['APP'] else None,
             'MAG_APP_ERR': lambda d: round(d['APP'][3], 2) if d['APP'] else None,
             'MAG_PSF': lambda d: round(d['PSF'][2], 2) if d['PSF'] else None,
             'MAG_PSF_ERR': lambda d: round(d['PSF'][3], 2) if d['PSF'] else None,
             'CALIB_SCHEME': lambda d: d['CALIB_SCHEME'],
             'CALIB_FILE': lambda d: d['CALIB_FILE'],
         }
         fields = {}
         fields['user'] = request.user
         fields['BAND'] = db_entry.band
         astrosource = AstroSource.objects.get(sourceID=db_entry.sourceID)
         imageheader = ImageHeader.objects.filter(TARGETID=db_entry.targetID).filter(OB=db_entry.OB).filter(FILTER=db_entry.band)[0]
         fields['imageheader'] = imageheader
         fields['astrosource'] = astrosource
         fields['MAG_APP'] = hashtable['MAG_APP'](results) if 'APP' in results else None
         fields['MAG_APP_ERR'] = hashtable['MAG_APP_ERR'](results) if 'APP' in results else None
         fields['MAG_PSF'] = hashtable['MAG_PSF'](results) if 'PSF' in results else None
         fields['MAG_PSF_ERR'] = hashtable['MAG_PSF_ERR'](results) if 'PSF' in results else None
         fields['CALIB_SCHEME'] = hashtable['CALIB_SCHEME'](results)
         fields['CALIB_FILE'] = hashtable['CALIB_FILE'](results)
         p = Photometry(**fields)
         p.save()
         # Delete database entry; this should eventually be tied to the
         # redis backend!
         db_entry.delete()
         context = {'completed': True, 'jobid': jobid,
                    'PSF': hashtable['MAG_PSF'](results) if 'PSF' in results else None,
                    'PSF_ERR': hashtable['MAG_PSF_ERR'](results) if 'PSF' in results else None,
                    'APP': hashtable['MAG_APP'](results) if 'APP' in results else None,
                    'APP_ERR': hashtable['MAG_APP_ERR'](results) if 'APP' in results else None,
                    'band': db_entry.band,
                    'OB': db_entry.OB,
                    'targetID': db_entry.targetID}
     return self.render_to_response(context)
Beispiel #25
0
def get_ratings_orientation(id: str) -> Tuple[str, int]:
    """Return the ratings-orientation task outcome: 202 while pending,
    500 on failure, otherwise the task result."""
    task = AsyncResult(id, app=celery)
    if task.ready():
        # A failed task is also ready; distinguish before fetching.
        if task.failed():
            return make_response(error=task.state, status=500)
        return make_response(result=task.result)
    return make_response(error=task.state, status=202)
Beispiel #26
0
def query_results():
    """Report whether the job named by the ``job_id`` query parameter
    succeeded ('YES'), failed ('FAIL'), or is still running ('NO')."""
    async_result = AsyncResult(request.args.get('job_id', ''))
    print('async_result', async_result.state)
    if async_result.failed():
        return 'FAIL'
    if async_result.ready() and async_result.successful():
        return 'YES'
    return 'NO'
Beispiel #27
0
def query_results():
    """Report whether the job named by the ``job_id`` query parameter
    succeeded ("YES"), failed ("FAIL"), or is still running ("NO")."""
    async_result = AsyncResult(request.args.get("job_id", ""))
    print("async_result", async_result.state)
    if async_result.failed():
        return "FAIL"
    if async_result.ready() and async_result.successful():
        return "YES"
    return "NO"
Beispiel #28
0
def show_avg(steamid):
    # Render the computed average for a Steam id, a processing page while
    # the task runs, or the task traceback after a failure.
    # NOTE(review): Python 2 print statements -- this block predates py3.
    # NOTE(review): a failed task is also ready(), so result.get() below
    # re-raises the task exception before the failed() branch is reached;
    # consider checking failed() first -- confirm intended behavior.
    result = AsyncResult(steamid, app=celery)
    print result
    print result.ready()
    if result.ready():
        return render_template('avg.html', avg=result.get())
    elif result.failed():
        return result.traceback
    else:
        return render_template('processing.html')
Beispiel #29
0
def results():
    """Return the finished job's JSON result, its traceback on failure,
    or a 202 "not ready" response while it is still running."""
    async_result = AsyncResult(request.args.get("job_id", ""))
    if async_result.failed():
        print("traceback", async_result.traceback)
        return async_result.traceback
    if async_result.ready() and async_result.successful():
        return json.dumps(async_result.result)
    return make_response("not ready", 202)
Beispiel #30
0
def dropq_results():
    """Return the raw dropq job result, its traceback on failure, or a
    202 "not ready" response while it is still running."""
    async_result = AsyncResult(request.args.get('job_id', ''))
    if async_result.failed():
        print('traceback', async_result.traceback)
        return async_result.traceback
    if async_result.ready() and async_result.successful():
        return async_result.result
    return make_response('not ready', 202)
Beispiel #31
0
 def initialize_in_judge(self):
     """(Re)queue judge initialization for this problem.

     Clears the recorded task id when the previous task reached a
     terminal state without initialization being marked successful,
     then schedules a fresh task if none remains pending.
     """
     task_id = self.judge_initialization_task_id
     if task_id and not self.judge_initialization_successful:
         previous = AsyncResult(task_id)
         if previous.failed() or previous.successful():
             self.judge_initialization_task_id = None
             self.save()
     if not self.judge_initialization_task_id:
         self.judge_initialization_task_id = ProblemJudgeInitialization().delay(self).id
         self.save()
Beispiel #32
0
 def is_processing(self):
     """Return True while any tracked background task is still running.

     Ids whose tasks reached a terminal state (success or failure) are
     pruned as a side effect before the final check.
     """
     if self._background_process_ids:
         for process_id in tuple(self._background_process_ids):
             outcome = AsyncResult(process_id)
             if not (outcome.successful() or outcome.failed()):
                 return True
             self.remove_processing(process_id)
     return bool(self._background_process_ids)
Beispiel #33
0
    def list(self, request):
        """Report a reference-check task: serialized broken references on
        success, the exception on failure, otherwise its id and state."""
        task = AsyncResult(request.GET.get('task'))

        if task.failed():
            return Response({'exception': str(task.result)}, status=status.HTTP_400_BAD_REQUEST)
        if not task.successful():
            return Response({'task': task.id, 'state': task.state})

        serializer = serializers.ReferenceListSerializer(instance=task.get())
        return Response(serializer.data)
Beispiel #34
0
 def is_processing(self):
     """Return whether any registered background task is still active.

     Completed tasks (success or failure) are dropped from tracking
     before the answer is computed.
     """
     pending = self._background_process_ids
     if pending:
         for pid in tuple(pending):
             outcome = AsyncResult(pid)
             if not (outcome.successful() or outcome.failed()):
                 return True
             self.remove_processing(pid)
     return bool(self._background_process_ids)
    def get(self, job_id: str) -> tuple:
        """Return ``(payload, http_status)`` for a Celery job.

        FAILED is now checked before ready(): a failed task is also
        ready, so the old ordering called ``job_result.get()``, which
        re-raises the task's exception, and the 500 tuple was
        unreachable.
        """
        job_result = AsyncResult(job_id, app=celery_app)
        # NOTE(review): AsyncResult instances are always truthy, so this
        # guard is effectively dead -- kept for interface compatibility.
        if not job_result:
            abort(404, "Invalid job_id")

        if job_result.failed():
            return {'state': 'FAILED'}, 500

        if job_result.ready():
            return job_result.get(), 200

        return {'state': 'PENDING'}, 206
Beispiel #36
0
def process_top_urls(async_res):
    """
    Resolve a top-layer crawl task into a Result object.

    Args:
        async_res: AsyncResult to be processed
    Returns:
        Result object (a placeholder when unfinished or errored)
    """
    resolved = AsyncResult(async_res.id, app=app_celery)
    if not resolved.ready():
        # there's no way to get the url at this point
        return Result("", [], ready=False)
    if resolved.failed():
        # fail gracefully by skipping errored tasks
        return Result("", ["error"], ready=True)
    return resolved.get()
Beispiel #37
0
def celery_task_log(request, task_id):
    """Return a JSON summary of a Celery task's outcome or progress log."""
    task = AsyncResult(task_id)
    if task.ready() and task.successful():
        out = {'status':'SUCCESS'}
    elif task.ready() and task.failed():
        out = {'status': 'FAILED', 'output': str(task.result)}
    elif task.status == "log":
        # Custom intermediate state: the worker publishes log lines
        # under the 'log' key of the task's meta dict.
        task_log = task.result['log'] if 'log' in task.result else []
        out = {'status':'log', 'output':task_log}
    else:
        out = {'status': task.status}
    out['taskId'] = task_id
    return JsonResponse(out)
Beispiel #38
0
def _cleanup_tables(instance):
    """Mark stale, not-yet-lost import events for *instance* as lost."""
    pending = Q(instance=instance, is_lost=False)
    events = list(TreeImportEvent.objects.filter(pending))
    events += list(SpeciesImportEvent.objects.filter(pending))

    for event in events:
        # An event is lost when it has gone quiet, or when it claims to
        # be running but its Celery task is gone or has failed.
        lost = event.has_not_been_processed_recently()
        if not lost and event.task_id != '' and event.is_running():
            outcome = AsyncResult(event.task_id)
            lost = not outcome or outcome.failed()

        if lost:
            event.is_lost = True
            event.mark_finished_and_save()
Beispiel #39
0
    def get_campaign_status(campaign_id):
        """Return the status of the background task attached to a campaign.

        On SUCCESS or failure the task_id is detached from the stored
        campaign document. Returns None when the campaign has no task_id
        or the task is in an unrecognised state (preserved behaviour).
        """
        campaign = CampaignsModel.get_by_id(campaign_id)
        if 'task_id' in campaign:
            result = AsyncResult(campaign['task_id'])

            if result.status == "SUCCESS":
                del campaign['task_id']
                CampaignsModel.campaign_collection.save(campaign)
                return result.get()
            elif result.status == "STARTED":
                # BUG FIX: was the typo "in_rocess"; now matches the
                # spelling used by the RETRY branch below.
                return {"status": "in_process"}
            elif result.failed():
                del campaign['task_id']
                CampaignsModel.campaign_collection.save(campaign)
                # BUG FIX: the old code called result.get() here, which
                # re-raises the task's exception for failed tasks and
                # made the return below unreachable.
                return {"status": "failed", "msg": result.traceback}
            elif result.status == "RETRY":
                return {"status": "in_process"}
Beispiel #40
0
def create_task_result_dict(initial_data):
    """
    Build the REST-API dict describing a task stored in the session.

    The session snapshot carries the task id plus its state at enqueue
    time; live state is re-read from Celery unless the snapshot already
    recorded a terminal state.

    Args:
        initial_data (dict): Initial data about task stored in session.
    Returns:
        dict: Updated data about task.
    """
    task_id = initial_data['id']
    initial_state = initial_data['initial_state']

    # initial_state is a workaround for EagerResult used in testing;
    # in production initial_state should usually be pending.
    async_result = AsyncResult(task_id)

    state = "processing"
    result = None
    if initial_state == SUCCESS:
        state, result = "success", initial_data['result']
    elif initial_state in (FAILURE, REVOKED):
        state, result = "failure", initial_data['result']
    elif async_result.successful():
        state, result = "success", async_result.get()
    elif async_result.failed():
        state, result = "failure", {'error': str(async_result.result)}

    return {
        "id": task_id,
        "status": state,
        "result": result,
        "task_type": initial_data['task_type'],
        "task_info": initial_data['task_info']
    }
Beispiel #41
0
def get_task(project_slug, task_id):
    """Return JSON describing a Celery task's status and return value."""
    job = AsyncResult(task_id, app=celery)
    result = None
    if job.successful():
        result = job.get()
    elif job.failed():
        try:
            # get() re-raises the stored exception; classify it below.
            job.get()
        except RemoteExecuteError as exception:
            result = exception.message
        except URLError as exception:
            result = 'URLError: ' + str(exception)
    return jsonify(task=dict(
        id=task_id,
        status=job.status,
        retval=result,
    ))
Beispiel #42
0
def process_inner_urls(comb_res, async_res):
    """
    Fold a second-layer crawl task into the combined results object.

    Args:
        comb_res: combined result object for the parent url
        async_res: an AsyncResult
    Returns:
        the next result task of the resolved Result, or None
    """
    resolved = AsyncResult(async_res.id, app=app_celery)
    # added as a hack since the data can't be checked as a group Result
    if not resolved.ready():
        comb_res.ready = False
        return None
    # skip over errored results, ignore them for now
    if resolved.failed():
        return None
    inner = resolved.get()
    comb_res.results.extend(inner.results)
    return inner.next_results
Beispiel #43
0
def get_async_task_status(request):
    """JSON endpoint reporting readiness/failure/result of an async task."""
    logger = logging.getLogger("api.get_async_task_status")
    if not request.user.is_authenticated():
        logger.error("permission denied for user: %s" % request.user)
        raise PermissionDenied()

    result_id = request.GET.get('result_id', None)
    if not result_id:
        raise jsonview.exceptions.BadRequest('Need a result_id parameter')

    result = AsyncResult(result_id)
    # Keys are added progressively: "failed" only once ready, "result"
    # only once ready and not failed — mirrors the client's expectations.
    payload = {"result_id": result_id, "ready": result.ready()}
    if not payload["ready"]:
        return payload
    payload["failed"] = result.failed()
    if not payload["failed"]:
        payload["result"] = result.result
    return payload
Beispiel #44
0
 def retrieve(self, request, pk=None):
     """ A view to report the progress to the user """
     # TODO: figure out a way to do group results (GroupResult / Chain):
     # a group job would report {'id', 'results': join(), 'completed_count'}.
     job = AsyncResult(pk)
     if not job.ready():
         # Still running: no result yet, just echo id and status.
         return Response(data={'id': job.id, 'result': None, 'status': job.status})
     if job.failed():
         # TODO: just return the message of the last traceback exception
         last_tb_line = job.traceback.splitlines()[-1]
         return Response(
             data={
                 'id': job.id,
                 'result': "{}".format(job.result),
                 'status': job.status,
                 'traceback': last_tb_line
             }
         )
     return Response(data={'id': job.id, 'result': job.get(), 'status': job.status})
Beispiel #45
0
    def describeTasks(self, job):
        '''
        Report the status of *job*'s backing cloud-database record.

        Looks up the job's row via describetask and, for supported cloud
        resources whose record is 'active', cross-checks the Celery task
        (job.celeryPID); if that task failed — or polling it raises —
        the record's status is overwritten with "failed".

        @param job: object carrying cloudDatabaseID, celeryPID, resource,
            and a database handle reachable via self.get_database(job)
        @return: the (possibly amended) describetask result dict, or
            None when the database lookup itself fails
        '''
        #logging.debug('*'*80)
        #logging.debug('*'*80)
        logging.debug("describeTasks() job = {0}".format(job))
        database = self.get_database(job)

        try:
            result = database.describetask(job.cloudDatabaseID, JobDatabaseConfig.TABLE_NAME)

            if result is not None and job.cloudDatabaseID in result and result[job.cloudDatabaseID]['status'] == 'active' and job.resource in self.SUPPORTED_CLOUD_RESOURCES:
                try:
                    # NOTE(review): celery_app looks unused below, but the
                    # singleton call may initialize Celery state — kept as-is.
                    celery_app = CelerySingleton().app
                    result2 = AsyncResult(job.celeryPID)
                    logging.debug('describeTasks(): AsyncResult.status = {0}'.format(result2.status))
                    logging.debug('describeTasks(): AsyncResult.failed() = {0}'.format(result2.failed()))
                    logging.debug('describeTasks(): AsyncResult.ready() = {0}'.format(result2.ready()))
                    #if result2.failed() or (job.status.lower() != 'pending'):
                    if result2.failed():
                        result[job.cloudDatabaseID]["status"] = "failed"
                except Exception as e:
                    # Any error while polling the task is treated as failure.
                    logging.debug('describeTasks(): AsyncResult raised exception')
                    logging.exception(e)
                    result[job.cloudDatabaseID]["status"] = "failed"

        except Exception as e:
            # Database lookup failed entirely; signal with None.
            logging.error(e)
            logging.debug('describeTasks() return result=None')
            #logging.debug('*'*80)
            #logging.debug('*'*80)
            return None
        logging.debug('describeTasks() return result={0}'.format(result))
        #logging.debug('*'*80)
        #logging.debug('*'*80)
        return result
Beispiel #46
0
 def task(self, task_id, task_name=None):
     """Return a status-snapshot dict for the Celery task *task_id*."""
     # Imports stay local: ..worker importing at module scope could be
     # circular (preserved from the original).
     from celery.result import AsyncResult
     from ..worker import celery
     snapshot = AsyncResult(task_id, app=celery, task_name=task_name)
     return {
         'id': task_id,
         'state': snapshot.state,
         'status': snapshot.status,
         'name': snapshot.task_name,
         'failed': snapshot.failed(),
         'successful': snapshot.successful(),
         'result': snapshot.result,
         'ready': snapshot.ready(),
     }
Beispiel #47
0
class QueryTask(object):
    """Wrapper around a Celery AsyncResult for query-execution jobs."""
    # Attempts to acquire/create the Redis-backed job lock in add_task.
    MAX_RETRIES = 5

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        """Wrap an existing AsyncResult, or look one up by Celery job id."""
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        # Celery task id of the underlying AsyncResult.
        return self._async_result.id

    @classmethod
    def add_task(cls, query, data_source, scheduled=False, metadata=None):
        """Enqueue an execute_query job, deduplicated per (source, query).

        A Redis lock keyed on the query hash and data source id ensures
        only one job runs per query; a still-valid lock returns the
        existing job instead of enqueuing a new one.

        Returns the QueryTask, or None if the lock could not be taken
        within MAX_RETRIES optimistic-locking attempts.
        """
        # BUG FIX: mutable default argument ({}) replaced with a None
        # sentinel; callers see identical behaviour.
        if metadata is None:
            metadata = {}
        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job", query_hash)
        logging.info("[Manager] Metadata: [%s]", metadata)
        try_count = 0
        job = None

        while try_count < cls.MAX_RETRIES:
            try_count += 1

            pipe = redis_connection.pipeline()
            try:
                # WATCH the lock key so the transaction aborts if another
                # worker creates or removes the job concurrently.
                pipe.watch(cls._job_lock_id(query_hash, data_source.id))
                job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
                if job_id:
                    logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)

                    job = cls(job_id=job_id)
                    if job.ready():
                        # Stale lock: the previous job already finished.
                        logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                        redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
                        job = None

                if not job:
                    pipe.multi()

                    if scheduled:
                        queue_name = data_source.scheduled_queue_name
                    else:
                        queue_name = data_source.queue_name

                    result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                    job = cls(async_result=result)

                    logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                    pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                    pipe.execute()
                break

            except redis.WatchError:
                # Lost the optimistic-locking race; retry with a fresh WATCH.
                continue

        if not job:
            logging.error("[Manager][%s] Failed adding job for query.", query_hash)

        return job

    def to_dict(self):
        if self._async_result.status == 'STARTED':
            updated_at = self._async_result.result.get('start_time', 0)
        else:
            updated_at = 0

        if self._async_result.failed() and isinstance(self._async_result.result, Exception):
            error = self._async_result.result.message
        elif self._async_result.status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if self._async_result.successful():
            query_result_id = self._async_result.result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': self.STATUSES[self._async_result.status],
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        # True when the Celery task was revoked (cancelled).
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        # Raw Celery status string (e.g. PENDING/STARTED/SUCCESS).
        return self._async_result.status

    def ready(self):
        """Return True once the underlying task reached a terminal state."""
        return self._async_result.ready()

    def cancel(self):
        """Revoke the task; terminate=True also kills it if already running."""
        return self._async_result.revoke(terminate=True)

    @staticmethod
    def _job_lock_id(query_hash, data_source_id):
        return "query_hash_job:%s:%s" % (data_source_id, query_hash)