Beispiel #1
1
def ret_results(task_id):
    """
    Collect the combined Result objects for a task and every sub-url task
    one level down.

    The AsyncResult tree looks like:
        Format: Async.groupresults[].Async.result.nextresult.groupresults[].Async.result

    Args:
        task_id: task_id of rabbitmq task

    Returns:
        List of Result objects of top level urls and all sub urls combined

    Raises:
        TaskNotFoundException: the task id is unknown or has expired.
        TaskNotStartedException: the task has not finished yet.
    """
    top_results = []   # combined Result objects to return
    pending = []       # (Result, AsyncResult) pairs still to expand
    wrapper = AsyncResult(task_id, app=app_celery)
    if not wrapper:
        raise TaskNotFoundException("Task id %s not found or has expired." % task_id)
    if not wrapper.ready():
        raise TaskNotStartedException("Task id %s has not started. Please try again later." % task_id)
    # unwrap the async result
    group_res = wrapper.get()
    # Fold each top-level group into a combined Result and queue its children.
    for group in group_res:
        combined = process_top_urls(group)
        top_results.append(combined)
        if combined.next_results:
            pending.append((combined, combined.next_results))
    # Depth-first expansion of the queued (Result, AsyncResult) pairs.
    while pending:
        combined, inner = pending.pop()
        # re-wrap with this app's backend before querying
        inner = AsyncResult(inner.id, app=app_celery)
        if inner.ready():
            inner = inner.get()
        else:
            # one unfinished group marks the whole top-level url not ready
            combined.ready = False
        # NOTE: checking readiness of the whole group directly is disabled
        # because the group cannot be inspected inside the docker vm.
        if combined.ready:
            # inner is a GroupResult at this point
            for group in inner:
                nxt = process_inner_urls(combined, group)
                if nxt:
                    pending.append((combined, nxt))
    # De-duplicate the urls collected for each combined result.
    for combined in top_results:
        combined.results = list(set(combined.results))
    return top_results
Beispiel #2
0
def update_clusters(request):
    """
    Report on the status of a clusters' persistent-data retrieval job.

    The ``request`` carries the job's ``task_id`` (POST field). The returned
    JSON contains:
        ``task_id``: the job request ID echoed back
        ``ready``: ``True`` if the job has completed; ``False`` otherwise
        ``clusters_list``: the clusters' persistent data once the job is
            done, an empty list before that
    """
    task_id = request.POST.get('task_id', '')
    result = AsyncResult(task_id)
    wait_texts = ['Fetching data... please wait', 'Fetching data...',
                  'Still fetching data...', 'Hopefully done soon!']
    response = {
        'task_id': task_id,
        'ready': result.ready(),
        'clusters_list': [],
        # Pick a random "please wait" message for the client to display.
        'wait_text': wait_texts[randint(0, len(wait_texts) - 1)],
        'error': None,
    }
    if task_id == 'missing_form_data':  # See `fetch_clusters` method
        response['error'] = "Missing form data. Please supply the data and try again."
    elif result.ready():
        clusters_pd = result.get()
        response['clusters_list'] = clusters_pd.get('clusters', [])
        if clusters_pd.get('error', None):
            response['error'] = clusters_pd['error']
    return HttpResponse(simplejson.dumps(response), mimetype="application/json")
Beispiel #3
0
def cleanup_tasks():
    """Close out finished query trackers and prune the done list."""
    # Finish any in-progress trackers whose celery task has completed.
    for tracker in QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST):
        if AsyncResult(tracker.task_id).ready():
            logging.info("in progress tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')

    # Maintain constant size of the finished tasks list:
    while QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000) > 0:
        pass

    # Same sweep over the waiting trackers (some entries may be missing).
    for tracker in QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST):
        if tracker is None:
            continue
        if AsyncResult(tracker.task_id).ready():
            logging.info("waiting tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')
Beispiel #4
0
def twitter(**kwargs):
    """
    Kick off twitter feed/stream tasks for the current set of queries.

    For each query containing an underscore, an underscore-stripped variant is
    added to ``queries`` and a per-query ``twitter_feed`` task is (re)started
    once the previous one has finished. The combined query list is then fed to
    a single ``twitter_stream`` task, which is restarted whenever the query
    set changes.
    """
    queries = fetch_queries()
    if getattr(settings, 'KRAL_TWITTER_FIREHOSE', False) is not True:
        # Iterate over a snapshot: the loop body appends underscore-stripped
        # variants to ``queries``, and mutating a list while iterating it is
        # fragile (the original relied on the appended items failing the '_'
        # check when the iterator eventually reached them). End state is the
        # same: stripped variants still land in ``queries`` for the stream.
        for query in list(queries):
            if '_' in query:
                queries.append(query.replace('_',''))
                # NOTE(review): cache key says "facebookstream" inside the
                # twitter task — looks like a copy/paste slip; confirm before
                # renaming, since live cache entries use this key.
                cache_name = "facebookstream_%s" % query
                if cache.get(cache_name):
                    previous_result = AsyncResult(cache.get(cache_name))
                    # Only restart the feed once the previous run finished.
                    if previous_result.ready():
                        result = twitter_feed.delay(query)
                        cache.set(cache_name,result.task_id)
                else:
                    result = twitter_feed.delay(query)
                    cache.set(cache_name,result.task_id)
    if cache.get('twitterfeed'):
        previous_queries = pickle.loads(cache.get('twitterfeed_queries'))
        previous_result = AsyncResult(cache.get('twitterfeed'))
        if previous_result.ready():
            result = twitter_stream.delay(queries)
            cache.set('twitterfeed',result.task_id)
        # Restart the stream (revoking the old one) when the queries changed.
        if queries != previous_queries:
            result = twitter_stream.delay(queries)
            previous_result.revoke()
            cache.set('twitterfeed_queries',pickle.dumps(queries))
            cache.set('twitterfeed',result.task_id)
    else:
        # First run: start the stream and remember the query set.
        result = twitter_stream.delay(queries)
        cache.set('twitterfeed_queries',pickle.dumps(queries))
        cache.set('twitterfeed',result.task_id)
        return
Beispiel #5
0
def cleanup_tasks():
    """Cancel orphaned trackers, close finished ones, prune the done list."""
    for tracker in QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST):
        result = AsyncResult(tracker.task_id)

        # A PENDING status means celery no longer has a task object for this
        # tracker, so mark it as "dead":
        if result.status == 'PENDING':
            logging.info("In progress tracker for %s is no longer enqueued, cancelling (task: %s).",
                         tracker.query_hash, tracker.task_id)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='cancelled')

        if result.ready():
            logging.info("in progress tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')

    # Same sweep over the waiting trackers.
    for tracker in QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST):
        if AsyncResult(tracker.task_id).ready():
            logging.info("waiting tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')

    # Maintain constant size of the finished tasks list:
    QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000)
Beispiel #6
0
def update_clusters(request):
    """
    Given a task ID as part of the ``request`` (as ``request_id``), check on the
    status of a job retrieving clusters' persistent data. Return a JSON with the
    following fields:
        ``task_id``: return the job request ID
        ``ready``: ``True`` if the job has completed; ``False`` otherwise
        ``clusters_list``: a list of clusters' persistent data (if the job
            has completed) or an empty list otherwise
    """
    task_id = request.POST.get("task_id", "")
    result = AsyncResult(task_id)
    wait_texts = [
        "Fetching data... please wait",
        "Fetching data...",
        "Still fetching data...",
        "Hopefully done soon!",
    ]
    response = {
        "task_id": task_id,
        "ready": result.ready(),
        "clusters_list": [],
        # Random "please wait" message for the client to display.
        "wait_text": wait_texts[randint(0, len(wait_texts) - 1)],
        "error": None,
    }
    if result.ready():
        clusters_pd = result.get()
        response["clusters_list"] = clusters_pd.get("clusters", [])
        if clusters_pd.get("error", None):
            response["error"] = clusters_pd["error"]
    return HttpResponse(simplejson.dumps(response), mimetype="application/json")
Beispiel #7
0
 def get_result(self, request):
     """Report on an async task, as JSON for ajax clients or as a page.

     Looks up the task from the ``async_id`` query parameter. Ajax clients
     get a JSON payload with readiness plus redirect/message fields once the
     task is done; non-ajax clients get the success/error view, or a waiting
     page while the task runs.
     """
     from celery.result import AsyncResult
     res = AsyncResult(request.GET.get('async_id'))
     if 'ajax' in self.request.GET:
         data = {
             'async_id': res.id,
             'ready': res.ready()
         }
         if res.ready():
             if res.successful():
                 smes = self.get_success_message(res.info)
                 if smes:
                     messages.success(self.request, smes)
                 # TODO: Do not store message if the ajax client states that it
                 # will not redirect but handle the message itself
                 data.update({
                     'redirect': self.get_success_url(res.info),
                     'message': self.get_success_message(res.info)
                 })
             else:
                 messages.error(self.request, self.get_error_message(res.info))
                 # TODO: Do not store message if the ajax client states that it
                 # will not redirect but handle the message itself
                 data.update({
                     'redirect': self.get_error_url(),
                     'message': self.get_error_message(res.info)
                 })
         return JsonResponse(data)
     else:
         if res.ready():
             if res.successful():
                 return self.success(res.info)
             else:
                 return self.error(res.info)
         # Task still running: show the waiting page.
         return render(request, 'pretixpresale/waiting.html')
Beispiel #8
0
    def get(self, request, *args, **kwargs):
        """Render a transfer task's row-by-row log as an HTML table in JSON.

        The task result is expected to be a ``(rows, failed, remaining)``
        tuple where row 0 is the header and each data row's last column holds
        an error marker. Responds with ``completed`` and ``log_html`` fields.
        """
        result = AsyncResult(kwargs['taskid'])
        log_html = u''

        if result.ready():
            if result.successful():
                rows, failed, remaining = result.result
                log_html = []
                has_error = False
                for i, row in enumerate(rows):
                    if i:
                        # Data row: a truthy last column marks a failed row.
                        has_error = has_error or not not row[-1]
                        row_tmpl = u'<tr><td>%s</td></tr>'
                        col_join = u'</td><td>'
                    else:
                        # First row is the table header.
                        row_tmpl = u'<tr><th>%s</th></tr>'
                        col_join = u'</th><th>'

                    log_html.append(row_tmpl % col_join.join(escape(x) for x in row))

                log_html = u'<table class="table">%s</table>' % u''.join(log_html)

                if has_error:
                    log_html = u'<div class="alert alert-danger" role="alert">At least one row was not transferred. Please see log below for details.</div>' + log_html
                else:
                    log_html = u'<div class="alert alert-success" role="alert">All rows successfully added.</div>' + log_html
            else:
                # Task failed outright: show the exception text.
                # NOTE(review): `unicode` is Python 2 only.
                log_html = u'<div class="alert alert-danger" role="alert">%s</div>' % escape(unicode(result.result))
        context_dict = {
            'completed': result.ready(),
            'log_html': log_html,
        }

        return self.render_json_response(context_dict)
Beispiel #9
0
def show_avg(steamid):
    # Render the computed average for a steam id, the traceback when the task
    # failed, or a processing page while it is still running.
    # NOTE(review): Python 2 print statements — this module targets py2.
    result = AsyncResult(steamid, app=celery)
    print result
    print result.ready()
    if result.ready():
        return render_template('avg.html', avg=result.get())
    elif result.failed():
        return result.traceback
    else:
        return render_template('processing.html')
Beispiel #10
0
def check_and_upload(support_upload_pk, auth_task, gen_task):
    """Upload to support once the auth and csa-generation tasks both succeed.

    Retries itself every second while either task is still running; does
    nothing when either task finished in a non-success state.
    """
    auth = AsyncResult(auth_task)
    gen_csa = AsyncResult(gen_task)
    if not (auth.ready() and gen_csa.ready()):
        # At least one task still running: re-check in one second.
        check_and_upload.retry(countdown=1)
    elif auth.status == "SUCCESS" and gen_csa.status == "SUCCESS":
        # Only upload when authentication actually produced a result.
        if auth.result:
            upload_to_support(support_upload_pk)
Beispiel #11
0
    def get(self, request, *args, **kwargs):
        """Start or report on a cart-zipping task.

        This method is expecting one of two possible query parameters:
            task=[uuid]: Return information on the status of a
            cart zipping task, including a path to download the zip
            if it is done.

            extensions[]: Start a new cart-zipping task for the requesting
            user. Return a task_id, which can be used in the above query.
        """
        if request.GET.get('task'):
            task = AsyncResult(request.GET['task'])

            if task.status == "PENDING":
                payload = {'ready': task.ready(),
                           'status': task.status,
                           'progress': 0}
                return Response(payload)

            if task.status == "SUCCESS":
                payload = {'ready': task.ready(),
                           'status': "SUCCESS",
                           'progress': 100,
                           'path': task.result}
                return Response(payload)

            if task.state == "FAILURE":
                payload = {'ready': task.ready(),
                           'status': "FAILURE"}
                return Response(payload,
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)

            # Anything else is an in-flight task; report its progress metadata.
            meta = task._get_task_meta()
            payload = {'ready': task.ready(),
                       'progress': meta.get('result', {}).get('progress', 0),
                       'status': "PROGRESS"}
            return Response(payload)

        if request.GET.get('extensions[]'):
            extensions = request.GET.getlist('extensions[]')
            # Directories are created unless the client explicitly says 'false'.
            make_dirs = request.GET.get('make_dirs') != 'false'
            cart = request.session.get("cart", {})
            task_id = str(uuid.uuid4())
            tasks.zip_files.apply_async(
                args=[cart, extensions, request.user.username, make_dirs],
                task_id=task_id)
            return Response({"task": task_id}, status=status.HTTP_200_OK)

        return Response(status=status.HTTP_200_OK)
Beispiel #12
0
    def get(self, request, task_id, *args, **kwargs):
        """Report whether the sync task finished, plus its error/info text.

        A non-empty stringified result is treated as the error message;
        an empty one means the sync completed cleanly.
        """
        sync_task = AsyncResult(task_id)
        finished = sync_task.ready()
        data = {'ready': finished}
        if finished:
            outcome = str(sync_task.result).strip()
            if outcome:
                data['error'] = outcome
            else:
                data['info'] = _('Completed.')

        return HttpResponse(json.dumps(data), content_type='application/json')
Beispiel #13
0
    def POST(self):
        """Update a master's task state from the optional celery task id.

        Looks up the project and master from the POSTed ids (404 when either
        is missing), then either reports the celery task's progress (when
        ``task_id`` is given) or immediately marks the master's task as
        completed.
        """
        x = web.input(master_id='', project_id='', task_id='')
        project = models.Project.get(id=x.project_id)
        if not project:
            raise web.notfound()

        master = models.Master.get(id=x.master_id)
        if not master:
            # Fixed: was `return web.notfound()`, which in web.py does not
            # abort the request with a 404 — HTTPError responses must be
            # raised (matching the project check above).
            raise web.notfound()

        if x.task_id:
            from celery.result import AsyncResult
            from metapolator.config import celery
            res = AsyncResult(x.task_id, backend=celery.backend)

            if res.ready():
                master.task_completed = True
                web.ctx.orm.commit()
                return ujson.dumps({'done': True})
            else:
                # Still running: bump the heartbeat so watchers see activity.
                master.task_updated = datetime.datetime.now()
                web.ctx.orm.commit()
                return ujson.dumps({'done': False, 'task_id': x.task_id})

        master.task_completed = True
        web.ctx.orm.commit()
        return ujson.dumps({'done': True})
Beispiel #14
0
def task_result(request):
    """Return the task's result as JSON, or null while it is still running."""
    payload = json.loads(request.body)
    result = AsyncResult(payload.get("task_id"))
    if result.ready():
        return json_response(result.get(timeout=1))
    return json_response(None)
Beispiel #15
0
def instancestate(request):
    """Check on (or kick off) the background task tracking an instance's state."""
    task_id = request.POST.get("task_id", None)
    # Preserve the currently known state until the task reports a new one.
    instance_state = request.POST.get("instance_state", "pending")
    state = {"task_id": None, "instance_state": instance_state}
    if task_id:
        # A task is already running; see whether it has produced a state yet.
        result = AsyncResult(task_id)
        if result.ready():
            state = result.get()
            state["task_id"] = None  # Reset but make sure it exists
        else:
            # Not done: hand the task_id back so the client keeps polling.
            state["task_id"] = task_id
    elif "ec2data" in request.session:
        # No task yet — launch one with the EC2 credentials from the session.
        form = request.session["ec2data"]
        r = tasks.instance_state.delay(form["cloud"], form["access_key"],
                                       form["secret_key"], form["instance_id"])
        state["task_id"] = r.id
    else:
        state = {"instance_state": "Not available"}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
Beispiel #16
0
 def get(self, slug):
     # Render the results page for the job id in ``slug``; fall back to the
     # placeholder job id '00' while the task is still running.
     # NOTE(review): Python 2 print statement — this module targets py2.
     print slug
     res = AsyncResult(slug)
     if res.ready():
         self.render("results.html", username=self.current_user.replace('\"',''), jobid=slug)
     else:
         self.render("results.html", username=self.current_user.replace('\"',''), jobid='00')
Beispiel #17
0
def get_async_result(id, backend="celery"):
    """Return the completed result for task ``id``.

    Only the 'celery' backend is supported; any other backend, or a task
    that has not finished yet, raises ValueError.
    """
    if backend != 'celery':
        raise ValueError("no result")
    res = AsyncResult(id)
    if not res.ready():
        raise ValueError("no result")
    return res.get()
Beispiel #18
0
 def state(self):
     """Return the celery state, mapping a finished 'ERROR' result to 'FAILED'."""
     result = AsyncResult(self.task_id)
     if result.ready() and result.get() == 'ERROR':
         return 'FAILED'
     return result.state
Beispiel #19
0
def analytics_status(request, graph_slug):
    """AJAX endpoint reporting the status of the requested analytics tasks."""
    results = dict()
    analytics_executing = json.loads(request.GET.get("analytics_request"))
    if request.is_ajax() and analytics_executing is not None:
        for task_id in analytics_executing:
            task = AsyncResult(task_id)
            # A revoked task may still report ready, so check the flag too.
            if task.ready() and task.status != REVOKED:
                try:
                    # The execution can outpace the revoked flag; a missing
                    # Analytic row raises ValueError and means "revoked".
                    analytic = Analytic.objects.filter(
                        dump__graph__slug=graph_slug, task_id=task_id).latest()
                    results[task_id] = [
                        STATUS_OK,
                        analytic.results.url,
                        analytic.id,
                        analytic.task_start,
                        analytic.algorithm,
                        analytic.values.url,
                    ]
                except ValueError:
                    results[task_id] = [REVOKED]
            elif task.status == REVOKED:
                results[task_id] = [REVOKED]
    return HttpResponse(json.dumps(results), content_type="application/json")
Beispiel #20
0
    def getTaskResult(self, task_id):
        """Fetch a celery task's outcome and wrap it in a spec.AsyncResult.

        Exception results are translated to an ``[errno, message]`` pair via
        ERRNO_NS; unknown exception classes and serialization failures
        surface as spec.ServerError. A ready result is forgotten on the
        backend once it has been reported.
        """
        async_result = AsyncResult(task_id)
        # Only forget final results; forgetting a pending task's result
        # would discard it before it arrives.
        can_forget = async_result.ready()

        try:
            try:
                result = async_result.result
                if isinstance(result, Exception):
                    # Map the exception class name to a protocol errno.
                    result_cls_name = result.__class__.__name__
                    try:
                        errno = ERRNO_NS[result_cls_name]
                    except KeyError:
                        LOGGER.error('Undefined errno: %s', result_cls_name)
                        raise spec.ServerError()

                    # NOTE(review): Exception.message is Python 2 only.
                    value = [errno, result.message]
                else:
                    value = result

            except Exception as exc:
                # Anything unexpected (including json/attr errors) becomes a
                # generic server error after being logged.
                LOGGER.exception(exc)
                raise spec.ServerError()

            status = getattr(spec.ResultStatus, async_result.status)
            return spec.AsyncResult(status=status, value=json.dumps(value))

        finally:
            if can_forget:
                async_result.forget()
                LOGGER.info('Forgot the result of task %s', task_id)
Beispiel #21
0
def task_list(request):
    """Render the list of visible CeleryRun entries with their task status."""
    from celery.result import AsyncResult

    runs = CeleryRun.objects.filter(hide=False)
    if runs:
        header = get_header(runs)
        header.append('started')
        header.append('created')
        header.remove('hide')
    else:
        header = []

    data = []
    for drun in runs.values():
        # NOTE(review): 'started' is populated from ready(), which reports
        # completion rather than start — confirm the intended meaning.
        drun['started'] = AsyncResult(drun['name']).ready()
        data.append([drun[h] for h in header])

    return render_to_response('celery_task_list.html', {
        'data': data,
        'header': header,
        'title': 'tasks',
    })
def check_status(task_id):
    """Translate a celery task's state into an HTTP response.

    Returns the JSON result on success, the handled-error text with a 400,
    a traceback with a 500 on unhandled failure, or a 202 progress payload
    while the task is still running.
    """
    task = AsyncResult(task_id, app=tasks.celery)
    if not task.ready():
        # Still running: report progress with 202 Accepted.
        status = {
            'result_id': task.id,
            'state': 'PENDING',
            'progress': 0,
        }
        if task.state == 'PROGRESS':
            status.update(task.result)
        response = flask.jsonify(status)
        response.status_code = 202
        return response
    if task.successful():
        # The task returns a (status, value) pair.
        status, result_value = task.result
        if status:  # Valid result
            app.logger.info("result: %s", pprint.pformat(result_value))
            return flask.jsonify(result_value)
        # Handled exception: plain-text 400.
        response = flask.make_response(result_value, 400)
        response.mimetype = 'text/plain'
        return response
    # Unhandled exception: plain-text 500 with the formatted exception.
    exc = task.result
    response = flask.make_response(
        traceback.format_exception_only(type(exc), exc),
        500)
    response.mimetype = 'text/plain'
    return response
Beispiel #23
0
def taskReady(jobObj, redirect="error"):
    """Checks if celery task is ready.

    Args:
        jobObj: job object holding the celery task UID and our own state flag.
        redirect: page to redirect to on error.
    Returns:
        True, None: celery task finished successfully.
        False, HttpResponseRedirect: celery task failed.
        False, None: celery task is still processing.
    """
    task = AsyncResult(jobObj.celeryUID)
    if task.ready():
        if task.successful():
            return True, None
        return False, HttpResponseRedirect(reverse(redirect))
    # When celery reports not-ready we cannot tell why: the task may have
    # fallen out of the celery task table (and so shows Pending) while in
    # fact being done. Our own state flag in jobObj disambiguates.
    if jobObj.state == symTyperTask.DONE:
        return True, None
    if jobObj.state == symTyperTask.ERROR:
        return False, HttpResponseRedirect(reverse(redirect))
    return False, None
Beispiel #24
0
def instancestate(request):
    """
    Give a POST request with ``task_id`` and ``instance_state`` fields, return
    JSON with updated value (given the task has completed or the same as provided)
    value for the ``instance_state`` field and the same value for the ``task_id``.
    ``task_id`` is to correspond to the ID of the background task.
    If instance state is not available, return ``Not available`` as the value
    for ``instance_state``.
    """
    task_id = request.POST.get('task_id', None)
    # Preserve the currently known state until the task reports otherwise.
    instance_state = request.POST.get('instance_state', 'pending')
    state = {'task_id': None, 'instance_state': instance_state, 'error': ''}
    if task_id:
        # A state-checking task is running; collect its result when done.
        result = AsyncResult(task_id)
        if result.ready():
            state = result.get()
            state['task_id'] = None  # Reset but make sure it exists
        else:
            # Not finished yet — return the task_id so the client re-polls.
            state['task_id'] = task_id
    elif 'ec2data' in request.session:
        # No task yet: start one from the session's EC2 credentials.
        form = request.session["ec2data"]
        r = tasks.instance_state.delay(form['cloud'], form["access_key"],
                                       form["secret_key"], form["instance_id"])
        state['task_id'] = r.id
    else:
        state = {'instance_state': 'Not available'}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
 def __call__(self):
     """Plain-text status for the task id given in the request form."""
     result = AsyncResult(self.request.form['task_id'])
     self.request.response.setHeader('Content-Type', 'text/plain')
     if not result.ready():
         return 'Still in progress, retry later!'
     return 'Done! Result is %s' % result.result
Beispiel #26
0
 def get(self, request, uuid):
     """Return the task's result, or a not-finished message, with HTTP 200."""
     # Reconstruct an AsyncResult from the task id; fetch the value only once
     # celery reports it ready.
     res = AsyncResult(uuid)
     task_res = res.get() if res.ready() else "Task not finished!"
     return Response(task_res, status=status.HTTP_200_OK)
Beispiel #27
0
def retrieve_mbs_result(target_task_id):
    """Poll a simulation task until done, then record its outcome.

    Busy-waits (0.1s steps) on the celery result, then updates the matching
    SimulationResult row with the sim id and log collection names on
    success, or marks it FAILED otherwise.
    """
    logger = Task.get_logger()
    r = AsyncResult(target_task_id)
    sr = SimulationResult.objects.get(task_id__exact=target_task_id)
    logger.info(r)

    # Block until the simulation task completes.
    while not r.ready():
        time.sleep(0.1)

    result = json.loads(r.result)

    if result['exit_code'] == 0:
        # Success: record the sim id and the derived log collection names.
        sr.sim_id = result['sim_id']
        # Rewrite this list if you add log collections.
        sr.collections = json.dumps([
                "%s_nwk" % sr.sim_id,
                "%s_node" % sr.sim_id,
                "%s_msg" % sr.sim_id,
                "%s_usr" % sr.sim_id,
                "%s_map" % sr.sim_id,
                ])
        sr.task_progress = 100
        sr.task_status = "SUCCESS"
    else:
        sr.sim_id = "NO SIM_ID (FAILED)"
        sr.task_status = "FAILED"
        sr.task_progress = 0
    sr.save()
Beispiel #28
0
def instancestate(request):
    """
    Given a POST request with ``task_id`` and ``instance_state`` fields, check if
    the task has completed. If so, return JSON with updated value for the
    ``instance_state`` field and start a new task, appropriately setting the
    value of ``task_id``. If the initial ``task_id`` has not completed, return
    the same value for the ``task_id`` field.
    """
    task_id = request.POST.get('task_id', None)
    # Preserve the current state until the background task reports a new one.
    instance_state = request.POST.get('instance_state', 'pending')
    state = {'task_id': None, 'instance_state': instance_state, 'error': ''}
    if task_id:
        # A task is in flight; harvest its result when ready.
        result = AsyncResult(task_id)
        if result.ready():
            state = result.get()
            state['task_id'] = None  # Reset but make sure it exists
        else:
            # Not done yet: echo the task_id so the client polls again.
            state['task_id'] = task_id
    elif 'ec2data' in request.session:
        # No task running — start one from the session's EC2 data.
        form = request.session["ec2data"]
        cloud = form.get('cloud', None)
        a_key = form.get("access_key", None)
        s_key = form.get("secret_key", None)
        instance_id = form.get("instance_id", None)
        if not instance_id:
            state['error'] = "Missing instance ID, cannot check the state."
        r = tasks.instance_state.delay(cloud, a_key, s_key, instance_id)
        state['task_id'] = r.id
    else:
        state = {'instance_state': 'Not available'}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
Beispiel #29
0
 def get(self,request,uuid):
     """Return the task's stringified result, or a not-finished notice."""
     tk = AsyncResult(uuid)
     if not tk.ready():
         res = 'Task not finished!'
     else:
         res = 'Task result is:%s' % str(tk.get())
     return Response(res, status=status.HTTP_200_OK)
Beispiel #30
0
 def async_results(result_id):
     """Return the finished result for ``result_id``.

     Raises:
         DSProcessUnfinished: the task has not completed yet.
         DSProcessError: the task finished in a non-success state.
     """
     outcome = AsyncResult(result_id)
     if not outcome.ready():
         raise DSProcessUnfinished("Result with id {} is not ready.".format(result_id))
     if outcome.status == TaskStates.SUCCESS:
         return outcome.result
     raise DSProcessError("An error occurred during background processing.")
Beispiel #31
0
from agam_test_celery.celery import app
from celery.result import AsyncResult
# Look up a known task by its id and print whether it has finished.
res = AsyncResult("b39b5a64-7eea-410e-b498-47ad21529265")
print(res.ready())
Beispiel #32
0
def job_ready_byid(id):
    """Return True when the task identified by ``id`` has finished."""
    return AsyncResult(id, app=cel_app).ready()
Beispiel #33
0
def check_status(request):
    """Render a status badge for the task id given in the query string."""
    res = AsyncResult(request.GET.get('task_id'))
    # ready() is kept for parity with the original flow; only .status is used.
    res.ready()
    print('res=', res, res.status)
    return render(request, 'status_badge.html', {'status': res.status})
Beispiel #34
0
def get_task(task_id):
    """Return a dict describing the task's id, state and (when done) info."""
    result = AsyncResult(task_id, app=celery)
    task = {'id': result.id, 'state': result.state}
    if result.ready():
        task['info'] = result.info
    return {'task': task}
Beispiel #35
0
def concurrent(url, param_value, req_id, schema):
    """Run a queued concurrent request inside the given tenant schema.

    Marks the ConcurrentList row as started, logs into the tenant's site to
    obtain a JWT, then POSTs the converted ``param_value`` payload to ``url``
    with that token.

    Args:
        url: endpoint to POST the request payload to.
        param_value: single-quoted JSON-ish string; converted to valid JSON.
        req_id: primary key of the ConcurrentList row tracking this request.
        schema: tenant schema name (also used to build the login host).

    Returns:
        The literal string 'berhasil' ("success").
    """
    # time.sleep(10)
    with schema_context(schema_name=schema):
        skema = connection.schema_name
        print(connection.schema_name)
        # Record this celery task's id and start time on the tracking row.
        result = current_task.request.id
        tgl = datetime.datetime.now()
        print('sysdate %s' % tgl)
        get_data = ConcurrentList.objects.get(id=req_id)
        get_data.task_id = result
        get_data.actual_start_date = tgl
        get_data.phase = 'S'
        get_data.save()

        # param_value arrives single-quoted; make it valid JSON first.
        json_acceptable_string = param_value.replace("'", "\"")
        x = json.loads(json_acceptable_string)
        print("http://" + schema + ".harpa.com:8000/login/")
        headers1 = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        # Log in as the tenant sysadmin to obtain a JWT token.
        r1 = requests.post(
            "http://" + schema + ".harpa.com:8000/login/", {
                "user_name": "sysadmin@" + schema + ".harpa.com",
                "password": "******"
            })
        # print("return API %s" % r1 )
        response_data = r1.json()

        print(response_data)

        # # print("=================================================")
        a = 'JWT ' + response_data['token']
        # # print("=================================================")
        # # print("=================================================")
        # # print("=================================================")
        print('JWT %s' % a)
        # # print("=================================================")
        # # print("=================================================")
        # # print("=================================================")
        headers = {
            "Content-type": 'application/json',
            'Accept': 'application/json',
            'Authorization': a
        }
        # # print("=================================================")
        # # print("=================================================")
        # # print("=================================================")
        #
        # # print('ini headers %s'%headers)
        # # print('data=json.dumps(x) %s'% json.dumps(x))
        # POST the actual request payload with the JWT attached.
        r = requests.post(url, data=json.dumps(x), headers=headers)
        # # print(r.text)
        response_data2 = r.json()
        print('response_data2 = %s' % r)
        #
        # # print(result)
        # print('schema name %s' % (connection.schema_name))
        res = AsyncResult(result)
        res.ready()
        # print ('status nya %s'%res.ready())
        # print ('status nya %s'% AsyncResult(current_task.request.id).state)
        # print ('sysdate %s' % tgl)
    return 'berhasil'
Beispiel #36
0
class QueryTask(object):
    """Wrapper around a Celery AsyncResult for query-execution jobs,
    with a Redis lock that deduplicates identical in-flight queries."""

    MAX_RETRIES = 5

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        """Wrap an existing AsyncResult, or look one up by Celery job id."""
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        """Celery task id of the wrapped result."""
        return self._async_result.id

    @classmethod
    def add_task(cls, query, data_source, scheduled=False, metadata=None):
        """Enqueue `query` for execution against `data_source`.

        A Redis lock keyed on the query hash ensures an identical query is
        only executed once at a time: an existing unfinished job is returned
        instead of creating a new one. Returns a QueryTask, or None if the
        lock could not be acquired within MAX_RETRIES attempts.
        """
        # Fix: the default was the mutable `metadata={}`, shared across calls;
        # use None as the sentinel and create a fresh dict per call.
        if metadata is None:
            metadata = {}

        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job", query_hash)
        logging.info("[Manager] Metadata: [%s]", metadata)
        try_count = 0
        job = None

        while try_count < cls.MAX_RETRIES:
            try_count += 1

            pipe = redis_connection.pipeline()
            try:
                # WATCH the lock key so the transaction aborts if another
                # process creates or removes the job concurrently.
                pipe.watch(cls._job_lock_id(query_hash, data_source.id))
                job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
                if job_id:
                    logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)

                    job = cls(job_id=job_id)
                    if job.ready():
                        # Stale lock: the job already finished, so drop the
                        # lock and fall through to create a fresh job below.
                        logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                        redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
                        job = None

                if not job:
                    pipe.multi()

                    if scheduled:
                        queue_name = data_source.scheduled_queue_name
                    else:
                        queue_name = data_source.queue_name

                    result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                    job = cls(async_result=result)

                    logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                    pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                    pipe.execute()
                break

            except redis.WatchError:
                # The watched key changed under us; retry the check-and-set.
                continue

        if not job:
            logging.error("[Manager][%s] Failed adding job for query.", query_hash)

        return job

    def to_dict(self):
        """Serialize the task state into the legacy Job-style dict."""
        if self._async_result.status == 'STARTED':
            updated_at = self._async_result.result.get('start_time', 0)
        else:
            updated_at = 0

        if self._async_result.failed() and isinstance(self._async_result.result, Exception):
            # Fix: `Exception.message` does not exist on Python 3;
            # str(exception) works on both Python 2 and 3.
            error = str(self._async_result.result)
        elif self._async_result.status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if self._async_result.successful():
            query_result_id = self._async_result.result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': self.STATUSES[self._async_result.status],
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        """True if the task was revoked."""
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        """Raw Celery state string of the wrapped result."""
        return self._async_result.status

    def ready(self):
        """True once the task reached a terminal state."""
        return self._async_result.ready()

    def cancel(self):
        # SIGINT lets the worker handle cancellation like a KeyboardInterrupt.
        return self._async_result.revoke(terminate=True, signal='SIGINT')

    @staticmethod
    def _job_lock_id(query_hash, data_source_id):
        """Redis key that locks a (data source, query hash) pair."""
        return "query_hash_job:%s:%s" % (data_source_id, query_hash)
Beispiel #37
0
def get_job_status(job_id):
    """Return a JSON payload reporting whether the Celery task is finished."""
    finished = AsyncResult(job_id, backend=celery_object.backend).ready()
    return jsonify({"status": finished})
Beispiel #38
0
def checkStatus(req):
    """JSON endpoint: report whether the task named by ?task_id= is done."""
    task_id = req.GET.get('task_id')
    if not task_id:
        # No task id supplied: report not-ready rather than erroring out.
        return JsonResponse({'status': False})
    return JsonResponse({'status': AsyncResult(task_id).ready()})
Beispiel #39
0
class QueryTask(object):
    """Thin wrapper around a Celery AsyncResult for query-execution jobs."""

    # TODO: this is mapping to the old Job class statuses. Need to update the
    # client side and remove this.
    STATUSES = {
        "PENDING": 1,
        "STARTED": 2,
        "SUCCESS": 3,
        "FAILURE": 4,
        "REVOKED": 4
    }

    def __init__(self, job_id=None, async_result=None):
        """Wrap the given AsyncResult, or look one up by Celery job id."""
        self._async_result = async_result if async_result else AsyncResult(job_id, app=celery)

    @property
    def id(self):
        """Celery task id of the wrapped result."""
        return self._async_result.id

    def to_dict(self):
        """Serialize the task's state into the legacy Job-style dict."""
        meta = self._async_result._get_task_meta()
        outcome = meta["result"]
        state = meta["status"]

        # While running, the worker stores a dict carrying the start time.
        updated_at = outcome.get("start_time", 0) if state == "STARTED" else 0
        status = self.STATUSES[state]

        if isinstance(outcome, (TimeLimitExceeded, SoftTimeLimitExceeded)):
            error = TIMEOUT_MESSAGE
            status = 4
        elif isinstance(outcome, Exception):
            error = str(outcome)
            status = 4
        elif state == "REVOKED":
            error = "Query execution cancelled."
        else:
            error = ""

        query_result_id = outcome if state == "SUCCESS" and not error else None

        return {
            "id": self._async_result.id,
            "updated_at": updated_at,
            "status": status,
            "error": error,
            "query_result_id": query_result_id,
        }

    @property
    def is_cancelled(self):
        """True if the task was revoked."""
        return self._async_result.status == "REVOKED"

    @property
    def celery_status(self):
        """Raw Celery state string of the wrapped result."""
        return self._async_result.status

    def ready(self):
        """True once the task reached a terminal state."""
        return self._async_result.ready()

    def cancel(self):
        """Revoke the task, interrupting the worker with SIGINT."""
        return self._async_result.revoke(terminate=True, signal="SIGINT")
Beispiel #40
0
def task_finished(task_id):
    """Returns whether specified task has already finished"""
    # ready() is True for any terminal state (SUCCESS, FAILURE, REVOKED).
    return AsyncResult(task_id).ready()
Beispiel #41
0
def task_result(task_id):
    """Returns result of the task. Returns None if no result yet"""
    async_res = AsyncResult(task_id)
    # Only fetch when already finished; get() would otherwise block/raise.
    return async_res.get(timeout=1) if async_res.ready() else None
Beispiel #42
0
def task_result(request, task_id):
    """Render the task's result as plain text, or a not-ready notice."""
    async_res = AsyncResult(task_id)
    if not async_res.ready():
        return HttpResponse('Result is not ready yet!')
    return HttpResponse('Result is: %s' % (async_res.result, ))
Beispiel #43
0
def submission_results(request, user, slug):
    """ Returns existing submissions results for IO test cases.

    Polls the Celery task behind each pending (status "PD") TestResult,
    folds any finished task's output back into the TestResult row, cleans up
    the task metadata and temp files, and renders the per-test-case results
    table for the submission.
    """
    challenge = get_object_or_404(Challenge, slug=slug)
    submission = get_object_or_404(
        Submission,
        challenge=challenge,
        author=user,
    )
    if not submission.has_view_results_permission(request, for_user=user):
        return HttpResponseForbidden()

    trace = ""
    result = None
    # Resolve every still-pending test result whose Celery task has finished.
    for tr in TestResult.objects.filter(submission=submission, status="PD"):
        async_result = AsyncResult(tr.task_id)
        if async_result.ready():
            result = async_result.result
            trace = str(result)
            # regular run task result is either Exception instance or dict
            #if isinstance(result, Exception):  # for regular run task
            # run_popen check if in result string there is Exception traceback
            if "Traceback" in trace:
                tr.status = 'EX'
            else:
                # run_popen task result is json string so convert it to dict
                result = json.loads(trace)
                tr.status = result['status']
                tr.memory = result['memory']
                tr.cputime = result['cputime']
                tr.result = check_output(tr.test_case.output.path,
                                         result['outpath'])
            # remove celery task meta from the database
            try:
                tm = TaskMeta.objects.get(task_id=tr.task_id)
            except TaskMeta.DoesNotExist:
                pass
            else:
                tm.delete()
            # NOTE(review): on the Traceback branch `result` is still the raw
            # task result (not the parsed dict), so these subscript lookups
            # assume the task result always carries temp-file paths -- confirm
            # behavior for the 'EX' case.
            os.remove(result['outpath'])
            os.remove(result['errpath'])
            tr.save()
    #if there are no pending test results delete program
    if len(TestResult.objects.filter(submission=submission, status="PD")) == 0:
        if result is not None:
            os.remove(result['program'])
    test_results = TestResult.objects.filter(submission=submission)
    # Flatten model rows into template-friendly dicts, mapping the status and
    # result codes to their human-readable labels.
    trs = [{
        'status': dict(TestResult.STATUSES)[tr.status],
        'status_code': tr.status.lower(),
        'result': dict(TestResult.RESULTS)[tr.result],
        'result_code': tr.result.lower(),
        'memory': tr.memory,
        'cputime': tr.cputime,
        'test_case': tr.test_case,
    } for tr in test_results]
    # Mark the submission as tested once every test case has been resolved.
    if (len(test_results) > 0 and len(test_results.filter(status="PD")) == 0
            and submission.status != "TS"):
        submission.status = "TS"  # Tested
        submission.save()
    return render_to_response(
        "onlinejudge/partials/submission_results.html",
        {
            "submission": submission,
            "submission_result_code": submission.result.lower(),
            "submission_result": dict(Submission.RESULTS)[submission.result],
            "test_results": trs,
            "trace": trace,
        },
        RequestContext(request),
    )
Beispiel #44
0
import sys
from celery.result import AsyncResult
from tasks import add, pdf_site


if __name__ == "__main__":
    # Usage: <script> download <site>  |  <script> check <task-uuid>
    if len(sys.argv) != 3:
        raise Exception('Call with 3 arguments!')

    if sys.argv[1].lower() == 'download':
        site = sys.argv[2].lower()
        # The PDF is written to downloads/<site>.pdf with dots replaced
        # by underscores.
        result = pdf_site.delay(site, 'downloads/{}.pdf'.format(site.replace('.','_')))
        print('PDF for {} will be generated. Check with this UUID:{}'.format(site, result.task_id))

    elif sys.argv[1].lower() == 'check':
        task_id = sys.argv[2].lower()
        res = AsyncResult(task_id)
        if res.ready():
            # Fix: the message previously pointed at 'download/{}.pdf' without
            # the '.'->'_' substitution, which does not match the path the
            # download command actually writes ('downloads/<site with _>.pdf').
            print('PDF for {} is located in downloads/{}.pdf'.format(res.result, res.result.replace('.','_')))
        else:
            print('Still not ready')

Beispiel #45
0
    def read_experiment_search_results(channel_name, view, request, user):
        """Send one page of public-DB experiment search results over a Channel.

        Filters ExperimentSearchResult rows of the requested compendium by the
        free-text 'filter' value, applies paging/ordering, annotates each hit
        with provider link bases and a tag ('platform'/'present'/'excluded'),
        and pushes the JSON payload to `channel_name`.

        NOTE(review): despite the name, `request` here appears to be a plain
        dict of parameters ('compendium_id', 'page', 'page_size', 'filter',
        'ordering', 'ordering_value', 'view'), not an HttpRequest -- confirm
        against the caller.
        """
        channel = Channel(channel_name)

        start = 0
        end = None
        compendium = CompendiumDatabase.objects.get(id=request['compendium_id'])
        task_running = False
        operation = 'search_experiment_public_db'
        if request['page_size']:
            # Translate the 1-based page number into a queryset slice.
            start = (request['page'] - 1) * request['page_size']
            end = start + request['page_size']
        try:
            # Report whether the background search task for this view is still
            # running; a missing ViewTask row simply means "not running".
            task_id = ViewTask.objects.using(compendium.compendium_nick_name).get(view=request['view'],
                                                                                  operation=operation)
            task = AsyncResult(task_id.task_id)
            task_running = not task.ready()
        except Exception as e:
            pass

        order = ''
        if request['ordering'] == 'DESC':
            order = '-'
        # Apply the free-text filter across every searchable column.
        query_response = ExperimentSearchResult.objects.using(compendium.compendium_nick_name). \
            filter(Q(organism__icontains=request['filter']) |
                   Q(experiment_access_id__icontains=request['filter']) |
                   Q(experiment_alternative_access_id__icontains=request['filter']) |
                   Q(platform__icontains=request['filter']) |
                   Q(scientific_paper_ref__icontains=request['filter']) |
                   Q(type__icontains=request['filter']) |
                   Q(description__icontains=request['filter']) |
                   Q(experiment_name__icontains=request['filter'])).order_by(
            order + request['ordering_value'])
        total = query_response.count()
        query_response = query_response[start:end]

        experiments = []
        for exp in query_response:
            e = exp.to_dict()
            # Instantiate the data source's provider class (dotted path stored
            # on the model) to obtain its accession base links.
            module_name, class_name = '.'.join(exp.data_source.python_class.split('.')[:-1]), \
                                      exp.data_source.python_class.split('.')[-1]
            python_class = getattr(importlib.import_module(module_name), class_name)()
            e['experiment_accession_base_link'] = python_class.experiment_accession_base_link
            e['platform_accession_base_link'] = python_class.platform_accession_base_link
            e['scientific_paper_accession_base_link'] = python_class.scientific_paper_accession_base_link
            e['tag'] = ''
            if exp.platform:
                # Tag the hit if any of its platforms has bio-feature reporters.
                for plt_acc_id in exp.platform.split(';'):
                    try:
                        plt = Platform.objects.using(compendium.compendium_nick_name).\
                            get(platform_access_id=plt_acc_id)
                        if plt.biofeaturereporter_set.count() > 0:
                            e['tag'] = 'platform'
                    except Exception as exc:
                        pass
            try:
                # If the experiment was already imported, override the tag and
                # (when not mid-download) the status with the local values.
                already_present_exp = Experiment.objects.using(compendium.compendium_nick_name).\
                    get(experiment_access_id=exp.experiment_access_id)
                if already_present_exp.status.name == 'experiment_excluded':
                    e['tag'] = 'excluded'
                else:
                    e['tag'] = 'present'
                if e['status']['name'] != 'scheduled' and e['status']['name'] != 'experiment_downloading':
                    e['status'] = already_present_exp.status.to_dict()
            except Exception as exc:
                pass
            experiments.append(e)

        channel.send({
            'text': json.dumps({
                'stream': view,
                'payload': {
                    'request': request,
                    'data': {
                        'experiments': experiments,
                        'task_running': task_running,
                        'total': total
                    }
                }
            })
        })
Beispiel #46
0
def check(request, task_id):
    """Report the finished task's question count, or None while running."""
    async_res = AsyncResult(task_id)
    questions_count = async_res.get() if async_res.ready() else None
    return JsonResponse({"questions_count": questions_count})
Beispiel #47
0
 def get(self, uuid):
     """Return the task's completion flag and raw Celery state."""
     task = AsyncResult(id=uuid, app=celery_app)
     return {"finished": task.ready(), "status": task.state}
Beispiel #48
0
    'Flo ne spune buna dimineata',
    'Mitza flexeaza muschiul',
    'Geo tureaza Bavarian Motor Works',
]

print("---- SENDER ----")

try:
    while True:
        message = random.sample(MESSAGES, 1)[0]

        print(f"Sending: \"{message}\"")
        response = app.send_task('bla.some', (message, ))

        # what is the id
        print(f"Task ID: {response.id}")

        response = AsyncResult(id=response.id, app=app)

        for i in range(5):
            if response.ready():
                result = response.get(timeout=1)
                print(f"Result is {result}")
                break

            time.sleep(0.5)

        time.sleep(1)
except KeyboardInterrupt:
    print("Warm interrupt of the sender")
Beispiel #49
0
    def testdestination(self, destinationid):
        """Test mail destination server.

        The first request (no ?taskid=) queues an async SMTP connectivity
        test and redirects back with the task id; subsequent polls report the
        result, giving up after 10 polls if the backend never picks up the
        task.
        """
        server = self._get_server(destinationid)
        if not server:
            abort(404)

        taskid = request.GET.get('taskid', None)
        if not taskid:
            # First visit: queue a test delivery to postmaster@<domain> and
            # remember the task id in the session for later polling.
            to_addr = 'postmaster@%s' % server.domains.name
            task = test_smtp_server.apply_async(args=[
                server.address, server.port, '<>', to_addr, server.id, 3
            ])
            taskid = task.task_id
            if 'taskids' not in session:
                session['taskids'] = []
            session['taskids'].append(taskid)
            session['testdest-count'] = 1
            session.save()
            redirect(url.current(taskid=taskid))
        else:
            result = AsyncResult(taskid)
            # Only accept task ids this session actually created.
            if result is None or taskid not in session['taskids']:
                flash(_('The connection test failed try again later'))
                redirect(url('domain-detail', domainid=server.domain_id))
            if result.ready():
                # Success requires both the ping and SMTP checks to pass.
                if ('smtp' in result.result and 'ping' in result.result
                        and result.result['smtp'] and result.result['ping']):
                    msg = _('The server: %s is up and accepting mail from us' %
                            server.address)
                    flash(msg)
                    log.info(msg)
                else:
                    # Prefer the ping error; fall back to the SMTP error.
                    if 'ping' in result.result['errors']:
                        errors = result.result['errors']['ping']
                    else:
                        errors = result.result['errors']['smtp']
                    msg = _('The server: %s is not accepting mail from us: %s') \
                            % (server.address, errors)
                    flash(msg)
                    log.info(msg)
                redirect(url('domain-detail', domainid=server.domain_id))
            else:
                # Still pending: count polls and revoke the task if it never
                # left the queue after 10 attempts.
                try:
                    session['testdest-count'] += 1
                except KeyError:
                    session['testdest-count'] = 1
                session.save()
                if (session['testdest-count'] >= 10
                        and result.state in ['PENDING', 'RETRY', 'FAILURE']):
                    result.revoke()
                    del session['testdest-count']
                    session.save()
                    msg = 'Failed to initialize backend, try again later'
                    flash_alert(msg)
                    log.info(msg)
                    redirect(url('domain-detail', domainid=server.domain_id))

        c.server = server
        c.domainid = server.domain_id
        c.taskid = taskid
        c.finished = False
        return self.render('/domains/testdestination.html')
Beispiel #50
0
def download_gradebook(req, teacher, results=None):
    """
    Download the wanted gradebook.

    Parameters
    ----------
    req : HttpRequest
        Request with:
            parameters:
                task_id: str
                    Id of the celery task responsible for the gradebook
                    generation sent with the first request for a gradebook
    teacher : Teacher
        Teacher instance returned by `teacher_required` (not used)
    results : Optional[Dict[str, Any]]
        Either
            If group gradebook
                {
                    group: str
                        Title of the group
                    assignments: List[str]
                        Assignment identifier
                    school_id_needed: bool
                        If a school id is needed
                    results: [{
                        school_id: Optional[str]
                            School id if needed
                        email: str
                            Student email
                        assignments: [{
                            n_completed: Optional[int]
                                Number of completed questions
                            n_correct: Optional[int]
                                Number of correct questions
                        }]
                    }]
                }
            If assignment gradebook
                {
                    group: str
                        Title of the group
                    assignment: str
                        Title of the assignment
                    questions: List[str]
                        Question title
                    school_id_needed: bool
                        If a school id is needed
                    results: [{
                        school_id: Optional[str]
                            School id if needed
                        email: str
                            Student email
                        questions: List[Optional[float]]
                            Grade for each question
                    }]
                }

    Returns
    -------
    StreamingHttpResponse
        csv file with the gradebook results
    """
    if results is None:
        args = get_json_params(req, args=["task_id"])
        if isinstance(args, HttpResponse):
            return args
        (task_id, ), _ = args

        result = AsyncResult(task_id)

        try:
            if not result.ready():
                return response_400(
                    req,
                    msg="The gradebook isn't ready.",
                    logger_msg="Not completed gradebook {}".format(task_id) +
                    " accessed by teacher {}".format(teacher.user.username),
                )
        except AttributeError:
            # No result backend entry for this id: the task is unknown.
            return response_500(
                req,
                msg="There is no gradebook corresponding to this url. "
                "Please ask for a new one.",
                logger_msg="Celery error getting gradebook"
                " for teacher {}".format(teacher.user.username) +
                " and task {}.".format(task_id),
                log=logger.warning,
                use_template=False,
            )

        results = result.result

        # Fix: was `filter(...)` followed by `get(...).delete()` -- two
        # queries and a race window; a single queryset delete is equivalent
        # (id is the primary key) and a no-op when no row matches.
        RunningTask.objects.filter(id=task_id).delete()

    # The assignment gradebook embeds the assignment title in the filename.
    if "assignment" in results:
        filename = "myDALITE_gradebook_{}_{}.csv".format(
            results["group"], results["assignment"])
    else:
        filename = "myDALITE_gradebook_{}.csv".format(results["group"])
    gradebook_gen = convert_gradebook_to_csv(results)
    # Stream the filename as the first line, then the generated csv rows.
    data = chain(iter((filename + "\n", )), gradebook_gen)
    resp = StreamingHttpResponse(data, content_type="text/csv")
    return resp
Beispiel #51
0
def configure_course(request, course_key=None, course=None):
    """Course configuration view.

    GET renders the configuration page; a non-AJAX POST creates or overwrites
    exercises from a previously fetched provider-API table; an AJAX POST
    starts or polls the background provider-API read task.
    """
    context = {
        "hierarchy":
        ((settings.APP_NAME, reverse("index")),
         (course.name, reverse("course",
                               kwargs={"course_key":
                                       course.key})), ("Configure", None)),
        "course":
        course,
        "provider_data": [
            {
                "description":
                "{:s}, all submission data are retrieved from here".format(
                    course.provider_name),
                "path":
                settings.PROVIDERS[course.provider].get("host", "UNKNOWN"),
            },
            {
                "description":
                "Data providers should POST the IDs of new submissions to this path in order to have them automatically downloaded by Radar",
                "path":
                request.build_absolute_uri(
                    reverse("hook_submission",
                            kwargs={"course_key": course.key})),
            },
            {
                "description":
                "Login requests using the LTI-protocol should be made to this path",
                "path": request.build_absolute_uri(reverse("lti_login")),
            },
        ],
        "errors": [],
    }

    # The state of the API read task is contained in this dict
    pending_api_read = {
        "task_id": None,
        "poll_URL": reverse("configure_course",
                            kwargs={"course_key": course.key}),
        "ready": False,
        "poll_interval_seconds": 5,
        "config_type": "automatic"
    }

    if request.method == "GET":
        if "true" in request.GET.get("success", ''):
            # All done, show success message
            context["change_success"] = True
        # Serialized copy of the poll state for the client-side poller.
        pending_api_read["json"] = json.dumps(pending_api_read)
        context["pending_api_read"] = pending_api_read
        return render(request, "review/configure.html", context)

    if request.method != "POST":
        return HttpResponseBadRequest()

    p_config = provider_config(course.provider)

    if "create-exercises" in request.POST or "overwrite-exercises" in request.POST:
        # API data has been fetched in a previous step, now the user wants to add exercises that were shown in the table
        if "create-exercises" in request.POST:
            # Pre-configured, read-only table
            exercises = json.loads(request.POST["exercises-json"])
            for exercise_data in exercises:
                key_str = str(exercise_data["exercise_key"])
                exercise = course.get_exercise(key_str)
                exercise.set_from_config(exercise_data)
                exercise.save()
                # Queue fetch and match for all submissions for this exercise
                full_reload = configured_function(p_config, "full_reload")
                full_reload(exercise, p_config)
        elif "overwrite-exercises" in request.POST:
            # Manual configuration, editable table, overwrite existing
            # Rows are selected via '<key>-enabled' checkbox fields.
            checked_rows = (key.split("-", 1)[0] for key in request.POST
                            if key.endswith("enabled"))
            exercises = ({
                "exercise_key":
                exercise_key,
                "name":
                request.POST[exercise_key + "-name"],
                "template_source":
                request.POST.get(exercise_key + "-template-source", ''),
                "tokenizer":
                request.POST[exercise_key + "-tokenizer"],
                "minimum_match_tokens":
                request.POST[exercise_key + "-min-match-tokens"]
            } for exercise_key in checked_rows)
            for exercise_data in exercises:
                key = str(exercise_data["exercise_key"])
                # Drop any existing exercise with this key before recreating.
                course.exercises.filter(key=key).delete()
                exercise = course.get_exercise(key)
                exercise.set_from_config(exercise_data)
                exercise.save()
                full_reload = configured_function(p_config, "full_reload")
                full_reload(exercise, p_config)
        return redirect(
            reverse("configure_course", kwargs={"course_key": course.key}) +
            "?success=true")

    # NOTE(review): request.is_ajax() was removed in Django 4.0 -- confirm the
    # project's Django version.
    if not request.is_ajax():
        return HttpResponseBadRequest("Unknown POST request")

    pending_api_read = json.loads(request.body.decode("utf-8"))

    if pending_api_read["task_id"]:
        # Task is pending, check state and return result if ready
        async_result = AsyncResult(pending_api_read["task_id"])
        if async_result.ready():
            pending_api_read["ready"] = True
            pending_api_read["task_id"] = None
            if async_result.state == "SUCCESS":
                # Render the fetched exercise table as HTML for the client.
                exercise_data = async_result.get()
                async_result.forget()
                config_table = template_loader.get_template(
                    "review/configure_table.html")
                exercise_data["config_type"] = pending_api_read["config_type"]
                pending_api_read["resultHTML"] = config_table.render(
                    exercise_data, request)
            else:
                pending_api_read["resultHTML"] = ''
        return JsonResponse(pending_api_read)

    if pending_api_read["ready"]:
        # The client might be polling a few times even after it has received the results
        return JsonResponse(pending_api_read)

    # Put full read of provider API on task queue and store the task id for tracking
    has_radar_config = pending_api_read["config_type"] == "automatic"
    async_api_read = configured_function(p_config, "async_api_read")
    pending_api_read["task_id"] = async_api_read(request, course,
                                                 has_radar_config)
    return JsonResponse(pending_api_read)
Beispiel #52
0
    def __poll_tasks(self, project):
        """Collect status metadata for all registered jobs of a project.

        Returns a tuple (status, task_ongoing): `status` maps each task id to
        a dict with its type, submission time, Celery state and meta info
        (plus sub-job details for grouped jobs); `task_ongoing` is True while
        at least one task has not reached a terminal state.
        """
        status = {}
        task_ongoing = False

        # Fix: was `if not project in self.messages` -- use the idiomatic
        # (and equivalent) `not in` operator.
        if project not in self.messages:
            return status, task_ongoing

        for key in self.messages[project].keys():
            job = self.messages[project][key]
            msg = self.celery_app.backend.get_task_meta(key)
            if not len(msg):
                continue

            # check for worker failures
            if msg['status'] == celery.states.FAILURE:
                # append failure message
                if 'meta' in msg:  #TODO: and isinstance(msg['meta'], BaseException):
                    info = {'message': html.escape(str(msg['meta']))}
                else:
                    info = {'message': 'an unknown error occurred'}
            else:
                info = msg['result']

            status[key] = {
                'type': job['type'],
                'submitted':
                job['submitted'],  #TODO: not broadcast across AIController threads...
                'status': msg['status'],
                'meta': info
            }
            if 'subjobs' in job:
                # Flatten sub-job states; GroupResult entries expand into
                # one record per grouped result.
                subjobEntries = []
                for subjob in job['subjobs']:
                    if isinstance(subjob, GroupResult):
                        entry = {'id': subjob.id}
                        subEntries = []
                        for res in subjob.results:
                            subEntry = {
                                'id':
                                res.id,
                                'status':
                                res.status,
                                'meta': ('complete' if res.status == 'SUCCESS'
                                         else str(res.result))  #TODO
                            }
                            subEntries.append(subEntry)
                        entry['subjobs'] = subEntries
                    else:
                        entry = {
                            'id':
                            subjob.id,
                            'status':
                            subjob.status,
                            'meta': ('complete' if subjob.status == 'SUCCESS'
                                     else str(subjob.result))  #TODO
                        }
                    subjobEntries.append(entry)
                status[key]['subjobs'] = subjobEntries

            # check if ongoing
            result = AsyncResult(key)
            if result.ready():  #TODO: chains somehow get stuck in 'PENDING'...
                # done; remove from queue
                result.forget()
                status[key]['status'] = 'SUCCESS'
            elif result.failed():
                # failed
                result.forget()
                status[key]['status'] = 'FAILURE'
            else:
                task_ongoing = True
        return status, task_ongoing
Beispiel #53
0
class QueryTask(object):
    """Thin wrapper around a Celery AsyncResult for query-execution jobs."""

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        """Wrap the given AsyncResult, or look one up by Celery job id."""
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        """Celery task id of the wrapped result."""
        return self._async_result.id

    def to_dict(self):
        """Serialize the task's state into the legacy Job-style dict."""
        task_info = self._async_result._get_task_meta()
        result, task_status = task_info['result'], task_info['status']
        if task_status == 'STARTED':
            # While running, the worker stores a dict carrying the start time.
            updated_at = result.get('start_time', 0)
        else:
            updated_at = 0

        status = self.STATUSES[task_status]

        if isinstance(result, (TimeLimitExceeded, SoftTimeLimitExceeded)):
            error = "Query exceeded Redash query execution time limit."
            status = 4
        elif isinstance(result, Exception):
            # Fix: `Exception.message` does not exist on Python 3; use
            # str(result), matching the sibling QueryTask implementation.
            error = str(result)
            status = 4
        elif task_status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if task_status == 'SUCCESS' and not error:
            query_result_id = result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': status,
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        """True if the task was revoked."""
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        """Raw Celery state string of the wrapped result."""
        return self._async_result.status

    def ready(self):
        """True once the task reached a terminal state."""
        return self._async_result.ready()

    def cancel(self):
        """Revoke the task, interrupting the worker with SIGINT."""
        return self._async_result.revoke(terminate=True, signal='SIGINT')
Beispiel #54
0
 def requeue_subsequent_tasks(self, task_id):
     """Wait on an upstream task: retry this task while it is unfinished,
     then propagate any failure and return its result."""
     upstream = AsyncResult(task_id)
     if not upstream.ready():
         # Upstream not done yet: reschedule ourselves instead of blocking.
         self.retry()
     upstream.maybe_reraise()
     return upstream.get()
Beispiel #55
0
class CombineBackgroundTask(models.Model):
    '''
    Model for long running, background tasks

    Pairs a database row with a Celery task (tracked by ``celery_task_id``)
    so the task's status, parameters, output, and timing can be queried and
    updated from the application.
    '''

    # human-readable label for the task
    name = models.CharField(max_length=255, null=True, default=None)
    # kind of background work this row tracks
    task_type = models.CharField(
        max_length=255,
        choices=[('job_delete', 'Job Deletion'),
                 ('record_group_delete', 'Record Group Deletion'),
                 ('org_delete', 'Organization Deletion'),
                 ('validation_report', 'Validation Report Generation'),
                 ('export_mapped_fields', 'Export Mapped Fields'),
                 ('export_documents', 'Export Documents'),
                 ('job_reindex', 'Job Reindex Records'),
                 ('job_new_validations', 'Job New Validations'),
                 ('job_remove_validation', 'Job Remove Validation')],
        default=None,
        null=True)
    # id of the Celery task backing this row
    celery_task_id = models.CharField(max_length=128, null=True, default=None)
    # JSON dump of the Celery task's result/status/traceback (see update())
    celery_task_output = models.TextField(null=True, default=None)
    # JSON-encoded parameters the task was started with (see task_params)
    task_params_json = models.TextField(null=True, default=None)
    # JSON-encoded output produced by the task (see task_output)
    task_output_json = models.TextField(null=True, default=None)
    start_timestamp = models.DateTimeField(null=True, auto_now_add=True)
    finish_timestamp = models.DateTimeField(null=True,
                                            default=None,
                                            auto_now_add=False)
    # True once the Celery task has finished (or polling it failed)
    completed = models.BooleanField(default=False)

    def __str__(self):
        '''
        Debug-friendly representation with DB id and Celery task id
        '''
        return 'CombineBackgroundTask: %s, ID #%s, Celery Task ID #%s' % (
            self.name, self.id, self.celery_task_id)

    def update(self):
        '''
        Method to update completed status, and affix task to instance

        Polls the Celery result backend for the task's state.  When the
        task has finished, marks this row completed, records the finish
        timestamp, and stores the task's result/status/traceback as JSON.
        Any error while polling marks the task STOPPED and completed.
        '''

        # get async task from Redis
        try:

            self.celery_task = AsyncResult(self.celery_task_id)
            self.celery_status = self.celery_task.status

            if not self.completed:

                # if ready (finished)
                if self.celery_task.ready():

                    # set completed
                    self.completed = True

                    # update timestamp
                    self.finish_timestamp = datetime.datetime.now()

                    # handle result type
                    # a failed task stores the raised exception as its
                    # result; stringify so it can be JSON-serialized below
                    if isinstance(self.celery_task.result, Exception):
                        result = str(self.celery_task.result)
                    else:
                        result = self.celery_task.result

                    # save json of async task output
                    task_output = {
                        'result': result,
                        'status': self.celery_task.status,
                        'task_id': self.celery_task.task_id,
                        'traceback': self.celery_task.traceback
                    }
                    self.celery_task_output = json.dumps(task_output)

                    # save
                    self.save()

        except Exception as err:
            # polling failed (e.g. backend unavailable); mark as stopped so
            # callers do not wait on this task forever
            self.celery_task = None
            self.celery_status = 'STOPPED'
            self.completed = True
            self.save()
            LOGGER.debug(str(err))

    def calc_elapsed_as_string(self):
        '''
        Return elapsed time for this task formatted as "H:MM:SS"
        '''

        # determine time elapsed in seconds

        # completed, with timestamp
        if self.completed and self.finish_timestamp != None:
            # use finish timestamp
            # NOTE(review): timedelta.seconds drops the days component, so
            # tasks running over 24h report wrapped values -- confirm
            # whether .total_seconds() was intended
            seconds_elapsed = (
                self.finish_timestamp.replace(tzinfo=None) -
                self.start_timestamp.replace(tzinfo=None)).seconds

        # marked as completed, but not timestamp, set to zero
        elif self.completed:
            seconds_elapsed = 0

        # else, calc until now
        else:
            seconds_elapsed = (
                datetime.datetime.now() -
                self.start_timestamp.replace(tzinfo=None)).seconds

        # return as string
        minutes, seconds = divmod(seconds_elapsed, 60)
        hours, minutes = divmod(minutes, 60)

        return "%d:%02d:%02d" % (hours, minutes, seconds)

    @property
    def task_params(self):
        '''
        Property to return JSON params as dict

        Returns an empty dict when no params have been stored.
        '''

        if self.task_params_json:
            return json.loads(self.task_params_json)
        return {}

    def update_task_params(self, update_d, save=True):
        '''
        Method to update tasks params

        Args:
            update_d (dict): Dictionary to update self.task_params with

        Returns:
            None
        '''

        # update json
        task_params = self.task_params
        task_params.update(update_d)
        self.task_params_json = json.dumps(task_params)

        # save
        if save:
            self.save()

    @property
    def task_output(self):
        '''
        Property to return JSON output as dict

        Returns an empty dict when no output has been stored.
        '''

        if self.task_output_json:
            return json.loads(self.task_output_json)
        return {}

    def cancel(self):
        '''
        Method to cancel background task

        Stops related Spark jobs when a 'job_id' is present in the task
        params, revokes the Celery task, and marks this row completed.
        '''

        # attempt to stop any Spark jobs
        if 'job_id' in self.task_params.keys():
            job_id = self.task_params['job_id']
            LOGGER.debug('attempt to kill spark jobs related to Job: %s',
                         job_id)
            job = mod_job.Job.objects.get(pk=int(job_id))
            job.stop_job(cancel_livy_statement=False, kill_spark_jobs=True)

        # revoke celery task
        if self.celery_task_id:
            revoke(self.celery_task_id, terminate=True)

        # update status
        self.refresh_from_db()
        self.completed = True
        self.save()

    @classmethod
    def to_rerun_jobs(cls, job_ids):
        '''
        Create and save a "Rerun Jobs Prep" task row for the given job ids.

        Args:
            job_ids (list): ordered job ids to rerun

        Returns:
            CombineBackgroundTask: the saved task instance
        '''
        # NOTE(review): 'rerun_jobs_prep' is not among the task_type
        # choices declared above -- confirm whether it should be added
        task = cls(name="Rerun Jobs Prep",
                   task_type='rerun_jobs_prep',
                   task_params_json=json.dumps(
                       {'ordered_job_rerun_set': job_ids}))
        task.save()
        return task
    @staticmethod
    def getTasksInfo(tasks, forgetIfFinished=True):
        '''
        Poll Celery for the current state of a list of task descriptors.

        Declared @staticmethod: without it an instance call would bind the
        instance itself as ``tasks``.

        Args:
            tasks: list of dicts (or a JSON string encoding one); each dict
                carries an 'id' key and may carry a 'children' list of
                similar dicts.
            forgetIfFinished (bool): when True, finished results are
                forgotten (released from the result backend).

        Returns:
            (tasks, hasFinished, errors): the mutated task list, whether
            the final task in the list has finished, and a list of error
            strings collected from failed tasks/children.
            Returns (None, False, None) when ``tasks`` is None.
        '''
        if tasks is None:
            return None, False, None
        if isinstance(tasks, str):
            tasks = json.loads(tasks)
        errors = []
        # guard: an empty list would raise IndexError on tasks[-1] below
        if not tasks:
            return tasks, False, errors

        def _refresh(entry):
            '''Update one task dict in place; return True when finished.'''
            result = AsyncResult(entry['id'])
            finished = result.ready()
            if finished:
                entry['successful'] = result.successful()
                if entry['successful']:
                    entry['info'] = None
                else:
                    # get() re-raises the task's exception on failure;
                    # capture its message either way
                    try:
                        error = str(result.get())
                    except Exception as e:
                        error = str(e)
                    errors.append(error)
                    entry['info'] = {'message': error}
                if forgetIfFinished:
                    result.forget()
            elif result.info is not None:
                entry['info'] = result.info
            if result.status is not None:
                entry['status'] = result.status
            return finished

        for task in tasks:
            _refresh(task)
            children = task.get('children')
            if children is not None:
                numDone = sum(1 for child in children if _refresh(child))
                task['num_done'] = numDone
                # all children done: override the parent's reported status
                if numDone == len(children):
                    task['status'] = 'SUCCESSFUL'

        hasFinished = AsyncResult(tasks[-1]['id']).ready()

        return tasks, hasFinished, errors
Beispiel #57
0
def get_task_status(task_id):
    """Return True when the Celery task identified by ``task_id`` has finished."""
    return AsyncResult(task_id).ready()
Beispiel #58
0
def launch_status(request):
    """
    Given a task ID of a launch process/task, check if the task has completed.
    Return a JSON object with the following keys: ``task_id``, ``ready``,
    ``error``, and ``starting_text``.
    """
    task_id = request.session.get('ec2data', {}).get('task_id')
    r = {
        'task_id': '',
        'ready': '',
        'error': '',
        'starting_text': '',
        'instance_id': '',
        'sg_name': '',
        'kp_name': ''
    }
    if task_id:
        r['task_id'] = task_id
        result = AsyncResult(task_id)
        r['ready'] = result.ready()
        if r['ready']:  # The task completed; let's get the outcome
            # Set session data based on the task result
            # TODO: this should always return JSON and not mess with the session
            #       Then, need to redo how monitor page is displayed...
            response = result.get()
            if response.get("error", ""):
                # If a key got created, allow the user to download it even if
                # an error was encountered.
                if response.get('kp_material'):
                    request.session['ec2data']['kp_name'] = response['kp_name']
                    request.session['ec2data']['kp_material'] = response[
                        'kp_material']
                    r['kp_name'] = response['kp_name']
                    r['kp_material'] = response['kp_material']
                r['error'] = response['error']
            else:
                request.session['ec2data']['cluster_name'] = response[
                    'cluster_name']
                request.session['ec2data']['instance_id'] = response[
                    'instance_id']
                request.session['ec2data']['public_ip'] = response[
                    'instance_ip']
                request.session['ec2data']['image_id'] = response['image_id']
                request.session['ec2data']['kp_name'] = response['kp_name']
                request.session['ec2data']['kp_material'] = response[
                    'kp_material']
                request.session['ec2data']['sg_name'] = response['sg_names'][0]
                request.session['ec2data']['password'] = response['password']

                # Pass data needed for the additional instance information table
                # on the monitor page
                r['instance_id'] = response['instance_id']
                r['sg_name'] = response['sg_names'][0]
                r['kp_name'] = response['kp_name']
                r['kp_material'] = response.get('kp_material', '')
                r['image_id'] = response['image_id']

                # Add an entry to the Usage table now
                try:
                    cluster_type = response['cluster_type']
                    storage_type = response['storage_type']
                    storage_size = response['storage_size']
                    # Depending on the cluster type, set null values vs.
                    # form defaults
                    if cluster_type == 'None':
                        cluster_type = None
                        storage_type = None
                        storage_size = None
                    elif response['storage_type'] == 'transient':
                        storage_size = None
                    u = models.Usage(cloud_name=response["cloud_name"],
                                     cloud_type=response["cloud_type"],
                                     image_id=response['image_id'],
                                     instance_type=response['instance_type'],
                                     cluster_type=cluster_type,
                                     storage_type=storage_type,
                                     storage_size=storage_size,
                                     user_id=response["access_key"],
                                     email=response.get(
                                         'institutional_email', ''))
                    u.save()
                except Exception, e:
                    log.debug("Trouble saving Usage data: {0}".format(e))
        else:
            starting_text_list = [
                'Starting an instance... please wait', 'Really starting!',
                'Still starting.', 'Hopefully done soon!'
            ]
            st = starting_text_list[randint(0, len(starting_text_list) - 1)]
            r['starting_text'] = st