Example #1
def update_clusters(request):
    """
    Given a task ID as part of the ``request`` (as ``task_id``), check on the
    status of a job retrieving clusters' persistent data. Return a JSON with the
    following fields:
        ``task_id``: the ID of the job request
        ``ready``: ``True`` if the job has completed; ``False`` otherwise
        ``clusters_list``: a list of clusters' persistent data (if the job
            has completed) or an empty list otherwise
    """
    task_id = request.POST.get('task_id', '')
    result = AsyncResult(task_id)
    fetching_data_text_list = ['Fetching data... please wait', 'Fetching data...',
                               'Still fetching data...', 'Hopefully done soon!']
    fdt = fetching_data_text_list[randint(0, len(fetching_data_text_list) - 1)]
    r = {'task_id': task_id,
         'ready': result.ready(),
         'clusters_list': [],
         'wait_text': fdt,
         'error': None}
    if task_id == 'missing_form_data':  # See `fetch_clusters` method
        r['error'] = "Missing form data. Please supply the data and try again."
    elif result.ready():
        clusters_pd = result.get()
        r['clusters_list'] = clusters_pd.get('clusters', [])
        if clusters_pd.get('error', None):
            r['error'] = clusters_pd['error']
    return HttpResponse(simplejson.dumps(r), mimetype="application/json")
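
A minimal client-side sketch of how a page could poll the view above until the job completes. The endpoint URL, the use of ``requests``, and the polling interval are assumptions for illustration, not part of the original code:

import time
import requests

def poll_clusters(task_id, url='http://localhost:8000/update_clusters/', interval=2.0):
    """Poll the update_clusters view until the background job completes."""
    while True:
        payload = requests.post(url, data={'task_id': task_id}).json()
        if payload.get('error'):
            raise RuntimeError(payload['error'])
        if payload['ready']:
            return payload['clusters_list']
        time.sleep(interval)  # wait before asking again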
Example #2
 def get(self, request, uuid):
     tk = AsyncResult(uuid)
     if tk.ready():
         res = 'Task result is: %s' % str(tk.get())
     else:
         res = 'Task not finished!'
     return Response(res, status=status.HTTP_200_OK)
Example #3
def cleanup_tasks():
    in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST)
    for tracker in in_progress:
        result = AsyncResult(tracker.task_id)

        if result.ready():
            logging.info("in progress tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')

    # Maintain constant size of the finished tasks list:
    removed = 1000
    while removed > 0:
        removed = QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000)

    waiting = QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST)
    for tracker in waiting:
        if tracker is None:
            continue

        result = AsyncResult(tracker.task_id)

        if result.ready():
            logging.info("waiting tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')
Example #4
 def get_context_data(self, **kwargs):
     context = super(TaskResultView, self).get_context_data(**kwargs)
     task_id = kwargs['task_id']
     async_res = AsyncResult(task_id)
     context['result'] = async_res.get()
     context['task_id'] = task_id
     return context
Example #5
    def getTaskResult(self, task_id):
        async_result = AsyncResult(task_id)
        can_forget = async_result.ready()

        try:
            try:
                result = async_result.result
                if isinstance(result, Exception):
                    result_cls_name = result.__class__.__name__
                    try:
                        errno = ERRNO_NS[result_cls_name]
                    except KeyError:
                        LOGGER.error('Undefined errno: %s', result_cls_name)
                        raise spec.ServerError()

                    value = [errno, result.message]
                else:
                    value = result

            except Exception as exc:
                LOGGER.exception(exc)
                raise spec.ServerError()

            status = getattr(spec.ResultStatus, async_result.status)
            return spec.AsyncResult(status=status, value=json.dumps(value))

        finally:
            if can_forget:
                async_result.forget()
                LOGGER.info('Forgot the result of task %s', task_id)
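
The ``forget()`` call above removes the stored result from the result backend; a fresh ``AsyncResult`` for the same id will afterwards report ``PENDING``. A quick sketch, assuming a placeholder task ``some_task``:

from celery.result import AsyncResult

res = some_task.delay()    # some_task is a placeholder, not from the original code
res.get(timeout=10)        # wait until the result has been stored
res.forget()               # drop the result from the backend
assert AsyncResult(res.id).status == 'PENDING'  # the id now looks unknown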
Example #6
def analytics_status(request, graph_slug):
    analytics_results = dict()
    analytics_request = request.GET.get("analytics_request")
    analytics_executing = json.loads(analytics_request)
    if request.is_ajax() and analytics_executing is not None:
        for task_id in analytics_executing:
            task = AsyncResult(task_id)
            # We need to take into account if the task has been revoked
            if task.ready() and task.status != REVOKED:
                # Sometimes execution finishes before the revoked flag is set,
                # so we need to handle that case (ValueError below)
                try:
                    analytic = Analytic.objects.filter(dump__graph__slug=graph_slug, task_id=task_id).latest()
                    analytics_results[task_id] = [
                        STATUS_OK,
                        analytic.results.url,
                        analytic.id,
                        analytic.task_start,
                        analytic.algorithm,
                        analytic.values.url,
                    ]
                except ValueError:
                    analytics_results[task_id] = [REVOKED]
            elif task.status == REVOKED:
                analytics_results[task_id] = [REVOKED]
    data = analytics_results
    json_data = json.dumps(data)
    return HttpResponse(json_data, content_type="application/json")
Example #7
def instancestate(request):
    """
    Given a POST request with ``task_id`` and ``instance_state`` fields, check if
    the task has completed. If so, return JSON with updated value for the
    ``instance_state`` field and start a new task, appropriately setting the
    value of ``task_id``. If the initial ``task_id`` has not completed, return
    the same value for the ``task_id`` field.
    """
    task_id = request.POST.get('task_id', None)
    instance_state = request.POST.get('instance_state', 'pending')  # Preserve current state
    state = {'task_id': None, 'instance_state': instance_state, 'error': ''}  # Reset info to be sent
    if task_id:
        # If we have a running task, check on instance state
        result = AsyncResult(task_id)
        if result.ready():
            state = result.get()
            state['task_id'] = None  # Reset but make sure it exists
        else:
            # If task not ready, send back the task_id
            state['task_id'] = task_id
    elif 'ec2data' in request.session:
        # We have no task ID, so start a task to get instance state
        form = request.session["ec2data"]
        cloud = form.get('cloud', None)
        a_key = form.get("access_key", None)
        s_key = form.get("secret_key", None)
        instance_id = form.get("instance_id", None)
        if not instance_id:
            state['error'] = "Missing instance ID, cannot check the state."
        else:
            r = tasks.instance_state.delay(cloud, a_key, s_key, instance_id)
            state['task_id'] = r.id
    else:
        state = {'instance_state': 'Not available'}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
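
For ``state = result.get()`` above to work, ``tasks.instance_state`` must return a dict shaped like the ``state`` variable. A hedged sketch of such a task; the provider lookup is a stub, not the original implementation:

from celery import shared_task

@shared_task
def instance_state(cloud, a_key, s_key, instance_id):
    # ...query the cloud provider for the instance's current state here...
    return {'task_id': None,
            'instance_state': 'running',  # value obtained from the provider
            'error': ''}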
Example #8
 def test_forget(self):
     tb = DatabaseBackend(backend="memory://")
     tid = uuid()
     tb.mark_as_done(tid, {"foo": "bar"})
     x = AsyncResult(tid)
     x.forget()
     self.assertIsNone(x.result)
Example #9
def task_result(request):
    post = json.loads(request.body)
    task_id = post.get("task_id")
    result = AsyncResult(task_id)
    if not result.ready():
        return json_response(None)
    return json_response(result.get(timeout=1))
Example #10
def cancel(event):
    """
    This is bound to the "jobs.cancel" event, and will be triggered any time
    a job is canceled. This handler will process any job that has the
    handler field set to "worker_handler".
    """
    job = event.info
    if job['handler'] in ['worker_handler', 'celery_handler']:
        # Stop event propagation and prevent default, we are using a custom state
        event.stopPropagation().preventDefault()

        celeryTaskId = job.get('celeryTaskId')

        if celeryTaskId is None:
            msg = ("Unable to cancel Celery task. Job '%s' doesn't have a Celery task id."
                   % job['_id'])
            logger.warn(msg)
            return

        if job['status'] not in [CustomJobStatus.CANCELING, JobStatus.CANCELED,
                                 JobStatus.SUCCESS, JobStatus.ERROR]:
            # Set the job status to canceling
            ModelImporter.model('job', 'jobs').updateJob(job, status=CustomJobStatus.CANCELING)

            # Send the revoke request.
            asyncResult = AsyncResult(celeryTaskId, app=getCeleryApp())
            asyncResult.revoke()
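
Note that ``revoke()`` with no arguments only prevents a task that has not started yet from running; aborting an already-running task requires ``terminate=True``. A sketch, with the signal choice as an assumption:

asyncResult.revoke(terminate=True, signal='SIGTERM')  # also kills a running task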
Example #11
 def test_forget(self):
     tb = CacheBackend(backend="memory://")
     tid = gen_unique_id()
     tb.mark_as_done(tid, {"foo": "bar"})
     x = AsyncResult(tid, backend=tb)
     x.forget()
     self.assertIsNone(x.result)
Example #12
    def POST(self):
        x = web.input(master_id='', project_id='', task_id='')
        project = models.Project.get(id=x.project_id)
        if not project:
            raise web.notfound()

        master = models.Master.get(id=x.master_id)
        if not master:
            return web.notfound()

        if x.task_id:
            from celery.result import AsyncResult
            from metapolator.config import celery
            res = AsyncResult(x.task_id, backend=celery.backend)

            if res.ready():
                master.task_completed = True
                web.ctx.orm.commit()
                return ujson.dumps({'done': True})
            else:
                master.task_updated = datetime.datetime.now()
                web.ctx.orm.commit()
                return ujson.dumps({'done': False, 'task_id': x.task_id})

        master.task_completed = True
        web.ctx.orm.commit()
        return ujson.dumps({'done': True})
Example #13
    def check(self, request, *args, **kwargs):

        try:
            task_id = request.data['id']
            uuid.UUID(task_id)
        except KeyError:
            raise ValidationError("You should specify id")
        except ValueError:
            raise ValidationError("Not valid id")

        async_result = AsyncResult(task_id)

        try:
            result = async_result.get(timeout=0.5, propagate=False)
        except TimeoutError:
            result = None
        status = async_result.status

        if isinstance(result, Exception):
            return Response({
                'status': status,
                'error': str(result),
            }, status=HTTP_200_OK)
        elif result is None:
            return Response({
                'status': status
            }, status=HTTP_204_NO_CONTENT)
        else:
            return Response({
                'status': status,
                'result': result,
            }, status=HTTP_200_OK)
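
The ``propagate=False`` argument above makes ``get()`` return a raised exception as a value instead of re-raising it, which is what the ``isinstance(result, Exception)`` branch relies on. A small illustration, assuming a hypothetical failing task ``boom``:

res = boom.delay()                         # boom is a placeholder failing task
exc = res.get(timeout=5, propagate=False)  # the exception is returned...
assert isinstance(exc, Exception)          # ...not raised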
Example #14
 def get(self, slug):
     print(slug)
     res = AsyncResult(slug)
     if res.ready():
         self.render("results.html", username=self.current_user.replace('\"',''), jobid=slug)
     else:
         self.render("results.html", username=self.current_user.replace('\"',''), jobid='00')
Example #15
def task_list(request):
    from celery.result import AsyncResult

    runs = CeleryRun.objects.filter(hide=False)
    if runs:
        header = get_header(runs)
        header.append('started')
        header.append('created')
        header.remove('hide')
    else:
        header = []

    #assert False, header
    druns = runs.values()

    data = []
    for drun in druns:
        name = drun['name']

        result = AsyncResult(name)
        drun['started'] = result.ready()
        #assert False,

        data.append([drun[h] for h in header])

    return render_to_response('celery_task_list.html', {
        'data': data,
        'header': header,
        'title': 'tasks',
    })
Example #16
def verify_task(task_id):
    result = AsyncResult(task_id)
    if result.ready():
        # do_something_with(result.get())
        pass
    else:
        verify_task.retry(countdown=1)
Example #17
 def state(self):
     result = AsyncResult(self.task_id)
     if not result.ready():
         return result.state
     if result.get() == 'ERROR':
         return 'FAILED'
     return result.state
Example #18
    def repo_check(self, repo_name):
        c.repo = repo_name
        task_id = request.GET.get('task_id')

        if task_id and task_id not in ['None']:
            from kallithea import CELERY_ON
            from celery.result import AsyncResult
            if CELERY_ON:
                task = AsyncResult(task_id)
                if task.failed():
                    raise HTTPInternalServerError(task.traceback)

        repo = Repository.get_by_repo_name(repo_name)
        if repo and repo.repo_state == Repository.STATE_CREATED:
            if repo.clone_uri:
                clone_uri = repo.clone_uri_hidden
                h.flash(_('Created repository %s from %s')
                        % (repo.repo_name, clone_uri), category='success')
            else:
                repo_url = h.link_to(repo.repo_name,
                                     h.url('summary_home',
                                           repo_name=repo.repo_name))
                fork = repo.fork
                if fork:
                    fork_name = fork.repo_name
                    h.flash(h.literal(_('Forked repository %s as %s')
                            % (fork_name, repo_url)), category='success')
                else:
                    h.flash(h.literal(_('Created repository %s') % repo_url),
                            category='success')
            return {'result': True}
        return {'result': False}
Example #19
 def async_results(result_id):
     async_result = AsyncResult(result_id)
     if not async_result.ready():
         raise DSProcessUnfinished("Result with id {} is not ready.".format(result_id))
     if async_result.status != TaskStates.SUCCESS:
         raise DSProcessError("An error occurred during background processing.")
     return async_result.result
Example #20
def get_async_result(id, backend="celery"):
    if backend == 'celery':
        res = AsyncResult(id)
        if res.ready():
            return res.get()

    raise ValueError("no result")
Example #21
def taskReady(jobObj, redirect="error"):
    """Checks whether a celery task is ready.

    Args:
        jobObj: object holding the id of a celery task (``celeryUID``).
        redirect: page to redirect to on error.
    Returns:
        (True, None): the celery task finished successfully.
        (False, HttpResponseRedirect): the celery task failed.
        (False, None): the celery task is still processing.

    """

    task = AsyncResult(jobObj.celeryUID)
    if task.ready():
        if task.successful():
            return True, None
        else:
            return False, HttpResponseRedirect(reverse(redirect))
    # If the task isn't ready, we don't really know why: the task may have
    # left the celery task table and therefore be marked as PENDING even
    # though it is actually done. We store an alternate flag in our own
    # tables to handle this case.
    if jobObj.state == symTyperTask.DONE:
        return True, None
    elif jobObj.state == symTyperTask.ERROR :
        return False, HttpResponseRedirect(reverse(redirect))
    else:
        return False, None
Example #22
def check_status(task_id):
    task = AsyncResult(task_id, app=tasks.celery)
    if task.ready():
        if task.successful():  # Task finished successfully
            status, result_value = task.result
            if status:  # Valid result
                app.logger.info("result: %s", pprint.pformat(result_value))
                return flask.jsonify(result_value)
            else:  # Handled exception
                response = flask.make_response(result_value, 400)
                response.mimetype = 'text/plain'
                return response
        else:  # Unhandled exception
            exc = task.result
            response = flask.make_response(
                traceback.format_exception_only(type(exc), exc),
                500)
            response.mimetype = 'text/plain'
            return response
    else:  # task is not ready yet
        status = {
            'result_id': task.id,
            'state': 'PENDING',
            'progress': 0,
        }
        if task.state == 'PROGRESS':
            status.update(task.result)
        response = flask.jsonify(status)
        response.status_code = 202
        return response
Example #23
 def get(self, request, uuid):
     res = AsyncResult(uuid)                 # Reconstruct an AsyncResult object from the task id.
     if res.ready():                         # Check whether the task has finished; the result is ready once it has.
         task_res = res.get()                # Fetch the task result.
     else:
         task_res = "Task not finished!"
     return Response(task_res, status=status.HTTP_200_OK)
Example #24
def instancestate(request):
    """
    Given a POST request with ``task_id`` and ``instance_state`` fields, return
    JSON with an updated value for the ``instance_state`` field (if the task has
    completed; otherwise the value provided) and the same value for ``task_id``,
    which corresponds to the ID of the background task.
    If the instance state is not available, return ``Not available`` as the value
    for ``instance_state``.
    """
    task_id = request.POST.get('task_id', None)
    instance_state = request.POST.get('instance_state', 'pending')  # Preserve current state
    state = {'task_id': None, 'instance_state': instance_state, 'error': ''}  # Reset info to be sent
    if task_id:
        # If we have a running task, check on instance state
        result = AsyncResult(task_id)
        if result.ready():
            state = result.get()
            state['task_id'] = None  # Reset but make sure it exists
        else:
            # If task not ready, send back the task_id
            state['task_id'] = task_id
    elif 'ec2data' in request.session:
        # We have no task ID, so start a task to get instance state
        form = request.session["ec2data"]
        cloud = form['cloud']
        a_key = form["access_key"]
        s_key = form["secret_key"]
        instance_id = form["instance_id"]
        r = tasks.instance_state.delay(cloud, a_key, s_key, instance_id)
        state['task_id'] = r.id
    else:
        state = {'instance_state': 'Not available'}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
Example #25
def update_clusters(request):
    """
    Given a task ID as part of the ``request`` (as ``task_id``), check on the
    status of a job retrieving clusters' persistent data. Return a JSON with the
    following fields:
        ``task_id``: the ID of the job request
        ``ready``: ``True`` if the job has completed; ``False`` otherwise
        ``clusters_list``: a list of clusters' persistent data (if the job
            has completed) or an empty list otherwise
    """
    task_id = request.POST.get("task_id", "")
    result = AsyncResult(task_id)
    fetching_data_text_list = [
        "Fetching data... please wait",
        "Fetching data...",
        "Still fetching data...",
        "Hopefully done soon!",
    ]
    fdt = fetching_data_text_list[randint(0, len(fetching_data_text_list) - 1)]
    r = {"task_id": task_id, "ready": result.ready(), "clusters_list": [], "wait_text": fdt, "error": None}
    if result.ready():
        clusters_pd = result.get()
        r["clusters_list"] = clusters_pd.get("clusters", [])
        if clusters_pd.get("error", None):
            r["error"] = clusters_pd["error"]
    return HttpResponse(simplejson.dumps(r), mimetype="application/json")
Example #26
 def __call__(self):
     result = AsyncResult(self.request.form['task_id'])
     self.request.response.setHeader('Content-Type', 'text/plain')
     if result.ready():
         return 'Done! Result is %s' % result.result
     else:
         return 'Still in progress, retry later!'
Example #27
def cleanup_tasks():
    in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST)
    for tracker in in_progress:
        result = AsyncResult(tracker.task_id)

        # If the AsyncResult status is PENDING it means there is no celery task object for this tracker, and we can
        # mark it as "dead":
        if result.status == 'PENDING':
            logging.info("In progress tracker for %s is no longer enqueued, cancelling (task: %s).",
                         tracker.query_hash, tracker.task_id)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='cancelled')

        if result.ready():
            logging.info("in progress tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')

    waiting = QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST)
    for tracker in waiting:
        result = AsyncResult(tracker.task_id)

        if result.ready():
            logging.info("waiting tracker %s finished", tracker.query_hash)
            _unlock(tracker.query_hash, tracker.data_source_id)
            tracker.update(state='finished')

    # Maintain constant size of the finished tasks list:
    QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000)
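
The ``PENDING`` check above works because celery reports ``PENDING`` both for tasks that have not started yet and for task ids it has never seen, so an unknown id is indistinguishable from a queued one:

from celery.result import AsyncResult

result = AsyncResult('no-such-task-id')
print(result.status)  # 'PENDING', even though this task was never sent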
Example #28
def retrieve_mbs_result(target_task_id):
    logger = Task.get_logger()
    r = AsyncResult(target_task_id)
    sr = SimulationResult.objects.get(task_id__exact=target_task_id)
#    sr = SimulationResult.objects.get(sim_id__exact=r['sim_id'])
    logger.info(r)

    while not r.ready():
        time.sleep(0.1)

    result = json.loads(r.result)
    
    if result['exit_code'] == 0:
        ## success
        sr.sim_id = result['sim_id']
        
        ## rewrite these if you add log collections
        sr.collections = json.dumps([
                "%s_nwk" % sr.sim_id,
                "%s_node" % sr.sim_id,
                "%s_msg" % sr.sim_id,
                "%s_usr" % sr.sim_id,
                "%s_map" % sr.sim_id,
                ])
        sr.task_progress = 100
        sr.task_status = "SUCCESS"
        sr.save()
    else:
        sr.sim_id = "NO SIM_ID (FAILED)"
        sr.task_status = "FAILED"
        sr.task_progress = 0
        sr.save()
Example #29
    def get(self, request, *args, **kwargs):
        task_id = kwargs.pop('task_id')

        async_result = AsyncResult(task_id)

        try:
            # Poll the task to see if the result is available
            result = async_result.get(timeout=20)
        except TimeoutError:
            result = None

        task_status = TaskStatus.objects.get(task_id=task_id)

        # Strip parentheses and get the name of the function to call
        task_name = re.sub(r'\([^)]*\)', '', task_status.signature)

        if task_name in request.session['tasks']:
            del(request.session['tasks'][task_name])
            request.session.modified = True

        status = task_status.status

        # Setup the response
        response = {'task_id': task_id, 'task_status': status, 'task_name': task_name, 'task_result': result}

        return HttpResponse(json.dumps(response), content_type='application/json')
Example #30
def instancestate(request):
    task_id = request.POST.get("task_id", None)
    instance_state = request.POST.get("instance_state", "pending")  # Preserve current state
    state = {"task_id": None, "instance_state": instance_state}  # Reset info to be sent
    if task_id:
        # If we have a running task, check on instance state
        result = AsyncResult(task_id)
        if result.ready():
            state = result.get()
            state["task_id"] = None  # Reset but make sure it exists
        else:
            # If task not ready, send back the task_id
            state["task_id"] = task_id
    elif "ec2data" in request.session:
        # We have no task ID, so start a task to get instance state
        form = request.session["ec2data"]
        cloud = form["cloud"]
        a_key = form["access_key"]
        s_key = form["secret_key"]
        instance_id = form["instance_id"]
        r = tasks.instance_state.delay(cloud, a_key, s_key, instance_id)
        state["task_id"] = r.id
    else:
        state = {"instance_state": "Not available"}
    return HttpResponse(simplejson.dumps(state), mimetype="application/json")
Example #31
 def test_reduce_direct(self):
     x = AsyncResult('1', app=self.app)
     fun, args = x.__reduce__()
     self.assertEqual(fun(*args), x)
Example #32
def get_task_status(strategy_id):
    return AsyncResult(find_task(strategy_id)).status 
Example #33
 def stop(self, message=None):
     if self.is_running:
         task = AsyncResult(self.celery_task_id)
         task.revoke(terminate=True, signal='SIGKILL')
     self.on_stop(message=message)
Example #34
 def test_without_id(self):
     with self.assertRaises(ValueError):
         AsyncResult(None, app=self.app)
Example #35
class QueryTask(object):
    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        return self._async_result.id

    def to_dict(self):
        if self._async_result.status == 'STARTED':
            updated_at = self._async_result.result.get('start_time', 0)
        else:
            updated_at = 0

        status = self.STATUSES[self._async_result.status]

        if isinstance(self._async_result.result, Exception):
            error = self._async_result.result.message
            status = 4
        elif self._async_result.status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if self._async_result.successful() and not error:
            query_result_id = self._async_result.result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': status,
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        return self._async_result.ready()

    def cancel(self):
        return self._async_result.revoke(terminate=True, signal='SIGINT')
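
The constructor above supports two equivalent entry points: wrapping a freshly created result, or re-attaching to a job by its stored id. A sketch; ``execute_query`` and its arguments are stand-ins, not part of this example:

result = execute_query.apply_async(args=(query, data_source.id, metadata))
task = QueryTask(async_result=result)  # wrap the result just created
same_task = QueryTask(job_id=task.id)  # later, re-attach from a stored id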
Example #36
def task_processing(filename):
    """Process the image endpoint."""
    task = processing.apply_async((filename, ))
    async_result = AsyncResult(id=task.task_id, app=celery)
    processing_result = async_result.get()
    return render_template('result.html', video_name=processing_result)
Example #37
def _get_async_result(task_id):
    """
    Use this minor indirection to facilitate mocking the AsyncResult in tests.
    """
    return AsyncResult(task_id)
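
A hedged sketch of the kind of test this indirection enables; the module path ``myapp.views`` is an assumption:

from unittest import mock

with mock.patch('myapp.views._get_async_result') as fake:
    fake.return_value.ready.return_value = True
    fake.return_value.get.return_value = {'ok': True}
    # ...exercise the code under test here; it sees the canned AsyncResult...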
Example #38
def edit_upload(request, upload_id):
    qobuz_upload = QobuzUpload.objects.get(id=upload_id)
    if request.method == 'POST':
        form = EditUploadForm(request.POST, qobuz_upload=qobuz_upload)
        if form.is_valid():
            qobuz_upload.artists = form.cleaned_data['artists']
            qobuz_upload.album_name = form.cleaned_data['album_name']
            tracks = []
            for i, track in enumerate(qobuz_upload.track_data):
                qobuz_track = qobuz_upload.album_data['tracks']['items'][i]
                title_field = 'track_{0}_{1}_title'.format(
                    qobuz_track['media_number'], i + 1)
                artists_field = 'track_{0}_{1}_artists'.format(
                    qobuz_track['media_number'], i + 1)
                tracks.append({
                    'media_number': qobuz_track['media_number'],
                    'artists': form.cleaned_data[artists_field],
                    'title': form.cleaned_data[title_field],
                })
            qobuz_upload.track_data_json = ujson.dumps(tracks)
            qobuz_upload.save()
    else:
        form = EditUploadForm(qobuz_upload=qobuz_upload)
    download_error = ''
    if qobuz_upload.download_task_id:
        async_result = AsyncResult(qobuz_upload.download_task_id)
        if async_result.state == states.PENDING:
            download_status = 'Waiting to start'
        elif async_result.state == states.STARTED:
            download_status = 'Started'
        elif async_result.state == states.SUCCESS:
            download_status = 'Completed'
        elif async_result.state == states.FAILURE:
            download_status = 'Failed'
            download_error = '{0}: {1}'.format(
                type(async_result.result).__name__,
                async_result.result.message)
        else:
            download_status = 'Unknown Status'
    else:
        download_status = 'not_started'
    try:
        spectral_files = sorted(os.listdir(wm_str(
            qobuz_upload.spectrals_path)))
    except OSError:
        spectral_files = []
    try:
        cover_files = sorted(
            [f for f in os.listdir(wm_str(qobuz_upload.temp_media_path))
             if f.endswith('.jpg')],
            reverse=True)
    except OSError:
        cover_files = []
    data = {
        'upload': qobuz_upload,
        'form': form,
        'download_status': download_status,
        'download_error': download_error,
        'spectral_files': spectral_files,
        'cover_files': cover_files,
    }
    return render(request, 'qobuz/edit_upload.html', data)
Example #39
 def __init__(self,task_id):
     self.task_id=task_id
     self.task_object = AsyncResult(self.task_id)
Example #40
def get_job_status(job_id):
    res = AsyncResult(job_id, backend=celery_object.backend)
    return jsonify({"status": res.ready()})
Example #41
 def setUp(self):
     self.ts = TaskSetResult(
         gen_unique_id(),
         [AsyncResult(gen_unique_id()),
          AsyncResult(gen_unique_id())])
Example #42
 def setUp(self):
     self.task = AsyncResult(gen_unique_id())
Example #43
def make_mock_taskset(size=10):
    tasks = [mock_task("ts%d" % i, states.SUCCESS, i) for i in xrange(size)]
    [save_result(task) for task in tasks]
    return [AsyncResult(task["id"]) for task in tasks]
Example #44
    def test_successful(self):
        ok_res = AsyncResult(self.task1["id"])
        nok_res = AsyncResult(self.task3["id"])
        nok_res2 = AsyncResult(self.task4["id"])

        self.assertTrue(ok_res.successful())
        self.assertFalse(nok_res.successful())
        self.assertFalse(nok_res2.successful())

        pending_res = AsyncResult(gen_unique_id())
        self.assertFalse(pending_res.successful())
Example #45
 def test_get_timeout_longer(self):
     res = AsyncResult(self.task4["id"])  # has RETRY status
     self.assertRaises(TimeoutError, res.get, timeout=1)
Example #46
 def test_reduce_direct(self):
     x = AsyncResult('1', app=self.app)
     fun, args = x.__reduce__()
     assert fun(*args) == x
Example #47
    def test_get_timeout(self):
        res = AsyncResult(self.task4["id"])  # has RETRY status
        self.assertRaises(TimeoutError, res.get, timeout=0.1)

        pending_res = AsyncResult(gen_unique_id())
        self.assertRaises(TimeoutError, pending_res.get, timeout=0.1)
Example #48
 def test_join_timeout(self):
     ar = MockAsyncResultSuccess(gen_unique_id())
     ar2 = MockAsyncResultSuccess(gen_unique_id())
     ar3 = AsyncResult(gen_unique_id())
     ts = TaskSetResult(gen_unique_id(), [ar, ar2, ar3])
     self.assertRaises(TimeoutError, ts.join, timeout=0.0000001)
Example #49
def wait_task(task_id):
    task = AsyncResult(task_id)
    return task.get(timeout=10)
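
``get(timeout=10)`` raises ``celery.exceptions.TimeoutError`` if the result does not arrive in time, so callers typically guard it:

from celery.exceptions import TimeoutError

try:
    value = wait_task(task_id)
except TimeoutError:
    value = None  # the task is still running after 10 seconds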
Example #50
class test_RedisBackend_chords_complex(basetest_RedisBackend):
    @pytest.fixture(scope="function", autouse=True)
    def complex_header_result(self):
        with patch("celery.result.GroupResult.restore") as p:
            yield p

    @pytest.mark.parametrize(
        ['results', 'assert_save_called'],
        [
            # No results in the header at all - won't call `save()`
            (tuple(), False),
            # Simple results in the header - won't call `save()`
            ((AsyncResult("foo"), ), False),
            # Many simple results in the header - won't call `save()`
            ((AsyncResult("foo"), ) * 42, False),
            # A single complex result in the header - will call `save()`
            ((GroupResult("foo", []), ), True),
            # Many complex results in the header - will call `save()`
            ((GroupResult("foo"), ) * 42, True),
            # Mixed simple and complex results in the header - will call `save()`
            (itertools.islice(
                itertools.cycle((
                    AsyncResult("foo"),
                    GroupResult("foo"),
                )),
                42,
            ), True),
        ])
    def test_apply_chord_complex_header(self, results, assert_save_called):
        mock_group_result = Mock()
        mock_group_result.return_value.results = results
        self.app.GroupResult = mock_group_result
        header_result_args = ("gid11", results)
        self.b.apply_chord(header_result_args, None)
        if assert_save_called:
            mock_group_result.return_value.save.assert_called_once_with(
                backend=self.b)
        else:
            mock_group_result.return_value.save.assert_not_called()

    def test_on_chord_part_return_timeout(self, complex_header_result):
        tasks = [self.create_task(i) for i in range(10)]
        random.shuffle(tasks)
        try:
            self.app.conf.result_chord_join_timeout += 1.0
            for task, result_val in zip(tasks, itertools.cycle((42, ))):
                self.b.on_chord_part_return(
                    task.request,
                    states.SUCCESS,
                    result_val,
                )
        finally:
            self.app.conf.result_chord_join_timeout -= 1.0

        join_func = complex_header_result.return_value.join_native
        join_func.assert_called_once_with(timeout=4.0, propagate=True)

    @pytest.mark.parametrize("supports_native_join", (True, False))
    def test_on_chord_part_return(
        self,
        complex_header_result,
        supports_native_join,
    ):
        mock_result_obj = complex_header_result.return_value
        mock_result_obj.supports_native_join = supports_native_join

        tasks = [self.create_task(i) for i in range(10)]
        random.shuffle(tasks)

        with self.chord_context(10) as (tasks, request, callback):
            for task, result_val in zip(tasks, itertools.cycle((42, ))):
                self.b.on_chord_part_return(
                    task.request,
                    states.SUCCESS,
                    result_val,
                )
                # Confirm that `zadd` was called even though we won't end up
                # using the data pushed into the sorted set
                assert self.b.client.zadd.call_count == 1
                self.b.client.zadd.reset_mock()
        # Confirm that neither `zrange` nor `lrange` was called
        self.b.client.zrange.assert_not_called()
        self.b.client.lrange.assert_not_called()
        # Confirm that the `GroupResult.restore` mock was called
        complex_header_result.assert_called_once_with(request.group)
        # Confirm that the callback was called with the `join()`ed group result
        if supports_native_join:
            expected_join = mock_result_obj.join_native
        else:
            expected_join = mock_result_obj.join
        callback.delay.assert_called_once_with(expected_join())
Example #51
def get_task(task_id):
    return AsyncResult(task_id)
Example #52
 def test_without_id(self):
     with pytest.raises(ValueError):
         AsyncResult(None, app=self.app)
Example #53
 def test_execute_ignore_result(self):
     task_id = uuid()
     ret = jail(task_id, MyTaskIgnoreResult.name, [4], {})
     self.assertEqual(ret, 256)
     self.assertFalse(AsyncResult(task_id).ready())
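
The assertion above holds because a task declared with ``ignore_result=True`` never stores anything in the result backend, so ``ready()`` stays ``False``. A hedged sketch of how ``MyTaskIgnoreResult`` might be declared; the body is an assumption consistent with the expected ``256``:

@app.task(ignore_result=True)
def my_task_ignore_result(x):
    return x ** 4  # 4 ** 4 == 256, matching the test's expected return value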
Example #54
def test_celery_curl():
    if request.method == 'GET':
        task = example_celery_method.delay()
        async_result = AsyncResult(id=task.task_id, app=celery)
        processing_result = async_result.get()
        return str(processing_result)
Example #55
def get_payload(ts_id):
    '''Goes through the task_ids for an analysis and determines the status
    of each task (pre-processing, processing and cleanup).

    '''
    payload = []
    ts = AsyncResult(ts_id)
    if ts:
        if ts.result:
            if isinstance(ts.result, dict):
                if isinstance(ts.result['message'], dict):
                    temp_ret = ts.result['message']
                else:
                    temp_ret = ts.result
                temp_ret['state'] = ts.state
                temp_ret['task_id'] = ts.task_id
                payload.append(temp_ret)
            elif ts.result.__class__.__name__ == 'TaskSetResult':
                for sub_result in ts.result.results:
                    temp_ret = {}
                    if sub_result.result:
                        if sub_result.result.__class__.__name__ == 'FileStoreItem':
                            temp_ret['state'] = sub_result.state
                            temp_ret['task_id'] = sub_result.task_id
                            payload.append(temp_ret)
                        else:
                            # if the result comes back as a dictionary, use it;
                            # if it is just a string, start from an empty dict
                            if isinstance(sub_result.result, dict):
                                temp_ret = sub_result.result
                            else:
                                temp_ret = {}
                            temp_ret['state'] = str(sub_result.state)
                            temp_ret['task_id'] = str(sub_result.task_id)
                            payload.append(temp_ret)
                    else:
                        temp_ret['state'] = sub_result.state
                        temp_ret['task_id'] = sub_result.task_id
                        payload.append(temp_ret)
            else:
                temp_ret = {'state': ts.state, 'info': str(ts.result),
                            'task_id': ts.task_id}
                payload.append(temp_ret)
        else:
            temp_ret = {'state': ts.state, 'task_id': ts.task_id}
            payload.append(temp_ret)
    else:
        temp_ret = {'state': "### WAITING ###"}
        payload.append(temp_ret)

    return payload
Example #56
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author    : Davis Niu
# Created   : 6/30/2020  11:25 PM
# File      : result.py
from celery.result import AsyncResult
from celery_task import cel

async_result = AsyncResult(id='8628904d-e02a-473f-966e-9eca4286ba4d', app=cel)

if async_result.successful():
    result = async_result.get()
    print(result)
elif async_result.failed():
    print('failed')
elif async_result.status == 'PENDING':
    print('PENDING')
else:
    print('other')
Example #57
 def __init__(self, job_id=None, async_result=None):
     if async_result:
         self._async_result = async_result
     else:
         self._async_result = AsyncResult(job_id, app=celery)
Example #58
# ------------------------------
# @Time    : 2020/5/5
# @Author  : gao
# @File    : result.py
# @Project : AmazingQuant
# ------------------------------
import time
from celery.result import AsyncResult
from test.celery.run import app

from test.celery.celery_app_task import add
result = add.delay(1, 1)
print(result.id)
a = 3
async1 = AsyncResult(id=result.id, app=app)

while True:
    if async1.successful():
        result = async1.get()
        print(result)
        print('task finished', time.time())
        # async1.forget()  # remove the stored result
        break
    elif async1.failed():
        print('task failed')
    elif async1.status == 'PENDING':
        print('task is waiting to be executed')
    elif async1.status == 'RETRY':
        print('task hit an error and is being retried')
    elif async1.status == 'STARTED':
        print('task has started')
Example #59
class QueryTask(object):
    MAX_RETRIES = 5

    # TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
    STATUSES = {
        'PENDING': 1,
        'STARTED': 2,
        'SUCCESS': 3,
        'FAILURE': 4,
        'REVOKED': 4
    }

    def __init__(self, job_id=None, async_result=None):
        if async_result:
            self._async_result = async_result
        else:
            self._async_result = AsyncResult(job_id, app=celery)

    @property
    def id(self):
        return self._async_result.id

    @classmethod
    def add_task(cls, query, data_source, scheduled=False, metadata={}):
        query_hash = gen_query_hash(query)
        logging.info("[Manager][%s] Inserting job", query_hash)
        logging.info("[Manager] Metadata: [%s]", metadata)
        try_count = 0
        job = None
        
        while try_count < cls.MAX_RETRIES:
            try_count += 1

            pipe = redis_connection.pipeline()
            try:
                pipe.watch(cls._job_lock_id(query_hash, data_source.id))
                job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id))
                if job_id:
                    logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id)

                    job = cls(job_id=job_id)
                    if job.ready():
                        logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
                        redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id))
                        job = None

                if not job:
                    pipe.multi()

                    if scheduled:
                        queue_name = data_source.scheduled_queue_name
                    else:
                        queue_name = data_source.queue_name

                    result = execute_query.apply_async(args=(query, data_source.id, metadata), queue=queue_name)
                    job = cls(async_result=result)
                    
                    logging.info("[Manager][%s] Created new job: %s", query_hash, job.id)
                    pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
                    pipe.execute()
                break

            except redis.WatchError:
                continue

        if not job:
            logging.error("[Manager][%s] Failed adding job for query.", query_hash)

        return job

    def to_dict(self):
        if self._async_result.status == 'STARTED':
            updated_at = self._async_result.result.get('start_time', 0)
        else:
            updated_at = 0

        if self._async_result.failed() and isinstance(self._async_result.result, Exception):
            error = self._async_result.result.message
        elif self._async_result.status == 'REVOKED':
            error = 'Query execution cancelled.'
        else:
            error = ''

        if self._async_result.successful():
            query_result_id = self._async_result.result
        else:
            query_result_id = None

        return {
            'id': self._async_result.id,
            'updated_at': updated_at,
            'status': self.STATUSES[self._async_result.status],
            'error': error,
            'query_result_id': query_result_id,
        }

    @property
    def is_cancelled(self):
        return self._async_result.status == 'REVOKED'

    @property
    def celery_status(self):
        return self._async_result.status

    def ready(self):
        return self._async_result.ready()

    def cancel(self):
        return self._async_result.revoke(terminate=True, signal='SIGINT')

    @staticmethod
    def _job_lock_id(query_hash, data_source_id):
        return "query_hash_job:%s:%s" % (data_source_id, query_hash)
Example #60
    def update_main_task(self, main_task_id: str):
        all_task_info_rows = self.filter(
            Q(id=main_task_id) | Q(main_task_id=main_task_id)) \
            .exclude(name__in=self.EXCLUDE_FROM_TRACKING) \
            .values_list('propagate_exceptions', 'own_status', 'own_progress', 'own_date_done',
                         'run_after_sub_tasks_finished')

        total_status_non_propagating_exceptions = None
        total_status_propagating_exceptions = None
        found_propagating_exceptions = False
        total_progress = 0
        total_done = True
        total_date_done = None
        sub_tasks_finished = True
        has_deferred_tasks = False
        for propagate_exceptions, status, progress, date_done, run_after_sub_tasks_finished in all_task_info_rows:
            if propagate_exceptions:
                found_propagating_exceptions = True
                total_status_propagating_exceptions = status \
                    if total_status_propagating_exceptions is None \
                    else max(total_status_propagating_exceptions,
                             status,
                             key=precedence_propagating_exceptions)
            else:
                total_status_non_propagating_exceptions = status \
                    if total_status_non_propagating_exceptions is None \
                    else max(total_status_non_propagating_exceptions,
                             status,
                             key=precedence_non_propagating_exceptions)

            if not run_after_sub_tasks_finished:
                sub_tasks_finished = sub_tasks_finished and status in READY_STATES
            else:
                has_deferred_tasks = True

            if progress is None:
                progress = 0
            if status in READY_STATES:
                progress = 100

            total_progress += progress

            # if there is at least one date_done = None then we are not done
            if date_done is None:
                total_done = False

            # if we have not yet seen any date_done = None (meaning we are done
            # so far), calculate the total date done as the max date_done seen
            if total_done:
                total_date_done = max(
                    total_date_done,
                    date_done) if total_date_done else date_done
            else:
                total_date_done = None

        total_progress = (
            total_progress /
            len(all_task_info_rows)) if len(all_task_info_rows) else 100

        if total_status_propagating_exceptions in PROPAGATE_STATES:
            # we have error in an important sub-task
            total_status = total_status_propagating_exceptions
        elif found_propagating_exceptions:
            # we don't have errors in important sub-tasks
            # but there were important sub-tasks and we should take their statuses into account
            # (this is mostly to take care of None state in total_status_propagating_exceptions
            # meaning PENDING)
            total_status = max(total_status_propagating_exceptions,
                               total_status_non_propagating_exceptions,
                               key=precedence_non_propagating_exceptions)
        else:
            # we did not see any important sub-tasks and should not take into account
            # None in total_status_propagating_exceptions
            total_status = total_status_non_propagating_exceptions

        if has_deferred_tasks and sub_tasks_finished:
            if total_status not in PROPAGATE_STATES:
                self.run_after_sub_tasks(main_task_id)
            else:
                self \
                    .filter(main_task_id=main_task_id, run_after_sub_tasks_finished=True) \
                    .update(status=total_status, date_done=total_date_done)

        self.filter(id=main_task_id).update(date_done=total_date_done,
                                            status=total_status,
                                            completed=total_progress == 100,
                                            progress=total_progress)
        if total_status in READY_STATES:
            try:
                main_task = self.get(id=main_task_id)  # type: Task
                if total_status == SUCCESS:
                    main_task.write_log(
                        '{0} #{1}: all sub-tasks have been processed successfully'
                        .format(main_task.name, main_task_id))
                else:
                    main_task.write_log(
                        '{0} #{1}: some/all of sub-tasks have been crashed'.
                        format(main_task.name, main_task_id),
                        level='error')
            except:
                import logging
                logging.error(
                    'Was unable to log SUCCESS/FAILURE to task log. Task id: {0}'
                    .format(main_task_id))

        if total_status_propagating_exceptions in PROPAGATE_STATES:
            revoke_task(AsyncResult(main_task_id))