Example #1
def remote_fetch_result(task_id, state=False):
    '''Grab the AsyncResult for a task.

    Returns the task state (state=True) or the task result.

    Usage:
        remote_fetch_result(task_id='e7fce5d9-ccbd-4d08-ae12-7888e6910215',
                            state=True)
    '''
    try:
        if state:
            res = result.AsyncResult(task_id).state
            LOGGER.info("Task %s status: %s", task_id, res)
        else:
            LOGGER.info('RESULT request')
            res = result.AsyncResult(task_id).result
            LOGGER.info("Task %s result: %s", task_id, str(res))
    except TypeError as type_err:
        LOGGER.error(type_err)
        res = False
    except ConnectionError as conn_e:
        LOGGER.error(conn_e)
        res = False
    # Preserve the original behaviour: return None when the lookup failed.
    return res or None
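
A minimal polling sketch built on the helper above (the loop, timeout and interval values are illustrative assumptions, not part of the original code):

import time

def wait_for_result(task_id, timeout=30, interval=1):
    # Poll the task state until it reaches a terminal state or we time out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if remote_fetch_result(task_id, state=True) in ('SUCCESS', 'FAILURE'):
            return remote_fetch_result(task_id)
        time.sleep(interval)
    return None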
Example #2
def workflow_publish(request):

    openid = request.authenticated_userid
    (server, username) = utils.decompose_openid(openid)

    submission_id = request.matchdict['workflow_id']

    submission = DBSession.query(Submission).filter(Submission.id==submission_id).first()
    if submission is None:
        return {'status': 'Error', 'message': 'Wrong submission id: %s' % submission_id }

    if submission.task_id is not None:
        ar = result.AsyncResult(id=submission.task_id, app=celery_app)
        if ar.state not in ('SUCCESS', 'FAILURE'):
            return {'status': 'Running', 'message': 'Another task, %s, is still running' % submission.task_name }

    metadata = DBSession.query(SubmissionMetadata).\
            filter(SubmissionMetadata.submission_id==submission_id).\
            filter(SubmissionMetadata.name=='datanode').first()

    ar = publish.delay(openid=openid, datanode=metadata.value, submission_id='%s' % submission.id, path=submission.path)
    submission.task_id = ar.id
    submission.task_name = 'publish'

    return {'status': 'Success', 'message': 'Publish task %s started successfully' % ar.id }
Example #3
def workflow_transfer(request):

    openid = request.authenticated_userid
    submission_id = request.matchdict['workflow_id']
    if request.method != 'POST':
        return HttpResponse('Error: POST request required')
    data = json.loads(request.body.decode('utf-8'))

    endpoint = data.get('endpoint')
    path = data.get('path')
    access_token = data.get('access_token')

    submission = DBSession.query(Submission).filter(Submission.id==submission_id).first()
    if submission is None:
        return {'status': 'Error', 'message': 'Wrong submission id: %s' % submission_id }

    if submission.task_id is not None:
        ar = result.AsyncResult(id=submission.task_id, app=celery_app)
        if ar.state not in ('SUCCESS', 'FAILURE'):
            return {'status': 'Running', 'message': 'Another task, %s, is still running' % submission.task_name }

    metadata = DBSession.query(SubmissionMetadata).\
            filter(SubmissionMetadata.submission_id==submission_id).\
            filter(SubmissionMetadata.name=='datanode').first()

    ar = transfer.delay(openid=openid, datanode=metadata.value, submission_id='%s' % submission.id)
    submission.task_id = ar.id
    submission.task_name = 'transfer'
    if path:
        submission.path = path

    return {'status': 'Success', 'message': 'Transfer task %s started successfully' % ar.id }
Example #4
def delete_job(id):
    jb = Job.objects.get(pk=id)
    assert_django_job(jb.status, "delete")
    if jb.status == "SCHEDULED":
        res = result.AsyncResult(jb.task_id)
        res.revoke()
        ref_count_delete(jb)
    jb.delete()
Example #5
    def apply_async(self, args, kwargs, **options):
        args, kw = self.serialize_args(args, kwargs)

        # Let's see if this is a retry. An existing task means yes.
        # If it is one, we'll call _apply_async directly later on.
        task = getattr(self.request, 'task', None)
        task_id = options.get('task_id', None)

        # if task is not None we are in a retry and site_path and
        # authorized_userid are already in kw
        if task is None:
            kw['site_path'] = '/'.join(api.portal.get().getPhysicalPath())
            kw['authorized_userid'] = api.user.get_current().getId()

        without_transaction = options.pop('without_transaction', False)

        celery = getCelery()
        if task_id is None:
            # Here we cheat a little: since we will not start the task
            # up until the transaction is done,
            # we cannot give back to whoever called apply_async
            # its much beloved AsyncResult.
            # But we can actually pass the task a specific task_id
            # (although this is not well documented),
            # and an AsyncResult at this point is basically just that id.
            task_id = uuid()
        else:
            # If this is a retry, task_id will be in the options.
            # Get rid of it to avoid an error.
            del options['task_id']

        # Construct a fake result
        if celery.conf.task_always_eager:
            result_ = EagerResult(task_id, None, states.PENDING, None)
        else:
            result_ = result.AsyncResult(task_id)

        # Note: one might be tempted to turn this into a datamanager.
        # This would result in two wrong things happening:
        # * A "commit within a commit" triggered by the function runner
        #   when CELERY_TASK_ALWAYS_EAGER is set,
        #   leading to the first invoked commit cleanup failing
        #   because the inner commit already cleaned up.
        # * An async task failing in eager mode would also roll back
        #   the whole transaction, which is not desirable.
        #   Consider the case where the synchronous code constructs an
        #   object and the async task updates it: if we roll back
        #   everything, the original content construction is also lost
        #   (even if it worked in and of itself).
        if without_transaction or celery.conf.task_always_eager or task:
            return self._apply_async(args, kw, result_, celery, task_id,
                                     options)
        else:
            queue_task_after_commit(args, kw, self, task_id, options)
            # Return the "fake" result ID
            return result_
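
The pre-generated task_id trick the comments above describe, reduced to a standalone sketch (my_task is a hypothetical Celery task; the stdlib uuid module stands in for celery's own uuid() helper used in the snippet):

import uuid

task_id = str(uuid.uuid4())
# An AsyncResult handle is usable as soon as the id exists, even though
# nothing has been sent to the broker yet.
handle = my_task.AsyncResult(task_id)
# ... later, e.g. from an after-commit hook, queue the task under that
# same id; the handle created above will then track it.
my_task.apply_async(args=(), kwargs={}, task_id=task_id)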
Example #6
def _compile_vals_list_for_batch(prev_batch_task_id):
    """
    This is used when a task runs as an error callback. When that
    happens, Celery passes the ID of the task that failed as the first
    argument instead of a list of return values for the successful
    child tasks in the chord; so, to proceed, the error callback has
    to fetch the return values manually.
    """
    res = result.AsyncResult(prev_batch_task_id)
    return [c.result for c in res.children[0].children if c.successful()]
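
A hedged sketch of how such an error callback might be wired into a chord (process_item, summarize, items and handle_batch_error are hypothetical; Signature.on_error is standard Celery API):

from celery import chord

header = [process_item.s(item) for item in items]
body = summarize.s()
# On failure anywhere in the chord, Celery calls the errback with the id
# of the failed task; inside handle_batch_error one would then call
# _compile_vals_list_for_batch(prev_batch_task_id) to recover the
# successful children's return values.
chord(header)(body.on_error(handle_batch_error.s()))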
Example #7
def __get_asyncresult(polling_url=None, task_id=None):
    if polling_url is None and task_id is None:
        raise ValueError("Either polling_url or task_id must be passed a value")
    if task_id is None:
        task_id = get_taskid_from_url(polling_url)
    from celery import result
    # Use a distinct name so the celery.result module is not shadowed.
    async_res = result.AsyncResult(task_id)
    if async_res.task_id is None:
        return None
    return async_res
Example #8
def get_task_status(task_id):
    task = result.AsyncResult(str(task_id))
    if isinstance(task.result, BaseException):
        task_result = str(task.result)
    else:
        task_result = task.result
    return {
        "task_id": str(task.id),
        "ready": task.ready(),
        "status": task.status,
        "result": task_result,
        "error": str(task.traceback)
    }
Example #9
def deleteConsultaResults(request):
    if not request.user.is_authenticated:
        return redirect('login')
    request.session['isIndividual'] = 0
    request.session['isResumen'] = 0
    asyncKey = request.session.get('asyncKey', None)
    if asyncKey and asyncKey != -404:
        # AsyncResult instances are always truthy, so no inner check is needed.
        asyncResult = result.AsyncResult(asyncKey)
        asyncResult.revoke(terminate=True)
        asyncResult.forget()
        request.session['asyncKey'] = -404
    request.session['calculationStatus'] = -1
    request.session['current'] = -1
    request.session.save()
    return redirect('index')
Example #10
def workflow_status(request):
    submission_id = request.matchdict['workflow_id']

    submission = DBSession.query(Submission).filter(Submission.id==submission_id).first()
    if submission is None:
        return {'status': 'Error', 'message': 'Wrong submission id: %s' % submission_id }

    if submission.task_id is None:
        return { 'status': 'Error', 'message': 'No task has been submitted yet' }

    ar = result.AsyncResult(id=submission.task_id, app=celery_app)
    if ar.state not in ('SUCCESS', 'FAILURE'):
        return {'status': 'Running', 'message': 'Task %s is still running' % submission.task_name }

    return ar.get()
Example #11
    def apply_async(self, args, kwargs, **options):
        args, kw = self.serialize_args(args, kwargs)
        kw['site_path'] = '/'.join(api.portal.get().getPhysicalPath())
        kw['authorized_userid'] = api.user.get_current().getId()

        without_transaction = options.pop('without_transaction', False)

        celery = getCelery()
        # Here we cheat a little: since we will not start the task
        # up until the transaction is done,
        # we cannot give back to whoever called apply_async
        # its much beloved AsyncResult.
        # But we can actually pass the task a specific task_id
        # (although this is not well documented),
        # and an AsyncResult at this point is basically just that id.
        task_id = uuid()

        # Construct a fake result
        if celery.conf.CELERY_ALWAYS_EAGER:
            result_ = EagerResult(task_id, None, states.PENDING, None)
        else:
            result_ = result.AsyncResult(task_id)

        # Note: one might be tempted to turn this into a datamanager.
        # This would result in two wrong things happening:
        # * A "commit within a commit" triggered by the function runner
        #   when CELERY_ALWAYS_EAGER is set,
        #   leading to the first invoked commit cleanup failing
        #   because the inner commit already cleaned up.
        # * An async task failing in eager mode would also roll back
        #   the whole transaction, which is not desirable.
        #   Consider the case where the synchronous code constructs an
        #   object and the async task updates it: if we roll back
        #   everything, the original content construction is also lost
        #   (even if it worked in and of itself).
        def hook(success):
            if success:
                self._apply_async(args, kw, result_, celery, task_id, options)

        if without_transaction or celery.conf.CELERY_ALWAYS_EAGER:
            return self._apply_async(args, kw, result_, celery, task_id,
                                     options)
        else:
            transaction.get().addAfterCommitHook(hook)
            # Return the "fake" result ID
            return result_
Example #12
def workflow_scan(request):

    openid = request.authenticated_userid
    submission_id = request.matchdict['workflow_id']
    if request.method != 'POST':
        return HttpResponse('Error: POST request required')
    data = json.loads(request.body.decode('utf-8'))

    path = data.get('path')

    submission = DBSession.query(Submission).filter(Submission.id==submission_id).first()
    if submission is None:
        return {'status': 'Error', 'message': 'Wrong submission id: %s' % submission_id }

    if submission.task_id is not None:
        ar = result.AsyncResult(id=submission.task_id, app=celery_app)
        if ar.state not in ('SUCCESS', 'FAILURE'):
            return {'status': 'Running', 'message': 'Another task, %s, is still running' % submission.task_name }

    metadata = DBSession.query(SubmissionMetadata).\
            filter(SubmissionMetadata.submission_id==submission_id).\
            filter(SubmissionMetadata.name=='datanode').first()

    facets = DBSession.query(FacetName, FacetValue, SubmissionFacet).\
            filter(FacetName.id==FacetValue.name_id).\
            filter(FacetValue.id==SubmissionFacet.value_id).\
            filter(SubmissionFacet.submission_id==submission_id).order_by(FacetName.id).all()

    # Start a scan process for the specified workflow and facets
    fnv = []
    for fname, fvalue, sfacet in facets:
        fnv.append({ 'name': fname.name, 'value': fvalue.value })
        log.info('%d: %s:%s' % (submission.id, fname.name, fvalue.value))

    ar = scan.delay(openid=openid, submission_id='%s' % submission.id, facets=fnv, path=path)
    submission.task_id = ar.id
    submission.task_name = 'scan'
    if path:
        submission.path = path

    return {'status': 'Success', 'message': 'Scan task %s started successfully' % ar.id }
Example #13
    def get(self, request, format=None):
        print("--------------- IrisTrain get --------")

        print(request)
        print(request.GET)

        train_task_id = request.GET.get("train_task_id")
        print("train_task_id=%s" % train_task_id)

        train_promise = result.AsyncResult(train_task_id)
        if train_promise.ready():
            # training has finished; collect the fitted data
            train_fit_data = train_promise.get()
            print("------------- train over!! ------------")
            print(train_fit_data)

            # transfer data to client
            train_result = [{
                "sepal_len": one_fit_data['features'][0],
                "sepal_width": one_fit_data['features'][1],
                "petal_len": one_fit_data['features'][2],
                "petal_width": one_fit_data['features'][3],
                "cluster": one_fit_data['prediction']
            } for one_fit_data in train_fit_data]

            print(train_result[0])
            print(len(train_result))

            train_task_status = {
                'status': train_promise.state,
                'result': train_result
            }
        else:
            train_task_status = {'status': train_promise.state, 'result': []}

        print(train_task_status)

        # Return the dict directly; DRF's Response handles serialization
        # (json.dumps here would double-encode the payload).
        return Response(train_task_status)
Example #14
def update_job(id, data, user):
    jb = Job.objects.get(pk=id)
    assert_django_job(jb.status, "update")
    tsk = tasks_validation(data["tasks"], "update", jb)
    jb.name = data["name"]
    date_time = datetime.datetime.strptime(data["schedule"],
                                           "%Y-%m-%dT%H:%M:%S")
    cur_time = datetime.datetime.utcnow()
    if cur_time >= date_time:
        raise IgniteException("Schedule time has elapsed")

    # set timezone information
    date_time = timezone.make_aware(date_time, pytz.timezone('UTC'))
    res = result.AsyncResult(jb.task_id)
    res.revoke()
    jb.schedule = date_time
    jb.tasks = data["tasks"]
    jb.updated_by = user
    jb.task_id = celery_tasks.run_single_job.apply_async(
        [jb.id, jb.schedule], eta=jb.schedule).task_id
    jb.save()
    jb.tasks = tsk
    return jb
Example #15
def progress(request, task_id):
    """Show progress for a running import."""
    template = "progress.html" if not request.is_ajax() \
            else "_progress.html"
    # 'async' is a reserved word as of Python 3.7, so it cannot be used
    # as a variable name; keep the template context key unchanged instead.
    async_result = result.AsyncResult(task_id)
    context = {"async": async_result}
    return render(request, template, context)
Example #16
File: models.py  Project: dparmer/ncg
    def is_complete(self):
        # Call ready() once and reuse the value rather than querying the
        # result backend twice.
        is_ready = result.AsyncResult(id=self.task_id).ready()
        print('TaskManager:is_complete', is_ready)
        return is_ready
Example #17
def status_task(self, task_id):
    return result.AsyncResult(task_id)
Example #18
import sys
from celery import result, Celery

app = Celery('watcher',
             backend='amqp',
             broker='amqp://*****:*****@localhost//')
res = result.AsyncResult(id=sys.argv[1], app=app)
print(res.state)
print(res.info)
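
This one is a standalone script: it takes the task id as its only command-line argument (sys.argv[1]) and prints that task's state and info. The broker credentials in the URL are redacted in the source.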
Example #19
File: models.py  Project: dparmer/ncg
    def status(self):
        # Read .status once; each access queries the result backend.
        task_status = result.AsyncResult(self.task_id).status
        print('TaskManager:is_running task_status', task_status)
        return task_status