Code Example #1
File: test_result.py Project: westurner/celery
 def test_save_restore(self):
     subs = [MockAsyncResultSuccess(uuid()), MockAsyncResultSuccess(uuid())]
     ts = TaskSetResult(uuid(), subs)
     ts.save()
     with self.assertRaises(AttributeError):
         ts.save(backend=object())
     self.assertEqual(
         TaskSetResult.restore(ts.taskset_id).subtasks, ts.subtasks)
     ts.delete()
     self.assertIsNone(TaskSetResult.restore(ts.taskset_id))
     with self.assertRaises(AttributeError):
         TaskSetResult.restore(ts.taskset_id, backend=object())
Code Example #2
File: test_result.py Project: harmv/celery
 def test_save_restore(self):
     subs = [MockAsyncResultSuccess(uuid()),
             MockAsyncResultSuccess(uuid())]
     ts = TaskSetResult(uuid(), subs)
     ts.save()
     self.assertRaises(AttributeError, ts.save, backend=object())
     self.assertEqual(TaskSetResult.restore(ts.taskset_id).subtasks,
                      ts.subtasks)
     ts.delete()
     self.assertIsNone(TaskSetResult.restore(ts.taskset_id))
     self.assertRaises(AttributeError,
                       TaskSetResult.restore, ts.taskset_id,
                       backend=object())
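Both tests exercise the same round-trip: a TaskSetResult is persisted with save(), rebuilt from its taskset_id with restore(), and restore() returns None once delete() has run. A minimal sketch of that flow outside the test suite, assuming a configured result backend and a registered task named add:

from celery.task.sets import TaskSet
from celery.result import TaskSetResult

result = TaskSet(tasks=[add.subtask((2, 2)), add.subtask((4, 4))]).apply_async()
result.save()                                   # persist subtask ids in the backend
restored = TaskSetResult.restore(result.taskset_id)
assert restored.subtasks == result.subtasks     # what the tests above verify
restored.delete()                               # restore() now returns None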
Code Example #3
File: chords.py Project: OddBloke/celery
def _unlock_chord(setid, callback, interval=1, max_retries=None):
    result = TaskSetResult.restore(setid)
    if result.ready():
        subtask(callback).delay(result.join())
        result.delete()
    else:
        _unlock_chord.retry(countdown=interval, max_retries=max_retries)
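_unlock_chord polls the saved TaskSetResult and reschedules itself via retry() until every subtask has finished, then hands the joined results to the callback. A hedged sketch of how such an unlock task might be kicked off next to the taskset it watches (assuming _unlock_chord is registered as a task and that subtasks and callback already exist):

# Illustrative only: save the taskset so restore() can find it,
# then start the poller.
result = TaskSet(tasks=subtasks).apply_async()
result.save()
_unlock_chord.delay(result.taskset_id, callback, interval=1)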
Code Example #4
File: views.py Project: haugvald/baruwa
def task_status(request, taskid):
    """
    Return task status based on:
    djcelery.views.task_status
    """
    result = TaskSetResult.restore(taskid)
    percent = "0.0"
    status = 'PROGRESS'
    results = []
    if result.ready():
        finished = True
        results = result.join()
    else:
        finished = False
        percent = "%.1f" % (
            (1.0 * int(result.completed_count()) / int(result.total)) * 100)
    rdict = {
        'taskid': taskid,
        'finished': finished,
        'results': results,
        'status': status,
        'completed': percent
    }
    if request.is_ajax():
        response = anyjson.dumps(rdict)
        return HttpResponse(
            response, content_type='application/javascript; charset=utf-8')
    return render_to_response('mail/messages/task_status.html',
                              rdict,
                              context_instance=RequestContext(request))
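Note that TaskSetResult.restore() returns None when nothing was saved under taskid (Examples #1 and #2 assert exactly that), so the view above raises AttributeError on result.ready() for an unknown or expired id. A defensive variant of the first few lines, reusing the helpers already imported here:

result = TaskSetResult.restore(taskid)
if result is None:
    # Unknown or expired taskset id: nothing was found in the result backend.
    rdict = {'taskid': taskid, 'error': 'task not found or expired'}
    return HttpResponse(anyjson.dumps(rdict),
                        content_type='application/javascript; charset=utf-8')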
Code Example #5
File: pyredis.py Project: eldondev/celery
 def on_chord_part_return(self, task, keyprefix="chord-unlock-%s"):
     setid = task.request.taskset
     key = keyprefix % setid
     deps = TaskSetResult.restore(setid, backend=task.backend)
     if self.client.incr(key) >= deps.total:
         subtask(task.request.chord).delay(deps.join())
         deps.delete()
     self.client.expire(key, 86400)
Code Example #6
File: pyredis.py Project: kornholi/celery
 def on_chord_part_return(self, task, keyprefix="chord-unlock-%s"):
     setid = task.request.taskset
     key = keyprefix % setid
     deps = TaskSetResult.restore(setid, backend=task.backend)
     if self.client.incr(key) >= deps.total:
         subtask(task.request.chord).delay(deps.join())
         deps.delete()
     self.client.expire(key, 86400)
Code Example #7
def join_taskset(setid, callback, interval=10, max_retries=None, propagate=True):
    '''
    Task to poll if the TaskSet ``setid`` has finished.

    Pass results of the TaskSet to ``callback``.
    '''
    result = TaskSetResult.restore(setid)
    if result.ready():
        return subtask(callback).delay(result.join(propagate=propagate))
    join_taskset.retry(countdown=interval, max_retries=max_retries)
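A usage sketch for join_taskset (every name other than join_taskset is an assumption): the TaskSetResult must be saved first, otherwise restore() inside the task finds nothing.

# Hypothetical dispatch of the polling task above.
result = TaskSet(tasks=subtasks).apply_async()
result.save()                       # restore() reads what save() wrote
join_taskset.delay(result.taskset_id, process_results.subtask(),
                   interval=10, max_retries=30)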
Code Example #8
File: redis.py Project: FreakTheMighty/celery
 def on_chord_part_return(self, task, propagate=False,
         keyprefix="chord-unlock-%s"):
     from celery.task.sets import subtask
     from celery.result import TaskSetResult
     setid = task.request.taskset
     key = keyprefix % setid
     deps = TaskSetResult.restore(setid, backend=task.backend)
     if self.client.incr(key) >= deps.total:
         subtask(task.request.chord).delay(deps.join(propagate=propagate))
         deps.delete()
     self.client.expire(key, 86400)
Code Example #9
File: cache.py Project: bmihelac/celery
 def on_chord_part_return(self, task, propagate=False):
     from celery import subtask
     from celery.result import TaskSetResult
     setid = task.request.taskset
     if not setid:
         return
     key = self.get_key_for_chord(setid)
     deps = TaskSetResult.restore(setid, backend=task.backend)
     if self.client.incr(key) >= deps.total:
         subtask(task.request.chord).delay(deps.join(propagate=propagate))
         deps.delete()
         self.client.delete(key)
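This cache-backend variant uses the same counter idea as the Redis versions above, with two differences: it bails out early when the request carries no taskset id, and it deletes the counter key once the chord fires instead of leaving it to a 24-hour expiry. The counting pattern in isolation (client stands in for any cache/Redis-like object and is an assumption):

count = self.client.incr(key)       # atomic increment returns the new value
if count >= deps.total:             # the last part to finish fires the chord
    subtask(task.request.chord).delay(deps.join())
    deps.delete()                   # remove the saved TaskSetResult
    self.client.delete(key)         # and the counter itself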
Code Example #10
File: models.py Project: mmcardle/MServe
    def save(self):
        if not self.id:
            self.id = utils.random_id()
        if self.taskset_id:
            tsr = TaskSetResult.restore(self.taskset_id)
            if tsr is not None and hasattr(tsr,"taskset_id"):
                subtasks = tsr.subtasks
                if subtasks:
                    for st in subtasks:
                        jasr = JobASyncResult(async_id=st.task_id, job=self)
                        jasr.save()

        super(Job, self).save()
Code Example #11
File: redis.py Project: WoLpH/celery
 def on_chord_part_return(self,
                          task,
                          propagate=False,
                          keyprefix="chord-unlock-%s"):
     from celery.task.sets import subtask
     from celery.result import TaskSetResult
     setid = task.request.taskset
     key = keyprefix % setid
     deps = TaskSetResult.restore(setid, backend=task.backend)
     if self.client.incr(key) >= deps.total:
         subtask(task.request.chord).delay(deps.join(propagate=propagate))
         deps.delete()
     self.client.expire(key, 86400)
Code Example #12
File: test_result.py Project: mozilla/webifyme-lib
 def test_save_restore(self):
     subs = [
         MockAsyncResultSuccess(gen_unique_id()),
         MockAsyncResultSuccess(gen_unique_id())
     ]
     ts = TaskSetResult(gen_unique_id(), subs)
     ts.save()
     self.assertRaises(AttributeError, ts.save, backend=object())
     self.assertEqual(
         TaskSetResult.restore(ts.taskset_id).subtasks, ts.subtasks)
     self.assertRaises(AttributeError,
                       TaskSetResult.restore,
                       ts.taskset_id,
                       backend=object())
Code Example #13
File: views.py Project: friendofrobots/air-toolkit
def status(request):
    if request.user.is_authenticated():
        profile = request.user.profile
        """
        I need to execute 2 separate tasks:
        1) Download and save (set) - this should already be started
        2) Calculate and save pmi information (set)
        """
        if profile.stage == 0:
            response_data = {
                "stage": 0,
                "state": 'not yet started',
                }
        elif profile.stage == 1:
            result = TaskSetResult.restore(profile.task_id)
            response_data = {
                "stage": 1,
                "completed": result.completed_count(),
                "total": result.total,
                }
        elif profile.stage == 2:
            result = TaskSetResult.restore(profile.task_id)
            response_data = {
                "stage": 2,
                "completed": result.completed_count(),
                "total": result.total,
                }
        else:
            response_data = {
                "stage": 3,
                "state": "completed",
                }
    else:
        response_data = {
            "error": "user must be logged in"
            }
    return HttpResponse(json.dumps(response_data), mimetype="application/json")
Code Example #14
File: views.py Project: friendofrobots/air-toolkit
def startDownload(request):
    """
    Ajax call to start the download
    """
    if request.user.is_authenticated():
        if request.method == 'POST':
            profile = request.user.profile
            if profile.stage > 0:
                if profile.stage < 3:
                    result = TaskSetResult.restore(profile.task_id)
                    response_data = {
                        "error": "download already started",
                        "stage": profile.stage,
                        "completed": result.completed_count(),
                        "total": result.total,
                        }
                else:
                    response_data = {
                        "error": "download already finished",
                        "stage": profile.stage,
                        "state": "completed",
                        }
            else:
                graphapi = facebook.GraphAPI(profile.fblogin.access_token)
                me = graphapi.get_object('me')
                friends = [(f['id'], f['name'])
                           for f in graphapi.get_connections('me', 'friends')['data']]
                friends.append((me['id'], me['name']))

                subtasks = [tasks.dlUser.subtask((profile.id, graphapi, fbid, name))
                            for (fbid, name) in friends]
                result = TaskSet(tasks=subtasks).apply_async()
                result.save()
                profile.stage = 1
                profile.task_id = result.taskset_id
                profile.save()
                tasks.checkTaskSet.delay(result, profile.id)
                response_data = {
                    "stage":1,
                    "completed": result.completed_count(),
                    "total": result.total,
                    }
        else:
            response_data = {
                "error": "must be a post request"
                }
    else:
        response_data = {
            "error": "user must be logged in"
            }
    return HttpResponse(json.dumps(response_data), mimetype="application/json")
Code Example #15
File: messages.py Project: TetraAsh/baruwa2
    def process(self, taskid, format=None):
        "process a taskset"
        result = TaskSetResult.restore(taskid, backend=dbbackend)
        if (result is None or
            'taskids' not in session or
            taskid not in session['taskids']):
            if format == 'json':
                return ajax_code(404,
                        _('The task status requested '
                        'has expired or does not exist'))
            flash(_('The task status requested has expired or does not exist'))
            redirect(url(controller='messages', action='quarantine'))
        percent = "0.0"
        status = 'PROGRESS'
        results = []
        #print '=' * 20, result.completed_count(), result.total, result.ready()
        if result.ready():
            finished = True
            results = result.join()
        else:
            session['bulkprocess-count'] += 1
            if (session['bulkprocess-count'] >= 10 and
                result.completed_count() == 0):
                result.revoke()
                del session['bulkprocess-count']
                session.save()
                if format == 'json':
                    return ajax_code(503,
                            _('An error occurred in processing, try again later'))
                flash_alert(_('An error occurred in processing, try again later'))
                redirect(url(controller='messages', action='quarantine'))
            finished = False
            percent = "%.1f" % ((1.0 * int(result.completed_count()) /
                                int(result.total)) * 100)

        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = dict(finished=finished,
                        results=results,
                        status=status,
                        completed=percent)
            return json.dumps(data)

        c.finished = finished
        c.results = results
        c.status = status
        c.completed = percent
        return render('/messages/taskstatus.html')
Code Example #16
File: models.py Project: it-innovation/MServe
    def tasks(self):
        tsr = TaskSetResult.restore(self.taskset_id)
        info = {}  # renamed from "dict" to avoid shadowing the builtin
        if tsr is not None and hasattr(tsr, "taskset_id"):
            info["taskset_id"] = tsr.taskset_id
            results = []
            if tsr.subtasks:
                for subtask in tsr.subtasks:
                    results.append(
                        {
                            "name": subtask.task_name,
                            "result": subtask.result,
                            "success": subtask.successful(),
                            "state": subtask.state,
                        }
                    )
                info["completed_count"] = tsr.completed_count()
                info["failed"] = tsr.failed()
                info["total"] = tsr.total
                if tsr.total != 0:
                    info["percent"] = float(tsr.completed_count()) / float(tsr.total) * 100
                else:
                    info["percent"] = 0
                info["ready"] = tsr.ready()
                info["successful"] = tsr.successful()
                info["waiting"] = tsr.waiting()
            else:
                info["completed_count"] = 0
                info["failed"] = 0
                info["total"] = 0
                info["percent"] = 0
                info["successful"] = False
                info["waiting"] = False
            info["result"] = results
            return info
        else:
            info["taskset_id"] = ""
            info["completed_count"] = 0
            info["failed"] = 0
            info["percent"] = 0
            info["ready"] = True
            info["successful"] = False
            info["total"] = 0
            info["waiting"] = False
            return info
Code Example #17
File: base.py Project: westurner/celery
 def on_chord_part_return(self, task, propagate=False):
     if not self.implements_incr:
         return
     from celery import subtask
     from celery.result import TaskSetResult
     setid = task.request.taskset
     if not setid:
         return
     key = self.get_key_for_chord(setid)
     deps = TaskSetResult.restore(setid, backend=task.backend)
     val = self.incr(key)
     if val >= deps.total:
         subtask(task.request.chord).delay(deps.join(propagate=propagate))
         deps.delete()
         self.client.delete(key)
     else:
         self.expire(key, 86400)
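This base-class version generalizes the backend-specific copies above: it returns early unless the backend implements atomic increments, guards against a missing taskset id, and only sets the 24-hour expiry while the chord has not fired yet. A hedged sketch of the contract a concrete backend would need to satisfy (the class names and client attribute here are assumptions, not the library's API):

class CounterBackend(KeyValueStoreBackend):   # base class name is an assumption
    implements_incr = True

    def incr(self, key):
        # Must be atomic so concurrent chord parts cannot race.
        return self.client.incr(key)

    def expire(self, key, seconds):
        return self.client.expire(key, seconds)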
Code Example #18
File: views.py Project: heartshare/baruwa
def task_status(request, taskid):
    """
    Return task status based on:
    djcelery.views.task_status
    """
    result = TaskSetResult.restore(taskid)
    percent = "0.0"
    status = 'PROGRESS'
    results = []
    if result.ready():
        finished = True
        results = result.join()
    else:
        finished = False
        percent = "%.1f" % ((1.0 * int(result.completed_count()) / int(result.total)) * 100)
    rdict = {'taskid': taskid, 'finished': finished, 'results': results,
    'status': status, 'completed': percent}
    if request.is_ajax():
        response = anyjson.dumps(rdict)
        return HttpResponse(response,
        content_type='application/javascript; charset=utf-8')
    return render_to_response('messages/task_status.html', rdict,
    context_instance=RequestContext(request))
Code Example #19
def get_task_group_state(task_group_id):
    """return a list containing states of all tasks given a task set ID"""
    task_group_state = []
    percent_done = 0

    taskset = TaskSetResult.restore(task_group_id)
    if not taskset:
        logger.error("TaskSet with UUID '%s' doesn't exist", task_group_id)
        return task_group_state

    for task in taskset.results:
        # AsyncResult.info does not contain task state after task has finished
        if task.state == celery.states.SUCCESS:
            percent_done = 100
        elif task.info:
            try:
                percent_done = task.info.get('percent_done') or 0
            except AttributeError:
                logger.error("Task %s failed: %s", task, task.info)
        task_group_state.append({
            'state': task.state,
            'percent_done': percent_done,
        })
    return task_group_state
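get_task_group_state walks taskset.results instead of calling join(), so it can report per-task progress while tasks are still running and never blocks. A small consumer sketch (the surrounding wiring is assumed):

states = get_task_group_state(task_group_id)
if states:
    done = sum(1 for s in states if s['state'] == celery.states.SUCCESS)
    print("%d/%d tasks finished" % (done, len(states)))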
Code Example #20
File: models.py Project: mmcardle/MServe
 def failed(self):
     tsr = TaskSetResult.restore(self.taskset_id)
     if tsr is not None and hasattr(tsr,"taskset_id"):
         return tsr.failed()
     else:
         return False
Code Example #21
def run_analysis(analysis_uuid):
    """Manage analysis execution"""
    RETRY_INTERVAL = 5  # seconds

    try:
        analysis = Analysis.objects.get(uuid=analysis_uuid)
    except (Analysis.DoesNotExist, Analysis.MultipleObjectsReturned) as exc:
        logger.error("Can not retrieve analysis with UUID '%s': '%s'",
                     analysis_uuid, exc)
        run_analysis.update_state(state=celery.states.FAILURE)
        return

    # if cancelled by user
    if analysis.failed():
        return

    try:
        analysis_status = AnalysisStatus.objects.get(analysis=analysis)
    except (AnalysisStatus.DoesNotExist,
            AnalysisStatus.MultipleObjectsReturned) as exc:
        logger.error("Can not retrieve status for analysis '%s': '%s'",
                     analysis, exc)
        run_analysis.update_state(state=celery.states.FAILURE)
        return

    if not analysis_status.refinery_import_task_group_id:
        logger.info("Starting analysis '%s'", analysis)
        analysis.set_status(Analysis.RUNNING_STATUS)
        logger.info("Starting input file import tasks for analysis '%s'",
                    analysis)
        refinery_import_tasks = []
        for input_file_uuid in analysis.get_input_file_uuid_list():
            refinery_import_task = import_file.subtask(
                    (input_file_uuid, False, ))
            refinery_import_tasks.append(refinery_import_task)
        refinery_import = TaskSet(tasks=refinery_import_tasks).apply_async()
        refinery_import.save()
        analysis_status.refinery_import_task_group_id = \
            refinery_import.taskset_id
        analysis_status.save()
        run_analysis.retry(countdown=RETRY_INTERVAL)

    # check if all files were successfully imported into Refinery
    refinery_import = TaskSetResult.restore(
            analysis_status.refinery_import_task_group_id)
    if not refinery_import.ready():
        logger.debug("Input file import pending for analysis '%s'", analysis)
        run_analysis.retry(countdown=RETRY_INTERVAL)
    elif not refinery_import.successful():
        logger.error("Analysis '%s' failed during file import", analysis)
        analysis.set_status(Analysis.FAILURE_STATUS)
        analysis.send_email()
        refinery_import.delete()
        return

    # import files into Galaxy and start analysis
    if not analysis_status.galaxy_import_task_group_id:
        logger.debug("Starting analysis execution in Galaxy")
        try:
            analysis.prepare_galaxy()
        except (requests.exceptions.ConnectionError,
                galaxy.client.ConnectionError):
            logger.error("Analysis '%s' failed during preparation in Galaxy",
                         analysis)
            analysis.set_status(Analysis.FAILURE_STATUS)
            analysis.send_email()
            refinery_import.delete()
            return
        galaxy_import_tasks = [
            start_galaxy_analysis.subtask((analysis_uuid, )),
        ]
        galaxy_import = TaskSet(tasks=galaxy_import_tasks).apply_async()
        galaxy_import.save()
        analysis_status.galaxy_import_task_group_id = \
            galaxy_import.taskset_id
        analysis_status.set_galaxy_history_state(AnalysisStatus.PROGRESS)
        run_analysis.retry(countdown=RETRY_INTERVAL)

    # check if data files were successfully imported into Galaxy
    galaxy_import = TaskSetResult.restore(
            analysis_status.galaxy_import_task_group_id)
    if not galaxy_import.ready():
        logger.debug("Analysis '%s' pending in Galaxy", analysis)
        run_analysis.retry(countdown=RETRY_INTERVAL)
    elif not galaxy_import.successful():
        logger.error("Analysis '%s' failed in Galaxy", analysis)
        analysis.set_status(Analysis.FAILURE_STATUS)
        analysis_status.set_galaxy_history_state(AnalysisStatus.ERROR)
        analysis.send_email()
        refinery_import.delete()
        galaxy_import.delete()
        analysis.galaxy_cleanup()
        return

    # check if analysis has finished running in Galaxy
    try:
        percent_complete = analysis.galaxy_progress()
    except RuntimeError:
        analysis_status.set_galaxy_history_state(AnalysisStatus.ERROR)
        analysis.send_email()
        refinery_import.delete()
        galaxy_import.delete()
        analysis.galaxy_cleanup()
        return
    except galaxy.client.ConnectionError:
        analysis_status.set_galaxy_history_state(AnalysisStatus.UNKNOWN)
        run_analysis.retry(countdown=RETRY_INTERVAL)
    else:
        # workaround to avoid moving the progress bar backward
        if analysis_status.galaxy_history_progress < percent_complete:
            analysis_status.galaxy_history_progress = percent_complete
            analysis_status.save()
        if percent_complete < 100:
            analysis_status.set_galaxy_history_state(AnalysisStatus.PROGRESS)
            run_analysis.retry(countdown=RETRY_INTERVAL)
        else:
            analysis_status.set_galaxy_history_state(AnalysisStatus.OK)

    # retrieve analysis results from Galaxy
    if not analysis_status.galaxy_export_task_group_id:
        galaxy_export_tasks = get_galaxy_download_tasks(analysis)
        logger.info("Starting downloading of results from Galaxy for analysis "
                    "'%s'", analysis)
        galaxy_export = TaskSet(tasks=galaxy_export_tasks).apply_async()
        galaxy_export.save()
        analysis_status.galaxy_export_task_group_id = galaxy_export.taskset_id
        analysis_status.save()
        run_analysis.retry(countdown=RETRY_INTERVAL)

    # check if analysis results have finished downloading from Galaxy
    galaxy_export = TaskSetResult.restore(
            analysis_status.galaxy_export_task_group_id)
    if not galaxy_export.ready():
        logger.debug("Results download pending for analysis '%s'", analysis)
        run_analysis.retry(countdown=RETRY_INTERVAL)
    # all tasks must have succeeded or failed
    elif not galaxy_export.successful():
        logger.error("Analysis '%s' failed while downloading results from "
                     "Galaxy", analysis)
        analysis.set_status(Analysis.FAILURE_STATUS)
        analysis.send_email()
        refinery_import.delete()
        galaxy_import.delete()
        galaxy_export.delete()
        analysis.galaxy_cleanup()
        return

    # attach workflow outputs back to dataset isatab graph
    if analysis.workflow.type == Workflow.ANALYSIS_TYPE:
        analysis.attach_outputs_dataset()
    elif analysis.workflow.type == Workflow.DOWNLOAD_TYPE:
        analysis.attach_outputs_downloads()
    else:
        logger.warning("Unknown workflow type '%s' in analysis '%s'",
                       analysis.workflow.type, analysis.name)

    analysis.set_status(Analysis.SUCCESS_STATUS)
    analysis.rename_results()
    analysis.send_email()
    logger.info("Analysis '%s' finished successfully", analysis)
    analysis.galaxy_cleanup()
    refinery_import.delete()
    galaxy_import.delete()
    galaxy_export.delete()

    # Update file count and file size of the corresponding data set
    analysis.data_set.file_count = analysis.data_set.get_file_count()
    analysis.data_set.file_size = analysis.data_set.get_file_size()
    analysis.data_set.save()
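run_analysis is a retry-driven state machine: each stage either creates and saves a TaskSet and then retries, or restores the saved TaskSetResult and checks ready()/successful() before moving on. The per-stage skeleton, distilled from the code above (all names here are placeholders for the pattern, not an API):

# One stage of the loop, illustrative only.
if not status.stage_task_group_id:                # stage not started yet
    result = TaskSet(tasks=stage_tasks).apply_async()
    result.save()
    status.stage_task_group_id = result.taskset_id
    status.save()
    run_analysis.retry(countdown=RETRY_INTERVAL)  # come back and re-check
result = TaskSetResult.restore(status.stage_task_group_id)
if not result.ready():
    run_analysis.retry(countdown=RETRY_INTERVAL)  # still running
elif not result.successful():
    clean_up_and_fail()                           # placeholder for the cleanup above
    return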
Code Example #22
File: tasks.py Project: pvanheus/refinery-platform
def get_taskset_result(task_group_id):
    return TaskSetResult.restore(task_group_id)
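A thin wrapper around restore(); callers still have to handle the None it returns for an unknown group id. A usage sketch:

result = get_taskset_result(task_group_id)
if result is not None and result.ready():
    outputs = result.join()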
Code Example #23
File: tasks.py Project: ShuhratBek/refinery-platform
def run_analysis(analysis_uuid):
    """Manage analysis execution"""
    RETRY_INTERVAL = 5  # seconds

    try:
        analysis = Analysis.objects.get(uuid=analysis_uuid)
    except (Analysis.DoesNotExist, Analysis.MultipleObjectsReturned) as exc:
        logger.error("Can not retrieve analysis with UUID '%s': '%s'",
                     analysis_uuid, exc)
        run_analysis.update_state(state=celery.states.FAILURE)
        return

    # if cancelled by user
    if analysis.failed():
        return

    try:
        analysis_status = AnalysisStatus.objects.get(analysis=analysis)
    except (AnalysisStatus.DoesNotExist,
            AnalysisStatus.MultipleObjectsReturned) as exc:
        logger.error("Can not retrieve status for analysis '%s': '%s'",
                     analysis, exc)
        run_analysis.update_state(state=celery.states.FAILURE)
        return

    if not analysis_status.refinery_import_task_group_id:
        logger.info("Starting analysis '%s'", analysis)
        analysis.set_status(Analysis.RUNNING_STATUS)
        logger.info("Starting input file import tasks for analysis '%s'",
                    analysis)
        refinery_import_tasks = []
        for input_file_uuid in analysis.get_input_file_uuid_list():
            refinery_import_task = import_file.subtask((input_file_uuid, ))
            refinery_import_tasks.append(refinery_import_task)
        refinery_import = TaskSet(tasks=refinery_import_tasks).apply_async()
        refinery_import.save()
        analysis_status.refinery_import_task_group_id = \
            refinery_import.taskset_id
        analysis_status.save()
        run_analysis.retry(countdown=RETRY_INTERVAL)

    # check if all files were successfully imported into Refinery
    refinery_import = TaskSetResult.restore(
        analysis_status.refinery_import_task_group_id)
    if not refinery_import.ready():
        logger.debug("Input file import pending for analysis '%s'", analysis)
        run_analysis.retry(countdown=RETRY_INTERVAL)
    elif not refinery_import.successful():
        error_msg = "Analysis '{}' failed during file import".format(analysis)
        logger.error(error_msg)
        analysis.set_status(Analysis.FAILURE_STATUS, error_msg)
        analysis.send_email()
        refinery_import.delete()
        return

    # import files into Galaxy and start analysis
    if not analysis_status.galaxy_import_task_group_id:
        logger.debug("Starting analysis execution in Galaxy")
        try:
            analysis.prepare_galaxy()
        except (requests.exceptions.ConnectionError,
                galaxy.client.ConnectionError):
            error_msg = "Analysis '{}' failed during preparation in " \
                        "Galaxy".format(analysis)
            logger.error(error_msg)
            analysis.set_status(Analysis.FAILURE_STATUS, error_msg)
            analysis.send_email()
            refinery_import.delete()
            return
        galaxy_import_tasks = [
            start_galaxy_analysis.subtask((analysis_uuid, )),
        ]
        galaxy_import = TaskSet(tasks=galaxy_import_tasks).apply_async()
        galaxy_import.save()
        analysis_status.galaxy_import_task_group_id = \
            galaxy_import.taskset_id
        analysis_status.set_galaxy_history_state(AnalysisStatus.PROGRESS)
        run_analysis.retry(countdown=RETRY_INTERVAL)

    # check if data files were successfully imported into Galaxy
    galaxy_import = TaskSetResult.restore(
        analysis_status.galaxy_import_task_group_id)
    if not galaxy_import.ready():
        logger.debug("Analysis '%s' pending in Galaxy", analysis)
        run_analysis.retry(countdown=RETRY_INTERVAL)
    elif not galaxy_import.successful():
        error_msg = "Analysis '{}' failed in Galaxy".format(analysis)
        logger.error(error_msg)
        analysis.set_status(Analysis.FAILURE_STATUS, error_msg)
        analysis_status.set_galaxy_history_state(AnalysisStatus.ERROR)
        analysis.send_email()
        refinery_import.delete()
        galaxy_import.delete()
        analysis.galaxy_cleanup()
        return

    # check if analysis has finished running in Galaxy
    try:
        percent_complete = analysis.galaxy_progress()
    except RuntimeError:
        analysis_status.set_galaxy_history_state(AnalysisStatus.ERROR)
        analysis.send_email()
        refinery_import.delete()
        galaxy_import.delete()
        analysis.galaxy_cleanup()
        return
    except galaxy.client.ConnectionError:
        analysis_status.set_galaxy_history_state(AnalysisStatus.UNKNOWN)
        run_analysis.retry(countdown=RETRY_INTERVAL)
    else:
        # workaround to avoid moving the progress bar backward
        if analysis_status.galaxy_history_progress < percent_complete:
            analysis_status.galaxy_history_progress = percent_complete
            analysis_status.save()
        if percent_complete < 100:
            analysis_status.set_galaxy_history_state(AnalysisStatus.PROGRESS)
            run_analysis.retry(countdown=RETRY_INTERVAL)
        else:
            analysis_status.set_galaxy_history_state(AnalysisStatus.OK)

    # retrieve analysis results from Galaxy
    if not analysis_status.galaxy_export_task_group_id:
        galaxy_export_tasks = get_galaxy_download_tasks(analysis)
        logger.info(
            "Starting downloading of results from Galaxy for analysis "
            "'%s'", analysis)
        galaxy_export = TaskSet(tasks=galaxy_export_tasks).apply_async()
        galaxy_export.save()
        analysis_status.galaxy_export_task_group_id = galaxy_export.taskset_id
        analysis_status.save()
        run_analysis.retry(countdown=RETRY_INTERVAL)

    # check if analysis results have finished downloading from Galaxy
    galaxy_export = TaskSetResult.restore(
        analysis_status.galaxy_export_task_group_id)
    if not galaxy_export.ready():
        logger.debug("Results download pending for analysis '%s'", analysis)
        run_analysis.retry(countdown=RETRY_INTERVAL)
    # all tasks must have succeeded or failed
    elif not galaxy_export.successful():
        error_msg = "Analysis '%s' failed while downloading results from  " \
                    "Galaxy".format(analysis)
        logger.error(error_msg)
        analysis.set_status(Analysis.FAILURE_STATUS, error_msg)
        analysis.send_email()
        refinery_import.delete()
        galaxy_import.delete()
        galaxy_export.delete()
        analysis.galaxy_cleanup()
        return

    # attach workflow outputs back to dataset isatab graph
    if analysis.workflow.type == Workflow.ANALYSIS_TYPE:
        analysis.attach_outputs_dataset()
    elif analysis.workflow.type == Workflow.DOWNLOAD_TYPE:
        analysis.attach_outputs_downloads()
    else:
        logger.warning("Unknown workflow type '%s' in analysis '%s'",
                       analysis.workflow.type, analysis.name)

    analysis.set_status(Analysis.SUCCESS_STATUS)
    analysis.rename_results()
    analysis.send_email()
    logger.info("Analysis '%s' finished successfully", analysis)
    analysis.galaxy_cleanup()
    refinery_import.delete()
    galaxy_import.delete()
    galaxy_export.delete()

    # Update file count and file size of the corresponding data set
    analysis.data_set.file_count = analysis.data_set.get_file_count()
    # FIXME: line below is causing analyses to be marked as failed
    # analysis.data_set.file_size = analysis.data_set.get_file_size()
    analysis.data_set.save()
Code Example #24
File: models.py Project: mmcardle/MServe
 def successful(self):
     tsr = TaskSetResult.restore(self.taskset_id)
     if tsr is not None and hasattr(tsr,"taskset_id"):
         return tsr.successful()
     else:
         return False