コード例 #1
0
 def test_run_experiment_twice(self):
     """Starting an already queued experiment must fail; after the
     first job completes the experiment can be started again."""
     # create experiment via the add form
     form = self.form.get_form()
     form.request.form.update({
         'form.buttons.save': 'Create and start',
     })
     # update form with updated request
     form.update()
     # the experiment has been created and its job queued
     jt = IJobTracker(self.experiments['my-experiment'])
     self.assertEqual(jt.state, u'QUEUED')
     # starting the job again while it is still queued must error out
     state = jt.start_job(form.request)
     self.assertEqual(state[0], 'error')
     # committing the transaction lets the (eagerly executed) task run
     transaction.commit()
     self.assertEqual(jt.state, u'COMPLETED')
     # TODO: after commit tasks cause site to disappear and the
     # following code will fail, because without site we can't find
     # a catalog without which we can't find the toolkit by uuid
     jt.start_job(form.request)
     # FIXME: why is this running? (would a transaction abort work as
     #        well? to refresh my object?)
     self.assertEqual(jt.state, u'RUNNING')
     transaction.commit()
     self.assertEqual(jt.state, u'COMPLETED')
コード例 #2
0
 def __call__(self):
     """Start the context's job and redirect back to the context.

     Any (type, message) pair the job tracker reports is surfaced to
     the user as a status message.
     """
     # TODO: could also just submit current context (the experiment)
     #       with all infos accessible from it
     msgtype, msg = IJobTracker(self.context).start_job(self.request)
     if msgtype is not None:
         IStatusMessage(self.request).add(msg, type=msgtype)
     self.request.response.redirect(self.context.absolute_url())
コード例 #3
0
ファイル: job.py プロジェクト: sarahrichmond/org.bccvl.site
 def __call__(self):
     """Start the job for the current context and redirect back to it.

     Any (message type, message) pair returned by the job tracker is
     shown to the user as a status message.
     """
     # TODO: could also just submit current context (the experiment)
     #       with all infos accessible from it
     jt = IJobTracker(self.context)
     msgtype, msg = jt.start_job(self.request)
     if msgtype is not None:
         IStatusMessage(self.request).add(msg, type=msgtype)
     self.request.response.redirect(self.context.absolute_url())
コード例 #4
0
 def handle_delete(self, action):
     """Soft-delete the context object.

     Clears the file blob (so the frontend stops offering it), marks
     the job state as REMOVED, reindexes, and redirects the user to
     the grandparent container with a confirmation message.
     """
     context = self.context
     title = context.Title()
     parent = aq_parent(aq_inner(context))
     # drop the blob; the frontend Javascript treats a missing file
     # as a removed dataset
     if hasattr(context, "file"):
         context.file = None
     IJobTracker(context).state = 'REMOVED'
     context.reindexObject()
     IStatusMessage(self.request).add(
         u'{0[title]} has been removed.'.format({u'title': title}))
     self.request.response.redirect(aq_parent(parent).absolute_url())
コード例 #5
0
 def handle_delete(self, action):
     """Soft-delete the context: clear its file data, mark the job
     state as REMOVED and redirect to the grandparent container."""
     title = self.context.Title()
     parent = aq_parent(aq_inner(self.context))
     # clear the blob; the frontend Javascript treats a missing file
     # as a removed dataset
     if hasattr(self.context, "file"):
         self.context.file = None
     jt = IJobTracker(self.context)
     jt.state = 'REMOVED'
     self.context.reindexObject()
     # tell the user and send them back up two levels
     IStatusMessage(self.request).add(
         u'{0[title]} has been removed.'.format({u'title': title}))
     self.request.response.redirect(aq_parent(parent).absolute_url())
コード例 #6
0
 def test_add_experiment(self):
     """Submitting the add form creates an ensemble experiment with
     one result container and a queued job that completes on commit."""
     form = self.form.get_form()
     form.request.form.update({
         'form.buttons.save': 'Create and start',
     })
     # update form with updated request
     form.update()
     # form submitted without errors
     self.assertEqual(form.status, u'')
     self.assertEqual(len(form.widgets.errors), 0)
     self.assertIn('my-en-experiment', self.experiments)
     exp = self.experiments['my-en-experiment']
     # datasets maps the sdm experiment uid to its projection uids
     self.assertEqual(exp.datasets, {
         unicode(self.form.sdmexp.UID()):
         [unicode(self.form.sdmproj.UID())]
     })
     # get result container: (there is only one)
     self.assertEqual(len(exp.objectIds()), 1)
     result = exp.objectValues()[0]
     # FIXME: test result.job_params
     self.assertEqual(result.job_params['datasets'],
                      [unicode(self.form.sdmproj.UID())])
     self.assertEqual(result.job_params['resolution'], u'Resolution30s')
     # no result files yet
     self.assertEqual(len(result.keys()), 0)
     # test job state
     jt = IJobTracker(exp)
     self.assertEqual(jt.state, u'QUEUED')
     # after transaction commit the job should finish
     transaction.commit()
     self.assertEqual(jt.state, u'COMPLETED')
     # and we should have a result as well
     self.assertGreaterEqual(len(result.keys()), 1)
コード例 #7
0
 def test_add_experiment(self):
     """Submitting the add form creates a species-traits experiment
     with one result container and a queued job that completes on
     commit."""
     form = self.form.get_form()
     form.request.form.update({
         'form.buttons.save': 'Create and start',
     })
     # update form with updated request
     form.update()
     # form submitted without errors
     self.assertEqual(form.status, u'')
     self.assertEqual(len(form.widgets.errors), 0)
     self.assertIn('my-st-experiment', self.experiments)
     exp = self.experiments['my-st-experiment']
     # TODO: update asserts
     self.assertEqual(exp.data_table, unicode(self.form.traitsds.UID()))
     self.assertEqual(exp.algorithm, unicode(self.form.algorithm.UID()))
     self.assertEqual(exp.formula, u'Z ~ X + Y')
     # FIXME: submitting with an empty model list doesn't cause form to fail
     # get result container: (there is only one)
     self.assertEqual(len(exp.objectIds()), 1)
     result = exp.objectValues()[0]
     # FIXME: test result.job_params
     self.assertEqual(result.job_params['algorithm'],
                      self.form.algorithm.getId())
     self.assertEqual(result.job_params['data_table'],
                      unicode(self.form.traitsds.UID()))
     # no result files yet
     self.assertEqual(len(result.keys()), 0)
     # test job state
     jt = IJobTracker(exp)
     self.assertEqual(jt.state, u'QUEUED')
     # after transaction commit the job should finish
     transaction.commit()
     self.assertEqual(jt.state, u'COMPLETED')
     # and we should have a result as well
     self.assertGreaterEqual(len(result.keys()), 1)
コード例 #8
0
 def test_add_experiment(self):
     """Submitting the add form creates an SDM experiment with one
     result container and a queued job that completes on commit."""
     form = self.form.get_form()
     form.request.form.update({
         'form.buttons.save': 'Create and start',
     })
     # update form with updated request
     form.update()
     # form submitted without errors
     self.assertEqual(form.status, u'')
     self.assertEqual(len(form.widgets.errors), 0)
     self.assertIn('my-experiment', self.experiments)
     exp = self.experiments['my-experiment']
     # environmental datasets map dataset uid -> selected layers
     self.assertEqual(exp.environmental_datasets.keys(),
                      [unicode(self.form.current.UID())])
     self.assertEqual(exp.environmental_datasets.values(),
                      [set([u'B01', u'B02'])])
     # get result container: (there is only one)
     self.assertEqual(len(exp.objectIds()), 1)
     result = exp.objectValues()[0]
     # FIXME: test result.job_params
     self.assertEqual(result.job_params['function'], 'bioclim')
     self.assertEqual(result.job_params['environmental_datasets'],
                      exp.environmental_datasets)
     # no result files yet
     self.assertEqual(len(result.keys()), 0)
     # test job state
     jt = IJobTracker(exp)
     self.assertEqual(jt.state, u'QUEUED')
     # after transaction commit the job should finish
     transaction.commit()
     self.assertEqual(jt.state, u'COMPLETED')
     # and we should have a result as well
     self.assertGreaterEqual(len(result.keys()), 1)
コード例 #9
0
 def start_job(self, request):
     """Submit one projection job per (sdm model, future dataset) pair.

     Returns a (message type, message) tuple for user display:
     ('info', ...) on submission, ('error', ...) if a job is already
     active or no compute method utility is registered.
     """
     if not self.is_active():
         # get utility to execute this experiment
         method = queryUtility(IComputeMethod,
                               name=IProjectionExperiment.__identifier__)
         if method is None:
             # TODO: lookup by script type (Perl, Python, etc...)
             return ('error',
                     u"Can't find method to run Projection Experiment")
         # NOTE(review): only the first sdm experiment is used here —
         # confirm this is intended when multiple experiments are set
         expuuid = self.context.species_distribution_models.keys()[0]
         exp = uuidToObject(expuuid)
         # TODO: what if two datasets provide the same layer?
         # start a new job for each sdm and future dataset
         for sdmuuid in self.context.species_distribution_models[expuuid]:
             for dsuuid in self.context.future_climate_datasets:
                 dsbrain = uuidToCatalogBrain(dsuuid)
                 dsmd = IBCCVLMetadata(dsbrain.getObject())
                 futurelayers = set(dsmd['layers'].keys())
                 # match sdm exp layers with future dataset layers:
                 # layers provided by the future dataset are keyed
                 # under the future dataset's uuid ...
                 projlayers = {}
                 for ds, dslayerset in exp.environmental_datasets.items():
                     # add matching layers
                     projlayers.setdefault(dsuuid, set()).update(dslayerset.intersection(futurelayers))
                     # ... while unmatched layers stay keyed under the
                     # original environmental dataset
                     projlayers[ds] = dslayerset - futurelayers
                     if not projlayers[ds]:
                         # remove if all layers replaced
                         del projlayers[ds]
                 # create result
                 result = self._create_result_container(sdmuuid, dsbrain, projlayers)
                 # update provenance
                 self._createProvenance(result)
                 # submit job
                 LOG.info("Submit JOB project to queue")
                 method(result, "project")  # TODO: wrong interface
                 resultjt = IJobTracker(result)
                 resultjt.new_job('TODO: generate id',
                                  'generate taskname: projection experiment')
                 resultjt.set_progress('PENDING',
                                       u'projection pending')
         return 'info', u'Job submitted {0} - {1}'.format(self.context.title, self.state)
     else:
         # TODO: in case there is an error should we abort the transaction
         #       to cancel previously submitted jobs?
         return 'error', u'Current Job is still running'
コード例 #10
0
 def states(self):
     """Return (id, state) pairs for each trackable child of context.

     Children without a job tracker adapter, or whose tracker reports
     no state, are skipped.
     """
     result = []
     for obj in self.context.values():
         tracker = IJobTracker(obj, None)
         if tracker is None:
             continue
         current = tracker.state
         if current:
             result.append((obj.getId(), current))
     return result
コード例 #11
0
    def start_job(self, request):
        """Submit a single ensemble job for this experiment.

        Returns a (message type, message) tuple: ('info', ...) when the
        job is queued, ('error', ...) if a job is already running or no
        compute method utility is registered.
        """
        if not self.is_active():
            # get utility to execute this experiment
            method = queryUtility(IComputeMethod,
                                  name=IEnsembleExperiment.__identifier__)
            if method is None:
                return ('error',
                        u"Can't find method to run Ensemble Experiment")

            # create result container
            title = u'{} - ensemble {}'.format(
                self.context.title, datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
            result = createContentInContainer(
                self.context,
                'Folder',
                title=title)

            # build job_params and store on result
            # FIXME: probably should split ensemble jobs based on resolution
            #        for now pick first one to make result import work
            #        datasets is dict with expkey and list of datasets...
            #           can probably get resolution from exp or ds
            dsid = self.context.datasets.values()[0][0]
            dsmd = IBCCVLMetadata(uuidToObject(dsid))
            result.job_params = {
                'datasets': list(chain.from_iterable(self.context.datasets.values())),
                'resolution': dsmd['resolution']
            }
            # update provenance
            self._createProvenance(result)

            # submit job to queue
            LOG.info("Submit JOB Ensemble to queue")
            method(result, "ensemble")  # TODO: wrong interface
            resultjt = IJobTracker(result)
            resultjt.new_job('TODO: generate id',
                             'generate taskname: ensemble')
            resultjt.set_progress('PENDING',
                                  'ensemble pending')
            return 'info', u'Job submitted {0} - {1}'.format(self.context.title, self.state)
        else:
            return 'error', u'Current Job is still running'
コード例 #12
0
    def start_job(self, request):
        """Submit one SDM job per selected algorithm function.

        Returns a (message type, message) tuple: ('info', ...) when the
        jobs are queued, ('error', ...) if a job is already running or
        no compute method utility is registered.
        """
        # split sdm jobs across multiple algorithms,
        # and multiple species input datasets
        # TODO: rethink and maybe split jobs based on enviro input datasets?
        if not self.is_active():
            for func in (uuidToObject(f) for f in self.context.functions):
                # get utility to execute this experiment
                method = queryUtility(IComputeMethod,
                                      name=ISDMExperiment.__identifier__)
                if method is None:
                    return ('error',
                            u"Can't find method to run SDM Experiment")
                # create result object:
                # TODO: refactor this out into helper method
                title = u'{} - {} {}'.format(self.context.title, func.getId(),
                                             datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
                result = self._create_result_container(title)

                # Build job_params store them on result and submit job
                result.job_params = {
                    'resolution': IBCCVLMetadata(self.context)['resolution'],
                    'function': func.getId(),
                    'species_occurrence_dataset': self.context.species_occurrence_dataset,
                    'species_absence_dataset': self.context.species_absence_dataset,
                    'species_pseudo_absence_points': self.context.species_pseudo_absence_points,
                    'species_number_pseudo_absence_points': self.context.species_number_pseudo_absence_points,
                    'environmental_datasets': self.context.environmental_datasets,
                }
                # add toolkit params:
                result.job_params.update(self.context.parameters[IUUID(func)])
                self._createProvenance(result)
                # submit job
                LOG.info("Submit JOB %s to queue", func.getId())
                method(result, func)
                resultjt = IJobTracker(result)
                resultjt.new_job('TODO: generate id',
                                 'generate taskname: sdm_experiment')
                resultjt.set_progress('PENDING',
                                      u'{} pending'.format(func.getId()))
            return 'info', u'Job submitted {0} - {1}'.format(self.context.title, self.state)
        else:
            return 'error', u'Current Job is still running'
コード例 #13
0
    def handleStartJob(self, action):
        """Form action: validate the form, start the context's job and
        redirect back to the context, surfacing any tracker message."""
        data, errors = self.extractData()
        if errors:
            # re-display the form with the standard error status
            self.status = self.formErrorsMessage
            return

        status, message = IJobTracker(self.context).start_job(self.request)
        if status is not None:
            IStatusMessage(self.request).add(message, type=status)
        self.request.response.redirect(self.context.absolute_url())
コード例 #14
0
    def start_job(self, request):
        """Submit a species-traits job for this experiment.

        Returns a (message type, message) tuple: ('info', ...) when the
        job is queued, ('error', ...) if a job is already running or no
        compute method utility is registered.
        """
        if not self.is_active():
            # get utility to execute this experiment
            method = queryUtility(IComputeMethod,
                                  name=ISpeciesTraitsExperiment.__identifier__)
            if method is None:
                return ('error',
                        u"Can't find method to run Species Traits Experiment")
            # iterate over all datasets and group them by emsc,gcm,year
            algorithm = uuidToCatalogBrain(self.context.algorithm)

            # create result object:
            # TODO: refactor this out into helper method
            title = u'{} - {} {}'.format(self.context.title, algorithm.id,
                                     datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
            result = createContentInContainer(
                self.context,
                'Folder',
                title=title)

            # Build job_params store them on result and submit job
            result.job_params = {
                'algorithm': algorithm.id,
                'formula': self.context.formula,
                'data_table': self.context.data_table,
            }
            # add toolkit params:
            result.job_params.update(self.context.parameters[algorithm.UID])
            # update provenance
            self._createProvenance(result)
            # submit job
            LOG.info("Submit JOB %s to queue", algorithm.id)
            method(result, algorithm.getObject())
            resultjt = IJobTracker(result)
            # NOTE(review): taskname says 'sdm_experiment' — looks
            # copied from the SDM tracker; confirm it is intentional
            resultjt.new_job('TODO: generate id',
                             'generate taskname: sdm_experiment')
            resultjt.set_progress('PENDING',
                                  u'{} pending'.format(algorithm.id))
            return 'info', u'Job submitted {0} - {1}'.format(self.context.title, self.state)
        else:
            return 'error', u'Current Job is still running'
コード例 #15
0
    def pullOccurrenceFromALA(self, lsid, taxon,  common=None):
        """Create a species occurrence dataset and queue an ALA import.

        :param lsid: ALA life-science identifier of the taxon
        :param taxon: scientific name, used for title and metadata
        :param common: optional vernacular name, appended to the title
        :returns: (status, message) tuple from the import job tracker
        """
        # TODO: check permisions?
        # 1. create new dataset with taxon, lsid and common name set
        portal = getToolByName(self.context, 'portal_url').getPortalObject()
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][defaults.DATASETS_SPECIES_FOLDER_ID]

        title = [taxon]
        if common:
            title.append(u"({})".format(common))
        # TODO: check whether title will be updated in transmog import?
        #       set title now to "Whatever (import pending)"?
        # TODO: make sure we get a better content id that dataset-x
        ds = createContentInContainer(dscontainer,
                                      'org.bccvl.content.dataset',
                                      title=u' '.join(title))
        # TODO: add number of occurences to description
        ds.description = u' '.join(title) + u' imported from ALA'
        md = IBCCVLMetadata(ds)
        # TODO: provenance ... import url?
        # FIXME: verify input parameters before adding to graph
        md['genre'] = 'DataGenreSpeciesOccurrence'
        md['species'] = {
            'scientificName': taxon,
            'taxonID': lsid,
        }
        if common:
            md['species']['vernacularName'] = common
        IStatusMessage(self.request).add('New Dataset created',
                                         type='info')

        # 2. create and push alaimport job for dataset
        # TODO: make this named adapter
        jt = IJobTracker(ds)
        # NOTE(review): start_job is called without a request here,
        # unlike the experiment trackers — presumably a different
        # adapter signature; confirm
        status, message = jt.start_job()
        # reindex object to make sure everything is up to date
        ds.reindexObject()
        # Job submission state notifier
        IStatusMessage(self.request).add(message, type=status)

        return (status, message)
コード例 #16
0
 def handleAdd(self, action):
     """Create the content object from form data and auto-start its job."""
     data, errors = self.extractData()
     self.validateAction(data)
     if errors:
         self.status = self.formErrorsMessage
         return
     # NOTE: createAndAdd does not hand back the acquisition-wrapped
     # object (probably a bug in the base form), so re-fetch it below.
     newob = self.createAndAdd(data)
     if newob is None:
         # creation/add failed for some reason; nothing more to do
         # (this is probably an error condition)
         return
     # re-acquire the wrapped instance of the freshly added object
     newob = self.context[newob.id]
     # only mark the add as finished once we actually have the object
     self._finishedAdd = True
     IStatusMessage(self.request).addStatusMessage(_(u"Item created"),
                                                   "info")
     # immediately kick off the job for the new object
     status, message = IJobTracker(newob).start_job(self.request)
     if status is not None:
         IStatusMessage(self.request).add(message, type=status)
コード例 #17
0
 def get_state_css(self, itemob=None):
     """Return the css class for the job state of itemob (or context).

     Catalog brains are resolved to their objects first.  Unknown
     states render as 'info'; no state at all counts as 'success'
     (e.g. content that never had a job).
     """
     target = itemob or self.context
     if ICatalogBrain.providedBy(target):
         target = target.getObject()
     state_to_css = {
         None: 'success',
         'QUEUED': 'info',
         'RUNNING': 'info',
         'COMPLETED': 'success',
         'FAILED': 'error',
         'REMOVED': 'removed',
     }
     return state_to_css.get(IJobTracker(target).state, 'info')
コード例 #18
0
 def test_import_view_ala_import(self):
     """Importing an ALA occurrence set creates a dataset, queues the
     import job, and fills in metadata and file data on commit."""
     # TODO: this test needs a running DataMover. (see below))
     testdata = {
         'taxonID': 'urn:lsid:biodiversity.org.au:afd.taxon:dadb5555-d286-4862-b1dd-ea549b1c05a5',
         'scientificName': 'Pteria penguin',
         'vernacularName': 'Black Banded Winged Pearl Shell'
     }
     view =  self.getview()
     view.request.form.update({
         'import': 'Import',
         'lsid': testdata['taxonID'],
         'taxon': testdata['scientificName'],
         'common': testdata['vernacularName']
     })
     # call view:
     view()
     # response should redirect to datasets
     self.assertEqual(view.request.response.getStatus(), 302)
     self.assertEqual(view.request.response.getHeader('Location'),
                      self.portal.datasets.absolute_url())
     # get new dataset and check state?
     ds = self.portal.datasets.species['org-bccvl-content-dataset']
     # check metadata
     from org.bccvl.site.interfaces import IBCCVLMetadata
     md = IBCCVLMetadata(ds)
     self.assertEqual(md['species'], testdata)
     # check job state
     from org.bccvl.site.interfaces import IJobTracker
     jt =  IJobTracker(ds)
     self.assertEqual(jt.state, 'QUEUED')
     # commit transaction to start job
     # TODO: this test needs a running DataMover. (see below))
     # TODO: we should Mock org.bccvl.tasks.datamover.DataMover (generate files as requested?)
     #       and maybe org.bccvl.tasks.plone.import_ala
     transaction.commit()
     # celery should run in eager mode so our job state should be up to date as well
     self.assertEqual(jt.state, 'COMPLETED')
     # we should have a bit more metadata and still the same as before import
     self.assertEqual(md['species'], testdata)
     self.assertEqual(md['genre'], 'DataGenreSpeciesOccurrence')
     self.assertEqual(md['rows'], 23)
     self.assertEqual(md['headers'], ['species', 'lon', 'lat'])
     self.assertEqual(md['bounds'], {'top': -5.166, 'right': 167.68167, 'left': 48.218334197998, 'bottom': -28.911835})
     # check that there is a file as well
     self.assertIsNotNone(ds.file)
     self.assertIsNotNone(ds.file.data)
     self.assertGreater(len(ds.file.data), 0)
コード例 #19
0
    def test_add_experiment(self):
        """Submitting the add form creates a biodiverse experiment with
        one result container and a queued job that completes on commit."""
        form = self.form.get_form()
        form.request.form.update({
            'form.buttons.save': 'Create and start',
        })
        # update form with updated request
        form.update()
        # form submitted without errors
        self.assertEqual(form.status, u'')
        self.assertEqual(len(form.widgets.errors), 0)
        self.assertIn('my-bd-experiment', self.experiments)
        exp = self.experiments['my-bd-experiment']
        # TODO: update asserts
        # projection maps sdm experiment uid -> projection uid -> threshold
        self.assertEqual(
            exp.projection, {
                unicode(self.form.sdmexp.UID()): {
                    unicode(self.form.sdmproj.UID()): {
                        'value': Decimal('0.0'),
                        'label': '0.0'
                    }
                }
            })
        # FIXME: submitting with an empty model list doesn't cause form to fail
        self.assertEqual(exp.cluster_size, 5000)

        # get result container: (there is only one)
        self.assertEqual(len(exp.objectIds()), 1)
        result = exp.objectValues()[0]
        # FIXME: test result.job_params
        self.assertEqual(result.job_params['cluster_size'], 5000)
        self.assertEqual(result.job_params['projections'],
                         [{
                             "dataset": self.form.sdmproj.UID(),
                             "threshold": {
                                 'label': '0.0',
                                 'value': Decimal('0.0')
                             }
                         }])
        # no result files yet
        self.assertEqual(len(result.keys()), 0)
        # test job state
        jt = IJobTracker(exp)
        self.assertEqual(jt.state, u'QUEUED')
        # after transaction commit the job should finish
        transaction.commit()
        self.assertEqual(jt.state, u'COMPLETED')
        # and we should have a result as well
        self.assertGreaterEqual(len(result.keys()), 1)
コード例 #20
0
    def test_add_experiment(self):
        """Submitting the add form creates a climate-change experiment
        with one result container and a queued job that completes on
        commit."""
        form = self.form.get_form()
        form.request.form.update({
            'form.buttons.save': 'Create and start',
        })
        # update form with updated request
        form.update()
        # form submitted without errors
        self.assertEqual(form.status, u'')
        self.assertEqual(len(form.widgets.errors), 0)
        self.assertIn('my-cc-experiment', self.experiments)
        exp = self.experiments['my-cc-experiment']
        # TODO: update asserts
        self.assertEqual(exp.future_climate_datasets,
                         [unicode(self.form.future.UID())])
        # FIXME: submitting with an empty model list doesn't cause form to fail
        self.assertEqual(exp.species_distribution_models,
                         {self.form.sdmexp.UID(): [self.form.sdmmodel.UID()]})

        # get result container: (there is only one)
        self.assertEqual(len(exp.objectIds()), 1)
        result = exp.objectValues()[0]
        # FIXME: test result.job_params
        self.assertEqual(
            result.job_params['future_climate_datasets'],
            {exp.future_climate_datasets[0]: set([u'B01', u'B02'])})
        self.assertEqual(result.job_params['species_distribution_models'],
                         exp.species_distribution_models.values()[0]
                         [0])  # only one experiment so only first model
        self.assertEqual(result.job_params['resolution'], u'Resolution30m')
        self.assertEqual(result.job_params['emsc'], u'RCP3PD')
        self.assertEqual(result.job_params['gcm'], u'cccma-cgcm31')
        self.assertEqual(result.job_params['year'], u'2015')
        # no result files yet
        self.assertEqual(len(result.keys()), 0)
        # test job state
        jt = IJobTracker(exp)
        self.assertEqual(jt.state, u'QUEUED')
        # after transaction commit the job should finish
        transaction.commit()
        self.assertEqual(jt.state, u'COMPLETED')
        # and we should have a result as well
        self.assertGreaterEqual(len(result.keys()), 1)
コード例 #21
0
 def __call__(self, **kw):
     """Return the job state for the context dataset.

     Datasets imported without a job have no recorded state; for those
     we infer COMPLETED/FAILED from whether the dataset has any data.
     """
     state = IJobTracker(self.context).state
     if state:
         return state
     # no recorded state (e.g. imported datasets): fall back to
     # checking for the presence of actual data
     if IBlobDataset.providedBy(self.context):
         return 'COMPLETED' if self.context.file is not None else 'FAILED'
     if IRemoteDataset.providedBy(self.context):
         return 'COMPLETED' if self.context.remoteUrl else 'FAILED'
     # neither dataset kind: pass the empty state through unchanged
     return state
コード例 #22
0
    def start_job(self, request):
        """Submit one SDM job per selected algorithm function.

        Returns a (message type, message) tuple: ('info', ...) when the
        jobs are queued, ('error', ...) if a job is already running or
        no compute method utility is registered.
        """
        # split sdm jobs across multiple algorithms,
        # and multiple species input datasets
        # TODO: rethink and maybe split jobs based on enviro input datasets?
        if not self.is_active():
            for func in (uuidToObject(f) for f in self.context.functions):
                # get utility to execute this experiment
                method = queryUtility(IComputeMethod,
                                      name=ISDMExperiment.__identifier__)
                if method is None:
                    return ('error',
                            u"Can't find method to run SDM Experiment")
                # create result object:
                # TODO: refactor this out into helper method
                title = u'{} - {} {}'.format(
                    self.context.title, func.getId(),
                    datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
                result = self._create_result_container(title)

                # Build job_params store them on result and submit job
                result.job_params = {
                    'resolution': IBCCVLMetadata(self.context)['resolution'],
                    'function': func.getId(),
                    'species_occurrence_dataset':
                    self.context.species_occurrence_dataset,
                    'species_absence_dataset':
                    self.context.species_absence_dataset,
                    'species_pseudo_absence_points':
                    self.context.species_pseudo_absence_points,
                    'species_number_pseudo_absence_points':
                    self.context.species_number_pseudo_absence_points,
                    'environmental_datasets':
                    self.context.environmental_datasets,
                }
                # add toolkit params:
                result.job_params.update(self.context.parameters[IUUID(func)])
                self._createProvenance(result)
                # submit job
                LOG.info("Submit JOB %s to queue", func.getId())
                method(result, func)
                resultjt = IJobTracker(result)
                resultjt.new_job('TODO: generate id',
                                 'generate taskname: sdm_experiment')
                resultjt.set_progress('PENDING',
                                      u'{} pending'.format(func.getId()))
            return 'info', u'Job submitted {0} - {1}'.format(
                self.context.title, self.state)
        else:
            return 'error', u'Current Job is still running'
コード例 #23
0
 def start_job(self, request):
     """Submit one projection job per (sdm model, future dataset) pair.

     Returns a (message type, message) tuple for user display:
     ('info', ...) on submission, ('error', ...) if a job is already
     active or no compute method utility is registered.
     """
     if not self.is_active():
         # get utility to execute this experiment
         method = queryUtility(IComputeMethod,
                               name=IProjectionExperiment.__identifier__)
         if method is None:
             # TODO: lookup by script type (Perl, Python, etc...)
             return ('error',
                     u"Can't find method to run Projection Experiment")
         # NOTE(review): only the first sdm experiment is used here —
         # confirm this is intended when multiple experiments are set
         expuuid = self.context.species_distribution_models.keys()[0]
         exp = uuidToObject(expuuid)
         # TODO: what if two datasets provide the same layer?
         # start a new job for each sdm and future dataset
         for sdmuuid in self.context.species_distribution_models[expuuid]:
             for dsuuid in self.context.future_climate_datasets:
                 dsbrain = uuidToCatalogBrain(dsuuid)
                 dsmd = IBCCVLMetadata(dsbrain.getObject())
                 futurelayers = set(dsmd['layers'].keys())
                 # match sdm exp layers with future dataset layers:
                 # layers provided by the future dataset are keyed
                 # under the future dataset's uuid ...
                 projlayers = {}
                 for ds, dslayerset in exp.environmental_datasets.items():
                     # add matching layers
                     projlayers.setdefault(dsuuid, set()).update(
                         dslayerset.intersection(futurelayers))
                     # ... while unmatched layers stay keyed under the
                     # original environmental dataset
                     projlayers[ds] = dslayerset - futurelayers
                     if not projlayers[ds]:
                         # remove if all layers replaced
                         del projlayers[ds]
                 # create result
                 result = self._create_result_container(
                     sdmuuid, dsbrain, projlayers)
                 # update provenance
                 self._createProvenance(result)
                 # submit job
                 LOG.info("Submit JOB project to queue")
                 method(result, "project")  # TODO: wrong interface
                 resultjt = IJobTracker(result)
                 resultjt.new_job(
                     'TODO: generate id',
                     'generate taskname: projection experiment')
                 resultjt.set_progress('PENDING', u'projection pending')
         return 'info', u'Job submitted {0} - {1}'.format(
             self.context.title, self.state)
     else:
         # TODO: in case there is an error should we abort the transaction
         #       to cancel previously submitted jobs?
         return 'error', u'Current Job is still running'
コード例 #24
0
    def start_job(self, request):
        """Queue an ensemble job for this experiment.

        Creates a result container, records provenance and hands the
        job to the compute backend.  Returns a (msgtype, message)
        tuple: 'info' on successful submission, 'error' otherwise.
        """
        if self.is_active():
            return 'error', u'Current Job is still running'
        # look up the utility that knows how to execute this experiment
        compute = queryUtility(IComputeMethod,
                               name=IEnsembleExperiment.__identifier__)
        if compute is None:
            return ('error',
                    u"Can't find method to run Ensemble Experiment")

        # container object that will hold the job outputs
        stamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
        title = u'{} - ensemble {}'.format(self.context.title, stamp)
        result = createContentInContainer(self.context,
                                          'Folder',
                                          title=title)

        # build job_params and store on result
        # FIXME: probably should split ensemble jobs based on resolution
        #        for now pick first one to make result import work
        #        datasets is dict with expkey and list of datasets...
        #           can probably get resolution from exp or ds
        first_ds = self.context.datasets.values()[0][0]
        first_md = IBCCVLMetadata(uuidToObject(first_ds))
        result.job_params = {
            'datasets': [ds for group in self.context.datasets.values()
                         for ds in group],
            'resolution': first_md['resolution']
        }
        # record provenance before the job runs
        self._createProvenance(result)

        # submit job to queue
        LOG.info("Submit JOB Ensemble to queue")
        compute(result, "ensemble")  # TODO: wrong interface
        tracker = IJobTracker(result)
        tracker.new_job('TODO: generate id',
                        'generate taskname: ensemble')
        tracker.set_progress('PENDING', 'ensemble pending')
        return 'info', u'Job submitted {0} - {1}'.format(
            self.context.title, self.state)
コード例 #25
0
    def start_job(self, request):
        """Queue a species-traits job for this experiment.

        Creates a result container for the selected algorithm, records
        provenance and submits the job.  Returns a (msgtype, message)
        tuple: 'info' on successful submission, 'error' otherwise.
        """
        if self.is_active():
            return 'error', u'Current Job is still running'
        # look up the utility able to execute this experiment
        compute = queryUtility(IComputeMethod,
                               name=ISpeciesTraitsExperiment.__identifier__)
        if compute is None:
            return ('error',
                    u"Can't find method to run Species Traits Experiment")
        algorithm = uuidToCatalogBrain(self.context.algorithm)

        # create result object:
        # TODO: refactor this out into helper method
        stamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
        title = u'{} - {} {}'.format(self.context.title, algorithm.id, stamp)
        result = createContentInContainer(self.context,
                                          'Folder',
                                          title=title)

        # assemble job parameters, including the per-toolkit ones,
        # and store them on the result
        params = {
            'algorithm': algorithm.id,
            'formula': self.context.formula,
            'data_table': self.context.data_table,
        }
        params.update(self.context.parameters[algorithm.UID])
        result.job_params = params
        # record provenance before the job runs
        self._createProvenance(result)
        # submit job
        LOG.info("Submit JOB %s to queue", algorithm.id)
        compute(result, algorithm.getObject())
        tracker = IJobTracker(result)
        tracker.new_job('TODO: generate id',
                        'generate taskname: sdm_experiment')
        tracker.set_progress('PENDING',
                             u'{} pending'.format(algorithm.id))
        return 'info', u'Job submitted {0} - {1}'.format(
            self.context.title, self.state)
コード例 #26
0
    def start_job(self, request):
        """Queue one biodiverse job per (emsc, gcm, year, resolution) group.

        Groups the experiment's projection datasets by climate scenario
        metadata and submits a separate job for each group.  Returns a
        (msgtype, message) tuple: 'info' on success, 'error' if a job
        is already running or no compute method is registered.
        """
        # TODO: split biodiverse job across years, gcm, emsc
        if not self.is_active():
            # get utility to execute this experiment
            method = queryUtility(IComputeMethod,
                                  name=IBiodiverseExperiment.__identifier__)
            if method is None:
                return ('error',
                        u"Can't find method to run Biodiverse Experiment")

            # iterate over all datasets and group them by emsc,gcm,year
            # FIXME: add resolution grouping?
            # NOTE: renamed from 'datasets' so the grouping loop below no
            #       longer rebinds the dict it iterates over
            groups = {}
            for projds, threshold in chain.from_iterable(
                    map(lambda x: x.items(),
                        self.context.projection.itervalues())):
                dsobj = uuidToObject(projds)
                dsmd = IBCCVLMetadata(dsobj)

                emsc = dsmd.get('emsc')
                gcm = dsmd.get('gcm')
                period = dsmd.get('temporal')
                resolution = dsmd.get('resolution')
                # datasets without temporal metadata are current-climate
                if not period:
                    year = 'current'
                else:
                    # period is known truthy here; the old
                    # 'if period else None' conditional was dead code
                    year = Period(period).start
                key = (emsc, gcm, year, resolution)
                groups.setdefault(key, []).append((projds, threshold))

            # create one job per dataset group
            for key, group in groups.items():
                (emsc, gcm, year, resolution) = key

                # create result object:
                if year == 'current':
                    title = u'{} - biodiverse {} {}'.format(
                        self.context.title, year,
                        datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
                else:
                    title = u'{} - biodiverse {}_{}_{} {}'.format(
                        self.context.title, emsc, gcm, year,
                        datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
                result = createContentInContainer(
                    self.context,
                    'Folder',
                    title=title)

                dss = []
                for ds, thresh in group:
                    dss.append({
                        'dataset': ds,
                        'threshold': thresh
                    })

                # build job_params and store on result
                result.job_params = {
                    # datasets is a list of dicts with 'threshold' and 'uuid'
                    'projections': dss,
                    'cluster_size': self.context.cluster_size,
                }
                # update provenance
                self._createProvenance(result)

                # submit job to queue
                LOG.info("Submit JOB Biodiverse to queue")
                method(result, "biodiverse")  # TODO: wrong interface
                resultjt = IJobTracker(result)
                resultjt.new_job('TODO: generate id',
                                 'generate taskname: biodiverse')
                resultjt.set_progress('PENDING',
                                      'biodiverse pending')
            return 'info', u'Job submitted {0} - {1}'.format(self.context.title, self.state)
        else:
            return 'error', u'Current Job is still running'
コード例 #27
0
    def start_job(self, request):
        """Queue one biodiverse job per (emsc, gcm, year, resolution) group.

        Groups the experiment's projection datasets by climate scenario
        metadata and submits a separate job for each group.  Returns a
        (msgtype, message) tuple: 'info' on success, 'error' if a job
        is already running or no compute method is registered.
        """
        # TODO: split biodiverse job across years, gcm, emsc
        if not self.is_active():
            # get utility to execute this experiment
            method = queryUtility(IComputeMethod,
                                  name=IBiodiverseExperiment.__identifier__)
            if method is None:
                return ('error',
                        u"Can't find method to run Biodiverse Experiment")

            # iterate over all datasets and group them by emsc,gcm,year
            # FIXME: add resolution grouping?
            # NOTE: renamed from 'datasets' so the grouping loop below no
            #       longer rebinds the dict it iterates over
            groups = {}
            for projds, threshold in chain.from_iterable(
                    map(lambda x: x.items(),
                        self.context.projection.itervalues())):
                dsobj = uuidToObject(projds)
                dsmd = IBCCVLMetadata(dsobj)

                emsc = dsmd.get('emsc')
                gcm = dsmd.get('gcm')
                period = dsmd.get('temporal')
                resolution = dsmd.get('resolution')
                # datasets without temporal metadata are current-climate
                if not period:
                    year = 'current'
                else:
                    # period is known truthy here; the old
                    # 'if period else None' conditional was dead code
                    year = Period(period).start
                key = (emsc, gcm, year, resolution)
                groups.setdefault(key, []).append((projds, threshold))

            # create one job per dataset group
            for key, group in groups.items():
                (emsc, gcm, year, resolution) = key

                # create result object:
                if year == 'current':
                    title = u'{} - biodiverse {} {}'.format(
                        self.context.title, year,
                        datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
                else:
                    title = u'{} - biodiverse {}_{}_{} {}'.format(
                        self.context.title, emsc, gcm, year,
                        datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
                result = createContentInContainer(self.context,
                                                  'Folder',
                                                  title=title)

                dss = []
                for ds, thresh in group:
                    dss.append({'dataset': ds, 'threshold': thresh})

                # build job_params and store on result
                result.job_params = {
                    # datasets is a list of dicts with 'threshold' and 'uuid'
                    'projections': dss,
                    'cluster_size': self.context.cluster_size,
                }
                # update provenance
                self._createProvenance(result)

                # submit job to queue
                LOG.info("Submit JOB Biodiverse to queue")
                method(result, "biodiverse")  # TODO: wrong interface
                resultjt = IJobTracker(result)
                resultjt.new_job('TODO: generate id',
                                 'generate taskname: biodiverse')
                resultjt.set_progress('PENDING', 'biodiverse pending')
            return 'info', u'Job submitted {0} - {1}'.format(
                self.context.title, self.state)
        else:
            return 'error', u'Current Job is still running'
コード例 #28
0
 def job_state(self):
     """Return the state of the job tracked for this context."""
     tracker = IJobTracker(self.context)
     return tracker.state
コード例 #29
0
 def getJobStates(self):
     """Return the states of all jobs tracked for this context."""
     tracker = IJobTracker(self.context)
     return tracker.states