Example #1
    def rat(self):
        uuid = self.request.form.get('uuid')
        layer = self.request.form.get('layer')
        brain = None
        try:
            brain = uuidToCatalogBrain(uuid)
        except Exception as e:
            LOG.error('Caught exception %s', e)

        if not brain:
            self.record_error('Not Found', 404, 'dataset not found',
                              {'parameter': 'uuid'})
            raise NotFound(self, 'metadata', self.request)
        md = IBCCVLMetadata(brain.getObject())
        # reject request if layer is missing or unknown
        if not layer or layer not in md.get('layers', {}):
            self.record_error('Bad Request', 400, 'Missing parameter layer',
                              {'parameter': 'layer'})
            raise BadRequest('Missing parameter layer')
        try:
            rat = md.get('layers', {}).get(layer, {}).get('rat')
            rat = json.loads(unicode(rat))
            return rat
        except Exception as e:
            LOG.warning(
                "Couldn't decode Raster Attribute Table from metadata. %s: %s",
                self.context, repr(e))
        raise NotFound(self, 'rat', self.request)
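A minimal sketch of how a client might call this view; the endpoint path and parameter values below are assumptions for illustration, not taken from the code above:

import requests

# hypothetical host, view path, dataset uuid and layer id
resp = requests.get('https://example.org/bccvl/dataset/rat',
                    params={'uuid': 'some-dataset-uuid', 'layer': 'B01'})
resp.raise_for_status()
rat = resp.json()  # the decoded Raster Attribute Table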
Example #2
def dataset_environmental_layer(obj, **kw):
    md = IBCCVLMetadata(obj)
    # if we have 'layers_used' index it
    if 'layers_used' in md:
        return md['layers_used']
    # otherwise index list of layers provided by dataset
    return md.get('layers', None)
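Indexer functions like this are typically registered with the plone.indexer decorator plus a ZCML adapter registration; a minimal sketch, where IDataset stands in for the actual dataset interface (which is not shown in these examples):

from plone.indexer import indexer

@indexer(IDataset)  # IDataset is a hypothetical marker interface
def dataset_environmental_layer(obj, **kw):
    md = IBCCVLMetadata(obj)
    if 'layers_used' in md:
        return md['layers_used']
    return md.get('layers', None)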
Example #3
def getdatasetparams(uuid):
    # return dict with:
    #    filename
    #    downloadurl
    #    dm_accessurl-> maybe add url rewrite to datamover?
    #    # occurrence specific:
    #    species
    #    # raster specific:
    #    layers ... need to split this up
    dsobj = uuidToObject(uuid)
    if dsobj is None:
        return None
    dsinfo = getDatasetInfo(dsobj, uuid)
    # if we have species info add it

    dsmdr = IBCCVLMetadata(dsobj)
    species = dsmdr.get('species', {}).get('scientificName')
    if species:
        dsinfo['species'] = species
    # if we can get layermetadata, let's add it
    biomod = getdsmetadata(dsobj)
    layers = biomod.get('layers', {})

    if layers:
        for lk, lv in layers.items():
            if lv is not None:
                dsinfo.setdefault('layers', {})[lk] = {
                    'filename': lv.get('filename', biomod['filename']),
                    'datatype': lv.get('datatype', None)
                }
    # return infoset
    return dsinfo
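For orientation, the returned dict might look roughly like the following; all values are hypothetical, and the base keys come from getDatasetInfo, which is not shown here:

# possible shape of a getdatasetparams() result for a raster dataset
# {
#     'filename': 'future_climate.zip',
#     'downloadurl': 'http://example.org/datasets/future_climate.zip',
#     'species': 'Alectura lathami',   # occurrence datasets only
#     'layers': {
#         'B01': {'filename': 'climate/B01.tif', 'datatype': 'continuous'},
#     },
# }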
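Example #4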
def biodiverse_listing_details(expbrain):
    details = {}
    exp = expbrain.getObject()
    species = set()
    years = set()
    emscs = set()
    gcms = set()
    for dsuuid in chain.from_iterable(
            x.keys() for x in exp.projection.itervalues()):
        dsobj = uuidToObject(dsuuid)
        # TODO: should inform user about missing dataset
        if dsobj:
            md = IBCCVLMetadata(dsobj)
            species.add(md.get("species", {}).get("scientificName", ""))
            period = md.get("temporal")
            if period:
                years.add(Period(period).start)
            gcm = md.get("gcm")
            if gcm:
                gcms.add(gcm)
            emsc = md.get("emsc")
            if emsc:
                emscs.add(emsc)
    details.update(
        {
            "type": "BIODIVERSE",
            "functions": "endemism, redundancy",
            "species_occurrence": ", ".join(sorted(species)),
            "species_absence": "{}, {}".format(", ".join(sorted(emscs)), ", ".join(sorted(gcms))),
            "years": ", ".join(sorted(years)),
        }
    )
    return details
Example #5
 def details(self, context=None):
     # fetch details about dataset, if attributes are unpopulated
     # get data from associated collection
     if context is None:
         context = self.context
     coll = context
     while not (ISiteRoot.providedBy(coll) or ICollection.providedBy(coll)):
         coll = coll.__parent__
     # we have either hit siteroot or found a collection
     ret = {
         'title': context.title,
         'description': context.description or coll.description,
         'attribution': context.attribution or getattr(
             coll, 'attribution', None),
         'rights': context.rights or coll.rights,
         'external_description': context.external_description or getattr(
             coll, 'external_description', None),
     }
     md = IBCCVLMetadata(context)
     if 'layers' in md:
         layers = []
         for layer in sorted(md.get('layers', ())):
             try:
                 layers.append(self.layer_vocab.getTerm(layer))
             except LookupError:
                 layers.append(SimpleTerm(layer, layer, layer))
         if layers:
             ret['layers'] = layers
     return ret
Example #6
    def items(self):
        # return dict with keys for experiment
        # and subkey 'models' for models within experiment
        if self.value:
            for experiment_uuid, model_uuids in self.value.items():
                item = {}
                expbrain = uuidToCatalogBrain(experiment_uuid)
                item['title'] = expbrain.Title
                item['uuid'] = expbrain.UID

                # TODO: what else would I need from an experiment?
                exp = expbrain.getObject()
                expmd = IBCCVLMetadata(exp)
                item['resolution'] = expmd.get('resolution')
                item['brain'] = expbrain

                # now search all models within and add infos
                pc = getToolByName(self.context, 'portal_catalog')
                brains = pc.searchResults(path=expbrain.getPath(),
                                          BCCDataGenre=self.genre)
                # TODO: maybe as generator?
                item['datasets'] = [{'uuid': brain.UID,
                                     'title': brain.Title,
                                     'obj': brain.getObject(),
                                     'md': IBCCVLMetadata(brain.getObject()),
                                     'selected': brain.UID in self.value[experiment_uuid]}
                                    for brain in brains]
                yield item
Example #7
 def subitems(self, dsbrain):
     # return a generator of selectable items within dataset
     md = IBCCVLMetadata(dsbrain.getObject())
     layer_vocab = self.dstools.layer_vocab
     selectedsubitems = self.value.get(dsbrain.UID) or ()
     if md.get('genre') != 'DataGenreSpeciesCollection':
         for layer in sorted(md.get('layers', ())):
             subitem = {
                 'id': layer,
                 'title': (layer_vocab.getTerm(layer).title
                           if layer in layer_vocab else layer),
                 'selected': layer in selectedsubitems,
             }
             yield subitem
     for subdsid in sorted(getattr(dsbrain.getObject(), 'parts', ())):
         part = uuidToCatalogBrain(subdsid)
         # TODO: should we just ignore it?
         if not part:
             continue
         subitem = {
             'id': subdsid,
             'title': part.Title,
             'selected': subdsid in selectedsubitems
         }
         yield subitem
Example #8
def addSpeciesInfo(bccvlmd, result):
    spds = None
    if ISDMExperiment.providedBy(result.__parent__):
        spds = uuidToObject(result.job_params['species_occurrence_dataset'])
    if IProjectionExperiment.providedBy(result.__parent__):
        spds = uuidToObject(result.job_params['species_distribution_models'])
    if spds is None:
        # result belongs to neither experiment type; nothing to add
        return
    speciesmd = IBCCVLMetadata(spds).get('species', None)
    if speciesmd:
        bccvlmd['species'] = speciesmd.copy()
Example #9
def year(obj, **kw):
    # FIXME: this indexer is meant for future projection only ....
    # - make sure we don't index any other datasets, i.e. environmental and
    #   current datasets, which may have a date attached to them, but it is
    #   meaningless for future projections
    md = IBCCVLMetadata(obj)
    year = md.get('year', None)
    if year:
        year = str(year)
    return year
Example #10
 def test_filemetadata(self):
     ds = self.get_dataset(defaults.DATASETS_SPECIES_FOLDER_ID,
                           'ABT', 'occurrence.csv')
     from org.bccvl.site.interfaces import IBCCVLMetadata
     md = IBCCVLMetadata(ds)
     self.assertEqual(md.get('rows'), 3)
     self.assertEqual(md.get('bounds'), {'bottom': 1, 'left': 1, 'top': 3, 'right': 3})
     self.assertEqual(md.get('headers'), ['Name', 'lon', 'lat'])
     self.assertIn('species', md) # check if species attribute exists
Example #11
def scientific_category(obj, **kw):
    md = IBCCVLMetadata(obj)
    vocab = getUtility(IVocabularyFactory, 'scientific_category_source')(obj)
    path = set()
    for cat in md.get('categories', ()):
        path.update(vocab.getTermPath(cat))
    if path:
        return tuple(path)
    return None
Example #12
    def items(self):
        # return dict with keys for experiment
        # and subkey 'models' for models within experiment
        if self.value:
            for experiment_uuid, model_uuids in self.value.items():
                item = {}
                expbrain = uuidToCatalogBrain(experiment_uuid)
                # TODO: we have an experiment_uuid, but can't access the
                #       experiment (deleted?, access denied?)
                #       shall we at least try to get some details?
                if expbrain is None:
                    continue
                item['title'] = expbrain.Title
                item['uuid'] = expbrain.UID
                item['brain'] = expbrain

                # TODO: what else would I need from an experiment?
                exp = expbrain.getObject()
                expmd = IBCCVLMetadata(exp)
                item['resolution'] = expmd.get('resolution')

                # now search all datasets within and add infos
                pc = getToolByName(self.context, 'portal_catalog')
                results = pc.searchResults(path=expbrain.getPath(),
                                           portal_type='Folder',
                                           job_state='COMPLETED')
                brains = pc.searchResults(path=[r.getPath() for r in results],
                                          BCCDataGenre=self.genre)
                # TODO: maybe as generator?
                item['subitems'] = []
                for brain in brains:
                    # FIXME: I need a different list of thresholds for display;
                    # esp. don't look up threshold, but take values (threshold
                    # id and value) from field as is
                    thresholds = dataset.getThresholds(brain.UID)[brain.UID]
                    threshold = self.value[experiment_uuid].get(brain.UID)
                    # is threshold in list?
                    if threshold and threshold['label'] not in thresholds:
                        # maybe a custom entered number?
                        # ... I guess we don't really care as long as we
                        # reproduce exactly what the user entered. (validate?)
                        thresholds[threshold['label']] = threshold['label']
                    dsobj = brain.getObject()
                    dsmd = IBCCVLMetadata(dsobj)
                    item['subitems'].append({
                        'uuid': brain.UID,
                        'title': brain.Title,
                        'selected': brain.UID in self.value[experiment_uuid],
                        'threshold': threshold,
                        'thresholds': thresholds,
                        'brain': brain,
                        'md': dsmd,
                        'obj': dsobj,
                        # TODO: this correct? only one layer ever?
                        'layermd': dsmd['layers'].values()[0]
                    })
                yield item
Example #13
    def items(self):
        # return dict with keys for experiment
        # and subkey 'models' for models within experiment
        if self.value:
            for experiment_uuid, model_uuids in self.value.items():
                item = {}
                expbrain = uuidToCatalogBrain(experiment_uuid)
                item['title'] = expbrain.Title
                item['uuid'] = expbrain.UID
                item['brain'] = expbrain

                # TODO: what else would I need from an experiment?
                exp = expbrain.getObject()
                expmd = IBCCVLMetadata(exp)
                item['resolution'] = expmd.get('resolution')

                # now search all datasets within and add infos
                pc = getToolByName(self.context, 'portal_catalog')
                brains = pc.searchResults(path=expbrain.getPath(),
                                          BCCDataGenre=self.genre)
                # TODO: maybe as generator?
                item['datasets'] = []
                for brain in brains:
                    # FIXME: I need a different list of thresholds for display;
                    #        esp. don't look up threshold, but take values
                    #        (threshold id and value) from field as is
                    thresholds = dataset.getThresholds(brain.UID)[brain.UID]
                    threshold = self.value[experiment_uuid].get(brain.UID)
                    # is threshold in list?
                    if threshold and threshold['label'] not in thresholds:
                        # maybe a custom entered number?
                        # ... I guess we don't really care as long as we
                        # reproduce exactly what the user entered. (validate?)
                        thresholds[threshold['label']] = threshold['label']
                    dsobj = brain.getObject()
                    dsmd = IBCCVLMetadata(dsobj)
                    item['datasets'].append({
                        'uuid': brain.UID,
                        'title': brain.Title,
                        'selected': brain.UID in self.value[experiment_uuid],
                        'threshold': threshold,
                        'thresholds': thresholds,
                        'brain': brain,
                        'md': dsmd,
                        'obj': dsobj,
                        # TODO: this correct? only one layer ever?
                        'layermd': dsmd['layers'].values()[0]
                    })
                yield item
Example #14
 def getGenreSchemata(self):
     schemata = []
     md = IBCCVLMetadata(self.context)
     genre = md.get('genre')
     if genre in self.genre_interface_map:
         schemata.append(self.genre_interface_map[genre])
     if IBlobDataset.providedBy(self.context):
         schemata.append(IBlobDataset)
     if IRemoteDataset.providedBy(self.context):
         schemata.append(IRemoteDataset)
     return schemata
Example #15
 def subitems(self, dsbrain):
     # return a generator of selectable items within dataset
     md = IBCCVLMetadata(dsbrain.getObject())
     layer_vocab = self.dstools.layer_vocab
     selectedlayers = self.value.get(dsbrain.UID) or ()
     for layer in sorted(md.get('layers', ())):
         subitem = {
             'id': layer,
             'title': layer_vocab.getTerm(layer).title,
             'selected': layer in selectedlayers,
         }
         yield subitem
Example #16
def get_project_params(result):
    params = deepcopy(result.job_params)
    # get metadata for species_distribution_models
    uuid = params['species_distribution_models']
    params['species_distribution_models'] = getdatasetparams(uuid)
    # do biomod name mangling of species name
    params['species_distribution_models']['species'] = re.sub(
        u"[ _'\"/\(\)\{\}\[\]]", u".",
        params['species_distribution_models'].get('species', u"Unknown"))
    # we need the layers from sdm to fetch correct files for climate_models
    # TODO: getdatasetparams should fetch 'layers'
    sdmobj = uuidToObject(uuid)
    sdmmd = IBCCVLMetadata(sdmobj)
    params['species_distribution_models']['layers'] = sdmmd.get(
        'layers_used', None)
    # do future climate layers
    climatelist = []
    for uuid, layers in params['future_climate_datasets'].items():
        dsinfo = getdatasetparams(uuid)
        for layer in layers:
            dsdata = {
                'uuid': dsinfo['uuid'],
                'filename': dsinfo['filename'],
                'downloadurl': dsinfo['downloadurl'],
                'layer': layer,
                'zippath': dsinfo['layers'][layer]['filename'],
                # TODO: add year, gcm, emsc here?
                'type': dsinfo['layers'][layer]['datatype'],
            }
            # if this is a zip file we'll have to set zippath as well
            # FIXME: poor check whether this is a zip file
            if dsinfo['filename'].endswith('.zip'):
                dsdata['zippath'] = dsinfo['layers'][layer]['filename']
            climatelist.append(dsdata)
    # replace climate_models parameter
    params['future_climate_datasets'] = climatelist
    params['selected_models'] = 'all'
    # projection.name from dsinfo
    # FIXME: workaround to get future projection name back, but this works
    #        only for file naming scheme with current available data
    params['projection_name'], _ = os.path.splitext(dsinfo['filename'])

    # TODO: quick fix Decimal json encoding through celery
    #       (where is my custom json encoder gone?)
    for key, item in params.items():
        if isinstance(item, Decimal):
            params[key] = float(item)

    # add hints for worker
    workerhints = {
        'files': ('species_distribution_models', 'future_climate_datasets')
    }
    return {'env': {}, 'params': params, 'worker': workerhints}
Example #17
 def test_mixed_resolution(self, mock_run_script):
     future_1k_uuid = unicode(self.datasets[
         defaults.DATASETS_CLIMATE_FOLDER_ID]['future_1k'].UID())
     form = self.form.get_form()
     form.request.form.update({
         'form.buttons.save': 'Create and start',
         # select 1k dataset as well
         'form.widgets.future_climate_datasets': [future_1k_uuid],
     })
     form.update()
     # setup mock_run_script
     mock_run_script.side_effect = self.form.mock_run_script
     # run experiment
     transaction.commit()
     exp = self.experiments['my-cc-experiment']
     result = exp.values()[0]
     expmd = IBCCVLMetadata(exp)
     # We should have the missing layers filled by sdm env layer datasets
     self.assertEqual(
         result.job_params['future_climate_datasets'], {
             future_1k_uuid: set([u'B01']),
             self.form.sdmexp.environmental_datasets.keys()[0]: set(
                 [u'B02'])
         })
     # resolution should be set to the lowest of selected datasets
     self.assertEqual(expmd['resolution'], 'Resolution2_5m')
Example #18
def getdsmetadata(ds):
    # TODO: support brain, obj and uuid string (URI as well?)
    # extract info about files
    if ICatalogBrain.providedBy(ds):
        ds = ds.getObject()
        # TODO: try to use brains only here
        #    url: ds.getURL()
        #    id: ds.UID,
        #    description: ds.Description
    # start with metadata annotation
    md = {
        #'@context': { },
        '@id': IUUID(ds),
        '@type': ds.portal_type,
        'url': ds.absolute_url(),
        'id': IUUID(ds),
        'title': ds.title,
        'description': ds.description,
    }
    md.update(IBCCVLMetadata(ds))
    dlinfo = IDownloadInfo(ds)
    md.update({
        'mimetype': dlinfo['contenttype'],
        'filename': dlinfo['filename'],
        'file': dlinfo['url'],
        'vizurl': dlinfo['alturl'][0]
    })
    return md
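A brief usage sketch; the UUID is illustrative:

dsmd = getdsmetadata(uuidToObject('some-dataset-uuid'))
# dsmd combines identity ('id', 'url', 'title'), the IBCCVLMetadata
# annotation ('genre', 'layers', ...) and download info ('filename',
# 'file', 'mimetype', 'vizurl')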
Example #19
    def __iter__(self):
        for item in self.previous:
            pathkey = self.pathkey(*item.keys())[0]
            # no path .. can't do anything
            if not pathkey:
                yield item
                continue

            path = item[pathkey]
            # Skip the Plone site object itself
            if not path:
                yield item
                continue

            obj = self.context.unrestrictedTraverse(path.encode().lstrip('/'),
                                                    None)

            # path doesn't exist
            if obj is None:
                yield item
                continue

            bccvlmd = item.get(self.bccvlmdkey)
            if not bccvlmd:
                yield item
                continue
            # apply bccvl metadata
            # FIXME: replace or update?
            IBCCVLMetadata(obj).update(bccvlmd)
            yield item
Example #20
    def __iter__(self):
        # exhaust previous iterator
        for item in self.previous:
            yield item

        filename = self.context.file.filename
        item = {
            self.pathkey: '/'.join(self.context.getPhysicalPath()),
            '_type': self.context.portal_type,
            'file': {
                'file': filename,
            },
            # TODO: consider deepcopy here (for now it's safe because
            #       all are normal dicts; no persistent dicts)
            'bccvlmetadata': dict(IBCCVLMetadata(self.context)),
            '_files': {
                filename: {
                    # FIXME: there is some chaos here... do I really need
                    #        name and filename?
                    'name': self.context.file.filename,
                    'filename': self.context.file.filename,
                    'contenttype': self.context.file.contentType,
                    # data is a readable file like object
                    # it may be an uncommitted blob file
                    'data': self.context.file.open('r')
                }
            }
        }
        yield item
Example #21
 def validateAction(self, data):
     # TODO: check data ...
     # ...
     datasets = data.get('projection', {})
     if not tuple(chain.from_iterable(datasets.values())):
         raise WidgetActionExecutionError(
             'projection', Invalid('No projection dataset selected.'))
     # check if threshold values are in range
     for dataset in datasets.values():
         if not dataset:
             raise WidgetActionExecutionError(
                 'projection',
                 Invalid(
                     'Please select at least one dataset within experiment')
             )
         # key: {label, value}
         dsuuid = dataset.keys()[0]
         ds = uuidToObject(dsuuid)
         value = dataset[dsuuid]['value']
         md = IBCCVLMetadata(ds)
         # ds should be a projection output which has only one layer
         # FIXME: error message is not clear enough and
         #        use widget.errors instead of exception
         # also it will only verify if dataset has min/max values in
         # metadata
         layermd = md['layers'].values()[0]
         if 'min' in layermd and 'max' in layermd:
             # FIXME: at least layermd['min'] may be a string '0', when
             # comparing to decimal from threshold selector, this comparison
             # fails and raises the widget validation error
             if value <= float(layermd['min']) or value >= float(
                     layermd['max']):
                 raise WidgetActionExecutionError(
                     'projection',
                     Invalid('Selected threshold is out of range'))
Example #22
 def setUp(self, mock_run_script):
     self.portal = self.layer['portal']
     self.experiments = self.portal[defaults.EXPERIMENTS_FOLDER_ID]
     # create and run sdm experiment
     formhelper = SDMExperimentHelper(self.portal)
     sdmform = formhelper.get_form()
     sdmform.request.form.update({
         'form.buttons.save': 'Create and start',
     })
     # update form with updated request
     sdmform.update()
     # setup mock_run_script
     mock_run_script.side_effect = formhelper.mock_run_script
     # We should have only one SDM
     sdmexp = self.experiments.values()[0]
     transaction.commit()
     # set up some threshold values for our projection
     sdmproj = sdmexp.values()[0]['proj_test.tif']
     md = IBCCVLMetadata(sdmproj)
     # there is only one layer
     layermd = md['layers'].values()[0]
     layermd['min'] = 0.0
     layermd['max'] = 1.0
     transaction.commit()
     self.form = BiodiverseExperimentHelper(self.portal, sdmexp)
Example #23
 def item(self):
     # return dict with keys for experiment
     # and subkey 'models' for models within experiment
     item = {}
     if self.value:
         experiment_uuid = self.value.keys()[0]
         expbrain = uuidToCatalogBrain(experiment_uuid)
         item['title'] = expbrain.Title
         item['uuid'] = expbrain.UID
         exp = expbrain.getObject()
         item['layers'] = set((chain(*exp.environmental_datasets.values())))
         expmd = IBCCVLMetadata(exp)
         item['resolution'] = expmd['resolution']
         # now search all models within and add infos
         pc = getToolByName(self.context, 'portal_catalog')
         brains = pc.searchResults(path=expbrain.getPath(),
                                   BCCDataGenre=self.genre)
         # TODO: maybe as generator?
         item['models'] = [{
             'item': brain,
             'uuid': brain.UID,
             'title': brain.Title,
             'selected': brain.UID in self.value[experiment_uuid]
         } for brain in brains]
     return item
Example #24
def get_project_params(result):
    params = deepcopy(result.job_params)
    # get metadata for species_distribution_models
    uuid = params['species_distribution_models']
    params['species_distribution_models'] = getdatasetparams(uuid)
    # do biomod name mangling of species name
    params['species_distribution_models']['species'] = re.sub(
        u"[ _]", u".",
        params['species_distribution_models'].get('species', u"Unknown"))
    # we need the layers from sdm to fetch correct files for climate_models
    # TODO: getdatasetparams should fetch 'layers'
    sdmobj = uuidToObject(uuid)
    sdmmd = IBCCVLMetadata(sdmobj)
    params['species_distribution_models']['layers'] = sdmmd.get(
        'layers_used', None)
    # do future climate layers
    climatelist = []
    for uuid, layers in params['future_climate_datasets'].items():
        dsinfo = getdatasetparams(uuid)
        for layer in layers:
            dsdata = {
                'uuid': dsinfo['uuid'],
                'filename': dsinfo['filename'],
                'downloadurl': dsinfo['downloadurl'],
                'internalurl': dsinfo['internalurl'],
                'layer': layer,
                'zippath': dsinfo['layers'][layer]['filename'],
                # TODO: add year, gcm, emsc here?
                'type': dsinfo['layers'][layer]['datatype'],
            }
            # if this is a zip file we'll have to set zippath as well
            # FIXME: poor check whether this is a zip file
            if dsinfo['filename'].endswith('.zip'):
                dsdata['zippath'] = dsinfo['layers'][layer]['filename']
            climatelist.append(dsdata)
    # replace climate_models parameter
    params['future_climate_datasets'] = climatelist
    params['selected_models'] = 'all'
    # projection.name from dsinfo
    # FIXME: workaround to get future projection name back, but this works
    #        only for file naming scheme with current available data
    params['projection_name'], _ = os.path.splitext(dsinfo['filename'])
    # add hints for worker
    workerhints = {
        'files': ('species_distribution_models', 'future_climate_datasets')
    }
    return {'env': {}, 'params': params, 'worker': workerhints}
Example #25
    def test_upload_zip(self):
        # upload a zip in bccvl bagit format
        view = self.getview()
        from ZPublisher.HTTPRequest import FileUpload
        from cgi import FieldStorage
        from StringIO import StringIO
        data = resource_string(__name__, 'spc_obl_merc.zip')
        env = {'REQUEST_METHOD': 'PUT'}
        headers = {'content-type': 'text/csv',
                   'content-length': str(len(data)),
                   'content-disposition': 'attachment; filename=spc_obl_merc.zip'}
        fileupload = FileUpload(FieldStorage(fp=StringIO(data),
                                             environ=env, headers=headers))

        view.request.form.update({
            'climatefuture.buttons.save': u'Save',
            'climatefuture.widgets.description': u'some test.tif file',
            'climatefuture.widgets.file': fileupload,
            'climatefuture.widgets.title': u'test multi layer title',
            'climatefuture.widgets.legalcheckbox': [u'selected'],
            'climatefuture.widgets.legalcheckbox-empty-marker': u'1',
            'climatefuture.widgets.rightsstatement': u'test rights',
            'climatefuture.widgets.rightsstatement.mimeType': u'text/html',
            'climatefuture.widgets.emsc': u'SRESB2',
            'climatefuture.widgets.gcm': u'cccma-cgcm31',
            'climatefuture.widgets.resolution': u'Resolution5m',
            'climatefuture.widgets.temporal': u'2015',
        })
        _ = view()
        self.assertEqual(self.portal.REQUEST.response.status, 302)
        self.assertEqual(self.portal.REQUEST.response.getHeader('Location'),
                         'http://nohost/plone/datasets')
        ds = self.portal.datasets['spc_obl_merc.zip']
        self.assertEqual(ds.rightsstatement.raw, u'test rights')
        self.assertEqual(ds.file.data, data)
        from org.bccvl.site.interfaces import IBCCVLMetadata
        md = IBCCVLMetadata(ds)
        self.assertEqual(md['genre'], 'DataGenreFC')
        self.assertEqual(md['resolution'], u'Resolution5m')
        self.assertEqual(md['temporal'], u'2015')
        self.assertEqual(md['emsc'], u'SRESB2')
        self.assertEqual(md['gcm'], u'cccma-cgcm31')
        layermd = md['layers']['spc_obl_merc/data/spc_obl_merc_1.tif']
        self.assertEqual(layermd['filename'], 'spc_obl_merc/data/spc_obl_merc_1.tif')
        self.assertEqual(layermd['min'], 19.0)
        self.assertEqual(layermd['max'], 128.0)
        self.assertEqual(layermd['datatype'], 'continuous')
        self.assertEqual(layermd['height'], 200)
        self.assertEqual(layermd['width'], 200)
        self.assertEqual(layermd['srs'], None)
        layermd = md['layers']['spc_obl_merc/data/spc_obl_merc_2.tif']
        self.assertEqual(layermd['filename'], 'spc_obl_merc/data/spc_obl_merc_2.tif')
        self.assertEqual(layermd['min'], 19.0)
        self.assertEqual(layermd['max'], 128.0)
        self.assertEqual(layermd['datatype'], 'continuous')
        self.assertEqual(layermd['height'], 200)
        self.assertEqual(layermd['width'], 200)
        self.assertEqual(layermd['srs'], None)
Example #26
 def getRAT(self, datasetid, layer=None):
     query = {'UID': datasetid}
     dsbrain = dataset.query(self.context, brains=True, **query)
     if dsbrain:
         # get first brain from list
         dsbrain = next(dsbrain, None)
     if not dsbrain:
         raise NotFound(self.context, datasetid, self.request)
     md = IBCCVLMetadata(dsbrain.getObject())
     rat = md.get('layers', {}).get(layer, {}).get('rat')
     # if we have a rat, let's try and parse it
     if rat:
         try:
             rat = json.loads(unicode(rat))
         except Exception as e:
             LOG.warning("Couldn't decode Raster Attribute Table "
                         "from metadata. %s: %s", self.context, repr(e))
             rat = None
     return rat
Example #27
def find_projections(ctx, emission_scenarios, climate_models, years,
                     resolution=None):
    """Find Projection datasets for given criteria"""
    pc = getToolByName(ctx, 'portal_catalog')
    result = []
    params = {
        'BCCEmissionScenario': emission_scenarios,
        'BCCGlobalClimateModel': climate_models,
        'BCCDataGenre': 'DataGenreFC'
    }
    if resolution:
        params['BCCResolution'] = resolution
    brains = pc.searchResults(**params)
    for brain in brains:
        md = IBCCVLMetadata(brain.getObject())
        year = md.get('temporal', None)
        if year in years:
            # TODO: yield?
            result.append(brain)
    return result
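The TODO above hints at a generator variant; a minimal sketch of what that could look like, using the same names as the function above (iter_projections itself is hypothetical):

def iter_projections(ctx, emission_scenarios, climate_models, years,
                     resolution=None):
    # generator variant: yields matching brains instead of building a list
    pc = getToolByName(ctx, 'portal_catalog')
    params = {
        'BCCEmissionScenario': emission_scenarios,
        'BCCGlobalClimateModel': climate_models,
        'BCCDataGenre': 'DataGenreFC'
    }
    if resolution:
        params['BCCResolution'] = resolution
    for brain in pc.searchResults(**params):
        md = IBCCVLMetadata(brain.getObject())
        if md.get('temporal', None) in years:
            yield brain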
Example #28
def get_project_params(result):
    params = deepcopy(result.job_params)
    # get metadata for species_distribution_models
    uuid = params["species_distribution_models"]
    params["species_distribution_models"] = getdatasetparams(uuid)
    # do biomod name mangling of species name
    params["species_distribution_models"]["species"] = re.sub(
        u"[ _'\"/\(\)\{\}\[\]]", u".", params["species_distribution_models"].get("species", u"Unknown")
    )
    # we need the layers from sdm to fetch correct files for climate_models
    # TODO: getdatasetparams should fetch 'layers'
    sdmobj = uuidToObject(uuid)
    sdmmd = IBCCVLMetadata(sdmobj)
    params["species_distribution_models"]["layers"] = sdmmd.get("layers_used", None)
    # do future climate layers
    climatelist = []
    for uuid, layers in params["future_climate_datasets"].items():
        dsinfo = getdatasetparams(uuid)
        for layer in layers:
            dsdata = {
                "uuid": dsinfo["uuid"],
                "filename": dsinfo["filename"],
                "downloadurl": dsinfo["downloadurl"],
                "internalurl": dsinfo["internalurl"],
                "layer": layer,
                "zippath": dsinfo["layers"][layer]["filename"],
                # TODO: add year, gcm, emsc here?
                "type": dsinfo["layers"][layer]["datatype"],
            }
            # if this is a zip file we'll have to set zippath as well
            # FIXME: poor check whether this is a zip file
            if dsinfo["filename"].endswith(".zip"):
                dsdata["zippath"] = dsinfo["layers"][layer]["filename"]
            climatelist.append(dsdata)
    # replace climate_models parameter
    params["future_climate_datasets"] = climatelist
    params["selected_models"] = "all"
    # projection.name from dsinfo
    # FIXME: workaround to get future projection name back, but this works
    #        only for file naming scheme with current available data
    params["projection_name"], _ = os.path.splitext(dsinfo["filename"])
    # add hints for worker
    workerhints = {"files": ("species_distribution_models", "future_climate_datasets")}
    return {"env": {}, "params": params, "worker": workerhints}
Example #29
def DatasetSearchableText(obj, **kw):
    md = IBCCVLMetadata(obj)
    entries = [
        safe_unicode(obj.id),
        safe_unicode(obj.title) or u"",
        safe_unicode(obj.description) or u""
    ]
    if "layers" in md:
        layer_vocab = getUtility(IVocabularyFactory, "layer_source")(obj)
        for key in md["layers"]:
            if key not in layer_vocab:
                continue
            entries.append(safe_unicode(layer_vocab.getTerm(key).title) or u"")
    if "species" in md:
        entries.extend(
            (
                safe_unicode(md.get("species", {}).get("scientificName")) or u"",
                safe_unicode(md.get("species", {}).get("vernacularName")) or u"",
            )
        )
    if md.get("genre") == "DataGenreFC":
        # year, gcm, emsc
        emsc_vocab = getUtility(IVocabularyFactory, "emsc_source")(obj)
        gcm_vocab = getUtility(IVocabularyFactory, "gcm_source")(obj)
        year = unicode(md.get("year", u""))
        month = unicode(md.get("month", u""))
        if md["emsc"] in emsc_vocab:
            entries.append(safe_unicode(emsc_vocab.getTerm(md["emsc"]).title) or u"")
        if md["gcm"] in gcm_vocab:
            entries.append(safe_unicode(gcm_vocab.getTerm(md["gcm"]).title) or u"")
        entries.append(year)
        entries.append(month)
    elif md.get("genre") == "DataGenreCC":
        entries.append(u"current")
    return u" ".join(entries)
Example #30
    def items(self):
        # return dict with keys for experiment
        # and subkey 'models' for models within experiment
        if self.value:
            for experiment_uuid, model_uuids in self.value.items():
                item = {}
                expbrain = uuidToCatalogBrain(experiment_uuid)
                item['title'] = expbrain.Title
                item['uuid'] = expbrain.UID

                # TODO: what else would I need from an experiment?
                exp = expbrain.getObject()
                expmd = IBCCVLMetadata(exp)
                item['resolution'] = expmd.get('resolution')
                item['brain'] = expbrain

                # now search all models within and add infos
                pc = getToolByName(self.context, 'portal_catalog')
                brains = pc.searchResults(path=expbrain.getPath(),
                                          BCCDataGenre=self.genre)

                filtered_brains = []
                for brain in brains:
                    # get algorithm term
                    algoid = getattr(brain.getObject(), 'job_params',
                                     {}).get('function')
                    # Filter out geographic models
                    if algoid not in ['circles', 'convhull', 'geoDist',
                                      'geoIDW', 'voronoiHull']:
                        filtered_brains.append(brain)
                brains = filtered_brains

                # TODO: maybe as generator?
                item['subitems'] = [{'uuid': brain.UID,
                                     'title': brain.Title,
                                     'obj': brain.getObject(),
                                     'md': IBCCVLMetadata(brain.getObject()),
                                     'selected': brain.UID in self.value[experiment_uuid]}
                                    for brain in brains]
                yield item
Example #31
 def subitems(self, dsbrain):
     # return a generator of selectable items within dataset
     md = IBCCVLMetadata(dsbrain.getObject())
     layer_vocab = self.dstools.layer_vocab
     selectedsubitems = self.value.get(dsbrain.UID) or ()
     for layer in sorted(md.get('layers', ())):
         subitem = {
             'id': layer,
             'title': (layer_vocab.getTerm(layer).title
                       if layer in layer_vocab else layer),
             'selected': not selectedsubitems or layer in selectedsubitems,
         }
         yield subitem
     for subdsid in sorted(getattr(dsbrain.getObject(), 'parts', ())):
         part = uuidToCatalogBrain(subdsid)
         # TODO: should we just ignore it?
         if not part:
             continue
         subitem = {
             'id': subdsid,
             'title': part.Title,
             'selected': not selectedsubitems or subdsid in selectedsubitems
         }
         yield subitem
Example #32
 def create(self, data):
     # Dexterity base AddForm bypasses self.applyData and uses
     # form.applyData directly; we'll have to override it to find a place
     # to apply our algo_group data
     newob = super(SDMAdd, self).create(data)
     # apply values to algo dict manually to make sure we don't write data on read
     new_params = {}
     for group in self.param_groups:
         if group.toolkit in data['functions']:
             content = group.getContent()
             applyChanges(group, content, data)
             new_params[group.toolkit] = content
     newob.parameters = new_params
     IBCCVLMetadata(newob)['resolution'] = data['resolution']
     return newob
Example #33
def DatasetSearchableText(obj, **kw):
    md = IBCCVLMetadata(obj)
    entries = [
        safe_unicode(obj.id),
        safe_unicode(obj.title) or u"",
        safe_unicode(obj.description) or u""
    ]
    if 'layers' in md:
        layer_vocab = getUtility(IVocabularyFactory, 'layer_source')(obj)
        for key in md['layers']:
            if key not in layer_vocab:
                continue
            entries.append(
                safe_unicode(layer_vocab.getTerm(key).title) or u""
            )
    if 'species' in md:
        entries.extend((
            safe_unicode(md.get('species', {}).get('scientificName')) or u"",
            safe_unicode(md.get('species', {}).get('vernacularName')) or u"",
        ))
    if md.get('genre') == "DataGenreFC":
        # year, gcm, emsc
        emsc_vocab = getUtility(IVocabularyFactory, 'emsc_source')(obj)
        gcm_vocab = getUtility(IVocabularyFactory, 'gcm_source')(obj)
        year = Period(md.get('period', '')).start
        if md['emsc'] in emsc_vocab:
            entries.append(
                safe_unicode(emsc_vocab.getTerm(md['emsc']).title) or u""
            )
        if md['gcm'] in gcm_vocab:
            entries.append(
                safe_unicode(gcm_vocab.getTerm(md['gcm']).title) or u""
            )
        entries.append(safe_unicode(year) or u"")
    elif md.get('genre') == "DataGenreCC":
        entries.append(u"current")
    return u" ".join(entries)
Example #34
def getThresholds(datasets, thresholds=None):
    # dataset to get thresholds for
    # thresholds a list of threshold names to return (if None return all)
    if not isinstance(datasets, list):
        datasets = [datasets]
    result = {}  # we have to return per experiment, per dataset/result
    for dataset in datasets:
        dataobj = uuidToObject(dataset)
        if dataobj is None:
            continue
        datamd = IBCCVLMetadata(dataobj)
        if datamd['genre'] in ('DataGenreFP', 'DataGenreFP_ENVLOP'):
            # we have a future projection ... go look for thresholds at SDM result
            sdmuuid = dataobj.__parent__.job_params['species_distribution_models']
            # get sdm result container
            sdmresult = uuidToObject(sdmuuid).__parent__
        elif datamd['genre'] in ['DataGenreCP', 'DataGenreCP_ENVLOP', 'DataGenreSDMModel']:
            # we have a current projection ...
            sdmresult = dataobj.__parent__
        else:
            continue
        # We have the sdm result container ... find thresholds now
        pc = getToolByName(dataobj, 'portal_catalog')
        # find all model eval datasets
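        # FIXME: this rebinds the 'thresholds' parameter, so the filter list
        #        described in the comments at the top is never actually applied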
        thresholds = {}
        for evalbrain in pc.searchResults(path='/'.join(sdmresult.getPhysicalPath()),
                                          BCCDataGenre='DataGenreSDMEval'):
            evalmd = IBCCVLMetadata(evalbrain.getObject())
            # FIXME: ideally we'd get only datasets with thresholds back here,
            #        but at the moment DataGenreSDMEval is also used for graphs
            #        (png files) generated by the algorithms
            if 'thresholds' not in evalmd:
                continue
            # TODO: merging of thresholds is random here
            thresholds.update(evalmd['thresholds'])
        result[dataset] = thresholds
    return result
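A usage sketch; the UUID and threshold label below are purely illustrative:

result = getThresholds('projection-result-uuid')
# -> {'projection-result-uuid': {'some-threshold-label': 0.42, ...}}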
Example #35
    def start_job(self, request):
        # split sdm jobs across multiple algorithms,
        # and multiple species input datasets
        # TODO: rethink and maybe split jobs based on enviro input datasets?
        if not self.is_active():
            for func in (uuidToObject(f) for f in self.context.functions):
                # get utility to execute this experiment
                method = queryUtility(IComputeMethod,
                                      name=ISDMExperiment.__identifier__)
                if method is None:
                    return ('error',
                            u"Can't find method to run SDM Experiment")
                # create result object:
                # TODO: refactor this out into helper method
                title = u'{} - {} {}'.format(
                    self.context.title, func.getId(),
                    datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))
                result = self._create_result_container(title)

                # Build job_params store them on result and submit job
                result.job_params = {
                    'resolution': IBCCVLMetadata(self.context)['resolution'],
                    'function': func.getId(),
                    'species_occurrence_dataset':
                        self.context.species_occurrence_dataset,
                    'species_absence_dataset':
                        self.context.species_absence_dataset,
                    'species_pseudo_absence_points':
                        self.context.species_pseudo_absence_points,
                    'species_number_pseudo_absence_points':
                        self.context.species_number_pseudo_absence_points,
                    'environmental_datasets':
                        self.context.environmental_datasets,
                }
                # add toolkit params:
                result.job_params.update(self.context.parameters[IUUID(func)])
                self._createProvenance(result)
                # submit job
                LOG.info("Submit JOB %s to queue", func.getId())
                method(result, func)
                resultjt = IJobTracker(result)
                resultjt.new_job('TODO: generate id',
                                 'generate taskname: sdm_experiment')
                resultjt.set_progress('PENDING',
                                      u'{} pending'.format(func.getId()))
            return 'info', u'Job submitted {0} - {1}'.format(
                self.context.title, self.state)
        else:
            return 'error', u'Current Job is still running'
Example #36
def build_traits_import_task(dataset, request):
    """Create a task chain to import a traits dataset (AEKOS or ZoaTrack).

    context ... a dictionary with keys:
      - context: path to context object
      - userid: zope userid
    """
    # we need site-path, context-path and lsid for this job
    dataset_path = '/'.join(dataset.getPhysicalPath())
    member = api.user.get_current()
    context = {
        'context': dataset_path,
        'dataSource': dataset.dataSource,
        'user': {
            'id': member.getUserName(),
            'email': member.getProperty('email'),
            'fullname': member.getProperty('fullname')
        }
    }

    results_dir = get_results_dir(dataset, request)
    if dataset.dataSource == 'aekos':
        md = IBCCVLMetadata(dataset)
        return datamover.pull_traits_from_aekos.si(
            traits=md['traits'],
            species=[sp['scientificName'] for sp in md['species']],
            envvars=md['environ'],
            dest_url=results_dir,
            context=context)
    elif dataset.dataSource == 'zoatrack':
        md = IBCCVLMetadata(dataset)
        return datamover.pull_traits_from_zoatrack.si(
            species=[sp['scientificName'] for sp in md['species']],
            src_url=md['dataurl'],
            dest_url=results_dir,
            context=context)
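    # NOTE: any other dataSource falls through here and returns None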
Example #37
    def start_job(self, request):
        if not self.is_active():
            # get utility to execute this experiment
            method = queryUtility(IComputeMethod,
                                  name=IProjectionExperiment.__identifier__)
            if method is None:
                # TODO: lookup by script type (Perl, Python, etc...)
                return ('error',
                        u"Can't find method to run Projection Experiment")
            expuuid = self.context.species_distribution_models.keys()[0]
            exp = uuidToObject(expuuid)
            # TODO: what if two datasets provide the same layer?
            # start a new job for each sdm and future dataset
            for sdmuuid in self.context.species_distribution_models[expuuid]:
                for dsuuid in self.context.future_climate_datasets:
                    dsbrain = uuidToCatalogBrain(dsuuid)
                    dsmd = IBCCVLMetadata(dsbrain.getObject())
                    futurelayers = set(dsmd['layers'].keys())
                    # match sdm exp layers with future dataset layers
                    projlayers = {}
                    for ds, dslayerset in exp.environmental_datasets.items():
                        # layers the future dataset supplies
                        projlayers.setdefault(dsuuid, set()).update(
                            dslayerset.intersection(futurelayers))
                        # layers still taken from the original dataset
                        projlayers[ds] = dslayerset - futurelayers
                        if not projlayers[ds]:
                            # drop the entry if all layers were replaced
                            del projlayers[ds]
                    # create result
                    result = self._create_result_container(
                        sdmuuid, dsbrain, projlayers)
                    # update provenance
                    self._createProvenance(result)
                    # submit job
                    LOG.info("Submit JOB project to queue")
                    method(result, "project")  # TODO: wrong interface
                    resultjt = IJobTracker(result)
                    resultjt.new_job(
                        'TODO: generate id',
                        'generate taskname: projection experiment')
                    resultjt.set_progress('PENDING', u'projection pending')
            return 'info', u'Job submitted {0} - {1}'.format(
                self.context.title, self.state)
        else:
            # TODO: in case there is an error, should we abort the transaction
            #       to cancel previously submitted jobs?
            return 'error', u'Current Job is still running'
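The layer-matching loop above is plain set arithmetic: layers present in the future dataset replace the SDM's current layers, and any remainder stays with its original dataset. The same logic as a standalone, runnable sketch with made-up layer ids:

def match_layers(environmental_datasets, future_uuid, future_layers):
    # environmental_datasets: {dataset_uuid: set(layer_ids)}
    projlayers = {}
    for ds, dslayerset in environmental_datasets.items():
        # layers the future dataset can supply
        projlayers.setdefault(future_uuid, set()).update(
            dslayerset.intersection(future_layers))
        # layers still taken from the original dataset
        rest = dslayerset - future_layers
        if rest:
            projlayers[ds] = rest
    return projlayers

print(match_layers({'current-uuid': set(['B01', 'B02', 'B03'])},
                   'future-uuid', set(['B01', 'B02'])))
# the future dataset supplies B01 and B02; B03 stays with 'current-uuid'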
Example #48
class DatasetMetadataAdapter(object):
    """
    Gives z3c.form datamanagers attribute access to the metadata object.

    This class takes care of properly updating the underlying storage object.
    """

    # Note: there is a datamanager quirk here. The datamanager is looked up
    # for the context (which returns an AttributeField manager), but it then
    # adapts the context to this adapter and tries attribute access, which
    # fails.

    def __init__(self, context):
        self._data = IBCCVLMetadata(context)

    def __getattr__(self, name):
        ob = self._data
        try:
            if name in ('scientificName', 'taxonID', 'vernacularName'):
                return ob['species'][name]
            else:
                return ob[name]
        except KeyError:
            raise AttributeError('Attribute %s not found' % name)

    def __setattr__(self, name, value):
        if name == '_data':
            self.__dict__['_data'] = value
            # shortcut here to not store _data in metadata dictionary
            return
        if name in ('scientificName', 'taxonID', 'vernacularName'):
            # FIXME: need a new dict here?
            ob = self._data.setdefault('species', {})
        else:
            ob = self._data
        ob[name] = value

    def __delattr__(self, name):
        if name in ('scientificName', 'taxonID', 'vernacularName'):
            # FIXME: update dict?
            ob = self._data['species']
            del ob[name]
            if not ob:
                del self._data['species']
        else:
            del self._data[name]
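A stripped-down, runnable sketch of the same attribute/dictionary mapping, with a plain dict standing in for IBCCVLMetadata (data values made up):

class MetadataProxy(object):
    # same pattern as DatasetMetadataAdapter, minus the Plone bits
    _SPECIES_KEYS = ('scientificName', 'taxonID', 'vernacularName')

    def __init__(self, data):
        self.__dict__['_data'] = data  # bypass __setattr__

    def __getattr__(self, name):
        try:
            if name in self._SPECIES_KEYS:
                return self._data['species'][name]
            return self._data[name]
        except KeyError:
            raise AttributeError('Attribute %s not found' % name)

    def __setattr__(self, name, value):
        if name in self._SPECIES_KEYS:
            self._data.setdefault('species', {})[name] = value
        else:
            self._data[name] = value

md = MetadataProxy({'species': {'scientificName': u'Emu'}})
print(md.scientificName)  # Emu
md.resolution = u'Resolution30s'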
Example #49
def DatasetSearchableText(obj, **kw):
    md = IBCCVLMetadata(obj)
    entries = [
        safe_unicode(obj.id),
        safe_unicode(obj.title) or u"",
        safe_unicode(obj.description) or u""
    ]
    if 'layers' in md:
        layer_vocab = getUtility(IVocabularyFactory, 'layer_source')(obj)
        for key in md['layers']:
            if key not in layer_vocab:
                continue
            entries.append(
                safe_unicode(layer_vocab.getTerm(key).title) or u""
            )
    if 'species' in md:
        entries.extend((
            safe_unicode(md.get('species', {}).get('scientificName')) or u"",
            safe_unicode(md.get('species', {}).get('vernacularName')) or u"",
        ))
    if "Future datasets" in obj.subject:
        # year, gcm, emsc
        emsc_vocab = getUtility(IVocabularyFactory, 'emsc_source')(obj)
        gcm_vocab = getUtility(IVocabularyFactory, 'gcm_source')(obj)
        year = unicode(md.get('year', u''))
        month = unicode(md.get('month', u''))
        if md.get('emsc') in emsc_vocab:
            entries.append(
                safe_unicode(emsc_vocab.getTerm(md['emsc']).title) or u""
            )
        if md.get('gcm') in gcm_vocab:
            entries.append(
                safe_unicode(gcm_vocab.getTerm(md['gcm']).title) or u""
            )
        entries.append(year)
        entries.append(month)
    elif "Current datasets" in obj.subject:
        entries.append(u"current")
    return u" ".join(entries)
Example #50
def biodiverse_listing_details(expbrain):
    details = {}
    exp = expbrain.getObject()
    species = set()
    years = set()
    months = set()
    emscs = set()
    gcms = set()
    for dsuuid in chain.from_iterable(map(lambda x: x.keys(), exp.projection.itervalues())):
        dsobj = uuidToObject(dsuuid)
        # TODO: should inform user about missing dataset
        if dsobj:
            md = IBCCVLMetadata(dsobj)
            species.add(md.get('species', {}).get('scientificName', u'(Unavailable)'))
            year = md.get('year')
            if year:
                years.add(year)
            month = md.get('month')
            if month:
                months.add(month)
            gcm = md.get('gcm')
            if gcm:
                gcms.add(gcm)
            emsc = md.get('emsc')
            if emsc:
                emscs.add(emsc)
    details.update({
        'type': 'BIODIVERSE',
        'functions': 'endemism, redundancy',
        'species_occurrence': ', '.join(sorted(species)),
        # note: the 'species_absence' slot is reused here to display emission
        # scenarios and global climate models
        'species_absence': '{}, {}'.format(', '.join(sorted(emscs)),
                                           ', '.join(sorted(gcms))),
        'years': ', '.join(sorted(years)),
        'months': ', '.join(sorted(months))
    })
    return details
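The aggregation above just collects metadata fields across all projection datasets into sets; a self-contained sketch with fake metadata dicts shaped like IBCCVLMetadata output:

mds = [
    {'species': {'scientificName': u'Emu'}, 'year': u'2050', 'gcm': u'ACCESS1-0'},
    {'species': {'scientificName': u'Emu'}, 'year': u'2070', 'gcm': u'MIROC5'},
]
species = set(md.get('species', {}).get('scientificName', u'(Unavailable)')
              for md in mds)
gcms = set(md['gcm'] for md in mds if md.get('gcm'))
print(u'{0} / {1}'.format(u', '.join(sorted(species)),
                          u', '.join(sorted(gcms))))  # Emu / ACCESS1-0, MIROC5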
Example #51
def get_project_params(result):
    params = deepcopy(result.job_params)
    # get metadata for species_distribution_models
    uuid = params['species_distribution_models']
    params['species_distribution_models'] = getdatasetparams(uuid)
    # do biomod name mangling of species name
    params['species_distribution_models']['species'] = re.sub(
        u"[ _\-'\"/\(\)\{\}\[\]]", u".",
        params['species_distribution_models'].get('species', u"Unknown"))
    # we need the layers from sdm to fetch correct files for climate_models
    # TODO: getdatasetparams should fetch 'layers'
    sdmobj = uuidToObject(uuid)
    sdmmd = IBCCVLMetadata(sdmobj)
    params['species_distribution_models']['layers'] = sdmmd.get('layers_used', None)

    # do SDM projection results
    sdm_projections = []
    for resuuid in params['sdm_projections']:
        sdm_projections.append(getdatasetparams(resuuid))
    params['sdm_projections'] = sdm_projections

    # do future climate layers
    climatelist = []
    for uuid, layers in params['future_climate_datasets'].items():
        dsinfo = getdatasetparams(uuid)
        for layer in layers:
            dsdata = {
                'uuid': dsinfo['uuid'],
                'filename': dsinfo['filename'],
                'downloadurl': dsinfo['downloadurl'],
                'layer': layer,
                # TODO: add year, gcm, emsc here?
                'type': dsinfo['layers'][layer]['datatype'],
            }
            # if this is a zip file we'll have to set zippath as well
            # FIXME: poor check whether this is a zip file
            if dsinfo['filename'].endswith('.zip'):
                dsdata['zippath'] = dsinfo['layers'][layer]['filename']

            # FIXME: workaround to get the future projection name back; this
            #        only works with the file naming scheme of the currently
            #        available data
            if params.get('selected_future_layers') and layer in params['selected_future_layers']:
                params['projection_name'], _ = os.path.splitext(dsinfo['filename'])
            climatelist.append(dsdata)
    # replace climate_models parameter
    params['future_climate_datasets'] = climatelist
    params['selected_models'] = 'all'

    # In case no future climate layer is selected
    if not params.get('projection_name'):
        params['projection_name'], _ = os.path.splitext(dsinfo['filename'])

    # TODO: quick fix for Decimal json encoding through celery
    #       (where did my custom json encoder go?)
    for key, item in params.items():
        if isinstance(item, Decimal):
            params[key] = float(item)

    # Get the content of the projection_region BlobFile.
    # Note: deepcopy does not copy the content of BlobFile.
    params['projection_region'] = {
        'uuid': IUUID(result),
        'filename': 'projection_region.json',
        'downloadurl': '{0}/API/em/v1/constraintregion?uuid={1}'.format(
            getSite().absolute_url(), IUUID(result)),
    }

    # add hints for worker
    workerhints = {
        'files': ('species_distribution_models', 'future_climate_datasets', 'sdm_projections', 'projection_region',)
    }
    return {'env': {}, 'params': params, 'worker': workerhints}
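The Decimal loop near the end exists because the default JSON serialisation on the task side rejects Decimal values; the same normalisation in isolation, runnable as-is:

import json
from decimal import Decimal

params = {'prevalence': Decimal('0.5'), 'nb_run_eval': 10}
for key, item in params.items():
    if isinstance(item, Decimal):
        params[key] = float(item)
print(json.dumps(params))  # now JSON-serialisable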
Example #52
    def __init__(self, context):
        self._data = IBCCVLMetadata(context)
Example #53
    def item(self):
        # return dict with keys for experiment
        # and subkey 'models' for models within experiment
        item = {}
        if self.value:
            experiment_uuid = self.value.keys()[0]
            expbrain = uuidToCatalogBrain(experiment_uuid)
            if expbrain is None:
                return {
                    'title': u'Not Available',
                    'uuid': experiment_uuid,
                    'subitems': []  # models
                }
            item['title'] = expbrain.Title
            item['uuid'] = expbrain.UID
            exp = expbrain.getObject()
            # TODO: To get layers of all subsets?
            if getattr(exp, 'datasubsets', None):
                env_datasets = exp.datasubsets[0].get('environmental_datasets')
                item['layers'] = set(chain(*env_datasets.values()))
            else:
                item['layers'] = set(chain(*exp.environmental_datasets.values()))
            expmd = IBCCVLMetadata(exp)
            item['resolution'] = expmd.get('resolution')
            # now search all models within and add infos
            pc = getToolByName(self.context, 'portal_catalog')
            # only get result folders that are completed
            brains = pc.searchResults(path={'query': expbrain.getPath(), 'depth': 1},
                                      portal_type='Folder',
                                      job_state='COMPLETED')
            # TODO: maybe as generator?
            item['subitems'] = []
            for fldbrain in brains:
                # Get the SDM model from result folder
                brain = pc.searchResults(path=fldbrain.getPath(),
                                         BCCDataGenre=self.genre)
                if not brain:
                    # ignore this folder, as it does not have a result we want
                    continue
                brain = brain[0]
                # get algorithm term
                algoid = getattr(brain.getObject(), 'job_params',
                                 {}).get('function')
                algobrain = self.algo_dict.get(algoid, None)
                # Filter out geographic models; guard against an unknown
                # algorithm id (algobrain may be None)
                if algobrain is not None and algobrain.getObject().algorithm_category == 'geographic':
                    continue
                # FIXME: I need a different list of thresholds for display;
                # esp. don't look up threshold, but take vales (threshold
                # id and value) from field as is
                thresholds = dataset.getThresholds(brain.UID)[brain.UID]
                threshold = self.value[experiment_uuid].get(brain.UID)
                # is threshold in list?
                if threshold and threshold['label'] not in thresholds:
                    # maybe a custom entered number? ... we don't really care,
                    # as long as we reproduce the value the user entered
                    # (validate?)
                    thresholds[threshold['label']] = threshold['label']

                # current projection tiff file, and its metadata
                cpbrains = pc.searchResults(path=expbrain.getPath(),
                                            BCCDataGenre=['DataGenreCP'])
                cpmd = IBCCVLMetadata(cpbrains[0].getObject())

                item['subitems'].append(
                    {'item': brain,
                     'uuid': brain.UID,
                     'title': brain.Title,
                     'selected': brain.UID in self.value[experiment_uuid],
                     'algorithm': algobrain,
                     'threshold': threshold,
                     'thresholds': thresholds,
                     'layermd': cpmd['layers'].values()[0]
                     }
                )
        return item
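For orientation, a hedged sketch of the shape item() returns; all values are illustrative, and 'item', 'algorithm' and 'layermd' carry catalog brains and layer metadata in the real code:

item = {
    'title': u'My SDM experiment',
    'uuid': 'experiment-uuid',
    'layers': set(['B01', 'B12']),
    'resolution': u'Resolution30s',
    'subitems': [{
        'uuid': 'model-uuid',
        'title': u'bioclim model',
        'selected': True,
        'threshold': {'label': u'0.5'},
        'thresholds': {u'0.5': u'0.5'},
    }],
}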
Example #54
def month(obj, **kw):
    # FIXME: see year indexer above
    md = IBCCVLMetadata(obj)
    return md.get('month', None)
Example #55
def headers(obj, **kw):
    md = IBCCVLMetadata(obj)
    return md.get('headers', None)
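month and headers follow the same one-key indexer pattern: pull a single key out of IBCCVLMetadata and hand it to the catalog. A runnable sketch of factoring that out, with a plain attribute standing in for the metadata adapter (names illustrative):

def make_metadata_indexer(key, get_metadata):
    # get_metadata stands in for the IBCCVLMetadata adapter lookup
    def _indexer(obj, **kw):
        return get_metadata(obj).get(key, None)
    return _indexer

class FakeDataset(object):
    metadata = {'month': 6, 'headers': ['lat', 'lon']}

month = make_metadata_indexer('month', lambda obj: obj.metadata)
print(month(FakeDataset()))  # 6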
Example #56
    def export_to_ala(self):
        uuid = self.request.form.get("uuid", None)
        try:
            if uuid:
                brain = uuidToCatalogBrain(uuid)
                if brain is None:
                    raise Exception("Brain not found")

                obj = brain.getObject()
            else:
                obj = self.context

            # get username
            member = ploneapi.user.get_current()
            if member.getId():
                user = {
                    "id": member.getUserName(),
                    "email": member.getProperty("email"),
                    "fullname": member.getProperty("fullname"),
                }
            else:
                raise Exception("Invalid user")

            # verify dataset
            if obj.portal_type not in (
                "org.bccvl.content.dataset",
                "org.bccvl.content.remotedataset",
                "org.bccvl.content.multispeciesdataset",
            ):
                raise Exception("Invalid UUID (content type)")
            md = IBCCVLMetadata(obj)
            if md.get("genre") not in ("DataGenreSpeciesOccurrence", "DataGenreTraits"):
                raise Exception("Invalid UUID (data type)")
            # get download url
            dlinfo = IDownloadInfo(obj)

            # download file
            from org.bccvl import movelib
            from org.bccvl.movelib.utils import build_source, build_destination
            import tempfile

            destdir = tempfile.mkdtemp(prefix="export_to_ala")
            try:
                from org.bccvl.tasks.celery import app

                settings = app.conf.get("bccvl", {})
                dest = os.path.join(destdir, os.path.basename(dlinfo["url"]))
                movelib.move(
                    build_source(dlinfo["url"], user["id"], settings), build_destination("file://{}".format(dest))
                )

                csvfile = None

                if dlinfo["contenttype"] == "application/zip":
                    # look at 'layers' to find the file within the zip
                    arc = md["layers"].keys()[0]

                    import zipfile

                    zf = zipfile.ZipFile(dest, "r")
                    csvfile = zf.open(arc, "r")
                else:
                    csvfile = open(dest, "rb")

                import requests

                # "Accept:application/json" "Origin:http://example.com"
                res = requests.post(
                    settings["ala"]["sandboxurl"],
                    files={"file": csvfile},
                    headers={"apikey": settings["ala"]["apikey"], "Accept": "application/json"},
                )
                if res.status_code != 200:
                    self.record_error(res.reason, res.status_code)
                    raise Exception("Upload failed")
                retval = res.json()
                # TODO: do error checking
                #  keys: sandboxUrl, fileName, message, error: Bool, fileId
                return retval
            finally:
                import shutil

                shutil.rmtree(destdir)

        except Exception as e:
            self.record_error(str(e), 500)
            raise
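The TODO above leaves the sandbox response unchecked; given the documented keys (sandboxUrl, fileName, message, error, fileId), a hedged sketch of what that check might look like:

def check_sandbox_response(retval):
    # retval is the parsed JSON returned by the ALA sandbox upload
    if retval.get('error'):
        raise Exception(retval.get('message') or 'ALA sandbox upload failed')
    return {'url': retval.get('sandboxUrl'), 'file_id': retval.get('fileId')}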