def __call__(self):
    """Return the constraint (modelling/projection) region for the context.

    The context may be an experiment, an experiment result folder, or a
    result file; the lookup walks up to the right container accordingly.
    Returns raw region JSON data, or redirects to a download URL when the
    region is stored as a ``modelling_region.json`` file.
    Raises NotFound when no constraint region can be located.
    """
    # respect field level security as defined in plone.autoform
    # check if attribute access would be allowed!
    # url = guarded_getattr(self.context, 'remoteUrl', None)
    exp = self.context
    if IProjectionExperiment.providedBy(exp):
        if exp.projection_region:
            return exp.projection_region.data
        if not exp.species_distribution_models:
            raise NotFound(self, 'species_distribution_models', self.request)
        # Return the SDM's modelling region
        sdmuuid = exp.species_distribution_models.keys()[0]
        sdmobj = uuidToObject(sdmuuid)
        if sdmobj and sdmobj.modelling_region:
            return sdmobj.modelling_region.data
    elif IExperiment.providedBy(exp):
        if exp.modelling_region:
            return exp.modelling_region.data
    else:
        # Move one level up if this is an exp result file
        if not IExperiment.providedBy(exp.__parent__):
            exp = exp.__parent__
        if IExperiment.providedBy(exp.__parent__):
            # this is the result folder
            if IProjectionExperiment.providedBy(exp.__parent__):
                if exp.job_params['projection_region']:
                    return exp.job_params['projection_region'].data
                # Get constraint from SDM experiment result file.
                # Use the modelling_region.json file in the sdm result if
                # available.
                if 'species_distribution_models' not in exp.job_params:
                    raise NotFound(self, 'species_distribution_models',
                                   self.request)
                sdmuuid = exp.job_params['species_distribution_models']
                sdmobj = uuidToObject(sdmuuid).__parent__
            else:
                sdmobj = exp
            # Return the modelling_region attribute only if no
            # modelling_region.json file
            if 'modelling_region.json' not in sdmobj.keys():
                return sdmobj.modelling_region.data
            # Redirect to download the modelling_region.json
            constraint_region = sdmobj.get('modelling_region.json')
            remoteUrl = getattr(constraint_region, 'remoteUrl', None)
            if remoteUrl is None:
                raise NotFound(self, 'remoteUrl', self.request)
            # Generate temp url; fall back to the stored remote URL on any
            # swift error (best effort) — was a bare except, narrowed so
            # system-exiting exceptions are not swallowed.
            tool = getUtility(ISwiftUtility)
            try:
                url = tool.generate_temp_url(url=remoteUrl)
            except Exception:
                url = remoteUrl
            return self.request.RESPONSE.redirect(url.encode('utf-8'))
        else:
            raise NotFound(self, 'constraint_region', self.request)
def constraintregion(self):
    """Redirect to the download URL of an experiment's constraint region.

    Looks up the experiment by the ``uuid`` request parameter; the uuid may
    identify an experiment, a result folder, or a result file.
    Raises NotFound (and records an API error) when the experiment or its
    constraint region cannot be found.
    """
    uuid = self.request.form.get('uuid')
    exp = uuidToObject(uuid)
    if not exp:
        self.record_error('Not Found', 404, 'Experiment not found',
                          {'parameter': 'uuid'})
        raise NotFound(self, 'constraintregion', self.request)
    downloadurl = None
    if IExperiment.providedBy(exp):
        downloadurl = '{}/@@download/modelling_region.json'.format(
            exp.absolute_url())
    elif not IExperiment.providedBy(exp.__parent__):
        # this is an exp result file, so get exp folder
        exp = exp.__parent__
    if IExperiment.providedBy(exp.__parent__):
        # This is an exp result folder
        if IProjectionExperiment.providedBy(exp.__parent__):
            if exp.job_params['projection_region']:
                downloadurl = '{}/@@download/modelling_region.json'.format(
                    exp.absolute_url())
            else:
                # Get constraint from SDM experiment result file.
                # Use the modelling_region.json file in the sdm result if
                # available.
                if 'species_distribution_models' not in exp.job_params:
                    self.record_error(
                        'NotFound', 404, 'SDM model not found',
                        {'parameter': 'species_distribution_models'})
                    raise NotFound(self, 'species_distribution_models',
                                   self.request)
                sdmuuid = exp.job_params['species_distribution_models']
                sdmobj = uuidToObject(sdmuuid).__parent__
                # Return the modelling_region attribute only if no
                # modelling_region.json file
                if 'modelling_region.json' not in sdmobj.keys():
                    downloadurl = (
                        '{}/@@download/modelling_region.json'.format(
                            sdmobj.absolute_url()))
                else:
                    # Redirect to download the modelling_region.json
                    constraint_region = sdmobj.get('modelling_region.json')
                    remoteUrl = getattr(constraint_region, 'remoteUrl', None)
                    if remoteUrl is None:
                        raise NotFound(self, 'remoteUrl', self.request)
                    # Generate temp url; fall back to the stored remote URL
                    # on any swift error (best effort) — was a bare except,
                    # narrowed so system-exiting exceptions propagate.
                    tool = getUtility(ISwiftUtility)
                    try:
                        downloadurl = tool.generate_temp_url(url=remoteUrl)
                    except Exception:
                        downloadurl = remoteUrl
        else:
            downloadurl = '{}/@@download/modelling_region.json'.format(
                exp.absolute_url())
    if downloadurl is None:
        self.record_error('Not Found', 404, 'Constraint region not found',
                          {'parameter': 'uuid'})
        raise NotFound(self, 'constraintregion', self.request)
    return self.request.RESPONSE.redirect(downloadurl.encode('utf-8'))
def addSpeciesInfo(bccvlmd, result):
    """Copy species metadata from the result's input dataset into bccvlmd.

    For an SDM experiment result the species come from the occurrence
    dataset; for a projection experiment result from the input SDM models.
    For any other experiment type this is a no-op.

    Fixes a NameError: the original left ``spds`` unbound when the result's
    parent provided neither ISDMExperiment nor IProjectionExperiment.
    """
    spds = None
    if ISDMExperiment.providedBy(result.__parent__):
        spds = uuidToObject(result.job_params['species_occurrence_dataset'])
    if IProjectionExperiment.providedBy(result.__parent__):
        spds = uuidToObject(result.job_params['species_distribution_models'])
    if spds is None:
        # not an experiment type that carries species info
        return
    speciesmd = IBCCVLMetadata(spds).get('species', None)
    if speciesmd:
        bccvlmd['species'] = speciesmd.copy()
def constraintregion(self):
    """Redirect to the download URL of an experiment's constraint region.

    Looks up the experiment by the ``uuid`` request parameter; the uuid may
    identify an experiment, a result folder, or a result file.
    Raises NotFound (and records an API error) when the experiment or its
    constraint region cannot be found.
    """
    uuid = self.request.form.get('uuid')
    exp = uuidToObject(uuid)
    if not exp:
        self.record_error('Not Found', 404, 'Experiment not found',
                          {'parameter': 'uuid'})
        raise NotFound(self, 'constraintregion', self.request)
    downloadurl = None
    if IExperiment.providedBy(exp):
        downloadurl = '{}/@@download/modelling_region.json'.format(
            exp.absolute_url())
    elif not IExperiment.providedBy(exp.__parent__):
        # this is an exp result file, so get exp folder
        exp = exp.__parent__
    if IExperiment.providedBy(exp.__parent__):
        # This is an exp result folder
        if IProjectionExperiment.providedBy(exp.__parent__):
            if exp.job_params['projection_region']:
                downloadurl = '{}/@@download/modelling_region.json'.format(
                    exp.absolute_url())
            else:
                # Get constraint from SDM experiment result file.
                # Use the modelling_region.json file in the sdm result if
                # available.
                if 'species_distribution_models' not in exp.job_params:
                    self.record_error(
                        'NotFound', 404, 'SDM model not found',
                        {'parameter': 'species_distribution_models'})
                    raise NotFound(self, 'species_distribution_models',
                                   self.request)
                sdmuuid = exp.job_params['species_distribution_models']
                sdmobj = uuidToObject(sdmuuid).__parent__
                # Return the modelling_region attribute only if no
                # modelling_region.json file
                if 'modelling_region.json' not in sdmobj.keys():
                    downloadurl = (
                        '{}/@@download/modelling_region.json'.format(
                            sdmobj.absolute_url()))
                else:
                    # Redirect to download the modelling_region.json
                    constraint_region = sdmobj.get('modelling_region.json')
                    remoteUrl = getattr(constraint_region, 'remoteUrl', None)
                    if remoteUrl is None:
                        raise NotFound(self, 'remoteUrl', self.request)
                    # Generate temp url; fall back to the stored remote URL
                    # on any swift error (best effort) — was a bare except,
                    # narrowed so system-exiting exceptions propagate.
                    tool = getUtility(ISwiftUtility)
                    try:
                        downloadurl = tool.generate_temp_url(url=remoteUrl)
                    except Exception:
                        downloadurl = remoteUrl
        else:
            downloadurl = '{}/@@download/modelling_region.json'.format(
                exp.absolute_url())
    if downloadurl is None:
        self.record_error('Not Found', 404, 'Constraint region not found',
                          {'parameter': 'uuid'})
        raise NotFound(self, 'constraintregion', self.request)
    return self.request.RESPONSE.redirect(downloadurl.encode('utf-8'))
def experiment_reference_indexer(object, **kw):
    """Return the uuids of experiments/datasets referenced by this experiment.

    Returns None for experiment types without indexable references.
    """
    # TODO: Add Ensemble -> SDM, Proj, Biodiv, Biodiverse -> SDM, Proj
    if IProjectionExperiment.providedBy(object):
        return object.species_distribution_models.keys()
    if IEnsembleExperiment.providedBy(object):
        return object.datasets.keys()
    if IBiodiverseExperiment.providedBy(object):
        return object.projection.keys()
    return None
def experiment_inputs(self, context=None):
    """Yield catalog brains for an experiment's visualisable input datasets.

    Used by the overlay and compare pages. Defaults to ``self.context``
    when no explicit context is given.
    """
    if context is None:
        context = self.context
    catalog = getToolByName(self.context, 'portal_catalog')
    dsuuids = None
    if ISDMExperiment.providedBy(context):
        # for sdm we return selected occurrence and absence dataset
        # TODO: once available include pesudo absences from result
        dsuuids = (context.species_occurrence_dataset,
                   context.species_absence_dataset)
    elif IMMExperiment.providedBy(context):
        # for mme we return selected occurrence dataset only
        # TODO: once available include pesudo absences from result
        dsuuids = (context.species_occurrence_dataset,)
    elif IMSDMExperiment.providedBy(context):
        # muilt species sdm inputs
        dsuuids = context.species_occurrence_collections
    if dsuuids is not None:
        for dsuuid in dsuuids:
            brain = uuidToCatalogBrain(dsuuid)
            if brain:
                yield brain
    elif IProjectionExperiment.providedBy(context):
        # one experiment - multiple models
        for sdmuuid, models in context.species_distribution_models.items():
            if not uuidToObject(sdmuuid):
                continue
            for model in models:
                # yield current projections for each model
                model_brain = uuidToCatalogBrain(model)
                if not model_brain:
                    continue
                # Return only constraint SDM projection
                res_path = model_brain.getPath().rsplit('/', 1)
                for projection in catalog.searchResults(
                        path=res_path, BCCDataGenre='DataGenreCP'):
                    yield projection
def experiment_inputs(self, context=None):
    """Generate catalog brains for the input datasets of an experiment.

    These brains feed the overlay and compare visualisation pages; the
    experiment defaults to ``self.context``.
    """
    if context is None:
        context = self.context
    pc = getToolByName(self.context, 'portal_catalog')
    selected = None
    if ISDMExperiment.providedBy(context):
        # for sdm we return selected occurrence and absence dataset
        # TODO: once available include pesudo absences from result
        selected = (context.species_occurrence_dataset,
                    context.species_absence_dataset)
    elif IMMExperiment.providedBy(context):
        # for mme we return selected occurrence dataset only
        # TODO: once available include pesudo absences from result
        selected = (context.species_occurrence_dataset,)
    elif IMSDMExperiment.providedBy(context):
        # muilt species sdm inputs
        selected = context.species_occurrence_collections
    if selected is not None:
        for uid in selected:
            brain = uuidToCatalogBrain(uid)
            if brain:
                yield brain
        return
    if IProjectionExperiment.providedBy(context):
        # one experiment - multiple models
        for sdmuuid, models in context.species_distribution_models.items():
            if not uuidToObject(sdmuuid):
                continue
            for model in models:
                # yield current projections for each model
                model_brain = uuidToCatalogBrain(model)
                if not model_brain:
                    continue
                # Return only constraint SDM projection
                res_path = model_brain.getPath().rsplit('/', 1)
                for projection in pc.searchResults(
                        path=res_path, BCCDataGenre='DataGenreCP'):
                    yield projection
def __createExpmetadata(self, job_params):
    """Assemble the experiment metadata sections in ``self.md`` and return
    the rendered metadata text.

    Fixes: deprecated ``dict.has_key`` replaced with ``in``; two-argument
    ``getattr(coll, 'attribution')`` raised AttributeError on collections
    without the attribute (a default is now supplied so the ``or ''``
    fallback actually engages); a missing threshold entry yielded ``None``
    and crashed on ``.get()`` (now defaults to ``{}``).
    """
    # To do: add other R package versions dynamically
    # Get experiment title
    self.md['Model specifications'] = {
        'Title': self.context.title,
        'Date/time run': self.context.creation_date.__str__(),
        'Description': self.context.description or ''
    }

    # iterate over all input datasets and add them as entities
    self.md['Input datasets:'] = {}
    for key in ('species_occurrence_dataset', 'species_absence_dataset',
                'traits_dataset'):
        spmd = {}
        if key not in job_params:
            continue
        dsbrain = uuidToCatalogBrain(job_params[key])
        if not dsbrain:
            continue
        ds = dsbrain.getObject()
        mdata = IBCCVLMetadata(ds)
        if mdata and mdata.get('rows', None):
            spmd = {'Title': "{} ({})".format(ds.title, mdata.get('rows'))}
        else:
            spmd = {'Title': ds.title}
        info = IDownloadInfo(ds)
        spmd['Download URL'] = info['url']
        # walk up to the containing collection (or site root)
        coll = ds
        while not (ISiteRoot.providedBy(coll)
                   or ICollection.providedBy(coll)):
            coll = coll.__parent__
        spmd['Description'] = ds.description or coll.description or ''
        attribution = (ds.attribution
                       or getattr(coll, 'attribution', None) or '')
        if isinstance(attribution, list):
            attribution = '\n'.join([att.raw for att in attribution])
        spmd['Attribution'] = attribution
        self.md['Input datasets:'][key] = spmd

    key = 'traits_dataset_params'
    if key in job_params:
        self.md['Input datasets:'][key] = job_params.get(key, {})

    # pseudo-absence metadata.
    key = u"pseudo_absence_dataset"
    pa_file = self.context.get('pseudo_absences.csv')
    pa_url = ""
    pa_title = ""
    if pa_file:
        pa_title = pa_file.title
        pa_url = pa_file.absolute_url()
        pa_url = '{}/@@download/{}'.format(
            pa_url, os.path.basename(pa_url))
    pamd = {
        'Title': pa_title,
        'Download URL': pa_url,
        'Pseudo-absence Strategy': job_params.get('pa_strategy', ''),
        'Pseudo-absence Ratio': str(job_params.get('pa_ratio', ''))
    }
    if job_params.get('pa_strategy', '') == 'disc':
        pamd['Minimum distance'] = str(job_params.get('pa_disk_min', ''))
        pamd['Maximum distance'] = str(job_params.get('pa_disk_max', ''))
    if job_params.get('pa_strategy', '') == 'sre':
        pamd['Quantile'] = str(job_params.get('pa_sre_quant', ''))
    self.md['Input datasets:'][key] = pamd

    for key in ['environmental_datasets', 'future_climate_datasets']:
        if key not in job_params:
            continue
        env_list = []
        layer_vocab = getUtility(IVocabularyFactory,
                                 'layer_source')(self.context)
        for uuid, layers in job_params[key].items():
            ds = uuidToObject(uuid)
            coll = ds
            while not (ISiteRoot.providedBy(coll)
                       or ICollection.providedBy(coll)):
                coll = coll.__parent__
            description = ds.description or coll.description
            attribution = (ds.attribution
                           or getattr(coll, 'attribution', None) or '')
            if isinstance(attribution, list):
                attribution = '\n'.join([att.raw for att in attribution])
            layer_titles = [layer_vocab.getLayerTitle(layer)
                            for layer in layers]
            env_list.append({
                'Title': ds.title,
                'Layers': u'\n'.join(layer_titles),
                'Description': description,
                'Attribution': attribution
            })
        self.md['Input datasets:'][key] = env_list

    key = "datasets"
    if key in job_params:
        dataset_list = []
        for uid in job_params[key]:
            dsbrain = uuidToCatalogBrain(uid)
            if dsbrain:
                ds = dsbrain.getObject()
                # get the source experiment
                source_exp = ds.__parent__
                while not IExperiment.providedBy(source_exp):
                    source_exp = source_exp.__parent__
                dataset_list.append({
                    'Source experiment': source_exp.title,
                    'Title': ds.title,
                    'Description': ds.description,
                    'Download URL': '{}/@@download/file/{}'.format(
                        ds.absolute_url(),
                        os.path.basename(ds.absolute_url())),
                    'Algorithm': ds.__parent__.job_params.get(
                        'function', ''),
                    'Species': IBCCVLMetadata(ds).get(
                        'species', {}).get('scientificName', ''),
                    'Resolution': IBCCVLMetadata(ds).get('resolution', '')
                })
        self.md['Input datasets:'][key] = dataset_list

    key = 'species_distribution_models'
    if key in job_params:
        dsbrain = uuidToCatalogBrain(job_params[key])
        if dsbrain:
            ds = dsbrain.getObject()
            # get the source experiment
            source_exp = ds.__parent__
            while not IExperiment.providedBy(source_exp):
                source_exp = source_exp.__parent__
            # get the threshold; default to {} when none was recorded so
            # the .get() calls below cannot fail on None
            threshold = (self.context.species_distribution_models.get(
                source_exp.UID(), {}).get(ds.UID()) or {})
            self.md['Input datasets:'][key] = {
                'Source experiment': source_exp.title,
                'Title': ds.title,
                'Description': ds.description,
                'Download URL': '{}/@@download/file/{}'.format(
                    ds.absolute_url(),
                    os.path.basename(ds.absolute_url())),
                'Algorithm': ds.__parent__.job_params.get('function', ''),
                'Species': IBCCVLMetadata(ds).get(
                    'species', {}).get('scientificName', ''),
                'Threshold': "{}({})".format(
                    threshold.get('label', ''),
                    str(threshold.get('value', '')))
            }

    key = 'projections'
    if key in job_params:
        for pds in job_params[key]:
            threshold = pds.get('threshold', {})
            dsbrain = uuidToCatalogBrain(pds.get('dataset'))
            if dsbrain:
                ds = dsbrain.getObject()
                # get the source experiment
                source_exp = ds.__parent__
                while not IExperiment.providedBy(source_exp):
                    source_exp = source_exp.__parent__
                self.md['Input datasets:'][key] = {
                    'Source experiment': source_exp.title,
                    'Title': ds.title,
                    'Description': ds.description,
                    'Download URL': '{}/@@download/file/{}'.format(
                        ds.absolute_url(),
                        os.path.basename(ds.absolute_url())),
                    'Algorithm': ds.__parent__.job_params.get(
                        'function', ''),
                    'Species': IBCCVLMetadata(ds).get(
                        'species', {}).get('scientificName', ''),
                    'Threshold': "{}({})".format(
                        threshold.get('label', ''),
                        str(threshold.get('value', ''))),
                    'Biodiverse Cell size (m)': str(
                        job_params.get('cluster_size', ''))
                }

    # Projection experiment does not have algorithm as input
    if not IProjectionExperiment.providedBy(self.context.__parent__):
        for key in ['function', 'algorithm']:
            if key in job_params:
                self.md['Algorithm settings:'] = {
                    'Algorithm Name': job_params[key],
                    'Configuration options': self.__algoConfigOption(
                        job_params[key], job_params)
                }

    # Construct the text
    mdtext = StringIO.StringIO()
    for heading in ['BCCVL model outputs guide',
                    'System specifications',
                    'Model specifications',
                    'Input datasets:',
                    'Algorithm settings:',
                    'Model outputs:']:
        mdtext.write(self.__getMetadataText(heading, self.md))
    return mdtext.getvalue()
def __createExpmetadata(self, job_params):
    """Assemble the experiment metadata sections in ``self.md`` and return
    the rendered metadata text.

    Fixes: deprecated ``dict.has_key`` replaced with ``in``; two-argument
    ``getattr(coll, 'attribution')`` raised AttributeError on collections
    without the attribute (a default is now supplied so the ``or ''``
    fallback actually engages); a missing threshold entry yielded ``None``
    and crashed on ``.get()`` (now defaults to ``{}``).
    """
    # To do: add other R package versions dynamically
    # Get experiment title
    self.md['Model specifications'] = {
        'Title': self.context.title,
        'Date/time run': self.context.creation_date.__str__(),
        'Description': self.context.description or ''
    }

    # iterate over all input datasets and add them as entities
    self.md['Input datasets:'] = {}
    for key in ('species_occurrence_dataset', 'species_absence_dataset',
                'traits_dataset'):
        spmd = {}
        if key not in job_params:
            continue
        dsbrain = uuidToCatalogBrain(job_params[key])
        if not dsbrain:
            continue
        ds = dsbrain.getObject()
        mdata = IBCCVLMetadata(ds)
        if mdata and mdata.get('rows', None):
            spmd = {'Title': "{} ({})".format(ds.title, mdata.get('rows'))}
        else:
            spmd = {'Title': ds.title}
        info = IDownloadInfo(ds)
        spmd['Download URL'] = info['url']
        # walk up to the containing collection (or site root)
        coll = ds
        while not (ISiteRoot.providedBy(coll)
                   or ICollection.providedBy(coll)):
            coll = coll.__parent__
        spmd['Description'] = ds.description or coll.description or ''
        attribution = (ds.attribution
                       or getattr(coll, 'attribution', None) or '')
        if isinstance(attribution, list):
            attribution = '\n'.join([att.raw for att in attribution])
        spmd['Attribution'] = attribution
        self.md['Input datasets:'][key] = spmd

    key = 'traits_dataset_params'
    if key in job_params:
        self.md['Input datasets:'][key] = job_params.get(key, {})

    # pseudo-absence metadata.
    key = u"pseudo_absence_dataset"
    pa_file = self.context.get('pseudo_absences.csv')
    pa_url = ""
    pa_title = ""
    if pa_file:
        pa_title = pa_file.title
        pa_url = pa_file.absolute_url()
        pa_url = '{}/@@download/{}'.format(
            pa_url, os.path.basename(pa_url))
    pamd = {
        'Title': pa_title,
        'Download URL': pa_url,
        'Pseudo-absence Strategy': job_params.get('pa_strategy', ''),
        'Pseudo-absence Ratio': str(job_params.get('pa_ratio', ''))
    }
    if job_params.get('pa_strategy', '') == 'disc':
        pamd['Minimum distance'] = str(job_params.get('pa_disk_min', ''))
        pamd['Maximum distance'] = str(job_params.get('pa_disk_max', ''))
    if job_params.get('pa_strategy', '') == 'sre':
        pamd['Quantile'] = str(job_params.get('pa_sre_quant', ''))
    self.md['Input datasets:'][key] = pamd

    for key in ['environmental_datasets', 'future_climate_datasets']:
        if key not in job_params:
            continue
        env_list = []
        layer_vocab = getUtility(IVocabularyFactory,
                                 'layer_source')(self.context)
        for uuid, layers in job_params[key].items():
            ds = uuidToObject(uuid)
            coll = ds
            while not (ISiteRoot.providedBy(coll)
                       or ICollection.providedBy(coll)):
                coll = coll.__parent__
            description = ds.description or coll.description
            attribution = (ds.attribution
                           or getattr(coll, 'attribution', None) or '')
            if isinstance(attribution, list):
                attribution = '\n'.join([att.raw for att in attribution])
            layer_titles = [layer_vocab.getLayerTitle(layer)
                            for layer in layers]
            env_list.append({
                'Title': ds.title,
                'Layers': u'\n'.join(layer_titles),
                'Description': description,
                'Attribution': attribution
            })
        self.md['Input datasets:'][key] = env_list

    key = "datasets"
    if key in job_params:
        dataset_list = []
        for uid in job_params[key]:
            dsbrain = uuidToCatalogBrain(uid)
            if dsbrain:
                ds = dsbrain.getObject()
                # get the source experiment
                source_exp = ds.__parent__
                while not IExperiment.providedBy(source_exp):
                    source_exp = source_exp.__parent__
                dataset_list.append({
                    'Source experiment': source_exp.title,
                    'Title': ds.title,
                    'Description': ds.description,
                    'Download URL': '{}/@@download/file/{}'.format(
                        ds.absolute_url(),
                        os.path.basename(ds.absolute_url())),
                    'Algorithm': ds.__parent__.job_params.get(
                        'function', ''),
                    'Species': IBCCVLMetadata(ds).get(
                        'species', {}).get('scientificName', ''),
                    'Resolution': IBCCVLMetadata(ds).get('resolution', '')
                })
        self.md['Input datasets:'][key] = dataset_list

    key = 'species_distribution_models'
    if key in job_params:
        dsbrain = uuidToCatalogBrain(job_params[key])
        if dsbrain:
            ds = dsbrain.getObject()
            # get the source experiment
            source_exp = ds.__parent__
            while not IExperiment.providedBy(source_exp):
                source_exp = source_exp.__parent__
            # get the threshold; default to {} when none was recorded so
            # the .get() calls below cannot fail on None
            threshold = (self.context.species_distribution_models.get(
                source_exp.UID(), {}).get(ds.UID()) or {})
            self.md['Input datasets:'][key] = {
                'Source experiment': source_exp.title,
                'Title': ds.title,
                'Description': ds.description,
                'Download URL': '{}/@@download/file/{}'.format(
                    ds.absolute_url(),
                    os.path.basename(ds.absolute_url())),
                'Algorithm': ds.__parent__.job_params.get('function', ''),
                'Species': IBCCVLMetadata(ds).get(
                    'species', {}).get('scientificName', ''),
                'Threshold': "{}({})".format(
                    threshold.get('label', ''),
                    str(threshold.get('value', '')))
            }

    key = 'projections'
    if key in job_params:
        for pds in job_params[key]:
            threshold = pds.get('threshold', {})
            dsbrain = uuidToCatalogBrain(pds.get('dataset'))
            if dsbrain:
                ds = dsbrain.getObject()
                # get the source experiment
                source_exp = ds.__parent__
                while not IExperiment.providedBy(source_exp):
                    source_exp = source_exp.__parent__
                self.md['Input datasets:'][key] = {
                    'Source experiment': source_exp.title,
                    'Title': ds.title,
                    'Description': ds.description,
                    'Download URL': '{}/@@download/file/{}'.format(
                        ds.absolute_url(),
                        os.path.basename(ds.absolute_url())),
                    'Algorithm': ds.__parent__.job_params.get(
                        'function', ''),
                    'Species': IBCCVLMetadata(ds).get(
                        'species', {}).get('scientificName', ''),
                    'Threshold': "{}({})".format(
                        threshold.get('label', ''),
                        str(threshold.get('value', ''))),
                    'Biodiverse Cell size (m)': str(
                        job_params.get('cluster_size', ''))
                }

    # Projection experiment does not have algorithm as input
    if not IProjectionExperiment.providedBy(self.context.__parent__):
        for key in ['function', 'algorithm']:
            if key in job_params:
                self.md['Algorithm settings:'] = {
                    'Algorithm Name': job_params[key],
                    'Configuration options': self.__algoConfigOption(
                        job_params[key], job_params)
                }

    # Construct the text
    mdtext = StringIO.StringIO()
    for heading in ['BCCVL model outputs guide',
                    'System specifications',
                    'Model specifications',
                    'Input datasets:',
                    'Algorithm settings:',
                    'Model outputs:']:
        mdtext.write(self.__getMetadataText(heading, self.md))
    return mdtext.getvalue()