def rat(self):
    """Return the Raster Attribute Table (RAT) for one layer of a dataset.

    Request parameters:
        uuid  -- catalog UUID of the dataset
        layer -- layer identifier within the dataset's metadata

    Raises NotFound when the dataset or its RAT cannot be resolved, and
    BadRequest when the layer parameter is missing or unknown.
    """
    uuid = self.request.form.get('uuid')
    layer = self.request.form.get('layer')
    brain = None
    try:
        brain = uuidToCatalogBrain(uuid)
    except Exception as e:
        LOG.error('Caught exception %s', e)
    if not brain:
        self.record_error('Not Found', 404, 'dataset not found',
                          {'parameter': 'uuid'})
        raise NotFound(self, 'metadata', self.request)
    md = IBCCVLMetadata(brain.getObject())
    # BUG FIX: was `not layer and layer not in ...`, which only fired when
    # layer was missing entirely; a present-but-unknown layer id slipped
    # through and surfaced later as a confusing NotFound from the json
    # decode below.
    if not layer or layer not in md.get('layers', {}):
        self.record_error('Bad Request', 400, 'Missing parameter layer',
                          {'parameter': 'layer'})
        raise BadRequest('Missing parameter layer')
    try:
        rat = md.get('layers', {}).get(layer, {}).get('rat')
        # rat is stored as a json string in the metadata annotation
        rat = json.loads(unicode(rat))
        return rat
    except Exception as e:
        LOG.warning(
            "Couldn't decode Raster Attribute Table from metadata. %s: %s",
            self.context, repr(e))
        raise NotFound(self, 'rat', self.request)
def setUpWidgets(self, ignore_request=False): self.set_untranslatable_fields_for_display() #get the translation if available language = self.request.get("language") translation = get_translation_for(self.context, language) if translation: self.is_translation = True else: self.is_translation = False context = copy(removeSecurityProxy(self.context)) for field_translation in translation: setattr(context, field_translation.field_name, field_translation.field_text) self.widgets = formlib.form.setUpEditWidgets( self.form_fields, self.prefix, context, self.request, adapters=self.adapters, ignore_request=ignore_request) if language is not None: widget = self.widgets["language"] try: self.language = language widget.vocabulary = CurrentLanguageVocabulary().__call__(self) widget.vocabulary.getTermByToken(language) except LookupError: raise BadRequest("No such language token: '%s'" % language) # if the term exists in the vocabulary, set the value on # the widget widget.setRenderedValue(language) # for translations, add a ``render_original`` method to each # widget, which will render the display widget bound to the # original (HEAD) document head = self.context form_fields = ui.setUpFields(self.context.__class__, "view") for widget in self.widgets: form_field = form_fields.get(widget.context.__name__) if form_field is None: form_field = formlib.form.Field(widget.context) # bind field to head document field = form_field.field.bind(head) # create custom widget or instantiate widget using # component lookup if form_field.custom_widget is not None: display_widget = form_field.custom_widget(field, self.request) else: display_widget = component.getMultiAdapter( (field, self.request), IDisplayWidget) display_widget.setRenderedValue(field.get(head)) # attach widget as ``render_original`` widget.render_original = display_widget
def state(self):
    """Return the state of a job identified by 'jobid' or 'uuid'.

    Only the job owner or users with the Manager/SiteAdministrator role
    may see the state.  Raises BadRequest when neither parameter is
    supplied and NotFound when no (visible) job exists.
    """
    jobid = self.request.form.get('jobid', None)
    uuid = self.request.form.get('uuid', None)
    job = None
    try:
        jobtool = getUtility(IJobUtility)
        if jobid:
            job = jobtool.get_job_by_id(jobid)
        elif uuid:
            job = jobtool.find_job_by_uuid(uuid)
        else:
            # typo fixed: was 'Reqired'
            raise BadRequest('Required parameter jobid or uuid missing')
    except KeyError:
        LOG.warning("Can't find job with id %s", jobid)
    # check current user permissions:
    # TODO: should we check if we have view permissions in case we look at
    #       job state for content object?
    # only give access to job state if manager or owner
    user = ploneapi.user.get_current()
    # BUG FIX: guard against job being None (lookup failed with KeyError
    # above); previously this unconditionally dereferenced job.userid and
    # raised AttributeError instead of the intended NotFound.
    if job is not None and user.getId() != job.userid:
        roles = user.getRoles()
        # intersect required roles with user roles
        if not (set(roles) & set(('Manager', 'SiteAdministrator'))):
            job = None
    if job:
        return job.state
    # No job found
    raise NotFound(self, 'state', self.request)
def parse_json(body):
    """Return parsed json, otherwise raise BadRequest"""
    try:
        return simplejson.loads(body)
    except ValueError:
        # anything that is not valid json becomes a client error
        raise BadRequest('Content could not be parsed')
def query(self):
    """Search for jobs matching the request's form parameters.

    Non-privileged users are restricted to their own jobs.  Returns a
    dict with the id and state of the first match, or an empty dict.
    """
    # FIXME: add owner check here -> probably easiest to make userid query
    # parameter part of jobtool query function? ; could also look inteo
    # allowed_roles in catalog?
    params = self.request.form
    if not params:
        raise BadRequest('No query parameters supplied')
    jobtool = getUtility(IJobUtility)
    # add current userid to query unless the user has an admin role
    current_user = ploneapi.user.get_current()
    admin_roles = set(('Manager', 'SiteAdministrator'))
    if admin_roles.isdisjoint(current_user.getRoles()):
        params['userid'] = current_user.getId()
    matches = jobtool.query(**params)
    if not matches:
        return {}
    first = matches[0]
    return {'id': first.id, 'state': first.state}
def import_ala_data(self):
    """Import ALA occurrence dataset(s) described by the 'data' form parameter.

    POST only.  Creates a (multi)species dataset object in the datasets
    folder and starts a background import job.  Returns a json-able dict
    with job status, message and id.
    """
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        self.record_error('Request must be POST', 400)
        raise BadRequest('Request must be POST')
    # get import context
    if ISiteRoot.providedBy(self.context):
        # we have been called at site root... let's traverse to default
        # import location
        context = self.context.restrictedTraverse("/".join(
            (defaults.DATASETS_FOLDER_ID,
             defaults.DATASETS_SPECIES_FOLDER_ID, 'ala')))
    else:
        # custom context.... let's use it
        context = self.context
    # do user check first
    member = ploneapi.user.get_current()
    if member.getId():
        user = {
            'id': member.getUserName(),
            'email': member.getProperty('email'),
            'fullname': member.getProperty('fullname')
        }
    else:
        # We need at least a valid user
        raise Unauthorized("Invalid user")
    # check permission
    if not checkPermission('org.bccvl.AddDataset', context):
        raise Unauthorized("User not allowed in this context")
    params = self.request.form.get('data')
    # BUG FIX: the original raised BadRequest (with a message copy-pasted
    # from the trait importer: "At least on of traits or environ has to be
    # set") before the record_error calls, which made them unreachable.
    # Record the specific error first, then raise with a matching message.
    if params is None:
        self.record_error('Bad Request', 400, 'Missing parameter data',
                          {'parameter': 'data'})
        raise BadRequest('Missing parameter data')
    if not params:
        self.record_error('Bad Request', 400, 'Empty parameter data',
                          {'parameter': 'data'})
        raise BadRequest('Empty parameter data')
    # TODO: should validate objects inside as well? (or use json schema
    # validation?)
    # all good so far
    # pull dataset from ala
    # TODO: get better name here
    title = params[0].get('name', 'ALA import')
    # determine dataset type
    # 1. test if it is a multi species import
    species = set()
    for dataset_query in params:  # renamed to avoid shadowing/reassigning
        biocache_url = '{}/occurrences/search'.format(dataset_query['url'])
        search_params = {
            'q': dataset_query['query'],
            'pageSize': 0,
            'limit': 2,
            'facets': 'species_guid',
            'fq': 'species_guid:*'  # skip results without species guid
        }
        res = requests.get(biocache_url, params=search_params)
        res = res.json()
        # FIXME: do we need to treat sandbox downloads differently?
        if res.get('facetResults'):
            # do we have some results at all?
            for guid in res['facetResults'][0]['fieldResult']:
                species.add(guid['label'])
    if len(species) > 1:
        portal_type = 'org.bccvl.content.multispeciesdataset'
    else:
        portal_type = 'org.bccvl.content.dataset'
    swiftsettings = getUtility(IRegistry).forInterface(ISwiftSettings)
    if swiftsettings.storage_url:
        # NOTE(review): this overrides the multispecies type too; the
        # pullOccurrenceFromALA variant only switches to remotedataset for
        # single-species datasets — confirm which behaviour is intended.
        portal_type = 'org.bccvl.content.remotedataset'
    # create content
    ds = createContent(portal_type, title=title)
    ds.dataSource = 'ala'
    ds.description = u' '.join([title, u' imported from ALA'])
    ds.import_params = params
    ds = addContentToContainer(context, ds)
    md = IBCCVLMetadata(ds)
    if IMultiSpeciesDataset.providedBy(ds):
        md['genre'] = 'DataGenreSpeciesCollection'
        md['categories'] = ['multispecies']
    else:
        # species dataset
        md['genre'] = 'DataGenreSpeciesOccurrence'
        md['categories'] = ['occurrence']
    # TODO: populate this correctly as well
    md['species'] = [{'scientificName': 'qid', 'taxonID': 'qid'}]
    # FIXME: IStatusMessage should not be in API call
    from Products.statusmessages.interfaces import IStatusMessage
    IStatusMessage(self.request).add('New Dataset created', type='info')
    # start import job
    jt = IExperimentJobTracker(ds)
    status, message = jt.start_job()
    # reindex object to make sure everything is up to date
    ds.reindexObject()
    # FIXME: IStatusMessage should not be in API call
    IStatusMessage(self.request).add(message, type=status)
    # FIXME: API should not return a redirect
    # 201: new resource created ... location may point to resource
    from Products.CMFCore.utils import getToolByName
    portal = getToolByName(self.context, 'portal_url').getPortalObject()
    nexturl = portal[defaults.DATASETS_FOLDER_ID].absolute_url()
    self.request.response.setStatus(201)
    self.request.response.setHeader('Location', nexturl)
    # FIXME: should return a nice json representation of success or error
    return {
        'status': status,
        'message': message,
        'jobid': IJobTracker(ds).get_job().id
    }
def import_trait_data(self):
    """Create a traits dataset imported from AEKOS or ZoATrack.

    POST only.  Validates the source/species/traits/environ/url form
    parameters, creates a dataset object in the matching species folder
    and starts the background import job.  Returns a json-able dict with
    job status, message and id.
    """
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        self.record_error('Request must be POST', 400)
        raise BadRequest('Request must be POST')
    source = self.request.form.get('source', None)
    species = self.request.form.get('species', None)
    traits = self.request.form.get('traits', None)
    environ = self.request.form.get('environ', None)
    dataurl = self.request.form.get('url', None)
    context = None
    if not source or source not in ('aekos', 'zoatrack'):
        raise BadRequest("source parameter must be 'aekos' or 'zoatrack'")
    # get import context
    if ISiteRoot.providedBy(self.context):
        # we have been called at site root... let's traverse to default
        # import location
        context = self.context.restrictedTraverse("/".join(
            (defaults.DATASETS_FOLDER_ID,
             defaults.DATASETS_SPECIES_FOLDER_ID, str(source))))
    else:
        # custom context.... let's use in
        context = self.context
    # do user check first
    member = ploneapi.user.get_current()
    if member.getId():
        user = {
            'id': member.getUserName(),
            'email': member.getProperty('email'),
            'fullname': member.getProperty('fullname')
        }
    else:
        # We need at least a valid user
        raise Unauthorized("Invalid user")
    # check permission
    if not checkPermission('org.bccvl.AddDataset', context):
        raise Unauthorized("User not allowed in this context")
    # check parameters; species may arrive as a single string or a list
    if not species or not isinstance(species, (basestring, list)):
        raise BadRequest("Missing or invalid species parameter")
    elif isinstance(species, basestring):
        species = [species]
    # for zoatrack, url needs to be set
    if source == 'zoatrack' and not dataurl:
        raise BadRequest("url has to be set")
    # for aekos, at least a trait or environment variable must be specified.
    if source == 'aekos' and not traits and not environ:
        raise BadRequest(
            "At least a trait or environent variable has to be set")
    # normalise traits/environ to lists
    if not traits:
        traits = []
    elif isinstance(traits, basestring):
        traits = [traits]
    if not environ:
        environ = []
    elif isinstance(environ, basestring):
        environ = [environ]
    # all good so far
    # pull dataset from aekos
    title = ' '.join(species)
    # determine dataset type
    portal_type = 'org.bccvl.content.dataset'
    swiftsettings = getUtility(IRegistry).forInterface(ISwiftSettings)
    if swiftsettings.storage_url:
        # remote storage is configured -> store dataset in swift
        portal_type = 'org.bccvl.content.remotedataset'
    # create content
    ds = createContent(portal_type, title=title)
    ds.dataSource = source
    ds.description = u' '.join([
        title, ','.join(traits), ','.join(environ),
        u' imported from {}'.format(source.upper())
    ])
    ds = addContentToContainer(context, ds)
    md = IBCCVLMetadata(ds)
    md['genre'] = 'DataGenreTraits'
    md['categories'] = ['traits']
    md['species'] = [{
        'scientificName': spec,
        'taxonID': spec
    } for spec in species]
    md['traits'] = traits
    md['environ'] = environ
    md['dataurl'] = dataurl
    # FIXME: IStatusMessage should not be in API call
    from Products.statusmessages.interfaces import IStatusMessage
    IStatusMessage(self.request).add('New Dataset created', type='info')
    # start import job
    jt = IExperimentJobTracker(ds)
    status, message = jt.start_job()
    # reindex ojebct to make sure everything is up to date
    ds.reindexObject()
    # FIXME: IStatutsMessage should not be in API call
    IStatusMessage(self.request).add(message, type=status)
    # FIXME: API should not return a redirect
    # 201: new resource created ... location may point to resource
    from Products.CMFCore.utils import getToolByName
    portal = getToolByName(self.context, 'portal_url').getPortalObject()
    nexturl = portal[defaults.DATASETS_FOLDER_ID].absolute_url()
    self.request.response.setStatus(201)
    self.request.response.setHeader('Location', nexturl)
    # FIXME: should return a nice json representation of success or error
    return {
        'status': status,
        'message': message,
        'jobid': IJobTracker(ds).get_job().id
    }
def __call__(self):
    """Reject the request: this endpoint requires a parameter that is absent."""
    raise BadRequest('Missing parameter')
def pullOccurrenceFromALA(self, lsid, taxon, dataSrc='ala', common=None):
    """Create an occurrence dataset for *taxon* and start an import job.

    ``dataSrc`` selects the destination container ('ala', 'gbif', 'aekos'
    or 'obis').  Returns the (status, message) tuple from the job tracker.
    """
    # TODO: check permissions?
    # 1. create new dataset with taxon, lsid and common name set
    portal = getToolByName(self.context, 'portal_url').getPortalObject()
    if dataSrc == 'ala':
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][
            defaults.DATASETS_SPECIES_FOLDER_ID]['ala']
    elif dataSrc == 'gbif':
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][
            defaults.DATASETS_SPECIES_FOLDER_ID]['gbif']
    elif dataSrc == 'aekos':
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][
            defaults.DATASETS_SPECIES_FOLDER_ID]['aekos']
    elif dataSrc == 'obis':
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][
            defaults.DATASETS_SPECIES_FOLDER_ID]['obis']
    else:
        raise BadRequest('Invalid data source {0}'.format(dataSrc))
    title = [taxon]
    if common:
        title.append(u"({})".format(common))
    # TODO: move content creation into IALAJobTracker?
    # remotedataset?
    swiftsettings = getUtility(IRegistry).forInterface(ISwiftSettings)
    if swiftsettings.storage_url:
        portal_type = 'org.bccvl.content.remotedataset'
    else:
        portal_type = 'org.bccvl.content.dataset'
    # TODO: make sure we get a better content id that dataset-x
    title = u' '.join(title)
    ds = createContent(portal_type, title=title)
    ds.dataSource = dataSrc  # Either ALA or GBIF as source
    # TODO: add number of occurences to description
    ds.description = u' '.join(
        (title, u'imported from', unicode(dataSrc.upper())))
    ds = addContentToContainer(dscontainer, ds)
    md = IBCCVLMetadata(ds)
    # TODO: provenance ... import url?
    # FIXME: verify input parameters before adding to graph
    md['genre'] = 'DataGenreSpeciesOccurrence'
    md['species'] = {
        'scientificName': taxon,
        'taxonID': lsid,
    }
    if common:
        md['species']['vernacularName'] = common
    IStatusMessage(self.request).add('New Dataset created', type='info')
    # 2. create and push alaimport job for dataset
    # TODO: make this named adapter
    jt = IExperimentJobTracker(ds)
    status, message = jt.start_job()
    # reindex object to make sure everything is up to date
    ds.reindexObject()
    # Job submission state notifier
    IStatusMessage(self.request).add(message, type=status)
    return (status, message)
def demosdm(self):
    """Run a demo SDM and climate-change projection for an ALA species.

    POST only; requires the 'lsid' form parameter.  Builds a complete job
    description (current and future climate layers, the 'demosdm' toolkit
    script and its default parameters), queues a background demo task and
    returns the expected result locations plus the job id.
    """
    lsid = self.request.form.get('lsid')
    # Run SDM on a species given by lsid (from ALA), followed by a Climate
    # Change projection.
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        raise BadRequest('Request must be POST')
    # Swift params
    swiftsettings = getUtility(IRegistry).forInterface(ISwiftSettings)
    # get parameters
    if not lsid:
        raise BadRequest('Required parameter lsid missing')
    # we have an lsid,.... we can't really verify but at least some
    # data is here
    # find rest of parameters
    # FIXME: hardcoded path to environmental datasets
    # Get the future climate for climate change projection
    portal = ploneapi.portal.get()
    dspath = '/'.join([defaults.DATASETS_FOLDER_ID,
                       defaults.DATASETS_CLIMATE_FOLDER_ID,
                       'australia', 'australia_1km',
                       'RCP85_ukmo-hadgem1_2085.zip'])
    ds = portal.restrictedTraverse(dspath)
    dsuuid = IUUID(ds)
    dlinfo = IDownloadInfo(ds)
    dsmd = IBCCVLMetadata(ds)
    futureclimatelist = []
    for layer in ('B05', 'B06', 'B13', 'B14'):
        futureclimatelist.append({
            'uuid': dsuuid,
            'filename': dlinfo['filename'],
            'downloadurl': dlinfo['url'],
            'layer': layer,
            'type': dsmd['layers'][layer]['datatype'],
            'zippath': dsmd['layers'][layer]['filename']
        })
    # Climate change projection name
    cc_projection_name = os.path.splitext(dlinfo['filename'])[0]
    # Get the current climate for SDM
    dspath = '/'.join([defaults.DATASETS_FOLDER_ID,
                       defaults.DATASETS_CLIMATE_FOLDER_ID,
                       'australia', 'australia_1km',
                       'current.76to05.zip'])
    ds = portal.restrictedTraverse(dspath)
    dsuuid = IUUID(ds)
    dlinfo = IDownloadInfo(ds)
    dsmd = IBCCVLMetadata(ds)
    envlist = []
    for layer in ('B05', 'B06', 'B13', 'B14'):
        envlist.append({
            'uuid': dsuuid,
            'filename': dlinfo['filename'],
            'downloadurl': dlinfo['url'],
            'layer': layer,
            'type': dsmd['layers'][layer]['datatype'],
            'zippath': dsmd['layers'][layer]['filename']
        })
    # FIXME: we don't use a IJobTracker here for now
    # get toolkit and
    func = portal[defaults.TOOLKITS_FOLDER_ID]['demosdm']
    # build job_params:
    job_params = {
        'resolution': IBCCVLMetadata(ds)['resolution'],
        'function': func.getId(),
        'species_occurrence_dataset': {
            'uuid': 'ala_occurrence_dataset',
            'species': u'demoSDM',
            'downloadurl': 'ala://ala?lsid={}'.format(lsid),
        },
        'environmental_datasets': envlist,
        'future_climate_datasets': futureclimatelist,
        'cc_projection_name': cc_projection_name
    }
    # add toolkit parameters: (all default values)
    # get toolkit schema
    schema = loadString(func.schema).schema
    for name, field in getFields(schema).items():
        if field.default is not None:
            job_params[name] = field.default
    # add other default parameters
    job_params.update({
        'rescale_all_models': False,
        'selected_models': 'all',
        'modeling_id': 'bccvl',
    })
    # generate script to run
    script = u'\n'.join([
        resource_string('org.bccvl.compute', 'rscripts/bccvl.R'),
        resource_string('org.bccvl.compute', 'rscripts/eval.R'),
        func.script])
    # where to store results.
    result = {
        'results_dir': 'swift+{}/wordpress/{}/'.format(
            swiftsettings.storage_url, urllib.quote_plus(lsid)),
        'outputs': json.loads(func.output)
    }
    # worker hints:
    worker = {
        'script': {
            'name': '{}.R'.format(func.getId()),
            'script': script
        },
        'files': (
            'species_occurrence_dataset', 'environmental_datasets',
            'future_climate_datasets'
        )
    }
    # put everything together
    jobdesc = {
        'env': {},
        'params': job_params,
        'worker': worker,
        'result': result,
    }
    # create job
    jobtool = getUtility(IJobUtility)
    job = jobtool.new_job(
        lsid=lsid,
        toolkit=IUUID(func),
        function=func.getId(),
        type='demosdm'
    )
    # create job context object
    member = ploneapi.user.get_current()
    context = {
        # we use the site object as context
        'context': '/'.join(portal.getPhysicalPath()),
        'jobid': job.id,
        'user': {
            'id': member.getUserName(),
            'email': member.getProperty('email'),
            'fullname': member.getProperty('fullname')
        },
    }
    # all set to go build task chain now
    from org.bccvl.tasks.compute import demo_task
    from org.bccvl.tasks.plone import after_commit_task, HIGH_PRIORITY
    after_commit_task(demo_task, HIGH_PRIORITY, jobdesc, context)
    # let's hope everything works, return result
    # We don't create an experiment object, so we don't count stats here
    # let's do it manually
    getUtility(IStatsUtility).count_experiment(
        user=member.getId(),
        portal_type='demosdm',
    )
    return {
        'state': os.path.join(result['results_dir'], 'state.json'),
        'result': os.path.join(result['results_dir'], 'proj_metadata.json'),
        'jobid': job.id
    }
def submitsdm(self):
    """Create and start an SDM experiment from the request's form data.

    POST only.  Validates title/occurrence/environmental/algorithm
    parameters (collecting problems via record_error and raising
    BadRequest if any were recorded), creates an sdmexperiment object,
    starts its job and returns the experiment url/uuid plus job ids.
    """
    # TODO: catch UNAuthorized correctly and return json error
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        self.record_error('Request must be POST', 400)
        raise BadRequest('Request must be POST')
    # make sure we have the right context
    if ISiteRoot.providedBy(self.context):
        # we have been called at site root... let's traverse to default
        # experiments location
        context = self.context.restrictedTraverse(
            defaults.EXPERIMENTS_FOLDER_ID)
    else:
        # custom context.... let's use in
        context = self.context
    # parse request body
    params = self.request.form
    # validate input
    # TODO: should validate type as well..... (e.g. string has to be
    # string)
    # TODO: validate dataset and layer id's existence if possible
    props = {}
    if not params.get('title', None):
        self.record_error('Bad Request', 400, 'Missing parameter title',
                          {'parameter': 'title'})
    else:
        props['title'] = params['title']
    props['description'] = params.get('description', '')
    if not params.get('occurrence_data', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter occurrence_data',
                          {'parameter': 'occurrence_data'})
    else:
        # FIXME: should properly support source / id
        # for now only bccvl source is supported
        props['species_occurrence_dataset'] = params[
            'occurrence_data']['id']
    # FIXME: should properly support source/id for onw only bccvl source is
    # supported
    props['species_absence_dataset'] = params.get(
        'absence_data', {}).get('id', None)
    props['scale_down'] = params.get('scale_down', False)
    if not params.get('environmental_data', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter environmental_data',
                          {'parameter': 'environmental_data'})
    else:
        props['environmental_datasets'] = params['environmental_data']
    if params.get('modelling_region', ''):
        # store the supplied region geometry as a blob on the experiment
        props['modelling_region'] = NamedBlobFile(
            data=json.dumps(params['modelling_region']))
    else:
        props['modelling_region'] = None
    if not params.get('algorithms', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter algorithms',
                          {'parameter': 'algorithms'})
    else:
        portal = ploneapi.portal.get()
        props['functions'] = {}
        # FIXME: make sure we get the default values from our func object
        for algo, algo_params in params['algorithms'].items():
            if algo_params is None:
                algo_params = {}
            toolkit = portal[defaults.FUNCTIONS_FOLDER_ID][algo]
            toolkit_model = loadString(toolkit.schema)
            toolkit_schema = toolkit_model.schema
            func_props = {}
            # merge supplied values with the schema's defaults
            for field_name in toolkit_schema.names():
                field = toolkit_schema.get(field_name)
                value = algo_params.get(field_name, field.missing_value)
                if value == field.missing_value:
                    func_props[field_name] = field.default
                else:
                    func_props[field_name] = value
            props['functions'][IUUID(toolkit)] = func_props
    if self.errors:
        raise BadRequest("Validation Failed")
    # create experiment with data as form would do
    # TODO: make sure self.context is 'experiments' folder?
    from plone.dexterity.utils import createContent, addContentToContainer
    experiment = createContent("org.bccvl.content.sdmexperiment", **props)
    experiment = addContentToContainer(context, experiment)
    # TODO: check if props and algo params have been applied properly
    experiment.parameters = dict(props['functions'])
    # FIXME: need to get resolution from somewhere
    IBCCVLMetadata(experiment)['resolution'] = 'Resolution30m'
    # submit newly created experiment
    # TODO: handle background job submit .... at this stage we wouldn't
    # know the model run job ids
    # TODO: handle submit errors and other errors that may happen above?
    # generic exceptions could behandled in returnwrapper
    retval = {
        'experiment': {
            'url': experiment.absolute_url(),
            'uuid': IUUID(experiment)
        },
        'jobs': [],
    }
    jt = IExperimentJobTracker(experiment)
    msgtype, msg = jt.start_job(self.request)
    if msgtype is not None:
        retval['message'] = {
            'type': msgtype,
            'message': msg
        }
    for result in experiment.values():
        jt = IJobTracker(result)
        retval['jobs'].append(jt.get_job().id)
    return retval
def submittraits(self):
    """Create and start a species-traits experiment from the request's form data.

    POST only.  Validates species_list/title/traits_data/columns/
    algorithm parameters (collecting problems via record_error and
    raising BadRequest if any were recorded), creates a
    speciestraitsexperiment object, starts its job and returns the
    experiment url/uuid plus job ids.
    """
    # TODO: catch UNAuthorized correctly and return json error
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        self.record_error('Request must be POST', 400)
        raise BadRequest('Request must be POST')
    # make sure we have the right context
    if ISiteRoot.providedBy(self.context):
        # we have been called at site root... let's traverse to default
        # experiments location
        context = self.context.restrictedTraverse(
            defaults.EXPERIMENTS_FOLDER_ID)
    else:
        # custom context.... let's use in
        context = self.context
    # parse request body
    params = self.request.form
    # validate input
    # TODO: should validate type as well..... (e.g. string has to be
    # string)
    # TODO: validate dataset and layer id's existence if possible
    props = {}
    if params.get('species_list', None):
        props['species_list'] = params['species_list']
    else:
        self.record_error('Bad Request', 400,
                          'Missing parameter speciesList',
                          {'parameter': 'speciesList'})
    if not params.get('title', None):
        self.record_error('Bad Request', 400, 'Missing parameter title',
                          {'parameter': 'title'})
    else:
        props['title'] = params['title']
    props['description'] = params.get('description', '')
    # BUG FIX: initialize unconditionally; previously this key was only
    # created in the traits_data branch below, so the later .values()
    # lookups raised KeyError (instead of reporting validation errors)
    # whenever traits_data was missing.
    props['species_traits_dataset_params'] = {}
    if not params.get('traits_data', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter traits_data',
                          {'parameter': 'traits_data'})
    else:
        # FIXME: should properly support source / id
        # for now only bccvl source is supported
        props['species_traits_dataset'] = params[
            'traits_data']['id']
        # keep only recognised column type assignments
        for col_name, col_val in params.get("columns", {}).items():
            if col_val not in ('lat', 'lon', 'species', 'trait_con',
                               'trait_ord', 'trait_nom', 'env_var_con',
                               'env_var_cat', 'random_con', 'random_cat'):
                continue
            props['species_traits_dataset_params'][col_name] = col_val
        if not props['species_traits_dataset_params']:
            self.record_error('Bad Request', 400,
                              'Invalid values for columns',
                              {'parameter': 'columns'})
    # Check for species-level trait data i.e. species is not specified
    if 'species' not in props['species_traits_dataset_params'].values():
        props['species_list'] = []
    props['scale_down'] = params.get('scale_down', False)
    # env data is optional
    props['environmental_datasets'] = params.get('environmental_data', None)
    if not (props['environmental_datasets']
            or 'env_var_con' not in props['species_traits_dataset_params'].values()
            or 'env_var_cat' not in props['species_traits_dataset_params'].values()):
        self.record_error('Bad Request', 400,
                          'No Environmental data selected',
                          {'parameter': 'environmental_datasets'})
    if params.get('modelling_region', ''):
        props['modelling_region'] = NamedBlobFile(
            data=json.dumps(params['modelling_region']))
    else:
        props['modelling_region'] = None
    if not params.get('algorithms', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter algorithms',
                          {'parameter': 'algorithms'})
    else:
        props['algorithms_species'] = {}
        props['algorithms_diff'] = {}
        # NOTE(review): the variable names look swapped relative to the
        # vocabulary names ('species_source' feeds funcs_env) — behaviour
        # preserved; confirm the intended mapping.
        funcs_env = getUtility(
            IVocabularyFactory, 'traits_functions_species_source')(context)
        funcs_species = getUtility(
            IVocabularyFactory, 'traits_functions_diff_source')(context)
        # FIXME: make sure we get the default values from our func object
        for algo_uuid, algo_params in params['algorithms'].items():
            if algo_params is None:
                algo_params = {}
            toolkit = uuidToObject(algo_uuid)
            toolkit_model = loadString(toolkit.schema)
            toolkit_schema = toolkit_model.schema
            func_props = {}
            # merge supplied values with the schema's defaults
            for field_name in toolkit_schema.names():
                field = toolkit_schema.get(field_name)
                value = algo_params.get(field_name, field.missing_value)
                if value == field.missing_value:
                    func_props[field_name] = field.default
                else:
                    func_props[field_name] = value
            if algo_uuid in funcs_env:
                props['algorithms_species'][algo_uuid] = func_props
            elif algo_uuid in funcs_species:
                props['algorithms_diff'][algo_uuid] = func_props
            else:
                LOG.warn(
                    'Algorithm {} not in allowed list of functions'.format(toolkit.id))
        if not (props['algorithms_species'] or props['algorithms_diff']):
            # typo fixed: was 'Iinvalid'
            self.record_error('Bad Request', 400,
                              'Invalid algorithms selected',
                              {'parameter': 'algorithms'})
    if self.errors:
        raise BadRequest("Validation Failed")
    # create experiment with data as form would do
    # TODO: make sure self.context is 'experiments' folder?
    from plone.dexterity.utils import createContent, addContentToContainer
    experiment = createContent(
        "org.bccvl.content.speciestraitsexperiment", **props)
    experiment = addContentToContainer(context, experiment)
    experiment.parameters = dict(props['algorithms_species'])
    experiment.parameters.update(dict(props['algorithms_diff']))
    # FIXME: need to get resolution from somewhere
    IBCCVLMetadata(experiment)['resolution'] = 'Resolution30m'
    # submit newly created experiment
    # TODO: handle background job submit .... at this stage we wouldn't
    # know the model run job ids
    # TODO: handle submit errors and other errors that may happen above?
    # generic exceptions could behandled in returnwrapper
    retval = {
        'experiment': {
            'url': experiment.absolute_url(),
            'uuid': IUUID(experiment)
        },
        'jobs': [],
    }
    jt = IExperimentJobTracker(experiment)
    msgtype, msg = jt.start_job(self.request)
    if msgtype is not None:
        retval['message'] = {
            'type': msgtype,
            'message': msg
        }
    for result in experiment.values():
        jt = IJobTracker(result)
        retval['jobs'].append(jt.get_job().id)
    return retval
def submitcc(self):
    """Create and start a climate-change projection experiment.

    POST only.  Validates title/species_distribution_models/
    future_climate_datasets parameters (collecting problems via
    record_error and raising BadRequest if any were recorded), creates a
    projectionexperiment object, starts its job and returns the
    experiment url/uuid plus job ids.
    """
    # TODO: catch UNAuthorized correctly and return json error
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        self.record_error('Request must be POST', 400)
        raise BadRequest('Request must be POST')
    # make sure we have the right context
    if ISiteRoot.providedBy(self.context):
        # we have been called at site root... let's traverse to default
        # experiments location
        context = self.context.restrictedTraverse(
            defaults.EXPERIMENTS_FOLDER_ID)
    else:
        # custom context.... let's use in
        context = self.context
    # parse request body
    params = self.request.form
    # validate input
    # TODO: should validate type as well..... (e.g. string has to be
    # string)
    # TODO: validate dataset and layer id's existence if possible
    props = {}
    if not params.get('title', None):
        self.record_error('Bad Request', 400, 'Missing parameter title',
                          {'parameter': 'title'})
    else:
        props['title'] = params['title']
    props['description'] = params.get('description', '')
    if not params.get('species_distribution_models', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter species_distribution_models',
                          {'parameter': 'species_distribution_models'})
    else:
        props['species_distribution_models'] = params[
            'species_distribution_models']
    if not params.get('future_climate_datasets', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter future_climate_datasets',
                          {'parameter': 'future_climate_datasets'})
    else:
        props['future_climate_datasets'] = params[
            'future_climate_datasets']
    if params.get('projection_region', ''):
        # store the supplied region geometry as a blob on the experiment
        props['projection_region'] = NamedBlobFile(
            data=json.dumps(params['projection_region']))
    else:
        props['projection_region'] = None
    if self.errors:
        raise BadRequest("Validation Failed")
    # create experiment with data as form would do
    # TODO: make sure self.context is 'experiments' folder?
    from plone.dexterity.utils import createContent, addContentToContainer
    experiment = createContent("org.bccvl.content.projectionexperiment",
                               **props)
    experiment = addContentToContainer(context, experiment)
    # FIXME: need to get resolution from somewhere
    IBCCVLMetadata(experiment)['resolution'] = 'Resolution30m'
    # submit newly created experiment
    # TODO: handle background job submit .... at this stage we wouldn't
    # know the model run job ids
    # TODO: handle submit errors and other errors that may happen above?
    # generic exceptions could behandled in returnwrapper
    retval = {
        'experiment': {
            'url': experiment.absolute_url(),
            'uuid': IUUID(experiment)
        },
        'jobs': [],
    }
    jt = IExperimentJobTracker(experiment)
    msgtype, msg = jt.start_job(self.request)
    if msgtype is not None:
        retval['message'] = {
            'type': msgtype,
            'message': msg
        }
    for result in experiment.values():
        jt = IJobTracker(result)
        retval['jobs'].append(jt.get_job().id)
    return retval
def validate_proper_contenttype(request):
    """Reject requests whose Content-Type is not JSON with UTF-8 charset."""
    expected = 'application/json; charset=utf-8'
    content_type = request.getHeader('Content-Type', '').lower()
    if content_type != expected:
        raise BadRequest('Content is not of type: application/json; charset=utf-8')
def pullOccurrenceFromALA(self, lsid, taxon, dataSrc='ala', common=None):
    """Create an occurrence dataset for *taxon* and start an import job.

    ``dataSrc`` selects the destination container ('ala', 'gbif', 'aekos'
    or 'obis').  For ala/gbif/obis the remote service is queried first to
    decide whether the import yields a multi-species dataset.  Returns
    the (status, message) tuple from the job tracker.
    """
    # TODO: check permissions?
    # 1. create new dataset with taxon, lsid and common name set
    portal = getToolByName(self.context, 'portal_url').getPortalObject()
    if dataSrc == 'ala':
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][
            defaults.DATASETS_SPECIES_FOLDER_ID]['ala']
    elif dataSrc == 'gbif':
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][
            defaults.DATASETS_SPECIES_FOLDER_ID]['gbif']
    elif dataSrc == 'aekos':
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][
            defaults.DATASETS_SPECIES_FOLDER_ID]['aekos']
    elif dataSrc == 'obis':
        dscontainer = portal[defaults.DATASETS_FOLDER_ID][
            defaults.DATASETS_SPECIES_FOLDER_ID]['obis']
    else:
        raise BadRequest('Invalid data source {0}'.format(dataSrc))
    title = [taxon]
    if common:
        title.append(u"({})".format(common))
    # determine dataset type
    # 1. test if it is a multi species import
    species = set()
    if dataSrc == 'ala':
        params = [{
            'query': 'lsid:{}'.format(lsid),
            'url': 'https://biocache-ws.ala.org.au/ws'
        }]
        for query in params:
            biocache_url = '{}/occurrences/search'.format(query['url'])
            query = {
                'q': query['query'],
                'pageSize': 0,
                'limit': 2,
                'facets': 'species_guid',
                'fq': 'species_guid:*'  # skip results without species guid
            }
            res = requests.get(biocache_url, params=query)
            res = res.json()
            if res.get('facetResults'):
                # do we have some results at all?
                for guid in res['facetResults'][0]['fieldResult']:
                    species.add(guid['label'])
    elif dataSrc == 'gbif':
        # collect species keys of the genus' children
        genusChildren_url = 'https://api.gbif.org/v1/species/{}/children?offset=0&limit=40'.format(
            lsid)
        res = requests.get(genusChildren_url)
        res = res.json()
        if res.get('results'):
            for sp in res.get('results'):
                if sp.get('speciesKey'):
                    species.add(sp['speciesKey'])
    elif dataSrc == 'obis':
        # collect valid species ids of the taxon's children
        genusChildren_url = 'https://backend.iobis.org/children/{}'.format(
            lsid)
        res = requests.get(genusChildren_url)
        res = res.json()
        for sp in res:
            if sp.get('rank_name', '') != 'Species':
                continue
            if sp.get('valid_id'):
                species.add(sp['valid_id'])
    if len(species) > 1:
        portal_type = 'org.bccvl.content.multispeciesdataset'
    else:
        swiftsettings = getUtility(IRegistry).forInterface(ISwiftSettings)
        if swiftsettings.storage_url:
            portal_type = 'org.bccvl.content.remotedataset'
        else:
            portal_type = 'org.bccvl.content.dataset'
    # TODO: make sure we get a better content id that dataset-x
    title = u' '.join(title)
    ds = createContent(portal_type, title=title)
    ds.dataSource = dataSrc  # Either ALA or GBIF as source
    # TODO: add number of occurences to description
    ds.description = u' '.join(
        (title, u'imported from', unicode(dataSrc.upper())))
    ds = addContentToContainer(dscontainer, ds)
    md = IBCCVLMetadata(ds)
    # TODO: provenance ... import url?
    # FIXME: verify input parameters before adding to graph
    if IMultiSpeciesDataset.providedBy(ds):
        md['genre'] = 'DataGenreSpeciesCollection'
        md['categories'] = ['multispecies']
    else:
        md['genre'] = 'DataGenreSpeciesOccurrence'
        md['categories'] = ['occurrence']
    md['species'] = {
        'scientificName': taxon,
        'taxonID': lsid,
    }
    if common:
        md['species']['vernacularName'] = common
    IStatusMessage(self.request).add('New Dataset created', type='info')
    # 2. create and push alaimport job for dataset
    # TODO: make this named adapter
    jt = IExperimentJobTracker(ds)
    status, message = jt.start_job()
    # reindex object to make sure everything is up to date
    ds.reindexObject()
    # Job submission state notifier
    IStatusMessage(self.request).add(message, type=status)
    return (status, message)